max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
SandboxAgent/logging_helpers.py | lhoste-bell/knix | 167 | 12632538 | # Copyright 2020 The KNIX Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import process_utils
def setup_logger(sandboxid, log_filename):
logger = logging.getLogger(sandboxid)
logger.setLevel(logging.DEBUG)
# File handler
hdlr = logging.FileHandler(log_filename)
hdlr.setLevel(logging.DEBUG)
hdlr.formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] %(message)s")
logger.addHandler(hdlr)
# StreamHandler
shdlr = logging.StreamHandler()
shdlr.setLevel(logging.DEBUG)
shdlr.formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] %(message)s")
logger.addHandler(shdlr)
global print
print = logger.info
return logger
def setup_fluentbit_and_elasticsearch_index(logger, fluentbit_folder, elasticsearch_address, index_wf, index_fe):
return setup_fluentbit(logger, fluentbit_folder, elasticsearch_address, index_wf, index_fe)
def setup_fluentbit(logger, fluentbit_folder, elasticsearch_address, index_wf, index_fe):
elasticsearch = elasticsearch_address.strip().split(':')
if elasticsearch is not None and len(elasticsearch) > 1 and elasticsearch[0] is not None and elasticsearch[0] != '' and elasticsearch[1] is not None and elasticsearch[1] != '':
# Generate fluent-bit configuration file
try:
gen_fluentbit_config(logger, fluentbit_folder, elasticsearch[0], elasticsearch[1], index_wf, index_fe)
# Launch fluent-bit
return launch_fluentbit(logger, fluentbit_folder)
except Exception as exc:
logger.exception("Unable to launch fluent-bit: %s", str(exc))
            return None, None
def gen_fluentbit_config(logger, fluentbit_folder, elasticsearch_host, elasticsearch_port, index_wf, index_fe):
text = ''
input_file = fluentbit_folder + '/conf/fluent-bit.conf.j2'
output_file = fluentbit_folder + '/conf/fluent-bit.conf'
# read template file
logger.info("Reading: %s", input_file)
with open(input_file) as f:
text = f.read()
# replace templates with real values
text = re.sub('{{ ELASTICSEARCH_HOST }}', elasticsearch_host, text)
text = re.sub('{{ ELASTICSEARCH_PORT }}', elasticsearch_port, text)
text = re.sub('{{ INDEX_NAME_WF }}', index_wf, text)
text = re.sub('{{ INDEX_NAME_FE }}', index_fe, text)
# write config file
with open(output_file, 'w') as f:
f.write(text)
logger.info("Written: %s", output_file)
def launch_fluentbit(logger, fluentbit_folder):
cmd = fluentbit_folder + '/bin/fluent-bit'
cmd = cmd + ' -c ' + fluentbit_folder + '/conf/fluent-bit.conf'
logger.info("Launching fluent-bit via cmd: %s", cmd)
command_args_map = {}
command_args_map["command"] = cmd
command_args_map["wait_output"] = True
error, process = process_utils.run_command(cmd, logger, wait_output=True)
if error is None:
logger.info("fluent-bit launched with pid: %s", str(process.pid))
return process, command_args_map
    return None, None
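if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): the fluent-bit folder, the
    # Elasticsearch address and the index names below are placeholders, not
    # values taken from the original deployment.
    demo_logger = setup_logger("sandbox-demo", "/tmp/sandbox-demo.log")
    setup_fluentbit_and_elasticsearch_index(
        demo_logger, "/opt/fluent-bit", "localhost:9200", "wf-index", "fe-index")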
|
tests/test_modeling_albert.py | legacyai/tf-transformers | 116 | 12632570 | # coding=utf-8
# Copyright 2021 TF-Transformers Authors.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test Albert Models"""
import unittest
import tensorflow as tf
from absl import logging
from transformers import AlbertTokenizerFast as Tokenizer
from tf_transformers.models import AlbertModel as Model
logging.get_absl_logger().name = "albert_testing"
MODEL_NAME = 'albert-base-v2'
class ModelTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        print("--------------------setUP--------------------------------------")
        cls.model = Model.from_pretrained(MODEL_NAME)
        # cls.model_ar = Model.from_pretrained(MODEL_NAME, use_auto_regressive=True)
        cls.tokenizer = Tokenizer.from_pretrained(MODEL_NAME)
# @unittest.skip
def test_tf_conversion(self):
import shutil
try:
shutil.rmtree("/tmp/tf_transformers_cache/{}".format(MODEL_NAME))
        except OSError:
            # The cache directory may not exist yet.
            pass
model = Model.from_pretrained(MODEL_NAME, convert_fn_type='tf')
logging.info("Test: TF Conversion. ✅")
# @unittest.skip
def test_pt_conversion(self):
import shutil
try:
shutil.rmtree("/tmp/tf_transformers_cache/{}".format(MODEL_NAME))
        except OSError:
            # The cache directory may not exist yet.
            pass
model = Model.from_pretrained(MODEL_NAME, convert_fn_type='pt')
logging.info("Test: PT Conversion. ✅")
@unittest.skip("Albert has to be fine tuned for this")
def test_auto_regressive(self):
"""Test Text Generation using Non Cache and Cached"""
text = "<NAME> is one of the finest"
inputs_tf = self.tokenizer(text, return_tensors="tf")
inputs = {}
inputs["input_ids"] = inputs_tf["input_ids"]
predictions_non_auto_regressive = []
predictions_prob_non_auto_regressive = []
for i in range(10):
outputs = self.model(inputs)
predicted_ids = tf.cast(tf.expand_dims(tf.argmax(outputs["last_token_logits"], axis=1), 1), tf.int32)
inputs["input_ids"] = tf.concat([inputs["input_ids"], predicted_ids], axis=1)
predictions_non_auto_regressive.append(predicted_ids)
predictions_prob_non_auto_regressive.append(
tf.expand_dims(tf.reduce_max(outputs["last_token_logits"], axis=1), 1)
)
predictions_non_auto_regressive = tf.concat(predictions_non_auto_regressive, axis=1)
predictions_prob_non_auto_regressive = tf.concat(predictions_prob_non_auto_regressive, axis=1)
# -------------------------------------------------------------------------------------------- # noqa
# Cached
inputs_tf = self.tokenizer(text, return_tensors="tf")
inputs = {}
inputs["input_ids"] = inputs_tf["input_ids"]
seq_length = tf.shape(inputs["input_ids"])[1]
batch_size = tf.shape(inputs["input_ids"])[0]
inputs["all_cache_key"] = tf.zeros((12, batch_size, 12, seq_length, 64))
inputs["all_cache_value"] = tf.zeros((12, batch_size, 12, seq_length, 64))
inputs["past_length"] = tf.zeros(shape=(1, batch_size), dtype=tf.int32)
predictions_auto_regressive = []
predictions_prob_auto_regressive = []
past_lengths = []
for i in range(10):
outputs = self.model_ar(inputs)
predicted_ids = tf.cast(tf.expand_dims(tf.argmax(outputs["last_token_logits"], axis=1), 1), tf.int32)
inputs["input_ids"] = predicted_ids
inputs["all_cache_key"] = outputs["all_cache_key"]
inputs["all_cache_value"] = outputs["all_cache_value"]
inputs["past_length"] = outputs["past_length"]
past_lengths.append(inputs["past_length"])
predictions_auto_regressive.append(predicted_ids)
predictions_prob_auto_regressive.append(
tf.expand_dims(tf.reduce_max(outputs["last_token_logits"], axis=1), 1)
)
predictions_auto_regressive = tf.concat(predictions_auto_regressive, axis=1)
predictions_prob_auto_regressive = tf.concat(predictions_prob_auto_regressive, axis=1)
# Assert predictions
tf.debugging.assert_near(predictions_prob_auto_regressive, predictions_prob_non_auto_regressive, rtol=1.0)
tf.debugging.assert_equal(predictions_auto_regressive, predictions_non_auto_regressive)
logging.info("Test: Successful Auto Regressive Encoder. ✅")
@unittest.skip("Albert has to be fine tuned for this")
def test_auto_regressive_batch(self):
"""Test Batch Text Generation Auto Regressive"""
text = ['<NAME> is one of the finest', 'I love stars because']
# -1 is important
input_ids = tf.ragged.constant(self.tokenizer(text)["input_ids"]).to_tensor(-1)
inputs = {}
inputs["input_ids"] = input_ids
seq_length = tf.shape(inputs["input_ids"])[1]
batch_size = tf.shape(inputs["input_ids"])[0]
inputs["all_cache_key"] = tf.zeros((12, batch_size, 12, seq_length, 64))
inputs["all_cache_value"] = tf.zeros((12, batch_size, 12, seq_length, 64))
inputs["past_length"] = tf.zeros(shape=(1, batch_size), dtype=tf.int32)
predictions_auto_regressive = []
predictions_prob_auto_regressive = []
past_lengths = []
for i in range(10):
outputs = self.model_ar(inputs)
predicted_ids = tf.cast(tf.expand_dims(tf.argmax(outputs["last_token_logits"], axis=1), 1), tf.int32)
if i == 0:
masks = tf.cast(tf.not_equal(input_ids, -1), tf.float32)
masks = tf.reshape(
masks,
(1, batch_size, 1, seq_length, 1),
)
outputs["all_cache_key"] = outputs["all_cache_key"] * masks
outputs["all_cache_value"] = outputs["all_cache_value"] * masks
inputs["input_ids"] = predicted_ids
inputs["all_cache_key"] = outputs["all_cache_key"]
inputs["all_cache_value"] = outputs["all_cache_value"]
inputs["past_length"] = outputs["past_length"]
past_lengths.append(inputs["past_length"])
predictions_auto_regressive.append(predicted_ids)
predictions_prob_auto_regressive.append(
tf.expand_dims(tf.reduce_max(outputs["last_token_logits"], axis=1), 1)
)
predictions_auto_regressive = tf.concat(predictions_auto_regressive, axis=1)
predictions_prob_auto_regressive = tf.concat(predictions_prob_auto_regressive, axis=1)
expected_prediction = [
[1938, 287, 262, 995, 13, 679, 318, 257, 845, 922],
[484, 821, 523, 881, 517, 621, 655, 257, 3491, 13],
]
expected_probs = [
[
-110.00343322753906,
-84.10372161865234,
-60.758541107177734,
-94.87692260742188,
-72.66572570800781,
-124.67924499511719,
-100.1087417602539,
-103.07884216308594,
-108.038330078125,
-108.75567626953125,
],
[
-92.4664535522461,
-122.232177734375,
-114.12687683105469,
-110.21340942382812,
-106.74520111083984,
-108.79459381103516,
-89.76094055175781,
-84.4063720703125,
-102.25302124023438,
-78.72990417480469,
],
]
tf.debugging.assert_equal(predictions_auto_regressive.numpy().tolist(), expected_prediction)
tf.debugging.assert_near(predictions_prob_auto_regressive.numpy().tolist(), expected_probs)
logging.info("Test: Successful Batch Auto Regressive Encoder. ✅")
@unittest.skip("Albert has to be fine tuned for this")
def test_auto_regressive_saved_model(self):
"""Test Auto Regressive using Decoder Saved Model"""
import shutil
import tempfile
from tf_transformers.text import TextDecoder
text = ['<NAME> is one of the finest', 'I love stars because']
dirpath = tempfile.mkdtemp()
saved_model_dir = "{}/model_pb".format(dirpath)
self.model_ar.save_as_serialize_module(saved_model_dir, overwrite=True)
# Load saved model .
loaded = tf.saved_model.load(saved_model_dir)
decoder = TextDecoder(model=loaded)
# Pad -1
input_ids = tf.ragged.constant(self.tokenizer(text)["input_ids"]).to_tensor(-1)
inputs = {}
inputs["input_ids"] = input_ids
decoder_results = decoder.decode(inputs, mode="greedy", max_iterations=10, eos_id=-100)
predicted_ids = decoder_results["predicted_ids"].numpy().tolist()
expected_ids = [
[[1938, 287, 262, 995, 13, 679, 318, 257, 845, 922]],
[[484, 821, 523, 881, 517, 621, 655, 257, 3491, 13]],
]
tf.debugging.assert_equal(predicted_ids, expected_ids)
shutil.rmtree(dirpath)
logging.info("Test: Successful Batch Auto Regressive Encoder Saved Model. ✅")
@unittest.skip("Albert has to be fine tuned for this")
def test_auto_regressive_keras_model(self):
"""Test Auto Regressive using Decoder Keras Model"""
from tf_transformers.text import TextDecoder
text = ['<NAME> is one of the finest', 'I love stars because']
decoder = TextDecoder(model=self.model_ar)
# Pad -1
input_ids = tf.ragged.constant(self.tokenizer(text)["input_ids"]).to_tensor(-1)
inputs = {}
inputs["input_ids"] = input_ids
decoder_results = decoder.decode(inputs, mode="greedy", max_iterations=10, eos_id=-100)
predicted_ids = decoder_results["predicted_ids"].numpy().tolist()
expected_ids = [
[[1938, 287, 262, 995, 13, 679, 318, 257, 845, 922]],
[[484, 821, 523, 881, 517, 621, 655, 257, 3491, 13]],
]
tf.debugging.assert_equal(predicted_ids, expected_ids)
logging.info("Test: Successful Batch Auto Regressive Encoder Keras Model. ✅")
# def test_auto_regressive_encoder_decoder():
# from transformers import GPT2Tokenizer
# tokenizer = GPT2Tokenizer.from_pretrained(model_name)
# from tf_transformers.models import EncoderDecoder
# # Without Cache
# encoder_layer, encoder_config = GPT2Model.get_model(
# model_name=model_name, mask_mode="user_defined", return_layer=True
# )
# decoder_layer, decoder_config = GPT2Model.get_model(model_name=model_name, return_layer=True, use_decoder=True)
# # Decoder layer wont load from checkpoint
# # As the graph is different
# # Get decoder variables index and name as dict
# # Assign encoder weights to decoder wherever it matches variable name
# num_assigned = 0
# decoder_var = {var.name: index for index, var in enumerate(decoder_layer.variables)}
# for encoder_var in encoder_layer.variables:
# if encoder_var.name in decoder_var:
# index = decoder_var[encoder_var.name]
# decoder_layer.variables[index].assign(encoder_var)
# num_assigned += 1
# model = EncoderDecoder(encoder_layer, decoder_layer, share_embeddings=True, share_encoder=True)
# # Check encoder decoder generation without caching
# text = "<NAME> is one of the finest"
# encoder_input_ids = tf.expand_dims(tf.ragged.constant(tokenizer(text)["input_ids"]), 0)
# encoder_input_mask = tf.ones_like(encoder_input_ids)
# decoder_input_ids = tf.constant([[1]])
# inputs = {}
# inputs["encoder_input_ids"] = encoder_input_ids
# inputs["encoder_input_mask"] = encoder_input_mask
# inputs["decoder_input_ids"] = decoder_input_ids
# predictions_non_auto_regressive = []
# predictions_prob_non_auto_regressive = []
# for i in range(10):
# outputs = model(inputs)
# predicted_ids = tf.cast(tf.expand_dims(tf.argmax(outputs["last_token_logits"], axis=1), 1), tf.int32)
# inputs["encoder_input_ids"] = tf.concat([inputs["encoder_input_ids"], predicted_ids], axis=1)
# inputs["encoder_input_mask"] = tf.ones_like(inputs["encoder_input_ids"])
# predictions_non_auto_regressive.append(predicted_ids)
# predictions_prob_non_auto_regressive.append(
# tf.expand_dims(tf.reduce_max(outputs["last_token_logits"], axis=1), 1)
# )
# predictions_non_auto_regressive = tf.concat(predictions_non_auto_regressive, axis=1)
# predictions_prob_non_auto_regressive = tf.concat(predictions_prob_non_auto_regressive, axis=1)
# # Cache
# encoder_layer, encoder_config = GPT2Model.get_model(
# model_name=model_name, mask_mode="user_defined", return_layer=True
# )
# decoder_layer, decoder_config = GPT2Model.get_model(
# model_name=model_name, return_layer=True, use_decoder=True, use_auto_regressive=True
# )
# # Decoder layer wont load from checkpoint
# # As the graph is different
# # Get decoder variables index and name as dict
# # Assign encoder weights to decoder wherever it matches variable name
# num_assigned = 0
# decoder_var = {var.name: index for index, var in enumerate(decoder_layer.variables)}
# for encoder_var in encoder_layer.variables:
# if encoder_var.name in decoder_var:
# index = decoder_var[encoder_var.name]
# decoder_layer.variables[index].assign(encoder_var)
# num_assigned += 1
# model = EncoderDecoder(encoder_layer, decoder_layer, share_embeddings=True, share_encoder=True)
# # Check encoder decoder generation caching
# encoder_hidden_dim = encoder_config["embedding_size"]
# num_hidden_layers = decoder_config["num_hidden_layers"]
# num_attention_heads = decoder_config["num_attention_heads"]
# attention_head_size = decoder_config["attention_head_size"]
# text = "<NAME> is one of the finest"
# encoder_input_ids = tf.expand_dims(tf.ragged.constant(tokenizer(text)["input_ids"]), 0)
# encoder_input_mask = tf.ones_like(encoder_input_ids)
# decoder_input_ids = tf.constant([[1]])
# batch_size = tf.shape(encoder_input_ids)[0]
# seq_length = tf.shape(encoder_input_ids)[1]
# encoder_hidden_states = tf.zeros((batch_size, seq_length, 768))
# decoder_all_cache_key = tf.zeros(
# (num_hidden_layers, batch_size, num_attention_heads, seq_length, attention_head_size)
# )
# decoder_all_cahce_value = tf.zeros(
# (num_hidden_layers, batch_size, num_attention_heads, seq_length, attention_head_size)
# )
# inputs = {}
# inputs["encoder_input_ids"] = encoder_input_ids
# inputs["encoder_input_mask"] = encoder_input_mask
# inputs["decoder_input_ids"] = decoder_input_ids
# inputs["encoder_hidden_states"] = encoder_hidden_states
# inputs["decoder_all_cache_key"] = decoder_all_cache_key
# inputs["decoder_all_cache_value"] = decoder_all_cahce_value
# predictions_auto_regressive = []
# predictions_prob_auto_regressive = []
# for i in range(10):
# outputs = model(inputs)
# predicted_ids = tf.cast(tf.expand_dims(tf.argmax(outputs["last_token_logits"], axis=1), 1), tf.int32)
# inputs["input_ids"] = predicted_ids
# inputs["decoder_all_cache_key"] = outputs["decoder_all_cache_key"]
# inputs["decoder_all_cache_value"] = outputs["decoder_all_cache_value"]
# inputs["encoder_hidden_states"] = outputs["encoder_hidden_states"]
# predictions_auto_regressive.append(predicted_ids)
# predictions_prob_auto_regressive.append(
# tf.expand_dims(tf.reduce_max(outputs["last_token_logits"], axis=1), 1)
# )
# predictions_auto_regressive = tf.concat(predictions_auto_regressive, axis=1)
# predictions_prob_auto_regressive = tf.concat(predictions_prob_auto_regressive, axis=1)
# tf.assert_equal(predictions_auto_regressive, predictions_non_auto_regressive)
# logging.info("Test: Successful Auto Regressive Encoder Decoder.")
if __name__ == '__main__':
unittest.main()
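# To run a single test from this module (illustrative):
#   python -m pytest tests/test_modeling_albert.py::ModelTest::test_tf_conversion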
|
test/test_utils/pytest_cache.py | haohanchen-yagao/deep-learning-containers | 383 | 12632573 | import json
import os
import logging
import sys
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
LOGGER.addHandler(logging.StreamHandler(sys.stdout))
class PytestCache:
"""
A handler for pytest cache
    Contains methods for uploading/downloading pytest cache files to/from ec2 instances and s3 buckets.
"""
def __init__(self, s3_client, account_id):
self.s3_client = s3_client
self.bucket_name = f"dlc-test-execution-results-{account_id}"
def download_pytest_cache_from_s3_to_local(self,
current_dir,
commit_id,
framework,
version,
build_context,
test_type):
"""
Download pytest cache file from directory in s3 to local box
:param current_dir: directory where the script is executed. .pytest_cache directory will be created in this
local directory.
Following parameters are required to create a path to cache file in s3:
:param commit_id
:param framework
:param version
:param build_context
:param test_type
"""
local_file_dir = os.path.join(current_dir, ".pytest_cache", "v", "cache")
local_file_path = os.path.join(local_file_dir, "lastfailed")
s3_file_dir = self.__make_s3_path(commit_id, framework, version, build_context, test_type)
s3_file_path = os.path.join(s3_file_dir, "lastfailed")
if os.path.exists(local_file_path):
os.remove(local_file_path)
else:
os.makedirs(local_file_dir, exist_ok=True)
self.__download_cache_from_s3(s3_file_path, local_file_path)
def download_pytest_cache_from_s3_to_ec2(self,
ec2_connection,
path, commit_id,
framework,
version,
build_context,
test_type):
"""
        Copy pytest cache file from a directory in s3 to an ec2 instance. The .pytest_cache directory will be created
        in the ec2 directory given by :param path:.
        Following parameters are required to create a path to the cache file in s3:
        :param path: directory on the ec2 instance
:param commit_id
:param framework
:param version
:param build_context
:param test_type
"""
local_file_dir = os.path.join(path, ".pytest_cache", "v", "cache")
local_file_path = os.path.join(local_file_dir, "lastfailed")
s3_file_dir = self.__make_s3_path(commit_id, framework, version, build_context, test_type)
s3_file_path = os.path.join(s3_file_dir, "lastfailed")
self.__delete_file_on_ec2(ec2_connection, local_file_path)
self.__download_cache_from_s3(s3_file_path, "lastfailed")
self.__upload_cache_to_ec2(ec2_connection, "lastfailed", local_file_dir)
def upload_pytest_cache_from_ec2_to_s3(self,
ec2_connection,
path,
commit_id,
framework,
version,
build_context,
test_type):
"""
        Copy pytest cache file from an ec2 instance to a directory in s3. The .pytest_cache directory will be copied
        from the ec2 directory given by :param path: to the s3 directory generated from the parameters below.
        Following parameters are required to create a path to the cache file in s3:
        :param path: directory on the ec2 instance
:param commit_id
:param framework
:param version
:param build_context
:param test_type
"""
ec2_dir = os.path.join(path, ".pytest_cache", "v", "cache")
ec2_file_path = os.path.join(ec2_dir, "lastfailed")
s3_file_dir = self.__make_s3_path(commit_id, framework, version, build_context, test_type)
s3_file_path = os.path.join(s3_file_dir, "lastfailed")
        # Since we run tests in parallel, files from the latest executions would overwrite the existing file.
        # So put the latest file into tmp, merge it into the local lastfailed and upload the result to s3.
        # At the end of the current execution there will be a full file in s3.
self.__download_cache_from_ec2(ec2_connection, ec2_file_path, "tmp")
self.__merge_2_execution_caches_and_save("tmp", "lastfailed", "lastfailed")
self.__upload_cache_to_s3("lastfailed", s3_file_path)
def upload_pytest_cache_from_local_to_s3(self,
current_dir,
commit_id,
framework,
version,
build_context,
test_type):
"""
        Copy pytest cache file from the local box to a directory in s3. The .pytest_cache directory will be copied
        from the local directory given by :param current_dir: to the s3 directory generated from the parameters below.
        Following parameters are required to create a path to the cache file in s3:
        :param current_dir: local directory where the script is executed
:param commit_id
:param framework
:param version
:param build_context
:param test_type
"""
local_file_dir = os.path.join(current_dir, ".pytest_cache", "v", "cache")
local_file_path = os.path.join(local_file_dir, "lastfailed")
s3_file_dir = self.__make_s3_path(commit_id, framework, version, build_context, test_type)
s3_file_path = os.path.join(s3_file_dir, "lastfailed")
self.__upload_cache_to_s3(local_file_path, s3_file_path)
def __make_s3_path(self, commit_id, framework, version, build_context, test_type):
return os.path.join(commit_id, framework, version, build_context, test_type)
def __upload_cache_to_s3(self, local_file, s3_file):
if os.path.exists(f"{local_file}"):
LOGGER.info(f"Uploading current execution result to {s3_file}")
try:
self.s3_client.upload_file(local_file,
self.bucket_name,
s3_file)
LOGGER.info(f"Cache file uploaded")
except Exception as e:
LOGGER.info(f"Cache file wasn't uploaded because of error: {e}")
else:
LOGGER.info(f"No cache file was created")
def __merge_2_execution_caches_and_save(self, cache_file_1, cache_file_2, save_to):
"""
        Merges 2 JSON objects into one and saves the result to disk.
:param cache_file_1
:param cache_file_2
:param save_to: filename where to save result JSON
"""
if self.__is_file_exist_and_not_empty(cache_file_1):
with open(cache_file_1) as tmp1:
json1 = json.load(tmp1)
else:
json1 = {}
if self.__is_file_exist_and_not_empty(cache_file_2):
with open(cache_file_2) as tmp2:
json2 = json.load(tmp2)
else:
json2 = {}
merged_json = {**json1, **json2}
if len(merged_json) != 0:
with open(save_to, "w") as f:
f.write(json.dumps(merged_json))
def __is_file_exist_and_not_empty(self, file_path):
return os.path.exists(file_path) and os.stat(file_path).st_size != 0
def __download_cache_from_s3(self, s3_file, local_file):
LOGGER.info(f"Downloading previous executions cache: {s3_file}")
try:
self.s3_client.download_file(self.bucket_name,
f"{s3_file}",
f"{local_file}")
except Exception as e:
LOGGER.info(f"Cache file wasn't downloaded: {e}")
def __upload_cache_to_ec2(self, ec2_connection, local_file, ec2_file):
try:
ec2_connection.put(local_file, f"{ec2_file}")
except Exception as e:
LOGGER.info(f"Cache file wasn't uploaded: {e}")
def __download_cache_from_ec2(self, ec2_connection, ec2_file, local_file):
LOGGER.info(f"Downloading executions cache from ec2 instance")
try:
ec2_connection.get(f"{ec2_file}", local_file)
except Exception as e:
LOGGER.info(f"Cache file wasn't downloaded: {e}")
def __delete_file_on_ec2(self, ec2_connection, ec2_file):
ec2_connection.run(f"rm -f {ec2_file}")
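if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): the account id, commit id, framework,
    # version, build context and test type below are placeholders; a real run needs
    # AWS credentials and the corresponding dlc-test-execution-results bucket.
    import boto3
    cache = PytestCache(boto3.client("s3"), "123456789012")
    cache.download_pytest_cache_from_s3_to_local(
        os.getcwd(), "abc123commit", "pytorch", "1.13.1", "PR", "ec2")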
|
kipoi/__init__.py | bfclarke/kipoi | 213 | 12632578 |
from __future__ import absolute_import
__author__ = '<NAME>'
__email__ = '<EMAIL>'
from ._version import __version__
# available modules
from . import config
from . import pipeline
from kipoi_utils import utils # backward compat
from kipoi_utils import data_utils # backward compat
from . import sources
from . import remote # backward compat
from . import model
from . import data
from . import external
# import kipoi_conda as conda # backward compat
# # monkey patch for backward compatibility
# from . import env_db as _env_cb
# conda.env_db = env_db
from . import specs
from . import components # backward compat
from . import readers
from . import writers
from . import plugin
from . import env_db
# shortcuts
from .model import get_model
from .data import get_dataloader_factory
from .sources import get_model_descr, get_dataloader_descr
from .pipeline import install_model_requirements, install_dataloader_requirements
from .config import get_source, list_sources, list_models, list_dataloaders
from . import cli
from .plugin import list_plugins
# from .config import model_sources as sources
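# Usage sketch (illustrative; the model name is a placeholder and must exist in a
# configured Kipoi model source):
#   import kipoi
#   model = kipoi.get_model("example_group/example_model")
#   dataloader_factory = kipoi.get_dataloader_factory("example_group/example_model")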
|
tests/issues/gh182.py | aureooms-contrib/ics-py | 312 | 12632579 |
from datetime import datetime
import pytest
from dateutil.tz import gettz
from ics import Todo
from ics.contentline import ContentLine, lines_to_container
def test_issue_182_seconds_ignored():
todo = Todo.from_container(lines_to_container([
"BEGIN:VTODO",
"DTSTART;TZID=Europe/Berlin:20180219T120005",
"COMPLETED;TZID=Europe/Brussels:20180418T150001",
"END:VTODO"
]))
assert todo.begin == datetime(2018, 2, 19, 12, 0, 5, tzinfo=gettz("Europe/Berlin"))
assert todo.completed == datetime(2018, 4, 18, 15, 0, 1, tzinfo=gettz("Europe/Brussels"))
with pytest.raises(ValueError):
Todo.from_container(lines_to_container([
"BEGIN:VTODO",
"DTSTART;TZID=Europe/Berlin:2018-02-19 12:00:05",
"END:VTODO"
]))
container = lines_to_container([
"BEGIN:VTODO",
"COMPLETED:;TZID=Europe/Brussels:20180418T150001",
# ^ this : breaks parsing
"END:VTODO"
])
assert container[0] == ContentLine("COMPLETED", value=";TZID=Europe/Brussels:20180418T150001")
pytest.raises(ValueError, Todo.from_container, container)
|
sdk/keyvault/azure-keyvault-secrets/azure/keyvault/secrets/_generated/v7_3_preview/models/_key_vault_client_enums.py | rsdoherty/azure-sdk-for-python | 2,728 | 12632592 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class DeletionRecoveryLevel(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Reflects the deletion recovery level currently in effect for secrets in the current vault. If
it contains 'Purgeable', the secret can be permanently deleted by a privileged user; otherwise,
only the system can purge the secret, at the end of the retention interval.
"""
#: Denotes a vault state in which deletion is an irreversible operation, without the possibility
#: for recovery. This level corresponds to no protection being available against a Delete
#: operation; the data is irretrievably lost upon accepting a Delete operation at the entity level
#: or higher (vault, resource group, subscription etc.).
PURGEABLE = "Purgeable"
#: Denotes a vault state in which deletion is recoverable, and which also permits immediate and
#: permanent deletion (i.e. purge). This level guarantees the recoverability of the deleted entity
#: during the retention interval (90 days), unless a Purge operation is requested, or the
    #: subscription is cancelled. System will permanently delete it after 90 days, if not recovered.
RECOVERABLE_PURGEABLE = "Recoverable+Purgeable"
#: Denotes a vault state in which deletion is recoverable without the possibility for immediate
#: and permanent deletion (i.e. purge). This level guarantees the recoverability of the deleted
#: entity during the retention interval(90 days) and while the subscription is still available.
    #: System will permanently delete it after 90 days, if not recovered.
RECOVERABLE = "Recoverable"
#: Denotes a vault and subscription state in which deletion is recoverable within retention
#: interval (90 days), immediate and permanent deletion (i.e. purge) is not permitted, and in
    #: which the subscription itself cannot be permanently canceled. System will permanently delete it
#: after 90 days, if not recovered.
RECOVERABLE_PROTECTED_SUBSCRIPTION = "Recoverable+ProtectedSubscription"
#: Denotes a vault state in which deletion is recoverable, and which also permits immediate and
#: permanent deletion (i.e. purge when 7<= SoftDeleteRetentionInDays < 90). This level guarantees
#: the recoverability of the deleted entity during the retention interval, unless a Purge
#: operation is requested, or the subscription is cancelled.
CUSTOMIZED_RECOVERABLE_PURGEABLE = "CustomizedRecoverable+Purgeable"
#: Denotes a vault state in which deletion is recoverable without the possibility for immediate
#: and permanent deletion (i.e. purge when 7<= SoftDeleteRetentionInDays < 90).This level
#: guarantees the recoverability of the deleted entity during the retention interval and while the
#: subscription is still available.
CUSTOMIZED_RECOVERABLE = "CustomizedRecoverable"
#: Denotes a vault and subscription state in which deletion is recoverable, immediate and
#: permanent deletion (i.e. purge) is not permitted, and in which the subscription itself cannot
#: be permanently canceled when 7<= SoftDeleteRetentionInDays < 90. This level guarantees the
#: recoverability of the deleted entity during the retention interval, and also reflects the fact
#: that the subscription itself cannot be cancelled.
CUSTOMIZED_RECOVERABLE_PROTECTED_SUBSCRIPTION = "CustomizedRecoverable+ProtectedSubscription"
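# Usage sketch (illustrative): the _CaseInsensitiveEnumMeta metaclass makes member
# lookup case-insensitive, e.g.
#   DeletionRecoveryLevel["purgeable"] is DeletionRecoveryLevel.PURGEABLE   # True
#   DeletionRecoveryLevel.recoverable is DeletionRecoveryLevel.RECOVERABLE  # True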
|
torchdyn/models/energy.py | iisabeller/torchdyn | 825 | 12632600 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import torch
from torch import Tensor
import torch.nn as nn
from torch.autograd import grad
from torch.autograd.functional import hessian, jacobian
class ConservativeLinearSNF(nn.Module):
def __init__(self, energy, J):
"""Stable Neural Flows: https://arxiv.org/abs/2003.08063
A generalization of Hamiltonian Neural Networks and other energy-based parametrization of Neural ODEs
Conservative version with energy preservation. Input assumed to be of dimensions `batch, dim`
Args:
energy: function parametrizing the energy.
J: network parametrizing the skew-symmetric interconnection matrix
"""
super().__init__()
self.energy = energy
self.J = J
def forward(self, x: Tensor):
with torch.set_grad_enabled(True):
self.n = x.shape[1] // 2
x = x.requires_grad_(True)
            dHdx = torch.autograd.grad(self.energy(x).sum(), x, create_graph=True)[0]
            dHdx = torch.einsum('ijk, ij -> ik', self._generate_skew(x), dHdx)
return dHdx
    def _generate_skew(self, x):
        # Build a batch of skew-symmetric interconnection matrices of shape (batch, dim, dim).
        M = self.J(x).reshape(-1, x.shape[1], x.shape[1])
        return (M - M.transpose(1, 2)) / 2
class GNF(nn.Module):
def __init__(self, energy:nn.Module):
"""Gradient Neural Flows version of SNFs: https://arxiv.org/abs/2003.08063
Args:
energy (nn.Module): function parametrizing the energy.
"""
super().__init__()
self.energy = energy
def forward(self, x):
with torch.set_grad_enabled(True):
x = x.requires_grad_(True)
eps = self.energy(x).sum()
out = -torch.autograd.grad(eps, x, allow_unused=False, create_graph=True)[0]
return out
class HNN(nn.Module):
def __init__(self, net:nn.Module):
"""Hamiltonian Neural ODE
Args:
net (nn.Module): function parametrizing the vector field.
"""
super().__init__()
self.net = net
def forward(self, x):
with torch.set_grad_enabled(True):
n = x.shape[1] // 2
x = x.requires_grad_(True)
gradH = grad(self.net(x).sum(), x, create_graph=True)[0]
return torch.cat([gradH[:, n:], -gradH[:, :n]], 1).to(x)
class LNN(nn.Module):
def __init__(self, net):
"""Lagrangian Neural Network.
Args:
net (nn.Module)
Notes:
LNNs are currently quite slow. Improvements will be made whenever `functorch` is either merged upstream or included
as a dependency.
"""
super().__init__()
self.net = net
def forward(self, x):
self.n = n = x.shape[1]//2
bs = x.shape[0]
x = x.requires_grad_(True)
qqd_batch = tuple(x[i, :] for i in range(bs))
jac = tuple(map(partial(jacobian, self._lagrangian, create_graph=True), qqd_batch))
hess = tuple(map(partial(hessian, self._lagrangian, create_graph=True), qqd_batch))
qdd_batch = tuple(map(self._qdd, zip(jac, hess, qqd_batch)))
qd, qdd = x[:, n:], torch.cat([qdd[None] for qdd in qdd_batch])
return torch.cat([qd, qdd], 1)
def _lagrangian(self, qqd):
return self.net(qqd).sum()
def _qdd(self, inp):
n = self.n ; jac, hess, qqd = inp
return hess[n:, n:].pinverse()@(jac[:n] - hess[n:, :n]@qqd[n:])
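if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): a small MLP Hamiltonian on a
    # 2D phase space (q, p); shapes follow the `batch, dim` convention above.
    net = nn.Sequential(nn.Linear(2, 64), nn.Tanh(), nn.Linear(64, 1))
    vector_field = HNN(net)
    x0 = torch.randn(8, 2)
    print(vector_field(x0).shape)  # torch.Size([8, 2])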
|
redun/handle.py | cclauss/redun | 303 | 12632616 |
from typing import Any, Optional, Tuple, Type
from redun.hashing import hash_struct
from redun.value import Value, get_type_registry
def get_handle_class(handle_class_name: str) -> Type["Handle"]:
"""
Returns a Handle class from the TypeRegistry.
"""
klass = get_type_registry().parse_type_name(handle_class_name)
return klass
def get_fullname(namespace: Optional[str], name: str) -> str:
"""
Constructs a fullname from a namespace and a name.
"""
if namespace:
return namespace + "." + name
else:
return name
class HandleInfo:
"""
Handle state information that is stored under `handle.__handle__`.
"""
def __init__(
self,
name: str,
args: Tuple,
kwargs: dict,
class_name: str,
namespace: Optional[str] = None,
call_hash: str = "",
hash: Optional[str] = None,
key: str = "",
):
self.name = name
self.namespace = namespace
self.fullname = get_fullname(self.namespace, self.name)
self.args = args
self.kwargs = kwargs
self.class_name = class_name
self.call_hash = call_hash
self.key = key
self.hash = hash or self.get_hash()
# Cache of recent parent handles used by redun to aid recording.
self.is_recorded = False # TODO: Would be nice to move this to backend.
self.fork_parent: Optional[Handle] = None
def get_state(self) -> dict:
"""
Returns serializable state dict.
"""
return {
"name": self.name,
"namespace": self.namespace,
"args": self.args,
"kwargs": self.kwargs,
"class_name": self.class_name,
"call_hash": self.call_hash,
"key": self.key,
"hash": self.hash,
}
def get_hash(self) -> str:
"""
Returns hash of the handle.
"""
if self.call_hash:
# Derived state from a call_node.
return hash_struct(["Handle", self.fullname, "call_hash", self.key, self.call_hash])
else:
# Initial state.
return hash_struct(["Handle", self.fullname, "init", self.key, self.args, self.kwargs])
def update_hash(self) -> None:
self.hash = self.get_hash()
def apply_call(self, handle: "Handle", call_hash: str) -> "Handle":
handle2 = self.clone(handle)
handle2.__handle__.call_hash = call_hash
handle2.__handle__.key = ""
handle2.__handle__.update_hash()
return handle2
def fork(self, handle: "Handle", key: str) -> "Handle":
handle2 = self.clone(handle)
# Note: When forking, the previous Handle hash is used as the call_hash.
handle2.__handle__.call_hash = handle.__handle__.hash
handle2.__handle__.key = key
handle2.__handle__.update_hash()
handle2.__handle__.fork_parent = handle
return handle2
def clone(self, handle: "Handle") -> "Handle":
# Create new handle instance.
klass = get_handle_class(self.class_name)
handle2 = klass.__new__(klass, self.name, *self.args, **self.kwargs)
# Copy over attributes to new handle.
ignore_attrs = {"__handle__"}
for key, value in handle.__dict__.items():
if key not in ignore_attrs:
handle2.__dict__[key] = value
return handle2
class Handle(Value):
"""
A Value that accumulates state as it passes through Tasks.
"""
type_name = "redun.Handle"
def __new__(cls, name: Optional[str] = None, *args, **kwargs) -> "Handle":
import redun.scheduler
handle = super().__new__(cls)
# Note: name is None when loading from a pickle.
if name is not None:
# Set HandleInfo within __new__ so that user can't forget.
handle.__handle__ = HandleInfo(
name=name,
namespace=redun.scheduler.get_current_job_namespace(required=False),
args=args,
kwargs=kwargs,
class_name=cls.type_name,
)
return handle
def __init__(self, name: str, args: Tuple = (), kwargs: dict = {}):
pass
def __repr__(self) -> str:
return "{class_name}(fullname={fullname}, hash={hash})".format(
class_name=self.__handle__.class_name,
fullname=self.__handle__.fullname,
hash=(self.__handle__.hash or "")[:8],
)
def __getattr__(self, attr: str) -> Any:
"""
Proxy attribute access to `self.instance` if it exists.
"""
if "instance" in self.__dict__:
return getattr(self.instance, attr)
else:
raise AttributeError(
"'{}' object has no attribute '{}'".format(type(self).__name__, attr)
)
def __getstate__(self) -> dict:
"""
Returns dict for serialization.
"""
return self.__handle__.get_state()
def __setstate__(self, state: dict) -> None:
"""
Sets state from dict for deserialization.
"""
self.__handle__ = HandleInfo(**state)
self.__handle__.is_recorded = True
self.__init__(state["name"], *state["args"], **state["kwargs"]) # type: ignore
def apply_call(self, call_hash: str) -> "Handle":
"""
        Returns a new Handle derived from this one, assuming passage through a call with call_hash.
"""
return self.__handle__.apply_call(self, call_hash)
def fork(self, key: str) -> "Handle":
"""
Forks the handle into a second one for use in parallel tasks.
"""
return self.__handle__.fork(self, key)
def is_valid(self) -> bool:
"""
Returns True if handle is still valid (has not been rolled back).
"""
from redun.scheduler import get_current_scheduler
if self.type_name != self.__handle__.class_name:
# Handle class_name might be out of date from deserialization.
return False
scheduler = get_current_scheduler()
assert scheduler
return scheduler.backend.is_valid_handle(self)
def get_hash(self, data: Optional[bytes] = None) -> str:
"""
Returns a hash of the handle.
"""
return self.__handle__.hash
def preprocess(self, preprocess_args: dict) -> "Handle":
"""
Forks a handle as it passes into a task.
"""
call_order = preprocess_args["call_order"]
return self.fork(self.__handle__.key or str(call_order))
def postprocess(self, postprocess_args: dict) -> "Handle":
"""
Applies the call_hash to the handle as it returns from a task.
"""
return self.apply_call(postprocess_args["pre_call_hash"])
def create_handle(state: dict) -> Handle:
"""
Returns a new Handle created from a state dict.
"""
handle_class = get_handle_class(state["class_name"])
handle = handle_class.__new__(handle_class, state["name"], *state["args"], **state["kwargs"])
handle.__init__(state["name"], *state["args"], **state["kwargs"]) # type: ignore
return handle
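if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): DbHandle is a hypothetical subclass,
    # and the sketch assumes redun auto-registers Value subclasses and that no
    # running scheduler is required for construction and forking.
    class DbHandle(Handle):
        type_name = "examples.DbHandle"
        def __init__(self, name: str, uri: str = ""):
            self.uri = uri
    conn = DbHandle("conn", uri="sqlite:///example.db")
    forked = conn.fork("writer")
    print(forked.__handle__.fullname, forked.__handle__.key, forked.__handle__.hash[:8])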
|
src/torchphysics/problem/samplers/data_samplers.py | TomF98/torchphysics | 203 | 12632638 |
"""File with samplers that handle external created data.
E.g. measurements or validation data computed with other methods.
"""
from .sampler_base import PointSampler
from ..spaces import Points
class DataSampler(PointSampler):
"""A sampler that processes external created data points.
Parameters
----------
points : torchphysics.spaces.points or dict
The data points that this data sampler should pass to a condition.
Either already a torchphysics.spaces.points object or in form of
dictionary like: {'x': tensor_for_x, 't': tensor_for_t, .....}.
For the dicitionary all tensor need to have the same batch dimension.
"""
def __init__(self, points):
if isinstance(points, Points):
self.points = points
elif isinstance(points, dict):
self.points = Points.from_coordinates(points)
else:
raise TypeError("points should be one of Points or dict.")
        n = len(self.points)
super().__init__(n_points=n)
def sample_points(self, params=Points.empty(), device='cpu'):
self.points = self.points.to(device)
        return self.points
|
Scripts/sims4communitylib/utils/cas/common_cas_utils.py | ColonolNutty/Sims4CommunityLibrary | 118 | 12632649 |
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
import os
from typing import Tuple, Union, Any
from sims4communitylib.exceptions.common_exceptions_handler import CommonExceptionHandler
from sims4communitylib.mod_support.mod_identity import CommonModIdentity
from sims4communitylib.modinfo import ModInfo
from sims4communitylib.services.sim.cas.common_sim_outfit_io import CommonSimOutfitIO
from sims4communitylib.utils.common_log_registry import CommonLogRegistry
from sims4communitylib.utils.common_resource_utils import CommonResourceUtils
# ReadTheDocs
from sims4communitylib.utils.sims.common_sim_name_utils import CommonSimNameUtils
from sims4communitylib.utils.sims.common_sim_utils import CommonSimUtils
ON_RTD = os.environ.get('READTHEDOCS', None) == 'True'
if not ON_RTD:
from server_commands.argument_helpers import OptionalTargetParam
from sims.outfits.outfit_enums import OutfitCategory, BodyType
from sims.sim_info import SimInfo
from sims4.commands import Command, CheatOutput, CommandType
else:
# noinspection PyMissingOrEmptyDocstring
class OptionalTargetParam:
pass
# noinspection PyMissingOrEmptyDocstring
class OutfitCategory:
pass
# noinspection PyMissingOrEmptyDocstring
class BodyType:
NONE = 0
# noinspection PyMissingOrEmptyDocstring
class SimInfo:
pass
# noinspection PyMissingOrEmptyDocstring
class Command:
pass
# noinspection PyMissingOrEmptyDocstring
class CheatOutput:
pass
# noinspection PyMissingOrEmptyDocstring
class CommandType:
pass
log = CommonLogRegistry.get().register_log(ModInfo.get_identity(), 's4cl_common_cas_utils')
class CommonCASUtils:
"""Utilities for manipulating the CAS parts of Sims.
"""
@staticmethod
def is_cas_part_loaded(cas_part_id: int) -> bool:
"""is_cas_part_loaded(cas_part_id)
Determine if a CAS part is loaded within the game.
.. note:: If the CAS part is part of a package that is not installed, it will be considered as not loaded.
.. note:: A CAS part is considered as "loaded" when the BodyType it has can be found within the sims.outfits.outfit_enums.BodyType enum.
:param cas_part_id: The Decimal identifier of a CAS part.
:type cas_part_id: int
:return: True if the CAS part is loaded within the game, False if not.
:rtype: bool
"""
body_type = CommonCASUtils.get_body_type_of_cas_part(cas_part_id)
return body_type is not None and body_type > 0
@staticmethod
def get_body_type_of_cas_part(cas_part_id: int) -> Union[BodyType, int]:
"""get_body_type_of_cas_part(cas_part_id)
Retrieve the BodyType of a CAS part.
.. note:: Some Body Types don't appear in the BodyType enum.
:param cas_part_id: The decimal identifier of a CAS part.
:type cas_part_id: int
:return: The default BodyType of the CAS part or an int if the Body Type is not within the BodyType enum.
:rtype: Union[BodyType, int]
"""
from cas.cas import get_caspart_bodytype
body_type = get_caspart_bodytype(cas_part_id)
if isinstance(body_type, int) and body_type in BodyType.value_to_name:
new_body_type = CommonResourceUtils.get_enum_by_name(BodyType.value_to_name[body_type], BodyType, default_value=None)
if new_body_type is not None:
body_type = new_body_type
return body_type
@staticmethod
def get_body_type_by_name(name: str, default_body_type: Union[BodyType, None]=BodyType.NONE) -> BodyType:
"""get_body_type_by_name(name, default_value=BodyType.NONE)
Retrieve an BodyType by name.
:param name: The name of a body type.
:type name: str
:param default_body_type: The default body type to use if a body type is not found using the specified name. Default is BodyType.NONE
:type default_body_type: Union[BodyType, None], optional
:return: The BodyType with the specified name or the default body type if no body type was found using the specified name.
:rtype: BodyType
"""
upper_case_name = str(name).upper().strip()
return CommonResourceUtils.get_enum_by_name(upper_case_name, BodyType, default_value=default_body_type)
@staticmethod
def convert_value_to_body_type(value: Union[BodyType, int]) -> Union[BodyType, int]:
"""convert_value_to_body_type(value)
Retrieve an BodyType by value.
:param value: The value of a body type.
:type value: Union[BodyType, int]
:return: The BodyType with the specified value or the specified value if no BodyType was found.
:rtype: Union[BodyType, int]
"""
if isinstance(value, BodyType):
return value
if value in BodyType.value_to_name:
return CommonResourceUtils.get_enum_by_name(BodyType.value_to_name[value], BodyType, default_value=value)
return value
@staticmethod
def attach_cas_part_to_sim(sim_info: SimInfo, cas_part_id: int, body_type: Union[BodyType, int]=BodyType.NONE, outfit_category_and_index: Union[Tuple[OutfitCategory, int], None]=None, mod_identity: CommonModIdentity=None, **__) -> bool:
"""attach_cas_part_to_sim(sim_info, cas_part_id, body_type=BodyType.NONE, outfit_category_and_index=None, mod_identity=None, **__)
Add a CAS part at the specified BodyType to the Sims outfit.
:param sim_info: The SimInfo of a Sim to add the CAS part to.
:type sim_info: SimInfo
:param cas_part_id: The decimal identifier of a CAS part to attach to the Sim.
:type cas_part_id: int
:param body_type: The BodyType the CAS part will be attached to. If no value is provided or it is None, the BodyType of the CAS part itself will be used.
:type body_type: Union[BodyType, int], optional
:param outfit_category_and_index: The outfit category and index of the Sims outfit to modify. If no value is provided, the Sims current outfit will be used.
:type outfit_category_and_index: Union[Tuple[OutfitCategory, int], None], optional
:param mod_identity: The identity of the mod making changes. Default is None. Optional, but highly recommended!
:type mod_identity: CommonModIdentity, optional
:return: True if the CAS part was successfully attached to the Sim. False if the CAS part was not successfully attached to the Sim.
:rtype: bool
"""
from sims4communitylib.services.sim.cas.common_sim_outfit_io import CommonSimOutfitIO
if cas_part_id == -1 or cas_part_id is None:
raise RuntimeError('No cas_part_id was provided.')
log.format_with_message('Attempting to attach CAS part to Sim', sim=sim_info, cas_part_id=cas_part_id, body_type=body_type, outfit_category_and_index=outfit_category_and_index)
outfit_io = CommonSimOutfitIO(sim_info, outfit_category_and_index=outfit_category_and_index, mod_identity=mod_identity)
outfit_io.attach_cas_part(cas_part_id, body_type=body_type)
return outfit_io.apply(**__)
@staticmethod
def detach_cas_part_from_sim(sim_info: SimInfo, cas_part_id: int, body_type: Union[BodyType, int, None]=None, outfit_category_and_index: Union[Tuple[OutfitCategory, int], None]=None, mod_identity: CommonModIdentity=None, **__) -> bool:
"""detach_cas_part_from_sim(sim_info, cas_part_id, body_type=None, outfit_category_and_index=None, mod_identity=None, **__)
Remove a CAS part at the specified BodyType from the Sims outfit.
:param sim_info: The SimInfo of a Sim to remove the CAS part from.
:type sim_info: SimInfo
:param cas_part_id: The decimal identifier of a CAS part to detach from the Sim.
:type cas_part_id: int
:param body_type: The BodyType the CAS part will be detached from. If BodyType.NONE is provided, the BodyType of the CAS Part itself will be used. If set to None, the CAS part will be removed from all BodyTypes. Default is None.
:type body_type: Union[BodyType, int, None], optional
:param outfit_category_and_index: The outfit category and index of the Sims outfit to modify. If no value is provided, the Sims current outfit will be used.
:type outfit_category_and_index: Union[Tuple[OutfitCategory, int], None], optional
:param mod_identity: The identity of the mod making changes. Default is None. Optional, but highly recommended!
:type mod_identity: CommonModIdentity, optional
:return: True if the CAS part was successfully detached from the Sim. False if the CAS part was not successfully detached from the Sim.
:rtype: bool
"""
from sims4communitylib.services.sim.cas.common_sim_outfit_io import CommonSimOutfitIO
if cas_part_id == -1 or cas_part_id is None:
raise RuntimeError('No cas_part_id was provided.')
log.format_with_message('Attempting to remove CAS part from Sim', sim=sim_info, cas_part_id=cas_part_id, body_type=body_type, outfit_category_and_index=outfit_category_and_index)
outfit_io = CommonSimOutfitIO(sim_info, outfit_category_and_index=outfit_category_and_index, mod_identity=mod_identity)
if body_type is None:
outfit_io.detach_cas_part(cas_part_id)
elif body_type == BodyType.NONE:
body_type = CommonCASUtils.get_body_type_of_cas_part(cas_part_id)
if outfit_io.get_cas_part_at_body_type(body_type) != cas_part_id:
return False
outfit_io.detach_body_type(body_type)
return outfit_io.apply(**__)
@staticmethod
def has_cas_part_attached(sim_info: SimInfo, cas_part_id: int, body_type: Union[BodyType, int, None]=None, outfit_category_and_index: Tuple[OutfitCategory, int]=None, mod_identity: CommonModIdentity=None) -> bool:
"""has_cas_part_attached(sim_info, cas_part_id, body_type=None, outfit_category_and_index=None, mod_identity=None)
Determine if a Sim has the specified CAS part attached to their outfit.
:param sim_info: The SimInfo of the Sim to check.
:type sim_info: SimInfo
:param cas_part_id: A decimal identifier of the CAS part to locate.
:type cas_part_id: int
:param body_type: The BodyType the CAS part will be located at. If BodyType.NONE is provided, the body type of the CAS Part itself will be used. If set to None, the CAS part will be located within any BodyType. Default is None.
:type body_type: Union[BodyType, int, None], optional
:param outfit_category_and_index: The outfit category and index of the Sims outfit to check. Default is the Sims current outfit.
:type outfit_category_and_index: Union[Tuple[OutfitCategory, int], None], optional
:param mod_identity: The identity of the mod performing the function. Default is None. Optional, but highly recommended!
:type mod_identity: CommonModIdentity, optional
:return: True, if the Sims outfit contain the specified CAS part. False, if the Sims outfit does not contain the specified CAS part.
:rtype: bool
"""
log.format_with_message('Checking if CAS part is attached to Sim.', sim=sim_info, cas_part_id=cas_part_id, body_type=body_type, outfit_category_and_index=outfit_category_and_index)
outfit_io = CommonSimOutfitIO(sim_info, outfit_category_and_index=outfit_category_and_index, mod_identity=mod_identity)
if body_type is None:
return outfit_io.is_cas_part_attached(cas_part_id)
if body_type == BodyType.NONE:
body_type = CommonCASUtils.get_body_type_of_cas_part(cas_part_id)
return outfit_io.get_cas_part_at_body_type(body_type) == cas_part_id
@staticmethod
def has_any_cas_part_attached_to_body_type(sim_info: SimInfo, body_type: Union[BodyType, int], outfit_category_and_index: Tuple[OutfitCategory, int]=None, mod_identity: CommonModIdentity=None) -> bool:
"""has_any_cas_part_attached_to_body_type(sim_info, body_type, outfit_category_and_index=None, mod_identity=None)
Determine if a Sim has a CAS Part attached to a BodyType.
:param sim_info: An instance of a Sim.
:type sim_info: SimInfo
:param body_type: A BodyType to check.
:type body_type: Union[BodyType, int]
:param outfit_category_and_index: An outfit category and index of the outfit. Default is None, which is whatever outfit a Sim is currently wearing.
:type outfit_category_and_index: Tuple[OutfitCategory, int], optional
:param mod_identity: The identity of the mod performing the function. Default is None. Optional, but highly recommended!
:type mod_identity: CommonModIdentity, optional
:return: True, if the Sim has any CAS Part attached to the specified BodyType for the specified outfit. False, it not.
:rtype: bool
"""
return CommonCASUtils.get_cas_part_id_at_body_type(sim_info, body_type, outfit_category_and_index=outfit_category_and_index, mod_identity=mod_identity) != -1
@staticmethod
def get_body_type_cas_part_is_attached_to(sim_info: SimInfo, cas_part_id: int, outfit_category_and_index: Tuple[OutfitCategory, int]=None, mod_identity: CommonModIdentity=None) -> Union[BodyType, int]:
"""get_body_type_cas_part_is_attached_to(sim_info, cas_part_id, outfit_category_and_index=None, mod_identity=None)
Retrieve the BodyType that a CAS part is attached to within a Sims outfit.
:param sim_info: The SimInfo of the Sim to check.
:type sim_info: SimInfo
:param cas_part_id: A decimal identifier of the CAS part to locate.
:type cas_part_id: int
:param outfit_category_and_index: The outfit category and index of the Sims outfit to check. If None, the current outfit of the Sim will be used.
:type outfit_category_and_index: Tuple[OutfitCategory, int], optional
:param mod_identity: The identity of the mod performing the function. Default is None. Optional, but highly recommended!
:type mod_identity: CommonModIdentity, optional
:return: The BodyType the specified CAS part id is attached to or BodyType.NONE if the CAS part is not found or the Sim does not have body parts for their outfit.
:rtype: Union[BodyType, int]
"""
log.format_with_message('Retrieving BodyType for CAS part.', sim=sim_info, cas_part_id=cas_part_id, outfit_category_and_index=outfit_category_and_index)
outfit_io = CommonSimOutfitIO(sim_info, outfit_category_and_index=outfit_category_and_index, mod_identity=mod_identity)
return outfit_io.get_body_type_cas_part_is_attached_to(cas_part_id)
@staticmethod
def get_cas_part_id_at_body_type(sim_info: SimInfo, body_type: Union[BodyType, int], outfit_category_and_index: Tuple[OutfitCategory, int]=None, mod_identity: CommonModIdentity=None) -> int:
"""get_cas_part_id_at_body_type(sim_info, body_type, outfit_category_and_index=None, mod_identity=None)
Retrieve the CAS part identifier attached to the specified BodyType within a Sims outfit.
:param sim_info: The SimInfo of the Sim to check.
:type sim_info: SimInfo
:param body_type: The BodyType to check.
:type body_type: Union[BodyType, int]
:param outfit_category_and_index: The outfit category and index of the Sims outfit to check. Default is the Sims current outfit.
:type outfit_category_and_index: Tuple[OutfitCategory, int], optional
:param mod_identity: The identity of the mod performing the function. Default is None. Optional, but highly recommended!
:type mod_identity: CommonModIdentity, optional
:return: The CAS part identifier attached to the specified BodyType or -1 if the BodyType is not found.
:rtype: int
"""
log.format_with_message('Checking if CAS part is attached to Sim.', sim=sim_info, body_type=body_type, outfit_category_and_index=outfit_category_and_index)
outfit_io = CommonSimOutfitIO(sim_info, outfit_category_and_index=outfit_category_and_index, mod_identity=mod_identity)
return outfit_io.get_cas_part_at_body_type(body_type)
@staticmethod
def get_skin_tone(sim_info: SimInfo) -> int:
"""get_skin_tone(sim_info)
Retrieve the id for the Skin Tone of a Sim.
:param sim_info: An instance of a Sim.
:type sim_info: SimInfo
:return: The decimal identifier of the skin tone of the specified Sim.
:rtype: int
"""
return sim_info.skin_tone
@staticmethod
def get_skin_tone_value_shift(sim_info: SimInfo) -> int:
"""get_skin_tone_value_shift(sim_info)
Retrieve the value shift for the Skin Tone of a Sim.
:param sim_info: An instance of a Sim.
:type sim_info: SimInfo
:return: The value shift for the Skin Tone of a Sim.
:rtype: int
"""
return sim_info.skin_tone_val_shift
@staticmethod
def set_skin_tone(sim_info: SimInfo, skin_tone: int):
"""set_skin_tone(sim_info, skin_tone)
Set the skin tone of a Sim.
:param sim_info: An instance of a Sim.
:type sim_info: SimInfo
:param skin_tone: The decimal identifier of the skin tone to set the Sim to.
:type skin_tone: int
"""
sim_info.skin_tone = skin_tone
if not ON_RTD:
@Command('s4clib.attach_cas_part', command_type=CommandType.Live)
def _s4clib_attach_cas_part(cas_part_id: int, body_type_str: str='any', opt_sim: OptionalTargetParam=None, _connection: Any=None):
from sims4communitylib.utils.sims.common_sim_name_utils import CommonSimNameUtils
from sims4communitylib.utils.sims.common_sim_utils import CommonSimUtils
from server_commands.argument_helpers import get_optional_target
output = CheatOutput(_connection)
if cas_part_id < 0:
output('ERROR: cas_part_id must be a positive number.')
return
if not CommonCASUtils.is_cas_part_loaded(cas_part_id):
output('ERROR: No cas part was found with id: {}'.format(cas_part_id))
return
sim_info = CommonSimUtils.get_sim_info(get_optional_target(opt_sim, _connection))
if sim_info is None:
output('Failed, no Sim was specified or the specified Sim was not found!')
return
if body_type_str is None:
output('No body_type specified.')
return
if body_type_str == 'any':
body_type = BodyType.NONE
elif body_type_str.isnumeric():
try:
body_type = int(body_type_str)
except ValueError:
output('Specified body type is neither a number nor a body type name {}'.format(body_type_str))
return
else:
body_type = CommonResourceUtils.get_enum_by_name(body_type_str.upper(), BodyType, default_value=BodyType.NONE)
if body_type == BodyType.NONE:
output('Specified body type is not a body type, it was "{}"'.format(body_type_str))
return
output('Attempting to attach CAS Part \'{}\' to Sim \'{}\''.format(cas_part_id, CommonSimNameUtils.get_full_name(sim_info)))
try:
if CommonCASUtils.attach_cas_part_to_sim(sim_info, cas_part_id, body_type=body_type):
output('CAS Part attached to Sim {} successfully.'.format(CommonSimNameUtils.get_full_name(sim_info)))
except Exception as ex:
output('An error occurred while trying to attach the CAS Part!')
CommonExceptionHandler.log_exception(ModInfo.get_identity(), 'Error occurred trying to attach a CAS Part to a Sim.', exception=ex)
output('Done attaching CAS Part to the Sim.')
@Command('s4clib.detach_cas_part', command_type=CommandType.Live)
def _s4clib_detach_cas_part(cas_part_id: int, body_type_str: str='all', opt_sim: OptionalTargetParam=None, _connection: Any=None):
from sims4communitylib.utils.sims.common_sim_name_utils import CommonSimNameUtils
from sims4communitylib.utils.sims.common_sim_utils import CommonSimUtils
from server_commands.argument_helpers import get_optional_target
output = CheatOutput(_connection)
if cas_part_id < 0:
output('ERROR: cas_part_id must be a positive number.')
return
if not CommonCASUtils.is_cas_part_loaded(cas_part_id):
output('ERROR: No cas part was found with id: {}'.format(cas_part_id))
return
sim_info = CommonSimUtils.get_sim_info(get_optional_target(opt_sim, _connection))
if sim_info is None:
output('Failed, no Sim was specified or the specified Sim was not found!')
return
if body_type_str is None:
output('ERROR: No body_type was specified.')
return
if body_type_str == 'all':
body_type = None
elif body_type_str.isnumeric():
try:
body_type = int(body_type_str)
except ValueError:
output('Specified body type is neither a number nor a body type name {}'.format(body_type_str))
return
else:
body_type = CommonResourceUtils.get_enum_by_name(body_type_str.upper(), BodyType, default_value=BodyType.NONE)
if body_type == BodyType.NONE:
output('Specified body type is not a body type {}'.format(body_type_str))
return
output('Attempting to detach CAS Part \'{}\' from Sim \'{}\''.format(cas_part_id, CommonSimNameUtils.get_full_name(sim_info)))
try:
if CommonCASUtils.detach_cas_part_from_sim(sim_info, cas_part_id, body_type=body_type):
output('CAS Part detached from Sim {} successfully.'.format(CommonSimNameUtils.get_full_name(sim_info)))
except Exception as ex:
CommonExceptionHandler.log_exception(ModInfo.get_identity(), 'Error occurred trying to detach a CAS Part from a Sim.', exception=ex)
        output('Done detaching CAS Part from the Sim.')
@Command('s4clib.print_cas_part_at_body_type', command_type=CommandType.Live)
def _s4clib_print_cas_part_at_body_type(body_type_str: str, opt_sim: OptionalTargetParam=None, _connection: int=None):
from sims4communitylib.utils.sims.common_sim_utils import CommonSimUtils
from server_commands.argument_helpers import get_optional_target
output = CheatOutput(_connection)
output('Printing CAS Part at body type. ')
if body_type_str is None:
output('No body_type specified.')
return
if not body_type_str.isnumeric():
body_type = CommonResourceUtils.get_enum_by_name(body_type_str.upper(), BodyType, default_value=BodyType.NONE)
if body_type == BodyType.NONE:
output('Specified body type is not a body type {}'.format(body_type_str))
return
else:
try:
body_type = int(body_type_str)
except ValueError:
output('Specified body type is neither a number nor a body type name {}'.format(body_type_str))
return
sim_info = CommonSimUtils.get_sim_info(get_optional_target(opt_sim, _connection))
if sim_info is None:
output('Failed, no Sim was specified or the specified Sim was not found!')
return
cas_part_id = CommonCASUtils.get_cas_part_id_at_body_type(sim_info, body_type)
output('Found cas part id at body type {}: {}'.format(body_type, cas_part_id))
@Command('s4clib.is_cas_part_available', command_type=CommandType.Live)
def _s4clib_is_cas_part_available(part_id: int=None, _connection: int=None):
output = CheatOutput(_connection)
if part_id is None:
output('No CAS Part specified!')
return
output('Checking if CAS Part {} is available.'.format(part_id))
if CommonCASUtils.is_cas_part_loaded(part_id):
output('CAS Part is available.')
else:
output('CAS Part is not available.')
@Command('s4clib.print_skin_tone', command_type=CommandType.Live)
def _common_print_skin_tone(opt_sim: OptionalTargetParam=None, _connection: int=None):
from server_commands.argument_helpers import get_optional_target
output = CheatOutput(_connection)
output('Attempting to get skin tone.')
sim_info = CommonSimUtils.get_sim_info(get_optional_target(opt_sim, _connection))
if sim_info is None:
output('Failed, no Sim was specified or the specified Sim was not found!')
return
output('Sim: {}'.format(CommonSimNameUtils.get_full_name(sim_info)))
output('Skin Tone: {}'.format(CommonCASUtils.get_skin_tone(sim_info)))
output('Skin Tone Val Shift: {}'.format(CommonCASUtils.get_skin_tone_value_shift(sim_info)))
@Command('s4clib.set_skin_tone', command_type=CommandType.Live)
def _common_set_skin_tone(skin_tone_id: int, opt_sim: OptionalTargetParam=None, _connection: int=None):
from server_commands.argument_helpers import get_optional_target
output = CheatOutput(_connection)
sim_info = CommonSimUtils.get_sim_info(get_optional_target(opt_sim, _connection))
if sim_info is None:
output('Failed, no Sim was specified or the specified Sim was not found!')
return
output('Attempting to set the skin tone \'{}\' of Sim \'{}\''.format(skin_tone_id, CommonSimNameUtils.get_full_name(sim_info)))
CommonCASUtils.set_skin_tone(sim_info, skin_tone_id)
        output('Done setting the skin tone of the Sim.')
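# A minimal usage sketch of the utilities above, assuming `sim_info` refers to an existing Sim and
# `new_hat_cas_part_id` is the decimal identifier of a loaded CAS part; BodyType.HAT is used purely
# as an illustrative slot.
def _example_replace_hat(sim_info: SimInfo, new_hat_cas_part_id: int) -> bool:
    if not CommonCASUtils.is_cas_part_loaded(new_hat_cas_part_id):
        return False
    # Detach whatever currently occupies the HAT slot of the current outfit, if anything.
    existing_part_id = CommonCASUtils.get_cas_part_id_at_body_type(sim_info, BodyType.HAT)
    if existing_part_id != -1:
        CommonCASUtils.detach_cas_part_from_sim(sim_info, existing_part_id, body_type=BodyType.HAT)
    # Attach the new part to the same slot.
    return CommonCASUtils.attach_cas_part_to_sim(sim_info, new_hat_cas_part_id, body_type=BodyType.HAT)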
|
pygtkweb/demos/062-uimanager.py | takipsizad/pyjs | 739 | 12632661 |
#!/usr/bin/env python
import pygtk
pygtk.require('2.0')
import gtk
class UIManagerExample:
ui = '''<ui>
<menubar name="MenuBar">
<menu action="File">
<menuitem action="Quit"/>
</menu>
<menu action="Sound">
<menuitem action="Mute"/>
</menu>
<menu action="RadioBand">
<menuitem action="AM"/>
<menuitem action="FM"/>
<menuitem action="SSB"/>
</menu>
</menubar>
<toolbar name="Toolbar">
<toolitem action="Quit"/>
<separator/>
<toolitem action="Mute"/>
<separator/>
<placeholder name="RadioBandItems">
<toolitem action="AM"/>
<toolitem action="FM"/>
<toolitem action="SSB"/>
</placeholder>
</toolbar>
</ui>'''
def __init__(self):
# Create the toplevel window
window = gtk.Window()
window.connect('destroy', lambda w: gtk.main_quit())
window.set_size_request(300, -1)
vbox = gtk.VBox()
window.add(vbox)
# Create a UIManager instance
uimanager = gtk.UIManager()
# Add the accelerator group to the toplevel window
accelgroup = uimanager.get_accel_group()
window.add_accel_group(accelgroup)
# Create an ActionGroup
actiongroup = gtk.ActionGroup('UIManagerExample')
self.actiongroup = actiongroup
# Create a ToggleAction, etc.
actiongroup.add_toggle_actions([('Mute', None, '_Mute', '<Control>m',
'Mute the volume', self.mute_cb)])
# Create actions
actiongroup.add_actions([('Quit', gtk.STOCK_QUIT, '_Quit me!', None,
'Quit the Program', self.quit_cb),
('File', None, '_File'),
('Sound', None, '_Sound'),
('RadioBand', None, '_Radio Band')])
actiongroup.get_action('Quit').set_property('short-label', '_Quit')
# Create some RadioActions
actiongroup.add_radio_actions([('AM', None, '_AM', '<Control>a',
'AM Radio', 0),
('FM', None, '_FM', '<Control>f',
'FM Radio', 1),
('SSB', None, '_SSB', '<Control>s',
'SSB Radio', 2),
], 0, self.radioband_cb)
# Add the actiongroup to the uimanager
uimanager.insert_action_group(actiongroup, 0)
# Add a UI description
uimanager.add_ui_from_string(self.ui)
# Create a MenuBar
menubar = uimanager.get_widget('/MenuBar')
vbox.pack_start(menubar, False)
# Create a Toolbar
toolbar = uimanager.get_widget('/Toolbar')
vbox.pack_start(toolbar, False)
# Create and pack two Labels
label = gtk.Label('Sound is not muted')
vbox.pack_start(label)
self.mutelabel = label
label = gtk.Label('Radio band is AM')
vbox.pack_start(label)
self.bandlabel = label
# Create buttons to control visibility and sensitivity of actions
buttonbox = gtk.HButtonBox()
sensitivebutton = gtk.CheckButton('Sensitive')
sensitivebutton.set_active(True)
sensitivebutton.connect('toggled', self.toggle_sensitivity)
visiblebutton = gtk.CheckButton('Visible')
visiblebutton.set_active(True)
visiblebutton.connect('toggled', self.toggle_visibility)
# add them to buttonbox
buttonbox.pack_start(sensitivebutton, False)
buttonbox.pack_start(visiblebutton, False)
vbox.pack_start(buttonbox)
window.show_all()
return
def mute_cb(self, action):
# action has not toggled yet
text = ('muted', 'not muted')[action.get_active()==False]
self.mutelabel.set_text('Sound is %s' % text)
return
def radioband_cb(self, action, current):
text = ('AM', 'FM', 'SSB')[action.get_current_value()]
self.bandlabel.set_text('Radio band is %s' % text)
return
def quit_cb(self, b):
print 'Quitting program'
gtk.main_quit()
def toggle_sensitivity(self, b):
self.actiongroup.set_sensitive(b.get_active())
return
def toggle_visibility(self, b):
self.actiongroup.set_visible(b.get_active())
return
if __name__ == '__main__':
ba = UIManagerExample()
gtk.main()
|
torchtyping/pytest_plugin.py | olliethomas/torchtyping | 881 | 12632675 |
from .typechecker import patch_typeguard
def pytest_addoption(parser):
group = parser.getgroup("torchtyping")
group.addoption(
"--torchtyping-patch-typeguard",
action="store_true",
help="Run torchtyping's typeguard patch.",
)
def pytest_configure(config):
if config.getoption("torchtyping_patch_typeguard"):
patch_typeguard()
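# A minimal sketch of what the option above enables: when pytest is invoked with
# ``--torchtyping-patch-typeguard``, typeguard-checked functions also validate tensor shapes and
# named dimensions. The function, dimension names, and shapes below are illustrative assumptions
# for a separate test module, not part of this plugin.
import torch
from typeguard import typechecked
from torchtyping import TensorType
@typechecked
def _batch_outer(x: TensorType["batch", "dim"]) -> TensorType["batch", "dim", "dim"]:
    # Outer product per batch element: (batch, dim, 1) * (batch, 1, dim) -> (batch, dim, dim)
    return x.unsqueeze(-1) * x.unsqueeze(-2)
def test_batch_outer_shapes():
    _batch_outer(torch.rand(8, 3))  # passes: result is (8, 3, 3), consistent with the annotation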
|
docs/basic_usage/bu03.py | jviide/htm.py | 112 | 12632683 | from htm import htm
@htm
def html(tag, props, children):
return tag, props, children
# start
name = 'World'
result03 = html("""
<div title="Say Hi">Hello {name}</div>
""")
|
plugins/Status/plugin.py | mogad0n/Limnoria | 476 | 12632715 | ###
# Copyright (c) 2002-2005, <NAME>
# Copyright (c) 2009, <NAME>
# Copyright (c) 2010-2021, The Limnoria Contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import os
import sys
import time
import threading
import multiprocessing
import subprocess
import supybot.conf as conf
import supybot.utils as utils
import supybot.world as world
from supybot.commands import *
import supybot.callbacks as callbacks
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('Status')
class Status(callbacks.Plugin):
"""This plugin allows you to view different bot statistics, for example,
uptime."""
def __init__(self, irc):
self.__parent = super(Status, self)
self.__parent.__init__(irc)
# XXX It'd be nice if these could be kept in the registry.
self.sentMsgs = 0
self.recvdMsgs = 0
self.sentBytes = 0
self.recvdBytes = 0
self.connected = {}
def __call__(self, irc, msg):
self.recvdMsgs += 1
self.recvdBytes += len(msg)
self.__parent.__call__(irc, msg)
def outFilter(self, irc, msg):
self.sentMsgs += 1
self.sentBytes += len(msg)
return msg
def do001(self, irc, msg):
self.connected[irc] = time.time()
@internationalizeDocstring
def status(self, irc, msg, args):
"""takes no arguments
Returns the status of the bot.
"""
networks = {}
for Irc in world.ircs:
networks.setdefault(Irc.network, []).append(Irc.nick)
networks = sorted(networks.items())
networks = [format(_('%s as %L'), net, nicks) for (net,nicks) in networks]
L = [format(_('I am connected to %L.'), networks)]
if world.profiling:
L.append(_('I am currently in code profiling mode.'))
irc.reply(' '.join(L))
status = wrap(status)
@internationalizeDocstring
def threads(self, irc, msg, args):
"""takes no arguments
Returns the current threads that are active.
"""
threads = [t.getName() for t in threading.enumerate()]
threads.sort()
s = format(_('I have spawned %n; %n %b still currently active: %L.'),
(world.threadsSpawned, 'thread'),
(len(threads), 'thread'), len(threads), threads)
irc.reply(s)
threads = wrap(threads)
def processes(self, irc, msg, args):
"""takes no arguments
Returns the number of processes that have been spawned, and list of
ones that are still active.
"""
ps = [multiprocessing.current_process().name]
ps = ps + [p.name for p in multiprocessing.active_children()]
s = format('I have spawned %n; %n %b still currently active: %L.',
(world.processesSpawned, 'process'),
(len(ps), 'process'),
len(ps), ps)
irc.reply(s)
processes = wrap(processes)
def net(self, irc, msg, args):
"""takes no arguments
Returns some interesting network-related statistics.
"""
try:
elapsed = time.time() - self.connected[irc.getRealIrc()]
timeElapsed = utils.timeElapsed(elapsed)
except KeyError:
timeElapsed = _('an indeterminate amount of time')
irc.reply(format(_('I have received %s messages for a total of %S. '
'I have sent %s messages for a total of %S. '
'I have been connected to %s for %s.'),
self.recvdMsgs, self.recvdBytes,
self.sentMsgs, self.sentBytes, irc.server, timeElapsed))
net = wrap(net)
@internationalizeDocstring
def cpu(self, irc, msg, args):
"""takes no arguments
Returns some interesting CPU-related statistics on the bot.
"""
(user, system, childUser, childSystem, elapsed) = os.times()
now = time.time()
target = (msg.channel, irc.network)
timeRunning = now - world.startedAt
if self.registryValue('cpu.children', *target) and \
user+system < timeRunning+1: # Fudge for FPU inaccuracies.
children = _('My children have taken %.2f seconds of user time '
'and %.2f seconds of system time '
'for a total of %.2f seconds of CPU time.') % \
(childUser, childSystem, childUser+childSystem)
else:
children = ''
activeThreads = threading.activeCount()
response = _('I have taken %.2f seconds of user time and %.2f seconds '
'of system time, for a total of %.2f seconds of CPU '
'time. %s') % (user, system, user + system, children)
if self.registryValue('cpu.threads', *target):
response += format('I have spawned %n; I currently have %i still '
'running.',
(world.threadsSpawned, 'thread'), activeThreads)
if self.registryValue('cpu.memory', *target):
mem = None
pid = os.getpid()
plat = sys.platform
try:
if plat.startswith('linux') or plat.startswith('sunos') or \
plat.startswith('freebsd') or plat.startswith('openbsd') or \
plat.startswith('darwin'):
cmd = 'ps -o rss -p %s' % pid
try:
inst = subprocess.Popen(cmd.split(), close_fds=True,
stdin=open(os.devnull),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
irc.error(_('Unable to run ps command.'), Raise=True)
(out, foo) = inst.communicate()
inst.wait()
mem = int(out.splitlines()[1])
elif sys.platform.startswith('netbsd'):
mem = int(os.stat('/proc/%s/mem' % pid)[7])
if mem:
response += format(_(' I\'m taking up %S of memory.'),
mem*1024)
else:
response += _(' I\'m taking up an unknown amount of memory.')
except Exception:
self.log.exception('Uncaught exception in cpu.memory:')
irc.reply(utils.str.normalizeWhitespace(response))
cpu = wrap(cpu)
@internationalizeDocstring
def cmd(self, irc, msg, args):
"""takes no arguments
Returns some interesting command-related statistics.
"""
commands = 0
callbacksPlugin = 0
for cb in irc.callbacks:
if isinstance(cb, callbacks.Plugin):
callbacksPlugin += 1
commands += len(cb.listCommands())
s = format(_('I offer a total of %n in %n. I have processed %n.'),
(commands, 'command'),
(callbacksPlugin, 'command-based', 'plugin'),
(world.commandsProcessed, 'command'))
irc.reply(s)
cmd = wrap(cmd)
@internationalizeDocstring
def commands(self, irc, msg, args):
"""takes no arguments
Returns a list of the commands offered by the bot.
"""
commands = set()
for cb in irc.callbacks:
if isinstance(cb, callbacks.Plugin):
for command in cb.listCommands():
commands.add(command)
irc.reply(format('%L', sorted(commands)))
commands = wrap(commands)
@internationalizeDocstring
def uptime(self, irc, msg, args):
"""takes no arguments
Returns the amount of time the bot has been running.
"""
response = _('I have been running for %s.') % \
utils.timeElapsed(time.time() - world.startedAt)
irc.reply(response)
uptime = wrap(uptime)
@internationalizeDocstring
def server(self, irc, msg, args):
"""takes no arguments
Returns the server the bot is on.
"""
irc.reply(irc.server)
server = wrap(server)
@internationalizeDocstring
def network(self, irc, msg, args):
"""takes no arguments
Returns the network the bot is on.
"""
irc.reply(irc.network)
network = wrap(network)
Class = Status
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
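# Example interaction once this plugin is loaded (nick, network, and numbers are illustrative;
# the command prefix depends on the bot's configuration):
#   <owner> @load Status
#   <bot>   The operation succeeded.
#   <user>  @status
#   <bot>   I am connected to libera as examplebot.
#   <user>  @uptime
#   <bot>   I have been running for 2 days, 3 hours, and 10 minutes.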
|
serpent/machine_learning/reinforcement_learning/ppo/policy.py | JackEasons/SerpentAI | 6,762 | 12632734 |
import torch
from .cnn_base import CNNBase
from .distributions import Categorical
class Policy(torch.nn.Module):
def __init__(self, observation_shape, action_space, recurrent_policy):
super().__init__()
# Image input only for now...
self.base = CNNBase(observation_shape[0], recurrent_policy)
# Discrete space only for now...
self.distribution = Categorical(self.base.output_size, action_space)
self.state_size = self.base.state_size
def forward(self, inputs, states, masks):
raise NotImplementedError
def act(self, inputs, states, masks, deterministic=False):
value, actor_features, states = self.base(inputs, states, masks)
distribution = self.distribution(actor_features)
if deterministic:
action = distribution.mode()
else:
action = distribution.sample()
action_log_probs = distribution.log_probs(action)
return value, action, action_log_probs, states
def get_value(self, inputs, states, masks):
value, _, _ = self.base(inputs, states, masks)
return value
def evaluate_actions(self, inputs, states, masks, action):
value, actor_features, states = self.base(inputs, states, masks)
distribution = self.distribution(actor_features)
action_log_probs = distribution.log_probs(action)
distribution_entropy = distribution.entropy().mean()
return value, action_log_probs, distribution_entropy, states
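# A minimal usage sketch, assuming 84x84 stacked-frame observations (the input size CNNBase-style
# encoders conventionally expect) and a discrete action space passed as the number of actions;
# all shapes below are illustrative.
if __name__ == "__main__":
    observation_shape = (4, 84, 84)
    num_actions = 6
    policy = Policy(observation_shape, num_actions, recurrent_policy=False)
    observations = torch.zeros(8, *observation_shape)  # batch of 8 observations
    states = torch.zeros(8, policy.state_size)  # recurrent state (unused when not recurrent)
    masks = torch.ones(8, 1)  # 1.0 means the episode has not ended
    value, action, action_log_probs, states = policy.act(observations, states, masks)
    print(value.shape, action.shape, action_log_probs.shape)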
|
wam/settings.py | laozhudetui/wam | 227 | 12632749 |
#!/usr/bin/env python
# coding: utf8
"""
Django settings for wam project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.conf import global_settings
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TEMPLATE_DIRS = (BASE_DIR + '/wam/templates',)
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
'django.core.context_processors.request',
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jk&z8&k*z#=25f7$e!o@^#^y8)a-373!#0eye^fm24s7lu^m@m'
# SECRET_KEY = os.urandom(64)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
SESSION_COOKIE_AGE = 60*60*24
SESSION_COOKIE_NAME = 'wam_sessionid'
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
INSTALLED_APPS += (
'wam.apps.am',
'wam.apps.fm',
'wam.apps.main',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'wam.urls'
WSGI_APPLICATION = 'wam.wsgi.application'
AUTHENTICATION_BACKENDS = (
    #'wam.ldap_backend.LDAPBackend', # Uncomment this line to enable LDAP authentication
'django.contrib.auth.backends.ModelBackend',
)
LDAP_SERVER_DOMAIN = 'test.com'
LDAP_SERVER_URI = 'ldaps://ldap.test.com'
LDAP_USER_DN_TEMPLATE = 'uid=%s,ou=People,dc=test,dc=com'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# dev
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# production
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'wamdb',
# 'USER': 'root',
# 'PASSWORD': 'password',
# 'HOST': '127.0.0.1',
# 'PORT': '3306',
# }
# }
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'zh-CN'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'wam/static'),
)
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
PACKAGE_SOTRE_DIR = os.path.join(BASE_DIR, 'wam/packages/')
FILE_STORE_DIR = os.path.join(BASE_DIR, 'wam/files/')
WAM_VENDOR_LOGO_URL = '/static/images/logo/'
WAM_VENDOR_LOGO_ROOT = os.path.join(BASE_DIR, 'wam/static/images/logo')
# Email Settings
EMAIL_HOST = ''
EMAIL_PORT = 25
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
|
true_coders/migrations/0038_auto_20210703_0011.py | horacexd/clist | 166 | 12632758 | # Generated by Django 3.1.12 on 2021-07-03 00:11
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('true_coders', '0037_auto_20210702_2355'),
]
operations = [
migrations.AddField(
model_name='list',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, unique=True),
),
migrations.AlterField(
model_name='list',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='my_list_set', to='true_coders.coder'),
),
]
|
models/vision/detection/awsdet/datasets/loader/build_loader.py | kevinyang8/deep-learning-models | 129 | 12632767 |
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
import tensorflow as tf
from .. import data_generator
from awsdet.utils.runner.dist_utils import get_dist_info
def build_dataloader(dataset,
imgs_per_gpu,
workers_per_gpu=1, # unused
num_gpus=0,
dist=True,
shuffle=True,
**kwargs):
"""Build a TF Dataset pipeline that returns padded batches.
In distributed training, each GPU/process has a dataloader.
In non-distributed training, there is only one dataloader for all GPUs.
Args:
dataset (Dataset): A dataset.
imgs_per_gpu (int): Number of images on each GPU, i.e., batch size of
each GPU.
workers_per_gpu (int): How many subprocesses to use for data loading
for each GPU. - TODO: unused
num_gpus (int): Number of GPUs. Only used in non-distributed training - TODO
dist (bool): Distributed training/test or not. Default: True.
shuffle (bool): Whether to shuffle the data at every epoch.
Default: True.
kwargs: any keyword argument to be used to initialize DataLoader
Returns:
tf.data.Dataset: A TF dataset pipeline.
"""
batch_size = imgs_per_gpu
if dist:
rank, local_rank, size, local_size = get_dist_info()
if dataset.train:
generator = data_generator.DataGenerator(dataset, index=rank, num_gpus=size, shuffle=shuffle)
else:
generator = data_generator.DataGenerator(dataset, index=rank, num_gpus=local_size, shuffle=False) # evaluation on node 0 workers
else:
generator = data_generator.DataGenerator(dataset, shuffle=False)
if dataset.train:
tf_dataset = tf.data.Dataset.from_generator(
generator, (tf.float32, tf.float32, tf.float32, tf.int32))
if dataset.mask:
tf_dataset = tf.data.Dataset.from_generator(
generator, (tf.float32, tf.float32, tf.float32, tf.int32, tf.int32))
else:
tf_dataset = tf.data.Dataset.from_generator(
generator, (tf.float32, tf.float32, tf.float32, tf.int32))
tf_dataset = tf_dataset.map(lambda *args: args, num_parallel_calls=tf.data.experimental.AUTOTUNE)
tf_dataset = tf_dataset.prefetch(tf.data.experimental.AUTOTUNE)
if dataset.mask:
tf_dataset = tf_dataset.padded_batch(
batch_size,
padded_shapes=(
tf.TensorShape([None, None, 3]), # image padded to largest in batch
tf.TensorShape([11]), # image meta - no padding
tf.TensorShape([None, 4]), # bounding boxes, padded to longest
tf.TensorShape([None]), # labels, padded to longest
tf.TensorShape([None, None, None]) # pad masks to largest in batch
),
padding_values=(0.0, 0.0, 0.0, -1, -1))
else:
tf_dataset = tf_dataset.padded_batch(
batch_size,
padded_shapes=(
tf.TensorShape([None, None, 3]), # image padded to largest in batch
tf.TensorShape([11]), # image meta - no padding
tf.TensorShape([None, 4]), # bounding boxes, padded to longest
tf.TensorShape([None]) # labels, padded to longest
),
padding_values=(0.0, 0.0, 0.0, -1))
tf_dataset = tf_dataset.prefetch(tf.data.experimental.AUTOTUNE)
return tf_dataset, generator.num_examples // batch_size
else:
tf_dataset = tf.data.Dataset.from_generator(
generator, (tf.float32, tf.float32))
tf_dataset = tf_dataset.padded_batch(
batch_size,
padded_shapes=(
tf.TensorShape([None, None,
3]), # image padded to largest in batch
tf.TensorShape([11]), # image meta - no padding
),
padding_values=(0.0, 0.0))
tf_dataset = tf_dataset.repeat()
return tf_dataset, generator.num_examples // batch_size
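# A minimal usage sketch, assuming `dataset` is an awsdet detection dataset (train=True,
# mask=False) accepted by data_generator.DataGenerator; the batch structure matches the
# padded_batch shapes defined above.
def _example_train_iteration(dataset, imgs_per_gpu=2):
    tf_dataset, steps_per_epoch = build_dataloader(dataset, imgs_per_gpu, dist=True, shuffle=True)
    for imgs, img_metas, gt_bboxes, gt_labels in tf_dataset.take(steps_per_epoch):
        # imgs: [N, H, W, 3] padded images, img_metas: [N, 11],
        # gt_bboxes: [N, max_boxes, 4], gt_labels: [N, max_boxes] padded with -1.
        pass
    return steps_per_epoch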
|
tests/test_queue_size.py | purificant/tasktiger | 1,143 | 12632770 | """Test max queue size limits."""
from multiprocessing import Process
import datetime
import os
import signal
import time
import pytest
from tasktiger import Task, Worker
from tasktiger.exceptions import QueueFullException
from .config import DELAY
from .tasks import decorated_task_max_queue_size, simple_task, sleep_task
from .test_base import BaseTestCase
from .utils import external_worker
class TestMaxQueue(BaseTestCase):
"""TaskTiger test max queue size."""
def test_task_simple_delay(self):
"""Test enforcing max queue size using delay function."""
self.tiger.delay(simple_task, queue='a', max_queue_size=1)
self._ensure_queues(queued={'a': 1})
# Queue size would be 2 so it should fail
with pytest.raises(QueueFullException):
self.tiger.delay(simple_task, queue='a', max_queue_size=1)
# Process first task and then queuing a second should succeed
Worker(self.tiger).run(once=True, force_once=True)
self.tiger.delay(simple_task, queue='a', max_queue_size=1)
self._ensure_queues(queued={'a': 1})
def test_task_decorated(self):
"""Test max queue size with decorator."""
decorated_task_max_queue_size.delay()
self._ensure_queues(queued={'default': 1})
with pytest.raises(QueueFullException):
decorated_task_max_queue_size.delay()
def test_task_all_states(self):
"""Test max queue size with tasks in all three states."""
# Active
task = Task(self.tiger, sleep_task, queue='a')
task.delay()
self._ensure_queues(queued={'a': 1})
# Start a worker and wait until it starts processing.
worker = Process(target=external_worker)
worker.start()
time.sleep(DELAY)
# Kill the worker while it's still processing the task.
os.kill(worker.pid, signal.SIGKILL)
self._ensure_queues(active={'a': 1})
# Scheduled
self.tiger.delay(
simple_task,
queue='a',
max_queue_size=3,
when=datetime.timedelta(seconds=10),
)
# Queued
self.tiger.delay(simple_task, queue='a', max_queue_size=3)
self._ensure_queues(
active={'a': 1}, queued={'a': 1}, scheduled={'a': 1}
)
# Should fail to queue task to run immediately
with pytest.raises(QueueFullException):
self.tiger.delay(simple_task, queue='a', max_queue_size=3)
# Should fail to queue task to run in the future
with pytest.raises(QueueFullException):
self.tiger.delay(
simple_task,
queue='a',
max_queue_size=3,
when=datetime.timedelta(seconds=10),
)
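# For reference, a task such as `decorated_task_max_queue_size` (imported from .tasks above) is
# assumed to be declared roughly like the sketch below; the TaskTiger instance and the
# max_queue_size value are illustrative:
#
#   tiger = TaskTiger()
#
#   @tiger.task(max_queue_size=1)
#   def decorated_task_max_queue_size():
#       pass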
|
regtests/requirejs/webworker_p2js.py | ahakingdom/Rusthon | 622 | 12632774 | '''import p2.js inside webworker'''
# sudo npm install -g p2
import threading
from time import sleep
def main():
shared = []
w = threading.start_webworker( worker, [shared] )
sleep(1.0)
TestError( len(shared)==2 )
TestError( shared[0]==10 )
TestError( shared[1]==20 )
with webworker:
import p2
def worker( arr ):
v = p2.vec2.fromValues(10,20)
arr.append( v[0] )
arr.append( v[1] )
|
9.用两个栈实现队列/9.用两个栈实现队列.py | shenweichen/coding_interviews | 483 | 12632775 | # -*- coding:utf-8 -*-
class Solution:
    def __init__(self):
        # stack1 buffers incoming elements; stack2 holds them in reversed
        # (i.e. queue) order for popping.
        self.stack1 = []
        self.stack2 = []
    def push(self, node):
        # Enqueue: simply push onto the input stack.
        self.stack1.append(node)
    def pop(self):
        # Dequeue: refill the output stack from the input stack only when it is
        # empty, so each element is moved (and thus reversed) exactly once.
        if len(self.stack2) == 0:
            while len(self.stack1) != 0:
                self.stack2.append(self.stack1.pop())
        return self.stack2.pop()
 |
src/YoutubeDataset.py | JasonQSY/YouTube3D | 102 | 12632794 | import numpy as np
import random
from utils import save_obj, load_obj
import torch
from torch.utils import data
import cv2
import os
import h5py
import random
from ReDWebNet import resNet_data_preprocess
def draw(img, target, fname):
img_temp = img.copy()
color_close = (255, 0, 0) # close is blue
color_far = (0, 255, 0) # far is green
for i in range(target.shape[1]):
x1 = int(target[1, i]); y1 = int(target[0, i]);
x2 = int(target[3, i]); y2 = int(target[2, i]);
cv2.circle(img_temp,(x1, y1),2,color_far,-1)
cv2.circle(img_temp,(x2, y2),2,color_close,-1)
cv2.arrowedLine(img_temp, (x2, y2), (x1, y1), (0, 255, 255), 1)
cv2.imwrite(fname, img_temp)
print "Done writing to %s" % fname
class data_augmenter():
def __init__(self, width, height):
"""
Args:
width and height are only used to determine the
output aspect ratio, not the actual output size
"""
self.ops = []
cv2.setNumThreads(0)
self.width = float(width)
self.height = float(height)
def add_rotation(self, probability, max_left_rotation=-10, max_right_rotation=10):
self.ops.append({'type':'rotation', 'probability':probability, 'max_left_rotation': max_left_rotation, 'max_right_rotation':max_right_rotation})
def add_zoom(self, probability, min_percentage, max_percentage):
self.ops.append({'type':'zoom', 'probability':probability, 'min_percentage': min_percentage, 'max_percentage': max_percentage})
def add_flip_left_right(self, probability):
self.ops.append({'type':'flip_lr', 'probability':probability})
def add_crop(self, probability, min_percentage=0.5):
self.ops.append({'type':'crop', 'probability':probability, 'min_percentage':min_percentage})
def draw(self, img, target, fname):
img_temp = img.copy()
color_close = (255, 0, 0) # close is blue
color_far = (0, 255, 0) # far is green
for i in range(target.shape[1]):
x1 = int(target[1, i]); y1 = int(target[0, i]);
x2 = int(target[3, i]); y2 = int(target[2, i]);
cv2.circle(img_temp,(x1, y1),2,color_far,-1)
cv2.circle(img_temp,(x2, y2),2,color_close,-1)
cv2.arrowedLine(img_temp, (x2, y2), (x1, y1), (0, 255, 255), 1)
cv2.imwrite(fname, img_temp)
print "Done writing to %s" % fname
def __str__(self):
out_str = 'Data Augmenter:\n'
for op in self.ops:
out_str += '\t'
for key in op.keys():
out_str = out_str + str(key) +':'+ str(op[key]) + '\t'
out_str += '\n'
return out_str
def aug(self, img, target):
orig_img = img.copy()
orig_target = target.copy()
for op in self.ops:
if random.uniform(0.0, 1.0) <= op['probability']:
if op['type'] == 'crop':
percentage = random.uniform(op['min_percentage'], 1.0)
# print "Cropping.: Percentage = %f" % percentage
#################### image
if img.shape[0] <= img.shape[1]:
dst_h = int(img.shape[0] * percentage)
dst_w = min(int(dst_h / self.height * self.width), img.shape[1])
elif img.shape[0] > img.shape[1]:
dst_w = int(img.shape[1] * percentage)
dst_h = min(int(dst_w / self.width * self.height), img.shape[0])
offset_y = random.randint(0, img.shape[0]- dst_h)
offset_x = random.randint(0, img.shape[1]- dst_w)
img = img[offset_y:offset_y+dst_h, offset_x:offset_x+dst_w, :]
#################### target
target[0,:] = target[0,:] - offset_y
target[1,:] = target[1,:] - offset_x
target[2,:] = target[2,:] - offset_y
target[3,:] = target[3,:] - offset_x
mask = target[0,:] < dst_h
mask = np.logical_and(mask, target[1,:] < dst_w)
mask = np.logical_and(mask, target[2,:] < dst_h)
mask = np.logical_and(mask, target[3,:] < dst_w)
mask = np.logical_and(mask, target[0,:] >= 0)
mask = np.logical_and(mask, target[1,:] >= 0)
mask = np.logical_and(mask, target[2,:] >= 0)
mask = np.logical_and(mask, target[3,:] >= 0)
# self.draw(img, target, '2_crop.png')
if np.sum(mask) == 0 or np.sum(mask) == 1:
return orig_img, orig_target
else:
target = target[:, mask]
elif op['type'] == 'flip_lr':
# print "Flipping..................."
#################### image
img = cv2.flip(img, 1)
#################### target
target[1,:] = img.shape[1] - target[1,:]
target[3,:] = img.shape[1] - target[3,:]
# self.draw(img, target, '4_flip.png')
elif op['type'] == 'zoom':
# print "Zooming..................."
#################### image
percentage = random.uniform(op['min_percentage'], op['max_percentage'])
img = cv2.resize(img, None, fx = percentage, fy = percentage)
#################### target
target[0:4,:] = target[0:4,:] * percentage
# self.draw(img, target, '1_zoom.png')
elif op['type'] == 'rotation':
# print "Rotating..................."
#################### image
angle = random.uniform(-op['max_left_rotation'], op['max_right_rotation'])
rotation_matrix = cv2.getRotationMatrix2D((img.shape[1]/2, img.shape[0]/2), angle, 1.0)
img = cv2.warpAffine(img, rotation_matrix, (img.shape[1], img.shape[0]))
#################### target
temp = rotation_matrix[0,:].copy()
rotation_matrix[0,:] = rotation_matrix[1,:]
rotation_matrix[1,:] = temp
temp = rotation_matrix[:,0].copy()
rotation_matrix[:,0] = rotation_matrix[:,1]
rotation_matrix[:,1] = temp
target[0:2,:] = rotation_matrix[:,0:2].dot(target[0:2,:]) + rotation_matrix[:,2:3]
target[2:4,:] = rotation_matrix[:,0:2].dot(target[2:4,:]) + rotation_matrix[:,2:3]
mask = target[0,:] < img.shape[0]
mask = np.logical_and(mask, target[1,:] < img.shape[1])
mask = np.logical_and(mask, target[2,:] < img.shape[0])
mask = np.logical_and(mask, target[3,:] < img.shape[1])
mask = np.logical_and(mask, target[0,:] >= 0)
mask = np.logical_and(mask, target[1,:] >= 0)
mask = np.logical_and(mask, target[2,:] >= 0)
mask = np.logical_and(mask, target[3,:] >= 0)
if np.sum(mask) == 0 or np.sum(mask) == 1:
return orig_img, orig_target
else:
target = target[:, mask]
# self.draw(img, target, '3_rotation.png')
return img, target
class YoutubeDataset(data.Dataset):
def __init__(self, csv_filename,
height=240, width=320,
b_oppi = False,
b_data_aug = False,
b_resnet_prep = False):
super(YoutubeDataset, self).__init__()
print("=====================================================")
print "Using YoutubeDataset..."
self.parse_youtube_csv(csv_filename)
if b_resnet_prep:
self.height = 384
self.width = 384
else:
self.height = height
self.width = width
self.n_sample = len(self.img_names)
self.b_oppi = b_oppi # only take one relative depth pair per image
self.b_resnet_prep = b_resnet_prep
self.b_data_aug = b_data_aug
print "\t-(width, height): (%d, %d)" % (self.width, self.height)
print "\t-%s: %d samples" % (csv_filename, self.n_sample)
print "\t-One relative depth pair per image:", self.b_oppi
print "\t-Data augmentation:", self.b_data_aug
print "\t-Resnet data preprocessing:", self.b_resnet_prep
print("=====================================================")
if self.b_data_aug:
self.da = data_augmenter(width = self.width, height = self.height)
self.da.add_zoom(0.8, min_percentage = 0.5, max_percentage = 3.0)
self.da.add_crop(1.1, min_percentage = 0.5)
self.da.add_rotation(0.8, max_left_rotation = -10.0, max_right_rotation = 10.0)
self.da.add_flip_left_right(0.5)
print self.da
def parse_csv_meta_data(self, csv_filename):
img_names = []
pkl_names = []
## A line in the csv file should look like this:
## ./laundry_room_0001/shot_001/0001.jpg, ./laundry_room_0001/shot_001/colmap/0/0001_0.6_6000_col_reldepth.pkl
with open(csv_filename, 'r') as f:
while True:
line = f.readline()
if not line:
break
infos = line.split(',')
img_name, pkl_name = infos[0].strip(), infos[1].strip()
img_name = '../data/' + img_name
pkl_name = '../data/' + pkl_name
img_names.append(img_name)
pkl_names.append(pkl_name)
return img_names, pkl_names
def parse_youtube_csv(self, csv_filename):
meta_filename = csv_filename.replace('.csv', '.meta')
if not os.path.exists(meta_filename):
print meta_filename, "does not exist. Creating..."
self.img_names, self.pkl_names = self.parse_csv_meta_data(csv_filename)
save_obj({"img_names":self.img_names, "pkl_names":self.pkl_names}, meta_filename, verbal = True)
else:
print "Loading ", meta_filename
temp = load_obj(meta_filename, verbal = True)
self.img_names = temp["img_names"]
self.pkl_names = temp["pkl_names"]
def __getitem__(self, index):
# This data reader assumes that the target coordinates are represented
# by value in [0, 1.0], i.e., the ratio between the original coordinate
# and the original image height / image width
color = cv2.imread(self.img_names[index])
target = load_obj(self.pkl_names[index])
assert target.shape[0] == 5
if self.b_oppi and target.shape[1] > 2:
rand_idx = 0
target = target[:, rand_idx:rand_idx+1]
target[0,:] = target[0,:] * color.shape[0] #y_A
_dummy = target[0,:]; _dummy[_dummy>=color.shape[0]] = color.shape[0] - 1; _dummy[_dummy < 0] = 0
target[1,:] = target[1,:] * color.shape[1] #x_A
_dummy = target[1,:]; _dummy[_dummy>=color.shape[1]] = color.shape[1] - 1; _dummy[_dummy < 0] = 0
target[2,:] = target[2,:] * color.shape[0] #y_B
_dummy = target[2,:]; _dummy[_dummy>=color.shape[0]] = color.shape[0] - 1; _dummy[_dummy < 0] = 0
target[3,:] = target[3,:] * color.shape[1] #x_B
_dummy = target[3,:]; _dummy[_dummy>=color.shape[1]] = color.shape[1] - 1; _dummy[_dummy < 0] = 0
# target[:4,:] = target[:4,:] - 1 # the coordinate in python starts from 0!!!!
# draw(color, target, '0_orig.png')
if self.b_data_aug:
color, target = self.da.aug(color, target)
target[0,:] = target[0,:] / float(color.shape[0]) * self.height #y_A
_dummy = target[0,:]; _dummy[_dummy>=self.height] = self.height - 1; _dummy[_dummy < 0] = 0
target[1,:] = target[1,:] / float(color.shape[1]) * self.width #x_A
_dummy = target[1,:]; _dummy[_dummy>=self.width] = self.width - 1; _dummy[_dummy < 0] = 0
target[2,:] = target[2,:] / float(color.shape[0]) * self.height #y_B
_dummy = target[2,:]; _dummy[_dummy>=self.height] = self.height - 1; _dummy[_dummy < 0] = 0
target[3,:] = target[3,:] / float(color.shape[1]) * self.width #x_B
_dummy = target[3,:]; _dummy[_dummy>=self.width] = self.width - 1; _dummy[_dummy < 0] = 0
color = cv2.resize(color, (self.width, self.height))
# draw(color, target, '5_final.png')
# raw_input()
color = color.transpose(2, 0, 1).astype(np.float32) / 255.0
if self.b_resnet_prep:
color = resNet_data_preprocess(color)
return color, target.astype(np.int64), (self.height, self.width)
def __len__(self):
return self.n_sample
class YoutubeDatasetVal(YoutubeDataset):
def __init__(self, csv_filename,
height=240, width=320,
b_oppi = False,
b_resnet_prep = False):
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("\tValidation version of the YoutubeDataset")
print("\t\t-It never perform data augmentation")
YoutubeDataset.__init__(self, csv_filename,
height = height, width = width,
b_oppi = b_oppi,
b_data_aug = False,
b_resnet_prep = b_resnet_prep)
def __getitem__(self, index):
# This data reader assumes that the target coordinates are represented
# by value in [0, 1.0], i.e., the ratio between the original coordinate
# and the original image height / image width
#####################################################################
color = cv2.imread(self.img_names[index])
orig_img_res = color.shape[:2]
color = cv2.resize(color, (self.width, self.height))
color = color.transpose(2, 0, 1).astype(np.float32) / 255.0
if self.b_resnet_prep:
color = resNet_data_preprocess(color)
#####################################################################
target = load_obj(self.pkl_names[index])
assert target.shape[0] == 5
if self.b_oppi and target.shape[1] > 2:
rand_idx = random.randint(0, target.shape[1] - 2)
target = target[:, rand_idx:rand_idx+1]
target[0,:] = target[0,:] * orig_img_res[0] #y_A
_dummy = target[0,:]; _dummy[_dummy>=orig_img_res[0]] = orig_img_res[0] - 1; _dummy[_dummy < 0] = 0
target[1,:] = target[1,:] * orig_img_res[1] #x_A
_dummy = target[1,:]; _dummy[_dummy>=orig_img_res[1]] = orig_img_res[1] - 1; _dummy[_dummy < 0] = 0
target[2,:] = target[2,:] * orig_img_res[0] #y_B
_dummy = target[2,:]; _dummy[_dummy>=orig_img_res[0]] = orig_img_res[0] - 1; _dummy[_dummy < 0] = 0
target[3,:] = target[3,:] * orig_img_res[1] #x_B
_dummy = target[3,:]; _dummy[_dummy>=orig_img_res[1]] = orig_img_res[1] - 1; _dummy[_dummy < 0] = 0
# target[:4,:] = target[:4,:] - 1 # the coordinate in python starts from 0!!!!
return color, target.astype(np.int64), orig_img_res
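# A minimal usage sketch; the csv path is a placeholder, and batch_size=1 is used because the
# number of relative-depth pairs varies per image, so the default collate cannot stack targets.
def _example_loader(csv_filename='../data/train_rel_depth.csv'):
    dataset = YoutubeDataset(csv_filename, b_data_aug=True, b_resnet_prep=True)
    loader = data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
    for color, target, (height, width) in loader:
        # color: 1 x 3 x 384 x 384 float tensor; target: 1 x 5 x N with rows
        # (y_A, x_A, y_B, x_B, relation) as produced by __getitem__ above.
        break
    return loader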
|
corehq/ex-submodules/casexml/apps/case/dbaccessors/related.py | akashkj/commcare-hq | 471 | 12632800 | from casexml.apps.case.sharedmodels import CommCareCaseIndex
def get_reverse_indices_json(domain, case_id):
from casexml.apps.case.models import CommCareCase
return CommCareCase.get_db().view(
"case_indices/related",
startkey=[domain, case_id, "reverse_index"],
endkey=[domain, case_id, "reverse_index", {}],
reduce=False,
wrapper=lambda r: r['value'],
).all()
def get_reverse_indices(case):
return get_reverse_indices_for_case_id(case['domain'], case['_id'])
def get_reverse_indices_for_case_id(domain, case_id):
return [CommCareCaseIndex.wrap(raw)
for raw in get_reverse_indices_json(domain, case_id)]
|
datatypes.py | loyalgarlic/snakepit-game | 124 | 12632802 | from collections import namedtuple
Position = namedtuple("Position", "x y")
Vector = namedtuple("Vector", "xdir ydir")
Char = namedtuple("Char", "char color")
Draw = namedtuple("Draw", "x y char color")
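# A minimal usage sketch (field values are illustrative): advance a snake head one step to the
# right and describe how to render it.
def _example_step():
    head = Position(x=10, y=4)
    step = Vector(xdir=1, ydir=0)
    new_head = Position(head.x + step.xdir, head.y + step.ydir)
    return Draw(x=new_head.x, y=new_head.y, char='@', color=2)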
|
chaospy/distributions/collection/chi.py | utsekaj42/chaospy | 333 | 12632822 | """Chi distribution."""
import numpy
from scipy import special
from ..baseclass import SimpleDistribution, ShiftScaleDistribution
class chi(SimpleDistribution):
"""Chi distribution."""
def __init__(self, df=1):
super(chi, self).__init__(dict(df=df))
def _pdf(self, x, df):
return x**(df-1)*numpy.exp(-x*x*.5)/2**(df*.5-1)/special.gamma(df*.5)
def _cdf(self, x, df):
return special.gammainc(df*0.5,0.5*x*x)
def _ppf(self, q, df):
return numpy.sqrt(2*special.gammaincinv(df*0.5, q))
def _lower(self, df):
return numpy.sqrt(2*special.gammaincinv(df*0.5, 1e-12))
def _upper(self, df):
return numpy.sqrt(2*special.gammaincinv(df*0.5, 1-1e-12))
def _mom(self, k, df):
return 2**(.5*k)*special.gamma(.5*(df+k))/special.gamma(.5*df)
class Chi(ShiftScaleDistribution):
"""
Chi distribution.
Args:
df (float, Distribution):
Degrees of freedom
scale (float, Distribution):
Scaling parameter
shift (float, Distribution):
Location parameter
Examples:
>>> distribution = chaospy.Chi(1.5)
>>> distribution
Chi(1.5)
>>> uloc = numpy.linspace(0, 1, 6)
>>> uloc
array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
>>> xloc = distribution.inv(uloc)
>>> xloc.round(3)
array([0. , 0.472, 0.791, 1.127, 1.568, 7.294])
>>> numpy.allclose(distribution.fwd(xloc), uloc)
True
>>> distribution.pdf(xloc).round(3)
array([0. , 0.596, 0.631, 0.546, 0.355, 0. ])
>>> distribution.sample(4).round(3)
array([1.229, 0.321, 2.234, 0.924])
>>> distribution.mom(1).round(3)
1.046
"""
def __init__(self, df=1, scale=1, shift=0):
super(Chi, self).__init__(
dist=chi(df),
scale=scale,
shift=shift,
repr_args=[df],
)
class Maxwell(ShiftScaleDistribution):
"""
Maxwell-Boltzmann distribution
Chi distribution with 3 degrees of freedom
Args:
scale (float, Distribution):
Scaling parameter
shift (float, Distribution):
Location parameter
Examples:
>>> distribution = chaospy.Maxwell()
>>> distribution
Maxwell()
>>> uloc = numpy.linspace(0, 1, 6)
>>> uloc
array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
>>> xloc = distribution.inv(uloc)
>>> xloc.round(3)
array([0. , 1.003, 1.367, 1.716, 2.154, 7.676])
>>> numpy.allclose(distribution.fwd(xloc), uloc)
True
>>> distribution.pdf(xloc).round(3)
array([0. , 0.485, 0.586, 0.539, 0.364, 0. ])
>>> distribution.sample(4).round(3)
array([1.819, 0.806, 2.798, 1.507])
>>> distribution.mom(1).round(3)
1.596
"""
def __init__(self, scale=1, shift=0):
super(Maxwell, self).__init__(
dist=chi(3),
scale=scale,
shift=shift,
repr_args=[],
)
class Rayleigh(ShiftScaleDistribution):
"""
Rayleigh distribution
Args:
scale (float, Distribution):
Scaling parameter
shift (float, Distribution):
Location parameter
Examples:
>>> distribution = chaospy.Rayleigh()
>>> distribution
Rayleigh()
>>> uloc = numpy.linspace(0, 1, 6)
>>> uloc
array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
>>> xloc = distribution.inv(uloc)
>>> xloc.round(3)
array([0. , 0.668, 1.011, 1.354, 1.794, 7.434])
>>> numpy.allclose(distribution.fwd(xloc), uloc)
True
>>> distribution.pdf(xloc).round(3)
array([0. , 0.534, 0.606, 0.541, 0.359, 0. ])
>>> distribution.sample(4).round(3)
array([1.456, 0.494, 2.45 , 1.147])
>>> distribution.mom(1).round(3)
1.253
"""
def __init__(self, scale=1, shift=0):
super(Rayleigh, self).__init__(
dist=chi(2),
scale=scale,
shift=shift,
repr_args=[],
)
|
src/riotwatcher/_apis/league_of_legends/urls/__init__.py | acgandhi/Riot-Watcher | 489 | 12632883 | from .ChampionApiUrls import ChampionApiV3Urls
from .ChampionMasteryApiUrls import ChampionMasteryApiV4Urls
from .ClashApiUrls import ClashApiV1Urls
from .DataDragonUrls import DataDragonUrls
from .LeagueApiUrls import LeagueApiV4Urls
from .LolStatusApiUrls import LolStatusApiV3Urls
from .LolStatusApiV4Urls import LolStatusApiV4Urls
from .SpectatorApiUrls import SpectatorApiV4Urls
from .SummonerApiUrls import SummonerApiV4Urls
from .ThirdPartyCodeApiUrls import ThirdPartyCodeApiV4Urls
from .MatchApiV5Urls import MatchApiV5Urls
|
Lib/objc/_UsageTracking.py | snazari/Pyto | 701 | 12632894 |
"""
Classes from the 'UsageTracking' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
USXPCRemoteObjectProxy = _Class("USXPCRemoteObjectProxy")
USWebpageUsage = _Class("USWebpageUsage")
USWebHistory = _Class("USWebHistory")
USVideoUsage = _Class("USVideoUsage")
USUsageTrust = _Class("USUsageTrust")
USUsageTrackingBundle = _Class("USUsageTrackingBundle")
USTrackingAgentPrivateConnection = _Class("USTrackingAgentPrivateConnection")
USTrackingAgentConnection = _Class("USTrackingAgentConnection")
USUsageReporter = _Class("USUsageReporter")
USWebUsageReport = _Class("USWebUsageReport")
USApplicationUsageReport = _Class("USApplicationUsageReport")
USCategoryUsageReport = _Class("USCategoryUsageReport")
USUsageReport = _Class("USUsageReport")
USUsageQuerying = _Class("USUsageQuerying")
USUsageMonitor = _Class("USUsageMonitor")
USBudget = _Class("USBudget")
USTrustIdentifier = _Class("USTrustIdentifier")
USDomainNormalization = _Class("USDomainNormalization")
|
forte/utils/utils_processor.py | jzpang/forte | 163 | 12632895 |
# Copyright 2021 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions related to processors.
"""
__all__ = ["record_types_and_attributes_check", "collect_input_pack_record"]
from typing import Dict, Set
from forte.data.base_pack import PackType
from forte.common import ExpectedRecordNotFound
from forte.common.resources import Resources
def record_types_and_attributes_check(
expectation: Dict[str, Set[str]], input_pack_record: Dict[str, Set[str]]
):
r"""Check if any types or attributes in expectation dictionary doesn't
match with input_pack.record. If not, an error of
:class:`~forte.common.exception.ExpectedRecordNotFound` will be raised.
Args:
expectation: Dictionary of types and their attributes required for
the current processor/evaluator.
input_pack_record: The input pack record content combined with
all the parent types and attributes collected from
`merged_entry_tree`.
Returns:
"""
# pylint: disable=protected-access
if expectation is not None:
# check if expected types are in input pack.
for expected_t in expectation:
if expected_t not in input_pack_record.keys():
raise ExpectedRecordNotFound(
f"The record type {expected_t} is not found in "
f"meta of the prediction datapack."
)
else:
expected_value = expectation.get(expected_t)
if expected_value is not None:
for expected_t_v in expected_value:
if expected_t_v not in input_pack_record.get(
expected_t, []
):
raise ExpectedRecordNotFound(
f"The record attribute type "
f"{expected_t_v} is not found in "
f"attribute of record {expected_t} "
f"in meta of the input datapack."
)
def collect_input_pack_record(
resources: Resources, input_pack: PackType
) -> Dict[str, Set[str]]:
# pylint: disable=protected-access
r"""Method to collect the type and attributes from the input pack and if
:attr:`~forte.pipeline.Pipeline.resource` has `onto_specs` as key
and ontology specification file path as value, then
`merged_entry_tree` that has all the entries in ontology specification
file would be populated. All the parent entry nodes of the input pack
would be collected from this tree and add to the returned record
dictionary for later comparison to enable subclass type checking.
Args:
resources: The pipeline attribute that stores and passes resources on
the pipeline level.
input_pack: The input datapack.
Returns:
input_pack_record: The input pack record content combined with
all the parent types and attributes collected from
merged_entry_tree
"""
input_pack_record = input_pack._meta.record.copy()
if resources.get("merged_entry_tree"):
merged_entry_tree = resources.get("merged_entry_tree")
merged_entry_tree.collect_parents(input_pack_record)
return input_pack_record
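# A minimal usage sketch of record_types_and_attributes_check; the type and attribute names are
# illustrative (borrowed from forte's base ontology) and not required by this module.
def _example_record_check():
    input_pack_record = {
        "ft.onto.base_ontology.Token": {"pos", "lemma"},
        "ft.onto.base_ontology.Sentence": set(),
    }
    # Passes silently: the expected type and attribute are both present.
    record_types_and_attributes_check(
        {"ft.onto.base_ontology.Token": {"pos"}}, input_pack_record
    )
    # Raises ExpectedRecordNotFound: the "ner" attribute is missing from the Token record.
    try:
        record_types_and_attributes_check(
            {"ft.onto.base_ontology.Token": {"ner"}}, input_pack_record
        )
    except ExpectedRecordNotFound:
        pass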
|
application/frontend/templatetags/versiontag.py | cqkenuo/w12scan | 864 | 12632900 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/3/29 5:35 PM
# @Author : w8ay
# @File : versiontag.py
from django import template
from config import W12SCAN_VERSION
register = template.Library()
@register.simple_tag
def w12_version():
return W12SCAN_VERSION
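# Template usage sketch (the tag library name matches this module's filename):
#   {% load versiontag %}
#   <span class="version">w12scan {% w12_version %}</span>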
|
reconstruction/reconstruction_model.py | garyxcheng/federated | 330 | 12632931 | # Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstractions for Federated Reconstruction Models."""
import abc
import attr
@attr.s(eq=False, frozen=True, slots=True)
class BatchOutput(object):
"""A structure that holds the output of a `ReconstructionModel`.
Note: All fields are optional (may be None).
- `predictions`: Tensor of predictions on the examples.
- `labels`: Tensor of labels for the examples.
- `num_examples`: tf.int32 scalar number of examples seen in the batch.
"""
predictions = attr.ib()
labels = attr.ib()
num_examples = attr.ib()
class ReconstructionModel(object, metaclass=abc.ABCMeta):
"""Represents a reconstruction model for use in Tensorflow Federated.
`ReconstructionModel`s are used to train models that reconstruct a set of
their variables on device, never sharing those variables with the server.
Each `ReconstructionModel` will work on a set of `tf.Variables`, and each
method should be a computation that can be implemented as a `tf.function`;
this implies the class should essentially be stateless from a Python
perspective, as each method will generally only be traced once (per set of
arguments) to create the corresponding TensorFlow graph functions. Thus,
`ReconstructionModel` instances should behave as expected in both eager and
graph (TF 1.0) usage.
In general, `tf.Variables` may be either:
* Weights, the variables needed to make predictions with the model.
* Local variables, e.g. to accumulate aggregated metrics across
calls to forward_pass.
The weights can be broken down into:
* Global variables: Variables that are allowed to be aggregated on the
server.
* Local variables: Variables that cannot leave the device.
Furthermore, both of these types of variables can be:
* Trainable variables: These can and should be trained using gradient-based
methods.
* Non-trainable variables: Could include fixed pre-trained layers or static
model data.
These variables are provided via:
* `global_trainable_variables`
* `global_non_trainable_variables`
* `local_trainable_variables`
* `local_non_trainable_variables`
properties, and must be initialized by the user of the `ReconstructionModel`.
While training a reconstruction model, global trainable variables will
generally be provided by the server. Local trainable variables will then be
reconstructed locally. Updates to the global trainable variables will be sent
back to the server. Local variables are not transmitted.
All `tf.Variables` should be introduced in `__init__`; this could move to a
  `build` method more in line with Keras (see
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) in
the future.
"""
@abc.abstractproperty
def global_trainable_variables(self):
"""An iterable of `tf.Variable` objects, see class comment for details."""
pass
@abc.abstractproperty
def global_non_trainable_variables(self):
"""An iterable of `tf.Variable` objects, see class comment for details."""
pass
@abc.abstractproperty
def local_trainable_variables(self):
"""An iterable of `tf.Variable` objects, see class comment for details."""
pass
@abc.abstractproperty
def local_non_trainable_variables(self):
"""An iterable of `tf.Variable` objects, see class comment for details."""
pass
@abc.abstractproperty
def input_spec(self):
"""The type specification of the `batch_input` parameter for `forward_pass`.
    A nested structure of `tf.TensorSpec` objects that matches the structure of
arguments that will be passed as the `batch_input` argument of
`forward_pass`. The tensors must include a batch dimension as the first
dimension, but the batch dimension may be undefined.
"""
pass
@abc.abstractmethod
def forward_pass(self, batch_input, training=True):
"""Runs the forward pass and returns results.
This method should not modify any variables that are part of the model
parameters, that is, variables that influence the predictions. Rather, this
is done by the training loop.
Args:
batch_input: a nested structure that matches the structure of
`ReconstructionModel.input_spec` and each tensor in `batch_input`
satisfies `tf.TensorSpec.is_compatible_with()` for the corresponding
`tf.TensorSpec` in `ReconstructionModel.input_spec`.
training: If `True`, run the training forward pass, otherwise, run in
evaluation mode. The semantics are generally the same as the `training`
argument to `keras.Model.__call__`; this might e.g. influence how
dropout or batch normalization is handled.
Returns:
A `BatchOutput` object.
"""
pass
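# --- Illustrative sketch (not part of the library) ---
# A minimal concrete `ReconstructionModel`: a toy linear model whose kernel is
# a global (server-aggregated) variable and whose bias is a local variable that
# is reconstructed on device and never transmitted. The model, its shapes and
# its names are invented here purely to show how the four variable properties,
# `input_spec` and `forward_pass` fit together; it assumes TensorFlow is
# importable as `tf`.
import tensorflow as tf
class ExampleLinearReconstructionModel(ReconstructionModel):
  """Toy linear model with a global kernel and a locally reconstructed bias."""
  def __init__(self, input_dim=4):
    self._input_dim = input_dim
    self._kernel = tf.Variable(tf.zeros([input_dim, 1]), name='kernel')
    self._bias = tf.Variable(tf.zeros([1]), name='bias')
  @property
  def global_trainable_variables(self):
    return [self._kernel]  # Aggregated on the server.
  @property
  def global_non_trainable_variables(self):
    return []
  @property
  def local_trainable_variables(self):
    return [self._bias]  # Reconstructed on device, never shared.
  @property
  def local_non_trainable_variables(self):
    return []
  @property
  def input_spec(self):
    return (tf.TensorSpec([None, self._input_dim], tf.float32),
            tf.TensorSpec([None, 1], tf.float32))
  def forward_pass(self, batch_input, training=True):
    x, y = batch_input
    predictions = tf.matmul(x, self._kernel) + self._bias
    return BatchOutput(
        predictions=predictions,
        labels=y,
        num_examples=tf.shape(x)[0])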
|
src/genie/libs/parser/iosxr/monitor.py | balmasea/genieparser | 204 | 12632933 | """monitor.py
Iosxr parsers for the following show commands:
* monitor interface {interface}
"""
# Python
import re
import time
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import (Any,
Optional, Use, SchemaTypeError, Schema)
from genie.libs.parser.utils.common import Common
""" Schema for:
* monitor interface {interface}
"""
class MonitorInterfaceInterfaceSchema(MetaParser):
schema = {
"monitor_time": {
Any(): {
"hostname": str,
"sys_up_time": str,
Optional("protocol"): str,
Optional("line_protocol_status"): str,
Optional("interface_status"): str,
Optional("encapsulation"): str,
Optional("interface"): {
Any(): {
Optional("interface_status"): str,
Optional("input_bps"): int,
Optional("output_bps"): int,
Optional("input_bps_percent"): float,
Optional("output_bps_percent"): float,
Optional("input_bytes"): float,
Optional("input_bytes_unit"): str,
Optional("output_bytes"): float,
Optional("output_bytes_unit"): str,
Optional("input_delta"): int,
Optional("output_delta"): int,
Optional("traffic_stats"): {
"input_packets": int,
"input_packets_delta": int,
"input_pps": int,
"input_bytes": int,
"input_bytes_delta": int,
"input_kbps_rate": int,
"input_kbps_delta": float,
"output_packets": int,
"output_packets_delta": int,
"output_pps": int,
"output_bytes": int,
"output_bytes_delta": int,
"output_kbps_rate": int,
"output_kbps_delta": float
},
Optional("error_stats"):{
"input_total": int,
"input_total_delta": int,
"input_crc": int,
"input_crc_delta": int,
Optional("input_frame"): int,
Optional("input_frame_delta"): int,
"input_overrun": int,
"input_overrun_delta": int,
"output_total": int,
"output_total_delta": int,
Optional("output_underrun"): int,
Optional("output_underrun_delta"): int
}
}
},
}
}
}
""" Parser for:
* monitor interface {interface}
"""
class MonitorInterfaceInterface(MonitorInterfaceInterfaceSchema):
cli_command = ['monitor interface {interface}']
def cli(self, output=None, interface=None, timeout=10):
if output is None:
self.device.sendline(self.cli_command[0].format(interface=interface))
try:
out = self.device.expect(
[r"{}\s+Monitor\sTime:[\s\S]+Quit='q'".format(self.device._hostname)],
timeout=timeout).match_output
skip_timeout = False
except AttributeError:
out = self.device.expect(
[r"{}\s+Monitor\sTime:[\s\S]+Quit='q'".format(self.device._hostname)],\
timeout=timeout)
skip_timeout = True
self.device.sendline('q')
if not skip_timeout:
time.sleep(5)
self.device.expect('.*')
else:
out = output
        # Initialize the result dictionary
ret_dict = {}
        # Initialize flag
control_flag = None
# F17-ASR9922 Monitor Time: 00:00:00 SysUptime: 09:47:06
p1 = re.compile(r'^(?P<hostname>[\S]+)\s+Monitor Time:\s+(?P<monitor_time>'
r'[\S]+)\s+SysUptime:\s+(?P<sys_up_time>[\S]+)$')
# Protocol:General
p2 = re.compile(r'^Protocol:(?P<protocol>\S+)$')
# Hu0/0/0/0 22000/ 0% 23000/ 0% 114.6M/0 280.5M/0
p3 = re.compile(r'^(?P<interface>\S+)\s+(?P<input_bps>[\d]+)\/\s+?(?P<input_bps_percent>[\S]+)'
r'%\s+(?P<output_bps>[\d]+)\/\s+?(?P<output_bps_percent>[\S]+)%\s+'
r'(?P<input_bytes>[\d\.]+)(?P<int_bytes>\w)?\/(?P<input_delta>[\d]+)\s+'
r'(?P<output_bytes>[\d\.]+)(?P<out_bytes>\w)?\/(?P<output_delta>[\d]+)$')
# Gi0/0/0/1 (statistics not available)
p3_1 = re.compile(r'^(?P<interface>\S+)\s+\((?P<statistics>[\s\S]+)\)')
# MgmtEth0/RP0/CPU0/0 is up, line protocol is up
p4 = re.compile(r'^(?P<interface>\S+) +is +(?P<interface_status>\S+), '
r'+line +protocol +is +(?P<line_protocol_status>\S+)$')
# Encapsulation 802.1Q
p5 = re.compile(r'^Encapsulation\s+(?P<encapsulation>.+)$')
# Input Packets: 282171 0
p6 = re.compile(r'^Input\s+Packets:\s+(?P<input_packets>\d+)\s+'
r'(?P<input_packets_delta>\d+)$')
# Input pps: 133
p7 = re.compile(r'^Input\s+pps:\s+(?P<input_pps>\d+)$')
# Input Bytes: 261447750 40913
p8 = re.compile(r'^Input\s+Bytes:\s+(?P<input_bytes>\d+)\s+(?P<input_bytes_delta>\d+)$')
# Input Kbps (rate): 176 ( 0%)
p9 = re.compile(r'^Input\s+Kbps\s+\(rate\):\s+(?P<input_kbps_rate>\S+)'
r'\s+\((?P<input_kbps_delta>.*)\)$')
# Output Packets: 1178 0
p10 = re.compile(r'^Output\s+Packets:\s+(?P<output_packets>\d+)\s+'
r'(?P<output_packets_delta>\d+)$')
# Output pps: 133
p11 = re.compile(r'^Output\s+pps:\s+(?P<output_pps>\d+)$')
# Output Bytes: 261447750 40913
p12 = re.compile(r'^Output\s+Bytes:\s+(?P<output_bytes>\d+)\s+(?P<output_bytes_delta>\d+)$')
# Output Kbps (rate): 176 ( 0%)
p13 = re.compile(r'^Output\s+Kbps\s+\(rate\):\s+(?P<output_kbps_rate>\S+)'
r'\s+\((?P<output_kbps_delta>.*)\)$')
# Input Total: 0 0
p14 = re.compile(r'^Input\s+Total:\s+(?P<input_total>\d+)\s+(?P<input_total_delta>\d+)$')
# Input CRC: 0 0
p15 = re.compile(r'^Input\s+CRC:\s+(?P<input_crc>\d+)\s+(?P<input_crc_delta>\d+)$')
# Input Frame: 0 0
p16 = re.compile(r'^Input\s+Frame:\s+(?P<input_frame>\d+)\s+(?P<input_frame_delta>\d+)$')
# Input Frame:
p16_1 = re.compile(r'^Input\s+Frame:$')
#0 0
p16_2 = re.compile(r'^(?P<input_frame>\d+)\s+(?P<input_frame_delta>\d+)$')
# Input Overrun: 0 0
p17 = re.compile(r'^Input\s+Overrun:\s+(?P<input_overrun>\d+)\s+'
r'(?P<input_overrun_delta>\d+)$')
# Output Total: 0 0
p18 = re.compile(r'^Output\s+Total:\s+(?P<output_total>\d+)\s+(?P<output_total_delta>\d+)$')
# Output Underrun: 0 0
p19 = re.compile(r'^Output\s+Underrun:\s+(?P<output_underrun>\d+)\s+'
r'(?P<output_underrun_delta>\d+)$')
# Output Underrun: 0
p19_1 = re.compile(r'^Output\s+Underrun:\s+(?P<output_underrun>\d+)$')
#0
p19_2 = re.compile(r'^(?P<output_underrun_delta>\d+)$')
for line in out.splitlines():
            # Remove any ANSI escape sequences from the line
ansi_escape3 = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]', flags=re.IGNORECASE)
line = ansi_escape3.sub('', line)
line = line.strip()
# F17-ASR9922 Monitor Time: 00:00:00 SysUptime: 09:47:06
m = p1.match(line)
if m:
group = m.groupdict()
monitor_time_dict = ret_dict.setdefault('monitor_time', {}).\
setdefault(group['monitor_time'], {})
monitor_time_dict.update({'hostname': group['hostname'],
'sys_up_time': group['sys_up_time']
})
continue
# Protocol:General
m = p2.match(line)
if m:
group = m.groupdict()
monitor_time_dict.update({'protocol': group['protocol']})
continue
# Hu0/0/0/0 22000/ 0% 23000/ 0% 114.6M/0 280.5M/0
m = p3.match(line)
if m:
group = m.groupdict()
                # convert interface to full name
interface = Common.convert_intf_name(group['interface'])
interface_dict = monitor_time_dict.setdefault("interface", {})
each_intf_dict = interface_dict.setdefault(interface, {})
each_intf_dict.update({
'input_bps': int(group['input_bps']),
'output_bps': int(group['output_bps']),
'input_delta': int(group['input_delta']),
'output_delta': int(group['output_delta']),
'input_bytes': float(group['input_bytes']),
'output_bytes': float(group['output_bytes']),
})
if group['input_bps_percent'] == '--':
input_bps_percent = 0.0
else:
input_bps_percent = float(group['input_bps_percent'])
if group['output_bps_percent'] == '--':
output_bps_percent = 0.0
else:
output_bps_percent = float(group['output_bps_percent'])
each_intf_dict.update({'input_bps_percent': input_bps_percent,
'output_bps_percent': output_bps_percent})
if group['int_bytes']:
each_intf_dict.update({'input_bytes_unit': group['int_bytes']})
if group['out_bytes']:
each_intf_dict.update({'output_bytes_unit': group['out_bytes']})
continue
# Gi0/0/0/1 (statistics not available)
m = p3_1.match(line)
if m:
group = m.groupdict()
# convert interface to full name
interface = Common.convert_intf_name(group['interface'])
interface_dict = monitor_time_dict.setdefault("interface", {})
each_intf_dict = interface_dict.setdefault(interface, {})
each_intf_dict.update({'interface_status': group['statistics']})
# MgmtEth0/RP0/CPU0/0 is up, line protocol is up
m = p4.match(line)
if m:
group = m.groupdict()
# convert interface to full name
interface = Common.convert_intf_name(group['interface'])
interface_dict = monitor_time_dict.setdefault("interface", {})
each_intf_dict = interface_dict.setdefault(interface, {})
monitor_time_dict.update({'interface_status': group['interface_status'],
'line_protocol_status': group['line_protocol_status']})
continue
# Encapsulation 802.1Q
m = p5.match(line)
if m:
group = m.groupdict()
monitor_time_dict.update({'encapsulation': group['encapsulation']})
continue
# Input Packets: 282171 0
m = p6.match(line)
if m:
group = m.groupdict()
traffic_stats_dict = each_intf_dict.setdefault('traffic_stats', {})
traffic_stats_dict.update({'input_packets': int(group['input_packets']),
'input_packets_delta': int(group['input_packets_delta'])})
continue
# Input pps: 133
m = p7.match(line)
if m:
group = m.groupdict()
traffic_stats_dict.update({'input_pps': int(group['input_pps'])})
continue
# Input Bytes: 261447750 40913
m = p8.match(line)
if m:
group = m.groupdict()
traffic_stats_dict.update({'input_bytes': int(group['input_bytes']),
'input_bytes_delta': int(group['input_bytes_delta'])})
continue
# Input Kbps (rate): 176 ( 0%)
m = p9.match(line)
if m:
group = m.groupdict()
                input_kbps_delta = re.sub(r'[\s%]+', "", group['input_kbps_delta'])
if group['input_kbps_rate'] == 'NA':
input_kbps_rate = 0
else:
input_kbps_rate = int(group['input_kbps_rate'])
if input_kbps_delta == 'NA':
input_kbps_delta = 0.0
else:
input_kbps_delta = float(input_kbps_delta)
traffic_stats_dict.update({'input_kbps_rate': input_kbps_rate,
'input_kbps_delta': input_kbps_delta})
continue
# Output Packets: 1178 0
m = p10.match(line)
if m:
group = m.groupdict()
traffic_stats_dict.update({'output_packets': int(group['output_packets']),
'output_packets_delta': int(group['output_packets_delta'])})
continue
# Output pps: 133
m = p11.match(line)
if m:
group = m.groupdict()
traffic_stats_dict.update({'output_pps': int(group['output_pps'])})
continue
# Output Bytes: 261447750 40913
m = p12.match(line)
if m:
group = m.groupdict()
traffic_stats_dict.update({'output_bytes': int(group['output_bytes']),
'output_bytes_delta': int(group['output_bytes_delta'])})
continue
# Output Kbps (rate): 0 ( 0%)
m = p13.match(line)
if m:
group = m.groupdict()
                output_kbps_delta = re.sub(r'[\s%]+', "", group['output_kbps_delta'])
if group['output_kbps_rate'] == 'NA':
output_kbps_rate = 0
else:
output_kbps_rate = int(group['output_kbps_rate'])
if output_kbps_delta == 'NA':
output_kbps_delta = 0.0
else:
output_kbps_delta = float(output_kbps_delta)
traffic_stats_dict.update({'output_kbps_rate': output_kbps_rate,
'output_kbps_delta': output_kbps_delta})
continue
# Input Total: 0 0
m = p14.match(line)
if m:
group = m.groupdict()
error_stats_dict = each_intf_dict.setdefault('error_stats', {})
error_stats_dict.update({'input_total': int(group['input_total']),
'input_total_delta': int(group['input_total_delta'])})
continue
# Input CRC: 0 0
m = p15.match(line)
if m:
group = m.groupdict()
error_stats_dict.update({'input_crc': int(group['input_crc']),
'input_crc_delta': int(group['input_crc_delta'])})
continue
# Input Frame: 0 0
m = p16.match(line)
if m:
group = m.groupdict()
error_stats_dict.update({'input_frame': int(group['input_frame']),
'input_frame_delta': int(group['input_frame_delta'])})
continue
# Input Frame:
m = p16_1.match(line)
if m:
control_flag = 'input_frame'
continue
# 0 0
m = p16_2.match(line)
if m and control_flag=='input_frame':
group = m.groupdict()
error_stats_dict.update({'input_frame': int(group['input_frame']),
'input_frame_delta': int(group['input_frame_delta'])})
continue
# Input Overrun: 0 0
m = p17.match(line)
if m:
group = m.groupdict()
error_stats_dict.update({'input_overrun': int(group['input_overrun']),
'input_overrun_delta': int(group['input_overrun_delta'])})
continue
# Output Total: 0 0
m = p18.match(line)
if m:
group = m.groupdict()
error_stats_dict.update({'output_total': int(group['output_total']),
'output_total_delta': int(group['output_total_delta'])})
continue
# Output Underrun: 0 0
m = p19.match(line)
if m:
group = m.groupdict()
error_stats_dict.update({'output_underrun': int(group['output_underrun']),
'output_underrun_delta': int(group['output_underrun_delta'])})
continue
# Output Underrun: 0
m = p19_1.match(line)
if m:
group = m.groupdict()
error_stats_dict.update({'output_underrun': int(group['output_underrun'])})
control_flag="output_underrun"
# 0
m = p19_2.match(line)
if m and control_flag == "output_underrun":
group = m.groupdict()
error_stats_dict.update({'output_underrun_delta': int(group['output_underrun_delta'])})
return ret_dict
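# Usage sketch (illustrative; the testbed file and device name below are
# assumptions, not part of this parser): with a pyATS/Genie testbed the
# parser is normally reached through `device.parse()`:
#
#   from genie.testbed import load
#   testbed = load('testbed.yaml')
#   device = testbed.devices['my-iosxr']
#   device.connect()
#   parsed = device.parse('monitor interface HundredGigE0/0/0/0')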
|
Packs/CircleCI/Integrations/CircleCI/CircleCI_test.py | diCagri/content | 799 | 12633006 | import io
import json
import pytest
from typing import Tuple, Dict
from CircleCI import Client, circleci_workflows_list_command, circleci_artifacts_list_command, \
circleci_workflow_jobs_list_command, circleci_workflow_last_runs_command, DEFAULT_LIMIT_VALUE
from CommonServerPython import CommandResults
fake_client = Client('', '', False, False, '', '', '')
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
test_data = util_load_json('test_data/circle_ci_commands_test_data.json')
@pytest.mark.parametrize('command_func, func_name',
[(circleci_workflows_list_command, 'get_workflows_list'),
(circleci_artifacts_list_command, 'get_job_artifacts'),
(circleci_workflow_jobs_list_command, 'get_workflow_jobs'),
(circleci_workflow_last_runs_command, 'get_last_workflow_runs')])
def test_circleci_commands(mocker, command_func, func_name):
"""
Given:
- 'args': XSOAR arguments
When:
- Executing a CircleCI command.
Then:
- Ensure expected CommandResults object is returned.
"""
command_test_data = test_data[func_name]
mocker.patch.object(fake_client, func_name, return_value=command_test_data['response'])
result: CommandResults = command_func(fake_client, dict())
assert result.outputs_prefix == command_test_data['outputs_prefix']
assert result.outputs_key_field == command_test_data['outputs_key_field']
assert result.outputs == command_test_data['outputs']
GET_COMMON_ARGUMENTS_INPUTS = [(Client('', '', False, False, vc_type='a', organization='b', project='c'), dict(),
('a', 'b', 'c', DEFAULT_LIMIT_VALUE)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'vcs_type': 'x'}, ('x', 'b', 'c', DEFAULT_LIMIT_VALUE)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'organization': 'x'}, ('a', 'x', 'c', DEFAULT_LIMIT_VALUE)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'project': 'x'}, ('a', 'b', 'x', DEFAULT_LIMIT_VALUE)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'limit': 1}, ('a', 'b', 'c', 1)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'vcs_type': 'x', 'limit': 1}, ('x', 'b', 'c', 1)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'organization': 'x', 'limit': 1}, ('a', 'x', 'c', 1)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'project': 'x', 'limit': 1}, ('a', 'b', 'x', 1)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'vcs_type': 'x', 'organization': 'y'}, ('x', 'y', 'c', DEFAULT_LIMIT_VALUE)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'vcs_type': 'x', 'project': 'y'}, ('x', 'b', 'y', DEFAULT_LIMIT_VALUE)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'organization': 'x', 'project': 'y'}, ('a', 'x', 'y', DEFAULT_LIMIT_VALUE)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'vcs_type': 'x', 'organization': 'y', 'project': 'z'},
('x', 'y', 'z', DEFAULT_LIMIT_VALUE)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'vcs_type': 'x', 'organization': 'y', 'limit': 1},
('x', 'y', 'c', 1)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'vcs_type': 'x', 'project': 'y', 'limit': 1},
('x', 'b', 'y', 1)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'organization': 'x', 'project': 'y', 'limit': 1},
('a', 'x', 'y', 1)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'vcs_type': 'x', 'organization': 'y', 'project': 'z', 'limit': 1},
('x', 'y', 'z', 1)),
]
@pytest.mark.parametrize('client, args, expected', GET_COMMON_ARGUMENTS_INPUTS)
def test_get_common_arguments(client: Client, args: Dict, expected: Tuple[str, str, str, int]):
"""
Given:
- XSOAR arguments
When:
    - Extracting commonly used arguments for a few commands.
    Then:
    - Ensure the common arguments are extracted as expected, and that the default value from the instance parameter is used when an argument is not provided.
"""
from CircleCI import get_common_arguments
assert get_common_arguments(client, args) == expected
|
xmanager/xm/id_predictor_test.py | jurgisp/xmanager | 392 | 12633009 | # Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import unittest
from xmanager.xm import id_predictor
from xmanager.xm import utils
class IdPredictorTest(unittest.TestCase):
@utils.run_in_asyncio_loop
async def test_first_id_is_correct(self):
"""Simple Predictor usage example."""
predictor = id_predictor.Predictor(next_id=1)
first_id = predictor.reserve_id()
async with predictor.submit_id(first_id):
self.assertEqual(first_id, 1)
@utils.run_in_asyncio_loop
async def test_ids_are_submitted_in_order(self):
predictor = id_predictor.Predictor(next_id=1)
self.assertEqual(predictor.reserve_id(), 1)
self.assertEqual(predictor.reserve_id(), 2)
self.assertEqual(predictor.reserve_id(), 3)
submitted_ids = []
async def submit(id_to_submit):
async with predictor.submit_id(id_to_submit):
submitted_ids.append(id_to_submit)
await asyncio.gather(submit(3), submit(2), submit(1))
self.assertEqual(submitted_ids, [1, 2, 3])
@utils.run_in_asyncio_loop
async def test_broken_sequence(self):
predictor = id_predictor.Predictor(next_id=1)
self.assertEqual(predictor.reserve_id(), 1)
self.assertEqual(predictor.reserve_id(), 2)
with self.assertRaises(RuntimeError):
async with predictor.submit_id(1):
raise RuntimeError('Id was eaten by a giant space ant.')
with self.assertRaises(id_predictor.BrokenSequenceError):
async with predictor.submit_id(2):
pass
if __name__ == '__main__':
unittest.main()
|
tests/checker/reputation/reputation_test_base.py | Centaurioun/PyFunceble | 213 | 12633012 | """
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
A module that provides some abstract class for the reputation tests.
Author:
<NAME>, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/special-thanks.html
Contributors:
https://pyfunceble.github.io/contributors.html
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/dev/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tempfile
import unittest
from typing import List, Optional
from PyFunceble.checker.reputation.base import ReputationCheckerBase
class ReputationCheckerTestBase(unittest.TestCase):
"""
Tests of the base of all our reputation checker.
"""
def setUp(self) -> None:
"""
        Sets up everything we need.
"""
self.checker = ReputationCheckerBase()
self.tempfile = tempfile.NamedTemporaryFile("wb", delete=False)
self.our_dataset = """
127.176.134.253#4#3#Malicious Host
127.34.113.192#4#2#Malicious Host
127.34.107.238#4#2#Malicious Host
127.166.193.125#4#2#Malicious Host
127.179.153.18#4#2#Malicious Host
127.89.243.30#4#2#Malicious Host
127.147.142.53#4#2#Malicious Host
127.35.150.233#4#3#Malicious Host
127.97.172.196#4#2#Malicious Host
127.24.78.18#4#2#Malicious Host
"""
self.tempfile.write(self.our_dataset.encode())
self.tempfile.seek(0)
self.checker.ipv4_reputation_query_tool.source_file = self.tempfile.name
def tearDown(self) -> None:
"""
        Destroys everything we don't need.
"""
self.tempfile.close()
os.unlink(self.tempfile.name)
del self.checker
@staticmethod
def fake_query_a_record(*args, **kwargs) -> Optional[List[str]]:
"""
        A fake method which provides a fake IP for testing.
"""
_ = args
_ = kwargs
return ["127.176.134.253"]
@staticmethod
def fake_query_a_record_none(*args, **kwargs) -> Optional[List[str]]:
"""
        A fake method which provides no IP (None) for testing.
"""
_ = args
_ = kwargs
@staticmethod
def fake_query_a_record_not_known(*args, **kwargs) -> Optional[List[str]]:
"""
        A fake method which provides a fake IP that is not in our dataset.
"""
_ = args
_ = kwargs
return ["192.168.127.12"]
|
dbver/versions/00001_init.py | bopopescu/redis-ctl | 109 | 12633015 | from migrate import *
from sqlalchemy.sql.sqltypes import *
from sqlalchemy.sql.schema import *
from sqlalchemy.dialects.mysql.base import MEDIUMTEXT
from sqlalchemy.sql.functions import func
meta = MetaData()
cluster = Table(
'cluster', meta,
Column('id', Integer, nullable=False, primary_key=True, autoincrement=True),
Column('description', String(256), nullable=True, server_default=None),
mysql_engine='InnoDB', mysql_charset='utf8'
)
cluster_balance_plan = Table(
'cluster_balance_plan', meta,
Column('id', Integer, nullable=False, primary_key=True, autoincrement=True),
Column('cluster_id', Integer, ForeignKey("cluster.id"), nullable=False, unique=True),
Column('balance_plan_json', MEDIUMTEXT, nullable=False),
mysql_engine='InnoDB', mysql_charset='utf8'
)
cluster_task = Table(
'cluster_task', meta,
Column('id', Integer, nullable=False, primary_key=True, autoincrement=True),
Column('cluster_id', Integer, ForeignKey("cluster.id"), unique=False, nullable=False),
Column('creation', DateTime, nullable=False, index=True, server_default=func.now()),
Column('task_type', Integer, nullable=False, server_default="0"),
Column('exec_error', MEDIUMTEXT, nullable=True),
Column('completion', DateTime, nullable=True, index=True, server_default=None),
mysql_engine='InnoDB', mysql_charset='utf8'
)
cluster_task_step = Table(
'cluster_task_step', meta,
Column('id', Integer, nullable=False, primary_key=True, autoincrement=True),
Column('task_id', Integer, ForeignKey("cluster_task.id"), unique=False, nullable=False),
Column('command', String(64), nullable=False, server_default=""),
Column('args_json', MEDIUMTEXT, nullable=False),
Column('exec_error', MEDIUMTEXT, nullable=True),
Column('start_time', DateTime, nullable=True, server_default=None),
Column('completion', DateTime, nullable=True, server_default=None),
mysql_engine='InnoDB', mysql_charset='utf8'
)
cluster_task_lock = Table(
'cluster_task_lock', meta,
Column('id', Integer, nullable=False, primary_key=True, autoincrement=True),
Column('cluster_id', Integer, ForeignKey("cluster.id"), unique=False, nullable=False),
Column('task_id', Integer, ForeignKey("cluster_task.id"), unique=False, nullable=False),
Column('step_id', Integer, ForeignKey("cluster_task_step.id"), unique=False, nullable=True, server_default=None),
mysql_engine='InnoDB', mysql_charset='utf8'
)
polling_stat = Table(
'polling_stat', meta,
Column('id', Integer, nullable=False, primary_key=True, autoincrement=True),
Column('polling_time', DateTime, nullable=False, index=True, server_default=func.now()),
Column('stat_json', MEDIUMTEXT, nullable=False),
mysql_engine='InnoDB', mysql_charset='utf8'
)
proxy = Table(
'proxy', meta,
Column('id', Integer, nullable=False, primary_key=True, autoincrement=True),
Column('host', String(255), nullable=False, server_default=""),
Column('port', Integer, nullable=False, server_default="0"),
Column('eru_container_id', String(64), nullable=True, index=True, server_default=None),
Column('cluster_id', Integer, ForeignKey("cluster.id"), unique=False, nullable=True, server_default=None),
Column('suppress_alert', Integer, nullable=False, server_default="0"),
UniqueConstraint('host', 'port', name="address"),
mysql_engine='InnoDB', mysql_charset='utf8'
)
proxy_status = Table(
'proxy_status', meta,
Column('id', Integer, nullable=False, primary_key=True, autoincrement=True),
Column('addr', String(255), nullable=False, unique=True, server_default=""),
Column('poll_count', Integer, nullable=False, server_default="0"),
Column('avail_count', Integer, nullable=False, server_default="0"),
Column('rsp_1ms', Integer, nullable=False, server_default="0"),
Column('rsp_5ms', Integer, nullable=False, server_default="0"),
mysql_engine='InnoDB', mysql_charset='utf8'
)
redis_node = Table(
'redis_node', meta,
Column('id', Integer, nullable=False, primary_key=True, autoincrement=True),
Column('host', String(255), nullable=False, server_default=""),
Column('port', Integer, nullable=False, server_default="0"),
Column('eru_container_id', String(64), nullable=True, index=True, server_default=None),
Column('assignee_id', Integer, ForeignKey("cluster.id"), unique=False, nullable=True, server_default=None),
Column('suppress_alert', Integer, nullable=False, server_default="0"),
UniqueConstraint('host', 'port', name="address"),
mysql_engine='InnoDB', mysql_charset='utf8'
)
redis_node_status = Table(
'redis_node_status', meta,
Column('id', Integer, nullable=False, primary_key=True, autoincrement=True),
Column('addr', String(255), nullable=False, unique=True, server_default=""),
Column('poll_count', Integer, nullable=False, server_default="0"),
Column('avail_count', Integer, nullable=False, server_default="0"),
Column('rsp_1ms', Integer, nullable=False, server_default="0"),
Column('rsp_5ms', Integer, nullable=False, server_default="0"),
mysql_engine='InnoDB', mysql_charset='utf8'
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
cluster.create()
cluster_balance_plan.create()
cluster_task.create()
cluster_task_step.create()
cluster_task_lock.create()
polling_stat.create()
proxy.create()
proxy_status.create()
redis_node.create()
redis_node_status.create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
redis_node_status.drop(checkfirst=True)
redis_node.drop(checkfirst=True)
proxy_status.drop(checkfirst=True)
proxy.drop(checkfirst=True)
polling_stat.drop(checkfirst=True)
cluster_task_lock.drop(checkfirst=True)
cluster_task_step.drop(checkfirst=True)
cluster_task.drop(checkfirst=True)
cluster_balance_plan.drop(checkfirst=True)
cluster.drop(checkfirst=True)
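# Usage sketch (illustrative; the database URL and repository path are
# assumptions): these upgrade()/downgrade() hooks are normally driven by
# sqlalchemy-migrate's versioning API, e.g.:
#
#   from migrate.versioning.api import upgrade
#   upgrade('mysql://user:password@localhost/redisctl', 'dbver')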
|
rdkit/Chem/Scaffolds/UnitTestMurckoScaffold.py | kazuyaujihara/rdkit | 1609 | 12633023 | # $Id: test_MurckoScaffold.py 3672 2010-06-14 17:10:00Z landrgr1 $
#
# Created by <NAME>, June 2008
#
from collections import namedtuple
import doctest
import unittest
from rdkit import Chem
from rdkit.Chem.Scaffolds import MurckoScaffold
from rdkit.Chem.Scaffolds.MurckoScaffold import (GetScaffoldForMol, _pyGetScaffoldForMol,
MurckoScaffoldSmilesFromSmiles,
MurckoScaffoldSmiles, MakeScaffoldGeneric)
TestMolecule = namedtuple('TestMolecule', 'smiles,scaffold')
def load_tests(loader, tests, ignore):
""" Add the Doctests from the module """
tests.addTests(doctest.DocTestSuite(MurckoScaffold, optionflags=doctest.ELLIPSIS))
return tests
class TestCase(unittest.TestCase):
def test1MurckoScaffold(self):
# Test the functionality on a smaller test set
for testMol in self.testMolecules:
mol = Chem.MolFromSmiles(testMol.smiles)
calcScaffold = Chem.MolToSmiles(GetScaffoldForMol(mol))
actualScaffold = Chem.MolToSmiles(Chem.MolFromSmiles(testMol.scaffold))
self.assertEqual(calcScaffold, actualScaffold)
def test2MurckoScaffold(self):
# Test the functionality on a larger test set
for testMol in self.testMolecules2:
mol = Chem.MolFromSmiles(testMol.smiles)
calcScaffold = Chem.MolToSmiles(GetScaffoldForMol(mol))
actualScaffold = Chem.MolToSmiles(Chem.MolFromSmiles(testMol.scaffold))
self.assertEqual(calcScaffold, actualScaffold)
def test_ReferenceImplementation(self):
# Check that the C++ implementation is equivalent to the Python reference implementation
for testMol in self.testMolecules:
mol = Chem.MolFromSmiles(testMol.smiles)
calcScaffold1 = Chem.MolToSmiles(GetScaffoldForMol(mol))
calcScaffold2 = Chem.MolToSmiles(_pyGetScaffoldForMol(mol))
self.assertEqual(calcScaffold1, calcScaffold2)
def test_MurckScaffoldSmilesFromSmiles(self):
self.assertEqual(
MurckoScaffoldSmilesFromSmiles('Cc1cc(Oc2nccc(CCC)c2)ccc1'), 'c1ccc(Oc2ccccn2)cc1')
self.assertEqual(MurckoScaffoldSmilesFromSmiles('CCCC'), '')
def test_MurckoScaffoldSmiles(self):
self.assertEqual(MurckoScaffoldSmiles('Cc1cc(Oc2nccc(CCC)c2)ccc1'), 'c1ccc(Oc2ccccn2)cc1')
self.assertEqual(
MurckoScaffoldSmiles(mol=Chem.MolFromSmiles('Cc1cc(Oc2nccc(CCC)c2)ccc1')),
'c1ccc(Oc2ccccn2)cc1')
self.assertRaises(ValueError, MurckoScaffoldSmiles, smiles=None, mol=None)
def test_MakeScaffoldGeneric(self):
def testSmiles(smiles):
return Chem.MolToSmiles(MakeScaffoldGeneric(Chem.MolFromSmiles(smiles)))
self.assertEqual(testSmiles('c1ccccc1'), 'C1CCCCC1')
self.assertEqual(testSmiles('c1cccnc1'), 'C1CCCCC1')
# Examples associated with sf.net issue 246
self.assertEqual(testSmiles('c1[nH]ccc1'), 'C1CCCC1')
self.assertEqual(testSmiles('C1[NH2+]C1'), 'C1CC1')
self.assertEqual(testSmiles('C1[C@](Cl)(F)O1'), 'CC1(C)CC1')
testMolecules = [
TestMolecule('CC1CCC1', 'C1CCC1'),
TestMolecule('NCNCC2CC2C1CC1O', 'C1CC1C1CC1'),
# Spiro
TestMolecule('OC2C(C)C21C(N)C1C', 'C2CC12CC1'),
# Carbonyl outside scaffold
TestMolecule('C1CC1C(=O)OC', 'C1CC1'),
# Double bond outside scaffold
TestMolecule('C1CC1C=C', 'C1CC1'),
# Double bond in scaffold
TestMolecule('C1CC1C=CC1CC1C=CNNCO', 'C1CC1C=CC1CC1'),
TestMolecule('CC1CC1C(N)C1C(N)C1', 'C1CC1CC1CC1'),
# Double bond in linker
TestMolecule('C1CC1C(C(C)C)=NC1CC1', 'C1CC1C=NC1CC1'),
# S=O group in scaffold
TestMolecule('C1CC1S(=O)C1CC1C=CNNCO', 'C1CC1S(=O)C1CC1'),
# S=O group outside scaffold
TestMolecule('O=SCNC1CC1S(=O)C1CC1C=CNNCO', 'C1CC1S(=O)C1CC1'),
# SO2 group in scaffold
TestMolecule('C1CC1S(=O)(=O)C1CC1C=CNNCO', 'C1CC1S(=O)(=O)C1CC1'),
# SO2 group outside scaffold
TestMolecule('O=S(CNCNC)(=O)CNC1CC1S(=O)(=O)C1CC1C=CNNCO', 'C1CC1S(=O)(=O)C1CC1'),
# Hydroxamide
TestMolecule('C1CC1C=NO', 'C1CC1'),
# Cyano group
TestMolecule('C1CC1C#N', 'C1CC1'),
# Acetylene group
TestMolecule('C1CC1C#CNC', 'C1CC1'),
TestMolecule('O=C1N(C)C(=O)N1C#CNC', 'O=C1NC(=O)N1'),
TestMolecule('[O-][N+](=O)c1cc(ccc1Cl)NS(=O)(=O)Cc2ccccc2', 'c1ccccc1NS(=O)(=O)Cc2ccccc2'),
# N-Substituted pyrrol
TestMolecule('Cn1cccc1', 'c1ccc[nH]1'),
# Explicit hydrogens are removed
TestMolecule('C1CC1[CH](C)C1CC1', 'C1CC1CC1CC1'),
]
testMolecules2 = [
TestMolecule('CCOc1ccccc1N(S(C)(=O)=O)CC(NC1CCCCC1)=O', 'O=C(NC1CCCCC1)CNc1ccccc1'),
TestMolecule('c1ccc(-c2c(C)n(-c3c(C(O)=O)cccc3)c(C)nc2=O)cc1',
'O=c1c(cn(cn1)-c1ccccc1)-c1ccccc1'),
TestMolecule('Cc1ccc(Cl)c2c1NC(=O)C2=C1NC(=S)NC1=O', 'c1cc2c(cc1)C(=C1C(NC(N1)=S)=O)C(=O)N2'),
TestMolecule('CNC(=O)CCc1[nH]c2c(c1Sc1ccccc1)cccc2', 'c1cc(Sc2c3c([nH]c2)cccc3)ccc1'),
TestMolecule('CC(=O)OCC(=O)C1(O)CCC2C1(C)CC(=O)C1C3(C)CCC(=O)C=C3CCC21',
'O=C1C=C2CCC3C4CCCC4CC(=O)C3C2CC1'),
TestMolecule('CC(C)CC(Nc1nc(Cl)ccc1[N+]([O-])=O)C(O)=O', 'c1ccncc1'),
TestMolecule('COc1ccc(C(Nc2ccc(S(N3C(C)CCCC3)(=O)=O)cc2)=O)c(OC)c1OC',
'O=C(Nc1ccc(S(=O)(=O)N2CCCCC2)cc1)c1ccccc1'),
TestMolecule('CC(C)CCNc1nc(N)c([N+](=O)[O-])c(NCCO)n1', 'c1cncnc1'),
TestMolecule('c1ccc(Oc2c(NC(COC(c3c(C)noc3C)=O)=O)cccc2)cc1',
'O=C(COC(=O)c1cnoc1)Nc1ccccc1Oc1ccccc1'),
TestMolecule('COC(CCCCC1SCC(NC(OC)=O)C1NC(OC)=O)=O', 'C1CCCS1'),
TestMolecule('CSc1ccc(-c2c(C#N)c(N)nc3n(-c4ccccc4)nc(C)c32)cc1',
'c1ccc(cc1)-c1c2c(n(nc2)-c2ccccc2)ncc1'),
TestMolecule('O=C1Cc2ccccc2Sc2c1cc(Cl)cc2', 'O=C1Cc2ccccc2Sc2ccccc21'),
TestMolecule('COC(c1n(CC(N(C)c2ccccc2)=O)c2ccsc2c1)=O', 'O=C(Cn1c2ccsc2cc1)Nc1ccccc1'),
TestMolecule('N=C1C(=Cc2coc3ccccc3c2=O)C(=O)N=C2SC(c3ccncc3)=NN12',
'N=C1C(=Cc2coc3ccccc3c2=O)C(=O)N=C2SC(c3ccncc3)=NN12'),
TestMolecule('CCOC(c1ccc(NC(CCc2c(C)nc3ncnn3c2C)=O)cc1)=O', 'O=C(Nc1ccccc1)CCc1cnc2n(ncn2)c1'),
TestMolecule('COC(=O)C1=C(C)NC(C)=C(C(OC)=O)C1c1oc(-c2c(Cl)c(Cl)ccc2)cc1',
'c1ccc(-c2oc(C3C=CNC=C3)cc2)cc1'),
TestMolecule('CCN(S(c1cc(NC(COC(CCc2nc3ccccc3s2)=O)=O)ccc1)(=O)=O)CC',
'c1cc(NC(COC(=O)CCc2nc3c(s2)cccc3)=O)ccc1'),
TestMolecule('CCOC(c1cc(OC(c2ccccc2)=O)n(-c2ccccc2)n1)=O', 'O=C(Oc1n(ncc1)-c1ccccc1)c1ccccc1'),
TestMolecule('CCOC(=O)c1nc2c(c(NCc3ccccc3F)n1)cccc2', 'c1ccc(CNc2ncnc3c2cccc3)cc1'),
TestMolecule('Cc1nc(C)n(CC(N2CCCC(C(c3c(C)cc(Cl)cc3)=O)C2)=O)n1',
'c1ccc(cc1)C(=O)C1CCCN(C(=O)Cn2cncn2)C1'),
TestMolecule('COc1cc(NC(=O)c2nnn(CCc3ccccc3)c2N)c(OC)cc1', 'O=C(c1nnn(c1)CCc1ccccc1)Nc1ccccc1'),
TestMolecule('Cc1cc(C(=O)CN2C(=O)c3ccccc3C2=O)c(C)n1Cc1cccs1',
'O=C(CN1C(c2c(cccc2)C1=O)=O)c1cn(Cc2cccs2)cc1'),
TestMolecule('c1cnc2c(c1)cccc2S(N1CCC(C(=O)N2CCN(c3ccc(Cl)cc3)CC2)CC1)(=O)=O',
'c1ccc(cc1)N1CCN(C(=O)C2CCN(S(=O)(=O)c3c4ncccc4ccc3)CC2)CC1'),
TestMolecule('CCOC(c1c(C)[nH]c(C(NNC(c2ccc(C(C)(C)C)cc2)=O)=O)c1C)=O',
'c1ccc(cc1)C(NNC(c1ccc[nH]1)=O)=O'),
TestMolecule('CCOC(c1cc(C(C)C)sc1NC(=O)COC(CCS(c1ccccc1)(=O)=O)=O)=O',
'c1ccc(S(CCC(=O)OCC(Nc2cccs2)=O)(=O)=O)cc1'),
TestMolecule('CCC1CCCCN1CCCNC(=O)Cn1nc(-c2ccccc2)ccc1=O',
'O=C(NCCCN1CCCCC1)Cn1nc(ccc1=O)-c1ccccc1'),
TestMolecule('CCc1cc(OCCn2nc(C(O)=O)c3ccccc3c2=O)ccc1', 'O=c1n(CCOc2ccccc2)ncc2ccccc21'),
TestMolecule('Fc1ccc(CN2CCN3C(CCC3)C2C2CCCCC2)cc1F', 'c1ccc(cc1)CN1CCN2CCCC2C1C1CCCCC1'),
TestMolecule('O=[N+]([O-])c1cc(-c2nnc(N3CCOCC3)c3ccccc23)ccc1N1CCOCC1',
'c1cc2c(nnc(c2cc1)N1CCOCC1)-c1ccc(cc1)N1CCOCC1'),
TestMolecule('Cc1ccnc(NC(=O)COc2ccc3oc4c(c3c2)CCCC4)c1',
'O=C(COc1ccc2oc3c(c2c1)CCCC3)Nc1ccccn1'),
TestMolecule('Cc1cc(=O)oc(C)c1C(=O)NCCCN1CCN(c2ccc(F)cc2)CC1',
'c1ccc(N2CCN(CCCNC(c3ccc(oc3)=O)=O)CC2)cc1'),
TestMolecule('Cc1cc(C(=O)CSc2nc(=O)cc(N)[nH]2)c(C)n1-c1cccc(F)c1',
'O=C(CSc1nc(cc[nH]1)=O)c1cn(cc1)-c1ccccc1'),
TestMolecule('CCN(S(c1cccc(C(=O)N2CCCCC2)c1)(=O)=O)CC', 'O=C(N1CCCCC1)c1ccccc1'),
TestMolecule('CNC(=S)N1CCC(NC(=O)C23CC4CC(C2)CC(C3)C4)CC1',
'O=C(NC1CCNCC1)C12CC3CC(C1)CC(C3)C2'),
TestMolecule('Cc1cc2c(cc1)N=C(C)C(N=O)=C(C)N2', 'c1cc2NC=CC=Nc2cc1'),
TestMolecule('COc1ccc(Sc2cc(C(F)(F)F)nc(-c3ncccc3)n2)cc1', 'c1ccc(cc1)Sc1nc(ncc1)-c1ncccc1'),
TestMolecule('c1coc(CNC(Cn2cc(C(c3ccccc3)=O)c3c2cccc3)=O)c1',
'c1coc(CNC(Cn2cc(C(c3ccccc3)=O)c3c2cccc3)=O)c1'),
TestMolecule('O=C(NCc1ccc(Cl)cc1)c1noc(-c2ccco2)c1', 'O=C(c1noc(c1)-c1ccco1)NCc1ccccc1'),
TestMolecule('CN(C)c1ccc(C(c2n(CCOC(=O)Nc3ccc(Cl)cc3)nnn2)N2CCOCC2)cc1',
'O=C(Nc1ccccc1)OCCn1nnnc1C(c1ccccc1)N1CCOCC1'),
TestMolecule('NC(=NOC(=O)c1cc(Cn2cc(C(F)(F)F)ccc2=O)ccc1)c1ccccc1',
'c1ccc(C=NOC(c2cc(Cn3ccccc3=O)ccc2)=O)cc1'),
TestMolecule('CCc1nnc(NC(=O)Cc2c(-c3ccc(C)cc3)nc(C)s2)s1', 'O=C(Cc1c(-c2ccccc2)ncs1)Nc1nncs1'),
TestMolecule('COCCCNC(=O)CN1C(=O)N(Cc2ccccc2Cl)CC1', 'O=C1NCCN1Cc1ccccc1'),
TestMolecule('Cc1cc([N+]([O-])=O)nn1CC(=O)NCCCn1ccnc1', 'O=C(Cn1nccc1)NCCCn1ccnc1'),
TestMolecule('c1cc(F)c(N2CCN(C(=O)c3ccc(S(NCC4OCCC4)(=O)=O)cc3)CC2)cc1',
'c1ccc(cc1)N1CCN(C(c2ccc(cc2)S(=O)(=O)NCC2OCCC2)=O)CC1'),
TestMolecule('CC(NCc1cccnc1)=C1C(=O)NC(=O)N(c2ccc(C)cc2)C1=O',
'c1cc(ccc1)N1C(=O)NC(C(=CNCc2cccnc2)C1=O)=O'),
TestMolecule('Cc1ccn(C)c(=N)c1', 'N=c1[nH]cccc1'),
TestMolecule('Cc1cc(C)nc(N2CCC(CNC(=O)CCc3ccccc3)CC2)n1',
'O=C(CCc1ccccc1)NCC1CCN(c2ncccn2)CC1'),
TestMolecule('CCOC1=CC(=CNNC(CCCC(NC2CCCCC2)=O)=O)C=CC1=O',
'C1=CC(C=CC1=O)=CNNC(=O)CCCC(=O)NC1CCCCC1'),
TestMolecule('CC(=O)N1CCN(c2ccc([N+]([O-])=O)cc2)CC1', 'c1ccc(cc1)N1CCNCC1'),
TestMolecule('CS(N(CC(=O)N1CCCCC1)Cc1ccc(Cl)cc1)(=O)=O', 'O=C(N1CCCCC1)CNCc1ccccc1'),
TestMolecule('c1coc(C(=O)N2CCN(C(COc3cc(C(NCc4ccccc4)=O)ccc3)=O)CC2)c1',
'c1coc(C(=O)N2CCN(C(COc3cc(C(NCc4ccccc4)=O)ccc3)=O)CC2)c1'),
TestMolecule('Cc1cccc2sc(NNC(=O)C3=COCCO3)nc12', 'O=C(NNc1nc2ccccc2s1)C1=COCCO1'),
TestMolecule('c1ccc2c(c1)N(C)C1(C=Nc3c(cc(N4CCOCC4)c4ccccc34)O1)C2(C)C',
'C1=Nc2c(cc(c3ccccc23)N2CCOCC2)OC11Nc2ccccc2C1'),
TestMolecule('COc1cccc(C2N(CCN3CCOCC3)C(=O)C(O)=C2C(=O)c2sc(C)nc2C)c1',
'O=C(C1=CC(=O)N(C1c1ccccc1)CCN1CCOCC1)c1scnc1'),
TestMolecule('COc1cc(OC)c(NC(CSc2nc3c(c(=O)n2-c2ccc(F)cc2)SCC3)=O)cc1',
'c1ccc(cc1)NC(=O)CSc1n(c(=O)c2c(n1)CCS2)-c1ccccc1'),
TestMolecule('Cc1ccccc1CN1c2ccccc2C2(C1=O)OCCCO2', 'O=C1C2(OCCCO2)c2c(N1Cc1ccccc1)cccc2'),
TestMolecule('O=C(N1C2(OCC1)CCN(c1ncc(C(F)(F)F)cc1Cl)CC2)c1ccccc1',
'O=C(c1ccccc1)N1C2(OCC1)CCN(c1ccccn1)CC2'),
TestMolecule('CC=CC=CC(=O)Nc1nccs1', 'c1ncsc1'),
TestMolecule('CC(C)(C)c1ccc(C(c2c[nH]c(C(NCc3cccnc3)=O)c2)=O)cc1',
'c1ccc(cc1)C(=O)c1c[nH]c(c1)C(=O)NCc1cccnc1'),
TestMolecule('CCC(=O)Nc1c(C)nn(-c2cc(C)c(C)cc2)c1C', 'c1ccc(cc1)-n1nccc1'),
TestMolecule('Cc1ccc(SCCC(=O)NCCSCc2c(C)cccc2)cc1', 'O=C(NCCSCc1ccccc1)CCSc1ccccc1'),
TestMolecule('CC1=NN(Cc2ccccc2)C(=O)C1=Cc1ccc(N(C)C)cc1', 'O=C1C(C=NN1Cc1ccccc1)=Cc1ccccc1'),
TestMolecule('COCC(=O)Nc1ccc(S(NCCc2ccccc2)(=O)=O)cc1', 'c1ccc(CCNS(=O)(=O)c2ccccc2)cc1'),
TestMolecule('CCOC(=O)N(C)c1ccc(C(O)(C(F)(F)F)C(F)(F)F)cc1', 'c1ccccc1'),
TestMolecule('Fc1ccc(COC2=C(C(O)=O)CCNC2=O)cc1F', 'O=C1NCCC=C1OCc1ccccc1'),
TestMolecule('O=C1N2C(Nc3ccccc31)CCCCC2', 'O=C1N2C(Nc3ccccc31)CCCCC2'),
TestMolecule('Cl.COc1ccc(-c2nc3n(ccc4ccccc43)c2CN2CCOCC2)cc1OC',
'c1cccc(c1)-c1nc2c3c(ccn2c1CN1CCOCC1)cccc3'),
TestMolecule('ClCc1oc(-c2ccccc2)nn1', 'c1oc(nn1)-c1ccccc1'),
TestMolecule('Cl.Cc1ccc(OCC(O)Cn2c(=N)n(CCN3CCCCC3)c3ccccc32)cc1',
'N=c1n(CCCOc2ccccc2)c2ccccc2n1CCN1CCCCC1'),
TestMolecule('COc1ccc(C(=O)C=C(C)Nc2ccc3c(c2)OCO3)cc1', 'O=C(C=CNc1ccc2c(c1)OCO2)c1ccccc1'),
TestMolecule('c1csc(CN(C(c2ccc(F)cc2)C(NC2CCCCC2)=O)C(=O)CN2S(=O)(=O)c3ccccc3C2=O)c1',
'c1cc(CN(C(=O)CN2S(=O)(c3ccccc3C2=O)=O)C(C(=O)NC2CCCCC2)c2ccccc2)sc1'),
TestMolecule('c1csc(S(NCCSc2n(-c3ccccc3)nnn2)(=O)=O)c1',
'c1csc(S(NCCSc2n(-c3ccccc3)nnn2)(=O)=O)c1'),
TestMolecule('Cc1cccc(C=NNC(=O)Cn2c(N)nnn2)n1', 'O=C(Cn1cnnn1)NN=Cc1ccccn1'),
TestMolecule('CCOC(C1(Cc2ccc(Cl)cc2)CCN(C(c2cc(C)nc(C)n2)=O)CC1)=O',
'O=C(N1CCC(CC1)Cc1ccccc1)c1ccncn1'),
TestMolecule('c1ccc(C(N(CC2OCCC2)C(Cn2nnc3ccccc23)=O)C(NCc2ccc(F)cc2)=O)cc1',
'O=C(N(C(c1ccccc1)C(=O)NCc1ccccc1)CC1OCCC1)Cn1nnc2c1cccc2'),
TestMolecule('O=C1CSC(c2ccncc2)N1Cc1occc1', 'O=C1CSC(c2ccncc2)N1Cc1occc1'),
TestMolecule('COc1c(OCc2ccccc2)c(Br)cc(C=NNC(=O)Cn2nc([N+]([O-])=O)cc2C)c1',
'O=C(Cn1nccc1)NN=Cc1ccc(cc1)OCc1ccccc1'),
TestMolecule('Cc1c(Cn2nnc(-c3cc(C(=O)O)ccc3)n2)cccc1', 'c1cccc(-c2nn(nn2)Cc2ccccc2)c1'),
TestMolecule('O=C(c1ccc2snnc2c1)N1CCCC1', 'O=C(c1ccc2snnc2c1)N1CCCC1'),
TestMolecule('c1ccc(CC(NN2C(=O)C(=Cc3c(C(O)=O)cccc3)SC2=S)=O)cc1',
'O=C1C(=Cc2ccccc2)SC(=S)N1NC(Cc1ccccc1)=O'),
TestMolecule('Cc1ccccc1OCC(=O)NN=Cc1ccncc1', 'O=C(COc1ccccc1)NN=Cc1ccncc1'),
TestMolecule('O=C(C=Cc1ccccc1)NC(=S)Nc1ccc(CN2CCOCC2)cc1',
'O=C(C=Cc1ccccc1)NC(=S)Nc1ccc(CN2CCOCC2)cc1'),
TestMolecule('COc1ccc(NC(=S)N(Cc2cnccc2)Cc2c(=O)[nH]c3c(c2)cc(OC)c(OC)c3)cc1',
'O=c1c(CN(C(=S)Nc2ccccc2)Cc2cnccc2)cc2ccccc2[nH]1'),
TestMolecule('Nc1ccc2nc3c([nH]c(=O)n(C4CCCCC4)c3=O)nc2c1',
'c1ccc2nc3[nH]c(n(c(c3nc2c1)=O)C1CCCCC1)=O'),
TestMolecule('Cc1cc(NC(=O)c2ccc(S(Nc3ccccc3)(=O)=O)cc2)no1',
'c1cc(no1)NC(=O)c1ccc(S(=O)(=O)Nc2ccccc2)cc1'),
TestMolecule('Nn1c(Cc2c3c(cccc3)ccc2)nnc1SCc1ccccc1',
'c1ccc(CSc2nnc([nH]2)Cc2c3c(cccc3)ccc2)cc1'),
TestMolecule('Cc1[nH]nc(Nc2cc(C)ccc2)c1[N+](=O)[O-]', 'c1ccc(cc1)Nc1n[nH]cc1'),
TestMolecule('CC1Cn2c(nc3n(C)c(=O)[nH]c(=O)c23)O1', 'O=c1[nH]c2nc3n(c2c([nH]1)=O)CCO3'),
TestMolecule('c1csc(C(OCC(NC23CC4CC(C2)CC(C3)C4)=O)=O)c1',
'c1csc(C(OCC(NC23CC4CC(C2)CC(C3)C4)=O)=O)c1'),
TestMolecule('c1ccc(S(NC2=NC(=O)C(=Cc3cnccc3)S2)(=O)=O)cc1',
'c1ccc(S(NC2=NC(=O)C(=Cc3cnccc3)S2)(=O)=O)cc1'),
TestMolecule('CCCn1c(N2CCN(C)CC2)nc2n(C)c(=O)[nH]c(=O)c12',
'O=c1[nH]c([nH]c2nc([nH]c12)N1CCNCC1)=O'),
TestMolecule('CCn1c(SCC(Nc2cc(S(N3CCOCC3)(=O)=O)ccc2OC)=O)nnc1-c1ccncc1',
'c1cc(S(=O)(=O)N2CCOCC2)cc(NC(=O)CSc2nnc(-c3ccncc3)[nH]2)c1'),
TestMolecule('C#CCNC(=O)C1=CC(c2ccc(Br)cc2)CC(OCc2ccc(CO)cc2)O1',
'c1cccc(c1)C1C=COC(OCc2ccccc2)C1'),
TestMolecule('CCc1c(SCC(=O)Nc2cc(C)on2)nc2ccc(C)cc2c1', 'O=C(Nc1ccon1)CSc1ccc2c(cccc2)n1'),
TestMolecule('CCOCCCN(C(C(NC1CCCC1)=O)c1cccc(OC)c1OC)C(c1ccco1)=O',
'c1cc(ccc1)C(NC(c1occc1)=O)C(=O)NC1CCCC1'),
TestMolecule('Cc1ccc(C(=O)NC(=S)NNS(c2ccccc2)(=O)=O)cc1',
'c1cccc(c1)C(NC(=S)NNS(=O)(=O)c1ccccc1)=O'),
TestMolecule('COc1ccc(CC(N)=NOC(=O)c2sccc2)cc1', 'O=C(ON=CCc1ccccc1)c1sccc1'),
TestMolecule('c1ccc(C(O)=C2C(c3ncccc3)N(CC(OC)OC)C(=O)C2=O)cc1',
'c1cc(C=C2C(=O)C(=O)NC2c2ncccc2)ccc1'),
TestMolecule('COC(=O)CSc1nc(C)cc(Oc2ccccc2)n1', 'c1ccc(Oc2ccncn2)cc1'),
TestMolecule('COc1ccc(Cn2c(C)ccc2C)cc1', 'c1ccc(cc1)Cn1cccc1'),
TestMolecule('COc1cccc(N2CCN(C3CC(=O)N(c4ccc(C)c(Cl)c4)C3=O)CC2)c1',
'O=C1N(c2ccccc2)C(=O)C(C1)N1CCN(c2ccccc2)CC1'),
TestMolecule('COc1cccc(OC)c1OCCN(C)C.OC(=O)C(O)=O', 'c1ccccc1'),
TestMolecule('C1CCC(NC(=O)c2ccc(S(N3CCCC3)(=O)=O)cc2)C1',
'C1CCC(NC(=O)c2ccc(S(N3CCCC3)(=O)=O)cc2)C1'),
TestMolecule('CCCN(C(=O)Cn1ncc2c(=O)oc3c(c12)cccc3)c1cc(C)ccc1',
'O=C(Cn1ncc2c(oc3c(cccc3)c12)=O)Nc1ccccc1'),
TestMolecule('CNC(NC(CSc1nnc(C(F)(F)F)n1C)=O)=O', 'n1nc[nH]c1'),
TestMolecule('CCOCCCN1C(=O)CC(C(NCCc2ccc(C)cc2)=O)C1', 'O=C1NCC(C1)C(NCCc1ccccc1)=O'),
TestMolecule('COc1c([N+](=O)[O-])cc(CSc2n[nH]c(C)n2)cc1', 'c1ccc(CSc2nc[nH]n2)cc1'),
TestMolecule('CN(C)CC(=O)c1ccc(-c2ccccc2)cc1', 'c1cccc(c1)-c1ccccc1'),
TestMolecule('CC1(O)C(=O)c2c(cccc2)N(c2ccccc2)C1=O', 'O=C1CC(=O)N(c2c1cccc2)c1ccccc1'),
TestMolecule('CN(S(c1ccccc1)(=O)=O)CC(=O)NCCc1ccccc1', 'c1ccc(CCNC(=O)CNS(=O)(=O)c2ccccc2)cc1'),
TestMolecule('CCNc1ccccc1C(=O)O', 'c1ccccc1'),
TestMolecule('CC1(C)C(CSc2nc3ccccc3[nH]2)C1(Cl)Cl', 'c1ccc2c(nc([nH]2)SCC2CC2)c1'),
TestMolecule('CC(C)c1ccc(OCC(=O)NC(=S)Nc2c3cccc4c3c(cc2)CC4)cc1',
'O=C(NC(=S)Nc1c2cccc3c2c(cc1)CC3)COc1ccccc1'),
TestMolecule('CN(C)c1ccc(NC(CN2CCC(C(c3ccc(F)cc3)=O)CC2)=O)cc1',
'c1cccc(c1)NC(CN1CCC(CC1)C(=O)c1ccccc1)=O'),
TestMolecule('CCCCN(C)C(=O)Cc1c(OC)ccc2cc(Br)ccc21', 'c1c2ccccc2ccc1'),
TestMolecule('Cc1ccc(NC(CSc2sc(NC(CN3CCOCC3)=O)nn2)=O)cc1',
'O=C(Nc1ccccc1)CSc1sc(nn1)NC(=O)CN1CCOCC1'),
TestMolecule('COCCNC(=S)NNc1cccc(C(=O)O)c1', 'c1ccccc1'),
TestMolecule('O=C(CNc1ccccc1)NN=Cc1ccc2c(c1)OCCO2', 'O=C(CNc1ccccc1)NN=Cc1ccc2c(c1)OCCO2'),
TestMolecule('COc1cc2ccccc2cc1C(=O)NCC(c1sccc1)N(C)C', 'O=C(NCCc1sccc1)c1cc2c(cc1)cccc2'),
TestMolecule('COc1ccc(C(N(C)C)CNC(=O)CCOc2ccccc2)cc1', 'O=C(NCCc1ccccc1)CCOc1ccccc1'),
TestMolecule('Cl.CCN(CC)CCCN1C(=O)CSC1c1ccc([N+]([O-])=O)cc1', 'O=C1CSC(c2ccccc2)N1'),
TestMolecule('CCC(Nc1ccc(OC)cc1OC)=C1C(=O)NC(=O)NC1=O', 'c1cc(NC=C2C(=O)NC(=O)NC2=O)ccc1'),
TestMolecule('c1coc(-c2cc(C(F)(F)F)nc(NCc3ccc(F)cc3)n2)c1', 'c1ccc(CNc2nccc(n2)-c2occc2)cc1'),
TestMolecule('CCOC(Nc1sc(C)c(C)c1C(OCC)=O)=O', 'c1ccsc1'),
TestMolecule('O=CN1CCN(C(C(=O)NC2CCCCC2)c2cc3c(cc2[N+]([O-])=O)OCO3)CC1',
'O=C(C(N1CCNCC1)c1ccc2c(c1)OCO2)NC1CCCCC1'),
TestMolecule('COc1cc(C2N(c3ccc(Br)cc3)C(=O)c3n[nH]c(C)c32)ccc1O',
'O=C1c2n[nH]cc2C(N1c1ccccc1)c1ccccc1'),
TestMolecule('c1cc(NC(=O)c2ccccc2[N+]([O-])=O)c(N2CCOCC2)cc1',
'O=C(Nc1c(cccc1)N1CCOCC1)c1ccccc1'),
TestMolecule('N#Cc1cc2c(nc1SCC(=O)N1CCCCC1)CCCCC2', 'O=C(N1CCCCC1)CSc1ccc2c(n1)CCCCC2'),
TestMolecule('CCN(CC)c1ccc(CN(C(=O)c2cc(OC)c(OC)c(OC)c2)C2CCS(=O)(=O)C2)cc1',
'O=S1(=O)CCC(N(Cc2ccccc2)C(=O)c2ccccc2)C1'),
TestMolecule('COc1cc(NC(=S)N2CCN(Cc3ccccc3)CC2)cc(OC)c1', 'S=C(N1CCN(CC1)Cc1ccccc1)Nc1ccccc1'),
TestMolecule('CC(=O)C(=CNc1ccc(OCc2ccccc2)cc1)c1ccccc1', 'c1cccc(c1)COc1ccc(NC=Cc2ccccc2)cc1'),
TestMolecule('CC(C)C(C(NC(C)C(N)=O)=O)NC(C1CCCN1C(OC(C)(C)C)=O)=O', 'C1CCNC1'),
TestMolecule('CCOc1ccc(N2CC(C(=O)Nc3cccc(S(NC4=NCCC4)(=O)=O)c3)CC2=O)cc1',
'c1cccc(c1)N1CC(C(=O)Nc2cccc(S(=O)(=O)NC3=NCCC3)c2)CC1=O'),
TestMolecule('O=C(NCc1ccccc1Cl)CSc1ccc(-c2cccs2)nn1', 'O=C(NCc1ccccc1)CSc1ccc(nn1)-c1sccc1'),
TestMolecule('COc1ccc(OC)c(N=c2ssnc2Cl)c1', 'c1cccc(c1)N=c1ssnc1'),
TestMolecule('CC(=O)C1=C(C)NC(=O)CC1c1c(Cl)cccc1', 'O=C1CC(C=CN1)c1ccccc1'),
TestMolecule('CCC(=O)N=C(N)Nc1nc(C)c2cc(C)c(C)cc2n1', 'c1cc2c(cc1)ncnc2'),
TestMolecule('Cc1ccccc1C(OC1OC(=O)C(Cl)=C1Nc1ccc(C(O)=O)cc1)=O',
'O=C(OC1OC(C=C1Nc1ccccc1)=O)c1ccccc1'),
TestMolecule('CCOc1cc(CN2CCC(CO)(Cc3cccc(C(F)(F)F)c3)CC2)ccc1OC',
'c1ccc(cc1)CC1CCN(Cc2ccccc2)CC1'),
TestMolecule('Cc1cc2c([nH]c(=O)c(CCNC(c3cccs3)=O)c2)cc1C',
'O=C(NCCc1cc2ccccc2[nH]c1=O)c1cccs1'),
TestMolecule('Cc1ccc(Nc2cc(=O)[nH]c(=O)[nH]2)cc1C', 'c1cccc(c1)Nc1cc([nH]c([nH]1)=O)=O'),
TestMolecule('Cc1cc(OCC(=O)NC2CCS(=O)(=O)C2)c2c(oc(=O)c3c2CCC3)c1',
'O=C(NC1CCS(=O)(C1)=O)COc1c2c(ccc1)oc(c1c2CCC1)=O'),
TestMolecule('CCc1sc(NC(CCC(NCCc2ccc(OC)c(OC)c2)=O)=O)nn1',
'c1cc(ccc1)CCNC(=O)CCC(=O)Nc1scnn1'),
TestMolecule('N#CC1=C(SCc2ccccc2)NC(=O)CC1c1ccc(O)cc1', 'O=C1NC(=CC(C1)c1ccccc1)SCc1ccccc1'),
TestMolecule('O=C(NCCN1CCOCC1)c1csc2c1CCCC2', 'O=C(NCCN1CCOCC1)c1csc2c1CCCC2'),
TestMolecule('CCCCC(=O)Nc1cc(OC)c(NC(C2CCCCC2)=O)cc1OC', 'O=C(Nc1ccccc1)C1CCCCC1'),
TestMolecule('Cc1ccc(C(C(C)OC(C2CC(=O)N(C3CCCCC3)C2)=O)=O)cc1',
'c1cc(C(=O)COC(C2CC(=O)N(C2)C2CCCCC2)=O)ccc1'),
TestMolecule('Cc1ccc(S(C(C#N)c2c(N3CCCC3)nc3ccccc3n2)(=O)=O)cc1C',
'c1ccc(cc1)S(=O)(=O)Cc1c(nc2ccccc2n1)N1CCCC1'),
TestMolecule('CC1(C)OC(=O)C(=Cc2[nH]ccc2)C(=O)O1', 'O=C1OCOC(=O)C1=Cc1[nH]ccc1'),
TestMolecule('Cc1cc(C)cc(Oc2nc3n(cccc3C)c(=O)c2C=C(C#N)C(=O)NC2CCS(=O)(=O)C2)c1',
'c1ccc(cc1)Oc1c(c(=O)n2ccccc2n1)C=CC(=O)NC1CCS(=O)(=O)C1'),
TestMolecule('COc1cc(NC(=O)NCc2c(C)onc2-c2ccccc2)ccc1', 'O=C(NCc1conc1-c1ccccc1)Nc1ccccc1'),
TestMolecule('c1ccc(C(Oc2cc3c(cc2)C(=O)CO3)=O)cc1', 'c1ccc(C(Oc2cc3c(cc2)C(=O)CO3)=O)cc1'),
TestMolecule('CCN1C(=O)C2C(c3cccs3)N3C4C(=O)N(CC)C(=O)C4C(c4cccs4)N3C2C1=O',
'c1cc(sc1)C1C2C(NC(=O)C2N2N1C1C(=O)NC(=O)C1C2c1cccs1)=O'),
TestMolecule('Cc1cc(C(N2CCCC(C(c3cc(F)ccc3F)=O)C2)=O)c(C)o1',
'O=C(N1CCCC(C(=O)c2ccccc2)C1)c1cocc1'),
TestMolecule('COc1cc(C=NO)ccc1Oc1c([N+]([O-])=O)cc([N+]([O-])=O)cc1', 'c1cccc(Oc2ccccc2)c1'),
TestMolecule('Cc1ccc(N(Cc2c(=O)[nH]c3ccc(C)cc3c2)C(c2cccs2)=O)cc1',
'O=C(N(c1ccccc1)Cc1c([nH]c2c(cccc2)c1)=O)c1cccs1'),
TestMolecule('COc1ccc(C(=O)Nn2c(C)nnc2-n2c(C)cc(C)n2)cc1OC', 'O=C(c1ccccc1)Nn1cnnc1-n1nccc1'),
TestMolecule('Cc1c(NC(=O)c2c(C)c(Cl)c(C)nc2Cl)cccc1', 'O=C(c1cccnc1)Nc1ccccc1'),
TestMolecule('c1ccc(CNC(CC(C(=O)NCc2ccccc2)c2nc(=O)c3ccccc3[nH]2)=O)cc1',
'c1ccc(CNC(CC(C(=O)NCc2ccccc2)c2nc(=O)c3ccccc3[nH]2)=O)cc1'),
TestMolecule('CNc1n(-c2ccccc2)ncc1[N+](=O)[O-]', 'c1n(ncc1)-c1ccccc1'),
TestMolecule('CC1SC2(NC1=O)C1CC3CC(C1)CC2C3', 'O=C1CSC2(N1)C1CC3CC(C1)CC2C3'),
TestMolecule('CCc1ccccc1NC(=S)N(C(C)c1occc1)CCOC', 'S=C(NCc1occc1)Nc1ccccc1'),
TestMolecule('CCC(C)NC(=O)C1CCCN(S(c2ccc(-n3cnnn3)cc2)(=O)=O)C1',
'C1CCN(CC1)S(=O)(=O)c1ccc(cc1)-n1nnnc1'),
TestMolecule('COc1c2c(ccc1)C1CC(C)(O2)N(Cc2ccccc2)C(=O)N1', 'O=C1NC2CC(Oc3ccccc32)N1Cc1ccccc1'),
TestMolecule('COc1ccc(C2NC(=O)c3c(cccc3)O2)c(OC)c1OC', 'O=C1NC(Oc2c1cccc2)c1ccccc1'),
TestMolecule('O=C(NNC=C1C=Nc2ccccc21)c1ccn(Cc2c(Cl)cc(Cl)cc2)n1',
'O=C(NNC=C1c2c(cccc2)N=C1)c1nn(cc1)Cc1ccccc1'),
TestMolecule('c1ccc(NS(c2ccc(OCC(=O)NCc3cnccc3)cc2)(=O)=O)cc1',
'c1ccc(NS(c2ccc(OCC(=O)NCc3cnccc3)cc2)(=O)=O)cc1'),
TestMolecule('COC1=CC(=O)C(=C2NNC(C(F)(F)F)=C2c2cc3ccccc3o2)C=C1',
'O=C1C=CC=CC1=C1NNC=C1c1cc2ccccc2o1'),
TestMolecule('CCOC(=O)c1c(C(COC(C=Cc2ccc(Cl)cc2)=O)=O)c(C)[nH]c1C',
'c1ccc(C=CC(OCC(=O)c2cc[nH]c2)=O)cc1'),
TestMolecule('Cc1nc2ncnn2c(N2CCN(c3nnnn3-c3ccccc3)CC2)c1',
'c1nc2ncnn2c(c1)N1CCN(c2nnnn2-c2ccccc2)CC1'),
TestMolecule('CC(C)Oc1ccc(C(=O)Nc2ccc(NC(c3ccco3)=O)c(Cl)c2)cc1',
'O=C(Nc1ccc(cc1)NC(=O)c1ccccc1)c1occc1'),
TestMolecule('CC(c1ccccc1)NC(C(NCC1OCCC1)=O)=O', 'O=C(NCc1ccccc1)C(=O)NCC1OCCC1'),
TestMolecule('CCCCOc1ccc(NC(=O)CCSc2nccn2C)cc1', 'O=C(Nc1ccccc1)CCSc1ncc[nH]1'),
TestMolecule('O=C(OCc1ncccc1)c1oc(COc2c(Cl)cccc2)cc1', 'O=C(OCc1ncccc1)c1ccc(o1)COc1ccccc1'),
TestMolecule('COc1ccc(C=NNC(=O)OC(C)(C)C)cc1OC', 'c1ccccc1'),
TestMolecule('CC1CCCCC1NC(COC(c1ccc(S(NCc2ccco2)(=O)=O)cc1)=O)=O',
'c1coc(c1)CNS(=O)(=O)c1ccc(cc1)C(=O)OCC(=O)NC1CCCCC1'),
TestMolecule('Nn1c(SCC(=O)Nc2cccc(F)c2)nnc1C1CCCCC1', 'O=C(CSc1[nH]c(nn1)C1CCCCC1)Nc1ccccc1'),
TestMolecule('Cc1n[nH]c(NC2CCCCC2)nc1=O', 'O=c1cn[nH]c(n1)NC1CCCCC1'),
TestMolecule('CCCCCCCCC(=O)NC(C(Cl)(Cl)Cl)NC(=S)N1CCOCC1', 'C1NCCOC1'),
TestMolecule('CCCc1ccc(Oc2coc3cc(OCC(Nc4c(C)cccc4)=O)ccc3c2=O)cc1',
'c1cccc(c1)Oc1c(c2ccc(cc2oc1)OCC(=O)Nc1ccccc1)=O'),
TestMolecule('Cc1ccc(C(=O)NN=C2CCSC2)cc1[N+]([O-])=O', 'O=C(NN=C1CCSC1)c1ccccc1'),
TestMolecule('N#CC1=C2SCN(c3ccc(F)cc3)CN2C(=O)CC1c1cc(F)ccc1',
'O=C1N2CN(c3ccccc3)CSC2=CC(c2ccccc2)C1'),
TestMolecule('c1ccc(CN2C(=O)CC(Nc3cc4c(cc3)cccc4)C2=O)cc1',
'c1ccc(CN2C(=O)CC(Nc3cc4c(cc3)cccc4)C2=O)cc1'),
TestMolecule('COc1ccc(NC(C)=O)cc1NC(=O)CN1CCN(CC(=O)Nc2ccc(Cl)cc2)CC1',
'O=C(Nc1ccccc1)CN1CCN(CC1)CC(=O)Nc1ccccc1'),
TestMolecule('Clc1c(Cl)c(C2NC(=O)CCC2[N+]([O-])=O)ccc1', 'O=C1NC(CCC1)c1ccccc1'),
TestMolecule('CCN(C(=O)CSc1n(-c2ccccc2)c(-c2ccccc2)nn1)CC', 'c1ccc(cc1)-n1cnnc1-c1ccccc1'),
TestMolecule('CC(=O)CCCCn1cnc2n(C)c(=O)n(C)c(=O)c12', 'O=c1[nH]c(c2c(nc[nH]2)[nH]1)=O'),
TestMolecule('CC1=NN(c2ccccc2)C(=N)C1=NNc1ccc(Cl)cc1', 'N=C1C(=NNc2ccccc2)C=NN1c1ccccc1'),
TestMolecule('CCc1ccc(OCC(=O)N(CC)CC)cc1', 'c1ccccc1'),
TestMolecule('CN(CC(=O)N1CCCCC1)S(c1ccc(Cl)cc1)(=O)=O', 'O=C(CNS(=O)(=O)c1ccccc1)N1CCCCC1'),
TestMolecule('CSc1ncc(C=C2C(=O)NC(=O)N(c3ccc(C)cc3)C2=O)cn1',
'c1ccc(N2C(NC(=O)C(=Cc3cncnc3)C2=O)=O)cc1'),
TestMolecule('COCCNC(=S)Nc1c(Cc2ccccc2)cccc1', 'c1ccc(Cc2ccccc2)cc1'),
TestMolecule('COc1cc(C(=O)Nc2nnc(C(C)(C)C)s2)c([N+]([O-])=O)cc1OC', 'O=C(Nc1nncs1)c1ccccc1'),
TestMolecule('CCOC(=O)c1ccc(NC(=O)c2cc(OC)c(OC(C)C)cc2)cc1', 'O=C(Nc1ccccc1)c1ccccc1'),
TestMolecule('COc1ccc(C(=O)C=C2Sc3cc4c(cc3N2C)OCO4)cc1', 'O=C(C=C1Sc2cc3c(cc2N1)OCO3)c1ccccc1'),
TestMolecule('CCCC1=NN(c2sc3c(n2)cccc3)C(=O)C1=CNCCCN(CC)CC', 'C=C1C=NN(C1=O)c1sc2ccccc2n1'),
TestMolecule('COc1ccc(C(COC(CN2C(=O)NC(C)(C)C2=O)=O)=O)cc1OC',
'c1ccc(C(=O)COC(=O)CN2C(=O)CNC2=O)cc1'),
TestMolecule('O=C(Oc1ccc(Br)cc1)C1CC(=O)N(c2ccc(F)cc2)C1',
'O=C(C1CC(N(C1)c1ccccc1)=O)Oc1ccccc1'),
TestMolecule('O=c1nc(-c2ccccn2)[nH]c(C(F)(F)F)c1Br', 'O=c1cc[nH]c(-c2ncccc2)n1'),
TestMolecule('CCOC(c1oc2ccccc2c1NC(CN1CCN(C)CC1)=O)=O', 'O=C(CN1CCNCC1)Nc1coc2ccccc21'),
TestMolecule('CSc1nsc(NN=Cc2ccc3c(c2)OCO3)c1C#N', 'c1cc(sn1)NN=Cc1ccc2OCOc2c1'),
TestMolecule('CC(C)(C)NC(NC(CSc1nc(C)c(C)c(C)n1)=O)=O', 'c1cncnc1'),
TestMolecule('Cc1cccnc1CN1CCN(Cc2onc(C(c3ccccc3)c3ccccc3)n2)CC1',
'c1cccnc1CN1CCN(CC1)Cc1onc(n1)C(c1ccccc1)c1ccccc1'),
TestMolecule('COc1ccc(Nc2oc3cc(=O)ccc-3cc2C(=O)Nc2ncccc2)cc1OC',
'c1ccc(cc1)Nc1oc2-c(ccc(c2)=O)cc1C(Nc1ncccc1)=O'),
TestMolecule('c1cc(C)c(OCC(NS(c2ccc(C)cc2)(=O)=O)=O)cc1', 'O=C(COc1ccccc1)NS(=O)(=O)c1ccccc1'),
TestMolecule('CCOc1ccc(-c2scc(CSc3sc(N)nn3)n2)cc1OC', 'c1cccc(c1)-c1nc(cs1)CSc1scnn1'),
TestMolecule('c1ccc(C(=O)COC(=O)CN2C(=O)C3C4CC(C3C2=O)C=C4)cc1',
'c1ccc(C(=O)COC(=O)CN2C(=O)C3C4CC(C3C2=O)C=C4)cc1'),
TestMolecule('Cc1occc1C(=O)NC(C)c1ccc2c(c1)OCO2', 'O=C(NCc1ccc2c(c1)OCO2)c1ccoc1'),
TestMolecule('CCn1c(SCC(=O)Nc2c(Cl)nccc2)nnc1-c1ccccc1',
'O=C(Nc1cnccc1)CSc1[nH]c(nn1)-c1ccccc1'),
TestMolecule('CCC(C)N(C)C1CCN(C(=S)Nc2cc(OC)ccc2)CC1', 'S=C(Nc1ccccc1)N1CCCCC1'),
TestMolecule('Brc1oc(C(=O)N2CC(=O)Nc3c(cc(Br)cc3)C2c2ccccc2)cc1',
'O=C(N1CC(Nc2ccccc2C1c1ccccc1)=O)c1occc1'),
TestMolecule('CN(C(=O)CCSc1nc(-c2cc3c(cc2)OCO3)cc(C(F)(F)F)n1)Cc1ccccc1',
'O=C(NCc1ccccc1)CCSc1nc(ccn1)-c1cc2c(cc1)OCO2'),
TestMolecule('[Br-].COc1c(OC)c(OC)cc(-c2nc3c[n+](CC(=O)c4ccccc4)ccc3n2C)c1',
'O=C(C[n+]1cc2nc([nH]c2cc1)-c1ccccc1)c1ccccc1'),
TestMolecule('CCOC(CSc1n(-c2c(OC)cccc2)c(CNC(Cc2ccccc2)=O)nn1)=O',
'O=C(Cc1ccccc1)NCc1n(cnn1)-c1ccccc1'),
TestMolecule('CS(N(Cc1ccccc1)c1ccc(C(Nc2c(Sc3ccccc3)cccc2)=O)cc1)(=O)=O',
'O=C(c1ccc(NCc2ccccc2)cc1)Nc1c(cccc1)Sc1ccccc1'),
TestMolecule('Cc1nc(C2N(C(=O)c3cn(C)c4c(c3=O)cccc4)CCc3c4c([nH]c32)cccc4)ccc1',
'O=C(c1c[nH]c2c(cccc2)c1=O)N1C(c2ncccc2)c2[nH]c3ccccc3c2CC1'),
TestMolecule('CCCCc1nc(N2CCOCC2)c(C#N)c2c1CCCC2', 'c1nc(cc2c1CCCC2)N1CCOCC1'),
TestMolecule('O=C(NN=Cc1cc([N+]([O-])=O)ccc1Cl)c1nccnc1', 'O=C(NN=Cc1ccccc1)c1nccnc1'),
TestMolecule('COc1ccc(-n2c(SCC(=O)c3ccc4c(c3)OCCO4)nnn2)cc1',
'O=C(c1ccc2c(c1)OCCO2)CSc1n(nnn1)-c1ccccc1'),
TestMolecule('COc1c(C=CC(=O)Nc2cc(S(NC3=NCCCCC3)(=O)=O)ccc2)cccc1',
'O=C(Nc1cc(ccc1)S(=O)(=O)NC1=NCCCCC1)C=Cc1ccccc1'),
TestMolecule('Cc1nn(-c2ccc(F)cc2)c(Cl)c1C=C(CC(=O)O)c1sc2ccccc2n1',
'c1cc2sc(nc2cc1)C=Cc1cn(nc1)-c1ccccc1'),
TestMolecule('COc1c(OC)c(OC)cc(C2N(c3ccccc3)OC3C2C(=O)N(Cc2ccccc2)C3=O)c1',
'c1cccc(c1)CN1C(=O)C2C(N(OC2C1=O)c1ccccc1)c1ccccc1'),
TestMolecule('COCCNC(=S)Nc1cc(OC)c(NC(=O)c2ccco2)cc1OC', 'O=C(Nc1ccccc1)c1occc1'),
TestMolecule('N#Cc1c(SCC(=O)c2cc3c(oc2=O)cccc3)nc(-c2ccccc2)cc1',
'O=C(c1cc2c(cccc2)oc1=O)CSc1cccc(n1)-c1ccccc1'),
TestMolecule('O=C(N1CCCC1)c1nc2ccccn2c1CN1CCCC(OCc2ccccc2)C1',
'O=C(N1CCCC1)c1nc2ccccn2c1CN1CCCC(OCc2ccccc2)C1'),
TestMolecule('Brc1cccc(OCCSc2ncccn2)c1', 'c1cccc(c1)OCCSc1ncccn1'),
TestMolecule('CC(C)(C)NC(=O)C12CCC(C)(C1(C)C)c1nc3ccccc3nc12', 'c1cccc2nc3C4CC(CC4)c3nc12'),
TestMolecule('[I-].CC(C)C1C(OCC(O)C[N+]2(C)CCCCC2)CC(C)CC1', 'C1CC[NH+](CC1)CCCOC1CCCCC1'),
TestMolecule('Cc1ccccc1NS(=O)(=O)c1ccc(OCC(=O)N2CCCCC2)cc1',
'c1cc(ccc1)NS(=O)(=O)c1ccc(cc1)OCC(=O)N1CCCCC1'),
TestMolecule('Cc1cc(NC(=O)CSc2nc3c(c(=O)n2-c2ccc(Br)cc2)SCC3)no1',
'O=C(CSc1nc2c(c(n1-c1ccccc1)=O)SCC2)Nc1ccon1'),
TestMolecule('Cc1ccccc1C(NC(C(C)C)C(OCC(c1[nH]ccc1)=O)=O)=O',
'c1cc([nH]c1)C(COC(CNC(=O)c1ccccc1)=O)=O'),
TestMolecule('Cc1ccnc(NS(c2ccc(NS(C)(=O)=O)cc2)(=O)=O)n1', 'c1ccc(S(=O)(=O)Nc2ncccn2)cc1'),
TestMolecule('Cn1c(-c2ccc(Cl)cc2)cnc1NCc1cc2c(cc1[N+]([O-])=O)OCO2.OC(=O)C(O)=O',
'c1cc(ccc1)-c1[nH]c(nc1)NCc1cc2c(cc1)OCO2'),
TestMolecule('CC1Cc2ccccc2N1C(=O)CON=Cc1ccc(OC(F)F)cc1', 'O=C(CON=Cc1ccccc1)N1CCc2c1cccc2'),
TestMolecule('C=C1C(=O)OC2C(O)C(C)=CC(=O)C=C(C)CC(OC(C(C)=CC)=O)C12',
'C=C1C2CCC=CC(C=CCC2OC1=O)=O'),
TestMolecule('O=C1C2N(CSC2)c2c(cc(C(F)(F)F)cc2)N1Cc1cccc(F)c1',
'O=C1C2N(CSC2)c2ccccc2N1Cc1ccccc1'),
TestMolecule('Cc1ccc(OCC(=O)Nc2c[nH]c(=O)[nH]c2=O)cc1C',
'O=C(COc1ccccc1)Nc1c[nH]c([nH]c1=O)=O'),
TestMolecule('Cn1c(CN2CCOCC2)nc2cc(NC(=O)c3ccccc3Cl)ccc12',
'O=C(c1ccccc1)Nc1ccc2[nH]c(nc2c1)CN1CCOCC1'),
TestMolecule('O=c1oc2ccc(O)cc2c(CN2CCN(CC=Cc3ccccc3)CC2)c1',
'O=c1oc2ccccc2c(c1)CN1CCN(CC1)CC=Cc1ccccc1'),
TestMolecule('Cn1c(Cc2ccccc2)nnc1SCCC(=O)Nc1ccccc1', 'O=C(CCSc1nnc([nH]1)Cc1ccccc1)Nc1ccccc1'),
TestMolecule('c1cc2nc(CC(=O)c3cc([N+]([O-])=O)ccc3)[nH]c2cc1',
'O=C(Cc1nc2ccccc2[nH]1)c1ccccc1'),
TestMolecule('c1cc2cc(C(=O)N3CCN(c4ccc(N5CCOCC5)nn4)CC3)c(=O)oc2cc1',
'c1cc2cc(C(=O)N3CCN(c4ccc(N5CCOCC5)nn4)CC3)c(=O)oc2cc1'),
TestMolecule('COc1ccccc1-n1c(=S)[nH]nc1CCn1nc(C)c(Br)c1C', 'S=c1[nH]nc(n1-c1ccccc1)CCn1cccn1'),
TestMolecule('CCC(=O)NC(=S)Nc1ccc(N2CCOCC2)cc1', 'c1cccc(c1)N1CCOCC1'),
TestMolecule('CCCCCC(=O)N1CCN(CCNC=C2C(=O)CC(c3ccc(OC)c(OC)c3)CC2=O)CC1',
'c1ccc(cc1)C1CC(=O)C(C(=O)C1)=CNCCN1CCNCC1'),
TestMolecule('CN1CCN(C(=O)CN(S(C)(=O)=O)Cc2ccc(Cl)cc2)CC1', 'O=C(CNCc1ccccc1)N1CCNCC1'),
TestMolecule('COc1cc(OC)cc(C(=O)NCc2cccnc2)c1', 'O=C(NCc1cccnc1)c1ccccc1'),
TestMolecule('c1cncc(NC(=O)C2CCCN(S(c3cccc4c3nsn4)(=O)=O)C2)c1',
'c1cncc(NC(=O)C2CCCN(S(c3cccc4c3nsn4)(=O)=O)C2)c1'),
TestMolecule('CC(NC1=NN(C(C)=O)C(C)(c2cccs2)S1)=O', 'c1cc(sc1)C1SC=NN1'),
TestMolecule('CCCC(=O)Nc1ccc(-c2nc3cc(C)c(C)cc3o2)cc1', 'c1cccc(c1)-c1nc2ccccc2o1'),
TestMolecule('Cc1c(C)n(CC(O)CN2CCOCC2)c2ccccc12.OC(=O)C(O)=O', 'c1cn(c2ccccc12)CCCN1CCOCC1'),
TestMolecule('Cc1occc1-c1n(CCc2ccccc2)c(SCC(=O)Nc2sccn2)nn1',
'O=C(Nc1sccn1)CSc1n(c(nn1)-c1cocc1)CCc1ccccc1'),
TestMolecule('Cc1oc(-c2cc(F)ccc2)nc1CN1C(CCc2ncccc2)CCCC1',
'c1ccc(cc1)-c1nc(co1)CN1C(CCCC1)CCc1ncccc1'),
TestMolecule('COc1c(OC)c(C(O)=O)c(C=NNC(c2cc(NC(c3ccc(F)cc3)=O)ccc2)=O)cc1',
'O=C(Nc1cc(ccc1)C(=O)NN=Cc1ccccc1)c1ccccc1'),
TestMolecule('CCn1c(Cc2ccccc2)nnc1SCC(=O)Nc1ccc(S(N)(=O)=O)cc1',
'O=C(CSc1[nH]c(nn1)Cc1ccccc1)Nc1ccccc1'),
TestMolecule('CCn1c(COc2nn(-c3ccccc3)c(=O)cc2)nnc1SCc1ccc(OC)cc1',
'O=c1ccc(nn1-c1ccccc1)OCc1[nH]c(nn1)SCc1ccccc1'),
TestMolecule('CC1=NC(=O)C(=C2CC(O)(C(F)(F)F)ON2)C(C)=C1', 'O=C1C(=C2NOCC2)C=CC=N1'),
TestMolecule('COc1ccc(NC(=S)Nc2ccccc2C(F)(F)F)cc1', 'S=C(Nc1ccccc1)Nc1ccccc1'),
TestMolecule('CCCc1cc(=O)nc(SCC(=O)c2cc(C)n(CCOC)c2C)[nH]1',
'O=C(c1c[nH]cc1)CSc1[nH]ccc(=O)n1'),
TestMolecule('CC(=O)Nc1ccc2c(c1)C(C)(C)C(C)N2C', 'c1ccc2c(c1)NCC2'),
TestMolecule('CCN1CCN(C(c2ccc(OCC(Nc3ccc(F)cc3)=O)c(OC)c2)=O)CC1',
'c1cc(ccc1)NC(=O)COc1ccc(C(N2CCNCC2)=O)cc1'),
TestMolecule('CCCCN1C2CCCC1CC(NC(=O)c1ccc(OC)c(OC)c1)C2', 'O=C(NC1CC2NC(CCC2)C1)c1ccccc1'),
TestMolecule('c1ccc(N(CC(=O)N2CCOCC2)S(c2ccccc2)(=O)=O)cc1',
'c1ccc(N(CC(=O)N2CCOCC2)S(c2ccccc2)(=O)=O)cc1'),
TestMolecule('CCn1c(C)nc2cc(C(=O)NN=Cc3ccc(OC)c(O)c3)ccc12',
'O=C(NN=Cc1ccccc1)c1ccc2[nH]cnc2c1'),
TestMolecule('[Cl-].NC(=O)CN1C=CC(=C[NH+]=O)C=C1', 'C=C1C=CNC=C1'),
TestMolecule('Cn1cnnc1SC1C(NS(c2ccccc2)(=O)=O)c2c3c(ccc2)cccc31',
'O=S(=O)(NC1C(Sc2[nH]cnn2)c2cccc3c2c1ccc3)c1ccccc1'),
TestMolecule('COc1ccc(Nc2nc(NCc3ccco3)nc(NN=Cc3ccccc3F)n2)cc1',
'c1ccc(Nc2nc(nc(n2)NN=Cc2ccccc2)NCc2ccco2)cc1'),
TestMolecule('CC1=CC(=O)C(=C2C=C(c3ccccc3[N+]([O-])=O)NN2)C=C1',
'O=C1C(=C2NNC(=C2)c2ccccc2)C=CC=C1'),
TestMolecule('COc1ccc(CC2[N+]([O-])(C)CCc3cc(OC)c(O)cc32)cc1O',
'c1ccc(cc1)CC1c2c(cccc2)CC[NH2+]1'),
TestMolecule('Cl.NC(N)=Nc1nc(=O)c2cc(Br)ccc2[nH]1', 'O=c1nc[nH]c2ccccc21'),
TestMolecule('CC(=O)N1CCC(=NNc2ccc(S(=O)(=O)N3CCOCC3)cc2[N+]([O-])=O)CC1',
'c1cc(ccc1NN=C1CCNCC1)S(=O)(=O)N1CCOCC1'),
TestMolecule('Cc1cc(S(N(Cc2ccc(F)cc2)CC2OCCC2)(=O)=O)ccc1-n1cnnn1',
'c1cc(ccc1)CN(CC1OCCC1)S(c1ccc(cc1)-n1cnnn1)(=O)=O'),
TestMolecule('CC1(C)OCc2c(c3c(sc4c(NCCCO)ncnc43)nc2-c2ccco2)C1',
'c1ncnc2c1sc1nc(c3c(c12)CCOC3)-c1ccco1'),
TestMolecule('COc1ccc(CCNC(=O)CSc2n(-c3ccc(OC)c(OC)c3)nnn2)cc1OC',
'O=C(CSc1n(-c2ccccc2)nnn1)NCCc1ccccc1'),
TestMolecule('CC(C)(CC(O)=O)CC(NCc1c(Cl)cccc1Sc1ccc(Cl)cc1)=O', 'c1ccc(Sc2ccccc2)cc1'),
TestMolecule('COc1ccc(-c2cc(CCCC(=O)NCCc3cc(OC)ccc3OC)no2)cc1',
'O=C(NCCc1ccccc1)CCCc1noc(c1)-c1ccccc1'),
TestMolecule('Cc1ccc(-c2ncns2)cc1', 'c1ccc(cc1)-c1sncn1'),
TestMolecule('C(O)CCn1c(=O)c2c(nc1C=Cc1ccc([N+]([O-])=O)o1)cccc2',
'O=c1[nH]c(C=Cc2ccco2)nc2c1cccc2'),
TestMolecule('COC(CC(O)CC(O)C(C)OCc1ccccc1)OC', 'c1ccccc1'),
TestMolecule('Cl.CCCC(N1CCN(C(=O)c2occc2)CC1)c1n(C(C)(C)C)nnn1',
'O=C(N1CCN(Cc2nnn[nH]2)CC1)c1ccco1'),
TestMolecule('O=C(NC(CO)c1ccccc1)c1occc1', 'O=C(NCc1ccccc1)c1occc1'),
TestMolecule('O=C(Nc1ccc(N2CCOCC2)cc1)c1c(Cl)cc(F)c(F)c1', 'O=C(Nc1ccc(N2CCOCC2)cc1)c1ccccc1'),
TestMolecule('CCc1sc(N2C(=O)c3ccc(Oc4ccc([N+]([O-])=O)cc4)cc3C2=O)nn1',
'O=C1N(C(=O)c2cc(Oc3ccccc3)ccc21)c1scnn1'),
TestMolecule('CC(C)Cc1ccc(C(C)C(=O)O)cc1', 'c1ccccc1'),
TestMolecule('Cl.N=c1sccn1CC(=O)Nc1cc(S(N2CCCC2)(=O)=O)ccc1Cl',
'N=c1n(CC(=O)Nc2cccc(S(=O)(N3CCCC3)=O)c2)ccs1'),
TestMolecule('c1ccc(-c2ccc(C(=O)OC3CC4OC(=O)CC4C3CO)cc2)cc1',
'c1ccc(cc1)-c1ccc(C(=O)OC2CC3CC(=O)OC3C2)cc1'),
TestMolecule('CN(CCC#N)CC(=O)Nc1ccc(S(N)(=O)=O)cc1', 'c1ccccc1'),
TestMolecule('Cc1nc(-c2ccc([N+]([O-])=O)cc2)sc1C(=O)O', 'c1cc(-c2sccn2)ccc1'),
TestMolecule('c1coc(C(=O)N2CCN(C(Cn3nnc(-c4ccc(NC(c5ccc(F)cc5)=O)cc4)n3)=O)CC2)c1',
'O=C(N1CCN(C(=O)Cn2nc(nn2)-c2ccc(NC(=O)c3ccccc3)cc2)CC1)c1ccco1'),
TestMolecule('Cc1onc(-c2c(Cl)cccc2Cl)c1C(N)=S', 'c1ccc(cc1)-c1nocc1'),
TestMolecule('CCOC(=O)c1cnc2ccccc2c1NCCO', 'c1cnc2ccccc2c1'),
TestMolecule('Cc1ccc(C)c(NC(=O)Cn2nnc(-c3ccc(N4CCOCC4)cc3)n2)c1',
'O=C(Cn1nnc(n1)-c1ccc(cc1)N1CCOCC1)Nc1ccccc1'),
TestMolecule('CC(C)(C)c1cc(C(=O)NNc2ccc(OC(F)(F)F)cc2)n(Cc2ccccc2)n1',
'O=C(NNc1ccccc1)c1ccnn1Cc1ccccc1'),
TestMolecule('CCCCCOC(=O)C1=C(C)N=C2N(NN=N2)C1c1ccc(OC)c(OC)c1OC',
'c1cccc(c1)C1N2NN=NC2=NC=C1'),
TestMolecule('Cc1cc2cc(CNC(=O)C3CC3)ccc2n1C', 'O=C(NCc1ccc2c(cc[nH]2)c1)C1CC1'),
TestMolecule('Cc1ccccc1C(NC(CC(C)C)C(Nc1cc(S(N(C)C)(=O)=O)ccc1)=O)=O',
'c1ccc(cc1)NC(CNC(=O)c1ccccc1)=O'),
TestMolecule('COCCCNC(=S)N1CCC(NC(=O)c2ccco2)CC1', 'O=C(NC1CCNCC1)c1ccco1'),
TestMolecule('Cn1c(C=Cc2oc([N+]([O-])=O)cc2)nc2ccccc2c1=O', 'O=c1[nH]c(C=Cc2occc2)nc2ccccc12'),
TestMolecule('c1cc2nc(SCc3cc(=O)n4ccsc4n3)n(CCCO)c(=O)c2cc1',
'c1ccc2nc(SCc3cc(=O)n4ccsc4n3)[nH]c(=O)c2c1'),
TestMolecule('c1ccc2c(c1)cccc2NC(=O)CC1SC(NCC2OCCC2)=NC1=O',
'c1ccc2c(c1)cccc2NC(=O)CC1SC(NCC2OCCC2)=NC1=O'),
]
if __name__ == '__main__': # pragma: no cover
unittest.main()
|
tools/binary_size/libsupersize/dwarfdump_test.py | zealoussnow/chromium | 14,668 | 12633051 | <filename>tools/binary_size/libsupersize/dwarfdump_test.py
#!/usr/bin/env python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import dwarfdump
class DwarfDumpTest(unittest.TestCase):
def _MakeRangeInfoList(self, flat_list):
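# Helper: convert flat (start, end, name) tuples into the
# (dwarfdump._AddressRange, name) pairs produced by ParseDumpOutputForTest.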
out = []
for item in flat_list:
assert len(item) == 3
out.append((dwarfdump._AddressRange(item[0], item[1]), item[2]))
return out
def testParseNonContiguousAddressRange(self):
"""Test parsing DW_TAG_compile_unit with non-contiguous address range."""
lines = [
'DW_TAG_compile_unit',
'DW_AT_name ("solution.cc")',
'DW_AT_low_pc (0x0)',
'DW_AT_ranges (0x1',
'[0x10, 0x21)',
'[0x31, 0x41))',
]
expected_info_list = [(0x10, 0x21, 'solution.cc'),
(0x31, 0x41, 'solution.cc')]
self.assertEqual(self._MakeRangeInfoList(expected_info_list),
dwarfdump.ParseDumpOutputForTest(lines))
def testParseNonContiguousAddressRangeOtherBrackets(self):
"""Test parsing DW_AT_ranges when non-standard brackets are used."""
lines = [
'DW_TAG_compile_unit',
'DW_AT_name ("solution.cc")',
'DW_AT_low_pc (0x0)',
'DW_AT_ranges [0x1',
'(0x10, 0x21)',
'[0x31, 0x41]]',
]
expected_info_list = [(0x10, 0x21, 'solution.cc'),
(0x31, 0x41, 'solution.cc')]
self.assertEqual(self._MakeRangeInfoList(expected_info_list),
dwarfdump.ParseDumpOutputForTest(lines))
def testParseNonContiguousIgnoreEmptyRanges(self):
"""Test that empty ranges are ignored when parsing DW_AT_ranges."""
lines = [
'DW_TAG_compile_unit',
'DW_AT_name ("solution.cc")',
'DW_AT_ranges (0x1',
'[0x1, 0x1)',
'[0x10, 0x21)',
'[0x22, 0x22)',
'[0x31, 0x41)',
'[0x42, 0x42))',
]
expected_info_list = [(0x10, 0x21, 'solution.cc'),
(0x31, 0x41, 'solution.cc')]
self.assertEqual(self._MakeRangeInfoList(expected_info_list),
dwarfdump.ParseDumpOutputForTest(lines))
def testParseContiguousAddressRange(self):
"""Test parsing DW_TAG_compile_unit with contiguous address range."""
lines = [
'DW_TAG_compile_unit',
'DW_AT_name ("solution.cc")',
'DW_AT_low_pc (0x1)',
'DW_AT_high_pc (0x10)',
]
expected_info_list = [
(0x1, 0x10, 'solution.cc'),
]
self.assertEqual(self._MakeRangeInfoList(expected_info_list),
dwarfdump.ParseDumpOutputForTest(lines))
def testParseSingleAddress(self):
"""Test parsing DW_TAG_compile_unit with single address."""
lines = [
'DW_TAG_compile_unit',
'DW_AT_name ("solution.cc")',
'DW_AT_low_pc (0x10)',
]
expected_info_list = [
(0x10, 0x11, 'solution.cc'),
]
self.assertEqual(self._MakeRangeInfoList(expected_info_list),
dwarfdump.ParseDumpOutputForTest(lines))
def testParseEmptyCompileUnit(self):
"""Test parsing empty DW_TAG_compile_unit."""
lines = ['DW_TAG_compile_unit']
self.assertEqual([], dwarfdump.ParseDumpOutputForTest(lines))
def testConsecutiveCompileUnits(self):
"""Test parsing consecutive DW_TAG_compile_units."""
lines = [
'DW_TAG_compile_unit',
'DW_AT_name ("foo.cc")',
'DW_AT_low_pc (0x1)',
'DW_AT_high_pc (0x10)',
'DW_TAG_compile_unit',
'DW_AT_name ("bar.cc")',
'DW_AT_low_pc (0x12)',
'DW_AT_high_pc (0x20)',
]
expected_info_list = [(0x1, 0x10, 'foo.cc'), (0x12, 0x20, 'bar.cc')]
self.assertEqual(self._MakeRangeInfoList(expected_info_list),
dwarfdump.ParseDumpOutputForTest(lines))
def testTagTerminatedCompileUnit(self):
"""Test parsing DW_TAG_compile_unit where compile unit is followed by a
non-DW_TAG_compile_unit entry.
"""
lines = [
'DW_TAG_compile_unit',
'DW_AT_name ("foo.cc")',
'DW_AT_low_pc (0x1)',
'DW_AT_high_pc (0x10)',
'DW_TAG_subprogram',
'DW_AT_name ("bar.cc")',
'DW_AT_low_pc (0x12)',
'DW_AT_high_pc (0x20)',
]
expected_info_list = [
(0x1, 0x10, 'foo.cc'),
]
self.assertEqual(self._MakeRangeInfoList(expected_info_list),
dwarfdump.ParseDumpOutputForTest(lines))
def testHandlePrefixes(self):
"""Test parsing DW_TAG_compile_unit where 'DW_' does not start line in
DW_TAG_compile_unit entry.
"""
lines = [
'0x1 DW_TAG_compile_unit',
' DW_AT_language (DW_LANG_C_plus_plus_14)',
' DW_AT_name ("solution.cc")',
' DW_AT_stmt_list (0x5)',
' DW_AT_low_pc (0x1)',
' DW_AT_high_pc (0x10)',
]
expected_info_list = [
(0x1, 0x10, 'solution.cc'),
]
self.assertEqual(self._MakeRangeInfoList(expected_info_list),
dwarfdump.ParseDumpOutputForTest(lines))
def testFindAddress(self):
"""Tests for _SourceMapper.FindSourceForTextAddress()"""
lines = [
'DW_TAG_compile_unit',
'DW_AT_name ("foo.cc")',
'DW_AT_low_pc (0x1)',
'DW_AT_high_pc (0x10)',
'DW_TAG_compile_unit',
'DW_AT_name ("bar.cc")',
'DW_AT_low_pc (0x21)',
'DW_AT_high_pc (0x30)',
'DW_TAG_compile_unit',
'DW_AT_name ("baz.cc")',
'DW_AT_low_pc (0x41)',
'DW_AT_high_pc (0x50)',
]
source_mapper = dwarfdump.CreateAddressSourceMapperForTest(lines)
# Address is before first range.
self.assertIsNone(source_mapper.FindSourceForTextAddress(0x0))
# Address matches start of first range.
self.assertEqual('foo.cc', source_mapper.FindSourceForTextAddress(0x1))
# Address is in the middle of middle range.
self.assertEqual('bar.cc', source_mapper.FindSourceForTextAddress(0x2a))
# Address matches end of last range.
self.assertEqual('baz.cc', source_mapper.FindSourceForTextAddress(0x4f))
# Address is after the last range.
self.assertIsNone(source_mapper.FindSourceForTextAddress(0x50))
if __name__ == '__main__':
unittest.main()
|
zippy/benchmarks/src/benchmarks/whoosh/benchmark/marc21.py | lucapele/pele-c | 319 | 12633087 | <reponame>lucapele/pele-c<filename>zippy/benchmarks/src/benchmarks/whoosh/benchmark/marc21.py<gh_stars>100-1000
from __future__ import with_statement, print_function
import fnmatch, logging, os.path, re
from whoosh import analysis, fields, index, qparser, query, scoring
from whoosh.util import now
log = logging.getLogger(__name__)
# Functions for reading MARC format
LEADER = (' ' * 10) + '22' + (' ' * 8) + '4500'
LEADER_LEN = len(LEADER)
DIRECTORY_ENTRY_LEN = 12
SUBFIELD_INDICATOR = "\x1F"
END_OF_FIELD = "\x1E"
END_OF_RECORD = "\x1D"
isbn_regex = re.compile(r'[-0-9xX]+')
def read_file(dbfile, tags=None):
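# Yields (parsed record, byte offset) pairs; each MARC record begins with a
# 5-character ASCII field giving the total record length.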
while True:
pos = dbfile.tell()
first5 = dbfile.read(5)
if not first5:
return
if len(first5) < 5:
raise Exception
length = int(first5)
chunk = dbfile.read(length - 5)
yield parse_record(first5 + chunk, tags), pos
def read_record(filename, pos, tags=None):
f = open(filename, "rb")
f.seek(pos)
first5 = f.read(5)
length = int(first5)
chunk = f.read(length - 5)
return parse_record(first5 + chunk, tags)
def parse_record(data, tags=None):
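# Parse a single MARC21 record: read the leader, walk the directory entries
# (12 bytes each: 3-byte tag, 4-byte length, 5-byte offset into the data area)
# and collect the requested fields, splitting variable fields on the subfield
# indicator byte.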
leader = data[:LEADER_LEN]
assert len(leader) == LEADER_LEN
dataoffset = int(data[12:17])
assert dataoffset > 0
assert dataoffset < len(data)
# dataoffset - 1 to avoid END-OF-FIELD byte
dirstart = LEADER_LEN
dirend = dataoffset - 1
# Number of fields in record
assert (dirend - dirstart) % DIRECTORY_ENTRY_LEN == 0
field_count = (dirend - dirstart) // DIRECTORY_ENTRY_LEN
result = {}
for i in xrange(field_count):
start = dirstart + i * DIRECTORY_ENTRY_LEN
end = start + DIRECTORY_ENTRY_LEN
tag = data[start:start + 3]
if tags and not tag in tags:
continue
entry = data[start:end]
elen = int(entry[3:7])
offset = dataoffset + int(entry[7:12])
edata = data[offset:offset + elen - 1]
if not (tag < "010" and tag.isdigit()):
edata = edata.split(SUBFIELD_INDICATOR)[1:]
if tag in result:
result[tag].extend(edata)
else:
result[tag] = edata
else:
result[tag] = edata
return result
def subfield(vs, code):
for v in vs:
if v.startswith(code):
return v[1:]
return None
def joinsubfields(vs):
return " ".join(v[1:] for v in vs if v and v[0] != "6")
def getfields(d, *tags):
return (d[tag] for tag in tags if tag in d)
def title(d):
title = None
if "245" in d:
svs = d["245"]
title = subfield(svs, "a")
if title:
t2 = subfield(svs, "b")
if t2:
title += t2
return title
def isbn(d):
if "020" in d:
num = subfield(d["020"], "a")
if num:
match = isbn_regex.search(num)
if match:
return match.group(0).replace('-', '')
def author(d):
if "100" in d:
return joinsubfields(d["100"])
elif "110" in d:
return joinsubfields(d["110"])
elif "111" in d:
return joinsubfields(d["111"])
def uniform_title(d):
if "130" in d:
return joinsubfields(d["130"])
elif "240" in d:
return joinsubfields(d["240"])
subjectfields = ("600 610 611 630 648 650 651 653 654 655 656 657 658 662 "
"690 691 696 697 698 699").split()
def subjects(d):
return " ".join(joinsubfields(vs) for vs in getfields(d, *subjectfields))
def physical(d):
return joinsubfields(d["300"])
def location(d):
return joinsubfields(d["852"])
def publisher(d):
if "260" in d:
return subfield(d["260"], "b")
def pubyear(d):
if "260" in d:
return subfield(d["260"], "c")
def uni(v):
return u"" if v is None else v.decode("utf-8", "replace")
# Indexing and searching
def make_index(basedir, ixdir, procs=4, limitmb=128, multisegment=True,
glob="*.mrc"):
if not os.path.exists(ixdir):
os.mkdir(ixdir)
# Multi-lingual stop words
stoplist = (analysis.STOP_WORDS
| set("de la der und le die et en al no von di du da "
"del zur ein".split()))
# Schema
ana = analysis.StemmingAnalyzer(stoplist=stoplist)
schema = fields.Schema(title=fields.TEXT(analyzer=ana),
author=fields.TEXT(phrase=False),
subject=fields.TEXT(analyzer=ana, phrase=False),
file=fields.STORED, pos=fields.STORED,
)
# MARC fields to extract
mfields = set(subjectfields) # Subjects
mfields.update("100 110 111".split()) # Author
mfields.add("245") # Title
print("Indexing with %d processor(s) and %d MB per processor"
% (procs, limitmb))
c = 0
t = now()
ix = index.create_in(ixdir, schema)
with ix.writer(procs=procs, limitmb=limitmb,
multisegment=multisegment) as w:
filenames = [filename for filename in os.listdir(basedir)
if fnmatch.fnmatch(filename, glob)]
for filename in filenames:
path = os.path.join(basedir, filename)
print("Indexing", path)
f = open(path, 'rb')
for x, pos in read_file(f, mfields):
w.add_document(title=uni(title(x)), author=uni(author(x)),
subject=uni(subjects(x)),
file=filename, pos=pos)
c += 1
f.close()
print("Committing...")
print("Indexed %d records in %0.02f minutes" % (c, (now() - t) / 60.0))
def print_record(no, basedir, filename, pos):
path = os.path.join(basedir, filename)
record = read_record(path, pos)
print("% 5d. %s" % (no + 1, title(record)))
print(" ", author(record))
print(" ", subjects(record))
isbn_num = isbn(record)
if isbn_num:
print(" ISBN:", isbn_num)
print()
def search(qstring, ixdir, basedir, limit=None, optimize=True, scores=True):
ix = index.open_dir(ixdir)
qp = qparser.QueryParser("title", ix.schema)
q = qp.parse(qstring)
with ix.searcher(weighting=scoring.PL2()) as s:
if scores:
r = s.search(q, limit=limit, optimize=optimize)
for hit in r:
print_record(hit.rank, basedir, hit["file"], hit["pos"])
print("Found %d records in %0.06f seconds" % (len(r), r.runtime))
else:
t = now()
for i, docnum in enumerate(s.docs_for_query(q)):
if not limit or i < limit:
fields = s.stored_fields(docnum)
print_record(i, basedir, fields["file"], fields["pos"])
print("Found %d records in %0.06f seconds" % (i, now() - t))
if __name__ == "__main__":
from optparse import OptionParser
p = OptionParser(usage="usage: %prog [options] query")
# Common options
p.add_option("-f", "--filedir", metavar="DIR", dest="basedir",
help="Directory containing the .mrc files to index",
default="data/HLOM")
p.add_option("-d", "--dir", metavar="DIR", dest="ixdir",
help="Directory containing the index", default="marc_index")
# Indexing options
p.add_option("-i", "--index", dest="index",
help="Index the records", action="store_true", default=False)
p.add_option("-p", "--procs", metavar="NPROCS", dest="procs",
help="Number of processors to use", default="1")
p.add_option("-m", "--mb", metavar="MB", dest="limitmb",
help="Limit the indexer to this many MB of memory per writer",
default="128")
p.add_option("-M", "--merge-segments", dest="multisegment",
help="If indexing with multiproc, merge the segments after"
" indexing", action="store_false", default=True)
p.add_option("-g", "--match", metavar="GLOB", dest="glob",
help="Only index file names matching the given pattern",
default="*.mrc")
# Search options
p.add_option("-l", "--limit", metavar="NHITS", dest="limit",
help="Maximum number of search results to print (0=no limit)",
default="10")
p.add_option("-O", "--no-optimize", dest="optimize",
help="Turn off searcher optimization (for debugging)",
action="store_false", default=True)
p.add_option("-s", "--scoring", dest="scores",
help="Score the results", action="store_true", default=False)
options, args = p.parse_args()
if options.index:
make_index(options.basedir, options.ixdir,
procs=int(options.procs),
limitmb=int(options.limitmb),
multisegment=options.multisegment,
glob=options.glob)
if args:
qstring = " ".join(args).decode("utf-8")
limit = int(options.limit)
if limit < 1:
limit = None
search(qstring, options.ixdir, options.basedir, limit=limit,
optimize=options.optimize, scores=options.scores)
|
livecode/sample_X.py | pragnesh-ai/driverlessai-recipes | 194 | 12633091 | """ Randomly sample rows from dataset"""
# Specification:
# Inputs:
# X: datatable - primary dataset
# Parameters:
# fraction: float - fraction of rows to sample from 'X' (must be between 0 and 1)
# random_seed: int - random seed to control for reproducibility
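# Example: with fraction = 0.1, a 1,000-row dataset X yields a sample of 100 rows.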
import random
fraction = 0.1
random_seed = 0.7030
new_dataset_name = "new_dataset_name_after_sampling"
N = X.shape[0]
sample_size = int(N * fraction)
random.seed(random_seed)
return {new_dataset_name: X[random.sample(range(N), sample_size), :]}
|
graphql_compiler/compiler/ir_lowering_match/__init__.py | kensho-technologies/graphql-compiler | 521 | 12633110 | # Copyright 2018-present Kensho Technologies, LLC.
import six
from ...schema.schema_info import CommonSchemaInfo
from ..blocks import Filter
from ..compiler_frontend import IrAndMetadata
from ..ir_lowering_common.common import (
extract_optional_location_root_info,
extract_simple_optional_location_info,
lower_context_field_existence,
merge_consecutive_filter_clauses,
optimize_boolean_expression_comparisons,
remove_end_optionals,
)
from ..ir_self_consistency_checks import self_consistency_check_ir_blocks_from_frontend
from ..match_query import MatchQuery, convert_to_match_query
from ..workarounds import (
orientdb_class_with_while,
orientdb_eval_scheduling,
orientdb_query_execution,
)
from .between_lowering import lower_comparisons_to_between
from .ir_lowering import (
lower_backtrack_blocks,
lower_folded_coerce_types_into_filter_blocks,
lower_string_operators,
remove_backtrack_blocks_from_fold,
rewrite_binary_composition_inside_ternary_conditional,
truncate_repeated_single_step_traversals,
truncate_repeated_single_step_traversals_in_sub_queries,
)
from .optional_traversal import (
collect_filters_to_first_location_occurrence,
convert_optional_traversals_to_compound_match_query,
lower_context_field_expressions,
prune_non_existent_outputs,
)
from .utils import construct_where_filter_predicate
##############
# Public API #
##############
def lower_ir(schema_info: CommonSchemaInfo, ir: IrAndMetadata) -> MatchQuery:
"""Lower the IR into an IR form that can be represented in MATCH queries.
Args:
schema_info: CommonSchemaInfo containing all relevant schema information
ir: IrAndMetadata representing the query to lower into MATCH-compatible form
Returns:
MatchQuery object containing the IR blocks organized in a MATCH-like structure
"""
self_consistency_check_ir_blocks_from_frontend(ir.ir_blocks, ir.query_metadata_table)
# Construct the mapping of each location to its corresponding GraphQL type.
location_types = {
location: location_info.type
for location, location_info in ir.query_metadata_table.registered_locations
}
# Compute the set of all locations that have associated type coercions.
coerced_locations = {
location
for location, location_info in ir.query_metadata_table.registered_locations
if location_info.coerced_from_type is not None
}
# Extract information for both simple and complex @optional traverses
location_to_optional_results = extract_optional_location_root_info(ir.ir_blocks)
complex_optional_roots, location_to_optional_roots = location_to_optional_results
simple_optional_root_info = extract_simple_optional_location_info(
ir.ir_blocks, complex_optional_roots, location_to_optional_roots
)
ir_blocks = remove_end_optionals(ir.ir_blocks)
# Append global operation block(s) to filter out incorrect results
# from simple optional match traverses (using a WHERE statement)
if len(simple_optional_root_info) > 0:
where_filter_predicate = construct_where_filter_predicate(
ir.query_metadata_table, simple_optional_root_info
)
# The GlobalOperationsStart block should already exist at this point. It is inserted
# in the compiler_frontend, and this function asserts that at the beginning.
ir_blocks.insert(-1, Filter(where_filter_predicate))
# These lowering / optimization passes work on IR blocks.
ir_blocks = lower_context_field_existence(ir_blocks, ir.query_metadata_table)
ir_blocks = optimize_boolean_expression_comparisons(ir_blocks)
ir_blocks = rewrite_binary_composition_inside_ternary_conditional(ir_blocks)
ir_blocks = merge_consecutive_filter_clauses(ir_blocks)
ir_blocks = lower_string_operators(ir_blocks)
ir_blocks = orientdb_eval_scheduling.workaround_lowering_pass(
ir_blocks, ir.query_metadata_table
)
# Here, we lower from raw IR blocks into a MatchQuery object.
# From this point on, the lowering / optimization passes work on the MatchQuery representation.
match_query = convert_to_match_query(ir_blocks)
match_query = lower_comparisons_to_between(match_query)
match_query = lower_backtrack_blocks(match_query, ir.query_metadata_table)
match_query = truncate_repeated_single_step_traversals(match_query)
match_query = orientdb_class_with_while.workaround_type_coercions_in_recursions(match_query)
# Optimize and lower the IR blocks inside @fold scopes.
new_folds = {
key: merge_consecutive_filter_clauses(
remove_backtrack_blocks_from_fold(
lower_folded_coerce_types_into_filter_blocks(folded_ir_blocks)
)
)
for key, folded_ir_blocks in six.iteritems(match_query.folds)
}
match_query = match_query._replace(folds=new_folds)
compound_match_query = convert_optional_traversals_to_compound_match_query(
match_query, complex_optional_roots, location_to_optional_roots
)
compound_match_query = prune_non_existent_outputs(compound_match_query)
compound_match_query = collect_filters_to_first_location_occurrence(compound_match_query)
compound_match_query = lower_context_field_expressions(compound_match_query)
compound_match_query = truncate_repeated_single_step_traversals_in_sub_queries(
compound_match_query
)
compound_match_query = orientdb_query_execution.expose_ideal_query_execution_start_points(
compound_match_query, location_types, coerced_locations
)
return compound_match_query
|
ch21-轮廓Contours/凸包-凸性检测-边界矩形-最小外接圆-拟合.py | makelove/OpenCV-Python-Tutorial | 2,875 | 12633149 | <reponame>makelove/OpenCV-Python-Tutorial
# -*- coding: utf-8 -*-
# @Time : 2017/7/12 8:28 PM
# @Author : play4fun
# @File : 凸包-凸性检测-边界矩形-最小外接圆-拟合.py
# @Software: PyCharm
"""
凸包-凸性检测-边界矩形-最小外接圆-拟合.py:
"""
import cv2
import numpy as np
img=cv2.imread('../data/lightning.png',0)
image, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnt=contours[0]
'''
The function cv2.convexHull() can be used to check whether a curve has convexity defects and to
correct them. Generally speaking, a convex curve always bulges outward, or is at least flat;
wherever it caves inward, that is called a convexity defect. Take the hand in the figure as an
example: the red curve shows the convex hull of the hand, and the convexity defects are marked
with double arrows.
'''
# Signature: convexHull(points, hull=None, clockwise=None, returnPoints=None)
# Full form (for reference only; these variables are not defined here):
# hull = cv2.convexHull(points, hull, clockwise, returnPoints)
'''
points: the contour we pass in.
• hull: the output; usually not needed.
• clockwise: orientation flag. If set to True, the returned hull is ordered clockwise; otherwise it
  is counter-clockwise.
• returnPoints: True by default, in which case the coordinates of the hull points are returned.
  If set to False, the indices of the contour points corresponding to the hull points are returned.
'''
hull = cv2.convexHull(cnt)
# Convexity check
# The function cv2.isContourConvex() can be used to check whether a curve is convex. It simply
# returns True or False.
k = cv2.isContourConvex(cnt)
# Bounding rectangle
'''
Straight bounding rectangle: an upright rectangle with no rotation. It does not take the rotation
of the object into account, so its area is not the minimum. It can be obtained with the function
cv2.boundingRect().
x, y are the coordinates of the top-left corner of the rectangle; w, h are its width and height.
'''
x, y, w, h = cv2.boundingRect(cnt)
img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
'''
Rotated rectangle
Here the bounding rectangle is drawn with minimum area, so rotation is taken into account as well.
The function used is cv2.minAreaRect(). It returns a Box2D structure containing
(center (x, y), (width, height), angle of rotation). To draw this rectangle we need its four
corners, which are obtained with the function cv2.boxPoints().
'''
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
box = np.int0(box)
cv2.drawContours(img,[box],0,(0,0,255),2)
# Minimum enclosing circle
# The function cv2.minEnclosingCircle() finds the circumscribing circle of an object: among all
# circles that contain the object, it is the one with the smallest area.
(x, y), radius = cv2.minEnclosingCircle(cnt)
center = (int(x), int(y))
radius = int(radius)
img = cv2.circle(img, center, radius, (0, 255, 0), 2)
# Ellipse fitting
# The drawing function is cv2.ellipse(); the ellipse returned by cv2.fitEllipse() is in fact the
# inscribed ellipse of the rotated bounding rectangle.
ellipse = cv2.fitEllipse(cnt)
#((135.34278869628906, 134.22764587402344),(57.018402099609375, 166.91265869140625),136.8311767578125)
angle=ellipse[2]
im = cv2.ellipse(img, ellipse, (0, 255, 0), 2)
# Line fitting
# We can fit a straight line to a set of points; in the same way, we can fit a straight line to
# the white points in an image.
rows, cols = img.shape[:2]
[vx, vy, x, y] = cv2.fitLine(cnt, cv2.DIST_L2, 0, 0.01, 0.01)
lefty = int((-x * vy / vx) + y)
righty = int(((cols - x) * vy / vx) + y)
cv2.line(img, (cols - 1, righty), (0, lefty), (0, 255, 0), 2)
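# To inspect the drawn features interactively (assumes a GUI-enabled OpenCV build):
# cv2.imshow('contour features', img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()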
|
dgmc/models/spline.py | rlckd159/deep-graph-matching-consensus | 194 | 12633160 | <reponame>rlckd159/deep-graph-matching-consensus<filename>dgmc/models/spline.py
import torch
from torch.nn import Linear as Lin
import torch.nn.functional as F
from torch_geometric.nn import SplineConv
class SplineCNN(torch.nn.Module):
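# A stack of SplineConv layers; intermediate outputs are optionally concatenated
# (cat=True) and optionally projected by a final linear layer (lin=True).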
def __init__(self, in_channels, out_channels, dim, num_layers, cat=True,
lin=True, dropout=0.0):
super(SplineCNN, self).__init__()
self.in_channels = in_channels
self.dim = dim
self.num_layers = num_layers
self.cat = cat
self.lin = lin
self.dropout = dropout
self.convs = torch.nn.ModuleList()
for _ in range(num_layers):
conv = SplineConv(in_channels, out_channels, dim, kernel_size=5)
self.convs.append(conv)
in_channels = out_channels
if self.cat:
in_channels = self.in_channels + num_layers * out_channels
else:
in_channels = out_channels
if self.lin:
self.out_channels = out_channels
self.final = Lin(in_channels, out_channels)
else:
self.out_channels = in_channels
self.reset_parameters()
def reset_parameters(self):
for conv in self.convs:
conv.reset_parameters()
if self.lin:
self.final.reset_parameters()
def forward(self, x, edge_index, edge_attr, *args):
""""""
xs = [x]
for conv in self.convs:
xs += [F.relu(conv(xs[-1], edge_index, edge_attr))]
x = torch.cat(xs, dim=-1) if self.cat else xs[-1]
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.final(x) if self.lin else x
return x
def __repr__(self):
return ('{}({}, {}, dim={}, num_layers={}, cat={}, lin={}, '
'dropout={})').format(self.__class__.__name__,
self.in_channels, self.out_channels,
self.dim, self.num_layers, self.cat,
self.lin, self.dropout)
|
fonts/vector/romanc.py | szczys/st7789_mpy | 153 | 12633161 | <filename>fonts/vector/romanc.py
WIDTH = 32
HEIGHT = 32
FIRST = 0x20
LAST = 0x7f
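# Hershey-style packed stroke data (layout inferred, not documented here): each glyph is a
# vertex-count byte, a left/right extent byte pair, then that many coordinate byte pairs
# offset from 'R', with the pair ' R' acting as a pen-up marker between strokes.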
_font =\
b'\x00\x4a\x5a\x0e\x4d\x57\x52\x46\x51\x48\x52\x54\x53\x48\x52'\
b'\x46\x20\x52\x52\x48\x52\x4e\x20\x52\x52\x59\x51\x5a\x52\x5b'\
b'\x53\x5a\x52\x59\x15\x49\x5b\x4e\x46\x4d\x47\x4d\x4d\x20\x52'\
b'\x4e\x47\x4d\x4d\x20\x52\x4e\x46\x4f\x47\x4d\x4d\x20\x52\x57'\
b'\x46\x56\x47\x56\x4d\x20\x52\x57\x47\x56\x4d\x20\x52\x57\x46'\
b'\x58\x47\x56\x4d\x0b\x48\x5d\x53\x42\x4c\x62\x20\x52\x59\x42'\
b'\x52\x62\x20\x52\x4c\x4f\x5a\x4f\x20\x52\x4b\x55\x59\x55\x29'\
b'\x48\x5c\x50\x42\x50\x5f\x20\x52\x54\x42\x54\x5f\x20\x52\x58'\
b'\x49\x57\x4a\x58\x4b\x59\x4a\x59\x49\x57\x47\x54\x46\x50\x46'\
b'\x4d\x47\x4b\x49\x4b\x4b\x4c\x4d\x4d\x4e\x4f\x4f\x55\x51\x57'\
b'\x52\x59\x54\x20\x52\x4b\x4b\x4d\x4d\x4f\x4e\x55\x50\x57\x51'\
b'\x58\x52\x59\x54\x59\x58\x57\x5a\x54\x5b\x50\x5b\x4d\x5a\x4b'\
b'\x58\x4b\x57\x4c\x56\x4d\x57\x4c\x58\x1f\x46\x5e\x5b\x46\x49'\
b'\x5b\x20\x52\x4e\x46\x50\x48\x50\x4a\x4f\x4c\x4d\x4d\x4b\x4d'\
b'\x49\x4b\x49\x49\x4a\x47\x4c\x46\x4e\x46\x50\x47\x53\x48\x56'\
b'\x48\x59\x47\x5b\x46\x20\x52\x57\x54\x55\x55\x54\x57\x54\x59'\
b'\x56\x5b\x58\x5b\x5a\x5a\x5b\x58\x5b\x56\x59\x54\x57\x54\x30'\
b'\x46\x5f\x5b\x4e\x5a\x4f\x5b\x50\x5c\x4f\x5c\x4e\x5b\x4d\x5a'\
b'\x4d\x59\x4e\x58\x50\x56\x55\x54\x58\x52\x5a\x50\x5b\x4d\x5b'\
b'\x4a\x5a\x49\x58\x49\x55\x4a\x53\x50\x4f\x52\x4d\x53\x4b\x53'\
b'\x49\x52\x47\x50\x46\x4e\x47\x4d\x49\x4d\x4b\x4e\x4e\x50\x51'\
b'\x55\x58\x57\x5a\x5a\x5b\x5b\x5b\x5c\x5a\x5c\x59\x20\x52\x4d'\
b'\x5b\x4b\x5a\x4a\x58\x4a\x55\x4b\x53\x4d\x51\x20\x52\x4d\x4b'\
b'\x4e\x4d\x56\x58\x58\x5a\x5a\x5b\x05\x4e\x56\x52\x46\x51\x4d'\
b'\x20\x52\x53\x46\x51\x4d\x13\x4b\x59\x56\x42\x54\x44\x52\x47'\
b'\x50\x4b\x4f\x50\x4f\x54\x50\x59\x52\x5d\x54\x60\x56\x62\x20'\
b'\x52\x54\x44\x52\x48\x51\x4b\x50\x50\x50\x54\x51\x59\x52\x5c'\
b'\x54\x60\x13\x4b\x59\x4e\x42\x50\x44\x52\x47\x54\x4b\x55\x50'\
b'\x55\x54\x54\x59\x52\x5d\x50\x60\x4e\x62\x20\x52\x50\x44\x52'\
b'\x48\x53\x4b\x54\x50\x54\x54\x53\x59\x52\x5c\x50\x60\x08\x4a'\
b'\x5a\x52\x4c\x52\x58\x20\x52\x4d\x4f\x57\x55\x20\x52\x57\x4f'\
b'\x4d\x55\x05\x45\x5f\x52\x49\x52\x5b\x20\x52\x49\x52\x5b\x52'\
b'\x07\x4e\x56\x53\x57\x52\x58\x51\x57\x52\x56\x53\x57\x53\x59'\
b'\x51\x5b\x02\x45\x5f\x49\x52\x5b\x52\x05\x4e\x56\x52\x56\x51'\
b'\x57\x52\x58\x53\x57\x52\x56\x02\x47\x5d\x5b\x42\x49\x62\x27'\
b'\x48\x5c\x51\x46\x4e\x47\x4c\x4a\x4b\x4f\x4b\x52\x4c\x57\x4e'\
b'\x5a\x51\x5b\x53\x5b\x56\x5a\x58\x57\x59\x52\x59\x4f\x58\x4a'\
b'\x56\x47\x53\x46\x51\x46\x20\x52\x51\x46\x4f\x47\x4e\x48\x4d'\
b'\x4a\x4c\x4f\x4c\x52\x4d\x57\x4e\x59\x4f\x5a\x51\x5b\x20\x52'\
b'\x53\x5b\x55\x5a\x56\x59\x57\x57\x58\x52\x58\x4f\x57\x4a\x56'\
b'\x48\x55\x47\x53\x46\x0a\x48\x5c\x4e\x4a\x50\x49\x53\x46\x53'\
b'\x5b\x20\x52\x52\x47\x52\x5b\x20\x52\x4e\x5b\x57\x5b\x2c\x48'\
b'\x5c\x4c\x4a\x4d\x4b\x4c\x4c\x4b\x4b\x4b\x4a\x4c\x48\x4d\x47'\
b'\x50\x46\x54\x46\x57\x47\x58\x48\x59\x4a\x59\x4c\x58\x4e\x55'\
b'\x50\x50\x52\x4e\x53\x4c\x55\x4b\x58\x4b\x5b\x20\x52\x54\x46'\
b'\x56\x47\x57\x48\x58\x4a\x58\x4c\x57\x4e\x54\x50\x50\x52\x20'\
b'\x52\x4b\x59\x4c\x58\x4e\x58\x53\x5a\x56\x5a\x58\x59\x59\x58'\
b'\x20\x52\x4e\x58\x53\x5b\x57\x5b\x58\x5a\x59\x58\x59\x56\x2e'\
b'\x48\x5c\x4c\x4a\x4d\x4b\x4c\x4c\x4b\x4b\x4b\x4a\x4c\x48\x4d'\
b'\x47\x50\x46\x54\x46\x57\x47\x58\x49\x58\x4c\x57\x4e\x54\x4f'\
b'\x51\x4f\x20\x52\x54\x46\x56\x47\x57\x49\x57\x4c\x56\x4e\x54'\
b'\x4f\x20\x52\x54\x4f\x56\x50\x58\x52\x59\x54\x59\x57\x58\x59'\
b'\x57\x5a\x54\x5b\x50\x5b\x4d\x5a\x4c\x59\x4b\x57\x4b\x56\x4c'\
b'\x55\x4d\x56\x4c\x57\x20\x52\x57\x51\x58\x54\x58\x57\x57\x59'\
b'\x56\x5a\x54\x5b\x0c\x48\x5c\x54\x48\x54\x5b\x20\x52\x55\x46'\
b'\x55\x5b\x20\x52\x55\x46\x4a\x55\x5a\x55\x20\x52\x51\x5b\x58'\
b'\x5b\x26\x48\x5c\x4d\x46\x4b\x50\x20\x52\x4b\x50\x4d\x4e\x50'\
b'\x4d\x53\x4d\x56\x4e\x58\x50\x59\x53\x59\x55\x58\x58\x56\x5a'\
b'\x53\x5b\x50\x5b\x4d\x5a\x4c\x59\x4b\x57\x4b\x56\x4c\x55\x4d'\
b'\x56\x4c\x57\x20\x52\x53\x4d\x55\x4e\x57\x50\x58\x53\x58\x55'\
b'\x57\x58\x55\x5a\x53\x5b\x20\x52\x4d\x46\x57\x46\x20\x52\x4d'\
b'\x47\x52\x47\x57\x46\x2f\x48\x5c\x57\x49\x56\x4a\x57\x4b\x58'\
b'\x4a\x58\x49\x57\x47\x55\x46\x52\x46\x4f\x47\x4d\x49\x4c\x4b'\
b'\x4b\x4f\x4b\x55\x4c\x58\x4e\x5a\x51\x5b\x53\x5b\x56\x5a\x58'\
b'\x58\x59\x55\x59\x54\x58\x51\x56\x4f\x53\x4e\x52\x4e\x4f\x4f'\
b'\x4d\x51\x4c\x54\x20\x52\x52\x46\x50\x47\x4e\x49\x4d\x4b\x4c'\
b'\x4f\x4c\x55\x4d\x58\x4f\x5a\x51\x5b\x20\x52\x53\x5b\x55\x5a'\
b'\x57\x58\x58\x55\x58\x54\x57\x51\x55\x4f\x53\x4e\x1e\x48\x5c'\
b'\x4b\x46\x4b\x4c\x20\x52\x4b\x4a\x4c\x48\x4e\x46\x50\x46\x55'\
b'\x49\x57\x49\x58\x48\x59\x46\x20\x52\x4c\x48\x4e\x47\x50\x47'\
b'\x55\x49\x20\x52\x59\x46\x59\x49\x58\x4c\x54\x51\x53\x53\x52'\
b'\x56\x52\x5b\x20\x52\x58\x4c\x53\x51\x52\x53\x51\x56\x51\x5b'\
b'\x3e\x48\x5c\x50\x46\x4d\x47\x4c\x49\x4c\x4c\x4d\x4e\x50\x4f'\
b'\x54\x4f\x57\x4e\x58\x4c\x58\x49\x57\x47\x54\x46\x50\x46\x20'\
b'\x52\x50\x46\x4e\x47\x4d\x49\x4d\x4c\x4e\x4e\x50\x4f\x20\x52'\
b'\x54\x4f\x56\x4e\x57\x4c\x57\x49\x56\x47\x54\x46\x20\x52\x50'\
b'\x4f\x4d\x50\x4c\x51\x4b\x53\x4b\x57\x4c\x59\x4d\x5a\x50\x5b'\
b'\x54\x5b\x57\x5a\x58\x59\x59\x57\x59\x53\x58\x51\x57\x50\x54'\
b'\x4f\x20\x52\x50\x4f\x4e\x50\x4d\x51\x4c\x53\x4c\x57\x4d\x59'\
b'\x4e\x5a\x50\x5b\x20\x52\x54\x5b\x56\x5a\x57\x59\x58\x57\x58'\
b'\x53\x57\x51\x56\x50\x54\x4f\x2f\x48\x5c\x58\x4d\x57\x50\x55'\
b'\x52\x52\x53\x51\x53\x4e\x52\x4c\x50\x4b\x4d\x4b\x4c\x4c\x49'\
b'\x4e\x47\x51\x46\x53\x46\x56\x47\x58\x49\x59\x4c\x59\x52\x58'\
b'\x56\x57\x58\x55\x5a\x52\x5b\x4f\x5b\x4d\x5a\x4c\x58\x4c\x57'\
b'\x4d\x56\x4e\x57\x4d\x58\x20\x52\x51\x53\x4f\x52\x4d\x50\x4c'\
b'\x4d\x4c\x4c\x4d\x49\x4f\x47\x51\x46\x20\x52\x53\x46\x55\x47'\
b'\x57\x49\x58\x4c\x58\x52\x57\x56\x56\x58\x54\x5a\x52\x5b\x0b'\
b'\x4e\x56\x52\x4f\x51\x50\x52\x51\x53\x50\x52\x4f\x20\x52\x52'\
b'\x56\x51\x57\x52\x58\x53\x57\x52\x56\x0d\x4e\x56\x52\x4f\x51'\
b'\x50\x52\x51\x53\x50\x52\x4f\x20\x52\x53\x57\x52\x58\x51\x57'\
b'\x52\x56\x53\x57\x53\x59\x51\x5b\x03\x46\x5e\x5a\x49\x4a\x52'\
b'\x5a\x5b\x05\x45\x5f\x49\x4f\x5b\x4f\x20\x52\x49\x55\x5b\x55'\
b'\x03\x46\x5e\x4a\x49\x5a\x52\x4a\x5b\x1f\x49\x5b\x4d\x4a\x4e'\
b'\x4b\x4d\x4c\x4c\x4b\x4c\x4a\x4d\x48\x4e\x47\x50\x46\x53\x46'\
b'\x56\x47\x57\x48\x58\x4a\x58\x4c\x57\x4e\x56\x4f\x52\x51\x52'\
b'\x54\x20\x52\x53\x46\x55\x47\x56\x48\x57\x4a\x57\x4c\x56\x4e'\
b'\x54\x50\x20\x52\x52\x59\x51\x5a\x52\x5b\x53\x5a\x52\x59\x37'\
b'\x45\x60\x57\x4e\x56\x4c\x54\x4b\x51\x4b\x4f\x4c\x4e\x4d\x4d'\
b'\x50\x4d\x53\x4e\x55\x50\x56\x53\x56\x55\x55\x56\x53\x20\x52'\
b'\x51\x4b\x4f\x4d\x4e\x50\x4e\x53\x4f\x55\x50\x56\x20\x52\x57'\
b'\x4b\x56\x53\x56\x55\x58\x56\x5a\x56\x5c\x54\x5d\x51\x5d\x4f'\
b'\x5c\x4c\x5b\x4a\x59\x48\x57\x47\x54\x46\x51\x46\x4e\x47\x4c'\
b'\x48\x4a\x4a\x49\x4c\x48\x4f\x48\x52\x49\x55\x4a\x57\x4c\x59'\
b'\x4e\x5a\x51\x5b\x54\x5b\x57\x5a\x59\x59\x5a\x58\x20\x52\x58'\
b'\x4b\x57\x53\x57\x55\x58\x56\x11\x48\x5c\x52\x46\x4b\x5b\x20'\
b'\x52\x52\x46\x59\x5b\x20\x52\x52\x49\x58\x5b\x20\x52\x4d\x55'\
b'\x56\x55\x20\x52\x49\x5b\x4f\x5b\x20\x52\x55\x5b\x5b\x5b\x2c'\
b'\x47\x5d\x4c\x46\x4c\x5b\x20\x52\x4d\x46\x4d\x5b\x20\x52\x49'\
b'\x46\x55\x46\x58\x47\x59\x48\x5a\x4a\x5a\x4c\x59\x4e\x58\x4f'\
b'\x55\x50\x20\x52\x55\x46\x57\x47\x58\x48\x59\x4a\x59\x4c\x58'\
b'\x4e\x57\x4f\x55\x50\x20\x52\x4d\x50\x55\x50\x58\x51\x59\x52'\
b'\x5a\x54\x5a\x57\x59\x59\x58\x5a\x55\x5b\x49\x5b\x20\x52\x55'\
b'\x50\x57\x51\x58\x52\x59\x54\x59\x57\x58\x59\x57\x5a\x55\x5b'\
b'\x1f\x47\x5c\x58\x49\x59\x4c\x59\x46\x58\x49\x56\x47\x53\x46'\
b'\x51\x46\x4e\x47\x4c\x49\x4b\x4b\x4a\x4e\x4a\x53\x4b\x56\x4c'\
b'\x58\x4e\x5a\x51\x5b\x53\x5b\x56\x5a\x58\x58\x59\x56\x20\x52'\
b'\x51\x46\x4f\x47\x4d\x49\x4c\x4b\x4b\x4e\x4b\x53\x4c\x56\x4d'\
b'\x58\x4f\x5a\x51\x5b\x1d\x47\x5d\x4c\x46\x4c\x5b\x20\x52\x4d'\
b'\x46\x4d\x5b\x20\x52\x49\x46\x53\x46\x56\x47\x58\x49\x59\x4b'\
b'\x5a\x4e\x5a\x53\x59\x56\x58\x58\x56\x5a\x53\x5b\x49\x5b\x20'\
b'\x52\x53\x46\x55\x47\x57\x49\x58\x4b\x59\x4e\x59\x53\x58\x56'\
b'\x57\x58\x55\x5a\x53\x5b\x15\x47\x5c\x4c\x46\x4c\x5b\x20\x52'\
b'\x4d\x46\x4d\x5b\x20\x52\x53\x4c\x53\x54\x20\x52\x49\x46\x59'\
b'\x46\x59\x4c\x58\x46\x20\x52\x4d\x50\x53\x50\x20\x52\x49\x5b'\
b'\x59\x5b\x59\x55\x58\x5b\x13\x47\x5b\x4c\x46\x4c\x5b\x20\x52'\
b'\x4d\x46\x4d\x5b\x20\x52\x53\x4c\x53\x54\x20\x52\x49\x46\x59'\
b'\x46\x59\x4c\x58\x46\x20\x52\x4d\x50\x53\x50\x20\x52\x49\x5b'\
b'\x50\x5b\x27\x47\x5e\x58\x49\x59\x4c\x59\x46\x58\x49\x56\x47'\
b'\x53\x46\x51\x46\x4e\x47\x4c\x49\x4b\x4b\x4a\x4e\x4a\x53\x4b'\
b'\x56\x4c\x58\x4e\x5a\x51\x5b\x53\x5b\x56\x5a\x58\x58\x20\x52'\
b'\x51\x46\x4f\x47\x4d\x49\x4c\x4b\x4b\x4e\x4b\x53\x4c\x56\x4d'\
b'\x58\x4f\x5a\x51\x5b\x20\x52\x58\x53\x58\x5b\x20\x52\x59\x53'\
b'\x59\x5b\x20\x52\x55\x53\x5c\x53\x1a\x46\x5e\x4b\x46\x4b\x5b'\
b'\x20\x52\x4c\x46\x4c\x5b\x20\x52\x58\x46\x58\x5b\x20\x52\x59'\
b'\x46\x59\x5b\x20\x52\x48\x46\x4f\x46\x20\x52\x55\x46\x5c\x46'\
b'\x20\x52\x4c\x50\x58\x50\x20\x52\x48\x5b\x4f\x5b\x20\x52\x55'\
b'\x5b\x5c\x5b\x0b\x4d\x58\x52\x46\x52\x5b\x20\x52\x53\x46\x53'\
b'\x5b\x20\x52\x4f\x46\x56\x46\x20\x52\x4f\x5b\x56\x5b\x13\x4b'\
b'\x5a\x55\x46\x55\x57\x54\x5a\x52\x5b\x50\x5b\x4e\x5a\x4d\x58'\
b'\x4d\x56\x4e\x55\x4f\x56\x4e\x57\x20\x52\x54\x46\x54\x57\x53'\
b'\x5a\x52\x5b\x20\x52\x51\x46\x58\x46\x1a\x46\x5c\x4b\x46\x4b'\
b'\x5b\x20\x52\x4c\x46\x4c\x5b\x20\x52\x59\x46\x4c\x53\x20\x52'\
b'\x51\x4f\x59\x5b\x20\x52\x50\x4f\x58\x5b\x20\x52\x48\x46\x4f'\
b'\x46\x20\x52\x55\x46\x5b\x46\x20\x52\x48\x5b\x4f\x5b\x20\x52'\
b'\x55\x5b\x5b\x5b\x0d\x49\x5b\x4e\x46\x4e\x5b\x20\x52\x4f\x46'\
b'\x4f\x5b\x20\x52\x4b\x46\x52\x46\x20\x52\x4b\x5b\x5a\x5b\x5a'\
b'\x55\x59\x5b\x1d\x46\x5f\x4b\x46\x4b\x5b\x20\x52\x4c\x46\x52'\
b'\x58\x20\x52\x4b\x46\x52\x5b\x20\x52\x59\x46\x52\x5b\x20\x52'\
b'\x59\x46\x59\x5b\x20\x52\x5a\x46\x5a\x5b\x20\x52\x48\x46\x4c'\
b'\x46\x20\x52\x59\x46\x5d\x46\x20\x52\x48\x5b\x4e\x5b\x20\x52'\
b'\x56\x5b\x5d\x5b\x14\x47\x5e\x4c\x46\x4c\x5b\x20\x52\x4d\x46'\
b'\x59\x59\x20\x52\x4d\x48\x59\x5b\x20\x52\x59\x46\x59\x5b\x20'\
b'\x52\x49\x46\x4d\x46\x20\x52\x56\x46\x5c\x46\x20\x52\x49\x5b'\
b'\x4f\x5b\x2b\x47\x5d\x51\x46\x4e\x47\x4c\x49\x4b\x4b\x4a\x4f'\
b'\x4a\x52\x4b\x56\x4c\x58\x4e\x5a\x51\x5b\x53\x5b\x56\x5a\x58'\
b'\x58\x59\x56\x5a\x52\x5a\x4f\x59\x4b\x58\x49\x56\x47\x53\x46'\
b'\x51\x46\x20\x52\x51\x46\x4f\x47\x4d\x49\x4c\x4b\x4b\x4f\x4b'\
b'\x52\x4c\x56\x4d\x58\x4f\x5a\x51\x5b\x20\x52\x53\x5b\x55\x5a'\
b'\x57\x58\x58\x56\x59\x52\x59\x4f\x58\x4b\x57\x49\x55\x47\x53'\
b'\x46\x1c\x47\x5d\x4c\x46\x4c\x5b\x20\x52\x4d\x46\x4d\x5b\x20'\
b'\x52\x49\x46\x55\x46\x58\x47\x59\x48\x5a\x4a\x5a\x4d\x59\x4f'\
b'\x58\x50\x55\x51\x4d\x51\x20\x52\x55\x46\x57\x47\x58\x48\x59'\
b'\x4a\x59\x4d\x58\x4f\x57\x50\x55\x51\x20\x52\x49\x5b\x50\x5b'\
b'\x3f\x47\x5d\x51\x46\x4e\x47\x4c\x49\x4b\x4b\x4a\x4f\x4a\x52'\
b'\x4b\x56\x4c\x58\x4e\x5a\x51\x5b\x53\x5b\x56\x5a\x58\x58\x59'\
b'\x56\x5a\x52\x5a\x4f\x59\x4b\x58\x49\x56\x47\x53\x46\x51\x46'\
b'\x20\x52\x51\x46\x4f\x47\x4d\x49\x4c\x4b\x4b\x4f\x4b\x52\x4c'\
b'\x56\x4d\x58\x4f\x5a\x51\x5b\x20\x52\x53\x5b\x55\x5a\x57\x58'\
b'\x58\x56\x59\x52\x59\x4f\x58\x4b\x57\x49\x55\x47\x53\x46\x20'\
b'\x52\x4e\x59\x4e\x58\x4f\x56\x51\x55\x52\x55\x54\x56\x55\x58'\
b'\x56\x5f\x57\x60\x59\x60\x5a\x5e\x5a\x5d\x20\x52\x55\x58\x56'\
b'\x5c\x57\x5e\x58\x5f\x59\x5f\x5a\x5e\x2c\x47\x5d\x4c\x46\x4c'\
b'\x5b\x20\x52\x4d\x46\x4d\x5b\x20\x52\x49\x46\x55\x46\x58\x47'\
b'\x59\x48\x5a\x4a\x5a\x4c\x59\x4e\x58\x4f\x55\x50\x4d\x50\x20'\
b'\x52\x55\x46\x57\x47\x58\x48\x59\x4a\x59\x4c\x58\x4e\x57\x4f'\
b'\x55\x50\x20\x52\x49\x5b\x50\x5b\x20\x52\x52\x50\x54\x51\x55'\
b'\x52\x58\x59\x59\x5a\x5a\x5a\x5b\x59\x20\x52\x54\x51\x55\x53'\
b'\x57\x5a\x58\x5b\x5a\x5b\x5b\x59\x5b\x58\x21\x48\x5c\x58\x49'\
b'\x59\x46\x59\x4c\x58\x49\x56\x47\x53\x46\x50\x46\x4d\x47\x4b'\
b'\x49\x4b\x4b\x4c\x4d\x4d\x4e\x4f\x4f\x55\x51\x57\x52\x59\x54'\
b'\x20\x52\x4b\x4b\x4d\x4d\x4f\x4e\x55\x50\x57\x51\x58\x52\x59'\
b'\x54\x59\x58\x57\x5a\x54\x5b\x51\x5b\x4e\x5a\x4c\x58\x4b\x55'\
b'\x4b\x5b\x4c\x58\x0f\x49\x5c\x52\x46\x52\x5b\x20\x52\x53\x46'\
b'\x53\x5b\x20\x52\x4c\x46\x4b\x4c\x4b\x46\x5a\x46\x5a\x4c\x59'\
b'\x46\x20\x52\x4f\x5b\x56\x5b\x16\x46\x5e\x4b\x46\x4b\x55\x4c'\
b'\x58\x4e\x5a\x51\x5b\x53\x5b\x56\x5a\x58\x58\x59\x55\x59\x46'\
b'\x20\x52\x4c\x46\x4c\x55\x4d\x58\x4f\x5a\x51\x5b\x20\x52\x48'\
b'\x46\x4f\x46\x20\x52\x56\x46\x5c\x46\x0e\x48\x5c\x4b\x46\x52'\
b'\x5b\x20\x52\x4c\x46\x52\x58\x20\x52\x59\x46\x52\x5b\x20\x52'\
b'\x49\x46\x4f\x46\x20\x52\x55\x46\x5b\x46\x17\x46\x5e\x4a\x46'\
b'\x4e\x5b\x20\x52\x4b\x46\x4e\x56\x20\x52\x52\x46\x4e\x5b\x20'\
b'\x52\x52\x46\x56\x5b\x20\x52\x53\x46\x56\x56\x20\x52\x5a\x46'\
b'\x56\x5b\x20\x52\x47\x46\x4e\x46\x20\x52\x57\x46\x5d\x46\x14'\
b'\x48\x5c\x4b\x46\x58\x5b\x20\x52\x4c\x46\x59\x5b\x20\x52\x59'\
b'\x46\x4b\x5b\x20\x52\x49\x46\x4f\x46\x20\x52\x55\x46\x5b\x46'\
b'\x20\x52\x49\x5b\x4f\x5b\x20\x52\x55\x5b\x5b\x5b\x13\x48\x5d'\
b'\x4b\x46\x52\x51\x52\x5b\x20\x52\x4c\x46\x53\x51\x53\x5b\x20'\
b'\x52\x5a\x46\x53\x51\x20\x52\x49\x46\x4f\x46\x20\x52\x56\x46'\
b'\x5c\x46\x20\x52\x4f\x5b\x56\x5b\x0f\x48\x5c\x58\x46\x4b\x5b'\
b'\x20\x52\x59\x46\x4c\x5b\x20\x52\x4c\x46\x4b\x4c\x4b\x46\x59'\
b'\x46\x20\x52\x4b\x5b\x59\x5b\x59\x55\x58\x5b\x0b\x4b\x59\x4f'\
b'\x42\x4f\x62\x20\x52\x50\x42\x50\x62\x20\x52\x4f\x42\x56\x42'\
b'\x20\x52\x4f\x62\x56\x62\x02\x4b\x59\x4b\x46\x59\x5e\x0b\x4b'\
b'\x59\x54\x42\x54\x62\x20\x52\x55\x42\x55\x62\x20\x52\x4e\x42'\
b'\x55\x42\x20\x52\x4e\x62\x55\x62\x07\x47\x5d\x4a\x54\x52\x4f'\
b'\x5a\x54\x20\x52\x4a\x54\x52\x50\x5a\x54\x02\x48\x5c\x48\x62'\
b'\x5c\x62\x06\x4c\x58\x50\x46\x55\x4c\x20\x52\x50\x46\x4f\x47'\
b'\x55\x4c\x26\x49\x5d\x4e\x4f\x4e\x50\x4d\x50\x4d\x4f\x4e\x4e'\
b'\x50\x4d\x54\x4d\x56\x4e\x57\x4f\x58\x51\x58\x58\x59\x5a\x5a'\
b'\x5b\x20\x52\x57\x4f\x57\x58\x58\x5a\x5a\x5b\x5b\x5b\x20\x52'\
b'\x57\x51\x56\x52\x50\x53\x4d\x54\x4c\x56\x4c\x58\x4d\x5a\x50'\
b'\x5b\x53\x5b\x55\x5a\x57\x58\x20\x52\x50\x53\x4e\x54\x4d\x56'\
b'\x4d\x58\x4e\x5a\x50\x5b\x20\x47\x5c\x4c\x46\x4c\x5b\x20\x52'\
b'\x4d\x46\x4d\x5b\x20\x52\x4d\x50\x4f\x4e\x51\x4d\x53\x4d\x56'\
b'\x4e\x58\x50\x59\x53\x59\x55\x58\x58\x56\x5a\x53\x5b\x51\x5b'\
b'\x4f\x5a\x4d\x58\x20\x52\x53\x4d\x55\x4e\x57\x50\x58\x53\x58'\
b'\x55\x57\x58\x55\x5a\x53\x5b\x20\x52\x49\x46\x4d\x46\x1b\x48'\
b'\x5b\x57\x50\x56\x51\x57\x52\x58\x51\x58\x50\x56\x4e\x54\x4d'\
b'\x51\x4d\x4e\x4e\x4c\x50\x4b\x53\x4b\x55\x4c\x58\x4e\x5a\x51'\
b'\x5b\x53\x5b\x56\x5a\x58\x58\x20\x52\x51\x4d\x4f\x4e\x4d\x50'\
b'\x4c\x53\x4c\x55\x4d\x58\x4f\x5a\x51\x5b\x23\x48\x5d\x57\x46'\
b'\x57\x5b\x20\x52\x58\x46\x58\x5b\x20\x52\x57\x50\x55\x4e\x53'\
b'\x4d\x51\x4d\x4e\x4e\x4c\x50\x4b\x53\x4b\x55\x4c\x58\x4e\x5a'\
b'\x51\x5b\x53\x5b\x55\x5a\x57\x58\x20\x52\x51\x4d\x4f\x4e\x4d'\
b'\x50\x4c\x53\x4c\x55\x4d\x58\x4f\x5a\x51\x5b\x20\x52\x54\x46'\
b'\x58\x46\x20\x52\x57\x5b\x5b\x5b\x1e\x48\x5b\x4c\x53\x58\x53'\
b'\x58\x51\x57\x4f\x56\x4e\x54\x4d\x51\x4d\x4e\x4e\x4c\x50\x4b'\
b'\x53\x4b\x55\x4c\x58\x4e\x5a\x51\x5b\x53\x5b\x56\x5a\x58\x58'\
b'\x20\x52\x57\x53\x57\x50\x56\x4e\x20\x52\x51\x4d\x4f\x4e\x4d'\
b'\x50\x4c\x53\x4c\x55\x4d\x58\x4f\x5a\x51\x5b\x15\x4b\x58\x55'\
b'\x47\x54\x48\x55\x49\x56\x48\x56\x47\x55\x46\x53\x46\x51\x47'\
b'\x50\x49\x50\x5b\x20\x52\x53\x46\x52\x47\x51\x49\x51\x5b\x20'\
b'\x52\x4d\x4d\x55\x4d\x20\x52\x4d\x5b\x54\x5b\x3b\x49\x5c\x51'\
b'\x4d\x4f\x4e\x4e\x4f\x4d\x51\x4d\x53\x4e\x55\x4f\x56\x51\x57'\
b'\x53\x57\x55\x56\x56\x55\x57\x53\x57\x51\x56\x4f\x55\x4e\x53'\
b'\x4d\x51\x4d\x20\x52\x4f\x4e\x4e\x50\x4e\x54\x4f\x56\x20\x52'\
b'\x55\x56\x56\x54\x56\x50\x55\x4e\x20\x52\x56\x4f\x57\x4e\x59'\
b'\x4d\x59\x4e\x57\x4e\x20\x52\x4e\x55\x4d\x56\x4c\x58\x4c\x59'\
b'\x4d\x5b\x50\x5c\x55\x5c\x58\x5d\x59\x5e\x20\x52\x4c\x59\x4d'\
b'\x5a\x50\x5b\x55\x5b\x58\x5c\x59\x5e\x59\x5f\x58\x61\x55\x62'\
b'\x4f\x62\x4c\x61\x4b\x5f\x4b\x5e\x4c\x5c\x4f\x5b\x1b\x47\x5d'\
b'\x4c\x46\x4c\x5b\x20\x52\x4d\x46\x4d\x5b\x20\x52\x4d\x50\x4f'\
b'\x4e\x52\x4d\x54\x4d\x57\x4e\x58\x50\x58\x5b\x20\x52\x54\x4d'\
b'\x56\x4e\x57\x50\x57\x5b\x20\x52\x49\x46\x4d\x46\x20\x52\x49'\
b'\x5b\x50\x5b\x20\x52\x54\x5b\x5b\x5b\x11\x4d\x58\x52\x46\x51'\
b'\x47\x52\x48\x53\x47\x52\x46\x20\x52\x52\x4d\x52\x5b\x20\x52'\
b'\x53\x4d\x53\x5b\x20\x52\x4f\x4d\x53\x4d\x20\x52\x4f\x5b\x56'\
b'\x5b\x18\x4d\x58\x53\x46\x52\x47\x53\x48\x54\x47\x53\x46\x20'\
b'\x52\x54\x4d\x54\x5f\x53\x61\x51\x62\x4f\x62\x4e\x61\x4e\x60'\
b'\x4f\x5f\x50\x60\x4f\x61\x20\x52\x53\x4d\x53\x5f\x52\x61\x51'\
b'\x62\x20\x52\x50\x4d\x54\x4d\x1a\x47\x5c\x4c\x46\x4c\x5b\x20'\
b'\x52\x4d\x46\x4d\x5b\x20\x52\x57\x4d\x4d\x57\x20\x52\x52\x53'\
b'\x58\x5b\x20\x52\x51\x53\x57\x5b\x20\x52\x49\x46\x4d\x46\x20'\
b'\x52\x54\x4d\x5a\x4d\x20\x52\x49\x5b\x50\x5b\x20\x52\x54\x5b'\
b'\x5a\x5b\x0b\x4d\x58\x52\x46\x52\x5b\x20\x52\x53\x46\x53\x5b'\
b'\x20\x52\x4f\x46\x53\x46\x20\x52\x4f\x5b\x56\x5b\x2b\x42\x63'\
b'\x47\x4d\x47\x5b\x20\x52\x48\x4d\x48\x5b\x20\x52\x48\x50\x4a'\
b'\x4e\x4d\x4d\x4f\x4d\x52\x4e\x53\x50\x53\x5b\x20\x52\x4f\x4d'\
b'\x51\x4e\x52\x50\x52\x5b\x20\x52\x53\x50\x55\x4e\x58\x4d\x5a'\
b'\x4d\x5d\x4e\x5e\x50\x5e\x5b\x20\x52\x5a\x4d\x5c\x4e\x5d\x50'\
b'\x5d\x5b\x20\x52\x44\x4d\x48\x4d\x20\x52\x44\x5b\x4b\x5b\x20'\
b'\x52\x4f\x5b\x56\x5b\x20\x52\x5a\x5b\x61\x5b\x1b\x47\x5d\x4c'\
b'\x4d\x4c\x5b\x20\x52\x4d\x4d\x4d\x5b\x20\x52\x4d\x50\x4f\x4e'\
b'\x52\x4d\x54\x4d\x57\x4e\x58\x50\x58\x5b\x20\x52\x54\x4d\x56'\
b'\x4e\x57\x50\x57\x5b\x20\x52\x49\x4d\x4d\x4d\x20\x52\x49\x5b'\
b'\x50\x5b\x20\x52\x54\x5b\x5b\x5b\x23\x48\x5c\x51\x4d\x4e\x4e'\
b'\x4c\x50\x4b\x53\x4b\x55\x4c\x58\x4e\x5a\x51\x5b\x53\x5b\x56'\
b'\x5a\x58\x58\x59\x55\x59\x53\x58\x50\x56\x4e\x53\x4d\x51\x4d'\
b'\x20\x52\x51\x4d\x4f\x4e\x4d\x50\x4c\x53\x4c\x55\x4d\x58\x4f'\
b'\x5a\x51\x5b\x20\x52\x53\x5b\x55\x5a\x57\x58\x58\x55\x58\x53'\
b'\x57\x50\x55\x4e\x53\x4d\x23\x47\x5c\x4c\x4d\x4c\x62\x20\x52'\
b'\x4d\x4d\x4d\x62\x20\x52\x4d\x50\x4f\x4e\x51\x4d\x53\x4d\x56'\
b'\x4e\x58\x50\x59\x53\x59\x55\x58\x58\x56\x5a\x53\x5b\x51\x5b'\
b'\x4f\x5a\x4d\x58\x20\x52\x53\x4d\x55\x4e\x57\x50\x58\x53\x58'\
b'\x55\x57\x58\x55\x5a\x53\x5b\x20\x52\x49\x4d\x4d\x4d\x20\x52'\
b'\x49\x62\x50\x62\x20\x48\x5c\x57\x4d\x57\x62\x20\x52\x58\x4d'\
b'\x58\x62\x20\x52\x57\x50\x55\x4e\x53\x4d\x51\x4d\x4e\x4e\x4c'\
b'\x50\x4b\x53\x4b\x55\x4c\x58\x4e\x5a\x51\x5b\x53\x5b\x55\x5a'\
b'\x57\x58\x20\x52\x51\x4d\x4f\x4e\x4d\x50\x4c\x53\x4c\x55\x4d'\
b'\x58\x4f\x5a\x51\x5b\x20\x52\x54\x62\x5b\x62\x16\x49\x5a\x4e'\
b'\x4d\x4e\x5b\x20\x52\x4f\x4d\x4f\x5b\x20\x52\x4f\x53\x50\x50'\
b'\x52\x4e\x54\x4d\x57\x4d\x58\x4e\x58\x4f\x57\x50\x56\x4f\x57'\
b'\x4e\x20\x52\x4b\x4d\x4f\x4d\x20\x52\x4b\x5b\x52\x5b\x1f\x4a'\
b'\x5b\x57\x4f\x58\x4d\x58\x51\x57\x4f\x56\x4e\x54\x4d\x50\x4d'\
b'\x4e\x4e\x4d\x4f\x4d\x51\x4e\x52\x50\x53\x55\x55\x57\x56\x58'\
b'\x57\x20\x52\x4d\x50\x4e\x51\x50\x52\x55\x54\x57\x55\x58\x56'\
b'\x58\x59\x57\x5a\x55\x5b\x51\x5b\x4f\x5a\x4e\x59\x4d\x57\x4d'\
b'\x5b\x4e\x59\x0f\x4b\x5a\x50\x46\x50\x57\x51\x5a\x53\x5b\x55'\
b'\x5b\x57\x5a\x58\x58\x20\x52\x51\x46\x51\x57\x52\x5a\x53\x5b'\
b'\x20\x52\x4d\x4d\x55\x4d\x1b\x47\x5d\x4c\x4d\x4c\x58\x4d\x5a'\
b'\x50\x5b\x52\x5b\x55\x5a\x57\x58\x20\x52\x4d\x4d\x4d\x58\x4e'\
b'\x5a\x50\x5b\x20\x52\x57\x4d\x57\x5b\x20\x52\x58\x4d\x58\x5b'\
b'\x20\x52\x49\x4d\x4d\x4d\x20\x52\x54\x4d\x58\x4d\x20\x52\x57'\
b'\x5b\x5b\x5b\x0e\x49\x5b\x4c\x4d\x52\x5b\x20\x52\x4d\x4d\x52'\
b'\x59\x20\x52\x58\x4d\x52\x5b\x20\x52\x4a\x4d\x50\x4d\x20\x52'\
b'\x54\x4d\x5a\x4d\x17\x46\x5e\x4a\x4d\x4e\x5b\x20\x52\x4b\x4d'\
b'\x4e\x58\x20\x52\x52\x4d\x4e\x5b\x20\x52\x52\x4d\x56\x5b\x20'\
b'\x52\x53\x4d\x56\x58\x20\x52\x5a\x4d\x56\x5b\x20\x52\x47\x4d'\
b'\x4e\x4d\x20\x52\x57\x4d\x5d\x4d\x14\x48\x5c\x4c\x4d\x57\x5b'\
b'\x20\x52\x4d\x4d\x58\x5b\x20\x52\x58\x4d\x4c\x5b\x20\x52\x4a'\
b'\x4d\x50\x4d\x20\x52\x54\x4d\x5a\x4d\x20\x52\x4a\x5b\x50\x5b'\
b'\x20\x52\x54\x5b\x5a\x5b\x15\x48\x5b\x4c\x4d\x52\x5b\x20\x52'\
b'\x4d\x4d\x52\x59\x20\x52\x58\x4d\x52\x5b\x50\x5f\x4e\x61\x4c'\
b'\x62\x4b\x62\x4a\x61\x4b\x60\x4c\x61\x20\x52\x4a\x4d\x50\x4d'\
b'\x20\x52\x54\x4d\x5a\x4d\x0f\x49\x5b\x57\x4d\x4c\x5b\x20\x52'\
b'\x58\x4d\x4d\x5b\x20\x52\x4d\x4d\x4c\x51\x4c\x4d\x58\x4d\x20'\
b'\x52\x4c\x5b\x58\x5b\x58\x57\x57\x5b\x27\x4b\x59\x54\x42\x52'\
b'\x43\x51\x44\x50\x46\x50\x48\x51\x4a\x52\x4b\x53\x4d\x53\x4f'\
b'\x51\x51\x20\x52\x52\x43\x51\x45\x51\x47\x52\x49\x53\x4a\x54'\
b'\x4c\x54\x4e\x53\x50\x4f\x52\x53\x54\x54\x56\x54\x58\x53\x5a'\
b'\x52\x5b\x51\x5d\x51\x5f\x52\x61\x20\x52\x51\x53\x53\x55\x53'\
b'\x57\x52\x59\x51\x5a\x50\x5c\x50\x5e\x51\x60\x52\x61\x54\x62'\
b'\x02\x4e\x56\x52\x42\x52\x62\x27\x4b\x59\x50\x42\x52\x43\x53'\
b'\x44\x54\x46\x54\x48\x53\x4a\x52\x4b\x51\x4d\x51\x4f\x53\x51'\
b'\x20\x52\x52\x43\x53\x45\x53\x47\x52\x49\x51\x4a\x50\x4c\x50'\
b'\x4e\x51\x50\x55\x52\x51\x54\x50\x56\x50\x58\x51\x5a\x52\x5b'\
b'\x53\x5d\x53\x5f\x52\x61\x20\x52\x53\x53\x51\x55\x51\x57\x52'\
b'\x59\x53\x5a\x54\x5c\x54\x5e\x53\x60\x52\x61\x50\x62\x17\x46'\
b'\x5e\x49\x55\x49\x53\x4a\x50\x4c\x4f\x4e\x4f\x50\x50\x54\x53'\
b'\x56\x54\x58\x54\x5a\x53\x5b\x51\x20\x52\x49\x53\x4a\x51\x4c'\
b'\x50\x4e\x50\x50\x51\x54\x54\x56\x55\x58\x55\x5a\x54\x5b\x51'\
b'\x5b\x4f\x22\x4a\x5a\x4a\x46\x4a\x5b\x4b\x5b\x4b\x46\x4c\x46'\
b'\x4c\x5b\x4d\x5b\x4d\x46\x4e\x46\x4e\x5b\x4f\x5b\x4f\x46\x50'\
b'\x46\x50\x5b\x51\x5b\x51\x46\x52\x46\x52\x5b\x53\x5b\x53\x46'\
b'\x54\x46\x54\x5b\x55\x5b\x55\x46\x56\x46\x56\x5b\x57\x5b\x57'\
b'\x46\x58\x46\x58\x5b\x59\x5b\x59\x46\x5a\x46\x5a\x5b'
_index =\
b'\x00\x00\x03\x00\x22\x00\x4f\x00\x68\x00\xbd\x00\xfe\x00\x61'\
b'\x01\x6e\x01\x97\x01\xc0\x01\xd3\x01\xe0\x01\xf1\x01\xf8\x01'\
b'\x05\x02\x0c\x02\x5d\x02\x74\x02\xcf\x02\x2e\x03\x49\x03\x98'\
b'\x03\xf9\x03\x38\x04\xb7\x04\x18\x05\x31\x05\x4e\x05\x57\x05'\
b'\x64\x05\x6d\x05\xae\x05\x1f\x06\x44\x06\x9f\x06\xe0\x06\x1d'\
b'\x07\x4a\x07\x73\x07\xc4\x07\xfb\x07\x14\x08\x3d\x08\x74\x08'\
b'\x91\x08\xce\x08\xf9\x08\x52\x09\x8d\x09\x0e\x0a\x69\x0a\xae'\
b'\x0a\xcf\x0a\xfe\x0a\x1d\x0b\x4e\x0b\x79\x0b\xa2\x0b\xc3\x0b'\
b'\xdc\x0b\xe3\x0b\xfc\x0b\x0d\x0c\x14\x0c\x23\x0c\x72\x0c\xb5'\
b'\x0c\xee\x0c\x37\x0d\x76\x0d\xa3\x0d\x1c\x0e\x55\x0e\x7a\x0e'\
b'\xad\x0e\xe4\x0e\xfd\x0e\x56\x0f\x8f\x0f\xd8\x0f\x21\x10\x64'\
b'\x10\x93\x10\xd4\x10\xf5\x10\x2e\x11\x4d\x11\x7e\x11\xa9\x11'\
b'\xd6\x11\xf7\x11\x48\x12\x4f\x12\xa0\x12\xd1\x12'
INDEX = memoryview(_index)
FONT = memoryview(_font)
|
conftest.py | utsekaj42/chaospy | 333 | 12633174 | <reponame>utsekaj42/chaospy
"""Global configuration."""
import os
import pytest
import numpy
import scipy
import sklearn.linear_model
@pytest.fixture(autouse=True)
def global_setup(doctest_namespace, monkeypatch):
"""Global configuration setup."""
# set debug mode during testing
environ = os.environ.copy()
environ["NUMPOLY_DEBUG"] = "1"
environ["CHAOSPY_DEBUG"] = "1"
monkeypatch.setattr("os.environ", environ)
import chaospy
doctest_namespace["numpy"] = numpy
doctest_namespace["scipy"] = scipy
doctest_namespace["chaospy"] = chaospy
doctest_namespace["sklearn"] = sklearn
# fix random seeds:
numpy.random.seed(1000)
|
inference.py | amorgun/pose-with-style | 168 | 12633209 | import argparse
import os
import torch
from torchvision import utils
from tqdm import tqdm
from torch.utils import data
import numpy as np
import random
from PIL import Image
import torchvision.transforms as transforms
from dataset import DeepFashionDataset
from model import Generator
from util.dp2coor import getSymXYcoordinates
from util.coordinate_completion_model import define_G as define_CCM
def tensors2square(im, pose, sil):
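# Pad the image, pose and silhouette tensors along the width so that each has
# args.size columns, centering the original content (relies on the global args).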
width = im.shape[2]
diff = args.size - width
left = int((args.size-width)/2)
right = diff - left
im = torch.nn.functional.pad(input=im, pad=(right, left, 0, 0), mode='constant', value=0)
pose = torch.nn.functional.pad(input=pose, pad=(right, left, 0, 0), mode='constant', value=0)
sil = torch.nn.functional.pad(input=sil, pad=(right, left, 0, 0), mode='constant', value=0)
return im, pose, sil
def tensor2square(x):
width = x.shape[2]
diff = args.size - width
left = int((args.size-width)/2)
right = diff - left
x = torch.nn.functional.pad(input=x, pad=(right, left, 0, 0), mode='constant', value=0)
return x
def generate(args, g_ema, device, mean_latent):
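# Pipeline: load the source image with its DensePose IUV and silhouette, recover the
# partial UV coordinates, complete them with the coordinate-completion model, then feed
# the appearance tensor plus the target pose to the generator to synthesize the reposed image.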
with torch.no_grad():
g_ema.eval()
path = args.input_path
input_name = args.input_name
pose_name = args.target_name
# input
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
input_image = Image.open(os.path.join(path, input_name+'.png')).convert('RGB')
w, h = input_image.size
input_image = transform(input_image).float().to(device)
input_pose = np.array(Image.open(os.path.join(path, input_name+'_iuv.png')))
input_sil = np.array(Image.open(os.path.join(path, input_name+'_sil.png')))/255
# get partial coordinates from dense pose
dp_uv_lookup_256_np = np.load('util/dp_uv_lookup_256.npy')
uv_coor, uv_mask, uv_symm_mask = getSymXYcoordinates(input_pose, resolution = 512)
# union sil with densepose masks
input_sil = 1-((1-input_sil) * (input_pose[:, :, 0] == 0).astype('float'))
input_sil = torch.from_numpy(input_sil).float().unsqueeze(0)
input_pose = torch.from_numpy(input_pose).permute(2, 0, 1)
# target
target_pose = np.array(Image.open(os.path.join(path, pose_name+'_iuv.png')))
target_pose = torch.from_numpy(target_pose).permute(2, 0, 1)
# convert to square by centering
input_image, input_pose, input_sil = tensors2square(input_image, input_pose, input_sil)
target_pose = tensor2square(target_pose)
# add batch dimension
input_image = input_image.unsqueeze(0).float().to(device)
input_pose = input_pose.unsqueeze(0).float().to(device)
input_sil = input_sil.unsqueeze(0).float().to(device)
target_pose = target_pose.unsqueeze(0).float().to(device)
# complete partial coordinates
coor_completion_generator = define_CCM().cuda()
CCM_checkpoint = torch.load(args.CCM_pretrained_model)
coor_completion_generator.load_state_dict(CCM_checkpoint["g"])
coor_completion_generator.eval()
for param in coor_completion_generator.parameters():
coor_completion_generator.requires_grad = False
# uv coor preprocessing (put image in center)
shift = int((h-w)/2) # center shift
uv_coor[:,:,0] = uv_coor[:,:,0] + shift # put in center
uv_coor = ((2*uv_coor/(h-1))-1)
uv_coor = uv_coor*np.expand_dims(uv_mask,2) + (-10*(1-np.expand_dims(uv_mask,2)))
# coordinate completion
uv_coor_pytorch = torch.from_numpy(uv_coor).float().permute(2, 0, 1).unsqueeze(0) # from h,w,c to 1,c,h,w
uv_mask_pytorch = torch.from_numpy(uv_mask).unsqueeze(0).unsqueeze(0).float() #1xchw
with torch.no_grad():
coor_completion_generator.eval()
complete_coor = coor_completion_generator(uv_coor_pytorch.cuda(), uv_mask_pytorch.cuda())
# reposing
appearance = torch.cat([input_image, input_sil, complete_coor], 1)
output, _ = g_ema(appearance=appearance, pose=target_pose)
utils.save_image(
output[:, :, :, int(shift):args.size-int(shift)],
os.path.join(args.save_path, input_name+'_2_'+pose_name+'_vis.png'),
nrow=1,
normalize=True,
range=(-1, 1),
)
if __name__ == "__main__":
device = "cuda"
parser = argparse.ArgumentParser(description="inference")
parser.add_argument("--input_path", type=str, help="path to the input dataset")
parser.add_argument("--input_name", type=str, default="fashionWOMENDressesid0000262902_3back", help="input file name")
parser.add_argument("--target_name", type=str, default="fashionWOMENDressesid0000262902_1front", help="target file name")
parser.add_argument("--size", type=int, default=512, help="output image size of the generator")
parser.add_argument("--truncation", type=float, default=1, help="truncation ratio")
parser.add_argument("--truncation_mean", type=int, default=4096, help="number of vectors to calculate mean for the truncation")
parser.add_argument("--channel_multiplier", type=int, default=2, help="channel multiplier of the generator. config-f = 2, else = 1")
parser.add_argument("--pretrained_model", type=str, default="posewithstyle.pt", help="pose with style pretrained model")
parser.add_argument("--CCM_pretrained_model", type=str, default="CCM_epoch50.pt", help="pretrained coordinate completion model")
parser.add_argument("--save_path", type=str, default="./data/output", help="path to save output .data/output")
args = parser.parse_args()
args.latent = 2048
args.n_mlp = 8
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
g_ema = Generator(args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier).to(device)
checkpoint = torch.load(args.pretrained_model)
g_ema.load_state_dict(checkpoint["g_ema"])
if args.truncation < 1:
with torch.no_grad():
mean_latent = g_ema.mean_latent(args.truncation_mean)
else:
mean_latent = None
generate(args, g_ema, device, mean_latent)
|
blender/arm/logicnode/object/LN_get_object_property.py | onelsonic/armory | 2,583 | 12633235 |
from arm.logicnode.arm_nodes import *
class GetPropertyNode(ArmLogicTreeNode):
"""Returns the value of the given object property.
@seeNode Set Object Property"""
bl_idname = 'LNGetPropertyNode'
bl_label = 'Get Object Property'
arm_version = 1
arm_section = 'props'
def arm_init(self, context):
self.add_input('ArmNodeSocketObject', 'Object')
self.add_input('ArmStringSocket', 'Property')
self.add_output('ArmDynamicSocket', 'Value')
self.add_output('ArmStringSocket', 'Property')
|
tests/emukit/quadrature/ground_truth_integrals_qkernel.py | ndalchau/emukit | 152 | 12633248 |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Use this script for ground truth integrals of the quadrature kernels.
import numpy as np
import GPy
from typing import List, Tuple
from emukit.model_wrappers.gpy_quadrature_wrappers import RBFGPy
from emukit.quadrature.kernels.integration_measures import IsotropicGaussianMeasure, UniformMeasure
from emukit.quadrature.kernels import QuadratureRBFLebesgueMeasure, QuadratureRBFIsoGaussMeasure, QuadratureRBFUniformMeasure
def _sample_uniform(num_samples: int, bounds: List[Tuple[float, float]]):
D = len(bounds)
samples = np.reshape(np.random.rand(num_samples * D), [num_samples, D])
samples_shifted = np.zeros(samples.shape)
for d in range(D):
samples_shifted[:, d] = samples[:, d] * (bounds[d][1] - bounds[d][0]) + bounds[d][0]
return samples_shifted
def _sample_gauss_iso(num_samples: int, measure: IsotropicGaussianMeasure):
D = measure.num_dimensions
samples = np.reshape(np.random.randn(num_samples * D), [num_samples, D])
return measure.mean + np.sqrt(measure.variance) * samples
def qK_lebesgue(num_samples: int, qrbf: QuadratureRBFLebesgueMeasure, x2: np.ndarray):
bounds = qrbf.integral_bounds._bounds
samples = _sample_uniform(num_samples, bounds)
Kx = qrbf.K(samples, x2)
differences = np.array([x[1] - x[0] for x in bounds])
volume = np.prod(differences)
return np.mean(Kx, axis=0) * volume
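# Note on the estimator above: this is plain Monte Carlo with a uniform proposal
# over the integration box B, i.e. int_B K(x, x2) dx is approximated by
# |B| * (1/N) * sum_i K(x_i, x2) with x_i ~ Uniform(B); the other qK_* and qKq_*
# helpers below reuse the same pattern with the appropriate measure.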
def qKq_lebesgue(num_samples: int, qrbf: QuadratureRBFLebesgueMeasure):
bounds = qrbf.integral_bounds._bounds
samples = _sample_uniform(num_samples, bounds)
qKx = qrbf.qK(samples)
differences = np.array([x[1] - x[0] for x in bounds])
volume = np.prod(differences)
return np.mean(qKx) * volume
def qK_gauss_iso(num_samples: int, qrbf: QuadratureRBFIsoGaussMeasure, x2: np.ndarray):
measure = qrbf.measure
samples = _sample_gauss_iso(num_samples, measure)
Kx = qrbf.K(samples, x2)
return np.mean(Kx, axis=0)
def qKq_gauss_iso(num_samples: int, qrbf: QuadratureRBFIsoGaussMeasure):
measure = qrbf.measure
samples = _sample_gauss_iso(num_samples, measure)
qKx = qrbf.qK(samples)
return np.mean(qKx)
def qK_uniform(num_samples: int, qrbf: QuadratureRBFUniformMeasure, x2: np.ndarray):
if qrbf.integral_bounds is None:
bounds = qrbf.measure.bounds
samples = _sample_uniform(num_samples, bounds)
Kx = qrbf.K(samples, x2)
return np.mean(Kx, axis=0)
else:
bounds = qrbf.integral_bounds._bounds
samples = _sample_uniform(num_samples, bounds)
Kx = qrbf.K(samples, x2) * qrbf.measure.compute_density(samples)[:, np.newaxis]
differences = np.array([x[1] - x[0] for x in bounds])
volume = np.prod(differences)
return np.mean(Kx, axis=0) * volume
def qKq_uniform(num_samples: int, qrbf: QuadratureRBFUniformMeasure):
if qrbf.integral_bounds is None:
bounds = qrbf.measure.bounds
samples = _sample_uniform(num_samples, bounds)
qKx = qrbf.qK(samples)
return np.mean(qKx)
else:
bounds = qrbf.integral_bounds._bounds
samples = _sample_uniform(num_samples, bounds)
qKx = qrbf.qK(samples) * qrbf.measure.compute_density(samples)[np.newaxis, :]
differences = np.array([x[1] - x[0] for x in bounds])
volume = np.prod(differences)
return np.mean(qKx) * volume
if __name__ == "__main__":
np.random.seed(0)
# === Choose MEASURE BELOW ======
#MEASURE_INTBOUNDS = 'Lebesgue-finite'
#MEASURE_INTBOUNDS = 'GaussIso-infinite'
#MEASURE_INTBOUNDS = 'Uniform-infinite'
MEASURE_INTBOUNDS = 'Uniform-finite'
# === CHOOSE MEASURE ABOVE ======
x1 = np.array([[-1, 1], [0, 0], [-2, 0.1]])
x2 = np.array([[-1, 1], [0, 0], [-2, 0.1], [-3, 3]])
D = x1.shape[1]
gpy_kernel = GPy.kern.RBF(input_dim=D)
emukit_rbf = RBFGPy(gpy_kernel)
if MEASURE_INTBOUNDS == 'Lebesgue-finite':
emukit_qrbf = QuadratureRBFLebesgueMeasure(emukit_rbf, integral_bounds=[(-1, 2), (-3, 3)])
elif MEASURE_INTBOUNDS == 'GaussIso-infinite':
measure = IsotropicGaussianMeasure(mean=np.arange(D), variance=2.)
emukit_qrbf = QuadratureRBFIsoGaussMeasure(rbf_kernel=emukit_rbf, measure=measure)
elif MEASURE_INTBOUNDS == 'Uniform-infinite':
measure = UniformMeasure(bounds=[(0, 2), (-4, 3)])
emukit_qrbf = QuadratureRBFUniformMeasure(emukit_rbf, integral_bounds=None, measure=measure)
elif MEASURE_INTBOUNDS == 'Uniform-finite':
measure = UniformMeasure(bounds=[(1, 2), (-4, 2)])
emukit_qrbf = QuadratureRBFUniformMeasure(emukit_rbf, integral_bounds=[(-1, 2), (-3, 3)], measure=measure)
else:
raise ValueError('Measure-integral-bounds combination not defined')
print()
print('measure: {}'.format(MEASURE_INTBOUNDS))
print('no dimensions: {}'.format(D))
print()
# === qK ==============================================================
num_runs = 100
num_samples = 1e6
num_std = 3
qK_SAMPLES = np.zeros([num_runs, x2.shape[0]])
qK = emukit_qrbf.qK(x2)[0, :]
for i in range(num_runs):
num_samples = int(num_samples)
if MEASURE_INTBOUNDS == 'Lebesgue-finite':
qK_samples = qK_lebesgue(num_samples, emukit_qrbf, x2)
elif MEASURE_INTBOUNDS == 'GaussIso-infinite':
qK_samples = qK_gauss_iso(num_samples, emukit_qrbf, x2)
elif MEASURE_INTBOUNDS == 'Uniform-infinite':
qK_samples = qK_uniform(num_samples, emukit_qrbf, x2)
elif MEASURE_INTBOUNDS == 'Uniform-finite':
qK_samples = qK_uniform(num_samples, emukit_qrbf, x2)
else:
raise ValueError('Measure-integral-bounds combination not defined')
qK_SAMPLES[i, :] = qK_samples
print('=== qK ========================================================')
print('no samples per integral: {:.1E}'.format(num_samples))
print('number of integrals: {}'.format(num_runs))
print('number of standard deviations: {}'.format(num_std))
for i in range(x2.shape[0]):
print([qK_SAMPLES[:, i].mean() - num_std * qK_SAMPLES[:, i].std(),
qK_SAMPLES[:, i].mean() + num_std * qK_SAMPLES[:, i].std()])
print()
# === qKq =============================================================
num_runs = 100
num_samples = 1e6
num_std = 3
qKq_SAMPLES = np.zeros(num_runs)
qKq = emukit_qrbf.qKq()
for i in range(num_runs):
num_samples = int(num_samples)
if MEASURE_INTBOUNDS == 'Lebesgue-finite':
qKq_samples = qKq_lebesgue(num_samples, emukit_qrbf)
elif MEASURE_INTBOUNDS == 'GaussIso-infinite':
qKq_samples = qKq_gauss_iso(num_samples, emukit_qrbf)
elif MEASURE_INTBOUNDS == 'Uniform-infinite':
qKq_samples = qKq_uniform(num_samples, emukit_qrbf)
elif MEASURE_INTBOUNDS == 'Uniform-finite':
qKq_samples = qKq_uniform(num_samples, emukit_qrbf)
else:
raise ValueError('Measure-integral-bounds combination not defined')
qKq_SAMPLES[i] = qKq_samples
print('=== qKq =======================================================')
print('no samples per integral: {:.1E}'.format(num_samples))
print('number of integrals: {}'.format(num_runs))
print('number of standard deviations: {}'.format(num_std))
print([qKq_SAMPLES.mean() - num_std * qKq_SAMPLES.std(),
qKq_SAMPLES.mean() + num_std * qKq_SAMPLES.std()])
print()
|
scalyr_agent/third_party_tls/tlslite/utils/x25519.py | zak905/scalyr-agent-2 | 121 | 12633261 | # Authors:
# <NAME> (2017)
#
# See the LICENSE file for legal information regarding use of this file.
"""Handling X25519 and X448 curve based key agreement protocol."""
from .cryptomath import bytesToNumber, numberToByteArray, divceil
# the names of the variables come directly from RFC 7748 so changing them
# would make the code harder to audit/compare
# pylint: disable=invalid-name
def decodeUCoordinate(u, bits):
"""Function to decode the public U coordinate of X25519-family curves."""
if bits not in (255, 448):
raise ValueError("Invalid number of expected bits")
if bits % 8:
u[-1] &= (1 << (bits % 8)) - 1
return bytesToNumber(u, endian="little")
def decodeScalar22519(k):
"""Function to decode the private K parameter of the x25519 function."""
k[0] &= 248
k[31] &= 127
k[31] |= 64
return bytesToNumber(k, endian="little")
def decodeScalar448(k):
"""Function to decode the private K parameter of the X448 function."""
k[0] &= 252
k[55] |= 128
return bytesToNumber(k, endian="little")
def cswap(swap, x_2, x_3):
"""Conditional swap function."""
if swap:
return x_3, x_2
else:
return x_2, x_3
X25519_G = numberToByteArray(9, 32, endian="little")
X25519_ORDER_SIZE = 32
def x25519(k, u):
"""
Perform point multiplication on X25519 curve.
:type k: bytearray
:param k: random secret value (multiplier), should be 32 byte long
:type u: bytearray
:param u: curve generator or the other party key share
:rtype: bytearray
"""
bits = 255
k = decodeScalar22519(k)
u = decodeUCoordinate(u, bits)
a24 = 121665
p = 2**255 - 19
return _x25519_generic(k, u, bits, a24, p)
X448_G = numberToByteArray(5, 56, endian="little")
X448_ORDER_SIZE = 56
def x448(k, u):
"""
Perform point multiplication on X448 curve.
:type k: bytearray
:param k: random secret value (multiplier), should be 56 bytes long
:type u: bytearray
:param u: curve generator or the other party key share
:rtype: bytearray
"""
bits = 448
k = decodeScalar448(k)
u = decodeUCoordinate(u, bits)
a24 = 39081
p = 2**448 - 2**224 - 1
return _x25519_generic(k, u, bits, a24, p)
def _x25519_generic(k, u, bits, a24, p):
"""Generic Montgomery ladder implementation of the x25519 algorithm."""
x_1 = u
x_2 = 1
z_2 = 0
x_3 = u
z_3 = 1
swap = 0
for t in range(bits-1, -1, -1):
k_t = (k >> t) & 1
swap ^= k_t
x_2, x_3 = cswap(swap, x_2, x_3)
z_2, z_3 = cswap(swap, z_2, z_3)
swap = k_t
A = (x_2 + z_2) % p
AA = pow(A, 2, p)
B = (x_2 - z_2) % p
BB = pow(B, 2, p)
E = (AA - BB) % p
C = (x_3 + z_3) % p
D = (x_3 - z_3) % p
DA = (D * A) % p
CB = (C * B) % p
x_3 = pow(DA + CB, 2, p)
z_3 = (x_1 * pow(DA - CB, 2, p)) % p
x_2 = (AA * BB) % p
z_2 = (E * (AA + a24 * E)) % p
x_2, x_3 = cswap(swap, x_2, x_3)
z_2, z_3 = cswap(swap, z_2, z_3)
ret = (x_2 * pow(z_2, p - 2, p)) % p
return numberToByteArray(ret, divceil(bits, 8), endian="little")
# pylint: enable=invalid-name
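# Illustrative sketch (not part of the original module): a Diffie-Hellman style
# key agreement built from the helpers above. Fresh random 32-byte secrets are
# assumed; both sides should derive the same shared value.
#
# import os
# a_priv = bytearray(os.urandom(X25519_ORDER_SIZE))
# b_priv = bytearray(os.urandom(X25519_ORDER_SIZE))
# a_pub = x25519(bytearray(a_priv), bytearray(X25519_G))
# b_pub = x25519(bytearray(b_priv), bytearray(X25519_G))
# assert x25519(bytearray(a_priv), b_pub) == x25519(bytearray(b_priv), a_pub)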
|
RNS/Utilities/rnpath.py | krypt0x/Reticulum | 254 | 12633263 | #!/usr/bin/env python3
import RNS
import sys
import time
import argparse
from RNS._version import __version__
def program_setup(configdir, destination_hexhash, verbosity):
try:
dest_len = (RNS.Reticulum.TRUNCATED_HASHLENGTH//8)*2
if len(destination_hexhash) != dest_len:
raise ValueError("Destination length is invalid, must be {hex} hexadecimal characters ({byte} bytes).".format(hex=dest_len, byte=dest_len//2))
try:
destination_hash = bytes.fromhex(destination_hexhash)
except Exception as e:
raise ValueError("Invalid destination entered. Check your input.")
except Exception as e:
print(str(e))
exit()
reticulum = RNS.Reticulum(configdir = configdir, loglevel = 3+verbosity)
if not RNS.Transport.has_path(destination_hash):
RNS.Transport.request_path(destination_hash)
print("Path to "+RNS.prettyhexrep(destination_hash)+" requested ", end=" ")
sys.stdout.flush()
i = 0
syms = "⢄⢂⢁⡁⡈⡐⡠"
while not RNS.Transport.has_path(destination_hash):
time.sleep(0.1)
print(("\b\b"+syms[i]+" "), end="")
sys.stdout.flush()
i = (i+1)%len(syms)
hops = RNS.Transport.hops_to(destination_hash)
next_hop = RNS.prettyhexrep(reticulum.get_next_hop(destination_hash))
next_hop_interface = reticulum.get_next_hop_if_name(destination_hash)
if hops != 1:
ms = "s"
else:
ms = ""
print("\rPath found, destination "+RNS.prettyhexrep(destination_hash)+" is "+str(hops)+" hop"+ms+" away via "+next_hop+" on "+next_hop_interface)
def main():
try:
parser = argparse.ArgumentParser(description="Reticulum Path Discovery Utility")
parser.add_argument("--config",
action="store",
default=None,
help="path to alternative Reticulum config directory",
type=str
)
parser.add_argument(
"--version",
action="version",
version="rnpath {version}".format(version=__version__)
)
parser.add_argument(
"destination",
nargs="?",
default=None,
help="hexadecimal hash of the destination",
type=str
)
parser.add_argument('-v', '--verbose', action='count', default=0)
args = parser.parse_args()
if args.config:
configarg = args.config
else:
configarg = None
if not args.destination:
print("")
parser.print_help()
print("")
else:
program_setup(configdir = configarg, destination_hexhash = args.destination, verbosity = args.verbose)
except KeyboardInterrupt:
print("")
exit()
if __name__ == "__main__":
main() |
mariner/exceptions.py | terabyte128/mariner | 167 | 12633279 |
from abc import ABC, abstractmethod
class MarinerException(Exception, ABC):
@abstractmethod
def get_title(self) -> str:
raise NotImplementedError
@abstractmethod
def get_description(self) -> str:
raise NotImplementedError
class UnexpectedPrinterResponse(MarinerException):
def __init__(self, response: str) -> None:
self.response = response
def get_title(self) -> str:
return "Unexpected Printer Response"
def get_description(self) -> str:
return f"The printer returned an unexpected response: {repr(self.response)}"
|
aliyun-python-sdk-core/tests/auth/algorithm/test_sha_hmac256.py | yndu13/aliyun-openapi-python-sdk | 1,001 | 12633290 |
# coding=utf-8
from tests import unittest
from aliyunsdkcore.auth.algorithm import sha_hmac256 as hmac256
class TestShaHmac256(unittest.TestCase):
def test(self):
self.assertEqual(hmac256.get_signer_name(), "SHA256withRSA")
self.assertEqual(hmac256.get_signer_type(), "PRIVATEKEY")
self.assertEqual(hmac256.get_signer_version(), "1.0")
def test_sha_hmac256(self):
secret = '''<KEY>'''
result = hmac256.get_sign_string("source", secret)
self.assertEqual(
result, "UNyJPD27jjSNl70b02E/DUtgtNESdtAuxbNBZTlksk1t/GYjiQNRlF"
"Iubp/EGKcWsqs7p5SFKnNiSRqWG3A51VmJFBXXtyW1nwLC9xY/MbUj6JVWNYCu"
"LkPWM942O+GAk7N+G8ZQZt7ib2MhruDAUmv1lLN26lDaCPBX2MJQJCo=")
result = hmac256.get_sign_string("中文unicode", secret)
self.assertEqual(
result, "UMmvLGAtZAiQIHhtNCkIQyvfAlbmGKVCM4Kz+HZQBgcXzc6qSjVNWQ"
"V5GFAh6w6Kzmhh7jpBf24Xybg88APEBfpCVDzWHrXBi38bV8xOik3dmiIcp4XI"
"wndwixLwv8fJ4O5WSliN6hJTflWSeUxP+H2AjWNb2XUzYmSzOt81t4Y=")
|
sdk/python/pulumi_aws/codestarconnections/host.py | alexbowers/pulumi-aws | 260 | 12633295 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['HostArgs', 'Host']
@pulumi.input_type
class HostArgs:
def __init__(__self__, *,
provider_endpoint: pulumi.Input[str],
provider_type: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
vpc_configuration: Optional[pulumi.Input['HostVpcConfigurationArgs']] = None):
"""
The set of arguments for constructing a Host resource.
:param pulumi.Input[str] provider_endpoint: The endpoint of the infrastructure to be represented by the host after it is created.
:param pulumi.Input[str] provider_type: The name of the external provider where your third-party code repository is configured.
:param pulumi.Input[str] name: The name of the host to be created. The name must be unique in the calling AWS account.
:param pulumi.Input['HostVpcConfigurationArgs'] vpc_configuration: The VPC configuration to be provisioned for the host. A VPC must be configured, and the infrastructure to be represented by the host must already be connected to the VPC.
"""
pulumi.set(__self__, "provider_endpoint", provider_endpoint)
pulumi.set(__self__, "provider_type", provider_type)
if name is not None:
pulumi.set(__self__, "name", name)
if vpc_configuration is not None:
pulumi.set(__self__, "vpc_configuration", vpc_configuration)
@property
@pulumi.getter(name="providerEndpoint")
def provider_endpoint(self) -> pulumi.Input[str]:
"""
The endpoint of the infrastructure to be represented by the host after it is created.
"""
return pulumi.get(self, "provider_endpoint")
@provider_endpoint.setter
def provider_endpoint(self, value: pulumi.Input[str]):
pulumi.set(self, "provider_endpoint", value)
@property
@pulumi.getter(name="providerType")
def provider_type(self) -> pulumi.Input[str]:
"""
The name of the external provider where your third-party code repository is configured.
"""
return pulumi.get(self, "provider_type")
@provider_type.setter
def provider_type(self, value: pulumi.Input[str]):
pulumi.set(self, "provider_type", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the host to be created. The name must be unique in the calling AWS account.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="vpcConfiguration")
def vpc_configuration(self) -> Optional[pulumi.Input['HostVpcConfigurationArgs']]:
"""
The VPC configuration to be provisioned for the host. A VPC must be configured, and the infrastructure to be represented by the host must already be connected to the VPC.
"""
return pulumi.get(self, "vpc_configuration")
@vpc_configuration.setter
def vpc_configuration(self, value: Optional[pulumi.Input['HostVpcConfigurationArgs']]):
pulumi.set(self, "vpc_configuration", value)
@pulumi.input_type
class _HostState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
provider_endpoint: Optional[pulumi.Input[str]] = None,
provider_type: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
vpc_configuration: Optional[pulumi.Input['HostVpcConfigurationArgs']] = None):
"""
Input properties used for looking up and filtering Host resources.
:param pulumi.Input[str] arn: The CodeStar Host ARN.
:param pulumi.Input[str] name: The name of the host to be created. The name must be unique in the calling AWS account.
:param pulumi.Input[str] provider_endpoint: The endpoint of the infrastructure to be represented by the host after it is created.
:param pulumi.Input[str] provider_type: The name of the external provider where your third-party code repository is configured.
:param pulumi.Input[str] status: The CodeStar Host status. Possible values are `PENDING`, `AVAILABLE`, `VPC_CONFIG_DELETING`, `VPC_CONFIG_INITIALIZING`, and `VPC_CONFIG_FAILED_INITIALIZATION`.
:param pulumi.Input['HostVpcConfigurationArgs'] vpc_configuration: The VPC configuration to be provisioned for the host. A VPC must be configured, and the infrastructure to be represented by the host must already be connected to the VPC.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if name is not None:
pulumi.set(__self__, "name", name)
if provider_endpoint is not None:
pulumi.set(__self__, "provider_endpoint", provider_endpoint)
if provider_type is not None:
pulumi.set(__self__, "provider_type", provider_type)
if status is not None:
pulumi.set(__self__, "status", status)
if vpc_configuration is not None:
pulumi.set(__self__, "vpc_configuration", vpc_configuration)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
The CodeStar Host ARN.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the host to be created. The name must be unique in the calling AWS account.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="providerEndpoint")
def provider_endpoint(self) -> Optional[pulumi.Input[str]]:
"""
The endpoint of the infrastructure to be represented by the host after it is created.
"""
return pulumi.get(self, "provider_endpoint")
@provider_endpoint.setter
def provider_endpoint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provider_endpoint", value)
@property
@pulumi.getter(name="providerType")
def provider_type(self) -> Optional[pulumi.Input[str]]:
"""
The name of the external provider where your third-party code repository is configured.
"""
return pulumi.get(self, "provider_type")
@provider_type.setter
def provider_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provider_type", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
The CodeStar Host status. Possible values are `PENDING`, `AVAILABLE`, `VPC_CONFIG_DELETING`, `VPC_CONFIG_INITIALIZING`, and `VPC_CONFIG_FAILED_INITIALIZATION`.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter(name="vpcConfiguration")
def vpc_configuration(self) -> Optional[pulumi.Input['HostVpcConfigurationArgs']]:
"""
The VPC configuration to be provisioned for the host. A VPC must be configured, and the infrastructure to be represented by the host must already be connected to the VPC.
"""
return pulumi.get(self, "vpc_configuration")
@vpc_configuration.setter
def vpc_configuration(self, value: Optional[pulumi.Input['HostVpcConfigurationArgs']]):
pulumi.set(self, "vpc_configuration", value)
class Host(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
provider_endpoint: Optional[pulumi.Input[str]] = None,
provider_type: Optional[pulumi.Input[str]] = None,
vpc_configuration: Optional[pulumi.Input[pulumi.InputType['HostVpcConfigurationArgs']]] = None,
__props__=None):
"""
Provides a CodeStar Host.
> **NOTE:** The `codestarconnections.Host` resource is created in the state `PENDING`. Authentication with the host provider must be completed in the AWS Console. For more information visit [Set up a pending host](https://docs.aws.amazon.com/dtconsole/latest/userguide/connections-host-setup.html).
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.codestarconnections.Host("example",
provider_endpoint="https://example.com",
provider_type="GitHubEnterpriseServer")
```
## Import
CodeStar Host can be imported using the ARN, e.g.
```sh
$ pulumi import aws:codestarconnections/host:Host example-host arn:aws:codestar-connections:us-west-1:0123456789:host/79d4d357-a2ee-41e4-b350-2fe39ae59448
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] name: The name of the host to be created. The name must be unique in the calling AWS account.
:param pulumi.Input[str] provider_endpoint: The endpoint of the infrastructure to be represented by the host after it is created.
:param pulumi.Input[str] provider_type: The name of the external provider where your third-party code repository is configured.
:param pulumi.Input[pulumi.InputType['HostVpcConfigurationArgs']] vpc_configuration: The VPC configuration to be provisioned for the host. A VPC must be configured, and the infrastructure to be represented by the host must already be connected to the VPC.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: HostArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a CodeStar Host.
> **NOTE:** The `codestarconnections.Host` resource is created in the state `PENDING`. Authentication with the host provider must be completed in the AWS Console. For more information visit [Set up a pending host](https://docs.aws.amazon.com/dtconsole/latest/userguide/connections-host-setup.html).
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.codestarconnections.Host("example",
provider_endpoint="https://example.com",
provider_type="GitHubEnterpriseServer")
```
## Import
CodeStar Host can be imported using the ARN, e.g.
```sh
$ pulumi import aws:codestarconnections/host:Host example-host arn:aws:codestar-connections:us-west-1:0123456789:host/79d4d357-a2ee-41e4-b350-2fe39ae59448
```
:param str resource_name: The name of the resource.
:param HostArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(HostArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
provider_endpoint: Optional[pulumi.Input[str]] = None,
provider_type: Optional[pulumi.Input[str]] = None,
vpc_configuration: Optional[pulumi.Input[pulumi.InputType['HostVpcConfigurationArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = HostArgs.__new__(HostArgs)
__props__.__dict__["name"] = name
if provider_endpoint is None and not opts.urn:
raise TypeError("Missing required property 'provider_endpoint'")
__props__.__dict__["provider_endpoint"] = provider_endpoint
if provider_type is None and not opts.urn:
raise TypeError("Missing required property 'provider_type'")
__props__.__dict__["provider_type"] = provider_type
__props__.__dict__["vpc_configuration"] = vpc_configuration
__props__.__dict__["arn"] = None
__props__.__dict__["status"] = None
super(Host, __self__).__init__(
'aws:codestarconnections/host:Host',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
provider_endpoint: Optional[pulumi.Input[str]] = None,
provider_type: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
vpc_configuration: Optional[pulumi.Input[pulumi.InputType['HostVpcConfigurationArgs']]] = None) -> 'Host':
"""
Get an existing Host resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The CodeStar Host ARN.
:param pulumi.Input[str] name: The name of the host to be created. The name must be unique in the calling AWS account.
:param pulumi.Input[str] provider_endpoint: The endpoint of the infrastructure to be represented by the host after it is created.
:param pulumi.Input[str] provider_type: The name of the external provider where your third-party code repository is configured.
:param pulumi.Input[str] status: The CodeStar Host status. Possible values are `PENDING`, `AVAILABLE`, `VPC_CONFIG_DELETING`, `VPC_CONFIG_INITIALIZING`, and `VPC_CONFIG_FAILED_INITIALIZATION`.
:param pulumi.Input[pulumi.InputType['HostVpcConfigurationArgs']] vpc_configuration: The VPC configuration to be provisioned for the host. A VPC must be configured, and the infrastructure to be represented by the host must already be connected to the VPC.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _HostState.__new__(_HostState)
__props__.__dict__["arn"] = arn
__props__.__dict__["name"] = name
__props__.__dict__["provider_endpoint"] = provider_endpoint
__props__.__dict__["provider_type"] = provider_type
__props__.__dict__["status"] = status
__props__.__dict__["vpc_configuration"] = vpc_configuration
return Host(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The CodeStar Host ARN.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the host to be created. The name must be unique in the calling AWS account.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="providerEndpoint")
def provider_endpoint(self) -> pulumi.Output[str]:
"""
The endpoint of the infrastructure to be represented by the host after it is created.
"""
return pulumi.get(self, "provider_endpoint")
@property
@pulumi.getter(name="providerType")
def provider_type(self) -> pulumi.Output[str]:
"""
The name of the external provider where your third-party code repository is configured.
"""
return pulumi.get(self, "provider_type")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
The CodeStar Host status. Possible values are `PENDING`, `AVAILABLE`, `VPC_CONFIG_DELETING`, `VPC_CONFIG_INITIALIZING`, and `VPC_CONFIG_FAILED_INITIALIZATION`.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="vpcConfiguration")
def vpc_configuration(self) -> pulumi.Output[Optional['outputs.HostVpcConfiguration']]:
"""
The VPC configuration to be provisioned for the host. A VPC must be configured, and the infrastructure to be represented by the host must already be connected to the VPC.
"""
return pulumi.get(self, "vpc_configuration")
|
dev/Gems/CloudGemFramework/v1/AWS/common-code/LambdaService/test/mock_handler.py | BadDevCode/lumberyard | 1,738 | 12633297 | #
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import service
received_request = None
received_param_a = None
received_param_b = None
received_param_c = None
received_kwargs = None
response = None
def reset():
global received_request, received_param_a, received_param_b, received_param_c, received_kwargs, response
received_request = None
received_param_a = None
received_param_b = None
received_param_c = None
received_kwargs = None
response = None
@service.api
def test_function_alone(request, param_a):
global received_request, received_param_a, response
received_request = request
received_param_a = param_a
return response
@service.api
def test_function_first(request, param_a, param_b):
global received_request, received_param_a, received_param_b, response
received_request = request
received_param_a = param_a
received_param_b = param_b
return response
@service.api
def test_function_last(request, param_b, param_a):
global received_request, received_param_a, received_param_b, response
received_request = request
received_param_a = param_a
received_param_b = param_b
return response
@service.api
def test_function_middle(request, param_b, param_a, param_c):
global received_request, received_param_a, received_param_b, received_param_c, response
received_request = request
received_param_a = param_a
received_param_b = param_b
received_param_c = param_c
return response
test_not_a_function = 1
def test_function_without_decorator(request, param_a):
raise RuntimeError('Should not be called')
@service.api
def test_function_with_default_value(request, param_a = 'default param a'):
global received_request, received_param_a, response
received_request = request
received_param_a = param_a
return response
@service.api
def test_function_with_kwargs(request, param_a, **kwargs):
global received_request, received_param_a, received_kwargs, response
received_request = request
received_param_a = param_a
received_kwargs = kwargs
return response
@service.api
def test_function_without_parameters(request):
global received_request, response
received_request = request
return response
@service.api
def test_function_with_first_parameter_name_conflict(param_a, param_b):
raise RuntimeError('Should not be called')
def __logging_filter(parameter):
if isinstance(parameter, dict):
param_a = parameter.get('param_a')
if isinstance(param_a, dict) and 'key1' in param_a:
param_a['key1'] = 'REPLACEMENT_VALUE'
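# With the filter above, a logged parameter dict such as
# {'param_a': {'key1': 'secret', 'key2': 2}, 'param_b': 'hidden'} is expected to
# have param_a['key1'] rewritten to 'REPLACEMENT_VALUE', while param_b is kept out
# of the log entirely via unlogged_parameters on the decorator below.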
@service.api(unlogged_parameters=['param_b'], logging_filter=__logging_filter)
def test_function_with_parameter_logging_filter(request, param_a, param_b):
global received_request, received_param_a, received_param_b, response
received_request = request
received_param_a = param_a
received_param_b = param_b
return response
|
benchmarks/benchmark.py | zeta1999/tensor_annotations | 117 | 12633301 |
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run pytype on Acme and see how long it takes.
Automatically repeats the test a number of times and report the average and
minimum/maximum.
When run normally, reports the time to type-check without using our stubs.
To check the time *with* the stubs, install the stubs as in the main README.md
and then run `export TYPESHED_HOME="$HOME/typeshed"` before launching
the script.
"""
import datetime
import logging
import os
import shutil
import subprocess
import tempfile
from absl import app
from absl import flags
_NUM_RUNS = flags.DEFINE_integer('num_runs', default=3,
help='Number of times to repeat test')
def main(_):
with tempfile.TemporaryDirectory() as d:
os.chdir(d)
# ===== Download Acme =====
subprocess.run(['git', 'clone', 'https://github.com/deepmind/acme'],
check=True)
os.chdir('acme')
subprocess.run(['git', 'checkout', '4da30b8'], check=True)
os.chdir(d)
check_dir = os.path.join('acme', 'acme', 'agents', 'tf')
# ===== Time how long it takes to run pytype =====
times = []
for run_num in range(_NUM_RUNS.value):
logging.info('Test %d/%d', 1 + run_num, _NUM_RUNS.value)
t1 = datetime.datetime.now()
subprocess.run(['pytype', check_dir,
# Ignore dependencies. (I've tried installing dependencies
# to fix this, but it still chokes on trfl and reverb,
# so giving up for now.)
'--disable', 'import-error'],
check=True)
t2 = datetime.datetime.now()
shutil.rmtree('.pytype') # Remove pytype cache
delta = t2 - t1
times.append(delta)
logging.info('Test %d/%d: %d seconds',
1 + run_num, _NUM_RUNS.value, delta.total_seconds())
# ===== Print statistics =====
mean = sum(times, datetime.timedelta()).total_seconds() / _NUM_RUNS.value
logging.info('Average: %d seconds', mean)
logging.info('Minimum: %d seconds', min(times).total_seconds())
logging.info('Maximum: %d seconds', max(times).total_seconds())
logging.info('All times: %r', times)
if __name__ == '__main__':
app.run(main)
|
tests/test_soft_fail.py | ActivisionGameScience/assertpy | 246 | 12633326 | # Copyright (c) 2015-2019, Activision Publishing, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from assertpy import assert_that, fail, soft_fail, soft_assertions
def test_soft_fail_without_context():
try:
soft_fail()
fail('should have raised error')
except AssertionError as e:
out = str(e)
assert_that(out).is_equal_to('Fail!')
assert_that(out).does_not_contain('should have raised error')
def test_soft_fail_with_msg_without_context():
try:
soft_fail('some msg')
fail('should have raised error')
except AssertionError as e:
out = str(e)
assert_that(out).is_equal_to('Fail: some msg!')
assert_that(out).does_not_contain('should have raised error')
def test_soft_fail():
try:
with soft_assertions():
soft_fail()
fail('should have raised error')
except AssertionError as e:
out = str(e)
assert_that(out).contains('Fail!')
assert_that(out).does_not_contain('should have raised error')
def test_soft_fail_with_msg():
try:
with soft_assertions():
soft_fail('foobar')
fail('should have raised error')
except AssertionError as e:
out = str(e)
assert_that(out).contains('Fail: foobar!')
assert_that(out).does_not_contain('should have raised error')
def test_soft_fail_with_soft_failing_asserts():
try:
with soft_assertions():
assert_that('foo').is_length(4)
assert_that('foo').is_empty()
soft_fail('foobar')
assert_that('foo').is_not_equal_to('foo')
assert_that('foo').is_equal_to_ignoring_case('BAR')
fail('should have raised error')
except AssertionError as e:
out = str(e)
assert_that(out).contains('Expected <foo> to be of length <4>, but was <3>.')
assert_that(out).contains('Expected <foo> to be empty string, but was not.')
assert_that(out).contains('Fail: foobar!')
assert_that(out).contains('Expected <foo> to be not equal to <foo>, but was.')
assert_that(out).contains('Expected <foo> to be case-insensitive equal to <BAR>, but was not.')
assert_that(out).does_not_contain('should have raised error')
def test_double_soft_fail():
try:
with soft_assertions():
soft_fail()
soft_fail('foobar')
fail('should have raised error')
except AssertionError as e:
out = str(e)
assert_that(out).contains('Fail!')
assert_that(out).contains('Fail: foobar!')
assert_that(out).does_not_contain('should have raised error')
|
scipy/stats/tests/studentized_range_mpmath_ref.py | jcharlong/scipy | 9,095 | 12633333 | # To run this script, run
# `python studentized_range_mpmath_ref.py`
# in the "scipy/stats/tests/" directory
# This script generates a JSON file "./data/studentized_range_mpmath_ref.json"
# that is used to compare the accuracy of `studentized_range` functions against
# precise (20 DOP) results generated using `mpmath`.
# Equations in this file have been taken from
# https://en.wikipedia.org/wiki/Studentized_range_distribution
# and have been checked against the following reference:
# <NAME>., and <NAME>. "Algorithm AS 190: Probabilities and
# Upper Quantiles for the Studentized Range." Journal of the Royal
# Statistical Society. Series C (Applied Statistics), vol. 32, no. 2,
# 1983, pp. 204-210. JSTOR, www.jstor.org/stable/2347300. Accessed 18
# Feb. 2021.
# Note: I would have preferred to use pickle rather than JSON, but decided
# against it due to security concerns.
import itertools
from collections import namedtuple
import json
import time
import os
from multiprocessing import Pool, cpu_count
from mpmath import gamma, pi, sqrt, quad, inf, mpf, mp
from mpmath import npdf as phi
from mpmath import ncdf as Phi
results_filepath = "data/studentized_range_mpmath_ref.json"
num_pools = max(cpu_count() - 1, 1)
MPResult = namedtuple("MPResult", ["src_case", "mp_result"])
CdfCase = namedtuple("CdfCase",
["q", "k", "v", "expected_atol", "expected_rtol"])
MomentCase = namedtuple("MomentCase",
["m", "k", "v", "expected_atol", "expected_rtol"])
# Load previously generated JSON results, or init a new dict if none exist
if os.path.isfile(results_filepath):
res_dict = json.load(open(results_filepath, mode="r"))
else:
res_dict = dict()
# Frame out data structure. Store data with the function type as a top level
# key to allow future expansion
res_dict["COMMENT"] = ("!!!!!! THIS FILE WAS AUTOGENERATED BY RUNNING "
"`python studentized_range_mpmath_ref.py` !!!!!!")
res_dict.setdefault("cdf_data", [])
res_dict.setdefault("pdf_data", [])
res_dict.setdefault("moment_data", [])
general_atol, general_rtol = 1e-11, 1e-11
mp.dps = 24
cp_q = [0.1, 1, 4, 10]
cp_k = [3, 10, 20]
cp_nu = [3, 10, 20, 50, 100, 120]
cdf_pdf_cases = [
CdfCase(*case,
general_atol,
general_rtol)
for case in
itertools.product(cp_q, cp_k, cp_nu)
]
mom_atol, mom_rtol = 1e-9, 1e-9
# These are EXTREMELY slow - Multiple days each in worst case.
moment_cases = [
MomentCase(i, 3, 10, mom_atol, mom_rtol)
for i in range(5)
]
def write_data():
"""Writes the current res_dict to the target JSON file"""
with open(results_filepath, mode="w") as f:
json.dump(res_dict, f, indent=2)
def to_dict(named_tuple):
"""Converts a namedtuple to a dict"""
return dict(named_tuple._asdict())
def mp_res_to_dict(mp_result):
"""Formats an MPResult namedtuple into a dict for JSON dumping"""
return {
"src_case": to_dict(mp_result.src_case),
# np assert can't handle mpf, so take the accuracy hit here.
"mp_result": float(mp_result.mp_result)
}
def cdf_mp(q, k, nu):
"""Straightforward implementation of studentized range CDF"""
q, k, nu = mpf(q), mpf(k), mpf(nu)
def inner(s, z):
return phi(z) * (Phi(z + q * s) - Phi(z)) ** (k - 1)
def outer(s, z):
return s ** (nu - 1) * phi(sqrt(nu) * s) * inner(s, z)
def whole(s, z):
return (sqrt(2 * pi) * k * nu ** (nu / 2)
/ (gamma(nu / 2) * 2 ** (nu / 2 - 1)) * outer(s, z))
res = quad(whole, [0, inf], [-inf, inf],
method="gauss-legendre", maxdegree=10)
return res
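# In LaTeX, the integral computed above is
#   F(q; k, \nu) = \frac{\sqrt{2\pi}\, k\, \nu^{\nu/2}}{\Gamma(\nu/2)\, 2^{\nu/2 - 1}}
#     \int_0^\infty \int_{-\infty}^\infty s^{\nu - 1} \phi(\sqrt{\nu}\, s)\, \phi(z)
#     \left[\Phi(z + q s) - \Phi(z)\right]^{k - 1} \, dz \, ds
# with phi/Phi the standard normal PDF/CDF, matching the reference cited in the
# header comment.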
def pdf_mp(q, k, nu):
"""Straightforward implementation of studentized range PDF"""
q, k, nu = mpf(q), mpf(k), mpf(nu)
def inner(s, z):
return phi(z + q * s) * phi(z) * (Phi(z + q * s) - Phi(z)) ** (k - 2)
def outer(s, z):
return s ** nu * phi(sqrt(nu) * s) * inner(s, z)
def whole(s, z):
return (sqrt(2 * pi) * k * (k - 1) * nu ** (nu / 2)
/ (gamma(nu / 2) * 2 ** (nu / 2 - 1)) * outer(s, z))
res = quad(whole, [0, inf], [-inf, inf],
method="gauss-legendre", maxdegree=10)
return res
def moment_mp(m, k, nu):
"""Implementation of the studentized range moment"""
m, k, nu = mpf(m), mpf(k), mpf(nu)
def inner(q, s, z):
return phi(z + q * s) * phi(z) * (Phi(z + q * s) - Phi(z)) ** (k - 2)
def outer(q, s, z):
return s ** nu * phi(sqrt(nu) * s) * inner(q, s, z)
def pdf(q, s, z):
return (sqrt(2 * pi) * k * (k - 1) * nu ** (nu / 2)
/ (gamma(nu / 2) * 2 ** (nu / 2 - 1)) * outer(q, s, z))
def whole(q, s, z):
return q ** m * pdf(q, s, z)
res = quad(whole, [0, inf], [0, inf], [-inf, inf],
method="gauss-legendre", maxdegree=10)
return res
def result_exists(set_key, case):
"""Searches the results dict for a result in the set that matches a case.
Returns True if such a case exists."""
if set_key not in res_dict:
raise ValueError(f"{set_key} not present in data structure!")
case_dict = to_dict(case)
existing_res = list(filter(
lambda res: res["src_case"] == case_dict, # dict comparison
res_dict[set_key]))
return len(existing_res) > 0
def run(case, run_lambda, set_key, index=0, total_cases=0):
"""Runs the single passed case, returning an mp dictionary and index"""
t_start = time.perf_counter()
res = run_lambda(case)
print(f"Finished {index + 1}/{total_cases} in batch. "
f"(Took {time.perf_counter() - t_start}s)")
return index, set_key, mp_res_to_dict(MPResult(case, res))
def write_result(res):
"""A callback for completed jobs. Inserts and writes a calculated result
to file."""
index, set_key, result_dict = res
res_dict[set_key].insert(index, result_dict)
write_data()
def run_cases(cases, run_lambda, set_key):
"""Runs an array of cases and writes to file"""
# Generate jobs to run from cases that do not have a result in
# the previously loaded JSON.
job_arg = [(case, run_lambda, set_key, index, len(cases))
for index, case in enumerate(cases)
if not result_exists(set_key, case)]
print(f"{len(cases) - len(job_arg)}/{len(cases)} cases won't be "
f"calculated because their results already exist.")
jobs = []
pool = Pool(num_pools)
# Run all using multiprocess
for case in job_arg:
jobs.append(pool.apply_async(run, args=case, callback=write_result))
pool.close()
pool.join()
def run_pdf(case):
return pdf_mp(case.q, case.k, case.v)
def run_cdf(case):
return cdf_mp(case.q, case.k, case.v)
def run_moment(case):
return moment_mp(case.m, case.k, case.v)
def main():
t_start = time.perf_counter()
total_cases = 2 * len(cdf_pdf_cases) + len(moment_cases)
print(f"Processing {total_cases} test cases")
print(f"Running 1st batch ({len(cdf_pdf_cases)} PDF cases). "
f"These take about 30s each.")
run_cases(cdf_pdf_cases, run_pdf, "pdf_data")
print(f"Running 2nd batch ({len(cdf_pdf_cases)} CDF cases). "
f"These take about 30s each.")
run_cases(cdf_pdf_cases, run_cdf, "cdf_data")
print(f"Running 3rd batch ({len(moment_cases)} moment cases). "
f"These take about anywhere from a few hours to days each.")
run_cases(moment_cases, run_moment, "moment_data")
print(f"Test data generated in {time.perf_counter() - t_start}s")
if __name__ == "__main__":
main()
|
convnets-keras/build/lib.linux-x86_64-2.7/convnetskeras/customlayers.py | ksmizer/GrapevinePD | 123 | 12633401 | import numpy as np
from keras.layers.core import Lambda, Merge
from keras.layers.convolutional import Convolution2D
from keras import backend as K
from keras.engine import Layer
def crosschannelnormalization(alpha = 1e-4, k=2, beta=0.75, n=5,**kwargs):
"""
This is the function used for cross channel normalization in the original
Alexnet
"""
def f(X):
b, ch, r, c = X.shape
half = n // 2
square = K.square(X)
extra_channels = K.spatial_2d_padding(K.permute_dimensions(square, (0,2,3,1))
, (0,half))
extra_channels = K.permute_dimensions(extra_channels, (0,3,1,2))
scale = k
for i in range(n):
scale += alpha * extra_channels[:,i:i+ch,:,:]
scale = scale ** beta
return X / scale
return Lambda(f, output_shape=lambda input_shape:input_shape,**kwargs)
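# The Lambda above reproduces AlexNet's cross-channel (local response)
# normalization: b_i = a_i / (k + alpha * sum_j a_j^2)^beta, where the sum runs
# over a window of n neighbouring channels around i; padding the squared
# activations with `half` extra channels and summing n shifted slices implements
# that windowed sum.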
def splittensor(axis=1, ratio_split=1, id_split=0,**kwargs):
def f(X):
div = X.shape[axis] // ratio_split
if axis == 0:
output = X[id_split*div:(id_split+1)*div,:,:,:]
elif axis == 1:
output = X[:, id_split*div:(id_split+1)*div, :, :]
elif axis == 2:
output = X[:,:,id_split*div:(id_split+1)*div,:]
elif axis == 3:
output = X[:,:,:,id_split*div:(id_split+1)*div]
else:
raise ValueError("This axis is not possible")
return output
def g(input_shape):
output_shape=list(input_shape)
output_shape[axis] = output_shape[axis] // ratio_split
return tuple(output_shape)
return Lambda(f,output_shape=lambda input_shape:g(input_shape),**kwargs)
def convolution2Dgroup(n_group, nb_filter, nb_row, nb_col, **kwargs):
def f(input):
return Merge([
Convolution2D(nb_filter//n_group,nb_row,nb_col)(
splittensor(axis=1,
ratio_split=n_group,
id_split=i)(input))
for i in range(n_group)
],mode='concat',concat_axis=1)
return f
class Softmax4D(Layer):
def __init__(self, axis=-1,**kwargs):
self.axis=axis
super(Softmax4D, self).__init__(**kwargs)
def build(self,input_shape):
pass
def call(self, x,mask=None):
e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
s = K.sum(e, axis=self.axis, keepdims=True)
return e / s
def get_output_shape_for(self, input_shape):
return input_shape
|
utils/scripts/gsa/geneSetOverlap.py | lufuhao/snp2vcf | 139 | 12633465 |
#!/usr/bin/env python
#------------------------------------------------------------------------------
#
# Overlap between gene sets
#
#------------------------------------------------------------------------------
import sys
# Debug mode?
debug = False
#------------------------------------------------------------------------------
# Load MSigDb file
#------------------------------------------------------------------------------
def loadMsigDb(msigFile):
geneSet = {}
for line in open(msigFile) :
fields = line.rstrip().split("\t")
geneSetName = fields[0]
geneSet[ geneSetName ] = set( fields[2:] )
if debug : print geneSetName, " => ", geneSet[ geneSetName ]
return geneSet
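# Reminder on the .gmt layout parsed above: each tab-separated line is
# set_name <TAB> description <TAB> gene_1 <TAB> gene_2 ..., so fields[0] is the
# set name, fields[1] (the description) is skipped, and fields[2:] are the genes.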
#------------------------------------------------------------------------------
# Main
#------------------------------------------------------------------------------
#---
# Command line parameters
#---
if len(sys.argv) != 3 :
print >> sys.stderr, "Usage: " + sys.argv[0] + " msigDb.gmt set.gmt"
sys.exit(1)
msigFile = sys.argv[1]
setFile = sys.argv[2]
geneSets = loadMsigDb(msigFile)
testSets = loadMsigDb(setFile)
print "{}%\t{}\t{}\t{}\t{}".format("overlap%", "overlap", "size_1", "size_2", "Gene Set 1", "Gene Set 2")
for gsetName1 in testSets:
size1 = len(testSets[gsetName1])
if size1 > 5:
for gsetName2 in geneSets:
size2 = len(geneSets[gsetName2])
count = len(testSets[gsetName1] & geneSets[gsetName2])
if count > 0:
overlap = (100.0 * count) / size1
print "{}%\t{}\t{}\t{}\t{}\t{}".format(overlap, count, size1, size2, gsetName1, gsetName2)
|
src/python/nimbusml/internal/entrypoints/trainers_fasttreetweedieregressor.py | michaelgsharp/NimbusML | 134 | 12633467 | # - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
Trainers.FastTreeTweedieRegressor
"""
import numbers
from ..utils.entrypoints import EntryPoint
from ..utils.utils import try_set, unlist
def trainers_fasttreetweedieregressor(
training_data,
predictor_model=None,
number_of_trees=100,
number_of_leaves=20,
feature_column_name='Features',
minimum_example_count_per_leaf=10,
label_column_name='Label',
learning_rate=0.2,
example_weight_column_name=None,
row_group_column_name=None,
normalize_features='Auto',
caching='Auto',
index=1.5,
best_step_ranking_regression_trees=False,
use_line_search=False,
maximum_number_of_line_search_steps=0,
minimum_step_size=0.0,
optimization_algorithm='GradientDescent',
early_stopping_rule=None,
early_stopping_metrics=1,
enable_pruning=False,
use_tolerant_pruning=False,
pruning_threshold=0.004,
pruning_window_size=5,
shrinkage=1.0,
dropout_rate=0.0,
get_derivatives_sample_rate=1,
write_last_ensemble=False,
maximum_tree_output=100.0,
random_start=False,
filter_zero_lambdas=False,
baseline_scores_formula=None,
baseline_alpha_risk=None,
position_discount_freeform=None,
parallel_trainer=None,
number_of_threads=None,
seed=123,
feature_selection_seed=123,
entropy_coefficient=0.0,
histogram_pool_size=-1,
disk_transpose=None,
feature_flocks=True,
categorical_split=False,
maximum_categorical_group_count_per_node=64,
maximum_categorical_split_point_count=64,
minimum_example_fraction_for_categorical_split=0.001,
minimum_examples_for_categorical_split=100,
bias=0.0,
bundling='None',
maximum_bin_count_per_feature=255,
sparsify_threshold=0.7,
feature_first_use_penalty=0.0,
feature_reuse_penalty=0.0,
gain_confidence_level=0.0,
softmax_temperature=0.0,
execution_time=False,
feature_fraction=1.0,
bagging_size=0,
bagging_example_fraction=0.7,
feature_fraction_per_split=1.0,
smoothing=0.0,
allow_empty_trees=True,
feature_compression_level=1,
compress_ensemble=False,
print_test_graph=False,
print_train_valid_graph=False,
test_frequency=2147483647,
**params):
"""
**Description**
Trains gradient boosted decision trees to fit target values using a
Tweedie loss function. This learner is a generalization of
Poisson, compound Poisson, and gamma regression.
:param number_of_trees: Total number of decision trees to create
in the ensemble (inputs).
:param training_data: The data to be used for training (inputs).
:param number_of_leaves: The max number of leaves in each
regression tree (inputs).
:param feature_column_name: Column to use for features (inputs).
:param minimum_example_count_per_leaf: The minimal number of
examples allowed in a leaf of a regression tree, out of the
subsampled data (inputs).
:param label_column_name: Column to use for labels (inputs).
:param learning_rate: The learning rate (inputs).
:param example_weight_column_name: Column to use for example
weight (inputs).
:param row_group_column_name: Column to use for example groupId
(inputs).
:param normalize_features: Normalize option for the feature
column (inputs).
:param caching: Whether trainer should cache input training data
(inputs).
:param index: Index parameter for the Tweedie distribution, in
the range [1, 2]. 1 is Poisson loss, 2 is gamma loss, and
intermediate values are compound Poisson loss. (inputs).
:param best_step_ranking_regression_trees: Option for using best
regression step trees (inputs).
:param use_line_search: Should we use line search for a step size
(inputs).
:param maximum_number_of_line_search_steps: Number of post-
bracket line search steps (inputs).
:param minimum_step_size: Minimum line search step size (inputs).
:param optimization_algorithm: Optimization algorithm to be used
(GradientDescent, AcceleratedGradientDescent) (inputs).
:param early_stopping_rule: Early stopping rule. (Validation set
(/valid) is required.) (inputs).
:param early_stopping_metrics: Early stopping metrics. (For
regression, 1: L1, 2:L2; for ranking, 1:NDCG@1, 3:NDCG@3)
(inputs).
:param enable_pruning: Enable post-training pruning to avoid
overfitting. (a validation set is required) (inputs).
:param use_tolerant_pruning: Use window and tolerance for pruning
(inputs).
:param pruning_threshold: The tolerance threshold for pruning
(inputs).
:param pruning_window_size: The moving window size for pruning
(inputs).
:param shrinkage: Shrinkage (inputs).
:param dropout_rate: Dropout rate for tree regularization
(inputs).
:param get_derivatives_sample_rate: Sample each query 1 in k
times in the GetDerivatives function (inputs).
:param write_last_ensemble: Write the last ensemble instead of
the one determined by early stopping (inputs).
:param maximum_tree_output: Upper bound on absolute value of
single tree output (inputs).
:param random_start: Training starts from random ordering
(determined by /r1) (inputs).
:param filter_zero_lambdas: Filter zero lambdas during training
(inputs).
:param baseline_scores_formula: Freeform defining the scores that
should be used as the baseline ranker (inputs).
:param baseline_alpha_risk: Baseline alpha for tradeoffs of risk
(0 is normal training) (inputs).
:param position_discount_freeform: The discount freeform which
specifies the per position discounts of examples in a query
(uses a single variable P for position where P=0 is first
position) (inputs).
:param parallel_trainer: Allows to choose Parallel FastTree
Learning Algorithm (inputs).
:param number_of_threads: The number of threads to use (inputs).
:param seed: The seed of the random number generator (inputs).
:param feature_selection_seed: The seed of the active feature
selection (inputs).
:param entropy_coefficient: The entropy (regularization)
coefficient between 0 and 1 (inputs).
:param histogram_pool_size: The number of histograms in the pool
(between 2 and numLeaves) (inputs).
:param disk_transpose: Whether to utilize the disk or the data's
native transposition facilities (where applicable) when
performing the transpose (inputs).
:param feature_flocks: Whether to collectivize features during
dataset preparation to speed up training (inputs).
:param categorical_split: Whether to do split based on multiple
categorical feature values. (inputs).
:param maximum_categorical_group_count_per_node: Maximum
categorical split groups to consider when splitting on a
categorical feature. Split groups are a collection of split
        points. This is used to reduce overfitting when there are many
categorical features. (inputs).
:param maximum_categorical_split_point_count: Maximum categorical
split points to consider when splitting on a categorical
feature. (inputs).
:param minimum_example_fraction_for_categorical_split: Minimum
categorical example percentage in a bin to consider for a
split. (inputs).
:param minimum_examples_for_categorical_split: Minimum
categorical example count in a bin to consider for a split.
(inputs).
:param bias: Bias for calculating gradient for each feature bin
for a categorical feature. (inputs).
:param bundling: Bundle low population bins. Bundle.None(0): no
bundling, Bundle.AggregateLowPopulation(1): Bundle low
population, Bundle.Adjacent(2): Neighbor low population
bundle. (inputs).
:param maximum_bin_count_per_feature: Maximum number of distinct
values (bins) per feature (inputs).
:param sparsify_threshold: Sparsity level needed to use sparse
feature representation (inputs).
:param feature_first_use_penalty: The feature first use penalty
coefficient (inputs).
:param feature_reuse_penalty: The feature re-use penalty
(regularization) coefficient (inputs).
:param gain_confidence_level: Tree fitting gain confidence
requirement (should be in the range [0,1) ). (inputs).
:param softmax_temperature: The temperature of the randomized
softmax distribution for choosing the feature (inputs).
:param execution_time: Print execution time breakdown to stdout
(inputs).
:param feature_fraction: The fraction of features (chosen
randomly) to use on each iteration (inputs).
:param bagging_size: Number of trees in each bag (0 for disabling
bagging) (inputs).
:param bagging_example_fraction: Percentage of training examples
used in each bag (inputs).
:param feature_fraction_per_split: The fraction of features
(chosen randomly) to use on each split (inputs).
    :param smoothing: Smoothing parameter for tree regularization
(inputs).
:param allow_empty_trees: When a root split is impossible, allow
training to proceed (inputs).
:param feature_compression_level: The level of feature
compression to use (inputs).
:param compress_ensemble: Compress the tree Ensemble (inputs).
:param print_test_graph: Print metrics graph for the first test
set (inputs).
:param print_train_valid_graph: Print Train and Validation
metrics in graph (inputs).
:param test_frequency: Calculate metric values for
train/valid/test every k rounds (inputs).
:param predictor_model: The trained model (outputs).
"""
entrypoint_name = 'Trainers.FastTreeTweedieRegressor'
inputs = {}
outputs = {}
if number_of_trees is not None:
inputs['NumberOfTrees'] = try_set(
obj=number_of_trees,
none_acceptable=True,
is_of_type=numbers.Real)
if training_data is not None:
inputs['TrainingData'] = try_set(
obj=training_data,
none_acceptable=False,
is_of_type=str)
if number_of_leaves is not None:
inputs['NumberOfLeaves'] = try_set(
obj=number_of_leaves,
none_acceptable=True,
is_of_type=numbers.Real)
if feature_column_name is not None:
inputs['FeatureColumnName'] = try_set(
obj=feature_column_name,
none_acceptable=True,
is_of_type=str,
is_column=True)
if minimum_example_count_per_leaf is not None:
inputs['MinimumExampleCountPerLeaf'] = try_set(
obj=minimum_example_count_per_leaf,
none_acceptable=True,
is_of_type=numbers.Real)
if label_column_name is not None:
inputs['LabelColumnName'] = try_set(
obj=label_column_name,
none_acceptable=True,
is_of_type=str,
is_column=True)
if learning_rate is not None:
inputs['LearningRate'] = try_set(
obj=learning_rate,
none_acceptable=True,
is_of_type=numbers.Real)
if example_weight_column_name is not None:
inputs['ExampleWeightColumnName'] = try_set(
obj=example_weight_column_name,
none_acceptable=True,
is_of_type=str,
is_column=True)
if row_group_column_name is not None:
inputs['RowGroupColumnName'] = try_set(
obj=row_group_column_name,
none_acceptable=True,
is_of_type=str,
is_column=True)
if normalize_features is not None:
inputs['NormalizeFeatures'] = try_set(
obj=normalize_features,
none_acceptable=True,
is_of_type=str,
values=[
'No',
'Warn',
'Auto',
'Yes'])
if caching is not None:
inputs['Caching'] = try_set(
obj=caching,
none_acceptable=True,
is_of_type=str,
values=[
'Auto',
'Memory',
'None'])
if index is not None:
inputs['Index'] = try_set(
obj=index,
none_acceptable=True,
is_of_type=numbers.Real)
if best_step_ranking_regression_trees is not None:
inputs['BestStepRankingRegressionTrees'] = try_set(
obj=best_step_ranking_regression_trees,
none_acceptable=True,
is_of_type=bool)
if use_line_search is not None:
inputs['UseLineSearch'] = try_set(
obj=use_line_search,
none_acceptable=True,
is_of_type=bool)
if maximum_number_of_line_search_steps is not None:
inputs['MaximumNumberOfLineSearchSteps'] = try_set(
obj=maximum_number_of_line_search_steps,
none_acceptable=True,
is_of_type=numbers.Real)
if minimum_step_size is not None:
inputs['MinimumStepSize'] = try_set(
obj=minimum_step_size,
none_acceptable=True,
is_of_type=numbers.Real)
if optimization_algorithm is not None:
inputs['OptimizationAlgorithm'] = try_set(
obj=optimization_algorithm,
none_acceptable=True,
is_of_type=str,
values=[
'GradientDescent',
'AcceleratedGradientDescent',
'ConjugateGradientDescent'])
if early_stopping_rule is not None:
inputs['EarlyStoppingRule'] = try_set(
obj=early_stopping_rule,
none_acceptable=True,
is_of_type=dict)
if early_stopping_metrics is not None:
inputs['EarlyStoppingMetrics'] = try_set(
obj=early_stopping_metrics,
none_acceptable=True,
is_of_type=numbers.Real)
if enable_pruning is not None:
inputs['EnablePruning'] = try_set(
obj=enable_pruning,
none_acceptable=True,
is_of_type=bool)
if use_tolerant_pruning is not None:
inputs['UseTolerantPruning'] = try_set(
obj=use_tolerant_pruning,
none_acceptable=True,
is_of_type=bool)
if pruning_threshold is not None:
inputs['PruningThreshold'] = try_set(
obj=pruning_threshold,
none_acceptable=True,
is_of_type=numbers.Real)
if pruning_window_size is not None:
inputs['PruningWindowSize'] = try_set(
obj=pruning_window_size,
none_acceptable=True,
is_of_type=numbers.Real)
if shrinkage is not None:
inputs['Shrinkage'] = try_set(
obj=shrinkage,
none_acceptable=True,
is_of_type=numbers.Real)
if dropout_rate is not None:
inputs['DropoutRate'] = try_set(
obj=dropout_rate,
none_acceptable=True,
is_of_type=numbers.Real)
if get_derivatives_sample_rate is not None:
inputs['GetDerivativesSampleRate'] = try_set(
obj=get_derivatives_sample_rate,
none_acceptable=True,
is_of_type=numbers.Real)
if write_last_ensemble is not None:
inputs['WriteLastEnsemble'] = try_set(
obj=write_last_ensemble,
none_acceptable=True,
is_of_type=bool)
if maximum_tree_output is not None:
inputs['MaximumTreeOutput'] = try_set(
obj=maximum_tree_output,
none_acceptable=True,
is_of_type=numbers.Real)
if random_start is not None:
inputs['RandomStart'] = try_set(
obj=random_start,
none_acceptable=True,
is_of_type=bool)
if filter_zero_lambdas is not None:
inputs['FilterZeroLambdas'] = try_set(
obj=filter_zero_lambdas,
none_acceptable=True,
is_of_type=bool)
if baseline_scores_formula is not None:
inputs['BaselineScoresFormula'] = try_set(
obj=baseline_scores_formula, none_acceptable=True, is_of_type=str)
if baseline_alpha_risk is not None:
inputs['BaselineAlphaRisk'] = try_set(
obj=baseline_alpha_risk, none_acceptable=True, is_of_type=str)
if position_discount_freeform is not None:
inputs['PositionDiscountFreeform'] = try_set(
obj=position_discount_freeform,
none_acceptable=True,
is_of_type=str)
if parallel_trainer is not None:
inputs['ParallelTrainer'] = try_set(
obj=parallel_trainer,
none_acceptable=True,
is_of_type=dict)
if number_of_threads is not None:
inputs['NumberOfThreads'] = try_set(
obj=number_of_threads,
none_acceptable=True,
is_of_type=numbers.Real)
if seed is not None:
inputs['Seed'] = try_set(
obj=seed,
none_acceptable=True,
is_of_type=numbers.Real)
if feature_selection_seed is not None:
inputs['FeatureSelectionSeed'] = try_set(
obj=feature_selection_seed,
none_acceptable=True,
is_of_type=numbers.Real)
if entropy_coefficient is not None:
inputs['EntropyCoefficient'] = try_set(
obj=entropy_coefficient,
none_acceptable=True,
is_of_type=numbers.Real)
if histogram_pool_size is not None:
inputs['HistogramPoolSize'] = try_set(
obj=histogram_pool_size,
none_acceptable=True,
is_of_type=numbers.Real)
if disk_transpose is not None:
inputs['DiskTranspose'] = try_set(
obj=disk_transpose,
none_acceptable=True,
is_of_type=bool)
if feature_flocks is not None:
inputs['FeatureFlocks'] = try_set(
obj=feature_flocks,
none_acceptable=True,
is_of_type=bool)
if categorical_split is not None:
inputs['CategoricalSplit'] = try_set(
obj=categorical_split,
none_acceptable=True,
is_of_type=bool)
if maximum_categorical_group_count_per_node is not None:
inputs['MaximumCategoricalGroupCountPerNode'] = try_set(
obj=maximum_categorical_group_count_per_node,
none_acceptable=True,
is_of_type=numbers.Real)
if maximum_categorical_split_point_count is not None:
inputs['MaximumCategoricalSplitPointCount'] = try_set(
obj=maximum_categorical_split_point_count,
none_acceptable=True,
is_of_type=numbers.Real)
if minimum_example_fraction_for_categorical_split is not None:
inputs['MinimumExampleFractionForCategoricalSplit'] = try_set(
obj=minimum_example_fraction_for_categorical_split,
none_acceptable=True,
is_of_type=numbers.Real)
if minimum_examples_for_categorical_split is not None:
inputs['MinimumExamplesForCategoricalSplit'] = try_set(
obj=minimum_examples_for_categorical_split,
none_acceptable=True,
is_of_type=numbers.Real)
if bias is not None:
inputs['Bias'] = try_set(
obj=bias,
none_acceptable=True,
is_of_type=numbers.Real)
if bundling is not None:
inputs['Bundling'] = try_set(
obj=bundling,
none_acceptable=True,
is_of_type=str,
values=[
'None',
'AggregateLowPopulation',
'Adjacent'])
if maximum_bin_count_per_feature is not None:
inputs['MaximumBinCountPerFeature'] = try_set(
obj=maximum_bin_count_per_feature,
none_acceptable=True,
is_of_type=numbers.Real)
if sparsify_threshold is not None:
inputs['SparsifyThreshold'] = try_set(
obj=sparsify_threshold,
none_acceptable=True,
is_of_type=numbers.Real)
if feature_first_use_penalty is not None:
inputs['FeatureFirstUsePenalty'] = try_set(
obj=feature_first_use_penalty,
none_acceptable=True,
is_of_type=numbers.Real)
if feature_reuse_penalty is not None:
inputs['FeatureReusePenalty'] = try_set(
obj=feature_reuse_penalty,
none_acceptable=True,
is_of_type=numbers.Real)
if gain_confidence_level is not None:
inputs['GainConfidenceLevel'] = try_set(
obj=gain_confidence_level,
none_acceptable=True,
is_of_type=numbers.Real)
if softmax_temperature is not None:
inputs['SoftmaxTemperature'] = try_set(
obj=softmax_temperature,
none_acceptable=True,
is_of_type=numbers.Real)
if execution_time is not None:
inputs['ExecutionTime'] = try_set(
obj=execution_time,
none_acceptable=True,
is_of_type=bool)
if feature_fraction is not None:
inputs['FeatureFraction'] = try_set(
obj=feature_fraction,
none_acceptable=True,
is_of_type=numbers.Real)
if bagging_size is not None:
inputs['BaggingSize'] = try_set(
obj=bagging_size,
none_acceptable=True,
is_of_type=numbers.Real)
if bagging_example_fraction is not None:
inputs['BaggingExampleFraction'] = try_set(
obj=bagging_example_fraction,
none_acceptable=True,
is_of_type=numbers.Real)
if feature_fraction_per_split is not None:
inputs['FeatureFractionPerSplit'] = try_set(
obj=feature_fraction_per_split,
none_acceptable=True,
is_of_type=numbers.Real)
if smoothing is not None:
inputs['Smoothing'] = try_set(
obj=smoothing,
none_acceptable=True,
is_of_type=numbers.Real)
if allow_empty_trees is not None:
inputs['AllowEmptyTrees'] = try_set(
obj=allow_empty_trees,
none_acceptable=True,
is_of_type=bool)
if feature_compression_level is not None:
inputs['FeatureCompressionLevel'] = try_set(
obj=feature_compression_level,
none_acceptable=True,
is_of_type=numbers.Real)
if compress_ensemble is not None:
inputs['CompressEnsemble'] = try_set(
obj=compress_ensemble,
none_acceptable=True,
is_of_type=bool)
if print_test_graph is not None:
inputs['PrintTestGraph'] = try_set(
obj=print_test_graph,
none_acceptable=True,
is_of_type=bool)
if print_train_valid_graph is not None:
inputs['PrintTrainValidGraph'] = try_set(
obj=print_train_valid_graph,
none_acceptable=True,
is_of_type=bool)
if test_frequency is not None:
inputs['TestFrequency'] = try_set(
obj=test_frequency,
none_acceptable=True,
is_of_type=numbers.Real)
if predictor_model is not None:
outputs['PredictorModel'] = try_set(
obj=predictor_model, none_acceptable=False, is_of_type=str)
input_variables = {
x for x in unlist(inputs.values())
if isinstance(x, str) and x.startswith("$")}
output_variables = {
x for x in unlist(outputs.values())
if isinstance(x, str) and x.startswith("$")}
entrypoint = EntryPoint(
name=entrypoint_name, inputs=inputs, outputs=outputs,
input_variables=input_variables,
output_variables=output_variables)
return entrypoint
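# Usage sketch (illustrative only): the wrapper name below follows the
# entrypoint naming convention and is assumed, as are the '$'-prefixed
# graph-variable values.
#
#   node = trainers_fasttreetweedieregressor(
#       training_data='$training_data',
#       predictor_model='$predictor_model',
#       number_of_trees=100,
#       learning_rate=0.2,
#       index=1.5)
#   # Values starting with '$' are collected into the EntryPoint's
#   # input_variables / output_variables and wired into the experiment graph.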
|
tests/test.py | davidbistolas/py-simple-audio | 121 | 12633476 | import simpleaudio as sa
import unittest
class TestSimpleaudio(unittest.TestCase):
def test_num_channels(self):
self.assertRaises(ValueError, sa.play_buffer, b'\0' * 16, 0, 2, 44100)
self.assertRaises(ValueError, sa.play_buffer, b'\0' * 16, 3, 2, 44100)
def test_bytes_per_chan(self):
self.assertRaises(ValueError, sa.play_buffer, b'\0' * 16, 2, 0, 44100)
self.assertRaises(ValueError, sa.play_buffer, b'\0' * 16, 2, 5, 44100)
def test_sample_rate(self):
self.assertRaises(ValueError, sa.play_buffer, b'\0' * 16, 2, 2, 44101)
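    # Taken together, these checks suggest play_buffer accepts 1-2 channels,
    # 1-4 bytes per sample, and standard sample rates (44100 Hz, not 44101).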
|
util/crypto/diffieHellman.py | arinachison/abides | 196 | 12633533 | <reponame>arinachison/abides
import nacl.bindings as nb
import random
import pandas as pd
import numpy as np
import math
def dict_keygeneration(peer_list):
# CDB: turned these into dictionaries to relax assumptions around agent IDs.
pkeys = {}
skeys = {}
for peer_id in peer_list:
pkeys[peer_id], skeys[peer_id] = nb.crypto_kx_keypair()
return pkeys, skeys
def dict_keyexchange(peer_list, self_id, my_pkeys, my_skeys, peer_pkeys):
# CDB: The last three parameters are now all dictionaries. Dictionary keys
# are peer ids to which we gave the key, or from which we received the key.
# comkeys is also now a dictionary keyed by peer id.
comkeys = {}
for peer_id in peer_list:
if peer_id > self_id:
common_key_raw, _ = nb.crypto_kx_client_session_keys(my_pkeys[peer_id], my_skeys[peer_id], peer_pkeys[peer_id])
else:
_, common_key_raw = nb.crypto_kx_server_session_keys(my_pkeys[peer_id], my_skeys[peer_id], peer_pkeys[peer_id])
# Hash the common keys.
comkeys[peer_id] = int.from_bytes(nb.crypto_hash_sha256(common_key_raw), byteorder='big')
return comkeys
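# Illustrative sketch (not part of the original module; peer ids below are
# made up): two peers generate per-peer key pairs, exchange the public
# halves, and derive the same hashed common key for the pair.
#
#   pk0, sk0 = dict_keygeneration([1])                      # peer 0's keys for peer 1
#   pk1, sk1 = dict_keygeneration([0])                      # peer 1's keys for peer 0
#   ck0 = dict_keyexchange([1], 0, pk0, sk0, {1: pk1[0]})   # peer 0's view
#   ck1 = dict_keyexchange([0], 1, pk1, sk1, {0: pk0[1]})   # peer 1's view
#   assert ck0[1] == ck1[0]                                 # both sides agree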
# PRG
def randomize(r, modulo, clientsign):
    # Call the double-length pseudorandom generator
random.seed(r)
rand = random.getrandbits(256*2)
rand_b_raw = bin(rand)
    # Zero-pad to the full 512 bits so the two 256-bit halves split cleanly
    nr_zeros_append = 256 * 2 - (len(rand_b_raw) - 2)
    rand_b = '0' * nr_zeros_append + rand_b_raw[2:]
    # Use first half to mask the inputs and second half as the next seed to the pseudorandom generator
R = int(rand_b[0:256], 2)
r = int(rand_b[256:] , 2)
return r, R
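# Sketch of intended use (illustrative only): chaining randomize() expands a
# shared seed into a stream of 256-bit masks; note that modulo and clientsign
# are accepted but unused by this implementation.
#
#   seed = 12345                              # made-up starting seed
#   seed, mask1 = randomize(seed, None, 1)
#   seed, mask2 = randomize(seed, None, 1)
#   # parties that start from the same seed derive the same mask sequence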
def randomize_all(party_i, common_key_list, modulo):
for i in range(len(common_key_list)):
if i == party_i:
continue
clientsign = 1 if i > party_i else -1
        common_key_list[i], client = randomize(common_key_list[i], modulo, clientsign)
return common_key_list, client
|
rapidsms/backends/vumi/forms.py | catalpainternational/rapidsms | 330 | 12633546 | from django import forms
from rapidsms.backends.http.forms import BaseHttpForm
class VumiForm(BaseHttpForm):
message_id = forms.CharField()
to_addr = forms.CharField()
from_addr = forms.CharField()
in_reply_to = forms.CharField(required=False)
session_event = forms.CharField(required=False)
content = forms.CharField(required=False)
transport_name = forms.CharField()
transport_type = forms.CharField()
group = forms.CharField(required=False)
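    # Example of the kind of POST payload this form validates (values are made
    # up; the keys simply mirror the declared form fields above):
    #
    #   {"message_id": "abc123", "to_addr": "8181", "from_addr": "+15551234567",
    #    "content": "join", "transport_name": "vumi", "transport_type": "ussd"}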
def get_incoming_data(self):
fields = self.cleaned_data.copy()
# save message_id as external_id so RapidSMS will handle it properly
fields['external_id'] = self.cleaned_data['message_id']
connections = self.lookup_connections([self.cleaned_data['from_addr']])
return {'connection': connections[0],
'text': self.cleaned_data['content'],
'fields': fields}
|
lettersmith/absolutize.py | ericmjl/lettersmith_py | 103 | 12633551 | <reponame>ericmjl/lettersmith_py<filename>lettersmith/absolutize.py
"""
Tools for making relative URLs absolute in doc content.
"""
import re
from lettersmith.docs import renderer
from lettersmith.func import composable
from lettersmith import path as pathtools
URL_ATTR = r"""(src|href)=["'](.*?)["']"""
def absolutize(base_url):
"""
    Absolutize URLs in content. Replaces any relative URL in content
    that starts with `/` so that it starts with `base_url` instead.
    URLs are found by matching against `href=` and `src=`.
"""
def render_inner_match(match):
attr = match.group(1)
value = match.group(2)
url = pathtools.qualify_url(value, base_url)
return '{attr}="{url}"'.format(attr=attr, url=url)
@renderer
def render(content):
"""
Absolutize URLs in doc content fields.
"""
return re.sub(URL_ATTR, render_inner_match, content)
return render |
tests/__init__.py | gitter-badger/vcspull | 169 | 12633619 | from . import fixtures # noqa
|
margaritashotgun/auth.py | cyberdefensegrp/margaritashotgun | 198 | 12633668 | <filename>margaritashotgun/auth.py
from enum import Enum
import paramiko
from paramiko import PasswordRequiredException
from margaritashotgun.exceptions import AuthenticationMissingUsernameError
from margaritashotgun.exceptions import AuthenticationMethodMissingError
class AuthMethods(Enum):
key = 'key'
password = 'password'
class Auth():
def __init__(self, username=None, password=None, key=None):
"""
:type username: str
:param username: username for ssh authentication
:type password: str
:param password: password for ssh authentication
:type key: str
:param key: path to rsa key for ssh authentication
"""
self.method = None
self.username = None
self.password = None
self.key = None
if username is None or username == "":
raise AuthenticationMissingUsernameError()
else:
self.username = username
if key is not None:
self.key = self.load_key(key, password)
self.method = AuthMethods.key
elif password is not None:
self.password = password
self.method = AuthMethods.password
else:
raise AuthenticationMethodMissingError()
def load_key(self, key_path, password):
"""
Creates paramiko rsa key
:type key_path: str
:param key_path: path to rsa key
:type password: str
:param password: password to try if rsa key is encrypted
"""
try:
return paramiko.RSAKey.from_private_key_file(key_path)
except PasswordRequiredException as ex:
return paramiko.RSAKey.from_private_key_file(key_path,
password=password)
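# Usage sketch (illustrative only; the username, key path, and password are
# made up):
#
#   key_auth = Auth(username="ec2-user", key="/home/user/.ssh/id_rsa")
#   assert key_auth.method == AuthMethods.key
#
#   pw_auth = Auth(username="admin", password="hunter2")
#   assert pw_auth.method == AuthMethods.password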
|
txdav/common/datastore/podding/test/test_conduit.py | backwardn/ccs-calendarserver | 462 | 12633669 | ##
# Copyright (c) 2005-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from pycalendar.datetime import DateTime
from pycalendar.period import Period
from twext.python.clsprop import classproperty
import txweb2.dav.test.util
from txweb2.http_headers import MimeType
from txweb2.stream import MemoryStream
from twisted.internet.defer import inlineCallbacks, succeed, returnValue
from twistedcaldav import caldavxml
from twistedcaldav.ical import Component, normalize_iCalStr
from txdav.caldav.datastore.query.filter import Filter
from txdav.caldav.datastore.scheduling.cuaddress import calendarUserFromCalendarUserAddress
from txdav.caldav.datastore.scheduling.freebusy import FreebusyQuery
from txdav.caldav.datastore.scheduling.ischedule.localservers import ServersDB, Server
from txdav.caldav.datastore.sql import ManagedAttachment, AttachmentLink
from txdav.caldav.datastore.test.common import CaptureProtocol
from txdav.common.datastore.podding.conduit import PoddingConduit, \
FailedCrossPodRequestError
from txdav.common.datastore.podding.resource import ConduitResource
from txdav.common.datastore.podding.test.util import MultiStoreConduitTest, \
FakeConduitRequest
from txdav.common.datastore.sql_tables import _BIND_STATUS_ACCEPTED
from txdav.common.datastore.test.util import populateCalendarsFrom, CommonCommonTests
from txdav.common.icommondatastore import ObjectResourceNameAlreadyExistsError, \
ObjectResourceNameNotAllowedError
from txdav.common.idirectoryservice import DirectoryRecordNotFoundError
class TestConduit (CommonCommonTests, txweb2.dav.test.util.TestCase):
class FakeConduit(object):
def recv_fake(self, j):
return succeed({
"back2u": j["echo"],
"more": "bits",
})
@inlineCallbacks
def setUp(self):
yield super(TestConduit, self).setUp()
serversDB = ServersDB()
serversDB.addServer(Server("A", "http://127.0.0.1", "A", True))
serversDB.addServer(Server("B", "http://127.0.0.2", "B", False))
yield self.buildStoreAndDirectory(serversDB=serversDB)
self.site.resource.putChild("conduit", ConduitResource(self.site.resource, self.storeUnderTest()))
yield self.populate()
@inlineCallbacks
def populate(self):
yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
self.notifierFactory.reset()
@classproperty(cache=False)
def requirements(cls): # @NoSelf
return {
"user01": {
"calendar_1": {
},
"inbox": {
},
},
"user02": {
"calendar_1": {
},
"inbox": {
},
},
"user03": {
"calendar_1": {
},
"inbox": {
},
},
}
@inlineCallbacks
def test_validRequest(self):
"""
        Test that validRequest accepts a valid local/remote user pairing and
        rejects unknown users or a pairing homed on the same pod.
"""
conduit = PoddingConduit(self.storeUnderTest())
r1, r2 = yield conduit.validRequest("user01", "puser02")
self.assertTrue(r1 is not None)
self.assertTrue(r2 is not None)
yield self.assertFailure(
conduit.validRequest("bogus01", "user02"),
DirectoryRecordNotFoundError
)
yield self.assertFailure(
conduit.validRequest("user01", "bogus02"),
DirectoryRecordNotFoundError
)
yield self.assertFailure(
conduit.validRequest("user01", "user02"),
FailedCrossPodRequestError
)
class TestConduitToConduit(MultiStoreConduitTest):
class FakeConduit(PoddingConduit):
@inlineCallbacks
def send_fake(self, txn, ownerUID, shareeUID):
_ignore_owner, sharee = yield self.validRequest(ownerUID, shareeUID)
action = {
"action": "fake",
"echo": "bravo"
}
result = yield self.sendRequest(txn, sharee, action)
returnValue(result)
def recv_fake(self, txn, j):
return succeed({
"back2u": j["echo"],
"more": "bits",
})
def makeConduit(self, store):
"""
Use our own variant.
"""
conduit = self.FakeConduit(store)
conduit.conduitRequestClass = FakeConduitRequest
return conduit
@inlineCallbacks
def test_fake_action(self):
"""
Cross-pod request works when conduit does support the action.
"""
store = self.theStoreUnderTest(0)
response = yield store.conduit.send_fake(self.theTransactionUnderTest(0), "user01", "puser01")
self.assertEqual(response, {"back2u": "bravo", "more": "bits"})
yield self.commitTransaction(0)
store = self.theStoreUnderTest(1)
response = yield store.conduit.send_fake(self.theTransactionUnderTest(1), "puser01", "user01")
self.assertEqual(response, {"back2u": "bravo", "more": "bits"})
yield self.commitTransaction(1)
class TestConduitAPI(MultiStoreConduitTest):
"""
Test that the conduit api works.
"""
nowYear = {"now": DateTime.getToday().getYear()}
caldata1 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:uid1
DTSTART:{now:04d}0102T140000Z
DURATION:PT1H
CREATED:20060102T190000Z
DTSTAMP:20051222T210507Z
RRULE:FREQ=WEEKLY
SUMMARY:instance
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n").format(**nowYear)
caldata1_changed = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:uid1
DTSTART:{now:04d}0102T150000Z
DURATION:PT1H
CREATED:20060102T190000Z
DTSTAMP:20051222T210507Z
RRULE:FREQ=WEEKLY
SUMMARY:instance changed
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n").format(**nowYear)
caldata2 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:uid2
DTSTART:{now:04d}0102T160000Z
DURATION:PT1H
CREATED:20060102T190000Z
DTSTAMP:20051222T210507Z
RRULE:FREQ=WEEKLY
SUMMARY:instance
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n").format(**nowYear)
caldata3 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:uid3
DTSTART:{now:04d}0102T160000Z
DURATION:PT1H
CREATED:20060102T190000Z
DTSTAMP:20051222T210507Z
RRULE:FREQ=WEEKLY
SUMMARY:instance
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n").format(**nowYear)
@inlineCallbacks
def test_basic_share(self):
"""
Test that basic invite/uninvite works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
shared = yield calendar1.shareeView("puser01")
self.assertEqual(shared.shareStatus(), _BIND_STATUS_ACCEPTED)
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
self.assertTrue(shared is not None)
self.assertTrue(shared.external())
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.uninviteUIDFromShare("puser01")
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
self.assertTrue(shared is None)
yield self.commitTransaction(1)
@inlineCallbacks
def test_countobjects(self):
"""
Test that action=countobjects works.
"""
yield self.createShare("user01", "puser01")
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
count = yield shared.countObjectResources()
self.assertEqual(count, 0)
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
count = yield calendar1.countObjectResources()
self.assertEqual(count, 1)
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
count = yield shared.countObjectResources()
self.assertEqual(count, 1)
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
yield object1.remove()
count = yield calendar1.countObjectResources()
self.assertEqual(count, 0)
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
count = yield shared.countObjectResources()
self.assertEqual(count, 0)
yield self.commitTransaction(1)
@inlineCallbacks
def test_listobjects(self):
"""
Test that action=listobjects works.
"""
yield self.createShare("user01", "puser01")
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
objects = yield shared.listObjectResources()
self.assertEqual(set(objects), set())
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield calendar1.createCalendarObjectWithName("2.ics", Component.fromString(self.caldata2))
objects = yield calendar1.listObjectResources()
self.assertEqual(set(objects), set(("1.ics", "2.ics",)))
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
objects = yield shared.listObjectResources()
self.assertEqual(set(objects), set(("1.ics", "2.ics",)))
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
yield object1.remove()
objects = yield calendar1.listObjectResources()
self.assertEqual(set(objects), set(("2.ics",)))
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
objects = yield shared.listObjectResources()
self.assertEqual(set(objects), set(("2.ics",)))
yield self.commitTransaction(1)
@inlineCallbacks
def test_synctoken(self):
"""
Test that action=synctoken works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
token1_1 = yield calendar1.syncTokenRevision()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
token2_1 = yield shared.syncTokenRevision()
yield self.commitTransaction(1)
self.assertEqual(token1_1, token2_1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
token1_2 = yield calendar1.syncTokenRevision()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
token2_2 = yield shared.syncTokenRevision()
yield self.commitTransaction(1)
self.assertNotEqual(token1_1, token1_2)
self.assertEqual(token1_2, token2_2)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
yield object1.remove()
count = yield calendar1.countObjectResources()
self.assertEqual(count, 0)
yield self.commitTransaction(0)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
token1_3 = yield calendar1.syncTokenRevision()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
token2_3 = yield shared.syncTokenRevision()
yield self.commitTransaction(1)
self.assertNotEqual(token1_1, token1_3)
self.assertNotEqual(token1_2, token1_3)
self.assertEqual(token1_3, token2_3)
@inlineCallbacks
def test_resourcenamessincerevision(self):
"""
        Test that action=resourcenamessincerevision works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
token1_1 = yield calendar1.syncToken()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
token2_1 = yield shared.syncToken()
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
token1_2 = yield calendar1.syncToken()
names1 = yield calendar1.resourceNamesSinceToken(token1_1)
self.assertEqual(names1, ([u"1.ics"], [], [],))
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
token2_2 = yield shared.syncToken()
names2 = yield shared.resourceNamesSinceToken(token2_1)
self.assertEqual(names2, ([u"1.ics"], [], [],))
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
yield object1.remove()
count = yield calendar1.countObjectResources()
self.assertEqual(count, 0)
yield self.commitTransaction(0)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
token1_3 = yield calendar1.syncToken()
names1 = yield calendar1.resourceNamesSinceToken(token1_2)
self.assertEqual(names1, ([], [u"1.ics"], [],))
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
token2_3 = yield shared.syncToken()
names2 = yield shared.resourceNamesSinceToken(token2_2)
self.assertEqual(names2, ([], [u"1.ics"], [],))
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
names1 = yield calendar1.resourceNamesSinceToken(token1_3)
self.assertEqual(names1, ([], [], [],))
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
names2 = yield shared.resourceNamesSinceToken(token2_3)
self.assertEqual(names2, ([], [], [],))
yield self.commitTransaction(1)
@inlineCallbacks
def test_resourceuidforname(self):
"""
Test that action=resourceuidforname works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
uid = yield calendar1.resourceUIDForName("1.ics")
self.assertEqual(uid, "uid1")
uid = yield calendar1.resourceUIDForName("2.ics")
self.assertTrue(uid is None)
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
uid = yield shared.resourceUIDForName("1.ics")
self.assertEqual(uid, "uid1")
uid = yield shared.resourceUIDForName("2.ics")
self.assertTrue(uid is None)
yield self.commitTransaction(1)
@inlineCallbacks
def test_resourcenameforuid(self):
"""
Test that action=resourcenameforuid works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
name = yield calendar1.resourceNameForUID("uid1")
self.assertEqual(name, "1.ics")
name = yield calendar1.resourceNameForUID("uid2")
self.assertTrue(name is None)
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
name = yield shared.resourceNameForUID("uid1")
self.assertEqual(name, "1.ics")
name = yield shared.resourceNameForUID("uid2")
self.assertTrue(name is None)
yield self.commitTransaction(1)
@inlineCallbacks
def test_search(self):
"""
        Test that action=search works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
filter = caldavxml.Filter(
caldavxml.ComponentFilter(
*[caldavxml.ComponentFilter(
**{"name": ("VEVENT", "VFREEBUSY", "VAVAILABILITY")}
)],
**{"name": "VCALENDAR"}
)
)
filter = Filter(filter)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
names = [item[0] for item in (yield calendar1.search(filter))]
self.assertEqual(names, ["1.ics", ])
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
names = [item[0] for item in (yield shared.search(filter))]
self.assertEqual(names, ["1.ics", ])
yield self.commitTransaction(1)
@inlineCallbacks
def test_loadallobjects(self):
"""
Test that action=loadallobjects works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
resource1 = yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
resource_id1 = resource1.id()
resource2 = yield calendar1.createCalendarObjectWithName("2.ics", Component.fromString(self.caldata2))
resource_id2 = resource2.id()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resources = yield shared.objectResources()
byname = dict([(obj.name(), obj) for obj in resources])
byuid = dict([(obj.uid(), obj) for obj in resources])
self.assertEqual(len(resources), 2)
self.assertEqual(set([obj.name() for obj in resources]), set(("1.ics", "2.ics",)))
self.assertEqual(set([obj.uid() for obj in resources]), set(("uid1", "uid2",)))
self.assertEqual(set([obj.id() for obj in resources]), set((resource_id1, resource_id2,)))
resource = yield shared.objectResourceWithName("1.ics")
self.assertTrue(resource is byname["1.ics"])
resource = yield shared.objectResourceWithName("2.ics")
self.assertTrue(resource is byname["2.ics"])
resource = yield shared.objectResourceWithName("Missing.ics")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithUID("uid1")
self.assertTrue(resource is byuid["uid1"])
resource = yield shared.objectResourceWithUID("uid2")
self.assertTrue(resource is byuid["uid2"])
resource = yield shared.objectResourceWithUID("uid-missing")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithID(resource_id1)
self.assertTrue(resource is byname["1.ics"])
resource = yield shared.objectResourceWithID(resource_id2)
self.assertTrue(resource is byname["2.ics"])
resource = yield shared.objectResourceWithID(0)
self.assertTrue(resource is None)
yield self.commitTransaction(1)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
yield object1.remove()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resources = yield shared.objectResources()
byname = dict([(obj.name(), obj) for obj in resources])
byuid = dict([(obj.uid(), obj) for obj in resources])
self.assertEqual(len(resources), 1)
self.assertEqual(set([obj.name() for obj in resources]), set(("2.ics",)))
self.assertEqual(set([obj.uid() for obj in resources]), set(("uid2",)))
self.assertEqual(set([obj.id() for obj in resources]), set((resource_id2,)))
resource = yield shared.objectResourceWithName("1.ics")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithName("2.ics")
self.assertTrue(resource is byname["2.ics"])
resource = yield shared.objectResourceWithName("Missing.ics")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithUID("uid1")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithUID("uid2")
self.assertTrue(resource is byuid["uid2"])
resource = yield shared.objectResourceWithUID("uid-missing")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithID(resource_id1)
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithID(resource_id2)
self.assertTrue(resource is byname["2.ics"])
resource = yield shared.objectResourceWithID(0)
self.assertTrue(resource is None)
yield self.commitTransaction(1)
@inlineCallbacks
def test_loadallobjectswithnames(self):
"""
Test that action=loadallobjectswithnames works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
resource1 = yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
resource_id1 = resource1.id()
yield calendar1.createCalendarObjectWithName("2.ics", Component.fromString(self.caldata2))
resource3 = yield calendar1.createCalendarObjectWithName("3.ics", Component.fromString(self.caldata3))
resource_id3 = resource3.id()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resources = yield shared.objectResources()
self.assertEqual(len(resources), 3)
yield self.commitTransaction(1)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resources = yield shared.objectResourcesWithNames(("1.ics", "3.ics",))
byname = dict([(obj.name(), obj) for obj in resources])
byuid = dict([(obj.uid(), obj) for obj in resources])
self.assertEqual(len(resources), 2)
self.assertEqual(set([obj.name() for obj in resources]), set(("1.ics", "3.ics",)))
self.assertEqual(set([obj.uid() for obj in resources]), set(("uid1", "uid3",)))
self.assertEqual(set([obj.id() for obj in resources]), set((resource_id1, resource_id3,)))
resource = yield shared.objectResourceWithName("1.ics")
self.assertTrue(resource is byname["1.ics"])
resource = yield shared.objectResourceWithName("3.ics")
self.assertTrue(resource is byname["3.ics"])
resource = yield shared.objectResourceWithName("Missing.ics")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithUID("uid1")
self.assertTrue(resource is byuid["uid1"])
resource = yield shared.objectResourceWithUID("uid3")
self.assertTrue(resource is byuid["uid3"])
resource = yield shared.objectResourceWithUID("uid-missing")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithID(resource_id1)
self.assertTrue(resource is byname["1.ics"])
resource = yield shared.objectResourceWithID(resource_id3)
self.assertTrue(resource is byname["3.ics"])
resource = yield shared.objectResourceWithID(0)
self.assertTrue(resource is None)
yield self.commitTransaction(1)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
yield object1.remove()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resources = yield shared.objectResourcesWithNames(("1.ics", "3.ics",))
byname = dict([(obj.name(), obj) for obj in resources])
byuid = dict([(obj.uid(), obj) for obj in resources])
self.assertEqual(len(resources), 1)
self.assertEqual(set([obj.name() for obj in resources]), set(("3.ics",)))
self.assertEqual(set([obj.uid() for obj in resources]), set(("uid3",)))
self.assertEqual(set([obj.id() for obj in resources]), set((resource_id3,)))
resource = yield shared.objectResourceWithName("1.ics")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithName("3.ics")
self.assertTrue(resource is byname["3.ics"])
resource = yield shared.objectResourceWithName("Missing.ics")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithUID("uid1")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithUID("uid3")
self.assertTrue(resource is byuid["uid3"])
resource = yield shared.objectResourceWithUID("uid-missing")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithID(resource_id1)
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithID(resource_id3)
self.assertTrue(resource is byname["3.ics"])
resource = yield shared.objectResourceWithID(0)
self.assertTrue(resource is None)
yield self.commitTransaction(1)
@inlineCallbacks
def test_objectwith(self):
"""
Test that action=objectwith works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
resource = yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
resource_id = resource.id()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resource = yield shared.objectResourceWithName("1.ics")
self.assertTrue(resource is not None)
self.assertEqual(resource.name(), "1.ics")
self.assertEqual(resource.uid(), "uid1")
resource = yield shared.objectResourceWithName("2.ics")
self.assertTrue(resource is None)
yield self.commitTransaction(1)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resource = yield shared.objectResourceWithUID("uid1")
self.assertTrue(resource is not None)
self.assertEqual(resource.name(), "1.ics")
self.assertEqual(resource.uid(), "uid1")
resource = yield shared.objectResourceWithUID("uid2")
self.assertTrue(resource is None)
yield self.commitTransaction(1)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resource = yield shared.objectResourceWithID(resource_id)
self.assertTrue(resource is not None)
self.assertEqual(resource.name(), "1.ics")
self.assertEqual(resource.uid(), "uid1")
resource = yield shared.objectResourceWithID(0)
self.assertTrue(resource is None)
yield self.commitTransaction(1)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
yield object1.remove()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resource = yield shared.objectResourceWithName("1.ics")
self.assertTrue(resource is None)
yield self.commitTransaction(1)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resource = yield shared.objectResourceWithUID("uid1")
self.assertTrue(resource is None)
yield self.commitTransaction(1)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resource = yield shared.objectResourceWithID(resource_id)
self.assertTrue(resource is None)
yield self.commitTransaction(1)
@inlineCallbacks
def test_create(self):
"""
Test that action=create works.
"""
yield self.createShare("user01", "puser01")
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resource = yield shared.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
resource_id = resource.id()
self.assertTrue(resource is not None)
self.assertEqual(resource.name(), "1.ics")
self.assertEqual(resource.uid(), "uid1")
self.assertFalse(resource._componentChanged)
yield self.commitTransaction(1)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resource = yield shared.objectResourceWithUID("uid1")
self.assertTrue(resource is not None)
self.assertEqual(resource.name(), "1.ics")
self.assertEqual(resource.uid(), "uid1")
self.assertEqual(resource.id(), resource_id)
yield self.commitTransaction(1)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
self.assertTrue(object1 is not None)
self.assertEqual(object1.name(), "1.ics")
self.assertEqual(object1.uid(), "uid1")
self.assertEqual(object1.id(), resource_id)
yield self.commitTransaction(0)
@inlineCallbacks
def test_create_exception(self):
"""
Test that action=create fails when a duplicate name is used.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
yield self.failUnlessFailure(shared.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)), ObjectResourceNameAlreadyExistsError)
yield self.abortTransaction(1)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
yield self.failUnlessFailure(shared.createCalendarObjectWithName(".2.ics", Component.fromString(self.caldata2)), ObjectResourceNameNotAllowedError)
yield self.abortTransaction(1)
@inlineCallbacks
def test_setcomponent(self):
"""
Test that action=setcomponent works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
ical = yield shared_object.component()
self.assertTrue(isinstance(ical, Component))
self.assertEqual(normalize_iCalStr(str(ical)), normalize_iCalStr(self.caldata1))
yield self.commitTransaction(1)
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
changed = yield shared_object.setComponent(Component.fromString(self.caldata1_changed))
self.assertFalse(changed)
ical = yield shared_object.component()
self.assertTrue(isinstance(ical, Component))
self.assertEqual(normalize_iCalStr(str(ical)), normalize_iCalStr(self.caldata1_changed))
yield self.commitTransaction(1)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
ical = yield object1.component()
self.assertTrue(isinstance(ical, Component))
self.assertEqual(normalize_iCalStr(str(ical)), normalize_iCalStr(self.caldata1_changed))
yield self.commitTransaction(0)
@inlineCallbacks
def test_component(self):
"""
Test that action=component works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
ical = yield shared_object.component()
self.assertTrue(isinstance(ical, Component))
self.assertEqual(normalize_iCalStr(str(ical)), normalize_iCalStr(self.caldata1))
yield self.commitTransaction(1)
@inlineCallbacks
def test_remove(self):
"""
Test that action=remove works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
yield shared_object.remove()
yield self.commitTransaction(1)
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
self.assertTrue(shared_object is None)
yield self.commitTransaction(1)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
self.assertTrue(object1 is None)
yield self.commitTransaction(0)
@inlineCallbacks
def test_freebusy(self):
"""
        Test that action=freebusy works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
fbstart = "{now:04d}0102T000000Z".format(**self.nowYear)
fbend = "{now:04d}0103T000000Z".format(**self.nowYear)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
fbinfo = FreebusyQuery.FBInfo([], [], [])
timerange = Period(DateTime.parseText(fbstart), DateTime.parseText(fbend))
organizer = recipient = (yield calendarUserFromCalendarUserAddress("mailto:<EMAIL>", self.theTransactionUnderTest(1)))
freebusy = FreebusyQuery(organizer=organizer, recipient=recipient, timerange=timerange)
matchtotal = (yield freebusy.generateFreeBusyInfo([shared, ], fbinfo))
self.assertEqual(matchtotal, 1)
self.assertEqual(fbinfo[0], [Period.parseText("{now:04d}0102T140000Z/PT1H".format(**self.nowYear)), ])
self.assertEqual(len(fbinfo[1]), 0)
self.assertEqual(len(fbinfo[2]), 0)
yield self.commitTransaction(1)
def attachmentToString(self, attachment):
"""
Convenience to convert an L{IAttachment} to a string.
@param attachment: an L{IAttachment} provider to convert into a string.
@return: a L{Deferred} that fires with the contents of the attachment.
@rtype: L{Deferred} firing C{bytes}
"""
capture = CaptureProtocol()
attachment.retrieve(capture)
return capture.deferred
@inlineCallbacks
def test_add_attachment(self):
"""
Test that action=add-attachment works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
object1 = yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
resourceID = object1.id()
yield self.commitTransaction(0)
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
data = "Here is some text."
attachment, location = yield shared_object.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream(data))
managedID = attachment.managedID()
from txdav.caldav.datastore.sql_external import ManagedAttachmentExternal
self.assertTrue(isinstance(attachment, ManagedAttachmentExternal))
self.assertEqual(attachment.size(), len(data))
self.assertTrue("user01/dropbox/" in location)
yield self.commitTransaction(1)
cobjs = yield ManagedAttachment.referencesTo(self.theTransactionUnderTest(0), managedID)
self.assertEqual(cobjs, set((resourceID,)))
attachment = yield ManagedAttachment.load(self.theTransactionUnderTest(0), resourceID, managedID)
self.assertEqual(attachment.name(), "test.txt")
data = yield self.attachmentToString(attachment)
self.assertEqual(data, "Here is some text.")
yield self.commitTransaction(0)
@inlineCallbacks
def test_update_attachment(self):
"""
Test that action=update-attachment works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
resourceID = object1.id()
attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text."))
managedID = attachment.managedID()
yield self.commitTransaction(0)
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
data = "Here is some more text."
attachment, location = yield shared_object.updateAttachment(managedID, MimeType.fromString("text/plain"), "test.txt", MemoryStream(data))
managedID = attachment.managedID()
from txdav.caldav.datastore.sql_external import ManagedAttachmentExternal
self.assertTrue(isinstance(attachment, ManagedAttachmentExternal))
self.assertEqual(attachment.size(), len(data))
self.assertTrue("user01/dropbox/" in location)
yield self.commitTransaction(1)
cobjs = yield ManagedAttachment.referencesTo(self.theTransactionUnderTest(0), managedID)
self.assertEqual(cobjs, set((resourceID,)))
        attachment = yield ManagedAttachment.load(self.theTransactionUnderTest(0), resourceID, managedID)
self.assertEqual(attachment.name(), "test.txt")
data = yield self.attachmentToString(attachment)
self.assertEqual(data, "Here is some more text.")
yield self.commitTransaction(0)
@inlineCallbacks
def test_remove_attachment(self):
"""
Test that action=remove-attachment works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
resourceID = object1.id()
attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text."))
managedID = attachment.managedID()
yield self.commitTransaction(0)
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
yield shared_object.removeAttachment(None, managedID)
yield self.commitTransaction(1)
cobjs = yield ManagedAttachment.referencesTo(self.theTransactionUnderTest(0), managedID)
self.assertEqual(cobjs, set())
attachment = yield ManagedAttachment.load(self.theTransactionUnderTest(0), resourceID, managedID)
self.assertTrue(attachment is None)
yield self.commitTransaction(0)
@inlineCallbacks
def test_get_all_attachments(self):
"""
Test that action=get-all-attachments works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text."))
yield self.commitTransaction(0)
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
attachments = yield shared_object.ownerHome().getAllAttachments()
self.assertEqual(len(attachments), 1)
self.assertTrue(isinstance(attachments[0], ManagedAttachment))
self.assertEqual(attachments[0].contentType(), MimeType.fromString("text/plain"))
self.assertEqual(attachments[0].name(), "test.txt")
yield self.commitTransaction(1)
@inlineCallbacks
def test_get_attachment_data(self):
"""
        Test that action=get-attachment-data works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text."))
remote_id = attachment.id()
yield self.commitTransaction(0)
home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="puser01")
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
attachment = yield ManagedAttachment._create(self.theTransactionUnderTest(1), None, home1.id())
attachment._contentType = MimeType.fromString("text/plain")
attachment._name = "test.txt"
yield shared_object.ownerHome().readAttachmentData(remote_id, attachment)
yield self.commitTransaction(1)
@inlineCallbacks
def test_get_attachment_links(self):
"""
Test that action=get-attachment-links works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
cobj1 = yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
calobjID = cobj1.id()
yield self.commitTransaction(0)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text."))
attID = attachment.id()
managedID = attachment.managedID()
yield self.commitTransaction(0)
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
links = yield shared_object.ownerHome().getAttachmentLinks()
self.assertEqual(len(links), 1)
self.assertTrue(isinstance(links[0], AttachmentLink))
self.assertEqual(links[0]._attachmentID, attID)
self.assertEqual(links[0]._managedID, managedID)
self.assertEqual(links[0]._calendarObjectID, calobjID)
yield self.commitTransaction(1)
|
2019/07/27/Django Example App - YouTube Search With YouTube Data API/django_youtube_search/youtube_search/search/views.py | kenjitagawa/youtube_video_code | 492 | 12633678 | import requests
from isodate import parse_duration
from django.conf import settings
from django.shortcuts import render, redirect
def index(request):
videos = []
if request.method == 'POST':
search_url = 'https://www.googleapis.com/youtube/v3/search'
video_url = 'https://www.googleapis.com/youtube/v3/videos'
search_params = {
'part' : 'snippet',
'q' : request.POST['search'],
'key' : settings.YOUTUBE_DATA_API_KEY,
'maxResults' : 9,
'type' : 'video'
}
r = requests.get(search_url, params=search_params)
results = r.json()['items']
video_ids = []
for result in results:
video_ids.append(result['id']['videoId'])
if request.POST['submit'] == 'lucky':
return redirect(f'https://www.youtube.com/watch?v={ video_ids[0] }')
video_params = {
'key' : settings.YOUTUBE_DATA_API_KEY,
'part' : 'snippet,contentDetails',
'id' : ','.join(video_ids),
'maxResults' : 9
}
r = requests.get(video_url, params=video_params)
results = r.json()['items']
for result in results:
video_data = {
'title' : result['snippet']['title'],
'id' : result['id'],
'url' : f'https://www.youtube.com/watch?v={ result["id"] }',
'duration' : int(parse_duration(result['contentDetails']['duration']).total_seconds() // 60),
'thumbnail' : result['snippet']['thumbnails']['high']['url']
}
videos.append(video_data)
context = {
'videos' : videos
}
return render(request, 'search/index.html', context)
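# Added note (not part of the original view): the code above reads the API key from Django
# settings, so settings.py is expected to define something like the hypothetical line below.
# YOUTUBE_DATA_API_KEY = '<your-api-key>'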
|
src/binwalk/__init__.py | chubbymaggie/binwalk | 5,504 | 12633687 | __all__ = ['scan', 'execute', 'ModuleException']
from binwalk.core.module import Modules
from binwalk.core.version import __version__ # This file is auto-generated by setup.py and ignored by .gitignore
from binwalk.core.exceptions import ModuleException
# Convenience functions
def scan(*args, **kwargs):
with Modules(*args, **kwargs) as m:
objs = m.execute()
return objs
def execute(*args, **kwargs):
return scan(*args, **kwargs)
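# Usage sketch (added for illustration; 'firmware.bin' is a hypothetical path, and the
# keyword arguments mirror the command-line options). Kept as a comment so the module's
# behaviour is unchanged.
# import binwalk
# for module in binwalk.scan('firmware.bin', signature=True, quiet=True):
#     for result in module.results:
#         print("0x%.8X    %s" % (result.offset, result.description))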
|
skfda/inference/anova/__init__.py | jiduque/scikit-fda | 147 | 12633737 | """Implementation of ANOVA for functional data."""
from ._anova_oneway import oneway_anova, v_asymptotic_stat, v_sample_stat
|
ptp/components/models/__init__.py | aasseman/pytorchpipe | 232 | 12633760 | from .model import Model
# General usage
from .general_usage.feed_forward_network import FeedForwardNetwork
from .general_usage.recurrent_neural_network import RecurrentNeuralNetwork
from .general_usage.seq2seq import Seq2Seq
from .general_usage.attention_decoder import AttentionDecoder
# Language
from .language.index_embeddings import IndexEmbeddings
from .language.sentence_embeddings import SentenceEmbeddings
# Vision
from .vision.convnet_encoder import ConvNetEncoder
from .vision.generic_image_encoder import GenericImageEncoder
from .vision.lenet5 import LeNet5
# Multi-modal reasoning
from .multi_modal_reasoning.compact_bilinear_pooling import CompactBilinearPooling
from .multi_modal_reasoning.factorized_bilinear_pooling import FactorizedBilinearPooling
from .multi_modal_reasoning.low_rank_bilinear_pooling import LowRankBilinearPooling
from .multi_modal_reasoning.question_driven_attention import QuestionDrivenAttention
from .multi_modal_reasoning.relational_network import RelationalNetwork
from .multi_modal_reasoning.self_attention import SelfAttention
__all__ = [
'Model',
# General usage
'FeedForwardNetwork',
'RecurrentNeuralNetwork',
'Seq2Seq',
'AttentionDecoder',
# Language
'IndexEmbeddings',
'SentenceEmbeddings',
# Vision
'ConvNetEncoder',
'GenericImageEncoder',
'LeNet5',
# Multi-modal reasoning
'CompactBilinearPooling',
'FactorizedBilinearPooling',
'LowRankBilinearPooling',
'QuestionDrivenAttention',
'RelationalNetwork',
'SelfAttention'
]
|
leet/array/toHex.py | monishshah18/python-cp-cheatsheet | 140 | 12633768 | class Solution:
def toHex(self, num: int) -> str:
rtn = []
index = "0123456789abcdef"
if num == 0: return '0'
        if num < 0: num += 2 ** 32  # two's complement: represent a negative int as its unsigned 32-bit value
while num > 0:
digit = num % 16
num = num // 16
rtn.append(index[digit])
return "".join(rtn[::-1]) |
InnerEye-DataQuality/InnerEyeDataQuality/deep_learning/transforms.py | faz1993/InnerEye-DeepLearning | 402 | 12633783 | # ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import random
from typing import Any, Callable, Tuple
import PIL
import PIL.Image
import numpy as np
import torch
import torchvision
from scipy.ndimage import gaussian_filter, map_coordinates
from skimage.filters import rank
from skimage.morphology import disk
from InnerEyeDataQuality.configs.config_node import ConfigNode
class BaseTransform:
def __init__(self, config: ConfigNode):
self.transform = lambda x: x
def __call__(self, data: PIL.Image.Image) -> PIL.Image.Image:
return self.transform(data)
class Standardize:
def __init__(self, mean: np.ndarray, std: np.ndarray):
self.mean = np.array(mean)
self.std = np.array(std)
def __call__(self, image: PIL.Image.Image) -> np.ndarray:
image = np.asarray(image).astype(np.float32) / 255.
image = (image - self.mean) / self.std
return image
class CenterCrop(BaseTransform):
def __init__(self, config: ConfigNode):
self.transform = torchvision.transforms.CenterCrop(config.preprocess.center_crop_size)
class RandomCrop(BaseTransform):
def __init__(self, config: ConfigNode):
self.transform = torchvision.transforms.RandomCrop(
config.dataset.image_size,
padding=config.augmentation.random_crop.padding,
fill=config.augmentation.random_crop.fill,
padding_mode=config.augmentation.random_crop.padding_mode)
class RandomResizeCrop(BaseTransform):
def __init__(self, config: ConfigNode):
self.transform = torchvision.transforms.RandomResizedCrop(
size=config.preprocess.resize,
scale=config.augmentation.random_crop.scale)
class RandomHorizontalFlip(BaseTransform):
def __init__(self, config: ConfigNode):
self.transform = torchvision.transforms.RandomHorizontalFlip(
config.augmentation.random_horizontal_flip.prob)
class RandomAffine(BaseTransform):
def __init__(self, config: ConfigNode):
self.transform = torchvision.transforms.RandomAffine(degrees=config.augmentation.random_affine.max_angle, # 15
translate=(
config.augmentation.random_affine.max_horizontal_shift,
config.augmentation.random_affine.max_vertical_shift),
shear=config.augmentation.random_affine.max_shear)
class Resize(BaseTransform):
def __init__(self, config: ConfigNode):
self.transform = torchvision.transforms.Resize(config.preprocess.resize)
class RandomColorJitter(BaseTransform):
def __init__(self, config: ConfigNode) -> None:
self.transform = torchvision.transforms.ColorJitter(brightness=config.augmentation.random_color.brightness,
contrast=config.augmentation.random_color.contrast,
saturation=config.augmentation.random_color.saturation)
class RandomErasing(BaseTransform):
def __init__(self, config: ConfigNode) -> None:
self.transform = torchvision.transforms.RandomErasing(p=0.5,
scale=config.augmentation.random_erasing.scale,
ratio=config.augmentation.random_erasing.ratio)
class RandomGamma(BaseTransform):
def __init__(self, config: ConfigNode) -> None:
self.min, self.max = config.augmentation.gamma.scale
def __call__(self, image: PIL.Image.Image) -> PIL.Image.Image:
gamma = random.uniform(self.min, self.max)
return torchvision.transforms.functional.adjust_gamma(image, gamma=gamma)
class HistogramNormalization:
def __init__(self, config: ConfigNode) -> None:
self.disk_size = config.preprocess.histogram_normalization.disk_size
def __call__(self, image: PIL.Image.Image) -> np.ndarray:
# Apply local histogram equalization
image = np.array(image)
return PIL.Image.fromarray(rank.equalize(image, selem=disk(self.disk_size)))
class ExpandChannels:
def __call__(self, data: torch.Tensor) -> torch.Tensor:
return torch.repeat_interleave(data, 3, dim=0)
class ToNumpy:
def __call__(self, image: PIL.Image.Image) -> np.ndarray:
return np.array(image)
class AddGaussianNoise:
def __init__(self, config: ConfigNode) -> None:
"""
Transformation to add Gaussian noise N(0, std) to
an image. Where std is set with the config.augmentation.gaussian_noise.std
argument. The transformation will be applied with probability
config.augmentation.gaussian_noise.p_apply
"""
self.std = config.augmentation.gaussian_noise.std
self.p_apply = config.augmentation.gaussian_noise.p_apply
def __call__(self, data: torch.Tensor) -> torch.Tensor:
if np.random.random(1) > self.p_apply:
return data
noise = torch.randn(size=data.shape) * self.std
data = torch.clamp(data + noise, 0, 1)
return data
class ElasticTransform:
"""Elastic deformation of images as described in [Simard2003]_.
.. [Simard2003] Simard, <NAME>, "Best Practices for
Convolutional Neural Networks applied to Visual Document Analysis", in
Proc. of the International Conference on Document Analysis and
Recognition, 2003.
https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.160.8494&rep=rep1&type=pdf
:param sigma: elasticity coefficient
:param alpha: intensity of the deformation
:param p_apply: probability of applying the transformation
"""
def __init__(self, config: ConfigNode) -> None:
self.alpha = config.augmentation.elastic_transform.alpha
self.sigma = config.augmentation.elastic_transform.sigma
self.p_apply = config.augmentation.elastic_transform.p_apply
def __call__(self, image: PIL.Image) -> PIL.Image:
if np.random.random(1) > self.p_apply:
return image
image = np.asarray(image).squeeze()
assert len(image.shape) == 2
shape = image.shape
dx = gaussian_filter((np.random.random(shape) * 2 - 1), self.sigma, mode="constant", cval=0) * self.alpha
dy = gaussian_filter((np.random.random(shape) * 2 - 1), self.sigma, mode="constant", cval=0) * self.alpha
x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
indices = np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))
return PIL.Image.fromarray(map_coordinates(image, indices, order=1).reshape(shape))
class DualViewTransformWrapper:
def __init__(self, transforms: Callable):
self.transforms = transforms
def __call__(self, sample: PIL.Image.Image) -> Tuple[Any, Any]:
transform = self.transforms
xi = transform(sample)
xj = transform(sample)
return xi, xj
|
modules/ts/misc/report.py | thisisgopalmandal/opencv | 163 | 12633792 | #!/usr/bin/env python
import testlog_parser, sys, os, xml, re, glob
from table_formatter import *
from optparse import OptionParser
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-o", "--output", dest="format", help="output results in text format (can be 'txt', 'html' or 'auto' - default)", metavar="FMT", default="auto")
parser.add_option("-u", "--units", dest="units", help="units for output values (s, ms (default), us, ns or ticks)", metavar="UNITS", default="ms")
parser.add_option("-c", "--columns", dest="columns", help="comma-separated list of columns to show", metavar="COLS", default="")
parser.add_option("-f", "--filter", dest="filter", help="regex to filter tests", metavar="REGEX", default=None)
parser.add_option("", "--show-all", action="store_true", dest="showall", default=False, help="also include empty and \"notrun\" lines")
(options, args) = parser.parse_args()
if len(args) < 1:
print >> sys.stderr, "Usage:\n", os.path.basename(sys.argv[0]), "<log_name1>.xml"
exit(0)
options.generateHtml = detectHtmlOutputType(options.format)
# expand wildcards and filter duplicates
files = []
files1 = []
for arg in args:
if ("*" in arg) or ("?" in arg):
files1.extend([os.path.abspath(f) for f in glob.glob(arg)])
else:
files.append(os.path.abspath(arg))
seen = set()
files = [ x for x in files if x not in seen and not seen.add(x)]
files.extend((set(files1) - set(files)))
args = files
# load test data
tests = []
files = []
for arg in set(args):
try:
cases = testlog_parser.parseLogFile(arg)
if cases:
files.append(os.path.basename(arg))
tests.extend(cases)
except:
pass
if options.filter:
expr = re.compile(options.filter)
tests = [t for t in tests if expr.search(str(t))]
tbl = table(", ".join(files))
if options.columns:
metrics = [s.strip() for s in options.columns.split(",")]
metrics = [m for m in metrics if m and not m.endswith("%") and m in metrix_table]
else:
metrics = None
if not metrics:
metrics = ["name", "samples", "outliers", "min", "median", "gmean", "mean", "stddev"]
if "name" not in metrics:
metrics.insert(0, "name")
for m in metrics:
if m == "name":
tbl.newColumn(m, metrix_table[m][0])
else:
tbl.newColumn(m, metrix_table[m][0], align = "center")
needNewRow = True
for case in sorted(tests, key=lambda x: str(x)):
if needNewRow:
tbl.newRow()
if not options.showall:
needNewRow = False
status = case.get("status")
if status != "run":
if status != "notrun":
needNewRow = True
for m in metrics:
if m == "name":
tbl.newCell(m, str(case))
else:
tbl.newCell(m, status, color = "red")
else:
needNewRow = True
for m in metrics:
val = metrix_table[m][1](case, None, options.units)
if isinstance(val, float):
tbl.newCell(m, "%.2f %s" % (val, options.units), val)
else:
tbl.newCell(m, val, val)
if not needNewRow:
tbl.trimLastRow()
# output table
if options.generateHtml:
if options.format == "moinwiki":
tbl.htmlPrintTable(sys.stdout, True)
else:
htmlPrintHeader(sys.stdout, "Report %s tests from %s" % (len(tests), ", ".join(files)))
tbl.htmlPrintTable(sys.stdout)
htmlPrintFooter(sys.stdout)
else:
tbl.consolePrintTable(sys.stdout)
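# Invocation sketch (added; the log file names are hypothetical):
#   python report.py -o html -u ms --columns name,min,median perf_core.xml "perf_imgproc*.xml" > report.html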
|
external/mmdetection/tests/ote_params_validation/test_ote_ote_utils_params_validation.py | opencv/openvino_training_extensions | 775 | 12633826 | # Copyright (C) 2021-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import pytest
from detection_tasks.apis.detection.ote_utils import (
ColorPalette,
generate_label_schema,
get_task_class,
load_template,
)
from ote_sdk.test_suite.e2e_test_system import e2e_pytest_unit
from ote_sdk.tests.parameters_validation.validation_helper import (
check_value_error_exception_raised,
)
class TestColorPaletteInputParamsValidation:
@staticmethod
def color_palette():
return ColorPalette(1)
@e2e_pytest_unit
def test_color_palette_init_params_validation(self):
"""
<b>Description:</b>
Check ColorPalette object initialization parameters validation
<b>Input data:</b>
ColorPalette object initialization parameters with unexpected type
<b>Expected results:</b>
Test passes if ValueError exception is raised when unexpected type object is specified as
ColorPalette object initialization parameter
"""
correct_values_dict = {
"n": 1,
}
unexpected_str = "unexpected string"
unexpected_values = [
# Unexpected string is specified as "n" parameter
("n", unexpected_str),
# Unexpected string is specified as "rng" parameter
("rng", unexpected_str),
]
check_value_error_exception_raised(
correct_parameters=correct_values_dict,
unexpected_values=unexpected_values,
class_or_function=ColorPalette,
)
@e2e_pytest_unit
def test_color_palette_get_item_params_validation(self):
"""
<b>Description:</b>
Check ColorPalette object "__getitem__" method input parameters validation
<b>Input data:</b>
ColorPalette object, "n" non-integer object
<b>Expected results:</b>
Test passes if ValueError exception is raised when unexpected type object is specified as
input parameter for "__getitem__" method
"""
color_palette = self.color_palette()
with pytest.raises(ValueError):
color_palette.__getitem__("unexpected string") # type: ignore
class TestOTEUtilsFunctionsInputParamsValidation:
@e2e_pytest_unit
def test_generate_label_schema_input_params_validation(self):
"""
<b>Description:</b>
Check "generate_label_schema" function input parameters validation
<b>Input data:</b>
"generate_label_schema" function unexpected-type input parameters
<b>Expected results:</b>
Test passes if ValueError exception is raised when unexpected type object is specified as
input parameter for "generate_label_schema" function
"""
correct_values_dict = {
"label_names": ["label_1", "label_2"],
}
unexpected_int = 1
unexpected_values = [
# Unexpected integer is specified as "label_names" parameter
("label_names", unexpected_int),
# Unexpected integer is specified as nested label name
("label_names", ["label_1", unexpected_int]),
# Unexpected integer is specified as "label_domain" parameter
("label_domain", unexpected_int),
]
check_value_error_exception_raised(
correct_parameters=correct_values_dict,
unexpected_values=unexpected_values,
class_or_function=generate_label_schema,
)
@e2e_pytest_unit
def test_load_template_params_validation(self):
"""
<b>Description:</b>
Check "load_template" function input parameters validation
<b>Input data:</b>
"path" unexpected string with yaml file object
<b>Expected results:</b>
Test passes if ValueError exception is raised when unexpected type object is specified as
input parameter for "load_template" function
"""
for incorrect_parameter in [
# Unexpected integer is specified as "path" parameter
1,
# Empty string is specified as "path" parameter
"",
# Path to non-existing file is specified as "path" parameter
"./non_existing.yaml",
# Path to non-yaml file is specified as "path" parameter
"./unexpected_type.jpg",
# Path Null character is specified in "path" parameter
"./null\0char.yaml",
# Path with non-printable character is specified as "path" parameter
"./non\nprintable.yaml",
]:
with pytest.raises(ValueError):
load_template(incorrect_parameter)
@e2e_pytest_unit
def test_get_task_class_input_params_validation(self):
"""
<b>Description:</b>
Check "get_task_class" function input parameters validation
<b>Input data:</b>
"path" non string-type object
<b>Expected results:</b>
Test passes if ValueError exception is raised when unexpected type object is specified as
input parameter for "get_task_class" function
"""
with pytest.raises(ValueError):
get_task_class(path=1) # type: ignore
|
libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_message.py | Fl4v/botbuilder-python | 388 | 12633834 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from slack.web.classes.attachments import Attachment
from slack.web.classes.blocks import Block
class SlackMessage:
def __init__(self, **kwargs):
self.ephemeral = kwargs.get("ephemeral")
self.as_user = kwargs.get("as_user")
self.icon_url = kwargs.get("icon_url")
self.icon_emoji = kwargs.get("icon_emoji")
self.thread_ts = kwargs.get("thread_ts")
self.user = kwargs.get("user")
self.channel = kwargs.get("channel")
self.text = kwargs.get("text")
self.team = kwargs.get("team")
self.ts = kwargs.get("ts") # pylint: disable=invalid-name
self.username = kwargs.get("username")
self.bot_id = kwargs.get("bot_id")
self.icons = kwargs.get("icons")
self.blocks: [Block] = kwargs.get("blocks")
# Create proper Attachment objects
# It would appear that we can get dict fields from the wire that aren't defined
# in the Attachment class. So only pass in known fields.
attachments = kwargs.get("attachments")
if attachments is not None:
self.attachments = [
Attachment(**{x: att[x] for x in att if x in Attachment.attributes})
for att in kwargs.get("attachments")
]
|
xcessiv/stacker.py | KhaledTo/xcessiv | 1,362 | 12633870 | import sklearn
if sklearn.__version__.startswith('0.18'):
from sklearn.pipeline import _BasePipeline as bp
else:
from sklearn.utils.metaestimators import _BaseComposition as bp
import numpy as np
class XcessivStackedEnsemble(bp):
"""Contains the class for the Xcessiv stacked ensemble"""
def __init__(self, base_learners, meta_feature_generators,
secondary_learner, cv_function):
super(XcessivStackedEnsemble, self).__init__()
self.base_learners = base_learners
self.meta_feature_generators = meta_feature_generators
self.secondary_learner = secondary_learner
self.cv_function = cv_function
self._named_learners = [('bl{}'.format(idx), base_learner) for idx, base_learner
in enumerate(base_learners)]
self._named_learners.append(('secondary-learner', secondary_learner))
def get_params(self, deep=True):
"""Get parameters for this estimator.
Args:
deep (boolean, optional): If True, will return the parameters for this estimator and
contained subobjects that are estimators.
        Returns:
            params: mapping of string to any. Parameter names mapped to their values.
"""
return self._get_params('_named_learners', deep=deep)
def set_params(self, **params):
"""Set the parameters of this estimator."""
self._set_params('_named_learners', **params)
return self
def fit(self, X, y):
print('Fitting {} base learners'.format(len(self.base_learners)))
all_learner_meta_features = []
for idx, base_learner in enumerate(self.base_learners):
single_learner_meta_features = []
test_indices = []
for num, (train_idx, test_idx) in enumerate(self.cv_function(X, y)):
print('Fold {} of base learner {}'.format(num+1, idx+1))
base_learner.fit(X[train_idx], y[train_idx])
preds = getattr(base_learner, self.meta_feature_generators[idx])(X[test_idx])
if len(preds.shape) == 1:
preds = preds.reshape(-1, 1)
single_learner_meta_features.append(
preds
)
test_indices.append(test_idx)
single_learner_meta_features = np.concatenate(single_learner_meta_features)
all_learner_meta_features.append(single_learner_meta_features)
all_learner_meta_features = np.concatenate(all_learner_meta_features, axis=1)
test_indices = np.concatenate(test_indices) # reorganized order due to CV
print('Fitting meta-learner')
self.secondary_learner.fit(all_learner_meta_features, y[test_indices])
return self
def _process_using_meta_feature_generator(self, X, meta_feature_generator):
"""Process using secondary learner meta-feature generator
Since secondary learner meta-feature generator can be anything e.g. predict, predict_proba,
this internal method gives the ability to use any string. Just make sure secondary learner
has the method.
Args:
X (array-like): Features array
meta_feature_generator (str, unicode): Method for use by secondary learner
"""
all_learner_meta_features = []
for idx, base_learner in enumerate(self.base_learners):
single_learner_meta_features = getattr(base_learner,
self.meta_feature_generators[idx])(X)
if len(single_learner_meta_features.shape) == 1:
single_learner_meta_features = single_learner_meta_features.reshape(-1, 1)
all_learner_meta_features.append(single_learner_meta_features)
all_learner_meta_features = np.concatenate(all_learner_meta_features, axis=1)
out = getattr(self.secondary_learner, meta_feature_generator)(all_learner_meta_features)
return out
|
sample_factory/runner/runs/paper_doom_all_basic_envs.py | eles13/sample-factory | 320 | 12633907 | from sample_factory.runner.run_description import RunDescription, Experiment, ParamGrid
_params = ParamGrid([
('seed', [0, 1111, 2222, 3333, 4444, 5555, 6666, 7777, 8888, 9999]),
('env', ['doom_my_way_home', 'doom_deadly_corridor', 'doom_defend_the_center', 'doom_defend_the_line', 'doom_health_gathering', 'doom_health_gathering_supreme']),
])
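# Added note: the grid above expands to 10 seeds x 6 environments = 60 parameter
# combinations, all generated for the single 'basic_envs_fs4' experiment below.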
_experiments = [
Experiment(
'basic_envs_fs4',
'python -m sample_factory.algorithms.appo.train_appo --train_for_env_steps=500000000 --algo=APPO --env_frameskip=4 --use_rnn=True --num_workers=36 --num_envs_per_worker=8 --num_policies=1 --ppo_epochs=1 --rollout=32 --recurrence=32 --batch_size=2048 --wide_aspect_ratio=False',
_params.generate_params(randomize=False),
),
]
RUN_DESCRIPTION = RunDescription('paper_doom_basic_envs_appo_v97_fs4', experiments=_experiments)
|
self_supervision/patch_utils.py | kristinakupf/revisiting-self-supervised | 360 | 12633935 | #!/usr/bin/python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for patch based image processing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import struct
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import preprocess
import utils
from models.utils import get_net
from trainer import make_estimator
FLAGS = tf.flags.FLAGS
PATCH_H_COUNT = 3
PATCH_W_COUNT = 3
PATCH_COUNT = PATCH_H_COUNT * PATCH_W_COUNT
# It's supposed to be in the root folder, which is also pwd when running, if the
# instructions in the README are followed. Hence not a flag.
PERMUTATION_PATH = 'permutations_100_max.bin'
def apply_model(image_fn,
is_training,
num_outputs,
perms,
make_signature=False):
"""Creates the patch based model output from patches representations.
Args:
image_fn: function returns image tensor.
is_training: is training flag used for batch norm and drop out.
num_outputs: number of output classes.
perms: numpy array with shape [m, k], element range [0, PATCH_COUNT). k
      stands for the patch numbers used in a permutation. m stands for the number
of permutations. Each permutation is used to concat the patch inputs
[n*PATCH_COUNT, h, w, c] into tensor with shape [n*m, h, w, c*k].
make_signature: whether to create signature for hub module.
Returns:
out: output tensor with shape [n*m, 1, 1, num_outputs].
Raises:
ValueError: An error occurred when the architecture is unknown.
"""
images = image_fn()
net = get_net(num_classes=FLAGS.get_flag_value('embed_dim', 1000))
out, end_points = net(images, is_training,
weight_decay=FLAGS.get_flag_value('weight_decay', 1e-4))
print(end_points)
if not make_signature:
out = permutate_and_concat_batch_patches(out, perms)
out = fully_connected(out, num_outputs, is_training=is_training)
out = tf.squeeze(out, [1, 2])
if make_signature:
hub.add_signature(inputs={'image': images}, outputs=out)
hub.add_signature(
name='representation',
inputs={'image': images},
outputs=end_points)
return out
def image_grid(images, ny, nx, padding=0):
"""Create a batch of image grids from a batch of images.
Args:
images: A batch of patches (B,N,H,W,C)
ny: vertical number of images
nx: horizontal number of images
padding: number of zeros between images, if any.
Returns:
A tensor batch of image grids shaped (B,H*ny,W*nx,C), although that is a
simplifying lie: if padding is used h/w will be different.
"""
with tf.name_scope('grid_image'):
if padding:
padding = [padding, padding]
images = tf.pad(images, [[0, 0], [0, 0], padding, padding, [0, 0]])
return tf.concat([
tf.concat([images[:, y * nx + x] for x in range(nx)], axis=-2)
for y in range(ny)], axis=-3)
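# Shape sketch (added for clarity; the sizes are hypothetical). Kept as a comment so the
# module's behaviour is unchanged.
# patches = tf.zeros([2, 9, 16, 16, 3])
# image_grid(patches, ny=3, nx=3).shape             # -> (2, 48, 48, 3)
# image_grid(patches, ny=3, nx=3, padding=1).shape  # -> (2, 54, 54, 3), each patch padded to 18x18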
def creates_estimator_model(images, labels, perms, num_classes, mode):
"""Creates EstimatorSpec for the patch based self supervised models.
Args:
images: images
labels: self supervised labels (class indices)
perms: patch permutations
num_classes: number of different permutations
mode: model's mode: training, eval or prediction
Returns:
EstimatorSpec
"""
print(' +++ Mode: %s, images: %s, labels: %s' % (mode, images, labels))
images = tf.reshape(images, shape=[-1] + images.get_shape().as_list()[-3:])
if mode in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL]:
with tf.variable_scope('module'):
image_fn = lambda: images
logits = apply_model(
image_fn=image_fn,
is_training=(mode == tf.estimator.ModeKeys.TRAIN),
num_outputs=num_classes,
perms=perms,
make_signature=False)
else:
input_shape = utils.str2intlist(
FLAGS.get_flag_value('serving_input_shape', 'None,None,None,3'))
image_fn = lambda: tf.placeholder( # pylint: disable=g-long-lambda
shape=input_shape,
dtype=tf.float32)
apply_model_function = functools.partial(
apply_model,
image_fn=image_fn,
num_outputs=num_classes,
perms=perms,
make_signature=True)
tf_hub_module_spec = hub.create_module_spec(
apply_model_function, [(utils.TAGS_IS_TRAINING, {
'is_training': True
}), (set(), {
'is_training': False
})],
drop_collections=['summaries'])
tf_hub_module = hub.Module(tf_hub_module_spec, trainable=False, tags=set())
hub.register_module_for_export(tf_hub_module, export_name='module')
logits = tf_hub_module(images)
return make_estimator(mode, predictions=logits)
# build loss and accuracy
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
loss = tf.reduce_mean(loss)
eval_metrics = (
lambda labels, logits: { # pylint: disable=g-long-lambda
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=tf.argmax(logits, axis=-1))},
[labels, logits])
return make_estimator(mode, loss, eval_metrics, logits)
def fully_connected(inputs,
num_classes=100,
weight_decay=5e-4,
keep_prob=0.5,
is_training=True):
"""Two layers fully connected network copied from Alexnet fc7-fc8."""
net = inputs
_, _, w, _ = net.get_shape().as_list()
kernel_regularizer = tf.contrib.layers.l2_regularizer(scale=weight_decay)
net = tf.layers.conv2d(
net,
filters=4096,
kernel_size=w,
padding='same',
kernel_initializer=tf.truncated_normal_initializer(0.0, 0.005),
bias_initializer=tf.constant_initializer(0.1),
kernel_regularizer=kernel_regularizer)
net = tf.layers.batch_normalization(
net, momentum=0.997, epsilon=1e-5, fused=None, training=is_training)
net = tf.nn.relu(net)
if is_training:
net = tf.nn.dropout(net, keep_prob=keep_prob)
net = tf.layers.conv2d(
net,
filters=num_classes,
kernel_size=1,
padding='same',
kernel_initializer=tf.truncated_normal_initializer(0.0, 0.005),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=kernel_regularizer)
return net
def generate_patch_locations():
"""Generates relative patch locations."""
perms = np.array([(i, 4) for i in range(9) if i != 4])
return perms, len(perms)
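# Added note: with the 3x3 patch grid, index 4 is the center patch, so the 8 generated pairs
# are (0, 4), (1, 4), (2, 4), (3, 4), (5, 4), (6, 4), (7, 4), (8, 4) -- every off-center patch
# paired with the center, as used for relative patch location prediction.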
def load_permutations():
"""Loads a set of pre-defined permutations."""
with tf.gfile.Open(PERMUTATION_PATH, 'rb') as f:
int32_size = 4
s = f.read(int32_size * 2)
[num_perms, c] = struct.unpack('<ll', s)
perms = []
for _ in range(num_perms * c):
s = f.read(int32_size)
x = struct.unpack('<l', s)
perms.append(x[0])
perms = np.reshape(perms, [num_perms, c])
# The bin file used index [1,9] for permutation, updated to [0, 8] for index.
perms = perms - 1
assert np.min(perms) == 0 and np.max(perms) == PATCH_COUNT - 1
return perms, num_perms
def permutate_and_concat_image_patches(patch_embeddings, perms):
"""Permutates patches from an image according to permutations.
Args:
patch_embeddings: input tensor with shape [PATCH_COUNT, h, w, c], where
PATCH_COUNT is the patch number per image.
perms: numpy array with shape [m, k], with element in range
[0, PATCH_COUNT). Permutation is used to concat the patches.
Returns:
out: output tensor with shape [m, h, w, c*k].
"""
_, h, w, c = patch_embeddings.get_shape().as_list()
if isinstance(perms, np.ndarray):
num_perms, perm_len = perms.shape
else:
num_perms, perm_len = perms.get_shape().as_list()
def permutate_patch(perm):
permed = tf.gather(patch_embeddings, perm, axis=0)
concat_tensor = tf.transpose(permed, perm=[1, 2, 3, 0])
concat_tensor = tf.reshape(
concat_tensor, shape=[-1, h, w, perm_len * c])
return concat_tensor
permed_patches = tf.stack([
permutate_patch(perms[i]) for i in range(num_perms)
])
return permed_patches
def permutate_and_concat_batch_patches(batch_patch_embeddings, perms):
"""Permutates patches from a mini batch according to permutations.
Args:
batch_patch_embeddings: input tensor with shape [n*PATCH_COUNT, h, w, c] or
[n*PATCH_COUNT, c], where PATCH_COUNT is the patch number per image
and n is the number of images in this mini batch.
perms: numpy array with shape [m, k], with element in range
[0, PATCH_COUNT). Permutation is used to concat the patches.
Returns:
out: output tensor with shape [n*m, h, w, c*k].
"""
print(' +++ permutate patches input: %s' % batch_patch_embeddings)
if len(batch_patch_embeddings.get_shape().as_list()) == 4:
_, h, w, c = batch_patch_embeddings.get_shape().as_list()
elif len(batch_patch_embeddings.get_shape().as_list()) == 2:
_, c = batch_patch_embeddings.get_shape().as_list()
h, w = (1, 1)
else:
raise ValueError('Unexpected batch_patch_embeddings shape: %s' %
batch_patch_embeddings.get_shape().as_list())
patches = tf.reshape(batch_patch_embeddings, shape=[-1, PATCH_COUNT, h, w, c])
patches = tf.stack([
permutate_and_concat_image_patches(patches[i], perms)
for i in range(patches.get_shape().as_list()[0])
])
patches = tf.reshape(patches, shape=[-1, h, w, perms.shape[1] * c])
print(' +++ permutate patches output: %s' % batch_patch_embeddings)
return patches
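# Shape sketch (added; the sizes are hypothetical): for n=4 images split into PATCH_COUNT=9
# patches with embeddings of shape [36, 1, 1, 64] and perms of shape [100, 9], the output is
# reshaped to [4*100, 1, 1, 9*64], i.e. [400, 1, 1, 576].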
def get_patch_representation(
images,
hub_module,
patch_preprocess='crop_patches,standardization',
is_training=False,
target_features=9000,
pooling_fn=None,
combine_patches='concat',
signature='representation'):
"""Permutates patches from a mini batch according to permutations.
Args:
images: input images, can be full image (NHWC) or image patchs (NPHWC).
hub_module: hub module.
patch_preprocess: preprocess applied to the image. Note that preprocess may
require setting parameters in the FLAGS.config file.
is_training: is training mode.
target_features: target feature dimension. Note that the features might
exceed this number if there're too many channels.
pooling_fn: pooling method applied to the features.
combine_patches: one of {'concat', 'max_pool', 'avg_pool'}.
signature: signature for the hub module.
Returns:
out: output representation tensors.
Raises:
ValueError: unsupported combine_patches.
"""
if patch_preprocess:
preprocess_fn = preprocess.get_preprocess_fn(patch_preprocess, is_training)
images = preprocess_fn({'image': images})['image']
assert len(images.get_shape().as_list()) == 5, 'Shape must match NPHWC.'
_, num_of_patches, h, w, c = images.get_shape().as_list()
images = tf.reshape(images, shape=[-1, h, w, c])
out_tensors = hub_module(
images,
signature=signature,
as_dict=True)
if combine_patches == 'concat':
target_features = target_features // num_of_patches
if pooling_fn is not None:
out_tensors = pooling_fn(out_tensors)
for k, t in out_tensors.iteritems():
if len(t.get_shape().as_list()) == 2:
t = t[:, None, None, :]
assert len(t.get_shape().as_list()) == 4, 'Unsupported rank %d' % len(
t.get_shape().as_list())
# Take patch-dimension out of batch-dimension: [NP]HWC -> NPHWC
t = tf.reshape(t, [-1, num_of_patches] + t.get_shape().as_list()[-3:])
if combine_patches == 'concat':
# [N, P, H, W, C] -> [N, H, W, P*C]
_, p, h, w, c = t.get_shape().as_list()
out_tensors[k] = tf.reshape(
tf.transpose(t, perm=[0, 2, 3, 4, 1]), tf.stack([-1, h, w, p * c]))
elif combine_patches == 'max_pool':
# Reduce max on P channel of NPHWC.
out_tensors[k] = tf.reduce_max(t, axis=1)
elif combine_patches == 'avg_pool':
# Reduce mean on P channel of NPHWC.
out_tensors[k] = tf.reduce_mean(t, axis=1)
else:
raise ValueError(
'Unsupported combine patches method %s.' % combine_patches)
return out_tensors
|
examples/classic/expand.py | ria02/InquirerPy | 120 | 12633939 | from InquirerPy import prompt
from InquirerPy.prompts.expand import ExpandChoice
from InquirerPy.separator import Separator
def question1_choice(_):
return [
ExpandChoice(key="a", name="Apple", value="Apple"),
ExpandChoice(key="c", name="Cherry", value="Cherry"),
ExpandChoice(key="o", name="Orange", value="Orange"),
ExpandChoice(key="p", name="Peach", value="Peach"),
ExpandChoice(key="m", name="Melon", value="Melon"),
ExpandChoice(key="s", name="Strawberry", value="Strawberry"),
ExpandChoice(key="g", name="Grapes", value="Grapes"),
]
def question2_choice(_):
return [
ExpandChoice(key="d", name="Delivery", value="dl"),
ExpandChoice(key="p", name="Pick Up", value="pk"),
Separator(line=15 * "*"),
ExpandChoice(key="c", name="Car Park", value="cp"),
ExpandChoice(key="t", name="Third Party", value="tp"),
]
def main():
questions = [
{
"type": "expand",
"choices": question1_choice,
"message": "Pick your favourite:",
"default": "o",
"cycle": False,
},
{
"type": "expand",
"choices": question2_choice,
"message": "Select your preferred method:",
},
]
result = prompt(questions)
if __name__ == "__main__":
main()
|
alipay/aop/api/domain/AccTransDetail.py | snowxmas/alipay-sdk-python-all | 213 | 12633979 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.CertInfo import CertInfo
from alipay.aop.api.domain.OriTxnInfo import OriTxnInfo
from alipay.aop.api.domain.AccPayeeInfo import AccPayeeInfo
class AccTransDetail(object):
def __init__(self):
self._alipay_order_no = None
self._cert_info = None
self._detail_no = None
self._ori_txn_info = None
self._payee_info = None
self._reach_time = None
self._remark = None
self._settlement_currency = None
self._trans_amount = None
self._trans_currency = None
@property
def alipay_order_no(self):
return self._alipay_order_no
@alipay_order_no.setter
def alipay_order_no(self, value):
self._alipay_order_no = value
@property
def cert_info(self):
return self._cert_info
@cert_info.setter
def cert_info(self, value):
if isinstance(value, CertInfo):
self._cert_info = value
else:
self._cert_info = CertInfo.from_alipay_dict(value)
@property
def detail_no(self):
return self._detail_no
@detail_no.setter
def detail_no(self, value):
self._detail_no = value
@property
def ori_txn_info(self):
return self._ori_txn_info
@ori_txn_info.setter
def ori_txn_info(self, value):
if isinstance(value, OriTxnInfo):
self._ori_txn_info = value
else:
self._ori_txn_info = OriTxnInfo.from_alipay_dict(value)
@property
def payee_info(self):
return self._payee_info
@payee_info.setter
def payee_info(self, value):
if isinstance(value, AccPayeeInfo):
self._payee_info = value
else:
self._payee_info = AccPayeeInfo.from_alipay_dict(value)
@property
def reach_time(self):
return self._reach_time
@reach_time.setter
def reach_time(self, value):
self._reach_time = value
@property
def remark(self):
return self._remark
@remark.setter
def remark(self, value):
self._remark = value
@property
def settlement_currency(self):
return self._settlement_currency
@settlement_currency.setter
def settlement_currency(self, value):
self._settlement_currency = value
@property
def trans_amount(self):
return self._trans_amount
@trans_amount.setter
def trans_amount(self, value):
self._trans_amount = value
@property
def trans_currency(self):
return self._trans_currency
@trans_currency.setter
def trans_currency(self, value):
self._trans_currency = value
def to_alipay_dict(self):
params = dict()
if self.alipay_order_no:
if hasattr(self.alipay_order_no, 'to_alipay_dict'):
params['alipay_order_no'] = self.alipay_order_no.to_alipay_dict()
else:
params['alipay_order_no'] = self.alipay_order_no
if self.cert_info:
if hasattr(self.cert_info, 'to_alipay_dict'):
params['cert_info'] = self.cert_info.to_alipay_dict()
else:
params['cert_info'] = self.cert_info
if self.detail_no:
if hasattr(self.detail_no, 'to_alipay_dict'):
params['detail_no'] = self.detail_no.to_alipay_dict()
else:
params['detail_no'] = self.detail_no
if self.ori_txn_info:
if hasattr(self.ori_txn_info, 'to_alipay_dict'):
params['ori_txn_info'] = self.ori_txn_info.to_alipay_dict()
else:
params['ori_txn_info'] = self.ori_txn_info
if self.payee_info:
if hasattr(self.payee_info, 'to_alipay_dict'):
params['payee_info'] = self.payee_info.to_alipay_dict()
else:
params['payee_info'] = self.payee_info
if self.reach_time:
if hasattr(self.reach_time, 'to_alipay_dict'):
params['reach_time'] = self.reach_time.to_alipay_dict()
else:
params['reach_time'] = self.reach_time
if self.remark:
if hasattr(self.remark, 'to_alipay_dict'):
params['remark'] = self.remark.to_alipay_dict()
else:
params['remark'] = self.remark
if self.settlement_currency:
if hasattr(self.settlement_currency, 'to_alipay_dict'):
params['settlement_currency'] = self.settlement_currency.to_alipay_dict()
else:
params['settlement_currency'] = self.settlement_currency
if self.trans_amount:
if hasattr(self.trans_amount, 'to_alipay_dict'):
params['trans_amount'] = self.trans_amount.to_alipay_dict()
else:
params['trans_amount'] = self.trans_amount
if self.trans_currency:
if hasattr(self.trans_currency, 'to_alipay_dict'):
params['trans_currency'] = self.trans_currency.to_alipay_dict()
else:
params['trans_currency'] = self.trans_currency
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AccTransDetail()
if 'alipay_order_no' in d:
o.alipay_order_no = d['alipay_order_no']
if 'cert_info' in d:
o.cert_info = d['cert_info']
if 'detail_no' in d:
o.detail_no = d['detail_no']
if 'ori_txn_info' in d:
o.ori_txn_info = d['ori_txn_info']
if 'payee_info' in d:
o.payee_info = d['payee_info']
if 'reach_time' in d:
o.reach_time = d['reach_time']
if 'remark' in d:
o.remark = d['remark']
if 'settlement_currency' in d:
o.settlement_currency = d['settlement_currency']
if 'trans_amount' in d:
o.trans_amount = d['trans_amount']
if 'trans_currency' in d:
o.trans_currency = d['trans_currency']
return o
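# Round-trip sketch (added; the field values are hypothetical). Kept as a comment so the
# module's behaviour is unchanged.
# detail = AccTransDetail.from_alipay_dict({'detail_no': 'D001', 'trans_amount': '10.00', 'trans_currency': 'CNY'})
# detail.to_alipay_dict()  # -> {'detail_no': 'D001', 'trans_amount': '10.00', 'trans_currency': 'CNY'}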
|
kc-py1.py | taoliu/kmer-cnt | 139 | 12633986 | #!/usr/bin/env python
import sys
base_for = "ACGT"
base_rev = "TGCA"
comp_tab = str.maketrans(base_for, base_rev)
def count_kmer(h, k, seq):
l = len(seq)
if l < k: return
for i in range(l - k + 1):
kmer_for = seq[i:(i+k)]
if 'N' in kmer_for: continue
kmer_rev = kmer_for.translate(comp_tab)[::-1]
if kmer_for < kmer_rev: kmer = kmer_for
else: kmer = kmer_rev
if kmer in h:
h[kmer] += 1
else: h[kmer] = 1
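# Worked example (added; the sequence is hypothetical): with k=3 and seq="ACGTAC" the windows
# ACG, CGT, GTA, TAC map to canonical k-mers ACG, ACG, GTA, GTA, so h becomes
# {"ACG": 2, "GTA": 2}.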
def count_stdin(k):
counter = {}
seq = []
for line in sys.stdin:
if line[0] == '>':
if len(seq) > 0:
count_kmer(counter, k, ''.join(seq))
seq = []
else:
seq.append(line[:-1])
if len(seq) > 0:
count_kmer(counter, k, ''.join(seq).upper())
return counter
def print_hist(counter):
hist = [0] * 256
for kmer in counter:
cnt = counter[kmer]
if cnt > 255: cnt = 255
hist[cnt] += 1
for i in range(1, 256):
print("{}\t{}".format(i, hist[i]))
counter = count_stdin(31)
print_hist(counter)
|
seal/routing.py | topicgit/seal | 132 | 12633987 | from channels.auth import AuthMiddlewareStack
from channels.routing import URLRouter, ProtocolTypeRouter
from django.urls import path
from k8s.consumers import EchoConsumer
application = ProtocolTypeRouter({
"websocket": AuthMiddlewareStack(
URLRouter([
path(r"ws/<slug:name>/<slug:namespace>", EchoConsumer),
])
)
}) |
tests/plugins/fail_by_itself.py | Bladez1753/lightning | 2,288 | 12634010 | #!/usr/bin/env python3
from pyln.client import Plugin
import os
import threading
import time
plugin = Plugin()
class FailThread(threading.Thread):
def __init__(self):
super().__init__()
self.start()
def run(self):
time.sleep(1)
print("Exiting!")
os._exit(1)
@plugin.init()
def init(options, configuration, plugin):
FailThread()
@plugin.method('failcmd')
def failcmd(plugin):
pass
plugin.run()
|
python3/pracmln/utils/project.py | seba90/pracmln | 123 | 12634011 | #
#
# (C) 2011-2015 by <NAME> (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import copy
from zipfile import ZipFile, ZIP_DEFLATED
import os
import sys
from dnutils import ifnone, logs
import json
import collections
from pracmln.utils import locs
logger = logs.getlogger(__name__)
class MLNProject(object):
"""
Represents a .pracmln project archive containing MLNs, DBs and config files.
"""
def __init__(self, name=None):
self._name = name if name is not None and '.pracmln' in name else '{}.pracmln'.format(name or 'unknown')
self._mlns = {}
self.learnconf = PRACMLNConfig()
self.queryconf = PRACMLNConfig()
self._emlns = {}
self._dbs = {}
self._results = {}
self._dirty = True
self.dirty_listeners = []
@property
def dirty(self):
return self._dirty or self.learnconf.dirty or self.queryconf.dirty
@dirty.setter
def dirty(self, d):
self._dirty = d
for l in self.dirty_listeners: l(d)
def addlistener(self, listener):
self.dirty_listeners.append(listener)
@property
def mlns(self):
return self._mlns
@mlns.setter
def mlns(self, mlns):
self._mlns = mlns
self.dirty = True
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name if name is not None and '.pracmln' in name else '{}.pracmln'.format(name or 'unknown')
self.dirty = True
@property
def dbs(self):
return self._dbs
@dbs.setter
def dbs(self, dbs):
self._dbs = dbs
self.dirty = True
@property
def emlns(self):
return self._emlns
@emlns.setter
def emlns(self, emlns):
self._emlns = emlns
self.dirty = True
@property
def results(self):
return self._results
@results.setter
def results(self, results):
self._results = results
self.dirty = True
def add_mln(self, name, content=''):
self._mlns[name] = content
self.dirty = True
def add_db(self, name, content=''):
self._dbs[name] = content
self.dirty = True
def add_emln(self, name, content=''):
self._emlns[name] = content
self.dirty = True
def add_result(self, name, content=''):
self._results[name] = content
self.dirty = True
def rm_mln(self, name):
del self._mlns[name]
self.dirty = True
def rm_db(self, name):
del self._dbs[name]
self.dirty = True
def rm_emln(self, name):
del self._emlns[name]
self.dirty = True
def rm_result(self, name):
del self._results[name]
self.dirty = True
def copy(self):
proj_ = copy.deepcopy(self)
return proj_
def loadmln(self, config, mln=None):
if config == 'query': config = self.queryconf
elif config == 'learn': config = self.learnconf
from pracmln.mln.base import parse_mln
path = self.path if hasattr(self, 'path') else None
return parse_mln(self.mlns[ifnone(mln, config['mln'])], projectpath=path, logic=config['logic'], grammar=config['grammar'])
def loaddb(self, mln, config, db=None):
if db is None:
if config not in ['query', 'learn']:
raise Exception('Need a database name or config.')
if config == 'query': config = self.queryconf
elif config == 'learn': config = self.learnconf
from pracmln.mln.database import parse_db
path = self.path if hasattr(self, 'path') else None
return parse_db(mln, self.dbs[ifnone(db, config['db'])], ignore_unknown_preds=config['ignore_unknown_preds'], projectpath=path)
def save(self, dirpath='.'):
filename = self.name
self.path = dirpath
with open(os.path.join(dirpath, filename), 'wb') as zf:
self.tostream(zf)
self.dirty = False
@staticmethod
def open(filepath):
fullpath = os.path.expanduser(os.path.expandvars(filepath))
name = os.path.basename(fullpath)
proj = MLNProject(name)
proj.path = os.path.dirname(fullpath)
with ZipFile(fullpath, 'r') as zf:
for member in zf.namelist():
if member == 'learn.conf':
tmpconf = eval(zf.open(member).read().decode("utf-8"))
proj.learnconf = PRACMLNConfig()
proj.learnconf.update(tmpconf)
elif member == 'query.conf':
tmpconf = eval(zf.open(member).read().decode("utf-8"))
proj.queryconf = PRACMLNConfig()
proj.queryconf.update(tmpconf)
else:
path, f = os.path.split(member)
if path == 'mlns':
proj._mlns[f] = zf.open(member).read().decode("utf-8")
elif path == 'emlns':
proj._emlns[f] = zf.open(member).read().decode("utf-8")
elif path == 'dbs':
proj._dbs[f] = zf.open(member).read().decode("utf-8")
elif path == 'results':
proj._results[f] = zf.open(member).read().decode("utf-8")
return proj
@staticmethod
def read(filename, stream, path=None):
proj = MLNProject(filename)
proj.path = path
with ZipFile(stream, 'r') as zf:
for member in zf.namelist():
if member == 'learn.conf':
tmpconf = eval(zf.open(member).read().decode("utf-8"))
proj.learnconf = PRACMLNConfig()
proj.learnconf.update(tmpconf)
elif member == 'query.conf':
tmpconf = eval(zf.open(member).read().decode("utf-8"))
proj.queryconf = PRACMLNConfig()
proj.queryconf.update(tmpconf)
else:
path, f = os.path.split(member)
if path == 'mlns':
proj._mlns[f] = zf.open(member).read().decode("utf-8")
elif path == 'emlns':
proj._emlns[f] = zf.open(member).read().decode("utf-8")
elif path == 'dbs':
proj._dbs[f] = zf.open(member).read().decode("utf-8")
elif path == 'results':
proj._results[f] = zf.open(member).read().decode("utf-8")
return proj
def tostream(self, stream):
with ZipFile(stream, 'w', ZIP_DEFLATED) as zf:
# save the learn.conf
zf.writestr('learn.conf', self.learnconf.dumps())
# save the query.conf
zf.writestr('query.conf', self.queryconf.dumps())
# save the MLNs
for name, mln in self.mlns.items():
zf.writestr(os.path.join('mlns', name), mln)
# save the model extensions
for name, emln in self.emlns.items():
zf.writestr(os.path.join('emlns', name), emln)
# save the DBs
for name, db in self.dbs.items():
zf.writestr(os.path.join('dbs', name), db)
# save the results
for name, result in self.results.items():
zf.writestr(os.path.join('results', name), result)
def write(self, stream=sys.stdout):
stream.write('MLN Project: %s\n' % self.name)
if self.learnconf is not None:
stream.write('learn.conf\n')
if self.queryconf is not None:
stream.write('query.conf\n')
stream.write('mlns/\n')
for name in self.mlns:
stream.write(' ./%s\n' % name)
stream.write('dbs/\n')
for name in self.dbs:
stream.write(' ./%s\n' % name)
stream.write('emlns/\n')
for name in self.emlns:
stream.write(' ./%s\n' % name)
stream.write('results/\n')
for name in self.results:
stream.write(' ./%s\n' % name)
def convert(data):
"""
convert everything to ASCII
"""
if isinstance(data, str):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(list(map(convert, iter(data.items()))))
elif isinstance(data, collections.Iterable):
return type(data)(list(map(convert, data)))
else:
return data
class PRACMLNConfig(object):
def __init__(self, filepath=None):
self.config_file = mlnpath(filepath) if filepath is not None else None
self.config = {}
self._dirty = False
if self.config_file is None or not self.config_file.exists:
self.config = {}
else:
if self.config_file.project is not None:
self.config = convert(dict((self.config_file.content.config)))
else:
self.config = convert(json.loads(self.config_file.content))
logger.debug('loaded %s config' % self.config_file.compose())
@property
def dirty(self):
return self._dirty
def get(self, k, d=None):
return self.config.get(k, d)
def update(self, d):
self.config.update(d)
self._dirty = True
def __getitem__(self, s):
if type(s) is slice:
prim = s.start
sec = s.stop
if self.config.get(prim) is not None:
return self.config.get(prim).get(sec)
else:
return None
else:
return self.config.get(s)
def __setitem__(self, s, v):
if type(s) is slice:
prim = s.start
sec = s.stop
p = self.config.get(prim)
if p is None:
p = {}
self.config[prim] = p
p[sec] = v
else:
self.config[s] = v
self._dirty = True
def dump(self):
if self.config_file is None:
raise Exception('no filename specified')
if self.config_file.project is not None:
project = mlnpath(self.config_file.projectloc).content
if self.config_file.file == 'query.conf':
project.queryconf.config = self.config
            elif self.config_file.file == 'learn.conf':
                project.learnconf.config = self.config
            else:
                raise Exception('Invalid config file name: %s' % self.config_file.file)
project.save(project.resolve_path())
else:
with open(os.path.join(self.config_file.resolve_path(), self.config_file.file), 'w+') as cf:
cf.write(json.dumps(self.config))
self._dirty = False
def dumps(self):
self._dirty = False
return json.dumps(self.config, indent=4)
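# Illustrative sketch (not part of the pracmln API): PRACMLNConfig supports slice-style
# subscripts for two-level access, where the slice start selects the section and the
# slice stop selects the key inside it. The key names below are made up for demonstration.
def _demo_config_slice_access():
    conf = PRACMLNConfig()
    conf['global':'verbose'] = True      # stored as {'global': {'verbose': True}}
    conf['db'] = 'smoking.db'            # plain single-level key
    assert conf['global':'verbose'] is True
    assert conf['global':'missing'] is None
    return conf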
class mlnpath(object):
"""
Loads the MLN resource content from a location.
A location can be a regular absolute or relative path to an `.mln` file. It may also refer
to an MLN inside a `.pracmln` project container. In the latter case, the `.mln` file name
needs to be separated from the `.pracmln` project location by a colon. Path specification
may also contain references to system environment variables, which are referred to of the
form ``${var}``.
:Example:
>>> mlnpath('~/mlns/classification.pracmln:model-1.mln').content
...
"""
def __init__(self, path):
        # split off the last path component (the file or project container)
self.path, file = os.path.split(path)
self._abspath = self.path.startswith('/')
if ':' in file or file.endswith('.pracmln'):
res = file.split(':')
if len(res) == 2:
self.project, self.file = res
elif len(res) == 1:
self.project, self.file = res[0], None
else:
self.project = None
self.file = file
def compose(self):
        # self.path is a plain directory string here (see os.path.split in __init__)
        p = self.path
if self.project is not None:
p += ('/' if p else '') + self.project
if self.file is not None:
p += ':' + str(self.file)
else:
p += ifnone(self.file, '', lambda x: '/' + str(x))
return p
def resolve_path(self):
p = self.path
for f in (os.path.expanduser, os.path.expandvars, os.path.normpath):
p = f(p)
return p
@property
def file(self):
"""
Returns the name of the file specified by this ``mlnpath``.
"""
return self._file
@file.setter
def file(self, f):
self._file = f
@property
def project(self):
"""
Returns the project name specified.
"""
return self._project
@project.setter
def project(self, p):
self._project = p
@property
def content(self):
"""
Returns the content of the file specified by this ``mlnpath``.
"""
path = self.resolve_path()
if self.project is not None:
proj = MLNProject.open(os.path.join(self.resolve_path(), self.project))
if self.file is None:
return proj
fileext = self.file.split('.')[-1]
if fileext == 'mln':
mln = proj.mlns.get(self.file)
                if mln is None: raise Exception('Project %s does not contain an MLN named %s' % (self.project, self.file))
return mln
elif fileext == 'db':
db = proj.dbs.get(self.file)
if db is None: raise Exception('Project %s does not contain a database named %s' % (self.project, self.file))
return db
elif fileext == 'conf':
conf = {'query.conf': proj.queryconf, 'learn.conf': proj.learnconf}.get(self.file)
if conf is None: raise Exception('Project %s does not contain a config file named %s' % (self.project, self.file))
return conf
else:
with open(os.path.join(path, self.file)) as f:
return f.read()
@property
def projectloc(self):
"""
Returns the location of the project file, if any is specified.
"""
if self.project is None:
raise Exception('No project specified in the path.')
return os.path.join(self.resolve_path(), self.project)
@property
def exists(self):
"""
Checks if the file exists.
"""
return os.path.exists(os.path.join(self.resolve_path(), ifnone(self.project, self.file)))
@property
def isabs(self):
return self._abspath
def __str__(self):
return self.compose()
def __repr__(self):
return 'mlnpath(%s)' % str(self)
if __name__ == '__main__':
# proj = MLNProject('myproject')
# proj.add_mln('model.mln', '// predicate declarations\nfoo(x)')
# proj.add_db('data.db', 'foox(X)')
# proj.save()
proj = MLNProject.open(os.path.join(locs.examples, 'smokers', 'smokers.pracmln'))
proj.write()
print(proj.queryconf.config)
|
sdk/python/pulumi_gcp/organizations/get_organization.py | sisisin/pulumi-gcp | 121 | 12634018 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetOrganizationResult',
'AwaitableGetOrganizationResult',
'get_organization',
'get_organization_output',
]
@pulumi.output_type
class GetOrganizationResult:
"""
A collection of values returned by getOrganization.
"""
def __init__(__self__, create_time=None, directory_customer_id=None, domain=None, id=None, lifecycle_state=None, name=None, org_id=None, organization=None):
if create_time and not isinstance(create_time, str):
raise TypeError("Expected argument 'create_time' to be a str")
pulumi.set(__self__, "create_time", create_time)
if directory_customer_id and not isinstance(directory_customer_id, str):
raise TypeError("Expected argument 'directory_customer_id' to be a str")
pulumi.set(__self__, "directory_customer_id", directory_customer_id)
if domain and not isinstance(domain, str):
raise TypeError("Expected argument 'domain' to be a str")
pulumi.set(__self__, "domain", domain)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if lifecycle_state and not isinstance(lifecycle_state, str):
raise TypeError("Expected argument 'lifecycle_state' to be a str")
pulumi.set(__self__, "lifecycle_state", lifecycle_state)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if org_id and not isinstance(org_id, str):
raise TypeError("Expected argument 'org_id' to be a str")
pulumi.set(__self__, "org_id", org_id)
if organization and not isinstance(organization, str):
raise TypeError("Expected argument 'organization' to be a str")
pulumi.set(__self__, "organization", organization)
@property
@pulumi.getter(name="createTime")
def create_time(self) -> str:
"""
Timestamp when the Organization was created. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter(name="directoryCustomerId")
def directory_customer_id(self) -> str:
"""
The Google for Work customer ID of the Organization.
"""
return pulumi.get(self, "directory_customer_id")
@property
@pulumi.getter
def domain(self) -> str:
return pulumi.get(self, "domain")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lifecycleState")
def lifecycle_state(self) -> str:
"""
The Organization's current lifecycle state.
"""
return pulumi.get(self, "lifecycle_state")
@property
@pulumi.getter
def name(self) -> str:
"""
The resource name of the Organization in the form `organizations/{organization_id}`.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="orgId")
def org_id(self) -> str:
"""
The Organization ID.
"""
return pulumi.get(self, "org_id")
@property
@pulumi.getter
def organization(self) -> Optional[str]:
return pulumi.get(self, "organization")
class AwaitableGetOrganizationResult(GetOrganizationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetOrganizationResult(
create_time=self.create_time,
directory_customer_id=self.directory_customer_id,
domain=self.domain,
id=self.id,
lifecycle_state=self.lifecycle_state,
name=self.name,
org_id=self.org_id,
organization=self.organization)
def get_organization(domain: Optional[str] = None,
organization: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOrganizationResult:
"""
Get information about a Google Cloud Organization. Note that you must have the `roles/resourcemanager.organizationViewer` role (or equivalent permissions) at the organization level to use this datasource.
```python
import pulumi
import pulumi_gcp as gcp
org = gcp.organizations.get_organization(domain="example.com")
sales = gcp.organizations.Folder("sales",
display_name="Sales",
parent=org.name)
```
:param str domain: The domain name of the Organization.
:param str organization: The Organization's numeric ID, including an optional `organizations/` prefix.
"""
__args__ = dict()
__args__['domain'] = domain
__args__['organization'] = organization
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:organizations/getOrganization:getOrganization', __args__, opts=opts, typ=GetOrganizationResult).value
return AwaitableGetOrganizationResult(
create_time=__ret__.create_time,
directory_customer_id=__ret__.directory_customer_id,
domain=__ret__.domain,
id=__ret__.id,
lifecycle_state=__ret__.lifecycle_state,
name=__ret__.name,
org_id=__ret__.org_id,
organization=__ret__.organization)
@_utilities.lift_output_func(get_organization)
def get_organization_output(domain: Optional[pulumi.Input[Optional[str]]] = None,
organization: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetOrganizationResult]:
"""
Get information about a Google Cloud Organization. Note that you must have the `roles/resourcemanager.organizationViewer` role (or equivalent permissions) at the organization level to use this datasource.
```python
import pulumi
import pulumi_gcp as gcp
org = gcp.organizations.get_organization(domain="example.com")
sales = gcp.organizations.Folder("sales",
display_name="Sales",
parent=org.name)
```
:param str domain: The domain name of the Organization.
:param str organization: The Organization's numeric ID, including an optional `organizations/` prefix.
"""
...
|
raiden/utils/capabilities.py | tirkarthi/raiden | 2,101 | 12634030 | <reponame>tirkarthi/raiden
from typing import Any, Dict, Union
import structlog
from raiden.constants import Capabilities
from raiden.settings import CapabilitiesConfig
log = structlog.get_logger(__name__)
def _bool_to_binary(value: Any) -> str:
if isinstance(value, bool):
return "1" if value is True else "0"
return value
def int_bool(value: str) -> Union[bool, str]:
try:
if int(value) in {0, 1}:
return bool(int(value))
else:
return value
except ValueError:
return value
def capdict_to_config(capdict: Dict[str, Any]) -> CapabilitiesConfig:
config = CapabilitiesConfig(
receive=capdict.get(Capabilities.RECEIVE.value, True),
mediate=capdict.get(Capabilities.MEDIATE.value, True),
delivery=capdict.get(Capabilities.DELIVERY.value, True),
web_rtc=capdict.get(Capabilities.WEBRTC.value, False),
to_device=capdict.get(Capabilities.TODEVICE.value, False),
immutable_metadata=capdict.get(Capabilities.IMMUTABLE_METADATA.value, False),
)
for key in capdict.keys():
if key not in [_.value for _ in Capabilities]:
setattr(config, key, capdict[key])
return config
def capconfig_to_dict(config: CapabilitiesConfig) -> Dict[str, Any]:
result = {
Capabilities.RECEIVE.value: config.receive,
Capabilities.MEDIATE.value: config.mediate,
Capabilities.DELIVERY.value: config.delivery,
Capabilities.WEBRTC.value: config.web_rtc,
Capabilities.TODEVICE.value: config.to_device,
Capabilities.IMMUTABLE_METADATA.value: config.immutable_metadata,
}
other_keys = [
key
for key in config.__dict__.keys()
if key
not in ["receive", "mediate", "delivery", "web_rtc", "to_device", "immutable_metadata"]
]
for key in other_keys:
if key not in [_.value for _ in Capabilities]:
result[key] = getattr(config, key)
return result
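# Round-trip sketch (illustrative; the sample values are arbitrary): converting a
# wire-format capabilities dict into a CapabilitiesConfig and back, showing that unknown
# keys are carried through unchanged.
def _demo_capabilities_round_trip() -> bool:
    config = capdict_to_config({Capabilities.MEDIATE.value: False, "custom_cap": "42"})
    as_dict = capconfig_to_dict(config)
    return as_dict[Capabilities.MEDIATE.value] is False and as_dict["custom_cap"] == "42"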
|
pcbmode/config.py | Hylian/pcbmode | 370 | 12634039 | <filename>pcbmode/config.py
#!/usr/bin/python
# This file is used as a global config file while PCBmodE is running.
# DO NOT EDIT THIS FILE
cfg = {} # PCBmodE configuration
brd = {} # board data
stl = {} # style data
pth = {} # path database
msg = {} # message database
stk = {} # stackup data
|
deepsleep/trainer.py | HTJR/deepsleepnet | 266 | 12634067 | <reponame>HTJR/deepsleepnet
import itertools
import os
import re
import time
from datetime import datetime
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix, f1_score
from deepsleep.data_loader import NonSeqDataLoader, SeqDataLoader
from deepsleep.model import DeepFeatureNet, DeepSleepNet
from deepsleep.optimize import adam, adam_clipping_list_lr
from deepsleep.utils import iterate_minibatches, iterate_batch_seq_minibatches
# from tensorlayer.db import TensorDB
# from tensorlayer.db import JobStatus
# db = TensorDB(ip='172.16.31.10', port=27020, db_name='DeepSleepNet', user_name='tensorlayer', password='<PASSWORD>', studyID='1')
class Trainer(object):
def __init__(
self,
interval_plot_filter=50,
interval_save_model=100,
interval_print_cm=10
):
self.interval_plot_filter = interval_plot_filter
self.interval_save_model = interval_save_model
self.interval_print_cm = interval_print_cm
def print_performance(self, sess, output_dir, network_name,
n_train_examples, n_valid_examples,
train_cm, valid_cm, epoch, n_epochs,
train_duration, train_loss, train_acc, train_f1,
valid_duration, valid_loss, valid_acc, valid_f1):
# Get regularization loss
        train_reg_loss = tf.add_n(tf.compat.v1.get_collection("losses", scope=network_name + "/"))
train_reg_loss_value = sess.run(train_reg_loss)
valid_reg_loss_value = train_reg_loss_value
# Print performance
if ((epoch + 1) % self.interval_print_cm == 0) or ((epoch + 1) == n_epochs):
print(" ")
print("[{}] epoch {}:".format(
datetime.now(), epoch+1
))
print((
"train ({:.3f} sec): n={}, loss={:.3f} ({:.3f}), acc={:.3f}, "
"f1={:.3f}".format(
train_duration, n_train_examples,
train_loss, train_reg_loss_value,
train_acc, train_f1
)
))
print(train_cm)
print((
"valid ({:.3f} sec): n={}, loss={:.3f} ({:.3f}), acc={:.3f}, "
"f1={:.3f}".format(
valid_duration, n_valid_examples,
valid_loss, valid_reg_loss_value,
valid_acc, valid_f1
)
))
print(valid_cm)
print(" ")
else:
print((
"epoch {}: "
"train ({:.2f} sec): n={}, loss={:.3f} ({:.3f}), "
"acc={:.3f}, f1={:.3f} | "
"valid ({:.2f} sec): n={}, loss={:.3f} ({:.3f}), "
"acc={:.3f}, f1={:.3f}".format(
epoch+1,
train_duration, n_train_examples,
train_loss, train_reg_loss_value,
train_acc, train_f1,
valid_duration, n_valid_examples,
valid_loss, valid_reg_loss_value,
valid_acc, valid_f1
)
))
def print_network(self, network):
print("inputs ({}): {}".format(
network.inputs.name, network.inputs.get_shape()
))
print("targets ({}): {}".format(
network.targets.name, network.targets.get_shape()
))
for name, act in network.activations:
print("{} ({}): {}".format(name, act.name, act.get_shape()))
print(" ")
def plot_filters(self, sess, epoch, reg_exp, output_dir, n_viz_filters):
conv_weight = re.compile(reg_exp)
for v in tf.compat.v1.trainable_variables():
value = sess.run(v)
if conv_weight.match(v.name):
weights = np.squeeze(value)
# Only plot conv that has one channel
if len(weights.shape) > 2:
continue
weights = weights.T
plt.figure(figsize=(18, 10))
plt.title(v.name)
for w_idx in range(n_viz_filters):
plt.subplot(4, 4, w_idx+1)
plt.plot(weights[w_idx])
plt.axis("tight")
plt.savefig(os.path.join(
output_dir, "{}_{}.png".format(
v.name.replace("/", "_").replace(":0", ""),
epoch+1
)
))
plt.close("all")
class DeepFeatureNetTrainer(Trainer):
def __init__(
self,
data_dir,
output_dir,
n_folds,
fold_idx,
batch_size,
input_dims,
n_classes,
interval_plot_filter=50,
interval_save_model=100,
interval_print_cm=10
):
super(self.__class__, self).__init__(
interval_plot_filter=interval_plot_filter,
interval_save_model=interval_save_model,
interval_print_cm=interval_print_cm
)
self.data_dir = data_dir
self.output_dir = output_dir
self.n_folds = n_folds
self.fold_idx = fold_idx
self.batch_size = batch_size
self.input_dims = input_dims
self.n_classes = n_classes
def _run_epoch(self, sess, network, inputs, targets, train_op, is_train):
start_time = time.time()
y = []
y_true = []
total_loss, n_batches = 0.0, 0
is_shuffle = True if is_train else False
for x_batch, y_batch in iterate_minibatches(inputs,
targets,
self.batch_size,
shuffle=is_shuffle):
feed_dict = {
network.input_var: x_batch,
network.target_var: y_batch
}
# # MONITORING
# if n_batches == 0:
# print "BEFORE UPDATE [is_train={}]".format(is_train)
# for n, v in network.monitor_vars[:2]:
# val = sess.run(v, feed_dict=feed_dict)
# val = np.transpose(val, axes=(3, 0, 1, 2)).reshape((64, -1))
# mean_val = np.mean(val, axis=1)
# var_val = np.var(val, axis=1)
# print "{}: {}\nmean_shape={}, mean_val={}\nvar_shape={}, var_val={}".format(
# n, val.shape, mean_val.shape, mean_val[:5], var_val.shape, var_val[:5]
# )
_, loss_value, y_pred = sess.run(
[train_op, network.loss_op, network.pred_op],
feed_dict=feed_dict
)
# # MONITORING
# if n_batches == 0:
# print "AFTER UPDATE [is_train={}]".format(is_train)
# for n, v in network.monitor_vars[:2]:
# val = sess.run(v, feed_dict=feed_dict)
# val = np.transpose(val, axes=(3, 0, 1, 2)).reshape((64, -1))
# mean_val = np.mean(val, axis=1)
# var_val = np.var(val, axis=1)
# print "{}: {}\nmean_shape={}, mean_val={}\nvar_shape={}, var_val={}".format(
# n, val.shape, mean_val.shape, mean_val[:5], var_val.shape, var_val[:5]
# )
total_loss += loss_value
n_batches += 1
y.append(y_pred)
y_true.append(y_batch)
# Check the loss value
assert not np.isnan(loss_value), \
"Model diverged with loss = NaN"
duration = time.time() - start_time
total_loss /= n_batches
total_y_pred = np.hstack(y)
total_y_true = np.hstack(y_true)
return total_y_true, total_y_pred, total_loss, duration
def train(self, n_epochs, resume):
with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
# Build training and validation networks
train_net = DeepFeatureNet(
batch_size=self.batch_size,
input_dims=self.input_dims,
n_classes=self.n_classes,
is_train=True,
reuse_params=False,
use_dropout=True
)
valid_net = DeepFeatureNet(
batch_size=self.batch_size,
input_dims=self.input_dims,
n_classes=self.n_classes,
is_train=False,
reuse_params=True,
use_dropout=True
)
# Initialize parameters
train_net.init_ops()
valid_net.init_ops()
print("Network (layers={})".format(len(train_net.activations)))
print("inputs ({}): {}".format(
train_net.input_var.name, train_net.input_var.get_shape()
))
print("targets ({}): {}".format(
train_net.target_var.name, train_net.target_var.get_shape()
))
for name, act in train_net.activations:
print("{} ({}): {}".format(name, act.name, act.get_shape()))
print(" ")
# Define optimization operations
train_op, grads_and_vars_op = adam(
loss=train_net.loss_op,
lr=1e-4,
train_vars=tf.compat.v1.trainable_variables()
)
# Make subdirectory for pretraining
output_dir = os.path.join(self.output_dir, "fold{}".format(self.fold_idx), train_net.name)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Global step for resume training
with tf.compat.v1.variable_scope(train_net.name) as scope:
global_step = tf.Variable(0, name="global_step", trainable=False)
# print "Trainable Variables:"
# for v in tf.compat.v1.trainable_variables():
# print v.name, v.get_shape()
# print " "
# print "All Variables:"
# for v in tf.compat.v1.global_variables():
# print v.name, v.get_shape()
# print " "
# Create a saver
saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables(), max_to_keep=0)
# Initialize variables in the graph
sess.run(tf.compat.v1.global_variables_initializer())
# Add the graph structure into the Tensorboard writer
train_summary_wrt = tf.compat.v1.summary.FileWriter(
os.path.join(output_dir, "train_summary"),
sess.graph
)
# Resume the training if applicable
if resume:
if os.path.exists(output_dir):
if os.path.isfile(os.path.join(output_dir, "checkpoint")):
# Restore the last checkpoint
saver.restore(sess, tf.train.latest_checkpoint(output_dir))
print("Model restored")
print("[{}] Resume pre-training ...\n".format(datetime.now()))
else:
print("[{}] Start pre-training ...\n".format(datetime.now()))
else:
print("[{}] Start pre-training ...\n".format(datetime.now()))
# Load data
if sess.run(global_step) < n_epochs:
data_loader = NonSeqDataLoader(
data_dir=self.data_dir,
n_folds=self.n_folds,
fold_idx=self.fold_idx
)
x_train, y_train, x_valid, y_valid = data_loader.load_train_data()
# Performance history
all_train_loss = np.zeros(n_epochs)
all_train_acc = np.zeros(n_epochs)
all_train_f1 = np.zeros(n_epochs)
all_valid_loss = np.zeros(n_epochs)
all_valid_acc = np.zeros(n_epochs)
all_valid_f1 = np.zeros(n_epochs)
# Loop each epoch
for epoch in range(sess.run(global_step), n_epochs):
# # MONITORING
# print "BEFORE TRAINING"
# monitor_vars = [
# "deepfeaturenet/l1_conv/bn/moving_mean:0",
# "deepfeaturenet/l1_conv/bn/moving_variance:0"
# ]
# for n in monitor_vars:
# v = tf.compat.v1.get_default_graph().get_tensor_by_name(n)
# val = sess.run(v)
# print "{}: {}, {}".format(n, val.shape, val[:5])
# Update parameters and compute loss of training set
y_true_train, y_pred_train, train_loss, train_duration = \
self._run_epoch(
sess=sess, network=train_net,
inputs=x_train, targets=y_train,
train_op=train_op,
is_train=True
)
n_train_examples = len(y_true_train)
train_cm = confusion_matrix(y_true_train, y_pred_train)
train_acc = np.mean(y_true_train == y_pred_train)
train_f1 = f1_score(y_true_train, y_pred_train, average="macro")
# # MONITORING
# print "AFTER TRAINING and BEFORE VALID"
# for n in monitor_vars:
# v = tf.compat.v1.get_default_graph().get_tensor_by_name(n)
# val = sess.run(v)
# print "{}: {}, {}".format(n, val.shape, val[:5])
# Evaluate the model on the validation set
y_true_val, y_pred_val, valid_loss, valid_duration = \
self._run_epoch(
sess=sess, network=valid_net,
inputs=x_valid, targets=y_valid,
train_op=tf.no_op(),
is_train=False
)
n_valid_examples = len(y_true_val)
valid_cm = confusion_matrix(y_true_val, y_pred_val)
valid_acc = np.mean(y_true_val == y_pred_val)
valid_f1 = f1_score(y_true_val, y_pred_val, average="macro")
# db.train_log(args={
# "n_folds": self.n_folds,
# "fold_idx": self.fold_idx,
# "epoch": epoch,
# "train_step": "pretraining",
# "datetime": datetime.utcnow(),
# "model": train_net.name,
# "n_train_examples": n_train_examples,
# "n_valid_examples": n_valid_examples,
# "train_loss": train_loss,
# "train_acc": train_acc,
# "train_f1": train_f1,
# "train_duration": train_duration,
# "valid_loss": valid_loss,
# "valid_acc": valid_acc,
# "valid_f1": valid_f1,
# "valid_duration": valid_duration,
# })
all_train_loss[epoch] = train_loss
all_train_acc[epoch] = train_acc
all_train_f1[epoch] = train_f1
all_valid_loss[epoch] = valid_loss
all_valid_acc[epoch] = valid_acc
all_valid_f1[epoch] = valid_f1
# Report performance
self.print_performance(
sess, output_dir, train_net.name,
n_train_examples, n_valid_examples,
train_cm, valid_cm, epoch, n_epochs,
train_duration, train_loss, train_acc, train_f1,
valid_duration, valid_loss, valid_acc, valid_f1
)
# Save performance history
np.savez(
os.path.join(output_dir, "perf_fold{}.npz".format(self.fold_idx)),
train_loss=all_train_loss, valid_loss=all_valid_loss,
train_acc=all_train_acc, valid_acc=all_valid_acc,
train_f1=all_train_f1, valid_f1=all_valid_f1,
y_true_val=np.asarray(y_true_val),
y_pred_val=np.asarray(y_pred_val)
)
# Visualize weights from convolutional layers
if ((epoch + 1) % self.interval_plot_filter == 0) or ((epoch + 1) == n_epochs):
                self.plot_filters(sess, epoch, train_net.name + r"(_[0-9])?/l[0-9]+_conv/(weights)", output_dir, 16)
                self.plot_filters(sess, epoch, train_net.name + r"(_[0-9])?/l[0-9]+_conv/conv1d/(weights)", output_dir, 16)
# Save checkpoint
sess.run(tf.compat.v1.assign(global_step, epoch+1))
if ((epoch + 1) % self.interval_save_model == 0) or ((epoch + 1) == n_epochs):
start_time = time.time()
save_path = os.path.join(
output_dir, "model_fold{}.ckpt".format(self.fold_idx)
)
saver.save(sess, save_path, global_step=global_step)
duration = time.time() - start_time
print("Saved model checkpoint ({:.3f} sec)".format(duration))
                # Save parameters
if ((epoch + 1) % self.interval_save_model == 0) or ((epoch + 1) == n_epochs):
start_time = time.time()
save_dict = {}
for v in tf.compat.v1.global_variables():
save_dict[v.name] = sess.run(v)
np.savez(
os.path.join(
output_dir,
"params_fold{}.npz".format(self.fold_idx)),
**save_dict
)
duration = time.time() - start_time
print("Saved trained parameters ({:.3f} sec)".format(duration))
print("Finish pre-training")
return os.path.join(output_dir, "params_fold{}.npz".format(self.fold_idx))
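# Typical two-stage flow (sketch only; the argument values below are illustrative, not the
# ones used by the original training scripts): pretrain the feature net, then hand the
# saved parameter file to DeepSleepNetTrainer.finetune defined below.
#
#   pretrainer = DeepFeatureNetTrainer(data_dir='data', output_dir='output', n_folds=20,
#                                      fold_idx=0, batch_size=100, input_dims=3000, n_classes=5)
#   pretrained_path = pretrainer.train(n_epochs=100, resume=False)
#   finetuner = DeepSleepNetTrainer(..., seq_length=25, n_rnn_layers=2, return_last=False)
#   finetuner.finetune(pretrained_path, n_epochs=200, resume=False)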
class DeepSleepNetTrainer(Trainer):
def __init__(
self,
data_dir,
output_dir,
n_folds,
fold_idx,
batch_size,
input_dims,
n_classes,
seq_length,
n_rnn_layers,
return_last,
interval_plot_filter=50,
interval_save_model=100,
interval_print_cm=10
):
super(self.__class__, self).__init__(
interval_plot_filter=interval_plot_filter,
interval_save_model=interval_save_model,
interval_print_cm=interval_print_cm
)
self.data_dir = data_dir
self.output_dir = output_dir
self.n_folds = n_folds
self.fold_idx = fold_idx
self.batch_size = batch_size
self.input_dims = input_dims
self.n_classes = n_classes
self.seq_length = seq_length
self.n_rnn_layers = n_rnn_layers
self.return_last = return_last
def _run_epoch(self, sess, network, inputs, targets, train_op, is_train):
start_time = time.time()
y = []
y_true = []
total_loss, n_batches = 0.0, 0
for sub_idx, each_data in enumerate(zip(inputs, targets)):
each_x, each_y = each_data
# # Initialize state of LSTM - Unidirectional LSTM
# state = sess.run(network.initial_state)
# Initialize state of LSTM - Bidirectional LSTM
fw_state = sess.run(network.fw_initial_state)
bw_state = sess.run(network.bw_initial_state)
for x_batch, y_batch in iterate_batch_seq_minibatches(inputs=each_x,
targets=each_y,
batch_size=self.batch_size,
seq_length=self.seq_length):
feed_dict = {
network.input_var: x_batch,
network.target_var: y_batch
}
# Unidirectional LSTM
# for i, (c, h) in enumerate(network.initial_state):
# feed_dict[c] = state[i].c
# feed_dict[h] = state[i].h
# _, loss_value, y_pred, state = sess.run(
# [train_op, network.loss_op, network.pred_op, network.final_state],
# feed_dict=feed_dict
# )
for i, (c, h) in enumerate(network.fw_initial_state):
feed_dict[c] = fw_state[i].c
feed_dict[h] = fw_state[i].h
for i, (c, h) in enumerate(network.bw_initial_state):
feed_dict[c] = bw_state[i].c
feed_dict[h] = bw_state[i].h
_, loss_value, y_pred, fw_state, bw_state = sess.run(
[train_op, network.loss_op, network.pred_op, network.fw_final_state, network.bw_final_state],
feed_dict=feed_dict
)
total_loss += loss_value
n_batches += 1
y.append(y_pred)
y_true.append(y_batch)
# Check the loss value
assert not np.isnan(loss_value), \
"Model diverged with loss = NaN"
duration = time.time() - start_time
total_loss /= n_batches
total_y_pred = np.hstack(y)
total_y_true = np.hstack(y_true)
return total_y_true, total_y_pred, total_loss, duration
def finetune(self, pretrained_model_path, n_epochs, resume):
pretrained_model_name = "deepfeaturenet"
with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
# Build training and validation networks
train_net = DeepSleepNet(
batch_size=self.batch_size,
input_dims=self.input_dims,
n_classes=self.n_classes,
seq_length=self.seq_length,
n_rnn_layers=self.n_rnn_layers,
return_last=self.return_last,
is_train=True,
reuse_params=False,
use_dropout_feature=True,
use_dropout_sequence=True
)
valid_net = DeepSleepNet(
batch_size=self.batch_size,
input_dims=self.input_dims,
n_classes=self.n_classes,
seq_length=self.seq_length,
n_rnn_layers=self.n_rnn_layers,
return_last=self.return_last,
is_train=False,
reuse_params=True,
use_dropout_feature=True,
use_dropout_sequence=True
)
# Initialize parameters
train_net.init_ops()
valid_net.init_ops()
print("Network (layers={})".format(len(train_net.activations)))
print("inputs ({}): {}".format(
train_net.input_var.name, train_net.input_var.get_shape()
))
print("targets ({}): {}".format(
train_net.target_var.name, train_net.target_var.get_shape()
))
for name, act in train_net.activations:
print("{} ({}): {}".format(name, act.name, act.get_shape()))
print(" ")
# Get list of all pretrained parameters
with np.load(pretrained_model_path) as f:
pretrain_params = list(f.keys())
# Remove the network-name-prefix
for i in range(len(pretrain_params)):
pretrain_params[i] = pretrain_params[i].replace(pretrained_model_name, "network")
# Get trainable variables of the pretrained, and new ones
train_vars1 = [v for v in tf.compat.v1.trainable_variables()
if v.name.replace(train_net.name, "network") in pretrain_params]
train_vars2 = list(set(tf.compat.v1.trainable_variables()) - set(train_vars1))
# Optimizer that use different learning rates for each part of the network
train_op, grads_and_vars_op = adam_clipping_list_lr(
loss=train_net.loss_op,
list_lrs=[1e-6, 1e-4],
list_train_vars=[train_vars1, train_vars2],
clip_value=10.0
)
# Make subdirectory for pretraining
output_dir = os.path.join(self.output_dir, "fold{}".format(self.fold_idx), train_net.name)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Global step for resume training
with tf.compat.v1.variable_scope(train_net.name) as scope:
global_step = tf.Variable(0, name="global_step", trainable=False)
# print "Pretrained parameters:"
# for v in train_vars1:
# print v.name
# print " "
# print "Optimizing parameters:"
# for v in train_vars2:
# print v.name
# print " "
# print "Trainable Variables:"
# for v in tf.compat.v1.trainable_variables():
# print v.name, v.get_shape()
# print " "
# print "All Variables:"
# for v in tf.compat.v1.global_variables():
# print v.name, v.get_shape()
# print " "
# Create a saver
saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables(), max_to_keep=0)
# Initialize variables in the graph
sess.run(tf.compat.v1.global_variables_initializer())
# Add the graph structure into the Tensorboard writer
train_summary_wrt = tf.compat.v1.summary.FileWriter(
os.path.join(output_dir, "train_summary"),
sess.graph
)
# Resume the training if applicable
load_pretrain = False
if resume:
if os.path.exists(output_dir):
if os.path.isfile(os.path.join(output_dir, "checkpoint")):
# Restore the last checkpoint
saver.restore(sess, tf.train.latest_checkpoint(output_dir))
print("Model restored")
print("[{}] Resume fine-tuning ...\n".format(datetime.now()))
else:
load_pretrain = True
else:
load_pretrain = True
if load_pretrain:
# Load pre-trained model
print("Loading pre-trained parameters to the model ...")
print(" | --> {} from {}".format(pretrained_model_name, pretrained_model_path))
with np.load(pretrained_model_path) as f:
for k, v in f.items():
if "Adam" in k or "softmax" in k or "power" in k or "global_step" in k:
continue
prev_k = k
k = k.replace(pretrained_model_name, train_net.name)
tmp_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name(k)
sess.run(
tf.compat.v1.assign(
tmp_tensor,
v
)
)
print("assigned {}: {} to {}: {}".format(
prev_k, v.shape, k, tmp_tensor.get_shape()
))
print(" ")
print("[{}] Start fine-tuning ...\n".format(datetime.now()))
# Load data
if sess.run(global_step) < n_epochs:
data_loader = SeqDataLoader(
data_dir=self.data_dir,
n_folds=self.n_folds,
fold_idx=self.fold_idx
)
x_train, y_train, x_valid, y_valid = data_loader.load_train_data()
# Performance history
all_train_loss = np.zeros(n_epochs)
all_train_acc = np.zeros(n_epochs)
all_train_f1 = np.zeros(n_epochs)
all_valid_loss = np.zeros(n_epochs)
all_valid_acc = np.zeros(n_epochs)
all_valid_f1 = np.zeros(n_epochs)
# Loop each epoch
for epoch in range(sess.run(global_step), n_epochs):
# Update parameters and compute loss of training set
y_true_train, y_pred_train, train_loss, train_duration = \
self._run_epoch(
sess=sess, network=train_net,
inputs=x_train, targets=y_train,
train_op=train_op,
is_train=True
)
n_train_examples = len(y_true_train)
train_cm = confusion_matrix(y_true_train, y_pred_train)
train_acc = np.mean(y_true_train == y_pred_train)
train_f1 = f1_score(y_true_train, y_pred_train, average="macro")
# Evaluate the model on the validation set
y_true_val, y_pred_val, valid_loss, valid_duration = \
self._run_epoch(
sess=sess, network=valid_net,
inputs=x_valid, targets=y_valid,
train_op=tf.no_op(),
is_train=False
)
n_valid_examples = len(y_true_val)
valid_cm = confusion_matrix(y_true_val, y_pred_val)
valid_acc = np.mean(y_true_val == y_pred_val)
valid_f1 = f1_score(y_true_val, y_pred_val, average="macro")
all_train_loss[epoch] = train_loss
all_train_acc[epoch] = train_acc
all_train_f1[epoch] = train_f1
all_valid_loss[epoch] = valid_loss
all_valid_acc[epoch] = valid_acc
all_valid_f1[epoch] = valid_f1
# db.train_log(args={
# "n_folds": self.n_folds,
# "fold_idx": self.fold_idx,
# "epoch": epoch,
# "train_step": "finetuning",
# "datetime": datetime.utcnow(),
# "model": train_net.name,
# "n_train_examples": n_train_examples,
# "n_valid_examples": n_valid_examples,
# "train_loss": train_loss,
# "train_acc": train_acc,
# "train_f1": train_f1,
# "train_duration": train_duration,
# "valid_loss": valid_loss,
# "valid_acc": valid_acc,
# "valid_f1": valid_f1,
# "valid_duration": valid_duration,
# })
# Report performance
self.print_performance(
sess, output_dir, train_net.name,
n_train_examples, n_valid_examples,
train_cm, valid_cm, epoch, n_epochs,
train_duration, train_loss, train_acc, train_f1,
valid_duration, valid_loss, valid_acc, valid_f1
)
# Save performance history
np.savez(
os.path.join(output_dir, "perf_fold{}.npz".format(self.fold_idx)),
train_loss=all_train_loss, valid_loss=all_valid_loss,
train_acc=all_train_acc, valid_acc=all_valid_acc,
train_f1=all_train_f1, valid_f1=all_valid_f1,
y_true_val=np.asarray(y_true_val),
y_pred_val=np.asarray(y_pred_val)
)
# Visualize weights from convolutional layers
if ((epoch + 1) % self.interval_plot_filter == 0) or ((epoch + 1) == n_epochs):
                self.plot_filters(sess, epoch, train_net.name + r"(_[0-9])?/l[0-9]+_conv/(weights)", output_dir, 16)
                self.plot_filters(sess, epoch, train_net.name + r"(_[0-9])?/l[0-9]+_conv/conv1d/(weights)", output_dir, 16)
# Save checkpoint
sess.run(tf.compat.v1.assign(global_step, epoch+1))
if ((epoch + 1) % self.interval_save_model == 0) or ((epoch + 1) == n_epochs):
start_time = time.time()
save_path = os.path.join(
output_dir, "model_fold{}.ckpt".format(self.fold_idx)
)
saver.save(sess, save_path, global_step=global_step)
duration = time.time() - start_time
print("Saved model checkpoint ({:.3f} sec)".format(duration))
                # Save parameters
if ((epoch + 1) % self.interval_save_model == 0) or ((epoch + 1) == n_epochs):
start_time = time.time()
save_dict = {}
for v in tf.compat.v1.global_variables():
save_dict[v.name] = sess.run(v)
np.savez(
os.path.join(
output_dir,
"params_fold{}.npz".format(self.fold_idx)),
**save_dict
)
duration = time.time() - start_time
print("Saved trained parameters ({:.3f} sec)".format(duration))
print("Finish fine-tuning")
return os.path.join(output_dir, "params_fold{}.npz".format(self.fold_idx))
|
src/python/web/serialize/serialize_auto_queue.py | annihilatethee/seedsync | 255 | 12634093 | # Copyright 2017, <NAME>, All rights reserved.
import json
from typing import List
from controller import AutoQueuePattern
class SerializeAutoQueue:
__KEY_PATTERN = "pattern"
@staticmethod
def patterns(patterns: List[AutoQueuePattern]) -> str:
patterns_list = []
for pattern in patterns:
patterns_list.append({
SerializeAutoQueue.__KEY_PATTERN: pattern.pattern
})
return json.dumps(patterns_list)
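# Usage sketch (illustrative; the AutoQueuePattern keyword argument is an assumption based
# on how .pattern is read above): serialising one pattern to the JSON payload for the web UI.
def _demo_serialize_patterns() -> str:
    return SerializeAutoQueue.patterns([AutoQueuePattern(pattern="*.mkv")])  # '[{"pattern": "*.mkv"}]'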
|
venv/lib/python3.9/site-packages/pendulum/lang/da.py | qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3 | 224 | 12634094 | # -*- coding: utf-8 -*-
translations = {
# Days
'days': {
0: 'søndag',
1: 'mandag',
2: 'tirsdag',
3: 'onsdag',
4: 'torsdag',
5: 'fredag',
6: 'lørdag'
},
'days_abbrev': {
0: 'søn',
1: 'man',
2: 'tir',
3: 'ons',
4: 'tor',
5: 'fre',
6: 'lør'
},
# Months
'months': {
1: 'januar',
2: 'februar',
3: 'marts',
4: 'april',
5: 'maj',
6: 'juni',
7: 'juli',
8: 'august',
9: 'september',
10: 'oktober',
11: 'november',
12: 'december',
},
'months_abbrev': {
1: 'jan',
2: 'feb',
3: 'mar',
4: 'apr',
5: 'maj',
6: 'jun',
7: 'jul',
8: 'aug',
9: 'sep',
10: 'okt',
11: 'nov',
12: 'dec',
},
# Units of time
'year': ['{count} år', '{count} år'],
'month': ['{count} måned', '{count} måneder'],
'week': ['{count} uge', '{count} uger'],
'day': ['{count} dag', '{count} dage'],
'hour': ['{count} time', '{count} timer'],
'minute': ['{count} minut', '{count} minutter'],
'second': ['{count} sekund', '{count} sekunder'],
# Relative time
'ago': '{time} siden',
'from_now': 'om {time}',
'after': '{time} efter',
'before': '{time} før',
# Date formats
'date_formats': {
'LTS': 'HH:mm:ss',
'LT': 'HH:mm',
'LLLL': 'dddd [d.] D. MMMM YYYY HH:mm',
'LLL': 'D. MMMM YYYY HH:mm',
'LL': 'D. MMMM YYYY',
'L': 'DD/MM/YYYY',
},
}
|
setup.py | lantunes/cellpylib | 124 | 12634095 | from setuptools import setup
setup(name="cellpylib",
version="1.1.0",
description="CellPyLib, A library for working with Cellular Automata, for Python.",
long_description="CellPyLib is a library for working with Cellular Automata, for Python. "
"Currently, only 1- and 2-dimensional k-color cellular automata with "
"periodic boundary conditions are supported. The size of the "
"neighbourhood can be adjusted. ",
license="Apache License 2.0",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
],
url='http://github.com/lantunes/cellpylib',
author="<NAME>",
author_email="<EMAIL>",
packages=["cellpylib"],
keywords=["cellular automata", "complexity", "complex systems", "computation", "non-linear dynamics"],
python_requires='>3.6',
install_requires=["numpy >= 1.15.4", "matplotlib >= 3.0.2"])
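# Usage sketch for the library this package installs (the API calls are assumed from the
# cellpylib documentation; shown only to illustrate the capabilities described above):
#
#   import cellpylib as cpl
#   ca = cpl.init_simple(200)                     # one row of 200 cells, centre cell set to 1
#   ca = cpl.evolve(ca, timesteps=100,
#                   apply_rule=lambda n, c, t: cpl.nks_rule(n, 30))
#   cpl.plot(ca)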
|
platypush/message/response/__init__.py | RichardChiang/platypush | 228 | 12634123 | <reponame>RichardChiang/platypush
import json
import time
from platypush.message import Message
class Response(Message):
""" Response message class """
def __init__(self, target=None, origin=None, id=None, output=None, errors=None,
timestamp=None, disable_logging=False):
"""
:param target: Target
:type target: str
:param origin: Origin
:type origin: str
:param output: Output
:param errors: Errors
:param id: Message ID this response refers to
:type id: str
:param timestamp: Message timestamp
:type timestamp: float
"""
super().__init__(timestamp=timestamp)
self.target = target
self.output = self._parse_msg(output)
self.errors = self._parse_msg(errors or [])
self.origin = origin
self.id = id
self.disable_logging = disable_logging
def is_error(self):
""" Returns True if the response has errors """
return len(self.errors) != 0
@classmethod
def _parse_msg(cls, msg):
if isinstance(msg, bytes) or isinstance(msg, bytearray):
msg = msg.decode('utf-8')
if isinstance(msg, str):
try:
msg = json.loads(msg.strip())
except ValueError:
pass
return msg
@classmethod
def build(cls, msg):
msg = super().parse(msg)
args = {
'target': msg['target'],
'output': msg['response']['output'],
'errors': msg['response']['errors'],
'timestamp': msg['_timestamp'] if '_timestamp' in msg else time.time(),
'disable_logging': msg.get('_disable_logging', False),
}
if 'id' in msg:
args['id'] = msg['id']
if 'origin' in msg:
args['origin'] = msg['origin']
return cls(**args)
def __str__(self):
"""
Overrides the ``str()`` operator and converts
the message into a UTF-8 JSON string
"""
output = self.output if self.output is not None else {
'success': True if not self.errors else False
}
response_dict = {
'id': self.id,
'type': 'response',
'target': self.target if hasattr(self, 'target') else None,
'origin': self.origin if hasattr(self, 'origin') else None,
'_timestamp': self.timestamp,
'response': {
'output': output,
'errors': self.errors,
},
}
if self.disable_logging:
response_dict['_disable_logging'] = self.disable_logging
return json.dumps(response_dict, cls=self.Encoder)
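# Minimal sketch (field values are arbitrary): building a Response and reading back the
# UTF-8 JSON envelope produced by __str__ above.
def _demo_response_serialisation() -> dict:
    response = Response(target='http', origin='somehost', id='req-1', output={'temperature': 21.5})
    return json.loads(str(response))  # {'id': 'req-1', 'type': 'response', ..., 'response': {...}}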
# vim:sw=4:ts=4:et:
|
fbgemm_gpu/fbgemm_gpu/split_embedding_codegen_lookup_invokers.py | garroud/FBGEMM | 792 | 12634130 | <reponame>garroud/FBGEMM<gh_stars>100-1000
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import python.lookup_adagrad as lookup_adagrad # noqa: F401
import python.lookup_adam as lookup_adam # noqa: F401
import python.lookup_approx_rowwise_adagrad as lookup_approx_rowwise_adagrad # noqa: F401
import python.lookup_approx_sgd as lookup_approx_sgd # noqa: F401
import python.lookup_args as lookup_args # noqa: F401
import python.lookup_lamb as lookup_lamb # noqa: F401
import python.lookup_lars_sgd as lookup_lars_sgd # noqa: F401
import python.lookup_partial_rowwise_adam as lookup_partial_rowwise_adam # noqa: F401
import python.lookup_partial_rowwise_lamb as lookup_partial_rowwise_lamb # noqa: F401
import python.lookup_rowwise_adagrad as lookup_rowwise_adagrad # noqa: F401
import python.lookup_sgd as lookup_sgd # noqa: F401
|
solutions/reverse_image_search/object_detection/server/src/operations/load.py | kilianovski/bootcamp | 789 | 12634135 | import os
import sys
from diskcache import Cache
sys.path.append("..")
from config import DEFAULT_TABLE, CACHE_DIR
def get_imgs_path(path):
pics = os.listdir(path)
pics.sort()
paths = []
for f in pics:
if f.endswith('.jpg'):
paths.append(os.path.join(path, f))
return paths
def get_num_vecs(cache, model, imgs_folder):
paths = get_imgs_path(imgs_folder)
cache['total'] = len(paths)
vectors = []
obj_num = []
current = 0
for x in paths:
vecs = model.execute(x)
for vec in vecs:
vectors.append(vec)
obj_num.append(len(vecs))
current += 1
cache['current'] = current
return vectors, obj_num
def match_ids_and_imgs(imgs, obj_num):
matched_imgs = []
for i, num in enumerate(obj_num):
for _ in range(num):
matched_imgs.append(imgs[i])
return matched_imgs
def format_data(ids, names):
data = []
for i in range(len(ids)):
value = (str(ids[i]), names[i])
data.append(value)
return data
def do_load(table_name, database_path, model, mil_cli, mysql_cli):
if not table_name:
table_name = DEFAULT_TABLE
cache = Cache(CACHE_DIR)
vectors, obj_num = get_num_vecs(cache, model, database_path)
ids = mil_cli.insert(table_name, vectors)
mil_cli.create_index(table_name)
imgs = get_imgs_path(database_path)
matched_imgs = match_ids_and_imgs(imgs, obj_num)
mysql_cli.create_mysql_table(table_name)
mysql_cli.load_data_to_mysql(table_name, format_data(ids, matched_imgs))
return len(ids)
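# End-to-end sketch (illustrative; the model and client objects are stand-ins for the object
# detector, Milvus helper and MySQL helper that the web service wires up elsewhere):
#
#   count = do_load('object_table', '/data/images', model, milvus_client, mysql_client)
#   print('inserted %d object vectors' % count)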
|
chrome/common/extensions/docs/server2/link_converter.py | iplo/Chain | 231 | 12634174 | <reponame>iplo/Chain
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script converts old-style <a> links to API docs to the new $ref links.
# See reference_resolver.py for more info on the format of $ref links.
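# For example, a legacy link whose text matches the target, such as
# <a href="tabs.html#method-create">tabs.create</a>
# is rewritten to the bare form
# $ref:tabs.create
# while a link whose text differs, such as
# <a href="tabs.html#method-create">Create a tab</a>
# becomes the titled form
# $ref:[tabs.create Create a tab]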
import optparse
import os
import re
from docs_server_utils import SanitizeAPIName
def _ReadFile(filename):
with open(filename) as f:
return f.read()
def _WriteFile(filename, contents):
with open(filename, 'w') as f:
f.write(contents)
def _Replace(matches, filename):
title = matches.group(3)
if matches.group(2).count('#') != 1:
return '<a%shref=%s>%s</a>' % (matches.group(1),
matches.group(2),
title)
clean = (matches.group(2).replace('\\', '')
.replace("'", '')
.replace('"', '')
.replace('/', ''))
page, link = clean.split('#')
if not page:
page = '%s.html' % SanitizeAPIName(filename.rsplit(os.sep, 1)[-1])
if (not link.startswith('property-') and
not link.startswith('type-') and
not link.startswith('method-') and
not link.startswith('event-')):
return '<a%shref=%s>%s</a>' % (matches.group(1),
matches.group(2),
title)
link = re.sub('^(property|type|method|event)-', '', link).replace('-', '.')
page = page.replace('.html', '.').replace('_', '.')
if matches.group(1) == ' ':
padding = ''
else:
padding = matches.group(1)
if link in title:
return '%s$ref:%s%s' % (padding, page, link)
else:
return '%s$ref:[%s%s %s]' % (padding, page, link, title)
def _ConvertFile(filename, use_stdout):
regex = re.compile(r'<a(.*?)href=(.*?)>(.*?)</a>', flags=re.DOTALL)
contents = _ReadFile(filename)
contents = re.sub(regex,
lambda m: _Replace(m, filename),
contents)
contents = contents.replace('$ref:extension.lastError',
'$ref:runtime.lastError')
if use_stdout:
print contents
else:
_WriteFile(filename, contents)
if __name__ == '__main__':
parser = optparse.OptionParser(
description='Converts <a> links to $ref links.',
usage='usage: %prog [option] <directory>')
parser.add_option('-f', '--file', default='',
help='Convert links in single file.')
parser.add_option('-o', '--out', action='store_true', default=False,
help='Write to stdout.')
regex = re.compile(r'<a(.*?)href=(.*?)>(.*?)</a>', flags=re.DOTALL)
opts, argv = parser.parse_args()
if opts.file:
_ConvertFile(opts.file, opts.out)
else:
if len(argv) != 1:
parser.print_usage()
exit(0)
for root, dirs, files in os.walk(argv[0]):
for name in files:
_ConvertFile(os.path.join(root, name), opts.out)
|
tests/test_unique_fields_mixin.py | radicalbiscuit/drf-writable-nested | 754 | 12634188 | <reponame>radicalbiscuit/drf-writable-nested
from django.test import TestCase
from rest_framework.exceptions import ValidationError, ErrorDetail
from . import (
models,
serializers,
)
class UniqueFieldsMixinTestCase(TestCase):
def test_create_update_success(self):
serializer = serializers.UFMParentSerializer(
data={'child': {'field': 'value'}})
self.assertTrue(serializer.is_valid())
parent = serializer.save()
serializer = serializers.UFMParentSerializer(
instance=parent,
data={
'pk': parent.pk,
'child': {
'pk': parent.child.pk,
'field': 'value',
}
}
)
self.assertTrue(serializer.is_valid())
serializer.save()
def test_create_update_failed(self):
# In this case everything is valid on the validation stage, because
# UniqueValidator is skipped
# But `save` should raise an exception on create/update
child = models.UFMChild.objects.create(field='value')
parent = models.UFMParent.objects.create(child=child)
default_error_detail = ErrorDetail(
string='ufm child with this field already exists.',
code='unique')
unique_message_error_detail = ErrorDetail(
string=serializers.UNIQUE_ERROR_MESSAGE,
code='unique'
)
serializer = serializers.UFMParentSerializer(
data={
'child': {
'field': child.field,
}
}
)
self.assertTrue(serializer.is_valid())
with self.assertRaises(ValidationError) as ctx:
serializer.save()
self.assertEqual(
ctx.exception.detail,
{'child': {'field': [default_error_detail]}}
)
serializer = serializers.UFMParentSerializer(
instance=parent,
data={
'pk': parent.pk,
'child': {
'field': child.field,
}
}
)
self.assertTrue(serializer.is_valid())
with self.assertRaises(ValidationError) as ctx:
serializer.save()
self.assertEqual(
ctx.exception.detail,
{'child': {'field': [default_error_detail]}}
)
serializer = serializers.UFMParentSerializerForValidatorMessage(
data={
'child': {
'field': child.field,
}
}
)
self.assertTrue(serializer.is_valid())
with self.assertRaises(ValidationError) as ctx:
serializer.save()
self.assertEqual(
ctx.exception.detail,
{'child': {'field': [unique_message_error_detail]}}
)
def test_unique_field_not_required_for_partial_updates(self):
child = models.UFMChild.objects.create(field='value')
serializer = serializers.UFMChildSerializer(
instance=child,
data={},
partial=True
)
self.assertTrue(serializer.is_valid())
serializer.save()
|
src/pymap3d/ned.py | ryanpavlick/pymap3d | 116 | 12634216 | """ Transforms involving NED North East Down """
from __future__ import annotations
import typing
from .enu import geodetic2enu, aer2enu, enu2aer
from .ecef import ecef2geodetic, ecef2enuv, ecef2enu, enu2ecef
from .ellipsoid import Ellipsoid
if typing.TYPE_CHECKING:
from numpy import ndarray
def aer2ned(
az: ndarray, elev: ndarray, slantRange: ndarray, deg: bool = True
) -> tuple[ndarray, ndarray, ndarray]:
"""
converts azimuth, elevation, range to target from observer to North, East, Down
Parameters
-----------
az : float
azimuth
elev : float
elevation
slantRange : float
slant range [meters]
deg : bool, optional
degrees input/output (False: radians in/out)
Results
-------
n : float
North NED coordinate (meters)
e : float
East NED coordinate (meters)
d : float
Down NED coordinate (meters)
"""
e, n, u = aer2enu(az, elev, slantRange, deg=deg)
return n, e, -u
def ned2aer(
n: ndarray, e: ndarray, d: ndarray, deg: bool = True
) -> tuple[ndarray, ndarray, ndarray]:
"""
converts North, East, Down to azimuth, elevation, range
Parameters
----------
n : float
North NED coordinate (meters)
e : float
East NED coordinate (meters)
d : float
Down NED coordinate (meters)
deg : bool, optional
degrees input/output (False: radians in/out)
Results
-------
az : float
azimuth
elev : float
elevation
slantRange : float
slant range [meters]
"""
return enu2aer(e, n, -d, deg=deg)
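# Example sketch (values are arbitrary): aer2ned and ned2aer are inverse conversions, so a
# round trip recovers the original look angles.
#
#   n, e, d = aer2ned(az=30.0, elev=10.0, slantRange=1000.0)
#   az, elev, rng = ned2aer(n, e, d)   # ~(30.0, 10.0, 1000.0)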
def ned2geodetic(
n: ndarray,
e: ndarray,
d: ndarray,
lat0: ndarray,
lon0: ndarray,
h0: ndarray,
ell: Ellipsoid = None,
deg: bool = True,
) -> tuple[ndarray, ndarray, ndarray]:
"""
Converts North, East, Down to target latitude, longitude, altitude
Parameters
----------
n : float
North NED coordinate (meters)
e : float
East NED coordinate (meters)
d : float
Down NED coordinate (meters)
lat0 : float
Observer geodetic latitude
lon0 : float
Observer geodetic longitude
h0 : float
observer altitude above geodetic ellipsoid (meters)
ell : Ellipsoid, optional
reference ellipsoid
deg : bool, optional
degrees input/output (False: radians in/out)
Results
-------
lat : float
target geodetic latitude
lon : float
target geodetic longitude
h : float
target altitude above geodetic ellipsoid (meters)
"""
x, y, z = enu2ecef(e, n, -d, lat0, lon0, h0, ell, deg=deg)
return ecef2geodetic(x, y, z, ell, deg=deg)
def ned2ecef(
n: ndarray,
e: ndarray,
d: ndarray,
lat0: ndarray,
lon0: ndarray,
h0: ndarray,
ell: Ellipsoid = None,
deg: bool = True,
) -> tuple[ndarray, ndarray, ndarray]:
"""
North, East, Down to target ECEF coordinates
Parameters
----------
n : float
North NED coordinate (meters)
e : float
East NED coordinate (meters)
d : float
Down NED coordinate (meters)
lat0 : float
Observer geodetic latitude
lon0 : float
Observer geodetic longitude
h0 : float
observer altitude above geodetic ellipsoid (meters)
ell : Ellipsoid, optional
reference ellipsoid
deg : bool, optional
degrees input/output (False: radians in/out)
Results
-------
x : float
ECEF x coordinate (meters)
y : float
ECEF y coordinate (meters)
z : float
ECEF z coordinate (meters)
"""
return enu2ecef(e, n, -d, lat0, lon0, h0, ell, deg=deg)
def ecef2ned(
x: ndarray,
y: ndarray,
z: ndarray,
lat0: ndarray,
lon0: ndarray,
h0: ndarray,
ell: Ellipsoid = None,
deg: bool = True,
) -> tuple[ndarray, ndarray, ndarray]:
"""
Convert ECEF x,y,z to North, East, Down
Parameters
----------
x : float
ECEF x coordinate (meters)
y : float
ECEF y coordinate (meters)
z : float
ECEF z coordinate (meters)
lat0 : float
Observer geodetic latitude
lon0 : float
Observer geodetic longitude
h0 : float
observer altitude above geodetic ellipsoid (meters)
ell : Ellipsoid, optional
reference ellipsoid
deg : bool, optional
degrees input/output (False: radians in/out)
Results
-------
n : float
North NED coordinate (meters)
e : float
East NED coordinate (meters)
d : float
Down NED coordinate (meters)
"""
e, n, u = ecef2enu(x, y, z, lat0, lon0, h0, ell, deg=deg)
return n, e, -u
def geodetic2ned(
lat: ndarray,
lon: ndarray,
h: ndarray,
lat0: ndarray,
lon0: ndarray,
h0: ndarray,
ell: Ellipsoid = None,
deg: bool = True,
) -> tuple[ndarray, ndarray, ndarray]:
"""
convert latitude, longitude, altitude of target to North, East, Down from observer
Parameters
----------
lat : float
target geodetic latitude
lon : float
target geodetic longitude
h : float
target altitude above geodetic ellipsoid (meters)
lat0 : float
Observer geodetic latitude
lon0 : float
Observer geodetic longitude
h0 : float
observer altitude above geodetic ellipsoid (meters)
ell : Ellipsoid, optional
reference ellipsoid
deg : bool, optional
degrees input/output (False: radians in/out)
Results
-------
n : float
North NED coordinate (meters)
e : float
East NED coordinate (meters)
d : float
Down NED coordinate (meters)
"""
e, n, u = geodetic2enu(lat, lon, h, lat0, lon0, h0, ell, deg=deg)
return n, e, -u
def ecef2nedv(
x: float, y: float, z: float, lat0: float, lon0: float, deg: bool = True
) -> tuple[float, float, float]:
"""
for VECTOR between two points
Parameters
----------
x : float
ECEF x coordinate (meters)
y : float
ECEF y coordinate (meters)
z : float
ECEF z coordinate (meters)
lat0 : float
Observer geodetic latitude
lon0 : float
Observer geodetic longitude
deg : bool, optional
degrees input/output (False: radians in/out)
Results
-------
(Vector)
n : float
North NED coordinate (meters)
e : float
East NED coordinate (meters)
d : float
Down NED coordinate (meters)
"""
e, n, u = ecef2enuv(x, y, z, lat0, lon0, deg=deg)
return n, e, -u
|
VMEncryption/setup.py | shridpant/azure-linux-extensions | 266 | 12634269 | #!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To build:
# python setup.py sdist
#
# To install:
# python setup.py install
#
# To register (only needed once):
# python setup.py register
#
# To upload:
# python setup.py sdist upload
import codecs
import json
import os
import subprocess
from distutils.core import setup
from zipfile import ZipFile
from shutil import copy2
from main.Common import CommonVariables
packages_array = []
main_folder = 'main'
main_entry = main_folder + '/handle.py'
packages_array.append(main_folder)
patch_folder = main_folder + '/patch'
packages_array.append(patch_folder)
oscrypto_folder = main_folder + '/oscrypto'
packages_array.append(oscrypto_folder)
packages_array.append(oscrypto_folder + '/91ade')
packages_array.append(oscrypto_folder + '/rhel_72_lvm')
packages_array.append(oscrypto_folder + '/rhel_72_lvm/encryptstates')
packages_array.append(oscrypto_folder + '/rhel_72')
packages_array.append(oscrypto_folder + '/rhel_72/encryptstates')
packages_array.append(oscrypto_folder + '/rhel_68')
packages_array.append(oscrypto_folder + '/rhel_68/encryptstates')
packages_array.append(oscrypto_folder + '/centos_68')
packages_array.append(oscrypto_folder + '/centos_68/encryptstates')
packages_array.append(oscrypto_folder + '/ubuntu_1604')
packages_array.append(oscrypto_folder + '/ubuntu_1604/encryptstates')
packages_array.append(oscrypto_folder + '/ubuntu_1404')
packages_array.append(oscrypto_folder + '/ubuntu_1404/encryptstates')
transitions_folder = 'transitions/transitions'
packages_array.append(transitions_folder)
"""
copy the dependency to the local
"""
"""
copy the utils lib to local
"""
target_utils_path = main_folder + '/' + CommonVariables.utils_path_name
packages_array.append(target_utils_path)
"""
generate the HandlerManifest.json file.
"""
manifest_obj = [{
"name": CommonVariables.extension_name,
"version": "1.0",
"handlerManifest": {
"installCommand": "extension_shim.sh -c {0} --install".format(main_entry),
"uninstallCommand": "extension_shim.sh -c {0} --uninstall".format(main_entry),
"updateCommand": "extension_shim.sh -c {0} --update".format(main_entry),
"enableCommand": "extension_shim.sh -c {0} --enable".format(main_entry),
"disableCommand": "extension_shim.sh -c {0} --disable".format(main_entry),
"rebootAfterInstall": False,
"reportHeartbeat": False
}
}]
manifest_str = json.dumps(manifest_obj, sort_keys = True, indent = 4)
manifest_file = open("HandlerManifest.json", "w")
manifest_file.write(manifest_str)
manifest_file.close()
"""
generate the extension xml file
"""
extension_xml_file_content = """<ExtensionImage xmlns="http://schemas.microsoft.com/windowsazure">
<ProviderNameSpace>Microsoft.Azure.Security</ProviderNameSpace>
<Type>%s</Type>
<Version>%s</Version>
<Label>%s</Label>
<HostingResources>VmRole</HostingResources>
<MediaLink></MediaLink>
<Description>%s</Description>
<IsInternalExtension>true</IsInternalExtension>
<Eula>https://azure.microsoft.com/en-us/support/legal/</Eula>
<PrivacyUri>https://azure.microsoft.com/en-us/support/legal/</PrivacyUri>
<HomepageUri>https://github.com/Azure/azure-linux-extensions</HomepageUri>
<IsJsonExtension>true</IsJsonExtension>
<SupportedOS>Linux</SupportedOS>
<CompanyName>Microsoft</CompanyName>
<!--%%REGIONS%%-->
</ExtensionImage>""" % (CommonVariables.extension_type, CommonVariables.extension_version, CommonVariables.extension_label, CommonVariables.extension_description)
extension_xml_file = open('manifest.xml', 'w')
extension_xml_file.write(extension_xml_file_content)
extension_xml_file.close()
"""
setup script, to package the files up
"""
setup(name = CommonVariables.extension_name,
version = CommonVariables.extension_version,
description=CommonVariables.extension_description,
license='Apache License 2.0',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/Azure/azure-linux-extensions',
classifiers = ['Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: Apache Software License'],
packages = packages_array)
"""
unzip the package files and re-package it.
"""
target_zip_file_location = './dist/'
target_folder_name = CommonVariables.extension_name + '-' + str(CommonVariables.extension_version)
target_zip_file_path = target_zip_file_location + target_folder_name + '.zip'
target_zip_file = ZipFile(target_zip_file_path)
target_zip_file.extractall(target_zip_file_location)
def dos2unix(src):
args = ["dos2unix", src]
devnull = open(os.devnull, 'w')
child = subprocess.Popen(args, stdout=devnull, stderr=devnull)
print('dos2unix %s ' % (src))
child.wait()
def remove_utf8_bom(src):
print('removing utf-8 bom from %s ' % (src))
contents = None
with open(src, "r+b") as fp:
bincontents = fp.read()
if bincontents[:len(codecs.BOM_UTF8)] == codecs.BOM_UTF8:
contents = bincontents.decode('utf-8-sig')
        elif bincontents[:3] == b'\xef\x00\x00':  # bytes literal so this comparison also works under Python 3
contents = bincontents[3:].decode('utf-8')
else:
contents = bincontents.decode('utf8')
with open(src, "wb") as fp:
fp.write(contents.encode('utf-8'))
def zip(src, dst):
zf = ZipFile("%s" % (dst), "w")
abs_src = os.path.abspath(src)
for dirname, subdirs, files in os.walk(src):
for filename in files:
absname = os.path.abspath(os.path.join(dirname, filename))
dos2unix(absname)
remove_utf8_bom(absname)
arcname = absname[len(abs_src) + 1:]
print('zipping %s as %s' % (os.path.join(dirname, filename), arcname))
zf.write(absname, arcname)
zf.close()
final_folder_path = target_zip_file_location + target_folder_name
# Manually add SupportedOS.json file as setup seems to only copy py file
copy2(main_folder+'/SupportedOS.json', final_folder_path+'/'+main_folder )
zip(final_folder_path, target_zip_file_path)
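# Note on the re-packaging step: ZipFile() above expects a zip-format sdist at
# ./dist/<extension_name>-<version>.zip. distutils only builds a .zip sdist by
# default on Windows (on POSIX the default is .tar.gz), so on Linux this script
# would typically be invoked as shown below -- an assumption based on the ZipFile
# usage, not something documented in this file:
#
#     python setup.py sdist --formats=zip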
|
vimdoc/docline.py | torao-1892/vimdoc | 211 | 12634294 | """Vimfile documentation lines, the stuff of vimdoc blocks."""
import abc
import vimdoc
from vimdoc import error
from vimdoc import regex
from vimdoc.block import Block
class DocLine(object):
"""One line of vim documentation."""
__metaclass__ = abc.ABCMeta
def Each(self, blocks, selection):
"""Iterates the selected blocks."""
for i in selection:
if i >= len(blocks):
raise error.InvalidBlockNumber(i, blocks, selection)
yield blocks[i]
def Affect(self, blocks, selection):
"""Updates each selected block.
Args:
blocks: The different blocks defined so far.
selection: The blocks being operated upon.
Returns:
The blocks ready to be closed (which is an empty list -- codelines are the
ones who close blocks, not doclines.)
"""
if not blocks:
blocks.append(Block())
selection.append(0)
for block in self.Each(blocks, selection):
self.Update(block)
return ()
@abc.abstractmethod
def Update(self, block):
"""Update one block."""
class Text(DocLine):
def __init__(self, line):
self.line = line
def Update(self, block):
block.AddLine(self.line)
class BlockDirective(DocLine):
"""A line-spanning directive, like @usage."""
__metaclass__ = abc.ABCMeta
REGEX = regex.no_args
def __init__(self, args):
match = self.REGEX.match(args)
if not match:
raise error.InvalidBlockArgs(self.__class__.__name__, args)
self.Assign(*match.groups())
def Assign(self):
pass
class All(BlockDirective):
REGEX = regex.no_args
def Assign(self):
pass
def Affect(self, blocks, selection):
selection[:] = range(len(blocks))
for block in blocks:
block.SetType(True)
return ()
def Update(self, block):
pass
class Author(BlockDirective):
REGEX = regex.any_args
def Assign(self, author):
self.author = author
def Update(self, block):
block.Global(author=self.author)
class Backmatter(BlockDirective):
REGEX = regex.backmatter_args
def Assign(self, ident):
self.id = ident
def Update(self, block):
block.SetType(vimdoc.BACKMATTER)
block.Local(id=self.id)
class Default(BlockDirective):
REGEX = regex.default_args
def Assign(self, arg, value):
self.arg = arg
self.value = value
def Update(self, block):
block.Default(self.arg, self.value)
class Deprecated(BlockDirective):
REGEX = regex.one_arg
def Assign(self, reason):
self.reason = reason
def Update(self, block):
block.Local(deprecated=self.reason)
# pylint: disable=g-bad-name
class Exception_(BlockDirective):
REGEX = regex.maybe_word
def Assign(self, word):
self.word = word
def Update(self, block):
block.Local(exception=self.word)
class Dict(BlockDirective):
REGEX = regex.dict_args
def Assign(self, name, attribute=None):
self.name = name
self.attribute = attribute
def Update(self, block):
block.SetType(True)
block.Local(dict=self.name)
if self.attribute:
block.SetType(vimdoc.FUNCTION)
block.Local(attribute=self.attribute)
# We can't set the dict type here because it may be set to Function type
# later, and we don't want a type mismatch.
class Library(BlockDirective):
def Update(self, block):
block.Global(library=True)
class Order(BlockDirective):
REGEX = regex.order_args
def Assign(self, args):
self.order = regex.order_arg.findall(args)
def Update(self, block):
block.Global(order=self.order)
class Private(BlockDirective):
def Update(self, block):
block.Local(private=True)
class Public(BlockDirective):
def Update(self, block):
block.Local(private=False)
class Section(BlockDirective):
REGEX = regex.section_args
def __init__(self, args):
super(Section, self).__init__(args)
def Assign(self, name, ident):
self.name = name.replace('\\,', ',').replace('\\\\', '\\')
if ident is None:
# If omitted, it's the name in lowercase, with spaces converted to dashes.
ident = self.name.lower().replace(' ', '-')
self.id = ident
def Update(self, block):
block.SetType(vimdoc.SECTION)
block.Local(name=self.name, id=self.id)
class ParentSection(BlockDirective):
REGEX = regex.parent_section_args
def Assign(self, name):
self.name = name.lower()
def Update(self, block):
block.SetParentSection(self.name)
class Setting(BlockDirective):
REGEX = regex.one_arg
def Assign(self, name):
scope_match = regex.setting_scope.match(name)
# Assume global scope if no explicit scope given.
if scope_match is None:
name = 'g:' + name
self.name = name
def Update(self, block):
block.SetType(vimdoc.SETTING)
block.Local(name=self.name)
class Standalone(BlockDirective):
def Update(self, block):
block.Global(standalone=True)
class Stylized(BlockDirective):
REGEX = regex.stylizing_args
def Assign(self, stylization):
self.stylization = stylization
def Update(self, block):
block.Global(stylization=self.stylization)
class SubSection(BlockDirective):
REGEX = regex.any_args
def Assign(self, name):
self.name = name
def Update(self, block):
block.AddSubHeader(self.name)
class Tagline(BlockDirective):
REGEX = regex.any_args
def Assign(self, tagline):
self.tagline = tagline
def Update(self, block):
block.Global(tagline=self.tagline)
class Throws(BlockDirective):
REGEX = regex.throw_args
def Assign(self, typ, description):
if not regex.vim_error.match(typ):
typ = 'ERROR({})'.format(typ)
self.error = typ
self.description = description
def Update(self, block):
block.Except(self.error, self.description)
class Header(BlockDirective):
"""A header directive, like @usage @function or @command."""
__metaclass__ = abc.ABCMeta
def Affect(self, blocks, selection):
"""Updates the block selection.
If this block is already split into multiple sections, or if it already has
a header, then a new section is created with this header. Otherwise, this
header is set as the header for the single block.
Args:
blocks: The blocks defined in the documentation so far.
selection: The blocks currently being acted on.
Returns:
The blocks ready to be closed (which is none).
"""
if (len(blocks) != 1) or (blocks[0].header):
# Mark this as a secondary block if there are other blocks above it that
# are describing the same block. (This allows us to, for example, only add
# the function tag to the FIRST block that describes the function and not
# to subsequent blocks showing other ways to use the same function.)
is_secondary = len(blocks) > 0
newblock = Block(is_secondary=is_secondary)
# If the first block has no header, copy its locals.
if blocks and blocks[0].header is None:
newblock.locals = dict(blocks[0].locals)
blocks.append(newblock)
selection[:] = [len(blocks) - 1]
else:
# There is only one block. Assert that it's selected.
assert selection == [0], 'Singleton blocks must be selected.'
for block in self.Each(blocks, selection):
block.SetHeader(self)
self.Update(block)
return ()
def Assign(self, usage):
self.usage = usage
self.reqs = regex.required_arg.findall(usage)
self.opts = regex.optional_arg.findall(usage)
def Update(self, block):
pass
def GenerateUsage(self, block):
isfunc = block.locals.get('type') == vimdoc.FUNCTION
sep = ', ' if isfunc else ' '
extra_reqs = sep.join('{%s}' % r
for r in block.RequiredArgs()
if r not in self.reqs)
extra_opts = sep.join('[%s]' % o
for o in block.OptionalArgs()
if o not in self.opts)
usage = self.FillOut(block.FullName(), sep, extra_reqs, extra_opts)
# Command usage should have a ':' prefix before the name.
if block.locals.get('type') == vimdoc.COMMAND and not usage.startswith(':'):
usage = ':' + usage
return usage
def FillOut(self, name, sep, extra_reqs, extra_opts):
"""Expands the usage line with the given arguments."""
# The user may use the {] hole to place both required and optional args,
# appropriately separated.
if extra_reqs and extra_opts:
extra_args = extra_reqs + sep + extra_opts
else:
extra_args = extra_reqs + extra_opts
# Expand the argument holes.
# Presumably, the user won't use both the arg hole and the required/optional
# holes. If they do, then we'll dutifully replicate the args.
usage = regex.arg_hole.sub(extra_args, self.usage)
usage = regex.required_hole.sub(extra_reqs, usage)
usage = regex.optional_hole.sub(extra_opts, usage)
# Remove bad separators.
usage = regex.bad_separator.sub('', usage)
# Expand the name holes.
usage = regex.name_hole.sub(name, usage)
# Expand the hole escape sequences.
usage = regex.namehole_escape.sub(r'<\1>', usage)
usage = regex.requiredhole_escape.sub(r'{\1}', usage)
usage = regex.optionalhole_escape.sub(r'[\1]', usage)
return usage
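  # Rough illustration of FillOut (the hole tokens are defined in vimdoc.regex,
  # which is not part of this file; '<>' as the name hole and '{]' as the combined
  # args hole follow from the code and comments above, while the argument names
  # here are made up):
  #   FillOut('myplugin#Format', ', ', '{text}', '[width]')
  #   applied to a usage template of '<>({])' yields roughly
  #   'myplugin#Format({text}, [width])'.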
class Command(Header):
REGEX = regex.any_args
def Update(self, block):
block.SetType(vimdoc.COMMAND)
class Function(Header):
REGEX = regex.any_args
def Update(self, block):
block.SetType(vimdoc.FUNCTION)
class Usage(Header):
REGEX = regex.usage_args
def GenerateUsage(self, block):
"""Generates the usage line. Syntax depends upon the block type."""
normalize = lambda arg: arg if arg[0] in '[{' else ('{%s}' % arg)
args = [normalize(arg) for arg in regex.usage_arg.findall(self.usage)]
if block.locals.get('type') == vimdoc.FUNCTION:
# Functions are like MyFunction({req1}, {req2}, [opt1])
self.usage = '<>(%s)' % ', '.join(args)
else:
assert block.locals.get('type') == vimdoc.COMMAND
# Commands are like :[range]MyCommand[!] {req1} {req2} [opt1]
self.usage = ':%s %s' % (block.locals.get('head', '<>'), ' '.join(args))
return super(Usage, self).GenerateUsage(block)
BLOCK_DIRECTIVES = {
'all': All,
'author': Author,
'backmatter': Backmatter,
'command': Command,
'default': Default,
'deprecated': Deprecated,
'dict': Dict,
'exception': Exception_,
'function': Function,
'library': Library,
'order': Order,
'parentsection': ParentSection,
'private': Private,
'public': Public,
'section': Section,
'setting': Setting,
'standalone': Standalone,
'stylized': Stylized,
'subsection': SubSection,
'tagline': Tagline,
'throws': Throws,
'usage': Usage,
}
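# Example of how this table is meant to be consumed (the parser that tokenizes
# doc comment lines lives elsewhere in vimdoc; the vimscript comment syntax shown
# here is only an illustration):
#   a documentation line such as  " @usage {lhs} [rhs]
#   is split into the directive name 'usage' and its argument string '{lhs} [rhs]',
#   then dispatched as BLOCK_DIRECTIVES['usage']('{lhs} [rhs]'), i.e. a Usage
#   header directive whose Affect()/Update() methods shape the current block.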
|
src/DyldExtractor/dyld/dyld_context.py | arandomdev/DyldExtractor | 177 | 12634313 | from mmap import mmap
from DyldExtractor.file_context import FileContext
from DyldExtractor.dyld.dyld_structs import (
dyld_cache_header,
dyld_cache_mapping_info,
dyld_cache_image_info,
)
class DyldContext(FileContext):
header: dyld_cache_header
mappings: list[dyld_cache_mapping_info]
images: list[dyld_cache_image_info]
def __init__(self, file: mmap) -> None:
"""A wrapper around a dyld file.
Provides convenient methods and attributes for a given dyld file.
Args:
file: an open dyld file.
"""
super().__init__(file, offset=0)
self.header = dyld_cache_header(file)
self.mappings = []
for i in range(self.header.mappingCount):
offset = self.header.mappingOffset + (i * dyld_cache_mapping_info.SIZE)
self.mappings.append(dyld_cache_mapping_info(file, offset))
self.images = []
for i in range(self.header.imagesCount):
offset = self.header.imagesOffset + (i * dyld_cache_image_info.SIZE)
self.images.append(dyld_cache_image_info(file, offset))
pass
def convertAddr(self, vmaddr: int) -> int:
"""Convert a vmaddr to its file offset
Returns:
			The file offset, or None if not found.
"""
for mapping in self.mappings:
lowBound = mapping.address
highBound = mapping.address + mapping.size
if vmaddr >= lowBound and vmaddr < highBound:
mappingOff = vmaddr - lowBound
return mapping.fileOffset + mappingOff
# didn't find the address in any mappings...
return None
def headerContainsField(self, field: str) -> bool:
"""Check to see if the header contains the given field.
Args:
`field`: The name of the field.
Returns:
A bool.
"""
# first check to see if we even have it.
if not hasattr(self.header, field):
return False
fieldOff = getattr(dyld_cache_header, field).offset
mappingOff = self.header.mappingOffset
# The mapping info is directly after the header. We can use this fact
# to determine if the header originally had that field.
if fieldOff < mappingOff:
return True
else:
return False
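# Minimal usage sketch (the cache path and mmap arguments below are illustrative
# assumptions, not taken from this repository):
#
#   with open("dyld_shared_cache_arm64e", "r+b") as f:
#       ctx = DyldContext(mmap(f.fileno(), 0))
#       firstImage = ctx.images[0]
#       machoOffset = ctx.convertAddr(firstImage.address)  # None if unmapped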
|