prompt (stringlengths 15–655k) | completion (stringlengths 3–32.4k) | api (stringlengths 8–52)
---|---|---|
import hashlib
import math
import numpy as np
import pprint
import pytest
import random
import re
import subprocess
import sys
import tempfile
import json
from catboost import (
CatBoost,
CatBoostClassifier,
CatBoostRegressor,
CatBoostError,
EFstrType,
FeaturesData,
Pool,
cv,
sum_models,
train,)
from catboost.eval.catboost_evaluation import CatboostEvaluation, EvalType
from catboost.utils import eval_metric, create_cd, get_roc_curve, select_threshold
from catboost.utils import DataMetaInfo, TargetStats, compute_training_options
import os.path
from pandas import read_table, DataFrame, Series, Categorical
from six import PY3
from six.moves import xrange
from catboost_pytest_lib import (
DelayedTee,
binary_path,
data_file,
local_canonical_file,
permute_dataset_columns,
remove_time_from_json,
test_output_path,
generate_random_labeled_set
)
if sys.version_info.major == 2:
import cPickle as pickle
else:
import _pickle as pickle
pytest_plugins = "list_plugin",
fails_on_gpu = pytest.mark.fails_on_gpu
EPS = 1e-5
BOOSTING_TYPE = ['Ordered', 'Plain']
OVERFITTING_DETECTOR_TYPE = ['IncToDec', 'Iter']
NONSYMMETRIC = ['Lossguide', 'Depthwise']
TRAIN_FILE = data_file('adult', 'train_small')
TEST_FILE = data_file('adult', 'test_small')
CD_FILE = data_file('adult', 'train.cd')
NAN_TRAIN_FILE = data_file('adult_nan', 'train_small')
NAN_TEST_FILE = data_file('adult_nan', 'test_small')
NAN_CD_FILE = data_file('adult_nan', 'train.cd')
CLOUDNESS_TRAIN_FILE = data_file('cloudness_small', 'train_small')
CLOUDNESS_TEST_FILE = data_file('cloudness_small', 'test_small')
CLOUDNESS_CD_FILE = data_file('cloudness_small', 'train.cd')
QUERYWISE_TRAIN_FILE = data_file('querywise', 'train')
QUERYWISE_TEST_FILE = data_file('querywise', 'test')
QUERYWISE_CD_FILE = data_file('querywise', 'train.cd')
QUERYWISE_CD_FILE_WITH_GROUP_WEIGHT = data_file('querywise', 'train.cd.group_weight')
QUERYWISE_CD_FILE_WITH_GROUP_ID = data_file('querywise', 'train.cd.query_id')
QUERYWISE_CD_FILE_WITH_SUBGROUP_ID = data_file('querywise', 'train.cd.subgroup_id')
QUERYWISE_TRAIN_PAIRS_FILE = data_file('querywise', 'train.pairs')
QUERYWISE_TRAIN_PAIRS_FILE_WITH_PAIR_WEIGHT = data_file('querywise', 'train.pairs.weighted')
QUERYWISE_TEST_PAIRS_FILE = data_file('querywise', 'test.pairs')
AIRLINES_5K_TRAIN_FILE = data_file('airlines_5K', 'train')
AIRLINES_5K_TEST_FILE = data_file('airlines_5K', 'test')
AIRLINES_5K_CD_FILE = data_file('airlines_5K', 'cd')
SMALL_CATEGORIAL_FILE = data_file('small_categorial', 'train')
SMALL_CATEGORIAL_CD_FILE = data_file('small_categorial', 'train.cd')
BLACK_FRIDAY_TRAIN_FILE = data_file('black_friday', 'train')
BLACK_FRIDAY_TEST_FILE = data_file('black_friday', 'test')
BLACK_FRIDAY_CD_FILE = data_file('black_friday', 'cd')
OUTPUT_MODEL_PATH = 'model.bin'
OUTPUT_COREML_MODEL_PATH = 'model.mlmodel'
OUTPUT_CPP_MODEL_PATH = 'model.cpp'
OUTPUT_PYTHON_MODEL_PATH = 'model.py'
OUTPUT_JSON_MODEL_PATH = 'model.json'
OUTPUT_ONNX_MODEL_PATH = 'model.onnx'
PREDS_PATH = 'predictions.npy'
PREDS_TXT_PATH = 'predictions.txt'
FIMP_NPY_PATH = 'feature_importance.npy'
FIMP_TXT_PATH = 'feature_importance.txt'
OIMP_PATH = 'object_importances.txt'
JSON_LOG_PATH = 'catboost_info/catboost_training.json'
TARGET_IDX = 1
CAT_FEATURES = [0, 1, 2, 4, 6, 8, 9, 10, 11, 12, 16]
model_diff_tool = binary_path("catboost/tools/model_comparator/model_comparator")
np.set_printoptions(legacy='1.13')
class LogStdout:
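    """Context manager that redirects sys.stdout to the given file and restores it on exit."""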
def __init__(self, file):
self.log_file = file
def __enter__(self):
self.saved_stdout = sys.stdout
sys.stdout = self.log_file
return self.saved_stdout
def __exit__(self, exc_type, exc_value, exc_traceback):
sys.stdout = self.saved_stdout
self.log_file.close()
def compare_canonical_models(model, diff_limit=0):
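    """Return a canonical-file comparison for a saved model, diffed with the model_comparator binary."""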
return local_canonical_file(model, diff_tool=[model_diff_tool, '--diff-limit', str(diff_limit)])
def map_cat_features(data, cat_features):
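    """Convert the columns listed in cat_features to strings; other columns are returned unchanged."""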
result = []
for i in range(data.shape[0]):
result.append([])
for j in range(data.shape[1]):
result[i].append(str(data[i, j]) if j in cat_features else data[i, j])
return result
def _check_shape(pool, object_count, features_count):
return np.shape(pool.get_features()) == (object_count, features_count)
def _check_data(data1, data2):
return np.all(np.isclose(data1, data2, rtol=0.001, equal_nan=True))
def _count_lines(afile):
with open(afile, 'r') as f:
num_lines = sum(1 for line in f)
return num_lines
def _generate_nontrivial_binary_target(num, seed=20181219, prng=None):
    '''
    Generate a binary target vector with non-zero variance (both classes present for num > 1).
    :param num: number of objects
    :return: numpy array of 0/1 labels
    '''
if prng is None:
prng = np.random.RandomState(seed=seed)
def gen():
return prng.randint(0, 2, size=num)
if num <= 1:
return gen()
y = gen() # 0/1 labels
while y.min() == y.max():
y = gen()
return y
def _generate_random_target(num, seed=20181219, prng=None):
if prng is None:
prng = np.random.RandomState(seed=seed)
return prng.random_sample((num,))
def set_random_weight(pool, seed=20181219, prng=None):
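    """Set random per-object weights on the pool (and random per-pair weights if the pool has pairs)."""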
if prng is None:
prng = np.random.RandomState(seed=seed)
pool.set_weight(prng.random_sample(pool.num_row()))
if pool.num_pairs() > 0:
pool.set_pairs_weight(prng.random_sample(pool.num_pairs()))
def verify_finite(result):
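    """Assert that every value in result is finite: r == r rejects NaN, abs(r) < inf rejects infinities."""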
inf = float('inf')
for r in result:
assert(r == r)
assert(abs(r) < inf)
def append_param(metric_name, param):
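    """Append a parameter to a metric name, using ':' before the first parameter and ';' between subsequent ones.
    e.g. append_param('Quantile', 'alpha=0.1') -> 'Quantile:alpha=0.1'
    """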
return metric_name + (':' if ':' not in metric_name else ';') + param
# returns (features DataFrame, cat_feature_indices)
def load_pool_features_as_df(pool_file, cd_file, target_idx):
data = read_table(pool_file, header=None, dtype=str)
data.drop([target_idx], axis=1, inplace=True)
return (data, Pool(pool_file, column_description=cd_file).get_cat_feature_indices())
# Test cases begin here ########################################################
def test_load_file():
assert _check_shape(Pool(TRAIN_FILE, column_description=CD_FILE), 101, 17)
def test_load_list():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
cat_features = pool.get_cat_feature_indices()
data = map_cat_features(pool.get_features(), cat_features)
label = pool.get_label()
assert _check_shape(Pool(data, label, cat_features), 101, 17)
def test_load_ndarray():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
cat_features = pool.get_cat_feature_indices()
data = np.array(map_cat_features(pool.get_features(), cat_features))
label = np.array(pool.get_label())
assert _check_shape(Pool(data, label, cat_features), 101, 17)
@pytest.mark.parametrize('dataset', ['adult', 'adult_nan', 'querywise'])
def test_load_df_vs_load_from_file(dataset):
train_file, cd_file, target_idx, other_non_feature_columns = {
'adult': (TRAIN_FILE, CD_FILE, TARGET_IDX, []),
'adult_nan': (NAN_TRAIN_FILE, NAN_CD_FILE, TARGET_IDX, []),
'querywise': (QUERYWISE_TRAIN_FILE, QUERYWISE_CD_FILE, 2, [0, 1, 3, 4])
}[dataset]
pool1 = Pool(train_file, column_description=cd_file)
data = read_table(train_file, header=None)
labels = DataFrame(data.iloc[:, target_idx], dtype=np.float32)
data.drop([target_idx] + other_non_feature_columns, axis=1, inplace=True)
cat_features = pool1.get_cat_feature_indices()
pool2 = Pool(data, labels, cat_features)
assert _check_data(pool1.get_features(), pool2.get_features())
assert _check_data([float(label) for label in pool1.get_label()], pool2.get_label())
def test_load_series():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
data = read_table(TRAIN_FILE, header=None)
labels = Series(data.iloc[:, TARGET_IDX])
data.drop([TARGET_IDX], axis=1, inplace=True)
data = Series(list(data.values))
cat_features = pool.get_cat_feature_indices()
pool2 = Pool(data, labels, cat_features)
assert _check_data(pool.get_features(), pool2.get_features())
assert [int(label) for label in pool.get_label()] == pool2.get_label()
def test_pool_cat_features():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
assert np.all(pool.get_cat_feature_indices() == CAT_FEATURES)
def test_pool_cat_features_as_strings():
df = DataFrame(data=[[1, 2], [3, 4]], columns=['col1', 'col2'])
pool = Pool(df, cat_features=['col2'])
assert np.all(pool.get_cat_feature_indices() == [1])
data = [[1, 2, 3], [4, 5, 6]]
pool = Pool(data, feature_names=['col1', 'col2', 'col3'], cat_features=['col2', 'col3'])
assert np.all(pool.get_cat_feature_indices() == [1, 2])
data = [[1, 2, 3], [4, 5, 6]]
with pytest.raises(CatBoostError):
Pool(data, cat_features=['col2', 'col3'])
def test_load_generated():
pool_size = (100, 10)
prng = np.random.RandomState(seed=20181219)
data = np.round(prng.normal(size=pool_size), decimals=3)
label = _generate_nontrivial_binary_target(pool_size[0], prng=prng)
pool = Pool(data, label)
assert _check_data(pool.get_features(), data)
assert _check_data(pool.get_label(), label)
def test_load_dumps():
pool_size = (100, 10)
prng = np.random.RandomState(seed=20181219)
data = prng.randint(10, size=pool_size)
labels = _generate_nontrivial_binary_target(pool_size[0], prng=prng)
pool1 = Pool(data, labels)
lines = []
for i in range(len(data)):
line = [str(labels[i])] + [str(x) for x in data[i]]
lines.append('\t'.join(line))
text = '\n'.join(lines)
with open('test_data_dumps', 'w') as f:
f.write(text)
pool2 = Pool('test_data_dumps')
assert _check_data(pool1.get_features(), pool2.get_features())
assert pool1.get_label() == [int(label) for label in pool2.get_label()]
def test_dataframe_with_pandas_categorical_columns():
df = DataFrame()
df['num_feat_0'] = [0, 1, 0, 2, 3, 1, 2]
df['num_feat_1'] = [0.12, 0.8, 0.33, 0.11, 0.0, 1.0, 0.0]
df['cat_feat_2'] = Series(['A', 'B', 'A', 'C', 'A', 'A', 'A'], dtype='category')
df['cat_feat_3'] = Series(['x', 'x', 'y', 'y', 'y', 'x', 'x'])
df['cat_feat_4'] = Categorical(
['large', 'small', 'medium', 'large', 'small', 'small', 'medium'],
categories=['small', 'medium', 'large'],
ordered=True
)
df['cat_feat_5'] = [0, 1, 0, 2, 3, 1, 2]
labels = [0, 1, 1, 0, 1, 0, 1]
model = CatBoostClassifier(iterations=2)
model.fit(X=df, y=labels, cat_features=[2, 3, 4, 5])
pred = model.predict(df)
preds_path = test_output_path(PREDS_TXT_PATH)
np.savetxt(preds_path, np.array(pred), fmt='%.8f')
return local_canonical_file(preds_path)
# feature_matrix is (doc_count x feature_count)
def get_features_data_from_matrix(feature_matrix, cat_feature_indices, order='C'):
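    """Split a flat feature matrix into a FeaturesData object with separate numeric (float32)
    and categorical (bytes) arrays, using cat_feature_indices to route each column.
    """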
object_count = len(feature_matrix)
feature_count = len(feature_matrix[0])
cat_feature_count = len(cat_feature_indices)
num_feature_count = feature_count - cat_feature_count
result_num = np.empty((object_count, num_feature_count), dtype=np.float32, order=order)
result_cat = np.empty((object_count, cat_feature_count), dtype=object, order=order)
for object_idx in xrange(object_count):
num_feature_idx = 0
cat_feature_idx = 0
for feature_idx in xrange(len(feature_matrix[object_idx])):
if (cat_feature_idx < cat_feature_count) and (cat_feature_indices[cat_feature_idx] == feature_idx):
# simplified handling of transformation to bytes for tests
result_cat[object_idx, cat_feature_idx] = (
feature_matrix[object_idx, feature_idx]
if isinstance(feature_matrix[object_idx, feature_idx], bytes)
else str(feature_matrix[object_idx, feature_idx]).encode('utf-8')
)
cat_feature_idx += 1
else:
result_num[object_idx, num_feature_idx] = float(feature_matrix[object_idx, feature_idx])
num_feature_idx += 1
return FeaturesData(num_feature_data=result_num, cat_feature_data=result_cat)
def get_features_data_from_file(data_file, drop_columns, cat_feature_indices, order='C'):
data_matrix_from_file = read_table(data_file, header=None, dtype=str)
data_matrix_from_file.drop(drop_columns, axis=1, inplace=True)
return get_features_data_from_matrix(np.array(data_matrix_from_file), cat_feature_indices, order)
def compare_flat_index_and_features_data_pools(flat_index_pool, features_data_pool):
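    """Check that a pool built from a flat feature matrix matches a pool built from FeaturesData
    (in FeaturesData pools the numeric features come first, followed by the categorical ones).
    """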
assert flat_index_pool.shape == features_data_pool.shape
cat_feature_indices = flat_index_pool.get_cat_feature_indices()
num_feature_count = flat_index_pool.shape[1] - len(cat_feature_indices)
flat_index_pool_features = flat_index_pool.get_features()
features_data_pool_features = features_data_pool.get_features()
for object_idx in xrange(flat_index_pool.shape[0]):
num_feature_idx = 0
cat_feature_idx = 0
for flat_feature_idx in xrange(flat_index_pool.shape[1]):
if (
(cat_feature_idx < len(cat_feature_indices))
and (cat_feature_indices[cat_feature_idx] == flat_feature_idx)
):
# simplified handling of transformation to bytes for tests
assert (flat_index_pool_features[object_idx][flat_feature_idx] ==
features_data_pool_features[object_idx][num_feature_count + cat_feature_idx])
cat_feature_idx += 1
else:
assert np.isclose(
flat_index_pool_features[object_idx][flat_feature_idx],
features_data_pool_features[object_idx][num_feature_idx],
rtol=0.001,
equal_nan=True
)
num_feature_idx += 1
@pytest.mark.parametrize('order', ['C', 'F'], ids=['order=C', 'order=F'])
def test_from_features_data_vs_load_from_files(order):
pool_from_files = Pool(TRAIN_FILE, column_description=CD_FILE)
features_data = get_features_data_from_file(
data_file=TRAIN_FILE,
drop_columns=[TARGET_IDX],
cat_feature_indices=pool_from_files.get_cat_feature_indices(),
order=order
)
pool_from_features_data = Pool(data=features_data)
compare_flat_index_and_features_data_pools(pool_from_files, pool_from_features_data)
def test_features_data_with_empty_objects():
fd = FeaturesData(
cat_feature_data=np.empty((0, 4), dtype=object)
)
assert fd.get_object_count() == 0
assert fd.get_feature_count() == 4
assert fd.get_num_feature_count() == 0
assert fd.get_cat_feature_count() == 4
assert fd.get_feature_names() == [''] * 4
fd = FeaturesData(
num_feature_data=np.empty((0, 2), dtype=np.float32),
num_feature_names=['f0', 'f1']
)
assert fd.get_object_count() == 0
assert fd.get_feature_count() == 2
assert fd.get_num_feature_count() == 2
assert fd.get_cat_feature_count() == 0
assert fd.get_feature_names() == ['f0', 'f1']
fd = FeaturesData(
cat_feature_data=np.empty((0, 2), dtype=object),
num_feature_data=np.empty((0, 3), dtype=np.float32)
)
assert fd.get_object_count() == 0
assert fd.get_feature_count() == 5
assert fd.get_num_feature_count() == 3
assert fd.get_cat_feature_count() == 2
assert fd.get_feature_names() == [''] * 5
def test_features_data_names():
# empty specification of names
fd = FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object),
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32),
)
assert fd.get_feature_names() == [''] * 5
# full specification of names
fd = FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object),
cat_feature_names=['shop', 'search'],
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32),
num_feature_names=['weight', 'price', 'volume']
)
assert fd.get_feature_names() == ['weight', 'price', 'volume', 'shop', 'search']
# partial specification of names
fd = FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object),
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32),
num_feature_names=['weight', 'price', 'volume']
)
assert fd.get_feature_names() == ['weight', 'price', 'volume', '', '']
# partial specification of names
fd = FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object),
cat_feature_names=['shop', 'search'],
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32),
)
assert fd.get_feature_names() == ['', '', '', 'shop', 'search']
def compare_pools_from_features_data_and_generic_matrix(
features_data,
generic_matrix,
cat_features_indices,
feature_names=None
):
pool1 = Pool(data=features_data)
pool2 = Pool(data=generic_matrix, cat_features=cat_features_indices, feature_names=feature_names)
assert _check_data(pool1.get_features(), pool2.get_features())
assert pool1.get_cat_feature_indices() == pool2.get_cat_feature_indices()
assert pool1.get_feature_names() == pool2.get_feature_names()
@pytest.mark.parametrize('order', ['C', 'F'], ids=['order=C', 'order=F'])
def test_features_data_good(order):
# 0 objects
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(cat_feature_data=np.empty((0, 4), dtype=object, order=order)),
np.empty((0, 4), dtype=object),
cat_features_indices=[0, 1, 2, 3]
)
# 0 objects
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
cat_feature_data=np.empty((0, 2), dtype=object, order=order),
cat_feature_names=['cat0', 'cat1'],
num_feature_data=np.empty((0, 3), dtype=np.float32, order=order),
),
np.empty((0, 5), dtype=object),
cat_features_indices=[3, 4],
feature_names=['', '', '', 'cat0', 'cat1']
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object, order=order)
),
[[b'amazon', b'bing'], [b'ebay', b'google']],
cat_features_indices=[0, 1]
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32, order=order)
),
[[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]],
cat_features_indices=[]
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object, order=order),
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32, order=order)
),
[[1.0, 2.0, 3.0, b'amazon', b'bing'], [22.0, 7.1, 10.2, b'ebay', b'google']],
cat_features_indices=[3, 4]
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object, order=order),
cat_feature_names=['shop', 'search']
),
[[b'amazon', b'bing'], [b'ebay', b'google']],
cat_features_indices=[0, 1],
feature_names=['shop', 'search']
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32, order=order),
num_feature_names=['weight', 'price', 'volume']
),
[[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]],
cat_features_indices=[],
feature_names=['weight', 'price', 'volume']
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object, order=order),
cat_feature_names=['shop', 'search'],
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32, order=order),
num_feature_names=['weight', 'price', 'volume']
),
[[1.0, 2.0, 3.0, b'amazon', b'bing'], [22.0, 7.1, 10.2, b'ebay', b'google']],
cat_features_indices=[3, 4],
feature_names=['weight', 'price', 'volume', 'shop', 'search']
)
def test_features_data_bad():
# empty
with pytest.raises(CatBoostError):
FeaturesData()
# names w/o data
with pytest.raises(CatBoostError):
FeaturesData(cat_feature_data=[[b'amazon', b'bing']], num_feature_names=['price'])
# bad matrix type
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=[[b'amazon', b'bing']],
num_feature_data=np.array([1.0, 2.0, 3.0], dtype=np.float32)
)
# bad matrix shape
with pytest.raises(CatBoostError):
FeaturesData(num_feature_data=np.array([[[1.0], [2.0], [3.0]]], dtype=np.float32))
# bad element type
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=np.array([b'amazon', b'bing'], dtype=object),
num_feature_data=np.array([1.0, 2.0, 3.0], dtype=np.float64)
)
# bad element type
with pytest.raises(CatBoostError):
FeaturesData(cat_feature_data=np.array(['amazon', 'bing']))
# bad names type
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=np.array([[b'google'], [b'reddit']], dtype=object),
cat_feature_names=[None, 'news_aggregator']
)
# bad names length
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=np.array([[b'google'], [b'bing']], dtype=object),
cat_feature_names=['search_engine', 'news_aggregator']
)
# no features
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=np.array([[], [], []], dtype=object),
num_feature_data=np.array([[], [], []], dtype=np.float32)
)
# number of objects is different
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=np.array([[b'google'], [b'bing']], dtype=object),
num_feature_data=np.array([1.0, 2.0, 3.0], dtype=np.float32)
)
def test_predict_regress(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
assert(model.is_fitted())
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_predict_sklearn_regress(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostRegressor(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool)
assert(model.is_fitted())
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_predict_sklearn_class(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, loss_function='Logloss:border=0.5', task_type=task_type, devices='0')
model.fit(train_pool)
assert(model.is_fitted())
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_predict_class_raw(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, task_type=task_type, devices='0')
model.fit(train_pool)
pred = model.predict(test_pool)
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
def test_raw_predict_equals_to_model_predict(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=10, task_type=task_type, devices='0')
model.fit(train_pool, eval_set=test_pool)
assert(model.is_fitted())
pred = model.predict(test_pool, prediction_type='RawFormulaVal')
assert np.all(np.isclose(model.get_test_eval(), pred, rtol=1.e-6))
@pytest.mark.parametrize('problem', ['Classifier', 'Regressor'])
def test_predict_and_predict_proba_on_single_object(problem):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
if problem == 'Classifier':
model = CatBoostClassifier(iterations=2)
else:
model = CatBoostRegressor(iterations=2)
model.fit(train_pool)
test_data = read_table(TEST_FILE, header=None)
test_data.drop([TARGET_IDX], axis=1, inplace=True)
pred = model.predict(test_data)
if problem == 'Classifier':
pred_probabilities = model.predict_proba(test_data)
random.seed(0)
for i in xrange(3): # just some indices
test_object_idx = random.randrange(test_data.shape[0])
assert pred[test_object_idx] == model.predict(test_data.values[test_object_idx])
if problem == 'Classifier':
assert np.array_equal(pred_probabilities[test_object_idx], model.predict_proba(test_data.values[test_object_idx]))
def test_model_pickling(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=10, task_type=task_type, devices='0')
model.fit(train_pool, eval_set=test_pool)
pred = model.predict(test_pool, prediction_type='RawFormulaVal')
model_unpickled = pickle.loads(pickle.dumps(model))
pred_new = model_unpickled.predict(test_pool, prediction_type='RawFormulaVal')
assert all(pred_new == pred)
def test_fit_from_file(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
predictions1 = model.predict(train_pool)
model.fit(TRAIN_FILE, column_description=CD_FILE)
predictions2 = model.predict(train_pool)
assert all(predictions1 == predictions2)
assert 'train_finish_time' in model.get_metadata()
@fails_on_gpu(how='assert 0.019921323750168085 < EPS, where 0.019921323750168085 = abs((0.03378972364589572 - 0.053711047396063805))')
@pytest.mark.parametrize('order', ['C', 'F'], ids=['order=C', 'order=F'])
def test_fit_from_features_data(order, task_type):
pool_from_files = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
model.fit(pool_from_files)
assert(model.is_fitted())
predictions_from_files = model.predict(pool_from_files)
features_data = get_features_data_from_file(
data_file=TRAIN_FILE,
drop_columns=[TARGET_IDX],
cat_feature_indices=pool_from_files.get_cat_feature_indices(),
order=order
)
model.fit(X=features_data, y=pool_from_files.get_label())
predictions_from_features_data = model.predict(Pool(features_data))
for prediction1, prediction2 in zip(predictions_from_files, predictions_from_features_data):
assert abs(prediction1 - prediction2) < EPS
def test_fit_from_empty_features_data(task_type):
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
with pytest.raises(CatBoostError):
model.fit(
X=FeaturesData(num_feature_data=np.empty((0, 2), dtype=np.float32)),
y=np.empty((0), dtype=np.int32)
)
def test_coreml_import_export(task_type):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
test_pool = Pool(QUERYWISE_TEST_FILE, column_description=QUERYWISE_CD_FILE)
model = CatBoost(params={'loss_function': 'RMSE', 'iterations': 20, 'thread_count': 8, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_coreml_model_path = test_output_path(OUTPUT_COREML_MODEL_PATH)
model.save_model(output_coreml_model_path, format="coreml")
canon_pred = model.predict(test_pool)
coreml_loaded_model = CatBoostRegressor()
coreml_loaded_model.load_model(output_coreml_model_path, format="coreml")
assert all(canon_pred == coreml_loaded_model.predict(test_pool))
return compare_canonical_models(output_coreml_model_path)
def test_coreml_import_export_one_hot_features(task_type):
train_pool = Pool(SMALL_CATEGORIAL_FILE, column_description=SMALL_CATEGORIAL_CD_FILE)
model = CatBoost(params={'loss_function': 'RMSE', 'iterations': 2, 'task_type': task_type, 'devices': '0', 'one_hot_max_size': 4})
model.fit(train_pool)
output_coreml_model_path = test_output_path(OUTPUT_COREML_MODEL_PATH)
model.save_model(output_coreml_model_path, format="coreml", pool=train_pool)
pred = model.predict(train_pool)
coreml_loaded_model = CatBoostRegressor()
coreml_loaded_model.load_model(output_coreml_model_path, format="coreml")
assert all(pred == coreml_loaded_model.predict(train_pool))
return compare_canonical_models(output_coreml_model_path)
@pytest.mark.parametrize('pool', ['adult', 'higgs'])
def test_convert_model_to_json(task_type, pool):
train_pool = Pool(data_file(pool, 'train_small'), column_description=data_file(pool, 'train.cd'))
test_pool = Pool(data_file(pool, 'test_small'), column_description=data_file(pool, 'train.cd'))
converted_model_path = test_output_path("converted_model.bin")
model = CatBoost({'iterations': 20, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
output_json_model_path = test_output_path(OUTPUT_JSON_MODEL_PATH)
model.save_model(output_model_path)
model.save_model(output_json_model_path, format="json")
model2 = CatBoost()
model2.load_model(output_json_model_path, format="json")
model2.save_model(converted_model_path)
pred1 = model.predict(test_pool)
pred2 = model2.predict(test_pool)
assert _check_data(pred1, pred2)
subprocess.check_call((model_diff_tool, output_model_path, converted_model_path, '--diff-limit', '0.000001'))
return compare_canonical_models(converted_model_path)
def test_coreml_cbm_import_export(task_type):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
test_pool = Pool(QUERYWISE_TEST_FILE, column_description=QUERYWISE_CD_FILE)
model = CatBoost(params={'loss_function': 'RMSE', 'iterations': 20, 'thread_count': 8, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
canon_pred = model.predict(test_pool)
output_coreml_model_path = test_output_path(OUTPUT_COREML_MODEL_PATH)
model.save_model(output_coreml_model_path, format="coreml")
coreml_loaded_model = CatBoost()
coreml_loaded_model.load_model(output_coreml_model_path, format="coreml")
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
coreml_loaded_model.save_model(output_model_path)
cbm_loaded_model = CatBoost()
cbm_loaded_model.load_model(output_model_path)
assert all(canon_pred == cbm_loaded_model.predict(test_pool))
return compare_canonical_models(output_coreml_model_path)
def test_cpp_export_no_cat_features(task_type):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_cpp_model_path = test_output_path(OUTPUT_CPP_MODEL_PATH)
model.save_model(output_cpp_model_path, format="cpp")
return local_canonical_file(output_cpp_model_path)
def test_cpp_export_with_cat_features(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': 20, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_cpp_model_path = test_output_path(OUTPUT_CPP_MODEL_PATH)
model.save_model(output_cpp_model_path, format="cpp")
return local_canonical_file(output_cpp_model_path)
@pytest.mark.parametrize('iterations', [2, 40])
def test_export_to_python_no_cat_features(task_type, iterations):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
model = CatBoost({'iterations': iterations, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_python_model_path = test_output_path(OUTPUT_PYTHON_MODEL_PATH)
model.save_model(output_python_model_path, format="python")
return local_canonical_file(output_python_model_path)
@pytest.mark.parametrize('iterations', [2, 40])
def test_export_to_python_with_cat_features(task_type, iterations):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': iterations, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_python_model_path = test_output_path(OUTPUT_PYTHON_MODEL_PATH)
model.save_model(output_python_model_path, format="python", pool=train_pool)
return local_canonical_file(output_python_model_path)
def test_export_to_python_with_cat_features_from_pandas(task_type):
model = CatBoost({'iterations': 5, 'task_type': task_type, 'devices': '0'})
X = DataFrame([[1, 2], [3, 4]], columns=['Num', 'Categ'])
y = [1, 0]
cat_features = [1]
model.fit(X, y, cat_features)
output_python_model_path = test_output_path(OUTPUT_PYTHON_MODEL_PATH)
model.save_model(output_python_model_path, format="python", pool=X)
return local_canonical_file(output_python_model_path)
@pytest.mark.parametrize('problem_type', ['binclass', 'multiclass', 'regression'])
def test_onnx_export(problem_type):
if problem_type == 'binclass':
loss_function = 'Logloss'
train_path = TRAIN_FILE
cd_path = CD_FILE
elif problem_type == 'multiclass':
loss_function = 'MultiClass'
train_path = CLOUDNESS_TRAIN_FILE
cd_path = CLOUDNESS_CD_FILE
elif problem_type == 'regression':
loss_function = 'RMSE'
train_path = TRAIN_FILE
cd_path = CD_FILE
else:
raise Exception('Unsupported problem_type: %s' % problem_type)
train_pool = Pool(train_path, column_description=cd_path)
model = CatBoost(
{
'task_type': 'CPU', # TODO(akhropov): GPU results are unstable, difficult to compare models
'loss_function': loss_function,
'iterations': 5,
'depth': 4,
# onnx format export does not yet support categorical features so ignore them
'ignored_features': train_pool.get_cat_feature_indices()
}
)
model.fit(train_pool)
output_onnx_model_path = test_output_path(OUTPUT_ONNX_MODEL_PATH)
model.save_model(
output_onnx_model_path,
format="onnx",
export_parameters={
'onnx_domain': 'ai.catboost',
'onnx_model_version': 1,
'onnx_doc_string': 'test model for problem_type %s' % problem_type,
'onnx_graph_name': 'CatBoostModel_for_%s' % problem_type
}
)
return compare_canonical_models(output_onnx_model_path)
def test_predict_class(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool)
pred = model.predict(test_pool, prediction_type="Class")
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
def test_zero_learning_rate(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0, task_type=task_type, devices='0')
with pytest.raises(CatBoostError):
model.fit(train_pool)
def test_predict_class_proba(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool)
pred = model.predict_proba(test_pool)
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
@fails_on_gpu(how='assert 0.031045619651137835 < EPS, where 0.031045619651137835 = <function amax at ...')
@pytest.mark.parametrize('function_name', ['predict', 'predict_proba'])
def test_predict_funcs_from_features_data(function_name, task_type):
function = getattr(CatBoostClassifier, function_name)
train_pool_from_files = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=10, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool_from_files)
test_pool_from_files = Pool(TEST_FILE, column_description=CD_FILE)
predictions_from_files = function(model, test_pool_from_files)
train_features_data, test_features_data = [
get_features_data_from_file(
data_file=data_file,
drop_columns=[TARGET_IDX],
cat_feature_indices=train_pool_from_files.get_cat_feature_indices()
)
for data_file in [TRAIN_FILE, TEST_FILE]
]
model.fit(X=train_features_data, y=train_pool_from_files.get_label())
predictions_from_features_data = function(model, test_features_data)
for prediction1, prediction2 in zip(predictions_from_files, predictions_from_features_data):
assert np.max(np.abs(prediction1 - prediction2)) < EPS
# empty
empty_test_features_data = FeaturesData(
num_feature_data=np.empty((0, test_features_data.get_num_feature_count()), dtype=np.float32),
cat_feature_data=np.empty((0, test_features_data.get_cat_feature_count()), dtype=object)
)
empty_predictions = function(model, empty_test_features_data)
assert len(empty_predictions) == 0
def test_no_cat_in_predict(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool)
pred1 = model.predict(map_cat_features(test_pool.get_features(), train_pool.get_cat_feature_indices()))
pred2 = model.predict(Pool(map_cat_features(test_pool.get_features(), train_pool.get_cat_feature_indices()), cat_features=train_pool.get_cat_feature_indices()))
assert _check_data(pred1, pred2)
def test_save_model(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoost({'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
model2 = CatBoost()
model2.load_model(output_model_path)
pred1 = model.predict(test_pool)
pred2 = model2.predict(test_pool)
assert _check_data(pred1, pred2)
def test_multiclass(task_type):
pool = Pool(CLOUDNESS_TRAIN_FILE, column_description=CLOUDNESS_CD_FILE)
classifier = CatBoostClassifier(iterations=2, loss_function='MultiClass', thread_count=8, task_type=task_type, devices='0')
classifier.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
classifier.save_model(output_model_path)
new_classifier = CatBoostClassifier()
new_classifier.load_model(output_model_path)
pred = new_classifier.predict_proba(pool)
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
def test_multiclass_classes_count_missed_classes(task_type):
prng = np.random.RandomState(seed=0)
pool = Pool(prng.random_sample(size=(100, 10)), label=prng.choice([1, 3], size=100))
classifier = CatBoostClassifier(classes_count=4, iterations=2, loss_function='MultiClass', thread_count=8, task_type=task_type, devices='0')
classifier.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
classifier.save_model(output_model_path)
new_classifier = CatBoostClassifier()
new_classifier.load_model(output_model_path)
pred = new_classifier.predict_proba(pool)
classes = new_classifier.predict(pool)
assert pred.shape == (100, 4)
    assert all(cls in [1, 3] for cls in np.array(classes).flatten())
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
@pytest.mark.parametrize('label_type', ['string', 'int'])
def test_multiclass_custom_class_labels(label_type, task_type):
if label_type == 'int':
train_labels = [1, 2]
elif label_type == 'string':
train_labels = ['Class1', 'Class2']
prng = np.random.RandomState(seed=0)
train_pool = Pool(prng.random_sample(size=(100, 10)), label=prng.choice(train_labels, size=100))
test_pool = Pool(prng.random_sample(size=(50, 10)))
classifier = CatBoostClassifier(iterations=2, loss_function='MultiClass', thread_count=8, task_type=task_type, devices='0')
classifier.fit(train_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
classifier.save_model(output_model_path)
new_classifier = CatBoostClassifier()
new_classifier.load_model(output_model_path)
pred = new_classifier.predict_proba(test_pool)
classes = new_classifier.predict(test_pool)
assert pred.shape == (50, 2)
assert all(((class1 in train_labels) for class1 in classes))
preds_path = test_output_path(PREDS_TXT_PATH)
np.savetxt(preds_path, np.array(pred), fmt='%.8f')
return local_canonical_file(preds_path)
def test_multiclass_custom_class_labels_from_files(task_type):
labels = ['a', 'b', 'c', 'd']
cd_path = test_output_path('cd.txt')
np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')
prng = np.random.RandomState(seed=0)
train_path = test_output_path('train.txt')
np.savetxt(train_path, generate_random_labeled_set(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')
test_path = test_output_path('test.txt')
np.savetxt(test_path, generate_random_labeled_set(25, 10, labels, prng=prng), fmt='%s', delimiter='\t')
train_pool = Pool(train_path, column_description=cd_path)
test_pool = Pool(test_path, column_description=cd_path)
classifier = CatBoostClassifier(iterations=2, loss_function='MultiClass', thread_count=8, task_type=task_type, devices='0')
classifier.fit(train_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
classifier.save_model(output_model_path)
new_classifier = CatBoostClassifier()
new_classifier.load_model(output_model_path)
pred = new_classifier.predict_proba(test_pool)
classes = new_classifier.predict(test_pool)
assert pred.shape == (25, 4)
assert all(((class1 in labels) for class1 in classes))
preds_path = test_output_path(PREDS_TXT_PATH)
np.savetxt(preds_path, np.array(pred), fmt='%.8f')
return local_canonical_file(preds_path)
def test_class_names(task_type):
class_names = ['Small', 'Medium', 'Large']
prng = np.random.RandomState(seed=0)
train_pool = Pool(prng.random_sample(size=(100, 10)), label=prng.choice(class_names, size=100))
test_pool = Pool(prng.random_sample(size=(25, 10)))
classifier = CatBoostClassifier(
iterations=2,
loss_function='MultiClass',
class_names=class_names,
thread_count=8,
task_type=task_type,
devices='0'
)
classifier.fit(train_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
classifier.save_model(output_model_path)
new_classifier = CatBoostClassifier()
new_classifier.load_model(output_model_path)
pred = new_classifier.predict_proba(test_pool)
classes = new_classifier.predict(test_pool)
assert pred.shape == (25, 3)
assert all(((class1 in class_names) for class1 in classes))
assert sorted(classifier.classes_) == sorted(class_names)
preds_path = test_output_path(PREDS_TXT_PATH)
np.savetxt(preds_path, np.array(pred), fmt='%.8f')
return local_canonical_file(preds_path)
def test_inconsistent_labels_and_class_names():
class_names = ['Small', 'Medium', 'Large']
prng = np.random.RandomState(seed=0)
train_pool = Pool(prng.random_sample(size=(100, 10)), label=prng.choice([0, 1, 2], size=100))
classifier = CatBoostClassifier(
iterations=2,
loss_function='MultiClass',
class_names=class_names,
)
with pytest.raises(CatBoostError):
classifier.fit(train_pool)
@pytest.mark.parametrize(
'features_dtype',
['str', 'np.float32'],
ids=['features_dtype=str', 'features_dtype=np.float32']
)
def test_querywise(features_dtype, task_type):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
test_pool = Pool(QUERYWISE_TEST_FILE, column_description=QUERYWISE_CD_FILE)
model = CatBoost(params={'loss_function': 'QueryRMSE', 'iterations': 2, 'thread_count': 8, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
pred1 = model.predict(test_pool)
df = read_table(QUERYWISE_TRAIN_FILE, delimiter='\t', header=None)
train_query_id = df.loc[:, 1]
train_target = df.loc[:, 2]
train_data = df.drop([0, 1, 2, 3, 4], axis=1).astype(eval(features_dtype))
df = read_table(QUERYWISE_TEST_FILE, delimiter='\t', header=None)
test_data = df.drop([0, 1, 2, 3, 4], axis=1).astype(eval(features_dtype))
model.fit(train_data, train_target, group_id=train_query_id)
pred2 = model.predict(test_data)
assert _check_data(pred1, pred2)
def test_group_weight(task_type):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE_WITH_GROUP_WEIGHT)
test_pool = Pool(QUERYWISE_TEST_FILE, column_description=QUERYWISE_CD_FILE_WITH_GROUP_WEIGHT)
model = CatBoost(params={'loss_function': 'YetiRank', 'iterations': 10, 'thread_count': 8, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
pred1 = model.predict(test_pool)
df = read_table(QUERYWISE_TRAIN_FILE, delimiter='\t', header=None)
train_query_weight = df.loc[:, 0]
train_query_id = df.loc[:, 1]
train_target = df.loc[:, 2]
train_data = df.drop([0, 1, 2, 3, 4], axis=1).astype(str)
df = read_table(QUERYWISE_TEST_FILE, delimiter='\t', header=None)
test_query_weight = df.loc[:, 0]
test_query_id = df.loc[:, 1]
test_data = Pool(df.drop([0, 1, 2, 3, 4], axis=1).astype(np.float32), group_id=test_query_id, group_weight=test_query_weight)
model.fit(train_data, train_target, group_id=train_query_id, group_weight=train_query_weight)
pred2 = model.predict(test_data)
assert _check_data(pred1, pred2)
def test_zero_baseline(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
baseline = np.zeros(pool.num_row())
pool.set_baseline(baseline)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_ones_weight(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
weight = np.ones(pool.num_row())
pool.set_weight(weight)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_non_ones_weight(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
weight = np.arange(1, pool.num_row() + 1)
pool.set_weight(weight)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_ones_weight_equal_to_nonspecified_weight(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
predictions = []
for set_weights in [False, True]:
if set_weights:
weight = np.ones(train_pool.num_row())
train_pool.set_weight(weight)
model.fit(train_pool)
predictions.append(model.predict(test_pool))
assert _check_data(predictions[0], predictions[1])
def test_py_data_group_id(task_type):
train_pool_from_files = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE_WITH_GROUP_ID)
test_pool_from_files = Pool(QUERYWISE_TEST_FILE, column_description=QUERYWISE_CD_FILE_WITH_GROUP_ID)
model = CatBoost(
params={'loss_function': 'QueryRMSE', 'iterations': 2, 'thread_count': 4, 'task_type': task_type, 'devices': '0'}
)
model.fit(train_pool_from_files)
predictions_from_files = model.predict(test_pool_from_files)
train_df = read_table(QUERYWISE_TRAIN_FILE, delimiter='\t', header=None)
train_target = train_df.loc[:, 2]
raw_train_group_id = train_df.loc[:, 1]
train_data = train_df.drop([0, 1, 2, 3, 4], axis=1).astype(np.float32)
test_df = read_table(QUERYWISE_TEST_FILE, delimiter='\t', header=None)
test_data = Pool(test_df.drop([0, 1, 2, 3, 4], axis=1).astype(np.float32))
for group_id_func in (int, str, lambda id: 'myid_' + str(id)):
train_group_id = [group_id_func(group_id) for group_id in raw_train_group_id]
model.fit(train_data, train_target, group_id=train_group_id)
predictions_from_py_data = model.predict(test_data)
assert _check_data(predictions_from_files, predictions_from_py_data)
def test_py_data_subgroup_id(task_type):
train_pool_from_files = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE_WITH_SUBGROUP_ID)
test_pool_from_files = Pool(QUERYWISE_TEST_FILE, column_description=QUERYWISE_CD_FILE_WITH_SUBGROUP_ID)
model = CatBoost(
params={'loss_function': 'QueryRMSE', 'iterations': 2, 'thread_count': 4, 'task_type': task_type, 'devices': '0'}
)
model.fit(train_pool_from_files)
predictions_from_files = model.predict(test_pool_from_files)
train_df = read_table(QUERYWISE_TRAIN_FILE, delimiter='\t', header=None)
train_group_id = train_df.loc[:, 1]
raw_train_subgroup_id = train_df.loc[:, 4]
train_target = train_df.loc[:, 2]
train_data = train_df.drop([0, 1, 2, 3, 4], axis=1).astype(np.float32)
test_df = read_table(QUERYWISE_TEST_FILE, delimiter='\t', header=None)
test_data = Pool(test_df.drop([0, 1, 2, 3, 4], axis=1).astype(np.float32))
for subgroup_id_func in (int, str, lambda id: 'myid_' + str(id)):
train_subgroup_id = [subgroup_id_func(subgroup_id) for subgroup_id in raw_train_subgroup_id]
model.fit(train_data, train_target, group_id=train_group_id, subgroup_id=train_subgroup_id)
predictions_from_py_data = model.predict(test_data)
assert _check_data(predictions_from_files, predictions_from_py_data)
def test_fit_data(task_type):
pool = Pool(CLOUDNESS_TRAIN_FILE, column_description=CLOUDNESS_CD_FILE)
eval_pool = Pool(CLOUDNESS_TEST_FILE, column_description=CLOUDNESS_CD_FILE)
base_model = CatBoostClassifier(iterations=2, learning_rate=0.03, loss_function="MultiClass", task_type=task_type, devices='0')
base_model.fit(pool)
baseline = np.array(base_model.predict(pool, prediction_type='RawFormulaVal'))
eval_baseline = np.array(base_model.predict(eval_pool, prediction_type='RawFormulaVal'))
eval_pool.set_baseline(eval_baseline)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, loss_function="MultiClass")
data = map_cat_features(pool.get_features(), pool.get_cat_feature_indices())
model.fit(data, pool.get_label(), pool.get_cat_feature_indices(), sample_weight=np.arange(1, pool.num_row() + 1), baseline=baseline, use_best_model=True, eval_set=eval_pool)
pred = model.predict_proba(eval_pool)
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path,
|
np.array(pred)
|
numpy.array
|
# -*- coding: utf-8 -*-
import tellurium as te
import random
import numpy as np
import copy
import analysis
class ReactionType:
UNIUNI = 0
BIUNI = 1
UNIBI = 2
BIBI = 3
class RegulationType:
DEFAULT = 0
INHIBITION = 1
ACTIVATION = 2
INIHIBITION_ACTIVATION = 3
class Reversibility:
IRREVERSIBLE = 0
REVERSIBLE = 1
class RP:
UniUni = 0.75
BiUni = 0.1
UniBi = 0.1
BiBI = 0.05
class RLP:
Default = 0.7
Inhib = 0.125
Activ = 0.125
Inhibactiv = 0.05
class REVP:
Irreversible = 0.
Reversible = 1.
#def pickRateLawType():
# rt = np.random.random()
# if rt < RateLawProb.default:
# return 0
# elif rt < RateLawProb.default + RateLawProb.inhib:
# return 1
# elif rt < RateLawProb.default + RateLawProb.inhib + RateLawProb.activ:
# return 2
# return 3
def pickReactionType():
rt1 =
|
np.random.random()
|
numpy.random.random
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import datetime
from reda.importers.eit_version_2010 import _average_swapped_current_injections
def _extract_adc_data(mat, **kwargs):
"""Extract adc-channel related data (i.e., data that is captured for all 48
channels of the 40-channel medusa system
"""
md = mat['MD'].squeeze()
frequencies = mat['MP']['fm'].take(0)
# it seems that there exist different file formats under this same official
# version.
if md['fm'].size == frequencies.size:
use_v = 0
else:
use_v = 1
# print('@@@')
# import IPython
# IPython.embed()
# exit()
# Labview epoch
epoch = datetime.datetime(1904, 1, 1)
def convert_epoch(x):
timestamp = epoch + datetime.timedelta(seconds=x.astype(float))
return timestamp
dfl = []
# loop over frequencies
for f_id in range(0, frequencies.size):
frequency = frequencies[f_id]
if use_v == 0:
def get_field(key):
return md[key][f_id]
elif use_v == 1:
def get_field(key):
indices = np.where(
md['fm'].take(0) == frequencies[f_id])
return md[key].take(0)[indices]
# def get_field(key):
# indices = np.where(md['fm'].take(f_id) == frequencies[f_id])
# return md[key].take(f_id)[indices]
timestamp = np.atleast_2d(
[convert_epoch(x) for x in get_field('Time')]
).T.squeeze()
column_names = ['ch{:02}'.format(i) for i in range(48)]
ab = get_field('cni')
index_pairs = [
(channel, 'Ug3_{}'.format(i)) for channel in column_names
for i in range(3)
]
Ug3 = get_field('Ug3')
ug3_reshaped = Ug3.reshape([Ug3.shape[0], Ug3.shape[1] * 3])
df = pd.DataFrame(
ug3_reshaped,
index=pd.MultiIndex.from_arrays(
[
ab[:, 0],
ab[:, 1],
|
np.ones(ab.shape[0])
|
numpy.ones
|
import cv2
import sys
import os
sys.path.append('..')
import numpy as np
from maskrcnn_benchmark.structures.bounding_box import BoxList
from preprocess.colors import get_colors
from maskrcnn_benchmark.structures.image_list import ImageList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask, Polygons
from maskrcnn_benchmark.structures.bounding_box import BoxList
from pycocotools import mask as maskUtils
import openslide as ops
import random
import itertools
from maskrcnn_benchmark.utils.miscellaneous import maskToPolygons
import pdb
def vis_bbox(bboxlist, imagelist, normalize = [102.9801, 115.9465, 122.7717] ):
if isinstance(imagelist, ImageList):
images = []
for i, bbox in enumerate(bboxlist):
if bbox.mode != 'xyxy':
bbox = bbox.convert('xyxy')
image = imagelist.tensors[i].numpy()
image =
|
np.squeeze(image)
|
numpy.squeeze
|
"""Code for the bootstrap uncertainty quantification (BUQ) algorithm."""
import time
import logging
import numpy as np
import pandas as pd
import buq
import models
import tests
def import_time_series_data():
"""Import time series data for model, without any time slicing."""
ts_data = pd.read_csv('data/demand_wind.csv', index_col=0)
ts_data.index = pd.to_datetime(ts_data.index)
return ts_data
def bootstrap_sample_weeks(data, num_weeks_per_season):
"""Create bootstrap sample by sampling weeks from different
meteorological seasons.
Parameters:
-----------
data (pandas DataFrame) : demand and wind data
num_weeks_per_season (int) : number of weeks sampled from each season
Returns:
--------
output (pandas DataFrame) : the bootstrap sample
"""
# Sample weeks from the meteorological seasons
bins = [[12, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]]
sample_length = 4*num_weeks_per_season*7*24
output = np.zeros(shape=(sample_length, data.shape[1]))
k = 0
for block in range(num_weeks_per_season):
for bin_num in range(4):
year = np.random.choice(list(data.index.year.unique()))
data_sel = data[(data.index.year == year)&
(data.index.month.isin(bins[bin_num]))]
num_days = data_sel.shape[0]/24
possible_startdays = np.arange(num_days - 7 + 1)
sample_index = (24*np.random.choice(possible_startdays)
+ np.arange(7*24))
sample = data_sel.iloc[sample_index]
output[k:k+sample.shape[0]] = sample.values
k = k + sample.shape[0]
# Change output from numpy array to pandas DataFrame
if data.shape[1] == 2:
output_columns = ['demand', 'wind']
if data.shape[1] == 6:
output_columns = ['demand_region2', 'demand_region4',
'demand_region5', 'wind_region2',
'wind_region5', 'wind_region6']
index = pd.to_datetime(np.arange(sample_length),
origin='2020', unit='h') # Dummy datetime index
output = pd.DataFrame(output, index=index, columns=output_columns)
return output
def bootstrap_sample_months(data, num_years):
""""Create hypothetical years by block bootstrapping months.
Parameters:
-----------
data (pandas DataFrame) : demand and wind data
num_years (int) : number of years of the output sample
Returns:
--------
output (pandas DataFrame) : the bootstrap sample
"""
years_np = np.zeros(shape=(8760*num_years, data.shape[1]))
num_years_inp = data.values.shape[0]/8760
# Create each year individually and input them
for year_num in range(num_years):
year_np = np.zeros(shape=(8760, data.shape[1]))
lims = [0, 744, 1416, 2160, 2880, 3624, 4344, 5088, 5832,
6552, 7296, 8016, 8760]
# List of years from which months are taken
month_years = np.array([int(num_years_inp*np.random.rand(1))
for month in range(12)])
# Input the sampled months
for month in range(12):
llim, rlim = lims[month], lims[month+1]
yrstart = 8760 * month_years[month]
year_np[llim:rlim] = data.values[yrstart+llim:yrstart+rlim]
# Input the year into the years array
years_np[8760*year_num:8760*(year_num+1)] = year_np
# Change output from numpy array to pandas DataFrame
output_columns = ['demand_region2', 'demand_region4', 'demand_region5',
'wind_region2', 'wind_region5', 'wind_region6']
index = pd.to_datetime(np.arange(years_np.shape[0]),
origin='2020', unit='h') # Dummy datetime index
output = pd.DataFrame(years_np, index=index, columns=output_columns)
return output
def run_simulation(model_name_in_paper, ts_data, run_id=0):
"""Run Calliope model with demand & wind data.
Parameters:
-----------
model_name_in_paper (str) : 'LP_planning', 'MILP_planning' or
'operation'
ts_data (pandas DataFrame) : demand & wind time series data
run_id (int or str) : unique id, useful if running in parallel
Returns:
--------
results (pandas DataFrame) : model outputs
"""
start = time.time()
if model_name_in_paper == 'LP_planning':
model = models.SixRegionModel(ts_data=ts_data,
run_mode='plan',
baseload_integer=False,
baseload_ramping=False,
allow_unmet=True,
run_id=run_id)
elif model_name_in_paper == 'MILP_planning':
model = models.SixRegionModel(ts_data=ts_data,
run_mode='plan',
baseload_integer=True,
baseload_ramping=False,
allow_unmet=True,
run_id=run_id)
elif model_name_in_paper == 'operation':
model = models.SixRegionModel(ts_data=ts_data,
run_mode='operate',
baseload_integer=False,
baseload_ramping=True,
allow_unmet=True,
run_id=run_id)
else:
raise ValueError('Invalid model name.')
# Run model and save results
model.run()
finish = time.time()
tests.test_output_consistency_6_region(model, run_mode=(
'operate' if model_name_in_paper == 'operation' else 'plan'
))
results = model.get_summary_outputs()
results.loc['time'] = finish - start
return results
def run_years_simulation(model_name_in_paper, startyear, endyear, run_id=0):
"""Run model with certain years of data."""
ts_data = import_time_series_data()
ts_data = ts_data.loc[str(startyear):str(endyear)]
results = run_simulation(model_name_in_paper, ts_data=ts_data,
run_id=run_id)
return results
def run_bootstrap_simulation(model_name_in_paper, scheme,
num_blocks_per_bin, run_id=0):
"""Run model with bootstrap sampled data
Parameters:
-----------
model_name_in_paper (str) : 'LP_planning', 'MILP_planning' or
'operation'
scheme: either 'months' or 'weeks' -- scheme used to create bootstrap
samples
num_blocks_per_bin: either the number of months sampled from each
calendar month, or the number of weeks sampled from each season
Returns:
--------
results (pandas DataFrame) : model outputs
"""
ts_data = import_time_series_data()
# Create bootstrap sample and run model
if scheme == 'months':
sample = buq.bootstrap_sample_months(ts_data,
num_blocks_per_bin)
elif scheme == 'weeks':
sample = buq.bootstrap_sample_weeks(ts_data,
num_blocks_per_bin)
else:
raise ValueError('Must be either months or weeks scheme')
results = run_simulation(model_name_in_paper, ts_data=sample,
run_id=run_id)
return results
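# Hedged usage sketch: one bootstrap run using the months scheme with one block
# per calendar month (i.e. a single synthetic year).
# results = run_bootstrap_simulation('LP_planning', scheme='months',
#                                    num_blocks_per_bin=1, run_id=0)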
def run_buq_algorithm(model_name_in_paper,
point_sample_length,
bootstrap_scheme,
num_blocks_per_bin,
num_bootstrap_samples):
"""Run through BUQ algorithm once to estimate standard deviation.
Parameters:
-----------
model_name_in_paper (str) : 'LP_planning', 'MILP_planning' or
'operation'
point_sample_length (int) : length of sample used to determine point
estimate (in hours), used only for rescaling
bootstrap_scheme (str) : bootstrap scheme for calculating the standard
deviation: 'months' or 'weeks'
num_blocks_per_bin (int) : number of months from each calendar month
or number of weeks from each season
num_bootstrap_samples (int) : number of bootstrap samples over which to
calculate the standard deviation
Returns:
--------
point_estimate_stdev (pandas DataFrame) : estimates for the standard
deviation of each model output
"""
if bootstrap_scheme == 'weeks':
bootstrap_sample_length = num_blocks_per_bin * 4 * 7 * 24
elif bootstrap_scheme == 'months':
bootstrap_sample_length = num_blocks_per_bin * 8760
# Calculate variance across bootstrap samples
logging.info('Starting bootstrap samples')
# Run model for each bootstrap sample
run_index = np.arange(num_bootstrap_samples)
for sample_num in run_index:
logging.info('\n\nCalculating bootstrap sample %s', sample_num+1)
results = run_bootstrap_simulation(model_name_in_paper,
bootstrap_scheme,
num_blocks_per_bin)
if sample_num == 0:
outputs = pd.DataFrame(columns=
|
np.arange(num_bootstrap_samples)
|
numpy.arange
|
import csv
import numpy as np
from sklearn.metrics import r2_score
FILE_NAME = r"external/GSM3153400_scTrioSeq2Met_CRC11_NC_418.singleC.cpg.txt"
START_INDEX = 6500000
END_INDEX = 6700000
POSITION_INDEX = 0
STRAND_INDEX = "2"
chr16 = []
pos_dict = {}
def main():
with open(FILE_NAME) as f:
csv_file = csv.DictReader(f, delimiter="\t")
for line in csv_file.reader:
if line[0] == "chr16":
pos = int(line[1])
others = [int(i) for i in line[4:-3]]
if line[3] == "-":
pos -= 1
if pos not in pos_dict:
pos_dict[pos] = np.array(others)
else:
pos_dict[pos] += np.array(others)
data_array = np.array([np.insert(pos_dict[pos], 0, pos) for pos in pos_dict])
indexes = np.where(
np.logical_and(data_array[:, POSITION_INDEX] > START_INDEX,
data_array[:, POSITION_INDEX] < END_INDEX))
pmd = data_array[indexes]
ratio = pmd[:, 2] / pmd[:, 1]
# pmd_with_ratio = np.column_stack((pmd, ratio))
# pmd_with_ratio[:,0] = pmd_with_ratio[:,0].astype(np.int)
r2 = []
for index in range(ratio.size):
print(index)
ratio_copy = np.copy(ratio)
value = ratio_copy[index]
ratio_copy_deleted =
|
np.delete(ratio_copy, index)
|
numpy.delete
|
# voronoiVis.py
# script to generate a tetrahedra/cone for 2D Voronoi visualization
# Author PKR, UPENN, MARCH 2019
# add loading paths
import sys, os
sys.path.append(os.path.join(sys.path[0],'ONI'))
sys.path.append(os.path.join(sys.path[0],'Legacy'))
import numpy as np
from vispy import app, gloo
from binLoad import binLoad
from loadLocB import loadLocB
from vispy.util.transforms import translate, perspective, rotate
# using cython code here
from polyArea import PolyArea
from scipy import spatial as sp
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
## SET FILE LOCATION HERE
#FileLoc = '//entropy.lgads.lakadamyali.group/Data3/peter/Melina/MelinagfptauA3_gfpnanogerm_1hrDMSOtreatedSTORM.1543953593043.locb'
FileLoc = '//entropy.lgads.lakadamyali.group/Data3/peter/hMSC-MeHA-Glass.bin'
OutDict = binLoad(FileLoc)
#OutDict = loadLocB(FileLoc)
# Pull out X Y coordinates
Pos = OutDict['Localizations'][:,0:2]
#keys = ['rawPosition_x','rawPosition_y'] #,'rawPosition_z']
#rawPosCol = [OutDict['Localization_Matrix_Mapping'].get(key) for key in keys]
#Pos = OutDict['Localization_Results'][:,rawPosCol]
maxes = np.amax(Pos,axis=0)
mines = np.amin(Pos,axis=0)
width = np.max(maxes-mines)
pointz = 2*((Pos-mines)/width)-1
# flip y-axis to match insight3 convention
pointz[:,1] = -pointz[:,1]
#pointz = 2*(np.random.rand(1000000,2)-0.5)
uPos = np.unique(pointz,axis=0) # make sure points are unique
pointz = uPos
nPoints = uPos.shape[0]
Area = PolyArea(uPos*width/2)
# find min/max values for normalization
minima = min(-np.log(Area))
maxima = max(-np.log(Area))
print(minima)
print(maxima)
## SET PARAMETERS HERE:
pix2nm = 116
barWidth = 2000
maxima = 7
minima = 3
## Creating a modified jet color map here
jjet = cm.get_cmap('jet',240)
ojet = jjet(np.linspace(0,1,240))
fadeLen = 16
djet = np.zeros([fadeLen,4])
# create an nx4 array of dark colors
for ii in range(0,fadeLen):
djet[ii,:] = (ii/fadeLen)*ojet[0,:]
djet[ii,3] = 1
newjet = np.vstack((djet,ojet))
njetcmp = ListedColormap(newjet)
# normalize chosen colormap
norm = mpl.colors.Normalize(vmin=minima, vmax=maxima, clip=True)
mapper = cm.ScalarMappable(norm=norm, cmap=njetcmp)
color = np.zeros((nPoints,4))
color = mapper.to_rgba(-np.log(Area))
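# Hedged note: -log(Area) acts as a local density proxy for the colormap above;
# e.g. a Voronoi cell of area 0.01 maps to ~4.6 while a cell of area 1.0 maps
# to 0, so denser regions land on the hot end of the clipped range.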
# VBO Code for Voronoi Display
# note the 'color' and 'v_color' in vertex
vertex = """
uniform mat4 u_model; // Model matrix
uniform mat4 u_view; // View matrix
uniform mat4 u_projection; // Projection matrix
uniform vec4 u_color; // mask color for edge plotting
attribute vec3 a_position;
attribute vec4 a_color;
varying vec4 v_color;
void main()
{
gl_Position = u_projection * u_view * u_model * vec4(a_position, 1.0);
v_color = a_color * u_color;
}
"""
# note the varying 'v_color'; it must have the same name as in the vertex shader.
fragment = """
varying vec4 v_color;
void main()
{
gl_FragColor = v_color;
}
"""
# helper functions for generating polygons, scale bar
def genVIC(points,tri=32,color=None):
N = points.shape[0]
V = np.zeros((N*(tri+1),3),dtype=np.float32)
I = np.zeros((N*(tri),3),dtype=np.uint32)
C = np.ones((N*(tri+1),4),dtype=np.float32)
npi = np.pi
V[::tri+1,0:2] = points
starts = np.arange(0,N*(tri+1),tri+1)
ends = np.arange(tri,N*(tri+1),tri+1)
I[::tri,:] = np.stack((starts,ends,starts+1),axis=-1)
for ii in range(0,tri):
adjust = [np.cos(2*npi*ii/(tri-1))/64,np.sin(2*npi*ii/(tri-1))/64]
V[ii+1::tri+1,0:2] = points+adjust
V[ii+1::tri+1,2] = -0.1
for ii in range(0,tri-1):
I[ii+1::tri,:] = np.stack((starts,starts+ii+1,starts+ii+2),axis=-1)
# we'll put color logic in later, do random for now
if color is None:
color = np.random.rand(N,4)
color[:,3] = 1
C[::tri+1,:] = color
for ii in range(0,tri):
C[ii+1::tri+1,:] = C[::tri+1,:]
return V, I, C
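# Hedged usage sketch: genVIC turns N 2D points into a triangle-fan "cone" per
# point (tri edges each); e.g. for 10 random points in [-1, 1]^2:
# V_demo, I_demo, C_demo = genVIC(2 * np.random.rand(10, 2) - 1, tri=16)
# V_demo.shape, I_demo.shape  # -> ((10 * 17, 3), (10 * 16, 3))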
def genScaleBar(dim, center):
# bottom right, bottom left, top left, top right
rect = {0:[1,-1],1:[-1,-1],2:[-1,1],3:[1,1]}
N = 4
V = np.zeros((N,3),dtype=np.float32)
I = np.zeros((N-2,3),dtype=np.uint32)
C = np.ones((N,4),dtype=np.float32)
for ii in range(0,N):
V[ii,0] = center[0]+rect[ii][0]*dim[0]/2
V[ii,1] = center[1]+rect[ii][1]*dim[1]/2
V[:,2] = 0.002 # raise scale bar above voronoi cones
I[0,:] = [0, 1, 2]
I[1,:] = [2, 3, 0]
return V,I,C
class Canvas(app.Canvas):
""" build canvas class for this demo """
def __init__(self):
""" initialize the canvas """
app.Canvas.__init__(self,
size=(512, 512),
title='SR Voronoi Visualizer',
keys='interactive')
self.tri=16 # 16 edges for each point
self.shrinkage = width/2
# define vertices, indices, color
V,I,C = genVIC(pointz,self.tri,color)
self.BarStart = V.shape[0]
# set initial scale and center point
self.centerpos = [0,0]
self.scale = 1
# hard-coded bar coordinates for now
self.BarCenter = [0.9, -0.9]
self.BarDim = [barWidth/pix2nm/self.shrinkage, 0.05/np.sqrt(self.scale)]
bV,bI,bC = genScaleBar(self.BarDim,self.BarCenter)
bI = bI+self.BarStart
# bind to data
V = np.vstack((V,bV))
I = np.vstack((I,bI))
C = np.vstack((C,bC))
# shader program
tet = gloo.Program(vert=vertex, frag=fragment)#, count=V.shape[0])
self.I = gloo.IndexBuffer(I)
self.V = gloo.VertexBuffer(V)
self.C = gloo.VertexBuffer(C)
tet['a_position'] = self.V
tet['a_color'] = self.C
# initialize transformation matrix
self.view =
|
np.eye(4, dtype=np.float32)
|
numpy.eye
|
import sys
import numpy as np
from itertools import combinations
from pyemto.utilities.utils import rotation_matrix
import spglib as spg
try:
from pymatgen import Lattice, Structure
from pymatgen.vis.structure_vtk import StructureVis
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.util.coord import get_angle
except ImportError:
# pymatgen has not been installed
raise ImportError('emto_input_generator requires pymatgen>=4.4.0 to be installed!')
import os
import pyemto
import pyemto.common.common as common
class EMTO:
"""This class can be used to create EMTO input files from
an arbitrary structure. What is needed as input:
-primitive lattice vectors,
-basis vectors,
-list of atomic species that occupy the basis sites.
"""
def __init__(self, folder=None, EMTOdir=None):
""" """
if folder is None:
self.folder = os.getcwd()
else:
self.folder = folder
if EMTOdir is None:
self.EMTOdir = '/home/EMTO'
else:
self.EMTOdir = EMTOdir
self.sg2ibz = {1:14, 2:14, 3:12, 4:12, 5:13, 6:12, 7:12, 8:13, 9:13, 10:12,
11:12, 12:13, 13:12, 14:12, 15:13, 16:8, 17:8, 18:8, 19:8, 20:9,
21:9, 22:11, 23:10, 24:10, 25:8, 26:8, 27:8, 28:8, 29:8, 30:8,
31:8, 32:8, 33:8, 34:8, 35:9, 36:9, 37:9, 38:9, 39:9, 40:9,
41:9, 42:11, 43:11, 44:10, 45:10, 46:10, 47:8, 48:8, 49:8, 50:8,
51:8, 52:8, 53:8, 54:8, 55:8, 56:8, 57:8, 58:8, 59:8, 60:8,
61:8, 62:8, 63:9, 64:9, 65:9, 66:9, 67:9, 68:9, 69:11, 70:11,
71:10, 72:10, 73:10, 74:10, 75:5, 76:5, 77:5, 78:5, 79:6, 80:6,
81:5, 82:6, 83:5, 84:5, 85:5, 86:5, 87:6, 88:6, 89:5, 90:5,
91:5, 92:5, 93:5, 94:5, 95:5, 96:5, 97:6, 98:6, 99:5, 100:5,
101:5, 102:5, 103:5, 104:5, 105:5, 106:5, 107:6, 108:6, 109:6, 110:6,
111:5, 112:5, 113:5, 114:5, 115:5, 116:5, 117:5, 118:5, 119:6, 120:6,
121:6, 122:6, 123:5, 124:5, 125:5, 126:5, 127:5, 128:5, 129:5, 130:5,
131:5, 132:5, 133:5, 134:5, 135:5, 136:5, 137:5, 138:5, 139:6, 140:6,
141:6, 142:6, 143:4, 144:4, 145:4, 146:7, 147:4, 148:7, 149:4, 150:4,
151:4, 152:4, 153:4, 154:4, 155:7, 156:4, 157:4, 158:4, 159:4, 160:7,
161:7, 162:4, 163:4, 164:4, 165:4, 166:7, 167:7, 168:4, 169:4, 170:4,
171:4, 172:4, 173:4, 174:4, 175:4, 176:4, 177:4, 178:4, 179:4, 180:4,
181:4, 182:4, 183:4, 184:4, 185:4, 186:4, 187:4, 188:4, 189:4, 190:4,
191:4, 192:4, 193:4, 194:4, 195:1, 196:2, 197:3, 198:1, 199:3, 200:1,
201:1, 202:2, 203:2, 204:3, 205:1, 206:3, 207:1, 208:1, 209:2, 210:2,
211:3, 212:1, 213:1, 214:3, 215:1, 216:2, 217:3, 218:1, 219:2, 220:3,
221:1, 222:1, 223:1, 224:1, 225:2, 226:2, 227:2, 228:2, 229:3, 230:3}
self.sg2bl = {1:'simple triclinic', 2:'simple triclinic',
3:'simple monoclinic', 4:'simple monoclinic',
5:'base-centered monoclinic', 6:'simple monoclinic',
7:'simple monoclinic', 8:'base-centered monoclinic',
9:'base-centered monoclinic', 10:'simple monoclinic',
11:'simple monoclinic', 12:'base-centered monoclinic',
13:'simple monoclinic', 14:'simple monoclinic',
15:'base-centered monoclinic', 16:'simple orthorhombic',
17:'simple orthorhombic', 18:'simple orthorhombic',
19:'simple orthorhombic', 20:'base-centered orthorhombic',
21:'base-centered orthorhombic', 22:'face-centered orthorhombic',
23:'body-centered orthorhombic', 24:'body-centered orthorhombic',
25:'simple orthorhombic', 26:'simple orthorhombic',
27:'simple orthorhombic', 28:'simple orthorhombic',
29:'simple orthorhombic', 30:'simple orthorhombic',
31:'simple orthorhombic', 32:'simple orthorhombic',
33:'simple orthorhombic', 34:'simple orthorhombic',
35:'base-centered orthorhombic', 36:'base-centered orthorhombic',
37:'base-centered orthorhombic', 38:'base-centered orthorhombic',
39:'base-centered orthorhombic', 40:'base-centered orthorhombic',
41:'base-centered orthorhombic', 42:'face-centered orthorhombic',
43:'face-centered orthorhombic', 44:'body-centered orthorhombic',
45:'body-centered orthorhombic', 46:'body-centered orthorhombic',
47:'simple orthorhombic', 48:'simple orthorhombic',
49:'simple orthorhombic', 50:'simple orthorhombic',
51:'simple orthorhombic', 52:'simple orthorhombic',
53:'simple orthorhombic', 54:'simple orthorhombic',
55:'simple orthorhombic', 56:'simple orthorhombic',
57:'simple orthorhombic', 58:'simple orthorhombic',
59:'simple orthorhombic', 60:'simple orthorhombic',
61:'simple orthorhombic', 62:'simple orthorhombic',
63:'base-centered orthorhombic', 64:'base-centered orthorhombic',
65:'base-centered orthorhombic', 66:'base-centered orthorhombic',
67:'base-centered orthorhombic', 68:'base-centered orthorhombic',
69:'face-centered orthorhombic', 70:'face-centered orthorhombic',
71:'body-centered orthorhombic', 72:'body-centered orthorhombic',
73:'body-centered orthorhombic', 74:'body-centered orthorhombic',
75:'simple tetragonal', 76:'simple tetragonal',
77:'simple tetragonal', 78:'simple tetragonal',
79:'body-centered tetragonal', 80:'body-centered tetragonal',
81:'simple tetragonal', 82:'body-centered tetragonal',
83:'simple tetragonal', 84:'simple tetragonal',
85:'simple tetragonal', 86:'simple tetragonal',
87:'body-centered tetragonal', 88:'body-centered tetragonal',
89:'simple tetragonal', 90:'simple tetragonal',
91:'simple tetragonal', 92:'simple tetragonal',
93:'simple tetragonal', 94:'simple tetragonal',
95:'simple tetragonal', 96:'simple tetragonal',
97:'body-centered tetragonal', 98:'body-centered tetragonal',
99:'simple tetragonal', 100:'simple tetragonal',
101:'simple tetragonal', 102:'simple tetragonal',
103:'simple tetragonal', 104:'simple tetragonal',
105:'simple tetragonal', 106:'simple tetragonal',
107:'body-centered tetragonal', 108:'body-centered tetragonal',
109:'body-centered tetragonal', 110:'body-centered tetragonal',
111:'simple tetragonal', 112:'simple tetragonal',
113:'simple tetragonal', 114:'simple tetragonal',
115:'simple tetragonal', 116:'simple tetragonal',
117:'simple tetragonal', 118:'simple tetragonal',
119:'body-centered tetragonal', 120:'body-centered tetragonal',
121:'body-centered tetragonal', 122:'body-centered tetragonal',
123:'simple tetragonal', 124:'simple tetragonal',
125:'simple tetragonal', 126:'simple tetragonal',
127:'simple tetragonal', 128:'simple tetragonal',
129:'simple tetragonal', 130:'simple tetragonal',
131:'simple tetragonal', 132:'simple tetragonal',
133:'simple tetragonal', 134:'simple tetragonal',
135:'simple tetragonal', 136:'simple tetragonal',
137:'simple tetragonal', 138:'simple tetragonal',
139:'body-centered tetragonal', 140:'body-centered tetragonal',
141:'body-centered tetragonal', 142:'body-centered tetragonal',
143:'hexagonal', 144:'hexagonal',
145:'hexagonal', 146:'rhombohedral',
147:'hexagonal', 148:'rhombohedral',
149:'hexagonal', 150:'hexagonal',
151:'hexagonal', 152:'hexagonal',
153:'hexagonal', 154:'hexagonal',
155:'rhombohedral', 156:'hexagonal',
157:'hexagonal', 158:'hexagonal',
159:'hexagonal', 160:'rhombohedral',
161:'rhombohedral', 162:'hexagonal',
163:'hexagonal', 164:'hexagonal',
165:'hexagonal', 166:'rhombohedral',
167:'rhombohedral', 168:'hexagonal',
169:'hexagonal', 170:'hexagonal',
171:'hexagonal', 172:'hexagonal',
173:'hexagonal', 174:'hexagonal',
175:'hexagonal', 176:'hexagonal',
177:'hexagonal', 178:'hexagonal',
179:'hexagonal', 180:'hexagonal',
181:'hexagonal', 182:'hexagonal',
183:'hexagonal', 184:'hexagonal',
185:'hexagonal', 186:'hexagonal',
187:'hexagonal', 188:'hexagonal',
189:'hexagonal', 190:'hexagonal',
191:'hexagonal', 192:'hexagonal',
193:'hexagonal', 194:'hexagonal',
195:'simple cubic', 196:'face-centered cubic',
197:'body-centered cubic', 198:'simple cubic',
199:'body-centered cubic', 200:'simple cubic',
201:'simple cubic', 202:'face-centered cubic',
203:'face-centered cubic', 204:'body-centered cubic',
205:'simple cubic', 206:'body-centered cubic',
207:'simple cubic', 208:'simple cubic',
209:'face-centered cubic', 210:'face-centered cubic',
211:'body-centered cubic', 212:'simple cubic',
213:'simple cubic', 214:'body-centered cubic',
215:'simple cubic', 216:'face-centered cubic',
217:'body-centered cubic', 218:'simple cubic',
219:'face-centered cubic', 220:'body-centered cubic',
221:'simple cubic', 222:'simple cubic',
223:'simple cubic', 224:'simple cubic',
225:'face-centered cubic', 226:'face-centered cubic',
227:'face-centered cubic', 228:'face-centered cubic',
229:'body-centered cubic', 230:'body-centered cubic'}
# BMDL, KSTR, SHAPE, KGRN and KFCD class instances
self.input_system = pyemto.System(folder=self.folder, EMTOdir=self.EMTOdir)
#
self.fit_angle_tol = 5e-6
self.fit_norm_ratio_tol = 5e-6
return
def calc_ws_radius(self, struct):
bohr2angst = 0.52917721
vol_unit = struct.volume/struct.num_sites
sws = (3*vol_unit/4.0/np.pi)**(1.0/3)/bohr2angst
return sws
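# Hedged numeric check of the Wigner-Seitz radius formula above: a cell of
# volume 16 A^3 with one site gives sws = (3*16/(4*pi))**(1.0/3)/0.52917721,
# i.e. roughly 2.95 Bohr radii.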
def make_basis_array(self, struct):
"""Returns a 2D numpy array of the basis atom coordinates
in !!Cartesian!! coordinates.
"""
len_basis = struct.num_sites
emto_basis = np.zeros((len_basis, 3))
for i in range(len_basis):
emto_basis[i, :] = struct.sites[i].coords
return emto_basis
def make_sites_array(self, struct):
len_basis = struct.num_sites
emto_sites = []
for i in range(len_basis):
emto_sites.append(struct.sites[i].specie.number)
return emto_sites
def make_cpa_sites_array(self, struct):
len_basis = struct.num_sites
self.atoms_cpa = []
self.concs_cpa = []
self.splts_cpa = []
self.fixs_cpa = []
for i in range(len_basis):
atom_number = struct.sites[i].specie.number
for j in range(len(self.pmg_species)):
if atom_number == self.pmg_species[j]:
self.atoms_cpa.append(self.species[j])
self.concs_cpa.append(self.concs[j])
self.splts_cpa.append(self.splts[j])
self.fixs_cpa.append(self.fixs[j])
break
def get_equivalent_sites(self):
"""Find all the sites that have exactly the same species,
concentrations, and magnetic moments"""
splt_tol = 1e-6
conc_tol = 1e-6
species_sorted = []
splts_sorted = []
concs_sorted = []
for i in range(len(self.species)):
tmp1 = []
tmp2 = []
tmp3 = []
ind_sorted = np.argsort(self.species[i])
for ind in ind_sorted:
tmp1.append(self.species[i][ind])
tmp2.append(self.splts[i][ind])
tmp3.append(self.concs[i][ind])
species_sorted.append(tmp1)
splts_sorted.append(tmp2)
concs_sorted.append(tmp3)
eqv_sites = np.zeros((len(species_sorted), len(species_sorted)), dtype=int) + 9999
for i in range(len(species_sorted)-1):
for j in range(i+1, len(species_sorted)):
eqv_sites[i,j] = 1
if len(species_sorted[i]) != len(species_sorted[j]):
# Sites i and j contain different numbers of atoms.
# For now, take them to be non-equivalent, although
# they could still be equivalent in the case that
# some element has been split into two or more parts
# concentration-wise (whole and the parts should have
# identical magnetic moments).
eqv_sites[i, j] = 0
else:
for a1, a2, splt1, splt2, conc1, conc2 in zip(species_sorted[i], species_sorted[j],
splts_sorted[i], splts_sorted[j], concs_sorted[i], concs_sorted[j]):
if a1 != a2 or np.abs(splt1 - splt2) > splt_tol or np.abs(conc1 - conc2) > conc_tol:
# Some pair of atoms (in the sorted lists) were not
# the same => sites i and j are not equivalent.
eqv_sites[i, j] = 0
break
output_sites = np.ones(len(species_sorted), dtype=int) * 9999
next_available = 1
for i in range(len(species_sorted)-1):
if output_sites[i] == 9999:
output_sites[i] = next_available
next_available += 1
for j in range(i+1, len(species_sorted)):
if eqv_sites[i, j] == 1:
output_sites[j] = output_sites[i]
if output_sites[-1] == 9999:
output_sites[-1] = next_available
return output_sites
def prepare_input_files(self, prims=None, basis=None, latpath=None,
coords_are_cartesian=False, latname=None,
species=None, find_primitive=True,
concs=None, splts=None, its=None, ws_wsts=None,
make_supercell=None, fixs=None,
**kwargs):
if prims is None:
sys.exit('EMTO.init_structure(): \'prims\' has to be given!')
if basis is None:
sys.exit('EMTO.init_structure(): \'basis\' has to be given!')
if latpath is None:
self.latpath = os.getcwd()
else:
self.latpath = latpath
if latname is None:
self.latname = 'structure'
else:
self.latname = latname
self.prims = np.array(prims)
self.basis = np.array(basis)
self.len_basis = len(self.basis[:, 0])
if species is None:
sys.exit('EMTO.init_structure(): \'species\' has to be given!')
else:
self.species = []
for i in range(len(species)):
if isinstance(species[i], list):
tmp = []
for j in range(len(species[i])):
tmp.append(species[i][j])
self.species.append(tmp)
else:
self.species.append([species[i]])
if splts is None:
# Assume a zero moments array
self.splts = []
for i in range(len(self.species)):
if isinstance(self.species[i], list):
tmp = []
for j in range(len(self.species[i])):
tmp.append(0.0)
self.splts.append(tmp)
else:
self.splts.append([0.0])
else:
self.splts = []
for i in range(len(splts)):
if isinstance(splts[i], list):
tmp = []
for j in range(len(splts[i])):
tmp.append(splts[i][j])
self.splts.append(tmp)
else:
self.splts.append([splts[i]])
if fixs is None:
# Default: no fixed moments ('N' for every species)
self.fixs = []
for i in range(len(self.species)):
if isinstance(self.species[i], list):
tmp = []
for j in range(len(self.species[i])):
tmp.append('N')
self.fixs.append(tmp)
else:
self.fixs.append(['N'])
else:
self.fixs = []
for i in range(len(fixs)):
if isinstance(fixs[i], list):
tmp = []
for j in range(len(fixs[i])):
tmp.append(fixs[i][j])
self.fixs.append(tmp)
else:
self.fixs.append([fixs[i]])
if concs is None:
# Default: equal concentrations for all species on a site
self.concs = []
for i in range(len(self.species)):
if isinstance(self.species[i], list):
tmp = []
for j in range(len(self.species[i])):
tmp.append(1.0/len(self.species[i]))
self.concs.append(tmp)
else:
self.concs.append([1.0])
else:
self.concs = []
for i in range(len(concs)):
if isinstance(concs[i], list):
tmp = []
tmp_sum = 0.0
for j in range(len(concs[i])):
tmp.append(concs[i][j])
tmp_sum += concs[i][j]
print(tmp_sum)
if tmp_sum < 1.1:
if np.abs(tmp_sum - 1.0) > 1.e-6:
sys.exit('Concentrations {0} for site {1} do not add up to 1.0!!!'.format(concs[i], i+1))
else:
if np.abs(tmp_sum - 100.0) > 1.e-3:
sys.exit('Concentrations {0} for site {1} do not add up to 100!!!'.format(concs[i], i+1))
self.concs.append(tmp)
else:
self.concs.append([concs[i]])
# Check that all species, concs, and splts arrays have the same dimensions
for a, b in combinations([self.basis, self.species, self.concs, self.splts, self.fixs], 2):
if len(a) != len(b):
print(a, 'len = ', len(a))
print(b, 'len = ', len(b))
sys.exit('The above input arrays have inconsistent lengths!!!')
for a, b in combinations([self.species, self.concs, self.splts, self.fixs], 2):
for sublist1, sublist2 in zip(a, b):
if len(sublist1) != len(sublist2):
print(sublist1, 'len = ', len(sublist1))
print(sublist2, 'len = ', len(sublist2))
sys.exit('The above input array elements have inconsistent lengths!!!')
self.find_primitive = find_primitive
if self.find_primitive:
self.pmg_species = self.get_equivalent_sites()
else:
self.pmg_species = np.linspace(1, len(self.species), len(self.species), dtype=int)
#
self.coords_are_cartesian = coords_are_cartesian
self.ibz = None
self.make_supercell = make_supercell
#
self.pmg_input_lattice = Lattice(self.prims)
self.pmg_input_struct = Structure(self.pmg_input_lattice, self.pmg_species, self.basis,
coords_are_cartesian=self.coords_are_cartesian)
#
if self.make_supercell is not None:
self.pmg_input_struct.make_supercell(self.make_supercell)
#
self.sws = self.calc_ws_radius(self.pmg_input_struct)
#
self.finder = SpacegroupAnalyzer(self.pmg_input_struct, symprec=0.0001, angle_tolerance=0.0001)
self.stm = StructureMatcher(ltol=0.001, stol=0.001, angle_tol=0.001, attempt_supercell=True)
#
print("Input structure information:")
print(self.pmg_input_struct)
print("Volume: ", self.pmg_input_struct.volume)
print("Lattice vectors:")
print(self.pmg_input_struct.lattice.matrix)
print("")
#
# spglib
spg_cell = (
self.pmg_input_lattice.matrix,
self.pmg_input_struct.frac_coords,
self.pmg_species
)
self.spg_space_group = spg.get_spacegroup(spg_cell)
self.spg_space_group_number = int(self.spg_space_group.split()[-1].lstrip('(').rstrip(')'))
self.spg_space_group_symbol = self.spg_space_group
self.spg_prim_lat, self.spg_prim_pos, self.spg_prim_species = spg.standardize_cell(spg_cell,
to_primitive=True)
self.prim_struct = Structure(Lattice(self.spg_prim_lat), self.spg_prim_species, self.spg_prim_pos)
self.spg_ibz = self.sg2ibz[self.spg_space_group_number]
self.ibz = self.spg_ibz
mesh = [kwargs['nkx'], kwargs['nky'], kwargs['nkz']]
#print()
#print('#'*60)
mapping, grid = spg.get_ir_reciprocal_mesh(mesh, spg_cell, is_time_reversal=True, is_shift=(0, 0, 0))
uniques, counts = np.unique(mapping, return_counts=True)
all_weights = []
kpoints = []
weights = []
for xx in mapping:
all_weights.append(counts[np.argwhere(uniques == xx).flatten()[0]])
for xx, yy in zip(uniques, counts):
kpoints.append(grid[np.argwhere(mapping == xx).flatten()[0]])
weights.append(yy)
#for xx, yy, zz in zip(mapping, grid, all_weights):
# print(xx, yy, zz)
#print()
#for kp, ww in zip(kpoints, weights):
# print(kp, ww)
#print()
#print('NKVEC = ', len(kpoints))
#print('#'*60)
#print()
#print(spg_prim_pos)
#print(spg_prim_species)
#
#print("Detected standard conventional structure:")
#print(self.conv_struct)
#print("Volume: ",self.conv_struct.volume)
#print("Lattice vectors:")
#print(self.conv_struct.lattice.matrix)
#print("")
print("Detected standardized structure:")
print(self.prim_struct)
print("Volume: ", self.prim_struct.volume)
print("Lattice vectors:")
print(self.prim_struct.lattice.matrix)
print("")
#
self.primaa = self.prim_struct.lattice.matrix[0, :]
self.primbb = self.prim_struct.lattice.matrix[1, :]
self.primcc = self.prim_struct.lattice.matrix[2, :]
self.output_basis = self.make_basis_array(self.prim_struct)
# Below we calculate the transformation that maps
# self.primaX to lattice vectors used by EMTO.
# This transform depends on the type of the Bravais lattice,
# so each case must be treated separately.
if self.spg_ibz == 1:
norm_tmp = np.linalg.norm(self.primaa)
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 0.0
self.output_coa = 0.0
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([1, 0, 0])
self.emto_primb = np.array([0, 1, 0])
self.emto_primc = np.array([0, 0, 1])
self.emto_basis = self.output_basis
elif self.spg_ibz == 2:
norm_tmp = 2*self.primaa[1]
self.output_prima = self.primcc/norm_tmp
self.output_primb = self.primaa/norm_tmp
self.output_primc = self.primbb/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 0.0
self.output_coa = 0.0
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([0.5, 0.5, 0])
self.emto_primb = np.array([0, 0.5, 0.5])
self.emto_primc = np.array([0.5, 0, 0.5])
self.emto_basis = self.output_basis
elif self.spg_ibz == 3:
norm_tmp = 2*self.primaa[1]
self.output_prima = self.primcc/norm_tmp
self.output_primb = self.primaa/norm_tmp
self.output_primc = self.primbb/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 0.0
self.output_coa = 0.0
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([0.5, 0.5, -0.5])
self.emto_primb = np.array([-0.5, 0.5, 0.5])
self.emto_primc = np.array([0.5, -0.5, 0.5])
self.emto_basis = self.output_basis
elif self.spg_ibz == 4:
rot1 = rotation_matrix([0.0, 0.0, 1.0], 0./180*np.pi)
self.output_prima = np.dot(rot1, self.primaa)
self.output_primb = np.dot(rot1, self.primbb)
self.output_primc = np.dot(rot1, self.primcc)
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i, :] = np.dot(rot1, self.output_basis[i, :])
self.output_boa = 0.0
self.output_coa = self.output_primc[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
# EMTO convention:
self.emto_prima = np.array([1., 0, 0])
self.emto_primb = np.array([-0.5, np.sqrt(3.)/2, 0])
self.emto_primc = np.array([0., 0, self.output_coa])
self.emto_basis = self.output_basis
elif self.spg_ibz == 5:
norm_tmp = self.primaa[0]
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 0.0
self.output_coa = self.output_primc[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([1.0, 0.0, 0.0])
self.emto_primb = np.array([0.0, 1.0, 0.0])
self.emto_primc = np.array([0.0, 0.0, self.output_coa])
self.emto_basis = self.output_basis
elif self.spg_ibz == 6:
self.output_prima = self.primbb
self.output_primb = self.primcc
self.output_primc = self.primaa
# Apply transformation on the basis atoms
self.output_basis = self.output_basis
self.output_boa = 0.0
self.output_coa = 2*self.output_prima[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([0.5, -0.5, self.output_coa/2])
self.emto_primb = np.array([0.5, 0.5, -self.output_coa/2])
self.emto_primc = np.array([-0.5, 0.5, self.output_coa/2])
self.emto_basis = self.output_basis
elif self.spg_ibz == 7:
alpha = self.prim_struct.lattice.alpha
kulma = np.arctan((self.primaa[0]+self.primbb[0]+self.primcc[0])/
(self.primaa[2]+self.primbb[2]+self.primcc[2]))
rot1 = rotation_matrix([0.0, -1.0, 0.0], kulma)
rot2 = np.array([[-np.sqrt(3.0)/2, -0.5, 0.0],
[0.5, -np.sqrt(3.0)/2, 0.0],
[0.0, 0.0, 1.0]])
self.output_prima = np.dot(rot2, np.dot(rot1, self.primaa))
self.output_primb = np.dot(rot2, np.dot(rot1, self.primbb))
self.output_primc = np.dot(rot2, np.dot(rot1, self.primcc))
scale_a = self.output_prima[1]
print('scale_a = ',scale_a)
self.output_prima = self.output_prima/scale_a
self.output_primb = self.output_primb/scale_a
self.output_primc = self.output_primc/scale_a
# Apply transformation on the basis atoms
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i,:] = np.dot(rot2, np.dot(rot1, self.output_basis[i, :]))/scale_a
self.output_boa = 1.0
self.output_coa = self.output_prima[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([0.0, 1.0, self.output_coa])
self.emto_primb = np.array([-np.sqrt(3.)/2, -0.5, self.output_coa])
self.emto_primc = np.array([np.sqrt(3.)/2, -0.5, self.output_coa])
self.emto_basis = self.output_basis
elif self.spg_ibz == 8:
if (np.abs(self.primaa[0]) < np.abs(self.primbb[1])) and \
(np.abs(self.primbb[1]) < np.abs(self.primcc[2])):
norm_tmp = self.primaa[0]
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
elif np.abs(np.abs(self.primaa[0]) - np.abs(self.primbb[1])) < 1.e-6 and \
np.abs(self.primbb[1]) < np.abs(self.primcc[2]):
norm_tmp = self.primaa[0]
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
elif np.abs(self.primaa[0]) < np.abs(self.primcc[2]):
norm_tmp = self.primcc[2]
rot1 = rotation_matrix([0.0, 0.0, 1.0], -90./180*np.pi)
rot2 = rotation_matrix([-1.0, 0.0, 0.0], 90./180*np.pi)
self.output_prima = np.dot(rot2, np.dot(rot1, self.primbb))/norm_tmp
self.output_primb = np.dot(rot2, np.dot(rot1, self.primcc))/norm_tmp
self.output_primc = np.dot(rot2, np.dot(rot1, self.primaa))/norm_tmp
print(self.output_prima)
print(self.output_primb)
print(self.output_primc)
# Apply transformation on the basis atoms
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i,:] = np.dot(rot2, np.dot(rot1, self.output_basis[i, :]))/norm_tmp
else:
norm_tmp = self.primaa[0]
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
#
self.output_boa = self.output_primb[1]
self.output_coa = self.output_primc[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([1.0, 0.0, 0.0])
self.emto_primb = np.array([0.0, self.output_boa, 0.0])
self.emto_primc = np.array([0.0, 0.0 ,self.output_coa])
self.emto_basis = self.output_basis
elif self.spg_ibz == 9:
if np.abs(self.primbb[1] - 0.5) < 1e-12 and \
np.abs(self.primcc[1] + 0.5) < 1e-12:
rot1 = rotation_matrix([0.0, 1.0, 0.0], 90./180*np.pi)
self.output_prima = np.dot(rot1, self.primaa)
self.output_primb = np.dot(rot1, self.primbb)
self.output_primc = np.dot(rot1, self.primcc)
# Redefine lattice vectors
tmp = np.copy(self.output_prima)
self.output_prima[:] = self.output_primc[:]
self.output_primc[:] = tmp
# Mirror along the xy-plane
self.output_primc *= -1
# Scale lattice vectors so that a1 and a2 x-components are 0.5
norm_tmp = 2*self.output_prima[0]
self.output_prima /= norm_tmp
self.output_primb /= norm_tmp
self.output_primc /= norm_tmp
# Apply transformation on the basis atoms
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i,:] = np.dot(rot1, self.output_basis[i, :])
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i,2] *= -1
self.output_basis /= norm_tmp
#print(self.output_prima)
#print(self.output_primb)
#print(self.output_primc)
else:
norm_tmp = 2*self.primaa[0]
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 2*self.output_primb[1]
self.output_coa = self.output_primc[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
# EMTO convention:
self.emto_prima = np.array([0.5, -self.output_boa/2, 0])
self.emto_primb = np.array([0.5, self.output_boa/2, 0])
self.emto_primc = np.array([0, 0, self.output_coa])
self.emto_basis = self.output_basis
elif self.spg_ibz == 10:
self.output_prima = np.zeros_like(self.primaa)
self.output_primb = np.zeros_like(self.primbb)
self.output_primc = np.zeros_like(self.primcc)
self.output_prima[0] = self.primaa[1]
self.output_prima[1] = self.primaa[0]
self.output_prima[2] = self.primaa[2]
self.output_primb[0] = self.primcc[1]
self.output_primb[1] = self.primcc[0]
self.output_primb[2] = self.primcc[2]
self.output_primc[0] = self.primbb[1]
self.output_primc[1] = self.primbb[0]
self.output_primc[2] = self.primbb[2]
norm_tmp = 2*self.output_prima[0]
self.output_prima /= norm_tmp
self.output_primb /= norm_tmp
self.output_primc /= norm_tmp
# Apply transformation on the basis atoms
basis_tmp = np.copy(self.output_basis)
self.output_basis[:, 0] = basis_tmp[:, 1]
self.output_basis[:, 1] = basis_tmp[:, 0]
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 2*self.output_primc[1]
self.output_coa = 2*self.output_primc[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([0.5, -self.output_boa/2, self.output_coa/2])
self.emto_primb = np.array([0.5, self.output_boa/2, -self.output_coa/2])
self.emto_primc = np.array([-0.5, self.output_boa/2, self.output_coa/2])
self.emto_basis = self.output_basis
elif self.spg_ibz == 11:
rot1 = rotation_matrix([1, 1, 1], 120./180*np.pi)
self.output_prima = np.dot(rot1, self.primaa)
self.output_primb = np.dot(rot1, self.primbb)
self.output_primc = np.dot(rot1, self.primcc)
norm_tmp = 2*self.output_prima[0]
self.output_prima /= norm_tmp
self.output_primb /= norm_tmp
self.output_primc /= norm_tmp
# Apply transformation on the basis atoms
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i, :] = np.dot(rot1, self.output_basis[i, :])
self.output_basis /= norm_tmp
self.output_boa = 2*self.output_primc[1]
self.output_coa = 2*self.output_primc[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
# EMTO convention:
self.emto_prima = np.array([0.5, 0, self.output_coa/2])
self.emto_primb = np.array([0.5, self.output_boa/2, 0])
self.emto_primc = np.array([0, self.output_boa/2, self.output_coa/2])
self.emto_basis = self.output_basis
elif self.spg_ibz == 12:
bc_norm = np.linalg.norm(self.primaa)
# Rotate 90 degrees counterclockwise around the x-axis
rot1 = rotation_matrix([1, 0, 0], -90./180*np.pi)
self.output_prima = np.dot(rot1, self.primaa/bc_norm)
self.output_primb = np.dot(rot1, self.primcc/bc_norm)
self.output_primc = np.dot(rot1, self.primbb/bc_norm)
# Mirror a3 from negative z-axis to positive side
self.output_primc *= -1.0
# spg uses gamma > 90, so we redefine the a3 lattice vector so that
# gamma < 90:
self.output_primb[0] *= -1.0
gamma = get_angle(self.output_prima, self.output_primb)
y_fac = self.output_primb[1]
shift = np.abs(2*self.output_primb[0])
#
# Apply transformation on the basis atoms
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i, :] = np.dot(rot1, self.output_basis[i, :])/bc_norm
# Transform basis because self.output_primc was mirrored:
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i, 2] *= -1.0
# Transform basis because gamma was changed above:
for i in range(len(self.output_basis[:, 0])):
#self.output_basis[i, :] = np.dot(shift_mat, self.output_basis[i, :])
if self.output_basis[i, 1] > 0:
self.output_basis[i, 0] += shift * np.abs(self.output_basis[i, 1] / y_fac)
else:
self.output_basis[i, 0] -= shift * np.abs(self.output_basis[i, 1] / y_fac)
self.output_boa = np.linalg.norm(self.output_primb)
self.output_coa = np.linalg.norm(self.output_primc)
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = gamma
self.emto_prima = np.array([1.0, 0, 0])
self.emto_primb = np.array([self.output_boa*np.cos(np.radians(self.output_gamma)),
self.output_boa*np.sin(np.radians(self.output_gamma)), 0])
self.emto_primc = np.array([0, 0, self.output_coa])
self.emto_basis = self.output_basis
elif self.spg_ibz == 13:
gamma = get_angle(self.primcc, self.primaa+self.primbb)
switch_x_y = np.array([[0, -1, 0],
[1, 0, 0],
[0, 0, 1]])
rot1 = np.array([[1.0,0.0,0.0],
[0.0,np.cos(np.radians(180-gamma)),-np.sin(np.radians(180-gamma))],
[0.0,np.sin(np.radians(180-gamma)),np.cos(np.radians(180-gamma))]])
rot2 = np.array([[0.0,0.0,1.0],
[0.0,1.0,0.0],
[-1.0,0.0,0.0]])
bc_norm = np.linalg.norm(self.primaa+self.primbb)
self.output_prima = np.dot(rot2, np.dot(rot1, np.dot(switch_x_y, self.primcc)))/bc_norm
self.output_primb = np.dot(rot2, np.dot(rot1, np.dot(switch_x_y, self.primaa)))/bc_norm
self.output_primc = np.dot(rot2, np.dot(rot1, np.dot(switch_x_y, self.primbb)))/bc_norm
# Apply transformation on the basis atoms
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i, :] = np.dot(rot2, np.dot(rot1, np.dot(switch_x_y, self.output_basis[i, :])))/bc_norm
self.output_boa = np.abs(self.output_prima[1])
self.output_coa = np.abs(2*self.output_primc[2])
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = gamma
self.emto_prima = np.array([0.0, -self.output_boa, 0])
self.emto_primb = np.array([0.5*np.sin(np.radians(self.output_gamma)),
-0.5*np.cos(np.radians(self.output_gamma)),
-self.output_coa/2])
self.emto_primc = np.array([0.5*np.sin(np.radians(self.output_gamma)),
-0.5*np.cos(np.radians(self.output_gamma)),
self.output_coa/2])
self.emto_basis = self.output_basis
elif self.spg_ibz == 14:
norm_tmp = self.primaa[0]
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
# This could be tested, should be OK:
#self.output_boa = np.sqrt(self.output_primb[0]**2+self.output_primb[1]**2)
self.output_boa = self.prim_struct.lattice.b/self.prim_struct.lattice.a
self.output_coa = self.prim_struct.lattice.c/self.prim_struct.lattice.a
self.output_alpha = self.prim_struct.lattice.alpha
self.output_beta = self.prim_struct.lattice.beta
self.output_gamma = self.prim_struct.lattice.gamma
self.emto_prima = np.array([1.0, 0, 0])
self.emto_primb = np.array([self.output_boa*np.cos(np.radians(self.output_gamma)),
self.output_boa*np.sin(np.radians(self.output_gamma)),
0])
self.emto_primc = np.array([self.output_coa*np.cos(np.radians(self.output_beta)),
self.output_coa*(np.cos(np.radians(self.output_alpha)) -
np.cos(np.radians(self.output_beta)) *
np.cos(np.radians(self.output_gamma))) / np.sin(np.radians(self.output_gamma)),
self.output_coa*np.sqrt(1 - np.cos(np.radians(self.output_gamma))**2 -
np.cos(np.radians(self.output_alpha))**2 -
np.cos(np.radians(self.output_beta))**2 +
2*np.cos(np.radians(self.output_alpha))*
np.cos(np.radians(self.output_beta))*
np.cos(np.radians(self.output_gamma)))/np.sin(np.radians(self.output_gamma))])
self.emto_basis = self.output_basis
self.output_sites = self.make_sites_array(self.prim_struct)
self.output_lattice = Lattice(np.array([self.emto_prima, self.emto_primb, self.emto_primc]))
self.output_struct = Structure(self.output_lattice, self.output_sites,
self.emto_basis, coords_are_cartesian=True)
#
# Print EMTO structure information
print("")
print("Generated EMTO structure:")
print(self.output_struct)
print("Volume: ", self.output_struct.volume)
print("WS-rad: ", self.sws)
print("Lattice vectors:")
print(self.output_struct.lattice.matrix)
print("Basis vectors:")
for i in range(len(self.output_struct.sites)):
print(self.output_struct.sites[i].coords)
print("")
# Print symmetry information
print("spglib reports the following information:")
print("The spacegroup symbol of input structure: {}".format(self.spg_space_group))
print("The spacegroup number of input structure: {}".format(self.spg_space_group_number))
print("The Bravais lattice of input structure : {}".format(self.sg2bl[self.spg_space_group_number]))
print("Number of basis atoms : {}".format(self.prim_struct.num_sites))
print("EMTO IBZ : {}".format(self.spg_ibz))
print("")
emto_cell = (
self.output_lattice.matrix,
self.output_struct.frac_coords,
self.output_sites
)
self.emto_space_group = spg.get_spacegroup(emto_cell)
self.emto_space_group_number = int(self.emto_space_group.split()[-1].lstrip('(').rstrip(')'))
self.emto_space_group_symbol = self.emto_space_group
self.emto_prim_lat, self.emto_prim_pos, self.emto_prim_species = spg.standardize_cell(emto_cell, to_primitive=True)
self.emto_struct = Structure(Lattice(self.emto_prim_lat), self.emto_prim_species, self.emto_prim_pos)
self.emto_ibz = self.sg2ibz[self.emto_space_group_number]
print("spglib reports the following information:")
print("The spacegroup symbol of EMTO structure : {}".format(self.emto_space_group))
print("The spacegroup number of EMTO structure : {}".format(self.emto_space_group_number))
print("The Bravais lattice of EMTO structure : {}".format(self.sg2bl[self.emto_space_group_number]))
print("Number of basis atoms : {}".format(self.output_struct.num_sites))
print("EMTO IBZ : {}".format(self.emto_ibz))
print("")
print(self.prim_struct)
print(self.emto_struct)
should_exit = False
if (self.spg_space_group != self.emto_space_group):
print("Input and output spacegroups are different!!!")
should_exit = True
if (self.spg_ibz != self.emto_ibz):
print("Input and output IBZ are different!!!")
should_exit = True
if should_exit:
sys.exit("Structure conversion went wrong! Check the symmetry information above.")
#
fitted_angles = [get_angle(self.output_prima, self.emto_prima),
get_angle(self.output_primb, self.emto_primb),
get_angle(self.output_primc, self.emto_primc)]
for i, angle in enumerate(fitted_angles):
#print(angle)
if angle > self.fit_angle_tol:
sys.exit('Error: Angle between lattice vectors {0} is {1} > {2}!!!'.format(i+1, angle, self.fit_angle_tol))
fitted_ratios = [np.linalg.norm(self.output_prima) / np.linalg.norm(self.emto_prima),
np.linalg.norm(self.output_primb) / np.linalg.norm(self.emto_primb),
np.linalg.norm(self.output_primc) / np.linalg.norm(self.emto_primc)]
for i, ratio in enumerate(fitted_ratios):
#print(ratio)
if np.abs(ratio - 1.0) > self.fit_norm_ratio_tol:
sys.exit('Error: Ratio between lattice vector {0} norms is {1} > {2}!!!'.format(i+1, ratio, self.fit_norm_ratio_tol))
print('Structure similarity check (input vs. output for EMTO):')
fit1 = self.stm.fit_anonymous(self.pmg_input_struct, self.prim_struct)
fit2 = self.stm.fit(self.pmg_input_struct, self.prim_struct)
fit3 = self.stm.fit_anonymous(self.prim_struct, self.output_struct)
fit4 = self.stm.fit(self.prim_struct, self.output_struct)
fit5 = self.stm.fit_anonymous(self.pmg_input_struct, self.output_struct)
fit6 = self.stm.fit(self.pmg_input_struct, self.output_struct)
print('Input -> spglib (sites only) ?: ', fit1)
print('Input -> spglib (sites+chemistry)?: ', fit2)
print('spglib -> EMTO (sites only) ?: ', fit3)
print('spglib -> EMTO (sites+chemistry)?: ', fit4)
print('Input -> EMTO (sites only) ?: ', fit5)
print('Input -> EMTO (sites+chemistry)?: ', fit6)
print("")
if not all([fit1, fit2, fit3, fit4, fit5, fit6]):
sys.exit('Some structures are not identical (check for False above) !!!')
# Generate EMTO structure input files
self.input_system.lattice.set_values(jobname_lat=self.latname,
latpath=self.latpath,
lat=common.ibz_to_lat(self.ibz),
latparams=[1.0, self.output_boa, self.output_coa],
latvectors=[self.output_alpha, self.output_beta, self.output_gamma],
basis=self.output_basis,
EMTOdir=self.EMTOdir,
**kwargs)
# Finally, save atoms, splts, and concs of the output structure to be read by init_bulk function.
self.make_cpa_sites_array(self.output_struct)
#
# Prepare KGRN, KFCD, and SLURM input files next.
if self.ibz is None:
sys.exit('self.ibz == None! Run create_structure_input() to generate IBZ \n'+
'for your structure.')
# Construct an index array to keep track of the number of atoms in each site.
if self.atoms_cpa is None:
sys.exit('EMTO.init_bulk(): \'self.atoms_cpa\' does not exist!!! (Did you run init_structure?)')
else:
index_array = np.ones(len(self.atoms_cpa), dtype='int32')
for i in range(len(self.atoms_cpa)):
if isinstance(self.atoms_cpa[i], list):
index_array[i] = len(self.atoms_cpa[i])
else:
index_array[i] = 1
index_len = np.sum(index_array)
atoms_flat = []
for i in range(len(self.atoms_cpa)):
if isinstance(self.atoms_cpa[i], list):
for j in range(len(self.atoms_cpa[i])):
atoms_flat.append(self.atoms_cpa[i][j])
else:
atoms_flat.append(self.atoms_cpa[i])
self.KGRN_atoms = np.array(atoms_flat)
if self.splts_cpa is None:
sys.exit('EMTO.init_bulk(): \'self.splts_cpa\' does not exist!!! (Did you run init_structure?)')
else:
splts_flat = []
for i in range(len(self.splts_cpa)):
if isinstance(self.splts_cpa[i], list):
for j in range(len(self.splts_cpa[i])):
splts_flat.append(self.splts_cpa[i][j])
else:
splts_flat.append(self.splts_cpa[i])
self.KGRN_splts = np.array(splts_flat)
if self.fixs_cpa is None:
sys.exit('EMTO.init_bulk(): \'self.fixs_cpa\' does not exist!!! (Did you run init_structure?)')
else:
fixs_flat = []
for i in range(len(self.fixs_cpa)):
if isinstance(self.fixs_cpa[i], list):
for j in range(len(self.fixs_cpa[i])):
fixs_flat.append(self.fixs_cpa[i][j])
else:
fixs_flat.append(self.fixs_cpa[i])
self.KGRN_fixs = np.array(fixs_flat)
if self.concs_cpa is None:
sys.exit('EMTO.init_bulk(): \'self.concs_cpa\' does not exist!!! (Did you run init_structure?)')
else:
concs_flat = []
for i in range(len(self.concs_cpa)):
if isinstance(self.concs_cpa[i], list):
for j in range(len(self.concs_cpa[i])):
concs_flat.append(self.concs_cpa[i][j])
else:
concs_flat.append(self.concs_cpa[i])
self.KGRN_concs =
|
np.array(concs_flat)
|
numpy.array
|
import collections
import itertools
import numpy as np
# adapted from github.com/janelia-flyem/gala
def smallest_int_dtype(number, *, signed=False, min_dtype=np.int8):
"""Return the smallest numpy integer dtype that can represent `number`.
Parameters
----------
number : int
The number that the returned dtype must be able to represent.
signed : bool, optional
Whether a signed dtype is required.
min_dtype : numpy dtype, optional
Specify a minimum dtype in case `number` is not the absolute
maximum that the user wants to represent.
Returns
-------
dtype : numpy dtype
The required data type.
Examples
--------
>>> smallest_int_dtype(8)
<class 'numpy.uint8'>
>>> smallest_int_dtype(2**9)
<class 'numpy.uint16'>
>>> smallest_int_dtype(2**17)
<class 'numpy.uint32'>
>>> smallest_int_dtype(2**33)
<class 'numpy.uint64'>
>>> smallest_int_dtype(8, signed=True)
<class 'numpy.int8'>
>>> smallest_int_dtype(8, signed=True, min_dtype=np.int16)
<class 'numpy.int16'>
>>> smallest_int_dtype(-2**9)
<class 'numpy.int16'>
>>> smallest_int_dtype(-2**17)
<class 'numpy.int32'>
>>> smallest_int_dtype(-2**33)
<class 'numpy.int64'>
"""
if number < 0:
signed = True
number = abs(number)
if not signed:
if number <= np.iinfo(np.uint8).max:
dtype = np.uint8
elif number <= np.iinfo(np.uint16).max:
dtype = np.uint16
elif number <= np.iinfo(np.uint32).max:
dtype = np.uint32
else: # number <= np.iinfo(np.uint64).max:
dtype = np.uint64
else:
if np.iinfo(np.int8).min <= number <= np.iinfo(np.int8).max:
dtype = np.int8
elif np.iinfo(np.int16).min <= number <= np.iinfo(np.int16).max:
dtype = np.int16
elif np.iinfo(np.int32).min <= number <= np.iinfo(np.int32).max:
dtype = np.int32
else: # if np.iinfo(np.int64).min <= number <= np.iinfo(np.int64).max:
dtype = np.int64
if np.iinfo(dtype).max < np.iinfo(min_dtype).max:
dtype = min_dtype
return dtype
# adapted from github.com/janelia-flyem/gala
def pad(ar, vals, *, axes=None):
"""Pad an array with values in `vals` along `axes`.
Parameters
----------
ar : array, shape (M, N, ...)
The input array.
vals : int or iterable of int, shape (K,)
The values to pad with.
axes : int in {0, ..., `ar.ndim`}, or iterable thereof, optional
The axes of `ar` to pad. If None, pad along all axes.
Returns
-------
ar2 : array, shape (M+2K, N+2K, ...)
The padded array.
Examples
--------
>>> ar = np.array([4, 5, 6])
>>> pad(ar, 0)
array([0, 4, 5, 6, 0])
>>> pad(ar, [0, 1])
array([1, 0, 4, 5, 6, 0, 1])
>>> ar = np.array([[4, 5, 6]])
>>> pad(ar, 0)
array([[0, 0, 0, 0, 0],
[0, 4, 5, 6, 0],
[0, 0, 0, 0, 0]])
>>> pad(ar, 0, axes=1)
array([[0, 4, 5, 6, 0]])
"""
if axes is None:
axes = list(range(ar.ndim))
if not isinstance(vals, collections.abc.Iterable):
vals = [vals]
if not isinstance(axes, collections.abc.Iterable):
axes = [axes]
p = len(vals)
newshape = np.array(ar.shape)
for ax in axes:
newshape[ax] += 2*p
vals = np.reshape(vals, (p,) + (1,) * (ar.ndim-1))
new_dtype = ar.dtype
if np.issubdtype(new_dtype, np.integer):
maxval = max([np.max(vals), np.max(ar)])
minval = min([np.min(vals), np.min(ar)])
signed = (minval < 0)
maxval = max(abs(minval), maxval)
new_dtype = smallest_int_dtype(maxval, signed=signed,
min_dtype=new_dtype)
ar2 = np.empty(newshape, dtype=new_dtype)
center = np.ones(newshape, dtype=bool)
for ax in axes:
ar2.swapaxes(0, ax)[p-1::-1,...] = vals
ar2.swapaxes(0, ax)[-p:,...] = vals
center.swapaxes(0, ax)[p-1::-1,...] = False
center.swapaxes(0, ax)[-p:,...] = False
ar2[center] = ar.ravel()
return ar2
def raveled_steps_to_neighbors(shape, connectivity=1, *, order='C', spacing=1,
return_distances=True):
"""Return raveled coordinate steps for given array shape and neighborhood.
Parameters
----------
shape : tuple of int
The array shape.
connectivity : {1, ..., len(shape)}, optional
The n-dimensional connectivity. See
`scipy.ndimage.generate_binary_structure` for more.
order : {'C', 'F'}, optional
The ordering of the array, either C or Fortran.
spacing : float, or array-like of float, shape `len(shape)`
The spacing of the pixels along each dimension.
return_distances : bool, optional
If True (default), return also the Euclidean distance to each
neighbor.
Returns
-------
steps : array of int, shape (K,)
Each value in `steps` moves from a central pixel to a
`connectivity`-neighbor in an array of shape `shape`.
distances : array of float, shape (K,), optional
The Euclidean distance corresponding to each step. This is only
returned if `return_distances` is True.
Examples
--------
>>> raveled_steps_to_neighbors((5,), 1)
(array([ 1, -1]), array([1., 1.]))
>>> raveled_steps_to_neighbors((2, 3), 2, return_distances=False)
array([ 3, 1, -3, -1, 4, 2, -2, -4])
>>> raveled_steps_to_neighbors((2, 3), 1, order='F')[0]
array([ 2, 1, -2, -1])
Using `spacing` we can obtain different distance values along different
axes:
>>> raveled_steps_to_neighbors((3, 4, 5), spacing=[5, 1, 1])
(array([ 20, 5, 1, -20, -5, -1]), array([5., 1., 1., 5., 1., 1.]))
"""
spacing = np.ones(len(shape), dtype=float) * spacing
if order == 'C':
dims = shape[-1:0:-1]
else:
dims = shape[:-1]
stepsizes = np.cumprod((1,) + dims)[::-1]
steps = [stepsizes, -stepsizes]
distances = [spacing, spacing]
for nhops in range(2, connectivity + 1):
prod = np.array(list(itertools.product(*[[1, -1]] * nhops)))
multisteps = np.array(list(itertools.combinations(stepsizes, nhops))).T
dhopsq = np.array(list(itertools.combinations(spacing ** 2, nhops))).T
steps.append((prod @ multisteps).ravel())
distances.append(np.sqrt(np.abs(prod) @ dhopsq).ravel())
if return_distances:
return (np.concatenate(steps).astype(int),
|
np.concatenate(distances)
|
numpy.concatenate
|
from matplotlib import pyplot as plt
from PIL import Image
import numpy as np
from os import path
from itertools import product
# --------------- Questão 1 --------------- #
original_img = np.array(Image.open(path.join(path.curdir, "original_lena.jpg")).convert("L"))
# --------------- Questão 2 --------------- #
summed_img = original_img + 30
summed_img[summed_img <= 30] = 255
Image.fromarray(summed_img).save(path.join(path.curdir, "summed_lena.jpg"))
subtracted_img = original_img - 70
subtracted_img[subtracted_img >= (255 - 70)] = 0
Image.fromarray(subtracted_img).save(path.join(path.curdir, "subtracted_lena.jpg"))
multiplied_img = original_img.copy()
multiplied_img[multiplied_img > int(255 / 1.2)] = 255
multiplied_img[multiplied_img < 255] = np.array(multiplied_img[multiplied_img < 255] * 1.2, dtype="uint8")
Image.fromarray(multiplied_img).save(path.join(path.curdir, "multiplied_lena.jpg"))
divided_img = original_img // 4
Image.fromarray(divided_img).save(path.join(path.curdir, "divided_lena.jpg"))
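# Hedged note on the uint8 masks above: the additions/subtractions wrap around
# in uint8 (a pixel of 250 plus 30 wraps to 24), so wrapped pixels are detected
# afterwards (<= 30 after the sum, >= 255 - 70 after the subtraction) and
# clamped to 255 and 0 respectively.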
# ------------- Questão 3 & 4 ------------- #
plt.figure()
plt.imshow(original_img, cmap="gray", clim=(0, 255))
plt.axis("off")
plt.title("Original image")
fig, ax = plt.subplots(2, 2)
ax[0, 0].imshow(summed_img, cmap="gray", clim=(0, 255))
ax[0, 0].set_title("Summed image")
ax[0, 0].set_axis_off()
ax[0, 1].imshow(subtracted_img, cmap="gray", clim=(0, 255))
ax[0, 1].set_title("Subtracted image")
ax[0, 1].set_axis_off()
ax[1, 0].imshow(multiplied_img, cmap="gray", clim=(0, 255))
ax[1, 0].set_title("Multiplied image")
ax[1, 0].set_axis_off()
ax[1, 1].imshow(divided_img, cmap="gray", clim=(0, 255))
ax[1, 1].set_title("Divided image")
ax[1, 1].set_axis_off()
fig.tight_layout()
plt.show()
plt.figure()
plt.hist(original_img.ravel(), bins=256)
plt.title("Original image")
plt.xlabel("Grayscale value")
plt.xlabel("Pixel count")
plt.xlim(-2, 260)
fig, ax = plt.subplots(2, 2)
ax[0, 0].hist(summed_img.ravel(), bins=256)
ax[0, 0].set_title("Summed image")
ax[0, 0].set_xlabel("Grayscale value")
ax[0, 0].set_ylabel("Pixel count")
ax[0, 0].set_xlim(-5, 260)
ax[0, 1].hist(subtracted_img.ravel(), bins=256)
ax[0, 1].set_title("Subtracted image")
ax[0, 1].set_xlabel("Grayscale value")
ax[0, 1].set_ylabel("Pixel count")
ax[0, 1].set_xlim(-5, 260)
ax[1, 0].hist(multiplied_img.ravel(), bins=256)
ax[1, 0].set_title("Multiplied image")
ax[1, 0].set_xlabel("Grayscale value")
ax[1, 0].set_ylabel("Pixel count")
ax[1, 0].set_xlim(-5, 260)
ax[1, 1].hist(divided_img.ravel(), bins=256)
ax[1, 1].set_title("Divided image")
ax[1, 1].set_xlabel("Grayscale value")
ax[1, 1].set_ylabel("Pixel count")
ax[1, 1].set_xlim(-5, 260)
fig.tight_layout()
plt.show()
# --------------- Questão 5 --------------- #
def maximum_error(img_1, img_2):
return np.max(np.abs((img_1.astype("int32") - img_2.astype("int32"))))
def mean_absolute_error(img_1, img_2):
return np.sum(np.abs((img_1.astype("int32") - img_2.astype("int32")))) / np.prod(img_1.shape)
def mean_square_error(img_1, img_2):
return np.sum(np.square(img_1.astype("int32") - img_2.astype("int32"))) / np.prod(img_1.shape)
def root_mean_square_error(img_1, img_2):
return np.sqrt(mean_square_error(img_1, img_2))
def normalized_mean_square_error(img_1, img_2):
return np.sum(np.square(img_1.astype("int32") - img_2.astype("int32"))) / np.sum(np.square(img_1.astype("int32")))
def peak_signal_to_noise_ratio(img_1, img_2):
return 20 * np.log10(255 / root_mean_square_error(img_1, img_2))
def signal_to_noise_ratio(img_1, img_2):
return 10 * np.log10(1 / normalized_mean_square_error(img_1, img_2))
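# Hedged usage sketch for the metrics above, comparing the original image with
# its divided version; identical inputs would give RMSE 0 (and infinite PSNR),
# so these are only meaningful for differing images.
# print(root_mean_square_error(original_img, divided_img))
# print(peak_signal_to_noise_ratio(original_img, divided_img))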
def covariance(img_1, img_2):
return np.sum(
np.multiply(img_1.astype("int32") - np.mean(img_1), img_2.astype("int32") - np.mean(img_2))
) / np.prod(img_1.shape)
def correlation_coefficient(img_1, img_2):
return np.sum(
np.multiply(img_1.astype("int32") - np.mean(img_1), img_2.astype("int32") - np.mean(img_2))
) / np.sqrt(
np.sum(np.square(img_1.astype("int32") - np.mean(img_1)))
* np.sum(np.square(img_2.astype("int32") -
|
np.mean(img_2)
|
numpy.mean
|
import numpy as np
from scipy.optimize import least_squares
import pandas as pd
from lib.logger import Logger
import json, math
logger = Logger('./calibration', clear_log=True)
# y,x,z
#pixel ray directions [center-center][left-center][right-center][center-bottom][center-top]
# width, height
# 539 959, 0 959, 1079 959, 539 0, 539 1919
indices = {
'center': [(539, 959), (539, 960), (540, 959), (540, 960)],
'left': [(0, 959), (0, 960)],
'left_half': [(269, 959), (269, 960)],
'right': [(1079, 959), (1079, 960)],
'right_half': [(810, 959), (810, 960)],
'top': [(539, 1919), (540, 1919)],
'top_half': [(539, 1440), (540, 1440)],
'bottom': [(539, 0), (540, 0)],
'bottom_half': [(539, 479), (540, 479)],
'top_left_half': [(269, 1440)],
'top_right_half': [(810, 1440)],
'bottom_left_half': [(269, 479)],
'bottom_right_half': [(810, 479)],
}
idx = [(539, 959), ]#,(0, 959), (1079, 959), (539, 0), (539, 1919)]
cams = []
for i in [1,2,3,4,5]:
    rays = pd.read_csv('data/scalarFlow/calib20190813/{}_rays.txt'.format(i), sep=' ', skiprows=1, header=None, names=['pY','pX','dY','dX','dZ'], index_col=(0,1))
cam = {}
for key, idx in indices.items():
tmp = []
try:
for id in idx:
ray = rays.loc[id]
tmp.append({'start': np.asarray([ray['pX'],ray['pY'],0.0]),
'dir': np.asarray([ray['dX'],ray['dY'],ray['dZ']]),
})
cam[key] = tmp
        except KeyError:
print('[W]: could not access index {} for cam {}, key {}'.format(id,i, key))
cams.append(cam)
#https://math.stackexchange.com/questions/2598811/calculate-the-point-closest-to-multiple-rays
def to_ray(pos, start, dir):
t = np.dot(dir, pos-start)/np.dot(dir,dir)
return pos - (start + t*dir)
def dist_to_ray(pos, start, dir):
    return np.linalg.norm(to_ray(pos, start, dir))
def f(x, rays):
e = []
for ray in rays:
e += list(to_ray(x, ray['start'], ray['dir']))
return e
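# Hedged sketch of how the residual function `f` is meant to be used: the
# stack-exchange approach linked above finds the point with the smallest
# summed squared distance to all rays, which `least_squares` (imported above)
# solves directly. The helper name and starting guess are illustrative only.
def estimate_closest_point(rays, x0=np.zeros(3)):
    # rays: list of {'start': ..., 'dir': ...} dicts as built for each camera
    result = least_squares(f, x0, args=(rays,))
    return result.x, np.linalg.norm(result.fun)  # point and residual norm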
def angle_between(a, b):
return np.arccos(np.dot(a, b)/(np.linalg.norm(a)*np.linalg.norm(b)))
#https://en.wikipedia.org/wiki/Slerp
def slerp(v1, v2, t):
O = angle_between(v1, v2)
sO = np.sin(O)
return np.sin((1-t)*O)/sO * v1 + np.sin(t*O)/sO * v2
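# Quick sanity check of the slerp formula above (a sketch, not part of the
# original calibration): the t=0.5 interpolant between two orthogonal unit
# vectors should bisect the angle between them.
_a, _b = np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])
assert np.allclose(slerp(_a, _b, 0.5), np.array([1.0, 1.0, 0.0]) / np.sqrt(2))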
deg_to_rad = np.pi/180
rad_to_deg = 180/np.pi
cam_order = [0,1,2,3,4] #[0, 4, 3, 2, 1] #mapping of ray calibration files to recorded sequences?
#i=0
cam_rays = []
cam_params = []
cam_json = {str(_):{} for _ in cam_order}
for i in range(len(cams)):
cam = cams[cam_order[i]]
params = {'rotation':None, 'position':None, 'position_error':None,
'forward':None, 'right':None, 'up':None,
'fov_horizontal':None, 'fov_vertical':None}
print('cam', i+1)
if 'center' in cam:
c_rays = [ray['dir'] for ray in cam['center']]
c = slerp(slerp(c_rays[0],c_rays[1], 0.5), slerp(c_rays[2],c_rays[3], 0.5), 0.5)
c /= np.linalg.norm(c)
fwd = c
t_y = np.arctan(c[0]/c[2])
t_x = np.arctan(c[1]/np.linalg.norm([c[0], c[2]]))
print('\trot:', t_x*rad_to_deg,t_y*rad_to_deg,0.0)
print('\tfwd: {} (center ray)'.format(c))
params['rotation']=[t_x*rad_to_deg,t_y*rad_to_deg,0.0]
params['forward']=list(fwd)
if 'left' in cam and 'right' in cam:
l = slerp(cam['left'][0]['dir'],cam['left'][1]['dir'], 0.5)
r = slerp(cam['right'][0]['dir'],cam['right'][1]['dir'], 0.5)
up =
|
np.cross(l, r)
|
numpy.cross
|
import numpy as np
import numexpr as ne
import numba
from scipy import optimize as sio
from scipy import ndimage as scnd
import stemtool as st  # assumed: `st` is the stemtool package providing util.sobel_circle used below
@numba.jit
def get_flat_dpc(data4D_flat):
CentralDisk = np.mean(data4D_flat,axis=0)
beam_x,beam_y,_ = st.util.sobel_circle(CentralDisk)
yy, xx = np.mgrid[0:data4D_flat.shape[1],0:data4D_flat.shape[2]]
    YCom = np.zeros(data4D_flat.shape[0], dtype=np.float64)
    XCom = np.zeros(data4D_flat.shape[0], dtype=np.float64)
for ii in numba.prange(data4D_flat.shape[0]):
cbed = data4D_flat[ii,:,:]
YCom[ii] = (np.sum(np.multiply(yy,cbed))/np.sum(cbed)) - beam_y
XCom[ii] = (np.sum(np.multiply(xx,cbed))/np.sum(cbed)) - beam_x
return XCom,YCom
def cart2pol(x, y):
rho = ne.evaluate("((x**2) + (y**2)) ** 0.5")
phi = ne.evaluate("arctan2(y, x)")
return (rho, phi)
def pol2cart(rho, phi):
x = ne.evaluate("rho * cos(phi)")
y = ne.evaluate("rho * sin(phi)")
return (x, y)
def angle_fun(angle,rho_dpc,phi_dpc):
x_dpc,y_dpc = pol2cart(rho_dpc,(phi_dpc + (angle*((np.pi)/180))))
charge = np.gradient(x_dpc)[1] + np.gradient(y_dpc)[0]
angle_sum = np.sum(np.abs(charge))
return angle_sum
def optimize_angle(x_dpc,y_dpc,adf_stem):
flips = np.zeros(4,dtype=bool)
flips[2:4] = True
chg_sums = np.zeros(4,dtype=x_dpc.dtype)
angles = np.zeros(4,dtype=x_dpc.dtype)
x0 = 90
for ii in range(2):
to_flip = flips[2*ii]
if to_flip:
xdpcf = np.flip(x_dpc)
else:
xdpcf = x_dpc
rho_dpc,phi_dpc = cart2pol(xdpcf,y_dpc)
x = sio.minimize(angle_fun,x0,args=(rho_dpc,phi_dpc))
min_x = x.x
sol1 = min_x - 90
sol2 = min_x + 90
chg_sums[int(2*ii)] = np.sum(charge_dpc(xdpcf,y_dpc,sol1)*adf_stem)
chg_sums[int(2*ii+1)] = np.sum(charge_dpc(xdpcf,y_dpc,sol2)*adf_stem)
angles[int(2*ii)] = sol1
angles[int(2*ii+1)] = sol2
angle = (-1)*angles[chg_sums==np.amin(chg_sums)][0]
final_flip = flips[chg_sums==np.amin(chg_sums)][0]
return angle, final_flip
def corrected_dpc(x_dpc,y_dpc,angle,flipper):
if flipper:
xdpcf = np.fliplr(x_dpc)
else:
xdpcf = np.copy(x_dpc)
rho_dpc,phi_dpc = cart2pol(xdpcf,y_dpc)
x_dpc2,y_dpc2 = pol2cart(rho_dpc,(phi_dpc - (angle*((np.pi)/180))))
return x_dpc2,y_dpc2
def potential_dpc(x_dpc,y_dpc,angle=0):
if angle==0:
potential = integrate_dpc(x_dpc,y_dpc)
else:
rho_dpc,phi_dpc = cart2pol(x_dpc,y_dpc)
x_dpc,y_dpc = pol2cart(rho_dpc,phi_dpc + (angle*((np.pi)/180)))
potential = integrate_dpc(x_dpc,y_dpc)
return potential
def charge_dpc(x_dpc,y_dpc,angle=0):
if angle==0:
charge = np.gradient(x_dpc)[1] + np.gradient(y_dpc)[0]
else:
rho_dpc,phi_dpc = cart2pol(x_dpc,y_dpc)
x_dpc,y_dpc = pol2cart(rho_dpc,phi_dpc + (angle*((np.pi)/180)))
charge = np.gradient(x_dpc)[1] + np.gradient(y_dpc)[0]
return charge
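# Hedged usage sketch (not part of the original module): given raw DPC shift
# maps and an ADF image, find the scan rotation and flip, correct the shifts,
# then derive potential and charge maps. Argument names are illustrative only.
def _dpc_pipeline_example(x_dpc_raw, y_dpc_raw, adf_stem):
    angle, flip = optimize_angle(x_dpc_raw, y_dpc_raw, adf_stem)
    x_corr, y_corr = corrected_dpc(x_dpc_raw, y_dpc_raw, angle, flip)
    return potential_dpc(x_corr, y_corr), charge_dpc(x_corr, y_corr)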
def integrate_dpc(xshift,
yshift,
fourier_calibration=1):
#Initialize matrices
size_array = np.asarray(np.shape(xshift))
x_mirrored = np.zeros(2*size_array,dtype=np.float64)
y_mirrored = np.zeros(2*size_array,dtype=np.float64)
#Generate antisymmetric X arrays
x_mirrored[0:size_array[0],0:size_array[1]] = np.fliplr(np.flipud(0 - xshift))
x_mirrored[0:size_array[0],size_array[1]:(2*size_array[1])] = np.fliplr(0 - xshift)
x_mirrored[size_array[0]:(2*size_array[0]),0:size_array[1]] = np.flipud(xshift)
x_mirrored[size_array[0]:(2*size_array[0]),size_array[1]:(2*size_array[1])] = xshift
#Generate antisymmetric Y arrays
y_mirrored[0:size_array[0],0:size_array[1]] = np.fliplr(np.flipud(0 - yshift))
y_mirrored[0:size_array[0],size_array[1]:(2*size_array[1])] = np.fliplr(yshift)
y_mirrored[size_array[0]:(2*size_array[0]),0:size_array[1]] = np.flipud(0 - yshift)
y_mirrored[size_array[0]:(2*size_array[0]),size_array[1]:(2*size_array[1])] = yshift
#Calculated Fourier transform of antisymmetric matrices
x_mirr_ft = np.fft.fft2(x_mirrored)
y_mirr_ft = np.fft.fft2(y_mirrored)
#Calculated inverse Fourier space calibration
qx = np.mean(np.diff((np.arange(-size_array[1],size_array[1], 1))/
(2*fourier_calibration*size_array[1])))
qy = np.mean(np.diff((np.arange(-size_array[0],size_array[0], 1))/
(2*fourier_calibration*size_array[0])))
#Calculate mirrored CPM integrand
mirr_ft = (x_mirr_ft + ((1j)*y_mirr_ft))/(qx + ((1j)*qy))
mirr_int = np.fft.ifft2(mirr_ft)
#Select integrand from antisymmetric matrix
integrand = np.abs(mirr_int[size_array[0]:(2*size_array[0]),size_array[1]:(2*size_array[1])])
return integrand
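# Hedged round-trip sketch for integrate_dpc (defined for illustration, never
# called): build a smooth test potential, take its gradients as the shift
# maps, and see how well the mirrored Fourier integration above recovers the
# original (up to offset/scale, so a correlation coefficient is returned).
def _integrate_dpc_roundtrip(n=64):
    yy, xx = np.mgrid[0:n, 0:n]
    test_potential = np.exp(-((xx - n / 2) ** 2 + (yy - n / 2) ** 2) / (2 * (n / 8) ** 2))
    grad_y, grad_x = np.gradient(test_potential)
    recovered = integrate_dpc(grad_x, grad_y)
    return np.corrcoef(recovered.ravel(), test_potential.ravel())[0, 1]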
def centerCBED(data4D_flat,
x_cen,
y_cen):
image_size = np.asarray(data4D_flat.shape[1:3])
fourier_cal_y = (np.linspace((-image_size[0]/2), ((image_size[0]/2) - 1), image_size[0]))/image_size[0]
fourier_cal_x = (np.linspace((-image_size[1]/2), ((image_size[1]/2) - 1), image_size[1]))/image_size[1]
[fourier_mesh_x, fourier_mesh_y] = np.meshgrid(fourier_cal_x, fourier_cal_y)
move_pixels = np.flip(image_size/2) -
|
np.asarray((x_cen,y_cen))
|
numpy.asarray
|
import warnings
import math
import numpy as np
import quaternionic
import pytest
def test_to_scalar_part(Rs):
assert np.array_equal(Rs.to_scalar_part, Rs.ndarray[..., 0])
def test_from_scalar_part():
scalars = np.random.rand(17, 8, 3)
q = quaternionic.array.from_scalar_part(scalars)
assert q.shape[-1] == 4
assert q.shape[:-1] == scalars.shape
assert np.array_equal(q.to_scalar_part, scalars)
def test_to_vector_part(Rs):
assert np.array_equal(Rs.to_vector_part, Rs.ndarray[..., 1:])
def test_from_vector_part():
vec = np.random.rand(17, 8, 5, 3)
q = quaternionic.array.from_vector_part(vec)
assert q.shape[-1] == 4
assert q.shape[:-1] == vec.shape[:-1]
assert np.array_equal(q.to_vector_part, vec)
def test_to_rotation_matrix(Rs, eps, array):
one, x, y, z = tuple(array(np.eye(4)))
zero = 0.0 * one
Rs = array(Rs.ndarray)
def quat_mat(quat):
return np.array([(quat * v * quat.inverse).vector for v in [x, y, z]]).T
def quat_mat_vec(quats):
mat_vec = np.array([(quats * v * np.reciprocal(quats)).vector for v in [x, y, z]])
return np.transpose(mat_vec, tuple(range(mat_vec.ndim))[1:-1]+(-1, 0))
with np.errstate(invalid='raise'):
with pytest.raises((FloatingPointError, ZeroDivisionError)):
zero.to_rotation_matrix
for R in Rs:
# Test correctly normalized rotors:
assert np.allclose(quat_mat(R), R.to_rotation_matrix, atol=2*eps)
# Test incorrectly normalized rotors:
assert np.allclose(quat_mat(R), (1.1*R).to_rotation_matrix, atol=2*eps)
Rs0 = Rs.copy()
Rs0[Rs.shape[0]//2] = zero
with np.errstate(invalid='raise'):
with pytest.raises((FloatingPointError, ZeroDivisionError)):
Rs0.to_rotation_matrix
# Test correctly normalized rotors:
assert np.allclose(quat_mat_vec(Rs), Rs.to_rotation_matrix, atol=2*eps)
# Test incorrectly normalized rotors:
assert np.allclose(quat_mat_vec(Rs), (1.1*Rs).to_rotation_matrix, atol=2*eps)
# Simply test that this function succeeds and returns the right shape
assert (Rs.reshape((2, 5, 10, 4))).to_rotation_matrix.shape == (2, 5, 10, 3, 3)
def test_from_rotation_matrix(Rs, eps):
from scipy import linalg
for nonorthogonal in [True, False]:
if nonorthogonal:
rot_mat_eps = 10*eps
else:
rot_mat_eps = 5*eps
for i, R1 in enumerate(Rs):
R2 = quaternionic.array.from_rotation_matrix(R1.to_rotation_matrix, nonorthogonal=nonorthogonal)
d = quaternionic.distance.rotation.intrinsic(R1, R2)
assert d < rot_mat_eps, (i, R1, R2, d) # Can't use allclose here; we don't care about rotor sign
Rs2 = quaternionic.array.from_rotation_matrix(Rs.to_rotation_matrix, nonorthogonal=nonorthogonal)
for R1, R2 in zip(Rs, Rs2):
d = quaternionic.distance.rotation.intrinsic(R1, R2)
assert d < rot_mat_eps, (R1, R2, d) # Can't use allclose here; we don't care about rotor sign
Rs3 = Rs.reshape((2, 5, 10, 4))
Rs4 = quaternionic.array.from_rotation_matrix(Rs3.to_rotation_matrix)
for R3, R4 in zip(Rs3.flattened, Rs4.flattened):
d = quaternionic.distance.rotation.intrinsic(R3, R4)
assert d < rot_mat_eps, (R3, R4, d) # Can't use allclose here; we don't care about rotor sign
def test_to_transformation_matrix(Rs, eps, array):
one, x, y, z = tuple(array(np.eye(4)))
zero = 0.0 * one
Rs = array(Rs.ndarray)
def quat_mat(quat):
return np.array([(quat * v * np.conjugate(quat)).ndarray for v in [one, x, y, z]]).T
def quat_mat_vec(quats):
mat_vec = np.array([(quats * v * np.conjugate(quats)).ndarray for v in [one, x, y, z]])
return np.transpose(mat_vec, tuple(range(mat_vec.ndim))[1:-1]+(-1, 0))
# Test individual quaternions
for R in Rs:
# Test correctly normalized rotors:
assert np.allclose(quat_mat(R), R.to_transformation_matrix, atol=2*eps)
# Test incorrectly normalized rotors:
for scale in [0.0, 0.123, 0.5, 1.1, 2.3]:
assert np.allclose(scale**2*quat_mat(R), (scale*R).to_transformation_matrix, atol=2*eps)
# Test vectorized quaternions
# Test correctly normalized rotors:
assert np.allclose(quat_mat_vec(Rs), Rs.to_transformation_matrix, atol=2*eps)
# Test incorrectly normalized rotors:
for scale in [0.0, 0.123, 0.5, 1.1, 2.3]:
assert np.allclose(scale**2*quat_mat_vec(Rs), (scale*Rs).to_transformation_matrix, atol=2*eps)
# Simply test that this function succeeds and returns the right shape
assert (Rs.reshape((2, 5, 10, 4))).to_transformation_matrix.shape == (2, 5, 10, 4, 4)
def test_to_rotation_vector():
np.random.seed(1234)
n_tests = 1000
vecs = np.random.uniform(high=math.pi/math.sqrt(3), size=n_tests*3).reshape((n_tests, 3))
quats = np.zeros(vecs.shape[:-1]+(4,))
quats[..., 1:] = vecs[...]
quats = quaternionic.array(quats)
quats = np.exp(quats/2)
quat_vecs = quats.to_rotation_vector
assert np.allclose(quat_vecs, vecs)
def test_from_rotation_vector():
np.random.seed(1234)
n_tests = 1000
vecs = np.random.uniform(high=math.pi/math.sqrt(3), size=n_tests*3).reshape((n_tests, 3))
quats = np.zeros(vecs.shape[:-1]+(4,))
quats[..., 1:] = vecs[...]
quats = quaternionic.array(quats)
quats = np.exp(quats/2)
quat_vecs = quats.to_rotation_vector
quats2 = quaternionic.array.from_rotation_vector(quat_vecs)
assert np.allclose(quats, quats2)
def test_from_spherical_coordinates():
one, x, y, z = tuple(quaternionic.array(np.eye(4)))
zero = 0.0 * one
np.random.seed(1843)
random_angles = [[np.random.uniform(-np.pi, np.pi), np.random.uniform(-np.pi, np.pi)]
for i in range(5000)]
for vartheta, varphi in random_angles:
q = quaternionic.array.from_spherical_coordinates(vartheta, varphi)
assert abs((np.exp(quaternionic.array(0, 0, 0, varphi / 2.)) * np.exp(quaternionic.array(0, 0, vartheta / 2., 0)))
- q) < 1.e-15
xprime = q * x * q.inverse
yprime = q * y * q.inverse
zprime = q * z * q.inverse
nhat = quaternionic.array(
0.0,
math.sin(vartheta)*math.cos(varphi),
math.sin(vartheta)*math.sin(varphi),
math.cos(vartheta)
)
thetahat = quaternionic.array(
0.0,
math.cos(vartheta)*math.cos(varphi),
math.cos(vartheta)*math.sin(varphi),
-math.sin(vartheta)
)
phihat = quaternionic.array(0.0, -math.sin(varphi), math.cos(varphi), 0.0)
assert abs(xprime - thetahat) < 1.e-15
assert abs(yprime - phihat) < 1.e-15
assert abs(zprime - nhat) < 1.e-15
assert np.max(np.abs(
quaternionic.array.from_spherical_coordinates(random_angles)
- quaternionic.array([quaternionic.array.from_spherical_coordinates(vartheta, varphi) for vartheta, varphi in random_angles])
)) < 1.e-15
def test_to_spherical_coordinates(Rs):
one, x, y, z = tuple(quaternionic.array(np.eye(4)))
zero = 0.0 * one
np.random.seed(1843)
# First test on rotors that are precisely spherical-coordinate rotors
random_angles = [[np.random.uniform(0, np.pi), np.random.uniform(0, 2*np.pi)]
for i in range(5000)]
for vartheta, varphi in random_angles:
vartheta2, varphi2 = (quaternionic.array.from_spherical_coordinates(vartheta, varphi)).to_spherical_coordinates
varphi2 = (varphi2 + 2*np.pi) if varphi2 < 0 else varphi2
assert abs(vartheta - vartheta2) < 1e-12, ((vartheta, varphi), (vartheta2, varphi2))
assert abs(varphi - varphi2) < 1e-12, ((vartheta, varphi), (vartheta2, varphi2))
# Now test that arbitrary rotors rotate z to the appropriate location
for R in Rs:
vartheta, varphi = R.to_spherical_coordinates
R2 = quaternionic.array.from_spherical_coordinates(vartheta, varphi)
assert (R*z*R.inverse - R2*z*R2.inverse).abs < 4e-15, (R, R2, (vartheta, varphi))
def test_from_euler_angles():
np.random.seed(1843)
random_angles = [[np.random.uniform(-np.pi, np.pi),
np.random.uniform(-np.pi, np.pi),
np.random.uniform(-np.pi, np.pi)]
for i in range(5000)]
for alpha, beta, gamma in random_angles:
assert abs((np.exp(quaternionic.array(0, 0, 0, alpha / 2.))
* np.exp(quaternionic.array(0, 0, beta / 2., 0))
* np.exp(quaternionic.array(0, 0, 0, gamma / 2.))
)
- quaternionic.array.from_euler_angles(alpha, beta, gamma)) < 1.e-15
assert np.max(np.abs(quaternionic.array.from_euler_angles(random_angles)
- quaternionic.array([quaternionic.array.from_euler_angles(alpha, beta, gamma)
for alpha, beta, gamma in random_angles]))) < 1.e-15
def test_to_euler_angles(eps, array):
np.random.seed(1843)
random_angles = [[np.random.uniform(-np.pi, np.pi),
np.random.uniform(-np.pi, np.pi),
np.random.uniform(-np.pi, np.pi)]
for i in range(5000)]
for alpha, beta, gamma in random_angles:
R1 = array.from_euler_angles(alpha, beta, gamma)
R2 = array.from_euler_angles(*list(R1.to_euler_angles))
d = quaternionic.distance.rotation.intrinsic(R1, R2)
assert d < 6e3*eps, ((alpha, beta, gamma), R1, R2, d) # Can't use allclose here; we don't care about rotor sign
q0 = array(0, 0.6, 0.8, 0)
assert q0.norm == 1.0
assert abs(q0 - array.from_euler_angles(*list(q0.to_euler_angles))) < 1.e-15
def test_from_euler_phases(eps, array):
np.random.seed(1843)
random_angles = [[np.random.uniform(-np.pi, 2 * np.pi),
np.random.uniform(0, np.pi),
np.random.uniform(-np.pi, 2 * np.pi)]
for i in range(5000)]
for alpha, beta, gamma in random_angles:
R1 = array.from_euler_angles(alpha, beta, gamma)
R2 = array.from_euler_phases([np.exp(1j * alpha), np.exp(1j * beta), np.exp(1j * gamma)])
d = quaternionic.distance.rotation.intrinsic(R1, R2)
assert d < 8*eps, ((alpha, beta, gamma), R1, R2, d) # Can't use allclose here; we don't care about rotor sign
def test_to_euler_phases(eps, array):
np.random.seed(1843)
random_angles = [
[np.random.uniform(-np.pi, 2 * np.pi),
np.random.uniform(0, np.pi),
np.random.uniform(-np.pi, 2 * np.pi)]
for _ in range(5000)
]
for alpha, beta, gamma in random_angles:
z1 = array.from_euler_angles(alpha, beta, gamma).to_euler_phases
z2 = np.array([np.exp(1j * alpha), np.exp(1j * beta), np.exp(1j * gamma)])
assert abs(z1[0] - z2[0]) < 5*eps, (alpha, beta, gamma, z1, z2)
assert abs(z1[1] - z2[1]) < 5*eps, (alpha, beta, gamma, z1, z2)
assert abs(z1[2] - z2[2]) < 5*eps, (alpha, beta, gamma, z1, z2)
random_angles = tuple(
[np.random.uniform(-np.pi, 2 * np.pi),
0.0,
np.random.uniform(-np.pi, 2 * np.pi)]
for _ in range(50)
)
for alpha, beta, gamma in random_angles:
R1 = array.from_euler_angles(alpha, beta, gamma)
R1.x = 0.0
R1.y = 0.0
z1 = R1.to_euler_phases
z2 = np.array([np.exp(1j * alpha), np.exp(1j * beta), np.exp(1j * gamma)])
assert abs(z1[1] - 1) < 5*eps, (alpha, beta, gamma, z1, z2)
assert abs(z1[1] - z2[1]) < 5*eps, (alpha, beta, gamma, z1, z2)
assert abs(z1[0]*z1[2] - z2[0]*z2[2]) < 5*eps, (alpha, beta, gamma, z1, z2)
random_angles = tuple(
[np.random.uniform(-np.pi, 2 * np.pi),
np.pi,
np.random.uniform(-np.pi, 2 * np.pi)]
for _ in range(50)
)
for alpha, beta, gamma in random_angles:
R1 = array.from_euler_angles(alpha, beta, gamma)
R1.w = 0.0
R1.z = 0.0
z1 = R1.to_euler_phases
z2 = np.array([np.exp(1j * alpha), np.exp(1j * beta), np.exp(1j * gamma)])
assert abs(z1[1] - -1) < 5*eps, (alpha, beta, gamma, z1, z2)
assert abs(z1[1] - z2[1]) < 5*eps, (alpha, beta, gamma, z1, z2)
assert abs(z1[0]*z1[2].conjugate() - z2[0]*z2[2].conjugate()) < 5*eps, (alpha, beta, gamma, z1, z2)
def test_to_angular_velocity():
import math
import numpy as np
import quaternionic
t0 = 0.0
t2 = 10_000.0
Omega_orb = 2 * math.pi * 100 / t2
Omega_prec = 2 * math.pi * 10 / t2
alpha = 0.125 * math.pi
alphadot = 2 * alpha / t2
nu = 0.2 * alpha
Omega_nu = Omega_prec
R0 = np.exp(-1.1 * alpha * quaternionic.x / 2)
def R(t):
return (R0
* np.exp(Omega_prec * t * quaternionic.z / 2) * np.exp((alpha + alphadot * t) * quaternionic.x / 2)
* np.exp(-Omega_prec * t * quaternionic.z / 2)
* np.exp(Omega_orb * t * quaternionic.z / 2)
* np.exp(nu * np.cos(Omega_nu * t) * quaternionic.y / 2))
def Rdot(t):
R_dynamic = R0.inverse * R(t)
R_prec = np.exp(Omega_prec * t * quaternionic.z / 2)
R_nu = np.exp(nu * np.cos(Omega_nu * t) * quaternionic.y / 2)
return R0 * (0.5 * Omega_prec * quaternionic.z * R_dynamic
+ 0.5 * alphadot * R_prec * quaternionic.x * R_prec.conj() * R_dynamic
+ 0.5 * (Omega_orb - Omega_prec) * R_dynamic * R_nu.inverse * quaternionic.z * R_nu
+ 0.5 * (-Omega_nu * nu * np.sin(Omega_nu * t)) * R_dynamic * quaternionic.y)
def Omega_tot(_, t):
Rotor = R(t)
RotorDot = Rdot(t)
return (2 * RotorDot * Rotor.inverse).vector
t = np.linspace(t0, t2/100, num=10_000)
R_approx = R(t).to_angular_velocity(t, t_new=None, axis=0)
R_exact = Omega_tot(None, t)
assert np.max(np.linalg.norm(R_approx - R_exact, axis=1)) < 5e-13
t = np.linspace(t0, t2/100, num=10_000)
t_new = np.linspace(t0, t2/100, num=103)
R_approx = R(t).to_angular_velocity(t, t_new=t_new, axis=0)
R_exact = Omega_tot(None, t_new)
assert np.max(np.linalg.norm(R_approx - R_exact, axis=1)) < 5e-13
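def _sketch_constant_rotation_angular_velocity():
    # Hedged extra check (not in the original suite, hence not named test_*):
    # for uniform rotation about z, R(t) = exp(ω t z / 2), the relation
    # ω = 2 * Rdot * R⁻¹ used in Omega_tot above reduces to the constant
    # vector (0, 0, ω), which to_angular_velocity should recover numerically.
    ω = 0.3
    t = np.linspace(0.0, 10.0, num=1_000)
    R = np.exp((ω * t / 2) * quaternionic.z)
    omega_approx = R.to_angular_velocity(t)
    assert np.allclose(omega_approx, np.array([0.0, 0.0, ω]), atol=1e-6)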
def test_from_angular_velocity():
import math
import numpy as np
import quaternionic
t0 = 0.0
t2 = 10_000.0
Omega_orb = 2 * math.pi * 100 / t2
Omega_prec = 2 * math.pi * 10 / t2
alpha = 0.125 * math.pi
alphadot = 2 * alpha / t2
nu = 0.2 * alpha
Omega_nu = Omega_prec
R0 = np.exp(-1.1 * alpha * quaternionic.x / 2)
def R(t):
return (R0
* np.exp(Omega_prec * t * quaternionic.z / 2) * np.exp((alpha + alphadot * t) * quaternionic.x / 2)
* np.exp(-Omega_prec * t * quaternionic.z / 2)
* np.exp(Omega_orb * t * quaternionic.z / 2)
* np.exp(nu * np.cos(Omega_nu * t) * quaternionic.y / 2))
def Rdot(t):
R_dynamic = R0.inverse * R(t)
R_prec = np.exp(Omega_prec * t * quaternionic.z / 2)
R_nu = np.exp(nu * np.cos(Omega_nu * t) * quaternionic.y / 2)
return R0 * (0.5 * Omega_prec * quaternionic.z * R_dynamic
+ 0.5 * alphadot * R_prec * quaternionic.x * R_prec.conj() * R_dynamic
+ 0.5 * (Omega_orb - Omega_prec) * R_dynamic * R_nu.inverse * quaternionic.z * R_nu
+ 0.5 * (-Omega_nu * nu * np.sin(Omega_nu * t)) * R_dynamic * quaternionic.y)
def Omega_tot(_, t):
Rotor = R(t)
RotorDot = Rdot(t)
return (2 * RotorDot * Rotor.inverse).vector
t = np.linspace(t0, t2/10, num=1_000)
# Test raisers
with pytest.raises(ValueError):
R_approx = quaternionic.array.from_angular_velocity([1+2j, 3+4j], t, R0=R(t0), tolerance=1e-6)
with pytest.raises(ValueError):
R_approx = quaternionic.array.from_angular_velocity(np.random.rand(17, 2), t, R0=R(t0), tolerance=1e-6)
# Test with exact Omega function
R_approx = quaternionic.array.from_angular_velocity(Omega_tot, t, R0=R(t0), tolerance=1e-6)
R_exact = R(t)
# phi_Delta = np.array([quaternionic.distance.rotation.intrinsic(e, a) for e, a in zip(R_exact, R_approx)])
phi_Delta = quaternionic.distance.rotation.intrinsic(R_exact, R_approx)
assert np.max(phi_Delta) < 1e-4, np.max(phi_Delta)
# Test with exact Omega function
R_approx = quaternionic.array.from_angular_velocity(Omega_tot, t, R0=None, tolerance=1e-6)
R_exact = R(t) * R(t0).inverse
# phi_Delta = np.array([quaternionic.distance.rotation.intrinsic(e, a) for e, a in zip(R_exact, R_approx)])
phi_Delta = quaternionic.distance.rotation.intrinsic(R_exact, R_approx)
assert np.max(phi_Delta) < 1e-4, np.max(phi_Delta)
# Test with explicit values, given at the moments output above
v = np.array([Omega_tot(None, ti) for ti in t])
R_approx = quaternionic.array.from_angular_velocity(v, t, R0=R(t0), tolerance=1e-6)
R_exact = R(t)
phi_Delta = quaternionic.distance.rotation.intrinsic(R_exact, R_approx)
assert np.max(phi_Delta) < 1e-4, np.max(phi_Delta)
def test_to_minimal_rotation():
import math
import numpy as np
import quaternionic
t = np.linspace(0.0, 100.0, num=1_000)
ω = (5 * 2 * np.pi) / (t[-1] - t[0])
# Test basic removal of rotation about z
q = np.exp((ω * t / 2) * quaternionic.z)
q_minimal_rotation = q.to_minimal_rotation(t, t_new=None, axis=0, iterations=2)
qa = q * quaternionic.z * q.inverse
qb = q_minimal_rotation * quaternionic.z * q_minimal_rotation.inverse
assert np.max((qa - qb).norm) < 1e-16
assert np.max((q_minimal_rotation - quaternionic.one).norm) < 1e-16
# Test same with t_new
t_new = np.linspace(0.0, 100.0, num=1_005)
ω = (5 * 2 * np.pi) / (t[-1] - t[0])
q = np.exp((ω * t / 2) * quaternionic.z)
q_new = np.exp((ω * t_new / 2) * quaternionic.z)
q_minimal_rotation = q.to_minimal_rotation(t, t_new=t_new, axis=0, iterations=2)
qa = q_new * quaternionic.z * q_new.inverse
qb = q_minimal_rotation * quaternionic.z * q_minimal_rotation.inverse
assert t_new.shape[0] == q_minimal_rotation.shape[0]
assert np.max((qa - qb).norm) < 1e-16
assert np.max((q_minimal_rotation - quaternionic.one).norm) < 1e-16
# Test rotation onto uniform rotation in x-y plane
q = quaternionic.array(
np.stack(
(
np.ones(t.size),
np.cos(ω*t),
np.sin(ω*t),
np.zeros(t.size)
),
axis=1
)
/ np.sqrt(2)
)
q_minimal_rotation = q.to_minimal_rotation(t)
qa = q * quaternionic.z * q.inverse
qb = q_minimal_rotation * quaternionic.z * q_minimal_rotation.inverse
assert np.max((qa - qb).norm) < 1e-16
assert np.max(abs(ω - np.linalg.norm(q_minimal_rotation.to_angular_velocity(t), axis=1))) < 1e-8
assert np.max(abs(q_minimal_rotation.to_angular_velocity(t)[:, :2])) < 1e-8
def test_random():
q = quaternionic.array.random()
assert isinstance(q, quaternionic.array)
assert q.dtype == np.float64
assert q.shape == (4,)
q = quaternionic.array.random(tuple())
assert isinstance(q, quaternionic.array)
assert q.dtype == np.float64
assert q.shape == (4,)
q = quaternionic.array.random(17)
assert isinstance(q, quaternionic.array)
assert q.dtype == np.float64
assert q.shape == (17, 4)
q = quaternionic.array.random((17, 3))
assert isinstance(q, quaternionic.array)
assert q.dtype == np.float64
assert q.shape == (17, 3, 4)
q = quaternionic.array.random((17, 3, 4))
assert isinstance(q, quaternionic.array)
assert q.dtype == np.float64
assert q.shape == (17, 3, 4)
q = quaternionic.array.random((17, 3, 4), normalize=True)
assert np.max(np.abs(1 - q.abs)) < 4 *
|
np.finfo(float)
|
numpy.finfo
|
import math
import random
import numpy as np
from .bamboo import Bamboo
from .builder import INTERNODE
from .builder import build_internode
from .warp_internode import WarpInternode
from ..utils.warp_tools import get_image_size, calc_expand_size_and_matrix, warp_bbox, clip_bbox, filter_bbox
__all__ = ['Warp', 'WarpPerspective', 'WarpResize', 'WarpScale', 'WarpStretch', 'WarpRotate', 'WarpShear', 'WarpTranslate']
@INTERNODE.register_module()
class Warp(Bamboo):
def __init__(self, internodes, ccs=True, **kwargs):
assert len(internodes) > 0
self.internode = WarpInternode(ccs=ccs)
self.internodes = []
self.ccs = ccs
for cfg in internodes:
cfg['ccs'] = False
self.internodes.append(build_internode(cfg))
def __call__(self, data_dict):
data_dict['warp_matrix'] = np.eye(3)
data_dict['warp_size'] = get_image_size(data_dict['image'])
super(Warp, self).__call__(data_dict)
data_dict['warp_tmp_matrix'] = data_dict.pop('warp_matrix')
data_dict['warp_tmp_size'] = data_dict.pop('warp_size')
data_dict = self.internode(data_dict)
return data_dict
def reverse(self, **kwargs):
return kwargs
def __repr__(self):
split_str = [i.__repr__() for i in self.internodes]
bamboo_str = type(self).__name__ + '('
for i in range(len(split_str)):
bamboo_str += '\n ' + split_str[i].replace('\n', '\n ')
bamboo_str += '\n ccs={}'.format(self.ccs)
bamboo_str = '{}\n)'.format(bamboo_str)
return bamboo_str
def rper(self):
return 'Warp(not available)'
@INTERNODE.register_module()
class WarpPerspective(WarpInternode):
def __init__(self, distortion_scale=0.5, **kwargs):
super(WarpPerspective, self).__init__(**kwargs)
self.distortion_scale = distortion_scale
@staticmethod
def get_params(width, height, distortion_scale):
"""Get parameters for ``perspective`` for a random perspective transform.
Args:
width : width of the image.
height : height of the image.
Returns:
List containing [top-left, top-right, bottom-right, bottom-left] of the original image,
List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.
"""
half_height = int(height / 2)
half_width = int(width / 2)
topleft = (random.randint(0, int(distortion_scale * half_width)),
random.randint(0, int(distortion_scale * half_height)))
topright = (random.randint(width - int(distortion_scale * half_width) - 1, width - 1),
random.randint(0, int(distortion_scale * half_height)))
botright = (random.randint(width - int(distortion_scale * half_width) - 1, width - 1),
random.randint(height - int(distortion_scale * half_height) - 1, height - 1))
botleft = (random.randint(0, int(distortion_scale * half_width)),
random.randint(height - int(distortion_scale * half_height) - 1, height - 1))
startpoints = [(0, 0), (width - 1, 0), (width - 1, height - 1), (0, height - 1)]
endpoints = [topleft, topright, botright, botleft]
# endpoints = [(83, 18), (605, 25), (605, 397), (139, 341)]
return startpoints, endpoints
@staticmethod
def build_matrix(startpoints, endpoints):
matrix = []
for p1, p2 in zip(startpoints, endpoints):
matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])
A = np.array(matrix)
B = np.array(endpoints).flatten()
c, _, _, _ = np.linalg.lstsq(A, B, rcond=None)
c = c.tolist() + [1]
        return np.array(c).reshape(3, 3)
def __call__(self, data_dict):
if 'warp_size' in data_dict.keys():
size = data_dict['warp_size']
else:
size = get_image_size(data_dict['image'])
width, height = size
startpoints, endpoints = self.get_params(width, height, self.distortion_scale)
M = self.build_matrix(startpoints, endpoints)
if self.expand:
# print(startpoints, endpoints)
xx = [e[0] for e in endpoints]
yy = [e[1] for e in endpoints]
nw = max(xx) - min(xx)
nh = max(yy) - min(yy)
E = np.eye(3)
E[0, 2] = -min(xx)
E[1, 2] = -min(yy)
M = E @ M
size = (nw, nh)
# print(size, 'new')
data_dict['warp_tmp_matrix'] = M
data_dict['warp_tmp_size'] = size
super(WarpPerspective, self).__call__(data_dict)
return data_dict
def __repr__(self):
return 'WarpPerspective(distortion_scale={}, {})'.format(self.distortion_scale, super(WarpPerspective, self).__repr__())
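# Hedged verification sketch (not part of the original module, never called):
# the 3x3 matrix returned by WarpPerspective.build_matrix should map each
# start point onto its end point in homogeneous coordinates.
def _check_perspective_matrix(startpoints, endpoints):
    M = WarpPerspective.build_matrix(startpoints, endpoints)
    for (x, y), (u, v) in zip(startpoints, endpoints):
        px, py, pw = M @ np.array([x, y, 1.0])
        assert abs(px / pw - u) < 1e-6 and abs(py / pw - v) < 1e-6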
@INTERNODE.register_module()
class WarpResize(WarpInternode):
def __init__(self, size, keep_ratio=True, short=False, **kwargs):
super(WarpResize, self).__init__(**kwargs)
assert len(size) == 2
assert size[0] > 0 and size[1] > 0
self.size = size
self.keep_ratio = keep_ratio
self.short = short
def build_matrix(self, img_size):
w, h = img_size
C = np.eye(3)
C[0, 2] = -w / 2
C[1, 2] = -h / 2
R = np.eye(3)
if self.keep_ratio:
if self.short:
r = max(self.size[0] / w, self.size[1] / h)
else:
r = min(self.size[0] / w, self.size[1] / h)
R[0, 0] = r
R[1, 1] = r
ow = (self.size[0] - R[0, 0] * w) / 2
oh = (self.size[1] - R[1, 1] * h) / 2
else:
R[0, 0] = self.size[0] / w
R[1, 1] = self.size[1] / h
ow = 0
oh = 0
CI = np.eye(3)
# if self.center:
# CI[0, 2] = self.size[0] / 2
# CI[1, 2] = self.size[1] / 2
# else:
CI[0, 2] = self.size[0] / 2 - ow
CI[1, 2] = self.size[1] / 2 - oh
return CI @ R @ C
def calc_scale(self, size):
w, h = size
tw, th = self.size
rw, rh = tw / w, th / h
if self.keep_ratio:
if self.short:
r = max(rh, rw)
scale = (r, r)
else:
r = min(rh, rw)
scale = (r, r)
else:
scale = (rw, rh)
return scale
def __call__(self, data_dict):
if 'warp_size' in data_dict.keys():
size = data_dict['warp_size']
else:
size = get_image_size(data_dict['image'])
M = self.build_matrix(size)
if self.keep_ratio and (self.expand or self.short):
_, new_size = calc_expand_size_and_matrix(M, size)
size = new_size
else:
size = self.size
# print(M, 'M1')
data_dict['warp_tmp_matrix'] = M
data_dict['warp_tmp_size'] = size
super(WarpResize, self).__call__(data_dict)
return data_dict
def reverse(self, **kwargs):
if 'resize_and_padding_reverse_flag' not in kwargs.keys():
return kwargs
if 'ori_size' in kwargs.keys():
h, w = kwargs['ori_size']
h, w = int(h), int(w)
M = self.build_matrix((w, h))
# print(M, 'M2', type(M))
# print(np.matrix(M).I)
            M = np.linalg.inv(M)
if 'bbox' in kwargs.keys():
boxes = warp_bbox(kwargs['bbox'], M)
boxes = clip_bbox(boxes, (w, h))
keep = filter_bbox(boxes)
kwargs['bbox'] = boxes[keep]
if 'bbox_meta' in kwargs.keys():
kwargs['bbox_meta'].filter(keep)
return kwargs
def __repr__(self):
return 'WarpResize(size={}, keep_ratio={}, short={}, {})'.format(self.size, self.keep_ratio, self.short, super(WarpResize, self).__repr__())
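# Hedged sketch of the CI @ R @ C pattern used by WarpResize above and the
# scale/stretch/rotate/shear nodes below: C moves the image centre to the
# origin, R applies the scale (or rotation/shear), and CI places the result on
# the output canvas. For WarpResize with keep_ratio=True the content is scaled
# by r = min(tw/w, th/h) and anchored at the top-left corner, so (0, 0) stays
# fixed and (w, h) maps to (r*w, r*h). Illustrative only and never called;
# assumes WarpInternode's defaults are enough to construct the node.
def _check_resize_matrix(in_size=(640, 480), out_size=(256, 256)):
    w, h = in_size
    M = WarpResize(out_size).build_matrix(in_size)
    r = min(out_size[0] / w, out_size[1] / h)
    assert np.allclose(M @ np.array([0.0, 0.0, 1.0]), [0.0, 0.0, 1.0])
    assert np.allclose(M @ np.array([w, h, 1.0]), [r * w, r * h, 1.0])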
@INTERNODE.register_module()
class WarpScale(WarpInternode):
def __init__(self, r, **kwargs):
super(WarpScale, self).__init__(**kwargs)
assert len(r) == 2
assert r[0] <= r[1] and r[0] > 0
self.r = tuple(r)
@staticmethod
def build_matrix(r, img_size):
w, h = img_size
C = np.eye(3)
C[0, 2] = -w / 2
C[1, 2] = -h / 2
R = np.eye(3)
R[0, 0] = r
R[1, 1] = r
CI = np.eye(3)
CI[0, 2] = w / 2
CI[1, 2] = h / 2
return CI @ R @ C
def __call__(self, data_dict):
if 'warp_size' in data_dict.keys():
size = data_dict['warp_size']
else:
size = get_image_size(data_dict['image'])
r = random.uniform(*self.r)
M = self.build_matrix(r, size)
data_dict['warp_tmp_matrix'] = M
data_dict['warp_tmp_size'] = size
super(WarpScale, self).__call__(data_dict)
return data_dict
def __repr__(self):
return 'WarpScale(r={}, {})'.format(self.r, super(WarpScale, self).__repr__())
@INTERNODE.register_module()
class WarpStretch(WarpInternode):
def __init__(self, rw, rh, **kwargs):
super(WarpStretch, self).__init__(**kwargs)
assert len(rw) == 2 and len(rh) == 2
assert rw[0] <= rw[1] and rw[0] > 0
assert rh[0] <= rh[1] and rh[0] > 0
self.rw = tuple(rw)
self.rh = tuple(rh)
@staticmethod
def build_matrix(rs, img_size):
w, h = img_size
C = np.eye(3)
C[0, 2] = -w / 2
C[1, 2] = -h / 2
R = np.eye(3)
R[0, 0] = rs[0]
R[1, 1] = rs[1]
CI = np.eye(3)
CI[0, 2] = w / 2
CI[1, 2] = h / 2
return CI @ R @ C
def __call__(self, data_dict):
if 'warp_size' in data_dict.keys():
size = data_dict['warp_size']
else:
size = get_image_size(data_dict['image'])
rw = random.uniform(*self.rw)
rh = random.uniform(*self.rh)
M = self.build_matrix((rw, rh), size)
data_dict['warp_tmp_matrix'] = M
data_dict['warp_tmp_size'] = size
super(WarpStretch, self).__call__(data_dict)
return data_dict
def __repr__(self):
return 'WarpStretch(rw={}, rh={}, {})'.format(self.rw, self.rh, super(WarpStretch, self).__repr__())
@INTERNODE.register_module()
class WarpRotate(WarpInternode):
def __init__(self, angle, **kwargs):
super(WarpRotate, self).__init__(**kwargs)
assert -180 < angle[0] <= 180
assert -180 < angle[1] <= 180
assert angle[0] <= angle[1]
self.angle = angle
@staticmethod
def build_matrix(angle, img_size):
w, h = img_size
angle = math.radians(angle)
C = np.eye(3)
C[0, 2] = -w / 2
C[1, 2] = -h / 2
R = np.eye(3)
R[0, 0] = round(math.cos(angle), 15)
R[0, 1] = -round(math.sin(angle), 15)
R[1, 0] = round(math.sin(angle), 15)
R[1, 1] = round(math.cos(angle), 15)
CI = np.eye(3)
CI[0, 2] = w / 2
CI[1, 2] = h / 2
return CI @ R @ C
def __call__(self, data_dict):
angle = random.uniform(self.angle[0], self.angle[1])
if angle != 0:
if 'warp_size' in data_dict.keys():
size = data_dict['warp_size']
else:
size = get_image_size(data_dict['image'])
M = self.build_matrix(angle, size)
if self.expand:
E, new_size = calc_expand_size_and_matrix(M, size)
M = E @ M
size = new_size
data_dict['warp_tmp_matrix'] = M
data_dict['warp_tmp_size'] = size
super(WarpRotate, self).__call__(data_dict)
return data_dict
def __repr__(self):
return 'WarpRotate(angle={}, {})'.format(self.angle, super(WarpRotate, self).__repr__())
@INTERNODE.register_module()
class WarpShear(WarpInternode):
def __init__(self, ax, ay, **kwargs):
super(WarpShear, self).__init__(**kwargs)
assert len(ax) == 2 and len(ay) == 2
assert ax[0] <= ax[1]
assert ay[0] <= ay[1]
self.ax = tuple(ax)
self.ay = tuple(ay)
@staticmethod
def build_matrix(angles, img_size):
w, h = img_size
ax, ay = math.radians(angles[0]), math.radians(angles[1])
C = np.eye(3)
C[0, 2] = -w / 2
C[1, 2] = -h / 2
S = np.eye(3)
S[0, 1] = math.tan(ax)
S[1, 0] = math.tan(ay)
CI = np.eye(3)
CI[0, 2] = w / 2
CI[1, 2] = h / 2
return CI @ S @ C
def __call__(self, data_dict):
if 'warp_size' in data_dict.keys():
size = data_dict['warp_size']
else:
size = get_image_size(data_dict['image'])
shear = (random.uniform(*self.ax), random.uniform(*self.ay))
M = self.build_matrix(shear, size)
if self.expand:
E, new_size = calc_expand_size_and_matrix(M, size)
M = E @ M
size = new_size
data_dict['warp_tmp_matrix'] = M
data_dict['warp_tmp_size'] = size
super(WarpShear, self).__call__(data_dict)
return data_dict
def __repr__(self):
return 'WarpShear(ax={}, ay={}, {})'.format(self.ax, self.ay, super(WarpShear, self).__repr__())
@INTERNODE.register_module()
class WarpTranslate(WarpInternode):
def __init__(self, rw, rh, **kwargs):
super(WarpTranslate, self).__init__(**kwargs)
assert len(rw) == 2 and len(rh) == 2
assert rw[0] <= rw[1]
assert rh[0] <= rh[1]
self.rw = tuple(rw)
self.rh = tuple(rh)
@staticmethod
def build_matrix(translations):
T =
|
np.eye(3)
|
numpy.eye
|
"""
Functions for visualizing flow cytometry data.
Functions in this module are divided in two categories:
- Simple Plot Functions, with a signature similar to the following::
plot_fxn(data_list, channels, parameters, savefig)
where `data_list` is a NxD FCSData object or numpy array, or a list of
such, `channels` specifies the channel or channels to use for the plot,
`parameters` are function-specific parameters, and `savefig` indicates
whether to save the figure to an image file. Note that `hist1d`, `violin`,
and `violin_dose_response` use `channel` instead of `channels`, since they
use a single channel, and `density2d` only accepts one FCSData object or
numpy array as its first argument.
Simple Plot Functions do not create a new figure or axis, so they can be
called directly to plot in a previously created axis if desired. If
`savefig` is not specified, the plot is maintained in the current axis
when the function returns. This allows for further modifications to the
axis by direct calls to, for example, ``plt.xlabel``, ``plt.title``, etc.
However, if `savefig` is specified, the figure is closed after being
saved. In this case, the function may include keyword parameters
`xlabel`, `ylabel`, `xlim`, `ylim`, `title`, and others related to
legend or color, which allow the user to modify the axis prior to saving.
The following functions in this module are Simple Plot Functions:
- ``hist1d``
- ``violin``
- ``violin_dose_response``
- ``density2d``
- ``scatter2d``
- ``scatter3d``
- Complex Plot Functions, which create a figure with several axes, and use
one or more Simple Plot functions to populate the axes. They always
include a `savefig` argument, which indicates whether to save the figure
to a file. If `savefig` is not specified, the plot is maintained in the
newly created figure when the function returns. However, if `savefig` is
specified, the figure is closed after being saved.
The following functions in this module are Complex Plot Functions:
- ``density_and_hist``
- ``scatter3d_and_projections``
"""
import packaging
import packaging.version
import collections
import numpy as np
import scipy.ndimage.filters
import scipy.optimize  # used below by _LogicleTransform (scipy.optimize.root)
import matplotlib
import matplotlib.scale
import matplotlib.transforms
import matplotlib.ticker
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.font_manager import FontProperties
import warnings
# expose the collections module abstract base classes (ABCs) in both
# Python 2 and 3
try:
# python 3
collectionsAbc = collections.abc
except AttributeError:
# python 2
collectionsAbc = collections
cmap_default = plt.get_cmap('Spectral_r')
savefig_dpi = 250
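# Hedged usage sketch based on the module docstring above (channel index and
# output path are illustrative only): a Simple Plot Function such as `hist1d`
# draws into the current axis, so the axis can still be tweaked before saving,
# or `savefig` can be passed to save and close the figure in one call.
def _example_simple_plot(data, savefig=None):
    hist1d(data, channel=0)
    plt.xlabel('Fluorescence (a.u.)')
    if savefig is not None:
        plt.savefig(savefig, dpi=savefig_dpi)
        plt.close()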
###
# CUSTOM TRANSFORMS
###
class _InterpolatedInverseTransform(matplotlib.transforms.Transform):
"""
Class that inverts a given transform class using interpolation.
Parameters
----------
transform : matplotlib.transforms.Transform
Transform class to invert. It should be a monotonic transformation.
smin : float
Minimum value to transform.
smax : float
Maximum value to transform.
resolution : int, optional
        Number of points to use to evaluate `transform`. Default is 1000.
Methods
-------
transform_non_affine(x)
Apply inverse transformation to a Nx1 numpy array.
Notes
-----
Upon construction, this class generates an array of `resolution` points
between `smin` and `smax`. Next, it evaluates the specified
transformation on this array, and both the original and transformed
arrays are stored. When calling ``transform_non_affine(x)``, these two
arrays are used along with ``np.interp()`` to inverse-transform ``x``.
Note that `smin` and `smax` are also transformed and stored. When using
``transform_non_affine(x)``, any values in ``x`` outside the range
specified by `smin` and `smax` transformed are masked.
"""
# ``input_dims``, ``output_dims``, and ``is_separable`` are required by
# matplotlib.
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, transform, smin, smax, resolution=1000):
# Call parent's constructor
matplotlib.transforms.Transform.__init__(self)
# Store transform object
self._transform = transform
# Generate input array
self._s_range = np.linspace(smin, smax, resolution)
# Evaluate provided transformation and store result
self._x_range = transform.transform_non_affine(self._s_range)
# Transform bounds and store
self._xmin = transform.transform_non_affine(smin)
self._xmax = transform.transform_non_affine(smax)
if self._xmin > self._xmax:
self._xmax, self._xmin = self._xmin, self._xmax
def transform_non_affine(self, x, mask_out_of_range=True):
"""
Transform a Nx1 numpy array.
Parameters
----------
x : array
Data to be transformed.
mask_out_of_range : bool, optional
Whether to mask input values out of range.
Return
------
array or masked array
Transformed data.
"""
# Mask out-of-range values
if mask_out_of_range:
x_masked = np.ma.masked_where((x < self._xmin) | (x > self._xmax),
x)
else:
x_masked = x
# Calculate s and return
return np.interp(x_masked, self._x_range, self._s_range)
def inverted(self):
"""
Get an object representing an inverse transformation to this class.
Since this class implements the inverse of a given transformation,
this function just returns the original transformation.
Return
------
matplotlib.transforms.Transform
Object implementing the reverse transformation.
"""
return self._transform
class _LogicleTransform(matplotlib.transforms.Transform):
"""
Class implementing the Logicle transform, from scale to data values.
Relevant parameters can be specified manually, or calculated from
a given FCSData object.
Parameters
----------
T : float
Maximum range of data values. If `data` is None, `T` defaults to
262144. If `data` is not None, specifying `T` overrides the
default value that would be calculated from `data`.
M : float
(Asymptotic) number of decades in display scale units. If `data` is
None, `M` defaults to 4.5. If `data` is not None, specifying `M`
overrides the default value that would be calculated from `data`.
W : float
Width of linear range in display scale units. If `data` is None,
`W` defaults to 0.5. If `data` is not None, specifying `W`
overrides the default value that would be calculated from `data`.
data : FCSData or numpy array or list of FCSData or numpy array
Flow cytometry data from which a set of T, M, and W parameters will
be generated.
channel : str or int
Channel of `data` from which a set of T, M, and W parameters will
be generated. `channel` should be specified if `data` is
multidimensional.
Methods
-------
transform_non_affine(s)
Apply transformation to a Nx1 numpy array.
Notes
-----
Logicle scaling combines the advantages of logarithmic and linear
scaling. It is useful when data spans several orders of magnitude
(when logarithmic scaling would be appropriate) and a significant
number of datapoints are negative.
Logicle scaling is implemented using the following equation::
x = T * 10**(-(M-W)) * (10**(s-W) \
- (p**2)*10**(-(s-W)/p) + p**2 - 1)
This equation transforms data ``s`` expressed in "display scale" units
into ``x`` in "data value" units. Parameters in this equation
correspond to the class properties. ``p`` and ``W`` are related as
follows::
W = 2*p * log10(p) / (p + 1)
If a FCSData object or list of FCSData objects is specified along with
a channel, the following default logicle parameters are used: T is
taken from the largest ``data[i].range(channel)[1]`` or the largest
element in ``data[i]`` if ``data[i].range()`` is not available, M is
set to the largest of 4.5 and ``4.5 / np.log10(262144) * np.log10(T)``,
and W is taken from ``(M - log10(T / abs(r))) / 2``, where ``r`` is the
minimum negative event. If no negative events are present, W is set to
zero.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, "A New Logicle Display
Method Avoids Deceptive Effects of Logarithmic Scaling for Low Signals
and Compensated Data," Cytometry Part A 69A:541-551, 2006, PMID
16604519.
"""
# ``input_dims``, ``output_dims``, and ``is_separable`` are required by
# matplotlib.
input_dims = 1
output_dims = 1
is_separable = True
# Locator objects need this object to store the logarithm base used as an
# attribute.
base = 10
def __init__(self, T=None, M=None, W=None, data=None, channel=None):
matplotlib.transforms.Transform.__init__(self)
# If data is included, try to obtain T, M and W from it
if data is not None:
# Convert to list if necessary
if not isinstance(data, list):
data = [data]
# Obtain T, M, and W if not specified
# If elements of data have ``.range()``, use it to determine the
# max data value. Else, use the maximum value in the array.
if T is None:
T = 0
for d in data:
# Extract channel
if d.ndim > 1:
if channel is None:
msg = "if multidimensional data is provided, a"
msg += " channel should be specified"
raise ValueError(msg)
else:
y = d[:, channel]
else:
y = d
if hasattr(y, 'range') and hasattr(y.range, '__call__'):
Ti = y.range(0)[1]
else:
Ti = np.max(y)
T = Ti if Ti > T else T
if M is None:
M = max(4.5, 4.5 / np.log10(262144) * np.log10(T))
if W is None:
W = 0
for d in data:
# Extract channel
if d.ndim > 1:
if channel is None:
msg = "if multidimensional data is provided, a"
msg += " channel should be specified"
raise ValueError(msg)
else:
y = d[:, channel]
else:
y = d
# If negative events are present, use minimum.
if np.any(y < 0):
r = np.min(y)
Wi = (M - np.log10(T / abs(r))) / 2
W = Wi if Wi > W else W
else:
# Default parameter values
if T is None:
T = 262144
if M is None:
M = 4.5
if W is None:
W = 0.5
# Check that property values are valid
if T <= 0:
raise ValueError("T should be positive")
if M <= 0:
raise ValueError("M should be positive")
if W < 0:
raise ValueError("W should not be negative")
# Store parameters
self._T = T
self._M = M
self._W = W
# Calculate dependent parameter p
# It is not possible to analytically obtain ``p`` as a function of W
# only, so ``p`` is calculated numerically using a root finding
# algorithm. The initial estimate provided to the algorithm is taken
# from the asymptotic behavior of the equation as ``p -> inf``. This
# results in ``W = 2*log10(p)``.
p0 = 10**(W / 2.)
# Functions to provide to the root finding algorithm
def W_f(p):
return 2*p / (p + 1) * np.log10(p)
def W_root(p, W_target):
return W_f(p) - W_target
# Find solution
        sol = scipy.optimize.root(W_root, x0=p0, args=(W,))
# Solution should be unique
assert sol.success
assert len(sol.x) == 1
# Store solution
self._p = sol.x[0]
@property
def T(self):
"""
Maximum range of data.
"""
return self._T
@property
def M(self):
"""
(Asymptotic) number of decades in display scale units.
"""
return self._M
@property
def W(self):
"""
Width of linear range in display scale units.
"""
return self._W
def transform_non_affine(self, s):
"""
Apply transformation to a Nx1 numpy array.
Parameters
----------
s : array
Data to be transformed in display scale units.
Return
------
array or masked array
Transformed data, in data value units.
"""
T = self._T
M = self._M
W = self._W
p = self._p
# Calculate x
return T * 10**(-(M-W)) * (10**(s-W) - (p**2)*10**(-(s-W)/p) + p**2 - 1)
def inverted(self):
"""
Get an object implementing the inverse transformation.
Return
------
_InterpolatedInverseTransform
Object implementing the reverse transformation.
"""
return _InterpolatedInverseTransform(transform=self,
smin=0,
smax=self._M)
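# Hedged sketch (illustrative only, never called): the logicle transform above
# maps display-scale units to data values, and inverted() wraps it with an
# interpolated inverse, so a round trip should approximately recover the
# original display-scale values (limited only by the interpolation grid).
def _example_logicle_roundtrip():
    transform = _LogicleTransform()  # default T=262144, M=4.5, W=0.5
    s = np.linspace(0.0, transform.M, 50)
    x = transform.transform_non_affine(s)
    return np.max(np.abs(transform.inverted().transform_non_affine(x) - s))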
###
# CUSTOM TICK LOCATORS AND FORMATTERS
###
class _LogicleLocator(matplotlib.ticker.Locator):
"""
Determine the tick locations for logicle axes.
Parameters
----------
transform : _LogicleTransform
transform object
subs : array, optional
Subtick values, as multiples of the main ticks. If None, do not use
subticks.
"""
def __init__(self, transform, subs=None):
self._transform = transform
if subs is None:
self._subs = [1.0]
else:
self._subs = subs
self.numticks = 15
def set_params(self, subs=None, numticks=None):
"""
Set parameters within this locator.
Parameters
----------
subs : array, optional
Subtick values, as multiples of the main ticks.
numticks : array, optional
Number of ticks.
"""
if numticks is not None:
self.numticks = numticks
if subs is not None:
self._subs = subs
def __call__(self):
"""
Return the locations of the ticks.
"""
# Note, these are untransformed coordinates
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
"""
Get a set of tick values properly spaced for logicle axis.
"""
# Extract base from transform object
b = self._transform.base
# The logicle domain is divided into two regions: A "linear" region,
# which may include negative numbers, and a "logarithmic" region, which
# only includes positive numbers. These two regions are separated by a
# value t, given by the logicle equations. An illustration is given
# below.
#
# -t ==0== t ========>
# lin log
#
# vmin and vmax can be anywhere in this domain, meaning that both should
# be greater than -t.
#
# The logarithmic region will only have major ticks at integral log
# positions. The linear region will have a major tick at zero, and one
# major tick at the largest absolute integral log value in screen
# inside this region. Subticks will be added at multiples of the
# integral log positions.
# If the linear range is too small, create new transformation object
# with slightly wider linear range. Otherwise, the number of decades
# below will be infinite
if self._transform.W == 0 or \
self._transform.M / self._transform.W > self.numticks:
self._transform = _LogicleTransform(
T=self._transform.T,
M=self._transform.M,
W=self._transform.M / self.numticks)
# Calculate t
t = - self._transform.transform_non_affine(0)
# Swap vmin and vmax if necessary
if vmax < vmin:
vmin, vmax = vmax, vmin
# Calculate minimum and maximum limits in scale units
vmins = self._transform.inverted().transform_non_affine(vmin)
vmaxs = self._transform.inverted().transform_non_affine(vmax)
# Check whether linear or log regions are present
has_linear = has_log = False
if vmin <= t:
has_linear = True
if vmax > t:
has_log = True
else:
has_log = True
# Calculate number of ticks in linear and log regions
# The number of ticks is distributed by the fraction that each region
# occupies in scale units
if has_linear:
fraction_linear = (min(vmaxs, 2*self._transform.W) - vmins) / \
(vmaxs - vmins)
numticks_linear = np.round(self.numticks*fraction_linear)
else:
numticks_linear = 0
if has_log:
fraction_log = (vmaxs - max(vmins, 2*self._transform.W)) / \
(vmaxs - vmins)
numticks_log = np.round(self.numticks*fraction_log)
else:
numticks_log = 0
# Calculate extended ranges and step size for tick location
# Extended ranges take into account discretization.
if has_log:
# The logarithmic region's range will include from the decade
# immediately below the lower end of the region to the decade
# immediately above the upper end.
# Note that this may extend the logarithmic region to the left.
log_ext_range = [np.floor(np.log(max(vmin, t)) / np.log(b)),
np.ceil(np.log(vmax) / np.log(b))]
# Since a major tick will be located at the lower end of the
# extended range, make sure that it is not too close to zero.
if vmin <= 0:
zero_s = self._transform.inverted().\
transform_non_affine(0)
min_tick_space = 1./self.numticks
while True:
min_tick_s = self._transform.inverted().\
transform_non_affine(b**log_ext_range[0])
if (min_tick_s - zero_s)/(vmaxs - vmins) < min_tick_space \
and ((log_ext_range[0] + 1) < log_ext_range[1]):
log_ext_range[0] += 1
else:
break
# Number of decades in the extended region
log_decades = log_ext_range[1] - log_ext_range[0]
# The step is at least one decade.
if numticks_log > 1:
log_step = max(np.floor(float(log_decades)/(numticks_log-1)), 1)
else:
log_step = 1
else:
# Linear region only
linear_range = [vmin, vmax]
# Initial step size will be one decade below the maximum whole
# decade in the range
linear_step = _base_down(
linear_range[1] - linear_range[0], b) / b
# Reduce the step size according to specified number of ticks
while (linear_range[1] - linear_range[0])/linear_step > \
numticks_linear:
linear_step *= b
# Get extended range by discretizing the region limits
vmin_ext = np.floor(linear_range[0]/linear_step)*linear_step
vmax_ext = np.ceil(linear_range[1]/linear_step)*linear_step
linear_range_ext = [vmin_ext, vmax_ext]
# Calculate major tick positions
major_ticklocs = []
if has_log:
# Logarithmic region present
# If a linear region is present, add the negative of the lower limit
# of the extended log region and zero. Then, add ticks for each
# logarithmic step as calculated above.
if has_linear:
major_ticklocs.append(- b**log_ext_range[0])
major_ticklocs.append(0)
# Use nextafter to pick the next floating point number, and try to
# include the upper limit in the generated range.
major_ticklocs.extend(b ** (np.arange(
log_ext_range[0],
np.nextafter(log_ext_range[1], np.inf),
log_step)))
else:
# Only linear region present
# Draw ticks according to linear step calculated above.
# Use nextafter to pick the next floating point number, and try to
# include the upper limit in the generated range.
major_ticklocs.extend(np.arange(
linear_range_ext[0],
np.nextafter(linear_range_ext[1], np.inf),
linear_step))
major_ticklocs = np.array(major_ticklocs)
# Add subticks if requested
subs = self._subs
if (subs is not None) and (len(subs) > 1 or subs[0] != 1.0):
ticklocs = []
if has_log:
# Subticks for each major tickloc present
for major_tickloc in major_ticklocs:
ticklocs.extend(subs * major_tickloc)
# Subticks from one decade below the lowest
major_ticklocs_pos = major_ticklocs[major_ticklocs > 0]
if len(major_ticklocs_pos):
tickloc_next_low = np.min(major_ticklocs_pos)/b
ticklocs.append(tickloc_next_low)
ticklocs.extend(subs * tickloc_next_low)
# Subticks for the negative linear range
if vmin < 0:
ticklocs.extend([(-ti) for ti in ticklocs if ti < -vmin ])
else:
ticklocs = list(major_ticklocs)
# If zero is present, add ticks from a decade below the lowest
if (vmin < 0) and (vmax > 0):
major_ticklocs_nonzero = major_ticklocs[
np.nonzero(major_ticklocs)]
tickloc_next_low = np.min(np.abs(major_ticklocs_nonzero))/b
ticklocs.append(tickloc_next_low)
ticklocs.extend(subs * tickloc_next_low)
ticklocs.append(-tickloc_next_low)
ticklocs.extend(subs * - tickloc_next_low)
else:
# Subticks not requested
ticklocs = major_ticklocs
# Remove ticks outside requested range
ticklocs = [t for t in ticklocs if (t >= vmin) and (t <= vmax)]
return self.raise_if_exceeds(np.array(ticklocs))
def view_limits(self, vmin, vmax):
"""
Try to choose the view limits intelligently.
"""
b = self._transform.base
if vmax < vmin:
vmin, vmax = vmax, vmin
if not matplotlib.ticker.is_decade(abs(vmin), b):
if vmin < 0:
vmin = -_base_up(-vmin, b)
else:
vmin = _base_down(vmin, b)
if not matplotlib.ticker.is_decade(abs(vmax), b):
if vmax < 0:
vmax = -_base_down(-vmax, b)
else:
vmax = _base_up(vmax, b)
if vmin == vmax:
if vmin < 0:
vmin = -_base_up(-vmin, b)
vmax = -_base_down(-vmax, b)
else:
vmin = _base_down(vmin, b)
vmax = _base_up(vmax, b)
result = matplotlib.transforms.nonsingular(vmin, vmax)
return result
class _ViolinAutoLocator(matplotlib.ticker.MaxNLocator):
"""
Default linear tick locator aware of min and max violins.
Parameters
----------
min_tick_loc : int or float, optional
Location of min violin tick. Default is None.
max_tick_loc : int or float, optional
Location of max violin tick. Default is None.
data_lim_min : int or float, optional
Location of lower boundary of data, at or below which ticks are not
illustrated. Default is None.
Other parameters
----------------
See matplotlib.ticker.MaxNLocator.
Notes
-----
The `nbins` default is 'auto' and the `steps` default is
(1, 2, 2.5, 5, 10) to emulate matplotlib.ticker.AutoLocator, which
subclasses matplotlib.ticker.MaxNLocator and installs nice defaults.
"""
default_params = matplotlib.ticker.MaxNLocator.default_params.copy()
# use defaults from matplotlib.ticker.AutoLocator
default_params['nbins'] = 'auto'
default_params['steps'] = (1, 2, 2.5, 5, 10)
# add parameters specific to violin plots
default_params.update({'min_tick_loc' : None,
'max_tick_loc' : None,
'data_lim_min' : None})
def set_params(self, **kwargs):
if 'min_tick_loc' in kwargs:
self._min_tick_loc = kwargs.pop('min_tick_loc')
if 'max_tick_loc' in kwargs:
self._max_tick_loc = kwargs.pop('max_tick_loc')
if 'data_lim_min' in kwargs:
self._data_lim_min = kwargs.pop('data_lim_min')
matplotlib.ticker.MaxNLocator.set_params(self, **kwargs)
def tick_values(self, vmin, vmax):
locs = list(matplotlib.ticker.MaxNLocator.tick_values(self,
vmin=vmin,
vmax=vmax))
# if `data_lim_min` is specified, remove all ticks at or below it
if self._data_lim_min is not None:
locs = [loc
for loc in locs
if loc > self._data_lim_min]
# add min and max violin ticks as appropriate
if self._max_tick_loc is not None:
locs.insert(0, self._max_tick_loc)
if self._min_tick_loc is not None:
locs.insert(0, self._min_tick_loc)
locs = np.array(locs)
return self.raise_if_exceeds(locs)
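# Illustrative sketch (not part of the original module): how the violin-aware
# linear locator is expected to behave when queried directly. Tick positions at
# or below `data_lim_min` are dropped, and the dedicated min/max violin ticks
# are prepended to whatever MaxNLocator proposes.
def _demo_violin_auto_locator():
    loc = _ViolinAutoLocator(min_tick_loc=-1.0, max_tick_loc=11.0,
                             data_lim_min=0.0)
    ticks = loc.tick_values(0.0, 10.0)
    assert ticks[0] == -1.0 and ticks[1] == 11.0   # violin ticks come first
    assert all(t > 0.0 for t in ticks[2:])          # ticks at/below 0.0 removed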
class _ViolinLogLocator(matplotlib.ticker.LogLocator):
"""
Default log tick locator aware of min, max, and zero violins.
Parameters
----------
min_tick_loc : int or float, optional
Location of min violin tick. Default is None.
max_tick_loc : int or float, optional
Location of max violin tick. Default is None.
zero_tick_loc : int or float, optional
Location of zero violin tick. Default is None.
data_lim_min : int or float, optional
Location of lower boundary of data, at or below which ticks are not
illustrated. Default is None.
Other parameters
----------------
See matplotlib.ticker.LogLocator.
"""
def __init__(self,
min_tick_loc=None,
max_tick_loc=None,
zero_tick_loc=None,
data_lim_min=None,
**kwargs):
self._min_tick_loc = min_tick_loc
self._max_tick_loc = max_tick_loc
self._zero_tick_loc = zero_tick_loc
self._data_lim_min = data_lim_min
matplotlib.ticker.LogLocator.__init__(self, **kwargs)
def set_params(self, **kwargs):
if 'min_tick_loc' in kwargs:
self._min_tick_loc = kwargs.pop('min_tick_loc')
if 'max_tick_loc' in kwargs:
self._max_tick_loc = kwargs.pop('max_tick_loc')
if 'zero_tick_loc' in kwargs:
self._zero_tick_loc = kwargs.pop('zero_tick_loc')
if 'data_lim_min' in kwargs:
self._data_lim_min = kwargs.pop('data_lim_min')
matplotlib.ticker.LogLocator.set_params(self, **kwargs)
def tick_values(self, vmin, vmax):
locs = list(matplotlib.ticker.LogLocator.tick_values(self,
vmin=vmin,
vmax=vmax))
# if `data_lim_min` is specified, remove all ticks at or below it
if self._data_lim_min is not None:
locs = [loc
for loc in locs
if loc > self._data_lim_min]
# add min, max, and zero violin ticks as appropriate
if self._zero_tick_loc is not None:
locs.insert(0, self._zero_tick_loc)
if self._max_tick_loc is not None:
locs.insert(0, self._max_tick_loc)
if self._min_tick_loc is not None:
locs.insert(0, self._min_tick_loc)
locs = np.array(locs)
return self.raise_if_exceeds(locs)
class _ViolinScalarFormatter(matplotlib.ticker.ScalarFormatter):
"""
Default linear tick formatter aware of min and max violins.
Parameters
----------
min_tick_loc : int or float, optional
Location of min violin tick. Default is None.
max_tick_loc : int or float, optional
Location of max violin tick. Default is None.
min_tick_label : str, optional
Label of min violin tick. Default='Min'.
max_tick_label : str, optional
Label of max violin tick. Default='Max'.
Other parameters
----------------
See matplotlib.ticker.ScalarFormatter.
"""
def __init__(self,
min_tick_loc=None,
max_tick_loc=None,
min_tick_label='Min',
max_tick_label='Max',
**kwargs):
self._min_tick_loc = min_tick_loc
self._max_tick_loc = max_tick_loc
self._min_tick_label = min_tick_label
self._max_tick_label = max_tick_label
matplotlib.ticker.ScalarFormatter.__init__(self, **kwargs)
def __call__(self, x, pos=None):
if self._min_tick_loc is not None and x == self._min_tick_loc:
return self._min_tick_label
if self._max_tick_loc is not None and x == self._max_tick_loc:
return self._max_tick_label
return matplotlib.ticker.ScalarFormatter.__call__(self, x=x, pos=pos)
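# Illustrative sketch (not part of the original module): the formatter
# short-circuits at the registered min/max tick locations and otherwise defers
# to ScalarFormatter.
def _demo_violin_scalar_formatter():
    fmt = _ViolinScalarFormatter(min_tick_loc=-1.0, max_tick_loc=11.0)
    assert fmt(-1.0) == 'Min'
    assert fmt(11.0) == 'Max'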
class _ViolinLogFormatterSciNotation(matplotlib.ticker.LogFormatterSciNotation):
"""
Default log tick formatter aware of min, max, and zero violins.
Parameters
----------
min_tick_loc : int or float, optional
Location of min violin tick. Default is None.
max_tick_loc : int or float, optional
Location of max violin tick. Default is None.
zero_tick_loc : int or float, optional
Location of zero violin tick. Default is None.
min_tick_label : str, optional
Label of min violin tick. Default='Min'.
max_tick_label : str, optional
Label of max violin tick. Default='Max'.
zero_tick_label : str, optional
Label of zero violin tick. Default is generated by
matplotlib.ticker.LogFormatterSciNotation with x=0.
Other parameters
----------------
See matplotlib.ticker.LogFormatterSciNotation.
"""
def __init__(self,
min_tick_loc=None,
max_tick_loc=None,
zero_tick_loc=None,
min_tick_label='Min',
max_tick_label='Max',
zero_tick_label=None,
**kwargs):
self._min_tick_loc = min_tick_loc
self._max_tick_loc = max_tick_loc
self._zero_tick_loc = zero_tick_loc
self._min_tick_label = min_tick_label
self._max_tick_label = max_tick_label
if zero_tick_label is None:
self._zero_tick_label = \
matplotlib.ticker.LogFormatterSciNotation.__call__(self, x=0)
else:
self._zero_tick_label = zero_tick_label
matplotlib.ticker.LogFormatterSciNotation.__init__(self, **kwargs)
def __call__(self, x, pos=None):
if self._min_tick_loc is not None and x == self._min_tick_loc:
return self._min_tick_label
if self._max_tick_loc is not None and x == self._max_tick_loc:
return self._max_tick_label
if self._zero_tick_loc is not None and x == self._zero_tick_loc:
return self._zero_tick_label
return matplotlib.ticker.LogFormatterSciNotation.__call__(self,
x=x,
pos=pos)
###
# CUSTOM SCALES
###
def _base_down(x, base=10):
"""
Floor `x` to the nearest lower ``base^n``, where ``n`` is an integer.
Parameters
----------
x : float
Number to calculate the floor from.
base : float, optional
Base used to calculate the floor.
    Returns
    -------
float
The nearest lower ``base^n`` from `x`, where ``n`` is an integer.
"""
if x == 0.0:
return -base
lx = np.floor(np.log(x) / np.log(base))
return base ** lx
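# Quick illustration (not from the original source): _base_down floors and
# _base_up (defined below) ceils a positive value to the nearest integer power
# of the base.
def _demo_base_helpers():
    assert _base_down(250) == 100.0    # 10 ** floor(log10(250))
    assert _base_up(250) == 1000.0     # 10 ** ceil(log10(250))
    assert _base_down(0.0) == -10      # zero is special-cased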
def _base_up(x, base=10):
"""
Ceil `x` to the nearest higher ``base^n``, where ``n`` is an integer.
Parameters
----------
x : float
Number to calculate the ceiling from.
base : float, optional
Base used to calculate the ceiling.
    Returns
    -------
float
The nearest higher ``base^n`` from `x`, where ``n`` is an integer.
"""
if x == 0.0:
return base
    lx = np.ceil(np.log(x) / np.log(base))
    return base ** lx
|
from bisect import bisect_left, bisect_right
import numpy
from datetime import date, datetime
import pytz
from affine import Affine
from netCDF4 import num2date, date2num, Variable
from pyproj import Proj
import six
from trefoil.geometry.bbox import BBox
from trefoil.utilities.window import Window
from trefoil.netcdf.utilities import get_ncattrs
from trefoil.netcdf.crs import PROJ4_GEOGRAPHIC
class CoordinateVariable(object):
"""
Wraps a one-dimensional variable with the same name as a dimension
(http://www.unidata.ucar.edu/software/netcdf/docs/BestPractices.html).
"""
def __init__(self, input):
"""
A Coordinate Variable can be created from a netCDF dataset variable or a numpy array.
:param input: variable in a netCDF dataset or a numpy array
"""
self._ncattrs = dict()
if isinstance(input, Variable):
self.values = input[:]
for attr in input.ncattrs():
if not attr == '_FillValue':
self._ncattrs[attr] = input.getncattr(attr)
else:
self.values = input[:].copy()
def __len__(self):
return self.values.shape[0]
def is_ascending_order(self):
return self.values[0] < self.values[1]
def indices_for_range(self, start, stop):
"""
Returns the indices in this variable for the start and stop values
:param start: start value
:param stop: stop value
:return: start and stop indices
"""
assert stop > start
if start > self.values.max():
return self.values.size - 1, self.values.size - 1
elif stop < self.values.min():
return 0, 0
if self.is_ascending_order():
start_index = min(self.values.searchsorted(start), self.values.size - 1)
# Need to move 1 index to the left unless we matched an index closely (allowing for precision errors)
if start_index > 0 and not numpy.isclose(start, self.values[start_index]):
start_index -= 1
stop_index = min(self.values.searchsorted(stop), self.values.size - 1)
if not numpy.isclose(stop, self.values[stop_index]) and stop < self.values[stop_index]:
stop_index -= 1
return start_index, stop_index
else:
# If values are not ascending, they need to be reversed
temp = self.values[::-1]
start_index = min(temp.searchsorted(start), temp.size - 1)
if start_index > 0 and not numpy.isclose(start, temp[start_index]):
start_index -= 1
stop_index = min(temp.searchsorted(stop), temp.size - 1)
if not numpy.isclose(stop, temp[stop_index]) and stop < temp[stop_index]:
stop_index -= 1
size = self.values.size - 1
return max(size - stop_index, 0), max(size - start_index, 0)
def slice_by_range(self, start, stop):
"""
Slices a subset of values between start and stop values.
:param start: start value
:param stop: stop value
:return: sliced view of self.values. Make sure to copy this before altering it!
"""
assert stop > start
if start >= self.values.max() or stop <= self.values.min():
return numpy.array([])
start_index, stop_index = self.indices_for_range(start, stop)
return self.values[start_index:stop_index+1]
def add_to_dataset(self, dataset, name, is_unlimited=False, **kwargs):
"""
:param dataset: name of the dataset to add the dimension and variable to
:param name: name of the dimension and variable
:param is_unlimited: set the dimension as unlimited
:param kwargs: creation options for output variable. Should be limited to compression info.
:return: the newly created variable
"""
if name in dataset.variables:
raise Exception("Variable already exists in dataset")
if name in dataset.dimensions:
dimension = dataset.dimensions[name]
if is_unlimited != dimension.isunlimited() or len(self) != len(dimension):
raise Exception("Dimension already exists in dataset, but has different size")
else:
dimension_length = None if is_unlimited else len(self)
dataset.createDimension(name, dimension_length)
if 'fill_value' not in kwargs:
fill_value = getattr(self.values, 'fill_value', None)
if fill_value is not None:
kwargs['fill_value'] = fill_value
if self.values.dtype.char == 'S':
variable = dataset.createVariable(name, 'string', (name,), **kwargs)
# Have to write each index at a time, and cast to string. Not optimal but seems to be the only way allowed by netCDF4.
for index, value in enumerate(self.values):
variable[index] = str(value)
else:
variable = dataset.createVariable(name, self.values.dtype, (name,), **kwargs)
variable[:] = self.values[:]
for att, value in six.iteritems(self._ncattrs):
variable.setncattr(att, value)
return variable
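# Hypothetical usage sketch (not part of the original module): a
# CoordinateVariable can wrap a plain numpy array, and indices_for_range()
# snaps a value range onto the nearest enclosing indices.
def _demo_coordinate_variable():
    coords = CoordinateVariable(numpy.arange(0.0, 50.0, 5.0))  # 0, 5, ..., 45
    start, stop = coords.indices_for_range(12.5, 30.0)
    assert (start, stop) == (2, 6)                        # values 10.0 ... 30.0
    assert len(coords.slice_by_range(12.5, 30.0)) == 5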
class BoundsCoordinateVariable(CoordinateVariable):
"""
Wraps a two-dimensional variable representing bounds. Shape is always (N, 2).
Useful for representing time ranges, etc.
Example: http://www.cgd.ucar.edu/cms/eaton/netcdf/CF-20010629.htm#grid_ex4
"""
def is_ascending_order(self):
return self.values[0][0] < self.values[1][0]
def indices_for_range(self, start, stop):
raise NotImplementedError("Not yet implemented")
def add_to_dataset(self, dataset, name, is_unlimited=False, **kwargs):
"""
:param dataset: name of the dataset to add the dimension and variable to
:param name: name of the dimension and variable. Note: a new dimension for the bounds '_bnds' will be created.
:param is_unlimited: set the dimension as unlimited
:param kwargs: creation options for output variable. Should be limited to compression info.
:return: the newly created variable
"""
if name in dataset.variables:
raise Exception("Variable already exists in dataset")
bounds_dimension_name = '_bnds'
if bounds_dimension_name in dataset.dimensions:
if len(dataset.dimensions[bounds_dimension_name]) != 2:
raise ValueError('Bounds dimension _bnds is already present in dataset and not of size 2')
else:
dataset.createDimension(bounds_dimension_name, 2)
if name in dataset.dimensions:
dimension = dataset.dimensions[name]
if is_unlimited != dimension.isunlimited() or len(self) != len(dimension):
raise Exception("Dimension already exists in dataset, but has different size")
else:
dimension_length = None if is_unlimited else len(self)
dataset.createDimension(name, dimension_length)
fill_value = getattr(self.values, 'fill_value', None)
if fill_value is not None:
kwargs['fill_value'] = fill_value
variable = dataset.createVariable(name, self.values.dtype, (name,bounds_dimension_name), **kwargs)
variable[:] = self.values[:]
for att, value in six.iteritems(self._ncattrs):
variable.setncattr(att, value)
return variable
class SpatialCoordinateVariable(CoordinateVariable):
"""
Abstracts properties for a given spatial dimension (e.g., longitude).
Assumes that pixels follow a regular grid, and that dimension values represent centroids
"""
@property
def min(self):
return self.values.min()
@property
def max(self):
return self.values.max()
@property
def pixel_size(self):
return float(abs(self.values[1] - self.values[0]))
@property
def edges(self):
"""
Return coordinates of pixel edges from the min to the max
"""
pixel_size = self.pixel_size
if self.is_ascending_order():
temp = numpy.append(self.values, self.values[-1] + pixel_size)
else:
temp = numpy.append(self.values[0] + pixel_size, self.values)
return temp - (pixel_size / 2.0)
def get_offset_for_subset(self, coordinate_variable):
"""
Find the offset index of coordinate_variable within this coordinate variable.
This assumes that coordinate_variable is a subset of this one, and that coordinates and projections match.
"""
assert len(coordinate_variable) <= self.values.shape[0]
#TODO: make this a fuzzy match within a certain decimal precision
return list(self.values).index(coordinate_variable.values[0])
class SpatialCoordinateVariables(object):
"""
Encapsulates x and y coordinates with projection information
"""
def __init__(self, x, y, projection):
assert isinstance(x, SpatialCoordinateVariable)
assert isinstance(y, SpatialCoordinateVariable)
if projection is not None:
assert isinstance(projection, Proj)
self.x = x
self.y = y
self.projection = projection
@property
def shape(self):
return (len(self.y), len(self.x))
@property
def bbox(self):
half_x_pixel_size = self.x.pixel_size / 2.0
half_y_pixel_size = self.y.pixel_size / 2.0
return BBox(
(
self.x.min - half_x_pixel_size,
self.y.min - half_y_pixel_size,
self.x.max + half_x_pixel_size,
self.y.max + half_y_pixel_size
),
self.projection
)
@property
def affine(self):
bbox = self.bbox
return Affine(
self.x.pixel_size,
0, # Not used
bbox.xmin,
0, # Not used
self.y.values[1] - self.y.values[0], # Negative if y is descending
bbox.ymin if self.y.is_ascending_order() else bbox.ymax
)
@classmethod
def from_dataset(cls, dataset, x_name='longitude', y_name='latitude', projection=None):
"""
Return a SpatialCoordinateVariables object for a dataset
:param dataset: netCDF dataset
        :param x_name: name of the x dimension
        :param y_name: name of the y dimension
:param projection: pyproj Proj object
:return: CoordinateVariables instance
"""
#TODO: detect x and y names, and projection
if projection is None and x_name == 'longitude':
projection = Proj(PROJ4_GEOGRAPHIC)
return cls(
SpatialCoordinateVariable(dataset.variables[x_name]),
SpatialCoordinateVariable(dataset.variables[y_name]),
projection
)
@staticmethod
def from_bbox(bbox, x_size, y_size, dtype='float32', y_ascending=False):
"""
Return a SpatialCoordinateVariables object from BBox and dimensions
:param bbox: instance of a BBox, must have a projection
:param x_size: number of pixels in x dimension (width or number of columns)
:param y_size: number of pixels in y dimension (height or number of rows)
:param dtype: data type (string or numpy dtype object) of values
:param y_ascending: by default, y values are anchored from top left and are descending; if True, this inverts that order
:return: CoordinateVariables instance, assuming that rows are ordered in decreasing value
"""
assert isinstance(bbox, BBox)
if not bbox.projection:
raise ValueError('bbox projection must be defined')
x_pixel_size = (bbox.xmax - bbox.xmin) / float(x_size)
y_pixel_size = (bbox.ymax - bbox.ymin) / float(y_size)
x_arr = numpy.arange(x_size, dtype=dtype)
x_arr *= x_pixel_size
x_arr += (bbox.xmin + x_pixel_size / 2.0)
if y_ascending:
            y_arr = numpy.arange(y_size, dtype=dtype)
|
import numpy as np
X_METER_PER_PIXEL = 3.7/700
Y_METER_PER_PIXEL = 30/720
to_meters = np.array([[X_METER_PER_PIXEL, 0],
[0, Y_METER_PER_PIXEL]])
def in_meters(point):
return np.dot(point, to_meters)
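# Hypothetical sanity check (not in the original script): with the scaling
# above, the full 700x720 px region maps to roughly 3.7 m x 30 m.
def _demo_in_meters():
    x_m, y_m = in_meters((700, 720))
    assert abs(x_m - 3.7) < 1e-9
    assert abs(y_m - 30.0) < 1e-9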
class Lanes:
def __init__(self, left, right):
self.left = left
self.right = right
def distance_from_center(self, center):
center = in_meters(center)
center_x, center_y = center
right_x = self.right.meters.p(center_y)
left_x = self.left.meters.p(center_y)
return ((right_x + left_x)/2 - center_x)
def lane_distance(self, y):
_, y = in_meters((0, y))
return (self.right.meters.p(y) - self.left.meters.p(y))
def lanes_parallel(self, height, samples=50):
distance_per_sample = height // samples
distances = []
for y in range(0, height, distance_per_sample):
distances.append(self.lane_distance(y))
std2 = 2*np.std(distances)
mean = np.mean(distances)
        arr = np.array(distances)
|
"""Tests for the array padding functions.
"""
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose, assert_equal
from numpy.lib.arraypad import _as_pairs
_numeric_dtypes = (
np.sctypes["uint"]
+ np.sctypes["int"]
+ np.sctypes["float"]
+ np.sctypes["complex"]
)
_all_modes = {
'constant': {'constant_values': 0},
'edge': {},
'linear_ramp': {'end_values': 0},
'maximum': {'stat_length': None},
'mean': {'stat_length': None},
'median': {'stat_length': None},
'minimum': {'stat_length': None},
'reflect': {'reflect_type': 'even'},
'symmetric': {'reflect_type': 'even'},
'wrap': {},
'empty': {}
}
class TestAsPairs(object):
def test_single_value(self):
"""Test casting for a single value."""
expected = np.array([[3, 3]] * 10)
for x in (3, [3], [[3]]):
result = _as_pairs(x, 10)
assert_equal(result, expected)
# Test with dtype=object
obj = object()
assert_equal(
_as_pairs(obj, 10),
np.array([[obj, obj]] * 10)
)
def test_two_values(self):
"""Test proper casting for two different values."""
# Broadcasting in the first dimension with numbers
expected = np.array([[3, 4]] * 10)
for x in ([3, 4], [[3, 4]]):
result = _as_pairs(x, 10)
assert_equal(result, expected)
# and with dtype=object
obj = object()
assert_equal(
_as_pairs(["a", obj], 10),
np.array([["a", obj]] * 10)
)
# Broadcasting in the second / last dimension with numbers
assert_equal(
_as_pairs([[3], [4]], 2),
np.array([[3, 3], [4, 4]])
)
# and with dtype=object
assert_equal(
_as_pairs([["a"], [obj]], 2),
np.array([["a", "a"], [obj, obj]])
)
def test_with_none(self):
expected = ((None, None), (None, None), (None, None))
assert_equal(
_as_pairs(None, 3, as_index=False),
expected
)
assert_equal(
_as_pairs(None, 3, as_index=True),
expected
)
def test_pass_through(self):
"""Test if `x` already matching desired output are passed through."""
expected = np.arange(12).reshape((6, 2))
assert_equal(
_as_pairs(expected, 6),
expected
)
def test_as_index(self):
"""Test results if `as_index=True`."""
assert_equal(
_as_pairs([2.6, 3.3], 10, as_index=True),
np.array([[3, 3]] * 10, dtype=np.intp)
)
assert_equal(
_as_pairs([2.6, 4.49], 10, as_index=True),
np.array([[3, 4]] * 10, dtype=np.intp)
)
for x in (-3, [-3], [[-3]], [-3, 4], [3, -4], [[-3, 4]], [[4, -3]],
[[1, 2]] * 9 + [[1, -2]]):
with pytest.raises(ValueError, match="negative values"):
_as_pairs(x, 10, as_index=True)
def test_exceptions(self):
"""Ensure faulty usage is discovered."""
with pytest.raises(ValueError, match="more dimensions than allowed"):
_as_pairs([[[3]]], 10)
with pytest.raises(ValueError, match="could not be broadcast"):
_as_pairs([[1, 2], [3, 4]], 3)
with pytest.raises(ValueError, match="could not be broadcast"):
_as_pairs(np.ones((2, 3)), 3)
class TestConditionalShortcuts(object):
@pytest.mark.parametrize("mode", _all_modes.keys())
def test_zero_padding_shortcuts(self, mode):
test = np.arange(120).reshape(4, 5, 6)
pad_amt = [(0, 0) for _ in test.shape]
assert_array_equal(test, np.pad(test, pad_amt, mode=mode))
@pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',])
def test_shallow_statistic_range(self, mode):
test = np.arange(120).reshape(4, 5, 6)
pad_amt = [(1, 1) for _ in test.shape]
assert_array_equal(np.pad(test, pad_amt, mode='edge'),
np.pad(test, pad_amt, mode=mode, stat_length=1))
@pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',])
def test_clip_statistic_range(self, mode):
test = np.arange(30).reshape(5, 6)
pad_amt = [(3, 3) for _ in test.shape]
assert_array_equal(np.pad(test, pad_amt, mode=mode),
np.pad(test, pad_amt, mode=mode, stat_length=30))
class TestStatistic(object):
def test_check_mean_stat_length(self):
a = np.arange(100).astype('f')
a = np.pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), ))
b = np.array(
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
0.5, 0.5, 0.5, 0.5, 0.5,
0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
98., 98., 98., 98., 98., 98., 98., 98., 98., 98.,
98., 98., 98., 98., 98., 98., 98., 98., 98., 98.
])
assert_array_equal(a, b)
def test_check_maximum_1(self):
a = np.arange(100)
a = np.pad(a, (25, 20), 'maximum')
b = np.array(
[99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99, 99, 99]
)
assert_array_equal(a, b)
def test_check_maximum_2(self):
a = np.arange(100) + 1
a = np.pad(a, (25, 20), 'maximum')
b = np.array(
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
)
assert_array_equal(a, b)
def test_check_maximum_stat_length(self):
a = np.arange(100) + 1
a = np.pad(a, (25, 20), 'maximum', stat_length=10)
b = np.array(
[10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
)
assert_array_equal(a, b)
def test_check_minimum_1(self):
a = np.arange(100)
a = np.pad(a, (25, 20), 'minimum')
b = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
)
assert_array_equal(a, b)
def test_check_minimum_2(self):
a = np.arange(100) + 2
a = np.pad(a, (25, 20), 'minimum')
b = np.array(
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2,
2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
)
assert_array_equal(a, b)
def test_check_minimum_stat_length(self):
a = np.arange(100) + 1
a = np.pad(a, (25, 20), 'minimum', stat_length=10)
b = np.array(
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
91, 91, 91, 91, 91, 91, 91, 91, 91, 91,
91, 91, 91, 91, 91, 91, 91, 91, 91, 91]
)
assert_array_equal(a, b)
def test_check_median(self):
a = np.arange(100).astype('f')
a = np.pad(a, (25, 20), 'median')
b = np.array(
[49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5,
0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
)
assert_array_equal(a, b)
def test_check_median_01(self):
a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
a = np.pad(a, 1, 'median')
b = np.array(
[[4, 4, 5, 4, 4],
[3, 3, 1, 4, 3],
[5, 4, 5, 9, 5],
[8, 9, 8, 2, 8],
[4, 4, 5, 4, 4]]
)
assert_array_equal(a, b)
def test_check_median_02(self):
a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
a = np.pad(a.T, 1, 'median').T
b = np.array(
[[5, 4, 5, 4, 5],
[3, 3, 1, 4, 3],
[5, 4, 5, 9, 5],
[8, 9, 8, 2, 8],
[5, 4, 5, 4, 5]]
)
assert_array_equal(a, b)
def test_check_median_stat_length(self):
a = np.arange(100).astype('f')
a[1] = 2.
a[97] = 96.
a = np.pad(a, (25, 20), 'median', stat_length=(3, 5))
b = np.array(
[ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
2., 2., 2., 2., 2.,
0., 2., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 96., 98., 99.,
96., 96., 96., 96., 96., 96., 96., 96., 96., 96.,
96., 96., 96., 96., 96., 96., 96., 96., 96., 96.]
)
assert_array_equal(a, b)
def test_check_mean_shape_one(self):
a = [[4, 5, 6]]
a = np.pad(a, (5, 7), 'mean', stat_length=2)
b = np.array(
[[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]]
)
assert_array_equal(a, b)
def test_check_mean_2(self):
a = np.arange(100).astype('f')
a = np.pad(a, (25, 20), 'mean')
b = np.array(
[49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5,
0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
)
assert_array_equal(a, b)
@pytest.mark.parametrize("mode", [
"mean",
"median",
"minimum",
"maximum"
])
def test_same_prepend_append(self, mode):
""" Test that appended and prepended values are equal """
# This test is constructed to trigger floating point rounding errors in
# a way that caused gh-11216 for mode=='mean'
a = np.array([-1, 2, -1]) + np.array([0, 1e-12, 0], dtype=np.float64)
a = np.pad(a, (1, 1), mode)
        assert_equal(a[0], a[-1])
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.data.tfexample_decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
class TFExampleDecoderTest(test.TestCase):
def _EncodedFloatFeature(self, ndarray):
return feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=ndarray.flatten().tolist()))
def _EncodedInt64Feature(self, ndarray):
return feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=ndarray.flatten().tolist()))
def _EncodedBytesFeature(self, tf_encoded):
with self.test_session():
encoded = tf_encoded.eval()
def BytesList(value):
return feature_pb2.BytesList(value=[value])
return feature_pb2.Feature(bytes_list=BytesList(encoded))
def _BytesFeature(self, ndarray):
values = ndarray.flatten().tolist()
for i in range(len(values)):
values[i] = values[i].encode('utf-8')
return feature_pb2.Feature(bytes_list=feature_pb2.BytesList(value=values))
def _StringFeature(self, value):
value = value.encode('utf-8')
return feature_pb2.Feature(bytes_list=feature_pb2.BytesList(value=[value]))
def _Encoder(self, image, image_format):
assert image_format in ['jpeg', 'JPEG', 'png', 'PNG', 'raw', 'RAW']
if image_format in ['jpeg', 'JPEG']:
tf_image = constant_op.constant(image, dtype=dtypes.uint8)
return image_ops.encode_jpeg(tf_image)
if image_format in ['png', 'PNG']:
tf_image = constant_op.constant(image, dtype=dtypes.uint8)
return image_ops.encode_png(tf_image)
if image_format in ['raw', 'RAW']:
return constant_op.constant(image.tostring(), dtype=dtypes.string)
def GenerateImage(self, image_format, image_shape):
"""Generates an image and an example containing the encoded image.
Args:
image_format: the encoding format of the image.
image_shape: the shape of the image to generate.
Returns:
image: the generated image.
example: a TF-example with a feature key 'image/encoded' set to the
serialized image and a feature key 'image/format' set to the image
encoding format ['jpeg', 'JPEG', 'png', 'PNG', 'raw'].
"""
num_pixels = image_shape[0] * image_shape[1] * image_shape[2]
image = np.linspace(
0, num_pixels - 1, num=num_pixels).reshape(image_shape).astype(np.uint8)
tf_encoded = self._Encoder(image, image_format)
example = example_pb2.Example(features=feature_pb2.Features(feature={
'image/encoded': self._EncodedBytesFeature(tf_encoded),
'image/format': self._StringFeature(image_format)
}))
return image, example.SerializeToString()
def DecodeExample(self, serialized_example, item_handler, image_format):
"""Decodes the given serialized example with the specified item handler.
Args:
serialized_example: a serialized TF example string.
item_handler: the item handler used to decode the image.
image_format: the image format being decoded.
Returns:
the decoded image found in the serialized Example.
"""
serialized_example = array_ops.reshape(serialized_example, shape=[])
decoder = tfexample_decoder.TFExampleDecoder(
keys_to_features={
'image/encoded':
parsing_ops.FixedLenFeature(
(), dtypes.string, default_value=''),
'image/format':
parsing_ops.FixedLenFeature(
(), dtypes.string, default_value=image_format),
},
items_to_handlers={'image': item_handler})
[tf_image] = decoder.decode(serialized_example, ['image'])
return tf_image
def RunDecodeExample(self, serialized_example, item_handler, image_format):
tf_image = self.DecodeExample(serialized_example, item_handler,
image_format)
with self.test_session():
decoded_image = tf_image.eval()
# We need to recast them here to avoid some issues with uint8.
return decoded_image.astype(np.float32)
def testDecodeExampleWithJpegEncoding(self):
image_shape = (2, 3, 3)
image, serialized_example = self.GenerateImage(
image_format='jpeg', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example, tfexample_decoder.Image(), image_format='jpeg')
# Need to use a tolerance of 1 because of noise in the jpeg encode/decode
self.assertAllClose(image, decoded_image, atol=1.001)
def testDecodeExampleWithJPEGEncoding(self):
test_image_channels = [1, 3]
for channels in test_image_channels:
image_shape = (2, 3, channels)
image, serialized_example = self.GenerateImage(
image_format='JPEG', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(channels=channels),
image_format='JPEG')
# Need to use a tolerance of 1 because of noise in the jpeg encode/decode
self.assertAllClose(image, decoded_image, atol=1.001)
def testDecodeExampleWithNoShapeInfo(self):
test_image_channels = [1, 3]
for channels in test_image_channels:
image_shape = (2, 3, channels)
_, serialized_example = self.GenerateImage(
image_format='jpeg', image_shape=image_shape)
tf_decoded_image = self.DecodeExample(
serialized_example,
tfexample_decoder.Image(
shape=None, channels=channels),
image_format='jpeg')
self.assertEqual(tf_decoded_image.get_shape().ndims, 3)
def testDecodeExampleWithPngEncoding(self):
test_image_channels = [1, 3, 4]
for channels in test_image_channels:
image_shape = (2, 3, channels)
image, serialized_example = self.GenerateImage(
image_format='png', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(channels=channels),
image_format='png')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithPNGEncoding(self):
test_image_channels = [1, 3, 4]
for channels in test_image_channels:
image_shape = (2, 3, channels)
image, serialized_example = self.GenerateImage(
image_format='PNG', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(channels=channels),
image_format='PNG')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithRawEncoding(self):
image_shape = (2, 3, 3)
image, serialized_example = self.GenerateImage(
image_format='raw', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(shape=image_shape),
image_format='raw')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithRAWEncoding(self):
image_shape = (2, 3, 3)
image, serialized_example = self.GenerateImage(
image_format='RAW', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(shape=image_shape),
image_format='RAW')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithJpegEncodingAt16BitCausesError(self):
image_shape = (2, 3, 3)
unused_image, serialized_example = self.GenerateImage(
image_format='jpeg', image_shape=image_shape)
# decode_raw support uint16 now so ValueError will be thrown instead.
with self.assertRaisesRegexp(
ValueError,
'true_fn and false_fn must have the same type: uint16, uint8'):
unused_decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(dtype=dtypes.uint16),
image_format='jpeg')
def testDecodeExampleWithStringTensor(self):
tensor_shape = (2, 3, 1)
np_array = np.array([[['ab'], ['cd'], ['ef']],
[['ghi'], ['jkl'], ['mnop']]])
example = example_pb2.Example(features=feature_pb2.Features(feature={
'labels': self._BytesFeature(np_array),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'labels':
parsing_ops.FixedLenFeature(
tensor_shape,
dtypes.string,
default_value=constant_op.constant(
'', shape=tensor_shape, dtype=dtypes.string))
}
items_to_handlers = {'labels': tfexample_decoder.Tensor('labels'),}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
labels = labels.astype(np_array.dtype)
self.assertTrue(np.array_equal(np_array, labels))
def testDecodeExampleWithFloatTensor(self):
np_array = np.random.rand(2, 3, 1).astype('f')
example = example_pb2.Example(features=feature_pb2.Features(feature={
'array': self._EncodedFloatFeature(np_array),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'array': parsing_ops.FixedLenFeature(np_array.shape, dtypes.float32)
}
items_to_handlers = {'array': tfexample_decoder.Tensor('array'),}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_array] = decoder.decode(serialized_example, ['array'])
self.assertAllEqual(tf_array.eval(), np_array)
def testDecodeExampleWithInt64Tensor(self):
np_array = np.random.randint(1, 10, size=(2, 3, 1))
example = example_pb2.Example(features=feature_pb2.Features(feature={
'array': self._EncodedInt64Feature(np_array),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'array': parsing_ops.FixedLenFeature(np_array.shape, dtypes.int64)
}
items_to_handlers = {'array': tfexample_decoder.Tensor('array'),}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_array] = decoder.decode(serialized_example, ['array'])
self.assertAllEqual(tf_array.eval(), np_array)
def testDecodeExampleWithVarLenTensor(self):
np_array = np.array([[[1], [2], [3]], [[4], [5], [6]]])
example = example_pb2.Example(features=feature_pb2.Features(feature={
'labels': self._EncodedInt64Feature(np_array),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'labels': parsing_ops.VarLenFeature(dtype=dtypes.int64),
}
items_to_handlers = {'labels': tfexample_decoder.Tensor('labels'),}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels, np_array.flatten())
def testDecodeExampleWithFixLenTensorWithShape(self):
    np_array = np.array([[1, 2, 3], [4, 5, 6]])
|
import cv2
import numpy as np
def draw(scale, width, div, grad1, color1, color2, color3, lines, angle, pad, grad2, color4, color5):
assert scale > 0 and scale % 2 == 1
assert 0 < width
assert 0 < div
assert 0 <= grad1 <= 100
assert 0 <= color1[0] <= 255 and 0 <= color1[1] <= 255 and 0 <= color1[2] <= 255
assert 0 <= color2[0] <= 255 and 0 <= color2[1] <= 255 and 0 <= color2[2] <= 255
assert 0 <= color3[0] <= 255 and 0 <= color3[1] <= 255 and 0 <= color3[2] <= 255
assert 0 < lines
assert 0 <= angle < 360
assert 0 <= pad
assert 0 <= grad2 <= 100
assert 0 <= color4[0] <= 255 and 0 <= color4[1] <= 255 and 0 <= color4[2] <= 255
assert 0 <= color5[0] <= 255 and 0 <= color5[1] <= 255 and 0 <= color5[2] <= 255
x = __draw1(scale, width, div, grad1, color1, color2, color3)
x = __draw2(x, lines)
x = __draw3(x, angle)
x = __draw4(x, scale, pad, grad2, color4, color5)
return np.clip(x, 0, 255).astype(np.uint8)
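# Hypothetical invocation (not from the original script; it assumes the full
# module with __draw1..__draw4 defined below): renders a small striped tile
# and writes it to disk with OpenCV.
def _demo_draw():
    img = draw(scale=3, width=40, div=8, grad1=50,
               color1=(255, 0, 0), color2=(0, 255, 0), color3=(0, 0, 255),
               lines=4, angle=30, pad=5, grad2=20,
               color4=(20, 20, 20), color5=(240, 240, 240))
    cv2.imwrite('demo_draw.png', img)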
def __draw1(scale, width, div, grad1, color1, color2, color3):
width = width*scale
grad1 = (grad1*width) // 100
x0 = np.zeros((1, width, 3), dtype=np.uint8)
x0[:] = color1
x1 = np.zeros((1, width, 3), dtype=np.uint8)
x1[:] = color2
one = np.ones((grad1,), dtype=np.float32)
one_to_zero = np.linspace(
1, 0, width-grad1, endpoint=False, dtype=np.float32)
one_to_zero = one_to_zero*one_to_zero * \
(3-2*one_to_zero) # 3rd order interpolation
gradation = np.r_[one, one_to_zero]
gradation = gradation[None, :, None]
x = x0 * gradation + x1 * (1-gradation)
pos = np.arange(width)
mask_width = pos // div
mask_width[mask_width < scale] = scale
mask_width[(mask_width > mask_width[-1]-scale) &
(mask_width < mask_width[-1])] = mask_width[-1] - scale
pos_in_segment = ((pos * div) % width) // div
mask = (pos_in_segment <= mask_width)
x[:, mask, :] = color3
return x
def __draw2(x, lines):
width = x.shape[1]
pos = np.arange(width)
mask = ((pos*lines) // width) % 2 == 0
xx = np.zeros((width, width, 3), dtype=np.float32)
xx[mask] = x
xx[~mask] = x[:, ::-1]
return xx
def __draw3(x, angle):
# add alpha channel
x = cv2.cvtColor(x, cv2.COLOR_BGR2BGRA)
# rotate coarsely
if angle < 90:
pass
elif angle < 180:
angle -= 90
x = cv2.rotate(x, cv2.ROTATE_90_COUNTERCLOCKWISE)
elif angle < 270:
angle -= 180
x = cv2.rotate(x, cv2.ROTATE_180)
elif angle < 360:
angle -= 270
x = cv2.rotate(x, cv2.ROTATE_90_CLOCKWISE)
# rotate finely
if angle > 0:
adjust = np.sqrt(2) * np.sin(angle/180*np.pi + np.pi/4)
trans = cv2.getRotationMatrix2D((0, 0), angle, 1)
width = x.shape[1]
trans[1, 2] = width*np.sin(angle/180*np.pi)
x = cv2.warpAffine(x, trans, (int(adjust*width)+1,
int(adjust*width)+1), flags=cv2.INTER_NEAREST)
return x
def __draw4(x, scale, pad, grad2, color3, color4):
x_width = x.shape[1]
pad = pad*scale
width = x_width+pad*2
grad2 = (grad2*width) // 100
bk1 = np.zeros((width, width, 3), dtype=np.float32)
bk1[:] = color3
    bk2 = np.zeros((width, width, 3), dtype=np.float32)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
import osmnx as ox
import networkx as nx
from analogistics.chart.chart_3D_surface import createFigureWith3Dsurface
from analogistics.supply_chain.P8_performance_assessment.utilities_movements import getCoverageStats
from analogistics.clean import cleanUsingIQR
from sklearn.metrics import mean_squared_error
from sklearn import cluster
from sklearn.mixture import GaussianMixture
def mercatorProjection(latitude: float, longitude: float):
"""
Return the Mercator projection coordinates of given latitude and longitude
Args:
latitude (float): Latitude of a point.
longitude (float): Longitude of a point.
Returns:
        x (float): Mercator X coordinate.
        y (float): Mercator Y coordinate.
"""
    R = 6378.14  # earth's equatorial radius (km)
e = 0.0167 # earth's eccentricity
lon_rad = (np.pi / 180) * longitude
lat_rad = (np.pi / 180) * latitude
x = R * lon_rad
y = R * np.log(((1 - e * np.sin(lat_rad)) / (1 + e * np.sin(lat_rad))) ** (e / 2) * np.tan(np.pi / 4 + lat_rad / 2))
return x, y
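# Hypothetical sanity check (not part of the original module): the equator /
# Greenwich origin maps to (0, 0) and x grows linearly with longitude.
def _demo_mercator_projection():
    x0, y0 = mercatorProjection(0.0, 0.0)
    assert abs(x0) < 1e-9 and abs(y0) < 1e-9
    x90, _ = mercatorProjection(0.0, 90.0)
    assert abs(x90 - 6378.14 * np.pi / 2) < 1e-6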
def optimalLocationRectangularDistance(D_filtered: pd.DataFrame, latCol: str, lonCol: str, weightCol: str):
"""
    This function returns the optimal location based on rectangular distances
Args:
D_filtered (pd.DataFrame): Input dataframe.
latCol (str): Column name containing latitude.
lonCol (str): Column name containing longitude.
weightCol (str): Column name containing weight (e.g. production quantity or flows).
Returns:
lat_optimal (TYPE): Optimal latitude.
lon_optimal (TYPE): Optimal longitude.
"""
# optimal location
op_w = sum(D_filtered[weightCol]) / 2 # identify the median of the sum of weights
# identify optimal latitude
if len(D_filtered) > 1: # when there are more than a single point
D_filtered = D_filtered.sort_values(by=latCol, ascending=True) # sort by latitude
D_filtered['X_cumsum'] = D_filtered[weightCol].cumsum() # calculate the cumulated sum
# identify the LATITUDE closer to the optimal location
D_opt_x_max = D_filtered[D_filtered['X_cumsum'] >= op_w].iloc[0]
D_opt_x_min = D_filtered[D_filtered['X_cumsum'] < op_w].iloc[-1]
x_array = [D_opt_x_min['X_cumsum'], D_opt_x_max['X_cumsum']]
y_array = [D_opt_x_min[latCol], D_opt_x_max[latCol]]
lat_optimal = np.interp(op_w, x_array, y_array)
# identify the LONGITUDE closer to the optimal location
        D_filtered = D_filtered.sort_values(by=lonCol, ascending=True) # sort by longitude
D_filtered['Y_cumsum'] = D_filtered[weightCol].cumsum() # calculate the cumulated sum
D_opt_x_max = D_filtered[D_filtered['Y_cumsum'] >= op_w].iloc[0]
D_opt_x_min = D_filtered[D_filtered['Y_cumsum'] < op_w].iloc[-1]
x_array = [D_opt_x_min['Y_cumsum'], D_opt_x_max['Y_cumsum']]
y_array = [D_opt_x_min[lonCol], D_opt_x_max[lonCol]]
lon_optimal = np.interp(op_w, x_array, y_array)
else: # with a single point take the coordinates of the point
lat_optimal = float(D_filtered.iloc[0][latCol])
lon_optimal = float(D_filtered.iloc[0][lonCol])
return lat_optimal, lon_optimal
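# Hypothetical example (not part of the original module): three customers with
# weights 10, 1 and 10; the optimum interpolates around the weighted median of
# each coordinate.
def _demo_optimal_location_rectangular():
    d = pd.DataFrame({'lat': [45.0, 45.5, 46.0],
                      'lon': [9.0, 9.5, 10.0],
                      'flow': [10, 1, 10]})
    lat_opt, lon_opt = optimalLocationRectangularDistance(d, 'lat', 'lon', 'flow')
    assert abs(lat_opt - 45.25) < 1e-9
    assert abs(lon_opt - 9.25) < 1e-9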
def optimalLocationGravityProblem(D_filtered: pd.DataFrame, latCol: str, lonCol: str, weightCol: str):
"""
    This function calculates the optimal location with squared Euclidean distances
Args:
D_filtered (pd.DataFrame): Input dataframe.
latCol (str): Column name containing latitude.
lonCol (str): Column name containing longitude.
weightCol (str): Column name containing weight (e.g. production quantity or flows).
Returns:
lat_optimal (TYPE): Optimal latitude.
lon_optimal (TYPE): Optimal longitude.
"""
D_filtered_notnan = D_filtered.dropna(subset=[latCol, lonCol, weightCol])
D_filtered_notnan = D_filtered_notnan[D_filtered_notnan[weightCol] > 0]
if len(D_filtered_notnan) > 0:
lat_optimal = sum(D_filtered_notnan[latCol] * D_filtered_notnan[weightCol]) / sum(D_filtered_notnan[weightCol])
lon_optimal = sum(D_filtered_notnan[lonCol] * D_filtered_notnan[weightCol]) / sum(D_filtered_notnan[weightCol])
else:
lat_optimal = lon_optimal = 0
return lat_optimal, lon_optimal
def optimalLocationEuclideanDistance(D_filtered: pd.DataFrame, latCol: str, lonCol: str, weightCol: str):
"""
    This function calculates the optimal location with Euclidean distances using the Kuhn procedure
Args:
D_filtered (pd.DataFrame): Input dataframe.
latCol (str): Column name containing latitude.
lonCol (str): Column name containing longitude.
weightCol (str): Column name containing weight (e.g. production quantity or flows).
Returns:
lat_optimal (TYPE): Optimal latitude.
lon_optimal (TYPE): Optimal longitude.
"""
def _funcGKuhn(wi, xj_1, yj_1, ai, bi):
# implements the function g in the kuhn procedure for euclidean distances
return wi / ((xj_1 - ai) ** 2 + (yj_1 - bi) ** 2)
# remove null values
D_filtered_notnan = D_filtered.dropna(subset=[latCol, lonCol, weightCol])
# identify the first solution of the gravity problem
lat_optimal_0, lon_optimal_0 = optimalLocationGravityProblem(D_filtered_notnan, latCol, lonCol, weightCol)
xj_1 = lon_optimal_0
yj_1 = lat_optimal_0
wi = D_filtered_notnan[weightCol]
ai = D_filtered_notnan[lonCol]
bi = D_filtered_notnan[latCol]
# iterates Kuhn procedure to approximate the solution
diff_x = 1 # a latitude degree is about 111 km
while diff_x > 0.01:
lon_optimal_j = sum(_funcGKuhn(wi, xj_1, yj_1, ai, bi) * ai) / sum(_funcGKuhn(wi, xj_1, yj_1, ai, bi))
diff_x = np.abs(xj_1 - lon_optimal_j)
# print(diff_x)
xj_1 = lon_optimal_j
# iterates Kuhn procedure to approximate the solution
diff_x = 1
while diff_x > 0.01:
lat_optimal_j = sum(_funcGKuhn(wi, xj_1, yj_1, ai, bi) * bi) / sum(_funcGKuhn(wi, xj_1, yj_1, ai, bi))
diff_x = np.abs(yj_1 - lat_optimal_j)
# print(diff_x)
yj_1 = lat_optimal_j
return lat_optimal_j, lon_optimal_j
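# Hypothetical example (not part of the original module): each Kuhn/Weiszfeld
# iterate is a convex combination of the demand points, so the returned
# optimum must fall inside their bounding box.
def _demo_optimal_location_euclidean():
    d = pd.DataFrame({'lat': [45.0, 45.2, 46.1, 45.7],
                      'lon': [9.0, 9.8, 10.0, 9.3],
                      'flow': [5.0, 1.0, 3.0, 2.0]})
    lat_opt, lon_opt = optimalLocationEuclideanDistance(d, 'lat', 'lon', 'flow')
    assert 45.0 <= lat_opt <= 46.1
    assert 9.0 <= lon_opt <= 10.0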
def func_rectangularDistanceCost(x: float, y: float, x_opt: float, y_opt: float, wi: float) -> float:
"""
return cost values with rectangular distances
Args:
x (float): X coordinate.
y (float): Y coordinate.
x_opt (float): X coordinate of the optimal location.
y_opt (float): Y coordinate of the optimal location.
wi (float): weight (e.g. flow).
Returns:
float: Cost value.
"""
return (np.abs(x - x_opt) + np.abs(y - y_opt)) * wi
def func_gravityDistanceCost(x: float, y: float, x_opt: float, y_opt: float, wi: float) -> float:
"""
return cost values with squared euclidean distances
Args:
x (float): X coordinate.
y (float): Y coordinate.
x_opt (float): X coordinate of the optimal location.
y_opt (float): Y coordinate of the optimal location.
wi (float): weight (e.g. flow).
Returns:
float: Cost value.
"""
return ((x - x_opt) ** 2 + (y - y_opt) ** 2) * wi
def func_euclideanDistanceCost(x: float, y: float, x_opt: float, y_opt: float, wi: float) -> float:
"""
return cost values with euclidean distance
Args:
x (float): X coordinate.
y (float): Y coordinate.
x_opt (float): X coordinate of the optimal location.
y_opt (float): Y coordinate of the optimal location.
wi (float): weight (e.g. flow).
Returns:
float: Cost value.
"""
return np.sqrt((x - x_opt) ** 2 + (y - y_opt) ** 2) * wi
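# Hypothetical check (not part of the original module): for a 3-4-5 right
# triangle the three cost models give 7 (rectangular), 25 (squared/gravity)
# and 5 (Euclidean) per unit of flow.
def _demo_distance_costs():
    assert func_rectangularDistanceCost(3, 4, 0, 0, 1) == 7
    assert func_gravityDistanceCost(3, 4, 0, 0, 1) == 25
    assert func_euclideanDistanceCost(3, 4, 0, 0, 1) == 5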
def defineDistanceTableEstimator(D_mov: pd.DataFrame, lonCol_From_mov: str, latCol_From_mov: str,
lonCol_To_mov: str, latCol_To_mov: str, G: nx.graph,
cleanOutliersCoordinates: bool = False, capacityField: str = 'QUANTITY'):
"""
Args:
        D_mov (pd.DataFrame): Input dataframe.
lonCol_From_mov (str): name of the D_mov dataframe with longitude of the loading node.
latCol_From_mov (str): name of the D_mov dataframe with latitude of the loading node.
lonCol_To_mov (str): name of the D_mov dataframe with longitude of the discharging node.
        latCol_To_mov (str): name of the D_mov dataframe with latitude of the discharging node.
G (nx.graph): road graph obtained with osmnx.
cleanOutliersCoordinates (bool, optional): is true to remove outliers in latitude and longitude. Defaults to False.
capacityField (str, optional): field of quantity to measure the coverage statistics on it. Defaults to 'QUANTITY'.
Returns:
        D_dist (pd.DataFrame): distance table of unique origin-destination pairs with real (road network) and estimated (Euclidean, rectangular, gravity) distances.
        df_coverages (pd.DataFrame): coverage statistics of the input movements.
"""
# clean data and get coverages
analysisFieldList = [lonCol_From_mov, latCol_From_mov, lonCol_To_mov, latCol_To_mov]
coverages, _ = getCoverageStats(D_mov, analysisFieldList, capacityField=capacityField)
D_dist = D_mov[[lonCol_From_mov, latCol_From_mov, lonCol_To_mov, latCol_To_mov]].drop_duplicates().dropna().reset_index()
if cleanOutliersCoordinates:
D_dist, coverages_outl = cleanUsingIQR(D_dist, [lonCol_From_mov, latCol_From_mov, lonCol_To_mov, latCol_To_mov])
coverages = (coverages[0] * coverages_outl[0], coverages[1] * coverages_outl[1])
df_coverages = pd.DataFrame(coverages)
D_dist['REAL_DISTANCE'] = np.nan
D_dist['MERCATOR_X_FROM'] = np.nan
D_dist['MERCATOR_Y_FROM'] = np.nan
D_dist['MERCATOR_X_TO'] = np.nan
D_dist['MERCATOR_Y_TO'] = np.nan
for index, row in D_dist.iterrows():
# get the coordinates
lonFrom = row[lonCol_From_mov]
latFrom = row[latCol_From_mov]
lonTo = row[lonCol_To_mov]
latTo = row[latCol_To_mov]
# get the closest node on the graph
node_from = ox.get_nearest_node(G, (latFrom, lonFrom), method='euclidean')
node_to = ox.get_nearest_node(G, (latTo, lonTo), method='euclidean')
length = nx.shortest_path_length(G=G, source=node_from, target=node_to, weight='length')
        D_dist.loc[index, 'REAL_DISTANCE'] = length
# convert into mercator coordinates
x_merc_from, y_merc_from = mercatorProjection(latFrom, lonFrom)
x_merc_to, y_merc_to = mercatorProjection(latTo, lonTo)
        D_dist.loc[index, 'MERCATOR_X_FROM'] = x_merc_from
        D_dist.loc[index, 'MERCATOR_Y_FROM'] = y_merc_from
        D_dist.loc[index, 'MERCATOR_X_TO'] = x_merc_to
        D_dist.loc[index, 'MERCATOR_Y_TO'] = y_merc_to
D_dist['EUCLIDEAN_DISTANCE'] = 1000 * func_euclideanDistanceCost(D_dist['MERCATOR_X_FROM'],
D_dist['MERCATOR_Y_FROM'],
D_dist['MERCATOR_X_TO'],
D_dist['MERCATOR_Y_TO'],
1)
D_dist['RECTANGULAR_DISTANCE'] = 1000 * func_rectangularDistanceCost(D_dist['MERCATOR_X_FROM'],
D_dist['MERCATOR_Y_FROM'],
D_dist['MERCATOR_X_TO'],
D_dist['MERCATOR_Y_TO'],
1)
D_dist['GRAVITY_DISTANCE'] = 1000 * func_gravityDistanceCost(D_dist['MERCATOR_X_FROM'],
D_dist['MERCATOR_Y_FROM'],
D_dist['MERCATOR_X_TO'],
D_dist['MERCATOR_Y_TO'],
1)
error_euclidean = mean_squared_error(D_dist['REAL_DISTANCE'], D_dist['EUCLIDEAN_DISTANCE'])
error_rectangular = mean_squared_error(D_dist['REAL_DISTANCE'], D_dist['RECTANGULAR_DISTANCE'])
error_gravity = mean_squared_error(D_dist['REAL_DISTANCE'], D_dist['GRAVITY_DISTANCE'])
print(f"MSE EUCLIDEAN: {
|
np.round(error_euclidean,2)
|
numpy.round
|
# -*- coding: utf-8 -*-
import logging
import pandas as pd
import numpy as np
import pprint
from pcnaDeep.data.utils import deduce_transition, find_daugs
from pcnaDeep.data.annotate import findM
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
def list_dist(a, b):
"""Count difference between elements of two lists.
Args:
a (list): classifications with method A
b (list): classifications with method B
"""
count = 0
assert len(a) == len(b)
for i in range(len(a)):
if a[i] != b[i]:
count += 1
if a[i] == 'G1/G2' and (b[i] in ['G1', 'G2', 'G1*', 'G2*']):
count -= 1
return count
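# Hypothetical check (not part of the original module): a coarse 'G1/G2' label
# compared with a resolved 'G1'/'G2' label is not counted as a disagreement.
def _demo_list_dist():
    assert list_dist(['G1/G2', 'S', 'M'], ['G1', 'S', 'G1/G2']) == 1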
def get_rsv_input_gt(track, gt_name='predicted_class', G2_trh=200, no_cls_GT=False):
"""Deduce essential input of resolver from a ground truth.
"""
logger = logging.getLogger('pcna.Resolver.resolveGroundTruth')
track['lineageId'] = track['trackId']
if 'emerging' not in track.columns:
track['emerging'] = 0
if 'BF_mean' not in track.columns:
track['BF_mean'] = 0
if 'BF_std' not in track.columns:
track['BF_std'] = 0
# resolve classification, if supplied
if not no_cls_GT:
track['Probability of G1/G2'] = 0
track['Probability of S'] = 0
track['Probability of M'] = 0
track.loc[track[gt_name].str.contains('G'), 'Probability of G1/G2'] = 1
track.loc[track[gt_name] == 'E', 'Probability of G1/G2'] = 1
track.loc[track[gt_name] == 'S', 'Probability of S'] = 1
track.loc[track[gt_name] == 'M', 'Probability of M'] = 1
track.loc[track[gt_name] == 'S', 'predicted_class'] = 'S'
track.loc[track[gt_name].str.contains('G'), 'predicted_class'] = 'G1/G2'
track.loc[track[gt_name] == 'E', 'predicted_class'] = 'E'
track.loc[track[gt_name] == 'M', 'predicted_class'] = 'M'
track.loc[track[gt_name] == 'E', 'emerging'] = 1
ann = {'track': [], 'mitosis_parent': [], 'm_entry': [], 'm_exit': []}
mt_dic = {}
imprecise_m = []
for i in np.unique(track['trackId']):
sub = track[track['trackId'] == i]
ann['track'].append(i)
par = list(sub['parentTrackId'])[0]
if par:
par_lin = list(track.loc[track['trackId'] == par, 'lineageId'])[0]
track.loc[track['trackId'] == i, 'lineageId'] = par_lin
track.loc[track['parentTrackId'] == i, 'lineageId'] = par_lin
m_exit = findM(sub[gt_name].tolist(), 'begin')
if m_exit is None:
m_exit = sub['frame'].iloc[0]
logger.warning('Mitosis exit not found for daughter: ' + str(i))
imprecise_m.append(i)
else:
m_exit = sub['frame'].iloc[m_exit]
if par not in mt_dic.keys():
mt_dic[par] = {'daug': {i: {'dist': 0, 'm_exit': m_exit}}, 'div': -1}
else:
mt_dic[par]['daug'][i] = {'dist': 0, 'm_exit': m_exit}
ann['mitosis_parent'].append(int(par))
ann['m_entry'].append(None)
ann['m_exit'].append(int(m_exit))
else:
ann['mitosis_parent'].append(None)
ann['m_entry'].append(None)
ann['m_exit'].append(None)
ann = pd.DataFrame(ann, dtype=int)
for i in mt_dic.keys():
par_sub = track[track['trackId'] == i]
m_entry = findM(par_sub[gt_name].tolist(), 'end')
if m_entry is None:
logger.warning('Mitosis entry not found for parent: ' + str(i) + '. Will not resolve this M phase.')
else:
m_entry = par_sub['frame'].iloc[m_entry]
mt_dic[i]['div'] = m_entry
ann.loc[ann['track'] == i, 'm_entry'] = m_entry
# Note: the resolver only accepts the predicted class 'G1/G2', not 'G1' or 'G2'.
# The resolver classifies G1 vs G2 from intensity, so first mask the mean and background intensity,
# then recover them later by joining tables.
track_masked = track.copy()
if 'mean_intensity' not in track_masked.columns:
track_masked['mean_intensity'] = 0
track_masked.loc[track_masked[gt_name] == 'G2', 'mean_intensity'] = G2_trh + 1
if 'background_mean' not in track_masked.columns:
track_masked['background_mean'] = 0
track_masked.loc[track_masked[gt_name].str.contains('G'), gt_name] = 'G1/G2'
track_masked['predicted_class'] = track_masked[gt_name]
logger.debug(pprint.pformat(mt_dic))
return track_masked, ann, mt_dic, imprecise_m
def resolve_from_gt(track, gt_name='predicted_class', extra_gt=None, G2_trh=None, no_cls_GT=False,
minG=1, minS=1, minM=1, minLineage=0):
"""Resolve cell cycle phase from the ground truth. Wrapper of `get_rsv_input_gt()`.
Args:
track (pandas.DataFrame): data frame with one object per row; must have the following columns:
- trackId, frame, parentTrackId, <ground truth classification column>
gt_name (str): refers to the column in track that corresponds to ground truth classification.
extra_gt (str): refers to the column in track that has G2 ground truth if `gt_name` does not. See notes below.
G2_trh (int): intensity threshold for classifying G2 phase (for arrest tracks only).
no_cls_GT (bool): set to `True` if no classification ground truth is provided.
Will resolve based on the current classifications.
minG (int): minimum G phase frame length (default 1).
minS (int): minimum S phase frame length (default 1).
minM (int): minimum M phase frame length (default 1).
minLineage (int): minimum lineage frame length to resolve (default 0, resolve all tracks).
Note:
- If you want G2 to be classified from the ground truth classification rather than by thresholding,
simply leave `G2_trh=None`; the threshold is then calculated as the smallest average G2-phase intensity
in labeled tracks (outliers smaller than mean - 3*sd are excluded).
- If the ground truth column does not contain `G2` instances, point the program to an extra,
partially G2-labeled ground truth column such as `resolved_class`. This may be useful when
`predicted_class` has been corrected via the Correction Interface, which only contains G1/G2 but not G2. In this
case, you can also assign `resolved_class` as the ground truth classification column. Both will work.
- If the `mean_intensity` or `background_mean` column is not in the table, the threshold is set to 100.
- Use at your own risk if the input classification is not reliable.
"""
if G2_trh is None:
if 'mean_intensity' in track.columns:
if 'G2' not in list(track[gt_name]):
assert 'G2' in list(track[extra_gt]), 'G2 not found in either gt_name or extra_gt columns.'
G2_tracks = track.loc[track[extra_gt] == 'G2']
else:
G2_tracks = track.loc[track[gt_name] == 'G2']
avgs = []
for i in np.unique(G2_tracks['trackId']):
sub = G2_tracks[G2_tracks['trackId'] == i]
avgs.append(np.mean(sub['mean_intensity'] - sub['background_mean']))
avgs = np.array(avgs)
avgs = avgs[avgs >= (np.mean(avgs) - np.std(avgs) * 3)]
G2_trh = int(np.floor(np.min(avgs))) - 1
elif 'mean_intensity' not in track.columns or 'background_mean' not in track.columns:
G2_trh = 100 # dummy threshold
else:
raise ValueError('Must provide a G2 intensity threshold or provide G2 ground truth classification.')
print('Using G2 intensity threshold: ' + str(G2_trh))
track_masked, ann, mt_dic, imprecise_m = get_rsv_input_gt(track, gt_name, G2_trh=G2_trh, no_cls_GT=no_cls_GT)
r = Resolver(track_masked, ann, mt_dic, maxBG=minG, minS=minS, minM=minM, minLineage=minLineage,
impreciseExit=imprecise_m, G2_trh=G2_trh)
rsTrack, phase = r.doResolve()
rsTrack = rsTrack[['trackId', 'frame', 'resolved_class', 'name']]
if 'resolved_class' in track.columns:
del track['resolved_class']
if 'name' in track.columns:
del track['name']
rsTrack = track.merge(rsTrack, on=['trackId','frame'])
return rsTrack, phase
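# A minimal calling sketch (hypothetical data): `gt_df` is assumed to be a
# table with one object per row and the columns trackId, frame, parentTrackId,
# plus a 'predicted_class' column holding the ground-truth phases. The G2_trh
# and minLineage values below are illustrative only.
#
# resolved_df, phase_df = resolve_from_gt(gt_df, gt_name='predicted_class',
#                                         G2_trh=120, minLineage=10)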
class Resolver:
def __init__(self, track, ann, mt_dic, maxBG=25, minS=20, minM=10, minLineage=10, impreciseExit=None, G2_trh=100):
"""Resolve cell cycle duration, identity G1 or G2.
Args:
- pcnaDeep.tracker outputs:
track (pandas.DataFrame): tracked object table;
ann (pandas.DataFrame): track annotation table;
mt_dic (dict): mitosis information lookup dictionary;
impreciseExit (list): list of tracks whose M-G1 transition is not clearly labeled.
- GPR algorithm parameters for searching S/M phase:
maxBG (float): maximum background class appearance allowed within target phase;
minS (float): minimum target S phase length;
minM (float): minimum target M phase length.
- Options:
minLineage (int): minimum lineage length to record in the output phase table.
G2_trh (int): G2 intensity threshold for classifying arrested G1/G2 tracks. Background subtracted.
"""
if impreciseExit is None:
impreciseExit = []
self.logger = logging.getLogger('pcna.Resolver')
self.impreciseExit = impreciseExit
self.track = track
self.ann = ann
self.maxBG = maxBG
self.minS = minS
self.mt_dic = mt_dic
self.minM = minM
self.rsTrack = None
self.minLineage = minLineage
self.unresolved = []
self.mt_unresolved = []
self.arrest = {} # trackId : arrest phase
self.G2_trh = G2_trh
self.phase = pd.DataFrame(columns=['track', 'type', 'G1', 'S', 'M', 'G2', 'parent'])
def doResolve(self):
"""Main function of class resolver.
Returns:
pandas.DataFrame: tracked object table with additional column 'resolved_class'.
pandas.DataFrame: phase table with cell cycle durations.
"""
self.logger.info('Resolving cell cycle phase...')
track = self.track.copy()
rt = pd.DataFrame()
for i in np.unique(track['lineageId']):
d = track[track['lineageId'] == i]
t = self.resolveLineage(d, i)
rt = rt.append(t)
rt = rt.sort_values(by=['trackId', 'frame'])
self.rsTrack = rt.copy()
self.check_trans_integrity()
self.mt_unresolved = list(np.unique(self.mt_unresolved))
if self.mt_unresolved:
self.logger.warning('Sequential mitosis without S phase; Ignore tracks: ' + str(self.mt_unresolved)[1:-1])
if self.unresolved:
self.logger.warning('Numerous classification change after resolving, check: ' + str(self.unresolved)[1:-1])
self.resolveArrest(self.G2_trh)
phase = self.doResolvePhase()
self.getAnn()
return self.rsTrack, phase
def check_trans_integrity(self):
"""Check track transition integrity. If transition other than G1->S; S->G2, G2->M, M->G1 found, do not resolve.
"""
for t in np.unique(self.rsTrack['trackId']):
if t not in self.mt_unresolved and t not in self.unresolved:
sub = self.rsTrack[self.rsTrack['trackId'] == t]
rcls = list(sub['resolved_class'])
for i in range(1,sub.shape[0]):
if rcls[i-1] != rcls[i]:
trs = '-'.join([rcls[i-1], rcls[i]])
if trs not in ['G1-S','S-G2','G2-M','M-G1']:
self.logger.warning('Wrong transition ' + trs + ' in track: ' + str(t))
return
def getAnn(self):
"""Add an annotation column to tracked object table
The annotation format is track ID - (parentTrackId, optional) - resolved_class
"""
ann = []
cls_col = 'resolved_class'
if cls_col not in self.track.columns:
print('Phase not resolved yet. Using predicted phase classifications.')
cls_col = 'predicted_class'
track_id = list(self.track['trackId'])
parent_id = list(self.track['parentTrackId'])
cls_lb = list(self.track[cls_col])
for i in range(self.track.shape[0]):
inform = [str(track_id[i]), str(parent_id[i]), cls_lb[i]]
if inform[1] == '0':
del inform[1]
ann.append('-'.join(inform))
self.track['name'] = ann
return
def resolveArrest(self, G2_trh=None):
"""Determine G1/G2 arrest tracks.
- If `G2_trh` is supplied, determine G2 based on background-subtracted mean of the track
(averaged across frames).
- If `G2_trh` is not supplied, assign the G1 or G2 classification according to 2-cluster K-means.
Args:
G2_trh (int): integer between 1 and 255; tracks above the threshold are classified as G2.
"""
trk = self.rsTrack[self.rsTrack['trackId'].isin(list(self.arrest.keys()))].copy()
intensity = []
ids = []
for i in self.arrest.keys():
sub = trk[trk['trackId'] == i]
if self.arrest[i] == 'S':
continue
corrected_mean = np.mean(sub['mean_intensity'] - sub['background_mean'])
intensity.append(corrected_mean)
ids.append(i)
if G2_trh is None:
self.logger.warning('No G2 threshold provided, using KMean clustering to distinguish arrested G1/G2 track.')
X = np.expand_dims(np.array(intensity), axis=1)
X = MinMaxScaler().fit_transform(X)
y = list(KMeans(2).fit_predict(X))
else:
if G2_trh < 1 or G2_trh > 255:
raise ValueError('G2 threshold must be within the interval: 1~255.')
y = []
for i in range(len(ids)):
if intensity[i] > G2_trh:
y.append(1)
else:
y.append(0)
for i in range(len(ids)):
if y[i] == 0:
self.arrest[ids[i]] = 'G1'
self.rsTrack.loc[self.rsTrack['trackId'] == ids[i], 'resolved_class'] = 'G1*'
else:
self.arrest[ids[i]] = 'G2'
self.rsTrack.loc[self.rsTrack['trackId'] == ids[i], 'resolved_class'] = 'G2*'
return
def resolveLineage(self, lineage, main):
"""Resolve all tracks in a lineage recursively
main (int): the parent track ID of current search
"""
info = self.ann.loc[self.ann['track'] == main]
m_entry = info['m_entry'].values[0]
m_exit = info['m_exit'].values[0]
if len(np.unique(lineage['trackId'])) == 1:
rsd = self.resolveTrack(lineage.copy(), m_entry=m_entry, m_exit=m_exit)
return rsd
else:
out = pd.DataFrame()
lg = lineage[lineage['trackId'] == main]
out = out.append(self.resolveTrack(lg.copy(), m_entry=m_entry, m_exit=m_exit))
daugs = self.mt_dic[main]['daug']
for i in list(daugs.keys()):
out = out.append(
self.resolveLineage(lineage[lineage['trackId'].isin(find_daugs(lineage, i) + [i])].copy(), i))
return out
def resolveTrack(self, trk, m_entry=None, m_exit=None):
"""Resolve single track.
Args:
trk (pandas.DataFrame): track table
m_entry (int): time of mitosis entry corresponding to 'frame' column in table
m_exit (int): time of mitosis exit corresponding to 'frame' column in table
If no m time supplied, only treat as G1/G2/S track.
Arrested track not resolved, return full G1/G2 list.
Returns:
pandas.DataFrame table with addition column of resolved class
"""
UNRESOLVED_FRACTION = 0.2 # after resolving, if more than this fraction of classifications was corrected, the track is flagged as unresolved
resolved_class = ['G1/G2' for _ in range(trk.shape[0])]
if trk.shape[0] == 0:
raise ValueError('Track not found!')
#return None
track_id = trk['trackId'].tolist()[0]
cls = list(trk['predicted_class'])
if list(np.unique(cls)) == ['S']:
trk['resolved_class'] = 'S'
self.arrest[track_id] = 'S'
return trk
confid = np.array(trk[['Probability of G1/G2', 'Probability of S', 'Probability of M']])
out = deduce_transition(l=cls, tar='S', confidence=confid, min_tar=self.minS,
max_res=self.maxBG, casual_end=False)
flag = False
if not (out is None or out[0] == out[1]):
flag = True
a = (out[0], np.min((out[1] + 1, len(resolved_class) - 1)))
resolved_class[a[0]:a[1] + 1] = ['S' for _ in range(a[0], a[1] + 1)]
if a[0] > 0:
resolved_class[:a[0]] = ['G1' for _ in range(a[0])]
if a[1] < len(resolved_class) - 1:
resolved_class[a[1]:] = ['G2' for _ in range(len(resolved_class) - a[1])]
frame = trk['frame'].tolist()
if m_exit is not None:
emerging = trk['emerging'].tolist()
if 1 in emerging:
exit_idx = int(np.min((frame.index(m_exit), emerging.index(1)))) # Emerging classification refers to G1
else:
exit_idx = frame.index(m_exit)
resolved_class[:exit_idx + 1] = ['M' for _ in range(exit_idx + 1)]
i = exit_idx + 1
while i < len(resolved_class):
if resolved_class[i] == 'G1/G2':
resolved_class[i] = 'G1'
else:
break
i += 1
if m_entry is not None:
resolved_class[frame.index(m_entry):] = ['M' for _ in range(len(resolved_class) - frame.index(m_entry))]
i = frame.index(m_entry) - 1
while i >= 0:
if resolved_class[i] == 'G1/G2':
resolved_class[i] = 'G2'
else:
break
i -= 1
if not flag and m_exit is not None and m_entry is not None:
resolved_class = cls.copy()
self.mt_unresolved.extend([track_id] + find_daugs(self.track, track_id))
if m_exit is None and m_entry is None:
# Some tracks begin/end with mitosis and were not associated during refinement. In this case, override any
# classification at the terminals. Only tracks ending/beginning with M are kept; otherwise override with G1/G2.
# WARNING: this can lead to false negatives
mt_out_begin = deduce_transition(l=cls, tar='M', confidence=confid, min_tar=1,
max_res=self.maxBG)
mt_out_end = deduce_transition(l=cls[::-1], tar='M', confidence=confid[::-1, :], min_tar=1,
max_res=self.maxBG)
if mt_out_end is not None:
# check if the begin and end intervals overlap
compare = (len(cls)-mt_out_end[1]-1, len(cls)-mt_out_end[0]-1)
if mt_out_begin is not None:
if compare[0] == mt_out_begin[0] and compare[1] == mt_out_begin[1]:
# if overlap, assign larger index one to None
if mt_out_end[0] < mt_out_begin[0]:
mt_out_begin = None
else:
mt_out_end = None
if mt_out_begin is not None and mt_out_end is None:
if mt_out_begin[0] == 0:
resolved_class[mt_out_begin[0]: mt_out_begin[1] + 1] = ['M' for _ in
range(mt_out_begin[0], mt_out_begin[1] + 1)]
# if followed with G1/G2 only, change to G1
if
|
np.unique(resolved_class[mt_out_begin[1] + 1:])
|
numpy.unique
|
import numpy as np
from hmmlearn import hmm
def states_probability(a, states, initial=1.0):
return initial * np.product(a[states[:-1], states[1:]])
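# Sanity sketch (hypothetical 2-state chain): the probability of the state
# path 0 -> 1 -> 1, starting with probability 1, is a[0, 1] * a[1, 1].
_trans = np.array([[0.7, 0.3],
                   [0.4, 0.6]])
assert np.isclose(states_probability(_trans, np.array([0, 1, 1])), 0.3 * 0.6)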
def forward_algorithm(a, b, observations, initial_probs=None):
state_n, _ = a.shape
current_probs = initial_probs
if initial_probs is None:
current_probs = np.full(state_n, 1.0 / state_n, dtype=np.float64)
for observation in observations:
observation_probs = b[:, observation]
current_probs = (a @ current_probs) * observation_probs
return current_probs.sum()
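# Toy check of the forward pass (hypothetical 2-state HMM with binary
# observations): for these matrices the returned total probability of the
# observation sequence is a value in (0, 1).
_a_toy = np.array([[0.7, 0.3],
                   [0.4, 0.6]])   # toy transition weights
_b_toy = np.array([[0.9, 0.1],
                   [0.2, 0.8]])   # emission probabilities per state
_p_toy = forward_algorithm(_a_toy, _b_toy, observations=[0, 1, 0])
assert 0.0 < _p_toy < 1.0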
class HMMGMMClassifier:
def __init__(self):
self.models = None
self.unique_labels = None
def fit(self, features, labels, *args, **kwargs):
self.unique_labels = np.unique(labels)
self.models = []
for unique_label in self.unique_labels:
relevant_ixs = (labels == unique_label).nonzero()[0]
relevant_data = []
for relevant_ix in relevant_ixs:
row = features[relevant_ix]
relevant_data.append(row)
lengths = np.array([len(data) for data in relevant_data])
concat =
|
np.vstack(relevant_data)
|
numpy.vstack
|
# -*- coding: utf-8 -*-
u"""
.. _glow:
xrtGlow -- an interactive 3D beamline viewer
--------------------------------------------
The beamline created in xrtQook can be interactively viewed in an OpenGL based
widget xrtGlow. It visualizes beams, footprints, surfaces, apertures and
screens. The brightness represents intensity and the color represents an
auxiliary user-selected distribution, typically energy. A virtual screen can be
put at any position and dragged by mouse with simultaneous observation of the
beam distribution on it. See two example screenshots below (click to expand and
read the captions).
The primary purpose of xrtGlow is to demonstrate the alignment correctness
given the fact that xrtQook can automatically calculate several positional and
angular parameters.
See also :ref:`Notes on using xrtGlow <glow_notes>`.
+-------------+-------------+
| |glow1| | |glow2| |
+-------------+-------------+
.. |glow1| imagezoom:: _images/xrtGlow1.png
:alt: A view of xrtQook with embedded xrtGlow. Visible is a virtual
screen draggable by mouse, a curved mirror surface with a footprint on
it and the color (energy) distribution on the virtual screen. The scale
along the beamline is compressed by a factor of 100.
.. |glow2| imagezoom:: _images/xrtGlow2.png
:loc: upper-right-corner
:alt: xrtGlow with three double-paraboloid lenses. The scaling on
this image is isotropic. The source (on the left) is a parallel
geometric source. The coloring is by axial divergence (red=0), showing
the effect of refractive focusing.
"""
from __future__ import print_function
__author__ = "<NAME>, <NAME>"
import sys
import os
import numpy as np
from functools import partial
import matplotlib as mpl
# import inspect
import re
import copy
# import time
from collections import OrderedDict
from ...backends import raycing
from ...backends.raycing import sources as rsources
from ...backends.raycing import screens as rscreens
from ...backends.raycing import oes as roes
from ...backends.raycing import apertures as rapertures
from ...backends.raycing import materials as rmats
from ..commons import qt
from ..commons import gl
from ...plotter import colorFactor, colorSaturation
_DEBUG_ = False # If False, exceptions inside the module are ignored
class xrtGlow(qt.QWidget):
def __init__(self, arrayOfRays, parent=None, progressSignal=None):
super(xrtGlow, self).__init__()
self.parentRef = parent
self.cAxisLabelSize = 10
mplFont = {'size': self.cAxisLabelSize}
mpl.rc('font', **mplFont)
self.setWindowTitle('xrtGlow')
iconsDir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'_icons')
self.setWindowIcon(qt.QIcon(os.path.join(iconsDir, 'icon-GLow.ico')))
self.populateOEsList(arrayOfRays)
self.segmentsModel = self.initSegmentsModel()
self.segmentsModelRoot = self.segmentsModel.invisibleRootItem()
self.populateSegmentsModel(arrayOfRays)
self.fluxDataModel = qt.QStandardItemModel()
for colorField in raycing.allBeamFields:
self.fluxDataModel.appendRow(qt.QStandardItem(colorField))
self.customGlWidget = xrtGlWidget(self, arrayOfRays,
self.segmentsModelRoot,
self.oesList,
self.beamsToElements,
progressSignal)
self.customGlWidget.rotationUpdated.connect(self.updateRotationFromGL)
self.customGlWidget.scaleUpdated.connect(self.updateScaleFromGL)
self.customGlWidget.histogramUpdated.connect(self.updateColorMap)
self.customGlWidget.setContextMenuPolicy(qt.CustomContextMenu)
self.customGlWidget.customContextMenuRequested.connect(self.glMenu)
self.makeNavigationPanel()
self.makeTransformationPanel()
self.makeColorsPanel()
self.makeGridAndProjectionsPanel()
self.makeScenePanel()
mainLayout = qt.QHBoxLayout()
sideLayout = qt.QVBoxLayout()
tabs = qt.QTabWidget()
tabs.addTab(self.navigationPanel, "Navigation")
tabs.addTab(self.transformationPanel, "Transformations")
tabs.addTab(self.colorOpacityPanel, "Colors")
tabs.addTab(self.projectionPanel, "Grid/Projections")
tabs.addTab(self.scenePanel, "Scene")
sideLayout.addWidget(tabs)
self.canvasSplitter = qt.QSplitter()
self.canvasSplitter.setChildrenCollapsible(False)
self.canvasSplitter.setOrientation(qt.Horizontal)
mainLayout.addWidget(self.canvasSplitter)
sideWidget = qt.QWidget()
sideWidget.setLayout(sideLayout)
self.canvasSplitter.addWidget(self.customGlWidget)
self.canvasSplitter.addWidget(sideWidget)
self.setLayout(mainLayout)
self.customGlWidget.oesList = self.oesList
toggleHelp = qt.QShortcut(self)
toggleHelp.setKey(qt.Key_F1)
toggleHelp.activated.connect(self.customGlWidget.toggleHelp)
fastSave = qt.QShortcut(self)
fastSave.setKey(qt.Key_F5)
fastSave.activated.connect(partial(self.saveScene, '_xrtScnTmp_.npy'))
fastLoad = qt.QShortcut(self)
fastLoad.setKey(qt.Key_F6)
fastLoad.activated.connect(partial(self.loadScene, '_xrtScnTmp_.npy'))
startMovie = qt.QShortcut(self)
startMovie.setKey(qt.Key_F7)
startMovie.activated.connect(self.startRecordingMovie)
toggleScreen = qt.QShortcut(self)
toggleScreen.setKey(qt.Key_F3)
toggleScreen.activated.connect(self.customGlWidget.toggleVScreen)
self.dockToQook = qt.QShortcut(self)
self.dockToQook.setKey(qt.Key_F4)
self.dockToQook.activated.connect(self.toggleDock)
tiltScreen = qt.QShortcut(self)
tiltScreen.setKey(qt.CTRL + qt.Key_T)
tiltScreen.activated.connect(self.customGlWidget.switchVScreenTilt)
def makeNavigationPanel(self):
self.navigationLayout = qt.QVBoxLayout()
centerCBLabel = qt.QLabel('Center view at:')
self.centerCB = qt.QComboBox()
self.centerCB.setMaxVisibleItems(48)
for key in self.oesList.keys():
self.centerCB.addItem(str(key))
# centerCB.addItem('customXYZ')
self.centerCB.currentIndexChanged['QString'].connect(self.centerEl)
self.centerCB.setCurrentIndex(0)
layout = qt.QHBoxLayout()
layout.addWidget(centerCBLabel)
layout.addWidget(self.centerCB)
layout.addStretch()
self.navigationLayout.addLayout(layout)
self.oeTree = qt.QTreeView()
self.oeTree.setModel(self.segmentsModel)
self.oeTree.setContextMenuPolicy(qt.CustomContextMenu)
self.oeTree.customContextMenuRequested.connect(self.oeTreeMenu)
self.oeTree.resizeColumnToContents(0)
self.navigationLayout.addWidget(self.oeTree)
self.navigationPanel = qt.QWidget(self)
self.navigationPanel.setLayout(self.navigationLayout)
def makeTransformationPanel(self):
self.zoomPanel = qt.QGroupBox(self)
self.zoomPanel.setFlat(False)
self.zoomPanel.setTitle("Log scale")
zoomLayout = qt.QVBoxLayout()
fitLayout = qt.QHBoxLayout()
scaleValidator = qt.QDoubleValidator()
scaleValidator.setRange(0, 7, 7)
self.zoomSliders = []
self.zoomEditors = []
for iaxis, axis in enumerate(['x', 'y', 'z']):
axLabel = qt.QLabel(axis)
axEdit = qt.QLineEdit()
axSlider = qt.glowSlider(self, qt.Horizontal, qt.glowTopScale)
axSlider.setRange(0, 7, 0.01)
value = 1 if iaxis == 1 else 3
axSlider.setValue(value)
axEdit.setText("{0:.2f}".format(value))
axEdit.setValidator(scaleValidator)
axEdit.editingFinished.connect(
partial(self.updateScaleFromQLE, axSlider))
axSlider.valueChanged.connect(
partial(self.updateScale, iaxis, axEdit))
self.zoomSliders.append(axSlider)
self.zoomEditors.append(axEdit)
layout = qt.QHBoxLayout()
axLabel.setMinimumWidth(12)
layout.addWidget(axLabel)
axEdit.setMaximumWidth(48)
layout.addWidget(axEdit)
layout.addWidget(axSlider)
zoomLayout.addLayout(layout)
for iaxis, axis in enumerate(['x', 'y', 'z', 'all']):
fitX = qt.QPushButton("fit {}".format(axis))
dim = [iaxis] if iaxis < 3 else [0, 1, 2]
fitX.clicked.connect(partial(self.fitScales, dim))
fitLayout.addWidget(fitX)
zoomLayout.addLayout(fitLayout)
self.zoomPanel.setLayout(zoomLayout)
self.rotationPanel = qt.QGroupBox(self)
self.rotationPanel.setFlat(False)
self.rotationPanel.setTitle("Rotation (deg)")
rotationLayout = qt.QVBoxLayout()
fixedViewsLayout = qt.QHBoxLayout()
# rotModeCB = qt.QCheckBox('Use Eulerian rotation')
# rotModeCB.setCheckState(2)
# rotModeCB.stateChanged.connect(self.checkEulerian)
# rotationLayout.addWidget(rotModeCB, 0, 0)
rotValidator = qt.QDoubleValidator()
rotValidator.setRange(-180., 180., 9)
self.rotationSliders = []
self.rotationEditors = []
for iaxis, axis in enumerate(['pitch (Rx)', 'roll (Ry)', 'yaw (Rz)']):
axLabel = qt.QLabel(axis)
axEdit = qt.QLineEdit("0.")
axEdit.setValidator(rotValidator)
axSlider = qt.glowSlider(self, qt.Horizontal, qt.glowTopScale)
axSlider.setRange(-180, 180, 0.01)
axSlider.setValue(0)
axEdit.editingFinished.connect(
partial(self.updateRotationFromQLE, axSlider))
axSlider.valueChanged.connect(
partial(self.updateRotation, iaxis, axEdit))
self.rotationSliders.append(axSlider)
self.rotationEditors.append(axEdit)
layout = qt.QHBoxLayout()
axLabel.setMinimumWidth(64)
layout.addWidget(axLabel)
axEdit.setMaximumWidth(48)
layout.addWidget(axEdit)
layout.addWidget(axSlider)
rotationLayout.addLayout(layout)
for axis, angles in zip(['Side', 'Front', 'Top', 'Isometric'],
[[[0.], [0.], [0.]],
[[0.], [0.], [90.]],
[[0.], [90.], [0.]],
[[0.], [35.264], [-45.]]]):
setView = qt.QPushButton(axis)
setView.clicked.connect(partial(self.updateRotationFromGL, angles))
fixedViewsLayout.addWidget(setView)
rotationLayout.addLayout(fixedViewsLayout)
self.rotationPanel.setLayout(rotationLayout)
self.transformationPanel = qt.QWidget(self)
transformationLayout = qt.QVBoxLayout()
transformationLayout.addWidget(self.zoomPanel)
transformationLayout.addWidget(self.rotationPanel)
transformationLayout.addStretch()
self.transformationPanel.setLayout(transformationLayout)
def fitScales(self, dims):
for dim in dims:
dimMin = np.min(self.customGlWidget.footprintsArray[:, dim])
dimMax = np.max(self.customGlWidget.footprintsArray[:, dim])
newScale = 1.9 * self.customGlWidget.aPos[dim] /\
(dimMax - dimMin) * self.customGlWidget.maxLen
self.customGlWidget.tVec[dim] = -0.5 * (dimMin + dimMax)
self.customGlWidget.scaleVec[dim] = newScale
self.updateScaleFromGL(self.customGlWidget.scaleVec)
def makeColorsPanel(self):
self.opacityPanel = qt.QGroupBox(self)
self.opacityPanel.setFlat(False)
self.opacityPanel.setTitle("Opacity")
opacityLayout = qt.QVBoxLayout()
self.opacitySliders = []
self.opacityEditors = []
for iaxis, (axis, rstart, rend, rstep, val) in enumerate(zip(
('Line opacity', 'Line width', 'Point opacity', 'Point size'),
(0, 0, 0, 0), (1., 20., 1., 20.), (0.001, 0.01, 0.001, 0.01),
(0.2, 2., 0.25, 3.))):
axLabel = qt.QLabel(axis)
opacityValidator = qt.QDoubleValidator()
axSlider = qt.glowSlider(self, qt.Horizontal, qt.glowTopScale)
axSlider.setRange(rstart, rend, rstep)
axSlider.setValue(val)
axEdit = qt.QLineEdit()
opacityValidator.setRange(rstart, rend, 5)
self.updateOpacity(iaxis, axEdit, val)
axEdit.setValidator(opacityValidator)
axEdit.editingFinished.connect(
partial(self.updateOpacityFromQLE, axSlider))
axSlider.valueChanged.connect(
partial(self.updateOpacity, iaxis, axEdit))
self.opacitySliders.append(axSlider)
self.opacityEditors.append(axEdit)
layout = qt.QHBoxLayout()
axLabel.setMinimumWidth(80)
layout.addWidget(axLabel)
axEdit.setMaximumWidth(48)
layout.addWidget(axEdit)
layout.addWidget(axSlider)
opacityLayout.addLayout(layout)
self.opacityPanel.setLayout(opacityLayout)
self.colorPanel = qt.QGroupBox(self)
self.colorPanel.setFlat(False)
self.colorPanel.setTitle("Color")
colorLayout = qt.QVBoxLayout()
self.mplFig = mpl.figure.Figure(dpi=self.logicalDpiX()*0.8)
self.mplFig.patch.set_alpha(0.)
self.mplFig.subplots_adjust(left=0.15, bottom=0.15, top=0.92)
self.mplAx = self.mplFig.add_subplot(111)
self.mplFig.suptitle("")
self.drawColorMap('energy')
self.paletteWidget = qt.FigCanvas(self.mplFig)
self.paletteWidget.setSizePolicy(qt.QSizePolicy.Maximum,
qt.QSizePolicy.Maximum)
self.paletteWidget.span = mpl.widgets.RectangleSelector(
self.mplAx, self.updateColorSelFromMPL, drawtype='box',
useblit=True, rectprops=dict(alpha=0.4, facecolor='white'),
button=1, interactive=True)
layout = qt.QHBoxLayout()
self.colorControls = []
colorCBLabel = qt.QLabel('Color Axis:')
colorCB = qt.QComboBox()
colorCB.setMaxVisibleItems(48)
colorCB.setModel(self.fluxDataModel)
colorCB.setCurrentIndex(colorCB.findText('energy'))
colorCB.currentIndexChanged['QString'].connect(self.changeColorAxis)
self.colorControls.append(colorCB)
layout.addWidget(colorCBLabel)
layout.addWidget(colorCB)
layout.addStretch()
colorLayout.addLayout(layout)
colorLayout.addWidget(self.paletteWidget)
layout = qt.QHBoxLayout()
for icSel, cSelText in enumerate(['Color Axis min', 'Color Axis max']):
if icSel > 0:
layout.addStretch()
selLabel = qt.QLabel(cSelText)
selValidator = qt.QDoubleValidator()
selValidator.setRange(-1.0e20 if icSel == 0 else
self.customGlWidget.colorMin,
self.customGlWidget.colorMax if icSel == 0
else 1.0e20, 5)
selQLE = qt.QLineEdit()
selQLE.setValidator(selValidator)
selQLE.setText('{0:.6g}'.format(
self.customGlWidget.colorMin if icSel == 0 else
self.customGlWidget.colorMax))
selQLE.editingFinished.connect(
partial(self.updateColorAxis, icSel))
selQLE.setMaximumWidth(80)
self.colorControls.append(selQLE)
layout.addWidget(selLabel)
layout.addWidget(selQLE)
colorLayout.addLayout(layout)
layout = qt.QHBoxLayout()
for icSel, cSelText in enumerate(['Selection min', 'Selection max']):
if icSel > 0:
layout.addStretch()
selLabel = qt.QLabel(cSelText)
selValidator = qt.QDoubleValidator()
selValidator.setRange(self.customGlWidget.colorMin,
self.customGlWidget.colorMax, 5)
selQLE = qt.QLineEdit()
selQLE.setValidator(selValidator)
selQLE.setText('{0:.6g}'.format(
self.customGlWidget.colorMin if icSel == 0 else
self.customGlWidget.colorMax))
selQLE.editingFinished.connect(
partial(self.updateColorSelFromQLE, icSel))
selQLE.setMaximumWidth(80)
self.colorControls.append(selQLE)
layout.addWidget(selLabel)
layout.addWidget(selQLE)
colorLayout.addLayout(layout)
selSlider = qt.glowSlider(self, qt.Horizontal, qt.glowTopScale)
rStep = (self.customGlWidget.colorMax -
self.customGlWidget.colorMin) / 100.
rValue = (self.customGlWidget.colorMax +
self.customGlWidget.colorMin) * 0.5
selSlider.setRange(self.customGlWidget.colorMin,
self.customGlWidget.colorMax, rStep)
selSlider.setValue(rValue)
selSlider.sliderMoved.connect(self.updateColorSel)
self.colorControls.append(selSlider)
colorLayout.addWidget(selSlider)
layout = qt.QHBoxLayout()
axLabel = qt.QLabel("Intensity cut-off")
axEdit = qt.QLineEdit("0.01")
cutValidator = qt.QDoubleValidator()
cutValidator.setRange(0, 1, 3)
axEdit.setValidator(cutValidator)
axEdit.editingFinished.connect(self.updateCutoffFromQLE)
axLabel.setMinimumWidth(144)
layout.addWidget(axLabel)
axEdit.setMaximumWidth(48)
layout.addWidget(axEdit)
layout.addStretch()
colorLayout.addLayout(layout)
layout = qt.QHBoxLayout()
explLabel = qt.QLabel("Color bump height, mm")
explEdit = qt.QLineEdit("0.0")
explValidator = qt.QDoubleValidator()
explValidator.setRange(-1000, 1000, 3)
explEdit.setValidator(explValidator)
explEdit.editingFinished.connect(self.updateExplosionDepth)
explLabel.setMinimumWidth(144)
layout.addWidget(explLabel)
explEdit.setMaximumWidth(48)
layout.addWidget(explEdit)
layout.addStretch()
colorLayout.addLayout(layout)
# axSlider = qt.glowSlider(
# self, qt.Horizontal, qt.glowTopScale)
# axSlider.setRange(0, 1, 0.001)
# axSlider.setValue(0.01)
# axSlider.valueChanged.connect(self.updateCutoff)
# colorLayout.addWidget(axSlider, 3+3, 0, 1, 2)
glNormCB = qt.QCheckBox('Global Normalization')
glNormCB.setChecked(True)
glNormCB.stateChanged.connect(self.checkGNorm)
colorLayout.addWidget(glNormCB)
self.glNormCB = glNormCB
iHSVCB = qt.QCheckBox('Intensity as HSV Value')
iHSVCB.setChecked(False)
iHSVCB.stateChanged.connect(self.checkHSV)
colorLayout.addWidget(iHSVCB)
self.iHSVCB = iHSVCB
self.colorPanel.setLayout(colorLayout)
self.colorOpacityPanel = qt.QWidget(self)
colorOpacityLayout = qt.QVBoxLayout()
colorOpacityLayout.addWidget(self.colorPanel)
colorOpacityLayout.addWidget(self.opacityPanel)
colorOpacityLayout.addStretch()
self.colorOpacityPanel.setLayout(colorOpacityLayout)
def makeGridAndProjectionsPanel(self):
self.gridPanel = qt.QGroupBox(self)
self.gridPanel.setFlat(False)
self.gridPanel.setTitle("Show coordinate grid")
self.gridPanel.setCheckable(True)
self.gridPanel.toggled.connect(self.checkDrawGrid)
scaleValidator = qt.QDoubleValidator()
scaleValidator.setRange(0, 7, 7)
xyzGridLayout = qt.QVBoxLayout()
self.gridSliders = []
self.gridEditors = []
for iaxis, axis in enumerate(['x', 'y', 'z']):
axLabel = qt.QLabel(axis)
axEdit = qt.QLineEdit("0.9")
axEdit.setValidator(scaleValidator)
axSlider = qt.glowSlider(self, qt.Horizontal, qt.glowTopScale)
axSlider.setRange(0, 10, 0.01)
axSlider.setValue(0.9)
axEdit.editingFinished.connect(
partial(self.updateGridFromQLE, axSlider))
axSlider.valueChanged.connect(
partial(self.updateGrid, iaxis, axEdit))
self.gridSliders.append(axSlider)
self.gridEditors.append(axEdit)
layout = qt.QHBoxLayout()
axLabel.setMinimumWidth(20)
layout.addWidget(axLabel)
axEdit.setMaximumWidth(48)
layout.addWidget(axEdit)
layout.addWidget(axSlider)
xyzGridLayout.addLayout(layout)
checkBox = qt.QCheckBox('Fine grid')
checkBox.setChecked(False)
checkBox.stateChanged.connect(self.checkFineGrid)
xyzGridLayout.addWidget(checkBox)
self.checkBoxFineGrid = checkBox
self.gridControls = []
projectionLayout = qt.QVBoxLayout()
checkBox = qt.QCheckBox('Perspective')
checkBox.setChecked(True)
checkBox.stateChanged.connect(self.checkPerspect)
self.checkBoxPerspective = checkBox
projectionLayout.addWidget(self.checkBoxPerspective)
self.gridControls.append(self.checkBoxPerspective)
self.gridControls.append(self.gridPanel)
self.gridControls.append(self.checkBoxFineGrid)
self.gridPanel.setLayout(xyzGridLayout)
self.projVisPanel = qt.QGroupBox(self)
self.projVisPanel.setFlat(False)
self.projVisPanel.setTitle("Projections visibility")
projVisLayout = qt.QVBoxLayout()
self.projLinePanel = qt.QGroupBox(self)
self.projLinePanel.setFlat(False)
self.projLinePanel.setTitle("Projections opacity")
self.projectionControls = []
for iaxis, axis in enumerate(['Side (YZ)', 'Front (XZ)', 'Top (XY)']):
checkBox = qt.QCheckBox(axis)
checkBox.setChecked(False)
checkBox.stateChanged.connect(partial(self.projSelection, iaxis))
self.projectionControls.append(checkBox)
projVisLayout.addWidget(checkBox)
self.projLinePanel.setEnabled(False)
self.projVisPanel.setLayout(projVisLayout)
projLineLayout = qt.QVBoxLayout()
self.projectionOpacitySliders = []
self.projectionOpacityEditors = []
for iaxis, axis in enumerate(
['Line opacity', 'Line width', 'Point opacity', 'Point size']):
axLabel = qt.QLabel(axis)
projectionValidator = qt.QDoubleValidator()
axSlider = qt.glowSlider(self, qt.Horizontal, qt.glowTopScale)
if iaxis in [0, 2]:
axSlider.setRange(0, 1., 0.001)
axSlider.setValue(0.1)
axEdit = qt.QLineEdit("0.1")
projectionValidator.setRange(0, 1., 5)
else:
axSlider.setRange(0, 20, 0.01)
axSlider.setValue(1.)
axEdit = qt.QLineEdit("1")
projectionValidator.setRange(0, 20., 5)
axEdit.setValidator(projectionValidator)
axEdit.editingFinished.connect(
partial(self.updateProjectionOpacityFromQLE, axSlider))
axSlider.valueChanged.connect(
partial(self.updateProjectionOpacity, iaxis, axEdit))
self.projectionOpacitySliders.append(axSlider)
self.projectionOpacityEditors.append(axEdit)
layout = qt.QHBoxLayout()
axLabel.setMinimumWidth(80)
layout.addWidget(axLabel)
axEdit.setMaximumWidth(48)
layout.addWidget(axEdit)
layout.addWidget(axSlider)
projLineLayout.addLayout(layout)
self.projLinePanel.setLayout(projLineLayout)
self.projectionPanel = qt.QWidget(self)
projectionLayout.addWidget(self.gridPanel)
projectionLayout.addWidget(self.projVisPanel)
projectionLayout.addWidget(self.projLinePanel)
projectionLayout.addStretch()
self.projectionPanel.setLayout(projectionLayout)
def makeScenePanel(self):
sceneLayout = qt.QVBoxLayout()
self.sceneControls = []
for iCB, (cbText, cbFunc) in enumerate(zip(
['Enable antialiasing',
'Enable blending',
'Depth test for Lines',
'Depth test for Points',
'Invert scene color',
'Use scalable font',
'Show Virtual Screen label',
'Virtual Screen for Indexing',
'Show lost rays',
'Show local axes'],
[self.checkAA,
self.checkBlending,
self.checkLineDepthTest,
self.checkPointDepthTest,
self.invertSceneColor,
self.checkScalableFont,
self.checkShowLabels,
self.checkVSColor,
self.checkShowLost,
self.checkShowLocalAxes])):
aaCheckBox = qt.QCheckBox(cbText)
aaCheckBox.setChecked(iCB in [1, 2])
aaCheckBox.stateChanged.connect(cbFunc)
self.sceneControls.append(aaCheckBox)
sceneLayout.addWidget(aaCheckBox)
axLabel = qt.QLabel('Font Size')
axSlider = qt.glowSlider(self, qt.Horizontal, qt.glowTopScale)
axSlider.setRange(1, 20, 0.5)
axSlider.setValue(5)
axSlider.valueChanged.connect(self.updateFontSize)
layout = qt.QHBoxLayout()
layout.addWidget(axLabel)
layout.addWidget(axSlider)
sceneLayout.addLayout(layout)
labelPrec = qt.QComboBox()
for order in range(5):
labelPrec.addItem("{}mm".format(10**-order))
labelPrec.setCurrentIndex(1)
labelPrec.currentIndexChanged['int'].connect(self.setLabelPrec)
aaLabel = qt.QLabel('Label Precision')
layout = qt.QHBoxLayout()
aaLabel.setMinimumWidth(100)
layout.addWidget(aaLabel)
labelPrec.setMaximumWidth(120)
layout.addWidget(labelPrec)
layout.addStretch()
sceneLayout.addLayout(layout)
oeTileValidator = qt.QIntValidator()
oeTileValidator.setRange(1, 20)
for ia, axis in enumerate(['OE tessellation X', 'OE tessellation Y']):
axLabel = qt.QLabel(axis)
axEdit = qt.QLineEdit("2")
axEdit.setValidator(oeTileValidator)
axEdit.editingFinished.connect(partial(self.updateTileFromQLE, ia))
layout = qt.QHBoxLayout()
axLabel.setMinimumWidth(100)
layout.addWidget(axLabel)
axEdit.setMaximumWidth(48)
layout.addWidget(axEdit)
layout.addStretch()
sceneLayout.addLayout(layout)
self.scenePanel = qt.QWidget(self)
sceneLayout.addStretch()
self.scenePanel.setLayout(sceneLayout)
def toggleDock(self):
if self.parentRef is not None:
self.parentRef.catchViewer()
self.parentRef = None
def initSegmentsModel(self, isNewModel=True):
newModel = qt.QStandardItemModel()
newModel.setHorizontalHeaderLabels(['Rays',
'Footprint',
'Surface',
'Label'])
if isNewModel:
headerRow = []
for i in range(4):
child = qt.QStandardItem("")
child.setEditable(False)
child.setCheckable(True)
child.setCheckState(2 if i < 2 else 0)
headerRow.append(child)
newModel.invisibleRootItem().appendRow(headerRow)
newModel.itemChanged.connect(self.updateRaysList)
return newModel
def updateOEsList(self, arrayOfRays):
self.oesList = None
self.beamsToElements = None
self.populateOEsList(arrayOfRays)
self.updateSegmentsModel(arrayOfRays)
self.oeTree.resizeColumnToContents(0)
self.centerCB.blockSignals(True)
tmpIndex = self.centerCB.currentIndex()
for i in range(self.centerCB.count()):
self.centerCB.removeItem(0)
for key in self.oesList.keys():
self.centerCB.addItem(str(key))
# self.segmentsModel.layoutChanged.emit()
try:
self.centerCB.setCurrentIndex(tmpIndex)
except: # analysis:ignore
pass
self.centerCB.blockSignals(False)
self.customGlWidget.arrayOfRays = arrayOfRays
self.customGlWidget.beamsDict = arrayOfRays[1]
self.customGlWidget.oesList = self.oesList
self.customGlWidget.beamsToElements = self.beamsToElements
# self.customGlWidget.newColorAxis = True
# self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.changeColorAxis(None)
self.customGlWidget.positionVScreen()
self.customGlWidget.glDraw()
def populateOEsList(self, arrayOfRays):
self.oesList = OrderedDict()
self.beamsToElements = OrderedDict()
oesList = arrayOfRays[2]
for segment in arrayOfRays[0]:
if segment[0] == segment[2]:
oesList[segment[0]].append(segment[1])
oesList[segment[0]].append(segment[3])
for segOE, oeRecord in oesList.items():
if len(oeRecord) > 2: # DCM
elNames = [segOE+'_Entrance', segOE+'_Exit']
else:
elNames = [segOE]
for elName in elNames:
self.oesList[elName] = [oeRecord[0]] # pointer to object
if len(oeRecord) < 3 or elName.endswith('_Entrance'):
center = list(oeRecord[0].center)
is2ndXtal = False
else:
is2ndXtal = True
# center = arrayOfRays[1][oeRecord[3]].wCenter
gb = self.oesList[elName][0].local_to_global(
rsources.Beam(nrays=2), returnBeam=True,
is2ndXtal=is2ndXtal)
center = [gb.x[0], gb.y[0], gb.z[0]]
for segment in arrayOfRays[0]:
ind = oeRecord[1]*2
if str(segment[ind]) == str(segOE):
if len(oeRecord) < 3 or\
(elName.endswith('Entrance') and
str(segment[3]) == str(oeRecord[2])) or\
(elName.endswith('Exit') and
str(segment[3]) == str(oeRecord[3])):
if len(self.oesList[elName]) < 2:
self.oesList[elName].append(
str(segment[ind+1]))
self.beamsToElements[segment[ind+1]] =\
elName
break
else:
self.oesList[elName].append(None)
self.oesList[elName].append(center)
self.oesList[elName].append(is2ndXtal)
def createRow(self, text, segMode):
newRow = []
for iCol in range(4):
newItem = qt.QStandardItem(str(text) if iCol == 0 else "")
newItem.setCheckable(True if (segMode == 3 and iCol == 0) or
(segMode == 1 and iCol > 0) else False)
if newItem.isCheckable():
newItem.setCheckState(2 if iCol < 2 else 0)
newItem.setEditable(False)
newRow.append(newItem)
return newRow
def updateSegmentsModel(self, arrayOfRays):
def copyRow(item, row):
newRow = []
for iCol in range(4):
oldItem = item.child(row, iCol)
newItem = qt.QStandardItem(str(oldItem.text()))
newItem.setCheckable(oldItem.isCheckable())
if newItem.isCheckable():
newItem.setCheckState(oldItem.checkState())
newItem.setEditable(oldItem.isEditable())
newRow.append(newItem)
return newRow
newSegmentsModel = self.initSegmentsModel(isNewModel=False)
newSegmentsModel.invisibleRootItem().appendRow(
copyRow(self.segmentsModelRoot, 0))
for element, elRecord in self.oesList.items():
for iel in range(self.segmentsModelRoot.rowCount()):
elItem = self.segmentsModelRoot.child(iel, 0)
elName = str(elItem.text())
if str(element) == elName:
elRow = copyRow(self.segmentsModelRoot, iel)
for segment in arrayOfRays[0]:
if segment[3] is not None:
endBeamText = "to {}".format(
self.beamsToElements[segment[3]])
if str(segment[1]) == str(elRecord[1]):
if elItem.hasChildren():
for ich in range(elItem.rowCount()):
if str(elItem.child(ich, 0).text()) ==\
endBeamText:
elRow[0].appendRow(
copyRow(elItem, ich))
break
else:
elRow[0].appendRow(self.createRow(
endBeamText, 3))
else:
elRow[0].appendRow(self.createRow(
endBeamText, 3))
newSegmentsModel.invisibleRootItem().appendRow(elRow)
break
else:
elRow = self.createRow(str(element), 1)
for segment in arrayOfRays[0]:
if str(segment[1]) == str(elRecord[1]) and\
segment[3] is not None:
endBeamText = "to {}".format(
self.beamsToElements[segment[3]])
elRow[0].appendRow(self.createRow(endBeamText, 3))
newSegmentsModel.invisibleRootItem().appendRow(elRow)
self.segmentsModel = newSegmentsModel
self.segmentsModelRoot = self.segmentsModel.invisibleRootItem()
self.oeTree.setModel(self.segmentsModel)
def populateSegmentsModel(self, arrayOfRays):
for element, elRecord in self.oesList.items():
newRow = self.createRow(element, 1)
for segment in arrayOfRays[0]:
cond = str(segment[1]) == str(elRecord[1]) # or\
# str(segment[0])+"_Entrance" == element
if cond:
try: # if segment[3] is not None:
endBeamText = "to {}".format(
self.beamsToElements[segment[3]])
newRow[0].appendRow(self.createRow(endBeamText, 3))
except: # analysis:ignore
continue
self.segmentsModelRoot.appendRow(newRow)
def drawColorMap(self, axis):
xv, yv = np.meshgrid(np.linspace(0, colorFactor, 200),
np.linspace(0, 1, 200))
xv = xv.flatten()
yv = yv.flatten()
self.im = self.mplAx.imshow(mpl.colors.hsv_to_rgb(np.vstack((
xv, np.ones_like(xv)*colorSaturation, yv)).T).reshape((
200, 200, 3)),
aspect='auto', origin='lower',
extent=(self.customGlWidget.colorMin,
self.customGlWidget.colorMax,
0, 1))
self.mplAx.set_xlabel(axis)
self.mplAx.set_ylabel('Intensity')
def updateColorMap(self, histArray):
if histArray[0] is not None:
size = len(histArray[0])
histImage = np.zeros((size, size, 3))
colorMin = self.customGlWidget.colorMin
colorMax = self.customGlWidget.colorMax
hMax = float(np.max(histArray[0]))
intensity = np.float64(np.array(histArray[0]) / hMax)
histVals = np.int32(intensity * (size-1))
for col in range(size):
histImage[0:histVals[col], col, :] = mpl.colors.hsv_to_rgb(
(colorFactor * (histArray[1][col] - colorMin) /
(colorMax - colorMin),
colorSaturation, intensity[col]))
self.im.set_data(histImage)
try:
topEl = np.where(intensity >= 0.5)[0]
hwhm = (np.abs(histArray[1][topEl[0]] -
histArray[1][topEl[-1]])) * 0.5
cntr = (histArray[1][topEl[0]] + histArray[1][topEl[-1]]) * 0.5
newLabel = u"{0:.3f}\u00b1{1:.3f}".format(cntr, hwhm)
self.mplAx.set_title(newLabel, fontsize=self.cAxisLabelSize)
except: # analysis:ignore
pass
self.mplFig.canvas.draw()
self.mplFig.canvas.blit()
self.paletteWidget.span.extents = self.paletteWidget.span.extents
else:
xv, yv = np.meshgrid(np.linspace(0, colorFactor, 200),
np.linspace(0, 1, 200))
xv = xv.flatten()
yv = yv.flatten()
self.im.set_data(mpl.colors.hsv_to_rgb(np.vstack((
xv, np.ones_like(xv)*colorSaturation, yv)).T).reshape((
200, 200, 3)))
self.mplAx.set_title("")
self.mplFig.canvas.draw()
self.mplFig.canvas.blit()
if self.paletteWidget.span.visible:
self.paletteWidget.span.extents =\
self.paletteWidget.span.extents
self.mplFig.canvas.blit()
def checkGNorm(self, state):
self.customGlWidget.globalNorm = True if state > 0 else False
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.customGlWidget.glDraw()
def checkHSV(self, state):
self.customGlWidget.iHSV = True if state > 0 else False
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.customGlWidget.glDraw()
def checkDrawGrid(self, state):
self.customGlWidget.drawGrid = True if state > 0 else False
self.customGlWidget.glDraw()
def checkFineGrid(self, state):
self.customGlWidget.fineGridEnabled = True if state > 0 else False
self.customGlWidget.glDraw()
def checkPerspect(self, state):
self.customGlWidget.perspectiveEnabled = True if state > 0 else False
self.customGlWidget.glDraw()
def checkAA(self, state):
self.customGlWidget.enableAA = True if state > 0 else False
self.customGlWidget.glDraw()
def checkBlending(self, state):
self.customGlWidget.enableBlending = True if state > 0 else False
self.customGlWidget.glDraw()
def checkLineDepthTest(self, state):
self.customGlWidget.linesDepthTest = True if state > 0 else False
self.customGlWidget.glDraw()
def checkPointDepthTest(self, state):
self.customGlWidget.pointsDepthTest = True if state > 0 else False
self.customGlWidget.glDraw()
def invertSceneColor(self, state):
self.customGlWidget.invertColors = True if state > 0 else False
self.customGlWidget.glDraw()
def checkScalableFont(self, state):
self.customGlWidget.useScalableFont = True if state > 0 else False
self.customGlWidget.glDraw()
def checkShowLabels(self, state):
self.customGlWidget.showOeLabels = True if state > 0 else False
self.customGlWidget.glDraw()
def checkVSColor(self, state):
self.customGlWidget.vScreenForColors = True if state > 0 else False
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.customGlWidget.glDraw()
def checkShowLost(self, state):
self.customGlWidget.showLostRays = True if state > 0 else False
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.customGlWidget.glDraw()
def checkShowLocalAxes(self, state):
self.customGlWidget.showLocalAxes = True if state > 0 else False
self.customGlWidget.glDraw()
def setSceneParam(self, iAction, state):
self.sceneControls[iAction].setChecked(state)
def setProjectionParam(self, iAction, state):
self.projectionControls[iAction].setChecked(state)
def setGridParam(self, iAction, state):
self.gridControls[iAction].setChecked(state)
def setLabelPrec(self, prec):
self.customGlWidget.labelCoordPrec = prec
self.customGlWidget.glDraw()
def updateColorAxis(self, icSel):
if icSel == 0:
txt = re.sub(',', '.', str(self.colorControls[1].text()))
if txt == "{0:.3f}".format(self.customGlWidget.colorMin):
return
newColorMin = float(txt)
self.customGlWidget.colorMin = newColorMin
self.colorControls[2].validator().setBottom(newColorMin)
else:
txt = re.sub(',', '.', str(self.colorControls[2].text()))
if txt == "{0:.3f}".format(self.customGlWidget.colorMax):
return
newColorMax = float(txt)
self.customGlWidget.colorMax = newColorMax
self.colorControls[1].validator().setTop(newColorMax)
self.changeColorAxis(None, newLimits=True)
def changeColorAxis(self, selAxis, newLimits=False):
if selAxis is None:
selAxis = self.colorControls[0].currentText()
self.customGlWidget.newColorAxis = False if\
self.customGlWidget.selColorMin is not None else True
else:
self.customGlWidget.getColor = getattr(
raycing, 'get_{}'.format(selAxis))
self.customGlWidget.newColorAxis = True
oldColorMin = self.customGlWidget.colorMin
oldColorMax = self.customGlWidget.colorMax
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.mplAx.set_xlabel(selAxis)
if oldColorMin == self.customGlWidget.colorMin and\
oldColorMax == self.customGlWidget.colorMax and not newLimits:
return
self.customGlWidget.selColorMin = self.customGlWidget.colorMin
self.customGlWidget.selColorMax = self.customGlWidget.colorMax
extents = (self.customGlWidget.colorMin,
self.customGlWidget.colorMax, 0, 1)
self.im.set_extent(extents)
self.mplFig.gca().ticklabel_format(useOffset=True)
# self.mplFig.gca().autoscale_view()
extents = list(extents)
self.colorControls[1].setText(
'{0:.3f}'.format(self.customGlWidget.colorMin))
self.colorControls[2].setText(
'{0:.3f}'.format(self.customGlWidget.colorMax))
self.colorControls[3].setText(
'{0:.3f}'.format(self.customGlWidget.colorMin))
self.colorControls[4].setText(
'{0:.3f}'.format(self.customGlWidget.colorMax))
self.colorControls[3].validator().setRange(
self.customGlWidget.colorMin, self.customGlWidget.colorMax, 5)
self.colorControls[4].validator().setRange(
self.customGlWidget.colorMin, self.customGlWidget.colorMax, 5)
slider = self.colorControls[5]
center = 0.5 * (extents[0] + extents[1])
newMin = self.customGlWidget.colorMin
newMax = self.customGlWidget.colorMax
newRange = (newMax - newMin) * 0.01
slider.setRange(newMin, newMax, newRange)
slider.setValue(center)
self.mplFig.canvas.draw()
self.paletteWidget.span.active_handle = None
self.paletteWidget.span.to_draw.set_visible(False)
self.customGlWidget.glDraw()
def updateColorSelFromMPL(self, eclick, erelease):
try:
extents = list(self.paletteWidget.span.extents)
self.customGlWidget.selColorMin = np.min([extents[0], extents[1]])
self.customGlWidget.selColorMax = np.max([extents[0], extents[1]])
self.colorControls[3].setText(
"{0:.3f}".format(self.customGlWidget.selColorMin))
self.colorControls[4].setText(
"{0:.3f}".format(self.customGlWidget.selColorMax))
self.colorControls[3].validator().setTop(
self.customGlWidget.selColorMax)
self.colorControls[4].validator().setBottom(
self.customGlWidget.selColorMin)
slider = self.colorControls[5]
center = 0.5 * (extents[0] + extents[1])
halfWidth = (extents[1] - extents[0]) * 0.5
newMin = self.customGlWidget.colorMin + halfWidth
newMax = self.customGlWidget.colorMax - halfWidth
newRange = (newMax - newMin) * 0.01
slider.setRange(newMin, newMax, newRange)
slider.setValue(center)
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.customGlWidget.glDraw()
except: # analysis:ignore
pass
def updateColorSel(self, position):
slider = self.sender()
if isinstance(position, int):
try:
position /= slider.scale
except: # analysis:ignore
pass
try:
extents = list(self.paletteWidget.span.extents)
width = np.abs(extents[1] - extents[0])
self.customGlWidget.selColorMin = position - 0.5*width
self.customGlWidget.selColorMax = position + 0.5*width
self.colorControls[3].setText('{0:.3f}'.format(position-0.5*width))
self.colorControls[4].setText('{0:.3f}'.format(position+0.5*width))
self.colorControls[3].validator().setTop(position + 0.5*width)
self.colorControls[4].validator().setBottom(position - 0.5*width)
newExtents = (position - 0.5*width, position + 0.5*width,
extents[2], extents[3])
self.paletteWidget.span.extents = newExtents
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.customGlWidget.glDraw()
except: # analysis:ignore
pass
def updateColorSelFromQLE(self, icSel):
try:
editor = self.sender()
txt = str(editor.text())
value = float(txt)
extents = list(self.paletteWidget.span.extents)
if icSel == 0:
if txt == "{0:.3f}".format(self.customGlWidget.selColorMin):
return
if value < self.customGlWidget.colorMin:
self.im.set_extent(
[value, self.customGlWidget.colorMax, 0, 1])
self.customGlWidget.colorMin = value
self.customGlWidget.selColorMin = value
newExtents = (value, extents[1], extents[2], extents[3])
# self.colorControls[2].validator().setBottom(value)
else:
if txt == "{0:.3f}".format(self.customGlWidget.selColorMax):
return
if value > self.customGlWidget.colorMax:
self.im.set_extent(
[self.customGlWidget.colorMin, value, 0, 1])
self.customGlWidget.colorMax = value
self.customGlWidget.selColorMax = value
newExtents = (extents[0], value, extents[2], extents[3])
# self.colorControls[1].validator().setTop(value)
center = 0.5 * (newExtents[0] + newExtents[1])
halfWidth = (newExtents[1] - newExtents[0]) * 0.5
newMin = self.customGlWidget.colorMin + halfWidth
newMax = self.customGlWidget.colorMax - halfWidth
newRange = (newMax - newMin) * 0.01
slider = self.colorControls[5]
slider.setRange(newMin, newMax, newRange)
slider.setValue(center)
self.paletteWidget.span.extents = newExtents
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.customGlWidget.glDraw()
self.mplFig.canvas.draw()
except: # analysis:ignore
pass
def projSelection(self, ind, state):
self.customGlWidget.projectionsVisibility[ind] = state
self.customGlWidget.glDraw()
anyOf = False
for proj in self.projectionControls:
anyOf = anyOf or proj.isChecked()
if anyOf:
break
self.projLinePanel.setEnabled(anyOf)
def updateRotation(self, iax, editor, position):
slider = self.sender()
if isinstance(position, int):
try:
position /= slider.scale
except: # analysis:ignore
pass
editor.setText("{0:.2f}".format(position))
self.customGlWidget.rotations[iax][0] = np.float32(position)
self.customGlWidget.updateQuats()
self.customGlWidget.glDraw()
def updateRotationFromGL(self, actPos):
for iaxis, (slider, editor) in\
enumerate(zip(self.rotationSliders, self.rotationEditors)):
value = actPos[iaxis][0]
slider.setValue(value)
editor.setText("{0:.2f}".format(value))
def updateRotationFromQLE(self, slider):
editor = self.sender()
value = float(re.sub(',', '.', str(editor.text())))
slider.setValue(value)
def updateScale(self, iax, editor, position):
slider = self.sender()
if isinstance(position, int):
try:
position /= slider.scale
except: # analysis:ignore
pass
editor.setText("{0:.2f}".format(position))
self.customGlWidget.scaleVec[iax] = np.float32(np.power(10, position))
self.customGlWidget.glDraw()
def updateScaleFromGL(self, scale):
if isinstance(scale, (int, float)):
scale = [scale, scale, scale]
for iaxis, (slider, editor) in \
enumerate(zip(self.zoomSliders, self.zoomEditors)):
value = np.log10(scale[iaxis])
slider.setValue(value)
editor.setText("{0:.2f}".format(value))
def updateScaleFromQLE(self, slider):
editor = self.sender()
value = float(re.sub(',', '.', str(editor.text())))
slider.setValue(value)
def updateFontSize(self, position):
slider = self.sender()
if isinstance(position, int):
try:
position /= slider.scale
except: # analysis:ignore
pass
self.customGlWidget.fontSize = position
self.customGlWidget.glDraw()
def updateRaysList(self, item):
if item.parent() is None:
if item.row() == 0:
if item.checkState != 1:
model = item.model()
column = item.column()
model.blockSignals(True)
parent = self.segmentsModelRoot
try:
for iChild in range(parent.rowCount()):
if iChild > 0:
cItem = parent.child(iChild, column)
if cItem.isCheckable():
cItem.setCheckState(
item.checkState())
if cItem.hasChildren():
for iGChild in range(cItem.rowCount()):
gcItem = cItem.child(iGChild, 0)
if gcItem.isCheckable():
gcItem.setCheckState(
item.checkState())
finally:
model.blockSignals(False)
model.layoutChanged.emit()
else:
parent = self.segmentsModelRoot
model = item.model()
for iChild in range(parent.rowCount()):
outState = item.checkState()
if iChild > 0:
cItem = parent.child(iChild, item.column())
if item.column() > 0:
if cItem.checkState() != item.checkState():
outState = 1
break
model.blockSignals(True)
parent.child(0, item.column()).setCheckState(outState)
model.blockSignals(False)
model.layoutChanged.emit()
else:
parent = self.segmentsModelRoot
model = item.model()
for iChild in range(parent.rowCount()):
outState = item.checkState()
if iChild > 0:
cItem = parent.child(iChild, item.column())
if cItem.hasChildren():
for iGChild in range(cItem.rowCount()):
gcItem = cItem.child(iGChild, 0)
if gcItem.isCheckable():
if gcItem.checkState() !=\
item.checkState():
outState = 1
break
if outState == 1:
break
model.blockSignals(True)
parent.child(0, item.column()).setCheckState(outState)
model.blockSignals(False)
model.layoutChanged.emit()
if item.column() == 3:
self.customGlWidget.labelsToPlot = []
for ioe in range(self.segmentsModelRoot.rowCount() - 1):
if self.segmentsModelRoot.child(ioe + 1, 3).checkState() == 2:
self.customGlWidget.labelsToPlot.append(str(
self.segmentsModelRoot.child(ioe + 1, 0).text()))
else:
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.customGlWidget.glDraw()
def oeTreeMenu(self, position):
indexes = self.oeTree.selectedIndexes()
level = 100
if len(indexes) > 0:
level = 0
index = indexes[0]
selectedItem = self.segmentsModel.itemFromIndex(index)
while index.parent().isValid():
index = index.parent()
level += 1
if level == 0:
menu = qt.QMenu()
menu.addAction('Center here',
partial(self.centerEl, str(selectedItem.text())))
menu.exec_(self.oeTree.viewport().mapToGlobal(position))
else:
pass
def updateGrid(self, iax, editor, position):
slider = self.sender()
if isinstance(position, int):
try:
position /= slider.scale
except: # analysis:ignore
pass
editor.setText("{0:.2f}".format(position))
if position != 0:
self.customGlWidget.aPos[iax] = np.float32(position)
self.customGlWidget.glDraw()
def updateGridFromQLE(self, slider):
editor = self.sender()
value = float(re.sub(',', '.', str(editor.text())))
slider.setValue(value)
def updateGridFromGL(self, aPos):
for iaxis, (slider, editor) in\
enumerate(zip(self.gridSliders, self.gridEditors)):
value = aPos[iaxis]
slider.setValue(value)
editor.setText("{0:.2f}".format(value))
def glMenu(self, position):
menu = qt.QMenu()
subMenuF = menu.addMenu('File')
for actText, actFunc in zip(['Export to image', 'Save scene geometry',
'Load scene geometry'],
[self.exportToImage, self.saveSceneDialog,
self.loadSceneDialog]):
mAction = qt.QAction(self)
mAction.setText(actText)
mAction.triggered.connect(actFunc)
subMenuF.addAction(mAction)
menu.addSeparator()
mAction = qt.QAction(self)
mAction.setText("Show Virtual Screen")
mAction.setCheckable(True)
mAction.setChecked(False if self.customGlWidget.virtScreen is None
else True)
mAction.triggered.connect(self.customGlWidget.toggleVScreen)
menu.addAction(mAction)
for iAction, actCnt in enumerate(self.sceneControls):
if 'Virtual Screen' not in actCnt.text():
continue
mAction = qt.QAction(self)
mAction.setText(actCnt.text())
mAction.setCheckable(True)
mAction.setChecked(bool(actCnt.checkState()))
mAction.triggered.connect(partial(self.setSceneParam, iAction))
menu.addAction(mAction)
menu.addSeparator()
for iAction, actCnt in enumerate(self.gridControls):
mAction = qt.QAction(self)
if actCnt.staticMetaObject.className() == 'QCheckBox':
actText = actCnt.text()
actCheck = bool(actCnt.checkState())
else:
actText = actCnt.title()
actCheck = actCnt.isChecked()
mAction.setText(actText)
mAction.setCheckable(True)
mAction.setChecked(actCheck)
mAction.triggered.connect(
partial(self.setGridParam, iAction))
if iAction == 0: # perspective
menu.addAction(mAction)
elif iAction == 1: # show grid
subMenuG = menu.addMenu('Coordinate grid')
subMenuG.addAction(mAction)
elif iAction == 2: # fine grid
subMenuG.addAction(mAction)
menu.addSeparator()
subMenuP = menu.addMenu('Projections')
for iAction, actCnt in enumerate(self.projectionControls):
mAction = qt.QAction(self)
mAction.setText(actCnt.text())
mAction.setCheckable(True)
mAction.setChecked(bool(actCnt.checkState()))
mAction.triggered.connect(
partial(self.setProjectionParam, iAction))
subMenuP.addAction(mAction)
menu.addSeparator()
subMenuS = menu.addMenu('Scene')
for iAction, actCnt in enumerate(self.sceneControls):
if 'Virtual Screen' in actCnt.text():
continue
mAction = qt.QAction(self)
mAction.setText(actCnt.text())
mAction.setCheckable(True)
mAction.setChecked(bool(actCnt.checkState()))
mAction.triggered.connect(partial(self.setSceneParam, iAction))
subMenuS.addAction(mAction)
menu.addSeparator()
menu.exec_(self.customGlWidget.mapToGlobal(position))
def exportToImage(self):
saveDialog = qt.QFileDialog()
saveDialog.setFileMode(qt.QFileDialog.AnyFile)
saveDialog.setAcceptMode(qt.QFileDialog.AcceptSave)
saveDialog.setNameFilter("BMP files (*.bmp);;JPG files (*.jpg);;JPEG files (*.jpeg);;PNG files (*.png);;TIFF files (*.tif)") # analysis:ignore
saveDialog.selectNameFilter("JPG files (*.jpg)")
if (saveDialog.exec_()):
image = self.customGlWidget.grabFrameBuffer(withAlpha=True)
filename = saveDialog.selectedFiles()[0]
extension = str(saveDialog.selectedNameFilter())[-5:-1].strip('.')
if not filename.endswith(extension):
filename = "{0}.{1}".format(filename, extension)
image.save(filename)
def saveSceneDialog(self):
saveDialog = qt.QFileDialog()
saveDialog.setFileMode(qt.QFileDialog.AnyFile)
saveDialog.setAcceptMode(qt.QFileDialog.AcceptSave)
saveDialog.setNameFilter("Numpy files (*.npy)") # analysis:ignore
if (saveDialog.exec_()):
filename = saveDialog.selectedFiles()[0]
extension = 'npy'
if not filename.endswith(extension):
filename = "{0}.{1}".format(filename, extension)
self.saveScene(filename)
def loadSceneDialog(self):
loadDialog = qt.QFileDialog()
loadDialog.setFileMode(qt.QFileDialog.AnyFile)
loadDialog.setAcceptMode(qt.QFileDialog.AcceptOpen)
loadDialog.setNameFilter("Numpy files (*.npy)") # analysis:ignore
if (loadDialog.exec_()):
filename = loadDialog.selectedFiles()[0]
extension = 'npy'
if not filename.endswith(extension):
filename = "{0}.{1}".format(filename, extension)
self.loadScene(filename)
def saveScene(self, filename):
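        """Store the current viewing parameters of the GL widget, the window
        geometry, splitter sizes and color axis as a dict in a .npy file."""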
params = dict()
for param in ['aspect', 'cameraAngle', 'projectionsVisibility',
'lineOpacity', 'lineWidth', 'pointOpacity', 'pointSize',
'lineProjectionOpacity', 'lineProjectionWidth',
'pointProjectionOpacity', 'pointProjectionSize',
'coordOffset', 'cutoffI', 'drawGrid', 'aPos', 'scaleVec',
'tVec', 'cameraPos', 'rotations',
'visibleAxes', 'signs', 'selColorMin', 'selColorMax',
'colorMin', 'colorMax', 'fineGridEnabled',
'useScalableFont', 'invertColors', 'perspectiveEnabled',
'globalNorm', 'viewPortGL', 'iHSV']:
params[param] = getattr(self.customGlWidget, param)
params['size'] = self.geometry()
params['sizeGL'] = self.canvasSplitter.sizes()
params['colorAxis'] = str(self.colorControls[0].currentText())
try:
np.save(filename, params)
except: # analysis:ignore
print('Error saving file')
return
print('Saved scene to {}'.format(filename))
def loadScene(self, filename):
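        """Restore a view saved by saveScene and push the loaded values back
        into the GL widget and the control-panel widgets."""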
try:
params = np.load(filename).item()
except: # analysis:ignore
print('Error loading file')
return
for param in ['aspect', 'cameraAngle', 'projectionsVisibility',
'lineOpacity', 'lineWidth', 'pointOpacity', 'pointSize',
'lineProjectionOpacity', 'lineProjectionWidth',
'pointProjectionOpacity', 'pointProjectionSize',
'coordOffset', 'cutoffI', 'drawGrid', 'aPos', 'scaleVec',
'tVec', 'cameraPos', 'rotations',
'visibleAxes', 'signs', 'selColorMin', 'selColorMax',
'colorMin', 'colorMax', 'fineGridEnabled',
'useScalableFont', 'invertColors', 'perspectiveEnabled',
'globalNorm', 'viewPortGL', 'iHSV']:
setattr(self.customGlWidget, param, params[param])
self.setGeometry(params['size'])
self.canvasSplitter.setSizes(params['sizeGL'])
self.updateScaleFromGL(self.customGlWidget.scaleVec)
self.blockSignals(True)
self.updateRotationFromGL(self.customGlWidget.rotations)
self.updateOpacityFromGL([self.customGlWidget.lineOpacity,
self.customGlWidget.lineWidth,
self.customGlWidget.pointOpacity,
self.customGlWidget.pointSize])
for iax, checkBox in enumerate(self.projectionControls):
checkBox.setChecked(self.customGlWidget.projectionsVisibility[iax])
self.gridPanel.setChecked(self.customGlWidget.drawGrid)
self.checkBoxFineGrid.setChecked(self.customGlWidget.fineGridEnabled)
self.checkBoxPerspective.setChecked(
self.customGlWidget.perspectiveEnabled)
self.updateProjectionOpacityFromGL(
[self.customGlWidget.lineProjectionOpacity,
self.customGlWidget.lineProjectionWidth,
self.customGlWidget.pointProjectionOpacity,
self.customGlWidget.pointProjectionSize])
self.updateGridFromGL(self.customGlWidget.aPos)
self.sceneControls[4].setChecked(self.customGlWidget.invertColors)
self.sceneControls[5].setChecked(self.customGlWidget.useScalableFont)
self.glNormCB.setChecked(self.customGlWidget.globalNorm)
self.iHSVCB.setChecked(self.customGlWidget.iHSV)
self.blockSignals(False)
self.mplFig.canvas.draw()
colorCB = self.colorControls[0]
colorCB.setCurrentIndex(colorCB.findText(params['colorAxis']))
newExtents = list(self.paletteWidget.span.extents)
newExtents[0] = params['selColorMin']
newExtents[1] = params['selColorMax']
try:
self.paletteWidget.span.extents = newExtents
except: # analysis:ignore
pass
self.updateColorSelFromMPL(0, 0)
print('Loaded scene from {}'.format(filename))
def startRecordingMovie(self): # by F7
if self.generator is None:
return
startFrom = self.startFrom if hasattr(self, 'startFrom') else 0
for it in self.generator(*self.generatorArgs):
self.bl.propagate_flow(startFrom=startFrom)
rayPath = self.bl.export_to_glow()
self.updateOEsList(rayPath)
self.customGlWidget.glDraw()
if self.isHidden():
self.show()
image = self.customGlWidget.grabFrameBuffer(withAlpha=True)
try:
image.save(self.bl.glowFrameName)
cNameSp = os.path.splitext(self.bl.glowFrameName)
cName = cNameSp[0] + "_color" + cNameSp[1]
self.mplFig.savefig(cName)
except AttributeError:
print('no glowFrameName was given!')
print("Finished with the movie.")
def centerEl(self, oeName):
self.customGlWidget.coordOffset = list(self.oesList[str(oeName)][2])
self.customGlWidget.tVec = np.float32([0, 0, 0])
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
self.customGlWidget.glDraw()
def updateCutoffFromQLE(self):
try:
editor = self.sender()
value = float(re.sub(',', '.', str(editor.text())))
extents = list(self.paletteWidget.span.extents)
self.customGlWidget.cutoffI = np.float32(value)
self.customGlWidget.populateVerticesArray(self.segmentsModelRoot)
newExtents = (extents[0], extents[1],
self.customGlWidget.cutoffI, extents[3])
self.paletteWidget.span.extents = newExtents
self.customGlWidget.glDraw()
except: # analysis:ignore
pass
def updateExplosionDepth(self):
try:
editor = self.sender()
value = float(re.sub(',', '.', str(editor.text())))
self.customGlWidget.depthScaler = np.float32(value)
if self.customGlWidget.virtScreen is not None:
self.customGlWidget.populateVScreen()
self.customGlWidget.glDraw()
except: # analysis:ignore
pass
def updateOpacity(self, iax, editor, position):
slider = self.sender()
if isinstance(position, int):
try:
position /= slider.scale
except: # analysis:ignore
if _DEBUG_:
raise
else:
pass
editor.setText("{0:.2f}".format(position))
if iax == 0:
self.customGlWidget.lineOpacity = np.float32(position)
elif iax == 1:
self.customGlWidget.lineWidth = np.float32(position)
elif iax == 2:
self.customGlWidget.pointOpacity = np.float32(position)
elif iax == 3:
self.customGlWidget.pointSize = np.float32(position)
self.customGlWidget.glDraw()
def updateOpacityFromQLE(self, slider):
editor = self.sender()
value = float(str(editor.text()))
slider.setValue(value)
self.customGlWidget.glDraw()
def updateOpacityFromGL(self, ops):
for iaxis, (slider, editor, op) in\
enumerate(zip(self.opacitySliders, self.opacityEditors, ops)):
slider.setValue(op)
editor.setText("{0:.2f}".format(op))
def updateTileFromQLE(self, ia):
editor = self.sender()
value = float(str(editor.text()))
        self.customGlWidget.tiles[ia] = int(value)
self.customGlWidget.glDraw()
def updateProjectionOpacity(self, iax, editor, position):
slider = self.sender()
if isinstance(position, int):
try:
position /= slider.scale
except: # analysis:ignore
if _DEBUG_:
raise
else:
pass
editor.setText("{0:.2f}".format(position))
if iax == 0:
self.customGlWidget.lineProjectionOpacity = np.float32(position)
elif iax == 1:
self.customGlWidget.lineProjectionWidth = np.float32(position)
elif iax == 2:
self.customGlWidget.pointProjectionOpacity = np.float32(position)
elif iax == 3:
self.customGlWidget.pointProjectionSize = np.float32(position)
self.customGlWidget.glDraw()
def updateProjectionOpacityFromQLE(self, slider):
editor = self.sender()
value = float(re.sub(',', '.', str(editor.text())))
slider.setValue(value)
self.customGlWidget.glDraw()
def updateProjectionOpacityFromGL(self, ops):
for iaxis, (slider, editor, op) in\
enumerate(zip(self.projectionOpacitySliders,
self.projectionOpacityEditors, ops)):
slider.setValue(op)
editor.setText("{0:.2f}".format(op))
class xrtGlWidget(qt.QGLWidget):
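    """OpenGL canvas that renders the beamline model: ray segments,
    footprints, optical-element surfaces, the virtual screen and the
    coordinate grid. The signals below notify the embedding GUI about
    rotation, scale and histogram updates."""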
rotationUpdated = qt.pyqtSignal(np.ndarray)
scaleUpdated = qt.pyqtSignal(np.ndarray)
histogramUpdated = qt.pyqtSignal(tuple)
def __init__(self, parent, arrayOfRays, modelRoot, oesList, b2els, signal):
qt.QGLWidget.__init__(self, parent)
self.QookSignal = signal
self.virtScreen = None
self.virtBeam = None
self.virtDotsArray = None
self.virtDotsColor = None
self.vScreenForColors = False
self.globalColorIndex = None
self.isVirtScreenNormal = False
self.segmentModel = modelRoot
self.vScreenSize = 0.5
self.setMinimumSize(400, 400)
self.aspect = 1.
self.depthScaler = 0.
self.viewPortGL = [0, 0, 500, 500]
self.perspectiveEnabled = True
self.cameraAngle = 60
self.setMouseTracking(True)
self.surfCPOrder = 4
self.oesToPlot = []
self.labelsToPlot = []
self.tiles = [2, 2]
self.arrayOfRays = arrayOfRays
self.beamsDict = arrayOfRays[1]
self.oesList = oesList
self.oeContour = dict()
self.slitEdges = dict()
self.beamsToElements = b2els
self.slitThickness = 2. # mm
self.contourWidth = 2
self.projectionsVisibility = [0, 0, 0]
self.lineOpacity = 0.1
self.lineWidth = 1
self.pointOpacity = 0.1
self.pointSize = 1
self.linesDepthTest = True
self.pointsDepthTest = False
self.labelCoordPrec = 1
self.lineProjectionOpacity = 0.1
self.lineProjectionWidth = 1
self.pointProjectionOpacity = 0.1
self.pointProjectionSize = 1
self.coordOffset = [0., 0., 0.]
self.enableAA = False
self.enableBlending = True
self.cutoffI = 0.01
self.getColor = raycing.get_energy
self.globalNorm = True
self.iHSV = False
self.newColorAxis = True
self.colorMin = -1e20
self.colorMax = 1e20
self.selColorMin = None
self.selColorMax = None
self.scaleVec = np.array([1e3, 1e1, 1e3])
self.maxLen = 1.
self.showLostRays = False
self.showLocalAxes = False
self.populateVerticesArray(modelRoot)
self.drawGrid = True
self.fineGridEnabled = False
self.showOeLabels = False
self.aPos = [0.9, 0.9, 0.9]
self.prevMPos = [0, 0]
self.prevWC = np.float32([0, 0, 0])
self.coordinateGridLineWidth = 1
# self.fixedFontType = 'GLUT_BITMAP_TIMES_ROMAN'
self.fixedFontType = 'GLUT_BITMAP_HELVETICA'
self.fixedFontSize = '12' # 10, 12, 18 for Helvetica; 10, 24 for Roman
self.fixedFont = getattr(gl, "{0}_{1}".format(self.fixedFontType,
self.fixedFontSize))
self.useScalableFont = False
self.fontSize = 5
self.scalableFontType = gl.GLUT_STROKE_ROMAN
# self.scalableFontType = gl.GLUT_STROKE_MONO_ROMAN
self.scalableFontWidth = 1
self.useFontAA = False
self.tVec = np.array([0., 0., 0.])
self.cameraTarget = [0., 0., 0.]
self.cameraPos = np.float32([3.5, 0., 0.])
self.isEulerian = False
self.rotations = np.float32([[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
self.textOrientation = [0.5, 0.5, 0.5, 0.5]
self.updateQuats()
pModelT = np.identity(4)
self.visibleAxes = np.argmax(np.abs(pModelT), axis=1)
self.signs = np.ones_like(pModelT)
self.invertColors = False
self.showHelp = False
# self.glDraw()
def eulerToQ(self, rotMatrXYZ):
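        """Convert three rotation angles in degrees (the first element of
        each [angle, x, y, z] row of rotMatrXYZ) to a quaternion
        [w, x, y, z]."""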
hPitch = np.radians(rotMatrXYZ[0][0]) * 0.5
hRoll = np.radians(rotMatrXYZ[1][0]) * 0.5
hYaw = np.radians(rotMatrXYZ[2][0]) * 0.5
cosPitch = np.cos(hPitch)
sinPitch = np.sin(hPitch)
cosRoll = np.cos(hRoll)
sinRoll = np.sin(hRoll)
cosYaw = np.cos(hYaw)
sinYaw = np.sin(hYaw)
return [cosPitch*cosRoll*cosYaw - sinPitch*sinRoll*sinYaw,
sinRoll*sinYaw*cosPitch + sinPitch*cosRoll*cosYaw,
sinRoll*cosPitch*cosYaw - sinPitch*sinYaw*cosRoll,
sinYaw*cosPitch*cosRoll + sinPitch*sinRoll*cosYaw]
def qToVec(self, quat):
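        """Convert a quaternion [w, x, y, z] to glRotatef form:
        [angle in degrees, axis x, axis y, axis z]."""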
angle = 2 * np.arccos(quat[0])
q2v = np.sin(angle * 0.5)
qbt1 = quat[1] / q2v if q2v != 0 else 0
qbt2 = quat[2] / q2v if q2v != 0 else 0
qbt3 = quat[3] / q2v if q2v != 0 else 0
return [np.degrees(angle), qbt1, qbt2, qbt3]
def rotateZYX(self):
if self.isEulerian:
gl.glRotatef(*self.rotations[0])
gl.glRotatef(*self.rotations[1])
gl.glRotatef(*self.rotations[2])
else:
gl.glRotatef(*self.rotationVec)
def updateQuats(self):
self.qRot = self.eulerToQ(self.rotations)
self.rotationVec = self.qToVec(self.qRot)
self.qText = self.qToVec(
self.quatMult([self.qRot[0], -self.qRot[1],
-self.qRot[2], -self.qRot[3]],
self.textOrientation))
def vecToQ(self, vec, alpha):
""" Quaternion from vector and angle"""
return np.insert(vec*np.sin(alpha*0.5), 0, np.cos(alpha*0.5))
def rotateVecQ(self, vec, q):
qn = np.copy(q)
qn[1:] *= -1
return self.quatMult(self.quatMult(
q, self.vecToQ(vec, np.pi*0.25)), qn)[1:]
def setPointSize(self, pSize):
self.pointSize = pSize
self.glDraw()
def setLineWidth(self, lWidth):
self.lineWidth = lWidth
self.glDraw()
def populateVerticesOnly(self, segmentsModelRoot):
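        """Rebuild the vertex and color arrays for ray segments and
        footprints from the check states of the segments model, update the
        color limits and the overall scene extent (maxLen), and optionally
        append lost rays."""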
if segmentsModelRoot is None:
return
self.segmentModel = segmentsModelRoot
# signal = self.QookSignal
self.verticesArray = None
self.footprintsArray = None
self.oesToPlot = []
self.labelsToPlot = []
self.footprints = dict()
colorsRays = None
alphaRays = None
colorsDots = None
alphaDots = None
globalColorsDots = None
globalColorsRays = None
verticesArrayLost = None
colorsRaysLost = None
footprintsArrayLost = None
colorsDotsLost = None
maxLen = 1.
tmpMax = -1.0e12 * np.ones(3)
tmpMin = -1. * tmpMax
if self.newColorAxis:
newColorMax = -1e20
newColorMin = 1e20
# self.selColorMax = newColorMax
# self.selColorMin = newColorMin
else:
newColorMax = self.colorMax
newColorMin = self.colorMin
# totalOEs = range(segmentsModelRoot.rowCount() - 2)
for ioe in range(segmentsModelRoot.rowCount() - 1):
ioeItem = segmentsModelRoot.child(ioe + 1, 0)
# try:
# if signal is not None:
# signalStr = "Plotting beams for {}, %p% done.".format(
# str(ioeItem.text()))
# signal.emit((float(ioe) / float(totalOEs),
# signalStr))
# except:
# pass
if segmentsModelRoot.child(ioe + 1, 2).checkState() == 2:
self.oesToPlot.append(str(ioeItem.text()))
self.footprints[str(ioeItem.text())] = None
if segmentsModelRoot.child(ioe + 1, 3).checkState() == 2:
self.labelsToPlot.append(str(ioeItem.text()))
try:
startBeam = self.beamsDict[
self.oesList[str(ioeItem.text())][1]]
# lostNum = self.oesList[str(ioeItem.text())][0].lostNum
# good = startBeam.state > 0
good = (startBeam.state == 1) | (startBeam.state == 2)
if len(startBeam.state[good]) > 0:
for tmpCoord, tAxis in enumerate(['x', 'y', 'z']):
axMin = np.min(getattr(startBeam, tAxis)[good])
axMax = np.max(getattr(startBeam, tAxis)[good])
if axMin < tmpMin[tmpCoord]:
tmpMin[tmpCoord] = axMin
if axMax > tmpMax[tmpCoord]:
tmpMax[tmpCoord] = axMax
newColorMax = max(np.max(
self.getColor(startBeam)[good]),
newColorMax)
newColorMin = min(np.min(
self.getColor(startBeam)[good]),
newColorMin)
except: # analysis:ignore
if _DEBUG_:
raise
else:
continue
if self.newColorAxis:
if newColorMin != self.colorMin:
self.colorMin = newColorMin
self.selColorMin = self.colorMin
if newColorMax != self.colorMax:
self.colorMax = newColorMax
self.selColorMax = self.colorMax
if ioeItem.hasChildren():
for isegment in range(ioeItem.rowCount()):
segmentItem0 = ioeItem.child(isegment, 0)
if segmentItem0.checkState() == 2:
endBeam = self.beamsDict[
self.oesList[str(segmentItem0.text())[3:]][1]]
# good = startBeam.state > 0
good = (startBeam.state == 1) | (startBeam.state == 2)
if len(startBeam.state[good]) == 0:
continue
intensity = startBeam.Jss + startBeam.Jpp
intensityAll = intensity / np.max(intensity[good])
good = np.logical_and(good,
intensityAll >= self.cutoffI)
goodC = np.logical_and(
self.getColor(startBeam) <= self.selColorMax,
self.getColor(startBeam) >= self.selColorMin)
good = np.logical_and(good, goodC)
if self.vScreenForColors and\
self.globalColorIndex is not None:
good = np.logical_and(good, self.globalColorIndex)
globalColorsRays = np.repeat(
self.globalColorArray[good], 2, axis=0) if\
globalColorsRays is None else np.concatenate(
(globalColorsRays,
np.repeat(self.globalColorArray[good], 2,
axis=0)))
else:
if self.globalNorm:
alphaMax = 1.
else:
if len(intensity[good]) > 0:
alphaMax = np.max(intensity[good])
else:
alphaMax = 1.
alphaMax = alphaMax if alphaMax != 0 else 1.
alphaRays = np.repeat(intensity[good] / alphaMax,
2).T\
if alphaRays is None else np.concatenate(
(alphaRays.T,
np.repeat(intensity[good] / alphaMax,
2).T))
colorsRays = np.repeat(np.array(self.getColor(
startBeam)[good]), 2).T if\
colorsRays is None else np.concatenate(
(colorsRays.T,
np.repeat(np.array(self.getColor(
startBeam)[good]), 2).T))
vertices = np.array(
[startBeam.x[good] - self.coordOffset[0],
endBeam.x[good] - self.coordOffset[0]]).flatten(
'F')
vertices = np.vstack((vertices, np.array(
[startBeam.y[good] - self.coordOffset[1],
endBeam.y[good] - self.coordOffset[1]]).flatten(
'F')))
vertices = np.vstack((vertices, np.array(
[startBeam.z[good] - self.coordOffset[2],
endBeam.z[good] - self.coordOffset[2]]).flatten(
'F')))
self.verticesArray = vertices.T if\
self.verticesArray is None else\
np.vstack((self.verticesArray, vertices.T))
if self.showLostRays:
try:
lostNum = self.oesList[str(
segmentItem0.text())[3:]][0].lostNum
except: # analysis:ignore
lostNum = 1e3
lost = startBeam.state == lostNum
try:
lostOnes = len(startBeam.x[lost]) * 2
except: # analysis:ignore
lostOnes = 0
colorsRaysLost = lostOnes if colorsRaysLost is\
None else colorsRaysLost + lostOnes
if lostOnes > 0:
verticesLost = np.array(
[startBeam.x[lost] - self.coordOffset[0],
endBeam.x[lost] -
self.coordOffset[0]]).flatten('F')
verticesLost = np.vstack((verticesLost, np.array( # analysis:ignore
[startBeam.y[lost] - self.coordOffset[1],
endBeam.y[lost] -
self.coordOffset[1]]).flatten('F')))
verticesLost = np.vstack((verticesLost, np.array( # analysis:ignore
[startBeam.z[lost] - self.coordOffset[2],
endBeam.z[lost] -
self.coordOffset[2]]).flatten('F')))
verticesArrayLost = verticesLost.T if\
verticesArrayLost is None else\
np.vstack((verticesArrayLost, verticesLost.T)) # analysis:ignore
if segmentsModelRoot.child(ioe + 1, 1).checkState() == 2:
# good = startBeam.state > 0
good = (startBeam.state == 1) | (startBeam.state == 2)
if len(startBeam.state[good]) == 0:
continue
intensity = startBeam.Jss + startBeam.Jpp
try:
intensityAll = intensity / np.max(intensity[good])
good = np.logical_and(good, intensityAll >= self.cutoffI)
goodC = np.logical_and(
self.getColor(startBeam) <= self.selColorMax,
self.getColor(startBeam) >= self.selColorMin)
good = np.logical_and(good, goodC)
except: # analysis:ignore
if _DEBUG_:
raise
else:
continue
if self.vScreenForColors and self.globalColorIndex is not None:
good = np.logical_and(good, self.globalColorIndex)
globalColorsDots = self.globalColorArray[good] if\
globalColorsDots is None else np.concatenate(
(globalColorsDots, self.globalColorArray[good]))
else:
if self.globalNorm:
alphaMax = 1.
else:
if len(intensity[good]) > 0:
alphaMax = np.max(intensity[good])
else:
alphaMax = 1.
alphaMax = alphaMax if alphaMax != 0 else 1.
alphaDots = intensity[good].T / alphaMax if\
alphaDots is None else np.concatenate(
(alphaDots.T, intensity[good].T / alphaMax))
colorsDots = np.array(self.getColor(
startBeam)[good]).T if\
colorsDots is None else np.concatenate(
(colorsDots.T, np.array(self.getColor(
startBeam)[good]).T))
vertices = np.array(startBeam.x[good] - self.coordOffset[0])
vertices = np.vstack((vertices, np.array(
startBeam.y[good] - self.coordOffset[1])))
vertices = np.vstack((vertices, np.array(
startBeam.z[good] - self.coordOffset[2])))
self.footprintsArray = vertices.T if\
self.footprintsArray is None else\
np.vstack((self.footprintsArray, vertices.T))
if self.showLostRays:
try:
lostNum = self.oesList[str(ioeItem.text())][0].lostNum
except: # analysis:ignore
lostNum = 1e3
lost = startBeam.state == lostNum
try:
lostOnes = len(startBeam.x[lost])
except: # analysis:ignore
lostOnes = 0
colorsDotsLost = lostOnes if\
colorsDotsLost is None else\
colorsDotsLost + lostOnes
if lostOnes > 0:
verticesLost = np.array(startBeam.x[lost] -
self.coordOffset[0])
verticesLost = np.vstack((verticesLost, np.array(
startBeam.y[lost] - self.coordOffset[1])))
verticesLost = np.vstack((verticesLost, np.array(
startBeam.z[lost] - self.coordOffset[2])))
footprintsArrayLost = verticesLost.T if\
footprintsArrayLost is None else\
np.vstack((footprintsArrayLost, verticesLost.T))
try:
if self.colorMin == self.colorMax:
if self.colorMax == 0: # and self.colorMin == 0 too
self.colorMin, self.colorMax = -0.1, 0.1
else:
self.colorMin = self.colorMax * 0.99
self.colorMax *= 1.01
if self.vScreenForColors and self.globalColorIndex is not None:
self.raysColor = globalColorsRays
elif colorsRays is not None:
colorsRays = colorFactor * (colorsRays-self.colorMin) /\
(self.colorMax - self.colorMin)
colorsRays = np.dstack((colorsRays,
np.ones_like(alphaRays)*colorSaturation, # analysis:ignore
alphaRays if self.iHSV else
np.ones_like(alphaRays)))
colorsRGBRays = np.squeeze(mpl.colors.hsv_to_rgb(colorsRays))
if self.globalNorm and len(alphaRays) > 0:
alphaMax = np.max(alphaRays)
else:
alphaMax = 1.
alphaMax = alphaMax if alphaMax != 0 else 1.
alphaColorRays = np.array([alphaRays / alphaMax]).T
self.raysColor = np.float32(np.hstack([colorsRGBRays,
alphaColorRays]))
if self.showLostRays:
if colorsRaysLost is not None:
lostColor = np.zeros((colorsRaysLost, 4))
lostColor[:, 0] = 0.5
lostColor[:, 3] = 0.25
self.raysColor = np.float32(np.vstack((self.raysColor,
lostColor)))
if verticesArrayLost is not None:
self.verticesArray = np.float32(np.vstack((
self.verticesArray, verticesArrayLost)))
except: # analysis:ignore
if _DEBUG_:
raise
else:
pass
try:
if self.colorMin == self.colorMax:
if self.colorMax == 0: # and self.colorMin == 0 too
self.colorMin, self.colorMax = -0.1, 0.1
else:
self.colorMin = self.colorMax * 0.99
self.colorMax *= 1.01
if self.vScreenForColors and self.globalColorIndex is not None:
self.dotsColor = globalColorsDots
elif colorsDots is not None:
colorsDots = colorFactor * (colorsDots-self.colorMin) /\
(self.colorMax - self.colorMin)
colorsDots = np.dstack((colorsDots,
np.ones_like(alphaDots)*colorSaturation, # analysis:ignore
alphaDots if self.iHSV else
np.ones_like(alphaDots)))
colorsRGBDots = np.squeeze(mpl.colors.hsv_to_rgb(colorsDots))
if self.globalNorm and len(alphaDots) > 0:
alphaMax = np.max(alphaDots)
else:
alphaMax = 1.
alphaMax = alphaMax if alphaMax != 0 else 1.
alphaColorDots = np.array([alphaDots / alphaMax]).T
self.dotsColor = np.float32(np.hstack([colorsRGBDots,
alphaColorDots]))
if self.showLostRays:
if colorsDotsLost is not None:
lostColor = np.zeros((colorsDotsLost, 4))
lostColor[:, 0] = 0.5
lostColor[:, 3] = 0.25
self.dotsColor = np.float32(np.vstack((self.dotsColor,
lostColor)))
if footprintsArrayLost is not None:
self.footprintsArray = np.float32(np.vstack((
self.footprintsArray, footprintsArrayLost)))
except: # analysis:ignore
if _DEBUG_:
raise
else:
pass
tmpMaxLen = np.max(tmpMax - tmpMin)
if tmpMaxLen > maxLen:
maxLen = tmpMaxLen
self.maxLen = maxLen
self.newColorAxis = False
def populateVerticesArray(self, segmentsModelRoot):
self.populateVerticesOnly(segmentsModelRoot)
self.populateVScreen()
if self.vScreenForColors:
self.populateVerticesOnly(segmentsModelRoot)
def modelToWorld(self, coords, dimension=None):
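        """Map model coordinates (mm, relative to coordOffset) to normalized
        scene coordinates: (coords + tVec) * scaleVec / maxLen. The inverse
        transform is worldToModel."""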
self.maxLen = self.maxLen if self.maxLen != 0 else 1.
if dimension is None:
return np.float32(((coords + self.tVec) * self.scaleVec) /
self.maxLen)
else:
return np.float32(((coords[dimension] + self.tVec[dimension]) *
self.scaleVec[dimension]) / self.maxLen)
def worldToModel(self, coords):
return np.float32(coords * self.maxLen / self.scaleVec - self.tVec)
def drawText(self, coord, text, noScalable=False, alignment=None,
useCaption=False):
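        """Draw text at a 3D position with the fixed bitmap font or, if
        enabled, the scalable stroke font. With useCaption=True a free spot
        on screen is searched (using labelsBounds) and the label is connected
        to its anchor by a leader line."""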
useScalableFont = False if noScalable else self.useScalableFont
pView = gl.glGetIntegerv(gl.GL_VIEWPORT)
pProjection = gl.glGetDoublev(gl.GL_PROJECTION_MATRIX)
if not useScalableFont:
gl.glRasterPos3f(*coord)
for symbol in text:
gl.glutBitmapCharacter(self.fixedFont, ord(symbol))
else:
tLineWidth = gl.glGetDoublev(gl.GL_LINE_WIDTH)
tLineAA = gl.glIsEnabled(gl.GL_LINE_SMOOTH)
if self.useFontAA:
gl.glEnable(gl.GL_LINE_SMOOTH)
else:
gl.glDisable(gl.GL_LINE_SMOOTH)
gl.glLineWidth(self.scalableFontWidth)
fontScale = self.fontSize / 12500.
coordShift = np.zeros(3, dtype=np.float32)
fontSizeLoc = np.float32(np.array([104.76, 119.05, 0])*fontScale)
if alignment is not None:
if alignment[0] == 'left':
coordShift[0] = -fontSizeLoc[0] * len(text)
else:
coordShift[0] = fontSizeLoc[0]
if alignment[1] == 'top':
vOffset = 0.5
elif alignment[1] == 'bottom':
vOffset = -1.5
else:
vOffset = -0.5
coordShift[1] = vOffset * fontSizeLoc[1]
if useCaption:
textWidth = 0
for symbol in text.strip(" "):
textWidth += gl.glutStrokeWidth(self.scalableFontType,
ord(symbol))
gl.glPushMatrix()
gl.glTranslatef(*coord)
gl.glRotatef(*self.qText)
gl.glTranslatef(*coordShift)
gl.glScalef(fontScale, fontScale, fontScale)
depthCounter = 1
spaceFound = False
while not spaceFound:
depthCounter += 1
for dy in [-1, 1]:
for dx in [1, -1]:
textShift = (depthCounter+0.5*dy) * 119.05*1.5
gl.glPushMatrix()
textPos = [dx*depthCounter * 119.05*1.5 +
(0 if dx > 0 else -1) * textWidth,
dy*textShift, 0]
gl.glTranslatef(*textPos)
pModel = gl.glGetDoublev(gl.GL_MODELVIEW_MATRIX)
bottomLeft = np.array(gl.gluProject(
*[0, 0, 0], model=pModel, proj=pProjection,
view=pView)[:-1])
topRight = np.array(gl.gluProject(
*[textWidth, 119.05*2.5, 0],
model=pModel, proj=pProjection,
view=pView)[:-1])
gl.glPopMatrix()
spaceFound = True
for oeLabel in list(self.labelsBounds.values()):
if not (bottomLeft[0] > oeLabel[1][0] or
bottomLeft[1] > oeLabel[1][1] or
topRight[0] < oeLabel[0][0] or
topRight[1] < oeLabel[0][1]):
spaceFound = False
if spaceFound:
self.labelsBounds[text] = [0]*2
self.labelsBounds[text][0] = bottomLeft
self.labelsBounds[text][1] = topRight
break
if spaceFound:
break
gl.glPopMatrix()
gl.glPushMatrix()
gl.glTranslatef(*coord)
gl.glRotatef(*self.qText)
gl.glScalef(fontScale, fontScale, fontScale)
captionPos = depthCounter * 119.05*1.5
gl.glBegin(gl.GL_LINE_STRIP)
gl.glVertex3f(0, 0, 0)
gl.glVertex3f(captionPos*dx, captionPos*dy, 0)
gl.glVertex3f(captionPos*dx + textWidth*dx,
captionPos*dy, 0)
gl.glEnd()
gl.glTranslatef(*textPos)
for symbol in text.strip(" "):
gl.glutStrokeCharacter(self.scalableFontType, ord(symbol))
gl.glPopMatrix()
else:
gl.glPushMatrix()
gl.glTranslatef(*coord)
gl.glRotatef(*self.qText)
gl.glTranslatef(*coordShift)
gl.glScalef(fontScale, fontScale, fontScale)
for symbol in text:
gl.glutStrokeCharacter(self.scalableFontType, ord(symbol))
gl.glPopMatrix()
gl.glLineWidth(tLineWidth)
if tLineAA:
gl.glEnable(gl.GL_LINE_SMOOTH)
else:
gl.glDisable(gl.GL_LINE_SMOOTH)
def setMaterial(self, mat):
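        """Apply one of the predefined OpenGL material presets: 'Cu',
        'magRed', 'magBlue', 'semiSi' (semi-transparent) or the default
        grey."""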
if mat == 'Cu':
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT,
[0.3, 0.15, 0.15, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_DIFFUSE,
[0.4, 0.25, 0.15, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_SPECULAR,
[1., 0.7, 0.3, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_EMISSION,
[0.1, 0.1, 0.1, 1])
gl.glMaterialf(gl.GL_FRONT, gl.GL_SHININESS, 100)
elif mat == 'magRed':
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT,
[0.6, 0.1, 0.1, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_DIFFUSE,
[0.8, 0.1, 0.1, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_SPECULAR,
[1., 0.1, 0.1, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_EMISSION,
[0.1, 0.1, 0.1, 1])
gl.glMaterialf(gl.GL_FRONT, gl.GL_SHININESS, 100)
elif mat == 'magBlue':
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT,
[0.1, 0.1, 0.6, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_DIFFUSE,
[0.1, 0.1, 0.8, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_SPECULAR,
[0.1, 0.1, 1., 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_EMISSION,
[0.1, 0.1, 0.1, 1])
gl.glMaterialf(gl.GL_FRONT, gl.GL_SHININESS, 100)
elif mat == 'semiSi':
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT,
[0.1, 0.1, 0.1, 0.75])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_DIFFUSE,
[0.3, 0.3, 0.3, 0.75])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_SPECULAR,
[1., 0.9, 0.8, 0.75])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_EMISSION,
[0.1, 0.1, 0.1, 0.75])
gl.glMaterialf(gl.GL_FRONT, gl.GL_SHININESS, 100)
else:
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT,
[0.1, 0.1, 0.1, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_DIFFUSE,
[0.3, 0.3, 0.3, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_SPECULAR,
[1., 0.9, 0.8, 1])
gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_EMISSION,
[0.1, 0.1, 0.1, 1])
gl.glMaterialf(gl.GL_FRONT, gl.GL_SHININESS, 100)
def paintGL(self):
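        """Main drawing routine: set up the projection and the camera, then
        draw projections, ray segments, optical-element surfaces, footprints,
        the virtual screen, labels, local axes and the coordinate grid
        according to the current view settings."""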
def makeCenterStr(centerList, prec):
retStr = '('
for dim in centerList:
retStr += '{0:.{1}f}, '.format(dim, prec)
return retStr[:-2] + ')'
if self.invertColors:
gl.glClearColor(1.0, 1.0, 1.0, 1.)
else:
gl.glClearColor(0.0, 0.0, 0.0, 1.)
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
if self.perspectiveEnabled:
gl.gluPerspective(self.cameraAngle, self.aspect, 0.001, 10000)
else:
orthoView = self.cameraPos[0]*0.45
gl.glOrtho(-orthoView*self.aspect, orthoView*self.aspect,
-orthoView, orthoView, -100, 100)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
gl.gluLookAt(self.cameraPos[0], self.cameraPos[1], self.cameraPos[2],
self.cameraTarget[0], self.cameraTarget[1],
self.cameraTarget[2],
0.0, 0.0, 1.0)
if self.enableBlending:
gl.glEnable(gl.GL_MULTISAMPLE)
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
# gl.glBlendFunc(gl.GL_SRC_ALPHA, GL_ONE)
gl.glEnable(gl.GL_POINT_SMOOTH)
gl.glHint(gl.GL_POINT_SMOOTH_HINT, gl.GL_NICEST)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glEnableClientState(gl.GL_COLOR_ARRAY)
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
self.rotateZYX()
pModel = np.array(gl.glGetDoublev(gl.GL_MODELVIEW_MATRIX))[:-1, :-1]
self.visibleAxes = np.argmax(np.abs(pModel), axis=0)
self.signs = np.sign(pModel)
self.axPosModifier = np.ones(3)
if self.enableAA:
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)
gl.glHint(gl.GL_POLYGON_SMOOTH_HINT, gl.GL_NICEST)
# gl.glHint(GL_PERSPECTIVE_CORRECTION_HINT, gl.GL_NICEST)
for dim in range(3):
for iAx in range(3):
self.axPosModifier[iAx] = (self.signs[iAx][2] if
self.signs[iAx][2] != 0 else 1)
if self.projectionsVisibility[dim] > 0:
if self.lineProjectionWidth > 0 and\
self.lineProjectionOpacity > 0 and\
self.verticesArray is not None:
projectionRays = self.modelToWorld(
np.copy(self.verticesArray))
projectionRays[:, dim] =\
-self.aPos[dim] * self.axPosModifier[dim]
self.drawArrays(
0, gl.GL_LINES, projectionRays, self.raysColor,
self.lineProjectionOpacity, self.lineProjectionWidth)
if self.pointProjectionSize > 0 and\
self.pointProjectionOpacity > 0:
if self.footprintsArray is not None:
projectionDots = self.modelToWorld(
np.copy(self.footprintsArray))
projectionDots[:, dim] =\
-self.aPos[dim] * self.axPosModifier[dim]
self.drawArrays(
0, gl.GL_POINTS, projectionDots, self.dotsColor,
self.pointProjectionOpacity,
self.pointProjectionSize)
if self.virtDotsArray is not None:
projectionDots = self.modelToWorld(
np.copy(self.virtDotsArray))
projectionDots[:, dim] =\
-self.aPos[dim] * self.axPosModifier[dim]
self.drawArrays(
0, gl.GL_POINTS, projectionDots,
self.virtDotsColor,
self.pointProjectionOpacity,
self.pointProjectionSize)
if self.enableAA:
gl.glDisable(gl.GL_LINE_SMOOTH)
if self.linesDepthTest:
gl.glEnable(gl.GL_DEPTH_TEST)
if self.enableAA:
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)
gl.glHint(gl.GL_POLYGON_SMOOTH_HINT, gl.GL_NICEST)
if self.lineWidth > 0 and self.lineOpacity > 0 and\
self.verticesArray is not None:
self.drawArrays(1, gl.GL_LINES, self.verticesArray, self.raysColor,
self.lineOpacity, self.lineWidth)
if self.linesDepthTest:
gl.glDisable(gl.GL_DEPTH_TEST)
if self.enableAA:
gl.glDisable(gl.GL_LINE_SMOOTH)
gl.glEnable(gl.GL_DEPTH_TEST)
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)
if len(self.oesToPlot) > 0: # Surfaces of optical elements
gl.glEnableClientState(gl.GL_NORMAL_ARRAY)
gl.glEnable(gl.GL_NORMALIZE)
self.addLighting(3.)
for oeString in self.oesToPlot:
try:
oeToPlot = self.oesList[oeString][0]
is2ndXtal = self.oesList[oeString][3]
if isinstance(oeToPlot, roes.OE):
self.plotOeSurface(oeToPlot, is2ndXtal)
elif isinstance(oeToPlot, rscreens.HemisphericScreen):
self.setMaterial('semiSi')
self.plotHemiScreen(oeToPlot)
elif isinstance(oeToPlot, rscreens.Screen):
self.setMaterial('semiSi')
self.plotScreen(oeToPlot)
if isinstance(oeToPlot, (rapertures.RectangularAperture,
rapertures.RoundAperture)):
self.setMaterial('Cu')
self.plotAperture(oeToPlot)
else:
continue
except: # analysis:ignore
if _DEBUG_:
raise
else:
continue
gl.glDisable(gl.GL_LIGHTING)
gl.glDisable(gl.GL_NORMALIZE)
gl.glDisableClientState(gl.GL_NORMAL_ARRAY)
gl.glDisable(gl.GL_DEPTH_TEST)
if self.enableAA:
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)
gl.glEnable(gl.GL_DEPTH_TEST)
if len(self.oesToPlot) > 0:
for oeString in self.oesToPlot:
oeToPlot = self.oesList[oeString][0]
if isinstance(oeToPlot, (rsources.BendingMagnet,
rsources.Wiggler,
rsources.Undulator)):
self.plotSource(oeToPlot)
# elif isinstance(oeToPlot, rscreens.HemisphericScreen):
# self.plotHemiScreen(oeToPlot)
# elif isinstance(oeToPlot, rscreens.Screen):
# self.plotScreen(oeToPlot)
# elif isinstance(oeToPlot, roes.OE):
# self.drawOeContour(oeToPlot)
# elif isinstance(oeToPlot, rapertures.RectangularAperture):
# self.drawSlitEdges(oeToPlot)
else:
continue
if self.virtScreen is not None:
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)
gl.glHint(gl.GL_POLYGON_SMOOTH_HINT, gl.GL_NICEST)
self.plotScreen(self.virtScreen, [self.vScreenSize]*2,
[1, 0, 0, 1], plotFWHM=True)
# if not self.enableAA:
# gl.glDisable(gl.GL_LINE_SMOOTH)
gl.glDisable(gl.GL_DEPTH_TEST)
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
if self.pointsDepthTest:
gl.glEnable(gl.GL_DEPTH_TEST)
if self.pointSize > 0 and self.pointOpacity > 0:
if self.footprintsArray is not None:
self.drawArrays(1, gl.GL_POINTS, self.footprintsArray,
self.dotsColor, self.pointOpacity,
self.pointSize)
if self.virtDotsArray is not None:
self.drawArrays(1, gl.GL_POINTS, self.virtDotsArray,
self.virtDotsColor, self.pointOpacity,
self.pointSize)
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
gl.glDisableClientState(gl.GL_COLOR_ARRAY)
if self.enableAA:
gl.glDisable(gl.GL_LINE_SMOOTH)
if self.pointsDepthTest:
gl.glDisable(gl.GL_DEPTH_TEST)
# oeLabels = OrderedDict()
self.labelsBounds = OrderedDict()
if len(self.labelsToPlot) > 0:
if self.invertColors:
gl.glColor4f(0.0, 0.0, 0.0, 1.)
else:
gl.glColor4f(1.0, 1.0, 1.0, 1.)
gl.glLineWidth(1)
# for oeKey, oeValue in self.oesList.items():
for oeKey in self.labelsToPlot:
oeValue = self.oesList[oeKey]
oeCenterStr = makeCenterStr(oeValue[2],
self.labelCoordPrec)
oeCoord = np.array(oeValue[2])
oeCenterStr = ' {0}: {1}mm'.format(
oeKey, oeCenterStr)
oeLabelPos = self.modelToWorld(oeCoord - self.coordOffset)
self.drawText(oeLabelPos, oeCenterStr, useCaption=True)
if self.showOeLabels and self.virtScreen is not None:
vsCenterStr = ' {0}: {1}mm'.format(
'Virtual Screen', makeCenterStr(self.virtScreen.center,
self.labelCoordPrec))
try:
pModel = gl.glGetDoublev(gl.GL_MODELVIEW_MATRIX)
pProjection = gl.glGetDoublev(gl.GL_PROJECTION_MATRIX)
pView = gl.glGetIntegerv(gl.GL_VIEWPORT)
m1 = self.modelToWorld(
self.virtScreen.frame[1] - self.coordOffset)
m2 = self.modelToWorld(
self.virtScreen.frame[2] - self.coordOffset)
scr1 = gl.gluProject(
*m1, model=pModel,
proj=pProjection, view=pView)[0]
scr2 = gl.gluProject(
*m2, model=pModel,
proj=pProjection, view=pView)[0]
lblCenter = self.virtScreen.frame[1] if scr1 > scr2 else\
self.virtScreen.frame[2]
except: # analysis:ignore
if _DEBUG_:
raise
else:
lblCenter = self.virtScreen.center
vsLabelPos = self.modelToWorld(lblCenter - self.coordOffset)
if self.invertColors:
gl.glColor4f(0.0, 0.0, 0.0, 1.)
else:
gl.glColor4f(1.0, 1.0, 1.0, 1.)
gl.glLineWidth(1)
self.drawText(vsLabelPos, vsCenterStr)
if len(self.oesToPlot) > 0 and self.showLocalAxes: # Local axes
for oeString in self.oesToPlot:
try:
oeToPlot = self.oesList[oeString][0]
is2ndXtal = self.oesList[oeString][3]
if hasattr(oeToPlot, 'local_to_global'):
self.drawLocalAxes(oeToPlot, is2ndXtal)
except:
if _DEBUG_:
raise
else:
continue
gl.glEnable(gl.GL_DEPTH_TEST)
if self.drawGrid: # Coordinate grid box
self.drawCoordinateGrid()
gl.glFlush()
self.drawDirectionAxes()
if self.showHelp:
self.drawHelp()
if self.enableBlending:
gl.glDisable(gl.GL_MULTISAMPLE)
gl.glDisable(gl.GL_BLEND)
gl.glDisable(gl.GL_POINT_SMOOTH)
gl.glFlush()
def quatMult(self, qf, qt):
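        """Hamilton product of two quaternions given as [w, x, y, z]."""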
return [qf[0]*qt[0]-qf[1]*qt[1]-qf[2]*qt[2]-qf[3]*qt[3],
qf[0]*qt[1]+qf[1]*qt[0]+qf[2]*qt[3]-qf[3]*qt[2],
qf[0]*qt[2]-qf[1]*qt[3]+qf[2]*qt[0]+qf[3]*qt[1],
qf[0]*qt[3]+qf[1]*qt[2]-qf[2]*qt[1]+qf[3]*qt[0]]
def drawCoordinateGrid(self):
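        """Draw the coordinate box: background planes, major (and optional
        fine) grid lines and tick labels with automatically chosen step and
        precision along the three visible axes."""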
def populateGrid(grids):
axisLabelC = []
axisLabelC.extend([np.vstack(
(self.modelToWorld(grids, 0),
np.ones(len(grids[0]))*self.aPos[1]*self.axPosModifier[1],
np.ones(len(grids[0]))*-self.aPos[2]*self.axPosModifier[2]
))])
axisLabelC.extend([np.vstack(
(np.ones(len(grids[1]))*self.aPos[0]*self.axPosModifier[0],
self.modelToWorld(grids, 1),
np.ones(len(grids[1]))*-self.aPos[2]*self.axPosModifier[2]
))])
zAxis = np.vstack(
(np.ones(len(grids[2]))*-self.aPos[0]*self.axPosModifier[0],
np.ones(len(grids[2]))*self.aPos[1]*self.axPosModifier[1],
self.modelToWorld(grids, 2)))
xAxisB = np.vstack(
(self.modelToWorld(grids, 0),
np.ones(len(grids[0]))*-self.aPos[1]*self.axPosModifier[1],
np.ones(len(grids[0]))*-self.aPos[2]*self.axPosModifier[2]))
yAxisB = np.vstack(
(np.ones(len(grids[1]))*-self.aPos[0]*self.axPosModifier[0],
self.modelToWorld(grids, 1),
np.ones(len(grids[1]))*-self.aPos[2]*self.axPosModifier[2]))
zAxisB = np.vstack(
(np.ones(len(grids[2]))*-self.aPos[0]*self.axPosModifier[0],
np.ones(len(grids[2]))*-self.aPos[1]*self.axPosModifier[1],
self.modelToWorld(grids, 2)))
xAxisC = np.vstack(
(self.modelToWorld(grids, 0),
np.ones(len(grids[0]))*-self.aPos[1]*self.axPosModifier[1],
np.ones(len(grids[0]))*self.aPos[2]*self.axPosModifier[2]))
yAxisC = np.vstack(
(np.ones(len(grids[1]))*-self.aPos[0]*self.axPosModifier[0],
self.modelToWorld(grids, 1),
np.ones(len(grids[1]))*self.aPos[2]*self.axPosModifier[2]))
axisLabelC.extend([np.vstack(
(np.ones(len(grids[2]))*self.aPos[0]*self.axPosModifier[0],
np.ones(len(grids[2]))*-self.aPos[1]*self.axPosModifier[1],
self.modelToWorld(grids, 2)))])
xLines = np.vstack(
(axisLabelC[0], xAxisB, xAxisB, xAxisC)).T.flatten().reshape(
4*xAxisB.shape[1], 3)
yLines = np.vstack(
(axisLabelC[1], yAxisB, yAxisB, yAxisC)).T.flatten().reshape(
4*yAxisB.shape[1], 3)
zLines = np.vstack(
(zAxis, zAxisB, zAxisB, axisLabelC[2])).T.flatten().reshape(
4*zAxisB.shape[1], 3)
return axisLabelC, np.vstack((xLines, yLines, zLines))
def drawGridLines(gridArray, lineWidth, lineOpacity, figType):
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glEnableClientState(gl.GL_COLOR_ARRAY)
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
gridColor = np.ones((len(gridArray), 4)) * lineOpacity
gridArrayVBO = gl.vbo.VBO(np.float32(gridArray))
gridArrayVBO.bind()
gl.glVertexPointerf(gridArrayVBO)
gridColorArray = gl.vbo.VBO(np.float32(gridColor))
gridColorArray.bind()
gl.glColorPointerf(gridColorArray)
gl.glLineWidth(lineWidth)
gl.glDrawArrays(figType, 0, len(gridArrayVBO))
gridArrayVBO.unbind()
gridColorArray.unbind()
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
gl.glDisableClientState(gl.GL_COLOR_ARRAY)
def getAlignment(point, hDim, vDim=None):
pView = gl.glGetIntegerv(gl.GL_VIEWPORT)
pModel = gl.glGetDoublev(gl.GL_MODELVIEW_MATRIX)
pProjection = gl.glGetDoublev(gl.GL_PROJECTION_MATRIX)
sp0 = np.array(gl.gluProject(
*point, model=pModel, proj=pProjection, view=pView))
pointH = np.copy(point)
pointH[hDim] *= 1.1
spH = np.array(gl.gluProject(*pointH, model=pModel,
proj=pProjection, view=pView))
pointV = np.copy(point)
if vDim is None:
vAlign = 'middle'
else:
pointV[vDim] *= 1.1
spV = np.array(gl.gluProject(*pointV, model=pModel,
proj=pProjection, view=pView))
vAlign = 'top' if spV[1] - sp0[1] > 0 else 'bottom'
hAlign = 'left' if spH[0] - sp0[0] < 0 else 'right'
return (hAlign, vAlign)
back = np.array([[-self.aPos[0], self.aPos[1], -self.aPos[2]],
[-self.aPos[0], self.aPos[1], self.aPos[2]],
[-self.aPos[0], -self.aPos[1], self.aPos[2]],
[-self.aPos[0], -self.aPos[1], -self.aPos[2]]])
side = np.array([[self.aPos[0], -self.aPos[1], -self.aPos[2]],
[-self.aPos[0], -self.aPos[1], -self.aPos[2]],
[-self.aPos[0], -self.aPos[1], self.aPos[2]],
[self.aPos[0], -self.aPos[1], self.aPos[2]]])
bottom = np.array([[self.aPos[0], -self.aPos[1], -self.aPos[2]],
[self.aPos[0], self.aPos[1], -self.aPos[2]],
[-self.aPos[0], self.aPos[1], -self.aPos[2]],
[-self.aPos[0], -self.aPos[1], -self.aPos[2]]])
back[:, 0] *= self.axPosModifier[0]
side[:, 1] *= self.axPosModifier[1]
bottom[:, 2] *= self.axPosModifier[2]
# Calculating regular grids in world coordinates
limits = np.array([-1, 1])[:, np.newaxis] * np.array(self.aPos)
allLimits = limits * self.maxLen / self.scaleVec - self.tVec\
+ self.coordOffset
axisGridArray = []
gridLabels = []
precisionLabels = []
if self.fineGridEnabled:
fineGridArray = []
for iAx in range(3):
m2 = self.aPos[iAx] / 0.9
dx1 = np.abs(allLimits[:, iAx][0] - allLimits[:, iAx][1]) / m2
order = np.floor(np.log10(dx1))
m1 = dx1 * 10**-order
if (m1 >= 1) and (m1 < 2):
step = 0.2 * 10**order
elif (m1 >= 2) and (m1 < 4):
step = 0.5 * 10**order
else:
step = 10**order
if step < 1:
decimalX = int(np.abs(order)) + 1 if m1 < 4 else\
int(np.abs(order))
else:
decimalX = 0
            gridX = np.arange(int(allLimits[:, iAx][0]/step)*step,
allLimits[:, iAx][1], step)
gridX = gridX if gridX[0] >= allLimits[:, iAx][0] else\
gridX[1:]
gridLabels.extend([gridX])
precisionLabels.extend([np.ones_like(gridX)*decimalX])
axisGridArray.extend([gridX - self.coordOffset[iAx]])
if self.fineGridEnabled:
fineStep = step * 0.2
fineGrid = np.arange(
                    int(allLimits[:, iAx][0]/fineStep)*fineStep,
allLimits[:, iAx][1], fineStep)
fineGrid = fineGrid if\
fineGrid[0] >= allLimits[:, iAx][0] else fineGrid[1:]
fineGridArray.extend([fineGrid - self.coordOffset[iAx]])
axisL, axGrid = populateGrid(axisGridArray)
if self.fineGridEnabled:
tmp, fineAxGrid = populateGrid(fineGridArray)
if self.invertColors:
gl.glColor4f(0.0, 0.0, 0.0, 1.)
else:
gl.glColor4f(1.0, 1.0, 1.0, 1.)
for iAx in range(3):
if not (not self.perspectiveEnabled and
iAx == self.visibleAxes[2]):
tAlign = None
midp = int(len(axisL[iAx][0, :])/2)
if iAx == self.visibleAxes[1]: # Side plane,
if self.useScalableFont:
tAlign = getAlignment(axisL[iAx][:, midp],
self.visibleAxes[0])
else:
axisL[iAx][self.visibleAxes[2], :] *= 1.05 # depth
axisL[iAx][self.visibleAxes[0], :] *= 1.05 # side
if iAx == self.visibleAxes[0]: # Bottom plane, left-right
if self.useScalableFont:
tAlign = getAlignment(axisL[iAx][:, midp],
self.visibleAxes[2],
self.visibleAxes[1])
else:
axisL[iAx][self.visibleAxes[1], :] *= 1.05 # height
axisL[iAx][self.visibleAxes[2], :] *= 1.05 # side
if iAx == self.visibleAxes[2]: # Bottom plane, left-right
if self.useScalableFont:
tAlign = getAlignment(axisL[iAx][:, midp],
self.visibleAxes[0],
self.visibleAxes[1])
else:
axisL[iAx][self.visibleAxes[1], :] *= 1.05 # height
axisL[iAx][self.visibleAxes[0], :] *= 1.05 # side
for tick, tText, pcs in list(zip(axisL[iAx].T, gridLabels[iAx],
precisionLabels[iAx])):
valueStr = "{0:.{1}f}".format(tText, int(pcs))
self.drawText(tick, valueStr, alignment=tAlign)
# if not self.enableAA:
# gl.glDisable(gl.GL_LINE_SMOOTH)
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)
gl.glHint(gl.GL_POINT_SMOOTH_HINT, gl.GL_NICEST)
tLineWidth = gl.glGetDoublev(gl.GL_LINE_WIDTH)
drawGridLines(np.vstack((back, side, bottom)),
self.coordinateGridLineWidth * 2, 0.75, gl.GL_QUADS)
drawGridLines(axGrid, self.coordinateGridLineWidth, 0.5, gl.GL_LINES)
if self.fineGridEnabled:
drawGridLines(fineAxGrid, self.coordinateGridLineWidth, 0.25,
gl.GL_LINES)
gl.glDisable(gl.GL_LINE_SMOOTH)
gl.glLineWidth(tLineWidth)
def drawArrays(self, tr, geom, vertices, colors, lineOpacity, lineWidth):
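        """Upload vertices and colors as VBOs and draw them as lines or
        points. If tr is true the vertices are first converted with
        modelToWorld. lineOpacity is multiplied into the alpha channel for
        the draw call and restored afterwards; lineWidth doubles as the
        point size for GL_POINTS."""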
if vertices is None or colors is None:
return
if bool(tr):
vertexArray = gl.vbo.VBO(self.modelToWorld(vertices))
else:
vertexArray = gl.vbo.VBO(vertices)
vertexArray.bind()
gl.glVertexPointerf(vertexArray)
pureOpacity = np.copy(colors[:, 3])
colors[:, 3] = np.float32(pureOpacity * lineOpacity)
colorArray = gl.vbo.VBO(colors)
colorArray.bind()
gl.glColorPointerf(colorArray)
if geom == gl.GL_LINES:
gl.glLineWidth(lineWidth)
else:
gl.glPointSize(lineWidth)
gl.glDrawArrays(geom, 0, len(vertices))
colors[:, 3] = pureOpacity
colorArray.unbind()
vertexArray.unbind()
def plotSource(self, oe):
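        """Draw a schematic magnet structure for the source: pairs of blocks
        above and below the beam axis with alternating colors, one pair per
        half-period, or a single pair if the source defines no Np (e.g. a
        bending magnet)."""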
# gl.glEnable(gl.GL_MAP2_VERTEX_3)
# gl.glEnable(gl.GL_MAP2_NORMAL)
nPeriods = int(oe.Np) if hasattr(oe, 'Np') else 0.5
if hasattr(oe, 'L0'):
lPeriod = oe.L0
maghL = 0.25 * lPeriod * 0.5
else:
try:
lPeriod = (oe.Theta_max - oe.Theta_min) * oe.ro * 1000
except AttributeError:
if _DEBUG_:
raise
else:
lPeriod = 500.
maghL = lPeriod
maghH = 10 * 0.5
maghW = 10 * 0.5
surfRot = [[0, 0, 0, 1], [180, 0, 1, 0],
[-90, 0, 1, 0], [90, 0, 1, 0],
[-90, 1, 0, 0], [90, 1, 0, 0]]
surfTrans = np.array([[0, 0, maghH], [0, 0, -maghH],
[-maghW, 0, 0], [maghW, 0, 0],
[0, maghL, 0], [0, -maghL, 0]])
surfScales = np.array([[maghW*2, maghL*2, 0], [maghW*2, maghL*2, 0],
[0, maghL*2, maghH*2], [0, maghL*2, maghH*2],
[maghW*2, 0, maghH*2], [maghW*2, 0, maghH*2]])
# deltaX = 1. / 2. # float(self.tiles[0])
# deltaY = 1. / 2. # float(self.tiles[1])
magToggle = True
gl.glLineWidth(1)
gl.glPushMatrix()
gl.glTranslatef(*(self.modelToWorld(np.array(oe.center) -
self.coordOffset)))
gl.glRotatef(np.degrees(oe.pitch * self.scaleVec[2] /
self.scaleVec[1]), 1, 0, 0)
yaw = oe.yaw
try:
az = oe.bl.azimuth
except: # analysis:ignore
if _DEBUG_:
raise
else:
az = 0
gl.glRotatef(np.degrees((yaw-az) * self.scaleVec[0] /
self.scaleVec[1]), 0, 0, 1)
gl.glTranslatef(*(-1. * self.modelToWorld(np.array(oe.center) -
self.coordOffset)))
for period in range(int(nPeriods) if nPeriods > 0.5 else 1):
for hp in ([0, 0.5] if nPeriods > 0.5 else [0.25]):
pY = list(oe.center)[1] - lPeriod * (0.5 * nPeriods -
period - hp)
magToggle = not magToggle
for gap in [maghH*1.25, -maghH*1.25]:
cubeCenter = np.array([oe.center[0], pY, oe.center[2]+gap])
# self.setMaterial('magRed' if magToggle else 'magBlue')
magColor = [0.7, 0.1, 0.1, 1.] if magToggle \
else [0.1, 0.1, 0.7, 1.]
magToggle = not magToggle
for surf in range(6):
gl.glPushMatrix()
gl.glTranslatef(*(self.modelToWorld(
cubeCenter + surfTrans[surf] - self.coordOffset)))
gl.glScalef(*(self.modelToWorld(surfScales[surf] -
self.tVec)))
gl.glRotatef(*surfRot[surf])
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)
gl.glBegin(gl.GL_QUADS)
gl.glColor4f(*magColor)
gl.glVertex3f(-0.5, -0.5, 0)
gl.glVertex3f(-0.5, 0.5, 0)
gl.glVertex3f(0.5, 0.5, 0)
gl.glVertex3f(0.5, -0.5, 0)
gl.glEnd()
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
gl.glBegin(gl.GL_QUADS)
gl.glColor4f(0, 0, 0, 1.)
gl.glVertex3f(-0.5, -0.5, 0)
gl.glVertex3f(-0.5, 0.5, 0)
gl.glVertex3f(0.5, 0.5, 0)
gl.glVertex3f(0.5, -0.5, 0)
gl.glEnd()
# for i in range(2):
# xGridOe = np.linspace(-0.5 + i*deltaX,
# -0.5 + (i+1)*deltaX,
# self.surfCPOrder)
# for k in range(2):
# yGridOe = np.linspace(-0.5 + k*deltaY,
# -0.5 + (k+1)*deltaY,
# self.surfCPOrder)
# xv, yv = np.meshgrid(xGridOe, yGridOe)
# xv = xv.flatten()
# yv = yv.flatten()
# zv = np.zeros_like(xv)
#
# surfCP = np.vstack((xv, yv, zv)).T
# surfNorm = np.vstack((np.zeros_like(xv),
# np.zeros_like(xv),
# np.ones_like(zv),
# np.ones_like(zv))).T
#
# gl.glMap2f(gl.GL_MAP2_VERTEX_3, 0, 1, 0, 1,
# surfCP.reshape(
# self.surfCPOrder,
# self.surfCPOrder, 3))
#
# gl.glMap2f(gl.GL_MAP2_NORMAL, 0, 1, 0, 1,
# surfNorm.reshape(
# self.surfCPOrder,
# self.surfCPOrder, 4))
#
# gl.glMapGrid2f(self.surfCPOrder, 0.0, 1.0,
# self.surfCPOrder, 0.0, 1.0)
#
# gl.glEvalMesh2(gl.GL_FILL, 0,
# self.surfCPOrder,
# 0, self.surfCPOrder)
gl.glPopMatrix()
gl.glPopMatrix()
# gl.glDisable(gl.GL_MAP2_VERTEX_3)
# gl.glDisable(gl.GL_MAP2_NORMAL)
def plotCurvedMesh(self, x, y, z, a, b, c, shift):
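        """Render one surface patch with OpenGL 2D evaluators: (x, y, z) are
        global coordinates, (a, b, c) the surface normals and shift an extra
        offset used by plotOeSurface for stacked lens surfaces (_nCRL)."""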
surfCP = np.vstack((x - self.coordOffset[0] - shift[0],
y - self.coordOffset[1] - shift[1],
z - self.coordOffset[2] - shift[2])).T
gl.glMap2f(gl.GL_MAP2_VERTEX_3, 0, 1, 0, 1,
self.modelToWorld(surfCP.reshape(
self.surfCPOrder,
self.surfCPOrder, 3)))
surfNorm = np.vstack((a, b, c,
np.ones_like(a))).T
gl.glMap2f(gl.GL_MAP2_NORMAL, 0, 1, 0, 1,
surfNorm.reshape(
self.surfCPOrder,
self.surfCPOrder, 4))
gl.glMapGrid2f(self.surfCPOrder, 0.0, 1.0,
self.surfCPOrder, 0.0, 1.0)
gl.glEvalMesh2(gl.GL_FILL, 0, self.surfCPOrder,
0, self.surfCPOrder)
def plotOeSurface(self, oe, is2ndXtal):
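        """Build and draw the top surface of an optical element tile by tile
        and, if the element has a finite thickness, also its bottom surface
        and side faces."""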
def getThickness(element):
thickness = 0
if isinstance(oe, roes.Plate):
if oe.t is not None:
return oe.t
if hasattr(oe, "material"):
if oe.material is not None:
thickness = 10.
if hasattr(oe.material, "t"):
thickness = oe.material.t if oe.material.t is not None\
else thickness
elif isinstance(oe.material, rmats.Multilayer):
if oe.material.substrate is not None:
if hasattr(oe.material.substrate, 't'):
if oe.material.substrate.t is not None:
thickness = oe.material.substrate.t
return thickness
thickness = getThickness(oe)
self.setMaterial('Si')
gl.glEnable(gl.GL_MAP2_VERTEX_3)
gl.glEnable(gl.GL_MAP2_NORMAL)
# Top and Bottom Surfaces
nsIndex = int(is2ndXtal)
if is2ndXtal:
xLimits = list(oe.limPhysX2)
# xLimits = list(oe.limOptX2) if\
# oe.limOptX2 is not None else oe.limPhysX2
if np.any(np.abs(xLimits) == raycing.maxHalfSizeOfOE):
if oe.footprint is not None:
xLimits = oe.footprint[nsIndex][:, 0]
yLimits = list(oe.limPhysY2)
# yLimits = list(oe.limOptY2) if\
# oe.limOptY2 is not None else oe.limPhysY2
if np.any(np.abs(yLimits) == raycing.maxHalfSizeOfOE):
if oe.footprint is not None:
yLimits = oe.footprint[nsIndex][:, 1]
else:
xLimits = list(oe.limPhysX)
# xLimits = list(oe.limOptX) if\
# oe.limOptX is not None else oe.limPhysX
if np.any(np.abs(xLimits) == raycing.maxHalfSizeOfOE):
if oe.footprint is not None:
xLimits = oe.footprint[nsIndex][:, 0]
yLimits = list(oe.limPhysY)
# yLimits = list(oe.limOptY) if\
# oe.limOptY is not None else oe.limPhysY
if np.any(np.abs(yLimits) == raycing.maxHalfSizeOfOE):
if oe.footprint is not None:
yLimits = oe.footprint[nsIndex][:, 1]
localTiles = np.array(self.tiles)
if oe.shape == 'round':
rX = np.abs((xLimits[1] - xLimits[0]))*0.5
rY = np.abs((yLimits[1] - yLimits[0]))*0.5
cX = (xLimits[1] + xLimits[0])*0.5
cY = (yLimits[1] + yLimits[0])*0.5
xLimits = [0, 1.]
yLimits = [0, 2*np.pi]
localTiles[1] *= 3
for i in range(localTiles[0]):
deltaX = (xLimits[1] - xLimits[0]) /\
float(localTiles[0])
xGridOe = np.linspace(xLimits[0] + i*deltaX,
xLimits[0] + (i+1)*deltaX,
self.surfCPOrder) + oe.dx
for k in range(localTiles[1]):
deltaY = (yLimits[1] - yLimits[0]) /\
float(localTiles[1])
yGridOe = np.linspace(yLimits[0] + k*deltaY,
yLimits[0] + (k+1)*deltaY,
self.surfCPOrder)
xv, yv = np.meshgrid(xGridOe, yGridOe)
if oe.shape == 'round':
xv, yv = rX*xv*np.cos(yv)+cX, rY*xv*np.sin(yv)+cY
xv = xv.flatten()
yv = yv.flatten()
if is2ndXtal:
zExt = '2'
else:
zExt = '1' if hasattr(oe, 'local_z1') else ''
local_z = getattr(oe, 'local_r{}'.format(zExt)) if\
oe.isParametric else getattr(oe, 'local_z{}'.format(zExt))
local_n = getattr(oe, 'local_n{}'.format(zExt))
xv = np.copy(xv)
yv = np.copy(yv)
zv = np.zeros_like(xv)
if oe.isParametric:
xv, yv, zv = oe.xyz_to_param(xv, yv, zv)
zv = local_z(xv, yv)
nv = local_n(xv, yv)
gbT = rsources.Beam(nrays=len(xv))
if oe.isParametric:
xv, yv, zv = oe.param_to_xyz(xv, yv, zv)
gbT.x = xv
gbT.y = yv
gbT.z = zv
gbT.a = nv[0] * np.ones_like(zv)
gbT.b = nv[1] * np.ones_like(zv)
gbT.c = nv[2] * np.ones_like(zv)
if thickness > 0:
gbB = rsources.Beam(copyFrom=gbT)
if isinstance(oe, roes.LauePlate):
gbB.z[:] = gbT.z - thickness
gbB.a = -gbT.a
gbB.b = -gbT.b
gbB.c = -gbT.c
else:
gbB.z[:] = -thickness
gbB.a[:] = 0
gbB.b[:] = 0
gbB.c[:] = -1.
oe.local_to_global(gbB, is2ndXtal=is2ndXtal)
oe.local_to_global(gbT, is2ndXtal=is2ndXtal)
if hasattr(oe, '_nCRL'):
cShift = oe.centerShift
nSurf = oe._nCRL
else:
cShift = np.zeros(3)
nSurf = 1
for iSurf in range(nSurf):
dC = cShift * iSurf
self.plotCurvedMesh(gbT.x, gbT.y, gbT.z,
gbT.a, gbT.b, gbT.c, dC)
if thickness > 0 and\
not isinstance(oe, roes.DoubleParaboloidLens):
self.plotCurvedMesh(gbB.x, gbB.y, gbB.z,
gbB.a, gbB.b, gbB.c, dC)
# Side faces
if isinstance(oe, roes.Plate):
self.setMaterial('semiSi')
if thickness > 0:
for ie, yPos in enumerate(yLimits):
for i in range(localTiles[0]):
if oe.shape == 'round':
continue
deltaX = (xLimits[1] - xLimits[0]) /\
float(localTiles[0])
xGridOe = np.linspace(xLimits[0] + i*deltaX,
xLimits[0] + (i+1)*deltaX,
self.surfCPOrder) + oe.dx
edgeX = xGridOe
edgeY = np.ones_like(xGridOe)*yPos
edgeZ = np.zeros_like(xGridOe)
if oe.isParametric:
edgeX, edgeY, edgeZ = oe.xyz_to_param(
edgeX, edgeY, edgeZ)
edgeZ = local_z(edgeX, edgeY)
if oe.isParametric:
edgeX, edgeY, edgeZ = oe.param_to_xyz(
edgeX, edgeY, edgeZ)
gridZ = None
for zTop in edgeZ:
gridZ = np.linspace(-thickness, zTop,
self.surfCPOrder) if\
gridZ is None else np.concatenate((
gridZ, np.linspace(-thickness, zTop,
self.surfCPOrder)))
gridX = np.repeat(edgeX, len(edgeZ))
gridY = np.ones_like(gridX) * yPos
xN = np.zeros_like(gridX)
yN = (1 if ie == 1 else -1)*np.ones_like(gridX)
zN = np.zeros_like(gridX)
faceBeam = rsources.Beam(nrays=len(gridX))
faceBeam.x = gridX
faceBeam.y = gridY
faceBeam.z = gridZ
faceBeam.a = xN
faceBeam.b = yN
faceBeam.c = zN
oe.local_to_global(faceBeam, is2ndXtal=is2ndXtal)
self.plotCurvedMesh(faceBeam.x, faceBeam.y, faceBeam.z,
faceBeam.a, faceBeam.b, faceBeam.c,
[0]*3)
for ie, xPos in enumerate(xLimits):
if ie == 0 and oe.shape == 'round':
continue
for i in range(localTiles[1]):
deltaY = (yLimits[1] - yLimits[0]) /\
float(localTiles[1])
yGridOe = np.linspace(yLimits[0] + i*deltaY,
yLimits[0] + (i+1)*deltaY,
self.surfCPOrder)
edgeY = yGridOe
edgeX = np.ones_like(yGridOe)*xPos
                    edgeZ = np.zeros_like(yGridOe)
if oe.shape == 'round':
edgeX, edgeY = rX*edgeX*np.cos(edgeY)+cX,\
rY*edgeX*np.sin(edgeY)+cY
if oe.isParametric:
edgeX, edgeY, edgeZ = oe.xyz_to_param(
edgeX, edgeY, edgeZ)
edgeZ = local_z(edgeX, edgeY)
if oe.isParametric:
edgeX, edgeY, edgeZ = oe.param_to_xyz(
edgeX, edgeY, edgeZ)
zN = 0
gridZ = None
for zTop in edgeZ:
gridZ = np.linspace(-thickness, zTop,
self.surfCPOrder) if\
gridZ is None else np.concatenate((
gridZ, np.linspace(-thickness, zTop,
self.surfCPOrder)))
gridY = np.repeat(edgeY, len(edgeZ))
if oe.shape == 'round':
yN = (gridY-cY) / rY
gridX = np.repeat(edgeX, len(edgeZ))
xN = (gridX-cX) / rX
else:
gridX = np.repeat(edgeX, len(edgeZ))
yN = np.zeros_like(gridX)
xN = (1 if ie == 1 else -1) * np.ones_like(gridX)
zN = np.zeros_like(gridX)
faceBeam = rsources.Beam(nrays=len(gridX))
faceBeam.x = gridX
faceBeam.y = gridY
faceBeam.z = gridZ
faceBeam.a = xN
faceBeam.b = yN
faceBeam.c = zN
oe.local_to_global(faceBeam, is2ndXtal=is2ndXtal)
self.plotCurvedMesh(faceBeam.x, faceBeam.y, faceBeam.z,
faceBeam.a, faceBeam.b, faceBeam.c,
[0]*3)
gl.glDisable(gl.GL_MAP2_VERTEX_3)
gl.glDisable(gl.GL_MAP2_NORMAL)
# Contour
# xBound = np.linspace(xLimits[0], xLimits[1],
# self.surfCPOrder*(localTiles[0]+1))
# yBound = np.linspace(yLimits[0], yLimits[1],
# self.surfCPOrder*(localTiles[1]+1))
# if oe.shape == 'round':
# oeContour = [0]
# oneEdge = [0]
# else:
# oeContour = [0]*4
# oneEdge = [0]*4
# oeContour[0] = np.array([xBound,
# yBound[0]*np.ones_like(xBound)]) # bottom
# oeContour[1] = np.array([xBound[-1]*np.ones_like(yBound),
# yBound]) # left
# oeContour[2] = np.array([np.flip(xBound, 0),
# yBound[-1]*np.ones_like(xBound)]) # top
# oeContour[3] = np.array([xBound[0]*np.ones_like(yBound),
# np.flip(yBound, 0)]) # right
#
# for ie, edge in enumerate(oeContour):
# if oe.shape == 'round':
# edgeX, edgeY = rX*np.cos(yBound)+cX, rY*np.sin(yBound)+cY
# else:
# edgeX = edge[0, :]
# edgeY = edge[1, :]
# edgeZ = np.zeros_like(edgeX)
#
# if oe.isParametric:
# edgeX, edgeY, edgeZ = oe.xyz_to_param(edgeX, edgeY,
# edgeZ)
#
# edgeZ = local_z(edgeX, edgeY)
# if oe.isParametric:
# edgeX, edgeY, edgeZ = oe.param_to_xyz(
# edgeX, edgeY, edgeZ)
# edgeBeam = rsources.Beam(nrays=len(edgeX))
# edgeBeam.x = edgeX
# edgeBeam.y = edgeY
# edgeBeam.z = edgeZ
#
# oe.local_to_global(edgeBeam, is2ndXtal=is2ndXtal)
# oneEdge[ie] = np.vstack((edgeBeam.x - self.coordOffset[0],
# edgeBeam.y - self.coordOffset[1],
# edgeBeam.z - self.coordOffset[2])).T
#
# self.oeContour[oe.name] = oneEdge
# def drawOeContour(self, oe):
# gl.glEnable(gl.GL_MAP1_VERTEX_3)
# gl.glLineWidth(self.contourWidth)
# gl.glColor4f(0.0, 0.0, 0.0, 1.0)
# cpo = self.surfCPOrder
# for ie in range(len(self.oeContour[oe.name])):
# edge = self.oeContour[oe.name][ie]
# nTiles = self.tiles[0] if ie in [0, 2] else self.tiles[1]
# nTiles = self.tiles[1]*3 if oe.shape == 'round' else nTiles
# for tile in range(nTiles+1):
# gl.glMap1f(gl.GL_MAP1_VERTEX_3, 0, 1,
# self.modelToWorld(edge[tile*cpo:(tile+1)*cpo+1, :]))
# gl.glMapGrid1f(cpo, 0.0, 1.0)
# gl.glEvalMesh1(gl.GL_LINE, 0, cpo)
#
# gl.glDisable(gl.GL_MAP1_VERTEX_3)
# def drawSlitEdges(self, oe):
# gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
# gl.glLineWidth(self.contourWidth)
# gl.glColor4f(0.0, 0.0, 0.0, 1.0)
# gl.glBegin(gl.GL_QUADS)
# for edge in self.modelToWorld(np.array(self.slitEdges[oe.name]) -
# np.array(self.coordOffset)):
# gl.glVertex3f(*edge[0, :])
# gl.glVertex3f(*edge[1, :])
# gl.glVertex3f(*edge[3, :])
# gl.glVertex3f(*edge[2, :])
#
# gl.glVertex3f(*edge[0, :])
# gl.glVertex3f(*edge[1, :])
# gl.glVertex3f(*edge[5, :])
# gl.glVertex3f(*edge[4, :])
#
# gl.glVertex3f(*edge[5, :])
# gl.glVertex3f(*edge[1, :])
# gl.glVertex3f(*edge[3, :])
# gl.glVertex3f(*edge[7, :])
#
# gl.glVertex3f(*edge[4, :])
# gl.glVertex3f(*edge[5, :])
# gl.glVertex3f(*edge[7, :])
# gl.glVertex3f(*edge[6, :])
#
# gl.glVertex3f(*edge[0, :])
# gl.glVertex3f(*edge[4, :])
# gl.glVertex3f(*edge[6, :])
# gl.glVertex3f(*edge[2, :])
#
# gl.glVertex3f(*edge[2, :])
# gl.glVertex3f(*edge[3, :])
# gl.glVertex3f(*edge[7, :])
# gl.glVertex3f(*edge[6, :])
# gl.glEnd()
def plotAperture(self, oe):
surfCPOrder = self.surfCPOrder
gl.glEnable(gl.GL_MAP2_VERTEX_3)
gl.glEnable(gl.GL_MAP2_NORMAL)
plotVolume = False
# slitT = self.slitThickness
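# Round apertures are drawn as a disc (beam stops) or an annulus of width wf around
# the opening; rectangular apertures as one blade strip of width wf per edge listed
# in oe.kind / oe.opening.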
if oe.shape == 'round':
r = oe.r
isBeamStop = len(re.findall('Stop', str(type(oe)))) > 0
if isBeamStop:
limits = [[0, r, 0, 2*np.pi]]
else:
wf = max(r*0.25, 2.5)
limits = [[r, r+wf, 0, 2*np.pi]]
tiles = self.tiles[1] * 5
else:
try:
left, right, bottom, top = oe.spotLimits
except: # analysis:ignore
if _DEBUG_:
raise
else:
left, right, bottom, top = 0, 0, 0, 0
for akind, d in zip(oe.kind, oe.opening):
if akind.startswith('l'):
left = d
elif akind.startswith('r'):
right = d
elif akind.startswith('b'):
bottom = d
elif akind.startswith('t'):
top = d
w = right - left
h = top - bottom
wf = max(min(w, h)*0.5, 2.5)
limits = []
for akind, d in zip(oe.kind, oe.opening):
if akind.startswith('l'):
limits.append([left-wf, left, bottom-wf, top+wf])
elif akind.startswith('r'):
limits.append([right, right+wf, bottom-wf, top+wf])
elif akind.startswith('b'):
limits.append([left-wf, right+wf, bottom-wf, bottom])
elif akind.startswith('t'):
limits.append([left-wf, right+wf, top, top+wf])
tiles = self.tiles[1]
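# Tessellate each blade rectangle into 'tiles' strips along y and plot every strip
# twice, once for each orientation of the surface normal, so the aperture is visible
# from both sides.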
if not plotVolume:
for xMin, xMax, yMin, yMax in limits:
xGridOe = np.linspace(xMin, xMax, surfCPOrder)
deltaY = (yMax - yMin) / float(tiles)
for k in range(tiles):
yMinT = yMin + k*deltaY
yMaxT = yMinT + deltaY
yGridOe = np.linspace(yMinT, yMaxT, surfCPOrder)
xv, yv = np.meshgrid(xGridOe, yGridOe)
if oe.shape == 'round':
xv, yv = xv*np.cos(yv), xv*np.sin(yv)
xv = xv.flatten()
yv = yv.flatten()
gbT = rsources.Beam(nrays=len(xv))
gbT.x = xv
gbT.y = np.zeros_like(xv)
gbT.z = yv
gbT.a = np.zeros_like(xv)
gbT.b = np.ones_like(xv)
gbT.c = np.zeros_like(xv)
oe.local_to_global(gbT)
for surf in [1, -1]:
self.plotCurvedMesh(gbT.x, gbT.y, gbT.z,
gbT.a, gbT.b[:]*surf, gbT.c,
[0, 0, 0])
# else:
# self.slitEdges[oe.name] = []
# for iface, face in enumerate(limits):
# dT = slitT if iface < 2 else -slitT # Slit thickness
# # front
# xGridOe = np.linspace(face[0], face[1], surfCPOrder)
# zGridOe = np.linspace(face[2], face[3], surfCPOrder)
# yGridOe = np.linspace(0, -dT, surfCPOrder)
# xVert, yVert, zVert = np.meshgrid([face[0], face[1]],
# [0, -dT],
# [face[2], face[3]])
# bladeVertices = np.vstack((xVert.flatten(),
# yVert.flatten(),
# zVert.flatten())).T
# gbt = rsources.Beam(nrays=8)
# gbt.x = bladeVertices[:, 0]
# gbt.y = bladeVertices[:, 1]
# gbt.z = bladeVertices[:, 2]
# oe.local_to_global(gbt)
#
# self.slitEdges[oe.name].append(np.vstack((gbt.x, gbt.y,
# gbt.z)).T)
#
# xv, zv = np.meshgrid(xGridOe, zGridOe)
# xv = xv.flatten()
# zv = zv.flatten()
#
# gbT = rsources.Beam(nrays=len(xv))
# gbT.x = xv
# gbT.y = np.zeros_like(xv)
# gbT.z = zv
#
# gbT.a = np.zeros_like(xv)
# gbT.b = np.ones_like(xv)
# gbT.c = np.zeros_like(xv)
#
# oe.local_to_global(gbT)
#
# for ysurf in [0, dT]:
# nsurf = 1. if (dT > 0 and ysurf != 0) or\
# (ysurf == 0 and dT < 0) else -1.
# self.plotCurvedMesh(gbT.x, gbT.y, gbT.z,
# gbT.a, gbT.b[:]*nsurf, gbT.c,
# [0, ysurf, 0])
#
# # side
# zv, yv = np.meshgrid(zGridOe, yGridOe)
# zv = zv.flatten()
# yv = yv.flatten()
#
# gbT = rsources.Beam(nrays=len(yv))
# gbT.y = yv
# gbT.x = np.zeros_like(yv)
# gbT.z = zv
#
# gbT.a = np.ones_like(yv)
# gbT.b = np.zeros_like(yv)
# gbT.c = np.zeros_like(yv)
#
# oe.local_to_global(gbT)
#
# for isurf, xsurf in enumerate([face[0], face[1]]):
# nsurf = 1. if isurf == 0 else -1
# self.plotCurvedMesh(gbT.x, gbT.y, gbT.z,
# gbT.a[:]*nsurf, gbT.b, gbT.c,
# [xsurf, 0, 0])
#
# # top
# xv, yv = np.meshgrid(xGridOe, yGridOe)
# xv = xv.flatten()
# yv = yv.flatten()
#
# gbT = rsources.Beam(nrays=len(yv))
# gbT.x = xv
# gbT.y = yv
# gbT.z = np.zeros_like(xv)
#
# gbT.a = np.zeros_like(yv)
# gbT.b = np.zeros_like(yv)
# gbT.c = np.ones_like(yv)
#
# oe.local_to_global(gbT)
#
# for isurf, zsurf in enumerate([face[2], face[3]]):
# nsurf = 1. if isurf == 0 else -1
# self.plotCurvedMesh(gbT.x, gbT.y, gbT.z,
# gbT.a, gbT.b, gbT.c[:]*nsurf,
# [0, 0, zsurf])
gl.glDisable(gl.GL_MAP2_VERTEX_3)
gl.glDisable(gl.GL_MAP2_NORMAL)
def plotScreen(self, oe, dimensions=None, frameColor=None, plotFWHM=False):
scAbsZ = np.linalg.norm(oe.z * self.scaleVec)
scAbsX = np.linalg.norm(oe.x * self.scaleVec)
if dimensions is not None:
vScrHW = dimensions[0]
vScrHH = dimensions[1]
else:
vScrHW = self.vScreenSize
vScrHH = self.vScreenSize
dX = vScrHW * np.array(oe.x) * self.maxLen / scAbsX
dZ = vScrHH * np.array(oe.z) * self.maxLen / scAbsZ
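# The screen is a rectangle centred at oe.center and spanned by +/-dX (along oe.x)
# and +/-dZ (along oe.z), scaled to the scene size.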
vScreenBody = np.zeros((4, 3))
vScreenBody[0, :] = vScreenBody[1, :] = oe.center - dX
vScreenBody[2, :] = vScreenBody[3, :] = oe.center + dX
vScreenBody[0, :] -= dZ
vScreenBody[1, :] += dZ
vScreenBody[2, :] += dZ
vScreenBody[3, :] -= dZ
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)
gl.glBegin(gl.GL_QUADS)
# if self.invertColors:
# gl.glColor4f(0.0, 0.0, 0.0, 0.2)
# else:
# gl.glColor4f(1.0, 1.0, 1.0, 0.2)
for i in range(4):
gl.glVertex3f(*self.modelToWorld(vScreenBody[i, :] -
self.coordOffset))
gl.glEnd()
if frameColor is not None:
self.virtScreen.frame = vScreenBody
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
gl.glLineWidth(2)
gl.glBegin(gl.GL_QUADS)
gl.glColor4f(*frameColor)
for i in range(4):
gl.glVertex3f(*self.modelToWorld(vScreenBody[i, :] -
self.coordOffset))
gl.glEnd()
if plotFWHM:
gl.glLineWidth(1)
gl.glDisable(gl.GL_LINE_SMOOTH)
if self.invertColors:
gl.glColor4f(0.0, 0.0, 0.0, 1.)
else:
gl.glColor4f(1.0, 1.0, 1.0, 1.)
startVec = np.array([0, 1, 0])
destVec = np.array(oe.y / self.scaleVec)
rotVec = np.cross(startVec, destVec)
rotAngle = np.degrees(np.arccos(
np.dot(startVec, destVec) /
np.linalg.norm(startVec) / np.linalg.norm(destVec)))
rotVecGL = np.float32(np.hstack((rotAngle, rotVec)))
pView = gl.glGetIntegerv(gl.GL_VIEWPORT)
pModel = gl.glGetDoublev(gl.GL_MODELVIEW_MATRIX)
pProjection = gl.glGetDoublev(gl.GL_PROJECTION_MATRIX)
scr = np.zeros((3, 3))
for iAx in range(3):
scr[iAx] = np.array(gl.gluProject(
*(self.modelToWorld(vScreenBody[iAx] - self.coordOffset)),
model=pModel, proj=pProjection, view=pView))
vFlip = 2. if scr[0, 1] > scr[1, 1] else 0.
hFlip = 2. if scr[1, 0] > scr[2, 0] else 0.
for iAx, text in enumerate(oe.FWHMstr):
fontScale = self.fontSize / 12500.
coord = self.modelToWorld(
(vScreenBody[iAx + 1] + vScreenBody[iAx + 2]) * 0.5 -
self.coordOffset)
coordShift = np.zeros(3, dtype=np.float32)
if iAx == 0: # Horizontal Label
coordShift[0] = (hFlip - 1.) * fontScale *\
len(text) * 104.76 * 0.5
coordShift[2] = fontScale * 200.
else: # Vertical Label
coordShift[0] = fontScale * 200.
coordShift[2] = (vFlip - 1.) * fontScale *\
len(text) * 104.76 * 0.5
gl.glPushMatrix()
gl.glTranslatef(*coord)
gl.glRotatef(*rotVecGL)
gl.glTranslatef(*coordShift)
gl.glRotatef(180.*(vFlip*0.5), 1, 0, 0)
gl.glRotatef(180.*(hFlip*0.5), 0, 0, 1)
if iAx > 0:
gl.glRotatef(-90, 0, 1, 0)
if iAx == 0: # Horizontal Label to half height
gl.glTranslatef(0, 0, -50. * fontScale)
else: # Vertical Label to half height
gl.glTranslatef(-50. * fontScale, 0, 0)
gl.glRotatef(90, 1, 0, 0)
gl.glScalef(fontScale, fontScale, fontScale)
for symbol in text:
gl.glutStrokeCharacter(
gl.GLUT_STROKE_MONO_ROMAN, ord(symbol))
gl.glPopMatrix()
gl.glEnable(gl.GL_LINE_SMOOTH)
def plotHemiScreen(self, oe, dimensions=None):
try:
rMajor = oe.R
except: # analysis:ignore
rMajor = 1000.
if dimensions is not None:
rMinor = dimensions
else:
rMinor = self.vScreenSize
if rMinor > rMajor:
rMinor = rMajor
yVec = np.array(oe.x)
sphereCenter = np.array(oe.center)
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)
if self.invertColors:
gl.glColor4f(0.0, 0.0, 0.0, 0.2)
else:
gl.glColor4f(1.0, 1.0, 1.0, 0.2)
gl.glEnable(gl.GL_MAP2_VERTEX_3)
dAngle = np.arctan2(rMinor, rMajor)
xLimits = [-dAngle + yVec[0], dAngle + yVec[0]]
yLimits = [-dAngle + yVec[2], dAngle + yVec[2]]
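# The hemispherical screen is sampled on an angular grid: rays are launched from the
# sphere centre over that grid and exposed on the screen to obtain the control points
# for the glMap2 evaluator patches.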
for i in range(self.tiles[0]):
deltaX = (xLimits[1] - xLimits[0]) /\
float(self.tiles[0])
xGridOe = np.linspace(xLimits[0] + i*deltaX,
xLimits[0] + (i+1)*deltaX,
self.surfCPOrder)
for k in range(self.tiles[1]):
deltaY = (yLimits[1] - yLimits[0]) /\
float(self.tiles[1])
yGridOe = np.linspace(yLimits[0] + k*deltaY,
yLimits[0] + (k+1)*deltaY,
self.surfCPOrder)
xv, yv = np.meshgrid(xGridOe, yGridOe)
xv = xv.flatten()
yv = yv.flatten()
ibp = rsources.Beam(nrays=len(xv))
ibp.x[:] = sphereCenter[0]
ibp.y[:] = sphereCenter[1]
ibp.z[:] = sphereCenter[2]
ibp.b[:] = yVec[1]
ibp.a = xv
ibp.c = yv
ibp.state[:] = 1
gbp = oe.expose_global(beam=ibp)
surfCP = np.vstack((gbp.x - self.coordOffset[0],
gbp.y - self.coordOffset[1],
gbp.z - self.coordOffset[2])).T
gl.glMap2f(gl.GL_MAP2_VERTEX_3, 0, 1, 0, 1,
self.modelToWorld(surfCP.reshape(
self.surfCPOrder,
self.surfCPOrder, 3)))
gl.glMapGrid2f(self.surfCPOrder, 0.0, 1.0,
self.surfCPOrder, 0.0, 1.0)
gl.glEvalMesh2(gl.GL_FILL, 0, self.surfCPOrder,
0, self.surfCPOrder)
gl.glDisable(gl.GL_MAP2_VERTEX_3)
def addLighting(self, pos):
spot = 60
exp = 30
ambient = [0.2, 0.2, 0.2, 1]
diffuse = [0.5, 0.5, 0.5, 1]
specular = [1.0, 1.0, 1.0, 1]
gl.glEnable(gl.GL_LIGHTING)
# corners = [[-pos, pos, pos, 1], [-pos, -pos, -pos, 1],
# [-pos, pos, -pos, 1], [-pos, -pos, pos, 1],
# [pos, pos, -pos, 1], [pos, -pos, pos, 1],
# [pos, pos, pos, 1], [pos, -pos, -pos, 1]]
corners = [[0, 0, pos, 1], [0, pos, 0, 1],
[pos, 0, 0, 1], [-pos, 0, 0, 1],
[0, -pos, 0, 1], [0, 0, -pos, 1]]
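# Six lights are placed on the coordinate half-axes at distance 'pos'; each spot
# points towards the opposite half-axis so the scene is lit from all sides.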
gl.glLightModeli(gl.GL_LIGHT_MODEL_TWO_SIDE, 0)
for iLight in range(len(corners)):
light = gl.GL_LIGHT0 + iLight
gl.glEnable(light)
gl.glLightfv(light, gl.GL_POSITION, corners[iLight])
gl.glLightfv(light, gl.GL_SPOT_DIRECTION,
np.array(corners[len(corners)-iLight-1])/pos)
gl.glLightfv(light, gl.GL_SPOT_CUTOFF, spot)
gl.glLightfv(light, gl.GL_SPOT_EXPONENT, exp)
gl.glLightfv(light, gl.GL_AMBIENT, ambient)
gl.glLightfv(light, gl.GL_DIFFUSE, diffuse)
gl.glLightfv(light, gl.GL_SPECULAR, specular)
# gl.glBegin(gl.GL_LINES)
# glVertex4f(*corners[iLight])
# glVertex4f(*corners[len(corners)-iLight-1])
# gl.glEnd()
def toggleHelp(self):
self.showHelp = not self.showHelp
self.glDraw()
def drawHelp(self):
hHeight = 300
hWidth = 500
pView = gl.glGetIntegerv(gl.GL_VIEWPORT)
gl.glViewport(0, self.viewPortGL[3]-hHeight, hWidth, hHeight)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
gl.glOrtho(-1, 1, -1, 1, -1, 1)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)
gl.glBegin(gl.GL_QUADS)
if self.invertColors:
gl.glColor4f(1.0, 1.0, 1.0, 0.9)
else:
gl.glColor4f(0.0, 0.0, 0.0, 0.9)
backScreen = [[1, 1], [1, -1],
[-1, -1], [-1, 1]]
for corner in backScreen:
gl.glVertex3f(corner[0], corner[1], 0)
gl.glEnd()
if self.invertColors:
gl.glColor4f(0.0, 0.0, 0.0, 1.0)
else:
gl.glColor4f(1.0, 1.0, 1.0, 1.0)
gl.glLineWidth(3)
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
gl.glBegin(gl.GL_QUADS)
backScreen = [[1, 1], [1, -1],
[-1, -1], [-1, 1]]
for corner in backScreen:
gl.glVertex3f(corner[0], corner[1], 0)
gl.glEnd()
helpList = [
'F1: Open/Close this help window',
'F3: Add/Remove Virtual Screen',
'F4: Dock/Undock xrtGlow if launched from xrtQook',
'F5/F6: Quick Save/Load Scene']
if hasattr(self, 'generator'):
helpList += ['F7: Start recording movie']
helpList += [
'LeftMouse: Rotate the Scene',
'SHIFT+LeftMouse: Translate in perpendicular to the shortest view axis', # analysis:ignore
'ALT+LeftMouse: Translate in parallel to the shortest view axis', # analysis:ignore
'CTRL+LeftMouse: Drag Virtual Screen',
'ALT+WheelMouse: Scale Virtual Screen',
'CTRL+SHIFT+LeftMouse: Translate the Beamline around Virtual Screen', # analysis:ignore
' (with Beamline along the longest view axis)', # analysis:ignore
'CTRL+ALT+LeftMouse: Translate the Beamline around Virtual Screen', # analysis:ignore
' (with Beamline along the shortest view axis)', # analysis:ignore
'CTRL+T: Toggle Virtual Screen orientation (vertical/normal to the beam)', # analysis:ignore
'WheelMouse: Zoom the Beamline',
'CTRL+WheelMouse: Zoom the Scene']
for iLine, text in enumerate(helpList):
self.drawText([-1. + 0.05,
1. - 2. * (iLine + 1) / float(len(helpList)+1), 0],
text, True)
gl.glFlush()
gl.glViewport(*pView)
def drawCone(self, z, r, nFacets, color):
phi = np.linspace(0, 2*np.pi, nFacets)
xp = r * np.cos(phi)
yp = r * np.sin(phi)
base = np.vstack((xp, yp, np.zeros_like(xp)))
coneVertices = np.hstack((np.array([0, 0, z]).reshape(3, 1),
base)).T
gridColor = np.zeros((len(coneVertices), 4))
gridColor[:, color] = 1
gridColor[:, 3] = 0.75
gridArray = gl.vbo.VBO(np.float32(coneVertices))
gridArray.bind()
gl.glVertexPointerf(gridArray)
gridColorArray = gl.vbo.VBO(np.float32(gridColor))
gridColorArray.bind()
gl.glColorPointerf(gridColorArray)
gl.glDrawArrays(gl.GL_TRIANGLE_FAN, 0, len(gridArray))
gridArray.unbind()
gridColorArray.unbind()
def drawLocalAxes(self, oe, is2ndXtal):
def drawArrow(color, arrowArray, yText='hkl'):
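# colour code: 0 -> red arrow (local Z), 1 -> green (local Y), 2 -> blue (local X),
# 4 -> yellow (asymmetric cut, labelled with yText/hkl), 5 -> orange (no label)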
gridColor = np.zeros((len(arrowArray) - 1, 4))
gridColor[:, 3] = 0.75
if color == 4:
gridColor[:, 0] = 1
gridColor[:, 1] = 1
elif color == 5:
gridColor[:, 0] = 1
gridColor[:, 1] = 0.5
else:
gridColor[:, color] = 1
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glEnableClientState(gl.GL_COLOR_ARRAY)
gridArray = gl.vbo.VBO(np.float32(arrowArray[1:, :]))
gridArray.bind()
gl.glVertexPointerf(gridArray)
gridColorArray = gl.vbo.VBO(np.float32(gridColor))
gridColorArray.bind()
gl.glColorPointerf(gridColorArray)
gl.glDrawArrays(gl.GL_TRIANGLE_FAN, 0, len(gridArray))
gridArray.unbind()
gridColorArray.unbind()
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
gl.glDisableClientState(gl.GL_COLOR_ARRAY)
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)
gl.glBegin(gl.GL_LINES)
colorVec = [0, 0, 0, 0.75]
if color == 4:
colorVec[0] = 1
colorVec[1] = 1
elif color == 5:
colorVec[0] = 1
colorVec[1] = 0.5
else:
colorVec[color] = 1
gl.glColor4f(*colorVec)
gl.glVertex3f(*arrowArray[0, :])
gl.glVertex3f(*arrowArray[1, :])
gl.glEnd()
gl.glColor4f(*colorVec)
gl.glRasterPos3f(*arrowArray[1, :])
if color == 0:
axSymb = 'Z'
elif color == 1:
axSymb = 'Y'
elif color == 2:
axSymb = 'X'
elif color == 4:
axSymb = yText
else:
axSymb = ''
for symbol in " {}".format(axSymb):
gl.glutBitmapCharacter(self.fixedFont, ord(symbol))
gl.glDisable(gl.GL_LINE_SMOOTH)
z, r, nFacets = 0.25, 0.02, 20
phi = np.linspace(0, 2*np.pi, nFacets)
xp = np.insert(r * np.cos(phi), 0, [0., 0.])
yp = np.insert(r * np.sin(phi), 0, [0., 0.])
zp = np.insert(z*0.8*np.ones_like(phi), 0, [0., z])
crPlaneZ = None
yText = None
if hasattr(oe, 'local_n'):
material = None
if hasattr(oe, 'material'):
material = oe.material
if is2ndXtal:
zExt = '2'
if hasattr(oe, 'material2'):
material = oe.material2
else:
zExt = '1' if hasattr(oe, 'local_n1') else ''
if raycing.is_sequence(material):
material = material[oe.curSurface]
local_n = getattr(oe, 'local_n{}'.format(zExt))
normals = local_n(0, 0)
if len(normals) > 3:
crPlaneZ = np.array(normals[:3], dtype=np.float)
crPlaneZ /= np.linalg.norm(crPlaneZ)
if material not in [None, 'None']:
if hasattr(material, 'hkl'):
hklSeparator = ',' if np.any(np.array(
material.hkl) >= 10) else ''
yText = '[{0[0]}{1}{0[1]}{1}{0[2]}]'.format(
list(material.hkl), hklSeparator)
# yText = '{}'.format(list(material.hkl))
cb = rsources.Beam(nrays=nFacets+2)
cb.a[:] = cb.b[:] = cb.c[:] = 0.
cb.a[0] = cb.b[1] = cb.c[2] = 1.
if crPlaneZ is not None: # Adding asymmetric crystal orientation
asAlpha = np.arccos(crPlaneZ[2])
acpX = np.array([0., 0., 1.], dtype=np.float) if asAlpha == 0 else\
np.cross(np.array([0., 0., 1.], dtype=np.float), crPlaneZ)
acpX /= np.linalg.norm(acpX)
cb.a[3] = acpX[0]
cb.b[3] = acpX[1]
cb.c[3] = acpX[2]
cb.state[:] = 1
if isinstance(oe, (rscreens.HemisphericScreen, rscreens.Screen)):
cb.x[:] += oe.center[0]
cb.y[:] += oe.center[1]
cb.z[:] += oe.center[2]
oeNormX = oe.x
oeNormY = oe.y
else:
if is2ndXtal:
oe.local_to_global(cb, is2ndXtal=is2ndXtal)
else:
oe.local_to_global(cb)
oeNormX = np.array([cb.a[0], cb.b[0], cb.c[0]])
oeNormY = np.array([cb.a[1], cb.b[1], cb.c[1]])
scNormX = oeNormX * self.scaleVec
scNormY = oeNormY * self.scaleVec
scNormX /= np.linalg.norm(scNormX)
scNormY /= np.linalg.norm(scNormY)
scNormZ = np.cross(scNormX, scNormY)
scNormZ /= np.linalg.norm(scNormZ)
for iAx in range(3):
if iAx == 0:
xVec = scNormX
yVec = scNormY
zVec = scNormZ
elif iAx == 2:
xVec = scNormY
yVec = scNormZ
zVec = scNormX
else:
xVec = scNormZ
yVec = scNormX
zVec = scNormY
dX = xp[:, np.newaxis] * xVec
dY = yp[:, np.newaxis] * yVec
dZ = zp[:, np.newaxis] * zVec
coneCP = self.modelToWorld(np.vstack((
cb.x - self.coordOffset[0], cb.y - self.coordOffset[1],
cb.z - self.coordOffset[2])).T) + dX + dY + dZ
drawArrow(iAx, coneCP)
if crPlaneZ is not None: # drawAsymmetricPlane:
crPlaneX = np.array([cb.a[3], cb.b[3], cb.c[3]])
crPlaneNormX = crPlaneX * self.scaleVec
crPlaneNormX /= np.linalg.norm(crPlaneNormX)
crPlaneNormZ = self.rotateVecQ(
scNormZ, self.vecToQ(crPlaneNormX, asAlpha))
crPlaneNormZ /= np.linalg.norm(crPlaneNormZ)
crPlaneNormY = np.cross(crPlaneNormX, crPlaneNormZ)
crPlaneNormY /= np.linalg.norm(crPlaneNormY)
color = 4
dX = xp[:, np.newaxis] * crPlaneNormX
dY = yp[:, np.newaxis] * crPlaneNormY
dZ = zp[:, np.newaxis] * crPlaneNormZ
coneCP = self.modelToWorld(np.vstack((
cb.x - self.coordOffset[0], cb.y - self.coordOffset[1],
cb.z - self.coordOffset[2])).T) + dX + dY + dZ
drawArrow(color, coneCP, yText)
def drawDirectionAxes(self):
arrowSize = 0.05
axisLen = 0.1
tLen = (arrowSize + axisLen) * 2
gl.glLineWidth(1.)
pView = gl.glGetIntegerv(gl.GL_VIEWPORT)
gl.glViewport(0, 0, int(150*self.aspect), 150)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
if self.perspectiveEnabled:
gl.gluPerspective(60, self.aspect, 0.001, 10)
else:
gl.glOrtho(-tLen*self.aspect, tLen*self.aspect, -tLen, tLen, -1, 1)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
gl.gluLookAt(.5, 0.0, 0.0,
0.0, 0.0, 0.0,
0.0, 0.0, 1.0)
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)
gl.glHint(gl.GL_POLYGON_SMOOTH_HINT, gl.GL_NICEST)
gl.glHint(gl.GL_POINT_SMOOTH_HINT, gl.GL_NICEST)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glEnableClientState(gl.GL_COLOR_ARRAY)
self.rotateZYX()
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)
for iAx in range(3):
if not (not self.perspectiveEnabled and
2-iAx == self.visibleAxes[2]):
gl.glPushMatrix()
trVec = np.zeros(3, dtype=np.float32)
trVec[2-iAx] = axisLen
gl.glTranslatef(*trVec)
if iAx == 1:
gl.glRotatef(-90, 1.0, 0.0, 0.0)
elif iAx == 2:
gl.glRotatef(90, 0.0, 1.0, 0.0)
self.drawCone(arrowSize, 0.02, 20, iAx)
gl.glPopMatrix()
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
gl.glDisableClientState(gl.GL_COLOR_ARRAY)
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
gl.glBegin(gl.GL_LINES)
for iAx in range(3):
if not (not self.perspectiveEnabled and
2-iAx == self.visibleAxes[2]):
colorVec = [0, 0, 0, 0.75]
colorVec[iAx] = 1
gl.glColor4f(*colorVec)
gl.glVertex3f(0, 0, 0)
trVec = np.zeros(3, dtype=np.float32)
trVec[2-iAx] = axisLen
gl.glVertex3f(*trVec)
gl.glColor4f(*colorVec)
gl.glEnd()
if not (not self.perspectiveEnabled and self.visibleAxes[2] == 2):
gl.glColor4f(1, 0, 0, 1)
gl.glRasterPos3f(0, 0, axisLen*1.5)
for symbol in " {} (mm)".format('Z'):
gl.glutBitmapCharacter(self.fixedFont, ord(symbol))
if not (not self.perspectiveEnabled and self.visibleAxes[2] == 1):
gl.glColor4f(0, 0.75, 0, 1)
gl.glRasterPos3f(0, axisLen*1.5, 0)
for symbol in " {} (mm)".format('Y'):
gl.glutBitmapCharacter(self.fixedFont, ord(symbol))
if not (not self.perspectiveEnabled and self.visibleAxes[2] == 0):
gl.glColor4f(0, 0.5, 1, 1)
gl.glRasterPos3f(axisLen*1.5, 0, 0)
for symbol in " {} (mm)".format('X'):
gl.glutBitmapCharacter(self.fixedFont, ord(symbol))
# gl.glFlush()
gl.glViewport(*pView)
gl.glColor4f(1, 1, 1, 1)
gl.glDisable(gl.GL_LINE_SMOOTH)
def initializeGL(self):
gl.glutInit()
gl.glutInitDisplayMode(gl.GLUT_RGBA | gl.GLUT_DOUBLE | gl.GLUT_DEPTH)
gl.glViewport(*self.viewPortGL)
def resizeGL(self, widthInPixels, heightInPixels):
self.viewPortGL = [0, 0, widthInPixels, heightInPixels]
gl.glViewport(*self.viewPortGL)
self.aspect = np.float32(widthInPixels)/np.float32(heightInPixels)
def populateVScreen(self):
if any([prop is None for prop in [self.virtBeam,
self.selColorMax,
self.selColorMin]]):
return
startBeam = self.virtBeam
try:
vColorArray = self.getColor(startBeam)
except AttributeError:
if _DEBUG_:
raise
else:
return
good = (startBeam.state == 1) | (startBeam.state == 2)
intensity = startBeam.Jss + startBeam.Jpp
intensityAll = intensity / np.max(intensity[good])
good = np.logical_and(good, intensityAll >= self.cutoffI)
goodC = np.logical_and(
vColorArray <= self.selColorMax,
vColorArray >= self.selColorMin)
good = np.logical_and(good, goodC)
if len(vColorArray[good]) == 0:
return
self.globalColorIndex = good if self.vScreenForColors else None
if self.globalNorm:
alphaMax = 1.
else:
if len(intensity[good]) > 0:
alphaMax = np.max(intensity[good])
else:
alphaMax = 1.
alphaMax = alphaMax if alphaMax != 0 else 1.
# alphaDots = intensity[good].T / alphaMax
# colorsDots = np.array(vColorArray[good]).T
alphaDots = intensity.T / alphaMax
colorsDots = np.array(vColorArray).T
if self.colorMin == self.colorMax:
if self.colorMax == 0: # and self.colorMin == 0 too
self.colorMin, self.colorMax = -0.1, 0.1
else:
self.colorMin = self.colorMax * 0.99
self.colorMax *= 1.01
colorsDots = colorFactor * (colorsDots-self.colorMin) /\
(self.colorMax-self.colorMin)
depthDots = copy.deepcopy(colorsDots[good]) * self.depthScaler
colorsDots = np.dstack((colorsDots,
np.ones_like(alphaDots)*colorSaturation,
alphaDots if self.iHSV else
np.ones_like(alphaDots)))
deltaY = self.virtScreen.y * depthDots[:, np.newaxis]
vertices = np.array(
startBeam.x[good] - deltaY[:, 0] - self.coordOffset[0])
vertices = np.vstack((vertices, np.array(
startBeam.y[good] - deltaY[:, 1] - self.coordOffset[1])))
vertices = np.vstack((vertices, np.array(
startBeam.z[good] - deltaY[:, 2] - self.coordOffset[2])))
self.virtDotsArray = vertices.T
colorsRGBDots = np.squeeze(mpl.colors.hsv_to_rgb(colorsDots))
if self.globalNorm and len(alphaDots[good]) > 0:
alphaMax = np.max(alphaDots[good])
else:
alphaMax = 1.
alphaColorDots = np.array([alphaDots / alphaMax]).T
if self.vScreenForColors:
self.globalColorArray = np.float32(np.hstack([colorsRGBDots,
alphaColorDots]))
self.virtDotsColor = np.float32(np.hstack([colorsRGBDots[good],
alphaColorDots[good]]))
histogram = np.histogram(np.array(
vColorArray[good]),
range=(self.colorMin, self.colorMax),
weights=intensity[good],
bins=100)
self.histogramUpdated.emit(histogram)
locBeam = self.virtScreen.expose(self.virtScreen.beamToExpose)
lbi = intensity[good]
self.virtScreen.FWHMstr = []
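# Estimate the FWHM of the footprint along the local x and z of the virtual screen:
# histogram the coordinates weighted by intensity and take the span of the bins that
# lie above half of the histogram maximum.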
for axis in ['x', 'z']:
goodlb = getattr(locBeam, axis)[good]
histAxis = np.histogram(goodlb, weights=lbi, bins=100)
hMax = np.max(histAxis[0])
hNorm = histAxis[0] / hMax
topEl = np.where(hNorm >= 0.5)[0]
fwhm = np.abs(histAxis[1][topEl[0]] - histAxis[1][topEl[-1]])
order = np.floor(np.log10(fwhm)) if fwhm > 0 else -10
if order >= 2:
units = "m"
mplier = 1e-3
elif order >= -1:
units = "mm"
mplier = 1.
elif order >= -4:
units = "um"
mplier = 1e3
else: # order >= -7:
units = "nm"
mplier = 1e6
self.virtScreen.FWHMstr.append(
"FWHM({0}) = {1:.3f}{2}".format(
str(axis).upper(), fwhm*mplier, units))
def createVScreen(self):
try:
self.virtScreen = rscreens.Screen(
bl=list(self.oesList.values())[0][0].bl)
self.virtScreen.center = self.worldToModel(np.array([0, 0, 0])) +\
self.coordOffset
self.positionVScreen()
if self.vScreenForColors:
self.populateVerticesOnly(self.segmentModel)
self.glDraw()
except: # analysis:ignore
if _DEBUG_:
raise
else:
self.clearVScreen()
def positionVScreen(self):
if self.virtScreen is None:
return
cntr = self.virtScreen.center
tmpDist = 1e12
totalDist = 1e12
cProj = None
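# Project the screen centre onto every beam segment; the closest projection that lies
# inside its segment (s == 0) is preferred, otherwise the segment with the nearest
# start point is used as a fallback.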
for segment in self.arrayOfRays[0]:
if segment[3] is None:
continue
try:
beamStartTmp = self.beamsDict[segment[1]]
beamEndTmp = self.beamsDict[segment[3]]
bStart0 = beamStartTmp.wCenter
bEnd0 = beamEndTmp.wCenter
beam0 = bEnd0 - bStart0
# Finding the projection of the VScreen.center on segments
cProjTmp = bStart0 + np.dot(cntr-bStart0, beam0) /\
np.dot(beam0, beam0) * beam0
s = 0
for iDim in range(3):
s += np.floor(np.abs(np.sign(cProjTmp[iDim] -
bStart0[iDim]) +
np.sign(cProjTmp[iDim] -
bEnd0[iDim]))*0.6)
dist = np.linalg.norm(cProjTmp-cntr)
if dist < tmpDist:
if s == 0:
tmpDist = dist
beamStart0 = beamStartTmp
bStartC = bStart0
bEndC = bEnd0
cProj = cProjTmp
else:
if np.linalg.norm(bStart0-cntr) < totalDist:
totalDist = np.linalg.norm(bStart0-cntr)
self.virtScreen.center = cProjTmp
self.virtScreen.beamStart = bStart0
self.virtScreen.beamEnd = bEnd0
self.virtScreen.beamToExpose = beamStartTmp
except: # analysis:ignore
if _DEBUG_:
raise
else:
continue
if cProj is not None:
self.virtScreen.center = cProj
self.virtScreen.beamStart = bStartC
self.virtScreen.beamEnd = bEndC
self.virtScreen.beamToExpose = beamStart0
if self.isVirtScreenNormal:
vsX = [self.virtScreen.beamToExpose.b[0],
-self.virtScreen.beamToExpose.a[0], 0]
vsY = [self.virtScreen.beamToExpose.a[0],
self.virtScreen.beamToExpose.b[0],
self.virtScreen.beamToExpose.c[0]]
vsZ = np.cross(vsX/np.linalg.norm(vsX),
vsY/np.linalg.norm(vsY))
else:
vsX = 'auto'
vsZ = 'auto'
self.virtScreen.set_orientation(vsX, vsZ)
try:
self.virtBeam = self.virtScreen.expose_global(
self.virtScreen.beamToExpose)
self.populateVScreen()
except: # analysis:ignore
self.clearVScreen()
def toggleVScreen(self):
if self.virtScreen is None:
self.createVScreen()
else:
self.clearVScreen()
def clearVScreen(self):
self.virtScreen = None
self.virtBeam = None
self.virtDotsArray = None
self.virtDotsColor = None
if self.globalColorIndex is not None:
self.globalColorIndex = None
self.populateVerticesOnly(self.segmentModel)
self.histogramUpdated.emit((None, None))
self.glDraw()
def switchVScreenTilt(self):
self.isVirtScreenNormal = not self.isVirtScreenNormal
self.positionVScreen()
self.glDraw()
def mouseMoveEvent(self, mEvent):
pView = gl.glGetIntegerv(gl.GL_VIEWPORT)
mouseX = mEvent.x()
mouseY = pView[3] - mEvent.y()
ctrlOn = bool(int(mEvent.modifiers()) & int(qt.ControlModifier))
altOn = bool(int(mEvent.modifiers()) & int(qt.AltModifier))
shiftOn = bool(int(mEvent.modifiers()) & int(qt.ShiftModifier))
if mEvent.buttons() == qt.LeftButton:
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
gl.gluLookAt(self.cameraPos[0], self.cameraPos[1],
self.cameraPos[2],
self.cameraTarget[0], self.cameraTarget[1],
self.cameraTarget[2],
0.0, 0.0, 1.0)
self.rotateZYX()
pModel = gl.glGetDoublev(gl.GL_MODELVIEW_MATRIX)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
if self.perspectiveEnabled:
gl.gluPerspective(self.cameraAngle, self.aspect, 0.01, 100)
else:
orthoView = self.cameraPos[0]*0.45
gl.glOrtho(-orthoView*self.aspect, orthoView*self.aspect,
-orthoView, orthoView, -100, 100)
pProjection = gl.glGetDoublev(gl.GL_PROJECTION_MATRIX)
if mEvent.modifiers() == qt.NoModifier:
self.rotations[2][0] += np.float32(
self.signs[2][1] *
(mouseX - self.prevMPos[0]) * 36. / 90.)
self.rotations[1][0] -= np.float32(
(mouseY - self.prevMPos[1]) * 36. / 90.)
for ax in range(2):
if self.rotations[self.visibleAxes[ax+1]][0] > 180:
self.rotations[self.visibleAxes[ax+1]][0] -= 360
if self.rotations[self.visibleAxes[ax+1]][0] < -180:
self.rotations[self.visibleAxes[ax+1]][0] += 360
self.updateQuats()
self.rotationUpdated.emit(self.rotations)
elif shiftOn:
for iDim in range(2):
mStart = np.zeros(3)
mEnd = np.zeros(3)
mEnd[self.visibleAxes[iDim]] = 1.
# mEnd = -1 * mStart
pStart = np.array(gl.gluProject(
*mStart, model=pModel, proj=pProjection,
view=pView)[:-1])
pEnd = np.array(gl.gluProject(
*mEnd, model=pModel, proj=pProjection,
view=pView)[:-1])
pScr = np.array([mouseX, mouseY])
prevPScr = np.array(self.prevMPos)
bDir = pEnd - pStart
pProj = pStart + np.dot(pScr - pStart, bDir) /\
np.dot(bDir, bDir) * bDir
pPrevProj = pStart + np.dot(prevPScr - pStart, bDir) /\
np.dot(bDir, bDir) * bDir
self.tVec[self.visibleAxes[iDim]] += np.dot(
pProj - pPrevProj, bDir) / np.dot(bDir, bDir) *\
self.maxLen / self.scaleVec[self.visibleAxes[iDim]]
if ctrlOn and self.virtScreen is not None:
self.virtScreen.center[self.visibleAxes[iDim]] -=\
np.dot(
pProj - pPrevProj, bDir) / np.dot(bDir, bDir) *\
self.maxLen / self.scaleVec[self.visibleAxes[iDim]]
if ctrlOn and self.virtScreen is not None:
v0 = self.virtScreen.center
self.positionVScreen()
self.tVec -= self.virtScreen.center - v0
elif altOn:
mStart = np.zeros(3)
mEnd = np.zeros(3)
mEnd[self.visibleAxes[2]] = 1.
# mEnd = -1 * mStart
pStart = np.array(gl.gluProject(
*mStart, model=pModel, proj=pProjection,
view=pView)[:-1])
pEnd = np.array(gl.gluProject(
*mEnd, model=pModel, proj=pProjection,
view=pView)[:-1])
pScr = np.array([mouseX, mouseY])
prevPScr = np.array(self.prevMPos)
bDir = pEnd - pStart
pProj = pStart + np.dot(pScr - pStart, bDir) /\
np.dot(bDir, bDir) * bDir
pPrevProj = pStart + np.dot(prevPScr - pStart, bDir) /\
np.dot(bDir, bDir) * bDir
self.tVec[self.visibleAxes[2]] += np.dot(
pProj - pPrevProj, bDir) / np.dot(bDir, bDir) *\
self.maxLen / self.scaleVec[self.visibleAxes[2]]
if ctrlOn and self.virtScreen is not None:
self.virtScreen.center[self.visibleAxes[2]] -=\
np.dot(pProj - pPrevProj, bDir) / np.dot(bDir, bDir) *\
self.maxLen / self.scaleVec[self.visibleAxes[2]]
v0 = self.virtScreen.center
self.positionVScreen()
self.tVec -= self.virtScreen.center - v0
elif ctrlOn:
if self.virtScreen is not None:
worldPStart = self.modelToWorld(
self.virtScreen.beamStart - self.coordOffset)
worldPEnd = self.modelToWorld(
self.virtScreen.beamEnd - self.coordOffset)
worldBDir = worldPEnd - worldPStart
normPEnd = worldPStart + np.dot(
np.ones(3) - worldPStart, worldBDir) /\
np.dot(worldBDir, worldBDir) * worldBDir
normPStart = worldPStart + np.dot(
-1. * np.ones(3) - worldPStart, worldBDir) /\
np.dot(worldBDir, worldBDir) * worldBDir
normBDir = normPEnd - normPStart
normScale = np.sqrt(np.dot(normBDir, normBDir) /
np.dot(worldBDir, worldBDir))
if np.dot(normBDir, worldBDir) < 0:
normPStart, normPEnd = normPEnd, normPStart
pStart = np.array(gl.gluProject(
*normPStart, model=pModel, proj=pProjection,
view=pView)[:-1])
pEnd = np.array(gl.gluProject(
*normPEnd, model=pModel, proj=pProjection,
view=pView)[:-1])
pScr =
|
np.array([mouseX, mouseY])
|
numpy.array
|
# Modelling input and assumptions
# Copyright (c) 2019, 2020 <NAME>, The Australian National University
# Licensed under the MIT Licence
# Correspondence: <EMAIL>
import numpy as np
from Optimisation import scenario
Nodel = np.array(['FNQ', 'NSW', 'NT', 'QLD', 'SA', 'TAS', 'VIC', 'WA'])
PVl = np.array(['NSW']*7 + ['FNQ']*1 + ['QLD']*2 + ['FNQ']*3 + ['SA']*6 + ['TAS']*0 + ['VIC']*1 + ['WA']*1 + ['NT']*1)
Windl = np.array(['NSW']*8 + ['FNQ']*1 + ['QLD']*2 + ['FNQ']*2 + ['SA']*8 + ['TAS']*4 + ['VIC']*4 + ['WA']*3 + ['NT']*1)
resolution = 0.5
MLoad = np.genfromtxt('Data/electricity.csv', delimiter=',', skip_header=1, usecols=range(4, 4+len(Nodel))) # EOLoad(t, j), MW
for i in ['evan', 'erigid', 'earticulated', 'enonfreight', 'ebus', 'emotorcycle', 'erail', 'eair', 'ewater', 'ecooking', 'emanufacturing', 'emining']:
MLoad += np.genfromtxt('Data/{}.csv'.format(i), delimiter=',', skip_header=1, usecols=range(4, 4+len(Nodel)))
DSP = 0.8 if scenario>=31 else 0
MLoad += (1 - DSP) * np.genfromtxt('Data/ecar.csv', delimiter=',', skip_header=1, usecols=range(4, 4+len(Nodel)))
MLoadD = DSP * np.genfromtxt('Data/ecar.csv', delimiter=',', skip_header=1, usecols=range(4, 4+len(Nodel)))
TSPV = np.genfromtxt('Data/pv.csv', delimiter=',', skip_header=1, usecols=range(4, 4+len(PVl))) # TSPV(t, i), MW
TSWind = np.genfromtxt('Data/wind.csv', delimiter=',', skip_header=1, usecols=range(4, 4+len(Windl))) # TSWind(t, i), MW
assets = np.genfromtxt('Data/hydrobio.csv', dtype=None, delimiter=',', encoding=None)[1:, 1:].astype(np.float)
CHydro, CBio = [assets[:, x] * pow(10, -3) for x in range(assets.shape[1])] # CHydro(j), MW to GW
CBaseload = np.array([0, 0, 0, 0, 0, 1.0, 0, 0]) # 24/7, GW
CPeak = CHydro + CBio - CBaseload # GW
cars = np.genfromtxt('Data/cars.csv', dtype=None, delimiter=',', encoding=None)[1:, 1:].astype(np.float)
CDP = DSP * cars[:, 0] * 9.6 * pow(10, -6) # kW to GW
CDS = DSP * cars[:, 0] * 77 * 0.75 * pow(10, -6) # kWh to GWh
# FQ, NQ, NS, NV, AS, SW, only TV constrained
CDC6max = 3 * 0.63 # GW
DCloss = np.array([1500, 1000, 1000, 800, 1200, 2400, 400]) * 0.03 * pow(10, -3)
efficiency = 0.8
efficiencyD = 0.8
factor = np.genfromtxt('Data/factor.csv', delimiter=',', usecols=1)
firstyear, finalyear, timestep = (2020, 2029, 1)
if scenario<=17:
node = Nodel[scenario % 10]
MLoad, MLoadD = [x[:, np.where(Nodel==node)[0]] for x in (MLoad, MLoadD)]
TSPV = TSPV[:, np.where(PVl==node)[0]]
TSWind = TSWind[:, np.where(Windl==node)[0]]
CHydro, CBio, CBaseload, CPeak, CDP, CDS = [x[np.where(Nodel==node)[0]] for x in (CHydro, CBio, CBaseload, CPeak, CDP, CDS)]
if node=='QLD':
MLoad, MLoadD, CDP, CDS = [x / 0.9 for x in (MLoad, MLoadD, CDP, CDS)]
Nodel, PVl, Windl = [x[np.where(x==node)[0]] for x in (Nodel, PVl, Windl)]
if scenario>=21:
coverage = [np.array(['NSW', 'QLD', 'SA', 'TAS', 'VIC']),
np.array(['NSW', 'QLD', 'SA', 'TAS', 'VIC', 'WA']),
np.array(['NSW', 'NT', 'QLD', 'SA', 'TAS', 'VIC']),
np.array(['NSW', 'NT', 'QLD', 'SA', 'TAS', 'VIC', 'WA']),
np.array(['FNQ', 'NSW', 'QLD', 'SA', 'TAS', 'VIC']),
np.array(['FNQ', 'NSW', 'QLD', 'SA', 'TAS', 'VIC', 'WA']),
np.array(['FNQ', 'NSW', 'NT', 'QLD', 'SA', 'TAS', 'VIC']),
np.array(['FNQ', 'NSW', 'NT', 'QLD', 'SA', 'TAS', 'VIC', 'WA'])][scenario % 10 - 1]
MLoad, MLoadD = [x[:, np.where(np.in1d(Nodel, coverage)==True)[0]] for x in (MLoad, MLoadD)]
TSPV = TSPV[:, np.where(np.in1d(PVl, coverage)==True)[0]]
TSWind = TSWind[:, np.where(np.in1d(Windl, coverage)==True)[0]]
CHydro, CBio, CBaseload, CPeak, CDP, CDS = [x[np.where(np.in1d(Nodel, coverage)==True)[0]] for x in (CHydro, CBio, CBaseload, CPeak, CDP, CDS)]
if 'FNQ' not in coverage:
MLoad[:, np.where(coverage=='QLD')[0][0]] /= 0.9
MLoadD[:, np.where(coverage=='QLD')[0][0]] /= 0.9
CDP[np.where(coverage == 'QLD')[0]] /= 0.9
CDS[np.where(coverage == 'QLD')[0]] /= 0.9
Nodel, PVl, Windl = [x[np.where(np.in1d(x, coverage)==True)[0]] for x in (Nodel, PVl, Windl)]
intervals, nodes = MLoad.shape
years = int(resolution * intervals / 8760)
pzones, wzones = (TSPV.shape[1], TSWind.shape[1])
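# pidx / widx / sidx mark slice boundaries of the decision vector x: [:pidx] PV
# capacities, [pidx:widx] wind capacities; the next block of length 'nodes' presumably
# holds the per-node storage power CPHP (see the Solution docstring below).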
pidx, widx, sidx = (pzones, pzones + wzones, pzones + wzones + nodes)
energy = (MLoad + MLoadD).sum() * pow(10, -9) * resolution / years # PWh p.a.
contingency = list(0.25 * (MLoad + MLoadD).max(axis=0) * pow(10, -3)) # MW to GW
GBaseload = np.tile(CBaseload, (intervals, 1)) * pow(10, 3) # GW to MW
class Solution:
"""A candidate solution of decision variables CPV(i), CWind(i), CPHP(j), S-CPHS(j)"""
def __init__(self, x):
self.x = x
self.MLoad, self.MLoadD = (MLoad, MLoadD)
self.intervals, self.nodes = (intervals, nodes)
self.resolution = resolution
self.CPV = list(x[: pidx]) # CPV(i), GW
self.CWind = list(x[pidx: widx]) # CWind(i), GW
self.GPV = TSPV *
|
np.tile(self.CPV, (intervals, 1))
|
numpy.tile
|
import visualization.panda.world as wd
import robot_sim.end_effectors.gripper.robotiq85.robotiq85 as rtq85
import robot_sim.robots.ur3_dual.ur3_dual as ur3ds
import basis.robot_math as rm
import numpy as np
import modeling.collision_model as cm
import modeling.geometric_model as gm
import motion.probabilistic.rrt_connect as rrtc
import copy
import open3d as o3d
import random
from skimage.measure import LineModelND, ransac
import research_posemake_many as pose
import math
import socket
import robot_con.ur.program_builder as pb
import pickle
import time
import sympy as sp
from scipy.optimize import basinhopping
import motion.optimization_based.incremental_nik as inik
rotatedegree = 5
endthreshold = 3
objpointrange = [300, 900, -400, 800, 1051, 1500]
objpos_finalmax_lft = np.array([250, 250, 1600])
objpos_finalmax_rgt = np.array([250, -250, 1600])
## ToDo : Change the param according to the object
## param(acrylic board) ----------------------------------
objpath = "./research_flippingboard2_mm.stl"
l = 300
w = 300
h = 40
M = 4.0
g = 9.8
myu0 = 0.5
myu1 = 0.4
vmax = 30
anglemax = 20
timestep = 1.0
thetathreshold = 50
# limitdegree = 70
limitdegree = 90 + math.degrees(math.atan(h / l)) + 10
print(limitdegree)
objpos_start = np.array([.381, .250, 1.1], dtype=float)
pushpose_pre = np.array([15.46510215, -124.31216495, -22.21501633, -68.25934326, 108.02513127, 39.89826658])
pushrot = np.array([[0.02974146, -0.74159545, 0.67018776],
[0.06115005, -0.66787857, -0.74175392],
[0.99768538, 0.06304286, 0.02548492]])
## ---------------------------------------------------------
## param(stainless box) ----------------------------------------
# objpath = "./objects/TCbox.stl"
# l = 300
# w = 400
# h = 150
# M = 6.0
# g = 9.8
# myu0 = 0.4
# myu1 = 0.1
# vmax = 30
# anglemax = 20
# timestep = 1.0
# thetathreshold = 49
# # limitdegree = 125
# limitdegree = 90 + math.degrees(math.atan(h / l)) + 10
# print(limitdegree)
#
# objpos_start = np.array([381, 250, 1035], dtype=float)
# pushpose_pre = np.array([15.46510215, -124.31216495, -22.21501633, -68.25934326, 108.02513127, 39.89826658])
# pushrot = np.array([[ 0.02974146, -0.74159545, 0.67018776],
# [ 0.06115005, -0.66787857, -0.74175392],
# [ 0.99768538, 0.06304286, 0.02548492]])
## -------------------------------------------------------------
## param(plywood board) ----------------------------------------
# objpath = "./objects/400×500×44.stl"
# l = 500
# w = 400
# h = 44
# M = 6.4
# g = 9.8
# myu0 = 0.6
# myu1 = 0.3
# vmax = 45
# anglemax = 20
# timestep = 1.0
# thetathreshold = 57
# # limitdegree = 100
# limitdegree = 90 + math.degrees(math.atan(h / l)) + 10
# print(limitdegree)
#
# objpos_start = np.array([240, 140-30, 1035], dtype=float)
# pushpose_pre = np.array([12.840271549966547, -92.64224433679576, -39.088370300126584, 112.36556622471164, -92.64626048802772, 35.67784488430386])
# pushrot = np.array([[ 0.02437668, 0.74389354, 0.66785341],
# [-0.16925718, 0.66147852, -0.73061493],
# [-0.98527041, -0.09522902, 0.14203398]])
## --------------------------------------------------------------
Mg = [0, -M * g]
pulleypos = np.array([580, 370, 2500])
ropetoppos = np.array([.25, 0, 2.5])
rotate_axis = np.array([1, 0, 0])
## calibration_matrix 2020-0818
calibration_matrix = np.array([[3.95473025e-02, -8.94575014e-01, -4.45164638e-01, 7.62553715e+02],
[-9.98624616e-01, -2.00371608e-02, -4.84498644e-02, 6.67240739e+01],
[3.44222026e-02, 4.46468426e-01, -8.94137045e-01, 2.12149540e+03],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
def gethangedpos(objpos, objrot):
## for the plywood board
hangedpos = copy.copy(objpos) + (w / 2) * objrot[:, 0] + l * objrot[:, 1] + h * objrot[:, 2]
return hangedpos
def getobjcenter(objpos, objrot):
## for the plywood board
objcenter = copy.copy(objpos) + (w / 2) * objrot[:, 0] + (l / 2) * objrot[:, 1] + (h / 2) * objrot[:, 2]
return objcenter
def getrotatecenter(objpos, objrot):
## for the plywood board
rotatecenter = copy.copy(objpos) + (w / 2) * objrot[:, 0]
return rotatecenter
def getrotatecenter_after(objpos, objrot):
## for the plywood board
rotatecenter = copy.copy(objpos) + (w / 2) * objrot[:, 0] + h * objrot[:, 2]
return rotatecenter
def getrefpoint(objpos, objrot):
## for the plywood board
refpoint = copy.copy(objpos) + h * objrot[:, 2]
return refpoint
def getpointcloudkinect(pointrange=[]):
pcd = client.getpcd()
pcd2 = np.ones((len(pcd), 4))
pcd2[:, :3] = pcd
newpcd = np.dot(calibration_matrix, pcd2.T).T[:, :3]
if len(pointrange) > 0:
x0, x1, y0, y1, z0, z1 = pointrange
newpcd = np.array([x for x in newpcd if (x0 < x[0] < x1) and (y0 < x[1] < y1) and (z0 < x[2] < z1)])
return newpcd
## created on 2020-07-22
def getpointcloudkinectforrope_up(rbt, armname, initialpoint, pointrange):
# pcd = client.getpcd()
# pcd2 = np.ones((len(pcd), 4))
# pcd2[:, :3] = pcd
# newpcd = np.dot(calibration_matrix, pcd2.T).T[:, :3]
newpcd = getpointcloudkinect(pointrange)
finalpoint = rbt.get_gl_tcp(manipulator_name=armname)[0]
tostartvec = copy.copy(initialpoint - finalpoint)
newpcd = np.array([x for x in newpcd if x[2] < 1700])
newpcd = np.array([x for x in newpcd if rm.angle_between_vectors(tostartvec, x - finalpoint) < math.radians(30)])
return newpcd
def getpointcloudkinectforrope_down(rbt, armname, pointrange=[]):
# pcd = client.getpcd()
# pcd2 = np.ones((len(pcd), 4))
# pcd2[:, :3] = pcd
# newpcd = np.dot(calibration_matrix, pcd2.T).T[:, :3]
newpcd = getpointcloudkinect(pointrange)
initialpoint = rbt.get_gl_tcp(manipulator_name=armname)[0]
# eepos_under = copy.copy(initialpoint)
# eepos_under[2] -= 250
# refvec = copy.copy(eepos_under - initialpoint)
base.pggen.plotSphere(base.render, pos=initialpoint, radius=10, rgba=[1, 0, 0, 1])
minuszaxis = np.array([0, 0, -1])
newpcd = np.array([x for x in newpcd if 1100 < x[2] < initialpoint[2]])
newpcd = np.array([x for x in newpcd if rm.angle_between_vectors(minuszaxis, x - initialpoint) < math.radians(40)])
return newpcd
## Detect the rope with RANSAC
def doRANSAC(newpcd, threshold):
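# Robustly fit a straight 3D line to the point cloud with scikit-image's
# ransac/LineModelND; 'inliers' marks the points attributed to the rope.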
model_robust, inliers = ransac(newpcd, LineModelND, min_samples=100, residual_threshold=threshold, max_trials=1000)
outliers = inliers == False
## Show the detected line
ropeline = [] # keep only the points that belong to the rope
for i, eachpoint in enumerate(newpcd):
if inliers[i] == True:
# base.pggen.plotSphere(base.render, pos=newpcd[numberofrope], radius=10, rgba=[1, 0, 0, .5])
ropeline.append(newpcd[i])
return ropeline
## Sort a list in ascending order (default: by the z coordinate)
def ascendingorder(array, axis=2):
array = np.asarray(array)
array_ascend = array[array[:, axis].argsort(), :]
return array_ascend
## Sort a list in descending order along the given axis
def descendingorder(array, axis):
array_ascend = ascendingorder(array, axis)
array_descend = array_ascend[::-1]
return array_descend
def create_candidate_points(arm_name, initialhandpos, obstacles=None, limitation=None):
if arm_name == "lft_arm":
pointlistrange = np.array([.15, .3, .05, .3, 1.3, initialhandpos[2]])
elif arm_name == "rgt_arm":
pointlistrange = np.array([.15, .3, -.2, -.05, 1.3, initialhandpos[2]])
if obstacles is not None and arm_name == "lft_arm":
for obs in obstacles:
## sample the 3D model into a point cloud and transform it to the obstacle pose
obs_points = obs.sample_surface(8000)
homomat = obs.get_homomat()
obs_points_converted = np.ones((len(obs_points), 4))
obs_points_converted[:, :3] = obs_points
obs_points_converted = np.dot(homomat, obs_points_converted.T).T[:, :3]
zmax = max(obs_points_converted[:, 2]) + .15
pointlistrange[4] = zmax
# print("pointrange", pointlistrange)
if limitation is not None:
pointlistrange[3] = limitation
points = []
number = 30
for i in range(number):
x = random.uniform(pointlistrange[0], pointlistrange[1])
y = random.uniform(pointlistrange[2], pointlistrange[3])
z = random.uniform(pointlistrange[4], pointlistrange[5])
point = [x, y, z]
# print("point", point)
points.append(point)
return points
## Search for feasible grasp poses at the start point
def decidestartpose(armname, ropelinesorted, predefined_grasps, fromjnt, startpointid):
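# Walk along the sorted rope points starting at startpointid; for every predefined
# grasp, solve IK from 'fromjnt' and keep the collision-free solutions as
# [joint values, object pose relative to the hand, grasp index].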
IKpossiblelist_start = []
while True:
objpos_initial = ropelinesorted[startpointid]
objrot_initial = np.eye(3)
objmat4_initial = rm.homomat_from_posrot(objpos_initial, objrot_initial)
obj_initial = copy.deepcopy(ropeobj) # -> Hayakawa: where is this variable defined? Also, please use obj.copy().
obj_initial.set_rgba(rgba=[1, 0, 0, .5])
obj_initial.set_homomat(objmat4_initial)
for i, eachgrasp in enumerate(predefined_grasps):
prejawwidth, prehndfc, prehndpos, prehndrotmat = eachgrasp
prehndhomomat = rm.homomat_from_posrot(prehndpos, prehndrotmat)
hndmat4_initial = np.dot(objmat4_initial, prehndhomomat)
eepos_initial = rm.homomat_transform_points(objmat4_initial, prehndfc)[:3]
eerot_initial = hndmat4_initial[:3, :3]
start = robot_s.ik(component_name=armname,
tgt_pos=eepos_initial,
tgt_rotmat=eerot_initial,
seed_jnt_values=fromjnt)
if start is not None:
original_jnt_values = robot_s.get_jnt_values(component_name=armname)
robot_s.fk(component_name=armname, jnt_values=start)
objrelmat = robot_s.cvt_gl_to_loc_tcp(armname, objpos_initial, objrot_initial)
## collision check
cd_result = robot_s.is_collided(obscmlist)
if not cd_result:
IKpossiblelist_start.append([start, objrelmat, i])
robot_s.fk(component_name=armname, jnt_values=original_jnt_values)
if len(IKpossiblelist_start) > 0:
return IKpossiblelist_start, objpos_initial, objrot_initial, startpointid
startpointid = startpointid + 1
if startpointid == len(ropelinesorted):
print("No feasible start point was found")
return [False, False, False, False]
print("startpointid = ", startpointid)
## Search for grasp poses at the goal point (single-goal-point version)
def decidegoalpose_onepoint(arm_name,
IKpossiblelist_start,
objpos_initial, # accepted for call-site compatibility; not used below
hold_pos_final,
predefined_grasps,
obscmlist):
IKpossiblelist_startgoal = []
objrot_final = np.eye(3)
objmat4_final = rm.homomat_from_posrot(hold_pos_final, objrot_final)
# obj_final = copy.deepcopy(ropeobj)
# obj_final.set_rgba(rgba=[1, 0, 0, .5])
# obj_final.set_homomat(objmat4_final)
for i in IKpossiblelist_start:
prejawwidth, prehndfc, prehndpos, prehndrotmat = predefined_grasps[i[2]]
prehndhomomat = rm.homomat_from_posrot(prehndpos, prehndrotmat)
hndmat4_final = np.dot(objmat4_final, prehndhomomat)
eepos_final = rm.homomat_transform_points(objmat4_final, prehndfc)[:3]
eerot_final = hndmat4_final[:3, :3]
fromjnt = i[0]
goal = robot_s.ik(component_name=arm_name,
tgt_pos=eepos_final,
tgt_rotmat=eerot_final,
seed_jnt_values=fromjnt)
# gm.gen_frame(pos=eepos_final, rotmat=eerot_final).attach_to(base)
# robot_s.fk(arm_name, fromjnt)
# robot_s.gen_meshmodel().attach_to(base)
# base.run()
if goal is not None:
original_jnt_values = robot_s.get_jnt_values(component_name=arm_name)
robot_s.fk(component_name=arm_name, jnt_values=goal)
cd_result = robot_s.is_collided(obscmlist)
if not cd_result:
IKpossiblelist_startgoal.append([i[0], goal, i[1], i[2]])
robot_s.fk(component_name=arm_name, jnt_values=original_jnt_values)
if len(IKpossiblelist_startgoal) > 0:
return IKpossiblelist_startgoal
else:
print("No feasible pose exists at the goal point")
return False
## Used only for the first pulling motion
def getsuitablegoalpos_first(arm_name,
IKpossiblelist_start,
objpos_initial,
objpos_finallist,
predefined_grasps):
## weights
w_length = 1
w_FT = 1
w_manip = 1
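# Candidate goal points are ranked by a weighted sum of three terms: relative pulling
# length, force direction (cosine of the angle between the pull direction and the
# vertical axis) and manipulability (fraction of start IK solutions that also reach
# the goal).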
pullinglengthlist = []
for i, selected_objpos_final in enumerate(objpos_finallist):
pullinglength = np.linalg.norm(objpos_initial - selected_objpos_final)
pullinglengthlist.append(pullinglength)
pullinglength_ref = min(pullinglengthlist)
## compute the evaluation terms
totalIKpossiblelist_startgoal = []
costlist = []
assessment_value_list = []
for i, selected_objpos_final in enumerate(objpos_finallist):
## pullinglength
pullinglength = pullinglengthlist[i]
pullinglength_cost = 1 - pullinglength_ref / pullinglength
## FT
zaxis = np.array([0, 0, 1])
tostartvec = objpos_initial - selected_objpos_final
theta = rm.angle_between_vectors(rm.unit_vector(tostartvec), zaxis)
FT_cost = math.cos(theta)
## manipulability
IKpossiblelist_startgoal = decidegoalpose_onepoint(arm_name,
IKpossiblelist_start,
objpos_initial,
selected_objpos_final,
predefined_grasps,
obscmlist)
if IKpossiblelist_startgoal is not False and IKpossiblelist_start is not False:
manipulability_cost = len(IKpossiblelist_startgoal) / len(IKpossiblelist_start)
else:
manipulability_cost = -100
## list of the individual costs
costlist.append([pullinglength_cost, FT_cost, manipulability_cost])
## list of usable IK solutions
totalIKpossiblelist_startgoal.append(IKpossiblelist_startgoal)
## list of tostartvec, togoalvec
# veclist.append([tostartvec, togoalvec])
## list of evaluation function values
assessment_value = w_length * pullinglength_cost + w_manip * manipulability_cost + w_FT * FT_cost
## [assessment_value, chosen_objpos_final]
assessment_value_list.append([assessment_value, i])
assessment_value_list = descendingorder(assessment_value_list, axis=0)
print("assessment_value_list", assessment_value_list)
return assessment_value_list, totalIKpossiblelist_startgoal, costlist
def getsuitablegoalpos_second(arm_name,
IKpossiblelist_start,
objpos_initial,
objpos_finallist,
predefined_grasps,
predictlist):
# objpos_final_under = np.array([250, 0, 1650])
## weights
w_length = 1
w_FT = 0
w_manip = 0
pullinglengthlist = []
for i, use_objpos_final in enumerate(objpos_finallist):
pullinglength = np.linalg.norm(objpos_initial - use_objpos_final)
pullinglengthlist.append(pullinglength)
pullinglength_ref = min(pullinglengthlist)
## compute the evaluation terms
totalIKpossiblelist_startgoal = []
costlist = []
## compute the prediction terms at each candidate goal point
elements_for_predictlist = []
for i, use_objpos_final in enumerate(objpos_finallist):
flag = 0
togoalvec = copy.copy(use_objpos_final - objpos_initial)
d_next = np.linalg.norm(objpos_initial - use_objpos_final)
d_before, theta_before, theta_beforebefore = predictlist
## predicted board angle after the next pull (linear extrapolation from the last two angles)
theta_next = theta_before + (theta_before - theta_beforebefore) * (d_next / d_before)
if theta_next > thetathreshold:
d_next = (thetathreshold - theta_before) * (d_before / (theta_before - theta_beforebefore))
use_objpos_final = copy.copy(objpos_initial) + d_next * rm.unit_vector(togoalvec)
togoalvec = copy.copy(use_objpos_final - objpos_initial)
flag = 1
elements_for_predictlist.append([d_next, theta_next, use_objpos_final, flag, togoalvec])
## compute the evaluation values
value_plus_element = []
for i, eachpos in enumerate(objpos_finallist):
use_element = elements_for_predictlist[i]
use_objpos_final = use_element[2]
## pullinglength
pullinglength = pullinglengthlist[i]
pullinglength_cost = 1 - pullinglength_ref / pullinglength
print("length cost = ", pullinglength_cost)
## FT
zaxis = np.array([0, 0, 1])
togoalvec = use_element[4]
tostartvec = copy.copy(togoalvec) * (-1)
degree = rm.angle_between_vectors(rm.unit_vector(tostartvec), zaxis)
FT_cost = math.cos(degree)
print("force cost = ", FT_cost)
## object state at the predicted position
obj_predict = copy.deepcopy(obj)
objectpos = copy.copy(objpos_start)
objectrot = rm.rotmat_from_axangle(rotate_axis, math.radians(use_element[1]))
objmat_predict = rm.homomat_from_posrot(objectpos, objectrot)
obj_predict.set_rotmat(objmat_predict)
## add the object at the predicted position as an obstacle
obscmlist.append(obj_predict)
pickle.dump(obscmlist, open("obscmlist.pickle", "wb"))
## manipulability
IKpossiblelist_startgoal = decidegoalpose_onepoint(arm_name,
IKpossiblelist_start,
objpos_initial,
use_objpos_final,
predefined_grasps,
obscmlist)
if IKpossiblelist_startgoal is not False and IKpossiblelist_start is not False:
manipulability_cost = len(IKpossiblelist_startgoal) / len(IKpossiblelist_start)
else:
manipulability_cost = -100
obscmlist.pop(-1)
print("manipulation cost = ", manipulability_cost)
## list of the individual costs
costlist.append([pullinglength_cost, FT_cost, manipulability_cost])
## list of usable IK solutions
totalIKpossiblelist_startgoal.append(IKpossiblelist_startgoal)
## list of evaluation function values
assessment_value = w_length * pullinglength_cost + w_manip * manipulability_cost + w_FT * FT_cost
## value_plus_element : [assessment_value, i, d_next, theta_next, use_objpos_final, flag, togoalvec]
value_plus_element.append([assessment_value, i] + use_element)
# ## [assessment_value, chosen_objpos_final]
# assessment_value_list.append([assessment_value, i])
# assessment_value_list = descendingorder(assessment_value_list, axis=0)
value_plus_element = descendingorder(value_plus_element, axis=0)
assessment_value_list = value_plus_element[:, :2] ## assessment_value, i
print("assessment_value_list", assessment_value_list)
elements_for_predictlist = value_plus_element[:, 2:6] ## d_next, theta_next, use_objpos_final, flag
togoalveclist = value_plus_element[:, 6] ## togoalvec
return assessment_value_list, totalIKpossiblelist_startgoal, costlist, elements_for_predictlist, togoalveclist
## Search for grasp poses at the goal point (added 02/03: the pulling direction is switched between the left and right arms)
def decidegoalpose(arm_name,
IKpossiblelist_start,
objpos_initial,
predefined_grasps,
objpos_final=np.array([260, 0, 1200]),
diff=None,
label="down"):
# tic = time.time()
IKpossiblelist_startgoal = []
# if label == "down":
# if arm_name == "lft":
# hold_pos_final = np.array([260, 100, 1400])
# else:
# hold_pos_final = np.array([260, -100, 1400])
objrot_final = np.eye(3)
tostartvec = objpos_initial - objpos_final ## direction vector from goal to start (not normalized)
togoalvec = objpos_final - objpos_initial ## direction vector from start to goal (not normalized)
togoalvec_len = np.linalg.norm(togoalvec)
togoalvec_normalize = rm.unit_vector(togoalvec)
pullinglength = copy.copy(togoalvec_len)
if label == "down":
if diff is not None: ## condition for the first pulling motion
if diff < togoalvec_len:
print("pass")
pullinglength = copy.copy(diff)
while True:
if label == "down":
objpos_final = objpos_initial + pullinglength * togoalvec_normalize
else:
pass
togoalvec = objpos_final - objpos_initial
print("hold_pos_final", objpos_final)
objmat4_final = rm.homomat_from_posrot(objpos_final, objrot_final)
obj_final = copy.deepcopy(ropeobj)
obj_final.set_rgba([1, 0, 0, .5])
obj_final.set_rotmat(objmat4_final)
for i in IKpossiblelist_start:
prejawwidth, prehndfc, prehndpos, prehndrotmat = predefined_grasps[i[2]]
prehndhomomat = rm.homomat_from_posrot(prehndpos, prehndrotmat)
hndmat4_final = np.dot(objmat4_final, prehndhomomat)
eepos_final = rm.homomat_transform_points(objmat4_final, prehndfc)[:3]
eerot_final = hndmat4_final[:3, :3]
# goal = robot_s.numik(eepos_final, eerot_final, arm_name)
fromjnt = i[0]
goal = robot_s.ik(component_name=arm_name,
tgt_pos=eepos_final,
tgt_rotmat=eerot_final,
seed_jnt_values=fromjnt)
if goal is not None:
original_jnt_values = robot_s.get_jnt_values(component_name=arm_name)
robot_s.fk(component_name=arm_name, jnt_values=goal)
cd_result = robot_s.is_collided(obscmlist)
if not cd_result:
IKpossiblelist_startgoal.append([i[0], goal, i[1], i[2]])
robot_s.fk(component_name=arm_name, jnt_values=original_jnt_values)
if len(IKpossiblelist_startgoal) > 0:
print("pulling by " + str(pullinglength) + " mm")
return IKpossiblelist_startgoal, objpos_final, tostartvec, togoalvec
pullinglength -= 1
if pullinglength < 0:
print("No feasible goal point exists")
return [False, False, False, False]
## Search for grasp poses at the intermediate (retreat) point
def decidemidpose(arm_name, IKpossiblelist_startgoal, handdir, objpos_final=None):
centerflag = 0
if objpos_final is not None:
if objpos_final[1] == 0:
centerflag = 1
print("中継点での姿勢を探索します")
IKpossiblelist = []
for i in IKpossiblelist_startgoal:
direction = rm.unit_vector(handdir[i[3]]) * (-1)
distance = .08
while True:
            if objpos_final is None or centerflag == 1:  ## when the goal is at the center (hold_pos_final = None), also compute a relay point from the goal
                ## path to the relay point with respect to the start point
midpathstart = robot_inik_solver.gen_rel_linear_motion_with_given_conf(arm_name,
i[0],
direction,
distance,
obscmlist,
type="source")
midpathgoal = robot_inik_solver.gen_rel_linear_motion_with_given_conf(arm_name,
i[1],
direction,
distance,
obscmlist,
type="source")
if len(midpathstart) > 0 and len(midpathgoal) > 0:
# robot_s.movearmfk(midpath[-1], arm_name)
# mideepos, mideerot = robot_s.getee(arm_name)
midpathstart = midpathstart[::-1]
midjntstart = copy.copy(midpathstart[0])
midjntgoal = copy.copy(midpathgoal[0])
#### list[startjnt, goaljnt, midjntlist, midpathlist, objrelmat, id]
IKpossiblelist.append(
[i[0], i[1], [midjntstart, midjntgoal], [midpathstart, midpathgoal], i[2], i[3]])
break
else:
distance -= 1
if distance <= 30:
                        print("no relay point found for pose #" + str(i[3]))
break
else:
                ## path to the relay point with respect to the start point
midpathstart = robot_inik_solver.gen_rel_linear_motion_with_given_conf(arm_name,
i[0],
direction,
distance,
[],
type="source")
if len(midpathstart) > 0:
midpathstart = midpathstart[::-1]
midjntstart = copy.copy(midpathstart[0])
goaljnt = i[1]
#### list[startjnt, goaljnt, midjntlist, midpathlist, objrelmat, id]
IKpossiblelist.append([i[0], i[1], [midjntstart, goaljnt], [midpathstart, []], i[2], i[3]])
break
else:
distance -= 1
if distance <= 30:
                        print("no relay point w.r.t. the start point found for pose #" + str(i[3]))
break
return IKpossiblelist
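## --- illustration only (added sketch) ---------------------------------------------------------
## decidemidpose() retreats the hand along the negative approach direction to obtain a relay
## (pre-grasp) configuration.  Geometrically the relay position is grasp_pos - distance * unit(dir);
## the demo below shows that computation with made-up numbers (positions in mm).
def _demo_relay_point():
    grasp_pos = np.array([260.0, 0.0, 1200.0])
    approach_dir = np.array([0.0, 0.0, -1.0])  # hand approaches straight down
    distance = 80.0                            # retreat 80 mm against the approach direction
    relay_pos = grasp_pos - distance * approach_dir / np.linalg.norm(approach_dir)
    return relay_pos                           # -> [260., 0., 1280.]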
def ropepullingmotion(IKpossiblelist, togoalvec, ctcallback, theta=None, theta_next=None):
for i in range(len(IKpossiblelist)):
useid = random.randint(0, len(IKpossiblelist) - 1)
use_startjnt = IKpossiblelist[useid][0]
use_objrelmat = IKpossiblelist[useid][4]
pullinglength = np.linalg.norm(togoalvec)
print("pullinglength : ", pullinglength)
togoalvec_copy = copy.copy(togoalvec)
direction = rm.unit_vector(togoalvec_copy)
obstacles_forpullingrope = copy.deepcopy(obscmlist)
if theta is not None and theta_next is not None:
currentobj = copy.deepcopy(obj)
currentrot = rm.rotmat_from_axangle(rotate_axis, theta)
currentmat = rm.homomat_from_posrot(objpos_start, currentrot)
currentobj.set_homomat(currentmat)
nextobj = copy.deepcopy(obj)
nextrot = rm.rotmat_from_axangle(rotate_axis, theta_next)
nextmat = rm.homomat_from_posrot(objpos_start, nextrot)
nextobj.set_homomat(nextmat)
            dtheta = 0.1  # use a separate name so the outer loop index i is not clobbered
            while True:
                appendobj = copy.deepcopy(obj)
                appendrot = rm.rotmat_from_axangle(rotate_axis, theta + dtheta)
                appendmat = rm.homomat_from_posrot(objpos_start, appendrot)
                appendobj.set_homomat(appendmat)
                obstacles_forpullingrope.append(appendobj)
                dtheta += 0.1
                if theta + dtheta >= theta_next:
                    break
ropepulling = robot_inik_solver.gen_rel_linear_motion_with_given_conf(arm_name,
use_startjnt,
direction,
pullinglength,
obstacles_forpullingrope,
type="source")
        # NOTE: legacy planner call disabled; it used the old ctcallback API and would overwrite the
        # result computed by robot_inik_solver just above.
        # ropepulling = ctcallback.getLinearPrimitive(use_startjnt, direction, pullinglength, [ropeobj], [use_objrelmat],
        #                                             obstacles_forpullingrope, type="source")
if len(ropepulling) > 0:
print("ropepulling motion planning success!")
return ropepulling, IKpossiblelist[useid], useid
print("ropepulling motion not found!")
return [False, False, False]
# return ropepulling, IKpossiblelist[useid], useid
def RRTmotion(startjoint, goaljoint, ctcallback, obscmlist, expanddis, maxtime):
tic = time.time()
smoother = sm.Smoother()
pathplanner = rrtc.RRTConnect(start=startjoint, goal=goaljoint, ctcallback=ctcallback,
starttreesamplerate=30,
endtreesamplerate=30, expanddis=expanddis,
maxiter=2000, maxtime=maxtime)
path, _ = pathplanner.planning(obscmlist)
if path is not False:
path = smoother.pathsmoothing(path, pathplanner)
return path
else:
return False
def preprocess_point_cloud(pcd, voxel_size):
print(":: Estimate normal with search radius %.3f." % 10)
o3d.geometry.PointCloud.estimate_normals(pcd, o3d.geometry.KDTreeSearchParamHybrid(radius=10, max_nn=30))
radius_feature = voxel_size * 5
print(":: Compute FPFH feature with search radius %.3f." % radius_feature)
pcd_fpfh = o3d.registration.compute_fpfh_feature(pcd,
o3d.geometry.KDTreeSearchParamHybrid(radius=10, max_nn=100))
return pcd, pcd_fpfh
def execute_global_registration(source_down, target_down, source_fpfh, target_fpfh, voxel_size):
distance_threshold = 30
print(":: RANSAC registration on downsampled point clouds.")
print(" Since the downsampling voxel size is %.3f," % voxel_size)
print(" we use a liberal distance threshold %.3f." % distance_threshold)
result = o3d.registration.registration_ransac_based_on_feature_matching(
source_down, target_down, source_fpfh, target_fpfh, distance_threshold,
o3d.registration.TransformationEstimationPointToPoint(False), 4, [
o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
o3d.registration.CorrespondenceCheckerBasedOnDistance(distance_threshold)],
o3d.registration.RANSACConvergenceCriteria(4000000, 500))
return result
def refine_registration(source, target, result_ransac):
distance_threshold = 30
print(":: Point-to-plane ICP registration is applied on original point")
print(" clouds to refine the alignment. This time we use a strict")
print(" distance threshold %.3f." % distance_threshold)
result = o3d.registration.registration_icp(
source, target, distance_threshold, result_ransac.transformation,
o3d.registration.TransformationEstimationPointToPlane())
return result
def objectfitting(newpcd, fitobjpcd, refpoint_fitting):
samplepoint = copy.copy(newpcd)
targetpoint = sample_volume(fitobjpcd, 20000)
targetpointnew = copy.copy(targetpoint)
while True:
targetpoint = targetpointnew
voxel_size = 30
source = o3d.geometry.PointCloud()
source.points = o3d.utility.Vector3dVector(samplepoint)
target = o3d.geometry.PointCloud()
target.points = o3d.utility.Vector3dVector(targetpoint)
source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)
target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)
print("RANSAC start")
        result_ransac = execute_global_registration(source_down, target_down,
                                                    source_fpfh, target_fpfh, voxel_size)
print(result_ransac)
print("ICP start")
result_icp = refine_registration(source, target, result_ransac)
print(result_icp)
transformation = result_icp.transformation
transmatrix = np.linalg.inv(transformation)
# print("transmatrix = ", transmatrix)
targetpointnew = np.concatenate((targetpoint, np.ones((targetpoint.shape[0], 1))), axis=1)
targetpointnew = np.dot(transmatrix, targetpointnew.T)
targetpointnew = targetpointnew.T[:, :3]
        ## transform the reference points and sort them in descending order of z
refpoint_fitting = np.dot(transmatrix, refpoint_fitting.T).T
refpoint_fitting = descendingorder(refpoint_fitting, axis=2)
print("diff:", abs(refpoint_fitting[0][2] - refpoint_fitting[1][2]))
print("refpoint", refpoint_fitting)
# for i in refpoint_fitting:
# base.pggen.plotSphere(base.render, pos=i[:3], radius=15, rgba=[1,1,0,1])
# break
toppoints_zdiff = abs(refpoint_fitting[0][2] - refpoint_fitting[1][2])
toppoints_length = abs(refpoint_fitting[0] - refpoint_fitting[1])
if 0 < toppoints_zdiff < 10 and 300 < np.linalg.norm(toppoints_length) < 450:
print("----------- fitting end ------------")
break
return targetpointnew, refpoint_fitting
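## --- illustration only (added sketch) ---------------------------------------------------------
## objectfitting() applies the inverse of the ICP result to the model points by appending a
## homogeneous coordinate, multiplying by the 4x4 matrix and dropping the last column.  The
## self-contained demo below performs the same round trip with a pure translation.
def _demo_homogeneous_transform():
    points = np.array([[0.0, 0.0, 0.0],
                       [1.0, 2.0, 3.0]])
    transform = np.eye(4)
    transform[:3, 3] = [10.0, 0.0, -5.0]                       # translate by (10, 0, -5)
    homog = np.concatenate((points, np.ones((points.shape[0], 1))), axis=1)
    moved = (transform @ homog.T).T[:, :3]                     # -> [[10, 0, -5], [11, 2, -2]]
    restored = (np.linalg.inv(transform) @ np.concatenate(
        (moved, np.ones((moved.shape[0], 1))), axis=1).T).T[:, :3]
    return moved, restored                                     # restored equals the original points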
def getobjaxis(targetpointnew, refpoint_fitting, flag=0):
    ## compute the centroid of the point cloud
cog = np.mean(targetpointnew, axis=0)
    ## normal estimation
targetpcd = o3d.geometry.PointCloud()
targetpcd.points = o3d.utility.Vector3dVector(targetpointnew)
o3d.geometry.PointCloud.estimate_normals(targetpcd, o3d.geometry.KDTreeSearchParamHybrid(radius=10, max_nn=30))
print("has normals?", targetpcd.has_normals())
normal_array = np.asarray(targetpcd.normals)
normal_array_use = np.array([x for x in normal_array if (x[2] > 0)])
    ## compute the mean normal vector
zaxis_obj = np.mean(normal_array_use, axis=0)
normz = np.linalg.norm(zaxis_obj)
zaxis_obj /= normz
    ## compute the tilt relative to the floor plane
zaxis = np.array([0, 0, 1])
norm1 = np.linalg.norm(zaxis)
norm2 = np.linalg.norm(zaxis_obj)
dot = np.dot(zaxis, zaxis_obj)
theta = math.acos(dot / (norm1 * norm2)) * 180 / math.pi
norm1 = np.linalg.norm(refpoint_fitting[0][:3] - refpoint_fitting[2][:3])
norm2 = np.linalg.norm(refpoint_fitting[0][:3] - refpoint_fitting[3][:3])
if norm1 < norm2:
yaxis_obj = refpoint_fitting[0][:3] - refpoint_fitting[2][:3]
else:
yaxis_obj = refpoint_fitting[0][:3] - refpoint_fitting[3][:3]
normy = np.linalg.norm(yaxis_obj)
yaxis_obj /= normy
xaxis_obj = np.cross(yaxis_obj, zaxis_obj)
normx = np.linalg.norm(xaxis_obj)
xaxis_obj /= normx
return xaxis_obj, yaxis_obj, zaxis_obj, theta, cog
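## --- illustration only (added sketch) ---------------------------------------------------------
## getobjaxis() estimates the object tilt as the angle between the averaged surface normal and the
## world z axis via acos of the normalized dot product.  Demo with a normal known to be tilted 45°:
def _demo_tilt_angle():
    zaxis = np.array([0.0, 0.0, 1.0])
    zaxis_obj = np.array([1.0, 0.0, 1.0]) / np.linalg.norm([1.0, 0.0, 1.0])
    cos_angle = np.dot(zaxis, zaxis_obj) / (np.linalg.norm(zaxis) * np.linalg.norm(zaxis_obj))
    theta = math.degrees(math.acos(cos_angle))
    return theta                                               # -> 45.0 (up to rounding)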
def getobjposandrot(zaxis_obj):
objpos_initial = copy.copy(objpos_start)
theta = rm.degree_betweenvector(zaxis_obj, [0, 0, 1])
objrot_initial = rm.rodrigues(rotate_axis, theta)
return objpos_initial, objrot_initial
def getobjposandrot_after(cog, xaxis_obj, yaxis_obj, zaxis_obj):
    ## for the plywood board
objpos_initial = copy.copy(cog)
objpos_initial -= (w / 2) * xaxis_obj
objpos_initial -= (l / 2) * yaxis_obj
objpos_initial -= (h / 2) * zaxis_obj
objrot_initial = np.empty((0, 3))
objrot_initial = np.append(objrot_initial, np.array([xaxis_obj]), axis=0)
objrot_initial = np.append(objrot_initial, np.array([yaxis_obj]), axis=0)
objrot_initial = np.append(objrot_initial, np.array([zaxis_obj]), axis=0)
objrot_initial = objrot_initial.T
return objpos_initial, objrot_initial
def getlimitdegree(objpos, rotateaxis):
objpos_vertical = copy.copy(objpos)
objrot_vertical = rm.rodrigues(rotateaxis, 90)
criteriapoint = getrefpoint(objpos_vertical, objrot_vertical)
i = 0
    n = [0, 0, 1]  ## normal vector of the ground
rotatecenter = getrotatecenter_after(objpos_vertical, objrot_vertical)
while True:
objpos_after = copy.copy(objpos)
objrot_after = rm.rodrigues(rotateaxis, 90 + i)
refpoint = getrefpoint(objpos_after, objrot_after)
objpos_after += criteriapoint - refpoint
hangedpos_after = gethangedpos(objpos_after, objrot_after)
objcenter = getobjcenter(objpos_after, objrot_after)
# a0 = sp.Symbol('a0')
t = sp.Symbol('t')
fx = sp.Symbol('fx')
fy = sp.Symbol('fy')
fz = sp.Symbol('fz')
T = np.dot(t, rm.unit_vector(pulleypos - hangedpos_after))
F = np.array([fx, fy, fz])
rt = hangedpos_after - rotatecenter
rg = objcenter - rotatecenter
force_equation = Mg + F + T
print("force_equation : ", force_equation)
moment_equation = np.dot(rotateaxis, np.cross(rg, Mg) + np.cross(rt, T))
print("moment_equation : ", moment_equation)
answer = sp.solve([force_equation[0], force_equation[1], force_equation[2], moment_equation])
print("answer = ", answer)
if len(answer) != 0:
if answer[t] > 0 and answer[fz] > 0:
if answer[fx] ** 2 + answer[fy] ** 2 < answer[fz] ** 2:
break
i += 1
return i + 90
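## --- illustration only (added sketch) ---------------------------------------------------------
## getlimitdegree() solves a force/moment balance with sympy: unknown rope tension t and support
## reaction (fx, fy, fz), with moments taken about the rotation axis.  The planar demo below uses
## the same sp.solve pattern on a simpler case (a 2 m rod of weight 10 N hinged at the origin and
## held horizontal by a vertical rope at its tip); the expected answer is t = 5 N, fz = 5 N.
def _demo_static_equilibrium():
    t = sp.Symbol('t')
    fx = sp.Symbol('fx')
    fz = sp.Symbol('fz')
    weight = 10.0
    length = 2.0
    force_eq_x = fx                                   # no horizontal loads
    force_eq_z = fz + t - weight                      # hinge reaction + rope tension carry the weight
    moment_eq = t * length - weight * (length / 2.0)  # moments about the hinge
    answer = sp.solve([force_eq_x, force_eq_z, moment_eq], [fx, fz, t])
    return answer                                     # {fx: 0, fz: 5.0, t: 5.0}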
def getpushingpath(theta, rotateaxis):
## Objective Function
def func(p):
f1, f0, t, alpha0, alpha1 = p
n1 = np.array([-math.sin(math.radians(theta)), math.cos(math.radians(theta))])
l1 = np.array([math.cos(math.radians(theta)), math.sin(math.radians(theta))])
n0 = np.array([0, 1])
l0 = np.array([1, 0])
F1 = np.dot(f1, n1) + np.dot(alpha1 * myu1 * f1, l1)
F0 = np.dot(f0, n0) + np.dot(alpha0 * myu0 * f0, l0)
T = t * t_dir
opt = 150 * np.dot(F1, F1) + np.dot(T, T) + np.dot(F0, F0)
return opt
## Constraints
def force_eq1(p):
f1, f0, t, alpha0, alpha1 = p
n1 = np.array([-math.cos(math.radians(theta)), math.sin(math.radians(theta))])
l1 = np.array([math.sin(math.radians(theta)), math.cos(math.radians(theta))])
n0 = np.array([0, 1])
l0 = np.array([1, 0])
F1 = np.dot(f1, n1) + np.dot(alpha1 * myu1 * f1, l1)
F0 = np.dot(f0, n0) + np.dot(alpha0 * myu0 * f0, l0)
T = t * t_dir
return T + Mg + F0 + F1
def torque_eq1(p):
f1, f0, t, alpha0, alpha1 = p
n1 = np.array([-math.cos(math.radians(theta)), math.sin(math.radians(theta))])
l1 = np.array([math.sin(math.radians(theta)), math.cos(math.radians(theta))])
# r1_3d = copy.copy(objpos) + rdir * rot[:, 1]
r1_3d = chosenpos
r1 = np.array([r1_3d[1], r1_3d[2]])
F1 = np.dot(f1, n1) + np.dot(alpha1 * myu1 * f1, l1)
return np.cross(rt, t * t_dir) + np.cross(rg, Mg) + np.cross(r1, F1)
def f1ineq(p):
f1, f0, t, alpha0, alpha1 = p
n1 = np.array([-math.cos(math.radians(theta)), math.sin(math.radians(theta))])
l1 = np.array([math.sin(math.radians(theta)), math.cos(math.radians(theta))])
# r1 = copy.copy(objpos) + r * rot[:, 1]
F1 = np.dot(f1, n1) + np.dot(alpha1 * myu1 * f1, l1)
return 30 ** 2 - np.dot(F1, F1)
pushpath_total = []
pushpos_total = []
obj_total = []
hangedpos_total = []
pushposlist1 = []
pushposlist2 = []
degreelist1 = []
degreelist2 = []
i = 0
endflag = 0
pos_i = []
pos_iminus1 = []
pushpose_iminus1 = [pushpose_pre]
while True:
print("theta = ", theta)
if theta <= 90:
degreelist1.append(theta)
else:
degreelist2.append(theta)
box = copy.deepcopy(obj)
objpos = copy.copy(objpos_start)
rot = rm.rodrigues(rotateaxis, theta)
rot_ver = rm.rodrigues(rotateaxis, 90)
refpos_ver = getrefpoint(objpos_start, rot_ver)
if theta > 90:
refpos = getrefpoint(objpos, rot)
objpos += refpos_ver - refpos
mat = rm.homobuild(objpos, rot)
box.setMat(base.pg.np4ToMat4(mat))
box.setColor(.8, .6, .3, .1)
obj_total.append(box)
# box.reparentTo(base.render)
pushposlist = []
nextpushlenlist = []
for num in range(10):
pushpos = copy.copy(objpos) + (w / 2) * rot[:, 0] + (l - 10 - num * 10) * rot[:, 1]
pushposlist.append(pushpos)
# base.pggen.plotSphere(base.render, pos=pushpos, radius=5, rgba=[0, 1, 0, 1])
## ------ for plotting arrow ----------------------------------------------
# if i == 1:
# print("pre to pos : ", np.linalg.norm(pushpos - pos_iminus1[0]))
# nextpushlenlist.append(np.linalg.norm(pushpos - pos_iminus1[0]))
# base.pggen.plotArrow(base.render, spos=pos_iminus1[0], epos=pushpos,
# length=np.linalg.norm(pushpos - pos_iminus1[0]), thickness=1.0, rgba=[0, 0, 1, 1])
# elif i >= 2:
# print("pre to pos : ", np.linalg.norm(pushpos - pos_i[0]))
# nextpushlenlist.append(np.linalg.norm(pushpos - pos_i[0]))
# base.pggen.plotArrow(base.render, spos=pos_i[0], epos=pushpos,
# length=np.linalg.norm(pushpos - pos_i[0]), thickness=1.0, rgba=[0, 0, 1, 1])
# base.pggen.plotSphere(base.render, pos=pushpos, rgba=[0,1,0,1], radius=5)
## --------------------------------------------------------------------------
hangedpos = gethangedpos(objpos, rot)
hangedpos_total.append(hangedpos)
# base.pggen.plotSphere(base.render, pos=hangedpos, radius=10, rgba=[0,1,0,1])
rotatecenter = getrotatecenter(objpos, rot)
# base.pggen.plotSphere(base.render, pos=rotatecenter, radius=10, rgba=[0,0,1,1])
if theta > 90:
rotatecenter = getrotatecenter_after(objpos, rot)
objcenter = getobjcenter(objpos, rot)
# base.pggen.plotSphere(base.render, pos=objcenter, radius=10, rgba=[1,0,0,1])
rg_3d = objcenter - rotatecenter
rt_3d = hangedpos - rotatecenter
rg = np.array([rg_3d[1], rg_3d[2]])
rt = np.array([rt_3d[1], rt_3d[2]])
optimizationlist = np.empty((0, 2))
t_dir_3d = pulleypos - hangedpos
t_dir = rm.unit_vector([t_dir_3d[1], t_dir_3d[2]])
bounds = ((0, 30), (0, np.inf), (0, np.inf), (-1, 1), (-1, 1))
conds = ({'type': 'eq', 'fun': force_eq1},
{'type': 'eq', 'fun': torque_eq1},
{'type': 'ineq', 'fun': f1ineq})
p0 = [30, 30, 30, 0, 0]
minimizer_kwargs = {"method": "SLSQP", "constraints": conds, "bounds": bounds}
for chosenpos in pushposlist:
if i == 0:
o = basinhopping(func, p0, minimizer_kwargs=minimizer_kwargs)
value = func(o.x)
print(o.x)
optimizationlist = np.append(optimizationlist, np.array([np.array([chosenpos, value])]), axis=0)
elif i == 1:
if np.linalg.norm(chosenpos - pos_iminus1[0]) <= vmax * timestep:
o = basinhopping(func, p0, minimizer_kwargs=minimizer_kwargs)
value = func(o.x)
print(o.x)
optimizationlist = np.append(optimizationlist, np.array([np.array([chosenpos, value])]), axis=0)
else:
vec_current = rm.unit_vector(chosenpos - pos_i[0])
vec_previous = rm.unit_vector(pos_i[0] - pos_iminus1[0])
angle = rm.degree_betweenvector(vec_current, vec_previous)
print("angle=", angle)
# if np.linalg.norm(chosenpos - pos_iminus1[0]) * 0.001 <= vmax * timestep and angle <= anglemax:
if np.linalg.norm(chosenpos - pos_i[0]) <= vmax * timestep and angle <= anglemax:
o = basinhopping(func, p0, minimizer_kwargs=minimizer_kwargs)
value = func(o.x)
print(o.x)
optimizationlist = np.append(optimizationlist, np.array([np.array([chosenpos, value])]), axis=0)
## sorting the pushposlist by value
optimizationlist = ascendingorder(optimizationlist, axis=1)
for j, eachpos in enumerate(optimizationlist):
maximumvaluepos = eachpos[0]
# if i == 0:
# pos_iminus1 = [maximumvaluepos]
# elif i == 1:
# pos_i = [maximumvaluepos]
# else:
# pos_iminus1[0] = pos_i[0]
# pos_i[0] = maximumvaluepos
## kinematic constraint
pushpose = robot_s.ik("lft_arm", maximumvaluepos, pushrot, seed_jnt_values=pushpose_iminus1[0])
if pushpose is not None:
if i == 0:
pos_iminus1 = [maximumvaluepos]
pushpose_iminus1 = [pushpose]
pushpos_total.append(maximumvaluepos)
break
else:
vec = rm.unit_vector(maximumvaluepos - pos_iminus1[0])
length = np.linalg.norm(maximumvaluepos - pos_iminus1[0])
pushpath = ctcallback_lft.getLinearPrimitivenothold(pushpose_iminus1[0], vec, length, obscmlist,
type="source")
if len(pushpath) > 0:
if i == 1:
pos_i = [maximumvaluepos]
else:
                            pos_iminus1 = [pos_i[0]]
pos_i = [maximumvaluepos]
pushpose_iminus1 = [pushpose]
pushpath_total.append(pushpath)
pushpos_total.append(maximumvaluepos)
break
# if theta <= 90:
# pushposlist1.append(eachpos[0])
# else:
# pushposlist2.append(eachpos[0])
# break
if endflag == 1:
break
## renew theta
rotatedegree = 5
if 90 <= theta + rotatedegree < 95:
rotatedegree = 90 - theta
if theta + rotatedegree > limitdegree:
rotatedegree = limitdegree - theta
endflag = 1
theta += rotatedegree
i += 1
return pushpath_total, pushpos_total, obj_total, hangedpos_total
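## --- illustration only (added sketch) ---------------------------------------------------------
## getpushingpath() scores each candidate push point by running basinhopping with an SLSQP local
## minimizer, equality constraints (force/torque balance) and box bounds.  The toy problem below
## reuses the same minimizer_kwargs structure (and the basinhopping import already used above):
## minimize (x-1)^2 + (y-2)^2 subject to x + y = 2 with 0 <= x, y <= 2, optimum at (0.5, 1.5).
def _demo_constrained_basinhopping():
    def cost(p):
        x, y = p
        return (x - 1.0) ** 2 + (y - 2.0) ** 2
    conds = ({'type': 'eq', 'fun': lambda p: p[0] + p[1] - 2.0},)
    bounds = ((0.0, 2.0), (0.0, 2.0))
    minimizer_kwargs = {"method": "SLSQP", "constraints": conds, "bounds": bounds}
    result = basinhopping(cost, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs, niter=10)
    return result.x                                   # approximately [0.5, 1.5]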
def getforce(armname):
if armname == "rgt":
rgtarm_ftsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
rgtarm_ftsocket.connect(uc.rgtarm_ftsocket_ipad)
targetftsocket = rgtarm_ftsocket
else:
lftarm_ftsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
lftarm_ftsocket.connect(uc.lftarm_ftsocket_ipad)
targetftsocket = lftarm_ftsocket
rawft = targetftsocket.recv(1024)
rawft_letter = str(rawft)
rawft_letter_split = rawft_letter.split("(")[1].split(")")[0].split(" , ")
rawft_final = [float(i) for i in rawft_letter_split]
print("force ", rawft_final)
def getforcenorm(force_array):
force_norm = force_array[0] * force_array[0] + force_array[1] * force_array[1] + force_array[2] * force_array[2]
# force_norm = np.linalg.norm(force_array)
force_norm = math.sqrt(force_norm)
return force_norm
targetftsocket.close()
return getforcenorm(rawft_final)
def zeroforce(armname):
pblder = pb.ProgramBuilder()
pblder.loadprog("zerosensor.script")
progzerosensor = pblder.ret_program_to_run()
if armname == "lft":
uc.lftarm.send_program(progzerosensor)
elif armname == "rgt":
uc.rgtarm.send_program(progzerosensor)
if __name__ == "__main__":
base = wd.World(cam_pos=[7, 0, 2.2], lookat_pos=[0, 0, .7])
gm.gen_frame().attach_to(base)
    board = cm.gen_box(extent=[.4, .5, .01])  # for RANSAC
rtq85_s = rtq85.Robotiq85()
robot_s = ur3ds.UR3Dual()
robot_inik_solver = inik.IncrementalNIK(robot_s)
    ## load the objects
ropeobj = cm.CollisionModel(initor="./research_box_mm.stl")
obj = cm.CollisionModel(initor=objpath)
    ## load the predefined grasps and hand poses
handpose = pose.PoseMaker()
    predefined_grasps_lft, handdirlist_lft = handpose.lftgrasppose()  # candidate grasp poses and grasp directions
predefined_grasps_rgt, handdirlist_rgt = handpose.rgtgrasppose()
# for grasp_pose in predefined_grasps_rgt:
# jawwidth, gl_jaw_center_pos, hnd_pos, hnd_rotmat = grasp_pose
# gm.gen_frame(hnd_pos, hnd_rotmat).attach_to(base)
# base.run()
# objects
test = copy.deepcopy(obj)
rotmat = rm.rotmat_from_axangle(rotate_axis, math.radians(45))
homomat = rm.homomat_from_posrot(objpos_start, rotmat)
test.set_homomat(homomat)
test.set_rgba([.8, .6, .3, .4])
test.attach_to(base)
next = copy.deepcopy(obj)
rotmat = rm.rotmat_from_axangle(rotate_axis, math.radians(58))
homomat = rm.homomat_from_posrot(objpos_start, rotmat)
next.set_homomat(homomat)
next.set_rgba([0, 1, 0, .4])
next.attach_to(base)
obscmlist = []
obscmlist.append(test)
obscmlist.append(next)
    # ## for verification: right-hand pulling motion and moving the object
arm_name = 'rgt_arm'
rgt_jnt_values = np.radians(
np.array([-16.40505261, -52.96523856, 91.11206022, 36.08211617, 132.71248608, 67.39504932]))
robot_s.fk(component_name=arm_name, jnt_values=rgt_jnt_values)
robot_s.gen_meshmodel().attach_to(base)
# rgt_pos, rgt_rotmat = robot_s.get_gl_tcp(hnd_name=arm_name)
# ropelinesorted = []
# dir = rm.unit_vector(ropetoppos - rgt_pos)
# path = inik_solver.gen_rel_linear_motion(hnd_name=arm_name,
# goal_tcp_pos=rgt_pos,
# goal_tcp_rotmat=rgt_rotmat,
# direction=-dir,
# distance=.15,
# type='source')
# # for conf in path:
# # robot_s.fk(arm_name, conf)
# # robot_s.gen_meshmodel().attach_to(base)
# # base.run()
# robot_s.fk(hnd_name=arm_name, jnt_values=path[-1])
# # robot_s.gen_meshmodel().attach_to(base)
# # base.run()
# rgt_pos, rgt_rotmat = robot_s.get_gl_tcp(hnd_name=arm_name)
# counter = 0
# while True:
# rgt_append_pos = rgt_pos + dir * counter * 1e-3
# ropelinesorted.append(rgt_append_pos)
# counter += 1
# if rgt_append_pos[2] > 1.7:
# break
# ropelinesorted = ropelinesorted[::-1]
# for rope_point in ropelinesorted:
# gm.gen_sphere(rope_point).attach_to(base)
# # base.run()
#
    # # left-hand pulling motion
# startpointid = 0 # 1.7
    # hold_pos_final = np.array([.25, -.15, 1.4])  # tentative placement
# arm_name = "rgt_arm"
# IKpossiblelist_start, hold_pos_init, hold_rot_init, startpointid = decidestartpose(ropelinesorted,
# arm_name,
# predefined_grasps_rgt,
# robot_s.rgt_arm.homeconf,
# startpointid)
# # for data in IKpossiblelist_start:
# # robot_s.fk(arm_name, data[0])
# # robot_s.gen_meshmodel().attach_to(base)
# # base.run()
# IKpossiblelist_startgoal = decidegoalpose_onepoint(arm_name,
# IKpossiblelist_start,
# hold_pos_final,
# predefined_grasps_rgt,
# obscmlist)
# rgtstart = IKpossiblelist_startgoal[0][0]
# rgtgoal = IKpossiblelist_startgoal[0][1]
# robot_s.fk(arm_name, rgtstart)
# rgtstart_pos, rgtstart_rotmat = robot_s.get_gl_tcp(arm_name)
# robot_s.gen_meshmodel().attach_to(base)
# robot_s.fk(arm_name, rgtgoal)
# robot_s.gen_meshmodel().attach_to(base)
# # base.run()
#
# dir = rm.unit_vector(hold_pos_final - ropelinesorted[0])
# length = np.linalg.norm(hold_pos_final - ropelinesorted[0])
# path = inik_solver.gen_rel_linear_motion(arm_name, rgtstart_pos, rgtstart_rotmat, dir, length, [],
# type="source")
# for conf in path:
# robot_s.fk(arm_name, conf)
# robot_s.gen_meshmodel().attach_to(base)
# # base.run()
    ## ------ for simulation ------
ropelinesorted = []
for i in range(200):
        ropelinesorted.append(np.array([.25, 0, 1.65 - i * 1e-3]))
# ropelinesorted = np.load('RopeVertex_test.npy')
np.save('RopeVertex_test.npy', ropelinesorted)
## ------------------------------
    ## ------ point cloud of the real rope ------
# pointrange = [.1, .4, -.15, .15, 1.2, 1.8]
# newpcd = getpointcloudkinect(pointrange)
# ropelinesorted = doRANSAC(newpcd, 5)
# np.save('RopeVertex_data.npy', ropelinesorted)
# -----------------------
    ## todo: comment this out when simulating pushing
    ## start (pull with the right hand)
startpointid = 0
endflag = 0
while True:
arm_name = 'rgt_arm'
print("startpointid", startpointid)
IKpossiblelist_start_rgt, objpos_initial_rgt, objrot_initial_rgt, startpointid = decidestartpose(arm_name,
ropelinesorted,
predefined_grasps_rgt,
robot_s.rgt_arm.homeconf,
startpointid)
objpos_finallist_rgt = create_candidate_points(arm_name=arm_name, initialhandpos=objpos_initial_rgt)
np.save('finalposlist.npy', objpos_finallist_rgt)
# for point in objpos_finallist_rgt:
# gm.gen_sphere(pos=point).attach_to(base)
# base.run()
assessment_value_list_rgt, totalIKpossiblelist_startgoal_rgt, costlist_rgt = \
getsuitablegoalpos_first(arm_name,
IKpossiblelist_start_rgt,
objpos_initial_rgt,
objpos_finallist_rgt,
predefined_grasps_rgt)
for i, each_assessment_value_list in enumerate(assessment_value_list_rgt):
            useid = each_assessment_value_list[1]  ## pick candidates in descending order of assessment value
print("useid", useid)
IKpossiblelist_startgoal_rgt = totalIKpossiblelist_startgoal_rgt[int(useid)]
togoalvec_rgt = objpos_finallist_rgt[int(useid)] - objpos_initial_rgt
tostartvec_rgt = copy.copy(togoalvec_rgt) * (-1)
use_objpos_final_rgt = copy.copy(objpos_initial_rgt) + togoalvec_rgt
IKpossiblelist_rgt = decidemidpose(arm_name,
IKpossiblelist_startgoal_rgt,
handdirlist_rgt,
objpos_final=use_objpos_final_rgt)
ropepulling_rgt, usingposelist_rgt, usingposeid_rgt = ropepullingmotion(IKpossiblelist_rgt, togoalvec_rgt,
ctcallback_rgt)
print("ropepulling_rgt", ropepulling_rgt)
gotoinitialrgtpoint = RRTmotion(rbt.initjnts[3:9], usingposelist_rgt[2][0], ctcallback_rgt, obscmlist, 30,
10)
if len(ropepulling_rgt) > 0 and gotoinitialrgtpoint is not False:
endflag = 1
                ## length of the initial rope pull
d_0 = np.linalg.norm(objpos_initial_rgt - use_objpos_final_rgt)
theta_0 = 0
theta_1 = 0
break
if endflag == 1:
break
startpointid += 1
# if startpointid == len(ropelinesorted):
    ## save the relay point for the goal and the path
keeplist = [usingposelist_rgt[2][1], usingposelist_rgt[3][1], tostartvec_rgt]
pickle.dump(keeplist, open("keeplist.pickle", "wb"))
# # -------------------------------------------------------------------------------
    # ## todo: comment this out when simulating pushing
    ### todo: for the pushing simulation
    ## on the real environment
# newpcd = getpointcloudkinect(objpointrange)
# base.pg.genPntsnp(newpcd).reparentTo(base.render)
# # base.run()
# refpoint_fitting = np.array([[-200, -250, 0, 1],
# [-200, 250, 0, 1],
# [200, 250, 0, 1],
# [200, -250, 0, 1]])
# targetpointnew, refpoint_fitting = objectfitting(newpcd, board, refpoint_fitting)
# xaxis_obj, yaxis_obj, zaxis_obj, theta, cog = getobjaxis(targetpointnew, refpoint_fitting)
# objpos_initial_board, objrot_initial_board = getobjposandrot(zaxis_obj)
# objmat4 = rm.homobuild(objpos_initial_board, objrot_initial_board)
# currentobj = copy.deepcopy(obj)
# currentobj.setMat(base.pg.np4ToMat4(objmat4))
# currentobj.setColor(1, 0, 0, .5)
# currentobj.reparentTo(base.render)
# print(theta)
# base.run()
# pickle.dump(currentobj, open("obj.pickle", "wb"))
    ## on the simulator
# theta_sim = [66]
# theta = theta_sim[0]
# # experimentlist = [[gotoinitialrgtpoint + usingposelist_rgt[3][0], 85, 0, "rgt", "gotoinitialrgtpoint", 2.0]]
#
# hold_pos_init = copy.copy(objpos_start)
# hold_rot_init = rm.rodrigues(rotate_axis, theta)
# # test = copy.deepcopy(obj)
# # mat = rm.homobuild(hold_pos_init, hold_rot_init)
# # test.setMat(base.pg.np4ToMat4(mat))
# # test.reparentTo(base.render)
# # robot_s.movearmfk(pushpose_pre, "lft")
# # rbtmg.genmnp(robot_s, togglejntscoord=False).reparentTo(base.render)
# # base.run()
#
    # ## motion planning for pushing
# # limitdegree = getlimitdegree(hold_pos_init, rotate_axis)
# limitdegree = 100
# ## test
# pushpath_total, pushpos_total, obj_total, hangedpos_total = getpushingpath_first(obj, hold_pos_init, hold_rot_init, theta, rotate_axis)
# print("pushpath_total", pushpath_total)
#
# obj_ver = copy.deepcopy(obj_total[-1])
# objmat4_ver = obj_ver.gethomomat()
# objpos_ver = objmat4_ver[:3, 3]
# objrot_ver = objmat4_ver[:3, :3]
#
# pushpos = pushpos_total[-1]
# pushpath_total2, pushpos_total2, obj_total2, hangedpos_total2 = getpushingpath_second(obj_ver, objpos_ver, objrot_ver, pushpos, limitdegree, rotate_axis)
# print("pushpath_total2", pushpath_total2)
#
# # pushpath_all = np.concatenate([pushpath_total, pushpath_total2])
# # pushpos_all = np.concatenate([pushpos_total, pushpos_total2])
# # obj_all = np.concatenate([obj_total, obj_total2])
# # looselength_all = np.concatenate([looselength_total, looselength_total2])
# pushpath_all = list(pushpath_total) + list(pushpath_total2)
# pushpos_all = list(pushpos_total) + list(pushpos_total2)
# obj_all = list(obj_total) + list(obj_total2)
# hangedpos_all = list(hangedpos_total) + list(hangedpos_total2)
#
# pickle.dump(obj_all[0], open("obj.pickle", "wb"))
# pickle.dump([pushpath_all, pushpos_all, obj_all, hangedpos_all], open("pushinglist.pickle", "wb"))
    ## todo: for the pushing simulation
# base.run()
    ### simulation & real robot
### Order of "experimentlist":[path, startjawwidth, endjawwidth, arm_name, pathlable, timepathstep]
predictlist = [d_0, theta_1, theta_0]
experimentlist = [[gotoinitialrgtpoint + usingposelist_rgt[3][0], 85, 0, "rgt", "gotoinitialrgtpoint", 2.0],
[ropepulling_rgt, 0, 0, "rgt", "ropepulling_rgt", 1.0]]
    ## for simulation
theta_sim = [0]
pushstartpoint = [0]
motioncounter = [0]
pathcounter = [0]
activelist = experimentlist[pathcounter[0]]
    ## counters for rope pulling
pullingcounter = [0]
pullingcounter_rgt = [0]
pullingcounter_lft = [0]
forcelist = []
ropepullingforcelist = []
used_pushpathlist = []
    ## counters for the pushing motion
forcecounter = [0]
pushcounter = [0]
forceflag = [0]
stopflag = [0]
rbtmnpani = [None]
objmnpani = [None]
pntani = [None]
finalposani = [None]
endpoint = [1000]
ropepullingflag = [1]
pushingflag = [0]
ropeloosningflag = [0]
ropepullendflag = [0]
rgtregraspflag = [0]
onearmflag = [0]
pushendflag = [0]
getgoalcounter = [0]
# obj_current = None
finalpos = [None]
rot = rm.rodrigues(rotate_axis, theta_sim[0])
mat = rm.homobuild(objpos_start, rot)
obj_current = copy.deepcopy(obj)
obj_current.setMat(base.pg.np4ToMat4(mat))
pickle.dump(obj_current, open("obj.pickle", "wb"))
def updatemotionsec(activelist, rbtmnp, objmnp, motioncounter, rbt, pnt, finalpos, task):
if motioncounter[0] < len(activelist[0]):
if rbtmnp[0] is not None:
rbtmnp[0].detachNode()
if objmnp[0] is not None:
objmnp[0].detachNode()
if pnt[0] is not None:
pnt[0].detachNode()
if finalpos[0] is not None:
finalpos[0].detachNode()
            ## display the robot
pose = activelist[0][motioncounter[0]]
armname = activelist[3]
rbt.movearmfk(pose, armname)
rbtmnp[0] = rbtmg.genmnp(rbt)
rbtmnp[0].reparentTo(base.render)
            ## display the rope point cloud
            # rope = np.load('RopeVertex_test.npy')  ## for testing
            rope = np.load('RopeVertex_data.npy')
import numpy as np
from ..field import Field
def large_poisson(lam, thresh=1e6):
"""
Draw samples from a Poisson distribution, taking care of large values of `lam`.
At large values of `lam` the distribution automatically switches to the corresponding normal distribution.
This switch is independently decided for each expectation value in the `lam` array.
Parameters
----------
lam : array_like
Expectation value for the Poisson distribution. Must be >= 0.
thresh : float
The threshold at which the distribution switched from a Poisson to a normal distribution.
Returns
-------
array_like
The drawn samples from the Poisson or normal distribution, depending on the expectation value.
"""
large = lam > thresh
small = ~large
# Use normal approximation if the number of photons is large
n = np.zeros(len(lam))
    # NOTE: the source is truncated below; the sqrt(lam) scaling of the normal approximation, the
    # exact Poisson draw for the small values, and the return are completed here as an assumption
    # consistent with the docstring above.
    n[large] = np.round(lam[large] + np.random.normal(size=np.sum(large)) * np.sqrt(lam[large]))
    n[small] = np.random.poisson(lam[small], size=np.sum(small))
    return n
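# --- illustration only (added sketch) ----------------------------------------------------------
# Usage sketch for large_poisson() above (values made up): entries of `lam` above `thresh` are
# drawn from the normal approximation N(lam, sqrt(lam)), the rest from an exact Poisson draw.
def _demo_large_poisson():
    lam = np.array([3.0, 2.0e7])      # the second entry exceeds the default threshold of 1e6
    samples = large_poisson(lam)
    return samples                    # array of two integer-valued sample counts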
import numpy as np
import matplotlib.pyplot as plt
import sys
# class for plot axis limitation
class SetAxisLimit:
def __init__(self):
pass
def idx_bndry(self, xbl, xbr):
# calculate indices of grid boundaries
xzn0 = np.asarray(self.xzn0)
xlm = np.abs(xzn0 - xbl)
xrm = np.abs(xzn0 - xbr)
idxl = int(np.where(xlm == xlm.min())[0][0])
idxr = int(np.where(xrm == xrm.min())[0][0])
return idxl, idxr
def set_plt_axis(self, LAXIS, xbl, xbr, ybu, ybd, to_plot):
# calculate INDICES for grid boundaries
if LAXIS == 1:
idxl, idxr = self.idx_bndry(xbl, xbr)
# replace NaN with zero and infinity with large finite numbers
to_plot = np.nan_to_num(to_plot)
# hack for nan_to_num() got an unexpected keyword argument 'num,posif,negif'
to_plot_tmp = []
for data in to_plot:
data_tmp = []
for value in data:
if (value > 1.e50):
data_tmp.append(1.e50)
elif (value < -1.e50):
data_tmp.append(-1.e50)
else:
data_tmp.append(value)
to_plot_tmp.append(data_tmp)
to_plot = to_plot_tmp
number_of_curves = len(to_plot)
# print(number_of_curves)
if (number_of_curves == 1):
# limit x/y axis
if LAXIS == 0:
plt.axis([self.xzn0[0], self.xzn0[-1], np.min(to_plot[0][0:-1]), np.max(to_plot[0][0:-1])])
if LAXIS == 1:
plt.axis([xbl, xbr, np.min(to_plot[0][idxl:idxr]), np.max(to_plot[0][idxl:idxr])])
if (number_of_curves == 2):
# limit x/y axis by global min/max from all terms
if LAXIS == 0:
minx = np.min([np.min(to_plot[0][0:-1]), np.min(to_plot[1][0:-1])])
maxx = np.max([np.max(to_plot[0][0:-1]), np.max(to_plot[1][0:-1])])
plt.axis([self.xzn0[0], self.xzn0[-1], minx, maxx])
if LAXIS == 1:
minx = np.min([np.min(to_plot[0][idxl:idxr]), np.min(to_plot[1][idxl:idxr])])
maxx = np.max([np.max(to_plot[0][idxl:idxr]), np.max(to_plot[1][idxl:idxr])])
plt.axis([xbl, xbr, minx, maxx])
if (number_of_curves == 3):
# limit x/y axis by global min/max from all terms
if LAXIS == 0:
minx = np.min([np.min(to_plot[0][0:-1]), np.min(to_plot[1][0:-1]), \
np.min(to_plot[2][0:-1])])
maxx = np.max([np.max(to_plot[0][0:-1]), np.max(to_plot[1][0:-1]), \
np.max(to_plot[2][0:-1])])
plt.axis([self.xzn0[0], self.xzn0[-1], minx, maxx])
if LAXIS == 1:
minx = np.min([np.min(to_plot[0][idxl:idxr]), np.min(to_plot[1][idxl:idxr]), \
np.min(to_plot[2][idxl:idxr])])
maxx = np.max([np.max(to_plot[0][idxl:idxr]), np.max(to_plot[1][idxl:idxr]), \
np.max(to_plot[2][idxl:idxr])])
plt.axis([xbl, xbr, minx, maxx])
if (number_of_curves == 4):
# limit x/y axis by global min/max from all terms
if LAXIS == 0:
minx = np.min([np.min(to_plot[0][0:-1]), np.min(to_plot[1][0:-1]), \
np.min(to_plot[2][0:-1]), np.min(to_plot[3][0:-1])])
                maxx = np.max([np.max(to_plot[0][0:-1]), np.max(to_plot[1][0:-1]), \
                               np.max(to_plot[2][0:-1]), np.max(to_plot[3][0:-1])])
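# --- illustration only (added sketch) ----------------------------------------------------------
# On NumPy >= 1.17, np.nan_to_num accepts posinf/neginf keywords, so the manual clipping loop in
# SetAxisLimit.set_plt_axis above can be replaced by a single call (sketch with made-up data):
def _demo_nan_to_num_clip():
    data = np.array([[1.0, np.nan, np.inf, -np.inf]])
    return np.nan_to_num(data, nan=0.0, posinf=1.e50, neginf=-1.e50)
    # -> [[ 1.e+00  0.e+00  1.e+50 -1.e+50]]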
"""GaussianKDE module."""
import numpy as np
from scipy.special import ndtr
from scipy.stats import gaussian_kde
from copulas import EPSILON, random_state, store_args, validate_random_state
from copulas.optimize import bisect, chandrupatla
from copulas.univariate.base import BoundedType, ParametricType, ScipyModel
class GaussianKDE(ScipyModel):
"""A wrapper for gaussian Kernel density estimation.
It was implemented in scipy.stats toolbox. gaussian_kde is slower than statsmodels
but allows more flexibility.
    When a sample_size is provided the fit method will sample the
    data and mask the real information. It also ensures that the number of
    entries always equals the value of sample_size.
Args:
sample_size(int): amount of parameters to sample
"""
PARAMETRIC = ParametricType.NON_PARAMETRIC
BOUNDED = BoundedType.UNBOUNDED
MODEL_CLASS = gaussian_kde
@store_args
def __init__(self, sample_size=None, random_state=None, bw_method=None, weights=None):
self.random_state = validate_random_state(random_state)
self._sample_size = sample_size
self.bw_method = bw_method
self.weights = weights
def _get_model(self):
dataset = self._params['dataset']
self._sample_size = self._sample_size or len(dataset)
return gaussian_kde(dataset, bw_method=self.bw_method, weights=self.weights)
def _get_bounds(self):
X = self._params['dataset']
lower = np.min(X) - (5 * np.std(X))
upper = np.max(X) + (5 * np.std(X))
return lower, upper
def probability_density(self, X):
"""Compute the probability density for each point in X.
Arguments:
X (numpy.ndarray):
Values for which the probability density will be computed.
It must have shape (n, 1).
Returns:
numpy.ndarray:
Probability density values for points in X.
Raises:
NotFittedError:
if the model is not fitted.
"""
self.check_fit()
return self._model.evaluate(X)
@random_state
def sample(self, n_samples=1):
"""Sample values from this model.
Argument:
n_samples (int):
Number of values to sample
Returns:
numpy.ndarray:
Array of shape (n_samples, 1) with values randomly
sampled from this model distribution.
Raises:
NotFittedError:
if the model is not fitted.
"""
self.check_fit()
return self._model.resample(size=n_samples)[0]
def cumulative_distribution(self, X):
"""Compute the cumulative distribution value for each point in X.
Arguments:
X (numpy.ndarray):
Values for which the cumulative distribution will be computed.
It must have shape (n, 1).
Returns:
numpy.ndarray:
Cumulative distribution values for points in X.
Raises:
NotFittedError:
if the model is not fitted.
"""
self.check_fit()
X = np.array(X)
stdev = np.sqrt(self._model.covariance[0, 0])
lower = ndtr((self._get_bounds()[0] - self._model.dataset) / stdev)[0]
uppers = ndtr((X[:, None] - self._model.dataset) / stdev)
return (uppers - lower).dot(self._model.weights)
def percent_point(self, U, method='chandrupatla'):
"""Compute the inverse cumulative distribution value for each point in U.
Arguments:
U (numpy.ndarray):
Values for which the cumulative distribution will be computed.
It must have shape (n, 1) and values must be in [0,1].
method (str):
Whether to use the `chandrupatla` or `bisect` solver.
Returns:
numpy.ndarray:
Inverse cumulative distribution values for points in U.
Raises:
NotFittedError:
if the model is not fitted.
"""
self.check_fit()
if len(U.shape) > 1:
raise ValueError(f'Expected 1d array, got {(U, )}.')
if np.any(U > 1.0) or np.any(U < 0.0):
raise ValueError('Expected values in range [0.0, 1.0].')
is_one = U >= 1.0 - EPSILON
is_zero = U <= EPSILON
is_valid = ~(is_zero | is_one)
lower, upper = self._get_bounds()
def _f(X):
return self.cumulative_distribution(X) - U[is_valid]
X = np.zeros(U.shape)
X[is_one] = float('inf')
X[is_zero] = float('-inf')
if is_valid.any():
lower = np.full(U[is_valid].shape, lower)
upper = np.full(U[is_valid].shape, upper)
if method == 'bisect':
X[is_valid] = bisect(_f, lower, upper)
else:
X[is_valid] = chandrupatla(_f, lower, upper)
return X
def _fit_constant(self, X):
sample_size = self._sample_size or len(X)
constant = np.unique(X)[0]
self._params = {
'dataset': [constant] * sample_size,
}
def _fit(self, X):
if self._sample_size:
X = gaussian_kde(X, bw_method=self.bw_method,
weights=self.weights).resample(self._sample_size)
self._params = {
'dataset': X.tolist()
}
self._model = self._get_model()
def _is_constant(self):
        # NOTE: the source is truncated here; the "== 1" comparison (a constant column has exactly
        # one unique value) is an assumption.
        return len(np.unique(self._params['dataset'])) == 1
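# --- illustration only (added sketch) ----------------------------------------------------------
# percent_point() above inverts the KDE's CDF numerically with a bracketing root finder
# (chandrupatla or bisect).  The self-contained sketch below inverts a standard normal CDF the
# same way with scipy's brentq, recovering the 97.5% quantile of about 1.96.
def _demo_cdf_inversion():
    from scipy.optimize import brentq
    from scipy.stats import norm
    u = 0.975
    return brentq(lambda x: norm.cdf(x) - u, -10.0, 10.0)   # ~ 1.959964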
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Test circuits and reference outputs for 2-qubit Clifford gate instructions.
"""
import numpy as np
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
# ==========================================================================
# CX-gate
# ==========================================================================
def cx_gate_circuits_deterministic(final_measure=True):
"""CX-gate test circuits with deterministic counts."""
circuits = []
qr = QuantumRegister(2)
if final_measure:
cr = ClassicalRegister(2)
regs = (qr, cr)
else:
regs = (qr, )
# CX01, |00> state
circuit = QuantumCircuit(*regs)
circuit.cx(qr[0], qr[1])
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
# CX10, |00> state
circuit = QuantumCircuit(*regs)
circuit.cx(qr[1], qr[0])
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
# CX01.(X^I), |10> state
circuit = QuantumCircuit(*regs)
circuit.x(qr[1])
circuit.barrier(qr)
circuit.cx(qr[0], qr[1])
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
# CX10.(I^X), |01> state
circuit = QuantumCircuit(*regs)
circuit.x(qr[0])
circuit.barrier(qr)
circuit.cx(qr[1], qr[0])
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
# CX01.(I^X), |11> state
circuit = QuantumCircuit(*regs)
circuit.x(qr[0])
circuit.barrier(qr)
circuit.cx(qr[0], qr[1])
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
# CX10.(X^I), |11> state
circuit = QuantumCircuit(*regs)
circuit.x(qr[1])
circuit.barrier(qr)
circuit.cx(qr[1], qr[0])
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
# CX01.(X^X), |01> state
circuit = QuantumCircuit(*regs)
circuit.x(qr)
circuit.barrier(qr)
circuit.cx(qr[0], qr[1])
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
# CX10.(X^X), |10> state
circuit = QuantumCircuit(*regs)
circuit.x(qr)
circuit.barrier(qr)
circuit.cx(qr[1], qr[0])
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
return circuits
def cx_gate_counts_deterministic(shots, hex_counts=True):
"""CX-gate circuits reference counts."""
targets = []
if hex_counts:
# CX01, |00> state
targets.append({'0x0': shots}) # {"00": shots}
# CX10, |00> state
targets.append({'0x0': shots}) # {"00": shots}
# CX01.(X^I), |10> state
targets.append({'0x2': shots}) # {"00": shots}
# CX10.(I^X), |01> state
targets.append({'0x1': shots}) # {"00": shots}
# CX01.(I^X), |11> state
targets.append({'0x3': shots}) # {"00": shots}
# CX10.(X^I), |11> state
targets.append({'0x3': shots}) # {"00": shots}
# CX01.(X^X), |01> state
targets.append({'0x1': shots}) # {"00": shots}
# CX10.(X^X), |10> state
targets.append({'0x2': shots}) # {"00": shots}
else:
# CX01, |00> state
targets.append({'00': shots}) # {"00": shots}
# CX10, |00> state
targets.append({'00': shots}) # {"00": shots}
# CX01.(X^I), |10> state
targets.append({'10': shots}) # {"00": shots}
# CX10.(I^X), |01> state
targets.append({'01': shots}) # {"00": shots}
# CX01.(I^X), |11> state
targets.append({'11': shots}) # {"00": shots}
# CX10.(X^I), |11> state
targets.append({'11': shots}) # {"00": shots}
# CX01.(X^X), |01> state
targets.append({'01': shots}) # {"00": shots}
# CX10.(X^X), |10> state
targets.append({'10': shots}) # {"00": shots}
return targets
def cx_gate_statevector_deterministic():
"""CX-gate test circuits with deterministic counts."""
targets = []
# CX01, |00> state
targets.append(np.array([1, 0, 0, 0]))
# CX10, |00> state
targets.append(np.array([1, 0, 0, 0]))
# CX01.(X^I), |10> state
targets.append(np.array([0, 0, 1, 0]))
# CX10.(I^X), |01> state
targets.append(np.array([0, 1, 0, 0]))
# CX01.(I^X), |11> state
targets.append(np.array([0, 0, 0, 1]))
# CX10.(X^I), |11> state
targets.append(np.array([0, 0, 0, 1]))
# CX01.(X^X), |01> state
targets.append(np.array([0, 1, 0, 0]))
# CX10.(X^X), |10> state
targets.append(np.array([0, 0, 1, 0]))
return targets
def cx_gate_unitary_deterministic():
"""CX-gate circuits reference unitaries."""
targets = []
# CX01, |00> state
targets.append(np.array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0]]))
# CX10, |00> state
targets.append(np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0]]))
# CX01.(X^I), |10> state
targets.append(np.array([[0, 0, 1, 0],
[0, 1, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 1]]))
# CX10.(I^X), |01> state
targets.append(np.array([[0, 1, 0, 0],
[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]]))
# CX01.(I^X), |11> state
targets.append(np.array([[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[1, 0, 0, 0]]))
# CX10.(X^I), |11> state
targets.append(np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]]))
# CX01.(X^X), |01> state
targets.append(np.array([[0, 0, 0, 1],
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0]]))
# CX10.(X^X), |10> state
targets.append(np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[1, 0, 0, 0],
[0, 1, 0, 0]]))
return targets
def cx_gate_circuits_nondeterministic(final_measure=True):
"""CX-gate test circuits with non-deterministic counts."""
circuits = []
qr = QuantumRegister(2)
if final_measure:
cr = ClassicalRegister(2)
regs = (qr, cr)
else:
regs = (qr, )
# CX01.(I^H), Bell state
circuit = QuantumCircuit(*regs)
circuit.h(qr[0])
circuit.barrier(qr)
circuit.cx(qr[0], qr[1])
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
# CX10.(H^I), Bell state
circuit = QuantumCircuit(*regs)
circuit.h(qr[1])
circuit.barrier(qr)
circuit.cx(qr[1], qr[0])
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
return circuits
def cx_gate_counts_nondeterministic(shots, hex_counts=True):
"""CX-gate circuits reference counts."""
targets = []
if hex_counts:
# CX01.(I^H), Bell state
targets.append({'0x0': shots / 2, '0x3': shots / 2})
# CX10.(I^H), Bell state
targets.append({'0x0': shots / 2, '0x3': shots / 2})
else:
# CX01.(I^H), Bell state
targets.append({'00': shots / 2, '11': shots / 2})
# CX10.(I^H), Bell state
targets.append({'00': shots / 2, '11': shots / 2})
return targets
def cx_gate_statevector_nondeterministic():
"""CX-gate circuits reference statevectors."""
targets = []
# CX01.(I^H), Bell state
targets.append(np.array([1, 0, 0, 1]) / np.sqrt(2))
# CX10.(I^H), Bell state
targets.append(np.array([1, 0, 0, 1]) / np.sqrt(2))
return targets
def cx_gate_unitary_nondeterministic():
"""CX-gate circuits reference unitaries."""
targets = []
# CX01.(I^H), Bell state
targets.append(np.array([[1, 1, 0, 0],
[0, 0, 1, -1],
[0, 0, 1, 1],
[1, -1, 0, 0]]) / np.sqrt(2))
# CX10.(I^H), Bell state
targets.append(np.array([[1, 0, 1, 0],
[0, 1, 0, 1],
[0, 1, 0, -1],
[1, 0, -1, 0]]) / np.sqrt(2))
return targets
# ==========================================================================
# CZ-gate
# ==========================================================================
def cz_gate_circuits_deterministic(final_measure=True):
"""CZ-gate test circuits with deterministic counts."""
circuits = []
qr = QuantumRegister(2)
if final_measure:
cr = ClassicalRegister(2)
regs = (qr, cr)
else:
regs = (qr, )
# CZ, |00> state
circuit = QuantumCircuit(*regs)
circuit.cz(qr[0], qr[1])
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
# CX10, |00> state
circuit = QuantumCircuit(*regs)
circuit.h(qr[0])
circuit.cz(qr[0], qr[1])
circuit.h(qr[0])
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
# CX01, |00> state
circuit = QuantumCircuit(*regs)
circuit.h(qr[1])
circuit.cz(qr[1], qr[0])
circuit.h(qr[1])
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
# (I^H).CZ.(X^H) = CX10.(X^I), |11> state
circuit = QuantumCircuit(*regs)
circuit.x(qr[1])
circuit.barrier(qr)
circuit.h(qr[0])
circuit.barrier(qr)
circuit.cz(qr[0], qr[1])
circuit.barrier(qr)
circuit.h(qr[0])
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
# (H^I).CZ.(H^X) = CX01.(I^X), |11> state
circuit = QuantumCircuit(*regs)
circuit.x(qr[0])
circuit.barrier(qr)
circuit.h(qr[1])
circuit.barrier(qr)
circuit.cz(qr[0], qr[1])
circuit.barrier(qr)
circuit.h(qr[1])
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
return circuits
def cz_gate_counts_deterministic(shots, hex_counts=True):
"""CZ-gate circuits reference counts."""
targets = []
if hex_counts:
# CZ, |00> state
targets.append({'0x0': shots})
# CX10, |00> state
targets.append({'0x0': shots})
# CX01, |00> state
targets.append({'0x0': shots})
# (I^H).CZ.(X^H) = CX10.(X^I), |11> state
targets.append({'0x3': shots})
# (H^I).CZ.(H^X) = CX01.(I^H), |11> state
targets.append({'0x3': shots})
else:
# CZ, |00> state
targets.append({'00': shots})
# CX10, |00> state
targets.append({'00': shots})
# CX01, |00> state
targets.append({'00': shots})
# (I^H).CZ.(X^H) = CX10.(X^I), |11> state
targets.append({'11': shots})
# (H^I).CZ.(H^X) = CX01.(I^H), |11> state
targets.append({'11': shots})
return targets
def cz_gate_statevector_deterministic():
"""CZ-gate test circuits with deterministic counts."""
targets = []
# CZ, |00> state
targets.append(np.array([1, 0, 0, 0]))
# CX10, |00> state
targets.append(np.array([1, 0, 0, 0]))
# CX01, |00> state
targets.append(np.array([1, 0, 0, 0]))
# (I^H).CZ.(X^H) = CX10.(X^I), |11> state
    targets.append(np.array([0, 0, 0, 1]))
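# --- illustration only (added sketch) ----------------------------------------------------------
# Cross-check of the reference values above: applying H to qubit 0 of |00> and then the CX01
# matrix listed in cx_gate_unitary_deterministic() yields the Bell state (|00> + |11>)/sqrt(2),
# i.e. the statevector [1, 0, 0, 1]/sqrt(2) used in cx_gate_statevector_nondeterministic().
def _demo_bell_state():
    h = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
    cx01 = np.array([[1, 0, 0, 0],
                     [0, 0, 0, 1],
                     [0, 0, 1, 0],
                     [0, 1, 0, 0]])
    state00 = np.array([1, 0, 0, 0])
    return cx01 @ np.kron(np.eye(2), h) @ state00   # -> [0.707, 0, 0, 0.707]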
# Import packages
import argparse
import json
import multiprocessing
import os
import pickle
import sys
from pathlib import Path
import numpy as np
import pyroomacoustics as pa
import scipy as scipy
import scipy.io.wavfile
# Import original sources
from config_path import get_paths
from parallel_proc import process
from utils import (ProgressBar, wav_format_to_float, wav_format_to_int16,
write_wav)
# Run the RIR simulation with Pyroomacoustics
def acoustic_simulation(config_path, sim_info, config, check_empty_channels=False):
# fs
fs = sim_info["wav_frame_rate_mixed"]
    # reverberation time
    rt60 = sim_info["rir_info_t60"]
    # room dimensions
    room_dim = np.array(sim_info["rir_info_room_dimension"])
    # back-calculate absorption and max order from the reverberation time
e_absorption, max_order = pa.inverse_sabine(rt60, room_dim)
R = np.array(sim_info["rir_info_microphone_position"])
R = R.T
    # source position information
speaker_locations = np.array(sim_info["rir_info_speaker_position"])
speaker_locations = speaker_locations.T
n_sources = speaker_locations.shape[1]
    # add each source to the simulation
clean_data_path = sim_info["wav_dpath_original"]
reverberant_conv_data = []
anechoic_conv_data = []
raw_data = []
temp_dtype = np.float64
for s in range(n_sources):
        # first handle the reverberant case
room = pa.ShoeBox(room_dim, fs=fs, max_order=max_order, absorption=e_absorption)
room_anechoic = pa.ShoeBox(room_dim, fs=fs, max_order=0)
        # configure the microphone array to be used
room.add_microphone_array(pa.MicrophoneArray(R, fs=room.fs))
room_anechoic.add_microphone_array(pa.MicrophoneArray(R, fs=room.fs))
_, clean_data = scipy.io.wavfile.read(
config_path.output_path / clean_data_path[s]
)
clean_data = wav_format_to_float(clean_data, dtype=temp_dtype)
zero_mean = config["mixinfo_parameters"].get("remove_mean_sources", False)
if zero_mean:
clean_data = clean_data - clean_data.mean()
filled_clean_data = np.zeros(
(sim_info["wav_n_samples_mixed"]), dtype=temp_dtype
)
clean_begin = sim_info["wav_offset"][s]
clean_end = sim_info["wav_offset"][s] + sim_info["wav_n_samples_original"][s]
filled_clean_data[clean_begin:clean_end] = clean_data
norm = np.sqrt(np.average(np.square(filled_clean_data)))
filled_clean_data /= norm
raw_data.append(filled_clean_data.copy())
add_success = False
while not add_success:
try:
room.add_source(speaker_locations[:, s], signal=filled_clean_data)
room_anechoic.add_source(
speaker_locations[:, s], signal=filled_clean_data
)
add_success = True
except Exception:
add_success = False
# room.add_source(speaker_locations[:, s], signal=filled_clean_data)
room.simulate(snr=None)
# room_anechoic.add_source(speaker_locations[:, s], signal=filled_clean_data)
room_anechoic.simulate(snr=None)
n_sample = np.shape(filled_clean_data)[0]
        # get the convolved waveforms (channels, samples)
reverberant_signals = room.mic_array.signals[:, :n_sample].copy()
anechoic_signals = room_anechoic.mic_array.signals[:, :n_sample].copy()
weight = np.sqrt(
            10 ** (float(sim_info["wav_snr_mixing"][s]) / 10.0)
) / np.sqrt(np.sum(np.square(reverberant_signals)))
reverberant_conv_data.append(weight * reverberant_signals)
anechoic_conv_data.append(weight * anechoic_signals)
reverberant_mix = np.sum(reverberant_conv_data, axis=0)
    # Rescale the mixed sources so that the maximum value is MAX * 0.9
upper_limit = config["mixinfo_parameters"]["wav_upper_limit"]
    # NOTE: the source is truncated here; iterating over the convolved reverberant sources is an
    # assumption.
    mixed_max = np.max(
        [
            np.max(np.abs(data)) for data in reverberant_conv_data
        ]
    )
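# --- illustration only (added sketch) ----------------------------------------------------------
# The mixing weight above is sqrt(10**(SNR/10)) / ||x||, which rescales a convolved source x so
# that its total energy becomes 10**(SNR/10).  Self-contained check with a made-up signal and SNR:
def _demo_snr_weight():
    snr_db = 6.0
    x = np.random.randn(4, 16000)                       # (channels, samples), dummy signal
    weight = np.sqrt(10 ** (snr_db / 10.0)) / np.sqrt(np.sum(np.square(x)))
    energy = np.sum(np.square(weight * x))
    return energy                                       # equals 10**(snr_db/10) up to rounding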
# In order to manipulate the array
import numpy as np
# In order to load mat file
from scipy.io import loadmat
# In order to import the libsvm format dataset
from sklearn.datasets import load_svmlight_file
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import Binarizer
from collections import Counter
from fetch.coil_2000 import fetch_coil_2000
from process.coil_2000 import convert_coil_2000
def abalone_19():
# Abalone dataset - Convert the ring = 19 to class 1 and the other to class 0
filename = '../../data/raw/mldata/uci-20070111-abalone.mat'
matfile = loadmat(filename)
    # NOTE: the source is truncated here; allocating by the shape of the raveled array is an
    # assumption.
    sex_array = np.zeros(np.ravel(matfile['int1']).shape)
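# --- illustration only (added sketch) ----------------------------------------------------------
# abalone_19() maps ring == 19 to class 1 and everything else to class 0.  The equivalent
# vectorised relabelling, shown with made-up ring counts:
def _demo_ring_binarization():
    rings = np.array([9, 19, 11, 19, 7])
    labels = np.where(rings == 19, 1, 0)
    return labels                                       # -> [0, 1, 0, 1, 0]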
"""
Unit and regression test for the get module of the molsysmt package.
"""
# Import package, test suite, and other packages as needed
import molsysmt as msm
import numpy as np
# Get on molsysmt.MolSys
def test_get_1():
molsys = msm.convert(msm.demo['TcTIM']['1tcd.msmpk'], to_form='molsysmt.MolSys')
output = msm.get(molsys, target='atom', indices=[32,33,34], name=True)
true_output = np.array(['N', 'CA', 'C'], dtype=object)
    assert np.all(output == true_output)
'''
Copyright (C) 2019-2021, <NAME> <<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import sys
import argparse
import matplotlib.pyplot as plt
import numpy as np
import functools as ft
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
#rcParams['font.serif'] = 'Times New Roman'
rcParams['font.serif'] = 'Linux Libertine O'
rcParams['font.size'] = 14
def readr1(path: str) -> np.array:
with open(path, 'rt') as f:
data = f.readlines()
data = list(filter(lambda x: 'r@1' in x, data))
data = list(map(lambda x: float(x.strip().split()[-1]), data))
data = np.array(data) * 100
return data
class Curve:
def svg(self, path: str):
plt.savefig(path)
class ExpR1Curve(Curve):
tsize = 16
lsize = 14
def __init__(self):
plt.figure(figsize=[5 * 5, 4.8], tight_layout=True)
plt.subplot(1, 5, 1)
plt.grid(True, linestyle=':')
x = np.arange(0, 8) + 1
plt.plot(x, readr1('expr1/mnist.txt'), marker='.', color='tab:gray')
plt.plot(x, readr1('expr1/mnist-d.txt'), marker='v', color='tab:blue')
plt.plot(x, readr1('expr1/mnist-p.txt'), marker='*', color='tab:red')
plt.legend(['Vanilla', 'EST', 'ACT'])
plt.xlabel('Number of Epochs', size=self.lsize)
plt.ylabel('Recall@1', size=self.lsize)
plt.title('R@1 Curve of Defense Methods on MNIST', size=self.tsize)
plt.subplot(1, 5, 2)
plt.grid(True, linestyle=':')
x = np.arange(0, 8) + 1
plt.plot(x, readr1('expr1/fashion.txt'), marker='.', color='tab:gray')
plt.plot(
x,
readr1('expr1/fashion-d.txt'),
marker='v',
color='tab:blue')
plt.plot(x, readr1('expr1/fashion-p.txt'), marker='*', color='tab:red')
plt.legend(['Vanilla', 'EST', 'ACT'])
plt.xlabel('Number of Epochs', size=self.lsize)
plt.ylabel('Recall@1', size=self.lsize)
plt.title('R@1 Curve of Defense Methods on Fashion', size=self.tsize)
plt.subplot(1, 5, 3)
plt.grid(True, linestyle=':')
x = (np.arange(0, 15) + 1) * 10
plt.plot(x, readr1('expr1/cub.txt'), marker='.', color='tab:gray')
plt.plot(x, readr1('expr1/cub-d.txt'), marker='v', color='tab:blue')
plt.plot(x, readr1('expr1/cub-p.txt'), marker='*', color='tab:red')
plt.legend(['Vanilla', 'EST', 'ACT'],
loc='center right', bbox_to_anchor=(1.0, 0.7))
plt.xlabel('Number of Epochs', size=self.lsize)
plt.ylabel('Recall@1', size=self.lsize)
plt.title('R@1 Curve of Defense Methods on CUB', size=self.tsize)
plt.subplot(1, 5, 4)
plt.grid(True, linestyle=':')
x = (np.arange(0, 15) + 1) * 10
plt.plot(x, readr1('expr1/cars.txt'), marker='.', color='tab:gray')
plt.plot(x, readr1('expr1/cars-d.txt'), marker='v', color='tab:blue')
plt.plot(x, readr1('expr1/cars-p.txt'), marker='*', color='tab:red')
plt.legend(['Vanilla', 'EST', 'ACT'],
loc='center right', bbox_to_anchor=(1.0, 0.7))
plt.xlabel('Number of Epochs', size=self.lsize)
plt.ylabel('Recall@1', size=self.lsize)
plt.title('R@1 Curve of Defense Methods on CARS', size=self.tsize)
plt.subplot(1, 5, 5)
plt.grid(True, linestyle=':')
        x = (np.arange(0, 15) + 1) * 10
"""PyTorch related utility functions
"""
import logging
import importlib
import os
import pdb
import random
import shutil
import sys
import time
import traceback
from typing import Union, List
import numpy as np
import torch
# optional imports for less common libraries
try:
import torch_geometric
_torch_geometric_exists = True
except ImportError:
_torch_geometric_exists = False
def all_to_device(data, device):
"""Sends everything into a certain device """
if isinstance(data, dict):
for k in data:
data[k] = all_to_device(data[k], device)
return data
elif isinstance(data, list):
data = [all_to_device(d, device) for d in data]
return data
elif isinstance(data, torch.Tensor):
return data.to(device)
elif _torch_geometric_exists and isinstance(data, torch_geometric.data.batch.Batch):
return data.to(device)
else:
return data # Cannot be converted
def to_numpy(tensor: Union[np.ndarray, torch.Tensor, List]) -> Union[np.ndarray, List]:
"""Wrapper around .detach().cpu().numpy() """
if isinstance(tensor, torch.Tensor):
return tensor.detach().cpu().numpy()
elif isinstance(tensor, np.ndarray):
return tensor
elif isinstance(tensor, list):
return [to_numpy(l) for l in tensor]
elif isinstance(tensor, str):
return tensor
elif tensor is None:
return None
else:
raise NotImplementedError
def all_isfinite(x):
"""Check the entire nested dictionary/list of tensors is finite
(i.e. not nan or inf)"""
if isinstance(x, torch.Tensor):
return bool(torch.all(torch.isfinite(x)))
elif isinstance(x, list):
return all([all_isfinite(xi) for xi in x])
    elif isinstance(x, dict):
return all([all_isfinite(x[k]) for k in x])
# If it reaches here, it's an unsupported type. Returns True for such cases
return True
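# --- illustration only (added sketch) ----------------------------------------------------------
# Usage sketch for the helpers above (torch and numpy are imported at the top of this module):
# to_numpy() recurses through lists of tensors, and all_isfinite() flags any NaN/Inf entry.
def _demo_tensor_helpers():
    tensors = [torch.ones(2), torch.tensor([1.0, float('nan')])]
    arrays = to_numpy(tensors)              # -> list of two numpy arrays
    finite = all_isfinite(tensors)          # -> False because of the NaN entry
    return arrays, finite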
def seed_numpy_fn(x):
"""Numpy random seeding function to pass into Pytorch's dataloader.
This is required since numpy RNG is incompatible with fork
https://pytorch.org/docs/stable/notes/faq.html#my-data-loader-workers-return-identical-random-numbers
Example usage:
DataLoader(..., worker_init_fn=seed_numpy_fn)
"""
seed = torch.initial_seed() % (2 ** 32)
np.random.seed(seed)
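# Illustrative usage (a sketch mirroring the docstring above; `dataset` is an assumed Dataset object):
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8, num_workers=4,
#                                        worker_init_fn=seed_numpy_fn)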
def setup_seed(seed, cudnn_deterministic=False):
"""
fix random seed for deterministic training
"""
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
if cudnn_deterministic:
torch.backends.cudnn.deterministic = True
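# Illustrative usage (a sketch):
#   setup_seed(42, cudnn_deterministic=True)  # reproducible runs, possibly at some speed cost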
class CheckPointManager(object):
"""Manager for saving/managing pytorch checkpoints.
Provides functionality similar to tf.Saver such as
max_to_keep and keep_checkpoint_every_n_hours
"""
def __init__(self, save_path: str = None, max_to_keep=3, keep_checkpoint_every_n_hours=6.0):
if max_to_keep <= 0:
raise ValueError('max_to_keep must be at least 1')
self._max_to_keep = max_to_keep
self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
self._logger = logging.getLogger(self.__class__.__name__)
self._checkpoints_permanent = [] # Will not be deleted
self._checkpoints_buffer = [] # Those which might still be deleted
self._next_save_time = time.time()
self._best_score = None
self._best_step = None
if save_path is not None:
self._ckpt_dir = os.path.dirname(save_path)
self._save_path = save_path + '-{}.pth'
self._checkpoints_fname = os.path.join(self._ckpt_dir, 'checkpoints.txt')
os.makedirs(self._ckpt_dir, exist_ok=True)
self._update_checkpoints_file()
else:
self._ckpt_dir = None
self._save_path = None
self._checkpoints_fname = None
def _save_checkpoint(self, step, model, score, **kwargs):
save_name = self._save_path.format(step)
model_state_dict = {k: v for (k, v) in model.state_dict().items() if not v.is_sparse}
state = {'state_dict': model_state_dict,
'step': step}
for k in kwargs:
if getattr(kwargs[k], 'state_dict', None) is not None:
state[k] = kwargs[k].state_dict()
else:
state[k] = kwargs[k] # Note that loading of this variable is not supported
torch.save(state, save_name)
self._logger.info('Saved checkpoint: {}'.format(save_name))
self._checkpoints_buffer.append((save_name, time.time(), step))
        if self._best_score is None or np.all(np.array(score) >= np.array(self._best_score)):
import torch
from torch.optim import Adam, SGD
from torch.nn.modules import MSELoss
import numpy as np
from experience_replay.exp_replay import ReplayBuffer, PrioritizedReplayBuffer, HindsightReplayBuffer, \
PrioritizedHindsightReplayBuffer
from algorithms.dqn import DQN
from algorithms.dqn_other import algo_DQN
import gym
import matplotlib.pyplot as plt
from environments.acrobot_custom import CustomAcrobotEnv
from environments.acrobot_simple import SimpleAcrobotEnv
from environments.gridworld_2 import GridworldEnv
from plotting import plot_per
def update(algorithm, buffer, params, train_steps):
batch = buffer.sample(params['batch_size'])
if type(buffer) == ReplayBuffer or type(buffer) == HindsightReplayBuffer:
obses_t, a, r, obses_tp1, dones = batch
loss = algorithm.train(obses_t, a, r, obses_tp1, dones)
elif type(buffer) == PrioritizedReplayBuffer or type(buffer) == PrioritizedHindsightReplayBuffer:
obses_t, a, r, obses_tp1, dones, importance_weights, idxs = batch
loss, losses = algorithm.per_train(obses_t, a, r, obses_tp1, dones, importance_weights)
buffer.update_priorities(idxs, losses.numpy() + 1e-8)
else:
        raise ValueError('Unsupported buffer type: {}'.format(type(buffer)))
if isinstance(algorithm, algo_DQN):
return loss
    # update_epsilon() is not implemented for algo_DQN, hence the early return above
algorithm.update_epsilon()
if train_steps % params['target_network_interval'] == 0:
algorithm.update_target_network()
return loss
def add_transitions_to_buffer(transitions, buffer, completion_reward=0.0, special_goal=False):
if type(buffer) == ReplayBuffer or type(buffer) == PrioritizedReplayBuffer:
if special_goal:
for (f_t, g, a, r, f_tp1, _, done) in transitions:
obs_t = np.hstack((f_t, g))
obs_tp1 = np.hstack((f_tp1, g))
buffer.add(obs_t, a, r, obs_tp1, done)
else:
for (f_t, g, a, r, f_tp1, done) in transitions:
obs_t = np.hstack((f_t, g))
obs_tp1 = np.hstack((f_tp1, g))
buffer.add(obs_t, a, r, obs_tp1, done)
if type(buffer) == HindsightReplayBuffer or type(buffer) == PrioritizedHindsightReplayBuffer:
if special_goal:
g_prime = transitions[-1][5]
# Replace goal of every transition
for i, (f_t, _, a, r, f_tp1, _, done) in enumerate(transitions):
if i == len(transitions) - 1:
r = completion_reward # Last transition has its reward replaced
buffer.add(f_t, g_prime, a, r, f_tp1, done)
else:
g_prime = transitions[-1][4]
# Replace goal of every transition
for i, (f_t, _, a, r, f_tp1, done) in enumerate(transitions):
if i == len(transitions) - 1:
r = completion_reward # Last transition has its reward replaced
buffer.add(f_t, g_prime, a, r, f_tp1, done)
def test(algorithm, env, n_tests=10):
episodes_length = []
is_goal = True
for i in range(n_tests):
# print(i, '/', n_tests, end='\r')
if isinstance(env, GridworldEnv):
obs_t, goal = env.perform_reset()
elif is_goal:
obs_t, goal = env.reset()
else:
obs_t = env.reset()
goal = np.zeros_like(obs_t)
t = 0
while True:
# env.render()
action = algorithm.predict(np.hstack((obs_t, goal)), eval=True)
t += 1
if isinstance(env, GridworldEnv):
obs_tp1, reward, done, _ = env.perform_step(action)
elif is_goal:
obs_tp1, reward, done, _, goal = env.step(action)
else:
obs_tp1, reward, done, _ = env.step(action)
# termination condition
if done:
episodes_length.append(t)
# print('Episode finished in', t, 'steps')
break
obs_t = obs_tp1
return np.mean(episodes_length)
def main(params):
np.random.seed(params['seed'])
torch.manual_seed(params['seed'])
# declare environment
is_goal = True
if params['environment'] == 'acrobot_simple':
env = SimpleAcrobotEnv(stochastic=False, max_steps=400, mean_goal=-1.5)
s, goal = env.reset()
elif params['environment'] == 'windy_grid_world':
env = GridworldEnv()
s, goal = env.perform_reset()
else:
env = gym.make(params['environment'])
s = env.reset()
goal = s
is_goal = False
state_shape = s.shape[0] + goal.shape[0]
# select type of experience replay using the parameters
if params['buffer'] == ReplayBuffer:
buffer = ReplayBuffer(params['buffer_size'])
loss_function = params['loss_function']()
elif params['buffer'] == PrioritizedReplayBuffer:
buffer = PrioritizedReplayBuffer(params['buffer_size'], params['PER_alpha'], params['PER_beta'])
loss_function = params['loss_function'](reduction='none')
elif params['buffer'] == HindsightReplayBuffer:
buffer = HindsightReplayBuffer(params['buffer_size'])
loss_function = params['loss_function']()
elif params['buffer'] == PrioritizedHindsightReplayBuffer:
buffer = PrioritizedHindsightReplayBuffer(params['buffer_size'], params['PER_alpha'], params['PER_beta'])
loss_function = params['loss_function'](reduction='none')
else:
raise ValueError('Buffer type not found.')
# select learning algorithm using the parameters
if params['algorithm'] == DQN:
algorithm = DQN(state_shape,
env.action_space.n,
loss_function=loss_function,
optimizer=params['optimizer'],
lr=params['lr'],
gamma=params['gamma'],
epsilon_delta=1 / (params['epsilon_delta_end'] * params['train_steps']),
epsilon_min=params['epsilon_min'])
elif params['algorithm'] == algo_DQN:
algorithm = algo_DQN()
else:
raise ValueError('Algorithm type not found.')
losses = []
returns = []
train_steps = 0
episodes_length = []
episodes_length_test = []
print('Starting to train:', type(buffer))
test_lengths = test(algorithm, env)
episodes_length_test.append(test_lengths)
while train_steps < params['train_steps']:
if isinstance(env, GridworldEnv):
obs_t, goal = env.perform_reset()
elif is_goal:
obs_t, goal = env.reset()
else:
obs_t = env.reset()
goal = np.zeros_like(obs_t)
t = 0
episode_loss = []
episode_rewards = []
episode_transitions = []
while train_steps < params['train_steps']:
# env.render()
action = algorithm.predict(np.hstack((obs_t, goal)))
t += 1
if isinstance(env, GridworldEnv):
obs_tp1, reward, done, _ = env.perform_step(action)
transition = (obs_t, goal, action, reward, obs_tp1, done)
elif is_goal:
obs_tp1, reward, done, _, gr = env.step(action)
transition = (obs_t, goal, action, reward, obs_tp1, gr, done)
else:
obs_tp1, reward, done, _ = env.step(action)
transition = (obs_t, goal, action, reward, obs_tp1, done)
episode_transitions.append(transition)
episode_rewards.append(reward)
if len(buffer) >= params['batch_size']:
loss = update(algorithm, buffer, params, train_steps)
train_steps += 1
episode_loss.append(loss)
if train_steps % params['test_every'] == 0:
test_lengths = test(algorithm, env)
episodes_length_test.append(test_lengths)
# termination condition
if done:
episodes_length.append(t)
break
obs_t = obs_tp1
special_goal = isinstance(env, CustomAcrobotEnv) or isinstance(env, SimpleAcrobotEnv)
add_transitions_to_buffer(episode_transitions, buffer, special_goal=special_goal)
losses.append(np.mean(episode_loss))
returns.append(np.sum(episode_rewards))
env.close()
return episodes_length_test, returns, losses
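# Illustrative sketch of a params dict accepted by main(); the concrete values below are
# assumptions, but every key is read somewhere in main(), update() or test().
#   params = {
#       'seed': 0,
#       'environment': 'acrobot_simple',
#       'buffer': HindsightReplayBuffer,
#       'buffer_size': 100000,
#       'PER_alpha': 0.6,
#       'PER_beta': 0.4,
#       'loss_function': MSELoss,
#       'algorithm': DQN,
#       'optimizer': Adam,
#       'lr': 1e-3,
#       'gamma': 0.99,
#       'epsilon_delta_end': 0.5,
#       'epsilon_min': 0.05,
#       'train_steps': 10000,
#       'batch_size': 64,
#       'target_network_interval': 500,
#       'test_every': 1000,
#   }
#   episodes_length_test, returns, losses = main(params)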
def plot_results(er, per, her, pher, params, fig_name='figure'):
er_returns = np.array([np.array(r) for (r, _, _) in er])
# er_returns = np.array([np.mean(np.array(r).reshape((-1, episode_avg)), axis=1) for (r, _, _) in er])
# er_returns = np.concatenate((np.zeros((er_returns.shape[0], 1)), er_returns), axis=1)
x = np.arange(0, er_returns.shape[1] * params['test_every'], params['test_every'])
y = np.mean(er_returns, axis=0)
color = 'blue'
plt.plot(x, y, color=color, label='Experience Replay')
y_std = np.std(er_returns, axis=0)
plt.fill_between(x, y + y_std, y - y_std, color=color, alpha=0.4)
# plt.errorbar(x, y, yerr=np.std(er_returns, axis=0), capsize=5, ecolor=color, color=color, label='ER')
if per is not None:
per_returns = np.array([np.array(r) for (r, _, _) in per])
# per_returns = np.concatenate((np.zeros((er_returns.shape[0], 1)), per_returns), axis=1)
y = np.mean(per_returns, axis=0)
color = 'orange'
plt.plot(x, y, color=color, label='Prioritized Experience Replay')
# plt.errorbar(x, y, yerr=np.std(per_returns, axis=0), capsize=5, ecolor=color, color=color, label='PER')
        y_std = np.std(per_returns, axis=0)
plt.fill_between(x, y + y_std, y - y_std, color=color, alpha=0.4)
if her is not None:
        her_returns = np.array([np.array(r) for (r, _, _) in her])
# Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import torch.nn.functional as F
from pysot.core.config import cfg
from pysot.utils.anchor import Anchors
from pysot.tracker.base_tracker import SiameseTracker
class Modified_SiamRPNTracker(SiameseTracker):
def __init__(self, model):
super(Modified_SiamRPNTracker, self).__init__()
self.score_size = (cfg.TRACK.INSTANCE_SIZE - cfg.TRACK.EXEMPLAR_SIZE) // \
cfg.ANCHOR.STRIDE + 1 + cfg.TRACK.BASE_SIZE
self.anchor_num = len(cfg.ANCHOR.RATIOS) * len(cfg.ANCHOR.SCALES)
hanning = np.hanning(self.score_size)
window = np.outer(hanning, hanning)
self.window = np.tile(window.flatten(), self.anchor_num)
self.anchors = self.generate_anchor(self.score_size)
self.model = model
self.model.eval()
def init(self, img, bbox):
"""
args:
img(np.ndarray): BGR image
bbox: (x, y, w, h) bbox
"""
self.center_pos = np.array([bbox[0]+(bbox[2]-1)/2,
bbox[1]+(bbox[3]-1)/2])
self.size = np.array([bbox[2], bbox[3]])
self.size_list=[]
self.size_list.append(np.array([bbox[2], bbox[3]]))
self.size_list.append(np.array([bbox[2], bbox[3]]))
self.size_list.append(np.array([bbox[2], bbox[3]]))
self.center_pos_list=[]
self.center_pos_list.append(np.array([bbox[0]+(bbox[2]-1)/2,
bbox[1]+(bbox[3]-1)/2]))
self.center_pos_list.append(np.array([bbox[0]+(bbox[2]-1)/2,
bbox[1]+(bbox[3]-1)/2]))
self.center_pos_list.append(np.array([bbox[0]+(bbox[2]-1)/2,
bbox[1]+(bbox[3]-1)/2]))
# calculate z crop size
w_z = self.size[0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
h_z = self.size[1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
s_z = round(np.sqrt(w_z * h_z))
        # calculate channel average
self.channel_average = np.mean(img, axis=(0, 1))
# get crop
z_crop = self.get_subwindow(img, self.center_pos,
cfg.TRACK.EXEMPLAR_SIZE,
s_z, self.channel_average)
# self.model.template(z_crop)
def generate_anchor(self, score_size):
anchors = Anchors(cfg.ANCHOR.STRIDE,
cfg.ANCHOR.RATIOS,
cfg.ANCHOR.SCALES)
anchor = anchors.anchors
x1, y1, x2, y2 = anchor[:, 0], anchor[:, 1], anchor[:, 2], anchor[:, 3]
anchor = np.stack([(x1+x2)*0.5, (y1+y2)*0.5, x2-x1, y2-y1], 1)
total_stride = anchors.stride
anchor_num = anchor.shape[0]
anchor = np.tile(anchor, score_size * score_size).reshape((-1, 4))
ori = - (score_size // 2) * total_stride
xx, yy = np.meshgrid([ori + total_stride * dx for dx in range(score_size)],
[ori + total_stride * dy for dy in range(score_size)])
xx, yy = np.tile(xx.flatten(), (anchor_num, 1)).flatten(), \
np.tile(yy.flatten(), (anchor_num, 1)).flatten()
anchor[:, 0], anchor[:, 1] = xx.astype(np.float32), yy.astype(np.float32)
return anchor
def _convert_bbox(self, delta, anchor):
delta = delta.permute(1, 2, 3, 0).contiguous().view(4, -1)
delta = delta.data.cpu().numpy()
delta[0, :] = delta[0, :] * anchor[:, 2] + anchor[:, 0]
delta[1, :] = delta[1, :] * anchor[:, 3] + anchor[:, 1]
delta[2, :] = np.exp(delta[2, :]) * anchor[:, 2]
delta[3, :] = np.exp(delta[3, :]) * anchor[:, 3]
return delta
def _convert_score(self, score):
score = score.permute(1, 2, 3, 0).contiguous().view(2, -1).permute(1, 0)
score = F.softmax(score, dim=1).data[:, 1].cpu().numpy()
return score
def _bbox_clip(self, cx, cy, width, height, boundary):
cx = max(0, min(cx, boundary[1]))
cy = max(0, min(cy, boundary[0]))
width = max(10, min(width, boundary[1]))
height = max(10, min(height, boundary[0]))
return cx, cy, width, height
def Get_Updated_Template(self,img, bbox):
"""
args:
img(np.ndarray): BGR image
bbox: (x, y, w, h) bbox
"""
self.center_pos = np.array([bbox[0] + (bbox[2] - 1) / 2,
bbox[1] + (bbox[3] - 1) / 2])
self.size = np.array([bbox[2], bbox[3]])
# calculate z crop size
w_z = self.size[0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
h_z = self.size[1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
s_z = round(np.sqrt(w_z * h_z))
        # calculate channel average
self.channel_average = np.mean(img, axis=(0, 1))
# get crop
z_crop = self.get_subwindow(img, self.center_pos,
cfg.TRACK.EXEMPLAR_SIZE,
s_z, self.channel_average)
        return self.model.Get_Template(z_crop)  # crop the template patch from the image/bbox and return its backbone feature
def track(self, img, zf):
"""
args:
img(np.ndarray): BGR image
return:
bbox(list):[x, y, width, height]
"""
w_z = self.size[0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
h_z = self.size[1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
s_z = np.sqrt(w_z * h_z)
w_z_list=[]
h_z_list=[]
s_z_list=[]
w_z_list.append(self.size_list[0][0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size_list[0]))
w_z_list.append(self.size_list[1][0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size_list[1]))
w_z_list.append(self.size_list[2][0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size_list[2]))
h_z_list.append(self.size_list[0][1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size_list[0]))
h_z_list.append(self.size_list[1][1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size_list[1]))
h_z_list.append(self.size_list[2][1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size_list[2]))
        s_z_list.append(np.sqrt(w_z_list[0]*h_z_list[0]))
        s_z_list.append(np.sqrt(w_z_list[1]*h_z_list[1]))
        s_z_list.append(np.sqrt(w_z_list[2]*h_z_list[2]))
scale_z = cfg.TRACK.EXEMPLAR_SIZE / s_z
scale_z_list=[]
scale_z_list.append(cfg.TRACK.EXEMPLAR_SIZE / s_z_list[0])
scale_z_list.append(cfg.TRACK.EXEMPLAR_SIZE / s_z_list[1])
scale_z_list.append(cfg.TRACK.EXEMPLAR_SIZE / s_z_list[2])
s_x = s_z * (cfg.TRACK.INSTANCE_SIZE / cfg.TRACK.EXEMPLAR_SIZE)
x_crop = self.get_subwindow(img, self.center_pos,
cfg.TRACK.INSTANCE_SIZE,
round(s_x), self.channel_average)
outputs = self.model.modified_track(x_crop,zf)
score_list=[]
score_list.append(self._convert_score(outputs['cls_list'][0]))
score_list.append(self._convert_score(outputs['cls_list'][1]))
score_list.append(self._convert_score(outputs['cls_list'][2]))
score = self._convert_score(outputs['cls'])
pred_bbox_list=[]
pred_bbox_list.append(self._convert_bbox(outputs['loc_list'][0],self.anchors))
pred_bbox_list.append(self._convert_bbox(outputs['loc_list'][1],self.anchors))
pred_bbox_list.append(self._convert_bbox(outputs['loc_list'][2],self.anchors))
pred_bbox = self._convert_bbox(outputs['loc'], self.anchors)
def change(r):
return np.maximum(r, 1. / r)
def sz(w, h):
pad = (w + h) * 0.5
return np.sqrt((w + pad) * (h + pad))
# scale penalty
s_c = change(sz(pred_bbox[2, :], pred_bbox[3, :]) /
(sz(self.size[0]*scale_z, self.size[1]*scale_z)))
s_c_list=[]
s_c_list.append(change(sz(pred_bbox_list[0][2, :], pred_bbox_list[0][3, :]) /
(sz(self.size_list[0][0]*scale_z_list[0], self.size_list[0][1]*scale_z_list[0]))))
s_c_list.append(change(sz(pred_bbox_list[1][2, :], pred_bbox_list[1][3, :]) /
(sz(self.size_list[1][0]*scale_z_list[1], self.size_list[1][1]*scale_z_list[1]))))
s_c_list.append(change(sz(pred_bbox_list[2][2, :], pred_bbox_list[2][3, :]) /
(sz(self.size_list[2][0]*scale_z_list[2], self.size_list[2][1]*scale_z_list[2]))))
# aspect ratio penalty
r_c = change((self.size[0]/self.size[1]) /
(pred_bbox[2, :]/pred_bbox[3, :]))
r_c_list=[]
r_c_list.append(change((self.size_list[0][0]/self.size_list[0][1]) /
(pred_bbox_list[0][2, :]/pred_bbox_list[0][3, :])))
r_c_list.append(change((self.size_list[1][0]/self.size_list[1][1]) /
(pred_bbox_list[1][2, :]/pred_bbox_list[1][3, :])))
r_c_list.append(change((self.size_list[2][0]/self.size_list[2][1]) /
(pred_bbox_list[2][2, :]/pred_bbox_list[2][3, :])))
penalty = np.exp(-(r_c * s_c - 1) * cfg.TRACK.PENALTY_K)
penalty_list=[]
penalty_list.append(np.exp(-(r_c_list[0] * s_c_list[0] - 1) * cfg.TRACK.PENALTY_K))
penalty_list.append(np.exp(-(r_c_list[1] * s_c_list[1] - 1) * cfg.TRACK.PENALTY_K))
penalty_list.append(np.exp(-(r_c_list[2] * s_c_list[2] - 1) * cfg.TRACK.PENALTY_K))
pscore = penalty * score
pscore_list=[]
pscore_list.append(penalty_list[0] * score_list[0])
pscore_list.append(penalty_list[1] * score_list[1])
pscore_list.append(penalty_list[2] * score_list[2])
# window penalty
pscore = pscore * (1 - cfg.TRACK.WINDOW_INFLUENCE) + \
self.window * cfg.TRACK.WINDOW_INFLUENCE
best_idx = np.argmax(pscore)
best_idx_list=[]
pscore_list[0]=pscore_list[0]*(1 - cfg.TRACK.WINDOW_INFLUENCE) + \
self.window * cfg.TRACK.WINDOW_INFLUENCE
best_idx_list.append(np.argmax(pscore_list[0]))
pscore_list[1]=pscore_list[1]*(1 - cfg.TRACK.WINDOW_INFLUENCE) + \
self.window * cfg.TRACK.WINDOW_INFLUENCE
best_idx_list.append(np.argmax(pscore_list[1]))
pscore_list[2]=pscore_list[2]*(1 - cfg.TRACK.WINDOW_INFLUENCE) + \
self.window * cfg.TRACK.WINDOW_INFLUENCE
best_idx_list.append(np.argmax(pscore_list[2]))
bbox = pred_bbox[:, best_idx] / scale_z
bbox_list=[]
bbox_list.append(pred_bbox_list[0][:,best_idx_list[0]]/scale_z_list[0])
bbox_list.append(pred_bbox_list[1][:,best_idx_list[1]]/scale_z_list[1])
bbox_list.append(pred_bbox_list[2][:,best_idx_list[2]]/scale_z_list[2])
lr = penalty[best_idx] * score[best_idx] * cfg.TRACK.LR
lr_list=[]
lr_list.append(penalty_list[0][best_idx_list[0]]*score_list[0][best_idx_list[0]]* cfg.TRACK.LR)
lr_list.append(penalty_list[1][best_idx_list[1]]*score_list[1][best_idx_list[1]]* cfg.TRACK.LR)
lr_list.append(penalty_list[2][best_idx_list[2]]*score_list[2][best_idx_list[2]]* cfg.TRACK.LR)
cx = bbox[0] + self.center_pos[0]
cy = bbox[1] + self.center_pos[1]
cx_list=[]
cy_list=[]
cx_list.append(bbox_list[0][0] + self.center_pos_list[0][0])
cy_list.append(bbox_list[0][1] + self.center_pos_list[0][1])
cx_list.append(bbox_list[1][0] + self.center_pos_list[1][0])
cy_list.append(bbox_list[1][1] + self.center_pos_list[1][1])
cx_list.append(bbox_list[2][0] + self.center_pos_list[2][0])
cy_list.append(bbox_list[2][1] + self.center_pos_list[2][1])
# smooth bbox
width = self.size[0] * (1 - lr) + bbox[2] * lr
height = self.size[1] * (1 - lr) + bbox[3] * lr
width_list=[]
height_list=[]
width_list.append(self.size_list[0][0] * (1 - lr_list[0]) + bbox_list[0][2] * lr_list[0])
height_list.append(self.size_list[0][1] * (1 - lr_list[0]) + bbox_list[0][3] * lr_list[0])
width_list.append(self.size_list[1][0] * (1 - lr_list[1]) + bbox_list[1][2] * lr_list[1])
height_list.append(self.size_list[1][1] * (1 - lr_list[1]) + bbox_list[1][3] * lr_list[1])
width_list.append(self.size_list[2][0] * (1 - lr_list[2]) + bbox_list[2][2] * lr_list[2])
height_list.append(self.size_list[2][1] * (1 - lr_list[2]) + bbox_list[2][3] * lr_list[2])
# clip boundary
cx, cy, width, height = self._bbox_clip(cx, cy, width,
height, img.shape[:2])
cx_list[0], cy_list[0], width_list[0], height_list[0] = self._bbox_clip(cx_list[0], cy_list[0], width_list[0],
height_list[0], img.shape[:2])
cx_list[1], cy_list[1], width_list[1], height_list[1] = self._bbox_clip(cx_list[1], cy_list[1], width_list[1],
height_list[1], img.shape[:2])
cx_list[2], cy_list[2], width_list[2], height_list[2] = self._bbox_clip(cx_list[2], cy_list[2], width_list[2],
height_list[2], img.shape[:2])
        # update state
self.center_pos = np.array([cx, cy])
self.size = np.array([width, height])
        self.center_pos_list[0] = np.array([cx_list[0], cy_list[0]])
self.size_list[0]=np.array([width_list[0], height_list[0]])
        self.center_pos_list[1] = np.array([cx_list[1], cy_list[1]])
"""
Testing the QVM stabilizer
"""
import sys
import numpy as np
from itertools import product
from functools import reduce
from pyquil.paulis import sI, sX, sY, sZ, PAULI_COEFF, PAULI_OPS
from pyquil.quil import Program
from pyquil.gates import H, S, CNOT, I
from referenceqvm.qvm_stabilizer import QVM_Stabilizer
from referenceqvm.stabilizer_utils import (pauli_stabilizer_to_binary_stabilizer,
binary_stabilizer_to_pauli_stabilizer)
from referenceqvm.stabilizer_utils import project_stabilized_state
from referenceqvm.api import QVMConnection
pauli_subgroup = [sI, sX, sY, sZ]
five_qubit_code_generators = [sX(0) * sZ(1) * sZ(2) * sX(3) * sI(4),
sI(0) * sX(1) * sZ(2) * sZ(3) * sX(4),
sX(0) * sI(1) * sX(2) * sZ(3) * sZ(4),
sZ(0) * sX(1) * sI(2) * sX(3) * sZ(4)]
bell_stabilizer = [sZ(0) * sZ(1), sX(0) * sX(1)]
def test_initialization():
"""
Test if upon initialization the correct size tableau is set up
"""
num_qubits = 4
qvmstab = QVM_Stabilizer(num_qubits=num_qubits)
assert qvmstab.num_qubits == num_qubits
assert qvmstab.tableau.shape == (2 * num_qubits, 2 * num_qubits + 1)
initial_tableau = np.hstack((np.eye(2 * num_qubits), np.zeros((2 * num_qubits, 1))))
assert np.allclose(initial_tableau, qvmstab.tableau)
num_qubits = 6
qvmstab = QVM_Stabilizer(num_qubits=num_qubits)
assert qvmstab.num_qubits == num_qubits
assert qvmstab.tableau.shape == (2 * num_qubits, 2 * num_qubits + 1)
initial_tableau = np.hstack((np.eye(2 * num_qubits), np.zeros((2 * num_qubits, 1))))
    assert np.allclose(initial_tableau, qvmstab.tableau)
import numpy as np
import pandas as pd
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
from rpy2.robjects import pandas2ri
# Load R packages
rstats = importr('stats')
rmgcv = importr('mgcv')
pandas2ri.activate()
rbase = importr('base')
#rutils = importr('utils')
def pdframe2rdframe(data):
'''
Converts a pandas DataFrame into an R dataframe
:param data: Data to convert into an R dataframe
pandas DataFrame
:return: robjects.vectors.DataFrame
'''
# This step is just to make sure pandas2ri.DataFrame works as expected
assert isinstance(data, pd.DataFrame)
data2 = pd.DataFrame({ci: [vi for vi in data[ci]] for ci in data.columns})
return pandas2ri.DataFrame(data2)
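# Illustrative usage of pdframe2rdframe (a sketch; the column names are made up):
#   df = pd.DataFrame({'x': [1.0, 2.0, 3.0], 'y': [0.1, 0.2, 0.3]})
#   rdf = pdframe2rdframe(df)   # robjects.vectors.DataFrame with the same columns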
def mgcv_fit(formula, data, family='gaussian', weights=None, method='REML', bam=False, chunk_size=1000):
'''
Fit a Generalized Additive Model
This function is a wrapper of the MGCV package in R.
See its definition for more details https://www.rdocumentation.org/packages/mgcv/versions/1.8-28/topics/gam
:param formula: R formula
string
:param data: Data to fit the model to
pandas DataFrame
:param family: Noise model distribution
string. One of 'gaussian', 'binomial', 'poisson' (default 'gaussian').
:param weights: Observation weights
numpy ndarray
:param method: Smoothing parameter estimation method (see R MGCV documentation)
string, one of 'GCV.Cp', 'GACV.Cp', 'REML', 'P-REML', 'ML', 'fREML' (default 'REML').
:param bam: If MGCV implementation for large datasets should be used
Boolean
:param chunk_size: Size of chunks in which the model matrix is created (Only used when bam=True).
Integer
:return: robjects.vectors.ListVector
'''
assert isinstance(data, pd.DataFrame)
# Define family
if family == 'gaussian':
rfamily = rstats.gaussian(link='identity')
elif family == 'binomial':
rfamily = rstats.binomial(link='logit')
elif family == 'poisson':
rfamily = rstats.poisson(link='log')
else:
raise NotImplementedError
rdata = pdframe2rdframe(data)
rformula = robjects.Formula(formula)
if weights is None:
if not bam:
gam = rmgcv.gam(formula=rformula, data=rdata, family=rfamily, method=method)
else:
gam = rmgcv.bam(formula=rformula, data=rdata, family=rfamily, method=method, chunk_size=chunk_size)
else:
#TODO assert weights
if not bam:
gam = rmgcv.gam(formula=rformula, data=rdata, family=family, weights=weights, method=method)
else:
gam = rmgcv.bam(formula=rformula, data=rdata, family=family, weights=weights, method=method,
chunk_size=chunk_size)
return gam
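# Illustrative usage of mgcv_fit (a sketch; 'y ~ s(x)' is a standard mgcv smooth-term formula,
# the data frame and column names are assumptions):
#   df = pd.DataFrame({'x': np.linspace(0, 1, 100), 'y': np.random.randn(100)})
#   gam = mgcv_fit('y ~ s(x)', df, family='gaussian', method='REML')
#   mu = mgcv_predict(gam, df, response_type='response')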
def mgcv_predict(gam, data, response_type='response'):
'''
Make predictions using a fitted GAM model
See R MGCV package
:param gam: A gam model previously fitted
robjects.vectors.ListVector
:param data: Input data points where predictions are made
pandas DataFrame
:param response_type: Space or transformation in which the prediction is returned (see R MGCV documentation)
string. One of 'response', 'link', 'lpmatrix'.
:return: numpy ndarray of shape (n_data, ) with the posterior mean values.
'''
assert isinstance(gam, robjects.vectors.ListVector)
assert isinstance(data, pd.DataFrame)
if response_type not in ['link', 'response', 'lpmatrix']:
raise NotImplementedError
rdata = pdframe2rdframe(data)
return np.array(rmgcv.predict_gam(gam, newdata=rdata, type=response_type))
def mgcv_posterior_samples(gam, data, n_samples=100, response_type='inverse_link') -> np.ndarray:
'''
Generate samples from the posterior distribution of a GAM
See R MGCV package
:param gam: A gam model previously fitted
robjects.vectors.ListVector
:param data: Input data points where simulations are generated
Pandas DataFrame
:param n_samples: Number of samples to generate
Integer (default 100)
:param response_type: Space or transformation in which the prediction is returned (see R MGCV documentation)
String. One of 'response', 'link', 'inverse_link', 'lpmatrix'.
:return: numpy ndarray of shape (n_samples, n_data)
'''
assert isinstance(gam, robjects.vectors.ListVector)
assert isinstance(data, pd.DataFrame)
assert isinstance(n_samples, int)
assert n_samples > 0
gam_coef = np.array(rstats.coef(gam))
gam_vcov = np.array(rstats.vcov(gam))
M = np.random.multivariate_normal(gam_coef, gam_vcov, size=n_samples)
rdata = pdframe2rdframe(data)
LP = np.array(rmgcv.predict_gam(gam, newdata=rdata, type='lpmatrix'))
_post = np.dot(M, LP.T)
if response_type == 'link':
samples = _post
elif response_type == 'inverse_link':
family, link = get_family(gam), get_link(gam)
if family == 'gaussian':
assert link == 'identity'
samples = _post
elif family == 'binomial':
assert link == 'logit'
samples = 1. / (1. + np.exp(-_post))
else: #family == 'poisson'
assert link == 'log'
samples = np.exp(_post)
    elif response_type == 'response':  # TODO: the accuracy of this case has not been tested yet
family, link = get_family(gam), get_link(gam)
if family == 'gaussian' and link == 'identity':
ix_scale = get_names(gam).index('scale') #TODO check this line
            sigma_noise = np.sqrt(gam[ix_scale])
"""
@author: <NAME>
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
from scipy.interpolate import griddata
from mpl_toolkits.mplot3d import Axes3D
from pyDOE import lhs
import time
from plotting import newfig, savefig
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
###############################################################################
############################## Helper Functions ###############################
###############################################################################
def initialize_NN(layers):
weights = []
biases = []
num_layers = len(layers)
for l in range(0,num_layers-1):
W = xavier_init(size=[layers[l], layers[l+1]])
b = tf.Variable(tf.zeros([1,layers[l+1]],
dtype=tf.float32),
dtype=tf.float32)
weights.append(W)
biases.append(b)
return weights, biases
def xavier_init(size):
in_dim = size[0]
out_dim = size[1]
xavier_stddev = np.sqrt(2/(in_dim + out_dim))
return tf.Variable(tf.truncated_normal([in_dim, out_dim],
stddev=xavier_stddev, dtype=tf.float32),
dtype=tf.float32)
def neural_net(X, weights, biases):
num_layers = len(weights) + 1
H = X
for l in range(0,num_layers-2):
W = weights[l]
b = biases[l]
H = tf.tanh(tf.add(tf.matmul(H, W), b))
W = weights[-1]
b = biases[-1]
Y = tf.add(tf.matmul(H, W), b)
return Y
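# Illustrative usage of the helpers above (a sketch; the layer sizes are arbitrary):
#   layers = [2, 50, 50, 1]                  # input dim 2, two tanh hidden layers, scalar output
#   weights, biases = initialize_NN(layers)
#   X = tf.placeholder(tf.float32, shape=[None, 2])
#   Y = neural_net(X, weights, biases)       # forward pass through the fully-connected network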
###############################################################################
################################ DeepHPM Class ################################
###############################################################################
class DeepHPM:
def __init__(self, t, x, u, v,
x0, u0, v0, tb, X_f,
uv_layers, pde_layers,
layers,
lb_idn, ub_idn,
lb_sol, ub_sol):
# Domain Boundary
self.lb_idn = lb_idn
self.ub_idn = ub_idn
self.lb_sol = lb_sol
self.ub_sol = ub_sol
# Init for Identification
self.idn_init(t, x, u, v, uv_layers, pde_layers)
# Init for Solution
self.sol_init(x0, u0, v0, tb, X_f, layers)
# tf session
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
log_device_placement=True))
init = tf.global_variables_initializer()
self.sess.run(init)
###########################################################################
############################# Identifier ##################################
###########################################################################
def idn_init(self, t, x, u, v, uv_layers, pde_layers):
# Training Data for Identification
self.t = t
self.x = x
self.u = u
self.v = v
# Layers for Identification
self.uv_layers = uv_layers
self.pde_layers = pde_layers
# Initialize NNs for Identification
self.u_weights, self.u_biases = initialize_NN(uv_layers)
self.v_weights, self.v_biases = initialize_NN(uv_layers)
self.pde_u_weights, self.pde_u_biases = initialize_NN(pde_layers)
self.pde_v_weights, self.pde_v_biases = initialize_NN(pde_layers)
# tf placeholders for Identification
self.t_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.x_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.u_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.v_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.terms_tf = tf.placeholder(tf.float32, shape=[None, pde_layers[0]])
# tf graphs for Identification
self.idn_u_pred, self.idn_v_pred = self.idn_net_uv(self.t_tf, self.x_tf)
self.pde_u_pred, self.pde_v_pred = self.net_pde(self.terms_tf)
self.idn_f_pred, self.idn_g_pred = self.idn_net_fg(self.t_tf, self.x_tf)
# loss for Identification
self.idn_uv_loss = tf.reduce_sum(tf.square(self.idn_u_pred - self.u_tf)) + \
tf.reduce_sum(tf.square(self.idn_v_pred - self.v_tf))
self.idn_fg_loss = tf.reduce_sum(tf.square(self.idn_f_pred)) + \
tf.reduce_sum(tf.square(self.idn_g_pred))
# Optimizer for Identification
self.idn_uv_optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.idn_uv_loss,
var_list = self.u_weights + self.u_biases + self.v_weights + self.v_biases,
method = 'L-BFGS-B',
options = {'maxiter': 50000,
'maxfun': 50000,
'maxcor': 50,
'maxls': 50,
'ftol': 1.0*np.finfo(float).eps})
self.idn_fg_optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.idn_fg_loss,
var_list = self.pde_u_weights + self.pde_u_biases + self.pde_v_weights + self.pde_v_biases,
method = 'L-BFGS-B',
options = {'maxiter': 50000,
'maxfun': 50000,
'maxcor': 50,
'maxls': 50,
'ftol': 1.0*np.finfo(float).eps})
self.idn_uv_optimizer_Adam = tf.train.AdamOptimizer()
self.idn_uv_train_op_Adam = self.idn_uv_optimizer_Adam.minimize(self.idn_uv_loss,
var_list = self.u_weights + self.u_biases + self.v_weights + self.v_biases)
self.idn_fg_optimizer_Adam = tf.train.AdamOptimizer()
self.idn_fg_train_op_Adam = self.idn_fg_optimizer_Adam.minimize(self.idn_fg_loss,
var_list = self.pde_u_weights + self.pde_u_biases + self.pde_v_weights + self.pde_v_biases)
def idn_net_uv(self, t, x):
X = tf.concat([t,x],1)
H = 2*(X - self.lb_idn)/(self.ub_idn - self.lb_idn) - 1
u = neural_net(H, self.u_weights, self.u_biases)
v = neural_net(H, self.v_weights, self.v_biases)
return u, v
def net_pde(self, terms):
pde_u = neural_net(terms, self.pde_u_weights, self.pde_u_biases)
pde_v = neural_net(terms, self.pde_v_weights, self.pde_v_biases)
return pde_u, pde_v
def idn_net_fg(self, t, x):
u, v = self.idn_net_uv(t, x)
u_t = tf.gradients(u, t)[0]
v_t = tf.gradients(v, t)[0]
u_x = tf.gradients(u, x)[0]
v_x = tf.gradients(v, x)[0]
u_xx = tf.gradients(u_x, x)[0]
v_xx = tf.gradients(v_x, x)[0]
terms = tf.concat([u,v,u_x,v_x,u_xx,v_xx],1)
pde_u, pde_v = self.net_pde(terms)
f = u_t - pde_u
g = v_t - pde_v
return f, g
def idn_uv_train(self, N_iter):
tf_dict = {self.t_tf: self.t, self.x_tf: self.x,
self.u_tf: self.u, self.v_tf: self.v}
start_time = time.time()
for it in range(N_iter):
self.sess.run(self.idn_uv_train_op_Adam, tf_dict)
# Print
if it % 10 == 0:
elapsed = time.time() - start_time
loss_value = self.sess.run(self.idn_uv_loss, tf_dict)
print('It: %d, Loss: %.3e, Time: %.2f' %
(it, loss_value, elapsed))
start_time = time.time()
self.idn_uv_optimizer.minimize(self.sess,
feed_dict = tf_dict,
fetches = [self.idn_uv_loss],
loss_callback = self.callback)
def idn_fg_train(self, N_iter):
tf_dict = {self.t_tf: self.t, self.x_tf: self.x}
start_time = time.time()
for it in range(N_iter):
self.sess.run(self.idn_fg_train_op_Adam, tf_dict)
# Print
if it % 10 == 0:
elapsed = time.time() - start_time
loss_value = self.sess.run(self.idn_fg_loss, tf_dict)
print('It: %d, Loss: %.3e, Time: %.2f' %
(it, loss_value, elapsed))
start_time = time.time()
self.idn_fg_optimizer.minimize(self.sess,
feed_dict = tf_dict,
fetches = [self.idn_fg_loss],
loss_callback = self.callback)
def idn_predict(self, t_star, x_star):
tf_dict = {self.t_tf: t_star, self.x_tf: x_star}
u_star = self.sess.run(self.idn_u_pred, tf_dict)
v_star = self.sess.run(self.idn_v_pred, tf_dict)
f_star = self.sess.run(self.idn_f_pred, tf_dict)
g_star = self.sess.run(self.idn_g_pred, tf_dict)
return u_star, v_star, f_star, g_star
def predict_pde(self, terms_star):
tf_dict = {self.terms_tf: terms_star}
pde_u_star = self.sess.run(self.pde_u_pred, tf_dict)
pde_v_star = self.sess.run(self.pde_v_pred, tf_dict)
return pde_u_star, pde_v_star
###########################################################################
############################### Solver ####################################
###########################################################################
def sol_init(self, x0, u0, v0, tb, X_f, layers):
# Training Data for Solution
X0 = np.concatenate((0*x0, x0), 1) # (0, x0)
X_lb = np.concatenate((tb, 0*tb + self.lb_sol[1]), 1) # (tb, lb[1])
X_ub = np.concatenate((tb, 0*tb + self.ub_sol[1]), 1) # (tb, ub[1])
self.X_f = X_f # Collocation Points
self.t0 = X0[:,0:1] # Initial Data (time)
self.x0 = X0[:,1:2] # Initial Data (space)
self.t_lb = X_lb[:,0:1] # Boundary Data (time) -- lower boundary
self.x_lb = X_lb[:,1:2] # Boundary Data (space) -- lower boundary
self.t_ub = X_ub[:,0:1] # Boundary Data (time) -- upper boundary
self.x_ub = X_ub[:,1:2] # Boundary Data (space) -- upper boundary
self.t_f = X_f[:,0:1] # Collocation Points (time)
self.x_f = X_f[:,1:2] # Collocation Points (space)
self.u0 = u0 # Boundary Data
self.v0 = v0 # Boundary Data
# Layers for Solution
# self.layers = layers
# Initialize NNs for Solution
# self.u_weights_sol, self.u_biases_sol = initialize_NN(layers)
# self.v_weights_sol, self.v_biases_sol = initialize_NN(layers)
# tf placeholders for Solution
self.t0_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.x0_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.u0_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.v0_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.t_lb_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.x_lb_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.t_ub_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.x_ub_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.t_f_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.x_f_tf = tf.placeholder(tf.float32, shape=[None, 1])
# tf graphs for Solution
self.u0_pred, self.v0_pred, _, _ = self.sol_net_uv(self.t0_tf, self.x0_tf)
self.u_lb_pred, self.v_lb_pred, self.u_x_lb_pred, self.v_x_lb_pred = self.sol_net_uv(self.t_lb_tf, self.x_lb_tf)
self.u_ub_pred, self.v_ub_pred, self.u_x_ub_pred, self.v_x_ub_pred = self.sol_net_uv(self.t_ub_tf, self.x_ub_tf)
self.sol_f_pred, self.sol_g_pred = self.sol_net_fg(self.t_f_tf, self.x_f_tf)
# loss for Solution
self.sol_loss = tf.reduce_mean(tf.square(self.u0_tf - self.u0_pred)) + \
tf.reduce_mean(tf.square(self.v0_tf - self.v0_pred)) + \
tf.reduce_mean(tf.square(self.u_lb_pred - self.u_ub_pred)) + \
tf.reduce_mean(tf.square(self.v_lb_pred - self.v_ub_pred)) + \
tf.reduce_mean(tf.square(self.u_x_lb_pred - self.u_x_ub_pred)) + \
tf.reduce_mean(tf.square(self.v_x_lb_pred - self.v_x_ub_pred)) + \
tf.reduce_mean(tf.square(self.sol_f_pred)) + \
tf.reduce_mean(tf.square(self.sol_g_pred))
# Optimizer for Solution
self.sol_optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.sol_loss,
var_list = self.u_weights + self.u_biases + self.v_weights + self.v_biases,
method = 'L-BFGS-B',
options = {'maxiter': 50000,
'maxfun': 50000,
'maxcor': 50,
'maxls': 50,
'ftol': 1.0*np.finfo(float).eps})
self.sol_optimizer_Adam = tf.train.AdamOptimizer()
self.sol_train_op_Adam = self.sol_optimizer_Adam.minimize(self.sol_loss,
var_list = self.u_weights + self.u_biases + self.v_weights + self.v_biases)
def sol_net_uv(self, t, x):
X = tf.concat([t,x],1)
H = 2*(X - self.lb_sol)/(self.ub_sol - self.lb_sol) - 1
u = neural_net(H, self.u_weights, self.u_biases)
v = neural_net(H, self.v_weights, self.v_biases)
u_x = tf.gradients(u, x)[0]
v_x = tf.gradients(v, x)[0]
return u, v, u_x, v_x
def sol_net_fg(self, t, x):
u, v, u_x, v_x = self.sol_net_uv(t,x)
u_t = tf.gradients(u, t)[0]
v_t = tf.gradients(v, t)[0]
u_xx = tf.gradients(u_x, x)[0]
v_xx = tf.gradients(v_x, x)[0]
terms = tf.concat([u,v,u_x,v_x,u_xx,v_xx],1)
pde_u, pde_v = self.net_pde(terms)
f = u_t - pde_u
g = v_t - pde_v
# f = u_t + 0.5*v_xx + (u**2 + v**2)*v
# g = v_t - 0.5*u_xx - (u**2 + v**2)*u
return f, g
def callback(self, loss):
print('Loss: %e' % (loss))
def sol_train(self, N_iter):
tf_dict = {self.t0_tf: self.t0, self.x0_tf: self.x0,
self.u0_tf: self.u0, self.v0_tf: self.v0,
self.t_lb_tf: self.t_lb, self.x_lb_tf: self.x_lb,
self.t_ub_tf: self.t_ub, self.x_ub_tf: self.x_ub,
self.t_f_tf: self.t_f, self.x_f_tf: self.x_f}
start_time = time.time()
for it in range(N_iter):
self.sess.run(self.sol_train_op_Adam, tf_dict)
# Print
if it % 10 == 0:
elapsed = time.time() - start_time
loss_value = self.sess.run(self.sol_loss, tf_dict)
print('It: %d, Loss: %.3e, Time: %.2f' %
(it, loss_value, elapsed))
start_time = time.time()
self.sol_optimizer.minimize(self.sess,
feed_dict = tf_dict,
fetches = [self.sol_loss],
loss_callback = self.callback)
def sol_predict(self, t_star, x_star):
u_star = self.sess.run(self.u0_pred, {self.t0_tf: t_star, self.x0_tf: x_star})
v_star = self.sess.run(self.v0_pred, {self.t0_tf: t_star, self.x0_tf: x_star})
f_star = self.sess.run(self.sol_f_pred, {self.t_f_tf: t_star, self.x_f_tf: x_star})
g_star = self.sess.run(self.sol_g_pred, {self.t_f_tf: t_star, self.x_f_tf: x_star})
return u_star, v_star, f_star, g_star
###############################################################################
################################ Main Function ################################
###############################################################################
if __name__ == "__main__":
    # Domain bounds
lb_idn = np.array([0.0, -5.0])
ub_idn = np.array([np.pi/2, 5.0])
lb_sol = np.array([0.0, -5.0])
    ub_sol = np.array([np.pi/2, 5.0])
#!/usr/bin/env python
import itertools
import numpy as np
import pandas as pd
import os
import time
import random
import pickle
from ELM import ELMClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import log_loss
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import KFold
features_check = {
"base": {
"features" : [1,2,3,4,5,6,7,8,9],
"C" : 0.001,
"n_hidden" : 50,
"y_column_idx" : 10,
"feature_file" : "../Datasets/features_extractions/base_(all).csv"
},
"base_robust": {
"features" : [2,6,8,9],
"C" : 0.001,
"n_hidden" : 10,
"y_column_idx" : 10,
"feature_file" : "../Datasets/features_extractions/base_(all).csv"
},
"all": {
"features" : [1,2,3,4,5,6,7,8,9,10,11,13,15],
"C" : 50,
"n_hidden" : 150,
"y_column_idx" : 17,
"feature_file" : "../Datasets/features_extractions/median_9_2_(25-75)_vt_include.csv"
},
"novel": {
"features" : [10,11,13,15],
"C" : 0.004,
"n_hidden" : 50,
"y_column_idx" : 17,
"feature_file" : "../Datasets/features_extractions/median_9_2_(25-75)_vt_include.csv"
},
"hybrid_robust": {
"features" : [2,6,8,9,10,11,13,15],
"C" : 0.01,
"n_hidden" : 100,
"y_column_idx" : 17,
"feature_file" : "../Datasets/features_extractions/median_9_2_(25-75)_vt_include.csv"
}
}
model_name = "elm"
features_to_check = ["base","base_robust","all","novel","hybrid_robust"]
threshold = 0.5
learning_rate = 0.001
n_splits = 10
test_size = 0.25
path = os.path.dirname(os.path.abspath(__file__))
features_file_name = "../Datasets/features_extractions/median_9_2_(75-25)_vt_include.csv"
features_file = os.path.join(path, features_file_name)
for features_set in features_to_check:
print("\n\nChecking features - %s" % (features_set))
features_file = os.path.join(path, features_check[features_set]["feature_file"])
y_column_idx = features_check[features_set]["y_column_idx"]
n_hidden = features_check[features_set]["n_hidden"]
train = pd.read_csv(features_file)
######## Append artificial data by number of consecutive characters feature ########
if 2 in features_check[features_set]["features"]:
mal = train[train[train.columns[y_column_idx]]==1].sample(500).copy()
mal["2"] = mal["2"].apply(lambda x:x*random.randint(3,9))
train = train.append(mal, ignore_index=True)
######################################## END #######################################
use_columns = features_check[features_set]["features"]
use_columns.append(y_column_idx)
train = train[train.columns[use_columns]]
use_dataset = train.copy()
use_dataset = np.asfarray(use_dataset.values,np.dtype('Float64'))
# Normlize the dataset
scaler = MinMaxScaler().fit(use_dataset[:, :-1])
dataset_norm = scaler.transform(use_dataset[:, :-1])
# Split features and labels
    X, y = use_dataset, np.transpose([use_dataset[:, -1]])
#!/usr/bin/python3
#-*-coding:utf-8-*-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from args import *
from pahelix.model_zoo.seq_vae_model import VAE
from utils import *
import paddle
from paddle.io import Dataset
import numpy as np
import paddle.fluid as fluid
import pdb
import paddle.fluid.dygraph as dg
import paddle.nn.functional as F
import paddle.nn as nn
from paddle.optimizer.lr import LRScheduler
from visualdl import LogWriter
import rdkit
import pickle
import os
import json
def train_epoch(model, data_loader, epoch, kl_weight, config, optimizer=None):
"""
tbd
"""
if optimizer is None:
model.eval()
else:
model.train()
kl_loss_values = []
recon_loss_values = []
loss_values = []
for batch_id, data in enumerate(data_loader()):
# read batch data
data_batch = data
# forward
kl_loss, recon_loss = model(data_batch)
loss = kl_weight * kl_loss + recon_loss
if optimizer is not None:
loss.backward()
optimizer.step()
optimizer.clear_grad()
# Log
kl_loss_values.append(kl_loss.numpy())
recon_loss_values.append(recon_loss.numpy())
loss_values.append(loss.numpy())
lr = (optimizer.get_lr()
if optimizer is not None
else 0)
if batch_id % 200 == 0 and batch_id > 0:
print('batch:%s, kl_loss:%f, recon_loss:%f' % \
                (batch_id, float(np.mean(kl_loss_values)), float(np.mean(recon_loss_values))))
"""Distance-based metrics.
"""
import math
import numpy as np
from numba import njit
import opfython.utils.constants as c
import opfython.utils.decorator as d
@d.avoid_zero_division
@njit(cache=True)
def additive_symmetric_distance(x, y):
"""Calculates the Additive Symmetric Distance (Symmetric Divergence).
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Additive Symmetric Distance between x and y.
"""
dist = ((x - y) ** 2 * (x + y)) / (x * y)
return 2 * np.sum(dist)
@njit(cache=True)
def average_euclidean_distance(x, y):
"""Calculates the Average Euclidean Distance.
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Average Euclidean Distance between x and y.
"""
dist = squared_euclidean_distance(x, y)
return (dist / x.shape[0]) ** 0.5
@d.avoid_zero_division
@njit(cache=True)
def bhattacharyya_distance(x, y):
"""Calculates the Bhattacharyya Distance.
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Bhattacharyya Distance between x and y.
"""
dist = -math.log(np.sum((x * y) ** 0.5))
return dist
@d.avoid_zero_division
@njit(cache=True)
def bray_curtis_distance(x, y):
"""Calculates the Bray-Curtis Distance (Sorensen Distance).
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Bray-Curtis Distance between x and y.
"""
dist = np.sum(np.fabs(x - y)) / np.sum(x + y)
return dist
@d.avoid_zero_division
@njit(cache=True)
def canberra_distance(x, y):
"""Calculates the Canberra Distance.
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Canberra Distance between x and y.
"""
dist = np.fabs(x - y) / (np.fabs(x) + np.fabs(y))
return np.sum(dist)
@njit(cache=True)
def chebyshev_distance(x, y):
"""Calculates the Chebyshev Distance (Maximum Value Distance, Lagrange, Chessboard Distance).
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Chebyshev Distance between x and y.
"""
dist = np.fabs(x - y)
return np.amax(dist)
@d.avoid_zero_division
@njit(cache=True)
def chi_squared_distance(x, y):
"""Calculates the Chi-Squared Distance.
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Chi-Squared Distance between x and y.
"""
dist = ((x - y) ** 2 / (x + y))
return 0.5 * np.sum(dist)
@d.avoid_zero_division
@njit(cache=True)
def chord_distance(x, y):
"""Calculates the Chord Distance.
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Chord Distance between x and y.
"""
dist = 2 - 2 * (np.sum(x * y) / (np.sum(x ** 2) ** 0.5 * np.sum(y ** 2) ** 0.5))
return dist ** 0.5
@d.avoid_zero_division
@njit(cache=True)
def clark_distance(x, y):
"""Calculates the Clark Distance.
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Clark Distance between x and y.
"""
dist = ((x - y) / np.fabs(x + y)) ** 2
return np.sum(dist) ** 0.5
@d.avoid_zero_division
@njit(cache=True)
def cosine_distance(x, y):
"""Calculates the Cosine Distance.
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The cosine Distance between x and y.
"""
dist = 1 - (np.sum(x * y) / (np.sum(x ** 2) ** 0.5 * np.sum(y ** 2) ** 0.5))
return dist
@d.avoid_zero_division
@njit(cache=True)
def dice_distance(x, y):
"""Calculates the Dice Distance.
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Dice Distance between x and y.
"""
dist = 2 * np.sum(x * y) / (np.sum(x ** 2) + np.sum(y ** 2))
return 1 - dist
@d.avoid_zero_division
@njit(cache=True)
def divergence_distance(x, y):
"""Calculates the Divergence Distance.
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Divergence Distance between x and y.
"""
dist = (x - y) ** 2 / (x + y) ** 2
return 2 * np.sum(dist)
@njit(cache=True)
def euclidean_distance(x, y):
"""Calculates the Euclidean Distance (L2 Norm, Ruler Distance).
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Euclidean Distance between x and y.
"""
dist = (x - y) ** 2
return np.sum(dist) ** 0.5
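# Quick sanity check (illustrative): the Euclidean distance between [0, 0] and [3, 4] is 5.
#   euclidean_distance(np.array([0.0, 0.0]), np.array([3.0, 4.0]))  # -> 5.0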
@njit(cache=True)
def gaussian_distance(x, y, gamma=1):
"""Calculates the Gaussian Distance.
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Gaussian Distance between x and y.
"""
dist = (x - y) ** 2
return math.exp(-gamma * np.sum(dist) ** 0.5)
@njit(cache=True)
def gower_distance(x, y):
"""Calculates the Gower Distance (Average Manhattan, Mean Character Distance).
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Gower Distance between x and y.
"""
dist = np.fabs(x - y)
return np.sum(dist) / x.shape[0]
@njit(cache=True)
def hamming_distance(x, y):
"""Calculates the Hamming Distance.
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Hamming Distance between x and y.
"""
dist = np.count_nonzero(x != y)
return dist
@d.avoid_zero_division
@njit(cache=True)
def hassanat_distance(x, y):
"""Calculates the Hassanat Distance.
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Hassanat Distance between x and y.
"""
    # Creates an empty array to hold the per-dimension distances
dist = np.zeros(x.shape[0])
# Creates a binary mask
mask = np.minimum(x, y) >= 0
# Iterates through all dimensions
for i in range(x.shape[0]):
        if mask[i]:
dist[i] = 1 - (1 + np.minimum(x[i], y[i])) / (1 + np.maximum(x[i], y[i]))
else:
dist[i] = 1 - (1 + np.minimum(x[i], y[i]) + np.fabs(np.minimum(x[i], y[i]))) / (1 + np.maximum(x[i], y[i]) + np.fabs(np.minimum(x[i], y[i])))
return np.sum(dist)
@njit(cache=True)
def hellinger_distance(x, y):
"""Calculates the Hellinger Distance (Jeffries-Matusita Distance).
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Hellinger Distance between x and y.
"""
dist = 2 * (x ** 0.5 - y ** 0.5) ** 2
return np.sum(dist) ** 0.5
@d.avoid_zero_division
def jaccard_distance(x, y):
"""Calculates the Jaccard Distance.
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Jaccard Distance between x and y.
"""
dist = np.sum((x - y) ** 2) / (np.sum(x ** 2) + np.sum(y ** 2) - np.sum(x * y))
return dist
@d.avoid_zero_division
@njit(cache=True)
def jeffreys_distance(x, y):
"""Calculates the Jeffreys Distance (J-Divergence, KL2 Divergence).
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Jeffreys Distance between x and y.
"""
dist = (x - y) * np.log(x / y)
return np.sum(dist)
@d.avoid_zero_division
@njit(cache=True)
def jensen_distance(x, y):
"""Calculates the Jensen Distance.
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Jensen Distance between x and y.
"""
dist = (x * np.log(x) + y * np.log(y)) / 2 - ((x + y) / 2) * np.log((x + y) / 2)
return 0.5 * np.sum(dist)
@d.avoid_zero_division
@njit(cache=True)
def jensen_shannon_distance(x, y):
"""Calculates the Jensen-Shannon Distance.
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Jensen-Shannon Distance between x and y.
"""
dist1 = x * np.log((2 * x) / (x + y))
dist2 = y * np.log((2 * y) / (x + y))
return 0.5 * (np.sum(dist1) + np.sum(dist2))
@d.avoid_zero_division
@njit(cache=True)
def k_divergence_distance(x, y):
"""Calculates the K Divergence Distance.
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The K Divergence Distance between x and y.
"""
dist = x * np.log((2 * x) / (x + y))
return np.sum(dist)
@d.avoid_zero_division
@njit(cache=True)
def kulczynski_distance(x, y):
"""Calculates the Kulczynski Distance.
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Kulczynski Distance between x and y.
"""
dist = np.sum(np.fabs(x - y)) / np.sum(np.minimum(x, y))
return dist
@d.avoid_zero_division
@njit(cache=True)
def kullback_leibler_distance(x, y):
"""Calculates the Kullback-Leibler Distance (KL Divergence).
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Kullback-Leibler Distance between x and y.
"""
dist = x * np.log(x / y)
    return np.sum(dist)
# -*- coding: utf-8 -*-
"""
Demand model of thermal loads
"""
import numpy as np
import pandas as pd
from cea.constants import HOURS_IN_YEAR, HOURS_PRE_CONDITIONING
from cea.demand import demand_writers
from cea.demand import hourly_procedure_heating_cooling_system_load, ventilation_air_flows_simple
from cea.demand import latent_loads
from cea.demand import sensible_loads, electrical_loads, hotwater_loads, refrigeration_loads, datacenter_loads
from cea.demand import ventilation_air_flows_detailed, control_heating_cooling_systems
from cea.demand.building_properties import get_thermal_resistance_surface
from cea.demand.latent_loads import convert_rh_to_moisture_content
from cea.utilities import reporting
def calc_thermal_loads(building_name, bpr, weather_data, date_range, locator,
use_dynamic_infiltration_calculation, resolution_outputs, loads_output, massflows_output,
temperatures_output, config, debug):
"""
Calculate thermal loads of a single building with mechanical or natural ventilation.
Calculation procedure follows the methodology of ISO 13790
The structure of ``usage_schedules`` is:
.. code-block:: python
:emphasize-lines: 2,4
{
'list_uses': ['ADMIN', 'GYM', ...],
'schedules': [ ([...], [...], [...], [...]), (), (), () ]
}
* each element of the 'list_uses' entry represents a building occupancy type.
* each element of the 'schedules' entry represents the schedules for a building occupancy type.
* the schedules for a building occupancy type are a 4-tuple (occupancy, electricity, domestic hot water,
probability of use), with each element of the 4-tuple being a list of hourly values (HOURS_IN_YEAR values).
Side effect include a number of files in two folders:
* ``scenario/outputs/data/demand``
* ``${Name}.csv`` for each building
* temporary folder (as returned by ``tempfile.gettempdir()``)
* ``${Name}T.csv`` for each building
daren-thomas: as far as I can tell, these are the only side-effects.
:param building_name: name of building
:type building_name: str
:param bpr: a collection of building properties for the building used for thermal loads calculation
:type bpr: BuildingPropertiesRow
:param weather_data: data from the .epw weather file. Each row represents an hour of the year. The columns are:
``drybulb_C``, ``relhum_percent``, and ``windspd_ms``
:type weather_data: pandas.DataFrame
:param locator:
:param use_dynamic_infiltration_calculation:
:returns: This function does not return anything
:rtype: NoneType
"""
schedules, tsd = initialize_inputs(bpr, weather_data, locator)
# CALCULATE ELECTRICITY LOADS
tsd = electrical_loads.calc_Eal_Epro(tsd, schedules)
# CALCULATE REFRIGERATION LOADS
if refrigeration_loads.has_refrigeration_load(bpr):
tsd = refrigeration_loads.calc_Qcre_sys(bpr, tsd, schedules)
tsd = refrigeration_loads.calc_Qref(locator, bpr, tsd)
else:
tsd['DC_cre'] = tsd['Qcre_sys'] = tsd['Qcre'] = np.zeros(HOURS_IN_YEAR)
tsd['mcpcre_sys'] = tsd['Tcre_sys_re'] = tsd['Tcre_sys_sup'] = np.zeros(HOURS_IN_YEAR)
tsd['E_cre'] = np.zeros(HOURS_IN_YEAR)
# CALCULATE PROCESS HEATING
tsd['Qhpro_sys'] = schedules['Qhpro_W'] # in Wh
# CALCULATE PROCESS COOLING
tsd['Qcpro_sys'] = schedules['Qcpro_W'] # in Wh
# CALCULATE DATA CENTER LOADS
if datacenter_loads.has_data_load(bpr):
tsd = datacenter_loads.calc_Edata(tsd, schedules) # end-use electricity
tsd = datacenter_loads.calc_Qcdata_sys(bpr, tsd) # system need for cooling
tsd = datacenter_loads.calc_Qcdataf(locator, bpr, tsd) # final need for cooling
else:
tsd['DC_cdata'] = tsd['Qcdata_sys'] = tsd['Qcdata'] = np.zeros(HOURS_IN_YEAR)
tsd['mcpcdata_sys'] = tsd['Tcdata_sys_re'] = tsd['Tcdata_sys_sup'] = np.zeros(HOURS_IN_YEAR)
tsd['Edata'] = tsd['E_cdata'] = np.zeros(HOURS_IN_YEAR)
# CALCULATE SPACE CONDITIONING DEMANDS
if np.isclose(bpr.rc_model['Af'], 0.0): # if building does not have conditioned area
tsd['T_int'] = tsd['T_ext']
tsd['x_int'] = np.vectorize(convert_rh_to_moisture_content)(tsd['rh_ext'], tsd['T_int'])
tsd['E_cs'] = tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['Eaux_cs'] = tsd['Eaux_hs'] = tsd['Ehs_lat_aux'] = np.zeros(HOURS_IN_YEAR)
print(f"building {bpr.name} does not have an air-conditioned area")
else:
# get hourly thermal resistances of external surfaces
tsd['RSE_wall'], \
tsd['RSE_roof'], \
tsd['RSE_win'] = get_thermal_resistance_surface(bpr.architecture, weather_data)
# calculate heat gains
tsd = latent_loads.calc_Qgain_lat(tsd, schedules)
tsd = calc_set_points(bpr, date_range, tsd, building_name, config, locator,
schedules) # calculate the setpoints for every hour
tsd = calc_Qhs_Qcs(bpr, tsd,
use_dynamic_infiltration_calculation) # end-use demand latent and sensible + ventilation
tsd = sensible_loads.calc_Qhs_Qcs_loss(bpr, tsd) # losses
tsd = sensible_loads.calc_Qhs_sys_Qcs_sys(tsd) # system (incl. losses)
tsd = sensible_loads.calc_temperatures_emission_systems(bpr, tsd) # calculate temperatures
tsd = electrical_loads.calc_Eve(tsd) # calc auxiliary loads ventilation
tsd = electrical_loads.calc_Eaux_Qhs_Qcs(tsd, bpr) # calc auxiliary loads heating and cooling
tsd = calc_Qcs_sys(bpr, tsd) # final : including fuels and renewables
tsd = calc_Qhs_sys(bpr, tsd) # final : including fuels and renewables
# Positive loads
tsd['Qcs_lat_sys'] = abs(tsd['Qcs_lat_sys'])
tsd['DC_cs'] = abs(tsd['DC_cs'])
tsd['Qcs_sys'] = abs(tsd['Qcs_sys'])
tsd['Qcre_sys'] = abs(tsd['Qcre_sys']) # inverting sign of cooling loads for reporting and graphs
tsd['Qcdata_sys'] = abs(tsd['Qcdata_sys']) # inverting sign of cooling loads for reporting and graphs
# CALCULATE HOT WATER LOADS
if hotwater_loads.has_hot_water_technical_system(bpr):
tsd = electrical_loads.calc_Eaux_fw(tsd, bpr, schedules)
tsd = hotwater_loads.calc_Qww(bpr, tsd, schedules) # end-use
tsd = hotwater_loads.calc_Qww_sys(bpr, tsd) # system (incl. losses)
tsd = electrical_loads.calc_Eaux_ww(tsd, bpr) # calc auxiliary loads
tsd = hotwater_loads.calc_Qwwf(bpr, tsd) # final
else:
tsd = electrical_loads.calc_Eaux_fw(tsd, bpr, schedules)
tsd['Qww'] = tsd['DH_ww'] = tsd['Qww_sys'] = np.zeros(HOURS_IN_YEAR)
tsd['mcpww_sys'] = tsd['Tww_sys_re'] = tsd['Tww_sys_sup'] = np.zeros(HOURS_IN_YEAR)
tsd['Eaux_ww'] = np.zeros(HOURS_IN_YEAR)
tsd['NG_ww'] = tsd['COAL_ww'] = tsd['OIL_ww'] = tsd['WOOD_ww'] = np.zeros(HOURS_IN_YEAR)
tsd['E_ww'] = np.zeros(HOURS_IN_YEAR)
# CALCULATE SUM OF HEATING AND COOLING LOADS
tsd = calc_QH_sys_QC_sys(tsd) # aggregated cooling and heating loads
# CALCULATE ELECTRICITY LOADS PART 2/2 AUXILIARY LOADS + ENERGY GENERATION
tsd = electrical_loads.calc_Eaux(tsd) # auxiliary totals
tsd = electrical_loads.calc_E_sys(tsd) # system (incl. losses)
tsd = electrical_loads.calc_Ef(bpr, tsd) # final (incl. self. generated)
# WRITE RESULTS TO DISK
write_results(bpr, building_name, date_range, loads_output, locator, massflows_output,
resolution_outputs, temperatures_output, tsd, debug)
return
def calc_QH_sys_QC_sys(tsd):
tsd['QH_sys'] = tsd['Qww_sys'] + tsd['Qhs_sys'] + tsd['Qhpro_sys']
tsd['QC_sys'] = tsd['Qcs_sys'] + tsd['Qcdata_sys'] + tsd['Qcre_sys'] + tsd['Qcpro_sys']
return tsd
def write_results(bpr, building_name, date, loads_output, locator, massflows_output,
resolution_outputs, temperatures_output, tsd, debug):
if resolution_outputs == 'hourly':
writer = demand_writers.HourlyDemandWriter(loads_output, massflows_output, temperatures_output)
elif resolution_outputs == 'monthly':
writer = demand_writers.MonthlyDemandWriter(loads_output, massflows_output, temperatures_output)
else:
raise Exception('Unknown resolution_outputs: {}'.format(resolution_outputs))
if debug:
print('Creating instant plotly visualizations of demand variable time series.')
print('Behavior can be changed in cea.utilities.reporting code.')
print('Writing detailed demand results of {} to .xls file.'.format(building_name))
reporting.quick_visualization_tsd(tsd, locator.get_demand_results_folder(), building_name)
reporting.full_report_to_xls(tsd, locator.get_demand_results_folder(), building_name)
else:
writer.results_to_csv(tsd, bpr, locator, date, building_name)
def calc_Qcs_sys(bpr, tsd):
# GET SYSTEMS EFFICIENCIES
energy_source = bpr.supply['source_cs']
scale_technology = bpr.supply['scale_cs']
efficiency_average_year = bpr.supply['eff_cs']
if scale_technology == "BUILDING":
if energy_source == "GRID":
# sum
tsd['E_cs'] = abs(tsd['Qcs_sys']) / efficiency_average_year
tsd['DC_cs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "NONE":
tsd['E_cs'] = np.zeros(HOURS_IN_YEAR)
tsd['DC_cs'] = np.zeros(HOURS_IN_YEAR)
else:
raise Exception('check potential error in input database of LCA infrastructure / COOLING')
elif scale_technology == "DISTRICT":
if energy_source == "GRID":
tsd['DC_cs'] = tsd['Qcs_sys'] / efficiency_average_year
tsd['E_cs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "NONE":
tsd['DC_cs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_cs'] = np.zeros(HOURS_IN_YEAR)
else:
raise Exception('check potential error in input database of ALL IN ONE SYSTEMS / COOLING')
elif scale_technology == "NONE":
tsd['DC_cs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_cs'] = np.zeros(HOURS_IN_YEAR)
else:
raise Exception('check potential error in input database of LCA infrastructure / COOLING')
return tsd
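# Illustrative check (made-up numbers, not part of the CEA code base): for a
# BUILDING-scale system supplied from the GRID, the hourly electricity demand
# for cooling is the absolute system load divided by the average yearly
# efficiency, e.g. abs(-3000 Wh) / 3.0 = 1000 Wh, while DC_cs stays at zero.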
def calc_Qhs_sys(bpr, tsd):
"""
Calculate the final heating loads, including fuels and renewables.
"""
# GET SYSTEMS EFFICIENCIES
energy_source = bpr.supply['source_hs']
scale_technology = bpr.supply['scale_hs']
efficiency_average_year = bpr.supply['eff_hs']
if scale_technology == "BUILDING":
if energy_source == "GRID":
tsd['E_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from waveml.metrics import RMSE, MSE, MAE, MAPE, MSLE, MBE, SAE, SSE
from waveml.utils import to_tensor, to_array
from sklearn.model_selection import KFold
class Wave:
def __init__(self, n_opt_rounds: int = 1000, learning_rate: float = 0.01, loss_function=MSE, verbose: int = 1):
self.n_opt_rounds = int(n_opt_rounds)
self.learning_rate = float(learning_rate)
self.loss_function = loss_function
self.verbose = int(verbose)
self.fitted = False
if self.n_opt_rounds < 1:
raise ValueError(f"n_opt_rounds should belong to an [1;inf) interval, passed {self.n_opt_rounds}")
if self.learning_rate <= 0:
raise ValueError(f"learning rate should belong to a (0;inf) interval, passed {self.learning_rate}")
if self.verbose < 0:
raise ValueError(f"learning rate should belong to a [0;inf) interval, passed {self.verbose}")
class WaveRegressor(Wave):
"""
Weighted average regression model
"""
def __init__(self, n_opt_rounds: int = 1000, learning_rate: float = 0.01, loss_function=MSE, verbose: int = 1):
super().__init__(n_opt_rounds, learning_rate, loss_function, verbose)
# Training process
def fit(self, X: [pd.DataFrame, pd.Series, np.array, torch.Tensor, list],
y: [pd.DataFrame, pd.Series, np.array, torch.Tensor, list], weights=None, eval_set=None,
use_best_model=False) -> None:
X_train_tensor, y_train_tensor, self.use_best_model = to_tensor(X), to_tensor(y), use_best_model
self.train_losses, self.test_losses, self.weights_history = [], [], []
self.fitted = False
if type(self.use_best_model) != bool:
raise ValueError(f"use_best_model parameter should be bool, passed {self.use_best_model}")
self.is_eval_set = eval_set is not None
if self.is_eval_set:
X_test_tensor = to_tensor(eval_set[0])
y_test_tensor = to_tensor(eval_set[1])
n_features = X_train_tensor.shape[1]
self.weights = to_tensor(weights) if weights is not None else torch.tensor(
[1 / n_features for i in range(n_features)]
)
self.weights.requires_grad_()
self.optimizer = torch.optim.Adam([self.weights], self.learning_rate)
for i in range(self.n_opt_rounds):
# clear gradient
self.optimizer.zero_grad()
# get train set error
train_loss = self.__opt_func(X_segment=X_train_tensor, y_segment=y_train_tensor)
# append train loss to train loss history
self.train_losses.append(train_loss.item())
# create a train part of fit information
train_output = f"train: {train_loss.item()}"
# optimization of weights according to the function
train_loss.backward()
# create a test part of fit information
test_output = ""
if self.is_eval_set:
# get test set error
test_loss = self.__opt_func(X_segment=X_test_tensor, y_segment=y_test_tensor)
# append test loss to test loss history
self.test_losses.append(test_loss.item())
test_output = f"test: {test_loss.item()}"
if self.verbose != 0:
print(f"round: {i}", train_output, test_output)
self.weights_history.append(self.weights.detach().clone())  # snapshot, so history entries are not all the same tensor
self.optimizer.step()
self.fitted = True
# Get a tensor of weights after training
def get_weights(self) -> np.ndarray:
if not self.fitted:
raise AttributeError("Model has not been fitted yet. Use fit() method first.")
if self.use_best_model:
return self.weights_history[self.test_losses.index(min(self.test_losses))].detach().numpy()
return self.weights_history[self.train_losses.index(min(self.train_losses))].detach().numpy()
# Predict on passed data with current weights
def predict(self, X) -> np.ndarray:
if not self.fitted:
raise AttributeError("Model has not been fitted yet. Use fit() method first.")
X = to_tensor(X)
weighted_sum = torch.sum(X * to_tensor(self.get_weights()), 1)
return weighted_sum.detach().numpy()
def score(self, X_train, y_test):
if not self.fitted:
raise AttributeError("Model has not been fitted yet. Use fit() method first.")
X_train_tensor, y_test_tensor = to_tensor(X_train), to_tensor(y_test)
y_pred = self.predict(X_train_tensor)
return self.loss_function(y_test_tensor, y_pred).item()
def plot(self) -> None:
if not self.fitted:
raise AttributeError("Model has not been fitted yet. Use fit() method first.")
plt.plot([i for i in range(self.n_opt_rounds)], self.train_losses)
if self.is_eval_set:
plt.plot([i for i in range(self.n_opt_rounds)], self.test_losses)
plt.show()
return
# Function for weight optimization
def __opt_func(self, X_segment, y_segment):
y_true = y_segment
y_pred = self.__inner_predict(X_segment)
return self.loss_function(y_true, y_pred)
def __inner_predict(self, X) -> torch.tensor:
sum = torch.sum(X * self.weights, 1)
return sum
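# Illustrative usage sketch (synthetic names and shapes, not part of the
# library): blend two columns of base-model predictions into one estimate.
# X_stack = np.column_stack([preds_model_1, preds_model_2])   # shape (n, 2)
# reg = WaveRegressor(n_opt_rounds=300, learning_rate=0.05, loss_function=MSE)
# reg.fit(X_stack, y_true, eval_set=[X_stack_val, y_val], use_best_model=True)
# blended = reg.predict(X_stack_val)
# print(reg.get_weights())   # learned blending weights, one per column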
class WaveClassifier(WaveRegressor):
"""
Weighted average classification model
"""
def __init__(self, n_opt_rounds: int = 1000, learning_rate: float = 0.01, loss_function=MSE, verbose: int = 1,
threshold: float = 0.5):
super().__init__(n_opt_rounds, learning_rate, loss_function, verbose)
self.threshold = threshold
# Predict on passed data with current weights
def predict_proba(self, X: [pd.DataFrame, pd.Series, np.array, torch.Tensor, list]) -> np.ndarray:
if not self.fitted:
raise AttributeError("Model has not been fitted yet. Use fit() method first.")
X = to_tensor(X)
proba = (1 / (1 + torch.exp(-torch.sum(X * self.weights, 1)))).detach().numpy().reshape(-1, 1)
inverse_proba = -proba + 1
return np.hstack([inverse_proba, proba])
def predict(self, X: [pd.DataFrame, pd.Series, np.array, torch.Tensor, list]) -> np.ndarray:
y_pred = self.predict_proba(X)[:, 1]
y_pred[y_pred < self.threshold] = 0
y_pred[y_pred >= self.threshold] = 1
return y_pred
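# Illustrative usage sketch (synthetic names, not part of the library): the
# classifier applies a sigmoid to the weighted sum and thresholds the result.
# clf = WaveClassifier(n_opt_rounds=200, learning_rate=0.05, threshold=0.5)
# clf.fit(X_stack, y_binary, eval_set=[X_stack_val, y_val_binary])
# proba = clf.predict_proba(X_stack_val)   # shape (n, 2): [P(class 0), P(class 1)]
# labels = clf.predict(X_stack_val)        # 0/1 after thresholding at 0.5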
class WaveTransformer(Wave):
"""
Weighted average transformer, which performs a transformation over each feature separately
"""
def __init__(self, n_opt_rounds: int = 1000, learning_rate: float = 0.01, loss_function=MSE,
regression: bool = True,
verbose: int = 1, n_folds: int = 4, random_state: int = None, shuffle: bool = False):
super().__init__(n_opt_rounds, learning_rate, loss_function, verbose)
self.regression = regression
self.shuffle = shuffle
self.n_folds = n_folds
self.random_state = random_state
if n_folds < 2:
raise ValueError(f"n_folds should belong to a [2;inf) interval, passed {n_folds}")
if random_state is not None and random_state < 0:
raise ValueError(f"random_state should belong to a [0;inf) interval, passed {random_state}")
self.n_folds = n_folds
self.shuffle = shuffle
self.random_state = int(random_state) if (self.shuffle and random_state is not None) else None
def __opt_func(self, X_segment, y_segment, weights):
return self.loss_function(X_segment * weights[0] + weights[1], y_segment)
def fit(self, X: [pd.DataFrame, pd.Series, np.array, torch.Tensor, list],
y: [pd.DataFrame, pd.Series, np.array, torch.Tensor, list]):
X_train_tensor, y_train_tensor = to_tensor(X), to_tensor(y)
self.n_features = X_train_tensor.shape[1]
self.weights = []
self.fitted = False
for i in range(self.n_features):
feature_weights = torch.tensor([])
X = X_train_tensor[:, i]
kf = KFold(n_splits=self.n_folds, random_state=self.random_state, shuffle=self.shuffle)
print("\nFeature:", i)
f = 0
for train_index, test_index in kf.split(X):
fold_weights = torch.tensor([1.0, 0.0])
fold_weights.requires_grad_()
self.optimizer = torch.optim.Adam([fold_weights], self.learning_rate)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y_train_tensor[train_index], y_train_tensor[test_index]
for j in range(self.n_opt_rounds):
self.optimizer.zero_grad()
# get train set error
train_loss = self.__opt_func(X_segment=X_train, y_segment=y_train, weights=fold_weights)
# create a train part of fit information
train_output = f"train: {train_loss.item()}"
# optimization of weights according to the function
if self.verbose >= 1:
print("round:", j, train_output)
train_loss.backward()
self.optimizer.step()
if self.verbose in [1, 2]:
print(f"\tFold {f}:",
self.__opt_func(X_segment=X_test, y_segment=y_test, weights=fold_weights).item())
f += 1
feature_weights = torch.cat([feature_weights, fold_weights])
feature_weights = feature_weights.reshape(-1, 2)
self.weights.append(feature_weights)
self.fitted = True
return self
def get_weights(self) -> np.ndarray:
if not self.fitted:
raise AttributeError("Model has not been fitted yet. Use fit() method first.")
return torch.tensor(self.weights).detach().numpy()
def transform(self, X) -> np.ndarray:
X_tensor = to_tensor(X)
if not self.fitted:
raise AttributeError("Model has not been fitted yet. Use fit() method first.")
for i in range(self.n_features):
feature = X_tensor[:, i]
w = self.weights[i].mean(dim=0)
if self.regression:
X_tensor[:, i] = feature * w[0] + w[1]
else:
X_tensor[:, i] = 1 / (1 + torch.exp(-(feature * w[0] + w[1])))
return X_tensor.detach().numpy()
def fit_transform(self, X: [pd.DataFrame, pd.Series, np.array, torch.Tensor, list],
y: [pd.DataFrame, pd.Series, np.array, torch.Tensor, list]) -> np.ndarray:
self.fit(X, y)
return self.transform(X)
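# Illustrative usage sketch (synthetic names, not part of the library): learn
# an out-of-fold affine rescaling w0 * x + w1 for every feature separately.
# wt = WaveTransformer(n_opt_rounds=200, learning_rate=0.05, n_folds=4)
# X_rescaled = wt.fit_transform(X_stack, y_true)   # same shape as X_stack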
class WaveEncoder:
"""
Categorical feature encoding model
"""
def __init__(self, encoding_type: str, strategy: str = "mean"):
self.encoding_types = ["catboost", "label", "target", "count"]
self.encoding_type = encoding_type.lower()
if self.encoding_type not in self.encoding_types:
raise ValueError(f"Given encoding type {self.encoding_type}, allowed {', '.join(self.encoding_types)}")
self.strategy = strategy
if self.strategy != None:
self.strategies = {"mean": np.mean,
"median": np.median,
"sum": np.sum}
self.strategy = self.strategy.lower()
if self.strategy not in list(self.strategies.keys()):
raise ValueError(
f"Given strategy type {self.strategy}, allowed {', '.join(list(self.strategies.keys()))}")
self.fitted = False
def fit(self, X: [pd.DataFrame, pd.Series, np.array, torch.Tensor, list],
y: [pd.DataFrame, pd.Series, np.array, torch.Tensor, list] = None, regression=True,
cat_features=None) -> None:
self.fitted = False
self.regression = regression
self.X = to_array(X)
self.y = to_array(y) if y is not None else None
self.n_features = X.shape[1]
self.cat_features = cat_features
if self.cat_features is None:
self.cat_features = [i for i in range(self.n_features)]
if self.encoding_type == "count":
self.__count(self.X)
if self.encoding_type == "target":
self.__target(self.X, self.y)
if self.encoding_type == "label":
self.__label(self.X)
def __count(self, X: np.ndarray) -> None:
self.mappers = []
for i in range(len(self.cat_features)):
counts = dict()
feature = X[:, self.cat_features[i]]
uniques = np.unique(feature)
for unique in uniques:
counts[unique] = len(feature[feature == unique])
self.mappers.append(counts)
def __target(self, X: np.ndarray, y: np.ndarray) -> None:
X_labeled = self.__label(X)
if self.regression:
self.mappers = []
for i in range(len(self.cat_features)):
counts = dict()
feature = X_labeled[:, self.cat_features[i]]
uniques = np.unique(feature)
for unique in uniques:
counts[unique] = self.strategies.get(self.strategy)(y[feature == unique])
self.mappers.append(counts)
else:
self.mappers = []
for i in range(len(self.cat_features)):
counts = dict()
feature = X[:, self.cat_features[i]]
uniques = np.unique(feature)
for unique in uniques:
counts[unique] = y[feature == unique].mean()
self.mappers.append(counts)
def __label(self, X: np.ndarray) -> None:
self.mappers = []
for i in range(len(self.cat_features)):
counts = dict()
feature = X[:, self.cat_features[i]]
uniques = np.unique(feature)
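# The __label method is truncated in this copy. A typical label-encoding mapper
# (assumed behaviour rather than the original implementation, toy data):
# feature = np.array(["a", "b", "a", "c"])
# uniques = np.unique(feature)                     # array(['a', 'b', 'c'])
# mapper = {u: i for i, u in enumerate(uniques)}   # {'a': 0, 'b': 1, 'c': 2}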
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from typing import List, Optional, Tuple
import cv2
import numpy as np
from scipy.interpolate import griddata
from line_mapper import LineMapper
from digit_recognizer_2 import create_recognizer
from solver import solve
from sudoku.solver import load_image, cut_out_field, find_corners, perspective_transform_contour, \
extract_subcontour
from utils import show_image, wait_windows, scale_image_target_height
_GRID_LINES = 10
_VIZUALIZE = True
# image = load_image("../images/big-numbers.jpg")
# image = load_image("../images/slightly_blurry.jpg")
# image = load_image("../images/sudoku.jpg")
# image = load_image("../images/sudoku-rotated.jpg")
# image = load_image("../images/sudoku-1.jpg")
# image = load_image("../images/sudoku-2.jpg")
# image = load_image("../images/sudoku-2-rotated.jpg")
image = load_image("../images/warped.jpg")
# image = load_image("tmp/003.jpg")
# if _VIZUALIZE:
# show_image("orig", image)
image = scale_image_target_height(image, 640)
t = time.time()
# Extract the field, its contour and corners.
field, field_contour, _, perspective_transform_matrix = cut_out_field(image)
field_gray = cv2.cvtColor(field.image, cv2.COLOR_BGR2GRAY)
if _VIZUALIZE:
show_image("field_gray", field_gray)
# Adjust brightness.
field_gray_closed = cv2.morphologyEx(
field_gray, cv2.MORPH_CLOSE,
cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)))
field_gray_adj = np.divide(field_gray, field_gray_closed)
field_gray = cv2.normalize(field_gray_adj, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)
if _VIZUALIZE:
show_image("field_gray adj", field_gray)
# Binarize the field.
bin_field = cv2.adaptiveThreshold(
field_gray, maxValue=255,
adaptiveMethod=cv2.ADAPTIVE_THRESH_MEAN_C,
thresholdType=cv2.THRESH_BINARY_INV,
blockSize=17,
C=11)
if _VIZUALIZE:
show_image("bin_field", bin_field)
# Find and remove numbers. Look for them on the binary image, erase from the grayscale image.
cell_side = field.side // 9
contours, _ = cv2.findContours(bin_field, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
for contour in contours:
x, y, w, h = cv2.boundingRect(contour)
if w > cell_side * 0.8 or w < 0.2 * cell_side:
continue
if h > cell_side * 0.9 or h < 0.2 * cell_side:
continue
# cv2.rectangle(field_viz, (x, y), (x + w, y + h), color=(0, 255, 0), thickness=-1)
# cv2.rectangle(field_viz, (x, y), (x + w, y + h), color=(0, 0, 0), thickness=-1)
# cv2.drawContours(field_viz, [contour], 0, color=(0, 255, 0), thickness=-1)
cv2.drawContours(field_gray, [contour], 0, color=255, thickness=2)
cv2.drawContours(field_gray, [contour], 0, color=255, thickness=-1)
# field_gray = cv2.GaussianBlur(field_gray, (7, 7), 0)
if _VIZUALIZE:
show_image("field_gray no numbers", field_gray)
# Apply the Sobel operator of 2nd degree to both directions.
grad_x = cv2.Sobel(field_gray, ddepth=cv2.CV_64F, dx=2, dy=0, ksize=7, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT)
np.clip(grad_x, a_min=0, a_max=grad_x.max(), out=grad_x)
grad_x = cv2.normalize(grad_x, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)
grad_y = cv2.Sobel(field_gray, ddepth=cv2.CV_64F, dx=0, dy=2, ksize=7, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT)
np.clip(grad_y, a_min=0, a_max=grad_y.max(), out=grad_y)
grad_y = cv2.normalize(grad_y, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)
if _VIZUALIZE:
show_image("grad_x", grad_x)
show_image("grad_y", grad_y)
# TODO adaptive window
# TODO fast-and-simple first, fallback on more robust after?
# Outline grid lines.
# The horizontal one, on the left border. The vertical one, on the top border.
def get_average_y_over_window(image, win_x: int, win_y: int, win_w: int, win_h: int) -> Optional[int]:
window = image[win_y:win_y + win_h, win_x:win_x + win_w]
xs, ys = np.meshgrid(np.arange(window.shape[1]), np.arange(window.shape[0]))
xs_dist_weight = np.exp2(xs)
pure_white = np.sum(window.shape[0] * window.shape[1] * 255 * xs_dist_weight)
weighted_window = window * xs_dist_weight
current_white = np.sum(weighted_window)
frac = current_white / pure_white
if frac < 0.0001:
return None
avg = np.sum(ys * weighted_window) / current_white
return int(round(avg))
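# Illustrative self-check (hypothetical data, not part of the original script):
# a single bright row at y = 12 inside an otherwise dark patch averages to 12.
_example_patch = np.zeros((30, 30), dtype=np.uint8)
_example_patch[12, :] = 255
assert get_average_y_over_window(_example_patch, win_x=0, win_y=0, win_w=10, win_h=30) == 12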
def detect_line(image, start_x, start_y, win_h, win_w, right_limit) -> Optional[List[Tuple[int, int]]]:
"""
Detects a horizontal white line.
"""
result: List[Tuple[int, int]] = []
win_x = start_x
win_y = start_y - win_h // 2
lost_line = False
while win_x < right_limit:
current_win_w = win_w
if win_x + current_win_w >= right_limit:
break
# Commented-out note:
# Alternatively, try to use a smaller window.
# However, it seems just stopping works better with line extrapolation later.
# if win_x + current_win_w > right_limit:
# current_win_w = right_limit - win_x
# if current_win_w < 3:
# break
avg = get_average_y_over_window(image, win_x, win_y, current_win_w, win_h)
if avg is None:
print("EXPAND")
win_y -= win_h // 2
avg = get_average_y_over_window(image, win_x, win_y, current_win_w, win_h * 2)
if avg is None:
print("LOST")
lost_line = True
break  # cannot continue without a valid average position
win_x = win_x + current_win_w
win_y = win_y + (avg - win_h // 2)
result.append((win_x, win_y + win_h // 2))
if not lost_line:
return result
else:
return None
def detect_lines(work_image: np.ndarray,
border_mapper: LineMapper,
left_limit: int,
right_limit: int) -> List[List[Tuple[int, int]]]:
work_image_blur = cv2.GaussianBlur(work_image, (1, 25), 0)
offset = cell_side // 6
# offset = 0
step = 1
win_w = cell_side // 4
detected_windows = []
for y in range(left_limit, right_limit + 1, step):
x = border_mapper.map_x(y) + offset
w = work_image_blur[y - 3:y + 3, x:x + win_w]
pure_white = w.shape[0] * w.shape[1] * 255
current_white = np.sum(w)
frac = current_white / pure_white
if frac > 0.01:
detected_windows.append((x, y, frac))
assert len(detected_windows) >= _GRID_LINES
cluster_starts = [0]
for i in range(1, len(detected_windows)):
if detected_windows[i][1] - detected_windows[i - 1][1] > cell_side // 6:
# print("---")
cluster_starts.append(i)
# print(detected_windows[i])
# print(cluster_starts)
assert len(cluster_starts) == _GRID_LINES
win_h = 5
win_w = cell_side // 4
result = []
for i in range(len(cluster_starts)):
if i < len(cluster_starts) - 1:
windows = detected_windows[cluster_starts[i]:cluster_starts[i + 1]]
else:
windows = detected_windows[cluster_starts[i]:]
x, y, _ = max(windows, key=lambda w: w[2])
# if _VIZUALIZE:
# cv2.rectangle(viz, (x, y - 3), (x + win_w, y + 3), color=(0, 255, 0), thickness=1)
line = detect_line(work_image, x, y, win_h, win_w, right_limit)
# Extrapolate the beginning and the end.
points_to_fit = 3
xs = [p[0] for p in line[:points_to_fit + 1]]
ys = [p[1] for p in line[:points_to_fit + 1]]
a, b = np.polyfit(xs, ys, 1)
x = 0
y = int(round(a * x + b))
line.insert(0, (x, y))
xs = [p[0] for p in line[-points_to_fit:]]
ys = [p[1] for p in line[-points_to_fit:]]
a, b = np.polyfit(xs, ys, 1)
x = field_viz.shape[1]
y = int(round(a * x + b))
line.append((x, y))
result.append(line)
# break
return result
# Find the left and the top borders.
# Recalculate the contour and the corners on the perspective transformed image.
transformed_field_contour = perspective_transform_contour(field_contour, perspective_transform_matrix)
top_left_idx, top_right_idx, bottom_right_idx, bottom_left_idx = find_corners(transformed_field_contour)
# In the contour, points go counterclockwise.
# Top border: top right -> top left
# Right border: bottom right -> top right
# Bottom border: bottom left -> bottom right
# Left border: top left -> bottom left
top_border = extract_subcontour(transformed_field_contour, top_right_idx, top_left_idx)
# Change points order so they go from the top left corner.
top_border = np.flip(top_border, axis=0)
# Swap x and y.
top_border = np.flip(top_border, axis=1)
# right_border = extract_border(bottom_right_idx, top_right_idx)
# bottom_border = extract_border(bottom_left_idx, bottom_right_idx)
left_border = extract_subcontour(transformed_field_contour, top_left_idx, bottom_left_idx)
field_viz = cv2.cvtColor(field_gray, cv2.COLOR_GRAY2BGR)
cv2.rotate(field_viz, cv2.ROTATE_90_COUNTERCLOCKWISE, dst=field_viz)
top_border_mapper = LineMapper(top_border)
vertical_lines = detect_lines(
cv2.rotate(grad_x, cv2.ROTATE_90_COUNTERCLOCKWISE),
top_border_mapper,
field.margin,
field.margin + field.side
)
assert len(vertical_lines) == _GRID_LINES
vertical_lines_masks = np.zeros(shape=(_GRID_LINES, field.image.shape[0], field.image.shape[1]), dtype=np.uint8)
for i, line in enumerate(vertical_lines):
poly = [np.array(line, np.int32)]
# if _VIZUALIZE:
# for x, y in line:
# cv2.circle(field_viz, (x, y), 0, (0, 0, 255), 2)
# cv2.polylines(field_viz, poly, isClosed=False, color=(0, 0, 255), thickness=1)
# Invert the index: the first in the rotated image is the last by the normal order.
inv_i = _GRID_LINES - i - 1
cv2.polylines(vertical_lines_masks[inv_i], poly, isClosed=False, color=255, thickness=1)
# TODO rotate lines before drawing
vertical_lines_masks = np.rot90(vertical_lines_masks, k=-1, axes=(1, 2))
if _VIZUALIZE:
cv2.rotate(field_viz, cv2.ROTATE_90_CLOCKWISE, dst=field_viz)
left_border_mapper = LineMapper(left_border)
horizontal_lines = detect_lines(grad_y, left_border_mapper, field.margin, field.margin + field.side)
assert len(horizontal_lines) == _GRID_LINES
horizontal_lines_masks = np.zeros(shape=(_GRID_LINES, field.image.shape[0], field.image.shape[1]), dtype=np.uint8)
for i, line in enumerate(horizontal_lines):
poly = [np.array(line, np.int32)]
# if _VIZUALIZE:
# for x, y in line:
# cv2.circle(field_viz, (x, y), 0, (255, 255, 0), 2)
# cv2.polylines(field_viz, poly, isClosed=False, color=(255, 255, 0), thickness=1)
cv2.polylines(horizontal_lines_masks[i], poly, isClosed=False, color=255, thickness=1)
# # TODO ? intersect one horizontal with all vertical
intersection = np.zeros(shape=(field.image.shape[0], field.image.shape[1]), dtype=np.uint8)
grid_points = np.zeros(shape=(_GRID_LINES, _GRID_LINES, 2), dtype=np.uint32)
src_points = []
dst_points = []
for i_row in range(_GRID_LINES):
for i_col in range(_GRID_LINES):
np.bitwise_and(horizontal_lines_masks[i_row], vertical_lines_masks[i_col], out=intersection)
intersection_points = np.argwhere(intersection == 255)
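# The script is truncated here. A typical next step (assumed, not shown in this
# copy) is to collapse the pixel cluster of each intersection into one grid point:
# y, x = intersection_points.mean(axis=0)
# grid_points[i_row, i_col] = (int(round(x)), int(round(y)))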
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
#
# Helper methods to generate training, validation and test splits from the UCI HAR dataset.
# Each split consists of a separate set of users.
# Reference : https://github.com/guillaume-chevalier/LSTM-Human-Activity-Recognition
import numpy as np
import os
def generateIndicesForSplits(path='./HAR/UCI HAR Dataset/train/subject_train.txt'):
f = open(path)
subjects = []
for line in f:
subject = line.strip().split()
subjects.append(int(subject[0]))
subjects = np.array(subjects)
# get unique subjects
numSubjects = np.unique(subjects)
# shuffle amongst train subjects so that difficult/easy subjects spread in both val and train
np.random.shuffle(numSubjects)
l = len(numSubjects)
splitRatio = 0.1
valSplit = int(l * splitRatio + 1)
valSubjects = numSubjects[:valSplit]
trainSubjects = numSubjects[valSplit:]
trainSubjectIndices = []
valSubjectIndices = []
for i, subject in enumerate(subjects):
if subject in trainSubjects:
trainSubjectIndices.append(i)
elif subject in valSubjects:
valSubjectIndices.append(i)
else:
raise Exception("some bug in your code")
# assert that train/val different
for x in trainSubjectIndices:
assert x not in valSubjectIndices
trainSubjectIndices = np.array(trainSubjectIndices)
valSubjectIndices = np.array(valSubjectIndices)
# shuffle more, so that readings not grouped by a subject
# therefore, no need to shuffle after slicing from read dataset, as we are shuffling here
idx = np.arange(len(trainSubjectIndices))
np.random.shuffle(idx)
trainSubjectIndices = trainSubjectIndices[idx]
idx = np.arange(len(valSubjectIndices))
np.random.shuffle(idx)
valSubjectIndices = valSubjectIndices[idx]
assert len(trainSubjectIndices) + len(valSubjectIndices) == len(subjects)
return trainSubjectIndices, valSubjectIndices
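# Illustrative usage sketch (assumes the UCI HAR dataset has been extracted to
# ./HAR/; variable names below are hypothetical):
# train_idx, val_idx = generateIndicesForSplits('./HAR/UCI HAR Dataset/train/subject_train.txt')
# x_train, y_train = x_train_val_combined[train_idx], y_train_val_combined[train_idx]
# x_val, y_val = x_train_val_combined[val_idx], y_train_val_combined[val_idx]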
def readData(extractedDir):
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATASET_PATH = extractedDir + "/UCI HAR Dataset/"
TRAIN = "train/"
TEST = "test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'r')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
row.replace('  ', ' ').strip().split(' ') for row in file
]]
)
file.close()
return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
x_train_val_combined = load_X(X_train_signals_paths)
x_test = load_X(X_test_signals_paths)
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
file = open(y_path, 'r')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
row.replace('  ', ' ').strip().split(' ') for row in file
]],
dtype=np.int32
)
file.close()
# Substract 1 to each output class for friendly 0-based indexing
return y_ - 1
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
y_train_val_combined = load_y(y_train_path)
y_test = load_y(y_test_path)
return x_train_val_combined, y_train_val_combined, x_test, y_test
def one_hot(y, numOutput):
y = np.reshape(y, [-1])
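# one_hot() is truncated in this copy. A minimal standalone sketch of the usual
# completion (assumed implementation, not the original code):
def _one_hot_sketch(y, numOutput):
    y = np.reshape(y, [-1])
    out = np.zeros((len(y), numOutput), dtype=np.float32)
    out[np.arange(len(y)), y] = 1.0   # y holds 0-based integer class labels
    return out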
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple peak fitting utility with PyMCA/SILX
===========================================
Current fitting backends: PyMca_ or SILX_
.. _PyMca: https://github.com/vasole/pymca
.. _SILX: https://github.com/silx-kit/silx
"""
from sloth.gui.jupyx import run_from_ipython
import os
import numpy as np
HAS_SILX = False
try:
from silx.math.fit import fittheories, bgtheories
from silx.math.fit.fitmanager import FitManager
HAS_SILX = True
except ImportError:
pass
HAS_PYMCA = False
HAS_PYMCA5 = False
try:
from PyMca5.PyMcaMath.fitting import Specfit, SpecfitFunctions
HAS_PYMCA5 = True
except ImportError:
try:
from PyMca import Specfit, SpecfitFunctions
HAS_PYMCA = True
except ImportError:
from sloth import NullClass
Specfit = NullClass
SpecfitFunctions = NullClass
pass
IN_IPYTHON = run_from_ipython()
##############
# SILX BASED #
##############
def fit_silx(x, y, theory=None, bkg=None):
"""fit a peak with using silx library
Parameters
==========
x, y: data 1D arrays
theory : string [None]
available theories:
+---------------------+
| Gaussians |
| Lorentz |
| Area Gaussians |
| Area Lorentz |
| Pseudo-Voigt Line |
| Area Pseudo-Voigt |
| Split Gaussian |
| Split Lorentz |
| Split Pseudo-Voigt |
| Step Down |
| Step Up |
| Slit |
| Atan |
| Hypermet |
| Degree 2 Polynomial |
| Degree 3 Polynomial |
| Degree 4 Polynomial |
| Degree 5 Polynomial |
+---------------------+
bkg : string [None]
available bkg theories:
+---------------------+
| No Background |
| Constant |
| Linear |
| Strip |
| Snip |
| Degree 2 Polynomial |
| Degree 3 Polynomial |
| Degree 4 Polynomial |
| Degree 5 Polynomial |
+---------------------+
Returns
=======
yfit : fit array like x
"""
fit = FitManager()
fit.loadtheories(fittheories)
fit.loadbgtheories(bgtheories)
fit.setdata(x=x, y=y)
yfit = y
_kwerror = False
if (theory is None):
print('fit theory not given! choose one of the following:')
print('\n'.join(map(str, fit.theories.keys())))
_kwerror = True
if (bkg is None):
print('fit background not given! choose one of the following:')
print('\n'.join(map(str, fit.bgtheories.keys())))
_kwerror = True
if _kwerror:
return yfit
fit.settheory(theory)
fit.setbackground(bkg)
try:
fit.estimate()
fit.runfit()
yfit = fit.gendata()
except Exception:
print('ERROR: fit_silx FAILED!!!')
#print('FWHM: {0}'.format(fwhm(x,yfit,method='bin')))
return yfit
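# Illustrative usage sketch (synthetic data; assumes silx is installed):
# x = np.linspace(-5, 5, 200)
# y = 10.0 * np.exp(-0.5 * ((x - 1.0) / 0.8) ** 2) + 2.0
# yfit = fit_silx(x, y, theory='Gaussians', bkg='Constant')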
###############
# PYMCA BASED #
###############
def fit_splitpvoigt(x, y, dy=False,
theory='Split Pseudo-Voigt', bkg='Constant',
conf=None, npeaks=1,
show_infos=True, show_res=True, plot=True, **kws):
"""simple wrapper to PyMca.Specfit
the goal is to fit (automagically) a set of 1D data (x,y) with an
asymmetric PseudoVoigt (splitpvoigt) function plus a constant
background
Parameters
----------
x, y : data arrays
dy : boolean or float, False
error bar on y. If dy==True: dy=np.sqrt(y) or give an
explicit array
theory : str, ['Split Pseudo-Voigt',
'Gaussians',
'Lorentz',
'Area Gaussians',
'Area Lorentz',
'Pseudo-Voigt Line',
'Area Pseudo-Voigt',
'Split Gaussian',
'Split Lorentz',
'Step Down',
'Step Up',
'Slit',
'Atan',
'Hypermet',
'Periodic Gaussians']
bkg : str, ['Constant', 'No Background', 'Linear', 'Internal']
conf : dictionary, None
to tune Specfit configuration, default:
'FwhmPoints' : int(len(y)/5.)
'Sensitivity' : 5.
'EtaFlag' : 1 (force eta between 0 and 1)
'WeightFlag' : 0 (do not weight by noise)
'AutoScaling' : 1 (auto scale y)
'AutoFwhm' : 1 (auto guess fwhm)
npeaks : int, 1
limit the number of split-PseudoVoigt peaks to guess
show_infos: boolean, True
print information on the peakfit setup
show_res : boolean, True
print fit results to standard output
plot : boolean, True
plot data, fit and residual with PyMca (ScanWindow)
Returns
-------
PyMca.Specfit.Specfit, PyMca.ScanWindow.ScanWindow (None if plot=False)
"""
# default fit configuration
fwhmpts_guess = int(len(y) / 10.) # guess 1/10 of points resides in fwhm
iflat = int(len(y) / 5.) # guess 1/5 of points are flat or out of peak
sens_guess = np.mean(y[:iflat])
import numpy as np
from qtpy import QtGui
import pyqtgraph as pg
from __code._utilities.table_handler import TableHandler
from __code.panoramic_stitching_for_tof.get import Get
COLOR_LOCK = QtGui.QColor(62, 13, 244, 100)
COLOR_UNLOCK = QtGui.QColor(255, 0, 0, 100)
COLOR_LINE_SEGMENT = QtGui.QColor(255, 0, 255)
LINE_SEGMENT_FONT = QtGui.QFont("Arial", 15)
ROI_WIDTH, ROI_HEIGHT = 50, 50
HORIZONTAL_MARGIN = 100
VERTICAL_MARGIN = 100
class ImageHandler:
def __init__(self, parent=None):
self.parent = parent
def update_contour_plot(self):
if self.parent.contour_image_roi_id:
self.parent.ui.image_view.removeItem(self.parent.contour_image_roi_id)
o_table = TableHandler(table_ui=self.parent.ui.tableWidget)
row_selected = o_table.get_row_selected()
name_of_file_selected = o_table.get_item_str_from_cell(row=row_selected, column=0)
offset_dictionary = self.parent.offset_dictionary
roi = {'x0': offset_dictionary[name_of_file_selected]['xoffset'],
'y0': offset_dictionary[name_of_file_selected]['yoffset']}
if row_selected == 0:
_color = COLOR_LOCK
else:
_color = COLOR_UNLOCK
if roi:
_pen = QtGui.QPen()
_pen.setColor(_color)
_pen.setWidthF(0.01)
_roi_id = pg.ROI([roi['x0'] + HORIZONTAL_MARGIN, roi['y0'] + VERTICAL_MARGIN],
[self.parent.image_width, self.parent.image_height],
pen=_pen, scaleSnap=True,
movable=False)
self.parent.ui.image_view.addItem(_roi_id)
self.parent.contour_image_roi_id = _roi_id
def update_current_panoramic_image(self):
_view = self.parent.ui.image_view.getView()
_view_box = _view.getViewBox()
_state = _view_box.getState()
first_update = False
if self.parent.histogram_level is None:
first_update = True
_histo_widget = self.parent.ui.image_view.getHistogramWidget()
self.parent.histogram_level = _histo_widget.getLevels()
# data_dictionary = self.parent.data_dictionary
data_dictionary = self.parent.coarse_images_dictionary
offset_dictionary = self.parent.offset_dictionary
max_yoffset, max_xoffset = self.get_max_offset()
image_height = self.parent.image_height
image_width = self.parent.image_width
_color = None
panoramic_image = None
o_get = Get(parent=self.parent)
list_folders = o_get.get_list_folders_according_to_offset_table()
for _folder_index, _folder in enumerate(list_folders):
if _folder_index == 0:
panoramic_image = np.zeros((max_yoffset + image_height + 2*VERTICAL_MARGIN,
max_xoffset + image_width + 2*HORIZONTAL_MARGIN))
_image = data_dictionary[_folder].data
is_visible = offset_dictionary[_folder]['visible']
if not is_visible:
continue
if _folder_index == 0:
panoramic_image[VERTICAL_MARGIN:image_height+VERTICAL_MARGIN,
HORIZONTAL_MARGIN:image_width+HORIZONTAL_MARGIN] = _image
else:
xoffset = offset_dictionary[_folder]['xoffset']
yoffset = offset_dictionary[_folder]['yoffset']
panoramic_image[yoffset+VERTICAL_MARGIN: yoffset+image_height+VERTICAL_MARGIN,
xoffset+HORIZONTAL_MARGIN: xoffset+image_width+HORIZONTAL_MARGIN] = \
_image
self.parent.panoramic_images = panoramic_image
_image = np.transpose(panoramic_image)
# _image = self._clean_image(_image)
self.parent.ui.image_view.setImage(_image)
self.parent.current_live_image = _image
_view_box.setState(_state)
if not first_update:
_histo_widget.setLevels(self.parent.histogram_level[0],
self.parent.histogram_level[1])
def get_max_offset(self):
offset_dictionary = self.parent.offset_dictionary
list_xoffset = [offset_dictionary[_key]['xoffset'] for _key in offset_dictionary.keys()]
list_yoffset = [offset_dictionary[_key]['yoffset'] for _key in offset_dictionary.keys()]
return int(np.max(list_yoffset)), int(np.max(list_xoffset))
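# Illustrative sketch (hypothetical arrays, not part of the GUI code): each
# visible image is pasted into the panoramic canvas at its offset, exactly as
# update_current_panoramic_image does above:
# canvas[yoffset + VERTICAL_MARGIN: yoffset + image_height + VERTICAL_MARGIN,
#        xoffset + HORIZONTAL_MARGIN: xoffset + image_width + HORIZONTAL_MARGIN] = image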
def update_from_to_roi(self, state=False):
if self.parent.from_roi_id:
self.parent.ui.image_view.removeItem(self.parent.from_roi_id)
self.parent.ui.image_view.removeItem(self.parent.to_roi_id)
self.parent.ui.image_view.removeItem(self.parent.from_label_id)
self.parent.ui.image_view.removeItem(self.parent.to_label_id)
self.parent.ui.image_view.removeItem(self.parent.from_roi_cross_id)
self.parent.ui.image_view.removeItem(self.parent.to_roi_cross_id)
if state:
from_roi = self.parent.from_roi
x = from_roi['x']
y = from_roi['y']
self.parent.from_roi_id = pg.ROI([x, y],
[ROI_WIDTH, ROI_HEIGHT],
scaleSnap=True)
self.parent.ui.image_view.addItem(self.parent.from_roi_id)
self.parent.from_roi_id.sigRegionChanged.connect(self.parent.from_roi_box_changed)
to_roi = self.parent.to_roi
x = to_roi['x']
y = to_roi['y']
self.parent.to_roi_id = pg.ROI([x, y],
[ROI_WIDTH, ROI_HEIGHT],
scaleSnap=True)
self.parent.ui.image_view.addItem(self.parent.to_roi_id)
self.parent.to_roi_id.sigRegionChanged.connect(self.parent.to_roi_box_changed)
self.update_from_label()
self.update_from_cross_line()
self.update_to_label()
self.update_to_cross_line()
def update_validity_of_from_to_button(self):
# check that from ROI is inside the selected image
from_roi = self.parent.from_roi
x = from_roi['x']
y = from_roi['y']
o_table = TableHandler(table_ui=self.parent.ui.tableWidget)
row_selected = o_table.get_row_selected()
name_of_file_selected = o_table.get_item_str_from_cell(row=row_selected, column=0)
offset_dictionary = self.parent.offset_dictionary
xoffset_of_selected_image = offset_dictionary[name_of_file_selected]['xoffset'] + HORIZONTAL_MARGIN
yoffset_of_selected_image = offset_dictionary[name_of_file_selected]['yoffset'] + VERTICAL_MARGIN
if (x < xoffset_of_selected_image) or (y < yoffset_of_selected_image) or \
(x > xoffset_of_selected_image + self.parent.image_width) or \
(y > yoffset_of_selected_image + self.parent.image_height):
from_to_button_status = False
from_to_error_label = True
else:
from_to_button_status = True
from_to_error_label = False
self.parent.ui.from_to_button.setEnabled(from_to_button_status)
self.parent.ui.from_to_error_label.setVisible(from_to_error_label)
def update_from_to_line_label_changed(self):
from_to_roi = self.parent.from_to_roi
x0 = from_to_roi['x0']
y0 = from_to_roi['y0']
x1 = from_to_roi['x1']
y1 = from_to_roi['y1']
self.parent.from_label_id.setPos(x1, y1)
self.parent.to_label_id.setPos(x0, y0)
def update_cross_line(self, roi_cross_id=None, roi=None):
if roi_cross_id:
self.parent.ui.image_view.removeItem(roi_cross_id)
pos = []
adj = []
x = roi['x']
y = roi['y']
# vertical guide
pos.append([x + ROI_WIDTH / 2, y - ROI_HEIGHT / 2])
pos.append([x + ROI_WIDTH / 2, y + ROI_HEIGHT + ROI_HEIGHT / 2])
adj.append([0, 1])
# horizontal guide
pos.append([x - ROI_WIDTH / 2, y + ROI_HEIGHT / 2])
pos.append([x + ROI_WIDTH + ROI_WIDTH / 2, y + ROI_HEIGHT / 2])
adj.append([2, 3])
pos = np.array(pos)
adj = np.array(adj)
"""
Generate figures for the DeepCytometer paper for v7 of the pipeline.
Partly deprecated by klf14_b6ntac_exp_0110_paper_figures_v8.py:
* Some figures have been updated to have v8 of the pipeline in the paper.
Code cannibalised from:
* klf14_b6ntac_exp_0097_full_slide_pipeline_v7.py
"""
"""
This file is part of Cytometer
Copyright 2021 Medical Research Council
SPDX-License-Identifier: Apache-2.0
Author: <NAME> <<EMAIL>>
"""
# script name to identify this experiment
experiment_id = 'klf14_b6ntac_exp_0099_paper_figures'
# cross-platform home directory
from pathlib import Path
home = str(Path.home())
import os
import sys
sys.path.extend([os.path.join(home, 'Software/cytometer')])
# json_annotation_files_dict here needs to have the same files as in
# klf14_b6ntac_exp_0098_full_slide_size_analysis_v7.py
# SQWAT: list of annotation files
json_annotation_files_dict = {}
json_annotation_files_dict['sqwat'] = [
'KLF14-B6NTAC 36.1d PAT 99-16 C1 - 2016-02-11 11.48.31.json',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46.json',
'KLF14-B6NTAC-MAT-17.1a 44-16 C1 - 2016-02-01 11.14.17.json',
'KLF14-B6NTAC-MAT-17.1e 48-16 C1 - 2016-02-01 16.27.05.json',
'KLF14-B6NTAC-MAT-18.2a 57-16 C1 - 2016-02-03 09.10.17.json',
'KLF14-B6NTAC-PAT-37.3c 414-16 C1 - 2016-03-15 17.15.41.json',
'KLF14-B6NTAC-MAT-18.1d 53-16 C1 - 2016-02-02 14.32.03.json',
'KLF14-B6NTAC-MAT-17.2b 65-16 C1 - 2016-02-04 10.24.22.json',
'KLF14-B6NTAC-MAT-17.2g 69-16 C1 - 2016-02-04 16.15.05.json',
'KLF14-B6NTAC 37.1a PAT 106-16 C1 - 2016-02-12 16.21.00.json',
'KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06.json',
# 'KLF14-B6NTAC-PAT-37.2d 411-16 C1 - 2016-03-15 12.42.26.json',
'KLF14-B6NTAC-MAT-17.2a 64-16 C1 - 2016-02-04 09.17.52.json',
'KLF14-B6NTAC-MAT-16.2f 216-16 C1 - 2016-02-18 10.28.27.json',
'KLF14-B6NTAC-MAT-17.1d 47-16 C1 - 2016-02-01 15.25.53.json',
'KLF14-B6NTAC-MAT-16.2e 215-16 C1 - 2016-02-18 09.19.26.json',
'KLF14-B6NTAC 36.1g PAT 102-16 C1 - 2016-02-11 17.20.14.json',
'KLF14-B6NTAC-37.1g PAT 112-16 C1 - 2016-02-16 13.33.09.json',
'KLF14-B6NTAC-38.1e PAT 94-16 C1 - 2016-02-10 12.13.10.json',
'KLF14-B6NTAC-MAT-18.2d 60-16 C1 - 2016-02-03 13.13.57.json',
'KLF14-B6NTAC-MAT-18.2g 63-16 C1 - 2016-02-03 16.58.52.json',
'KLF14-B6NTAC-MAT-18.2f 62-16 C1 - 2016-02-03 15.46.15.json',
'KLF14-B6NTAC-MAT-18.1b 51-16 C1 - 2016-02-02 09.59.16.json',
'KLF14-B6NTAC-MAT-19.2c 220-16 C1 - 2016-02-18 17.03.38.json',
'KLF14-B6NTAC-MAT-18.1f 55-16 C1 - 2016-02-02 16.14.30.json',
'KLF14-B6NTAC-PAT-36.3b 412-16 C1 - 2016-03-15 14.37.55.json',
'KLF14-B6NTAC-MAT-16.2c 213-16 C1 - 2016-02-17 14.51.18.json',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32.json',
'KLF14-B6NTAC 36.1e PAT 100-16 C1 - 2016-02-11 14.06.56.json',
'KLF14-B6NTAC-MAT-18.1c 52-16 C1 - 2016-02-02 12.26.58.json',
'KLF14-B6NTAC-MAT-18.2b 58-16 C1 - 2016-02-03 11.10.52.json',
'KLF14-B6NTAC-36.1a PAT 96-16 C1 - 2016-02-10 16.12.38.json',
'KLF14-B6NTAC-PAT-39.2d 454-16 C1 - 2016-03-17 14.33.38.json',
'KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00.json',
'KLF14-B6NTAC-MAT-18.2e 61-16 C1 - 2016-02-03 14.19.35.json',
'KLF14-B6NTAC-MAT-19.2g 222-16 C1 - 2016-02-25 15.13.00.json',
'KLF14-B6NTAC-PAT-37.2a 406-16 C1 - 2016-03-14 12.01.56.json',
'KLF14-B6NTAC 36.1j PAT 105-16 C1 - 2016-02-12 14.33.33.json',
'KLF14-B6NTAC-37.1b PAT 107-16 C1 - 2016-02-15 11.43.31.json',
'KLF14-B6NTAC-MAT-17.1c 46-16 C1 - 2016-02-01 14.02.04.json',
'KLF14-B6NTAC-MAT-19.2f 217-16 C1 - 2016-02-18 11.48.16.json',
'KLF14-B6NTAC-MAT-17.2d 67-16 C1 - 2016-02-04 12.34.32.json',
'KLF14-B6NTAC-MAT-18.3c 218-16 C1 - 2016-02-18 13.12.09.json',
'KLF14-B6NTAC-PAT-37.3a 413-16 C1 - 2016-03-15 15.54.12.json',
'KLF14-B6NTAC-MAT-19.1a 56-16 C1 - 2016-02-02 17.23.31.json',
'KLF14-B6NTAC-37.1h PAT 113-16 C1 - 2016-02-16 15.14.09.json',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53.json',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52.json',
'KLF14-B6NTAC-37.1e PAT 110-16 C1 - 2016-02-15 17.33.11.json',
'KLF14-B6NTAC-MAT-17.2f 68-16 C1 - 2016-02-04 15.05.54.json',
'KLF14-B6NTAC 36.1h PAT 103-16 C1 - 2016-02-12 10.15.22.json',
# 'KLF14-B6NTAC-PAT-39.1h 453-16 C1 - 2016-03-17 11.38.04.json',
'KLF14-B6NTAC-MAT-16.2b 212-16 C1 - 2016-02-17 12.49.00.json',
'KLF14-B6NTAC-MAT-17.1f 49-16 C1 - 2016-02-01 17.51.46.json',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11.json',
'KLF14-B6NTAC-MAT-16.2a 211-16 C1 - 2016-02-17 11.46.42.json',
'KLF14-B6NTAC-38.1f PAT 95-16 C1 - 2016-02-10 14.41.44.json',
'KLF14-B6NTAC-PAT-36.3a 409-16 C1 - 2016-03-15 10.18.46.json',
'KLF14-B6NTAC-MAT-19.2b 219-16 C1 - 2016-02-18 15.41.38.json',
'KLF14-B6NTAC-MAT-17.1b 45-16 C1 - 2016-02-01 12.23.50.json',
'KLF14-B6NTAC 36.1f PAT 101-16 C1 - 2016-02-11 15.23.06.json',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33.json',
'KLF14-B6NTAC-37.1d PAT 109-16 C1 - 2016-02-15 15.19.08.json',
'KLF14-B6NTAC-MAT-18.2c 59-16 C1 - 2016-02-03 11.56.52.json',
'KLF14-B6NTAC-PAT-37.2f 405-16 C1 - 2016-03-14 10.58.34.json',
'KLF14-B6NTAC-PAT-37.2e 408-16 C1 - 2016-03-14 16.23.30.json',
'KLF14-B6NTAC-MAT-19.2e 221-16 C1 - 2016-02-25 14.00.14.json',
# 'KLF14-B6NTAC-PAT-37.2c 407-16 C1 - 2016-03-14 14.13.54.json',
# 'KLF14-B6NTAC-PAT-37.2b 410-16 C1 - 2016-03-15 11.24.20.json',
'KLF14-B6NTAC-PAT-37.4b 419-16 C1 - 2016-03-17 10.22.54.json',
'KLF14-B6NTAC-37.1c PAT 108-16 C1 - 2016-02-15 14.49.45.json',
'KLF14-B6NTAC-MAT-18.1a 50-16 C1 - 2016-02-02 09.12.41.json',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38.json',
'KLF14-B6NTAC-PAT-37.2h 418-16 C1 - 2016-03-16 17.01.17.json',
'KLF14-B6NTAC-MAT-17.2c 66-16 C1 - 2016-02-04 11.46.39.json',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52.json',
'KLF14-B6NTAC-37.1f PAT 111-16 C2 - 2016-02-16 11.26 (1).json',
'KLF14-B6NTAC-PAT 37.2b 410-16 C4 - 2020-02-14 10.27.23.json',
'KLF14-B6NTAC-PAT 37.2c 407-16 C4 - 2020-02-14 10.15.57.json',
# 'KLF14-B6NTAC-PAT 37.2d 411-16 C4 - 2020-02-14 10.34.10.json'
]
# GWAT: list of annotation files
json_annotation_files_dict['gwat'] = [
'KLF14-B6NTAC-36.1a PAT 96-16 B1 - 2016-02-10 15.32.31.json',
'KLF14-B6NTAC-36.1b PAT 97-16 B1 - 2016-02-10 17.15.16.json',
'KLF14-B6NTAC-36.1c PAT 98-16 B1 - 2016-02-10 18.32.40.json',
'KLF14-B6NTAC 36.1d PAT 99-16 B1 - 2016-02-11 11.29.55.json',
'KLF14-B6NTAC 36.1e PAT 100-16 B1 - 2016-02-11 12.51.11.json',
'KLF14-B6NTAC 36.1f PAT 101-16 B1 - 2016-02-11 14.57.03.json',
'KLF14-B6NTAC 36.1g PAT 102-16 B1 - 2016-02-11 16.12.01.json',
'KLF14-B6NTAC 36.1h PAT 103-16 B1 - 2016-02-12 09.51.08.json',
# 'KLF14-B6NTAC 36.1i PAT 104-16 B1 - 2016-02-12 11.37.56.json',
'KLF14-B6NTAC 36.1j PAT 105-16 B1 - 2016-02-12 14.08.19.json',
'KLF14-B6NTAC 37.1a PAT 106-16 B1 - 2016-02-12 15.33.02.json',
'KLF14-B6NTAC-37.1b PAT 107-16 B1 - 2016-02-15 11.25.20.json',
'KLF14-B6NTAC-37.1c PAT 108-16 B1 - 2016-02-15 12.33.10.json',
'KLF14-B6NTAC-37.1d PAT 109-16 B1 - 2016-02-15 15.03.44.json',
'KLF14-B6NTAC-37.1e PAT 110-16 B1 - 2016-02-15 16.16.06.json',
'KLF14-B6NTAC-37.1g PAT 112-16 B1 - 2016-02-16 12.02.07.json',
'KLF14-B6NTAC-37.1h PAT 113-16 B1 - 2016-02-16 14.53.02.json',
'KLF14-B6NTAC-38.1e PAT 94-16 B1 - 2016-02-10 11.35.53.json',
'KLF14-B6NTAC-38.1f PAT 95-16 B1 - 2016-02-10 14.16.55.json',
'KLF14-B6NTAC-MAT-16.2a 211-16 B1 - 2016-02-17 11.21.54.json',
'KLF14-B6NTAC-MAT-16.2b 212-16 B1 - 2016-02-17 12.33.18.json',
'KLF14-B6NTAC-MAT-16.2c 213-16 B1 - 2016-02-17 14.01.06.json',
'KLF14-B6NTAC-MAT-16.2d 214-16 B1 - 2016-02-17 15.43.57.json',
'KLF14-B6NTAC-MAT-16.2e 215-16 B1 - 2016-02-17 17.14.16.json',
'KLF14-B6NTAC-MAT-16.2f 216-16 B1 - 2016-02-18 10.05.52.json',
# 'KLF14-B6NTAC-MAT-17.1a 44-16 B1 - 2016-02-01 09.19.20.json',
'KLF14-B6NTAC-MAT-17.1b 45-16 B1 - 2016-02-01 12.05.15.json',
'KLF14-B6NTAC-MAT-17.1c 46-16 B1 - 2016-02-01 13.01.30.json',
'KLF14-B6NTAC-MAT-17.1d 47-16 B1 - 2016-02-01 15.11.42.json',
'KLF14-B6NTAC-MAT-17.1e 48-16 B1 - 2016-02-01 16.01.09.json',
'KLF14-B6NTAC-MAT-17.1f 49-16 B1 - 2016-02-01 17.12.31.json',
'KLF14-B6NTAC-MAT-17.2a 64-16 B1 - 2016-02-04 08.57.34.json',
'KLF14-B6NTAC-MAT-17.2b 65-16 B1 - 2016-02-04 10.06.00.json',
'KLF14-B6NTAC-MAT-17.2c 66-16 B1 - 2016-02-04 11.14.28.json',
'KLF14-B6NTAC-MAT-17.2d 67-16 B1 - 2016-02-04 12.20.20.json',
'KLF14-B6NTAC-MAT-17.2f 68-16 B1 - 2016-02-04 14.01.40.json',
'KLF14-B6NTAC-MAT-17.2g 69-16 B1 - 2016-02-04 15.52.52.json',
'KLF14-B6NTAC-MAT-18.1a 50-16 B1 - 2016-02-02 08.49.06.json',
'KLF14-B6NTAC-MAT-18.1b 51-16 B1 - 2016-02-02 09.46.31.json',
'KLF14-B6NTAC-MAT-18.1c 52-16 B1 - 2016-02-02 11.24.31.json',
'KLF14-B6NTAC-MAT-18.1d 53-16 B1 - 2016-02-02 14.11.37.json',
# 'KLF14-B6NTAC-MAT-18.1e 54-16 B1 - 2016-02-02 15.06.05.json',
'KLF14-B6NTAC-MAT-18.2a 57-16 B1 - 2016-02-03 08.54.27.json',
'KLF14-B6NTAC-MAT-18.2b 58-16 B1 - 2016-02-03 09.58.06.json',
'KLF14-B6NTAC-MAT-18.2c 59-16 B1 - 2016-02-03 11.41.32.json',
'KLF14-B6NTAC-MAT-18.2d 60-16 B1 - 2016-02-03 12.56.49.json',
'KLF14-B6NTAC-MAT-18.2e 61-16 B1 - 2016-02-03 14.02.25.json',
'KLF14-B6NTAC-MAT-18.2f 62-16 B1 - 2016-02-03 15.00.17.json',
'KLF14-B6NTAC-MAT-18.2g 63-16 B1 - 2016-02-03 16.40.37.json',
'KLF14-B6NTAC-MAT-18.3b 223-16 B1 - 2016-02-25 16.53.42.json',
'KLF14-B6NTAC-MAT-18.3c 218-16 B1 - 2016-02-18 12.51.46.json',
'KLF14-B6NTAC-MAT-18.3d 224-16 B1 - 2016-02-26 10.48.56.json',
'KLF14-B6NTAC-MAT-19.1a 56-16 B1 - 2016-02-02 16.57.46.json',
'KLF14-B6NTAC-MAT-19.2b 219-16 B1 - 2016-02-18 14.21.50.json',
'KLF14-B6NTAC-MAT-19.2c 220-16 B1 - 2016-02-18 16.40.48.json',
'KLF14-B6NTAC-MAT-19.2e 221-16 B1 - 2016-02-25 13.15.27.json',
'KLF14-B6NTAC-MAT-19.2f 217-16 B1 - 2016-02-18 11.23.22.json',
'KLF14-B6NTAC-MAT-19.2g 222-16 B1 - 2016-02-25 14.51.57.json',
'KLF14-B6NTAC-PAT-36.3a 409-16 B1 - 2016-03-15 09.24.54.json',
'KLF14-B6NTAC-PAT-36.3b 412-16 B1 - 2016-03-15 14.11.47.json',
'KLF14-B6NTAC-PAT-36.3d 416-16 B1 - 2016-03-16 14.22.04.json',
# 'KLF14-B6NTAC-PAT-37.2a 406-16 B1 - 2016-03-14 11.46.47.json',
'KLF14-B6NTAC-PAT-37.2b 410-16 B1 - 2016-03-15 11.12.01.json',
'KLF14-B6NTAC-PAT-37.2c 407-16 B1 - 2016-03-14 12.54.55.json',
'KLF14-B6NTAC-PAT-37.2d 411-16 B1 - 2016-03-15 12.01.13.json',
'KLF14-B6NTAC-PAT-37.2e 408-16 B1 - 2016-03-14 16.06.43.json',
'KLF14-B6NTAC-PAT-37.2f 405-16 B1 - 2016-03-14 09.49.45.json',
'KLF14-B6NTAC-PAT-37.2g 415-16 B1 - 2016-03-16 11.04.45.json',
'KLF14-B6NTAC-PAT-37.2h 418-16 B1 - 2016-03-16 16.42.16.json',
'KLF14-B6NTAC-PAT-37.3a 413-16 B1 - 2016-03-15 15.31.26.json',
'KLF14-B6NTAC-PAT-37.3c 414-16 B1 - 2016-03-15 16.49.22.json',
'KLF14-B6NTAC-PAT-37.4a 417-16 B1 - 2016-03-16 15.25.38.json',
'KLF14-B6NTAC-PAT-37.4b 419-16 B1 - 2016-03-17 09.10.42.json',
'KLF14-B6NTAC-PAT-38.1a 90-16 B1 - 2016-02-04 17.27.42.json',
'KLF14-B6NTAC-PAT-39.1h 453-16 B1 - 2016-03-17 11.15.50.json',
'KLF14-B6NTAC-PAT-39.2d 454-16 B1 - 2016-03-17 12.16.06.json'
]
########################################################################################################################
## Explore training/test data of different folds
########################################################################################################################
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
os.environ['KERAS_BACKEND'] = 'tensorflow'
import keras.backend as K
import cytometer
import cytometer.data
import tensorflow as tf
# limit number of GPUs
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
LIMIT_GPU_MEMORY = False
# limit GPU memory used
if LIMIT_GPU_MEMORY:
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.9
set_session(tf.Session(config=config))
# specify data format as (n, row, col, channel)
K.set_image_data_format('channels_last')
DEBUG = False
'''Directories and filenames'''
# data paths
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
klf14_training_dir = os.path.join(klf14_root_data_dir, 'klf14_b6ntac_training')
klf14_training_data_dir = os.path.join(klf14_root_data_dir, 'klf14_b6ntac_training')
klf14_training_non_overlap_data_dir = os.path.join(klf14_root_data_dir, 'klf14_b6ntac_training_non_overlap')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper/figures')
saved_models_dir = os.path.join(klf14_root_data_dir, 'saved_models')
ndpi_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
metainfo_dir = os.path.join(home, 'Data/cytometer_data/klf14')
saved_kfolds_filename = 'klf14_b6ntac_exp_0079_generate_kfolds.pickle'
saved_extra_kfolds_filename = 'klf14_b6ntac_exp_0094_generate_extra_training_images.pickle'
# original dataset used in pipelines up to v6 + extra "other" tissue images
kfold_filename = os.path.join(saved_models_dir, saved_extra_kfolds_filename)
with open(kfold_filename, 'rb') as f:
aux = pickle.load(f)
file_svg_list = aux['file_list']
idx_test_all = aux['idx_test']
idx_train_all = aux['idx_train']
# correct home directory
file_svg_list = [x.replace('/users/rittscher/rcasero', home) for x in file_svg_list]
file_svg_list = [x.replace('/home/rcasero', home) for x in file_svg_list]
# number of images
n_im = len(file_svg_list)
# CSV file with metainformation of all mice
metainfo_csv_file = os.path.join(metainfo_dir, 'klf14_b6ntac_meta_info.csv')
metainfo = pd.read_csv(metainfo_csv_file)
# HACK: If file_svg_list_extra is used above, this block will not work but you don't need it
# for the loop before that calculates the rows of Table MICE with the breakdown of
# cells/other/background objects by mouse
#
# loop the folds to get the ndpi files that correspond to testing of each fold,
ndpi_files_test_list = {}
for i_fold in range(len(idx_test_all)):
# list of .svg files for testing
file_svg_test = np.array(file_svg_list)[idx_test_all[i_fold]]
# list of .ndpi files that the .svg windows came from
file_ndpi_test = [os.path.basename(x).replace('.svg', '') for x in file_svg_test]
file_ndpi_test = np.unique([x.split('_row')[0] for x in file_ndpi_test])
# add to the dictionary {file: fold}
for file in file_ndpi_test:
ndpi_files_test_list[file] = i_fold
if DEBUG:
# list of NDPI files
for key in ndpi_files_test_list.keys():
print(key)
# init dataframe to aggregate training numbers of each mouse
table = pd.DataFrame(columns=['Cells', 'Other', 'Background', 'Windows', 'Windows with cells'])
# loop files with hand traced contours
for i, file_svg in enumerate(file_svg_list):
print('file ' + str(i) + '/' + str(len(file_svg_list) - 1) + ': ' + os.path.basename(file_svg))
# read the ground truth cell contours in the SVG file. This produces a list [contour_0, ..., contour_N-1]
# where each contour_i = [(X_0, Y_0), ..., (X_P-1, X_P-1)]
cell_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Cell', add_offset_from_filename=False,
minimum_npoints=3)
other_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Other', add_offset_from_filename=False,
minimum_npoints=3)
brown_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Brown', add_offset_from_filename=False,
minimum_npoints=3)
background_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Background', add_offset_from_filename=False,
minimum_npoints=3)
contours = cell_contours + other_contours + brown_contours + background_contours
# make a list with the type of cell each contour is classified as
contour_type = [np.zeros(shape=(len(cell_contours),), dtype=np.uint8), # 0: white-adipocyte
np.ones(shape=(len(other_contours),), dtype=np.uint8), # 1: other types of tissue
np.ones(shape=(len(brown_contours),), dtype=np.uint8), # 1: brown cells (treated as "other" tissue)
np.zeros(shape=(len(background_contours),), dtype=np.uint8)] # 0: background
contour_type = np.concatenate(contour_type)
print('Cells: ' + str(len(cell_contours)) + '. Other: ' + str(len(other_contours))
+ '. Brown: ' + str(len(brown_contours)) + '. Background: ' + str(len(background_contours)))
# create dataframe for this image
df_common = cytometer.data.tag_values_with_mouse_info(metainfo=metainfo, s=os.path.basename(file_svg),
values=[i,], values_tag='i',
tags_to_keep=['id', 'ko_parent', 'sex'])
# mouse ID as a string
id = df_common['id'].values[0]
sex = df_common['sex'].values[0]
ko = df_common['ko_parent'].values[0]
# row to add to the table
df = pd.DataFrame(
[(sex, ko,
len(cell_contours), len(other_contours) + len(brown_contours), len(background_contours), 1, int(len(cell_contours)>0))],
columns=['Sex', 'Genotype', 'Cells', 'Other', 'Background', 'Windows', 'Windows with cells'], index=[id])
if id in table.index:
num_cols = ['Cells', 'Other', 'Background', 'Windows', 'Windows with cells']
table.loc[id, num_cols] = (table.loc[id, num_cols] + df.loc[id, num_cols])
else:
table = table.append(df, sort=False, ignore_index=False, verify_integrity=True)
# alphabetical order by mouse IDs
table = table.sort_index()
# total number of sampled windows
print('Total number of windows = ' + str(np.sum(table['Windows'])))
print('Total number of windows with cells = ' + str(np.sum(table['Windows with cells'])))
# total number of "Other" and background areas
print('Total number of Other areas = ' + str(np.sum(table['Other'])))
print('Total number of Background areas = ' + str(np.sum(table['Background'])))
# aggregate by sex and genotype
idx_f = table['Sex'] == 'f'
idx_m = table['Sex'] == 'm'
idx_pat = table['Genotype'] == 'PAT'
idx_mat = table['Genotype'] == 'MAT'
print('f PAT: ' + str(np.sum(table.loc[idx_f * idx_pat, 'Cells'])))
print('f MAT: ' + str(np.sum(table.loc[idx_f * idx_mat, 'Cells'])))
print('m PAT: ' + str(np.sum(table.loc[idx_m * idx_pat, 'Cells'])))
print('m MAT: ' + str(np.sum(table.loc[idx_m * idx_mat, 'Cells'])))
# find folds that test images belong to
for i_file, ndpi_file_kernel in enumerate(ndpi_files_test_list):
# fold where the current .ndpi image was not used for training
i_fold = ndpi_files_test_list[ndpi_file_kernel]
print('File ' + str(i_file) + '/' + str(len(ndpi_files_test_list) - 1) + ': ' + ndpi_file_kernel
+ '. Fold = ' + str(i_fold))
# mean and std of mouse weight
weight_f_mat = [22.07, 26.39, 30.65, 24.28, 27.72]
weight_f_pat = [31.42, 29.25, 27.18, 23.69, 21.20]
weight_m_mat = [46.19, 40.87, 40.02, 41.98, 34.52, 36.08]
weight_m_pat = [36.55, 40.77, 36.98, 36.11]
print('f MAT: mean = ' + str(np.mean(weight_f_mat)) + ', std = ' + str(np.std(weight_f_mat)))
print('f PAT: mean = ' + str(np.mean(weight_f_pat)) + ', std = ' + str(np.std(weight_f_pat)))
print('m MAT: mean = ' + str(np.mean(weight_m_mat)) + ', std = ' + str(np.std(weight_m_mat)))
print('m PAT: mean = ' + str(np.mean(weight_m_pat)) + ', std = ' + str(np.std(weight_m_pat)))
########################################################################################################################
## Statistics of hand traced white adipocytes
########################################################################################################################
import shapely
import cytometer.utils
import scipy
rectangle_sides_ratios = []
areas = []
perimeters = []
sphericities = []
# loop files with hand traced contours
for i, file_svg in enumerate(file_svg_list):
print('file ' + str(i) + '/' + str(len(file_svg_list) - 1) + ': ' + os.path.basename(file_svg))
# read the ground truth cell contours in the SVG file. This produces a list [contour_0, ..., contour_N-1]
# where each contour_i = [(X_0, Y_0), ..., (X_{P-1}, Y_{P-1})]
cell_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Cell', add_offset_from_filename=False,
minimum_npoints=3)
# compute contour properties
for j, cell_contour in enumerate(cell_contours):
poly_cell = shapely.geometry.Polygon(cell_contour)
x, y = poly_cell.minimum_rotated_rectangle.exterior.coords.xy
edge_length = (shapely.geometry.Point(x[0], y[0]).distance(shapely.geometry.Point(x[1], y[1])),
shapely.geometry.Point(x[1], y[1]).distance(shapely.geometry.Point(x[2], y[2])))
rectangle_sides_ratio = np.max(edge_length) / np.min(edge_length)
area = poly_cell.area
perimeter = poly_cell.length
inv_compactness = poly_cell.length ** 2 / (4 * np.pi * area)
sphericity = cytometer.utils.sphericity(poly_cell)
# if j == 58:
# raise ValueError('foo: ' + str(j))
# if (inv_compactness) < 2.2 and (inv_compactness) > 2.15:
# raise ValueError('foo: ' + str(j))
rectangle_sides_ratios.append(rectangle_sides_ratio)
areas.append(area)
perimeters.append(perimeter)
sphericities.append(sphericity)
# compactness measure
inv_compactnesses = list(np.array(perimeters)**2 / (4 * np.pi * np.array(areas)))
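# Sanity-check sketch (illustrative only, not part of the analysis): for a circle of radius r,
# perimeter^2 / (4 * pi * area) = (2*pi*r)^2 / (4 * pi * pi * r^2) = 1, so the inverse
# compactness is 1 for a circle and grows for elongated or irregular contours
if DEBUG:
    theta = np.linspace(0, 2 * np.pi, 1000, endpoint=False)
    circle = shapely.geometry.Polygon(np.stack((np.cos(theta), np.sin(theta)), axis=1))
    print('inv_compactness of a unit circle (should be ~1): '
          + str(circle.length ** 2 / (4 * np.pi * circle.area)))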
print('Max rectangle sides ratio: ' + str(np.max(rectangle_sides_ratios)))
print('Min area: ' + str(np.min(areas)))
print('Max area: ' + str(np.max(areas)))
print('Min perimeter: ' + str(np.min(perimeters)))
print('Max perimeter: ' + str(np.max(perimeters)))
print('Min sphericity: ' + str(np.min(sphericities)))
print('Max sphericity: ' + str(np.max(sphericities)))
print('Min inv_compactness: ' + str(np.min(inv_compactnesses)))
print('Max inv_compactness: ' + str(np.max(inv_compactnesses)))
if DEBUG:
plt.clf()
plt.hist(rectangle_sides_ratios, bins=100)
plt.clf()
plt.boxplot(rectangle_sides_ratios)
plt.ylabel('Rectangle sides ratio')
q = scipy.stats.mstats.hdquantiles(rectangle_sides_ratios, prob=[0.98], axis=0)
plt.plot([0.75, 1.25], [q, q], 'r', linewidth=2)
plt.xlim(0.5, 1.5)
plt.legend(['$Q_{98\%}$ = ' + '%.2f' %q])
plt.clf()
plt.boxplot(areas)
plt.ylabel('Pixel$^2$')
q = scipy.stats.mstats.hdquantiles(areas, prob=[0.98], axis=0)
plt.plot([0.75, 1.25], [q, q], 'r', linewidth=2)
plt.xlim(0.5, 1.5)
plt.legend(['$Q_{98\%}$ = ' + '%.0f' %q])
plt.clf()
plt.boxplot(inv_compactnesses)
plt.ylabel('Compactness$^{-1}$ ratio')
q = scipy.stats.mstats.hdquantiles(inv_compactnesses, prob=[0.98], axis=0)
plt.plot([0.75, 1.25], [q, q], 'r', linewidth=2)
plt.xlim(0.5, 1.5)
plt.legend(['$Q_{98\%}$ = ' + '%.2f' %q])
plt.clf()
plt.boxplot(sphericities)
plt.ylabel('Sphericity')
q = scipy.stats.mstats.hdquantiles(sphericities, prob=[0.02], axis=0)
plt.plot([0.75, 1.25], [q, q], 'r', linewidth=2)
plt.xlim(0.5, 1.5)
plt.legend(['$Q_{2\%}$ = ' + '%.2f' %q])
########################################################################################################################
## Plots of get_next_roi_to_process(): Adaptive Block Algorithm
#
# Note: the quantitative comparison versus uniform tiling is provided in klf14_b6ntac_exp_0105_adaptive_blocks_analysis.py
########################################################################################################################
import pickle
import cytometer.utils
# Filter out INFO & WARNING messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# # limit number of GPUs
# os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
os.environ['KERAS_BACKEND'] = 'tensorflow'
import time
import openslide
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from scipy.signal import fftconvolve
from cytometer.utils import rough_foreground_mask
import PIL
from keras import backend as K
LIMIT_GPU_MEMORY = False
# limit GPU memory used
if LIMIT_GPU_MEMORY:
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.95
set_session(tf.Session(config=config))
DEBUG = False
SAVE_FIGS = False
root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
data_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
training_dir = os.path.join(home, root_data_dir, 'klf14_b6ntac_training')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper/figures')
saved_models_dir = os.path.join(root_data_dir, 'saved_models')
results_dir = os.path.join(root_data_dir, 'klf14_b6ntac_results')
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Klf14_v7/annotations')
# k-folds file
saved_extra_kfolds_filename = 'klf14_b6ntac_exp_0094_generate_extra_training_images.pickle'
# model names
dmap_model_basename = 'klf14_b6ntac_exp_0086_cnn_dmap'
contour_model_basename = 'klf14_b6ntac_exp_0091_cnn_contour_after_dmap'
classifier_model_basename = 'klf14_b6ntac_exp_0095_cnn_tissue_classifier_fcn'
correction_model_basename = 'klf14_b6ntac_exp_0089_cnn_segmentation_correction_overlapping_scaled_contours'
# full resolution image window and network expected receptive field parameters
fullres_box_size = np.array([2751, 2751])
receptive_field = np.array([131, 131])
# rough_foreground_mask() parameters
downsample_factor = 8.0
dilation_size = 25
component_size_threshold = 1e6
hole_size_treshold = 8000
# contour parameters
contour_downsample_factor = 0.1
bspline_k = 1
# block_split() parameters in downsampled image
block_len = np.ceil((fullres_box_size - receptive_field) / downsample_factor)
block_overlap = np.ceil((receptive_field - 1) / 2 / downsample_factor).astype(np.int)
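# Worked example with the values above (illustrative only):
#   block_len     = ceil((2751 - 131) / 8)   = ceil(327.5) = 328 downsampled pixels
#   block_overlap = ceil((131 - 1) / 2 / 8)  = ceil(8.125) = 9 downsampled pixels
# i.e. adjacent blocks overlap by roughly half a receptive field once mapped back to the
# full resolution image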
# segmentation parameters
min_cell_area = 1500
max_cell_area = 100e3
min_mask_overlap = 0.8
phagocytosis = True
min_class_prop = 0.5
correction_window_len = 401
correction_smoothing = 11
batch_size = 16
# segmentation correction parameters
# load list of images, and indices for training vs. testing indices
saved_kfolds_filename = os.path.join(saved_models_dir, saved_extra_kfolds_filename)
with open(saved_kfolds_filename, 'rb') as f:
aux = pickle.load(f)
file_svg_list = aux['file_list']
idx_test_all = aux['idx_test']
idx_train_all = aux['idx_train']
# correct home directory
file_svg_list = [x.replace('/users/rittscher/rcasero', home) for x in file_svg_list]
file_svg_list = [x.replace('/home/rcasero', home) for x in file_svg_list]
# loop the folds to get the ndpi files that correspond to testing of each fold
ndpi_files_test_list = {}
for i_fold in range(len(idx_test_all)):
# list of .svg files for testing
file_svg_test = np.array(file_svg_list)[idx_test_all[i_fold]]
# list of .ndpi files that the .svg windows came from
file_ndpi_test = [os.path.basename(x).replace('.svg', '') for x in file_svg_test]
file_ndpi_test = np.unique([x.split('_row')[0] for x in file_ndpi_test])
# add to the dictionary {file: fold}
for file in file_ndpi_test:
ndpi_files_test_list[file] = i_fold
# File 4/19: KLF14-B6NTAC-MAT-17.1c 46-16 C1 - 2016-02-01 14.02.04. Fold = 2
i_file = 4
# File 10/19: KLF14-B6NTAC-36.1a PAT 96-16 C1 - 2016-02-10 16.12.38. Fold = 5
i_file = 10
ndpi_file_kernel = list(ndpi_files_test_list.keys())[i_file]
# for i_file, ndpi_file_kernel in enumerate(ndpi_files_test_list):
# fold where the current .ndpi image was not used for training
i_fold = ndpi_files_test_list[ndpi_file_kernel]
print('File ' + str(i_file) + '/' + str(len(ndpi_files_test_list) - 1) + ': ' + ndpi_file_kernel
+ '. Fold = ' + str(i_fold))
# make full path to ndpi file
ndpi_file = os.path.join(data_dir, ndpi_file_kernel + '.ndpi')
contour_model_file = os.path.join(saved_models_dir, contour_model_basename + '_model_fold_' + str(i_fold) + '.h5')
dmap_model_file = os.path.join(saved_models_dir, dmap_model_basename + '_model_fold_' + str(i_fold) + '.h5')
classifier_model_file = os.path.join(saved_models_dir,
classifier_model_basename + '_model_fold_' + str(i_fold) + '.h5')
correction_model_file = os.path.join(saved_models_dir,
correction_model_basename + '_model_fold_' + str(i_fold) + '.h5')
# name of file to save annotations
annotations_file = os.path.basename(ndpi_file)
annotations_file = os.path.splitext(annotations_file)[0]
annotations_file = os.path.join(annotations_dir, annotations_file + '_exp_0097.json')
# name of file to save areas and contours
results_file = os.path.basename(ndpi_file)
results_file = os.path.splitext(results_file)[0]
results_file = os.path.join(results_dir, results_file + '_exp_0097.npz')
# rough segmentation of the tissue in the image
lores_istissue0, im_downsampled = rough_foreground_mask(ndpi_file, downsample_factor=downsample_factor,
dilation_size=dilation_size,
component_size_threshold=component_size_threshold,
hole_size_treshold=hole_size_treshold,
return_im=True)
if DEBUG:
plt.clf()
plt.imshow(im_downsampled)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_histology_i_file_' + str(i_file) + '.png'),
bbox_inches='tight')
plt.clf()
plt.imshow(im_downsampled)
plt.contour(lores_istissue0, colors='k')
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_rough_mask_i_file_' + str(i_file) + '.png'),
bbox_inches='tight')
# segmentation copy, to keep track of what's left to do
lores_istissue = lores_istissue0.copy()
# open full resolution histology slide
im = openslide.OpenSlide(ndpi_file)
# pixel size
assert(im.properties['tiff.ResolutionUnit'] == 'centimeter')
xres = 1e-2 / float(im.properties['tiff.XResolution'])
yres = 1e-2 / float(im.properties['tiff.YResolution'])
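# note on units: 'tiff.XResolution'/'tiff.YResolution' are pixels per ResolutionUnit
# (centimeter, per the assert above), so 1e-2 / resolution is the pixel size in metres;
# e.g. ~22000 px/cm would correspond to ~0.45 um/pixel (illustrative value)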
# # init empty list to store area values and contour coordinates
# areas_all = []
# contours_all = []
# keep extracting histology windows until we have finished
step = -1
time_0 = time_curr = time.time()
while np.count_nonzero(lores_istissue) > 0:
# next step (it starts from 0)
step += 1
time_prev = time_curr
time_curr = time.time()
print('File ' + str(i_file) + '/' + str(len(ndpi_files_test_list) - 1) + ': step ' +
str(step) + ': ' +
str(np.count_nonzero(lores_istissue)) + '/' + str(np.count_nonzero(lores_istissue0)) + ': ' +
"{0:.1f}".format(100.0 - np.count_nonzero(lores_istissue) / np.count_nonzero(lores_istissue0) * 100) +
'% completed: ' +
'step time ' + "{0:.2f}".format(time_curr - time_prev) + ' s' +
', total time ' + "{0:.2f}".format(time_curr - time_0) + ' s')
## Code extracted from:
## get_next_roi_to_process()
# variables for get_next_roi_to_process()
seg = lores_istissue.copy()
downsample_factor = downsample_factor
max_window_size = fullres_box_size
border = np.round((receptive_field - 1) / 2)
# convert to np.array so that we can use algebraic operators
max_window_size = np.array(max_window_size)
border = np.array(border)
# convert segmentation mask to [0, 1]
seg = (seg != 0).astype('int')
# approximate measures in the downsampled image (we don't round them)
lores_max_window_size = max_window_size / downsample_factor
lores_border = border / downsample_factor
# kernels that, once flipped, correspond to the top line and the left line of the window. They are
# pre-flipped here because the convolution operation internally flips them again (the two flips cancel out)
kernel_top = np.zeros(shape=np.round(lores_max_window_size - 2 * lores_border).astype('int'))
kernel_top[int((kernel_top.shape[0] - 1) / 2), :] = 1
kernel_left = np.zeros(shape=np.round(lores_max_window_size - 2 * lores_border).astype('int'))
kernel_left[:, int((kernel_top.shape[1] - 1) / 2)] = 1
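# sketch of the idea (approximate): fftconvolve(seg, kernel_top) counts, at each pixel, the
# remaining-tissue pixels along a horizontal line centred (roughly) at that pixel, and
# fftconvolve(seg, kernel_left) does the same along a vertical line. Their product is
# non-zero only where both counts are positive, so detection_idx below lists candidate
# positions whose window still covers unprocessed tissue, and the first one (in row-major
# order) is taken as the top-left corner of the next ROI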
if DEBUG:
plt.clf()
plt.imshow(kernel_top)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_kernel_top_i_file_' + str(i_file) + '.png'),
bbox_inches='tight')
plt.imshow(kernel_left)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_kernel_left_i_file_' + str(i_file) + '.png'),
bbox_inches='tight')
seg_top = np.round(fftconvolve(seg, kernel_top, mode='same'))
seg_left = np.round(fftconvolve(seg, kernel_left, mode='same'))
# window detections
detection_idx = np.nonzero(seg_left * seg_top)
# set top-left corner of the box = top-left corner of first box detected
lores_first_row = detection_idx[0][0]
lores_first_col = detection_idx[1][0]
# first, we look within a window with the maximum size
lores_last_row = detection_idx[0][0] + lores_max_window_size[0] - 2 * lores_border[0]
lores_last_col = detection_idx[1][0] + lores_max_window_size[1] - 2 * lores_border[1]
# second, if the segmentation is smaller than the window, we reduce the window size
window = seg[lores_first_row:int(np.round(lores_last_row)), lores_first_col:int(np.round(lores_last_col))]
idx = np.any(window, axis=1) # reduce rows size
last_segmented_pixel_len = np.max(np.where(idx))
lores_last_row = detection_idx[0][0] + np.min((lores_max_window_size[0] - 2 * lores_border[0],
last_segmented_pixel_len))
idx = np.any(window, axis=0) # reduce cols size
last_segmented_pixel_len = np.max(np.where(idx))
lores_last_col = detection_idx[1][0] + np.min((lores_max_window_size[1] - 2 * lores_border[1],
last_segmented_pixel_len))
# save coordinates for plot (this is only for a figure in the paper and doesn't need to be done in the real
# implementation)
lores_first_col_bak = lores_first_col
lores_first_row_bak = lores_first_row
lores_last_col_bak = lores_last_col
lores_last_row_bak = lores_last_row
# add a border around the window
lores_first_row = np.max([0, lores_first_row - lores_border[0]])
lores_first_col = np.max([0, lores_first_col - lores_border[1]])
lores_last_row = np.min([seg.shape[0], lores_last_row + lores_border[0]])
lores_last_col = np.min([seg.shape[1], lores_last_col + lores_border[1]])
# convert low resolution indices to high resolution
first_row = np.int(np.round(lores_first_row * downsample_factor))
last_row = np.int(np.round(lores_last_row * downsample_factor))
first_col = np.int(np.round(lores_first_col * downsample_factor))
last_col = np.int(np.round(lores_last_col * downsample_factor))
# round down indices in downsampled segmentation
lores_first_row = int(lores_first_row)
lores_last_row = int(lores_last_row)
lores_first_col = int(lores_first_col)
lores_last_col = int(lores_last_col)
# load window from full resolution slide
tile = im.read_region(location=(first_col, first_row), level=0,
size=(last_col - first_col, last_row - first_row))
tile = np.array(tile)
tile = tile[:, :, 0:3]
# interpolate coarse tissue segmentation to full resolution
istissue_tile = lores_istissue[lores_first_row:lores_last_row, lores_first_col:lores_last_col]
istissue_tile = cytometer.utils.resize(istissue_tile, size=(last_col - first_col, last_row - first_row),
resample=PIL.Image.NEAREST)
if DEBUG:
plt.clf()
plt.imshow(tile)
plt.imshow(istissue_tile, alpha=0.5)
plt.contour(istissue_tile, colors='k')
plt.title('Yellow: Tissue. Purple: Background')
plt.axis('off')
# clear keras session to prevent each segmentation iteration from getting slower. Note that this forces us to
# reload the models every time
K.clear_session()
# segment histology, split into individual objects, and apply segmentation correction
labels, labels_class, todo_edge, \
window_im, window_labels, window_labels_corrected, window_labels_class, index_list, scaling_factor_list \
= cytometer.utils.segmentation_pipeline6(tile,
dmap_model=dmap_model_file,
contour_model=contour_model_file,
correction_model=correction_model_file,
classifier_model=classifier_model_file,
min_cell_area=min_cell_area,
mask=istissue_tile,
min_mask_overlap=min_mask_overlap,
phagocytosis=phagocytosis,
min_class_prop=min_class_prop,
correction_window_len=correction_window_len,
correction_smoothing=correction_smoothing,
return_bbox=True, return_bbox_coordinates='xy',
batch_size=batch_size)
# downsample "to do" mask so that the rough tissue segmentation can be updated
lores_todo_edge = PIL.Image.fromarray(todo_edge.astype(np.uint8))
lores_todo_edge = lores_todo_edge.resize((lores_last_col - lores_first_col,
lores_last_row - lores_first_row),
resample=PIL.Image.NEAREST)
lores_todo_edge = np.array(lores_todo_edge)
# update coarse tissue mask (this is only necessary here to plot figures for the paper. In the actual code,
# the coarse mask gets directly updated, without this intermediate step)
seg_updated = seg.copy()
seg_updated[lores_first_row:lores_last_row, lores_first_col:lores_last_col] = lores_todo_edge
if DEBUG:
plt.clf()
fig = plt.imshow(seg, cmap='Greys')
plt.contour(seg_left * seg_top > 0, colors='r')
rect = Rectangle((lores_first_col, lores_first_row),
lores_last_col - lores_first_col, lores_last_row - lores_first_row,
alpha=0.5, facecolor='g', edgecolor='g', zorder=2)
fig.axes.add_patch(rect)
rect2 = Rectangle((lores_first_col_bak, lores_first_row_bak),
lores_last_col_bak - lores_first_col_bak, lores_last_row_bak - lores_first_row_bak,
alpha=1.0, facecolor=None, fill=False, edgecolor='g', lw=1, zorder=3)
fig.axes.add_patch(rect2)
plt.scatter(detection_idx[1][0], detection_idx[0][0], color='k', s=5, zorder=3)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_fftconvolve_i_file_' + str(i_file) +
'_step_' + str(step) + '.png'),
bbox_inches='tight')
if DEBUG:
plt.clf()
fig = plt.imshow(seg, cmap='Greys')
plt.contour(seg_left * seg_top > 0, colors='r')
plt.contour(seg_updated, colors='w', zorder=4)
rect = Rectangle((lores_first_col, lores_first_row),
lores_last_col - lores_first_col, lores_last_row - lores_first_row,
alpha=0.5, facecolor='g', edgecolor='g', zorder=2)
fig.axes.add_patch(rect)
rect2 = Rectangle((lores_first_col_bak, lores_first_row_bak),
lores_last_col_bak - lores_first_col_bak, lores_last_row_bak - lores_first_row_bak,
alpha=1.0, facecolor=None, fill=False, edgecolor='g', lw=3, zorder=3)
fig.axes.add_patch(rect2)
plt.scatter(detection_idx[1][0], detection_idx[0][0], color='k', s=5, zorder=3)
plt.axis('off')
plt.tight_layout()
plt.xlim(int(lores_first_col - 50), int(lores_last_col + 50))
plt.ylim(int(lores_last_row + 50), int(lores_first_row - 50))
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_fftconvolve_detail_i_file_' + str(i_file) +
'_step_' + str(step) + '.png'),
bbox_inches='tight')
# update coarse tissue mask for next iteration
lores_istissue[lores_first_row:lores_last_row, lores_first_col:lores_last_col] = lores_todo_edge
########################################################################################################################
## Show examples of what each deep CNN does (code cannibalised from the "inspect" scripts of the networks)
########################################################################################################################
import pickle
import warnings
# other imports
import numpy as np
import cv2
import matplotlib.pyplot as plt
os.environ['KERAS_BACKEND'] = 'tensorflow'
import keras
import keras.backend as K
import cytometer.data
import cytometer.utils
import cytometer.model_checkpoint_parallel
import tensorflow as tf
import skimage
from PIL import Image, ImageDraw
import math
LIMIT_GPU_MEMORY = False
# limit GPU memory used
if LIMIT_GPU_MEMORY:
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.9
set_session(tf.Session(config=config))
# specify data format as (n, row, col, channel)
K.set_image_data_format('channels_last')
DEBUG = False
'''Directories and filenames'''
# data paths
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
klf14_training_dir = os.path.join(klf14_root_data_dir, 'klf14_b6ntac_training')
klf14_training_non_overlap_data_dir = os.path.join(klf14_root_data_dir, 'klf14_b6ntac_training_non_overlap')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper/figures')
saved_models_dir = os.path.join(klf14_root_data_dir, 'saved_models')
saved_kfolds_filename = 'klf14_b6ntac_exp_0079_generate_kfolds.pickle'
# model names
dmap_model_basename = 'klf14_b6ntac_exp_0086_cnn_dmap'
contour_model_basename = 'klf14_b6ntac_exp_0091_cnn_contour_after_dmap'
classifier_model_basename = 'klf14_b6ntac_exp_0095_cnn_tissue_classifier_fcn'
correction_model_basename = 'klf14_b6ntac_exp_0089_cnn_segmentation_correction_overlapping_scaled_contours'
'''Load folds'''
# load list of images, and indices for training vs. testing indices
contour_model_kfold_filename = os.path.join(saved_models_dir, saved_kfolds_filename)
with open(contour_model_kfold_filename, 'rb') as f:
aux = pickle.load(f)
svg_file_list = aux['file_list']
idx_test_all = aux['idx_test']
idx_train_all = aux['idx_train']
if DEBUG:
for i, file in enumerate(svg_file_list):
print(str(i) + ': ' + file)
# correct home directory
svg_file_list = [x.replace('/home/rcasero', home) for x in svg_file_list]
# KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06_row_017204_col_019444.tif (fold 5 for testing. No .svg)
# KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06_row_009644_col_061660.tif (fold 5 for testing. No .svg)
# KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00_row_019228_col_015060.svg (fold 7 for testing. With .svg)
# find which fold the testing image belongs to
np.where(['36.1c' in x for x in svg_file_list])
idx_test_all[7]
# TIFF files that correspond to the SVG files (without augmentation)
im_orig_file_list = []
for i, file in enumerate(svg_file_list):
im_orig_file_list.append(file.replace('.svg', '.tif'))
im_orig_file_list[i] = os.path.join(os.path.dirname(im_orig_file_list[i]) + '_augmented',
'im_seed_nan_' + os.path.basename(im_orig_file_list[i]))
# check that files exist
if not os.path.isfile(file):
# warnings.warn('i = ' + str(i) + ': File does not exist: ' + os.path.basename(file))
warnings.warn('i = ' + str(i) + ': File does not exist: ' + file)
if not os.path.isfile(im_orig_file_list[i]):
# warnings.warn('i = ' + str(i) + ': File does not exist: ' + os.path.basename(im_orig_file_list[i]))
warnings.warn('i = ' + str(i) + ': File does not exist: ' + im_orig_file_list[i])
'''Inspect model results'''
# for i_fold, idx_test in enumerate(idx_test_all):
i_fold = 7; idx_test = idx_test_all[i_fold]
print('Fold ' + str(i_fold) + '/' + str(len(idx_test_all)-1))
'''Load data'''
# split the data list into training and testing lists
im_test_file_list, im_train_file_list = cytometer.data.split_list(im_orig_file_list, idx_test)
# load the test data (im, dmap, mask)
test_dataset, test_file_list, test_shuffle_idx = \
cytometer.data.load_datasets(im_test_file_list, prefix_from='im', prefix_to=['im', 'dmap', 'mask', 'contour'],
nblocks=1, shuffle_seed=None)
# fill in the little gaps in the mask
kernel = np.ones((3, 3), np.uint8)
for i in range(test_dataset['mask'].shape[0]):
test_dataset['mask'][i, :, :, 0] = cv2.dilate(test_dataset['mask'][i, :, :, 0].astype(np.uint8),
kernel=kernel, iterations=1)
# load dmap model, and adjust input size
saved_model_filename = os.path.join(saved_models_dir, dmap_model_basename + '_model_fold_' + str(i_fold) + '.h5')
dmap_model = keras.models.load_model(saved_model_filename)
if dmap_model.input_shape[1:3] != test_dataset['im'].shape[1:3]:
dmap_model = cytometer.utils.change_input_size(dmap_model, batch_shape=test_dataset['im'].shape)
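# (assumption: the saved networks are fully convolutional, so change_input_size() can presumably
# rebuild the model for the test image shape while keeping the trained weights)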
# estimate dmaps
pred_dmap = dmap_model.predict(test_dataset['im'], batch_size=4)
if DEBUG:
for i in range(test_dataset['im'].shape[0]):
plt.clf()
plt.subplot(221)
plt.imshow(test_dataset['im'][i, :, :, :])
plt.axis('off')
plt.subplot(222)
plt.imshow(test_dataset['dmap'][i, :, :, 0])
plt.axis('off')
plt.subplot(223)
plt.imshow(test_dataset['mask'][i, :, :, 0])
plt.axis('off')
plt.subplot(224)
plt.imshow(pred_dmap[i, :, :, 0])
plt.axis('off')
# KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00_row_019228_col_015060.svg
i = 2
if DEBUG:
plt.clf()
plt.imshow(test_dataset['im'][i, :, :, :])
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.clf()
plt.imshow(test_dataset['dmap'][i, :, :, 0])
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'dmap_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.clf()
plt.imshow(pred_dmap[i, :, :, 0])
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'pred_dmap_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
# load dmap to contour model, and adjust input size
saved_model_filename = os.path.join(saved_models_dir, contour_model_basename + '_model_fold_' + str(i_fold) + '.h5')
contour_model = keras.models.load_model(saved_model_filename)
if contour_model.input_shape[1:3] != pred_dmap.shape[1:3]:
contour_model = cytometer.utils.change_input_size(contour_model, batch_shape=pred_dmap.shape)
# estimate contours
pred_contour = contour_model.predict(pred_dmap, batch_size=4)
if DEBUG:
plt.clf()
plt.imshow(test_dataset['contour'][i, :, :, 0])
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'contour_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.clf()
plt.imshow(pred_contour[i, :, :, 0])
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'pred_contour_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
# load classifier model, and adjust input size
saved_model_filename = os.path.join(saved_models_dir, classifier_model_basename + '_model_fold_' + str(i_fold) + '.h5')
classifier_model = keras.models.load_model(saved_model_filename)
if classifier_model.input_shape[1:3] != test_dataset['im'].shape[1:3]:
classifier_model = cytometer.utils.change_input_size(classifier_model, batch_shape=test_dataset['im'].shape)
# estimate pixel-classification
pred_class = classifier_model.predict(test_dataset['im'], batch_size=4)
if DEBUG:
plt.clf()
plt.imshow(pred_class[i, :, :, 0])
plt.contour(pred_class[i, :, :, 0] > 0.5, colors='r', linewidths=3)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'pred_class_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.clf()
plt.imshow(pred_class[i, :, :, 0] > 0.5)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'pred_class_thresh_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
## plot of classifier ground truth
# print('file ' + str(i) + '/' + str(len(file_svg_list) - 1))
# init output
im_array_all = []
out_class_all = []
out_mask_all = []
contour_type_all = []
file_tif = os.path.join(klf14_training_dir, os.path.basename(im_test_file_list[i]))
file_tif = file_tif.replace('im_seed_nan_', '')
# change file extension from .tif to .svg
file_svg = file_tif.replace('.tif', '.svg')
# open histology training image
im = Image.open(file_tif)
# make array copy
im_array = np.array(im)
# read the ground truth cell contours in the SVG file. This produces a list [contour_0, ..., contour_N-1]
# where each contour_i = [(X_0, Y_0), ..., (X_{P-1}, Y_{P-1})]
cell_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Cell', add_offset_from_filename=False,
minimum_npoints=3)
other_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Other', add_offset_from_filename=False,
minimum_npoints=3)
brown_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Brown', add_offset_from_filename=False,
minimum_npoints=3)
background_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Background',
add_offset_from_filename=False,
minimum_npoints=3)
contours = cell_contours + other_contours + brown_contours + background_contours
# make a list with the type of cell each contour is classified as
contour_type = [np.zeros(shape=(len(cell_contours),), dtype=np.uint8), # 0: white-adipocyte
np.ones(shape=(len(other_contours),), dtype=np.uint8), # 1: other types of tissue
np.ones(shape=(len(brown_contours),), dtype=np.uint8), # 1: brown cells (treated as "other" tissue)
np.zeros(shape=(len(background_contours),), dtype=np.uint8)] # 0: background
contour_type = np.concatenate(contour_type)
contour_type_all.append(contour_type)
print('Cells: ' + str(len(cell_contours)))
print('Other: ' + str(len(other_contours)))
print('Brown: ' + str(len(brown_contours)))
print('Background: ' + str(len(background_contours)))
# initialise arrays for training
out_class = np.zeros(shape=im_array.shape[0:2], dtype=np.uint8)
out_mask = np.zeros(shape=im_array.shape[0:2], dtype=np.uint8)
# loop ground truth cell contours
for j, contour in enumerate(contours):
plt.plot([p[0] for p in contour], [p[1] for p in contour])
plt.text(contour[0][0], contour[0][1], str(j))
if DEBUG:
plt.clf()
plt.subplot(121)
plt.imshow(im_array)
plt.plot([p[0] for p in contour], [p[1] for p in contour])
xy_c = (np.mean([p[0] for p in contour]), np.mean([p[1] for p in contour]))
plt.scatter(xy_c[0], xy_c[1])
# rasterise current ground truth segmentation
cell_seg_gtruth = Image.new("1", im_array.shape[0:2][::-1], "black")  # mode "1": 1-bit pixels, black and white
draw = ImageDraw.Draw(cell_seg_gtruth)
draw.polygon(contour, outline="white", fill="white")
cell_seg_gtruth = np.array(cell_seg_gtruth, dtype=np.bool)
# save the ground truth segmentation of the cell that we will later use in the figures
if j == 106:
cell_seg_gtruth_106 = cell_seg_gtruth.copy()
if DEBUG:
plt.subplot(122)
plt.cla()
plt.imshow(im_array)
plt.contour(cell_seg_gtruth.astype(np.uint8))
# add current object to training output and mask
out_mask[cell_seg_gtruth] = 1
out_class[cell_seg_gtruth] = contour_type[j]
if DEBUG:
plt.clf()
aux = (1 - out_class).astype(np.float32)
aux = np.ma.masked_where(out_mask < 0.5, aux)
plt.imshow(aux)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'class_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
## Segmentation correction CNN
# segmentation parameters
min_cell_area = 1500
max_cell_area = 100e3
min_mask_overlap = 0.8
phagocytosis = True
min_class_prop = 0.5
correction_window_len = 401
correction_smoothing = 11
batch_size = 2
# segment histology
labels, labels_class, _ \
= cytometer.utils.segment_dmap_contour_v6(im_array,
contour_model=contour_model, dmap_model=dmap_model,
classifier_model=classifier_model,
border_dilation=0)
labels = labels[0, :, :]
labels_class = labels_class[0, :, :, 0]
if DEBUG:
plt.clf()
plt.imshow(labels)
if DEBUG:
plt.clf()
plt.imshow(labels)
plt.clf()
aux = skimage.segmentation.find_boundaries(labels, mode='thick')
kernel = np.ones((3, 3), np.uint8)
aux = cv2.dilate(aux.astype(np.uint8), kernel=kernel, iterations=1)
plt.imshow(aux)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'watershed_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
# remove labels that touch the edges, that are too small or too large, don't overlap enough with the tissue mask,
# are fully surrounded by another label or are not white adipose tissue
labels, todo_edge = cytometer.utils.clean_segmentation(
labels, min_cell_area=min_cell_area, max_cell_area=max_cell_area,
remove_edge_labels=True, mask=None, min_mask_overlap=min_mask_overlap,
phagocytosis=phagocytosis,
labels_class=labels_class, min_class_prop=min_class_prop)
if DEBUG:
plt.clf()
plt.imshow(im_array)
plt.contour(labels, levels=np.unique(labels), colors='k')
plt.contourf(labels == 0)
plt.clf()
aux = skimage.segmentation.find_boundaries(labels, mode='thick')
kernel = np.ones((3, 3), np.uint8)
aux = cv2.dilate(aux.astype(np.uint8), kernel=kernel, iterations=1)
plt.imshow(aux)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'cleaned_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
# split image into individual labels
im_array = np.expand_dims(im_array, axis=0)
labels = np.expand_dims(labels, axis=0)
labels_class = np.expand_dims(labels_class, axis=0)
cell_seg_gtruth_106 = np.expand_dims(cell_seg_gtruth_106, axis=0)
window_mask = None
(window_labels, window_im, window_labels_class, window_cell_seg_gtruth_106), index_list, scaling_factor_list \
= cytometer.utils.one_image_per_label_v2((labels, im_array, labels_class, cell_seg_gtruth_106.astype(np.uint8)),
resize_to=(correction_window_len, correction_window_len),
resample=(Image.NEAREST, Image.LINEAR, Image.NEAREST, Image.NEAREST),
only_central_label=True, return_bbox=True)
# load correction model
saved_model_filename = os.path.join(saved_models_dir, correction_model_basename + '_model_fold_' + str(i_fold) + '.h5')
correction_model = keras.models.load_model(saved_model_filename)
if correction_model.input_shape[1:3] != window_im.shape[1:3]:
correction_model = cytometer.utils.change_input_size(correction_model, batch_shape=window_im.shape)
# multiply image by mask
window_im_masked = cytometer.utils.quality_model_mask(
np.expand_dims(window_labels, axis=-1), im=window_im, quality_model_type='-1_1')
# process (histology * mask) to estimate which pixels are underestimated and which overestimated in the segmentation
window_im_masked = correction_model.predict(window_im_masked, batch_size=batch_size)
# compute the correction to be applied to the segmentation
correction = (window_im[:, :, :, 0].copy() * 0).astype(np.float32)
correction[window_im_masked[:, :, :, 0] >= 0.5] = 1 # the segmentation went too far
correction[window_im_masked[:, :, :, 0] <= -0.5] = -1 # the segmentation fell short
if DEBUG:
j = 0
plt.clf()
plt.imshow(correction[j, :, :])
# plt.contour(window_labels[j, ...], colors='r', linewidths=1)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'pred_correction_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.clf()
plt.imshow(correction[j, :, :])
plt.contour(window_labels[j, ...], colors='r', linewidths=1)
plt.contour(window_cell_seg_gtruth_106[j, ...], colors='w', linewidths=1)
plt.axis('off')
plt.tight_layout()
plt.savefig(
os.path.join(figures_dir, 'pred_correction_gtruth_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
# correct segmentation (full operation)
window_im = window_im.astype(np.float32)
window_im /= 255.0
window_labels_corrected = cytometer.utils.correct_segmentation(
im=window_im, seg=window_labels,
correction_model=correction_model, model_type='-1_1',
smoothing=correction_smoothing,
batch_size=batch_size)
if DEBUG:
# plot input to and output from Correction CNN examples
for j in [13, 15, 18]:
plt.clf()
plt.imshow(window_im[j, :, :, :])
plt.contour(window_labels[j, ...], colors='g', linewidths=4)
plt.axis('off')
plt.tight_layout()
plt.savefig(
os.path.join(figures_dir, 'correction_input_j_' + str(j) + '_'
+ os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.savefig(
os.path.join(figures_dir, 'correction_input_j_' + str(j) + '_'
+ os.path.basename(im_test_file_list[i]).replace('.tif', '.svg')),
bbox_inches='tight')
plt.clf()
plt.imshow(window_im[j, :, :, :])
plt.contour(window_labels_corrected[j, ...], colors='r', linewidths=4)
plt.axis('off')
plt.tight_layout()
plt.savefig(
os.path.join(figures_dir, 'correction_output_j_' + str(j) + '_'
+ os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.savefig(
os.path.join(figures_dir, 'correction_output_j_' + str(j) + '_'
+ os.path.basename(im_test_file_list[i]).replace('.tif', '.svg')),
bbox_inches='tight')
# convert overlap labels in cropped images to contours (points), and add cropping window offset so that the
# contours are in the tile-window coordinates
offset_xy = np.array(index_list)[:, [2, 3]] # index_list: [i, lab, x0, y0, xend, yend]
contours = cytometer.utils.labels2contours(window_labels, offset_xy=offset_xy,
scaling_factor_xy=scaling_factor_list)
contours_corrected = cytometer.utils.labels2contours(window_labels_corrected, offset_xy=offset_xy,
scaling_factor_xy=scaling_factor_list)
# crop contours that overflow the edges
for j in range(len(contours_corrected)):
contours_corrected[j] = np.clip(contours_corrected[j], a_min=0, a_max=1000)
if DEBUG:
# plot corrected overlapping contours all together
plt.clf()
plt.imshow(labels[0, :, :] * 0)
for j in range(len(contours_corrected)):
plt.fill(contours_corrected[j][:, 1], contours_corrected[j][:, 0],
edgecolor=(0.993248, 0.906157, 0.143936, 1.0), fill=False, lw=1.5)
plt.axis('off')
plt.tight_layout()
plt.savefig(
os.path.join(figures_dir,
'corrected_contours_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.savefig(
os.path.join(figures_dir,
'corrected_contours_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.svg')),
bbox_inches='tight')
if DEBUG:
j = 0
plt.clf()
plt.imshow(window_im[j, ...])
plt.contour(window_labels[j, ...], colors='r', linewidths=3)
plt.text(185, 210, '+1', fontsize=30)
plt.text(116, 320, '-1', fontsize=30)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'im_for_correction_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.clf()
plt.imshow(window_im[j, ...])
plt.contour(window_labels_corrected[j, ...], colors='g', linewidths=3)
plt.text(185, 210, '+1', fontsize=30)
plt.text(116, 320, '-1', fontsize=30)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'corrected_seg_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
aux = np.array(contours[j])
plt.plot(aux[:, 0], aux[:, 1])
########################################################################################################################
## Check whether manual correction of pipeline results makes a difference
# For this experiment, we corrected by hand on AIDA the automatic segmentations produced by the pipeline,
# and compared the segmentation error.
########################################################################################################################
import matplotlib.pyplot as plt
import cytometer.data
from shapely.geometry import Polygon
import openslide
import numpy as np
import scipy.stats
import pandas as pd
from mlxtend.evaluate import permutation_test
from statsmodels.stats.multitest import multipletests
import math
# directories
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Klf14_v7/annotations')
ndpi_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper/figures')
metainfo_dir = os.path.join(home, 'Data/cytometer_data/klf14')
DEBUG = False
depot = 'sqwat'
# depot = 'gwat'
permutation_sample_size = 9 # the factorial of this number is the number of repetitions in the permutation tests
# list of annotation files for this depot
json_annotation_files = json_annotation_files_dict[depot]
# modify filenames to select the automatic segmentations produced by the pipeline
json_pipeline_annotation_files = [x.replace('.json', '_exp_0097_corrected_monolayer_left.json') for x in json_annotation_files]
json_pipeline_annotation_files = [os.path.join(annotations_dir, x) for x in json_pipeline_annotation_files]
# modify filenames to select the manually refined segmentations
json_refined_annotation_files = [x.replace('.json', '_exp_0097_refined_left.json') for x in json_annotation_files]
json_refined_annotation_files = [os.path.join(annotations_dir, x) for x in json_refined_annotation_files]
# CSV file with metainformation of all mice
metainfo_csv_file = os.path.join(metainfo_dir, 'klf14_b6ntac_meta_info.csv')
metainfo = pd.read_csv(metainfo_csv_file)
# make sure that in the boxplots PAT comes before MAT
metainfo['sex'] = metainfo['sex'].astype(pd.api.types.CategoricalDtype(categories=['f', 'm'], ordered=True))
metainfo['ko_parent'] = metainfo['ko_parent'].astype(pd.api.types.CategoricalDtype(categories=['PAT', 'MAT'], ordered=True))
metainfo['genotype'] = metainfo['genotype'].astype(pd.api.types.CategoricalDtype(categories=['KLF14-KO:WT', 'KLF14-KO:Het'], ordered=True))
quantiles = np.linspace(0, 1, 11)
quantiles = quantiles[1:-1]
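# i.e. the nine interior deciles 0.1, 0.2, ..., 0.9 (the 0 and 1 endpoints are dropped)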
pipeline_area_q_all = []
refined_area_q_all = []
pipeline_area_mean_all = []
refined_area_mean_all = []
id_all = []
for i_file, (json_pipeline_annotation_file, json_refined_annotation_file) in enumerate(zip(json_pipeline_annotation_files, json_refined_annotation_files)):
# create dataframe for this image
df_common = cytometer.data.tag_values_with_mouse_info(metainfo=metainfo, s=os.path.basename(json_pipeline_annotation_file),
values=[i_file, ], values_tag='i_file',
tags_to_keep=['id', 'ko_parent', 'genotype', 'sex',
'BW', 'gWAT', 'SC'])
# mouse ID as a string
id = df_common['id'].values[0]
# we have only refined some of the segmentations for testing
if not id in ['16.2a', '16.2b', '16.2c', '16.2d', '16.2e', '16.2f', '17.1a', '17.1b', '17.1c', '17.1d', '17.1e',
'17.1f', '17.2a', '17.2b', '17.2c', '17.2d', '17.2f', '17.2g', '18.1a', '18.1b', '18.1c', '18.1d']:
continue
print('File ' + str(i_file) + '/' + str(len(json_annotation_files)-1) + ': ')
if os.path.isfile(json_pipeline_annotation_file):
print('\t' + os.path.basename(json_pipeline_annotation_file))
else:
print('\t' + os.path.basename(json_pipeline_annotation_file) + ' ... missing')
if os.path.isfile(json_refined_annotation_file):
print('\t' + os.path.basename(json_refined_annotation_file))
else:
print('\t' + os.path.basename(json_refined_annotation_file) + ' ... missing')
# ndpi file that corresponds to this .json file
ndpi_file = json_pipeline_annotation_file.replace('_exp_0097_corrected_monolayer_left.json', '.ndpi')
ndpi_file = ndpi_file.replace(annotations_dir, ndpi_dir)
# open full resolution histology slide
im = openslide.OpenSlide(ndpi_file)
# pixel size
assert (im.properties['tiff.ResolutionUnit'] == 'centimeter')
xres = 1e-2 / float(im.properties['tiff.XResolution']) # m
yres = 1e-2 / float(im.properties['tiff.YResolution']) # m
# ko = df_common['ko_parent'].values[0]
# genotype = df_common['genotype'].values[0]
# sex = df_common['sex'].values[0]
# bw = df_common['BW'].values[0]
# gwat = df_common['gWAT'].values[0]
# sc = df_common['SC'].values[0]
# read contours from AIDA annotations
pipeline_contours = cytometer.data.aida_get_contours(json_pipeline_annotation_file, layer_name='White adipocyte.*')
refined_contours = cytometer.data.aida_get_contours(json_refined_annotation_file, layer_name='White adipocyte.*')
# compute area of each contour
pipeline_areas = [Polygon(c).area * xres * yres for c in pipeline_contours]  # (m^2; converted to um^2 below with the 1e12 factor)
refined_areas = [Polygon(c).area * xres * yres for c in refined_contours]  # (m^2)
# compute HD quantiles
pipeline_area_q = scipy.stats.mstats.hdquantiles(pipeline_areas, prob=quantiles, axis=0)
refined_area_q = scipy.stats.mstats.hdquantiles(refined_areas, prob=quantiles, axis=0)
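# (hdquantiles is the Harrell-Davis estimator: a smooth, weighted combination of order
# statistics that is less noisy than plain empirical quantiles for small samples)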
# compute average cell size
pipeline_area_mean = np.mean(pipeline_areas)
refined_area_mean = np.mean(refined_areas)
pipeline_area_q_all.append(pipeline_area_q)
refined_area_q_all.append(refined_area_q)
pipeline_area_mean_all.append(pipeline_area_mean)
refined_area_mean_all.append(refined_area_mean)
id_all.append(id)
print('Removed cells: %.2f' % (1 - len(refined_areas) / len(pipeline_areas)))
print((np.array(pipeline_area_q) - np.array(refined_area_q)) * 1e12)
if DEBUG:
plt.clf()
plt.plot(quantiles, pipeline_area_q * 1e12, label='Pipeline', linewidth=3)
plt.plot(quantiles, refined_area_q * 1e12, label='Refined', linewidth=3)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.xlabel('Quantiles', fontsize=14)
plt.ylabel('Area ($\mu m^2$)', fontsize=14)
plt.legend(fontsize=12)
# convert the list of vectors into a matrix
pipeline_area_q_all = np.vstack(pipeline_area_q_all)
refined_area_q_all = np.vstack(refined_area_q_all)
refined_area_mean_all = np.array(refined_area_mean_all)
pipeline_area_mean_all = np.array(pipeline_area_mean_all)
if DEBUG:
plt.clf()
pipeline_area_q_mean = np.mean(pipeline_area_q_all, axis=0)
for i in range(pipeline_area_q_all.shape[0]):
plt.plot(quantiles, 100 * (refined_area_q_all[i, :] - pipeline_area_q_all[i, :]) / pipeline_area_q_mean, label=id_all[i], linewidth=3)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.xlabel('Quantiles', fontsize=14)
plt.ylabel('Area change with refinement (%)', fontsize=14)
plt.legend(fontsize=12)
plt.tight_layout()
if DEBUG:
plt.clf()
plt.boxplot(100 * (refined_area_mean_all - pipeline_area_mean_all) / pipeline_area_mean_all, labels=['Mean size'])
plt.tick_params(axis='both', which='major', labelsize=14)
plt.ylabel('Area change with refinement (%)', fontsize=14)
plt.tight_layout()
########################################################################################################################
## Plots of segmented full slides with quantile colourmaps
########################################################################################################################
# This is done in klf14_b6ntac_exp_0098_full_slide_size_analysis_v7
########################################################################################################################
## Analysis of time and blocks that took to compute full slide segmentation from the server logs
########################################################################################################################
# The results were noted down in klf14_b6ntac_exp_0097_full_slide_pipeline_v7_logs.csv. This file is in the GoogleDrive
# directory with the rest of the paper.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import openslide
import statsmodels.api as sm
import scipy.stats
DEBUG = False
'''Directories and filenames'''
# data paths
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
saved_models_dir = os.path.join(klf14_root_data_dir, 'saved_models')
ndpi_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Klf14_v7/annotations')
times_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper')
# load filenames, number of blocks and time it took to segment them
times_file = 'klf14_b6ntac_exp_0097_full_slide_pipeline_v7_logs.csv'
times_file = os.path.join(times_dir, times_file)
times_df = pd.read_csv(times_file)
# read rough masks of the files in the dataframe, to measure the tissue area in each
for i, file in enumerate(times_df['File']):
# filename of the coarse tissue mask
coarse_mask_file = os.path.join(annotations_dir, file + '_rough_mask.npz')
# load coarse tissue mask
with np.load(coarse_mask_file) as data:
mask = data['lores_istissue0']
if DEBUG:
plt.clf()
plt.imshow(mask)
# open full resolution histology slide to get pixel size
ndpi_file = os.path.join(ndpi_dir, file + '.ndpi')
im = openslide.OpenSlide(ndpi_file)
# pixel size
assert (im.properties['tiff.ResolutionUnit'] == 'centimeter')
xres = 1e-2 / float(im.properties['tiff.XResolution'])
yres = 1e-2 / float(im.properties['tiff.YResolution'])
# scaling factor to get pixel size in the coarse mask
k = np.array(im.dimensions) / mask.shape[::-1]
# add tissue area to the dataframe. Calculations for full resolution slide, even though they
# are computed from the coarse mask
times_df.loc[i, 'tissue_area_pix'] = np.count_nonzero(mask) * k[0] * k[1]
times_df.loc[i, 'tissue_area_mm2'] = times_df.loc[i, 'tissue_area_pix'] * xres * yres * 1e6
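# unit check (illustrative): k[0] * k[1] scales the coarse-mask pixel count to full-resolution
# pixels, xres * yres is the full-resolution pixel area in m^2, and the factor 1e6 converts
# m^2 to mm^2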
if DEBUG:
# plot tissue area vs. time to compute
plt.clf()
plt.scatter(times_df['tissue_area_mm2'], times_df['Blocks Time (s)'])
# fit linear model
model = sm.formula.ols('Q("Blocks Time (s)") ~ tissue_area_mm2', data=times_df).fit()
print(model.summary())
# Pearson coefficient
rho, rho_p = scipy.stats.pearsonr(times_df['tissue_area_mm2'], times_df['Blocks Time (s)'])
print('Pearson coeff = ' + str(rho))
print('p-val = ' + str(rho_p))
# tissue area
print('Tissue area')
print('min = ' + str(np.min(times_df['tissue_area_mm2'])) + ' mm2 = ' + str(np.min(times_df['tissue_area_pix']) * 1e-6) + ' Mpix')
print('max = ' + str(np.max(times_df['tissue_area_mm2'])) + ' mm2 = ' + str(np.max(times_df['tissue_area_pix']) * 1e-6) + ' Mpix')
# corresponding time to compute
print('Time to compute')
time_pred = model.predict(times_df['tissue_area_mm2'])
print('min = ' + str(np.min(time_pred) / 3600) + ' h')
print('max = ' + str(np.max(time_pred) / 3600) + ' h')
tissue_area_mm2_q1 = scipy.stats.mstats.hdquantiles(times_df['tissue_area_mm2'], prob=0.25, axis=0)
tissue_area_mm2_q2 = scipy.stats.mstats.hdquantiles(times_df['tissue_area_mm2'], prob=0.5, axis=0)
tissue_area_mm2_q3 = scipy.stats.mstats.hdquantiles(times_df['tissue_area_mm2'], prob=0.75, axis=0)
aux_df = times_df.loc[0:2, :].copy()
aux_df.loc[0, 'tissue_area_mm2'] = tissue_area_mm2_q1
aux_df.loc[1, 'tissue_area_mm2'] = tissue_area_mm2_q2
aux_df.loc[2, 'tissue_area_mm2'] = tissue_area_mm2_q3
tissue_time_pred_q = model.predict(aux_df)
print('q1 = ' + str(tissue_area_mm2_q1) + ' mm2 -> '
+ str(tissue_time_pred_q[0] / 3600) + ' h')
print('q2 = ' + str(tissue_area_mm2_q2) + ' mm2 -> '
+ str(tissue_time_pred_q[1] / 3600) + ' h')
print('q3 = ' + str(tissue_area_mm2_q3) + ' mm2 -> '
+ str(tissue_time_pred_q[2] / 3600) + ' h')
########################################################################################################################
## Segmentation validation
########################################################################################################################
# This is done in klf14_b6ntac_exp_0096_pipeline_v7_validation.py
########################################################################################################################
## Time that it takes to do Auto vs. Corrected segmentation
########################################################################################################################
import numpy as np
import time
import pandas as pd
import pickle
import matplotlib.pyplot as plt
import cytometer.data
import cytometer.utils
from PIL import Image, ImageDraw, ImageEnhance
DEBUG = False
# data paths
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
saved_models_dir = os.path.join(klf14_root_data_dir, 'saved_models')
metainfo_dir = os.path.join(home, 'Data/cytometer_data/klf14')
times_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper')
saved_kfolds_filename = 'klf14_b6ntac_exp_0079_generate_kfolds.pickle'
saved_extra_kfolds_filename = 'klf14_b6ntac_exp_0094_generate_extra_training_images.pickle'
# model names
dmap_model_basename = 'klf14_b6ntac_exp_0086_cnn_dmap'
contour_model_basename = 'klf14_b6ntac_exp_0091_cnn_contour_after_dmap'
classifier_model_basename = 'klf14_b6ntac_exp_0095_cnn_tissue_classifier_fcn'
correction_model_basename = 'klf14_b6ntac_exp_0089_cnn_segmentation_correction_overlapping_scaled_contours'
# training window length
training_window_len = 401
# segmentation parameters
min_cell_area = 1500
max_cell_area = 100e3
min_mask_overlap = 0.8
phagocytosis = True
# min_class_prop = 0.5
# correction_window_len = 401
# correction_smoothing = 11
batch_size = 2
'''Load folds'''
# load list of images, and indices for training vs. testing indices
saved_kfolds_filename = os.path.join(saved_models_dir, saved_kfolds_filename)
with open(saved_kfolds_filename, 'rb') as f:
aux = pickle.load(f)
file_svg_list = aux['file_list']
idx_test_all = aux['idx_test']
idx_train_all = aux['idx_train']
# correct home directory
file_svg_list = [x.replace('/users/rittscher/rcasero', home) for x in file_svg_list]
file_svg_list = [x.replace('/home/rcasero', home) for x in file_svg_list]
# number of images
n_im = len(file_svg_list)
# number of folds
n_folds = len(idx_test_all)
# CSV file with metainformation of all mice
metainfo_csv_file = os.path.join(metainfo_dir, 'klf14_b6ntac_meta_info.csv')
metainfo = pd.read_csv(metainfo_csv_file)
# # correct home directory in file paths
# file_svg_list = cytometer.data.change_home_directory(list(file_svg_list), '/home/rcasero', home, check_isfile=True)
# file_svg_list = cytometer.data.change_home_directory(list(file_svg_list), '/users/rittscher/rcasero', home, check_isfile=True)
## compute and save results (you can skip this section if it has been done before, and go
## straight to where you load the results)
# load data computed in 0096 validation script
data_filename = os.path.join(saved_models_dir, 'klf14_b6ntac_exp_0096_pipeline_v7_validation' + '_data.npz')
with np.load(data_filename) as data:
im_array_all = data['im_array_all']
rough_mask_all = data['rough_mask_all']
out_class_all = 1 - data['out_class_all'] # encode as 0: other, 1: WAT
out_mask_all = data['out_mask_all']
# init dataframes
df_manual_all = pd.DataFrame()
df_auto_all = pd.DataFrame()
# init time vectors
time_auto = []
time_corrected = []
for i_fold in range(len(idx_test_all)):
# start timer
t0 = time.time()
''' Get the images/masks/classification that were not used for training of this particular fold '''
print('# Fold ' + str(i_fold) + '/' + str(len(idx_test_all) - 1))
# test and training image indices. These indices refer to file_list
idx_test = idx_test_all[i_fold]
# list of test files (used later for the dataframe)
file_list_test = np.array(file_svg_list)[idx_test]
print('## len(idx_test) = ' + str(len(idx_test)))
# split data into training and testing
im_array_test = im_array_all[idx_test, :, :, :]
rough_mask_test = rough_mask_all[idx_test, :, :]
out_class_test = out_class_all[idx_test, :, :, :]
out_mask_test = out_mask_all[idx_test, :, :]
''' Segmentation into non-overlapping objects '''
# names of contour, dmap and tissue classifier models
contour_model_filename = \
os.path.join(saved_models_dir, contour_model_basename + '_model_fold_' + str(i_fold) + '.h5')
dmap_model_filename = \
os.path.join(saved_models_dir, dmap_model_basename + '_model_fold_' + str(i_fold) + '.h5')
classifier_model_filename = \
os.path.join(saved_models_dir, classifier_model_basename + '_model_fold_' + str(i_fold) + '.h5')
# segment histology
pred_seg_test, pred_class_test, _ \
= cytometer.utils.segment_dmap_contour_v6(im_array_test,
dmap_model=dmap_model_filename,
contour_model=contour_model_filename,
classifier_model=classifier_model_filename,
border_dilation=0, batch_size=batch_size)
if DEBUG:
i = 0
plt.clf()
plt.subplot(221)
plt.cla()
plt.imshow(im_array_test[i, :, :, :])
plt.axis('off')
plt.subplot(222)
plt.cla()
plt.imshow(im_array_test[i, :, :, :])
plt.contourf(pred_class_test[i, :, :, 0].astype(np.float32), alpha=0.5)
plt.axis('off')
plt.subplot(223)
plt.cla()
plt.imshow(im_array_test[i, :, :, :])
plt.contour(pred_seg_test[i, :, :], levels=np.unique(pred_seg_test[i, :, :]), colors='k')
plt.axis('off')
plt.subplot(224)
plt.cla()
plt.imshow(im_array_test[i, :, :, :])
plt.contourf(pred_class_test[i, :, :, 0].astype(np.float32), alpha=0.5)
plt.contour(pred_seg_test[i, :, :], levels=np.unique(pred_seg_test[i, :, :]), colors='k')
plt.axis('off')
plt.tight_layout()
# clean segmentation: remove labels that are too small or that don't overlap enough with
# the rough foreground mask
pred_seg_test, _ \
= cytometer.utils.clean_segmentation(pred_seg_test, min_cell_area=min_cell_area, max_cell_area=max_cell_area,
remove_edge_labels=False,
mask=rough_mask_test, min_mask_overlap=min_mask_overlap,
phagocytosis=phagocytosis, labels_class=None)
# record processing time
time_auto.append((time.time() - t0) / im_array_test.shape[0])
if DEBUG:
plt.clf()
aux = np.stack((rough_mask_test[i, :, :],) * 3, axis=2)
plt.imshow(im_array_test[i, :, :, :] * aux)
plt.contour(pred_seg_test[i, ...], levels=np.unique(pred_seg_test[i, ...]), colors='k')
plt.axis('off')
''' Split image into individual labels and correct segmentation to take overlaps into account '''
(window_seg_test, window_im_test, window_class_test, window_rough_mask_test), index_list, scaling_factor_list \
= cytometer.utils.one_image_per_label_v2((pred_seg_test, im_array_test,
pred_class_test[:, :, :, 0].astype(np.uint8),
rough_mask_test.astype(np.uint8)),
resize_to=(training_window_len, training_window_len),
resample=(Image.NEAREST, Image.LINEAR, Image.NEAREST, Image.NEAREST),
only_central_label=True)
# correct segmentations
correction_model_filename = os.path.join(saved_models_dir,
correction_model_basename + '_model_fold_' + str(i_fold) + '.h5')
window_seg_corrected_test = cytometer.utils.correct_segmentation(im=window_im_test, seg=window_seg_test,
correction_model=correction_model_filename,
model_type='-1_1', batch_size=batch_size,
smoothing=11)
# record processing time
time_corrected.append((time.time() - t0 - time_auto[-1]) / im_array_test.shape[0])
# save for later use
times_file = os.path.join(times_dir, 'klf14_b6ntac_exp_0099_time_comparison_auto_corrected.npz')
np.savez(times_file, time_auto=time_auto, time_corrected=time_corrected)
# compute what proportion of time the algorithm spends on the Auto segmentation vs. corrected segmentation
time_auto_ratio = np.array(time_auto) / (np.array(time_auto) + np.array(time_corrected))
print('Time Auto ratio:')
print('mean = ' + str(100 * np.mean(time_auto_ratio)) + ' %')
print('std = ' + str(100 * np.std(time_auto_ratio)) + ' %')
print('Ratio of total time to Auto')
print('mean = ' + 'x' + str(np.mean(1 / time_auto_ratio)))
print('std = ' + 'x' + str(np.std(1 / time_auto_ratio)))
time_corrected_ratio = np.array(time_corrected) / (np.array(time_auto) + np.array(time_corrected))
print('Time Corrected ratio:')
print('mean = ' + str(100 * np.mean(time_corrected_ratio)) + ' %')
print('std = ' + str(100 * np.std(time_corrected_ratio)) + ' %')
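# Sketch (illustration only, guarded by DEBUG): the timings saved above can be reloaded later without rerunning the
# segmentation pipeline. This assumes only the keys written by np.savez() above (time_auto, time_corrected).
if DEBUG:
    times_file = os.path.join(times_dir, 'klf14_b6ntac_exp_0099_time_comparison_auto_corrected.npz')
    with np.load(times_file) as aux:
        print('mean Auto time per image (s) = ' + str(np.mean(aux['time_auto'])))
        print('mean Corrected time per image (s) = ' + str(np.mean(aux['time_corrected'])))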
########################################################################################################################
## Cell populations from automatically segmented images in two depots: SQWAT and GWAT.
## This section needs to be run for each of the depots. The results are saved, so later sections can load all the
## data together.
### USED IN PAPER
########################################################################################################################
import matplotlib.pyplot as plt
import cytometer.data
from shapely.geometry import Polygon
import openslide
import numpy as np
import scipy.stats
import pandas as pd
from mlxtend.evaluate import permutation_test
from statsmodels.stats.multitest import multipletests
import math
# directories
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Klf14_v8/annotations')
ndpi_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper/figures')
metainfo_dir = os.path.join(home, 'Data/cytometer_data/klf14')
DEBUG = False
permutation_sample_size = 9 # the factorial of this number is the number of repetitions in the permutation tests
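# Quick sanity check (illustration only): with permutation_sample_size = 9, an exact permutation test enumerates
# math.factorial(9) = 362880 permutations (math is imported above).
if DEBUG:
    print('number of exact permutations = ' + str(math.factorial(permutation_sample_size)))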
for depot in ['sqwat', 'gwat']:
# list of annotation files for this depot
json_annotation_files = json_annotation_files_dict[depot]
# modify filenames to select the particular segmentation we want (e.g. the automatic ones, or the manually refined ones)
json_annotation_files = [x.replace('.json', '_exp_0097_corrected.json') for x in json_annotation_files]
json_annotation_files = [os.path.join(annotations_dir, x) for x in json_annotation_files]
# CSV file with metainformation of all mice
metainfo_csv_file = os.path.join(metainfo_dir, 'klf14_b6ntac_meta_info.csv')
metainfo = pd.read_csv(metainfo_csv_file)
# make sure that in the boxplots PAT comes before MAT
metainfo['sex'] = metainfo['sex'].astype(pd.api.types.CategoricalDtype(categories=['f', 'm'], ordered=True))
metainfo['ko_parent'] = metainfo['ko_parent'].astype(pd.api.types.CategoricalDtype(categories=['PAT', 'MAT'], ordered=True))
metainfo['genotype'] = metainfo['genotype'].astype(pd.api.types.CategoricalDtype(categories=['KLF14-KO:WT', 'KLF14-KO:Het'], ordered=True))
quantiles = np.linspace(0, 1, 11)
quantiles = quantiles[1:-1]
# compute areas of the rough masks
filename_rough_mask_area = os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_rough_mask_area_' + depot + '.npz')
id_all = []
rough_mask_area_all = []
for i_file, json_file in enumerate(json_annotation_files):
print('File ' + str(i_file) + '/' + str(len(json_annotation_files)-1) + ': ' + os.path.basename(json_file))
if not os.path.isfile(json_file):
print('Missing file')
# continue
# open full resolution histology slide
ndpi_file = json_file.replace('_exp_0097_corrected.json', '.ndpi')
ndpi_file = os.path.join(ndpi_dir, os.path.basename(ndpi_file))
im = openslide.OpenSlide(ndpi_file)
# pixel size
assert (im.properties['tiff.ResolutionUnit'] == 'centimeter')
xres = 1e-2 / float(im.properties['tiff.XResolution'])
yres = 1e-2 / float(im.properties['tiff.YResolution'])
# load mask
rough_mask_file = json_file.replace('_exp_0097_corrected.json', '_rough_mask.npz')
rough_mask_file = os.path.join(annotations_dir, rough_mask_file)
if not os.path.isfile(rough_mask_file):
print('No mask: ' + rough_mask_file)
aux = np.load(rough_mask_file)
lores_istissue0 = aux['lores_istissue0']
if DEBUG:
foo = aux['im_downsampled']
foo = PIL.Image.fromarray(foo)
foo = foo.resize(tuple((np.round(np.array(foo.size[0:2]) / 4)).astype(np.int)))
plt.imshow(foo)
plt.title(os.path.basename(ndpi_file))
# compute scaling factor between downsampled mask and original image
size_orig = np.array(im.dimensions) # width, height
size_downsampled = np.array(lores_istissue0.shape)[::-1] # width, height
downsample_factor = size_orig / size_downsampled # width, height
# create dataframe for this image
rough_mask_area = np.count_nonzero(lores_istissue0) * (xres * downsample_factor[0]) * (yres * downsample_factor[1]) # m^2
df_common = cytometer.data.tag_values_with_mouse_info(metainfo=metainfo, s=os.path.basename(json_file),
values=[rough_mask_area,], values_tag='SC_rough_mask_area',
tags_to_keep=['id', 'ko_parent', 'sex'])
# mouse ID as a string
id = df_common['id'].values[0]
# correct area because most slides contain two slices, but some don't
if depot == 'sqwat' and not (id in ['16.2d', '17.1e', '17.2g', '16.2e', '18.1f', '37.4a', '37.2e']):
# two slices in the slide, so slice area is approx. one half
rough_mask_area /= 2
elif depot == 'gwat' and not (id in ['36.1d', '16.2a', '16.2b', '16.2c', '16.2d', '16.2e', '17.1b', '17.1d',
'17.1e', '17.1f', '17.2c', '17.2d', '17.2f', '17.2g', '18.1b', '18.1c',
'18.1d', '18.2a', '18.2c', '18.2d', '18.2f', '18.2g', '18.3c', '19.1a',
'19.2e', '19.2f', '19.2g', '36.3d', '37.2e', '37.2f', '37.2g', '37.2h',
'37.3a', '37.4a', '37.4b', '39.2d']):
# two slices in the slide, so slice area is approx. one half
rough_mask_area /= 2
# add to output
id_all.append(id)
rough_mask_area_all.append(rough_mask_area)
# save results
np.savez_compressed(filename_rough_mask_area, id_all=id_all, rough_mask_area_all=rough_mask_area_all)
# load or compute area quantiles
filename_quantiles = os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_area_quantiles_' + depot + '.npz')
if os.path.isfile(filename_quantiles):
with np.load(filename_quantiles) as aux:
area_mean_all = aux['area_mean_all']
area_q_all = aux['area_q_all']
id_all = aux['id_all']
ko_all = aux['ko_all']
genotype_all = aux['genotype_all']
sex_all = aux['sex_all']
else:
area_mean_all = []
area_q_all = []
id_all = []
ko_all = []
genotype_all = []
sex_all = []
bw_all = []
gwat_all = []
sc_all = []
for i_file, json_file in enumerate(json_annotation_files):
print('File ' + str(i_file) + '/' + str(len(json_annotation_files)-1) + ': ' + os.path.basename(json_file))
if not os.path.isfile(json_file):
print('Missing file')
continue
# ndpi file that corresponds to this .json file
ndpi_file = json_file.replace('_exp_0097_corrected.json', '.ndpi')
ndpi_file = ndpi_file.replace(annotations_dir, ndpi_dir)
# open full resolution histology slide
im = openslide.OpenSlide(ndpi_file)
# pixel size
assert (im.properties['tiff.ResolutionUnit'] == 'centimeter')
xres = 1e-2 / float(im.properties['tiff.XResolution']) # m
yres = 1e-2 / float(im.properties['tiff.YResolution']) # m
# create dataframe for this image
df_common = cytometer.data.tag_values_with_mouse_info(metainfo=metainfo, s=os.path.basename(json_file),
values=[i_file,], values_tag='i_file',
tags_to_keep=['id', 'ko_parent', 'genotype', 'sex',
'BW', 'gWAT', 'SC'])
# mouse ID as a string
id = df_common['id'].values[0]
ko = df_common['ko_parent'].values[0]
genotype = df_common['genotype'].values[0]
sex = df_common['sex'].values[0]
bw = df_common['BW'].values[0]
gwat = df_common['gWAT'].values[0]
sc = df_common['SC'].values[0]
# read contours from AIDA annotations
contours = cytometer.data.aida_get_contours(os.path.join(annotations_dir, json_file), layer_name='White adipocyte.*')
# compute area of each contour
            areas = [Polygon(c).area * xres * yres for c in contours]  # (m^2)
# compute average area of all contours
area_mean = np.mean(areas)
# compute HD quantiles
area_q = scipy.stats.mstats.hdquantiles(areas, prob=quantiles, axis=0)
# append to totals
area_mean_all.append(area_mean)
area_q_all.append(area_q)
id_all.append(id)
ko_all.append(ko)
genotype_all.append(genotype)
sex_all.append(sex)
bw_all.append(bw)
gwat_all.append(gwat)
sc_all.append(sc)
# reorder from largest to smallest final area value
area_mean_all = np.array(area_mean_all)
area_q_all = np.array(area_q_all)
id_all = np.array(id_all)
ko_all = np.array(ko_all)
genotype_all = np.array(genotype_all)
sex_all = np.array(sex_all)
bw_all = np.array(bw_all)
gwat_all = np.array(gwat_all)
sc_all = np.array(sc_all)
idx = np.argsort(area_q_all[:, -1])
idx = idx[::-1] # sort from larger to smaller
area_mean_all = area_mean_all[idx]
area_q_all = area_q_all[idx, :]
id_all = id_all[idx]
ko_all = ko_all[idx]
genotype_all = genotype_all[idx]
sex_all = sex_all[idx]
bw_all = bw_all[idx]
gwat_all = gwat_all[idx]
sc_all = sc_all[idx]
np.savez_compressed(filename_quantiles, area_mean_all=area_mean_all, area_q_all=area_q_all, id_all=id_all,
ko_all=ko_all, genotype_all=genotype_all, sex_all=sex_all,
bw_all=bw_all, gwat_all=gwat_all, sc_all=sc_all)
if DEBUG:
plt.clf()
for i in range(len(area_q_all)):
# plot
if ko_all[i] == 'PAT':
color = 'g'
elif ko_all[i] == 'MAT':
color = 'r'
else:
                raise ValueError('Unknown ko value: ' + ko_all[i])
if sex_all[i] == 'f':
plt.subplot(121)
plt.plot(quantiles, area_q_all[i] * 1e12 * 1e-3, color=color)
elif sex_all[i] == 'm':
plt.subplot(122)
plt.plot(quantiles, area_q_all[i] * 1e12 * 1e-3, color=color)
else:
                raise ValueError('Unknown sex value: ' + sex_all[i])
legend_f = [i + ' ' + j.replace('KLF14-KO:', '') for i, j
in zip(id_all[sex_all == 'f'], genotype_all[sex_all == 'f'])]
legend_m = [i + ' ' + j.replace('KLF14-KO:', '') for i, j
in zip(id_all[sex_all == 'm'], genotype_all[sex_all == 'm'])]
plt.subplot(121)
plt.title('Female', fontsize=14)
plt.tick_params(labelsize=14)
plt.xlabel('Quantile', fontsize=14)
plt.ylabel('Area ($10^{3}\ \mu m^2$)', fontsize=14)
plt.legend(legend_f, fontsize=12)
plt.subplot(122)
plt.title('Male', fontsize=14)
plt.tick_params(labelsize=14)
plt.xlabel('Quantile', fontsize=14)
plt.legend(legend_m, fontsize=12)
# DEBUG:
# area_q_all = np.vstack((area_q_all, area_q_all))
# id_all = np.hstack((id_all, id_all))
# ko_all = np.hstack((ko_all, ko_all))
# genotype_all = np.hstack((genotype_all, genotype_all))
# sex_all = np.hstack((sex_all, sex_all))
# compute variability of area values for each quantile
area_q_f_pat = area_q_all[(sex_all == 'f') * (ko_all == 'PAT'), :]
area_q_m_pat = area_q_all[(sex_all == 'm') * (ko_all == 'PAT'), :]
area_q_f_mat = area_q_all[(sex_all == 'f') * (ko_all == 'MAT'), :]
area_q_m_mat = area_q_all[(sex_all == 'm') * (ko_all == 'MAT'), :]
area_interval_f_pat = scipy.stats.mstats.hdquantiles(area_q_f_pat, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_m_pat = scipy.stats.mstats.hdquantiles(area_q_m_pat, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_f_mat = scipy.stats.mstats.hdquantiles(area_q_f_mat, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_m_mat = scipy.stats.mstats.hdquantiles(area_q_m_mat, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_q_f_pat_wt = area_q_all[(sex_all == 'f') * (ko_all == 'PAT') * (genotype_all == 'KLF14-KO:WT'), :]
area_q_m_pat_wt = area_q_all[(sex_all == 'm') * (ko_all == 'PAT') * (genotype_all == 'KLF14-KO:WT'), :]
area_q_f_mat_wt = area_q_all[(sex_all == 'f') * (ko_all == 'MAT') * (genotype_all == 'KLF14-KO:WT'), :]
area_q_m_mat_wt = area_q_all[(sex_all == 'm') * (ko_all == 'MAT') * (genotype_all == 'KLF14-KO:WT'), :]
area_interval_f_pat_wt = scipy.stats.mstats.hdquantiles(area_q_f_pat_wt, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_m_pat_wt = scipy.stats.mstats.hdquantiles(area_q_m_pat_wt, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_f_mat_wt = scipy.stats.mstats.hdquantiles(area_q_f_mat_wt, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_m_mat_wt = scipy.stats.mstats.hdquantiles(area_q_m_mat_wt, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_q_f_pat_het = area_q_all[(sex_all == 'f') * (ko_all == 'PAT') * (genotype_all == 'KLF14-KO:Het'), :]
area_q_m_pat_het = area_q_all[(sex_all == 'm') * (ko_all == 'PAT') * (genotype_all == 'KLF14-KO:Het'), :]
area_q_f_mat_het = area_q_all[(sex_all == 'f') * (ko_all == 'MAT') * (genotype_all == 'KLF14-KO:Het'), :]
area_q_m_mat_het = area_q_all[(sex_all == 'm') * (ko_all == 'MAT') * (genotype_all == 'KLF14-KO:Het'), :]
area_interval_f_pat_het = scipy.stats.mstats.hdquantiles(area_q_f_pat_het, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_m_pat_het = scipy.stats.mstats.hdquantiles(area_q_m_pat_het, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_f_mat_het = scipy.stats.mstats.hdquantiles(area_q_f_mat_het, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_m_mat_het = scipy.stats.mstats.hdquantiles(area_q_m_mat_het, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
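    # Note (added comment): each row of the area_interval_* arrays corresponds to the 2.5%, 25%, 50%, 75% and 97.5%
    # quantiles across animals, so row 2 is the median profile and rows 1/3 bound the interquartile band shaded in the
    # plots below.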
n_f_pat_wt = area_q_f_pat_wt.shape[0]
n_m_pat_wt = area_q_m_pat_wt.shape[0]
n_f_mat_wt = area_q_f_mat_wt.shape[0]
n_m_mat_wt = area_q_m_mat_wt.shape[0]
n_f_pat_het = area_q_f_pat_het.shape[0]
n_m_pat_het = area_q_m_pat_het.shape[0]
n_f_mat_het = area_q_f_mat_het.shape[0]
n_m_mat_het = area_q_m_mat_het.shape[0]
if DEBUG:
# plots of female median ECDF^-1 with quartile shaded area
plt.clf()
plt.subplot(121)
plt.plot(quantiles, area_interval_f_pat_wt[2, :] * 1e12 * 1e-3, 'C0', linewidth=3, label=str(n_f_pat_wt) + ' WT')
plt.fill_between(quantiles, area_interval_f_pat_wt[1, :] * 1e12 * 1e-3, area_interval_f_pat_wt[3, :] * 1e12 * 1e-3,
facecolor='C0', alpha=0.3)
plt.plot(quantiles, area_interval_f_pat_het[2, :] * 1e12 * 1e-3, 'k', linewidth=3, label=str(n_f_pat_het) + ' Het')
plt.fill_between(quantiles, area_interval_f_pat_het[1, :] * 1e12 * 1e-3, area_interval_f_pat_het[3, :] * 1e12 * 1e-3,
facecolor='k', alpha=0.3)
plt.title('Female PAT', fontsize=14)
plt.xlabel('Quantile', fontsize=14)
plt.ylabel('White adipocyte area ($\cdot 10^3 \mu$m$^2$)', fontsize=14)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.legend(loc='upper left', prop={'size': 12})
plt.ylim(0, 15)
plt.tight_layout()
plt.subplot(122)
plt.plot(quantiles, area_interval_f_mat_wt[2, :] * 1e12 * 1e-3, 'C0', linewidth=3, label=str(n_f_mat_wt) + ' WT')
plt.fill_between(quantiles, area_interval_f_mat_wt[1, :] * 1e12 * 1e-3, area_interval_f_mat_wt[3, :] * 1e12 * 1e-3,
facecolor='C0', alpha=0.3)
plt.plot(quantiles, area_interval_f_mat_het[2, :] * 1e12 * 1e-3, 'k', linewidth=3, label=str(n_f_mat_het) + ' Het')
plt.fill_between(quantiles, area_interval_f_mat_het[1, :] * 1e12 * 1e-3, area_interval_f_mat_het[3, :] * 1e12 * 1e-3,
facecolor='k', alpha=0.3)
plt.title('Female MAT', fontsize=14)
plt.xlabel('Quantile', fontsize=14)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.legend(loc='upper left', prop={'size': 12})
plt.ylim(0, 15)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'exp_0099_' + depot + '_cell_area_female_pat_vs_mat_bands.svg'))
plt.savefig(os.path.join(figures_dir, 'exp_0099_' + depot + '_cell_area_female_pat_vs_mat_bands.png'))
if DEBUG:
# plots of male median ECDF^-1 with quartile shaded area
plt.clf()
plt.subplot(121)
plt.plot(quantiles, area_interval_m_pat_wt[2, :] * 1e12 * 1e-3, 'C0', linewidth=3, label=str(n_m_pat_wt) + ' WT')
plt.fill_between(quantiles, area_interval_m_pat_wt[1, :] * 1e12 * 1e-3, area_interval_m_pat_wt[3, :] * 1e12 * 1e-3,
facecolor='C0', alpha=0.3)
plt.plot(quantiles, area_interval_m_pat_het[2, :] * 1e12 * 1e-3, 'k', linewidth=3, label=str(n_m_pat_het) + ' Het')
plt.fill_between(quantiles, area_interval_m_pat_het[1, :] * 1e12 * 1e-3, area_interval_m_pat_het[3, :] * 1e12 * 1e-3,
facecolor='k', alpha=0.3)
plt.title('Male PAT', fontsize=14)
plt.xlabel('Quantile', fontsize=14)
plt.ylabel('White adipocyte area ($\cdot 10^3 \mu$m$^2$)', fontsize=14)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.legend(loc='upper left', prop={'size': 12})
plt.ylim(0, 16)
plt.tight_layout()
plt.subplot(122)
plt.plot(quantiles, area_interval_m_mat_wt[2, :] * 1e12 * 1e-3, 'C0', linewidth=3, label=str(n_m_mat_wt) + ' WT')
plt.fill_between(quantiles, area_interval_m_mat_wt[1, :] * 1e12 * 1e-3, area_interval_m_mat_wt[3, :] * 1e12 * 1e-3,
facecolor='C0', alpha=0.3)
plt.plot(quantiles, area_interval_m_mat_het[2, :] * 1e12 * 1e-3, 'k', linewidth=3, label=str(n_m_mat_het) + ' Het')
plt.fill_between(quantiles, area_interval_m_mat_het[1, :] * 1e12 * 1e-3, area_interval_m_mat_het[3, :] * 1e12 * 1e-3,
facecolor='k', alpha=0.3)
plt.title('Male MAT', fontsize=14)
plt.xlabel('Quantile', fontsize=14)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.legend(loc='upper left', prop={'size': 12})
plt.ylim(0, 16)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'exp_0099_' + depot + '_cell_area_male_pat_vs_mat_bands.svg'))
plt.savefig(os.path.join(figures_dir, 'exp_0099_' + depot + '_cell_area_male_pat_vs_mat_bands.png'))
filename_pvals = os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_pvals_' + depot + '.npz')
if os.path.isfile(filename_pvals):
        with np.load(filename_pvals)
import numpy as np
# determine whether two detected cores are separable
def is_separable(local_outcat):
# row1 = random.sample(range(0, 1000), 10)
# test_data = data.iloc[row1]
# test_data = np.array(test_data)
    local_outcat[:, 7: 10] = local_outcat[:, 7: 10] / 2.3548  # axis lengths in the detection catalogue (as standard deviations)
    distance_xy = []  # xy-distance matrix for expression (2)
    distance_xy_sigma = []  # sigma-distance matrix for expression (2)
    distance_v = []  # v-distance matrix for expression (1)
    distance_v_sigma = []  # sigma-distance matrix for expression (1)
for i in range(local_outcat.shape[0]):
distance_xy.append(np.sqrt((local_outcat[:, 1] - local_outcat[i, 1]) ** 2 + (local_outcat[:, 2] - local_outcat[i, 2]) ** 2))
distance_xy_sigma.append(2.3548 * np.sqrt(local_outcat[:, 8] ** 2 + local_outcat[:, 7] ** 2 + local_outcat[i, 8] ** 2 +
local_outcat[i, 7] ** 2))
distance_v.append(np.abs(local_outcat[:, 3] - local_outcat[i, 3]))
distance_v_sigma.append(2.3548 * np.sqrt(local_outcat[:, 9] ** 2 + local_outcat[i, 9] ** 2))
distance_xy = np.array(distance_xy)
distance_xy_sigma = np.array(distance_xy_sigma)
distance_v = np.array(distance_v)
distance_v_sigma = np.array(distance_v_sigma)
func1_res = distance_v - distance_v_sigma
    func1_res[func1_res >= 0] = 0  # separable
    func1_res[func1_res < 0] = 1  # not separable
    func2_res = distance_xy - distance_xy_sigma
    func2_res[func2_res >= 0] = 0  # separable
    func2_res[func2_res < 0] = 1  # not separable
result = np.zeros_like(func2_res)
for i in range(func2_res.shape[0]):
for j in range(func2_res.shape[1]):
result[i, j] = func2_res[i, j] and func1_res[i, j]
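    # Note (sketch, not executed): the nested loop above is equivalent to a single vectorized call,
    #   result = np.logical_and(func1_res, func2_res).astype(func2_res.dtype)
    # the explicit loop is kept unchanged here.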
row, col = np.diag_indices_from(result)
    result[row, col] = np.array(0)  # set the diagonal elements to 0
    a = np.triu(result)
"""Test script for single agent problems.
This scripts runs the best model found by one of the executions of `singleagent.py`
Example
-------
To run the script, type in a terminal:
$ python test_singleagent.py --exp ./results/save-<env>-<algo>-<obs>-<act>-<time_date> --time <seconds>
"""
import sys
sys.path.append('../../')
import os
import time
from datetime import datetime
import argparse
import re
import numpy as np
import gym
import torch
from stable_baselines3.common.env_checker import check_env
from stable_baselines3 import A2C, PPO, SAC, TD3, DDPG
from stable_baselines3.common.policies import ActorCriticPolicy as a2cppoMlpPolicy
from stable_baselines3.sac.policies import SACPolicy as sacMlpPolicy
from stable_baselines3.td3 import MlpPolicy as td3ddpgMlpPolicy
from stable_baselines3.common.evaluation import evaluate_policy
from gym_pybullet_drones.utils.utils import sync
from gym_pybullet_drones.utils.Logger import Logger
from gym_pybullet_drones.envs.single_agent_rl.MoveAviary import MoveAviary
from gym_pybullet_drones.envs.single_agent_rl.HoverAviary import HoverAviary
from gym_pybullet_drones.envs.single_agent_rl.BaseSingleAgentAviary import ActionType, ObservationType
import shared_constants
STARTING_POINT = np.array([[0, 0, 1.2]])
def selective_noise(obs, mean=0.1, dev=0.05):
noise = np.random.normal(mean, dev, size=(12,))
obs[10] += noise[10]
obs[11] += noise[11]
return obs
def white_noise(obs):
noise = np.random.normal(0, 0.08, size=(12,))
obs[:] += noise[:]
return obs
if __name__ == "__main__":
#### Define and parse (optional) arguments for the script ##
parser = argparse.ArgumentParser(description='Single agent reinforcement learning example script using MoveAviary')
parser.add_argument('--exp', type=str,
help='The experiment folder written as ./results/save-<env>-<algo>-<obs>-<act>-<time_date>',
metavar='')
parser.add_argument('--time', default='6', type=int, help='Time to run experiment in seconds', metavar='')
parser.add_argument('--white_noise', default='0', type=int,
help='White noise on all observations, 1 active, 0 inactive', metavar='')
parser.add_argument('--noise_mean', default='0.6', type=float, help='Selective noise mean', metavar='')
parser.add_argument('--noise_dev', default='0.1', type=float, help='Selective noise std deviation', metavar='')
parser.add_argument('--noise_starting_sec', default='2', type=float, help='Selective noise starting time',
metavar='')
parser.add_argument('--noise_duration', default='0', type=float, help='Selective noise duration', metavar='')
ARGS = parser.parse_args()
#### Load the model from file ##############################
algo = ARGS.exp.split("-")[2]
if os.path.isfile(ARGS.exp + '/best_model.zip'):
path = ARGS.exp + '/best_model.zip'
elif os.path.isfile(ARGS.exp + '/success_model.zip'):
path = ARGS.exp + '/success_model.zip'
else:
        print("[ERROR]: no model under the specified path", ARGS.exp)
        sys.exit(1)
if algo == 'a2c':
model = A2C.load(path)
if algo == 'ppo':
model = PPO.load(path)
if algo == 'sac':
model = SAC.load(path)
if algo == 'td3':
model = TD3.load(path)
if algo == 'ddpg':
model = DDPG.load(path)
#### Parameters to recreate the environment ################
env_name = ARGS.exp.split("-")[1] + "-aviary-v0"
OBS = ObservationType.KIN if ARGS.exp.split("-")[3] == 'kin' else ObservationType.RGB
if ARGS.exp.split("-")[4] == 'rpm':
ACT = ActionType.RPM
elif ARGS.exp.split("-")[4] == 'dyn':
ACT = ActionType.DYN
elif ARGS.exp.split("-")[4] == 'pid':
ACT = ActionType.PID
elif ARGS.exp.split("-")[4] == 'vel':
ACT = ActionType.VEL
elif ARGS.exp.split("-")[4] == 'tun':
ACT = ActionType.TUN
elif ARGS.exp.split("-")[4] == 'one_d_rpm':
ACT = ActionType.ONE_D_RPM
elif ARGS.exp.split("-")[4] == 'one_d_dyn':
ACT = ActionType.ONE_D_DYN
elif ARGS.exp.split("-")[4] == 'one_d_pid':
ACT = ActionType.ONE_D_PID
#### Evaluate the model ####################################
if 'move' in env_name:
eval_env = MoveAviary(initial_xyzs=STARTING_POINT,
aggregate_phy_steps=shared_constants.AGGR_PHY_STEPS,
obs=OBS,
act=ACT)
elif 'hover' in env_name:
eval_env = HoverAviary(initial_xyzs=STARTING_POINT,
aggregate_phy_steps=shared_constants.AGGR_PHY_STEPS,
obs=OBS,
act=ACT)
mean_reward, std_reward = evaluate_policy(model,
eval_env,
n_eval_episodes=10
)
print("\n\n\nMean reward ", mean_reward, " +- ", std_reward, "\n\n")
#### Show, record a video, and log the model's performance ####
if 'move' in env_name:
test_env = MoveAviary(initial_xyzs=STARTING_POINT,
aggregate_phy_steps=shared_constants.AGGR_PHY_STEPS,
obs=OBS,
act=ACT,
gui=True,
record=True
)
elif 'hover' in env_name:
test_env = HoverAviary(initial_xyzs=STARTING_POINT,
aggregate_phy_steps=shared_constants.AGGR_PHY_STEPS,
obs=OBS,
act=ACT,
gui=True,
record=True
)
logger = Logger(logging_freq_hz=int(test_env.SIM_FREQ / test_env.AGGR_PHY_STEPS),
num_drones=1
)
obs = test_env.reset()
start = time.time()
for i in range(ARGS.time * int(test_env.SIM_FREQ / test_env.AGGR_PHY_STEPS)):
#### APPLY NOISE ####
if ARGS.white_noise == 1:
obs = white_noise(obs)
if ARGS.noise_duration > 0.5:
start_noise = ARGS.noise_starting_sec
end_noise = ARGS.noise_starting_sec + ARGS.noise_duration
if start_noise * int(test_env.SIM_FREQ / test_env.AGGR_PHY_STEPS) < i < end_noise * int(
test_env.SIM_FREQ / test_env.AGGR_PHY_STEPS):
obs = selective_noise(obs, ARGS.noise_mean, ARGS.noise_dev)
################
action, _states = model.predict(obs,
deterministic=True
)
obs, reward, done, info = test_env.step(action)
test_env.render()
if OBS == ObservationType.KIN:
logger.log(drone=0,
timestamp=i / test_env.SIM_FREQ,
state=np.hstack([obs[0:3], np.zeros(4), obs[3:15], np.resize(action, (4))]),
                       control=np.zeros(12)
                       )
import itertools
import numpy as np
from scipy.ndimage.filters import gaussian_filter
import matplotlib.pyplot as plt
from transformations import SimpleTransformer
from utils import load_mnist
from keras.utils import to_categorical
from keras.layers import Flatten, Conv2D, Dense, BatchNormalization, MaxPool2D, Input, Lambda, average
from keras.models import Sequential, Model
import keras.backend as K
import tensorflow as tf
(x_train, y_train), (x_test, y_test) = load_mnist()
# scale to be in [0, 1]
x_train = (x_train + 1) / 2.
x_test = (x_test + 1) / 2.
single_class_ind = 3
anomaly_class_ind = 0
x_train_single = x_train[y_train == single_class_ind]
x_test_single = x_test[y_test == single_class_ind]
x_test_anomaly = x_test[y_test == anomaly_class_ind]
transformer = SimpleTransformer()
transformations_inds = np.tile(np.arange(transformer.n_transforms), len(x_train_single))
x_train_single_transformed = transformer.transform_batch(np.repeat(x_train_single, transformer.n_transforms, axis=0),
transformations_inds)
mdl = Sequential([Conv2D(32, (3, 3), padding='same', input_shape=(32, 32, 1), activation='relu'),
BatchNormalization(axis=-1),
MaxPool2D(),
Flatten(),
Dense(10, activation='relu'),
BatchNormalization(axis=-1),
Dense(transformer.n_transforms, activation='softmax')])
mdl.compile('adam',
'categorical_crossentropy',
['acc'])
batch_size = 64
mdl.fit(x=x_train_single_transformed,
y=to_categorical(transformations_inds),
batch_size=batch_size,
validation_split=0.1,
epochs=10)
single_class_preds = np.zeros((len(x_test_single), transformer.n_transforms))
for t in range(transformer.n_transforms):
single_class_preds[:, t] = mdl.predict(transformer.transform_batch(x_test_single, [t] * len(x_test_single)),
batch_size=batch_size)[:, t]
single_class_scores = single_class_preds.mean(axis=-1)
anomaly_class_preds = np.zeros((len(x_test_anomaly), transformer.n_transforms))
for t in range(transformer.n_transforms):
anomaly_class_preds[:, t] = mdl.predict(transformer.transform_batch(x_test_anomaly, [t] * len(x_test_anomaly)),
batch_size=batch_size)[:, t]
anomaly_class_scores = anomaly_class_preds.mean(axis=-1)
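# Sketch (illustration only, commented out): the two score vectors above can be summarized with an AUROC, treating the
# single (normal) class as positives; scikit-learn is assumed as an extra dependency here.
#   from sklearn.metrics import roc_auc_score
#   y_true = np.concatenate([np.ones(len(single_class_scores)), np.zeros(len(anomaly_class_scores))])
#   y_score = np.concatenate([single_class_scores, anomaly_class_scores])
#   print('AUROC:', roc_auc_score(y_true, y_score))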
def affine(x, is_flip, k_rotate):
return tf.image.rot90(tf.image.flip_left_right(x) if is_flip else x,
k=k_rotate)
x_in = Input(batch_shape=mdl.input_shape)
transformations_sm_responses = [mdl(Lambda(affine, arguments={'is_flip': is_flip, 'k_rotate': k_rotate})(x_in))
for is_flip, k_rotate in itertools.product((False, True), range(4))]
out = average([Lambda(lambda sm_res: sm_res[:, j:j+1])(tens) for j, tens in enumerate(transformations_sm_responses)])
inference_mdl = Model(x_in, out)
grads_tensor = K.gradients([inference_mdl.output], [inference_mdl.input])[0]
grads_fn = K.function([inference_mdl.input], [grads_tensor])
def optimize_anomaly_images():
for im_ind in range(len(x_test_anomaly)):
im = x_test_anomaly[im_ind:im_ind+1].copy()
eta = 5
for _ in range(200):
grads = grads_fn([im])[0]
grads[np.abs(grads * im) < np.percentile(np.abs(grads * im), 80)] = 0
im_diff = grads * eta
im_diff *= 0.99
im += im_diff
im = gaussian_filter(im, 0.28)
im = np.clip(im, 0, 1)
            im[im < np.percentile(np.abs(im)
from functools import partial
import numpy as np
from scipy.optimize import brentq
from scipy.special import ndtr
from scipy.stats import gaussian_kde
from copulas import EPSILON, scalarize, store_args
from copulas.marginals.model import BoundedType, ParametricType, ScipyModel
class GaussianKDE(ScipyModel):
PARAMETRIC = ParametricType.NON_PARAMETRIC
BOUNDED = BoundedType.BOUNDED
MODEL_CLASS = gaussian_kde
@store_args
def __init__(self, sample_size=None, random_seed=None, bw_method=None, weights=None):
self.random_seed = random_seed
self._sample_size = sample_size
self.bw_method = bw_method
self.weights = weights
def _get_model(self):
dataset = self._params['dataset']
self._sample_size = self._sample_size or len(dataset)
return gaussian_kde(dataset, bw_method=self.bw_method, weights=self.weights)
def _get_bounds(self):
X = self._params['dataset']
lower = np.min(X) - (5 * np.std(X))
        upper = np.max(X)
import tensorflow as tf
from sklearn.metrics import confusion_matrix
import numpy as np
import load
train_samples, train_labels = load._train_samples, load._train_labels
test_samples, test_labels = load._test_samples, load._test_labels
print('Training set', train_samples.shape, train_labels.shape)
print(' Test set', test_samples.shape, test_labels.shape)
image_size = load.image_size
num_labels = load.num_labels
num_channels = load.num_channels
def get_chunk(samples, labels, chunkSize):
"""
Iterator/Generator: get a batch of data
    This function is an iterator/generator that yields only chunkSize items of data at a time.
    It is used in a for loop, just like the range() function.
"""
if len(samples) != len(labels):
raise Exception('Length of samples and labels must equal')
stepStart = 0 # initial step
i = 0
while stepStart < len(samples):
stepEnd = stepStart + chunkSize
if stepEnd < len(samples):
yield i, samples[stepStart:stepEnd], labels[stepStart:stepEnd]
i += 1
stepStart = stepEnd
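# Usage sketch (illustration only, commented out): iterate over the training set in mini-batches, exactly as
# Network.run() below consumes this generator.
#   for step, batch_samples, batch_labels in get_chunk(train_samples, train_labels, chunkSize=100):
#       print(step, batch_samples.shape, batch_labels.shape)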
class Network():
def __init__(self, num_hidden, batch_size):
"""
        @num_hidden: number of nodes in the hidden layer
        @batch_size: we process the data in batches to save memory; this is the size of each batch.
"""
self.batch_size = batch_size
self.test_batch_size = 500
# Hyper Parameters
self.num_hidden = num_hidden
# Graph Related
self.graph = tf.Graph()
self.tf_train_samples = None
self.tf_train_labels = None
self.tf_test_samples = None
self.tf_test_labels = None
self.tf_test_prediction = None
def define_graph(self):
"""
        Define the computation graph.
"""
with self.graph.as_default():
            # here we only define the variables of the graph
self.tf_train_samples = tf.placeholder(
tf.float32, shape=(self.batch_size, image_size, image_size, num_channels)
)
self.tf_train_labels = tf.placeholder(
tf.float32, shape=(self.batch_size, num_labels)
)
self.tf_test_samples = tf.placeholder(
tf.float32, shape=(self.test_batch_size, image_size, image_size, num_channels)
)
# fully connected layer 1, fully connected
fc1_weights = tf.Variable(
tf.truncated_normal([image_size * image_size * num_channels, self.num_hidden], stddev=0.1)
)
fc1_biases = tf.Variable(tf.constant(0.1, shape=[self.num_hidden]))
# fully connected layer 2 --> output layer
fc2_weights = tf.Variable(
tf.truncated_normal([self.num_hidden, num_labels], stddev=0.1)
)
fc2_biases = tf.Variable(tf.constant(0.1, shape=[num_labels]))
            # now define the operations of the graph
def model(data):
# fully connected layer 1
shape = data.get_shape().as_list()
print(data.get_shape(), shape)
reshape = tf.reshape(data, [shape[0], shape[1] * shape[2] * shape[3]])
print(reshape.get_shape(), fc1_weights.get_shape(), fc1_biases.get_shape())
hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
# fully connected layer 2
return tf.matmul(hidden, fc2_weights) + fc2_biases
# Training computation.
logits = model(self.tf_train_samples)
self.loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=self.tf_train_labels)
)
# print(self.loss)
# Optimizer.
self.optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(self.loss)
# Predictions for the training, validation, and test data.
self.train_prediction = tf.nn.softmax(logits)
self.test_prediction = tf.nn.softmax(model(self.tf_test_samples))
def run(self):
"""
        Runs the graph in a tf.Session.
"""
# private function
def print_confusion_matrix(confusionMatrix):
print('Confusion Matrix:')
for i, line in enumerate(confusionMatrix):
print(line, line[i] / np.sum(line))
a = 0
for i, column in enumerate(np.transpose(confusionMatrix, (1, 0))):
a += (column[i] / np.sum(column)) * (np.sum(column) / 26000)
print(column[i] / np.sum(column), )
print('\n', np.sum(confusionMatrix), a)
self.session = tf.Session(graph=self.graph)
with self.session as session:
tf.global_variables_initializer().run()
            ### Training
print('Start Training')
# batch 1000
for i, samples, labels in get_chunk(train_samples, train_labels, chunkSize=self.batch_size):
_, l, predictions = session.run(
[self.optimizer, self.loss, self.train_prediction],
feed_dict={self.tf_train_samples: samples, self.tf_train_labels: labels}
)
print("loss is :", l)
# labels is True Labels
accuracy, _ = self.accuracy(predictions, labels)
if i % 50 == 0:
print('Minibatch loss at step %d: %f' % (i, l))
print('Minibatch accuracy: %.1f%%' % accuracy)
###
            ### Testing
print('Start Testing')
accuracies = []
confusionMatrices = []
for i, samples, labels in get_chunk(test_samples, test_labels, chunkSize=self.test_batch_size):
result = self.test_prediction.eval(feed_dict={self.tf_test_samples: samples})
accuracy, cm = self.accuracy(result, labels, need_confusion_matrix=True)
accuracies.append(accuracy)
confusionMatrices.append(cm)
print('Test Accuracy: %.1f%%' % accuracy)
print(' Average Accuracy:', np.average(accuracies))
print('Standard Deviation:', np.std(accuracies))
print_confusion_matrix(np.add.reduce(confusionMatrices))
###
def accuracy(self, predictions, labels, need_confusion_matrix=False):
"""
        Compute the prediction accuracy (and optionally the confusion matrix).
@return: accuracy and confusionMatrix as a tuple
"""
_predictions = np.argmax(predictions, 1)
_labels = np.argmax(labels, 1)
cm = confusion_matrix(_labels, _predictions) if need_confusion_matrix else None
# == is overloaded for numpy array
        accuracy = (100.0 * np.sum(_predictions == _labels)
import os
import sys
import numpy as np
import time
import glob
import pickle
from sklearn.neighbors import KDTree
import tensorflow as tf
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
DATA_DIR = os.path.join('/data/dataset/', 'SensatUrban')
if not os.path.exists(DATA_DIR):
raise IOError(f"{DATA_DIR} not found!")
from utils.ply import read_ply, write_ply
from .custom_dataset import CustomDataset, grid_subsampling, tf_batch_subsampling, tf_batch_neighbors
class SensatUrbanDataset(CustomDataset):
def __init__(self, config, input_threads=8):
"""Class to handle SensatUrban dataset for scene segmentation task.
Args:
config: config file
            input_threads: the number of elements to process in parallel
"""
super(SensatUrbanDataset, self).__init__()
self.config = config
self.num_threads = input_threads
self.trainval = config.get('trainval', False)
# Dict from labels to names
self.path = DATA_DIR
self.label_to_names = {0: 'Ground', 1: 'High Vegetation', 2: 'Buildings', 3: 'Walls',
4: 'Bridge', 5: 'Parking', 6: 'Rail', 7: 'traffic Roads', 8: 'Street Furniture',
9: 'Cars', 10: 'Footpath', 11: 'Bikes', 12: 'Water'}
# Initiate a bunch of variables concerning class labels
self.init_labels()
config.num_classes = self.num_classes
# Number of input threads
self.num_threads = input_threads
self.all_files = np.sort(glob.glob(os.path.join(self.path, 'original_block_ply', '*.ply')))
self.val_file_name = ['birmingham_block_1',
'birmingham_block_5',
'cambridge_block_10',
'cambridge_block_7']
self.test_file_name = ['birmingham_block_2', 'birmingham_block_8',
'cambridge_block_15', 'cambridge_block_22',
'cambridge_block_16', 'cambridge_block_27']
# Some configs
self.num_gpus = config.num_gpus
self.first_subsampling_dl = config.first_subsampling_dl
self.in_features_dim = config.in_features_dim
self.num_layers = config.num_layers
self.downsample_times = config.num_layers - 1
self.density_parameter = config.density_parameter
self.batch_size = config.batch_size
self.augment_scale_anisotropic = config.augment_scale_anisotropic
self.augment_symmetries = config.augment_symmetries
self.augment_rotation = config.augment_rotation
self.augment_scale_min = config.augment_scale_min
self.augment_scale_max = config.augment_scale_max
self.augment_noise = config.augment_noise
self.augment_color = config.augment_color
self.epoch_steps = config.epoch_steps
self.validation_size = config.validation_size
self.in_radius = config.in_radius
# initialize
self.num_per_class = np.zeros(self.num_classes)
self.ignored_labels = np.array([])
self.val_proj = []
self.val_labels = []
self.test_proj = []
self.test_labels = []
self.possibility = {}
self.min_possibility = {}
self.input_trees = {'training': [], 'validation': [], 'test': []}
self.input_colors = {'training': [], 'validation': [], 'test': []}
self.input_labels = {'training': [], 'validation': [], 'test': []}
self.input_names = {'training': [], 'validation': [], 'test': []}
# input subsampling
self.load_sub_sampled_clouds(self.first_subsampling_dl)
self.batch_limit = self.calibrate_batches()
print("batch_limit: ", self.batch_limit)
self.neighborhood_limits = [26, 31, 38, 41, 39]
self.neighborhood_limits = [int(l * self.density_parameter // 5) for l in self.neighborhood_limits]
print("neighborhood_limits: ", self.neighborhood_limits)
# Get generator and mapping function
gen_function, gen_types, gen_shapes = self.get_batch_gen('training')
gen_function_val, _, _ = self.get_batch_gen('validation')
gen_function_test, _, _ = self.get_batch_gen('test')
map_func = self.get_tf_mapping()
self.train_data = tf.data.Dataset.from_generator(gen_function, gen_types, gen_shapes)
self.train_data = self.train_data.map(map_func=map_func, num_parallel_calls=self.num_threads)
self.train_data = self.train_data.prefetch(10)
self.val_data = tf.data.Dataset.from_generator(gen_function_val, gen_types, gen_shapes)
self.val_data = self.val_data.map(map_func=map_func, num_parallel_calls=self.num_threads)
self.val_data = self.val_data.prefetch(10)
self.test_data = tf.data.Dataset.from_generator(gen_function_test, gen_types, gen_shapes)
self.test_data = self.test_data.map(map_func=map_func, num_parallel_calls=self.num_threads)
self.test_data = self.test_data.prefetch(10)
        # create an iterator of the correct shape and type
iter = tf.data.Iterator.from_structure(self.train_data.output_types, self.train_data.output_shapes)
self.flat_inputs = [None] * self.num_gpus
for i in range(self.num_gpus):
self.flat_inputs[i] = iter.get_next()
# create the initialisation operations
self.train_init_op = iter.make_initializer(self.train_data)
self.val_init_op = iter.make_initializer(self.val_data)
self.test_init_op = iter.make_initializer(self.test_data)
@staticmethod
def get_num_class_from_label(labels, total_class):
num_pts_per_class = np.zeros(total_class, dtype=np.int32)
# original class distribution
        val_list, counts = np.unique(labels, return_counts=True)
import numpy as np
from skimage import filters
from skimage.util import random_noise, img_as_ubyte, img_as_float
from scipy.ndimage.interpolation import zoom as npzoom
from skimage.transform import rescale
import PIL
def no_crap(img, scale=4, upsample=False):
from skimage.transform import rescale
x = np.array(img)
multichannel = len(x.shape) > 2
x = rescale(x, scale=1/scale, order=1, multichannel=multichannel)
x *= np.iinfo(np.uint8).max
return PIL.Image.fromarray(x.astype(np.uint8))
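# Usage sketch (illustration only, commented out; assumes `img` is a PIL image): create a 4x-downsampled
# low-resolution copy, e.g. for pairing with the original as super-resolution training data.
#   low_res = no_crap(img, scale=4)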
def fluo_G_D(x, scale=4, upsample=False):
xn = np.array(x)
xorig_max = xn.max()
xn = xn.astype(np.float32)
xn /= float(np.iinfo(np.uint8).max)
x = np.array(x)
mu, sigma = 0, 5
    noise = np.random.normal(mu, sigma*0.05, x.shape)
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from .taskdataset import EbbFlowStats
class PlotRTs(EbbFlowStats):
"""Plot RT distributions.
Args
----
stats_obj (EbbFlowStats instance): Data from the model/participant.
palette (str, optional): Color palette used for plotting.
"""
def __init__(self, stats_obj, palette='viridis'):
self.__dict__ = stats_obj.__dict__
self.palette = palette
def plot_rt_dists(self, ax, plot_type):
if plot_type == 'all':
plot_df = self._format_all()
elif plot_type == 'switch':
plot_df = self._format_by_switch()
elif plot_type == 'congruency':
plot_df = self._format_by_congruency()
sns.violinplot(x='trial_type', y='rts', hue='model_or_user',
data=plot_df, split=True, inner=None, ax=ax,
palette=self.palette, cut=0, linewidth=0.5)
if plot_type == 'all':
ax.set_xticks([])
else:
ax.set_xticklabels(
ax.get_xticklabels(), rotation=45, ha='right',
rotation_mode='anchor')
ax.set_xlabel('')
ax.set_ylabel('')
return ax
def _format_as_df(self, plot_dists, model_or_user, trial_types):
all_rts = pd.concat(plot_dists)
m_u_array = []
ttype_array = []
for rts, mu, ttype in zip(plot_dists, model_or_user, trial_types):
m_u_array.extend(len(rts) * [mu])
ttype_array.extend(len(rts) * [ttype])
plot_df = pd.DataFrame({'rts': all_rts, 'model_or_user': m_u_array,
'trial_type': ttype_array})
return plot_df
def _format_all(self):
plot_dists = [self.df['urt_ms'], self.df['mrt_ms']]
m_or_u = ['user', 'model']
trial_types = ['N/A', 'N/A']
return self._format_as_df(plot_dists, m_or_u, trial_types)
def _format_by_switch(self):
stay_inds = self.select(**{'is_switch': 0})
switch_inds = self.select(**{'is_switch': 1})
u_stay_rts = self.df['urt_ms'][stay_inds]
m_stay_rts = self.df['mrt_ms'][stay_inds]
u_switch_rts = self.df['urt_ms'][switch_inds]
m_switch_rts = self.df['mrt_ms'][switch_inds]
plot_dists = [u_stay_rts, u_switch_rts, m_stay_rts, m_switch_rts]
trial_types = ['Stay', 'Switch', 'Stay', 'Switch']
m_or_u = ['user', 'user', 'model', 'model']
return self._format_as_df(plot_dists, m_or_u, trial_types)
def _format_by_congruency(self):
con_inds = self.select(**{'is_congruent': 1})
incon_inds = self.select(**{'is_congruent': 0})
u_con_rts = self.df['urt_ms'][con_inds]
m_con_rts = self.df['mrt_ms'][con_inds]
u_incon_rts = self.df['urt_ms'][incon_inds]
m_incon_rts = self.df['mrt_ms'][incon_inds]
plot_dists = [u_con_rts, u_incon_rts, m_con_rts, m_incon_rts]
trial_types = ['Congruent', 'Incongruent', 'Congruent',
'Incongruent']
m_or_u = ['user', 'user', 'model', 'model']
return self._format_as_df(plot_dists, m_or_u, trial_types)
class BarPlot():
"""Plot seaborn style barplots, but allow plotting of
s.e.m. error bars. See figure2.py and figure3.py for usage.
Args
----
df (pandas DataFrame): Data to plot.
palette (str, optional): Color palette used for plotting.
"""
supported_error = {'sem', 'sd'}
def __init__(self, df, palette='viridis'):
self.df = df
self.palette = palette
def plot_grouped_bar(self, x, y, hue, error_type, ax, **kwargs):
# Note: Currently this only supports plotting two groups
# (designated by the hue argument)
assert error_type in self.supported_error, \
'error_type must be one of the following: ' \
f'{self.supported_error}'
colors = [(0.2363, 0.3986, 0.5104, 1.0),
(0.2719, 0.6549, 0.4705, 1.0)]
width = kwargs.get('width', 0.35)
x_offset = -width / 2
hue_types = self.df[hue].unique()
elinewidth = kwargs.get('elinewidth', 0.5)
error_kw = {'elinewidth': elinewidth}
for i, h in enumerate(hue_types):
group_df = self.df.query(f'{hue} == @h')
group_means, group_errors = self._get_group_data(
group_df, x, y, error_type)
plot_x = np.arange(len(group_means))
ax.bar(plot_x + x_offset, group_means, yerr=group_errors,
width=width, label=h, error_kw=error_kw,
**{'fc': colors[i]})
x_offset += width
ax = self._adjust_bar(plot_x, ax, **kwargs)
return ax
def plot_bar(self, keys, error_type, ax, **kwargs):
assert error_type in self.supported_error, \
'error_type must be one of the following: ' \
f'{self.supported_error}'
colors = sns.color_palette(palette=self.palette, n_colors=len(keys))
width = kwargs.get('width', 0.75)
plot_data = [self.df[key] for key in keys]
for di, d in enumerate(plot_data):
d_mean = np.mean(d)
d_sem = np.std(d) / np.sqrt(len(d))
ax.bar(di, d_mean, yerr=d_sem, width=width, error_kw={'elinewidth': 1},
**{'fc': colors[di]})
ax = self._adjust_bar(np.arange(len(plot_data)), ax, **kwargs)
return ax
def _get_group_data(self, group_df, x, y, error_type):
means = group_df.groupby(x)[y].mean().to_numpy()
if error_type == 'sem':
errors = group_df.groupby(x)[y].sem().to_numpy()
elif error_type == 'sd':
errors = group_df.groupby(x)[y].std().to_numpy()
return means, errors
def _adjust_bar(self, plot_x, ax, **kwargs):
ax.set_xlabel(kwargs.get('xlabel', None))
ax.set_ylabel(kwargs.get('ylabel', None))
ax.set_xticks(plot_x)
ax.set_xticklabels(kwargs.get('xticklabels', None),
rotation=45, ha='right', rotation_mode='anchor')
if 'yticks' in kwargs.keys():
ax.set_yticks(kwargs['yticks'])
ax.set_xlim(kwargs.get('xlim', None))
ax.set_ylim(kwargs.get('ylim', None))
if kwargs.get('plot_legend', False):
ax.legend()
ax.get_legend().get_frame().set_linewidth(0.0)
return ax
class PlotModelLatents():
"""Plot the model latents in 3D. See e.g. figure3.py and
figure4.py for usage.
Args
----
data (EbbFlowStats instance): Data to plot.
post_on_dur (int, optional): Duration after stimulus onset to plot (ms).
pcs_to_plot (list, optional): Which PCs to plot.
fixed_points (pandas DataFrame, optional): Fixed points to plot.
"""
default_colors = 2 * ['royalblue', 'forestgreen', 'crimson', 'orange']
def __init__(self, data, post_on_dur=1200, pcs_to_plot=[0, 1, 2],
fixed_points=None, plot_pre_onset=True):
self.data = data
self.pcs_to_plot = pcs_to_plot
self.latents = data.windowed['pca_latents'][:, :, pcs_to_plot]
self.m_rts = data.df['mrt_ms'].to_numpy()
self.step = data.step
self.n_pre = data.n_pre
self.t_off_ind = self.n_pre + 1 \
+ np.round(post_on_dur / self.step).astype('int')
if plot_pre_onset:
self.t_on_ind = 0
else:
self.t_on_ind = self.n_pre
self.fixed_points = fixed_points
def plot_stay_switch(self, ax, params, elev=30, azim=60):
# Plot average stay and switch trajectories for one task cue
# and one response direction (used e.g. in Fig. 4A).
labels = ['Stay', 'Switch']
styles = ['-', '-']
series = self._get_stay_switch_series(params)
cmap = sns.color_palette('viridis', as_cmap=True)
color_indices = [0.2, 0.8]
colors = [cmap(i) for i in color_indices]
plot_kwargs = {'colors': colors, 'line_styles': styles,
'line_width': 0.5, 'plot_series_onset': True,
                       'plot_series_rt': True, 'plot_times': np.array([100])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys, codecs, glob
from PIL import Image, ImageDraw
import numpy as np
import pandas as pd
import cv2
import torch
torch.backends.cudnn.benchmark = False
# torch.backends.cudnn.enabled = False
import torchvision.models as models
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data.dataset import Dataset
import logging
logging.basicConfig(level=logging.DEBUG, filename='example.log',
format='%(asctime)s - %(filename)s[line:%(lineno)d]: %(message)s') #
def draw_cv2(raw_strokes, size=256, lw=6, time_color=True):
BASE_SIZE = 299
img = np.zeros((BASE_SIZE, BASE_SIZE), np.uint8)
for t, stroke in enumerate(eval(raw_strokes)):
str_len = len(stroke[0])
for i in range(len(stroke[0]) - 1):
# dot dropout
if np.random.uniform() > 0.95:
continue
color = 255 - min(t, 10) * 13 if time_color else 255
_ = cv2.line(img, (stroke[0][i] + 22, stroke[1][i] + 22),
(stroke[0][i + 1] + 22, stroke[1][i + 1] + 22), color, lw)
if size != BASE_SIZE:
return cv2.resize(img, (size, size))
else:
return img
class QRDataset(Dataset):
def __init__(self, img_drawing, img_label, img_size, transform=None):
self.img_drawing = img_drawing
self.img_label = img_label
self.img_size = img_size
self.transform = transform
def __getitem__(self, index):
img = np.zeros((self.img_size, self.img_size, 3))
img[:, :, 0] = draw_cv2(self.img_drawing[index], self.img_size)
img[:, :, 1] = img[:, :, 0]
img[:, :, 2] = img[:, :, 0]
img = Image.fromarray(np.uint8(img))
if self.transform is not None:
img = self.transform(img)
        label = torch.from_numpy(np.array([self.img_label[index]]))
# Copyright (C) 2021 <NAME>
#
# SPDX-License-Identifier: MIT
#
# This tests the custom assembly for the unbiased Nitsche formulation in a special case
# that can be expressed using ufl:
# We consider a very simple test case made up of two disconnected elements with a constant
# gap in x[tdim-1]-direction. The contact surfaces are made up of exactly one edge
# from each element that are perfectly aligned such that the quadrature points only
# differ in the x[tdim-1]-direction by the given gap.
# For comparison, we consider a DG function space on a mesh that is constructed by
# removing the gap between the elements and merging the edges making up the contact
# surface into one. This allows us to use DG-functions and ufl to formulate the contact
# terms in the variational form by suitably adjusting the deformation u and using the given
# constant gap.
import numpy as np
import scipy
import pytest
import ufl
from dolfinx.cpp.mesh import to_type
import dolfinx.fem as _fem
from dolfinx.graph import create_adjacencylist
from dolfinx.mesh import (CellType, locate_entities_boundary, locate_entities, create_mesh,
compute_midpoints, meshtags)
from mpi4py import MPI
import dolfinx_cuas
import dolfinx_contact
import dolfinx_contact.cpp
from dolfinx_contact.helpers import (R_minus, dR_minus, R_plus, dR_plus, epsilon, lame_parameters, sigma_func)
kt = dolfinx_contact.cpp.Kernel
def DG_rhs_plus(u0, v0, h, n, gamma, theta, sigma, gap, dS):
# This version of the ufl form agrees with the formulation in https://doi.org/10.1007/s00211-018-0950-x
def Pn_g(u, a, b):
return ufl.dot(u(a) - u(b), -n(b)) - gap - (h(a) / gamma) * ufl.dot(sigma(u(a)) * n(a), -n(b))
def Pn_gtheta(v, a, b):
return ufl.dot(v(a) - v(b), -n(b)) - theta * (h(a) / gamma) * ufl.dot(sigma(v(a)) * n(a), -n(b))
F = 0.5 * (gamma / h('+')) * R_plus(Pn_g(u0, '+', '-')) * Pn_gtheta(v0, '+', '-') * dS
F += 0.5 * (gamma / h('-')) * R_plus(Pn_g(u0, '-', '+')) * Pn_gtheta(v0, '-', '+') * dS
return F
def DG_rhs_minus(u0, v0, h, n, gamma, theta, sigma, gap, dS):
# This version of the ufl form agrees with its one-sided equivalent in nitsche_ufl.py
def Pn_g(u, a, b):
return ufl.dot(sigma(u(a)) * n(a), -n(b)) + (gamma / h(a)) * (gap - ufl.dot(u(a) - u(b), -n(b)))
def Pn_gtheta(v, a, b):
return theta * ufl.dot(sigma(v(a)) * n(a), -n(b)) - (gamma / h(a)) * ufl.dot(v(a) - v(b), -n(b))
F = 0.5 * (h('+') / gamma) * R_minus(Pn_g(u0, '+', '-')) * Pn_gtheta(v0, '+', '-') * dS
F += 0.5 * (h('-') / gamma) * R_minus(Pn_g(u0, '-', '+')) * Pn_gtheta(v0, '-', '+') * dS
return F
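# For reference (an assumption about the helpers imported from dolfinx_contact.helpers, not taken from this file):
# R_plus and R_minus are the usual positive/negative-part operators of Nitsche-type contact formulations,
#   R_plus(x)  = (x + |x|) / 2 = max(x, 0),
#   R_minus(x) = (x - |x|) / 2 = min(x, 0),
# and dR_plus/dR_minus denote their derivatives (indicators of x > 0 and x < 0), which is why they appear in the
# Jacobian forms below.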
def DG_jac_plus(u0, v0, w0, h, n, gamma, theta, sigma, gap, dS):
# This version of the ufl form agrees with the formulation in https://doi.org/10.1007/s00211-018-0950-x
def Pn_g(u, a, b):
return ufl.dot(u(a) - u(b), -n(b)) - gap - (h(a) / gamma) * ufl.dot(sigma(u(a)) * n(a), -n(b))
def Pn_gtheta(v, a, b, t):
return ufl.dot(v(a) - v(b), -n(b)) - t * (h(a) / gamma) * ufl.dot(sigma(v(a)) * n(a), -n(b))
J = 0.5 * (gamma / h('+')) * dR_plus(Pn_g(u0, '+', '-')) * \
Pn_gtheta(w0, '+', '-', 1.0) * Pn_gtheta(v0, '+', '-', theta) * dS
J += 0.5 * (gamma / h('-')) * dR_plus(Pn_g(u0, '-', '+')) * \
Pn_gtheta(w0, '-', '+', 1.0) * Pn_gtheta(v0, '-', '+', theta) * dS
return J
def DG_jac_minus(u0, v0, w0, h, n, gamma, theta, sigma, gap, dS):
# This version of the ufl form agrees with its one-sided equivalent in nitsche_ufl.py
def Pn_g(u, a, b):
return ufl.dot(sigma(u(a)) * n(a), -n(b)) + (gamma / h(a)) * (gap - ufl.dot(u(a) - u(b), -n(b)))
def Pn_gtheta(v, a, b, t):
return t * ufl.dot(sigma(v(a)) * n(a), -n(b)) - (gamma / h(a)) * ufl.dot(v(a) - v(b), -n(b))
J = 0.5 * (h('+') / gamma) * dR_minus(Pn_g(u0, '+', '-')) * \
Pn_gtheta(w0, '+', '-', 1.0) * Pn_gtheta(v0, '+', '-', theta) * dS
J += 0.5 * (h('-') / gamma) * dR_minus(Pn_g(u0, '-', '+')) * \
Pn_gtheta(w0, '-', '+', 1.0) * Pn_gtheta(v0, '-', '+', theta) * dS
return J
def compute_dof_permutations(V_dg, V_cg, gap, facets_dg, facets_cg):
'''The meshes used for the two different formulations are
created independently of each other. Therefore we need to
determine how to map the dofs from one mesh to the other in
order to compare the results'''
mesh_dg = V_dg.mesh
mesh_cg = V_cg.mesh
bs = V_cg.dofmap.index_map_bs
tdim = mesh_dg.topology.dim
mesh_dg.topology.create_connectivity(tdim - 1, tdim)
f_to_c_dg = mesh_dg.topology.connectivity(tdim - 1, tdim)
mesh_cg.topology.create_connectivity(tdim - 1, tdim)
mesh_cg.topology.create_connectivity(tdim, tdim - 1)
f_to_c_cg = mesh_cg.topology.connectivity(tdim - 1, tdim)
c_to_f_cg = mesh_cg.topology.connectivity(tdim, tdim - 1)
x_cg = V_cg.tabulate_dof_coordinates()
x_dg = V_dg.tabulate_dof_coordinates()
for i in range(len(facets_dg)):
facet_dg = facets_dg[i]
dofs_cg = []
coordinates_cg = []
for facet_cg in np.array(facets_cg)[:, 0]:
# retrieve dofs and dof coordinates for mesh with gap
cell = f_to_c_cg.links(facet_cg)[0]
all_facets = c_to_f_cg.links(cell)
local_index = np.argwhere(np.array(all_facets) == facet_cg)[0, 0]
dof_layout = V_cg.dofmap.dof_layout
local_dofs = dof_layout.entity_closure_dofs(tdim - 1, local_index)
dofs_cg0 = V_cg.dofmap.cell_dofs(cell)[local_dofs]
dofs_cg.append(dofs_cg0)
coordinates_cg.append(x_cg[dofs_cg0, :])
# retrieve all dg dofs on mesh without gap for each cell
# and modify coordinates by gap if necessary
cells = f_to_c_dg.links(facet_dg)
for cell in cells:
midpoint = compute_midpoints(mesh_dg, tdim, [cell])[0]
if midpoint[tdim - 1] > 0:
# coordinates of corresponding dofs are identical for both meshes
dofs_dg0 = V_dg.dofmap.cell_dofs(cell)
coordinates_dg0 = x_dg[dofs_dg0, :]
else:
# coordinates of corresponding dofs need to be adjusted by gap
dofs_dg1 = V_dg.dofmap.cell_dofs(cell)
coordinates_dg1 = x_dg[dofs_dg1, :]
coordinates_dg1[:, tdim - 1] -= gap
# create array of indices to access corresponding function values
num_dofs_f = dofs_cg[0].size
indices_cg = np.zeros(bs * 2 * num_dofs_f, dtype=np.int32)
for i, dofs in enumerate(dofs_cg):
for j, dof in enumerate(dofs):
for k in range(bs):
indices_cg[i * num_dofs_f * bs + j * bs + k] = bs * dof + k
indices_dg =
|
np.zeros(indices_cg.size, dtype=np.int32)
|
numpy.zeros
|
"""
Module of functions that are like ufuncs in acting on arrays and optionally
storing results in an output array.
"""
__all__ = ['fix', 'isneginf', 'isposinf', 'log2']
import numpy.core.numeric as nx
def fix(x, y=None):
"""
Round to nearest integer towards zero.
Round an array of floats element-wise to nearest integer towards zero.
The rounded values are returned as floats.
Parameters
----------
x : array_like
An array of floats to be rounded
y : ndarray, optional
Output array
Returns
-------
out : ndarray of floats
The array of rounded numbers
See Also
--------
floor : Round downwards
around : Round to given number of decimals
Examples
--------
>>> np.fix(3.14)
3.0
>>> np.fix(3)
3.0
>>> np.fix([2.1, 2.9, -2.1, -2.9])
array([ 2., 2., -2., -2.])
"""
x = nx.asanyarray(x)
if y is None:
y = nx.zeros_like(x)
y1 = nx.floor(x)
y2 = nx.ceil(x)
y[...] = nx.where(x >= 0, y1, y2)
return y
def isposinf(x, y=None):
"""
Shows which elements of the input are positive infinity.
Returns a numpy array resulting from an element-wise test for positive
infinity.
Parameters
----------
x : array_like
The input array.
y : array_like
A boolean array with the same shape as `x` to store the result.
Returns
-------
y : ndarray
A numpy boolean array with the same dimensions as the input.
If the second argument is not supplied then a numpy boolean array is returned
with values True where the corresponding element of the input is positive
infinity and values False where the element of the input is not positive
infinity.
If the second argument is supplied then a numpy integer array is returned
with values 1 where the corresponding element of the input is positive
infinity.
See Also
--------
isinf : Shows which elements are negative or positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a number, positive and
negative infinity
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity,
and that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
Errors result if the second argument is supplied with a scalar input or
if the first and second arguments have different shapes.
Numpy's definitions for positive infinity (PINF) and negative infinity
(NINF) may change in future versions.
Examples
--------
>>> np.isposinf(np.PINF)
array(True, dtype=bool)
>>> np.isposinf(np.inf)
array(True, dtype=bool)
>>> np.isposinf(np.NINF)
array(False, dtype=bool)
>>> np.isposinf([-np.inf, 0., np.inf])
array([False, False, True], dtype=bool)
>>> x=np.array([-np.inf, 0., np.inf])
>>> y=np.array([2,2,2])
>>> np.isposinf(x,y)
array([0, 0, 1])
>>> y
array([0, 0, 1])
"""
if y is None:
x = nx.asarray(x)
y = nx.empty(x.shape, dtype=nx.bool_)
nx.logical_and(nx.isinf(x), ~nx.signbit(x), y)
return y
def isneginf(x, y=None):
"""
Return True where x is -infinity, and False otherwise.
Parameters
----------
x : array_like
The input array.
y : array_like
A boolean array with the same shape as `x` to store the result.
Returns
-------
y : ndarray
A boolean array where y[i] = True only if x[i] = -Inf.
See Also
--------
isposinf, isfinite
Examples
--------
>>> np.isneginf([-np.inf, 0., np.inf])
array([ True, False, False], dtype=bool)
"""
if y is None:
x = nx.asarray(x)
y = nx.empty(x.shape, dtype=nx.bool_)
nx.logical_and(
|
nx.isinf(x)
|
numpy.core.numeric.isinf
|
"""
Hydrogen.py
Author: <NAME>
Affiliation: University of Colorado at Boulder
Created on: Mon Mar 12 18:02:07 2012
Description: Container for hydrogen physics stuff.
"""
import scipy
import numpy as np
from types import FunctionType
from scipy.optimize import fsolve, minimize
from ..util.ParameterFile import ParameterFile
from ..util.Math import central_difference, interp1d
from .Constants import A10, T_star, m_p, m_e, erg_per_ev, h, c, E_LyA, E_LL, \
k_B
try:
from scipy.special import gamma
g23 = gamma(2. / 3.)
g13 = gamma(1. / 3.)
c1 = 4. * np.pi / 3. /
|
np.sqrt(3.)
|
numpy.sqrt
|
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# based on https://github.com/keras-team/keras/blob/master/examples/imdb_cnn.py
import argparse
import multiprocessing
import os
import keras
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.datasets import imdb
from keras import backend as K
from keras_preprocessing.sequence import _remove_long_seq
from tensorflow import saved_model
import tensorflow as tf
import numpy as np
try:
from experiment_metrics.api import publish
except ImportError:
print("Nauta's Experiment metrics library not found.")
publish = print # If experiment_metrics.api is not available, simply bind publish to print function
class TensorflowModelCheckpoint(keras.callbacks.ModelCheckpoint):
"""
A simple extension of keras.callbacks.ModelCheckpoint that also saves the model in TensorFlow checkpoint format.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.saver = tf.train.Saver()
def on_epoch_end(self, epoch, logs=None):
super().on_epoch_end(epoch, logs)
sess = keras.backend.get_session()
self.saver.save(sess, self.filepath.replace('{epoch}', str(epoch)).replace('h5', 'ckpt'))
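def _example_checkpoint_usage(model, x_train, y_train):
    # Editor's illustrative sketch, not part of the original script: attach the combined
    # h5 + TensorFlow-checkpoint callback to an already-compiled Keras model. The
    # filepath pattern and the training data passed in are assumptions.
    checkpoint = TensorflowModelCheckpoint('weights.{epoch}.h5')
    model.fit(x_train, y_train, batch_size=32, epochs=2, callbacks=[checkpoint])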
class NautaExperimentMetricsCallback(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs: dict = None):
publish({'accuracy': str(logs.get('acc')),
'loss': str(logs.get('loss'))[:5], # Reduce precision for shorter metrics
'validation_accuracy': str(logs.get('val_acc')),
'validation_loss': str(logs.get('val_loss'))[:5]})
def load_data(dataset_path: str = None, num_words=None, skip_top=0, maxlen=None, seed=113, start_char=1, oov_char=2,
index_from=3):
print('Loading data...')
if dataset_path:
with np.load(dataset_path) as f:
x_train, labels_train = f['x_train'], f['y_train']
x_test, labels_test = f['x_test'], f['y_test']
np.random.seed(seed)
indices = np.arange(len(x_train))
np.random.shuffle(indices)
x_train = x_train[indices]
labels_train = labels_train[indices]
indices = np.arange(len(x_test))
np.random.shuffle(indices)
x_test = x_test[indices]
labels_test = labels_test[indices]
xs = np.concatenate([x_train, x_test])
labels = np.concatenate([labels_train, labels_test])
if start_char is not None:
xs = [[start_char] + [w + index_from for w in x] for x in xs]
elif index_from:
xs = [[w + index_from for w in x] for x in xs]
if maxlen:
xs, labels = _remove_long_seq(maxlen, xs, labels)
if not xs:
raise ValueError('After filtering for sequences shorter than maxlen=' +
str(maxlen) + ', no sequence was kept. '
'Increase maxlen.')
if not num_words:
num_words = max([max(x) for x in xs])
# by convention, use 2 as OOV word
# reserve 'index_from' (=3 by default) characters:
# 0 (padding), 1 (start), 2 (OOV)
if oov_char is not None:
xs = [[w if (skip_top <= w < num_words) else oov_char for w in x]
for x in xs]
else:
xs = [[w for w in x if skip_top <= w < num_words]
for x in xs]
idx = len(x_train)
x_train, y_train = np.array(xs[:idx]), np.array(labels[:idx])
x_test, y_test =
|
np.array(xs[idx:])
|
numpy.array
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""A collection of math transform methods."""
import math
import random
import sys
from typing import List, Tuple, Union, Optional
import numpy as np # type: ignore
from scipy.spatial.transform.rotation import Rotation # type: ignore
import cv2 # type: ignore
ZERO_VECTOR3 = np.array([0.0, 0.0, 0.0]).reshape(3, 1)
X_VECTOR3 = np.array([1.0, 0.0, 0.0]).reshape(3, 1)
Y_VECTOR3 = np.array([0.0, 1.0, 0.0]).reshape(3, 1)
Z_VECTOR3 = np.array([0.0, 0.0, 1.0]).reshape(3, 1)
_FloatOrInt = Union[float, int]
ArrayOrList = Union[List[_FloatOrInt], Tuple[_FloatOrInt], np.ndarray]
ArrayOrList2 = Union[List[_FloatOrInt], Tuple[_FloatOrInt, _FloatOrInt],
np.ndarray]
ArrayOrList3 = Union[List[_FloatOrInt], Tuple[_FloatOrInt, _FloatOrInt,
_FloatOrInt], np.ndarray]
ArrayOrList4 = Union[List[_FloatOrInt], Tuple[_FloatOrInt, _FloatOrInt,
_FloatOrInt, _FloatOrInt],
np.ndarray]
ArrayOrList8 = Union[List[_FloatOrInt],
Tuple[_FloatOrInt, _FloatOrInt, _FloatOrInt, _FloatOrInt,
_FloatOrInt, _FloatOrInt, _FloatOrInt, _FloatOrInt],
np.ndarray]
def transform(v: np.ndarray, translation: Optional[np.ndarray],
rotation: Optional[np.ndarray]) -> np.ndarray:
"""Transforms a 3D point using translation and rotation vectors.
The rotation is applied before the translation; this is the way Unity operates.
Args:
v: array containing (x, y, z) coords or numpy array of shape (3, N).
translation: array containing input translation (t_x, t_y, t_z).
rotation: array containing input rotation vector parameterized using
OpenCV's Rodrigues parameterization: (r_x, r_y, r_z).
Returns:
array containing (x, y, z) points of shape (3, N).
"""
npnts = v.size // 3
v2 = v.reshape(3, npnts)
if rotation is not None:
r_mat, _ = cv2.Rodrigues(rotation.reshape(3, 1))
v2 = r_mat.dot(v2)
if translation is not None:
v2 = np.add(v2, translation.reshape(3, 1))
return v2
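def _example_transform_usage():
    # Editor's illustrative sketch, not part of the original module: rotate the x unit
    # vector 90 degrees about z, then translate by (1, 0, 0); all values are assumptions.
    point = np.array([1.0, 0.0, 0.0])
    translation = np.array([1.0, 0.0, 0.0])
    rotation = np.array([0.0, 0.0, np.pi / 2.0])  # Rodrigues axis-angle vector about z
    moved = transform(point, translation, rotation)
    # Rotation happens first (x -> y), then the translation, so the result is
    # approximately [[1.0], [1.0], [0.0]] with shape (3, 1).
    return moved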
def transform_inv(v: np.ndarray, translation: Optional[np.ndarray],
rotation: Optional[np.ndarray]) -> np.ndarray:
"""Transforms a 3D point using the inverse translation and rotation vectors.
Args:
v: array containing (x, y, z) coords or numpy array of shape (3, N).
translation: array containing input translation (t_x, t_y, t_z).
rotation: array containing input rotation vector parameterized using
OpenCV's Rodrigues parameterization: (r_x, r_y, r_z).
Returns:
array containing (x, y, z) points of shape (3, N).
"""
npnts = v.size // 3
v2 = v.reshape(3, npnts)
if translation is not None:
v2 = np.add(v2, -translation.reshape(3, 1))
if rotation is not None:
r_mat, _ = cv2.Rodrigues(rotation.reshape(3, 1))
v2 = r_mat.transpose().dot(v2)
return v2
def project_2d_int(p: ArrayOrList3,
extrinsics: np.ndarray,
intrinsics: np.ndarray,
distortion: np.ndarray,
clip_range: float = 100000) -> Optional[Tuple[int, int]]:
"""Computes the 2D projection of a 3D point p.
Uses a camera model with the given extrinsics, intrinsics, and distortion
parameters.
Args:
p: numpy array of type np.float32 containing (x, y, z) coord.
extrinsics: numpy array of length 6 containing [t_x, t_y, t_z, r_x, r_y,
r_z].
intrinsics: numpy array of shape (3, 3). This is typically the return value
of intrinsics_to_matrix.
distortion: camera distortion parameters of shape (5,).
clip_range: Return None if pixel x or y is larger than clip_range.
Returns:
x, y: integer pixel coords, else returns None if point is outside clip
range or behind the camera view.
"""
p2 = np.array(p, dtype=np.float32).reshape((1, 3, 1))
# opencv project points will project even if behind camera
# so do transform and z check here.
p2 = transform(p2, extrinsics[:3].reshape(3, 1), extrinsics[3:].reshape(3, 1))
if p2[2] < sys.float_info.epsilon:
return None
p2, _ = cv2.projectPoints(
np.array([p2]), ZERO_VECTOR3, ZERO_VECTOR3, intrinsics, distortion)
x = int(p2[0][0][0])
y = int(p2[0][0][1])
if x > clip_range:
return None
if x < -clip_range:
return None
if y > clip_range:
return None
if y < -clip_range:
return None
return x, y
def project(p: ArrayOrList3, extrinsics: np.ndarray, intrinsics: np.ndarray,
distortion: np.ndarray) -> np.ndarray:
"""Compute the 2D projection of a 3D point p.
Uses a camera model with the given extrinsics, intrinsics, and distortion
parameters.
Args:
p: array of containing (x, y, z) coord.
extrinsics: array of length 6 containing [t_x, t_y, t_z, r_x, r_y, r_z].
intrinsics: array of shape (3, 3). This is typically the return value of
intrinsics_to_matrix.
distortion: camera distortion parameters of shape (5,).
Returns:
numpy array of shape (1, 1, 2) containing projected x, y.
"""
p2, _ = cv2.projectPoints(p, extrinsics[3:].reshape(3, 1),
extrinsics[:3].reshape(3,
1), intrinsics, distortion)
return p2
def intrinsics_to_matrix(v: ArrayOrList4,
dtype: np.dtype = np.float64) -> np.ndarray:
"""Convert a flattened list of intrinsics to 3x3 intrincis matrix."""
return np.array([[v[0], 0, v[2]], [0, v[1], v[3]], [0, 0, 1]], dtype=dtype)
def intrinsics_to_list(v: np.ndarray) -> List[float]:
"""Convert a 3x3 intrinsics matrix to a flattened list of intrinsics."""
return [v[0, 0], v[1, 1], v[0, 2], v[1, 2]]
def unproject(p: ArrayOrList, z: float, intrinsic: np.ndarray,
distortion: ArrayOrList) -> np.ndarray:
"""Unproject (u,v) pixel coordinate, with depth z, into x,y,z coordinate.
Args:
p: (u,v) pixel coordinate.
z: depth at pixel (u,v).
intrinsic: array of shape (3, 3). This is typically the return value of
intrinsics_to_matrix.
distortion: camera distortion parameters of shape (5,).
Returns:
numpy.ndarray of shape (3,) containing xyz coordinate in camera frame.
"""
cam_mtx = intrinsic # shape [3,3]
cam_dist = np.array(distortion) # shape [5]
pts = np.array([np.array(p, dtype=np.float32).reshape(1, 2)]) # shape [1,2]
point_undistorted = cv2.undistortPoints(pts, cam_mtx, cam_dist)
x = point_undistorted[0][0][0] * z
y = point_undistorted[0][0][1] * z
return np.array([x, y, z])
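def _example_unproject_usage():
    # Editor's illustrative sketch, not part of the original module: unproject the
    # principal point at a depth of 2, using assumed pinhole intrinsics and no distortion.
    intrinsic = intrinsics_to_matrix([600.0, 600.0, 320.0, 240.0])
    distortion = np.zeros(5)
    xyz = unproject((320.0, 240.0), 2.0, intrinsic, distortion)
    # The principal point lies on the optical axis, so xyz is approximately [0., 0., 2.].
    return xyz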
def unproject_depth_sample(img_pt: ArrayOrList2, raw_depth: float,
depth_dist: ArrayOrList8, camera_mtx: np.ndarray,
camera_dist: np.ndarray) -> np.ndarray:
"""Convert (u,v) pixel coordinate, with depth, into an (x, y, z) coordinate.
Args:
img_pt: (u,v) pixel coordinate.
raw_depth: depth value pre-calibration.
depth_dist: depth distortion parameters of shape (8,)
camera_mtx: intrinsics matrix of shape (3, 3). This is typically the return
value of intrinsics_to_matrix.
camera_dist: camera distortion parameters. numpy array of shape (5,).
Returns:
xyz coordinate in camera frame of shape (1, 3).
"""
# ax = (img_pt[0] - camera_mtx[0][2]) / camera_mtx[0][0]
# ay = (img_pt[1] - camera_mtx[1][2]) / camera_mtx[1][1]
adjusted_depth = depth_dist[0] + raw_depth * depth_dist[1]
# + \
# ax * depth_dist[2] + \
# ay * depth_dist[3] + \
# ax * ay * depth_dist[4] + \
# ax * raw_depth * depth_dist[5] + \
# ay * raw_depth * depth_dist[6] + \
# ax * ay * raw_depth * depth_dist[7]
return unproject(img_pt, adjusted_depth, camera_mtx,
camera_dist).reshape(1, 3)
def unproject_vectorized(uv_coordinates: np.ndarray, depth_values: np.ndarray,
intrinsic: np.ndarray,
distortion: np.ndarray) -> np.ndarray:
"""Vectorized version of unproject(), for N points.
Args:
uv_coordinates: pixel coordinates to unproject of shape (n, 2).
depth_values: depth values corresponding index-wise to the uv_coordinates of
shape (n).
intrinsic: array of shape (3, 3). This is typically the return value of
intrinsics_to_matrix.
distortion: camera distortion parameters of shape (5,).
Returns:
xyz coordinates in camera frame of shape (n, 3).
"""
cam_mtx = intrinsic # shape [3, 3]
cam_dist = np.array(distortion) # shape [5]
# shape of points_undistorted is [N, 2] after the squeeze().
points_undistorted = cv2.undistortPoints(
uv_coordinates.reshape((-1, 1, 2)), cam_mtx, cam_dist).squeeze(axis=1)
x = points_undistorted[:, 0] * depth_values
y = points_undistorted[:, 1] * depth_values
xyz = np.vstack((x, y, depth_values)).T
return xyz
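def _example_unproject_vectorized_usage():
    # Editor's illustrative sketch, not part of the original module: unproject two pixels
    # at different depths; the intrinsics and the zero distortion vector are assumptions.
    intrinsic = intrinsics_to_matrix([600.0, 600.0, 320.0, 240.0])
    uv_coordinates = np.array([[320.0, 240.0], [920.0, 240.0]])
    depth_values = np.array([1.0, 2.0])
    # The first pixel is the principal point (-> [0, 0, 1]); for the second,
    # x = (920 - 320) / 600 * 2 = 2, giving roughly [2, 0, 2].
    return unproject_vectorized(uv_coordinates, depth_values, intrinsic, np.zeros(5))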
def unproject_depth_sample_vectorized(img_pts: np.ndarray,
raw_depths: np.ndarray,
depth_dist: ArrayOrList8,
camera_mtx: np.ndarray,
camera_dist: np.ndarray) -> np.ndarray:
"""Convert (u,v) pixel coordinate, with depth, into an (x, y, z) coordinate.
Args:
img_pts: (u,v) pixel coordinates of shape (n, 2).
raw_depths: depth values pre-calibration of shape (n).
depth_dist: depth distortion parameters of shape (8,)
camera_mtx: intrinsics matrix of shape (3, 3). This is typically the return
value of intrinsics_to_matrix.
camera_dist: camera distortion parameters. numpy array of shape (5,).
Returns:
xyz coordinates in camera frame of shape (n, 3).
"""
adjusted_depths = depth_dist[0] + raw_depths * depth_dist[1]
return unproject_vectorized(img_pts, adjusted_depths, camera_mtx,
camera_dist).reshape(img_pts.shape[0], 3)
def unproject_depth_vectorized(im_depth: np.ndarray, depth_dist: np.ndarray,
camera_mtx: np.ndarray,
camera_dist: np.ndarray) -> np.ndarray:
"""Unproject depth image into 3D point cloud, using calibration.
Args:
im_depth: raw depth image, pre-calibration of shape (height, width).
depth_dist: depth distortion parameters of shape (8,)
camera_mtx: intrinsics matrix of shape (3, 3). This is typically the return
value of intrinsics_to_matrix.
camera_dist: camera distortion parameters shape (5,).
Returns:
numpy array of shape [H*W, 3]; each row is an xyz coordinate.
"""
h, w = im_depth.shape
# shape of each u_map, v_map is [H, W].
dtype = np.float64 if not isinstance(im_depth, np.ndarray) else im_depth.dtype
u_map, v_map = np.meshgrid(
np.linspace(0, w - 1, w, dtype=dtype),
np.linspace(0, h - 1, h, dtype=dtype))
adjusted_depth = depth_dist[0] + im_depth * depth_dist[1]
# shape after stack is [N, 2], where N = H * W.
uv_coordinates = np.stack((u_map.reshape(-1), v_map.reshape(-1)), axis=-1)
return unproject_vectorized(uv_coordinates, adjusted_depth.reshape(-1),
camera_mtx, camera_dist)
def pose_to_matrix(pose: np.ndarray) -> np.ndarray:
"""Converts a pose to a homogeneous transformation matrix.
A pose is a 6-element array. The first three elements are the translation of
the pose, the next three elements are the rotation of the pose in the
axis-angle representation.
Args:
pose: convert from pose.
Returns:
two dimensional numpy array of a homogeneous transformation matrix.
array([[R00, R01, R02, T0],
[R10, R11, R12, T1],
[R20, R21, R22, T2],
[0., 0., 0., 1.]])
"""
return convert_to_matrix(pose[0:3], pose[3:6])
def convert_to_matrix(translation: ArrayOrList3,
rotation: ArrayOrList3) -> np.ndarray:
"""Converts 3D translation & rotation to a homogeneous transformation matrix.
Args:
translation: x, y, z position of the pose.
rotation: axis/angle representation of the rotation.
Returns:
two dimensional numpy array of a homogeneous transformation matrix.
array([[R00, R01, R02, T0],
[R10, R11, R12, T1],
[R20, R21, R22, T2],
[0., 0., 0., 1.]])
"""
m = np.zeros((4, 4), dtype=np.float64)  # np.float is a deprecated alias; use an explicit dtype
m[:3, :3] = cv2.Rodrigues(np.array(rotation))[0]
m[0, 3] = translation[0]
m[1, 3] = translation[1]
m[2, 3] = translation[2]
m[3, 3] = 1
return m
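def _example_convert_to_matrix_usage():
    # Editor's illustrative sketch, not part of the original module: a homogeneous
    # transform for a 90-degree rotation about z plus a translation of (1, 2, 3).
    m = convert_to_matrix([1.0, 2.0, 3.0], [0.0, 0.0, np.pi / 2.0])
    # m[:3, :3] holds the rotation matrix and m[:3, 3] the translation (1, 2, 3).
    return m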
def multiply_pose(a: np.ndarray, b: np.ndarray) -> np.ndarray:
"""Combine two poses.
A pose is a 6-element array. The first three elements are the translation of
the pose, the next three elements are the rotation of the pose in the
axis-angle representation.
Args:
a: relative pose on top of the base pose.
b: base pose
Returns:
combined pose.
"""
ma = pose_to_matrix(a)
mb = pose_to_matrix(b)
return matrix_to_pose(np.matmul(ma, mb))
def inverse_pose(pose: np.ndarray) -> np.ndarray:
"""Inverse a pose.
A pose is a 6-element array. First three elements are the translations of the
post, next three elements are the rotation of the pose in the axis-angle
representation.
Args:
pose: relative pose on top of the base pose.
Returns:
inverted pose.
"""
transformation_matrix = pose_to_matrix(pose)
inverse_translation =
|
np.identity(4, dtype=np.float64)
|
numpy.identity
|
import numpy as np
def zero():
return np.zeros((3, ), dtype=np.float64)
def ones():
return
|
np.ones((3,), dtype=np.float64)
|
numpy.ones
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import pandas as pd
from skbio.sequence import GrammaredSequence
from skbio.util import classproperty
from skbio.util._testing import assert_data_frame_almost_equal
class ExampleGrammaredSequence(GrammaredSequence):
@classproperty
def degenerate_map(cls):
return {"X": set("AB"), "Y": set("BC"), "Z": set("AC")}
@classproperty
def nondegenerate_chars(cls):
return set("ABC")
@classproperty
def default_gap_char(cls):
return '-'
@classproperty
def gap_chars(cls):
return set('-.')
class ExampleMotifsTester(ExampleGrammaredSequence):
@property
def _motifs(self):
# These aren't really motifs, just a way to exercise the code paths
return {
"name1": lambda x, _, __: str(x),
"name2": lambda x, _, __: len(x)
}
class TestGrammaredSequence(TestCase):
def test_default_gap_must_be_in_gap_chars(self):
with self.assertRaisesRegex(
TypeError,
"default_gap_char must be in gap_chars for class "
"GrammaredSequenceInvalidDefaultGap"):
class GrammaredSequenceInvalidDefaultGap(ExampleGrammaredSequence):
@classproperty
def default_gap_char(cls):
return '*'
def test_degenerates_must_expand_to_valid_nondegenerates(self):
with self.assertRaisesRegex(
TypeError,
"degenerate_map must expand only to characters included in "
"nondegenerate_chars for class "
"GrammaredSequenceInvalidDefaultGap"):
class GrammaredSequenceInvalidDefaultGap(ExampleGrammaredSequence):
@classproperty
def degenerate_map(cls):
return {"X": set("B")}
@classproperty
def nondegenerate_chars(cls):
return set("A")
def test_gap_chars_and_degenerates_share(self):
with self.assertRaisesRegex(
TypeError,
"gap_chars and degenerate_chars must not share any characters "
"for class GrammaredSequenceGapInDegenerateMap"):
class GrammaredSequenceGapInDegenerateMap(
ExampleGrammaredSequence):
@classproperty
def degenerate_map(cls):
return {"X": set("AB")}
@classproperty
def nondegenerate_chars(cls):
return set("ABC")
@classproperty
def gap_chars(cls):
return set(".-X")
def test_gap_chars_and_nondegenerates_share(self):
with self.assertRaisesRegex(
TypeError,
("gap_chars and nondegenerate_chars must not share any characters "
"for class GrammaredSequenceGapInNondegenerateMap")):
class GrammaredSequenceGapInNondegenerateMap(
ExampleGrammaredSequence):
@classproperty
def degenerate_map(cls):
return {"X": set("AB")}
@classproperty
def nondegenerate_chars(cls):
return set("ABC")
@classproperty
def gap_chars(cls):
return set(".-A")
def test_degenerates_and_nondegenerates_share(self):
with self.assertRaisesRegex(
TypeError,
("degenerate_chars and nondegenerate_chars must not share any "
"characters for class GrammaredSequenceInvalid")):
class GrammaredSequenceInvalid(ExampleGrammaredSequence):
@classproperty
def degenerate_map(cls):
return {"X": set("AB")}
@classproperty
def nondegenerate_chars(cls):
return set("ABCX")
def test_instantiation_with_no_implementation(self):
class GrammaredSequenceSubclassNoImplementation(GrammaredSequence):
pass
with self.assertRaises(TypeError) as cm:
GrammaredSequenceSubclassNoImplementation()
self.assertIn("abstract class", str(cm.exception))
self.assertIn("nondegenerate_chars", str(cm.exception))
self.assertIn("degenerate_map", str(cm.exception))
def test_init_default_parameters(self):
seq = ExampleGrammaredSequence('.-ABCXYZ')
npt.assert_equal(seq.values, np.array('.-ABCXYZ', dtype='c'))
self.assertEqual(seq.metadata, {})
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=range(8)))
def test_init_nondefault_parameters(self):
seq = ExampleGrammaredSequence(
'.-ABCXYZ',
metadata={'id': 'foo'},
positional_metadata={'quality': range(8)})
npt.assert_equal(seq.values, np.array('.-ABCXYZ', dtype='c'))
self.assertEqual(seq.metadata, {'id': 'foo'})
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'quality': range(8)}))
def test_init_valid_empty_sequence(self):
# just make sure we can instantiate an empty sequence regardless of
# `validate` and `lowercase` parameters. more extensive tests
# are performed in Sequence base class unit tests
for validate in (True, False):
for lowercase in (True, False):
seq = ExampleGrammaredSequence(
'', validate=validate, lowercase=lowercase)
self.assertEqual(seq, ExampleGrammaredSequence(''))
def test_init_valid_single_character_sequence(self):
for validate in (True, False):
for lowercase in (True, False):
seq = ExampleGrammaredSequence(
'C', validate=validate, lowercase=lowercase)
self.assertEqual(seq, ExampleGrammaredSequence('C'))
def test_init_valid_multiple_character_sequence(self):
for validate in (True, False):
for lowercase in (True, False):
seq = ExampleGrammaredSequence(
'BAACB.XYY-AZ', validate=validate, lowercase=lowercase)
self.assertEqual(seq, ExampleGrammaredSequence('BAACB.XYY-AZ'))
def test_init_validate_parameter_single_character(self):
seq = 'w'
with self.assertRaisesRegex(ValueError, "character.*'w'"):
ExampleGrammaredSequence(seq)
# test that we can instantiate an invalid sequence. we don't guarantee
# anything working beyond instantiation
ExampleGrammaredSequence(seq, validate=False)
def test_init_validate_parameter_multiple_characters(self):
# mix of valid and invalid characters with repeats and lowercased
# alphabet characters
seq = 'CBCBBbawCbbwBXYZ-.x'
with self.assertRaisesRegex(ValueError, "\['a', 'b', 'w', 'x'\]"):
ExampleGrammaredSequence(seq)
ExampleGrammaredSequence(seq, validate=False)
def test_init_lowercase_all_lowercase(self):
s = 'cbcbbbazcbbzbxyz-.x'
with self.assertRaisesRegex(ValueError,
"\['a', 'b', 'c', 'x', 'y', 'z'\]"):
ExampleGrammaredSequence(s)
seq = ExampleGrammaredSequence(s, lowercase=True)
self.assertEqual(seq, ExampleGrammaredSequence('CBCBBBAZCBBZBXYZ-.X'))
def test_init_lowercase_mixed_case(self):
s = 'CBCBBbazCbbzBXYZ-.x'
with self.assertRaisesRegex(ValueError, "\['a', 'b', 'x', 'z'\]"):
ExampleGrammaredSequence(s)
seq = ExampleGrammaredSequence(s, lowercase=True)
self.assertEqual(seq, ExampleGrammaredSequence('CBCBBBAZCBBZBXYZ-.X'))
def test_init_lowercase_no_validation(self):
s = 'car'
with self.assertRaisesRegex(ValueError, "\['a', 'c', 'r'\]"):
ExampleGrammaredSequence(s)
with self.assertRaisesRegex(ValueError, "character.*'R'"):
ExampleGrammaredSequence(s, lowercase=True)
ExampleGrammaredSequence(s, lowercase=True, validate=False)
def test_init_lowercase_byte_ownership(self):
bytes = np.array([97, 98, 97], dtype=np.uint8)
with self.assertRaisesRegex(ValueError, "\['a', 'b'\]"):
ExampleGrammaredSequence(bytes)
seq = ExampleGrammaredSequence(bytes, lowercase=True)
self.assertEqual(seq, ExampleGrammaredSequence('ABA'))
# should not share the same memory
self.assertIsNot(seq._bytes, bytes)
# we should have copied `bytes` before modifying in place to convert to
# upper. make sure `bytes` hasn't been mutated
npt.assert_equal(bytes, np.array([97, 98, 97], dtype=np.uint8))
def test_init_lowercase_invalid_keys(self):
for invalid_key in ((), [], 2):
invalid_type = type(invalid_key)
with self.assertRaisesRegex(TypeError,
"lowercase keyword argument expected "
"a bool or string, but got %s" %
invalid_type):
ExampleGrammaredSequence('ACGTacgt', lowercase=invalid_key)
def test_degenerate_chars(self):
expected = set("XYZ")
self.assertIs(type(ExampleGrammaredSequence.degenerate_chars), set)
self.assertEqual(ExampleGrammaredSequence.degenerate_chars, expected)
ExampleGrammaredSequence.degenerate_chars.add("W")
self.assertEqual(ExampleGrammaredSequence.degenerate_chars, expected)
self.assertEqual(ExampleGrammaredSequence('').degenerate_chars,
expected)
with self.assertRaises(AttributeError):
ExampleGrammaredSequence('').degenerate_chars = set("BAR")
def test_nondegenerate_chars(self):
expected = set("ABC")
self.assertEqual(ExampleGrammaredSequence.nondegenerate_chars,
expected)
ExampleGrammaredSequence.degenerate_chars.add("D")
self.assertEqual(ExampleGrammaredSequence.nondegenerate_chars,
expected)
self.assertEqual(ExampleGrammaredSequence('').nondegenerate_chars,
expected)
with self.assertRaises(AttributeError):
ExampleGrammaredSequence('').nondegenerate_chars = set("BAR")
def test_gap_chars(self):
expected = set(".-")
self.assertIs(type(ExampleGrammaredSequence.gap_chars), set)
self.assertEqual(ExampleGrammaredSequence.gap_chars, expected)
ExampleGrammaredSequence.gap_chars.add("_")
self.assertEqual(ExampleGrammaredSequence.gap_chars, expected)
self.assertEqual(ExampleGrammaredSequence('').gap_chars, expected)
with self.assertRaises(AttributeError):
ExampleGrammaredSequence('').gap_chars = set("_ =")
def test_default_gap_char(self):
self.assertIs(type(ExampleGrammaredSequence.default_gap_char), str)
self.assertEqual(ExampleGrammaredSequence.default_gap_char, '-')
self.assertEqual(ExampleGrammaredSequence('').default_gap_char, '-')
with self.assertRaises(AttributeError):
ExampleGrammaredSequence('').default_gap_char = '.'
def test_alphabet(self):
expected = set("ABC.-XYZ")
self.assertIs(type(ExampleGrammaredSequence.alphabet), set)
self.assertEqual(ExampleGrammaredSequence.alphabet, expected)
ExampleGrammaredSequence.alphabet.add("DEF")
self.assertEqual(ExampleGrammaredSequence.alphabet, expected)
self.assertEqual(ExampleGrammaredSequence('').alphabet, expected)
with self.assertRaises(AttributeError):
ExampleGrammaredSequence('').alphabet = set("ABCDEFG.-WXYZ")
def test_degenerate_map(self):
expected = {"X": set("AB"), "Y": set("BC"), "Z": set("AC")}
self.assertEqual(ExampleGrammaredSequence.degenerate_map, expected)
ExampleGrammaredSequence.degenerate_map['W'] = set("ABC")
ExampleGrammaredSequence.degenerate_map['X'] = set("CA")
self.assertEqual(ExampleGrammaredSequence.degenerate_map, expected)
self.assertEqual(ExampleGrammaredSequence('').degenerate_map, expected)
with self.assertRaises(AttributeError):
ExampleGrammaredSequence('').degenerate_map = {'W': "ABC"}
def test_gaps(self):
self.assertIs(type(ExampleGrammaredSequence("").gaps()), np.ndarray)
self.assertIs(ExampleGrammaredSequence("").gaps().dtype,
np.dtype('bool'))
npt.assert_equal(ExampleGrammaredSequence("ABCXBZYABC").gaps(),
np.zeros(10).astype(bool))
npt.assert_equal(ExampleGrammaredSequence(".-.-.").gaps(),
np.ones(5).astype(bool))
npt.assert_equal(ExampleGrammaredSequence("A.B-C.X-Y.").gaps(),
np.array([0, 1] * 5, dtype=bool))
npt.assert_equal(ExampleGrammaredSequence("AB.AC.XY-").gaps(),
np.array([0, 0, 1] * 3, dtype=bool))
npt.assert_equal(ExampleGrammaredSequence("A.BC.-").gaps(),
np.array([0, 1, 0, 0, 1, 1], dtype=bool))
def test_has_gaps(self):
self.assertIs(type(ExampleGrammaredSequence("").has_gaps()), bool)
self.assertIs(type(ExampleGrammaredSequence("-").has_gaps()), bool)
self.assertFalse(ExampleGrammaredSequence("").has_gaps())
self.assertFalse(ExampleGrammaredSequence("ABCXYZ").has_gaps())
self.assertTrue(ExampleGrammaredSequence("-").has_gaps())
self.assertTrue(ExampleGrammaredSequence("ABCXYZ-").has_gaps())
def test_degenerates(self):
self.assertIs(type(ExampleGrammaredSequence("").degenerates()),
np.ndarray)
self.assertIs(ExampleGrammaredSequence("").degenerates().dtype,
np.dtype('bool'))
npt.assert_equal(ExampleGrammaredSequence("ABCBC-.AB.").degenerates(),
np.zeros(10).astype(bool))
npt.assert_equal(ExampleGrammaredSequence("ZYZYZ").degenerates(),
np.ones(5).astype(bool))
npt.assert_equal(ExampleGrammaredSequence("AX.Y-ZBXCZ").degenerates(),
np.array([0, 1] * 5, dtype=bool))
npt.assert_equal(ExampleGrammaredSequence("ABXACY.-Z").degenerates(),
np.array([0, 0, 1] * 3, dtype=bool))
npt.assert_equal(ExampleGrammaredSequence("AZBCXY").degenerates(),
np.array([0, 1, 0, 0, 1, 1], dtype=bool))
def test_has_degenerates(self):
self.assertIs(type(ExampleGrammaredSequence("").has_degenerates()),
bool)
self.assertIs(type(ExampleGrammaredSequence("X").has_degenerates()),
bool)
self.assertFalse(ExampleGrammaredSequence("").has_degenerates())
self.assertFalse(ExampleGrammaredSequence("A-.BC").has_degenerates())
self.assertTrue(ExampleGrammaredSequence("Z").has_degenerates())
self.assertTrue(ExampleGrammaredSequence("ABC.XYZ-").has_degenerates())
def test_nondegenerates(self):
self.assertIs(type(ExampleGrammaredSequence("").nondegenerates()),
np.ndarray)
self.assertIs(ExampleGrammaredSequence("").nondegenerates().dtype,
np.dtype('bool'))
npt.assert_equal(
ExampleGrammaredSequence("XYZYZ-.XY.").nondegenerates(),
np.zeros(10).astype(bool))
npt.assert_equal(ExampleGrammaredSequence("ABABA").nondegenerates(),
np.ones(5).astype(bool))
npt.assert_equal(
ExampleGrammaredSequence("XA.B-AZCXA").nondegenerates(),
np.array([0, 1] * 5, dtype=bool))
npt.assert_equal(
ExampleGrammaredSequence("XXAZZB.-C").nondegenerates(),
np.array([0, 0, 1] * 3, dtype=bool))
npt.assert_equal(ExampleGrammaredSequence("YB.-AC").nondegenerates(),
np.array([0, 1, 0, 0, 1, 1], dtype=bool))
def test_has_nondegenerates(self):
self.assertIs(type(ExampleGrammaredSequence("").has_nondegenerates()),
bool)
self.assertIs(type(ExampleGrammaredSequence("A").has_nondegenerates()),
bool)
self.assertFalse(ExampleGrammaredSequence("").has_nondegenerates())
self.assertFalse(
ExampleGrammaredSequence("X-.YZ").has_nondegenerates())
self.assertTrue(ExampleGrammaredSequence("C").has_nondegenerates())
self.assertTrue(
ExampleGrammaredSequence(".XYZ-ABC").has_nondegenerates())
def test_degap(self):
kw = {
'metadata': {
'id': 'some_id',
'description': 'some description',
},
}
self.assertEqual(
ExampleGrammaredSequence(
"", positional_metadata={'qual': []}, **kw).degap(),
ExampleGrammaredSequence(
"", positional_metadata={'qual': []}, **kw))
self.assertEqual(
ExampleGrammaredSequence(
"ABCXYZ",
positional_metadata={'qual': np.arange(6)},
**kw).degap(),
ExampleGrammaredSequence(
"ABCXYZ",
positional_metadata={'qual':
|
np.arange(6)
|
numpy.arange
|
import os
from collections import namedtuple
import numpy as np
from .idx import read_idx_file
def _to_onehot(k, a):
vs = []
for x in a:
v = np.zeros((k, ), np.float32)
v[x] = 1
vs.append(v)
return
|
np.array(vs)
|
numpy.array
|
import numpy as np
import matplotlib.pyplot as plt
class LinearSpline:
def __init__(self):
self.ts = []
self.xs = []
def add_entry(self, t, x):
self.ts.append(t)
self.xs.append(x)
def interpolate(self, t):
if t <= self.ts[0]:
return self.xs[0]
elif t >= self.ts[-1]:
return self.xs[-1]
else:
i2 = 0
while t > self.ts[i2]:
i2 += 1
i1 = i2 - 1
k = (t - self.ts[i1])/(self.ts[i2]-self.ts[i1])
return self.xs[i1]*(1-k) + self.xs[i2]*k
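def _example_linear_spline_usage():
    # Editor's illustrative sketch, not part of the original script: two knots and a
    # query halfway between them, which returns the midpoint value 5.0.
    spline = LinearSpline()
    spline.add_entry(0.0, 0.0)
    spline.add_entry(1.0, 10.0)
    return spline.interpolate(0.5)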
class LinearSpline3D:
def __init__(self):
self.xs = LinearSpline()
self.ys = LinearSpline()
self.zs = LinearSpline()
def add_entry(self, t, x, y ,z):
self.xs.add_entry(t, x)
self.ys.add_entry(t, y)
self.zs.add_entry(t, z)
def get_start(self):
return self.xs.ts[0]
def get_duree(self):
return self.xs.ts[-1] - self.xs.ts[0]
def interpolate(self, t):
x = self.xs.interpolate(t)
y = self.ys.interpolate(t)
z = self.zs.interpolate(t)
return x, y, z
class CircularSpline3D:
def __init__(self, start):
self.ts = []
self.duree = []
self.rayon = []
self.normale = []
self.center = []
self.angle = []
self.end = [start]
def add_entry(self, t, center, normale, angle, duree):
self.ts.append(t)
self.duree.append(duree)
rayon = np.array([self.end[-1][i] - center[i] for i in range(3)])
self.rayon.append(rayon)
self.center.append(center)
self.normale.append(normale)
self.angle.append(angle)
self.end.append(rayon * np.cos(angle) + np.cross(normale, rayon) * np.sin(angle) + center)
def get_middle(self, i):
return (self.end[i - 1] + self.end[i]) / 2
def get_end(self, i):
return self.end[i + 1]
def interpolate(self, t):
if t <= self.ts[0]:
print(t)
return self.rayon[0] + self.center[0]
elif t >= self.ts[-1] + self.duree[-1]:
return self.end[-1]
else:
i = 0
while t > self.ts[i] + self.duree[i]:
i += 1
angle = self.angle[i] * (t - self.ts[i]) / self.duree[i]
M = self.rayon[i] * np.cos(angle) + np.cross(self.normale[i], self.rayon[i]) * np.sin(angle) + self.center[i]
return M
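def _example_circular_spline_usage():
    # Editor's illustrative sketch, not part of the original script: a quarter turn of
    # radius 3 about the z axis, centred at the origin, traversed over one time unit.
    spline = CircularSpline3D([3.0, 0.0, 0.0])
    spline.add_entry(0.0, np.array([0.0, 0.0, 0.0]), np.array([0.0, 0.0, 1.0]), np.pi / 2.0, 1.0)
    # Halfway through, the interpolated point sits at 45 degrees: roughly (2.12, 2.12, 0).
    return spline.interpolate(0.5)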
if __name__ == "__main__":
# from mpl_toolkits import mplot3d
# ax = plt.axes(projection='3d')
# ts = np.arange(0, 2*np.pi, 0.05)
# xs = []
# ys = []
# zs = []
# nx = []
# ny = []
# nz = []
# for t in ts:
# R = np.array([3, 0, 0])
# N = np.array([0, 0, 1])
# O = np.array([2, 0, 0])
# nx.append(N[0] * t + O[0])
# ny.append(N[1] * t + O[1])
# nz.append(N[2] * t + O[2])
# M = R * math.cos(t) + np.cross(N, R) * math.sin(t) + O
# xs.append(M[0])
# ys.append(M[1])
# zs.append(M[2])
from mpl_toolkits import mplot3d
ax = plt.axes(projection='3d')
ts = np.arange(0., 9, 0.05)
xs = []
ys = []
zs = []
N1 = []
N2 = []
# point = [3, 1, 0]
# spline = CircularSpline3D(point)
# center = [0, 0, 0]
# normale1 = [0, 0, 1]
# spline.add_entry(0, center, normale1, 3, np.pi / 2)
# normale2 = spline.get_middle(0)
# center2 = spline.get_middle(0)
# spline.add_entry(3, center2, normale2, 3, np.pi)
point = [3, 0, 0]
spline = CircularSpline3D(point)
angle = np.pi / 3
center = [0, 0, 0]
normale1 = [0, 0, 1]
spline.add_entry(0, center, normale1, angle, 3)
normale2 = [1, 0, 0]
center2 = [spline.get_end(0)[0], center[1], center[2]]
print(center2)
# spline.add_entry(3, center, normale1, - np.pi, 3)
spline.add_entry(3, center2, normale2, np.pi, 3)
spline.add_entry(6, center, normale1, angle, 3)
ax.scatter3D(center[0], center[1], center[2], c='r')
ax.scatter3D(center2[0], center2[1], center2[2], c='g')
ax.scatter3D(point[0], point[1], point[2], c='b')
for t in ts:
x, y, z = spline.interpolate(t)
xs.append(x)
ys.append(y)
zs.append(z)
N1.append(np.array(normale1) * t + np.array(center))
N2.append(np.array(normale2) * t +
|
np.array(center)
|
numpy.array
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from .utils import *
from .splitters import Splitter
from sklearn.cluster import AgglomerativeClustering
from sklearn.decomposition import PCA,TruncatedSVD,NMF,FastICA
from scipy.stats import pearsonr
from sklearn.metrics import roc_auc_score
from pandas import DataFrame
from sklearn.neighbors import LocalOutlierFactor
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import VALID_METRICS
import warnings
warnings.filterwarnings("ignore", message='default contamination parameter 0.1 will change in version 0.22 to "auto". This will change the predict method behavior.')
warnings.filterwarnings("ignore", message='Data with input dtype float64 was converted to bool by check_pairwise_arrays.')
warnings.filterwarnings("ignore", message='Invalid value encountered in percentile')
DEBUG = True
all_metrics = ['cityblock','L2','L4','braycurtis',
'canberra','chebyshev','correlation','mahalanobis',
'wL2','wL4']
def sk_check(X_train,X_test,y_test,o_list):
f_f = [LocalOutlierFactor(n_neighbors=5),\
LocalOutlierFactor(n_neighbors=10),\
LocalOutlierFactor(n_neighbors=35),\
IsolationForest(max_samples='auto')]
f_name = ['LOF5','LOF10','LOF35','i-forest']
columns = ['method']+['AUC','MCC','RWS']
n_row = 2
index = np.arange(n_row) # array of numbers for the number of samples
df = DataFrame(columns=columns, index = index)
y_test = np.array(y_test)
exec('T_o ='+(' | '.join(['(y_test=='+str(i)+')' for i in o_list])),locals(),globals())
auc_max = -1
for i in range(3):
lof = f_f[i]
lof.fit(X_test)
outliers = -lof.negative_outlier_factor_
auc_test = roc_auc_score(T_o, outliers)
if auc_test>auc_max:
auc_max = auc_test
df['method'][0] = f_name[i]
df['MCC'][0] = MCC(T_o, outliers)
df['AUC'][0] = auc_max
df['RWS'][0] = rws_score(T_o, outliers)
df['method'][1] = f_name[3]
isof = f_f[3]
isof.fit(X_train)
scores_pred = isof.decision_function(X_test)
outliers = scores_pred.max()-scores_pred
df['MCC'][1] = MCC(T_o, outliers)
df['AUC'][1] = roc_auc_score(T_o, outliers)
df['RWS'][1] = rws_score(T_o, outliers)
return df
def d_lof(X_seen,X_unseen=None,n_neighbors=20,algorithm='auto',metric='minkowski'):
lof = LocalOutlierFactor(n_neighbors = n_neighbors,
algorithm = algorithm,
metric = metric,
novelty=not (X_unseen is None),
n_jobs=-1)
lof.fit(X_seen)
if X_unseen is None:
return -lof.negative_outlier_factor_
else:
return -lof.score_samples(X_unseen)
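def _example_d_lof_usage():
    # Editor's illustrative sketch, not part of the original module: score a toy 2-D
    # sample containing one obvious outlier; larger values mean "more outlying".
    rng = np.random.RandomState(0)
    X = np.vstack([rng.normal(size=(50, 2)), [[8.0, 8.0]]])
    return d_lof(X, n_neighbors=10)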
def grid_run_lof(X_seen,y_seen=None,
X_unseen=None,y_unseen=None,
n_neighbors = [5,20,35],
algorithms = ['ball_tree', 'kd_tree', 'brute'],
metrics=None):
'''
This function is able to deal with three modes:
1- Unsupervised outlier detection
2- Semi-supervised outlier detection
3- Novelty detection
'''
novelty = 0
semisupervised = 0
if (np.all(y_seen==0)) | (y_seen is None):
novelty = 1
X_unseen_p = X_unseen
y_seen = y_unseen
print('Novelty detection mode.')
conds = (X_unseen is not None and y_unseen is not None)
assert conds,'In novelty detection you need to input the unseen data sets.'
elif y_unseen is not None and X_unseen is not None:
semisupervised = 1
# print('Semi-supervised option is not available for novelty detection.')
X_unseen_p = None
print('Semi-supervised outlier detection mode.')
elif X_seen is not None:
X_unseen_p = X_unseen
print('Unsupervised outlier detection mode.')
else:
assert 0, 'The configuration is not recognized!'
aucs,mccs,rwss,conf = [],[],[],[]
for nn in n_neighbors:
for al in algorithms:
if metrics is None:
metrics = VALID_METRICS[al]
for mt in metrics:
try:
outliers = d_lof(X_seen=X_seen,X_unseen=X_unseen_p,n_neighbors=nn,algorithm=al,metric=mt)
conf.append([nn,al,mt])
aucs.append(roc_auc_score(y_seen, outliers))
mccs.append(MCC(y_seen, outliers))
rwss.append(rws_score(y_seen, outliers))
except:
pass
if semisupervised:
nn,al,mt = conf[np.argmax(aucs)]
outliers = d_lof(X_seen=X_unseen,n_neighbors=nn,algorithm=al,metric=mt)
auc = roc_auc_score(y_unseen, outliers)
nn,al,mt = conf[np.argmax(mccs)]
outliers = d_lof(X_seen=X_unseen,n_neighbors=nn,algorithm=al,metric=mt)
mcc = MCC(y_unseen, outliers)
nn,al,mt = conf[np.argmax(rwss)]
outliers = d_lof(X_seen=X_unseen,n_neighbors=nn,algorithm=al,metric=mt)
rws = rws_score(y_unseen, outliers)
return auc,mcc,rws
else:
return
|
np.array(aucs)
|
numpy.array
|
'''
Plot functions to graphically present simulation results
'''
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from parameters import SAVE_FIGS, ONE_FIGURE
server_names = ['server 1', 'server 2', 'server 3',
'server 4', 'server 5']
color_sequence = ['#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c']
def setup_plots(suptitle):
'''
Basic setup of plots so it can be reused on plot functions
Parameters
----------
suptitle: string
Description of the plot that will appear on the top
Returns
-------
Figure and axis matplotlib structs
'''
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
# fig.suptitle(suptitle)
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label]):
item.set_fontsize(30)
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(26)
item.set_fontweight("bold")
font = {'weight' : 'bold'}
matplotlib.rc('font', **font)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# Provide tick lines across the plot to help viewers trace along
# the axis ticks.
plt.grid(True, 'major', 'y', ls='--', lw=.5, c='k', alpha=.3)
# Remove the tick marks; they are unnecessary with the tick lines we just
# plotted.
plt.tick_params(axis='both', which='both', bottom=True, top=False,
labelbottom=True, left=False, right=False, labelleft=True)
return fig, ax
def create_plot_server(result, path_name, suptitle, xlabel, ylabel, offset):
'''
Generate the plot needed
Parameters
----------
result: 2-d array
Each row is a different timeslot
Returns
-------
Plot
'''
if ONE_FIGURE == False:
fig, ax = setup_plots(suptitle)
y_positions = []
for index, row in enumerate(result):
line = plt.plot(row, lw=5, color=color_sequence[index])
# set the text to start on the y of the last value of the line
y_pos = row[-1]
server_name = server_names[index]
# move based on offset if names overlap on plot
while y_pos in y_positions:
y_pos += offset
y_positions.append(y_pos)
plt.text(len(row) + 5, y_pos, server_name, fontsize=24, color=color_sequence[index])
plt.xlabel(xlabel, fontweight='bold')
plt.ylabel(ylabel, fontweight='bold')
if SAVE_FIGS == True and ONE_FIGURE == False:
plt.savefig("plots/" + path_name + ".png")
else:
plt.show(block=False)
def plot_data_offloading_of_users(all_bytes_offloaded):
'''
Plot the data each user is offloading in each timeslot
Parameters
----------
all_bytes_offloaded: 2-d array
Contains on each row the amount of data each user is offloading. Each row is
a different timeslot
Returns
-------
Plot
'''
result = all_bytes_offloaded
# Each row on the transposed matrix contains the data the user offloads
# in each timeslot. Different rows mean different user.
result = np.transpose(result)
suptitle = "Data each user is offloading in each timeslot"
if ONE_FIGURE == False:
fig, ax = setup_plots(suptitle)
for index, row in enumerate(result):
# display only some of the users on the plot
if index%11 == 0:
line = plt.plot(row, lw=5)
# line = plt.plot(row, lw=5)
plt.xlabel('iterations', fontweight='bold')
plt.ylabel('amount of data (bytes)', fontweight='bold')
path_name = "all_bytes_offloaded"
if SAVE_FIGS == True and ONE_FIGURE == False:
plt.savefig("plots/" + path_name + ".png")
else:
plt.show(block=False)
def plot_user_utility(all_user_utility):
'''
Plot the utility each user has in each timeslot
Parameters
----------
all_user_utility: 2-d array
Contains on each row the utility value each user has. Each row is
a different timeslot
Returns
-------
Plot
'''
result = all_user_utility
# Each row on the transposed matrix contains the data the user offloads
# in each timeslot. Different rows mean different user.
result = np.transpose(result)
suptitle = "Utility each user has in each timeslot"
if ONE_FIGURE == False:
fig, ax = setup_plots(suptitle)
for index, row in enumerate(result):
line = plt.plot(row, lw=5)
plt.xlabel('iterations', fontweight='bold')
plt.ylabel('utility', fontweight='bold')
path_name = "all_user_utility"
if SAVE_FIGS == True and ONE_FIGURE == False:
plt.savefig("plots/" + path_name + ".png")
else:
plt.show(block=False)
def plot_num_of_users_on_each_server(all_server_selected, S, **params):
'''
Plot number of users on each server every timeslot
Parameters
----------
all_server_selected: 2-d array
Contains on each row the server each user has selected. Each row is
a different timeslot
S: int
Number of servers
Returns
-------
Plot
'''
# How many users each server has each timeslot
# result = np.empty((0, S), int)
# for row in all_server_selected:
# # the bincount finds how many times each server has been selected
# result = np.append(result, [np.bincount(row, minlength=S)], axis=0)
result = all_server_selected
# Each row on the transposed matrix contains how many users the server has
# in each timeslot. Different rows mean different servers.
result = np.transpose(result)
offset = np.abs(np.max(result) - np.min(result))*0.03
if offset < 0.005:
offset = 0.005 + np.abs(np.max(result)) * 0.005
path_name = "all_server_selected"
suptitle = "Number of users each server has in each timeslot"
xlabel = "timeslots"
ylabel = "num of users"
create_plot_server(result, path_name, suptitle, xlabel, ylabel, offset)
def plot_pricing_of_each_server(all_prices):
'''
Plot pricing of each server on every timeslot
Parameters
----------
all_prices: 2-d array
Contains on each row the price each server has chosen. Each row is
a different timeslot
Returns
-------
Plot
'''
result = all_prices
# Each row on the transposed matrix contains the price the server has
# in each timeslot. Different rows mean different servers.
result = np.transpose(result)
offset = np.abs(np.max(result) - np.min(result))*0.03
if offset < 0.005:
offset = 0.005 + np.abs(np.max(result)) * 0.005
path_name = "all_prices"
suptitle = "Price each server has selected in each timeslot"
xlabel = "timeslots"
ylabel = "price"
create_plot_server(result, path_name, suptitle, xlabel, ylabel, offset)
def plot_receiving_data_on_each_server(all_bytes_to_server):
'''
Plot the data each server is receiving in each timeslot
Parameters
----------
all_bytes_to_server: 2-d array
Contains on each row the amount of data each server is receiving. Each row is
a different timeslot
Returns
-------
Plot
'''
result = all_bytes_to_server
# Each row on the transposed matrix contains the data the server receives
# in each timeslot. Different rows mean different servers.
result = np.transpose(result)
offset = np.abs(np.max(result) - np.min(result))*0.03
if offset < 0.005:
offset = 0.005 + np.abs(np.max(result)) * 0.005
path_name = "all_bytes_to_server"
suptitle = "Data each server is receiving in each timeslot"
xlabel = "timeslots"
ylabel = "amount of data (bytes)"
create_plot_server(result, path_name, suptitle, xlabel, ylabel, offset)
def plot_server_welfare(all_server_welfare):
'''
Plot the welfare of each server in each timeslot
Parameters
----------
all_server_welfare: 2-d array
Contains on each row the welfare of each server. Each row is
a different timeslot
Returns
-------
Plot
'''
result = all_server_welfare
# Each row on the transposed matrix contains the data the user offloads
# in each timeslot. Different rows mean different user.
result = np.transpose(result)
offset = np.abs(np.max(result) - np.min(result))*0.03
if offset < 0.005:
offset = 0.005 + np.abs(np.max(result)) * 0.005
path_name = "all_server_welfare"
suptitle = "Welfare of the server at the end of each timeslot"
xlabel = "timeslots"
ylabel = "welfare"
create_plot_server(result, path_name, suptitle, xlabel, ylabel, offset)
def plot_server_Rs(all_Rs):
'''
Plot the competitiveness score each server has in each timeslot
Parameters
----------
all_Rs: 2-d array
Contains on each row the Rs of each server. Each row is
a different timeslot
Returns
-------
Plot
'''
result = all_Rs
# Each row on the transposed matrix contains the data the user offloads
# in each timeslot. Different rows mean different user.
result = np.transpose(result)
offset = np.abs(np.max(result) - np.min(result))*0.03
if offset < 0.005:
offset = 0.005 + np.abs(
|
np.max(result)
|
numpy.max
|
from pathlib import Path
import cv2
import glob
import imageio
import nibabel
import numpy as np
import os
import pandas as pd
import random
import sys
import tensorflow as tf
from skimage.transform import resize
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import save_img
from config import config as cfg
class Dataset:
def __init__(self, path, images_path, modality='flair', extract=True, initial_slice=0, final_slice=155,
train_split=0.8,
test_split=0.1, val_split=0.1):
self.input_path = path
self.images_path = images_path
self.initial_slice = initial_slice
self.final_slice = final_slice
self.modality = modality
self.train_split = train_split
self.test_split = train_split + test_split
self.val_split = train_split + val_split
self.train = None
self.val = None
self.test = None
if extract:
self.patients_HGG = []
self.patients_LGG = []
self._extract()
self._filter()
self._get_sets()
def _get_csvs(self):
print('>> Searching for sets in csv format..')
sets = dict()
try:
train_set = pd.read_csv('train_set.csv')
val_set = pd.read_csv('val_set.csv')
test_set = pd.read_csv('test_set.csv')
sets['train_set'] = train_set
sets['val_set'] = val_set
sets['test_set'] = test_set
print('!! Found them.')
return sets
except FileNotFoundError:
print("!! Sets not found!")
return sets
def join_imgs_path(self, tuples):
for idx, tup in enumerate(tuples):
# print(f'{idx}: {tup}')
img = self.images_path + tup[0]
seg = self.images_path + tup[1]
tuples[idx][0] = img
tuples[idx][1] = seg
return tuples
def _get_sets(self):
sets = self._get_csvs()
train = None
val = None
test = None
if len(sets) == 0:
self._split_data()
self._save_sets_csv()
else:
for set in sets:
if 'train' in set:
tuples = sets['train_set'].values.tolist()
tuples = self.join_imgs_path(tuples)
self.train = tuples
elif 'val' in set:
tuples = sets['val_set'].values.tolist()
tuples = self.join_imgs_path(tuples)
self.val = tuples
else:
tuples = sets['test_set'].values.tolist()
tuples = self.join_imgs_path(tuples)
self.test = tuples
def _split_data(self):
print('>> Splitting data....')
imgs = []
segs = []
for path in Path(self.images_path).rglob('*.png'):
path = str(path)
if 'seg' in path:
# segs.append(str(path[-31:]))
segs.append(str(path))
elif f'{self.modality}' in path:
# imgs.append(str(path[-33:]))
imgs.append(str(path))
tuples = list(zip(imgs, segs))
random.shuffle(tuples)
self.train = tuples[:int(len(tuples) * self.train_split)]
self.val = tuples[int(len(tuples) * self.train_split):int(len(tuples) * self.test_split)]
self.test = tuples[int(len(tuples) * self.val_split):]
print('<< Split done!')
def _save_sets_csv(self):
sets = [self.train, self.val, self.test]
idx = 0
for set in sets:
imgs = []
segs = []
for i in range(len(set)):
img = set[i][0]
seg = set[i][1]
imgs.append(img[-33:])
segs.append(seg[-31:])
data = {'imgs': imgs, 'segs': segs}
df_test = pd.DataFrame(data)
if idx == 0:
df_test.to_csv('train_set.csv', index=False)
print('!! Train set paths saved to > train_set.csv')
elif idx == 1:
df_test.to_csv('val_set.csv', index=False)
print('!! Val set paths saved to > val_set.csv')
else:
df_test.to_csv('test_set.csv', index=False)
print('!! Test set paths saved to > test_set.csv')
idx += 1
def _extract(self):
amount = count(self.input_path, 'nii.gz')
c = 0
for path in Path(self.input_path).rglob('*.nii.gz'):
HGG = False
data = nibabel.load(path)
image_array = data.get_fdata()
# Handling patient folders
if path.parts[-3] == 'HGG':
HGG = True
if path.parts[-2] not in self.patients_HGG:
self.patients_HGG.append(path.parts[-2])
else:
if path.parts[-2] not in self.patients_LGG:
self.patients_LGG.append(path.parts[-2])
if HGG:
output_label = self.images_path + 'HGG/'
index = self.patients_HGG.index(path.parts[-2])
else:
output_label = self.images_path + 'LGG/'
index = self.patients_LGG.index(path.parts[-2])
patient = 'patient{:03d}'.format(index + 1)
for slice in range(self.initial_slice, self.final_slice):
img = image_array[:, :, slice]
img = np.rot90(img, 3)  # rotate 270 degrees (same as three successive 90-degree rotations)
img = resize(img, (240, 240, 3), order=0, preserve_range=True, anti_aliasing=False)
if ('seg' in path.parts[-1]):
img[img == 1] = 1
img[img == 2] = 2
img[img == 4] = 3
output_tissue = output_label + 'seg/'
if not os.path.exists(output_tissue):
os.makedirs(output_tissue)
print("Created ouput directory: " + output_tissue)
mask_gray = cv2.cvtColor(img.astype('float32'), cv2.COLOR_RGB2GRAY)
# not black image
if not cv2.countNonZero(mask_gray) == 0:
imageio.imwrite(output_tissue + patient + '_slice{:03d}'.format(slice) + '.png',
img.astype(np.uint8))
if ('flair' in path.parts[-1]):
output_tissue = output_label + 'flair/'
if not os.path.exists(output_tissue):
os.makedirs(output_tissue)
print("Created ouput directory: " + output_tissue)
save_img(output_tissue + patient + '_slice{:03d}'.format(slice) + '.png', img, scale=True)
if ('t1' in path.parts[-1]):
output_tissue = output_label + 't1/'
if not os.path.exists(output_tissue):
os.makedirs(output_tissue)
print("Created ouput directory: " + output_tissue)
save_img(output_tissue + patient + '_slice{:03d}'.format(slice) + '.png', img, scale=True)
if ('t1ce' in path.parts[-1]):
output_tissue = output_label + 't1c/'
if not os.path.exists(output_tissue):
os.makedirs(output_tissue)
print("Created ouput directory: " + output_tissue)
save_img(output_tissue + patient + '_slice{:03d}'.format(slice) + '.png', img, scale=True)
if ('t2' in path.parts[-1]):
output_tissue = output_label + 't2/'
if not os.path.exists(output_tissue):
os.makedirs(output_tissue)
print("Created ouput directory: " + output_tissue)
save_img(output_tissue + patient + '_slice{:03d}'.format(slice) + '.png', img, scale=True)
printProgressBar(c + 1, amount, prefix='Progress:', suffix='Complete', length=50)
c += 1
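# The extraction above writes, per tumor grade, one folder per modality:
#   <images_path>/{HGG,LGG}/{seg,flair,t1,t1c,t2}/patientNNN_sliceNNN.png
# Segmentation slices with no labelled tissue are skipped, which is what _filter() below
# relies on when it removes the corresponding modality slices.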
def _filter(self):
print('>> Filtering black images (without ground truth)..')
labels = ['HGG', 'LGG']
for label in labels:
path_to_tumors = self.images_path + f'{label}/seg'
path_to_modality = self.images_path + f'{label}/flair'
tumors = glob.glob(path_to_tumors + '/*.png')
images = glob.glob(path_to_modality + '/*.png')
# Compare files by their trailing filename suffix so the differing folder prefixes are ignored.
tumors_filter = [tumor[-16:] for tumor in tumors]
images_filters = [img[-16:] for img in images]
count = 0
idx = 0
for imgs in images_filters:
if imgs not in tumors_filter:
os.remove(images[idx])
count += 1
idx += 1
count2 = 0
for img in images_filters:
if img not in tumors_filter:
count2 += 1
print('<< Done!\n')
def get_data(self):
return self.train, self.val, self.test
class DataGenerator(tf.keras.utils.Sequence):
def __init__(self, tuples, img_size=(128, 128), batch_size=8, classes=4):
# random.shuffle(tuples)
self.input_img_paths = [tuples[i][0] for i in range(len(tuples))]
self.target_img_paths = [tuples[i][1] for i in range(len(tuples))]
self.classes = classes
self.batch_size = batch_size
self.img_size = img_size
def __len__(self):
return len(self.target_img_paths) // self.batch_size
def __getitem__(self, index):
"""Returns tuple (input, target) correspond to batch #idx."""
x = []
y = []
i = index * self.batch_size
batch_input_img_paths = self.input_img_paths[i: i + self.batch_size]
batch_target_img_paths = self.target_img_paths[i: i + self.batch_size]
for j, path in enumerate(batch_input_img_paths):
img = load_img(path, target_size=self.img_size, color_mode="grayscale")
img_arr = np.array(img)
img_arr = img_arr / float(255)
img = np.resize(img_arr, self.img_size)
img = img.reshape(self.img_size[0], self.img_size[1], 1)
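# Rescale each slice by its own maximum ('maior' means "largest"); the guard avoids dividing
# by zero on all-black slices. Note this makes the earlier division by 255 redundant but harmless.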
maior = np.max(img) if np.max(img) > 0 else 1
img = img / maior
x.append(img)
for j, path in enumerate(batch_target_img_paths):
img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, (self.img_size[0], self.img_size[1]))
y.append(tf.one_hot(img.astype(np.int64), self.classes))
return np.array(x), np.array(y)
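# Minimal usage sketch (illustrative only; 'dataset' and 'model' are placeholders for an
# instance of the dataset class above and a compiled Keras segmentation model):
#   train_tuples, val_tuples, test_tuples = dataset.get_data()
#   train_gen = DataGenerator(train_tuples, img_size=(128, 128), batch_size=8, classes=4)
#   val_gen = DataGenerator(val_tuples, img_size=(128, 128), batch_size=8, classes=4)
#   model.fit(train_gen, validation_data=val_gen, epochs=50)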
# Print iterations progress
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█', printEnd="\r"):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print(f'\r{prefix} |{bar}| {percent}% {suffix}', end=printEnd)
# Print New Line on Complete
if iteration == total:
print()
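# Example usage (illustrative only; 'items' and 'process' are placeholders):
#   for i, item in enumerate(items):
#       process(item)
#       printProgressBar(i + 1, len(items), prefix='Progress:', suffix='Complete', length=50)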
def count(input, type):
count = 0
for path in Path(input).rglob('*.' + type):
count += 1
return count
np.set_printoptions(threshold=sys.maxsize)
def read_img(p, img_size):
"""
:param p: path of image
:param img_size: target (width, height) passed to cv2.resize
:return: image as numpy array
"""
img = cv2.imread(p)
img = cv2.resize(img, img_size)
img_c = np.copy(np.asarray(img))
return img_c
import pytest
import pyCGM_Single.pycgmStatic as pycgmStatic
import numpy as np
from mock import patch
rounding_precision = 8
class TestPycgmStaticAxis():
"""
This class tests the axis functions in pycgmStatic.py:
staticCalculationHead
pelvisJointCenter
hipJointCenter
hipAxisCenter
kneeJointCenter
ankleJointCenter
footJointCenter
headJC
uncorrect_footaxis
rotaxis_footflat
rotaxis_nonfootflat
findJointC
"""
nan_3d = [np.nan, np.nan, np.nan]
rand_coor = [np.random.randint(0, 10), np.random.randint(0, 10), np.random.randint(0, 10)]
@pytest.mark.parametrize(["head", "expected"], [
# Test from running sample data
([[[244.87227957886893, 326.0240255639856, 1730.4189843948805],
[243.89575702706503, 325.0366593474616, 1730.1515677531293],
[244.89086730509763, 324.80072493605866, 1731.1283433097797]],
[244.89547729492188, 325.0578918457031, 1730.1619873046875]],
0.25992807335420975),
# Test with zeros for all params
([[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [0, 0, 0]],
np.nan),
# Testing when values are added to head[0][0]
([[[-1, 8, 9], [0, 0, 0], [0, 0, 0]], [0, 0, 0]],
1.5707963267948966),
# Testing when values are added to head[0][1]
([[[0, 0, 0], [7, 5, 7], [0, 0, 0]], [0, 0, 0]],
np.nan),
# Testing when values are added to head[0][2]
([[[0, 0, 0], [0, 0, 0], [3, -6, -2]], [0, 0, 0]],
0.0),
# Testing when values are added to head[0]
([[[-1, 8, 9], [7, 5, 7], [3, -6, -2]], [0, 0, 0]],
-1.3521273809209546),
# Testing when values are added to head[1]
([[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [-4, 7, 8]],
0.7853981633974483),
# Testing when values are added to head
([[[-1, 8, 9], [7, 5, 7], [3, -6, -2]], [-4, 7, 8]],
-0.09966865249116204),
# Testing that when head is composed of lists of ints
([[[-1, 8, 9], [7, 5, 7], [3, -6, -2]], [-4, 7, 8]],
-0.09966865249116204),
# Testing that when head is composed of numpy arrays of ints
([np.array([[-1, 8, 9], [7, 5, 7], [3, -6, -2]], dtype='int'), np.array([-4, 7, 8], dtype='int')],
-0.09966865249116204),
# Testing that when head is composed of lists of floats
([[[-1.0, 8.0, 9.0], [7.0, 5.0, 7.0], [3.0, -6.0, -2.0]], [-4.0, 7.0, 8.0]],
-0.09966865249116204),
# Testing that when head is composed of numpy arrays of floats
([np.array([[-1.0, 8.0, 9.0], [7.0, 5.0, 7.0], [3.0, -6.0, -2.0]], dtype='float'), np.array([-4.0, 7.0, 8.0], dtype='float')],
-0.09966865249116204)])
def test_staticCalculationHead(self, head, expected):
"""
This test provides coverage of the staticCalculationHead function in pycgmStatic.py, defined as staticCalculationHead(frame, head)
This test takes 2 parameters:
head: array containing the head axis and head origin
expected: the expected result from calling staticCalculationHead on head
This function first calculates the x, y, z axes of the head by subtracting the head origin from the given
head axes. It then calls headoffCalc on this head axis and a global axis to find the head offset angles.
This test ensures that:
- the head axis and the head origin both have an effect on the final offset angle
- the resulting output is correct when head is composed of lists of ints, numpy arrays of ints, lists of
floats, and numpy arrays of floats.
"""
result = pycgmStatic.staticCalculationHead(None, head)
np.testing.assert_almost_equal(result, expected, rounding_precision)
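# Illustrative sketch of the relationship exercised above (not executed by the test): the
# three head axis vectors are first made relative to the head origin,
#   head_axis = np.asarray(head[0]) - np.asarray(head[1])
# and staticCalculationHead then reduces them, via headoffCalc against a global axis, to the
# single offset angle in radians that 'expected' holds (e.g. pi/2 for the [-1, 8, 9] case).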
@pytest.mark.parametrize(["frame", "expected"], [
# Test from running sample data
({'RASI': np.array([357.90066528, 377.69210815, 1034.97253418]),
'LASI': np.array([145.31594849, 405.79052734, 1030.81445312]),
'RPSI': np.array([274.00466919, 205.64402771, 1051.76452637]),
'LPSI': np.array([189.15231323, 214.86122131, 1052.73486328])},
[np.array([251.60830688, 391.74131775, 1032.89349365]),
np.array([[251.74063624, 392.72694721, 1032.78850073], [250.61711554, 391.87232862, 1032.8741063], [251.60295336, 391.84795134, 1033.88777762]]),
np.array([231.57849121, 210.25262451, 1052.24969482])]),
# Test with zeros for all params
({'SACR': np.array([0, 0, 0]), 'RASI': np.array([0, 0, 0]), 'LASI': np.array([0, 0, 0]),
'RPSI': np.array([0, 0, 0]), 'LPSI': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([nan_3d, nan_3d, nan_3d]), np.array([0, 0, 0])]),
# Testing when adding values to frame['RASI'] and frame['LASI']
({'RASI': np.array([-6, 6, 3]), 'LASI': np.array([-7, -9, 1]), 'RPSI': np.array([0, 0, 0]),
'LPSI': np.array([0, 0, 0])},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-7.44458106, -1.48072284, 2.32771179], [-6.56593805, -2.48907071, 1.86812391], [-6.17841206, -1.64617634, 2.93552855]]),
np.array([0, 0, 0])]),
# Testing when adding values to frame['RPSI'] and frame['LPSI']
({'RASI': np.array([0, 0, 0]), 'LASI': np.array([0, 0, 0]), 'RPSI': np.array([1, 0, -4]),
'LPSI': np.array([7, -2, 2])},
[np.array([0, 0, 0]), np.array([nan_3d, nan_3d, nan_3d]), np.array([4., -1.0, -1.0])]),
# Testing when adding values to frame['SACR']
({'SACR': np.array([-4, 8, -5]), 'RASI': np.array([0, 0, 0]), 'LASI': np.array([0, 0, 0]),
'RPSI': np.array([0, 0, 0]), 'LPSI': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([nan_3d, nan_3d, nan_3d]), np.array([-4, 8, -5])]),
# Testing when adding values to frame['RASI'], frame['LASI'], frame['RPSI'] and frame['LPSI']
({'RASI': np.array([-6, 6, 3]), 'LASI': np.array([-7, -9, 1]), 'RPSI': np.array([1, 0, -4]),
'LPSI': np.array([7, -2, 2])},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-7.45825845, -1.47407957, 2.28472598], [-6.56593805, -2.48907071, 1.86812391], [-6.22180416, -1.64514566, 2.9494945]]),
np.array([4.0, -1.0, -1.0])]),
# Testing when adding values to frame['SACR'], frame['RASI'] and frame['LASI']
({'SACR': np.array([-4, 8, -5]), 'RASI': np.array([-6, 6, 3]), 'LASI': np.array([-7, -9, 1]),
'RPSI': np.array([0, 0, 0]), 'LPSI': np.array([0, 0, 0])},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-6.72928306, -1.61360872, 2.96670695], [-6.56593805, -2.48907071, 1.86812391], [-5.52887619, -1.59397972, 2.21928602]]),
np.array([-4, 8, -5])]),
# Testing when adding values to frame['SACR'], frame['RPSI'] and frame['LPSI']
({'SACR': np.array([-4, 8, -5]), 'RASI': np.array([0, 0, 0]), 'LASI': np.array([0, 0, 0]),
'RPSI': np.array([1, 0, -4]), 'LPSI': np.array([7, -2, 2])},
[np.array([0, 0, 0]), np.array([nan_3d, nan_3d, nan_3d]), np.array([-4, 8, -5])]),
# Testing when adding values to frame['SACR'], frame['RASI'], frame['LASI'], frame['RPSI'] and frame['LPSI']
({'SACR': np.array([-4, 8, -5]), 'RASI': np.array([-6, 6, 3]), 'LASI': np.array([-7, -9, 1]),
'RPSI': np.array([1, 0, -4]), 'LPSI': np.array([7, -2, 2])},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-6.72928306, -1.61360872, 2.96670695], [-6.56593805, -2.48907071, 1.86812391], [-5.52887619, -1.59397972, 2.21928602]]),
np.array([-4, 8, -5])]),
# Testing that when frame is composed of lists of ints
({'SACR': [-4, 8, -5], 'RASI': np.array([-6, 6, 3]), 'LASI': np.array([-7, -9, 1]), 'RPSI': [1, 0, -4],
'LPSI': [7, -2, 2]},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-6.72928306, -1.61360872, 2.96670695], [-6.56593805, -2.48907071, 1.86812391], [-5.52887619, -1.59397972, 2.21928602]]),
np.array([-4, 8, -5])]),
# Testing that when frame is composed of numpy arrays of ints
({'SACR': np.array([-4, 8, -5], dtype='int'), 'RASI': np.array([-6, 6, 3], dtype='int'),
'LASI': np.array([-7, -9, 1], dtype='int'), 'RPSI': np.array([1, 0, -4], dtype='int'),
'LPSI': np.array([7, -2, 2], dtype='int')},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-6.72928306, -1.61360872, 2.96670695], [-6.56593805, -2.48907071, 1.86812391], [-5.52887619, -1.59397972, 2.21928602]]),
np.array([-4, 8, -5])]),
# Testing that when frame is composed of lists of floats
({'SACR': [-4.0, 8.0, -5.0], 'RASI': np.array([-6.0, 6.0, 3.0]), 'LASI': np.array([-7.0, -9.0, 1.0]),
'RPSI': [1.0, 0.0, -4.0], 'LPSI': [7.0, -2.0, 2.0]},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-6.72928306, -1.61360872, 2.96670695], [-6.56593805, -2.48907071, 1.86812391], [-5.52887619, -1.59397972, 2.21928602]]),
np.array([-4, 8, -5])]),
# Testing that when frame is composed of numpy arrays of floats
({'SACR': np.array([-4.0, 8.0, -5.0], dtype='float'), 'RASI': np.array([-6.0, 6.0, 3.0], dtype='float'),
'LASI': np.array([-7.0, -9.0, 1.0], dtype='float'), 'RPSI': np.array([1.0, 0.0, -4.0], dtype='float'),
'LPSI': np.array([7.0, -2.0, 2.0], dtype='float')},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-6.72928306, -1.61360872, 2.96670695], [-6.56593805, -2.48907071, 1.86812391], [-5.52887619, -1.59397972, 2.21928602]]),
np.array([-4, 8, -5])])])
def test_pelvisJointCenter(self, frame, expected):
"""
This test provides coverage of the pelvisJointCenter function in pycgmStatic.py, defined as pelvisJointCenter(frame)
This test takes 2 parameters:
frame: dictionary of marker lists
expected: the expected result from calling pelvisJointCenter on frame
This test is checking to make sure the pelvis joint center and axis are calculated correctly given the input
parameters. The test checks to see that the correct values in expected are updated per each input parameter added:
When values are added to frame['RASI'] and frame['LASI'], expected[0] and expected[1] should be updated
When values are added to frame['RPSI'] and frame['LPSI'], expected[2] should be updated
When values are added to frame['SACR'], expected[2] should be updated, and expected[1] should also be updated
if there are values for frame['RASI'] and frame['LASI']
Values produced from frame['SACR'] take precedence over frame['RPSI'] and frame['LPSI'].
If RPSI and LPSI are given, the sacrum is the midpoint of those two markers; if they are not given, the sacrum is assumed to be already calculated / specified via frame['SACR'].
The origin of the pelvis is midpoint of the RASI and LASI markers.
The axis of the pelvis is calculated using LASI, RASI, origin, and sacrum in the Gram-Schmidt orthogonalization procedure (ref. Kadaba 1990).
Lastly, it checks that the resulting output is correct when frame is composed of lists of ints, numpy arrays of
ints, lists of floats, and numpy arrays of floats. frame['LASI'] and frame['RASI'] were kept as numpy arrays
every time as list would cause an error in the following line of pycgmStatic.py as lists cannot be divided by floats:
origin = (RASI+LASI)/2.0
"""
result = pycgmStatic.pelvisJointCenter(frame)
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
np.testing.assert_almost_equal(result[2], expected[2], rounding_precision)
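# Hedged sketch of the relationships described in the docstring above (illustrative only):
#   origin = (RASI + LASI) / 2.0                        # pelvis origin, expected[0]
#   sacrum = SACR if present else (RPSI + LPSI) / 2.0   # SACR takes precedence, expected[2]
# The three axis vectors in expected[1] are then built from these points via the
# Gram-Schmidt orthogonalization referenced above (Kadaba 1990) and placed at the origin.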
@pytest.mark.parametrize(["pel_origin", "pel_x", "pel_y", "pel_z", "vsk", "expected"], [
# Test from running sample data
([251.608306884766, 391.741317749023, 1032.893493652344], [251.740636241119, 392.726947206848, 1032.788500732036], [250.617115540376, 391.872328624646, 1032.874106304030], [251.602953357582, 391.847951338178, 1033.887777624562],
{'MeanLegLength': 940.0, 'R_AsisToTrocanterMeasure': 72.512, 'L_AsisToTrocanterMeasure': 72.512, 'InterAsisDistance': 215.908996582031},
[[182.57097863, 339.43231855, 935.52900126], [308.38050472, 322.80342417, 937.98979061]]),
# Basic test with zeros for all params
([0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[0, 0, 0], [0, 0, 0]]),
# Testing when values are added to pel_origin
([1, 0, -3], [0, 0, 0], [0, 0, 0], [0, 0, 0],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[-6.1387721, 0, 18.4163163], [8.53165418, 0, -25.59496255]]),
# Testing when values are added to pel_x
([0, 0, 0], [-5, -3, -6], [0, 0, 0], [0, 0, 0],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[54.02442793, 32.41465676, 64.82931352], [54.02442793, 32.41465676, 64.82931352]]),
# Testing when values are added to pel_y
([0, 0, 0], [0, 0, 0], [4, -1, 2], [0, 0, 0],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[29.34085257, -7.33521314, 14.67042628], [-29.34085257, 7.33521314, -14.67042628]]),
# Testing when values are added to pel_z
([0, 0, 0], [0, 0, 0], [0, 0, 0], [3, 8, 2],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[31.82533363, 84.86755635, 21.21688909], [31.82533363, 84.86755635, 21.21688909]]),
# Test when values are added to pel_x, pel_y, and pel_z
([0, 0, 0], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[115.19061413, 109.94699997, 100.71662889], [56.508909 , 124.61742625, 71.37577632]]),
# Test when values are added to pel_origin, pel_x, pel_y, and pel_z
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[109.05184203, 109.94699997, 119.13294518], [65.04056318, 124.61742625, 45.78081377]]),
# Test when values are added to pel_origin, pel_x, pel_y, pel_z, and vsk[MeanLegLength]
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 15.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[100.88576753, 97.85280235, 106.39612748], [61.83654463, 110.86920998, 41.31408931]]),
# Test when values are added to pel_origin, pel_x, pel_y, pel_z, and vsk[R_AsisToTrocanterMeasure]
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': -24.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[109.05184203, 109.94699997, 119.13294518], [-57.09307697, 115.44008189, 14.36512267]]),
# Test when values are added to pel_origin, pel_x, pel_y, pel_z, and vsk[L_AsisToTrocanterMeasure]
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': -7.0, 'InterAsisDistance': 0.0},
[[73.42953032, 107.27027453, 109.97003528], [65.04056318, 124.61742625, 45.78081377]]),
# Test when values are added to pel_origin, pel_x, pel_y, pel_z, and vsk[InterAsisDistance]
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 11.0},
[[125.55184203, 104.44699997, 146.63294518], [48.54056318, 130.11742625, 18.28081377]]),
# Test when values are added to pel_origin, pel_x, pel_y, pel_z, and all values in vsk
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 15.0, 'R_AsisToTrocanterMeasure': -24.0, 'L_AsisToTrocanterMeasure': -7.0, 'InterAsisDistance': 11.0},
[[81.76345582, 89.67607691, 124.73321758], [-76.79709552, 107.19186562, -17.60160178]]),
# Testing that when pel_origin, pel_x, pel_y, and pel_z are lists of ints and vsk values are ints
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 15, 'R_AsisToTrocanterMeasure': -24, 'L_AsisToTrocanterMeasure': -7, 'InterAsisDistance': 11},
[[81.76345582, 89.67607691, 124.73321758], [-76.79709552, 107.19186562, -17.60160178]]),
# Testing that when pel_origin, pel_x, pel_y, and pel_z are numpy arrays of ints and vsk values are ints
(np.array([1, 0, -3], dtype='int'), np.array([-5, -3, -6], dtype='int'), np.array([4, -1, 2], dtype='int'),
np.array([3, 8, 2], dtype='int'),
{'MeanLegLength': 15, 'R_AsisToTrocanterMeasure': -24, 'L_AsisToTrocanterMeasure': -7, 'InterAsisDistance': 11},
[[81.76345582, 89.67607691, 124.73321758], [-76.79709552, 107.19186562, -17.60160178]]),
# Testing that when pel_origin, pel_x, pel_y, and pel_z are lists of floats and vsk values are floats
([1.0, 0.0, -3.0], [-5.0, -3.0, -6.0], [4.0, -1.0, 2.0], [3.0, 8.0, 2.0],
{'MeanLegLength': 15.0, 'R_AsisToTrocanterMeasure': -24.0, 'L_AsisToTrocanterMeasure': -7.0, 'InterAsisDistance': 11.0},
[[81.76345582, 89.67607691, 124.73321758], [-76.79709552, 107.19186562, -17.60160178]]),
# Testing that when pel_origin, pel_x, pel_y, and pel_z are numpy arrays of floats and vsk values are floats
(np.array([1.0, 0.0, -3.0], dtype='float'), np.array([-5.0, -3.0, -6.0], dtype='float'),
np.array([4.0, -1.0, 2.0], dtype='float'), np.array([3.0, 8.0, 2.0], dtype='float'),
{'MeanLegLength': 15.0, 'R_AsisToTrocanterMeasure': -24.0, 'L_AsisToTrocanterMeasure': -7.0, 'InterAsisDistance': 11.0},
[[81.76345582, 89.67607691, 124.73321758], [-76.79709552, 107.19186562, -17.60160178]])])
def test_hipJointCenter(self, pel_origin, pel_x, pel_y, pel_z, vsk, expected):
"""
This test provides coverage of the hipJointCenter function in pycgmStatic.py, defined as hipJointCenter(frame, pel_origin, pel_x, pel_y, pel_z, vsk)
This test takes 6 parameters:
pel_origin: array of x,y,z position of origin of the pelvis
pel_x: array of x,y,z position of x-axis of the pelvis
pel_y: array of x,y,z position of y-axis of the pelvis
pel_z: array of x,y,z position of z-axis of the pelvis
vsk: dictionary containing subject measurements from a VSK file
expected: the expected result from calling hipJointCenter on pel_origin, pel_x, pel_y, pel_z, and vsk
This test is checking to make sure the hip joint center is calculated correctly given the input parameters.
The test checks to see that the correct values in expected are updated per each input parameter added. Any
parameter that is added should change every value in expected.
The hip joint center axis and origin are calculated using the Hip Joint Center Calculation (ref. Davis_1991).
Lastly, it checks that the resulting output is correct when pel_origin, pel_x, pel_y, and pel_z are composed of
lists of ints, numpy arrays of ints, lists of floats, and numpy arrays of floats and vsk values are ints or floats.
"""
result = pycgmStatic.hipJointCenter(None, pel_origin, pel_x, pel_y, pel_z, vsk)
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
@pytest.mark.parametrize(["l_hip_jc", "r_hip_jc", "pelvis_axis", "expected"], [
# Test from running sample data
([182.57097863, 339.43231855, 935.52900126], [308.38050472, 322.80342417, 937.98979061],
[np.array([251.60830688, 391.74131775, 1032.89349365]), np.array([[251.74063624, 392.72694721, 1032.78850073], [250.61711554, 391.87232862, 1032.8741063], [251.60295336, 391.84795134, 1033.88777762]]), np.array([231.57849121, 210.25262451, 1052.24969482])],
[[245.47574167208043, 331.1178713574418, 936.7593959314677], [[245.60807102843359, 332.10350081526684, 936.6544030111602], [244.48455032769033, 331.2488822330648, 936.7400085831541], [245.47038814489719, 331.22450494659665, 937.7536799036861]]]),
# Basic test with zeros for all params
([0, 0, 0], [0, 0, 0],
[np.array([0, 0, 0]), np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]), np.array(rand_coor)],
[[0, 0, 0], [[0, 0, 0], [0, 0, 0], [0, 0, 0]]]),
# Testing when values are added to l_hip_jc
([1, -3, 2], [0, 0, 0],
[np.array([0, 0, 0]), np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]), np.array(rand_coor)],
[[0.5, -1.5, 1], [[0.5, -1.5, 1], [0.5, -1.5, 1], [0.5, -1.5, 1]]]),
# Testing when values are added to r_hip_jc
([0, 0, 0], [-8, 1, 4],
[np.array([0, 0, 0]), np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]), np.array(rand_coor)],
[[-4, 0.5, 2], [[-4, 0.5, 2], [-4, 0.5, 2], [-4, 0.5, 2]]]),
# Testing when values are added to l_hip_jc and r_hip_jc
([8, -3, 7], [5, -2, -1],
[np.array([0, 0, 0]), np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]), np.array(rand_coor)],
[[6.5, -2.5, 3], [[6.5, -2.5, 3], [6.5, -2.5, 3], [6.5, -2.5, 3]]]),
# Testing when values are added to pelvis_axis[0]
([0, 0, 0], [0, 0, 0],
[np.array([1, -3, 6]), np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]), np.array(rand_coor)],
[[0, 0, 0], [[-1, 3, -6], [-1, 3, -6], [-1, 3, -6]]]),
# Testing when values are added to pelvis_axis[1]
([0, 0, 0], [0, 0, 0],
[np.array([0, 0, 0]), np.array([[1, 0, 5], [-2, -7, -3], [9, -2, 7]]), np.array(rand_coor)],
[[0, 0, 0], [[1, 0, 5], [-2, -7, -3], [9, -2, 7]]]),
# Testing when values are added to pelvis_axis[0] and pelvis_axis[1]
([0, 0, 0], [0, 0, 0],
[np.array([-3, 0, 5]), np.array([[-4, 5, -2], [0, 0, 0], [8, 5, -1]]), np.array(rand_coor)],
[[0, 0, 0], [[-1, 5, -7], [3, 0, -5], [11, 5, -6]]]),
# Testing when values are added to all params
([-5, 3, 8], [-3, -7, -1],
[np.array([6, 3, 9]), np.array([[5, 4, -2], [0, 0, 0], [7, 2, 3]]), np.array(rand_coor)],
[[-4, -2, 3.5], [[-5, -1, -7.5], [-10, -5, -5.5], [-3, -3, -2.5]]]),
# Testing that when l_hip_jc, r_hip_jc, and pelvis_axis are composed of lists of ints
([-5, 3, 8], [-3, -7, -1],
[[6, 3, 9], [[5, 4, -2], [0, 0, 0], [7, 2, 3]], rand_coor],
[[-4, -2, 3.5], [[-5, -1, -7.5], [-10, -5, -5.5], [-3, -3, -2.5]]]),
# Testing that when l_hip_jc, r_hip_jc, and pelvis_axis are composed of numpy arrays of ints
(np.array([-5, 3, 8], dtype='int'), np.array([-3, -7, -1], dtype='int'),
[np.array([6, 3, 9], dtype='int'), np.array([[5, 4, -2], [0, 0, 0], [7, 2, 3]], dtype='int'), rand_coor],
[[-4, -2, 3.5], [[-5, -1, -7.5], [-10, -5, -5.5], [-3, -3, -2.5]]]),
# Testing that when l_hip_jc, r_hip_jc, and pelvis_axis are composed of lists of floats
([-5.0, 3.0, 8.0], [-3.0, -7.0, -1.0],
[[6.0, 3.0, 9.0], [[5.0, 4.0, -2.0], [0.0, 0.0, 0.0], [7.0, 2.0, 3.0]], rand_coor],
[[-4, -2, 3.5], [[-5, -1, -7.5], [-10, -5, -5.5], [-3, -3, -2.5]]]),
# Testing that when l_hip_jc, r_hip_jc, and pelvis_axis are composed of numpy arrays of floats
(np.array([-5.0, 3.0, 8.0], dtype='float'), np.array([-3.0, -7.0, -1.0], dtype='float'),
[np.array([6.0, 3.0, 9.0], dtype='float'),
np.array([[5.0, 4.0, -2.0], [0.0, 0.0, 0.0], [7.0, 2.0, 3.0]], dtype='float'), rand_coor],
[[-4, -2, 3.5], [[-5, -1, -7.5], [-10, -5, -5.5], [-3, -3, -2.5]]])])
def test_hipAxisCenter(self, l_hip_jc, r_hip_jc, pelvis_axis, expected):
"""
This test provides coverage of the hipAxisCenter function in pycgmStatic.py, defined as hipAxisCenter(l_hip_jc, r_hip_jc, pelvis_axis)
This test takes 4 parameters:
l_hip_jc: array of left hip joint center x,y,z position
r_hip_jc: array of right hip joint center x,y,z position
pelvis_axis: array of pelvis origin and axis
expected: the expected result from calling hipAxisCenter on l_hip_jc, r_hip_jc, and pelvis_axis
This test is checking to make sure the hip axis center is calculated correctly given the input parameters.
The test checks to see that the correct values in expected are updated per each input parameter added:
When values are added to l_hip_jc or r_hip_jc, every value in expected should be updated
When values are added to pelvis_axis, expected[1] should be updated
The hip axis center is calculated using the midpoint of the right and left hip joint centers.
Then, the given pelvis_axis variable is converted into x,y,z axis format.
The pelvis axis is then translated to the shared hip center by calculating the sum of:
pelvis_axis axis component + hip_axis_center axis component
Lastly, it checks that the resulting output is correct when l_hip_jc, r_hip_jc, and pelvis_axis are composed of
lists of ints, numpy arrays of ints, lists of floats, and numpy arrays of floats.
"""
result = pycgmStatic.hipAxisCenter(l_hip_jc, r_hip_jc, pelvis_axis)
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
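# Hedged sketch of the arithmetic described in the docstring above (illustrative only):
#   hipaxis_center = (np.asarray(r_hip_jc) + np.asarray(l_hip_jc)) / 2.0
#   translated_axis = (np.asarray(pelvis_axis[1]) - np.asarray(pelvis_axis[0])) + hipaxis_center
# e.g. for the "all params" case: ([5, 4, -2] - [6, 3, 9]) + [-4, -2, 3.5] == [-5, -1, -7.5].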
@pytest.mark.parametrize(["frame", "hip_JC", "vsk", "mockReturnVal", "expectedMockArgs", "expected"], [
# Test from running sample data
({'RTHI': np.array([426.50338745, 262.65310669, 673.66247559]),
'LTHI': np.array([51.93867874, 320.01849365, 723.03186035]),
'RKNE': np.array([416.98687744, 266.22558594, 524.04089355]),
'LKNE': np.array([84.62355804, 286.69122314, 529.39819336])},
[[182.57097863, 339.43231855, 935.52900126], [308.38050472, 322.80342417, 937.98979061]],
{'RightKneeWidth': 105.0, 'LeftKneeWidth': 105.0},
[np.array([364.17774614, 292.17051722, 515.19181496]), np.array([143.55478579, 279.90370346, 524.78408753])],
[[[426.50338745, 262.65310669, 673.66247559], [308.38050472, 322.80342417, 937.98979061], [416.98687744, 266.22558594, 524.04089355], 59.5],
[[51.93867874, 320.01849365, 723.03186035], [182.57097863, 339.43231855, 935.52900126], [84.62355804, 286.69122314, 529.39819336], 59.5]],
[np.array([364.17774614, 292.17051722, 515.19181496]),
np.array([143.55478579, 279.90370346, 524.78408753]),
np.array([[[364.61959153, 293.06758353, 515.18513093], [363.29019771, 292.60656648, 515.04309095], [364.04724541, 292.24216264, 516.18067112]],
[[143.65611282, 280.88685896, 524.63197541], [142.56434499, 280.01777943, 524.86163553], [143.64837987, 280.04650381, 525.76940383]]])]),
# Test with zeros for all params
({'RTHI': np.array([0, 0, 0]), 'LTHI': np.array([0, 0, 0]), 'RKNE': np.array([0, 0, 0]), 'LKNE': np.array([0, 0, 0])},
[[0, 0, 0], [0, 0, 0]],
{'RightKneeWidth': 0.0, 'LeftKneeWidth': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0], [[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing when values are added to frame
({'RTHI': np.array([1, 2, 4]), 'LTHI': np.array([-1, 0, 8]), 'RKNE': np.array([8, -4, 5]), 'LKNE': np.array([8, -8, 5])},
[[0, 0, 0], [0, 0, 0]],
{'RightKneeWidth': 0.0, 'LeftKneeWidth': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[1, 2, 4], [0, 0, 0], [8, -4, 5], 7.0], [[-1, 0, 8], [0, 0, 0], [8, -8, 5], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing when values are added to hip_JC
({'RTHI': np.array([0, 0, 0]), 'LTHI': np.array([0, 0, 0]), 'RKNE': np.array([0, 0, 0]), 'LKNE': np.array([0, 0, 0])},
[[-8, 8, -2], [1, -9, 2]],
{'RightKneeWidth': 0.0, 'LeftKneeWidth': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[0, 0, 0], [1, -9, 2], [0, 0, 0], 7.0], [[0, 0, 0], [-8, 8, -2], [0, 0, 0], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[nan_3d, nan_3d, [0.10783277, -0.97049496, 0.21566555]],
[nan_3d, nan_3d, [-0.69631062, 0.69631062, -0.17407766]]])]),
# Testing when values are added to vsk
({'RTHI': np.array([0, 0, 0]), 'LTHI': np.array([0, 0, 0]), 'RKNE': np.array([0, 0, 0]), 'LKNE': np.array([0, 0, 0])},
[[0, 0, 0], [0, 0, 0]],
{'RightKneeWidth': 9.0, 'LeftKneeWidth': -6.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], 11.5], [[0, 0, 0], [0, 0, 0], [0, 0, 0], 4.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing when values are added to mockReturnVal
({'RTHI': np.array([0, 0, 0]), 'LTHI': np.array([0, 0, 0]), 'RKNE': np.array([0, 0, 0]), 'LKNE': np.array([0, 0, 0])},
[[0, 0, 0], [0, 0, 0]],
{'RightKneeWidth': 0.0, 'LeftKneeWidth': 0.0},
[np.array([-5, -5, -9]), np.array([3, -6, -5])],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0], [[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0]],
[np.array([-5, -5, -9]), np.array([3, -6, -5]),
np.array([[nan_3d, nan_3d, [-4.56314797, -4.56314797, -8.21366635]],
[nan_3d, nan_3d, [2.64143142, -5.28286283, -4.4023857]]])]),
# Testing when values are added to frame and hip_JC
({'RTHI': np.array([1, 2, 4]), 'LTHI': np.array([-1, 0, 8]), 'RKNE': np.array([8, -4, 5]), 'LKNE': np.array([8, -8, 5])},
[[-8, 8, -2], [1, -9, 2]],
{'RightKneeWidth': 0.0, 'LeftKneeWidth': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 7.0], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[[-0.0512465, -0.22206816, -0.97368348], [0.99284736, 0.09394289, -0.07368069], [0.10783277, -0.97049496, 0.21566555]],
[[-0.68318699, -0.71734633, -0.1366374 ], [-0.22001604, 0.02378552, 0.97520623], [-0.69631062, 0.69631062, -0.17407766]]])]),
# Testing when values are added to frame, hip_JC, and vsk
({'RTHI': np.array([1, 2, 4]), 'LTHI': np.array([-1, 0, 8]), 'RKNE': np.array([8, -4, 5]),
'LKNE': np.array([8, -8, 5])},
[[-8, 8, -2], [1, -9, 2]],
{'RightKneeWidth': 9.0, 'LeftKneeWidth': -6.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 11.5], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 4.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[[-0.0512465, -0.22206816, -0.97368348], [0.99284736, 0.09394289, -0.07368069], [0.10783277, -0.97049496, 0.21566555]],
[[-0.68318699, -0.71734633, -0.1366374 ], [-0.22001604, 0.02378552, 0.97520623], [-0.69631062, 0.69631062, -0.17407766]]])]),
# Testing when values are added to frame, hip_JC, vsk, and mockReturnVal
({'RTHI': np.array([1, 2, 4]), 'LTHI': np.array([-1, 0, 8]), 'RKNE': np.array([8, -4, 5]), 'LKNE': np.array([8, -8, 5])},
[[-8, 8, -2], [1, -9, 2]],
{'RightKneeWidth': 9.0, 'LeftKneeWidth': -6.0},
[np.array([-5, -5, -9]), np.array([3, -6, -5])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 11.5], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 4.0]],
[np.array([-5, -5, -9]), np.array([3, -6, -5]),
np.array([[[-5.65539698, -5.75053525, -8.91543265], [-4.39803462, -5.58669523, -9.54168847], [-4.54382845, -5.30411437, -8.16368549]],
[[2.57620655, -6.14126448, -5.89467506], [2.32975119, -6.6154814, -4.58533245], [2.39076635, -5.22461171, -4.83384537]]])]),
# Testing that when hip_JC is composed of lists of ints and vsk values are ints
({'RTHI': np.array([1, 2, 4]), 'LTHI': np.array([-1, 0, 8]), 'RKNE': np.array([8, -4, 5]), 'LKNE': np.array([8, -8, 5])},
[[-8, 8, -2], [1, -9, 2]],
{'RightKneeWidth': 9, 'LeftKneeWidth': -6},
[np.array([-5, -5, -9]), np.array([3, -6, -5])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 11.5], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 4.0]],
[np.array([-5, -5, -9]), np.array([3, -6, -5]),
np.array([[[-5.65539698, -5.75053525, -8.91543265], [-4.39803462, -5.58669523, -9.54168847], [-4.54382845, -5.30411437, -8.16368549]],
[[2.57620655, -6.14126448, -5.89467506], [2.32975119, -6.6154814, -4.58533245], [2.39076635, -5.22461171, -4.83384537]]])]),
# Testing that when hip_JC is composed of numpy arrays of ints and vsk values are ints
({'RTHI': np.array([1, 2, 4], dtype='int'), 'LTHI': np.array([-1, 0, 8], dtype='int'),
'RKNE': np.array([8, -4, 5], dtype='int'), 'LKNE': np.array([8, -8, 5], dtype='int')},
np.array([[-8, 8, -2], [1, -9, 2]], dtype='int'),
{'RightKneeWidth': 9, 'LeftKneeWidth': -6},
[np.array([-5, -5, -9]), np.array([3, -6, -5])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 11.5], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 4.0]],
[np.array([-5, -5, -9]), np.array([3, -6, -5]),
np.array([[[-5.65539698, -5.75053525, -8.91543265], [-4.39803462, -5.58669523, -9.54168847], [-4.54382845, -5.30411437, -8.16368549]],
[[2.57620655, -6.14126448, -5.89467506], [2.32975119, -6.6154814, -4.58533245], [2.39076635, -5.22461171, -4.83384537]]])]),
# Testing that when hip_JC is composed of lists of floats and vsk values are floats
({'RTHI': np.array([1, 2, 4]), 'LTHI': np.array([-1, 0, 8]), 'RKNE': np.array([8, -4, 5]), 'LKNE': np.array([8, -8, 5])},
[[-8.0, 8.0, -2.0], [1.0, -9.0, 2.0]],
{'RightKneeWidth': 9.0, 'LeftKneeWidth': -6.0},
[np.array([-5, -5, -9]), np.array([3, -6, -5])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 11.5], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 4.0]],
[np.array([-5, -5, -9]), np.array([3, -6, -5]),
np.array([[[-5.65539698, -5.75053525, -8.91543265], [-4.39803462, -5.58669523, -9.54168847], [-4.54382845, -5.30411437, -8.16368549]],
[[2.57620655, -6.14126448, -5.89467506], [2.32975119, -6.6154814, -4.58533245], [2.39076635, -5.22461171, -4.83384537]]])]),
# Testing that when hip_JC is composed of numpy arrays of floats and vsk values are floats
({'RTHI': np.array([1.0, 2.0, 4.0], dtype='float'), 'LTHI': np.array([-1.0, 0.0, 8.0], dtype='float'),
'RKNE': np.array([8.0, -4.0, 5.0], dtype='float'), 'LKNE': np.array([8.0, -8.0, 5.0], dtype='float')},
np.array([[-8.0, 8.0, -2.0], [1.0, -9.0, 2.0]], dtype='float'),
{'RightKneeWidth': 9.0, 'LeftKneeWidth': -6.0},
[np.array([-5, -5, -9]), np.array([3, -6, -5])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 11.5], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 4.0]],
[np.array([-5, -5, -9]), np.array([3, -6, -5]),
np.array([[[-5.65539698, -5.75053525, -8.91543265], [-4.39803462, -5.58669523, -9.54168847], [-4.54382845, -5.30411437, -8.16368549]],
[[2.57620655, -6.14126448, -5.89467506], [2.32975119, -6.6154814, -4.58533245], [2.39076635, -5.22461171, -4.83384537]]])])])
def test_kneeJointCenter(self, frame, hip_JC, vsk, mockReturnVal, expectedMockArgs, expected):
"""
This test provides coverage of the kneeJointCenter function in pycgmStatic.py, defined as kneeJointCenter(frame, hip_JC, delta, vsk)
This test takes 6 parameters:
frame: dictionary of marker lists
hip_JC: array containing the x,y,z positions of the right and left hip joint centers
vsk: dictionary containing subject measurements from a VSK file
mockReturnVal: the value to be returned by the mock for findJointC
expectedMockArgs: the expected arguments used to call the mocked function, findJointC
expected: the expected result from calling kneeJointCenter on frame, hip_JC, vsk, and mockReturnVal
This test is checking to make sure the knee joint center and axis are calculated correctly given the input
parameters. This test mocks findJointC to make sure the correct parameters are being passed into it given the
parameters passed into kneeJointCenter, and to also ensure that kneeJointCenter returns the correct value considering
the return value of findJointC, mockReturnVal.
For each direction (L or R) D, the D knee joint center is calculated using DTHI, D hip joint center, and
DKNE in the Rodrigues' rotation formula. The knee width for each knee is applied after the rotation in the formula as well.
Each knee joint center and the RKNE / LKNE markers are used in the Knee Axis Calculation
(ref. Clinical Gait Analysis hand book, Baker2013) calculation formula.
Lastly, it checks that the resulting output is correct when hip_JC is composed of lists of ints, numpy arrays of
ints, lists of floats, and numpy arrays of floats and vsk values are ints and floats. The values in frame were
kept as numpy arrays as lists would cause an error in the following lines of pycgmStatic.py as lists cannot
be subtracted by each other:
thi_kne_R = RTHI-RKNE
thi_kne_L = LTHI-LKNE
"""
with patch.object(pycgmStatic, 'findJointC', side_effect=mockReturnVal) as mock_findJointC:
result = pycgmStatic.kneeJointCenter(frame, hip_JC, None, vsk)
# Asserting that there were only 2 calls to findJointC
np.testing.assert_equal(mock_findJointC.call_count, 2)
# Asserting that the correct params were sent in the 1st (right) call to findJointC
np.testing.assert_almost_equal(expectedMockArgs[0][0], mock_findJointC.call_args_list[0][0][0], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[0][1], mock_findJointC.call_args_list[0][0][1], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[0][2], mock_findJointC.call_args_list[0][0][2], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[0][3], mock_findJointC.call_args_list[0][0][3], rounding_precision)
# Asserting that the correct params were sent in the 2nd (left) call to findJointC
np.testing.assert_almost_equal(expectedMockArgs[1][0], mock_findJointC.call_args_list[1][0][0], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[1][1], mock_findJointC.call_args_list[1][0][1], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[1][2], mock_findJointC.call_args_list[1][0][2], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[1][3], mock_findJointC.call_args_list[1][0][3], rounding_precision)
# Asserting that kneeJointCenter returned the correct result given the return value of the mocked findJointC
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
np.testing.assert_almost_equal(result[2], expected[2], rounding_precision)
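# Note on expectedMockArgs above: the fourth value passed to the mocked findJointC is the
# knee 'delta', which in every parametrized case equals KneeWidth / 2.0 + 7.0
# (e.g. 105.0 / 2 + 7 = 59.5, 9.0 / 2 + 7 = 11.5, -6.0 / 2 + 7 = 4.0, 0.0 / 2 + 7 = 7.0).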
@pytest.mark.parametrize(["frame", "knee_JC", "vsk", "mockReturnVal", "expectedMockArgs", "expected"], [
# Test from running sample data
({'RTIB': np.array([433.97537231, 211.93408203, 273.3008728 ]), 'LTIB': np.array([50.04016495, 235.90718079, 364.32226562]),
'RANK': np.array([422.77005005, 217.74053955, 92.86152649]), 'LANK': np.array([58.57380676, 208.54806519, 86.16953278])},
[np.array([364.17774614, 292.17051722, 515.19181496]), np.array([143.55478579, 279.90370346, 524.78408753]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': 70.0, 'LeftAnkleWidth': 70.0, 'RightTibialTorsion': 0.0, 'LeftTibialTorsion': 0.0},
[np.array([393.76181608, 247.67829633, 87.73775041]), np.array([98.74901939, 219.46930221, 80.6306816])],
[[[433.97537231, 211.93408203, 273.3008728 ], [364.17774614, 292.17051722, 515.19181496], [422.77005005, 217.74053955, 92.86152649], 42.0],
[[50.04016495, 235.90718079, 364.32226562], [143.55478579, 279.90370346, 524.78408753], [58.57380676, 208.54806519, 86.16953278], 42.0]],
[np.array([393.76181608, 247.67829633, 87.73775041]), np.array([98.74901939, 219.46930221, 80.6306816]),
[[np.array([394.48171575, 248.37201348, 87.715368]),
np.array([393.07114384, 248.39110006, 87.61575574]),
np.array([393.69314056, 247.78157916, 88.73002876])],
[np.array([98.47494966, 220.42553803, 80.52821783]),
np.array([97.79246671, 219.20927275, 80.76255901]),
np.array([98.84848169, 219.60345781, 81.61663775])]]]),
# Test with zeros for all params
({'RTIB': np.array([0, 0, 0]), 'LTIB': np.array([0, 0, 0]), 'RANK': np.array([0, 0, 0]), 'LANK': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': 0.0, 'LeftAnkleWidth': 0.0, 'RightTibialTorsion': 0.0, 'LeftTibialTorsion': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(nan_3d), np.array(nan_3d), np.array(nan_3d)],
[np.array(nan_3d), np.array(nan_3d), np.array(nan_3d)]]]),
# Testing when values are added to frame
({'RTIB': np.array([-9, 6, -9]), 'LTIB': np.array([0, 2, -1]), 'RANK': np.array([1, 0, -5]),
'LANK': np.array([2, -4, -5])},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': 0.0, 'LeftAnkleWidth': 0.0, 'RightTibialTorsion': 0.0, 'LeftTibialTorsion': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[-9, 6, -9], [0, 0, 0], [1, 0, -5], 7.0],
[[0, 2, -1], [0, 0, 0], [2, -4, -5], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(nan_3d), np.array(nan_3d), np.array(nan_3d)],
[np.array(nan_3d), np.array(nan_3d), np.array(nan_3d)]]]),
# Testing when values are added to knee_JC
({'RTIB': np.array([0, 0, 0]), 'LTIB': np.array([0, 0, 0]), 'RANK': np.array([0, 0, 0]), 'LANK': np.array([0, 0, 0])},
[np.array([-7, 1, 2]), np.array([9, -8, 9]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': 0.0, 'LeftAnkleWidth': 0.0, 'RightTibialTorsion': 0.0, 'LeftTibialTorsion': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[0, 0, 0], [-7, 1, 2], [0, 0, 0], 7.0],
[[0, 0, 0], [9, -8, 9], [0, 0, 0], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(nan_3d), np.array(nan_3d), np.array([-0.95257934, 0.13608276, 0.27216553])],
[np.array(nan_3d), np.array(nan_3d), np.array([0.59867109, -0.53215208, 0.59867109])]]]),
# Testing when values are added to vsk
({'RTIB': np.array([0, 0, 0]), 'LTIB': np.array([0, 0, 0]), 'RANK': np.array([0, 0, 0]), 'LANK': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': -38.0, 'LeftAnkleWidth': 18.0, 'RightTibialTorsion': 29.0, 'LeftTibialTorsion': -13.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], -12.0],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], 16.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(nan_3d), np.array(nan_3d), np.array(nan_3d)],
[np.array(nan_3d), np.array(nan_3d), np.array(nan_3d)]]]),
# Testing when values are added to mockReturnVal
({'RTIB': np.array([0, 0, 0]), 'LTIB': np.array([0, 0, 0]), 'RANK': np.array([0, 0, 0]), 'LANK': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': 0.0, 'LeftAnkleWidth': 0.0, 'RightTibialTorsion': 0.0, 'LeftTibialTorsion': 0.0},
[np.array([2, -5, 4]), np.array([8, -3, 1])],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0]],
[np.array([2, -5, 4]), np.array([8, -3, 1]),
[[np.array(nan_3d), np.array(nan_3d), np.array([1.7018576 , -4.25464401, 3.40371521])],
[np.array(nan_3d), np.array(nan_3d), np.array([7.07001889, -2.65125708, 0.88375236])]]]),
# Testing when values are added to frame and knee_JC
({'RTIB': np.array([-9, 6, -9]), 'LTIB': np.array([0, 2, -1]), 'RANK': np.array([1, 0, -5]), 'LANK': np.array([2, -4, -5])},
[np.array([-7, 1, 2]), np.array([9, -8, 9]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': 0.0, 'LeftAnkleWidth': 0.0, 'RightTibialTorsion': 0.0, 'LeftTibialTorsion': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[-9, 6, -9], [-7, 1, 2], [1, 0, -5], 7.0],
[[0, 2, -1], [9, -8, 9], [2, -4, -5], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array([-0.26726124, -0.80178373, -0.53452248]), np.array([0.14547859, -0.58191437, 0.80013226]), np.array([-0.95257934, 0.13608276, 0.27216553])],
[np.array([0.79317435, 0.49803971, -0.35047239]), np.array([-0.11165737, 0.68466825, 0.72025136]), np.array([0.59867109, -0.53215208, 0.59867109])]]]),
# Testing when values are added to frame, knee_JC, and vsk
({'RTIB': np.array([-9, 6, -9]), 'LTIB': np.array([0, 2, -1]), 'RANK': np.array([1, 0, -5]), 'LANK': np.array([2, -4, -5])},
[np.array([-7, 1, 2]), np.array([9, -8, 9]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': -38.0, 'LeftAnkleWidth': 18.0, 'RightTibialTorsion': 29.0, 'LeftTibialTorsion': -13.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[-9, 6, -9], [-7, 1, 2], [1, 0, -5], -12.0],
[[0, 2, -1], [9, -8, 9], [2, -4, -5], 16.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array([-0.30428137, -0.41913816, -0.85541572]), np.array([-0.00233238, -0.89766624, 0.4406698]), np.array([-0.95257934, 0.13608276, 0.27216553])],
[np.array([0.7477279, 0.63929183, -0.1794685]), np.array([-0.287221, 0.55508569, 0.7806305]), np.array([0.59867109, -0.53215208, 0.59867109])]]]),
# Testing when values are added to frame, knee_JC, vsk and mockReturnVal
({'RTIB': np.array([-9, 6, -9]), 'LTIB': np.array([0, 2, -1]), 'RANK': np.array([1, 0, -5]), 'LANK': np.array([2, -4, -5])},
[np.array([-7, 1, 2]), np.array([9, -8, 9]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': -38.0, 'LeftAnkleWidth': 18.0, 'RightTibialTorsion': 29.0, 'LeftTibialTorsion': -13.0},
[np.array([2, -5, 4]), np.array([8, -3, 1])],
[[[-9, 6, -9], [-7, 1, 2], [1, 0, -5], -12.0],
[[0, 2, -1], [9, -8, 9], [2, -4, -5], 16.0]],
[np.array([2, -5, 4]), np.array([8, -3, 1]),
[[np.array([1.48891678, -5.83482493, 3.7953997 ]), np.array([1.73661348, -5.07447603, 4.96181124]), np.array([1.18181818, -4.45454545, 3.81818182])],
[np.array([8.87317138, -2.54514024, 1.17514093]), np.array([7.52412119, -2.28213872, 1.50814815]), np.array([8.10540926, -3.52704628, 1.84327404])]]]),
# Testing that when knee_JC is composed of lists of ints and vsk values are ints
({'RTIB': np.array([-9, 6, -9]), 'LTIB': np.array([0, 2, -1]), 'RANK': np.array([1, 0, -5]), 'LANK': np.array([2, -4, -5])},
[[-7, 1, 2], [9, -8, 9],
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': -38, 'LeftAnkleWidth': 18, 'RightTibialTorsion': 29, 'LeftTibialTorsion': -13},
[np.array([2, -5, 4]), np.array([8, -3, 1])],
[[[-9, 6, -9], [-7, 1, 2], [1, 0, -5], -12.0],
[[0, 2, -1], [9, -8, 9], [2, -4, -5], 16.0]],
[np.array([2, -5, 4]), np.array([8, -3, 1]),
[[np.array([1.48891678, -5.83482493, 3.7953997]), np.array([1.73661348, -5.07447603, 4.96181124]), np.array([1.18181818, -4.45454545, 3.81818182])],
[np.array([8.87317138, -2.54514024, 1.17514093]), np.array([7.52412119, -2.28213872, 1.50814815]), np.array([8.10540926, -3.52704628, 1.84327404])]]]),
# Testing that when knee_JC is composed of numpy arrays of ints and vsk values are ints
({'RTIB': np.array([-9, 6, -9]), 'LTIB': np.array([0, 2, -1]), 'RANK': np.array([1, 0, -5]), 'LANK': np.array([2, -4, -5])},
[np.array([-7, 1, 2], dtype='int'), np.array([9, -8, 9], dtype='int'),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': -38, 'LeftAnkleWidth': 18, 'RightTibialTorsion': 29, 'LeftTibialTorsion': -13},
[np.array([2, -5, 4]), np.array([8, -3, 1])],
[[[-9, 6, -9], [-7, 1, 2], [1, 0, -5], -12.0],
[[0, 2, -1], [9, -8, 9], [2, -4, -5], 16.0]],
[np.array([2, -5, 4]), np.array([8, -3, 1]),
[[np.array([1.48891678, -5.83482493, 3.7953997]), np.array([1.73661348, -5.07447603, 4.96181124]), np.array([1.18181818, -4.45454545, 3.81818182])],
[np.array([8.87317138, -2.54514024, 1.17514093]), np.array([7.52412119, -2.28213872, 1.50814815]), np.array([8.10540926, -3.52704628, 1.84327404])]]]),
# Testing that when knee_JC is composed of lists of floats and vsk values are floats
({'RTIB': np.array([-9.0, 6.0, -9.0]), 'LTIB': np.array([0.0, 2.0, -1.0]), 'RANK': np.array([1.0, 0.0, -5.0]), 'LANK': np.array([2.0, -4.0, -5.0])},
[[-7.0, 1.0, 2.0], [9.0, -8.0, 9.0],
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': -38.0, 'LeftAnkleWidth': 18.0, 'RightTibialTorsion': 29.0, 'LeftTibialTorsion': -13.0},
[np.array([2, -5, 4]), np.array([8, -3, 1])],
[[[-9, 6, -9], [-7, 1, 2], [1, 0, -5], -12.0],
[[0, 2, -1], [9, -8, 9], [2, -4, -5], 16.0]],
[np.array([2, -5, 4]), np.array([8, -3, 1]),
[[np.array([1.48891678, -5.83482493, 3.7953997]), np.array([1.73661348, -5.07447603, 4.96181124]), np.array([1.18181818, -4.45454545, 3.81818182])],
[np.array([8.87317138, -2.54514024, 1.17514093]), np.array([7.52412119, -2.28213872, 1.50814815]), np.array([8.10540926, -3.52704628, 1.84327404])]]]),
# Testing that when knee_JC is composed of numpy arrays of floats and vsk values are floats
({'RTIB': np.array([-9.0, 6.0, -9.0]), 'LTIB': np.array([0.0, 2.0, -1.0]), 'RANK': np.array([1.0, 0.0, -5.0]), 'LANK': np.array([2.0, -4.0, -5.0])},
[np.array([-7.0, 1.0, 2.0], dtype='float'), np.array([9.0, -8.0, 9.0], dtype='float'),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]], dtype='float')],
{'RightAnkleWidth': -38.0, 'LeftAnkleWidth': 18.0, 'RightTibialTorsion': 29.0, 'LeftTibialTorsion': -13.0},
[np.array([2, -5, 4]), np.array([8, -3, 1])],
[[[-9, 6, -9], [-7, 1, 2], [1, 0, -5], -12.0],
[[0, 2, -1], [9, -8, 9], [2, -4, -5], 16.0]],
[np.array([2, -5, 4]), np.array([8, -3, 1]),
[[np.array([1.48891678, -5.83482493, 3.7953997]), np.array([1.73661348, -5.07447603, 4.96181124]), np.array([1.18181818, -4.45454545, 3.81818182])],
[np.array([8.87317138, -2.54514024, 1.17514093]), np.array([7.52412119, -2.28213872, 1.50814815]), np.array([8.10540926, -3.52704628, 1.84327404])]]])])
def test_ankleJointCenter(self, frame, knee_JC, vsk, mockReturnVal, expectedMockArgs, expected):
"""
This test provides coverage of the ankleJointCenter function in pycgmStatic.py, defined as ankleJointCenter(frame, knee_JC, delta, vsk)
This test takes 6 parameters:
frame: dictionary of marker lists
knee_JC: array containing the knee joint centers' x,y,z positions and the knee axes.
vsk: dictionary containing subject measurements from a VSK file
mockReturnVal: the value to be returned by the mock for findJointC
expectedMockArgs: the expected arguments used to call the mocked function, findJointC
expected: the expected result from calling ankleJointCenter on frame, knee_JC, vsk, and mockReturnVal
This test is checking to make sure the ankle joint center and axis are calculated correctly given the input
parameters. This test mocks findJointC to make sure the correct parameters are being passed into it given the
parameters passed into ankleJointCenter, and to also ensure that ankleJointCenter returns the correct value considering
the return value of findJointC, mockReturnVal.
The ankle joint center left and right origins are defined by using the ANK, Tib, and KJC marker positions in the Rodrigues' rotation formula.
The ankle joint center axis is calculated using the Ankle Axis Calculation (ref. Clinical Gait Analysis hand book, Baker2013).
Lastly, it checks that the resulting output is correct when knee_JC is composed of lists of ints, numpy arrays
of ints, lists of floats, and numpy arrays of floats and vsk values are ints and floats. The values in frame
were kept as numpy arrays as lists would cause an error in the following lines of pycgmStatic.py as lists cannot
be subtracted by each other:
tib_ank_R = tib_R-ank_R
tib_ank_L = tib_L-ank_L
"""
with patch.object(pycgmStatic, 'findJointC', side_effect=mockReturnVal) as mock_findJointC:
result = pycgmStatic.ankleJointCenter(frame, knee_JC, None, vsk)
# Asserting that there were only 2 calls to findJointC
np.testing.assert_equal(mock_findJointC.call_count, 2)
# Asserting that the correct params were sent in the 1st (right) call to findJointC
np.testing.assert_almost_equal(expectedMockArgs[0][0], mock_findJointC.call_args_list[0][0][0], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[0][1], mock_findJointC.call_args_list[0][0][1], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[0][2], mock_findJointC.call_args_list[0][0][2], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[0][3], mock_findJointC.call_args_list[0][0][3], rounding_precision)
# Asserting that the correct params were sent in the 2nd (left) call to findJointC
np.testing.assert_almost_equal(expectedMockArgs[1][0], mock_findJointC.call_args_list[1][0][0], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[1][1], mock_findJointC.call_args_list[1][0][1], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[1][2], mock_findJointC.call_args_list[1][0][2], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[1][3], mock_findJointC.call_args_list[1][0][3], rounding_precision)
# Asserting that ankleJointCenter returned the correct result given the return value of the mocked findJointC
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
np.testing.assert_almost_equal(result[2], expected[2], rounding_precision)
@pytest.mark.parametrize(["frame", "static_info", "ankle_JC", "expected"], [
# Test from running sample data
({'RTOE': np.array([442.81997681, 381.62280273, 42.66047668]), 'LTOE': np.array([39.43652725, 382.44522095, 41.78911591])},
[[0.03482194, 0.14879424, np.random.randint(0, 10)], [0.01139704, 0.02142806, np.random.randint(0, 10)]],
[np.array([393.76181608, 247.67829633, 87.73775041]),
np.array([98.74901939, 219.46930221, 80.6306816]),
[[np.array(nan_3d), np.array([393.07114384, 248.39110006, 87.61575574]), np.array(nan_3d)],
[np.array(nan_3d), np.array([97.79246671, 219.20927275, 80.76255901]), np.array(nan_3d)]]],
[np.array([442.81997681, 381.62280273, 42.66047668]),
np.array([39.43652725, 382.44522095, 41.78911591]),
np.array([[[442.8881541, 381.76460597, 43.64802096],
[441.89515447, 382.00308979, 42.66971773],
[442.44573691, 380.70886969, 42.81754643]],
[[39.50785213, 382.67891581, 42.75880631],
[38.49231839, 382.14765966, 41.93027863],
[39.75805858, 381.51956227, 41.98854914]]])]),
# Test with zeros for all params
({'RTOE': np.array([0, 0, 0]), 'LTOE': np.array([0, 0, 0])},
[[0, 0, np.random.randint(0, 10)], [0, 0, np.random.randint(0, 10)]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(nan_3d), np.array([0, 0, 0]), np.array(nan_3d)],
[np.array(nan_3d), np.array([0, 0, 0]), np.array(nan_3d)]]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing with values added to frame
({'RTOE': np.array([-1, -1, -5]), 'LTOE': np.array([-5, -6, 1])},
[[0, 0, np.random.randint(0, 10)], [0, 0, np.random.randint(0, 10)]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(nan_3d), np.array([0, 0, 0]), np.array(nan_3d)],
[np.array(nan_3d), np.array([0, 0, 0]), np.array(nan_3d)]]],
[np.array([-1, -1, -5]), np.array([-5, -6, 1]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing with values added to static_info
({'RTOE': np.array([0, 0, 0]), 'LTOE': np.array([0, 0, 0])},
[[-6, 7, np.random.randint(0, 10)], [2, -9, np.random.randint(0, 10)]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(nan_3d), np.array([0, 0, 0]), np.array(nan_3d)],
[np.array(nan_3d), np.array([0, 0, 0]), np.array(nan_3d)]]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing with values added to ankle_JC
({'RTOE': np.array([0, 0, 0]), 'LTOE': np.array([0, 0, 0])},
[[0, 0, np.random.randint(0, 10)], [0, 0, np.random.randint(0, 10)]],
[np.array([6, 0, 3]), np.array([1, 4, -3]),
[[np.array(nan_3d), np.array([-2, 8, 5]), np.array(nan_3d)],
[np.array(nan_3d), np.array([1, -6, 8]), np.array(nan_3d)]]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[[0.3713906763541037, 0.5570860145311556, -0.7427813527082074], [-0.24913643956121992, 0.8304547985373997, 0.49827287912243984], [0.8944271909999159, 0.0, 0.4472135954999579]],
[[-0.6855829496241487, 0.538672317561831, 0.4897021068743917], [0.701080937355391, 0.3073231506215415, 0.6434578466138523], [0.19611613513818404, 0.7844645405527362, -0.5883484054145521]]])]),
# Testing with values added to frame and static_info
({'RTOE': np.array([-1, -1, -5]), 'LTOE': np.array([-5, -6, 1])},
[[-6, 7, np.random.randint(0, 10)], [2, -9, np.random.randint(0, 10)]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(nan_3d), np.array([0, 0, 0]), np.array(nan_3d)],
[np.array(nan_3d), np.array([0, 0, 0]), np.array(nan_3d)]]],
[np.array([-1, -1, -5]), np.array([-5, -6, 1]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing with values added to frame and ankle_JC
({'RTOE': np.array([-1, -1, -5]), 'LTOE': np.array([-5, -6, 1])},
[[0, 0, np.random.randint(0, 10)], [0, 0, np.random.randint(0, 10)]],
[np.array([6, 0, 3]), np.array([1, 4, -3]),
[[np.array(nan_3d), np.array([-2, 8, 5]), np.array(nan_3d)],
[np.array(nan_3d), np.array([1, -6, 8]), np.array(nan_3d)]]],
[np.array([-1, -1, -5]), np.array([-5, -6, 1]),
np.array([[[-0.4764529245456802, -0.34134400184779123, -5.540435690791556], [-1.544126730072802, -0.25340750990010874, -4.617213172448785], [-0.3443899318928142, -0.9063414188418306, -4.250731350734645]],
[[-5.617369411832039, -5.417908840272649, 1.5291737815703186], [-4.3819280753253675, -6.057228881914318, 1.7840356822261547], [-4.513335736607712, -5.188892894346187, 0.6755571577384749]]])]),
# Testing with values added to static_info and ankle_JC
({'RTOE': np.array([0, 0, 0]), 'LTOE': np.array([0, 0, 0])},
[[-6, 7, np.random.randint(0, 10)], [2, -9, np.random.randint(0, 10)]],
[np.array([6, 0, 3]), np.array([1, 4, -3]),
[[np.array(nan_3d), np.array([-2, 8, 5]), np.array(nan_3d)],
[np.array(nan_3d), np.array([1, -6, 8]), np.array(nan_3d)]]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[[0.8676189717605698, 0.41998838044559317, -0.2661711481957037], [-0.35944921047092726, 0.8996435491853136, 0.2478663944569317], [0.3435601620283683, -0.11937857722363693, 0.9315123028533232]],
[[0.5438323231671144, -0.8140929502604927, -0.20371321168453085], [0.12764145145799288, 0.32016712879535714, -0.9387228928222822], [0.829429963377473, 0.48450560159311296, 0.27802923924749284]]])]),
# Testing with values added to frame, static_info and ankle_JC
({'RTOE': np.array([-1, -1, -5]), 'LTOE': np.array([-5, -6, 1])},
[[-6, 7, np.random.randint(0, 10)], [2, -9, np.random.randint(0, 10)]],
[np.array([6, 0, 3]), np.array([1, 4, -3]),
[[np.array(nan_3d), np.array([-2, 8, 5]), np.array(nan_3d)],
[np.array(nan_3d), np.array([1, -6, 8]), np.array(nan_3d)]]],
[np.array([-1, -1, -5]), np.array([-5, -6, 1]),
np.array([[[-0.17456964188738444, -0.44190534702217665, -4.915176169482615], [-1.564451151846412, -0.1819624820720035, -4.889503319319258], [-1.0077214691178664, -1.139086223544123, -4.009749828914483]],
[[-4.638059331793927, -6.864633064377841, 0.6515626072260268], [-4.6226610672854616, -5.522323332954951, 0.2066272429566376], [-4.147583269429562, -5.844325128086398, 1.4991503297587707]]])]),
# Testing that when frame, static_info and ankle_JC are composed of lists of ints
({'RTOE': [-1, -1, -5], 'LTOE': [-5, -6, 1]},
[[-6, 7, np.random.randint(0, 10)], [2, -9, np.random.randint(0, 10)]],
[[6, 0, 3], [1, 4, -3],
[[nan_3d, [-2, 8, 5], nan_3d],
[nan_3d, [1, -6, 8], nan_3d]]],
[np.array([-1, -1, -5]), np.array([-5, -6, 1]),
np.array([[[-0.17456964188738444, -0.44190534702217665, -4.915176169482615], [-1.564451151846412, -0.1819624820720035, -4.889503319319258], [-1.0077214691178664, -1.139086223544123, -4.009749828914483]],
[[-4.638059331793927, -6.864633064377841, 0.6515626072260268], [-4.6226610672854616, -5.522323332954951, 0.2066272429566376], [-4.147583269429562, -5.844325128086398, 1.4991503297587707]]])]),
# Testing that when frame, static_info and ankle_JC are composed of numpy arrays of ints
({'RTOE': np.array([-1, -1, -5], dtype='int'), 'LTOE': np.array([-5, -6, 1], dtype='int')},
[np.array([-6, 7, np.random.randint(0, 10)], dtype='int'), np.array([2, -9, np.random.randint(0, 10)], dtype='int')],
[np.array([6, 0, 3], dtype='int'), np.array([1, 4, -3], dtype='int'),
[[np.array(nan_3d), np.array([-2, 8, 5], dtype='int'), np.array(nan_3d)],
[np.array(nan_3d), np.array([1, -6, 8], dtype='int'), np.array(nan_3d)]]],
[np.array([-1, -1, -5]), np.array([-5, -6, 1]),
np.array([[[-0.17456964188738444, -0.44190534702217665, -4.915176169482615], [-1.564451151846412, -0.1819624820720035, -4.889503319319258], [-1.0077214691178664, -1.139086223544123, -4.009749828914483]],
[[-4.638059331793927, -6.864633064377841, 0.6515626072260268], [-4.6226610672854616, -5.522323332954951, 0.2066272429566376], [-4.147583269429562, -5.844325128086398, 1.4991503297587707]]])]),
# Testing that when frame, static_info and ankle_JC are composed of lists of floats
({'RTOE': [-1.0, -1.0, -5.0], 'LTOE': [-5.0, -6.0, 1.0]},
[[-6.0, 7.0, np.random.randint(0, 10)], [2.0, -9.0, np.random.randint(0, 10)]],
[[6.0, 0.0, 3.0], [1.0, 4.0, -3.0],
[[nan_3d, [-2.0, 8.0, 5.0], nan_3d],
[nan_3d, [1.0, -6.0, 8.0], nan_3d]]],
[np.array([-1, -1, -5]), np.array([-5, -6, 1]),
np.array([[[-0.17456964188738444, -0.44190534702217665, -4.915176169482615], [-1.564451151846412, -0.1819624820720035, -4.889503319319258], [-1.0077214691178664, -1.139086223544123, -4.009749828914483]],
[[-4.638059331793927, -6.864633064377841, 0.6515626072260268], [-4.6226610672854616, -5.522323332954951, 0.2066272429566376], [-4.147583269429562, -5.844325128086398, 1.4991503297587707]]])]),
# Testing that when frame, static_info and ankle_JC are composed of numpy arrays of floats
({'RTOE': np.array([-1.0, -1.0, -5.0], dtype='float'), 'LTOE': np.array([-5.0, -6.0, 1.0], dtype='float')},
[np.array([-6.0, 7.0, np.random.randint(0, 10)], dtype='float'),
np.array([2.0, -9.0, np.random.randint(0, 10)], dtype='float')],
[np.array([6.0, 0.0, 3.0], dtype='float'), np.array([1.0, 4.0, -3.0], dtype='float'),
[[np.array(nan_3d), np.array([-2.0, 8.0, 5.0], dtype='float'), np.array(nan_3d)],
[np.array(nan_3d), np.array([1.0, -6.0, 8.0], dtype='float'), np.array(nan_3d)]]],
[np.array([-1, -1, -5]), np.array([-5, -6, 1]),
np.array([[[-0.17456964188738444, -0.44190534702217665, -4.915176169482615], [-1.564451151846412, -0.1819624820720035, -4.889503319319258], [-1.0077214691178664, -1.139086223544123, -4.009749828914483]],
[[-4.638059331793927, -6.864633064377841, 0.6515626072260268], [-4.6226610672854616, -5.522323332954951, 0.2066272429566376], [-4.147583269429562, -5.844325128086398, 1.4991503297587707]]])])])
def test_footJointCenter(self, frame, static_info, ankle_JC, expected):
"""
        This test provides coverage of the footJointCenter function in pycgmStatic.py, defined as footJointCenter(frame, static_info, ankle_JC, knee_JC, delta)
        This test takes 4 parameters:
        frame: dictionary of marker lists
        static_info: array containing offset angles
        ankle_JC: array of ankle joint center x, y, z positions for both feet
        expected: the expected result from calling footJointCenter on frame, static_info, and ankle_JC
        The incorrect foot joint axes for both feet are calculated with:
            z-axis = ankle joint center - TOE marker
            y-flex = ankle joint center flexion - ankle joint center
            x-axis = y-flex cross z-axis
            y-axis = z-axis cross x-axis
        The foot joint axis is then calculated by rotating the incorrect foot joint axes about the offset angles.
        This test is checking to make sure the foot joint center and axis are calculated correctly given the input
        parameters. The test checks that the correct values in expected are updated for each input parameter added:
        When values are added to frame, expected[0] and expected[1] should be updated
        When values are added to static_info, expected[2] should be updated as long as there are values for frame and ankle_JC
        When values are added to ankle_JC, expected[2] should be updated
"""
result = pycgmStatic.footJointCenter(frame, static_info, ankle_JC, None, None)
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
np.testing.assert_almost_equal(result[2], expected[2], rounding_precision)
@pytest.mark.parametrize(["frame", "expected"], [
# Test from running sample data
({'LFHD': np.array([184.55158997, 409.68713379, 1721.34289551]), 'RFHD': np.array([325.82983398, 402.55450439, 1722.49816895]), 'LBHD': np.array([197.8621521 , 251.28889465, 1696.90197754]), 'RBHD': np.array([304.39898682, 242.91339111, 1694.97497559])},
[[[255.21590218, 407.10741939, 1722.0817318], [254.19105385, 406.14680918, 1721.91767712], [255.18370553, 405.95974655, 1722.90744993]], [255.19071197509766, 406.1208190917969, 1721.9205322265625]]),
# Basic test with a variance of 1 in the x and y dimensions of the markers
({'LFHD': np.array([1, 1, 0]), 'RFHD': np.array([0, 1, 0]), 'LBHD': np.array([1, 0, 0]), 'RBHD': np.array([0, 0, 0])},
[[[0.5, 2, 0], [1.5, 1, 0], [0.5, 1, -1]], [0.5, 1, 0]]),
# Setting the markers so there's no variance in the x-dimension
({'LFHD': np.array([0, 1, 0]), 'RFHD': np.array([0, 1, 0]), 'LBHD': np.array([0, 0, 0]), 'RBHD': np.array([0, 0, 0])},
[[nan_3d, nan_3d, nan_3d], [0, 1, 0]]),
# Setting the markers so there's no variance in the y-dimension
({'LFHD': np.array([1, 0, 0]), 'RFHD': np.array([0, 0, 0]), 'LBHD': np.array([1, 0, 0]), 'RBHD': np.array([0, 0, 0])},
[[nan_3d, nan_3d, nan_3d], [0.5, 0, 0]]),
# Setting each marker in a different xy quadrant
({'LFHD': np.array([-1, 1, 0]), 'RFHD': np.array([1, 1, 0]), 'LBHD': np.array([-1, -1, 0]), 'RBHD': np.array([1, -1, 0])},
[[[0, 2, 0], [-1, 1, 0], [0, 1, 1]], [0, 1, 0]]),
# Setting values of the markers so that midpoints will be on diagonals
({'LFHD': np.array([-2, 1, 0]), 'RFHD': np.array([1, 2, 0]), 'LBHD': np.array([-1, -2, 0]), 'RBHD': np.array([2, -1, 0])},
[[[-0.81622777, 2.4486833 , 0], [-1.4486833, 1.18377223, 0], [-0.5, 1.5, 1]], [-0.5, 1.5, 0]]),
# Adding the value of 1 in the z dimension for all 4 markers
({'LFHD': np.array([1, 1, 1]), 'RFHD': np.array([0, 1, 1]), 'LBHD': np.array([1, 0, 1]), 'RBHD': np.array([0, 0, 1])},
[[[0.5, 2, 1], [1.5, 1, 1], [0.5, 1, 0]], [0.5, 1, 1]]),
# Setting the z dimension value higher for LFHD and LBHD
({'LFHD': np.array([1, 1, 2]), 'RFHD': np.array([0, 1, 1]), 'LBHD': np.array([1, 0, 2]), 'RBHD': np.array([0, 0, 1])},
[[[0.5, 2, 1.5], [1.20710678, 1, 2.20710678], [1.20710678, 1, 0.79289322]], [0.5, 1, 1.5]]),
# Setting the z dimension value higher for LFHD and RFHD
({'LFHD': np.array([1, 1, 2]), 'RFHD': np.array([0, 1, 2]), 'LBHD': np.array([1, 0, 1]), 'RBHD': np.array([0, 0, 1])},
[[[0.5, 1.70710678, 2.70710678], [1.5, 1, 2], [0.5, 1.70710678, 1.29289322]], [0.5, 1, 2]]),
# Testing that when frame is composed of lists of ints
({'LFHD': [1, 1, 2], 'RFHD': [0, 1, 2], 'LBHD': [1, 0, 1], 'RBHD': [0, 0, 1]},
[[[0.5, 1.70710678, 2.70710678], [1.5, 1, 2], [0.5, 1.70710678, 1.29289322]], [0.5, 1, 2]]),
# Testing that when frame is composed of numpy arrays of ints
({'LFHD': np.array([1, 1, 2], dtype='int'), 'RFHD': np.array([0, 1, 2], dtype='int'),
'LBHD': np.array([1, 0, 1], dtype='int'), 'RBHD': np.array([0, 0, 1], dtype='int')},
[[[0.5, 1.70710678, 2.70710678], [1.5, 1, 2], [0.5, 1.70710678, 1.29289322]], [0.5, 1, 2]]),
# Testing that when frame is composed of lists of floats
({'LFHD': [1.0, 1.0, 2.0], 'RFHD': [0.0, 1.0, 2.0], 'LBHD': [1.0, 0.0, 1.0], 'RBHD': [0.0, 0.0, 1.0]},
[[[0.5, 1.70710678, 2.70710678], [1.5, 1, 2], [0.5, 1.70710678, 1.29289322]], [0.5, 1, 2]]),
# Testing that when frame is composed of numpy arrays of floats
({'LFHD': np.array([1.0, 1.0, 2.0], dtype='float'), 'RFHD': np.array([0.0, 1.0, 2.0], dtype='float'),
'LBHD': np.array([1.0, 0.0, 1.0], dtype='float'), 'RBHD': np.array([0.0, 0.0, 1.0], dtype='float')},
[[[0.5, 1.70710678, 2.70710678], [1.5, 1, 2], [0.5, 1.70710678, 1.29289322]], [0.5, 1, 2]])])
def test_headJC(self, frame, expected):
"""
        This test provides coverage of the headJC function in pycgmStatic.py, defined as headJC(frame)
        This test takes 2 parameters:
        frame: dictionary of marker lists
        expected: the expected result from calling headJC on frame
        This test is checking to make sure the head joint center and head joint axis are calculated correctly given
        the 4 coordinates given in frame. This includes testing when there is no variance in the coordinates,
        when the coordinates are in different quadrants, when the midpoints will be on diagonals, and when the z
        dimension is variable.
        The function uses the LFHD, RFHD, LBHD, and RBHD markers from the frame to calculate the midpoints of the front, back, left, and right center positions of the head.
        The head axis vector components are then calculated using the aforementioned midpoints.
        Afterwards, the axes are made orthogonal by calculating the cross product of each individual axis.
        Lastly, it checks that the resulting output is correct when frame is composed of lists of ints, numpy arrays of
        ints, lists of floats, and numpy arrays of floats.
"""
result = pycgmStatic.headJC(frame)
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
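    # A minimal sketch (not part of the test suite) of the head-axis construction
    # described in the docstring above; lfhd, rfhd, lbhd and rbhd are placeholder
    # 3-vectors standing in for the four head markers:
    #   front, back = (lfhd + rfhd) / 2.0, (lbhd + rbhd) / 2.0
    #   left, right = (lfhd + lbhd) / 2.0, (rfhd + rbhd) / 2.0
    #   x = (front - back) / np.linalg.norm(front - back)
    #   w = left - right
    #   z = np.cross(x, w); z = z / np.linalg.norm(z)
    #   y = np.cross(z, x)
    #   axis = [front + x, front + y, front + z]   # origin is the front midpoint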
@pytest.mark.parametrize(["frame", "ankle_JC", "expected"], [
# Test from running sample data
({'RTOE': np.array([433.33508301, 354.97229004, 44.27765274]),
'LTOE': np.array([31.77310181, 331.23657227, 42.15322876])},
[np.array([397.45738291, 217.50712216, 87.83068433]), np.array([112.28082818, 175.83265027, 80.98477997]),
[[np.array(rand_coor), np.array([396.73749179, 218.18875543, 87.69979179]), np.array(rand_coor)],
[np.array(rand_coor), np.array([111.34886681, 175.49163538, 81.10789314]), np.array(rand_coor)]]],
[np.array([433.33508301, 354.97229004, 44.27765274]), np.array([31.77310181, 331.23657227, 42.15322876]),
[[[433.4256618315962, 355.25152027652007, 45.233595181827035],
[432.36890500826763, 355.2296456773885, 44.29402798451682],
[433.09363829389764, 354.0471962330562, 44.570749823731354]],
[[31.806110207058808, 331.49492345678016, 43.11871573923792],
[30.880216288550965, 330.81014854432254, 42.29786022762896],
[32.2221740692973, 330.36972887034574, 42.36983123198873]]]]),
# Test with zeros for all params
({'RTOE': np.array([0, 0, 0]), 'LTOE': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)],
[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)]]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]]]),
# Testing when values are added to frame['RTOE']
({'RTOE': np.array([-7, 3, -8]), 'LTOE': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)],
[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)]]],
[np.array([-7, 3, -8]), np.array([0, 0, 0]),
[[nan_3d, nan_3d, [-6.36624977770237, 2.7283927618724446, -7.275714031659851]],
[nan_3d, nan_3d, nan_3d]]]),
# Testing when values are added to frame['LTOE']
({'RTOE': np.array([0, 0, 0]), 'LTOE': np.array([8, 0, -8])},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)],
[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)]]],
[np.array([0, 0, 0]), np.array([8, 0, -8]),
[[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, [7.292893218813452, 0.0, -7.292893218813452]]]]),
# Testing when values are added to frame
({'RTOE': np.array([-7, 3, -8]), 'LTOE': np.array([8, 0, -8])},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)],
[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)]]],
[np.array([-7, 3, -8]), np.array([8, 0, -8]),
[[nan_3d, nan_3d, [-6.36624977770237, 2.7283927618724446, -7.275714031659851]],
[nan_3d, nan_3d, [7.292893218813452, 0.0, -7.292893218813452]]]]),
# Testing when values are added to ankle_JC[0]
({'RTOE': np.array([0, 0, 0]), 'LTOE': np.array([0, 0, 0])},
[np.array([2, -9, 1]), np.array([0, 0, 0]),
[[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)],
[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)]]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[nan_3d, nan_3d, [0.21566554640687682, -0.9704949588309457, 0.10783277320343841]],
[nan_3d, nan_3d, nan_3d]]]),
# Testing when values are added to ankle_JC[1]
({'RTOE': np.array([0, 0, 0]), 'LTOE': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([3, -7, 4]),
[[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)],
[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)]]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, [0.34874291623145787, -0.813733471206735, 0.46499055497527714]]]]),
# Testing when values are added to ankle_JC[2]
({'RTOE': np.array([0, 0, 0]), 'LTOE': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(rand_coor), np.array([8, -4, 2]), np.array(rand_coor)],
[np.array(rand_coor), np.array([-9, 7, 4]), np.array(rand_coor)]]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]]]),
# Testing when values are added to ankle_JC
({'RTOE': np.array([0, 0, 0]), 'LTOE': np.array([0, 0, 0])},
[np.array([2, -9, 1]), np.array([3, -7, 4]),
[[np.array(rand_coor), np.array([8, -4, 2]), np.array(rand_coor)],
[np.array(rand_coor), np.array([-9, 7, 4]), np.array(rand_coor)]]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[[0.21329967236760183, -0.06094276353360052, -0.9750842165376084], [0.9528859437838807, 0.23329276554708803, 0.1938630023560309], [0.21566554640687682, -0.9704949588309457, 0.10783277320343841]],
[[0.6597830814767823, 0.5655283555515277, 0.4948373111075868], [-0.6656310267523443, 0.1342218942833945, 0.7341115850601987], [0.34874291623145787, -0.813733471206735, 0.46499055497527714]]]]),
# Testing when values are added to frame and ankle_JC
({'RTOE': np.array([-7, 3, -8]), 'LTOE': np.array([8, 0, -8])},
[np.array([2, -9, 1]), np.array([3, -7, 4]),
[[np.array(rand_coor), np.array([8, -4, 2]), np.array(rand_coor)],
[np.array(rand_coor), np.array([-9, 7, 4]), np.array(rand_coor)]]],
[np.array([-7, 3, -8]), np.array([8, 0, -8]),
[[[-6.586075309097216, 2.6732173492872757, -8.849634891853084], [-6.249026985898898, 3.6500960420576702, -7.884178291357542], [-6.485504244572473, 2.3140056594299647, -7.485504244572473]],
[[8.623180382731631, 0.5341546137699694, -7.428751315829338], [7.295040915019964, 0.6999344300621451, -7.885437867872096], [7.6613572692607015, -0.47409982303501746, -7.187257446225685]]]]),
# Testing that when frame and ankle_JC are composed of lists of ints
({'RTOE': [-7, 3, -8], 'LTOE': [8, 0, -8]},
[[2, -9, 1], [3, -7, 4],
[[rand_coor, [8, -4, 2], rand_coor],
[rand_coor, [-9, 7, 4], rand_coor]]],
[np.array([-7, 3, -8]), np.array([8, 0, -8]),
[[[-6.586075309097216, 2.6732173492872757, -8.849634891853084], [-6.249026985898898, 3.6500960420576702, -7.884178291357542], [-6.485504244572473, 2.3140056594299647, -7.485504244572473]],
[[8.623180382731631, 0.5341546137699694, -7.428751315829338], [7.295040915019964, 0.6999344300621451, -7.885437867872096], [7.6613572692607015, -0.47409982303501746, -7.187257446225685]]]]),
# Testing that when frame and ankle_JC are composed of numpy arrays of ints
({'RTOE': np.array([-7, 3, -8], dtype='int'), 'LTOE': np.array([8, 0, -8], dtype='int')},
[np.array([2, -9, 1], dtype='int'), np.array([3, -7, 4], dtype='int'),
[np.array([rand_coor, [8, -4, 2], rand_coor], dtype='int'),
np.array([rand_coor, [-9, 7, 4], rand_coor], dtype='int')]],
[np.array([-7, 3, -8]), np.array([8, 0, -8]),
[[[-6.586075309097216, 2.6732173492872757, -8.849634891853084], [-6.249026985898898, 3.6500960420576702, -7.884178291357542], [-6.485504244572473, 2.3140056594299647, -7.485504244572473]],
[[8.623180382731631, 0.5341546137699694, -7.428751315829338], [7.295040915019964, 0.6999344300621451, -7.885437867872096], [7.6613572692607015, -0.47409982303501746, -7.187257446225685]]]]),
# Testing that when frame and ankle_JC are composed of lists of floats
({'RTOE': [-7.0, 3.0, -8.0], 'LTOE': [8.0, 0.0, -8.0]},
[[2.0, -9.0, 1.0], [3.0, -7.0, 4.0],
[[rand_coor, [8.0, -4.0, 2.0], rand_coor],
[rand_coor, [-9.0, 7.0, 4.0], rand_coor]]],
[np.array([-7, 3, -8]), np.array([8, 0, -8]),
[[[-6.586075309097216, 2.6732173492872757, -8.849634891853084], [-6.249026985898898, 3.6500960420576702, -7.884178291357542], [-6.485504244572473, 2.3140056594299647, -7.485504244572473]],
[[8.623180382731631, 0.5341546137699694, -7.428751315829338], [7.295040915019964, 0.6999344300621451, -7.885437867872096], [7.6613572692607015, -0.47409982303501746, -7.187257446225685]]]]),
# Testing that when frame and ankle_JC are composed of numpy arrays of floats
({'RTOE': np.array([-7.0, 3.0, -8.0], dtype='float'), 'LTOE': np.array([8.0, 0.0, -8.0], dtype='float')},
[np.array([2.0, -9.0, 1.0], dtype='float'), np.array([3.0, -7.0, 4.0], dtype='float'),
[np.array([rand_coor, [8.0, -4.0, 2.0], rand_coor], dtype='float'),
np.array([rand_coor, [-9.0, 7.0, 4.0], rand_coor], dtype='float')]],
[np.array([-7, 3, -8]), np.array([8, 0, -8]),
[[[-6.586075309097216, 2.6732173492872757, -8.849634891853084], [-6.249026985898898, 3.6500960420576702, -7.884178291357542], [-6.485504244572473, 2.3140056594299647, -7.485504244572473]],
[[8.623180382731631, 0.5341546137699694, -7.428751315829338], [7.295040915019964, 0.6999344300621451, -7.885437867872096], [7.6613572692607015, -0.47409982303501746, -7.187257446225685]]]])])
def test_uncorrect_footaxis(self, frame, ankle_JC, expected):
"""
This test provides coverage of the uncorrect_footaxis function in pycgmStatic.py, defined as uncorrect_footaxis(frame, ankle_JC)
This test takes 3 parameters:
        frame: dictionary of marker lists
        ankle_JC: array of ankle joint center x, y, z positions for both feet
        expected: the expected result from calling uncorrect_footaxis on frame and ankle_JC, which should be the
        anatomically incorrect foot axis
        Given a marker RTOE and the ankle JC, the right anatomically incorrect foot axis is calculated with:
        .. math::
            R = [R_x + ROrigin_x, R_y + ROrigin_y, R_z + ROrigin_z]
        where :math:`ROrigin_x` is the x coordinate of the foot axis's origin taken from frame['RTOE'],
        :math:`R_x` is the unit vector of :math:`Yflex_R \times R_z`,
        :math:`R_y` is the unit vector of :math:`R_z \times R_x`,
        :math:`R_z` is the unit vector of the axis from the right toe to the right ankle JC, and
        :math:`Yflex_R` is the unit vector of the axis from the right ankle flexion to the right ankle JC.
        The same calculation applies for the left anatomically incorrect foot axis by replacing all the right values
        with left values.
        This unit test ensures that:
        - the markers for RTOE and LTOE only affect either the right or the left axis
        - ankle_JC_R and ankle_JC_L only affect either the right or the left axis
- the resulting output is correct when frame and ankle_JC are composed of lists of ints,
numpy arrays of ints, lists of floats, and numpy arrays of floats.
"""
result = pycgmStatic.uncorrect_footaxis(frame, ankle_JC)
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
np.testing.assert_almost_equal(result[2], expected[2], rounding_precision)
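    # A minimal sketch (not part of the test suite) of the construction the
    # docstring above describes, for one foot; toe, ankle_jc and ankle_flexion
    # are placeholder 3-vectors:
    #   z = ankle_jc - toe; z = z / np.linalg.norm(z)
    #   y_flex = ankle_flexion - ankle_jc
    #   x = np.cross(y_flex, z); x = x / np.linalg.norm(x)
    #   y = np.cross(z, x); y = y / np.linalg.norm(y)
    #   axis = [toe + x, toe + y, toe + z]   # R = [R_x + ROrigin_x, ...]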
@pytest.mark.parametrize(["frame", "ankle_JC", "vsk", "expected"], [
# Test from running sample data
({'RHEE': [374.01257324, 181.57929993, 49.50960922],
'LHEE': [105.30126953, 180.2130127, 47.15660858],
'RTOE': [442.81997681, 381.62280273, 42.66047668],
'LTOE': [39.43652725, 382.44522095, 41.78911591]},
[np.array([393.76181608, 247.67829633, 87.73775041]),
np.array([98.74901939, 219.46930221, 80.6306816]),
[[np.array(rand_coor), np.array([393.07114384, 248.39110006, 87.61575574]), np.array(rand_coor)],
[np.array(rand_coor), np.array([97.79246671, 219.20927275, 80.76255901]), np.array(rand_coor)]]],
{'RightSoleDelta': 0.45, 'LeftSoleDelta': 0.45},
[np.array([442.81997681, 381.62280273, 42.66047668]),
np.array([ 39.43652725, 382.44522095, 41.78911591]),
np.array([[[442.30666241, 381.79936348, 43.50031871],
[442.02580128, 381.89596909, 42.1176458 ],
[442.49471759, 380.67717784, 42.66047668]],
[[39.14565179, 382.3504861, 42.74117514],
[38.53126992, 382.15038888, 41.48320216],
[39.74620554, 381.49437955, 41.78911591]]])]),
# Testing with zeros for all params
({'RHEE': [0, 0, 0], 'LHEE': [0, 0, 0], 'RTOE': [0, 0, 0], 'LTOE': [0, 0, 0]},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)],
[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)]]],
{'RightSoleDelta': 0.0, 'LeftSoleDelta': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing with values for frame
({'RHEE': [1, -4, -9], 'LHEE': [2, -3, -1], 'RTOE': [1, 4, -6], 'LTOE': [4, 2, 2]},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)],
[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)]]],
{'RightSoleDelta': 0.0, 'LeftSoleDelta': 0.0},
[np.array([1, 4, -6]), np.array([4, 2, 2]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing with values for ankleJC
({'RHEE': [0, 0, 0], 'LHEE': [0, 0, 0], 'RTOE': [0, 0, 0], 'LTOE': [0, 0, 0]},
[np.array([-5, -5, -1]), np.array([5, 7, 1]),
[[np.array(rand_coor), np.array([9, 3, 7]), np.array(rand_coor)],
[np.array(rand_coor), np.array([-9, 2, 9]), np.array(rand_coor)]]],
{'RightSoleDelta': 0.0, 'LeftSoleDelta': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing with values for vsk
({'RHEE': [0, 0, 0], 'LHEE': [0, 0, 0], 'RTOE': [0, 0, 0], 'LTOE': [0, 0, 0]},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)],
[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)]]],
{'RightSoleDelta': 0.64, 'LeftSoleDelta': 0.19},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing with values for frame and ankleJC
({'RHEE': [1, -4, -9], 'LHEE': [2, -3, -1], 'RTOE': [1, 4, -6], 'LTOE': [4, 2, 2]},
[np.array([-5, -5, -1]), np.array([5, 7, 1]),
[[np.array(rand_coor), np.array([9, 3, 7]), np.array(rand_coor)],
[np.array(rand_coor), np.array([-9, 2, 9]), np.array(rand_coor)]]],
{'RightSoleDelta': 0.0, 'LeftSoleDelta': 0.0},
[np.array([1, 4, -6]), np.array([4, 2, 2]),
np.array([[[1.4961389383568338, 4.0, -6.868243142124459], [1.8682431421244592, 4.0, -5.503861061643166], [1.0, 3.0, -6.0]],
[[4.541530361073883, 1.783387855570447, 2.8122955416108235], [3.245802523504333, 2.301678990598267, 2.5832460484899826], [3.6286093236458963, 1.0715233091147407, 2.0]]])]),
# Testing with values for frame and vsk
({'RHEE': [1, -4, -9], 'LHEE': [2, -3, -1], 'RTOE': [1, 4, -6], 'LTOE': [4, 2, 2]},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)],
[np.array(rand_coor), np.array([0, 0, 0]), np.array(rand_coor)]]],
{'RightSoleDelta': 0.64, 'LeftSoleDelta': 0.19},
[np.array([1, 4, -6]), np.array([4, 2, 2]),
np.array([[[0.0, 4.0, -6.0], [1.0, 4.0, -7.0], [1.0, 3.0, -6.0]],
[[3.071523309114741, 2.3713906763541037, 2.0], [4.0, 2.0, 1.0], [3.6286093236458963, 1.0715233091147407, 2.0]]])]),
# Testing with values for ankleJC and vsk
({'RHEE': [0, 0, 0], 'LHEE': [0, 0, 0], 'RTOE': [0, 0, 0], 'LTOE': [0, 0, 0]},
[np.array([-5, -5, -1]), np.array([5, 7, 1]),
[[np.array(rand_coor), np.array([9, 3, 7]), np.array(rand_coor)],
[np.array(rand_coor), np.array([-9, 2, 9]), np.array(rand_coor)]]],
{'RightSoleDelta': 0.64, 'LeftSoleDelta': 0.19},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing with values for frame, ankleJC, and vsk
({'RHEE': [1, -4, -9], 'LHEE': [2, -3, -1], 'RTOE': [1, 4, -6], 'LTOE': [4, 2, 2]},
     [np.array([-5, -5, -1]),
import tensorflow_probability as tfp
import numpy as np
import tensorflow as tf
import time
from tqdm import tqdm
import pickle
from math import *
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
myFmt = mdates.DateFormatter('%Hh')
import pandas as pd
np.set_printoptions(suppress=True,precision=6)
import matplotlib.pyplot as plt
import matplotlib
plt.style.use('ggplot')
plt.style.use('seaborn-paper')
plt.style.use('seaborn-whitegrid')
import sys
from tensorflow_probability import distributions as tfd
from geopy.distance import geodesic
from move_ns import moveNS
def setup_data(skip=2):
df = pd.read_csv('data/ovejas.csv')
df = df[df.id!=34]
df['ID'] = df['id'].astype('category').cat.rename_categories(range(0, df['id'].nunique())).astype('int')
ID = df['ID'].values
Xgps = df[['lat','lon']].values
minX = np.min(Xgps[:,0])
minY = np.min(Xgps[:,1])
secs =(pd.to_datetime(df['time'])- pd.datetime(2018,1,1)).dt.seconds.astype(float).values
days = (pd.to_datetime(df['time'])- pd.datetime(2018,1,1)).dt.days.astype(float).values
T = (days*24*60+secs/60)/(60*24) #days
T = T-np.min(T)
rescale = 24 # use hours to improve numerical stability
T = T * rescale
# use geodesic to get the straight line distance between two points
Xmetres = np.array([geodesic((xloc,minY), (minX,minY)).meters for xloc in Xgps[:,0]])
Ymetres = np.array([geodesic((minX,yloc), (minX,minY)).meters for yloc in Xgps[:,1]])
X = np.array([Xmetres, Ymetres]).T
T=T[::skip,None]
X=X[::skip]
ID=ID[::skip]
return X, T, ID
# set up positions data
X,T,ID = setup_data(skip=2)
X[:,0] = X[:,0]-X[:,0].mean()
X[:,1] = X[:,1]-X[:,1].mean()
X[:,0] = X[:,0]/1000
X[:,1] = X[:,1]/1000
# set up lower level GP locations covering 24 hours
Z = np.linspace(0,24,num=25,endpoint=False).astype(np.float64)[:,None]
def sp_shift(x):
# softplus transform with shift
return tf.nn.softplus(x)+1e-4
def periodic_kernel(x1,x2):
# periodic kernel with parameter set to encode
# daily activity pattern (period=rescale).
return tfp.math.psd_kernels.ExpSinSquared(x1,x2,np.float64(24.0))
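# Quick illustration (not part of the original pipeline): sp_shift maps any
# unconstrained real parameter to a strictly positive value, so the raw kernel
# parameters below can be initialised at 0.0 and freely optimised.
#   sp_shift(0.0)    # ~ softplus(0) + 1e-4 = 0.693
#   sp_shift(-10.0)  # ~ 1.5e-4, small but still positive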
# transform for parameter to ensure positive
transforms=[sp_shift,sp_shift]
#transforms=[sp_shift]
# diffuse priors on parameters
lpriors = [tfd.Normal(loc = np.float64(0),scale=np.float64(1)),
tfd.Normal(loc = np.float64(0),scale=np.float64(10.))]
apriors = [tfd.Normal(loc = np.float64(0.),scale=np.float64(1)),
tfd.Normal(loc = np.float64(0),scale=np.float64(10.))]
lparams_init = [0.0,0.0]
aparams_init = [0.0,0.0]
# create the model #2880
mover = moveNS(T,X,Z, ID, BATCH_SIZE=1000, MIN_REMAIN=500,velocity=True, std_obs_noise=100, mean_obs_noise=10,
akernel=periodic_kernel,
aparams_init=aparams_init,
apriors=apriors,
atransforms=transforms,
lkernel=periodic_kernel,
lparams_init=lparams_init,
lpriors=lpriors,
ltransforms=transforms)
#-mover.log_posterior(*mover.kernel_params)
learning_rate = tf.optimizers.schedules.ExponentialDecay(
initial_learning_rate=1e-1,
decay_steps=50,
decay_rate=0.99,
staircase=True)
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate,beta_2=0.99)
train_steps = 2000
pbar = tqdm(range(train_steps))
loss_history = np.zeros((train_steps))
for i in pbar:
with tf.GradientTape() as t:
loss = -mover.log_posterior(*mover.kernel_params)
loss_history[i] = loss.numpy()
pbar.set_description("Loss %f" % (loss_history[i]))
gradients = t.gradient(loss, mover.kernel_params)
optimizer.apply_gradients(zip(gradients, mover.kernel_params))
#n=3.5
opt_params = [i.numpy() for i in mover.kernel_params]
with open('opt_params.npy', 'wb') as fp:
pickle.dump(opt_params, fp)
with open('opt_params.npy', 'rb') as fp:
opt_params = pickle.load(fp)
opt_obs_noise = opt_params[0]
opt_ls_v = opt_params[1]
opt_ls_amp = sp_shift(opt_params[2]).numpy()
opt_ls_ls = sp_shift(opt_params[3]).numpy()
opt_amp_v = opt_params[4]
opt_amp_amp = sp_shift(opt_params[5]).numpy()
opt_amp_ls = sp_shift(opt_params[6]).numpy()
start = time.time()
num_runs=4
rescale = 24
def ls_periodic_kernel():
# periodic kernel with single variable parameter. Other parameters are set
# to encode daily activity pattern (period=rescale).
# 15 minute correlation time
return tfp.math.psd_kernels.ExpSinSquared(np.float64(opt_ls_amp),np.float64(opt_ls_ls),np.float64(24.0))
def amp_periodic_kernel():
# periodic kernel with single variable parameter. Other parameters are set
# to encode daily activity pattern (period=rescale).
# 15 minute correlation time
    return tfp.math.psd_kernels.ExpSinSquared(np.float64(opt_amp_amp),
                                              np.float64(opt_amp_ls),
                                              np.float64(24.0))
from galaxy_analysis.plot.plot_styles import *
import numpy as np
from scipy import integrate
import yt
import os, sys
import matplotlib.pyplot as plt
import glob
# AE: Comment out below import unless you feel like
# installing a bunch of stuff:
# from galaxy_analysis.plot.plot_styles import *
SolarAbundances = np.array([0.02, 0.28, 3.26E-3, 1.32E-3, 8.65E-3,
2.22E-3, 9.31E-4, 1.08E-3, 6.44E-4, 1.01E-4, 1.73E-3])
# globals since these were ifdefs in an old version of the code
# here for backwards compatability, but now these are
# read from the gizmo parameter file if they are there
# (only needed for when logbins used)
AGE_BIN_START = 1.0 # Myr
AGE_BIN_END = 14000.0 # Myr
SOLAR_METALLICITY = 0.02 # as defined in Gizmo / FIRE defaults
CYTHON_ON = True
if CYTHON_ON:
import pyximport; pyximport.install(setup_args={'include_dirs':[np.get_include()]},
language_level=3)
from galaxy_analysis.gizmo import age_fields
from galaxy_analysis.utilities import cy_convert_abundances as ca
# in Gizmo output, first metal tracer field corresponding to
# the age bins (0-14 are the 11 default species + 4 r-process)
OFFSET = 15
# Hard-coded: list of elements in standard file model (in order)
elements = ['Total','He','C','N','O','Ne','Mg','Si','S','Ca','Fe']
element_num = {}
i = 0
for e in elements:
element_num[e] = i
i = i + 1
def generate_metal_fields(ds, _agebins=None,
_elements=elements,
_yields=None,
ptype='PartType0',
age_is_fraction=False):
"""
Generate derived fields mapping the age tracers to
actual elemental abundances using the given set of
yields. yields must be a NxM array with N = the
number of age bins and M = the number of elements
(M = 11 for default FIRE). Each value here should be the
yield (in solar masses) per solar mass of star formation
in each age bin for each element.
The derived fields computing elemental mass loop through
all the age bin tracer fields.
Derived fields will be of form:
(ptype,"ELEMENTNAME_mass")
(ptype,"ELEMENTNAME_fraction")
(ptype,"ELEMENTNAME_actual_mass")
where 'ptype' is the passed particle type ("PartType0" or
"PartType4" probably) and the "ELEMENTNAME_actual_mass" field
is the actual mass of that element in the yields followed in
the simulation (e.g. something like:
(ptype,"Metallicity_X")*(ptype,"particle_mass"), where X
is the metallicity number corresponding to that given element).
"""
def _metal_mass_test(_ptype, _ei):
# test metals in bin zero
def temp(field,data):
mass_p = np.zeros(np.shape( data[(_ptype,'particle_mass')]))
# do this in cython:
if CYTHON_ON:
for i in np.arange(np.size(_agebins)-1):
fname = 'Metallicity_%02i'%(OFFSET + i)
mass_p += data[(ptype,fname)].value * _yields[i,_ei] #(agebinnum, elementnum)
#
# Both the calculations below *should* work but don't for some reason. I am
# absolutely mystified by this.... it works just fine in tests outside of
# the derived fields routines.... but for some reason this gives wrong answers...
# unfortunate since its slightly faster (and is how I wrote the cython version....
#
# age_vals = np.array([ data[(_ptype ,"Metallicity_%02i"%(OFFSET+i))].value for i in np.arange(np.size(_agebins)-1)])
# mass_p = np.matmul(age_vals.T, _yields[:,ei].T)
# mass_p = np.matmul(_yields[:,ei].T, age_vals)
else:
for i in np.arange(np.size(_agebins)-1):
fname = 'Metallicity_%02i'%(OFFSET + i)
mass_p += data[(_ptype,fname)].value * _yields[i,_ei] #(agebinnum, elementnum)
# age_vals = np.array([ data[(_ptype,"Metallicity_%02i"%(OFFSET+i))] for i in np.arange(np.size(_agebins)-1)])
# mass_p = np.matmul(_yields[:ei].T, age_vals)
#np.sum(np.transpose(age_vals) * _yields[:,ei], axis=1)
if age_is_fraction:
mass_p = (mass_p * data[(_ptype,'particle_mass')].to('code_mass').value) * yt.units.Msun
else:
mass_p = mass_p * yt.units.Msun
return mass_p / data.ds.hubble_constant
return temp
def _metal_fraction_test(_ptype, _e):
def temp(field,data):
Mp = data[(_ptype,'particle_mass')].to('Msun')
abund = data[('all',_ptype + '_' + _e + '_mass')].to('Msun') / Mp
abund[Mp==0.0] = 0.0
return abund
return temp
def _metal_mass_actual_test(_ptype,_ei):
def temp(field,data):
Mp = data[(_ptype,'particle_mass')].to('Msun')
abund = data[(_ptype,"Metallicity_%02i"%(_ei))]
return abund*Mp
return temp
for ei,e in enumerate(_elements):
ds.add_field( ('all', ptype + '_' + e + '_mass'), sampling_type='particle',
function=_metal_mass_test(ptype, ei),
units = 'Msun', force_override=True)
ds.add_field( ('all',ptype + '_' + e + '_fraction'), sampling_type='particle',
function=_metal_fraction_test(ptype,e),
units='', force_override=True)
ds.add_field( ('all',ptype + '_' + e + '_actual_mass'), sampling_type='particle',
function=_metal_mass_actual_test(ptype,ei),
units='Msun', force_override=True)
return
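# Illustrative usage sketch (not part of the original script); the snapshot path
# and the yield table are placeholders, and `yields` must have shape
# (len(agebins) - 1, len(elements)) as described in the docstring above:
#   ds = yt.load("snapshot_600.hdf5")
#   agebins = np.logspace(np.log10(AGE_BIN_START), np.log10(AGE_BIN_END), 17)  # Myr
#   yields = np.zeros((agebins.size - 1, len(elements)))  # fill from a yield model
#   generate_metal_fields(ds, _agebins=agebins, _elements=elements, _yields=yields)
#   ad = ds.all_data()
#   oxygen_mass = ad[('all', 'PartType0_O_mass')]   # derived from the age tracers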
def _generate_star_metal_fields(ds,
_agebins=None,
_elements=elements,
_yields=None,
ptype='PartType4',
age_is_fraction = False):
"""
See above function. Computes surface abundances
of given star as derived from the age tracers
"""
def _star_metal_mass_test(_ptype, _ei):
# test metals in bin zero
def temp(field,data):
mass_p = np.zeros(np.shape( data[(_ptype,'particle_mass')]))
for i in np.arange(np.size(_agebins)-1):
fname = 'Metallicity_%02i'%(OFFSET + i)
mass_p += data[(ptype,fname)].value * _yields[i,_ei] #(agebinnum, elementnum)
if age_is_fraction:
mass_p = (mass_p * data[(ptype,'particle_mass')].to('code_mass').value) * yt.units.Msun
else:
mass_p = mass_p * yt.units.Msun
return mass_p / data.ds.hubble_constant
return temp
for ei,e in enumerate(_elements):
ds.add_field( ('all', ptype + '_' + e + '_mass'), sampling_type='particle',
function=_star_metal_mass_test(ptype, ei),
units = 'Msun', force_override=True)
return
#
# Extracted FIRE yield model:
# - Model is as-is from the code, but does not include any
# metallicity dependence (wind rates are fixed to a chosen
# metallicity, default is solar)
#
#
def sn_rate(t):
"""
CCSNE rate
    SN / Gyr per solar mass of star formation
Changed output to /Gyr to keep same units as input t
"""
agemin = 0.003401 # Gyr
agebrk = 0.010370 # Gyr
agemax = 0.03753 # Gyr
RSNE = 0.0
if (t>agemin):
if (t <= agebrk):
RSNE = 5.408E-4
elif (t<=agemax):
RSNE=2.516E-4
if (t > agemax):
#RSNE=5.3E-8+1.6*np.exp(-0.5*((t-0.05)/0.01)*((t-0.05)/0.01)) # This is JUST SNIa
RSNE=0.0 # set to zero for CCSNE
return RSNE * 1000.0
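# Rough sanity check (not part of the original script): integrating this rate
# over the CCSNe window gives the total number of core-collapse SNe per solar
# mass of star formation, roughly 0.01 (about one CCSN per ~100 Msun formed).
#   n_ccsne = integrate.quad(sn_rate, 0.0, 0.05)[0]   # t in Gyr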
def snIa_rate(t):
"""
SNIa rate (SN/Gyr) - t in Gyr
"""
agemin = 0.003401 # Gyr
agebrk = 0.010370 # Gyr
agemax = 0.03753 # Gyr
RSNE = 0.0
if (t > agemax):
RSNE=5.3E-8+1.6E-5*np.exp(-0.5*((t-0.05)/0.01)*((t-0.05)/0.01)) # This is JUST SNIa
return RSNE * 1000.0
def wind_yields(i,element=None, Z = 1.0E-5, FIRE_Z_scaling = False):
"""
Yields (in fraction) per element with winds
"""
Zsolar = Z / SOLAR_METALLICITY
yields = np.array([0.0, 0.36,0.016,0.0041,0.0118] + [0.0]*6)
if (Z < 0.033):
yields[4] *= Zsolar
else:
yields[4] *= 1.65
if FIRE_Z_scaling:
# only first 5 elements
i = 5
yields[:i] = yields[:i]*(1.0-Z)+(Zsolar*SolarAbundances[:i]-SolarAbundances[:i])
yields[0] = np.sum(yields[2:]) # total yield
if yields[4] < 0:
yields[4] = 0.0
print("Total O yield in winds is negative due to Z scaling")
if (np.any(yields < 0.0)):
print(yields)
print("Error in wind yields - negative", Z)
raise RuntimeError
    # if element passed, use that - otherwise use yield indices
if not (element is None):
if element == 'all':
return yields
return yields[i]
def wind_rate(t, Z = 1.0E-5, GasReturnFraction = 1.0):
"""
Mass loss rate from stellar winds. Z is in solar.
"""
Zsolar = Z / SOLAR_METALLICITY
p = 0.0
if (t <= 0.001):
p = 11.6846
else:
if (t <=0.0035):
logZ=np.log10(Zsolar)
p=11.6846*Zsolar*10.0**(1.838*(0.79+logZ)*(np.log10(t)-(-3.00)))
else:
if (t<=0.1):
p=72.1215*(t/0.0035)**(-3.25)+0.0103
else:
p=1.03*t**(-1.1) / (12.9-np.log(t)) # bug: this was log10 at first
#if (t < 0.1):
# p = p * 1.0
# assuming wind_rate is in Msun / Myr per solar mass of SF
rate = p * GasReturnFraction * 1.4 * 0.291175
return rate # might already be / Gyr
def snIa_yields(i, element = None, Z = 1.0E-5, FIRE_Z_scaling = False, MSNe = 1.4):
# ['Total','He','C','N','O','Ne','Mg','Si','S','Ca','Fe']
yields = np.array([1.4,0.0,0.049,1.2E-6,0.143,0.0045,0.0086,0.156,0.087,0.012,0.743])
Zsolar = Z / SOLAR_METALLICITY
if FIRE_Z_scaling:
yields = yields / MSNe
yields = yields*(1.0-Z)+(Zsolar*SolarAbundances-SolarAbundances)
yields = yields * MSNe
yields[1] = 0.0
if (np.any(yields < 0.0)):
if yields[3] < 0.0:
print("N yield in SNIA is negative")
yields[3] = 0.0
    if np.any(yields < 0.0):
import numpy as np
def box_iou_calc(boxes1, boxes2):
# https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
"""
Return intersection-over-union (Jaccard index) of boxes.
Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
Arguments:
boxes1 (Array[N, 4])
boxes2 (Array[M, 4])
Returns:
iou (Array[N, M]): the NxM matrix containing the pairwise
IoU values for every element in boxes1 and boxes2
    This implementation is taken from the above link, changed so that it only uses numpy.
"""
def box_area(box):
# box = 4xn
return (box[2] - box[0]) * (box[3] - box[1])
area1 = box_area(boxes1.T)
area2 = box_area(boxes2.T)
lt = np.maximum(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
rb = np.minimum(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
inter = np.prod(np.clip(rb - lt, a_min = 0, a_max = None), 2)
return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)
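# Illustrative usage (not part of the original module): two unit boxes that
# overlap over half of one box give IoU = 0.5 / 1.5 = 1/3.
#   boxes_a = np.array([[0.0, 0.0, 1.0, 1.0]])
#   boxes_b = np.array([[0.5, 0.0, 1.5, 1.0]])
#   box_iou_calc(boxes_a, boxes_b)   # -> array([[0.33333333]])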
class ConfusionMatrix:
def __init__(self, num_classes, CONF_THRESHOLD = 0.3, IOU_THRESHOLD = 0.5):
        self.matrix = np.zeros((num_classes + 1, num_classes + 1))
"""Calculate ph-ph interaction and phonons on grid."""
# Copyright (C) 2020 <NAME>
# All rights reserved.
#
# This file is part of phono3py.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import warnings
import numpy as np
from phonopy.harmonic.dynamical_matrix import get_dynamical_matrix
from phonopy.structure.cells import (
Primitive,
compute_all_sg_permutations,
sparse_to_dense_svecs,
)
from phonopy.structure.symmetry import Symmetry
from phonopy.units import AMU, EV, Angstrom, Hbar, THz, VaspToTHz
from phono3py.phonon3.real_to_reciprocal import RealToReciprocal
from phono3py.phonon3.reciprocal_to_normal import ReciprocalToNormal
from phono3py.phonon3.triplets import get_nosym_triplets_at_q, get_triplets_at_q
from phono3py.phonon.grid import (
BZGrid,
get_grid_points_by_rotations,
get_ir_grid_points,
)
from phono3py.phonon.solver import run_phonon_solver_c, run_phonon_solver_py
class Interaction:
"""Calculate ph-ph interaction and phonons on grid.
This class instance is the heart of phono3py calculation.
Many data are stored.
The following three steps have to be done manually.
1) init_dynamical_matrix
2) set_grid_point
3) run
Attributes
----------
interaction_strength
mesh_numbers
is_mesh_symmetry
fc3
dynamical_matrix
primitive
primitive_symmetry
bz_grid
band_indices
nac_params
nac_q_direction
zero_value_positions
frequency_factor_to_THz
lapack_zheev_uplo
cutoff_frequency
"""
def __init__(
self,
primitive: Primitive,
bz_grid: BZGrid,
primitive_symmetry: Symmetry,
fc3=None,
band_indices=None,
constant_averaged_interaction=None,
frequency_factor_to_THz=VaspToTHz,
frequency_scale_factor=None,
unit_conversion=None,
is_mesh_symmetry=True,
symmetrize_fc3q=False,
cutoff_frequency=None,
lapack_zheev_uplo="L",
):
"""Init method."""
self._primitive = primitive
self._bz_grid = bz_grid
self._primitive_symmetry = primitive_symmetry
self._band_indices = None
self._set_band_indices(band_indices)
self._constant_averaged_interaction = constant_averaged_interaction
self._frequency_factor_to_THz = frequency_factor_to_THz
self._frequency_scale_factor = frequency_scale_factor
if fc3 is not None:
self._set_fc3(fc3)
# Unit to eV^2
if unit_conversion is None:
num_grid = np.prod(self.mesh_numbers)
self._unit_conversion = (
(Hbar * EV) ** 3
/ 36
/ 8
* EV ** 2
/ Angstrom ** 6
/ (2 * np.pi * THz) ** 3
/ AMU ** 3
/ num_grid
/ EV ** 2
)
else:
self._unit_conversion = unit_conversion
if cutoff_frequency is None:
self._cutoff_frequency = 0
else:
self._cutoff_frequency = cutoff_frequency
self._is_mesh_symmetry = is_mesh_symmetry
self._symmetrize_fc3q = symmetrize_fc3q
self._lapack_zheev_uplo = lapack_zheev_uplo
self._symprec = self._primitive_symmetry.tolerance
self._triplets_at_q = None
self._weights_at_q = None
self._triplets_map_at_q = None
self._ir_map_at_q = None
self._interaction_strength = None
self._g_zero = None
self._phonon_done = None
self._frequencies = None
self._eigenvectors = None
self._dm = None
self._nac_params = None
self._nac_q_direction = None
self._band_index_count = 0
svecs, multi = self._primitive.get_smallest_vectors()
if self._primitive.store_dense_svecs:
self._svecs = svecs
self._multi = multi
else:
self._svecs, self._multi = sparse_to_dense_svecs(svecs, multi)
self._masses = np.array(self._primitive.masses, dtype="double")
self._p2s = np.array(self._primitive.p2s_map, dtype="int_")
self._s2p = np.array(self._primitive.s2p_map, dtype="int_")
def run(self, lang="C", g_zero=None):
"""Run ph-ph interaction calculation."""
if (self._phonon_done == 0).any():
self.run_phonon_solver()
num_band = len(self._primitive) * 3
num_triplets = len(self._triplets_at_q)
self._interaction_strength = np.empty(
(num_triplets, len(self._band_indices), num_band, num_band), dtype="double"
)
if self._constant_averaged_interaction is None:
self._interaction_strength[:] = 0
if lang == "C":
self._run_c(g_zero)
else:
self._run_py()
else:
num_grid = np.prod(self.mesh_numbers)
self._interaction_strength[:] = (
self._constant_averaged_interaction / num_grid
)
@property
def interaction_strength(self):
"""Return ph-ph interaction strength.
Returns
-------
ndarray
shape=(num_ir_grid_points, num_specified_band, num_band, num_band),
dtype='double', order='C'
"""
return self._interaction_strength
def get_interaction_strength(self):
"""Return ph-ph interaction strength."""
warnings.warn(
"Use attribute, Interaction.interaction_strength "
"instead of Interaction.get_interaction_strength().",
DeprecationWarning,
)
return self.interaction_strength
@property
def mesh_numbers(self):
"""Return mesh numbers.
Returns
-------
ndarray
shape=(3, ), dtype='int_'
"""
return self._bz_grid.D_diag
def get_mesh_numbers(self):
"""Return mesh numbers."""
warnings.warn(
"Use attribute, Interaction.mesh_numbers "
"instead of Interaction.get_mesh_numbers().",
DeprecationWarning,
)
return self.mesh_numbers
@property
def is_mesh_symmetry(self):
"""Whether symmetry of grid is utilized or not."""
return self._is_mesh_symmetry
@property
def fc3(self):
"""Return fc3."""
return self._fc3
def get_fc3(self):
"""Return fc3."""
warnings.warn(
"Use attribute, Interaction.fc3 " "instead of Interaction.get_fc3().",
DeprecationWarning,
)
return self.fc3
@property
def dynamical_matrix(self):
"""Return DynamicalMatrix class instance."""
return self._dm
def get_dynamical_matrix(self):
"""Return DynamicalMatrix class instance."""
warnings.warn(
"Use attribute, Interaction.dynamical_matrix "
"instead of Interaction.get_dynamical_matrix().",
DeprecationWarning,
)
return self.dynamical_matrix
@property
def primitive(self):
"""Return Primitive class instance."""
return self._primitive
def get_primitive(self):
"""Return Primitive class instance."""
warnings.warn(
"Use attribute, Interaction.primitive "
"instead of Interaction.get_primitive().",
DeprecationWarning,
)
return self.primitive
@property
def primitive_symmetry(self):
"""Return Symmetry class instance of primitive cell."""
return self._primitive_symmetry
def get_triplets_at_q(self):
"""Return grid point triplets information.
triplets_at_q is in BZ-grid.
triplets_map_at_q is in GR-grid.
ir_map_at_q is in GR-grid.
See details at ``get_triplets_at_q``.
"""
return (
self._triplets_at_q,
self._weights_at_q,
self._triplets_map_at_q,
self._ir_map_at_q,
)
@property
def bz_grid(self):
"""Return BZGrid class instance."""
return self._bz_grid
@property
def band_indices(self):
"""Return band indices.
Returns
-------
ndarray
shape=(num_specified_bands, ), dtype='int_'
"""
return self._band_indices
def get_band_indices(self):
"""Return band indices."""
warnings.warn(
"Use attribute, Interaction.band_indices "
"instead of Interaction.get_band_indices().",
DeprecationWarning,
)
return self.band_indices
@property
def nac_params(self):
"""Return NAC params."""
return self._nac_params
@property
def nac_q_direction(self):
"""Return q-direction used for NAC at q->0.
Direction of q-vector watching from Gamma point used for
non-analytical term correction. This is effective only at q=0
(physically q->0). The direction is given in crystallographic
(fractional) coordinates.
shape=(3,), dtype='double'.
Default value is None, which means this feature is not used.
"""
return self._nac_q_direction
@nac_q_direction.setter
def nac_q_direction(self, nac_q_direction):
if nac_q_direction is None:
self._nac_q_direction = None
else:
self._nac_q_direction = np.array(nac_q_direction, dtype="double")
def get_nac_q_direction(self):
"""Return q-direction used for NAC at q->0."""
warnings.warn(
"Use attribute, Interaction.nac_q_direction "
"instead of Interaction.get_nac_q_direction().",
DeprecationWarning,
)
return self.nac_q_direction
def set_nac_q_direction(self, nac_q_direction=None):
"""Set NAC q-point direction valid at q->0."""
warnings.warn(
"Use attribute, Interaction.nac_q_direction "
"instead of Interaction.set_nac_q_direction().",
DeprecationWarning,
)
self.nac_q_direction = nac_q_direction
@property
def zero_value_positions(self):
"""Return zero ph-ph interaction elements information.
Returns
-------
shape is same as that of interaction_strength, dtype='byte', order='C'
"""
return self._g_zero
def get_zero_value_positions(self):
"""Return zero ph-ph interaction elements information."""
warnings.warn(
"Use attribute, Interaction.zero_value_positions "
"instead of Interaction.get_zero_value_positions().",
DeprecationWarning,
)
return self.zero_value_positions
def get_phonons(self):
"""Return phonons on grid.
Returns
-------
tuple
frequencies : ndarray
Phonon frequencies on grid.
shape=(num_bz_grid, num_band), dtype='double', order='C'
eigenvectors : ndarray
Phonon eigenvectors on grid.
shape=(num_bz_grid, num_band, num_band),
dtype="c%d" % (np.dtype('double').itemsize * 2), order='C'
phonon_done : ndarray
                1 if phonon at a grid point is calculated, otherwise 0.
shape=(num_bz_grid, ), dtype='byte'
"""
return self._frequencies, self._eigenvectors, self._phonon_done
@property
def frequency_factor_to_THz(self):
"""Return phonon frequency conversion factor to THz."""
return self._frequency_factor_to_THz
def get_frequency_factor_to_THz(self):
"""Return phonon frequency conversion factor to THz."""
warnings.warn(
"Use attribute, Interaction.frequency_factor_to_THz ",
"instead of Interaction.get_frequency_factor_to_THz().",
DeprecationWarning,
)
return self.frequency_factor_to_THz
@property
def lapack_zheev_uplo(self):
"""Return U or L for lapack zheev solver."""
return self._lapack_zheev_uplo
def get_lapack_zheev_uplo(self):
"""Return U or L for lapack zheev solver."""
warnings.warn(
"Use attribute, Interaction.lapack_zheev_uplo "
"instead of Interaction.get_lapack_zheev_uplo().",
DeprecationWarning,
)
return self.lapack_zheev_uplo
@property
def cutoff_frequency(self):
"""Return cutoff phonon frequency to judge imaginary phonon."""
return self._cutoff_frequency
def get_cutoff_frequency(self):
"""Return cutoff phonon frequency to judge imaginary phonon."""
warnings.warn(
"Use attribute, Interaction.cutoff_frequency "
"instead of Interaction.get_cutoff_frequency().",
DeprecationWarning,
)
return self.cutoff_frequency
def get_averaged_interaction(self):
"""Return sum over phonon triplets of interaction strength.
See Eq.(21) of PRB 91, 094306 (2015)
"""
# v[triplet, band0, band, band]
v = self._interaction_strength
w = self._weights_at_q
v_sum = np.dot(w, v.sum(axis=2).sum(axis=2))
return v_sum / np.prod(v.shape[2:])
def get_primitive_and_supercell_correspondence(self):
"""Return atomic pair information."""
return (self._svecs, self._multi, self._p2s, self._s2p, self._masses)
def get_unit_conversion_factor(self):
"""Return unit conversion factor."""
return self._unit_conversion
def get_constant_averaged_interaction(self):
"""Return constant averaged interaction."""
return self._constant_averaged_interaction
def set_interaction_strength(self, pp_strength, g_zero=None):
"""Set interaction strength."""
self._interaction_strength = pp_strength
self._g_zero = g_zero
def set_grid_point(self, grid_point, store_triplets_map=False):
"""Set grid point and prepare grid point triplets."""
if not self._is_mesh_symmetry:
(
triplets_at_q,
weights_at_q,
triplets_map_at_q,
ir_map_at_q,
) = get_nosym_triplets_at_q(grid_point, self._bz_grid)
else:
(
triplets_at_q,
weights_at_q,
triplets_map_at_q,
ir_map_at_q,
) = get_triplets_at_q(grid_point, self._bz_grid, swappable=True)
# Special treatment of symmetry is applied when q_direction is
# used.
if self._nac_q_direction is not None:
if (self._bz_grid.addresses[grid_point] == 0).all():
self._phonon_done[grid_point] = 0
self.run_phonon_solver(
np.array(
[
grid_point,
],
dtype="int_",
)
)
rotations = []
for i, r in enumerate(self._bz_grid.reciprocal_operations):
dq = self._nac_q_direction
dq /= np.linalg.norm(dq)
diff = np.dot(r, dq) - dq
if (abs(diff) < 1e-5).all():
rotations.append(self._bz_grid.rotations[i])
(
triplets_at_q,
weights_at_q,
triplets_map_at_q,
ir_map_at_q,
) = get_triplets_at_q(
grid_point,
self._bz_grid,
reciprocal_rotations=rotations,
is_time_reversal=False,
)
reciprocal_lattice = np.linalg.inv(self._primitive.cell)
for triplet in triplets_at_q:
sum_q = (self._bz_grid.addresses[triplet]).sum(axis=0)
if (sum_q % self.mesh_numbers != 0).any():
print("============= Warning ==================")
print("%s" % triplet)
for tp in triplet:
print(
"%s %s"
% (
self._bz_grid.addresses[tp],
np.linalg.norm(
np.dot(
reciprocal_lattice,
self._bz_grid.addresses[tp]
/ self.mesh_numbers.astype("double"),
)
),
)
)
print("%s" % sum_q)
print("============= Warning ==================")
self._triplets_at_q = triplets_at_q
self._weights_at_q = weights_at_q
if store_triplets_map:
self._triplets_map_at_q = triplets_map_at_q
self._ir_map_at_q = ir_map_at_q
def init_dynamical_matrix(
self,
fc2,
supercell,
primitive,
nac_params=None,
solve_dynamical_matrices=True,
decimals=None,
):
"""Prepare for phonon calculation on grid.
solve_dynamical_matrices : bool
When False, phonon calculation will be postponed.
"""
self._allocate_phonon()
self._nac_params = nac_params
self._dm = get_dynamical_matrix(
fc2,
supercell,
primitive,
nac_params=nac_params,
frequency_scale_factor=self._frequency_scale_factor,
decimals=decimals,
symprec=self._symprec,
)
self._phonon_done[0] = 0
if solve_dynamical_matrices:
self.run_phonon_solver()
else:
self.run_phonon_solver(
|
np.array([0], dtype="int_")
|
numpy.array
|
import numpy as np
import cv2 as cv
import math
import tensorflow as tf
import random
from os import listdir
from matplotlib import pyplot as plt
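# Note added for clarity: enhance() below appears to write 80 augmented copies
# per input image path, each time drawing one of a dozen random cases (rotation,
# grayscale, brightness/contrast/hue/saturation jitter, central crop,
# resize-with-pad, horizontal/vertical flip, or no change) and saving the result
# next to the original file with a "_<copy><case>" suffix.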
def enhance(load):  # data augmentation module
with tf.Session() as sess:
for i in load:
for s in range(0,80):
raw_img = tf.gfile.FastGFile(i,'rb').read()
n=random.randint(0,11)
img_data = tf.image.decode_image(raw_img)
                if n==0:  # randomly flip, crop, scale, or adjust contrast, hue and brightness
img_data=np.rot90(sess.run(img_data))
strload=i[0:i.find('.',-5,-1)-1]+'_'+str(s)+str(n)+'.jpg'
cv.imwrite(strload,img_data.eval())
elif n==1:
img_data = tf.image.rgb_to_grayscale(img_data)
elif n==2:
img_data = tf.image.convert_image_dtype(img_data, tf.float32)
img_data = tf.image.adjust_brightness(img_data, delta=-.7)
elif n==3:
img_data = tf.image.convert_image_dtype(img_data, tf.float32)
img_data = tf.image.random_brightness(img_data, max_delta=0.6)
elif n==4:
img_data = tf.image.convert_image_dtype(img_data, tf.float32)
img_data = tf.image.random_contrast(img_data, lower=0, upper=4)
elif n==5:
img_data = tf.image.convert_image_dtype(img_data, tf.float32)
img_data = tf.image.random_hue(img_data, 0.5)
elif n==6:
img_data = tf.image.convert_image_dtype(img_data, tf.float32)
img_data = tf.image.random_saturation(img_data, lower=0, upper=2)
elif n==7:
img_data = tf.image.central_crop(sess.run(img_data),random.random())
elif n==8:
                    img_data = tf.image.resize_image_with_pad(
                        img_data,
                        random.randint(sess.run(tf.shape(img_data))[0]//2, sess.run(tf.shape(img_data))[0]*2),
                        random.randint(sess.run(tf.shape(img_data))[1]//2, sess.run(tf.shape(img_data))[1]*2))
elif n==9:
img_data = tf.image.flip_left_right(img_data)
elif n== 10:
img_data = tf.image.flip_up_down(img_data)
img_data = tf.image.convert_image_dtype(img_data, tf.int16)
strload=i[0:i.find('.',-5,-1)-1]+'_'+str(s)+str(n)+'.jpg'
cv.imwrite(strload,img_data.eval())
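# Note added for clarity: cutimg() below appears to slice the located card-number
# region into 25-pixel-wide strips (roughly one per digit), adding an extra 10 px
# offset after every fourth strip except for type-3 cards, and returns the strips
# as a list of (ROI_h, 25, channels) int16 arrays.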
def cutimg(img_value,ROI_w,ROI_h,ROI_x,ROI_y,type):  # crop the image
img=[]
t=0
for i in range(0,math.ceil(ROI_w/25)):
if type!=3 and i%4==0 and i>0:
t+=10
n=i*25+t
x=np.zeros((ROI_h,25,img_value.shape[2]),dtype=np.int16)
for j in range(0,ROI_h):
if ROI_w-n<25:
return img
else :
x[j][0:]=img_value[ROI_y+j][n+ROI_x:n+ROI_x+25]
img.append(x)
return img
def tool1(type,imgout,kernel,light_num,thresholdvalue):  # card number localization
num=t=0
ROI_w=ROI_h=ROI_x=ROI_y=0
if type==1:
retval, dst=cv.threshold(imgout,thresholdvalue+light_num,255,cv.THRESH_BINARY)
elif type==2:
retval, dst=cv.threshold(imgout,thresholdvalue-15,255,cv.THRESH_BINARY)
elif type==3:
retval, dst=cv.threshold(imgout,thresholdvalue-light_num-30,255,cv.THRESH_BINARY_INV)
dst = cv.morphologyEx(dst,cv.MORPH_GRADIENT,kernel)
contours, hierarchy=cv.findContours(dst,cv.RETR_TREE,cv.CHAIN_APPROX_SIMPLE)
for i in range(0,len(contours)):
x, y, w, h = cv.boundingRect(contours[i])
# print('ROI:',ROI_x,ROI_y,ROI_w,ROI_h,"\n")
if w>150 and h>120 and y>300:
ROI_y=0
num=0
t=10
continue
# print(x,y,w,h,"\n")
if y+h <= 480*0.75-t and y>=200 and h<=46:
if ROI_y==0:
ROI_h=46
ROI_y=y-(60-h)
ROI_x=x
ROI_w=w
elif y>=ROI_y and y+h<=ROI_y+46:
if x>ROI_x:
if x>ROI_x+ROI_w:
ROI_w=x-ROI_x+w
else:
ROI_w+=ROI_x-x
ROI_x=x
num+=(ROI_h/20+1)*(ROI_w/30+1)
elif ROI_w/640>0.7 and num>20:
break
else :
ROI_h=46
ROI_y=y-(46-h)
ROI_x=x
ROI_w=w
num=0
else:
continue
return ROI_w,ROI_h,ROI_x,ROI_y,num
def imghandle(img_name):  # image processing
img = cv.imread(img_name)
img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    img=cv.resize(img,(640,480))  # prepare parameters
imgout = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
thresholdvalue=imgout[20,20]
light_num=30
kernel = np.ones((9,9), np.uint8)
print(cv.__version__,thresholdvalue)
    if thresholdvalue>200:  # check the brighter case first, otherwise this branch can never be reached
        light_num=100
    elif thresholdvalue>190:
        light_num=50
    dst = cv.morphologyEx(imgout,cv.MORPH_TOPHAT,kernel)  # morphological processing
gradX = cv.Sobel(dst,cv.CV_32F,1,0,-1)
gradX =
|
np.absolute(gradX)
|
numpy.absolute
|
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from pm4py.algo.clustering.trace_attribute_driven.util import filter_subsets
import pandas as pd
import numpy as np
from collections import Counter
from scipy.spatial.distance import pdist
from pm4py.util import exec_utils
from enum import Enum
from pm4py.util import constants
class Parameters(Enum):
ATTRIBUTE_KEY = constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY
ACTIVITY_KEY = constants.PARAMETER_CONSTANT_ACTIVITY_KEY
SINGLE = "single"
BINARIZE = "binarize"
POSITIVE = "positive"
LOWER_PERCENT = "lower_percent"
def occu_var_act(var_list):
'''
    Return a dataframe that shows the frequency of each element (activity) in the given variant list.
:param var_list:
:return:
'''
result = Counter(var_list) # count number of occurrence of each element
df = pd.DataFrame.from_dict(dict(result), orient='index', columns=['freq'])
df = df.reset_index().rename(columns={'index': 'var'})
return df
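# Illustrative example (not part of the original module): for a variant such as
# ['register request', 'check ticket', 'check ticket'], occu_var_act returns a
# dataframe like
#                 var  freq
#   0  register request     1
#   1      check ticket     2
# act_sim() below merges two such frequency tables on 'var' and uses the cosine
# distance between their 'freq' columns as the variant-to-variant distance.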
def act_sim(var_list_1, var_list_2, log1, log2, freq_thres, num, parameters=None):
'''
    This function compares the activity similarity between two sublogs via their two lists of variants.
:param var_list_1: lists of variants in sublog 1
:param var_list_2: lists of variants in sublog 2
:param freq_thres: same as sublog2df()
:param log1: input sublog1 of sublog2df(), which must correspond to var_list_1
:param log2: input sublog2 of sublog2df(), which must correspond to var_list_2
:return: the distance matrix between 2 sublogs in which each element is the distance between two variants.
'''
if parameters is None:
parameters = {}
single = exec_utils.get_param_value(Parameters.SINGLE, parameters, False)
if len(var_list_1) >= len(var_list_2):
max_len = len(var_list_1)
min_len = len(var_list_2)
max_var = var_list_1
min_var = var_list_2
var_count_max = filter_subsets.sublog2df(log1, freq_thres, num)['count']
var_count_min = filter_subsets.sublog2df(log2, freq_thres, num)['count']
else:
max_len = len(var_list_2)
min_len = len(var_list_1)
max_var = var_list_2
min_var = var_list_1
var_count_max = filter_subsets.sublog2df(log2, freq_thres, num)['count']
var_count_min = filter_subsets.sublog2df(log1, freq_thres, num)['count']
dist_matrix = np.zeros((max_len, min_len))
max_per_var = np.zeros(max_len)
max_freq = np.zeros(max_len)
col_sum = np.zeros(max_len)
if var_list_1 == var_list_2:
print("Please give different variant lists!")
else:
for i in range(max_len):
dist_vec = np.zeros(min_len)
df_1 = occu_var_act(max_var[i])
for j in range(min_len):
df_2 = occu_var_act(min_var[j])
df = pd.merge(df_1, df_2, how='outer', on='var').fillna(0)
# cosine similarity is used to calculate trace similarity
dist_vec[j] = (pdist(
|
np.array([df['freq_x'].values, df['freq_y'].values])
|
numpy.array
|
"""
MeasureColocalization
=====================
**MeasureColocalization** measures the colocalization and correlation
between intensities in different images (e.g., different color channels)
on a pixel-by-pixel basis, within identified objects or across an entire
image.
Given two or more images, this module calculates the correlation &
colocalization (Overlap, Manders, Costes’ Automated Threshold & Rank
Weighted Colocalization) between the pixel intensities. The correlation
/ colocalization can be measured for entire images, or a correlation
measurement can be made within each individual object. Correlations /
Colocalizations will be calculated between all pairs of images that are
selected in the module, as well as between selected objects. For
example, if correlations are to be measured for a set of red, green, and
blue images containing identified nuclei, measurements will be made
between the following:
- The blue and green, red and green, and red and blue images.
- The nuclei in each of the above image pairs.
A good primer on colocalization theory can be found on the `SVI website`_.
You can find a helpful review on colocalization from Aaron *et al*. `here`_.
|
============ ============ ===============
Supports 2D? Supports 3D? Respects masks?
============ ============ ===============
YES YES YES
============ ============ ===============
Measurements made by this module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- *Correlation:* The correlation between a pair of images *I* and *J*,
calculated as Pearson’s correlation coefficient. The formula is
covariance(\ *I* ,\ *J*)/[std(\ *I* ) × std(\ *J*)].
- *Slope:* The slope of the least-squares regression between a pair of
images I and J. Calculated using the model *A* × *I* + *B* = *J*, where *A* is the slope.
- *Overlap coefficient:* The overlap coefficient is a modification of
Pearson’s correlation where average intensity values of the pixels are
not subtracted from the original intensity values. For a pair of
images R and G, the overlap coefficient is measured as r = sum(Ri \*
Gi) / sqrt (sum(Ri\*Ri)\*sum(Gi\*Gi)).
- *Manders coefficient:* The Manders coefficient for a pair of images R
and G is measured as M1 = sum(Ri_coloc)/sum(Ri) and M2 =
sum(Gi_coloc)/sum(Gi), where Ri_coloc = Ri when Gi > 0, 0 otherwise
and Gi_coloc = Gi when Ri >0, 0 otherwise.
- *Manders coefficient (Costes Automated Threshold):* Costes’ automated
threshold estimates maximum threshold of intensity for each image
based on correlation. Manders coefficient is applied on thresholded
images as Ri_coloc = Ri when Gi > Gthr and Gi_coloc = Gi when Ri >
Rthr where Gthr and Rthr are thresholds calculated using Costes’
automated threshold method.
- *Rank Weighted Colocalization coefficient:* The RWC coefficient for a
pair of images R and G is measured as RWC1 =
sum(Ri_coloc\*Wi)/sum(Ri) and RWC2 = sum(Gi_coloc\*Wi)/sum(Gi),
where Wi is Weight defined as Wi = (Rmax - Di)/Rmax where Rmax is the
maximum of Ranks among R and G based on the max intensity, and Di =
abs(Rank(Ri) - Rank(Gi)) (absolute difference in ranks between R and
G) and Ri_coloc = Ri when Gi > 0, 0 otherwise and Gi_coloc = Gi
when Ri >0, 0 otherwise. (Singan et al. 2011, BMC Bioinformatics
12:407).
References
^^^^^^^^^^
- <NAME>, Taylor AB, Chew TL. Image co-localization - co-occurrence versus correlation.
J Cell Sci. 2018;131(3):jcs211847. Published 2018 Feb 8. doi:10.1242/jcs.211847
.. _SVI website: http://svi.nl/ColocalizationTheory
.. _here: https://jcs.biologists.org/content/joces/131/3/jcs211847.full.pdf
"""
import numpy
import scipy.ndimage
import scipy.stats
from cellprofiler_core.constants.measurement import COLTYPE_FLOAT
from cellprofiler_core.module import Module
from cellprofiler_core.setting import Divider, Binary, ValidationError
from cellprofiler_core.setting.choice import Choice
from cellprofiler_core.setting.subscriber import (
LabelListSubscriber,
ImageListSubscriber,
)
from cellprofiler_core.setting.text import Float
from cellprofiler_core.utilities.core.object import size_similarly
from centrosome.cpmorphology import fixup_scipy_ndimage_result as fix
from scipy.linalg import lstsq
M_IMAGES = "Across entire image"
M_OBJECTS = "Within objects"
M_IMAGES_AND_OBJECTS = "Both"
M_FAST = "Fast"
M_ACCURATE = "Accurate"
"""Feature name format for the correlation measurement"""
F_CORRELATION_FORMAT = "Correlation_Correlation_%s_%s"
"""Feature name format for the slope measurement"""
F_SLOPE_FORMAT = "Correlation_Slope_%s_%s"
"""Feature name format for the overlap coefficient measurement"""
F_OVERLAP_FORMAT = "Correlation_Overlap_%s_%s"
"""Feature name format for the Manders Coefficient measurement"""
F_K_FORMAT = "Correlation_K_%s_%s"
"""Feature name format for the Manders Coefficient measurement"""
F_KS_FORMAT = "Correlation_KS_%s_%s"
"""Feature name format for the Manders Coefficient measurement"""
F_MANDERS_FORMAT = "Correlation_Manders_%s_%s"
"""Feature name format for the RWC Coefficient measurement"""
F_RWC_FORMAT = "Correlation_RWC_%s_%s"
"""Feature name format for the Costes Coefficient measurement"""
F_COSTES_FORMAT = "Correlation_Costes_%s_%s"
class MeasureColocalization(Module):
module_name = "MeasureColocalization"
category = "Measurement"
variable_revision_number = 5
def create_settings(self):
"""Create the initial settings for the module"""
self.images_list = ImageListSubscriber(
"Select images to measure",
[],
doc="""Select images to measure the correlation/colocalization in.""",
)
self.objects_list = LabelListSubscriber(
"Select objects to measure",
[],
doc="""\
*(Used only when "Within objects" or "Both" are selected)*
Select the objects to be measured.""",
)
self.thr = Float(
"Set threshold as percentage of maximum intensity for the images",
15,
minval=0,
maxval=99,
doc="""\
You may choose to measure colocalization metrics only for those pixels above
a certain threshold. Select the threshold as a percentage of the maximum intensity
of the above image [0-99].
This value is used by the Overlap, Manders, and Rank Weighted Colocalization
measurements.
""",
)
self.images_or_objects = Choice(
"Select where to measure correlation",
[M_IMAGES, M_OBJECTS, M_IMAGES_AND_OBJECTS],
doc="""\
You can measure the correlation in several ways:
- *%(M_OBJECTS)s:* Measure correlation only in those pixels previously
identified as within an object. You will be asked to choose which object
type to measure within.
- *%(M_IMAGES)s:* Measure the correlation across all pixels in the
images.
- *%(M_IMAGES_AND_OBJECTS)s:* Calculate both measurements above.
All methods measure correlation on a pixel by pixel basis.
"""
% globals(),
)
self.spacer = Divider(line=True)
self.do_all = Binary(
"Run all metrics?",
True,
doc="""\
Select *{YES}* to run all of CellProfiler's correlation
and colocalization algorithms on your images and/or objects;
otherwise select *{NO}* to pick which correlation and
colocalization algorithms to run.
""".format(
**{"YES": "Yes", "NO": "No"}
),
)
self.do_corr_and_slope = Binary(
"Calculate correlation and slope metrics?",
True,
doc="""\
Select *{YES}* to run the Pearson correlation and slope metrics.
""".format(
**{"YES": "Yes"}
),
)
self.do_manders = Binary(
"Calculate the Manders coefficients?",
True,
doc="""\
Select *{YES}* to run the Manders coefficients.
""".format(
**{"YES": "Yes"}
),
)
self.do_rwc = Binary(
"Calculate the Rank Weighted Colocalization coefficients?",
True,
doc="""\
Select *{YES}* to run the Rank Weighted Colocalization coefficients.
""".format(
**{"YES": "Yes"}
),
)
self.do_overlap = Binary(
"Calculate the Overlap coefficients?",
True,
doc="""\
Select *{YES}* to run the Overlap coefficients.
""".format(
**{"YES": "Yes"}
),
)
self.do_costes = Binary(
"Calculate the Manders coefficients using Costes auto threshold?",
True,
doc="""\
Select *{YES}* to run the Manders coefficients using Costes auto threshold.
""".format(
**{"YES": "Yes"}
),
)
self.fast_costes = Choice(
"Method for Costes thresholding",
[M_FAST, M_ACCURATE],
doc=f"""\
This setting determines the method used to calculate the threshold for use within the
Costes calculations. Selecting *{M_FAST}* will prioritise testing the most relevant potential
thresholds to identify the optimal value. Selecting *{M_ACCURATE}* will test every possible
threshold value. The latter method becomes particularly time-consuming with larger images.
In most instances the results of both strategies should be identical.
In both modes Costes automatic thresholding can seriously impact performance when working with 16-bit images.
You may want to disable these specific measurements (available when "*Run All Metrics?*" is set to "*No*").
"""
)
def settings(self):
"""Return the settings to be saved in the pipeline"""
result = [
self.images_list,
self.thr,
self.images_or_objects,
self.objects_list,
self.do_all,
self.do_corr_and_slope,
self.do_manders,
self.do_rwc,
self.do_overlap,
self.do_costes,
self.fast_costes,
]
return result
def visible_settings(self):
result = [
self.images_list,
self.spacer,
self.thr,
self.images_or_objects,
]
if self.wants_objects():
result += [self.objects_list]
result += [self.do_all]
if not self.do_all:
result += [
self.do_corr_and_slope,
self.do_manders,
self.do_rwc,
self.do_overlap,
self.do_costes,
]
if self.do_all or self.do_costes:
result += [self.fast_costes]
return result
def help_settings(self):
"""Return the settings to be displayed in the help menu"""
help_settings = [
self.images_or_objects,
self.thr,
self.images_list,
self.objects_list,
self.do_all,
]
return help_settings
def get_image_pairs(self):
"""Yield all permutations of pairs of images to correlate
Yields the pairs of images in a canonical order.
"""
for i in range(len(self.images_list.value) - 1):
for j in range(i + 1, len(self.images_list.value)):
yield (
self.images_list.value[i],
self.images_list.value[j],
)
def wants_images(self):
"""True if the user wants to measure correlation on whole images"""
return self.images_or_objects in (M_IMAGES, M_IMAGES_AND_OBJECTS)
def wants_objects(self):
"""True if the user wants to measure per-object correlations"""
return self.images_or_objects in (M_OBJECTS, M_IMAGES_AND_OBJECTS)
def run(self, workspace):
"""Calculate measurements on an image set"""
col_labels = ["First image", "Second image", "Objects", "Measurement", "Value"]
statistics = []
if len(self.images_list.value) < 2:
raise ValueError("At least 2 images must be selected for analysis.")
for first_image_name, second_image_name in self.get_image_pairs():
if self.wants_images():
statistics += self.run_image_pair_images(
workspace, first_image_name, second_image_name
)
if self.wants_objects():
for object_name in self.objects_list.value:
statistics += self.run_image_pair_objects(
workspace, first_image_name, second_image_name, object_name
)
if self.show_window:
workspace.display_data.statistics = statistics
workspace.display_data.col_labels = col_labels
def display(self, workspace, figure):
statistics = workspace.display_data.statistics
if self.wants_objects():
helptext = "default"
else:
helptext = None
figure.set_subplots((1, 1))
figure.subplot_table(
0, 0, statistics, workspace.display_data.col_labels, title=helptext
)
def run_image_pair_images(self, workspace, first_image_name, second_image_name):
"""Calculate the correlation between the pixels of two images"""
first_image = workspace.image_set.get_image(
first_image_name, must_be_grayscale=True
)
second_image = workspace.image_set.get_image(
second_image_name, must_be_grayscale=True
)
first_pixel_data = first_image.pixel_data
first_mask = first_image.mask
first_pixel_count = numpy.product(first_pixel_data.shape)
second_pixel_data = second_image.pixel_data
second_mask = second_image.mask
second_pixel_count = numpy.product(second_pixel_data.shape)
#
# Crop the larger image similarly to the smaller one
#
if first_pixel_count < second_pixel_count:
second_pixel_data = first_image.crop_image_similarly(second_pixel_data)
second_mask = first_image.crop_image_similarly(second_mask)
elif second_pixel_count < first_pixel_count:
first_pixel_data = second_image.crop_image_similarly(first_pixel_data)
first_mask = second_image.crop_image_similarly(first_mask)
mask = (
first_mask
& second_mask
& (~numpy.isnan(first_pixel_data))
& (~numpy.isnan(second_pixel_data))
)
result = []
if numpy.any(mask):
fi = first_pixel_data[mask]
si = second_pixel_data[mask]
if self.do_corr_and_slope:
#
# Perform the correlation, which returns:
# [ [ii, ij],
# [ji, jj] ]
#
corr = numpy.corrcoef((fi, si))[1, 0]
#
# Find the slope as a linear regression to
# A * i1 + B = i2
#
coeffs = lstsq(numpy.array((fi, numpy.ones_like(fi))).transpose(), si)[
0
]
slope = coeffs[0]
result += [
[
first_image_name,
second_image_name,
"-",
"Correlation",
"%.3f" % corr,
],
[first_image_name, second_image_name, "-", "Slope", "%.3f" % slope],
]
if any((self.do_manders, self.do_rwc, self.do_overlap)):
# Threshold as percentage of maximum intensity in each channel
thr_fi = self.thr.value * numpy.max(fi) / 100
thr_si = self.thr.value * numpy.max(si) / 100
combined_thresh = (fi > thr_fi) & (si > thr_si)
fi_thresh = fi[combined_thresh]
si_thresh = si[combined_thresh]
tot_fi_thr = fi[(fi > thr_fi)].sum()
tot_si_thr = si[(si > thr_si)].sum()
if self.do_manders:
# Manders Coefficient
M1 = 0
M2 = 0
M1 = fi_thresh.sum() / tot_fi_thr
M2 = si_thresh.sum() / tot_si_thr
result += [
[
first_image_name,
second_image_name,
"-",
"Manders Coefficient",
"%.3f" % M1,
],
[
second_image_name,
first_image_name,
"-",
"Manders Coefficient",
"%.3f" % M2,
],
]
if self.do_rwc:
# RWC Coefficient
RWC1 = 0
RWC2 = 0
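                    # Rank the pixels of each channel: lexsort gives the sort order,
                    # the "_U" flags mark where consecutive sorted values change, and
                    # their cumulative sum assigns the same (dense) rank to tied
                    # pixels. The weight (R - |rank difference|) / R then downweights
                    # pixel pairs whose intensity ranks disagree between the channels.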
Rank1 = numpy.lexsort([fi])
Rank2 = numpy.lexsort([si])
Rank1_U = numpy.hstack([[False], fi[Rank1[:-1]] != fi[Rank1[1:]]])
Rank2_U = numpy.hstack([[False], si[Rank2[:-1]] != si[Rank2[1:]]])
Rank1_S = numpy.cumsum(Rank1_U)
Rank2_S = numpy.cumsum(Rank2_U)
Rank_im1 = numpy.zeros(fi.shape, dtype=int)
Rank_im2 = numpy.zeros(si.shape, dtype=int)
Rank_im1[Rank1] = Rank1_S
Rank_im2[Rank2] = Rank2_S
R = max(Rank_im1.max(), Rank_im2.max()) + 1
Di = abs(Rank_im1 - Rank_im2)
weight = ((R - Di) * 1.0) / R
weight_thresh = weight[combined_thresh]
RWC1 = (fi_thresh * weight_thresh).sum() / tot_fi_thr
RWC2 = (si_thresh * weight_thresh).sum() / tot_si_thr
result += [
[
first_image_name,
second_image_name,
"-",
"RWC Coefficient",
"%.3f" % RWC1,
],
[
second_image_name,
first_image_name,
"-",
"RWC Coefficient",
"%.3f" % RWC2,
],
]
if self.do_overlap:
# Overlap Coefficient
overlap = 0
overlap = (fi_thresh * si_thresh).sum() / numpy.sqrt(
(fi_thresh ** 2).sum() * (si_thresh ** 2).sum()
)
K1 = (fi_thresh * si_thresh).sum() / (fi_thresh ** 2).sum()
K2 = (fi_thresh * si_thresh).sum() / (si_thresh ** 2).sum()
result += [
[
first_image_name,
second_image_name,
"-",
"Overlap Coefficient",
"%.3f" % overlap,
]
]
if self.do_costes:
# Orthogonal Regression for Costes' automated threshold
nonZero = (fi > 0) | (si > 0)
xvar = numpy.var(fi[nonZero], axis=0, ddof=1)
yvar = numpy.var(si[nonZero], axis=0, ddof=1)
xmean = numpy.mean(fi[nonZero], axis=0)
ymean = numpy.mean(si[nonZero], axis=0)
z = fi[nonZero] + si[nonZero]
zvar = numpy.var(z, axis=0, ddof=1)
covar = 0.5 * (zvar - (xvar + yvar))
denom = 2 * covar
num = (yvar - xvar) + numpy.sqrt(
(yvar - xvar) * (yvar - xvar) + 4 * (covar * covar)
)
a = num / denom
b = ymean - a * xmean
i_step = get_scale(first_image.scale, second_image.scale)
# Start at 1 step above the maximum
img_max = max(fi.max(), si.max())
i = i_step * ((img_max // i_step) + 1)
num_true = None
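                # Walk the candidate threshold for the first channel down from the
                # image maximum in steps of i_step (derived from the image scale);
                # the matching threshold for the second channel follows the
                # orthogonal-regression line Thr_si_c = a * Thr_fi_c + b. The search
                # stops once the Pearson correlation of the sub-threshold pixels is
                # no longer positive, which is Costes' criterion.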
while i > i_step:
Thr_fi_c = i
Thr_si_c = (a * i) + b
combt = (fi < Thr_fi_c) | (si < Thr_si_c)
try:
# Only run pearsonr if the input has changed.
if (positives := numpy.count_nonzero(combt)) != num_true:
costReg, _ = scipy.stats.pearsonr(fi[combt], si[combt])
num_true = positives
if costReg <= 0:
break
elif self.fast_costes.value == M_ACCURATE or i < i_step * 10:
i -= i_step
elif costReg > 0.45:
# We're way off, step down 10x
i -= i_step * 10
elif costReg > 0.35:
# Still far from 0, step 5x
i -= i_step * 5
elif costReg > 0.25:
# Step 2x
i -= i_step * 2
else:
i -= i_step
except ValueError:
break
                # Costes' threshold calculation
combined_thresh_c = (fi > Thr_fi_c) & (si > Thr_si_c)
fi_thresh_c = fi[combined_thresh_c]
si_thresh_c = si[combined_thresh_c]
tot_fi_thr_c = fi[(fi > Thr_fi_c)].sum()
tot_si_thr_c = si[(si > Thr_si_c)].sum()
# Costes' Automated Threshold
C1 = 0
C2 = 0
C1 = fi_thresh_c.sum() / tot_fi_thr_c
C2 = si_thresh_c.sum() / tot_si_thr_c
result += [
[
first_image_name,
second_image_name,
"-",
"Manders Coefficient (Costes)",
"%.3f" % C1,
],
[
second_image_name,
first_image_name,
"-",
"Manders Coefficient (Costes)",
"%.3f" % C2,
],
]
else:
corr = numpy.NaN
slope = numpy.NaN
C1 = numpy.NaN
C2 = numpy.NaN
M1 = numpy.NaN
M2 = numpy.NaN
RWC1 = numpy.NaN
RWC2 = numpy.NaN
overlap = numpy.NaN
K1 = numpy.NaN
K2 = numpy.NaN
#
# Add the measurements
#
if self.do_corr_and_slope:
corr_measurement = F_CORRELATION_FORMAT % (
first_image_name,
second_image_name,
)
slope_measurement = F_SLOPE_FORMAT % (first_image_name, second_image_name)
workspace.measurements.add_image_measurement(corr_measurement, corr)
workspace.measurements.add_image_measurement(slope_measurement, slope)
if self.do_overlap:
overlap_measurement = F_OVERLAP_FORMAT % (
first_image_name,
second_image_name,
)
k_measurement_1 = F_K_FORMAT % (first_image_name, second_image_name)
k_measurement_2 = F_K_FORMAT % (second_image_name, first_image_name)
workspace.measurements.add_image_measurement(overlap_measurement, overlap)
workspace.measurements.add_image_measurement(k_measurement_1, K1)
workspace.measurements.add_image_measurement(k_measurement_2, K2)
if self.do_manders:
manders_measurement_1 = F_MANDERS_FORMAT % (
first_image_name,
second_image_name,
)
manders_measurement_2 = F_MANDERS_FORMAT % (
second_image_name,
first_image_name,
)
workspace.measurements.add_image_measurement(manders_measurement_1, M1)
workspace.measurements.add_image_measurement(manders_measurement_2, M2)
if self.do_rwc:
rwc_measurement_1 = F_RWC_FORMAT % (first_image_name, second_image_name)
rwc_measurement_2 = F_RWC_FORMAT % (second_image_name, first_image_name)
workspace.measurements.add_image_measurement(rwc_measurement_1, RWC1)
workspace.measurements.add_image_measurement(rwc_measurement_2, RWC2)
if self.do_costes:
costes_measurement_1 = F_COSTES_FORMAT % (
first_image_name,
second_image_name,
)
costes_measurement_2 = F_COSTES_FORMAT % (
second_image_name,
first_image_name,
)
workspace.measurements.add_image_measurement(costes_measurement_1, C1)
workspace.measurements.add_image_measurement(costes_measurement_2, C2)
return result
def run_image_pair_objects(
self, workspace, first_image_name, second_image_name, object_name
):
"""Calculate per-object correlations between intensities in two images"""
first_image = workspace.image_set.get_image(
first_image_name, must_be_grayscale=True
)
second_image = workspace.image_set.get_image(
second_image_name, must_be_grayscale=True
)
objects = workspace.object_set.get_objects(object_name)
#
# Crop both images to the size of the labels matrix
#
labels = objects.segmented
try:
first_pixels = objects.crop_image_similarly(first_image.pixel_data)
first_mask = objects.crop_image_similarly(first_image.mask)
except ValueError:
first_pixels, m1 = size_similarly(labels, first_image.pixel_data)
first_mask, m1 = size_similarly(labels, first_image.mask)
first_mask[~m1] = False
try:
second_pixels = objects.crop_image_similarly(second_image.pixel_data)
second_mask = objects.crop_image_similarly(second_image.mask)
except ValueError:
second_pixels, m1 = size_similarly(labels, second_image.pixel_data)
second_mask, m1 = size_similarly(labels, second_image.mask)
second_mask[~m1] = False
mask = (labels > 0) & first_mask & second_mask
first_pixels = first_pixels[mask]
second_pixels = second_pixels[mask]
labels = labels[mask]
result = []
first_pixel_data = first_image.pixel_data
first_mask = first_image.mask
first_pixel_count = numpy.product(first_pixel_data.shape)
second_pixel_data = second_image.pixel_data
second_mask = second_image.mask
second_pixel_count = numpy.product(second_pixel_data.shape)
#
# Crop the larger image similarly to the smaller one
#
if first_pixel_count < second_pixel_count:
second_pixel_data = first_image.crop_image_similarly(second_pixel_data)
second_mask = first_image.crop_image_similarly(second_mask)
elif second_pixel_count < first_pixel_count:
first_pixel_data = second_image.crop_image_similarly(first_pixel_data)
first_mask = second_image.crop_image_similarly(first_mask)
mask = (
first_mask
& second_mask
& (~numpy.isnan(first_pixel_data))
& (~numpy.isnan(second_pixel_data))
)
if numpy.any(mask):
fi = first_pixel_data[mask]
si = second_pixel_data[mask]
n_objects = objects.count
# Handle case when both images for the correlation are completely masked out
if n_objects == 0:
corr = numpy.zeros((0,))
overlap = numpy.zeros((0,))
K1 = numpy.zeros((0,))
K2 = numpy.zeros((0,))
M1 = numpy.zeros((0,))
M2 = numpy.zeros((0,))
RWC1 = numpy.zeros((0,))
RWC2 = numpy.zeros((0,))
C1 = numpy.zeros((0,))
C2 = numpy.zeros((0,))
elif numpy.where(mask)[0].__len__() == 0:
corr = numpy.zeros((n_objects,))
corr[:] = numpy.NaN
overlap = K1 = K2 = M1 = M2 = RWC1 = RWC2 = C1 = C2 = corr
else:
lrange = numpy.arange(n_objects, dtype=numpy.int32) + 1
if self.do_corr_and_slope:
#
# The correlation is sum((x-mean(x))(y-mean(y)) /
# ((n-1) * std(x) *std(y)))
#
mean1 = fix(scipy.ndimage.mean(first_pixels, labels, lrange))
mean2 = fix(scipy.ndimage.mean(second_pixels, labels, lrange))
#
# Calculate the standard deviation times the population.
#
std1 = numpy.sqrt(
fix(
scipy.ndimage.sum(
(first_pixels - mean1[labels - 1]) ** 2, labels, lrange
)
)
)
std2 = numpy.sqrt(
fix(
scipy.ndimage.sum(
(second_pixels - mean2[labels - 1]) ** 2, labels, lrange
)
)
)
x = first_pixels - mean1[labels - 1] # x - mean(x)
y = second_pixels - mean2[labels - 1] # y - mean(y)
corr = fix(
scipy.ndimage.sum(
x * y / (std1[labels - 1] * std2[labels - 1]), labels, lrange
)
)
# Explicitly set the correlation to NaN for masked objects
corr[scipy.ndimage.sum(1, labels, lrange) == 0] = numpy.NaN
result += [
[
first_image_name,
second_image_name,
object_name,
"Mean Correlation coeff",
"%.3f" % numpy.mean(corr),
],
[
first_image_name,
second_image_name,
object_name,
"Median Correlation coeff",
"%.3f" % numpy.median(corr),
],
[
first_image_name,
second_image_name,
object_name,
"Min Correlation coeff",
"%.3f" % numpy.min(corr),
],
[
first_image_name,
second_image_name,
object_name,
"Max Correlation coeff",
"%.3f" % numpy.max(corr),
],
]
if any((self.do_manders, self.do_rwc, self.do_overlap)):
# Threshold as percentage of maximum intensity of objects in each channel
tff = (self.thr.value / 100) * fix(
scipy.ndimage.maximum(first_pixels, labels, lrange)
)
tss = (self.thr.value / 100) * fix(
scipy.ndimage.maximum(second_pixels, labels, lrange)
)
combined_thresh = (first_pixels >= tff[labels - 1]) & (
second_pixels >= tss[labels - 1]
)
fi_thresh = first_pixels[combined_thresh]
si_thresh = second_pixels[combined_thresh]
tot_fi_thr = scipy.ndimage.sum(
first_pixels[first_pixels >= tff[labels - 1]],
labels[first_pixels >= tff[labels - 1]],
lrange,
)
tot_si_thr = scipy.ndimage.sum(
second_pixels[second_pixels >= tss[labels - 1]],
labels[second_pixels >= tss[labels - 1]],
lrange,
)
if self.do_manders:
# Manders Coefficient
M1 = numpy.zeros(len(lrange))
M2 = numpy.zeros(len(lrange))
if numpy.any(combined_thresh):
M1 = numpy.array(
scipy.ndimage.sum(fi_thresh, labels[combined_thresh], lrange)
) / numpy.array(tot_fi_thr)
M2 = numpy.array(
scipy.ndimage.sum(si_thresh, labels[combined_thresh], lrange)
) / numpy.array(tot_si_thr)
result += [
[
first_image_name,
second_image_name,
object_name,
"Mean Manders coeff",
"%.3f" % numpy.mean(M1),
],
[
first_image_name,
second_image_name,
object_name,
"Median Manders coeff",
"%.3f" % numpy.median(M1),
],
[
first_image_name,
second_image_name,
object_name,
"Min Manders coeff",
"%.3f" % numpy.min(M1),
],
[
first_image_name,
second_image_name,
object_name,
"Max Manders coeff",
"%.3f" % numpy.max(M1),
],
]
result += [
[
second_image_name,
first_image_name,
object_name,
"Mean Manders coeff",
"%.3f" % numpy.mean(M2),
],
[
second_image_name,
first_image_name,
object_name,
"Median Manders coeff",
"%.3f" % numpy.median(M2),
],
[
second_image_name,
first_image_name,
object_name,
"Min Manders coeff",
"%.3f" % numpy.min(M2),
],
[
second_image_name,
first_image_name,
object_name,
"Max Manders coeff",
"%.3f" % numpy.max(M2),
],
]
if self.do_rwc:
# RWC Coefficient
RWC1 = numpy.zeros(len(lrange))
RWC2 = numpy.zeros(len(lrange))
[Rank1] = numpy.lexsort(([labels], [first_pixels]))
[Rank2] = numpy.lexsort(([labels], [second_pixels]))
Rank1_U = numpy.hstack(
[[False], first_pixels[Rank1[:-1]] != first_pixels[Rank1[1:]]]
)
Rank2_U = numpy.hstack(
[[False], second_pixels[Rank2[:-1]] != second_pixels[Rank2[1:]]]
)
Rank1_S = numpy.cumsum(Rank1_U)
Rank2_S = numpy.cumsum(Rank2_U)
Rank_im1 = numpy.zeros(first_pixels.shape, dtype=int)
Rank_im2 = numpy.zeros(second_pixels.shape, dtype=int)
Rank_im1[Rank1] = Rank1_S
Rank_im2[Rank2] = Rank2_S
R = max(Rank_im1.max(), Rank_im2.max()) + 1
Di = abs(Rank_im1 - Rank_im2)
weight = (R - Di) * 1.0 / R
weight_thresh = weight[combined_thresh]
if numpy.any(combined_thresh):
RWC1 = numpy.array(
scipy.ndimage.sum(
fi_thresh * weight_thresh, labels[combined_thresh], lrange
)
) / numpy.array(tot_fi_thr)
RWC2 = numpy.array(
scipy.ndimage.sum(
si_thresh * weight_thresh, labels[combined_thresh], lrange
)
) / numpy.array(tot_si_thr)
result += [
[
first_image_name,
second_image_name,
object_name,
"Mean RWC coeff",
"%.3f" % numpy.mean(RWC1),
],
[
first_image_name,
second_image_name,
object_name,
"Median RWC coeff",
"%.3f" % numpy.median(RWC1),
],
[
first_image_name,
second_image_name,
object_name,
"Min RWC coeff",
"%.3f" % numpy.min(RWC1),
],
[
first_image_name,
second_image_name,
object_name,
"Max RWC coeff",
"%.3f" % numpy.max(RWC1),
],
]
result += [
[
second_image_name,
first_image_name,
object_name,
"Mean RWC coeff",
"%.3f" % numpy.mean(RWC2),
],
[
second_image_name,
first_image_name,
object_name,
"Median RWC coeff",
"%.3f" % numpy.median(RWC2),
],
[
second_image_name,
first_image_name,
object_name,
"Min RWC coeff",
"%.3f" % numpy.min(RWC2),
],
[
second_image_name,
first_image_name,
object_name,
"Max RWC coeff",
"%.3f" % numpy.max(RWC2),
],
]
if self.do_overlap:
# Overlap Coefficient
if numpy.any(combined_thresh):
fpsq = scipy.ndimage.sum(
first_pixels[combined_thresh] ** 2,
labels[combined_thresh],
lrange,
)
spsq = scipy.ndimage.sum(
second_pixels[combined_thresh] ** 2,
labels[combined_thresh],
lrange,
)
pdt = numpy.sqrt(numpy.array(fpsq) * numpy.array(spsq))
overlap = fix(
scipy.ndimage.sum(
first_pixels[combined_thresh]
* second_pixels[combined_thresh],
labels[combined_thresh],
lrange,
)
/ pdt
)
K1 = fix(
(
scipy.ndimage.sum(
first_pixels[combined_thresh]
* second_pixels[combined_thresh],
labels[combined_thresh],
lrange,
)
)
/ (numpy.array(fpsq))
)
K2 = fix(
scipy.ndimage.sum(
first_pixels[combined_thresh]
* second_pixels[combined_thresh],
labels[combined_thresh],
lrange,
)
/ numpy.array(spsq)
)
else:
overlap = K1 = K2 = numpy.zeros(len(lrange))
result += [
[
first_image_name,
second_image_name,
object_name,
"Mean Overlap coeff",
"%.3f" % numpy.mean(overlap),
],
[
first_image_name,
second_image_name,
object_name,
"Median Overlap coeff",
"%.3f" % numpy.median(overlap),
],
[
first_image_name,
second_image_name,
object_name,
"Min Overlap coeff",
"%.3f" % numpy.min(overlap),
],
[
first_image_name,
second_image_name,
object_name,
"Max Overlap coeff",
"%.3f" % numpy.max(overlap),
],
]
if self.do_costes:
nonZero = (fi > 0) | (si > 0)
xvar = numpy.var(fi[nonZero], axis=0, ddof=1)
yvar = numpy.var(si[nonZero], axis=0, ddof=1)
xmean = numpy.mean(fi[nonZero], axis=0)
ymean = numpy.mean(si[nonZero], axis=0)
z = fi[nonZero] + si[nonZero]
zvar = numpy.var(z, axis=0, ddof=1)
covar = 0.5 * (zvar - (xvar + yvar))
denom = 2 * covar
num = (yvar - xvar) + numpy.sqrt(
(yvar - xvar) * (yvar - xvar) + 4 * (covar * covar)
)
a = num / denom
b = ymean - a * xmean
i_step = get_scale(first_image.scale, second_image.scale)
# Start at 1 step above the maximum
img_max = max(fi.max(), si.max())
i = i_step * ((img_max // i_step) + 1)
num_true = None
while i > i_step:
thr_fi_c = i
thr_si_c = (a * i) + b
combt = (fi < thr_fi_c) | (si < thr_si_c)
try:
# Only run pearsonr if the input has changed.
if (positives := numpy.count_nonzero(combt)) != num_true:
costReg, _ = scipy.stats.pearsonr(fi[combt], si[combt])
num_true = positives
if costReg <= 0:
break
elif self.fast_costes.value == M_ACCURATE or i < i_step * 10:
i -= i_step
elif costReg > 0.45:
# We're way off, step down 10x
i -= i_step * 10
elif costReg > 0.35:
# Still far from 0, step 5x
i -= i_step * 5
elif costReg > 0.25:
# Step 2x
i -= i_step * 2
else:
i -= i_step
except ValueError:
break
                    # Costes' threshold for the entire image is applied to each object
fi_above_thr = first_pixels > thr_fi_c
si_above_thr = second_pixels > thr_si_c
combined_thresh_c = fi_above_thr & si_above_thr
fi_thresh_c = first_pixels[combined_thresh_c]
si_thresh_c = second_pixels[combined_thresh_c]
if numpy.any(fi_above_thr):
tot_fi_thr_c = scipy.ndimage.sum(
first_pixels[first_pixels >= thr_fi_c],
labels[first_pixels >= thr_fi_c],
lrange,
)
else:
tot_fi_thr_c = numpy.zeros(len(lrange))
if numpy.any(si_above_thr):
tot_si_thr_c = scipy.ndimage.sum(
second_pixels[second_pixels >= thr_si_c],
labels[second_pixels >= thr_si_c],
lrange,
)
else:
tot_si_thr_c = numpy.zeros(len(lrange))
# Costes Automated Threshold
C1 = numpy.zeros(len(lrange))
C2 = numpy.zeros(len(lrange))
if numpy.any(combined_thresh_c):
C1 = numpy.array(
scipy.ndimage.sum(
fi_thresh_c, labels[combined_thresh_c], lrange
)
) / numpy.array(tot_fi_thr_c)
C2 = numpy.array(
scipy.ndimage.sum(
si_thresh_c, labels[combined_thresh_c], lrange
)
) / numpy.array(tot_si_thr_c)
result += [
[
first_image_name,
second_image_name,
object_name,
"Mean Manders coeff (Costes)",
"%.3f" % numpy.mean(C1),
],
[
first_image_name,
second_image_name,
object_name,
"Median Manders coeff (Costes)",
"%.3f" % numpy.median(C1),
],
[
first_image_name,
second_image_name,
object_name,
"Min Manders coeff (Costes)",
"%.3f" % numpy.min(C1),
],
[
first_image_name,
second_image_name,
object_name,
"Max Manders coeff (Costes)",
"%.3f" % numpy.max(C1),
],
]
result += [
[
second_image_name,
first_image_name,
object_name,
"Mean Manders coeff (Costes)",
"%.3f" % numpy.mean(C2),
],
[
second_image_name,
first_image_name,
object_name,
"Median Manders coeff (Costes)",
"%.3f" %
|
numpy.median(C2)
|
numpy.median
|
from constants import *
import math
import numpy as np
import sys
from util import bestTwoActions, UpperP, LowerP, iteratedConvergence
from evaluatePolicy import evaluatePolicy
verbose=0
def RoundRobin(mdp, start_state=0, epsilon=4, randomseed=None, delta=0.1):
global MAX_ITERATION_LIMIT, c
if(randomseed is not None):
np.random.seed(randomseed)
iteration = 0
it=0
initial_iterations = 1*mdp.numStates*mdp.numActions
rewards_s_a_sprime = np.zeros((mdp.numStates,mdp.numActions,mdp.numStates))
R_s_a = np.zeros((mdp.numStates,mdp.numActions))
sampled_frequency_s_a = np.zeros((mdp.numStates,mdp.numActions))
N_s_a_sprime = np.zeros((mdp.numStates,mdp.numActions,mdp.numStates))
P = np.zeros((mdp.numStates,mdp.numActions,mdp.numStates))
P_tilda = np.zeros((mdp.numStates,mdp.numActions,mdp.numStates))
P_lower_tilda = np.zeros((mdp.numStates,mdp.numActions,mdp.numStates))
VlowerMBAE = np.zeros((mdp.numStates))
Vlower = np.zeros((mdp.numStates))
Vstar = (mdp.Vmax/2)*np.ones((mdp.numStates))
VupperMBAE = mdp.Vmax*np.ones((mdp.numStates))
Vupper = mdp.Vmax*np.random.random([mdp.numStates])
QlowerMBAE = np.zeros((mdp.numStates,mdp.numActions))
Qlower = np.zeros((mdp.numStates,mdp.numActions))
Qstar = (mdp.Vmax/2)*np.ones((mdp.numStates,mdp.numActions))
QupperMBAE = mdp.Vmax*np.ones((mdp.numStates,mdp.numActions))
Qupper = mdp.Vmax*
|
np.random.random([mdp.numStates,mdp.numActions])
|
numpy.random.random
|
# Copyright (C) 2021 <NAME> and <NAME>
#
# SPDX-License-Identifier: MIT
import argparse
import os
import gmsh
import numpy as np
from mpi4py import MPI
try:
import meshio
except ImportError:
print("Meshio and h5py must be installed to convert meshes."
+ " Please run `pip3 install --no-binary=h5py h5py meshio`")
exit(1)
__all__ = ["model_parameters", "mesh_parameters", "domain_parameters", "surface_map", "generate_team30_mesh"]
# Model parameters for the TEAM 30 model
model_parameters = {
"mu_0": 1.25663753e-6, # Relative permability of air [H/m]=[kg m/(s^2 A^2)]
"freq": 60, # Frequency of excitation,
"J": 3.1e6 * np.sqrt(2), # [A/m^2] Current density of copper winding
"mu_r": {"Cu": 1, "Stator": 30, "Rotor": 30, "Al": 1, "Air": 1, "AirGap": 1}, # Relative permability
"sigma": {"Rotor": 1.6e6, "Al": 3.72e7, "Stator": 0, "Cu": 0, "Air": 0, "AirGap": 0}, # Conductivity
"densities": {"Rotor": 7850, "Al": 2700, "Stator": 0, "Air": 0, "Cu": 0, "AirGap": 0} # [kg/m^3]
}
# Marker for facets, and restriction to use in surface integral of airgap
surface_map = {"Exterior": 1, "MidAir": 2, "restriction": "+"}
# Copper wires are ordered counter-clockwise from angle = 0, 2*np.pi/num_segments, ...
_domain_map_single = {"Cu": (7, 8), "Stator": (6, ), "Rotor": (5, ), "Al": (4,), "AirGap": (2, 3), "Air": (1,)}
_domain_map_three = {"Cu": (7, 8, 9, 10, 11, 12), "Stator": (6, ), "Rotor": (5, ),
"Al": (4,), "AirGap": (2, 3), "Air": (1,)}
# Currents mapping to the domain markers of the copper
_currents_single = {7: {"alpha": 1, "beta": 0}, 8: {"alpha": -1, "beta": 0}}
_currents_three = {7: {"alpha": 1, "beta": 0}, 8: {"alpha": -1, "beta": 2 * np.pi / 3},
9: {"alpha": 1, "beta": 4 * np.pi / 3}, 10: {"alpha": -1, "beta": 0},
11: {"alpha": 1, "beta": 2 * np.pi / 3}, 12: {"alpha": -1, "beta": 4 * np.pi / 3}}
# The different radii used in the domain specifications
mesh_parameters = {"r1": 0.02, "r2": 0.03, "r3": 0.032, "r4": 0.052, "r5": 0.057}
def domain_parameters(single_phase: bool):
"""
Get domain markers and current specifications for either the single phase or three phase engine
"""
if single_phase:
return _domain_map_single, _currents_single
else:
return _domain_map_three, _currents_three
def _add_copper_segment(start_angle=0):
"""
Helper function
    Add a 45 degree copper segment, r in (r3, r4) with midline at "start_angle".
"""
copper_arch_inner = gmsh.model.occ.addCircle(
0, 0, 0, mesh_parameters["r3"], angle1=start_angle - np.pi / 8, angle2=start_angle + np.pi / 8)
copper_arch_outer = gmsh.model.occ.addCircle(
0, 0, 0, mesh_parameters["r4"], angle1=start_angle - np.pi / 8, angle2=start_angle + np.pi / 8)
gmsh.model.occ.synchronize()
nodes_inner = gmsh.model.getBoundary([(1, copper_arch_inner)])
nodes_outer = gmsh.model.getBoundary([(1, copper_arch_outer)])
l0 = gmsh.model.occ.addLine(nodes_inner[0][1], nodes_outer[0][1])
l1 = gmsh.model.occ.addLine(nodes_inner[1][1], nodes_outer[1][1])
c_l = gmsh.model.occ.addCurveLoop([copper_arch_inner, l1, copper_arch_outer, l0])
copper_segment = gmsh.model.occ.addPlaneSurface([c_l])
gmsh.model.occ.synchronize()
return copper_segment
def generate_team30_mesh(filename: str, single: bool, res: np.float64, L: np.float64):
"""
    Generate the single phase or three phase TEAM 30 model with a given minimal resolution, encapsulated
    in an L x L box.
    All domains are marked; of the facets, only the exterior facets and the mid air gap facets are marked.
"""
if single:
angles = [0, np.pi]
domain_map = _domain_map_single
else:
spacing = (np.pi / 4) + (np.pi / 4) / 3
angles = np.array([spacing * i for i in range(6)])
domain_map = _domain_map_three
assert(len(domain_map["Cu"]) == len(angles))
gmsh.initialize()
# Generate three phase induction motor
rank = MPI.COMM_WORLD.rank
gdim = 2 # Geometric dimension of the mesh
if rank == 0:
center = gmsh.model.occ.addPoint(0, 0, 0)
air_box = gmsh.model.occ.addRectangle(-L / 2, - L / 2, 0, 2 * L / 2, 2 * L / 2)
# Define the different circular layers
strator_steel = gmsh.model.occ.addCircle(0, 0, 0, mesh_parameters["r5"])
air_2 = gmsh.model.occ.addCircle(0, 0, 0, mesh_parameters["r4"])
air = gmsh.model.occ.addCircle(0, 0, 0, mesh_parameters["r3"])
air_mid = gmsh.model.occ.addCircle(0, 0, 0, 0.5 * (mesh_parameters["r2"] + mesh_parameters["r3"]))
aluminium = gmsh.model.occ.addCircle(0, 0, 0, mesh_parameters["r2"])
rotor_steel = gmsh.model.occ.addCircle(0, 0, 0, mesh_parameters["r1"])
        # Create the outer stator steel
steel_loop = gmsh.model.occ.addCurveLoop([strator_steel])
air_2_loop = gmsh.model.occ.addCurveLoop([air_2])
strator_steel = gmsh.model.occ.addPlaneSurface([steel_loop, air_2_loop])
# Create air layer
air_loop = gmsh.model.occ.addCurveLoop([air])
air = gmsh.model.occ.addPlaneSurface([air_2_loop, air_loop])
domains = [(2, _add_copper_segment(angle)) for angle in angles]
# Add second air segment (in two pieces)
air_mid_loop = gmsh.model.occ.addCurveLoop([air_mid])
al_loop = gmsh.model.occ.addCurveLoop([aluminium])
air_surf1 = gmsh.model.occ.addPlaneSurface([air_loop, air_mid_loop])
air_surf2 = gmsh.model.occ.addPlaneSurface([air_mid_loop, al_loop])
        # Add aluminium segment
rotor_loop = gmsh.model.occ.addCurveLoop([rotor_steel])
aluminium_surf = gmsh.model.occ.addPlaneSurface([al_loop, rotor_loop])
# Add steel rotor
rotor_disk = gmsh.model.occ.addPlaneSurface([rotor_loop])
gmsh.model.occ.synchronize()
domains.extend([(2, strator_steel), (2, rotor_disk), (2, air),
(2, air_surf1), (2, air_surf2), (2, aluminium_surf)])
surfaces, _ = gmsh.model.occ.fragment([(2, air_box)], domains)
gmsh.model.occ.synchronize()
# Helpers for assigning domain markers based on area of domain
rs = [mesh_parameters[f"r{i}"] for i in range(1, 6)]
r_mid = 0.5 * (rs[1] + rs[2]) # Radius for middle of air gap
        area_helper = (rs[3]**2 - rs[2]**2) * np.pi  # Helper value for the area of the copper and air segments
frac_cu = 45 / 360
frac_air = (360 - len(angles) * 45) / (360 * len(angles))
_area_to_domain_map = {rs[0]**2 * np.pi: "Rotor",
(rs[1]**2 - rs[0]**2) * np.pi: "Al",
(r_mid**2 - rs[1]**2) * np.pi: "AirGap1",
(rs[2]**2 - r_mid**2) * np.pi: "AirGap0",
area_helper * frac_cu: "Cu",
area_helper * frac_air: "Air",
(rs[4]**2 - rs[3]**2) * np.pi: "Stator",
L**2 - np.pi * rs[4]**2: "Air"}
# Helper for assigning current wire tag to copper windings
cu_points = np.asarray([[np.cos(angle), np.sin(angle)] for angle in angles])
# Assign physical surfaces based on the mass of the segment
# For copper wires order them counter clockwise
other_air_markers = []
for surface in surfaces:
mass = gmsh.model.occ.get_mass(surface[0], surface[1])
found_domain = False
for _mass in _area_to_domain_map.keys():
if np.isclose(mass, _mass):
domain_type = _area_to_domain_map[_mass]
if domain_type == "Cu":
com = gmsh.model.occ.get_center_of_mass(surface[0], surface[1])
point =
|
np.array([com[0], com[1]])
|
numpy.array
|
# Copyright (c) 2021, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=no-member
# definition of various activation fcns
import numpy as np
from sympy import Expr
import z3
import logging
try:
import dreal as dr
except Exception as e:
logging.exception('Exception while importing dReal')
from src.shared.activations import ActivationType
def activation_z3(select, p):
if select == ActivationType.IDENTITY:
return p
elif select == ActivationType.RELU:
return relu(p)
elif select == ActivationType.LINEAR:
return p
elif select == ActivationType.SQUARE:
return square_z3(p)
elif select == ActivationType.LIN_SQUARE:
return lin_square_z3(p)
elif select == ActivationType.RELU_SQUARE:
return relu_square_z3(p)
elif select == ActivationType.REQU:
return requ_z3(p)
elif select == ActivationType.TANH:
return hyper_tan_dr(p)
elif select == ActivationType.SIGMOID:
return sigm_dr(p)
elif select == ActivationType.SOFTPLUS:
return softplus_dr(p)
elif select == ActivationType.COSH:
return cosh(p)
elif select == ActivationType.LIN_TO_CUBIC:
return lqc_z3(p)
elif select == ActivationType.LIN_TO_QUARTIC:
return lqcq_z3(p)
elif select == ActivationType.LIN_TO_QUINTIC:
return lqcqp_z3(p)
elif select == ActivationType.LIN_TO_SEXTIC:
return l_e_z3(p)
elif select == ActivationType.LIN_TO_SEPTIC:
return l_s_z3(p)
elif select == ActivationType.LIN_TO_OCTIC:
return l_o_z3(p)
elif select == ActivationType.SQUARE_DEC:
return sd(p)
def activation_der_z3(select, p):
if select == ActivationType.IDENTITY:
return np.ones((p.shape))
elif select == ActivationType.RELU:
return step_z3(p)
elif select == ActivationType.LINEAR:
return np.ones((p.shape))
elif select == ActivationType.SQUARE:
return 2*p
elif select == ActivationType.LIN_SQUARE:
return lin_square_der_z3(p)
elif select == ActivationType.RELU_SQUARE:
return relu_square_der_z3(p)
elif select == ActivationType.REQU:
return requ_der_z3(p)
elif select == ActivationType.TANH:
return hyper_tan_der_dr(p)
elif select == ActivationType.SIGMOID:
return sigm_der_dr(p)
elif select == ActivationType.SOFTPLUS:
return softplus_der_dr(p)
elif select == ActivationType.COSH:
return sinh(p)
elif select == ActivationType.LIN_TO_CUBIC:
return lqc_der_z3(p)
elif select == ActivationType.LIN_TO_QUARTIC:
return lqcq_der_z3(p)
elif select == ActivationType.LIN_TO_QUINTIC:
return lqcqp_der_z3(p)
elif select == ActivationType.LIN_TO_SEXTIC:
return l_e_der_z3(p)
elif select == ActivationType.LIN_TO_SEPTIC:
return l_s_der_z3(p)
elif select == ActivationType.LIN_TO_OCTIC:
return l_o_der_z3(p)
elif select == ActivationType.SQUARE_DEC:
return sd_der(p)
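# Convention used by the polynomial activations below: each "LIN_TO_*" activation
# splits the input column vector into equal chunks and raises chunk k to the power
# k + 1 (linear, square, cube, ...); the matching *_der_z3 functions return the
# element-wise derivatives of those same chunks.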
def relu(x):
# Won't work with sympy
y = x.copy()
if isinstance(x[0,0], z3.ArithRef):
_If = z3.If
for idx in range(len(y)):
y[idx, 0] = z3.simplify(_If(y[idx, 0] > 0, y[idx, 0], 0))
else:
_max = dr.Max
for idx in range(len(y)):
y[idx, 0] = _max(y[idx, 0], 0)
return y
def square_z3(x):
return np.power(x, 2)
# assert(len(p[0]) == 1)
# return [[elem[0] ** 2] for elem in p]
def lin_square_z3(x):
h = int(len(x) / 2)
x1, x2 = x[:h], x[h:]
return np.vstack((x1, np.power(x2, 2)))
def relu_square_z3(x):
h = int(len(x) / 2)
x1, x2 = x[:h], x[h:]
return np.vstack((relu(x1), np.power(x2, 2)))
def requ_z3(x):
return np.multiply(x, relu(x))
def hyper_tan_dr(x):
y = x.copy()
# original_shape = y.shape
# y = y.reshape(max(y.shape[0], y.shape[1]), 1)
for idx in range(len(y)):
y[idx, 0] = dr.tanh(y[idx, 0])
return y # .reshape(original_shape)
def sigm_dr(x):
# sigmoid is f(x) = 1/(1+e^-x)
y = x.copy()
for idx in range(len(y)):
y[idx, 0] = 1/(1+dr.exp(-y[idx, 0]))
return y
def softplus_dr(x):
# softplus is f(x) = ln(1 + e^x)
y = x.copy()
for idx in range(len(y)):
y[idx, 0] = dr.log(1 + dr.exp(y[idx,0]))
return y
def cosh(x):
y = x.copy()
# original_shape = y.shape
# y = y.reshape(max(y.shape[0], y.shape[1]), 1)
for idx in range(len(y)):
y[idx, 0] = dr.cosh(y[idx, 0]) - 1
return y # .reshape(original_shape)
def lqc_z3(x):
# linear - quadratic - cubic activation
h = int(x.shape[0] / 3)
x1, x2, x3 = x[:h], x[h:2 * h], x[2 * h:]
return np.vstack([x1, np.power(x2, 2), np.power(x3, 3)])
def lqcq_z3(x):
# # linear - quadratic - cubic - quartic activation
h = int(x.shape[0] / 4)
x1, x2, x3, x4 = x[:h], x[h:2 * h], x[2 * h:3 * h], x[3 * h:]
return np.vstack([x1, np.power(x2, 2), np.power(x3, 3), np.power(x4, 4)])
def lqcqp_z3(x):
# # linear - quadratic - cubic - quartic -penta activation
h = int(x.shape[0] / 5)
x1, x2, x3, x4, x5 = x[:h], x[h:2 * h], x[2 * h:3 * h], x[3 * h:4*h], x[4*h:]
return np.vstack([x1, np.power(x2, 2), np.power(x3, 3), np.power(x4, 4), np.power(x5, 5)])
def l_e_z3(x):
# # linear - quadratic - cubic - quartic -penta activation
h = int(x.shape[0] / 6)
x1, x2, x3, x4, x5, x6 = x[:h], x[h:2 * h], x[2 * h:3 * h], x[3 * h:4*h], x[4*h:5*h], x[5*h:]
return np.vstack([x1, np.power(x2, 2), np.power(x3, 3), np.power(x4, 4),
np.power(x5, 5), np.power(x6,6 )])
def l_s_z3(x):
# # linear - quadratic - cubic - quartic -penta activation
h = int(x.shape[0] / 7)
x1, x2, x3, x4, x5, x6, x7= x[:h], x[h:2 * h], x[2 * h:3 * h], x[3 * h:4*h], \
x[4*h:5*h], x[5*h:6*h], x[6*h:]
return np.vstack([x1, np.power(x2, 2), np.power(x3, 3), np.power(x4, 4),
np.power(x5, 5), np.power(x6, 6), np.power(x7, 7)])
def l_o_z3(x):
# # linear - quadratic - cubic - quartic -penta activation
h = int(x.shape[0] / 8)
x1, x2, x3, x4, x5, x6, x7, x8 = x[:h], x[h:2 * h], x[2 * h:3 * h], x[3 * h:4*h], \
x[4*h:5*h], x[5*h:6*h], x[6*h:7*h], x[7*h:]
return np.vstack([x1, np.power(x2, 2), np.power(x3, 3), np.power(x4, 4),
np.power(x5, 5), np.power(x6, 6), np.power(x7, 7), np.power(x8, 8)])
def sd(x):
h = int(x.shape[0] / 5)
x1, x2, x3, x4, x5 = x[:h], x[h:2 * h], x[2 * h:3 * h], x[3 * h:4*h], x[4*h:]
    return np.vstack([np.power(x1, 2), np.power(x2, 4), np.power(x3, 6), np.power(x4, 8), np.power(x5, 10)])
##############################
# DERIVATIVE
##############################
def step_z3(x):
y = x.copy()
original_shape = y.shape
y = y.reshape(max(y.shape[0], y.shape[1]), 1)
if isinstance(x[0,0], z3.ArithRef):
_If = z3.If
for idx in range(y.shape[0]):
y[idx, 0] = z3.simplify(_If(y[idx, 0] > 0.0, 1.0, 0.0)) # using 0.0 and 1.0 avoids int/float issues
else:
_If = dr.if_then_else
for idx in range(y.shape[0]):
y[idx, 0] = _If(y[idx, 0] > 0.0, 1.0, 0.0) # using 0.0 and 1.0 avoids int/float issues
return y.reshape(original_shape)
def lin_square_der_z3(x):
h = int(len(x) / 2)
x1, x2 = x[:h], x[h:]
return np.vstack((np.ones(x1.shape), 2*x2))
def relu_square_der_z3(x):
h = int(len(x) / 2)
x1, x2 = x[:h], x[h:]
return np.vstack((step_z3(x1), 2*x2))
def requ_der_z3(x):
return 2*relu(x)
def hyper_tan_der_dr(x):
return np.ones((x.shape)) - np.power(hyper_tan_dr(x), 2)
def sinh(x):
y = x.copy()
# original_shape = y.shape
# y = y.reshape(max(y.shape[0], y.shape[1]), 1)
for idx in range(len(y)):
y[idx, 0] = dr.sinh(y[idx, 0])
return y # .reshape(original_shape)
def sigm_der_dr(x):
y = sigm_dr(x)
# elem-wise multiplication
return np.multiply(y, (np.ones(x.shape) - y))
def lqc_der_z3(x):
# linear - quadratic - cubic activation
h = int(x.shape[0] / 3)
x1, x2, x3 = x[:h], x[h:2 * h], x[2 * h:]
return np.vstack([np.ones((h, 1)), 2 * x2, 3 * np.power(x3, 2)])
def lqcq_der_z3(x):
# # linear - quadratic - cubic - quartic activation
h = int(x.shape[0] / 4)
x1, x2, x3, x4 = x[:h], x[h:2 * h], x[2 * h:3 * h], x[3 * h:]
return np.vstack([np.ones((h, 1)), 2*x2, 3*np.power(x3, 2), 4*np.power(x4, 3)]) # torch.pow(x, 2)
def lqcqp_der_z3(x):
# # linear - quadratic - cubic - quartic - penta activation
h = int(x.shape[0] / 5)
x1, x2, x3, x4, x5 = x[:h], x[h:2 * h], x[2 * h:3 * h], x[3*h:4*h], x[4*h:]
return np.vstack([np.ones((h, 1)), 2*x2, 3*np.power(x3, 2), 4*np.power(x4, 3), 5*np.power(x5, 4)])
def l_e_der_z3(x):
# # linear - quadratic - cubic - quartic - penta activation
h = int(x.shape[0] / 6)
x1, x2, x3, x4, x5, x6 = x[:h], x[h:2 * h], x[2 * h:3 * h], x[3*h:4*h], x[4*h:5*h], x[5*h:]
return np.vstack([np.ones((h, 1)), 2*x2, 3*np.power(x3, 2), 4*np.power(x4, 3),
5*np.power(x5, 4), 6*np.power(x6, 5)])
def l_s_der_z3(x):
# # linear - quadratic - cubic - quartic - penta activation
h = int(x.shape[0] / 7)
x1, x2, x3, x4, x5, x6, x7 = x[:h], x[h:2 * h], x[2 * h:3 * h], x[3*h:4*h], \
x[4*h:5*h], x[5*h:6*h], x[6*h:]
return np.vstack([np.ones((h, 1)), 2*x2, 3*np.power(x3, 2), 4*np.power(x4, 3),
5*np.power(x5, 4), 6*np.power(x6, 5), 7*np.power(x7, 6)])
def l_o_der_z3(x):
# # linear - quadratic - cubic - quartic - penta activation
h = int(x.shape[0] / 8)
x1, x2, x3, x4, x5, x6, x7, x8 = x[:h], x[h:2 * h], x[2 * h:3 * h], x[3*h:4*h], \
x[4*h:5*h], x[5*h:6*h], x[6*h:7*h], x[7*h:]
return np.vstack([np.ones((h, 1)), 2*x2, 3*np.power(x3, 2), 4*np.power(x4, 3),
5*np.power(x5, 4), 6*np.power(x6, 5), 7*np.power(x7, 6), 8*np.power(x8, 7)])
def sd_der(x):
h = int(x.shape[0] / 5)
x1, x2, x3, x4, x5 = x[:h], x[h:2 * h], x[2 * h:3 * h], x[3 * h:4*h], x[4*h:]
return np.vstack([2 * x1, 4 *
|
np.power(x2, 3)
|
numpy.power
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The module that keeps and operates with detector's output.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
class Detector(object):
"""Keeps the detections and gets proposals for a given image-label pairs."""
def __init__(self, detections, predictive_fields):
"""Initialisation of the detector.
Args:
detections: pandas dataframe with detections
predictive_fields: list of features that will be used by IAD
"""
self.predictive_fields = predictive_fields
        # We will need to search the detections many times, so sort them to make that faster.
self.detections = detections.sort_values('image_id')
self.detections_nparray = self.detections.values[:, 0]
def get_box_proposals(self, image_id, class_id):
"""Gets a list of proposals for a given image and class.
Args:
image_id: image id for verification, str
class_id: class id for verification, int64
Returns:
coordinates: of all box proposals,
pandas dataframe with columns 'xmin', 'xmax', 'ymin', 'ymax'
features: corresponding to coordinates,
pandas dataframe with columns stated in predictive fields
"""
# as the image_ids are sorted,
# let's find the first and last occurrence of image_id
# that will define the possible search range for proposals
in1 =
|
np.searchsorted(self.detections_nparray, image_id, side='left')
|
numpy.searchsorted
|
import numpy as np
import itertools as it
import gurobipy as gurobi
from gurobipy import GRB as G
from textwrap import dedent
from math import*
from . import get_logger, freeze, subdict
_LOG = get_logger('adt17')
_STATUS = {
1: 'LOADED',
2: 'OPTIMAL',
3: 'INFEASIBLE',
4: 'INF_OR_UNBD',
5: 'UNBOUNDED',
6: 'CUTOFF',
7: 'ITERATION_LIMIT',
8: 'NODE_LIMIT',
9: 'TIME_LIMIT',
10: 'SOLUTION_LIMIT',
11: 'INTERRUPTED',
12: 'NUMERIC',
13: 'SUBOPTIMAL',
}
def dict2array(d):
indices = np.array(list(d.keys()))
if not len(indices):
return None
ndim = len(indices[0])
shape = [indices[:, dim].max() + 1 for dim in range(ndim)]
array = np.zeros(shape)
for index in map(tuple, indices):
array[index] = d[index].x
return array
def dot(x, z):
grutil = gurobi.quicksum([x[i] * z[i] for i in range(len(x))])
return grutil
def bilinear(x, A, z):
return dot(x, [dot(A[i], z) for i in range(len(x))])
def L1_distance(x, y):
return sum(abs(a - b) for a, b in zip(x, y))
def Ep_distance(x,y):
return sum(a-b for a, b in zip(x, y))
class Problem(object):
def __init__(self, num_attributes, num_threads=0):
self.num_attributes = num_attributes
self.num_threads = num_threads
def infer(self, w, transform=(1, 0)):
"""Computes a highest-utility configuration w.r.t. the given weights.
Parameters
----------
w : ndarray of shape (num_attributes,)
A weight vector.
transform : tuple of (float, 1D ndarray)
The transformation parameters (a, b).
Returns
-------
x : ndarray of shape (num_attributes,)
An optimal configuration.
"""
a, b = transform
transformed_w = a * w + b
assert (transformed_w >= 0).all()
_LOG.debug(dedent('''
INFERENCE
w = {}
transformed w = {}
''').format(w, transformed_w))
model = gurobi.Model('inference')
model.params.OutputFlag = 0
model.params.Threads = self.num_threads
model.params.Seed = 0
x = [model.addVar(vtype=G.BINARY) for z in range(self.num_attributes)]
model.modelSense = G.MAXIMIZE
model.setObjective(gurobi.quicksum([w[i] * x[i] for i in range(self.num_attributes)]))
self._add_constraints(model, x)
model.optimize()
x = np.array([x[z].x for z in range(self.num_attributes)])
_LOG.debug('inferred {}'.format(x))
return x
#Implement groupwise query selection
#Compute x_star
def benchmark(self, W, omega_star):
W = np.squeeze(np.asarray(W))
omega_star = np.squeeze(np.asarray(omega_star))
ws_star = np.dot(W, omega_star, out=None)
model = gurobi.Model('inference')
model.params.Threads = self.num_threads
model.params.Seed = 0
model.params.OutputFlag = 0
x_star = [model.addVar(vtype=G.BINARY) for z in range(self.num_attributes)]
model.modelSense = G.MAXIMIZE
model.update()
model.setObjective(dot(ws_star,x_star))
self._add_constraints(model, x_star)
model.optimize()
x_star = np.array([x_star[z].x for z in range(self.num_attributes)])
print("This is True X =", x_star)
_LOG.debug('inferred {}'.format(x_star))
return x_star
def infer_query(self, W ,omega):
LAMBDA = 0.5
M = 1000000
# XXX w_star is supposed to be a matrix of shape (num_attributes,
# num_users), each column encodes the preferences of one user; omega
# a vector of shape (num_users,), each element encodes the importance
# of one user. Is this vvv correct in this case?
#print ("Shape of aggregate_utility =", W.shape, " = (num_attributes, num_users)")
#print ("Shape of omega =", omega.shape," = (num_users,)")
#omega = omega[:,None]
W = np.squeeze(np.asarray(W))
omega = np.squeeze(
|
np.asarray(omega)
|
numpy.asarray
|
# instance.method = MethodType(method, instance)
# !aws codecommit list-repositories
# !autopep8 --in-place --aggressive --aggressive brac_dual_agent.py
# gpus = tf.config.experimental.list_physical_devices('GPU')
# if gpus:
# try:
# for gpu in gpus:
# tf.config.experimental.set_memory_growth(gpu, True)
# except RuntimeError as e:
# print(e)
#############################################################################
# %matplotlib inline
# !tar -czf data.tar.gz data
# !tar -czf code.tar.gz code
from inspect import getsource
from importlib import reload
from livelossplot import PlotLosses
import pytz
import tensorflow as tf
import silence_tensorflow.auto
# tz_NY = pytz.timezone('America/New_York')
# dt.now(tz_NY).strftime("%D:%H:%M:%S")
from typing import Dict, List, Set, Tuple
from datetime import datetime as dt
import itertools
import io
import sys
import gym
import ray
import warnings
warnings.simplefilter("ignore")
# from sagemaker import get_execution_role
# role = get_execution_role()
from IPython.display import clear_output
from tqdm import tqdm
# https://github.com/tqdm/tqdm
"""
pbar = tqdm(["a", "b", "c", "d"])
for char in pbar:
time.sleep(0.25)
pbar.set_description("Processing %s" % char)
for i in tqdm(range(10)):
"""
def smooth_loss(loss, freq):
loss = arr(loss).copy()
return np.mean(loss.reshape(-1, freq), axis = -1)
from types import MethodType
import functools
from functools import reduce
#############################################################################
# Packages
import scipy as sp
import pandas as pd
from pandas import DataFrame as DF
# import statsmodels.api as sm # !pip install statsmodels
from matplotlib.pyplot import hist
import pickle
from scipy.stats import truncnorm
import matplotlib.pyplot as plt
####################################
# Random
import random
from random import seed as rseed
from numpy.random import seed as npseed
from numpy import absolute as np_abs
from numpy.random import normal as rnorm
from numpy.random import uniform as runi
from numpy.random import binomial as rbin
from numpy.random import poisson as rpoisson
from numpy.random import shuffle,randn, permutation # randn(d1,d2) is d1*d2 i.i.d N(0,1)
from numpy import squeeze
from numpy.linalg import solve
####################################
# Numpy
import numpy as np
from numpy import mean, var, std, median
from numpy import array as arr
from numpy import sqrt, log, cos, sin, exp, dot, diag, ones, identity, zeros, roll, multiply, stack, concatenate, transpose
from numpy import concatenate as v_add
from numpy.linalg import norm, inv
from numpy import apply_along_axis as apply
from numpy.random import multinomial, choice
####################################
# sklearn
import sklearn as sk
from sklearn import preprocessing as pre
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import r2_score
from sklearn.linear_model import LinearRegression as lm
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
from scipy.special import softmax
#############################################################################
import os
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
np.set_printoptions(precision = 4)
#############################################################################
import time
now = time.time
import smtplib, ssl
import datetime, pytz
def EST():
return datetime.datetime.now().astimezone(pytz.timezone('US/Eastern')).strftime("%H:%M, %m/%d")
#############################################################################
dash = "--------------------------------------"
DASH = "\n" + "--------------------------------------" + "\n"
Dash = "\n" + dash
dasH = dash + "\n"
#############################################################################
#%% utility funs
from multiprocessing import Pool
import multiprocessing
n_cores = multiprocessing.cpu_count()
def mute():
sys.stdout = open(os.devnull, 'w')
def fun(f, q_in, q_out):
while True:
i, x = q_in.get()
if i is None:
break
q_out.put((i, f(x)))
def parmap(f, X, nprocs = multiprocessing.cpu_count(), **args):#-2
q_in = multiprocessing.Queue(1)
q_out = multiprocessing.Queue()
def g(x):
return f(x, **args)
proc = [multiprocessing.Process(target=fun, args=(g, q_in, q_out))
for _ in range(nprocs)]
for p in proc:
p.daemon = True
p.start()
sent = [q_in.put((i, x)) for i, x in enumerate(X)]
[q_in.put((None, None)) for _ in range(nprocs)]
res = [q_out.get() for _ in range(len(sent))]
[p.join() for p in proc]
return [x for i, x in sorted(res)]
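# Minimal usage sketch (hypothetical function, illustrative only): parmap behaves like an
# ordered parallel map. Note it relies on fork-style multiprocessing, since the nested
# wrapper `g` is not picklable under spawn.
#   def square(x):
#       return x * x
#   parmap(square, range(8), nprocs=2)  # -> [0, 1, 4, 9, 16, 25, 36, 49]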
def setminus(A, B):
return [item for item in A if item not in B]
def listinlist2list(theList):
return [item for sublist in theList for item in sublist]
def if_exist(obj):
return obj in locals() or obj in globals()
def getSize(one_object):
print(one_object.memory_usage().sum() / 1024 ** 2, "MB")
# print(sys.getsizeof(one_object) // 1024, "MB")
def dump(file, path):
pickle.dump(file, open(path, "wb"))
def load(path):
return pickle.load(open(path, "rb"))
def get_MB(a):
MB = sys.getsizeof(a) / 1024 / 1024
return MB
def hstack_all_comb(array1, array2):
# array1 is on the left and also changes faster
res = np.hstack([
np.tile(array1, (array2.shape[0], 1))
, np.repeat(array2, array1.shape[0], axis=0)]
)
return res
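# Minimal usage sketch (hypothetical values): array1 cycles fastest, as noted above.
#   hstack_all_comb(np.array([[1], [2]]), np.array([[10], [20]]))
#   -> [[ 1, 10], [ 2, 10], [ 1, 20], [ 2, 20]]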
def quantile(a, p):
r = [a[0] for a in DF(a).quantile(p).values]
return np.round(r, 3)
def flatten(l):
# list of sublist -> list
return [item for sublist in l for item in sublist]
def change_rate(old_targets, new_targets, numpy = False):
if numpy:
diff = np.mean(abs(new_targets-old_targets)) / (np.mean(abs(old_targets))+1e-6)
else:
diff = abs(new_targets-old_targets).mean() / (abs(old_targets).mean()+1e-6)
return min(1.0, diff)
#############################################################################
# pd.options.display.max_rows = 10
# with open('pred_columns.txt', 'w') as filehandle:
# k = 0
# for listitem in list(a):
# filehandle.write('{} {}\n'.format(k, listitem))
# k += 1
def print_all(dat, column_only = True):
if column_only:
with pd.option_context('display.max_columns', None): # more options can be specified also
print(dat)
else:
with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also
print(dat)
def quantile(a):
return np.percentile(a, range(0,110,10))
#############################################################################
def unzip(path, zip_type = "tar_gz"):
if zip_type == "tar_gz":
import tarfile
tar = tarfile.open(path, "r:gz")
tar.extractall()
tar.close()
elif zip_type == "zip":
from zipfile import ZipFile
with ZipFile(path, 'r') as zipObj:
# Extract all the contents of zip file in current directory
zipObj.extractall()
# import shutil
# total, used, free = shutil.disk_usage("/")
# print("Total: %d GiB" % (total // (2**30)))
# print("Used: %d GiB" % (used // (2**30)))
# print("Free: %d GiB" % (free // (2**30)))
#############################################################################
# !pip install termcolor
from termcolor import colored, cprint
# https://pypi.org/project/termcolor/#description
def printR(theStr):
print(colored(theStr, 'red'))
def printG(theStr):
print(colored(theStr, 'green'))
def printB(theStr):
print(colored(theStr, 'blue'))
def sets_intersection(d):
return list(reduce(set.intersection, [set(item) for item in d ]))
def select_each_row(array, idx):
return np.take_along_axis(array, idx[:,None], axis=1)
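# Minimal usage sketch (hypothetical values): picks one entry per row by column index.
#   select_each_row(np.array([[1, 2], [3, 4]]), np.array([1, 0]))
#   -> [[2], [3]]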
def subtract_each_column(mat, col):
return (mat.transpose() - col).transpose()
def sample_split(L, N):
""" replay buffer?
"""
kf = KFold(n_splits=L)
kf.get_n_splits(zeros(N))
split_ind = {}
k = 0
for i, j in kf.split(range(N)):
split_ind[k] = {"train_ind" : i, "test_ind" : j}
k += 1
return split_ind
def row_repeat(mat, rep, full_block = False):
if full_block:
return np.tile(mat, (rep, 1))
else:
return
|
np.repeat(mat, rep, axis=0)
|
numpy.repeat
|
"""
Extract the annotations from PTS file
Author: <NAME>
License: MIT
Copyright: 2018-2019
"""
import rasterio
import re
import time,datetime
import argparse
from rasterio import mask,features,warp
import os
import os.path
import fiona
import numpy as np
import osgeo
from osgeo import gdal,osr
from shapely.geometry import shape,mapping
from shapely.geometry.polygon import LinearRing,Polygon
from mfuncshape import *
from PIL import Image
from m_util import sdmkdir, convertMbandstoRGB,sdsaveim
import pandas as pd
from shutil import copyfile
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
opt = parser.parse_args()
padding = 300
opt.root = '/gpfs/projects/LynchGroup/'
opt.resdir = '/gpfs/projects/LynchGroup/Penguin_workstation/data/Penguins' + '/TEST_PTS_MASK_PADDING_' + str(padding) + '/'
opt.A = opt.resdir + 'A/'
opt.B = opt.resdir + 'B/'
opt.tif_fold = opt.root + 'Orthoed/'
sdmkdir(opt.resdir)
sdmkdir(opt.A)
sdmkdir(opt.B)
opt.shape_dir = opt.root+ '/Annotated_shapefiles_PTS/'
files= []
for root,_,fnames in sorted(os.walk(opt.shape_dir)):
for fname in fnames:
if fname.endswith('tif'):
files.append(fname)
for file1 in files:
indx = file1.find('__')
file2 = file1[indx+2:]
print(file2)
match = re.search(r'\d{2}\D{3}\d{8}', file2).group(0)
date = '20'+match[0:2] + "%02d"%(time.strptime(match[2:5],'%b').tm_mon)+match[5:]
date = datetime.datetime.strptime(date, '%Y%m%d%H%M%S')
if date.year ==2017 or date.year==2018:
TIF1 = opt.tif_fold + str(date.year)+'/' + "%02d"%date.month +'/'+file2
else:
TIF1 = opt.tif_fold + str(date.year) +'/' +file2
TIF2 = opt.shape_dir + file1
Im1 = rasterio.open(TIF1)
Im2 = rasterio.open(TIF2)
print(Im1.meta)
print(Im2.meta)
out_meta = Im1.meta.copy()
out_meta.update({"count":out_meta["count"]+1
})
X = Im1.read()
print(X.shape)
GT = Im2.read()
GT = GT[0:1,:,:]
x1,y1 = np.where(GT[0,:,:]!=255)
maxx = np.max(x1) + padding
minx = np.min(x1) - padding
maxy = np.max(y1) + padding
miny = np.min(y1) - padding
im = X[:,minx:maxx,miny:maxy]
im = convertMbandstoRGB(im,file2)
mask = GT[:,minx:maxx,miny:maxy]
mask[mask!=255] = 1
mask[mask==255] = 0
mask[mask==1] = 255
print(im.shape)
print(mask.shape)
im =
|
np.transpose(im,(1,2,0))
|
numpy.transpose
|
# -*- coding: utf-8 -*-
import warnings
import numpy as np
from numpy.linalg import eigh, inv, norm, matrix_rank
import pandas as pd
from scipy.optimize import minimize
from statsmodels.tools.decorators import cache_readonly
from statsmodels.base.model import Model
from statsmodels.iolib import summary2
from statsmodels.graphics.utils import _import_mpl
from .factor_rotation import rotate_factors, promax
_opt_defaults = {'gtol': 1e-7}
def _check_args_1(endog, n_factor, corr, nobs):
msg = "Either endog or corr must be provided."
if endog is not None and corr is not None:
raise ValueError(msg)
if endog is None and corr is None:
warnings.warn('Both endog and corr are provided, ' +
'corr will be used for factor analysis.')
if n_factor <= 0:
raise ValueError('n_factor must be larger than 0! %d < 0' %
(n_factor))
if nobs is not None and endog is not None:
warnings.warn("nobs is ignored when endog is provided")
def _check_args_2(endog, n_factor, corr, nobs, k_endog):
if n_factor > k_endog:
raise ValueError('n_factor cannot be greater than the number'
' of variables! %d > %d' %
(n_factor, k_endog))
if np.max(np.abs(np.diag(corr) - 1)) > 1e-10:
raise ValueError("corr must be a correlation matrix")
if corr.shape[0] != corr.shape[1]:
raise ValueError('Correlation matrix corr must be a square '
'(rows %d != cols %d)' % corr.shape)
class Factor(Model):
"""
Factor analysis
Parameters
----------
endog : array_like
Variables in columns, observations in rows. May be `None` if
`corr` is not `None`.
n_factor : int
The number of factors to extract
corr : array_like
Directly specify the correlation matrix instead of estimating
it from `endog`. If provided, `endog` is not used for the
factor analysis, it may be used in post-estimation.
method : str
The method to extract factors, currently must be either 'pa'
for principal axis factor analysis or 'ml' for maximum
likelihood estimation.
smc : True or False
Whether or not to apply squared multiple correlations (method='pa')
endog_names : str
Names of endogenous variables. If specified, it will be used
instead of the column names in endog
nobs : int
The number of observations, not used if endog is present. Needs to
be provided for inference if endog is None.
missing : 'none', 'drop', or 'raise'
Missing value handling for endog, default is row-wise deletion 'drop'
If 'none', no nan checking is done. If 'drop', any observations with
nans are dropped. If 'raise', an error is raised.
Notes
-----
**Experimental**
Supported rotations: 'varimax', 'quartimax', 'biquartimax',
'equamax', 'oblimin', 'parsimax', 'parsimony', 'biquartimin',
'promax'
If method='ml', the factors are rotated to satisfy condition IC3
of Bai and Li (2012). This means that the scores have covariance
I, so the model for the covariance matrix is L * L' + diag(U),
where L are the loadings and U are the uniquenesses. In addition,
L' * diag(U)^{-1} L must be diagonal.
References
----------
.. [*] <NAME>. (2004). Exploratory Factor Analysis, Mathematical
Marketing. http://www.openaccesstexts.org/pdf/Quant_Chapter_11_efa.pdf
.. [*] <NAME>, <NAME> (2012). Statistical analysis of factor models of high
dimension. Annals of Statistics. https://arxiv.org/pdf/1205.6617.pdf
"""
def __init__(self, endog=None, n_factor=1, corr=None, method='pa',
smc=True, endog_names=None, nobs=None, missing='drop'):
_check_args_1(endog, n_factor, corr, nobs)
if endog is not None:
super(Factor, self).__init__(endog, exog=None, missing=missing)
endog = self.endog # after preprocessing like missing, asarray
k_endog = endog.shape[1]
nobs = endog.shape[0]
corr = self.corr = np.corrcoef(endog, rowvar=0)
elif corr is not None:
corr = self.corr = np.asarray(corr)
k_endog = self.corr.shape[0]
self.endog = None
else:
msg = "Either endog or corr must be provided."
raise ValueError(msg)
_check_args_2(endog, n_factor, corr, nobs, k_endog)
self.n_factor = n_factor
self.loadings = None
self.communality = None
self.method = method
self.smc = smc
self.nobs = nobs
self.method = method
self.corr = corr
self.k_endog = k_endog
if endog_names is None:
if hasattr(corr, 'index'):
endog_names = corr.index
if hasattr(corr, 'columns'):
endog_names = corr.columns
self.endog_names = endog_names
@property
def endog_names(self):
"""Names of endogenous variables"""
if self._endog_names is not None:
return self._endog_names
else:
if self.endog is not None:
return self.data.ynames
else:
d = 0
n = self.corr.shape[0] - 1
while n > 0:
d += 1
n //= 10
return [('var%0' + str(d) + 'd') % i
for i in range(self.corr.shape[0])]
@endog_names.setter
def endog_names(self, value):
# Check validity of endog_names:
if value is not None:
if len(value) != self.corr.shape[0]:
raise ValueError('The length of `endog_names` must '
'equal the number of variables.')
self._endog_names = np.asarray(value)
else:
self._endog_names = None
def fit(self, maxiter=50, tol=1e-8, start=None, opt_method='BFGS',
opt=None, em_iter=3):
"""
Estimate factor model parameters.
Parameters
----------
maxiter : int
Maximum number of iterations for iterative estimation algorithms
tol : float
Stopping criteria (error tolerance) for iterative estimation
algorithms
start : array_like
Starting values, currently only used for ML estimation
opt_method : str
Optimization method for ML estimation
opt : dict-like
Keyword arguments passed to optimizer, only used for ML estimation
em_iter : int
The number of EM iterations before starting gradient optimization,
only used for ML estimation.
Returns
-------
FactorResults
Results class instance.
"""
method = self.method.lower()
if method == 'pa':
return self._fit_pa(maxiter=maxiter, tol=tol)
elif method == 'ml':
return self._fit_ml(start, em_iter, opt_method, opt)
else:
msg = "Unknown factor extraction approach '%s'" % self.method
raise ValueError(msg)
def _fit_pa(self, maxiter=50, tol=1e-8):
"""
Extract factors using the iterative principal axis method
Parameters
----------
maxiter : int
Maximum number of iterations for communality estimation
tol : float
If `norm(communality - last_communality) < tolerance`,
estimation stops
Returns
-------
results : FactorResults instance
"""
R = self.corr.copy() # inplace modification below
# Parameter validation
self.n_comp = matrix_rank(R)
if self.n_factor > self.n_comp:
raise ValueError('n_factor must be smaller or equal to the rank'
' of endog! %d > %d' %
(self.n_factor, self.n_comp))
if maxiter <= 0:
raise ValueError('n_max_iter must be larger than 0! %d < 0' %
(maxiter))
if tol <= 0 or tol > 0.01:
raise ValueError('tolerance must be larger than 0 and smaller than'
' 0.01! Got %f instead' % (tol))
# Initial communality estimation
if self.smc:
c = 1 - 1 / np.diag(inv(R))
else:
c = np.ones(len(R))
# Iterative communality estimation
eigenvals = None
for i in range(maxiter):
# Get eigenvalues/eigenvectors of R with diag replaced by
# communality
for j in range(len(R)):
R[j, j] = c[j]
L, V = eigh(R, UPLO='U')
c_last = np.array(c)
ind = np.argsort(L)
ind = ind[::-1]
L = L[ind]
n_pos = (L > 0).sum()
V = V[:, ind]
eigenvals = np.array(L)
# Select eigenvectors with positive eigenvalues
n = np.min([n_pos, self.n_factor])
sL = np.diag(np.sqrt(L[:n]))
V = V[:, :n]
# Calculate new loadings and communality
A = V.dot(sL)
c = np.power(A, 2).sum(axis=1)
if norm(c_last - c) < tol:
break
self.eigenvals = eigenvals
self.communality = c
self.uniqueness = 1 - c
self.loadings = A
return FactorResults(self)
# Unpacks the model parameters from a flat vector, used for ML
# estimation. The first k_endog elements of par are the square
# roots of the uniquenesses. The remaining elements are the
# factor loadings, packed one factor at a time.
def _unpack(self, par):
return (par[0:self.k_endog]**2,
np.reshape(par[self.k_endog:], (-1, self.k_endog)).T)
# Packs the model parameters into a flat parameter, used for ML
# estimation.
def _pack(self, load, uniq):
return np.concatenate((np.sqrt(uniq), load.T.flat))
def loglike(self, par):
"""
Evaluate the log-likelihood function.
Parameters
----------
par : ndarray or tuple of 2 ndarray's
The model parameters, either a packed representation of
the model parameters or a 2-tuple containing a `k_endog x
n_factor` matrix of factor loadings and a `k_endog` vector
of uniquenesses.
Returns
-------
float
The value of the log-likelihood evaluated at par.
"""
if type(par) is np.ndarray:
uniq, load = self._unpack(par)
else:
load, uniq = par[0], par[1]
loadu = load / uniq[:, None]
lul = np.dot(load.T, loadu)
# log|GG' + S|
# Using matrix determinant lemma:
# |GG' + S| = |I + G'S^{-1}G|*|S|
lul.flat[::lul.shape[0]+1] += 1
_, ld = np.linalg.slogdet(lul)
v = np.sum(np.log(uniq)) + ld
# tr((GG' + S)^{-1}C)
# Using Sherman-Morrison-Woodbury
w = np.sum(1 / uniq)
b = np.dot(load.T, self.corr / uniq[:, None])
b = np.linalg.solve(lul, b)
b = np.dot(loadu, b)
w -= np.trace(b)
# Scaled log-likelihood
return -(v + w) / (2*self.k_endog)
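# For reference, the two identities used above, with loadings G and diagonal
# uniquenesses S = diag(uniq):
#   |GG' + S| = |I + G' S^{-1} G| * |S|                                (matrix determinant lemma)
#   (GG' + S)^{-1} = S^{-1} - S^{-1} G (I + G' S^{-1} G)^{-1} G' S^{-1}  (Sherman-Morrison-Woodbury)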
def score(self, par):
"""
Evaluate the score function (first derivative of loglike).
Parameters
----------
par : ndarray or tuple of 2 ndarray's
The model parameters, either a packed representation of
the model parameters or a 2-tuple containing a `k_endog x
n_factor` matrix of factor loadings and a `k_endog` vector
of uniquenesses.
Returns
-------
ndarray
The score function evaluated at par.
"""
if type(par) is np.ndarray:
uniq, load = self._unpack(par)
else:
load, uniq = par[0], par[1]
# Center term of SMW
loadu = load / uniq[:, None]
c = np.dot(load.T, loadu)
c.flat[::c.shape[0]+1] += 1
d = np.linalg.solve(c, load.T)
# Precompute these terms
lud = np.dot(loadu, d)
cu = (self.corr / uniq) / uniq[:, None]
r = np.dot(cu, load)
lul = np.dot(lud.T, load)
luz = np.dot(cu, lul)
# First term
du = 2*np.sqrt(uniq) * (1/uniq - (d * load.T).sum(0) / uniq**2)
dl = 2*(loadu - np.dot(lud, loadu))
# Second term
h = np.dot(lud, cu)
f = np.dot(h, lud.T)
du -= 2*np.sqrt(uniq) * (np.diag(cu) - 2*np.diag(h) + np.diag(f))
dl -= 2*r
dl += 2*np.dot(lud, r)
dl += 2*luz
dl -= 2*np.dot(lud, luz)
# Cannot use _pack because we are working with the square root
# uniquenesses directly.
return -np.concatenate((du, dl.T.flat)) / (2*self.k_endog)
# Maximum likelihood factor analysis.
def _fit_ml(self, start, em_iter, opt_method, opt):
"""estimate Factor model using Maximum Likelihood
"""
# Starting values
if start is None:
load, uniq = self._fit_ml_em(em_iter)
start = self._pack(load, uniq)
elif len(start) == 2:
if len(start[1]) != start[0].shape[0]:
msg = "Starting values have incompatible dimensions"
raise ValueError(msg)
start = self._pack(start[0], start[1])
else:
raise ValueError("Invalid starting values")
def nloglike(par):
return -self.loglike(par)
def nscore(par):
return -self.score(par)
# Do the optimization
if opt is None:
opt = _opt_defaults
r = minimize(nloglike, start, jac=nscore, method=opt_method,
options=opt)
if not r.success:
warnings.warn("Fitting did not converge")
par = r.x
uniq, load = self._unpack(par)
if uniq.min() < 1e-10:
warnings.warn("Some uniquenesses are nearly zero")
# Rotate solution to satisfy IC3 of Bai and Li
load = self._rotate(load, uniq)
self.uniqueness = uniq
self.communality = 1 - uniq
self.loadings = load
self.mle_retvals = r
return FactorResults(self)
def _fit_ml_em(self, iter):
"""estimate Factor model using EM algorithm
"""
# Starting values
|
np.random.seed(3427)
|
numpy.random.seed
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""generators --- Parameter generators
======================================
Generators are used to create particle variables. They may be fixed,
random, or produce a fixed sequence. There are two classes of
generators. The first class produces single values, useful for
producing particle ages, sizes, etc. The second is based on a 3D
geometry, designed for producing ejection velocity vectors.
Generators are Python iterators and can be used with the `next`
built-in function, or can be used in loops. An additional method
allows creating multiple values:
g = Generator()
v = next(g) # create one new value
v = g.next(N=100) # create 100 new values
# generate values until `g` is exhausted, which may be never.
for v in g:
print(v)
Classes
-------
Generator
CosineAngle
Delta
Grid
Log
Normal
Sequence
Uniform
UniformAngle
Vej
Isotropic
UniformLatitude
Sunward
Exceptions
----------
InvalidDistribution
"""
__all__ = [
'CosineAngle',
'Delta',
'Grid',
'Log',
'Normal',
'Sequence',
'Uniform',
'UniformAngle',
'Isotropic',
'UniformLatitude',
'Sunward']
from abc import ABC, ABCMeta, abstractmethod
import numpy as np
class InvalidDistribution(Exception):
pass
class Generator(ABC):
"""Abstract base class for CometSuite particle generators."""
def __iter__(self):
return self
@abstractmethod
def __min__(self):
pass
@abstractmethod
def __max__(self):
pass
def __next__(self):
return self.next(1)
@abstractmethod
def next(self, N=1):
"""The next `N` values.
Parameters
----------
N : int, optional
`N > 0`.
"""
pass
@abstractmethod
def reset(self):
"""Reset to initial state.
Especially useful for generators that have a prescribed
sequence.
"""
pass
class CosineAngle(Generator):
"""Polar angle variate for a solid angle distrubution proportional to `cos`.
Picked from a distribution such that the flux through a solid
angle at theta is proportional to `cos(theta)`.
Only valid for `theta <= pi / 2`.
Parameters
----------
x0, x1 : float, optional
Minimum and maximum values, 0 <= x0 <= x1 <= pi / 2. [radians]
"""
def __init__(self, x0=0, x1=np.pi / 2):
assert x0 <= x1
assert x0 >= 0
assert x1 <= np.pi / 2
self.x0 = x0
self.x1 = x1
def __str__(self):
return "CosineAngle(x0={}, x1={})".format(self.x0, self.x1)
def __min__(self):
return self.x0
def __max__(self):
return self.x1
def next(self, N=1):
from numpy.random import rand
u = rand(N)
u = np.arccos(np.sqrt((1 - u) * np.cos(self.x0)**2
+ u * np.cos(self.x1)**2))
return u[0] if N == 1 else u
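# Inverse-CDF sampling: for flux proportional to cos(theta) over solid angle
# sin(theta) dtheta, the CDF is proportional to cos(x0)**2 - cos(theta)**2,
# which inverts to the arccos(sqrt(...)) expression above.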
def reset(self):
# nothing to be done
pass
class Delta(Generator):
"""A "random" variate pick from the delta function distribution.
Parameters
----------
x0 : float, optional
The location of the delta function.
Returns
-------
x : float
Always returns `x0`.
"""
def __init__(self, x0=0):
self.x0 = x0
def __str__(self):
return "Delta(x0={})".format(self.x0)
def __min__(self):
return self.x0
def __max__(self):
return self.x0
def next(self, N=1):
if N == 1:
return self.x0
else:
return np.repeat(self.x0, N)
def reset(self):
# nothing to be done
pass
class Grid(Generator):
"""Variate picked from a uniform grid.
Parameters
----------
x0 : float
The start value of the sequence.
x1 : float
The end value of the sequence.
num : int
The number of samples to generate.
endpoint : bool, optional
If `True`, `x1` is the last sample.
log : bool, optional
Set to `True` if `x0`, `x1`, and the spacing are in log space.
cycle : int or float('inf'), optional
Cycle over the sequence `cycle` times. Set to `inf` to
infinitely cycle over the sequence.
repeat : int, optional
Repeat each element `repeat` times.
Returns
-------
x : float
The next sequence value.
"""
def __init__(self, x0, x1, num, endpoint=True, log=False,
cycle=1, repeat=1):
self.args = (x0, x1, num)
self.kwargs = dict(endpoint=endpoint, log=log, cycle=cycle,
repeat=repeat)
if log:
seq = np.logspace(x0, x1, num, endpoint=endpoint)
else:
seq = np.linspace(x0, x1, num, endpoint=endpoint)
self._repeat = repeat
if repeat > 1:
self.seq = np.repeat(seq, repeat)
else:
self.seq = np.array(seq)
self.cycle = cycle
self.i = -1
def __str__(self):
return ("Grid({args[0]}, {args[1]}, {args[2]}, endpoint={kwargs[endpoint]}, log={kwargs[log]}, cycle={kwargs[cycle]}, repeat={kwargs[repeat]})"
.format(args=self.args, kwargs=self.kwargs))
def __min__(self):
return min(self.args[:2])
def __max__(self):
return max(self.args[:2])
def __next__(self):
self.i += 1
if self.i >= len(self.seq):
self.i = 0
self.cycle -= 1
if self.cycle <= 0:
raise StopIteration
return self.seq[self.i]
def next(self, N=1):
x = np.array([next(self) for i in range(N)])
return x[0] if N == 1 else x
def reset(self):
self.__init__(*self.args, **self.kwargs)
class Log(Generator):
"""Random variate with the distribution dn/dlog ~ 1.
Base 10.
Parameters
----------
x0, x1 : float, optional
`log10(min_val)` and `log10(max_val)`.
"""
def __init__(self, x0=0, x1=1):
self.x0 = x0
self.x1 = x1
def __str__(self):
return "Log(x0={}, x1={})".format(self.x0, self.x1)
def __min__(self):
return 10**self.x0
def __max__(self):
return 10**self.x1
def next(self, N=1):
from numpy.random import rand
x = np.exp((rand(N) * (self.x1 - self.x0) + self.x0)
* 2.3025850929940459)
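# 2.3025850929940459 = ln(10), so this is equivalent to 10 ** (u * (x1 - x0) + x0)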
return x[0] if N == 1 else x
def reset(self):
# nothing to be done
pass
class Normal(Generator):
"""Normally distributed random variate.
Parameters
----------
mu : float, optional
The center of the distribution.
sigma : float, optional
The width of the distribution.
x0, x1 : float, optional
Minimum and maximum values. The generator is not efficient for
`x0` similar to `x1`.
Returns
-------
x : float
The random variate.
"""
def __init__(self, mu=0, sigma=1, x0=-float('inf'), x1=float('inf')):
self.mu = mu
self.sigma = sigma
assert x0 < x1
self.x0 = x0
self.x1 = x1
def __str__(self):
return "Normal(mu={}, sigma={}, x0={}, x1={})".format(
self.mu, self.sigma, self.x0, self.x1)
def __min__(self):
return self.x0
def __max__(self):
return self.x1
def __next__(self):
from numpy.random import randn
for i in range(10000):
u = randn(1)[0] * self.sigma + self.mu
if (u >= self.x0) and (u <= self.x1):
break
else:
raise ValueError("Variate limits are too restrictive:"
" no good values in 10,000 iterations.")
return u
def next(self, N=1):
x = np.array([next(self) for i in range(N)])
return x[0] if N == 1 else x
def reset(self):
# nothing to be done
pass
class Sequence(Generator):
"""Variate picked from a sequence.
Parameters
----------
seq : array
The sequence to iterate over.
cycle : int or float('inf'), optional
Cycle over the sequence `cycle` times. Set to `inf` to
infinitely cycle over the sequence.
repeat : int, optional
Repeat each element `repeat` times.
Returns
-------
x : float
The next sequence value.
"""
def __init__(self, seq, cycle=1, repeat=1):
self._seq = seq
self._repeat = repeat
if repeat > 1:
self.seq = np.repeat(seq, repeat)
else:
self.seq = np.array(seq)
self.cycle = cycle
self.i = -1
def __str__(self):
return "Sequence({}, cycle={}, repeat={})".format(
np.array2string(self._seq, max_line_width=32768, separator=','),
self.cycle, self._repeat)
def __min__(self):
return min(self._seq)
def __max__(self):
return max(self._seq)
def __next__(self):
self.i += 1
if self.i >= len(self.seq):
self.i = 0
self.cycle -= 1
if self.cycle <= 0:
raise StopIteration
return self.seq[self.i]
def next(self, N=1):
x = np.array([next(self) for i in range(N)])
return x[0] if N == 1 else x
def reset(self):
Sequence.__init__(self, self._seq, cycle=self.cycle,
repeat=self._repeat)
class Uniform(Generator):
"""Uniformly distributed random variate.
Parameters
----------
x0, x1 : float, optional
Minimum and maximum values.
Returns
-------
x : float
The random variate.
"""
def __init__(self, x0=0, x1=1):
assert x0 <= x1
self.x0 = x0
self.x1 = x1
def __str__(self):
return "Uniform(x0={}, x1={})".format(self.x0, self.x1)
def __min__(self):
return self.x0
def __max__(self):
return self.x1
def next(self, N=1):
from numpy.random import rand
x = rand(N) * (self.x1 - self.x0) + self.x0
return x[0] if N == 1 else x
def reset(self):
# nothing to be done
pass
class UniformAngle(Generator):
"""Polar angle variate for a uniform solid angle distrubution.
Picked from a distribution such that the flux through a solid
angle at theta is proportional to 1.
Parameters
----------
x0, x1 : float, optional
Minimum and maximum values, 0 <= x0 <= pi. [radians]
Returns
-------
x : float
Random angle. [radians]
"""
def __init__(self, x0=0, x1=np.pi):
assert x0 <= x1
assert 0 <= x0
assert x1 <= np.pi
self.x0 = x0
self.x1 = x1
def __str__(self):
return "UniformAngle(x0={}, x1={})".format(self.x0, self.x1)
def __min__(self):
return self.x0
def __max__(self):
return self.x1
def next(self, N=1):
from numpy.random import rand
u = rand(N)
u = np.arccos((1 - u) * np.cos(self.x0) + u * np.cos(self.x1))
return u[0] if N == 1 else u
def reset(self):
# nothing to be done
pass
class Vej(Generator, metaclass=ABCMeta):
"""Abstract base class for ejection velocity generators.
Parameters
----------
pole : array, optional
The pole in Ecliptic coordinates, angular (lambda, beta) [rad]
or rectangular (x, y, z). The Vernal equinox will be
arbitrarily defined.
body_basis : array, optional
Nx3 array of x, y, and z unit vectors defining the
planetocentric coordinate system, in Ecliptic rectangular
coordinates.
w : float, optional
Full opening angle of the emission. [radians].
distribution : string, optional
The kind of distribution to use when `w` is provided:
'uniformangle': uniformly distributed in solid angle
'normal': Gaussian distribution with `w` as the FWHM.
theta_dist, phi_dist : Generator
Specific polar (`theta`) and azimuthal (`phi`) angle
distributions.
Notes
-----
If `pole` is provided, the Vernal equinox and first solstice will
be arbitrarily derived.
The vectors in `body_basis` are:
`body_basis[0]` (x): the Vernal equinox
`body_basis[1]` (y): the first solstice
`body_basis[2]` (z): the pole
Only one of `pole` or `body_basis` may be provided.
If `w` is provided, `theta_dist` and `phi_dist` are ignored.
`min()` and `max()` operators return the respective limits from
`phi_dist` and `theta_dist`.
Attributes
----------
theta_dist : Polar angle generator
phi_dist : Azimuthal angle generator
body_basis : 3x3 array, as described above
pole : polar vector
vernal_eq : Vernal equinox vector
solstice : First solstice vector
Methods
-------
axis : Axis of symmetry for the ejection cone.
next : New ejection velocity direction.
origin : Planetographic coordinates of a vector.
Static methods
-------------
pole2basis : A helper function for defining `body_basis`.
..todo: Incorporate nucleus rotation?
"""
def __init__(self, pole=None, body_basis=None, w=None,
distribution='uniformangle', theta_dist=None, phi_dist=None):
from .state import State
if body_basis is None:
if pole is None:
body_basis = np.array(((1, 0, 0), (0, 1, 0), (0, 0, 1)), float)
else:
body_basis = self.pole2basis(pole)
self.body_basis = body_basis
self._w = w
self._distribution = distribution
if w is not None:
self.phi_dist = Uniform(x0=0, x1=2 * np.pi)
if distribution.lower() == 'uniformangle':
self.theta_dist = UniformAngle(x0=0, x1=w / 2.0)
elif distribution.lower() == 'normal':
self.theta_dist = Normal(x0=0, mu=0, sigma=w / 2.35)
else:
raise InvalidDistribution("Only 'UniformAngle' and 'Normal'"
" are implemented for w != None.")
else:
if theta_dist is None:
self.theta_dist = Delta(0)
else:
self.theta_dist = theta_dist
if phi_dist is None:
self.phi_dist = Delta(0)
else:
self.phi_dist = phi_dist
def __min__(self):
return min(self.phi_dist), min(self.theta_dist)
def __max__(self):
return max(self.phi_dist), max(self.theta_dist)
@abstractmethod
def axis(self, init):
"""The axis of symmetry.
Parameters
----------
init : State or array of State
The state(s) of the parent object (comet) at the time(s) of
ejection.
Returns
-------
a : ndarray
Axis/axes of symmetry, shape (N, 3), where N is the number
of initial states provided.
"""
pass
@property
def vernal_eq(self):
return self.body_basis[0]
@property
def solstice(self):
return self.body_basis[1]
@property
def pole(self):
return self.body_basis[2]
@staticmethod
def pole2basis(pole, vernal_eq=None):
"""Planetographic basis vectors from pole.
Parameters
----------
pole : array-like
Ecliptic coordinates of the pole, may be angular (lambda,
beta) in radians, or rectangular (x, y, z).
vernal_eq : array-like, optional
Ecliptic coordinates of the Vernal equinox, same format as
`pole`. If `None`, then the VE will be generated from `y ×
pole`, unless `pole == y`, in which case VE will be the
x-axis.
Returns
-------
body_basis : ndarray
The basis vectors (3x3 array) for planetographic
coordinates: `body_basis[0]` (x) is the Vernal equinox,
`body_basis[1]` (y) is the first solstice, and
`body_basis[2]` (z) is the pole.
"""
from .util import mhat, lb2xyz
z = pole if len(pole) == 3 else lb2xyz(pole)
z = mhat(z)[1]
if vernal_eq is None:
if np.allclose(z, np.array((0, 1.0, 0))):
# if the pole is the y-axis, VE should be x-axis
x = np.array((1.0, 0, 0))
else:
# use y-axis × pole
x = mhat(np.cross((0, 1.0, 0), z))[1]
else:
x = vernal_eq if len(vernal_eq) == 3 else lb2xyz(vernal_eq)
c = np.dot(x, z)
assert np.isclose(c, 0), 'Pole and vernal equinox must be perpendicular to each other, angle is {} rad'.format(np.arccos(c))
y = mhat(np.cross(z, x))[1]
return np.vstack((x, y, z))
def origin(self, v):
"""Planetographic longitude and latitude of the velocity vector.
Parameters
----------
v : array-like
The vector to analyze, shape `(3,)` or `(N, 3)`.
Returns
-------
lam : float or ndarray
Longitude. [rad]
bet : float or ndarray
Latitude. [rad]
"""
p = (self.body_basis * v[:, np.newaxis]).sum(2)
origin = np.c_[
np.arctan2(p[:, 1], p[:, 0]),
np.arctan2(p[:, 2], np.sqrt(p[:, 0]**2 + p[:, 1]**2))
]
return origin[:, 0], origin[:, 1]
def next(self, init, N=1):
"""New ejection velocity direction(s).
If the axis of symmetry is [0, 0, 0], the returned velocity
will also be [0, 0, 0].
Parameters
----------
init : State or array of States
The state(s) of the parent object (comet) at the time(s) of
ejection. If an array, it must be of length 1 or `N`.
N : int, optional
The number of velocities to generate.
Returns
-------
vhat : ndarray
Ejection velocity direction, shape `(N, 3)`.
"""
from . import util
if np.iterable(init):
assert len(init) in (1, N), "If `init` is an array, it must have length 1 or `N`."
# choose theta and phi, define radial vector, all w.r.t. axis
# of symmetry
theta = np.pi / 2 - self.theta_dist.next(N)
phi = self.phi_dist.next(N)
r = np.c_[np.cos(theta) * np.cos(phi),
np.cos(theta) * np.sin(phi),
np.sin(theta)]
# define axis of symmetry
axis = self.axis(init)
# if axis is [0, 0, 0], return [0, 0, 0]
if np.allclose(axis, 0):
return np.zeros((N, 3))
# rotate `r` from axis of symmetry coords to Ecliptic coords
v = util.vector_rotate(r, [0, 0, 1], axis)
return v
def reset(self):
self.theta_dist.reset()
self.phi_dist.reset()
class Isotropic(Vej):
"""Isotropic emission.
The axis of symmetry is arbitrary.
"""
def __init__(self):
Vej.__init__(self, w=2 * np.pi, distribution='UniformAngle')
self._axis = np.array([[1.0, 0.0, 0.0]])
def axis(self, init):
return self._axis
def __str__(self):
return "Isotropic()"
i = Vej.__doc__.find('Parameters')
__doc__ += Vej.__doc__[i:]
del i
class UniformLatitude(Vej):
def __init__(self, lrange, pole=None, body_basis=None):
"""Uniform emission from a range of latitudes.
Vectors are uniform in solid angle.
The axis of symmetry is the pole.
Parameters
----------
lrange : array-like
Latitude range, between -pi/2 and pi/2. [radians]
pole : array, optional
The pole in Ecliptic coordinates, angular (lambda, beta) or
rectangular (x, y, z).
body_basis : array, optional
Shape `(N, 3)` array of x, y, and z unit vectors defining
the planetocentric coordinate system, in Ecliptic
rectangular coordinates.
"""
phi_dist = Uniform(x0=0, x1=2 * np.pi)
th = np.pi / 2 -
|
np.array(lrange)
|
numpy.array
|
'''
@Description: The utils file
@Author: xieydd
@Date: 2019-08-13 14:15:23
@LastEditTime: 2019-09-24 10:55:54
@LastEditors: Please set LastEditors
'''
import logging
import numpy as np
import torchvision.datasets as dset
import os
import torch
import shutil
import csv
import re
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.nn.functional as F
'''
@description: count the params size
@param pytorch model
@return: MB
'''
def param_size(model):
""" Compute parameter size in MB """
n_params = sum(
np.prod(v.size()) for k, v in model.named_parameters() if not k.startswith('aux_head'))
return n_params / 1024. / 1024
def count_conv_flop(layer, x):
out_h = int(x.size()[2] / layer.stride[0])
out_w = int(x.size()[3] / layer.stride[1])
delta_ops = layer.in_channels * layer.out_channels * layer.kernel_size[0] * layer.kernel_size[1] * \
out_h * out_w / layer.groups
return delta_ops
def shuffle_layer(x, groups):
batchsize, num_channels, height, width = x.data.size()
channels_per_group = num_channels // groups
# reshape
x = x.view(batchsize, groups, channels_per_group, height, width)
# transpose
x = torch.transpose(x, 1, 2).contiguous()
# flatten
x = x.view(batchsize, -1, height, width)
return x
def _data_transforms_cifar10(args):
CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length))
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
return train_transform, valid_transform
def count_parameters_in_MB(model):
return np.sum(np.prod(v.size()) for name, v in model.named_parameters() if "auxiliary" not in name)/1e6
def save(model, model_path):
torch.save(model.state_dict(), model_path)
def save_checkpoint_spos(state, iters, path,is_best=False,tag=''):
model_path = path + '/models'
if not os.path.exists(model_path):
os.makedirs(model_path)
filename = os.path.join(
model_path + '/{}checkpoint-{:06}.pth.tar'.format(tag, iters))
torch.save(state, filename)
if is_best:
latestfilename = os.path.join(
model_path + '/{}best.pth.tar'.format(tag))
torch.save(state, latestfilename)
def load_checkpoint_spos(model, path):
checkpoint = torch.load(path)
model.load_state_dict(checkpoint['state_dict'], strict=True)
w_optimizer = checkpoint['w_optimizer']
print('load from checkpoint')
return w_optimizer
def get_lastest_model(path):
model_path = path + '/models/'
if not os.path.exists(model_path):
os.mkdir(model_path)
model_list = os.listdir(model_path)
if model_list == []:
return None, 0
model_list.sort()
lastest_model = model_list[-1]
iters = re.findall(r'\d+', lastest_model)
return model_path + lastest_model, int(iters[0])
def load(model, model_path):
model.load_state_dict(torch.load(model_path))
def drop_path(x, drop_prob):
if drop_prob > 0.:
keep_prob = 1.-drop_prob
mask = Variable(torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob))
x.div_(keep_prob)
x.mul_(mask)
return x
def create_exp_dir(path, scripts_to_save=None):
if not os.path.exists(path):
os.mkdir(path)
print('Experiment dir : {}'.format(path))
if scripts_to_save is not None:
os.mkdir(os.path.join(path, 'scripts'))
for script in scripts_to_save:
dst_file = os.path.join(path, 'scripts', os.path.basename(script))
shutil.copyfile(script, dst_file)
def time(seconds):
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
print('Using {:d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)))
def rescale(alpha, alpha_new, selecteds):
result = list()
for i in range(len(alpha)):
a = alpha[i]
a_new = alpha_new[i]
selected = selecteds[i]
i_op_0 = int(selected[0])
i_op_1 = int(selected[1])
old_p = F.softmax(a, dim=-1)
new_p = F.softmax(a_new, dim=-1)
old_sum = old_p[i_op_0] + old_p[i_op_1]
new_sum = new_p[i_op_0] + new_p[i_op_1]
ratio = old_sum / new_sum
# rescaled probabilties such that sum is same as before
p_r_0 = ratio * new_p[i_op_0]
p_r_1 = ratio * new_p[i_op_1]
new_sum_a = sum([torch.exp(a_new[i]) for i in range(len(a)) if i not in [i_op_0, i_op_1]])
new_a_0 = torch.log((new_sum_a * (p_r_0 + ((p_r_0 * p_r_1)/(1-p_r_1)))) / (
1 - p_r_0 - ((p_r_0*p_r_1) / (1-p_r_1))))
new_a_1 = torch.log((new_sum_a * (p_r_1 + ((p_r_1 * p_r_0)/(1-p_r_0)))) / (
1 - p_r_1 - ((p_r_1*p_r_0) / (1-p_r_0))))
a_new.data[i_op_0] = new_a_0
a_new.data[i_op_1] = new_a_1
result.append(a_new)
return result
def binarize(alpha, probability, select=1):
selected = torch.multinomial(probability, select, replacement=False)
selected = sorted(selected)
print(selected)
alpha = [alpha[i] for i in selected]
return selected
def weights_init(m, deepth=0, max_depth=2):
if deepth > max_depth:
return
if isinstance(m, torch.nn.Conv2d):
torch.nn.init.kaiming_uniform_(m.weight.data)
if m.bias is not None:
torch.nn.init.constant_(m.bias.data, 0)
elif isinstance(m, torch.nn.Linear):
m.weight.data.normal_(0, 0.01)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, torch.nn.BatchNorm2d):
return
elif isinstance(m, torch.nn.ReLU):
return
elif isinstance(m, torch.nn.Module):
deepth += 1
for m_ in m.modules():
weights_init(m_, deepth)
else:
raise ValueError("%s is unk" % m.__class__.__name__)
def check_tensor_in_list(atensor, alist):
if any([(atensor == t_).all() for t_ in alist if atensor.shape == t_.shape]):
return True
return False
'''
@description: Sets the learning rate to the initial LR decayed by 10 every 30 epochs
'''
def adjust_learning_rate(optimizer, epoch, args):
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Horovod: using `lr = base_lr * hvd.size()` from the very beginning leads to worse final
# accuracy. Scale the learning rate `lr = base_lr` ---> `lr = base_lr * hvd.size()` during
# the first five epochs. See https://arxiv.org/abs/1706.02677 for details.
# After the warmup reduce learning rate by 10 on the 30th, 60th and 80th epochs.
def adjust_learning_rate_hvd(epoch, batch_idx, args, train_loader, size, optimizer):
if epoch < args.warmup_epochs:
epoch += float(batch_idx + 1) / len(train_loader)
lr_adj = 1. / size * (epoch * (size - 1) / args.warmup_epochs + 1)
elif epoch < 30:
lr_adj = 1.
elif epoch < 60:
lr_adj = 1e-1
elif epoch < 80:
lr_adj = 1e-2
else:
lr_adj = 1e-3
for param_group in optimizer.param_groups:
param_group['lr'] = args.base_lr * size * args.batches_per_allreduce * lr_adj
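# Worked example (hypothetical values): with size=8 workers, warmup_epochs=5 and
# batches_per_allreduce=1, the multiplier size * lr_adj ramps linearly from 1 at
# epoch 0 to 8 at epoch 5; e.g. at epoch 2.5, lr_adj = (2.5 * 7 / 5 + 1) / 8 = 0.5625,
# so lr = base_lr * 8 * 0.5625 = 4.5 * base_lr.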
def get_logger(file_path):
""" Make python logger """
# [!] Since tensorboardX use default logger (e.g. logging.info()), we should use custom logger
logger = logging.getLogger('Awesome model')
log_format = '%(asctime)s | %(message)s'
formatter = logging.Formatter(log_format, datefmt='%m/%d %I:%M:%S %p')
file_handler = logging.FileHandler(file_path)
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
logger.setLevel(logging.INFO)
return logger
def get_data(dataset, data_path, cutout_length, validation):
""" Get torchvision dataset """
dataset = dataset.lower()
if dataset == 'cifar10':
dset_cls = dset.CIFAR10
n_classes = 10
elif dataset == 'mnist':
dset_cls = dset.MNIST
n_classes = 10
elif dataset == 'fashionmnist':
dset_cls = dset.FashionMNIST
n_classes = 10
else:
raise ValueError(dataset)
trn_transform, val_transform = preproc.data_transforms(dataset, cutout_length)
trn_data = dset_cls(root=data_path+"/"+dataset, train=True, download=False, transform=trn_transform)
# assuming shape is NHW or NHWC
shape = trn_data.train_data.shape
input_channels = 3 if len(shape) == 4 else 1
assert shape[1] == shape[2], "not expected shape = {}".format(shape)
input_size = shape[1]
ret = [input_size, input_channels, n_classes, trn_data]
if validation: # append validation data
ret.append(dset_cls(root=data_path+"/"+dataset, train=False, download=False, transform=val_transform))
return ret
'''
@description: get the top k accuracy, default is top1
'''
def accuracy(output, target, topk=(1,)):
""" Computes the precision@k for the specified values of k """
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
# one-hot case
if target.ndimension() > 1:
target = target.max(1)[1]
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(1.0 / batch_size))
return res
'''
@description: store the checkpoint, and will save the best named best.pth.tar
'''
def save_checkpoint(state, ckpt_dir, is_best=False):
filename = os.path.join(ckpt_dir, 'checkpoint.pth.tar')
torch.save(state, filename)
if is_best:
best_filename = os.path.join(ckpt_dir, 'best.pth.tar')
shutil.copyfile(filename, best_filename)
class AverageMeter():
""" Computes and stores the average and current value """
def __init__(self):
self.reset()
def reset(self):
""" Reset all statistics """
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
""" Update statistics """
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class AverageMeters(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, *meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def print(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
#num_digits = len(str(num_batches // 1))
num_digits = len(str(num_batches))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
# Horovod: average metrics from distributed training.
class Metric(object):
def __init__(self, name, hvd):
self.name = name
self.hvd = hvd
self.sum = torch.tensor(0.)
self.n = torch.tensor(0.)
def update(self, val):
self.sum += self.hvd.allreduce(val.detach().cpu(), name=self.name)
self.n += 1
@property
def avg(self):
return self.sum / self.n
'''
@description: EarlyStopping via loss and save the best result
'''
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
def __init__(self, patience=7, verbose=False, delta=0):
"""
Args:
patience (int): How long to wait after last time validation loss improved.
Default: 7
verbose (bool): If True, prints a message for each validation loss improvement.
Default: False
delta (float): Minimum change in the monitored quantity to qualify as an improvement.
Default: 0
"""
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
self.val_loss_min = np.Inf
self.delta = delta
def __call__(self, val_loss, model, ckpt_dir):
score = -val_loss
if self.best_score is None:
self.best_score = score
self.save_checkpoint(val_loss, model, ckpt_dir, True)
elif score < self.best_score - self.delta:
self.counter += 1
print('EarlyStopping counter: {0} out of {1}'.format(self.counter, self.patience))
if self.counter >= self.patience:
self.early_stop = True
print('The best loss is {0}'.format(self.best_score))
else:
self.best_score = score
self.save_checkpoint(val_loss, model, ckpt_dir, True)
self.counter = 0
def save_checkpoint(self, val_loss, model, ckpt_dir,is_best=False):
'''Saves model when validation loss decrease.'''
if self.verbose:
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(self.val_loss_min, val_loss))
filename = os.path.join(ckpt_dir, 'checkpoint.pth.tar')
torch.save(model, filename)
if is_best:
best_filename = os.path.join(ckpt_dir, 'best.pth.tar')
shutil.copyfile(filename, best_filename)
self.val_loss_min = val_loss
class data_prefetcher_imagenet():
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
self.mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1)
self.std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1)
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.mean = self.mean.half()
# self.std = self.std.half()
self.preload()
def preload(self):
try:
self.next_input, self.next_target = next(self.loader)
except StopIteration:
self.next_input = None
self.next_target = None
return
with torch.cuda.stream(self.stream):
self.next_input = self.next_input.cuda(non_blocking=True)
self.next_target = self.next_target.cuda(non_blocking=True)
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.next_input = self.next_input.half()
# else:
self.next_input = self.next_input.float()
self.next_input = self.next_input.sub_(self.mean).div_(self.std)
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
target = self.next_target
self.preload()
return input, target
class Cutout(object):
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 =
|
np.clip(y + self.length // 2, 0, h)
|
numpy.clip
|
"""
`retain` is used to implement higher-order derivatives. It is implemented separately
for performance, since higher-order derivatives are rarely needed.
Note that higher-order derivatives are not implemented for every operator.
"""
from typing import *
import numpy as np
from numpy import ndarray
from . import _tensor
from .base import ComputeNode, ComputeNodeDetached
from . import functional
from .dtypes import float32
if TYPE_CHECKING:
from ._tensor import Tensor
def from_numpy(data: ndarray, requires_grad=False, is_leaf=False):
return _tensor.Tensor(data, requires_grad=requires_grad, is_leaf=is_leaf)
def full_like(a: "Tensor", full_val: Union[int, float], dtype=None, requires_grad=False) -> "Tensor":
data = np.full_like(a.data, full_val, dtype or a.dtype)
return _tensor.Tensor(data, requires_grad=requires_grad)
def full(shape, fill_val, dtype=float32, requires_grad=False) -> "Tensor":
data = np.full(shape, fill_val, dtype)
return _tensor.Tensor(data, requires_grad=requires_grad)
def zeros(shape, dtype=float32, requires_grad=False) -> "Tensor":
return _tensor.Tensor(np.zeros(shape, dtype), requires_grad=requires_grad)
def zeros_like(a: 'Tensor', dtype=None, requires_grad=False) -> "Tensor":
return _tensor.Tensor(np.zeros_like(a.data, dtype or a.dtype), requires_grad=requires_grad)
def ones(shape, dtype=float32, requires_grad=False):
return _tensor.Tensor(np.ones(shape, dtype), requires_grad=requires_grad)
def ones_like(a: "Tensor", dtype=None, requires_grad=False):
return _tensor.Tensor(np.ones_like(a.data, dtype or a.dtype), requires_grad=requires_grad)
def apply_broadcast_a(sa, sb, grad_a):
if sa == sb:
return grad_a
max_dim = max(len(sa), len(sb))
new_sa = (1,) * (max_dim - len(sa)) + sa
new_sb = (1,) * (max_dim - len(sb)) + sb
for i, (da, db) in enumerate(zip(new_sa, new_sb)):
if db != da == 1:
grad_a = np.expand_dims(grad_a.sum(axis=i), i)
return grad_a.reshape(sa)
def apply_broadcast_b(sa, sb, grad_b):
if sa == sb:
return grad_b
max_dim = max(len(sa), len(sb))
new_sa = (1,) * (max_dim - len(sa)) + sa
new_sb = (1,) * (max_dim - len(sb)) + sb
for i, (da, db) in enumerate(zip(new_sa, new_sb)):
if da != db == 1:
grad_b = np.expand_dims(grad_b.sum(axis=i), i)
return grad_b.reshape(sb)
def apply_broadcast(sa, sb, grad_a, grad_b):
if sa == sb:
return grad_a, grad_b
max_dim = max(len(sa), len(sb))
new_sa = (1,) * (max_dim - len(sa)) + sa
new_sb = (1,) * (max_dim - len(sb)) + sb
for i, (da, db) in enumerate(zip(new_sa, new_sb)):
if db != da == 1:
            # We sum here because broadcasting creates n paths; by the distributive law,
            # after factoring out the common part, the gradient at the current node is the
            # sum of those n paths, e.g.
            # 1 x 2 x 3 x c1 + 1 x 2 x 3 x c2 + 1 x 2 x 3 x c3 ==> 1 x 2 x 3 x (c1 + c2 + c3)
grad_a = np.expand_dims(grad_a.sum(i), i)
if da != db == 1:
grad_b = np.expand_dims(grad_b.sum(i), i)
return grad_a.reshape(sa), grad_b.reshape(sb)
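# A small illustrative check (an assumption for clarity, not part of the library) of the
# reduction performed above: if a has shape (1, 3) and b has shape (2, 3), every element
# of a feeds two output elements, so its gradient is summed over the broadcast axis.
#
#   dy = np.ones((2, 3))
#   ga, gb = apply_broadcast((1, 3), (2, 3), dy, dy)
#   # ga.shape == (1, 3) with every entry equal to 2.0; gb is dy unchanged, shape (2, 3)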
def apply_broadcast_a_tensor(sa, sb, grad_a: "Tensor"):
if sa == sb:
return grad_a
max_dim = max(len(sa), len(sb))
new_sa = (1,) * (max_dim - len(sa)) + sa
new_sb = (1,) * (max_dim - len(sb)) + sb
for i, (da, db) in enumerate(zip(new_sa, new_sb)):
if db != da == 1:
# grad_a = np.expand_dims(grad_a.sum(axis=i), i)
grad_a = UnSqueeze(grad_a.sum(dim=i), i)
return grad_a.reshape(sa)
def apply_broadcast_b_tensor(sa, sb, grad_b: "Tensor"):
if sa == sb:
return grad_b
max_dim = max(len(sa), len(sb))
new_sa = (1,) * (max_dim - len(sa)) + sa
new_sb = (1,) * (max_dim - len(sb)) + sb
for i, (da, db) in enumerate(zip(new_sa, new_sb)):
if da != db == 1:
# grad_b = np.expand_dims(grad_b.sum(axis=i), i)
grad_b = ExpandDims(grad_b.sum(dim=i), i)
return grad_b.reshape(sb)
class Add(ComputeNodeDetached):
def forward(self) -> ndarray:
a, b = self.inputs_data
return a + b
def backward_a(self, dy):
a, b = self.inputs_data
res = apply_broadcast_a(a.shape, b.shape, dy)
return res
def backward_b(self, dy):
a, b = self.inputs_data
res = apply_broadcast_b(a.shape, b.shape, dy)
return res
def retain_backward_a(self, dy):
a, b = self.inputs
res = apply_broadcast_a_tensor(a.shape, b.shape, dy)
return res
def retain_backward_b(self, dy):
a, b = self.inputs
res = apply_broadcast_b_tensor(a.shape, b.shape, dy)
return res
@staticmethod
def at(a: "Tensor", indices, b: "Tensor"):
if not isinstance(b, (int, float)):
b = (b,) * len(indices)
if not isinstance(b, _tensor.Tensor):
b = _tensor.Tensor(b)
if isinstance(indices, list):
for i, idx in enumerate(indices):
a = a * 1
a[idx] += b[i]
elif isinstance(indices, tuple):
a[indices] = b
return a
class Sub(ComputeNodeDetached):
def forward(self) -> ndarray:
a, b = self.inputs_data
return a - b
def backward_a(self, dy):
a, b = self.inputs_data
return apply_broadcast_a(a.shape, b.shape, dy)
def backward_b(self, dy):
a, b = self.inputs_data
dy = -dy
return apply_broadcast_b(a.shape, b.shape, dy)
def retain_backward_a(self, dy):
a, b = self.inputs
return apply_broadcast_a_tensor(a.shape, b.shape, dy)
def retain_backward_b(self, dy):
a, b = self.inputs
dy = -dy
return apply_broadcast_b_tensor(a.shape, b.shape, dy)
class Mul(ComputeNodeDetached):
def forward(self) -> ndarray:
a, b = self.inputs_data
return a * b
def backward_a(self, dy):
a, b = self.inputs_data
dy = b * dy
return apply_broadcast_a(a.shape, b.shape, dy)
def backward_b(self, dy):
a, b = self.inputs_data
dy = a * dy
return apply_broadcast_b(a.shape, b.shape, dy)
def retain_backward_a(self, dy):
a, b = self.inputs
dy = b * dy
return apply_broadcast_a_tensor(a.shape, b.shape, dy)
def retain_backward_b(self, dy):
a, b = self.inputs
dy = a * dy
return apply_broadcast_b_tensor(a.shape, b.shape, dy)
class Div(ComputeNodeDetached):
def forward(self) -> ndarray:
a, b = self.inputs_data
return a / b
def backward_a(self, dy):
a, b = self.inputs_data
dy = (1 / b) * dy
return apply_broadcast_a(a.shape, b.shape, dy)
def backward_b(self, dy):
a, b = self.inputs_data
dy = -a * (b ** (-2)) * dy
return apply_broadcast_b(a.shape, b.shape, dy)
def retain_backward_a(self, dy):
a, b = self.inputs
dy = (1 / b) * dy
return apply_broadcast_a_tensor(a.shape, b.shape, dy)
def retain_backward_b(self, dy):
a, b = self.inputs
dy = -a * (b ** (-2)) * dy
return apply_broadcast_b_tensor(a.shape, b.shape, dy)
class Pow(ComputeNodeDetached):
def forward(self) -> ndarray:
a, b = self.inputs_data
return np.power(a, b)
def backward_a(self, dy):
a, b = self.inputs_data
dy = b * (a ** (b - 1)) * dy
return apply_broadcast_a(a.shape, b.shape, dy)
def backward_b(self, dy):
a, b = self.inputs_data
dy =
|
np.power(a, b)
|
numpy.power
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 30_datasets.ipynb (unless otherwise specified).
__all__ = ['To3dArray', 'ToArray', 'decompress_from_url', 'get_UCR_univariate_list', 'get_UCR_multivariate_list',
'get_UCR_univariate', 'get_UCR_multivariate', 'get_UCR_data', 'ucr_to_items', 'get_simple_config',
'items_from_df', 'cats_from_df']
# Cell
from .data import *
# Cell
import numpy as np
#import torch
from fastai.torch_basics import *
from fastai.data.all import *
from fastai.callback.all import *
import torch
# Cell
import os
import sys
import tempfile
try: from urllib import urlretrieve
except ImportError: from urllib.request import urlretrieve
import shutil
from pyunpack import Archive
from scipy.io import arff
# Cell
#TSUtilities
def To3dArray(arr):
arr = ToArray(arr)
if arr.ndim == 1: arr = arr[None, None]
elif arr.ndim == 2: arr = arr[:, None]
elif arr.ndim == 4: arr = arr[0]
assert arr.ndim == 3, 'Please, review input dimensions'
return np.array(arr)
def ToArray(arr):
if isinstance(arr, torch.Tensor):
arr = np.array(arr)
elif not isinstance(arr, np.ndarray):
print(f"Can't convert {type(arr)} to np.array")
if arr.dtype == 'O': arr = np.array(arr, dtype=np.float32)
return arr
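# Shape behaviour of the helpers above, as an illustration (the input shapes are
# hypothetical examples, chosen to hit each branch of To3dArray):
#   To3dArray(np.zeros(10)).shape            # (1, 1, 10): 1d -> (sample, channel, time)
#   To3dArray(np.zeros((5, 10))).shape       # (5, 1, 10): 2d -> channel axis inserted
#   To3dArray(np.zeros((1, 5, 1, 10))).shape # (5, 1, 10): 4d -> leading axis dropped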
# Cell
def decompress_from_url(url, target_dir=None, verbose=False):
"""Downloads a compressed file from its URL and uncompresses it.
Parameters
----------
url : string
URL from which to download.
target_dir : str or None (default: None)
Directory to be used to extract downloaded files.
verbose : bool (default: False)
Whether to print information about the process (cached files used, ...)
Returns
-------
str or None
Directory in which the compressed file has been extracted if the process was
successful, None otherwise
"""
try:
fname = os.path.basename(url)
tmpdir = tempfile.mkdtemp()
local_comp_fname = os.path.join(tmpdir, fname)
urlretrieve(url, local_comp_fname)
    except:
        shutil.rmtree(tmpdir)
        if verbose:
            sys.stderr.write("Could not download url. Please, check url.\n")
        return None
try:
if not os.path.exists(target_dir): os.makedirs(target_dir)
Archive(local_comp_fname).extractall(target_dir)
shutil.rmtree(tmpdir)
if verbose:
print("Successfully extracted file %s to path %s" %
(local_comp_fname, target_dir))
return target_dir
except:
shutil.rmtree(tmpdir)
if verbose:
sys.stderr.write("Could not uncompress file, aborting.\n")
return None
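# Hedged usage sketch for the function above (the URL follows the pattern used by
# get_UCR_univariate below; treat the exact address as an example, not a guarantee):
#
#   # path = decompress_from_url(
#   #     'http://www.timeseriesclassification.com/Downloads/GunPoint.zip',
#   #     target_dir='data/UCR/GunPoint', verbose=True)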
def get_UCR_univariate_list():
return sorted([
'ACSF1', 'Adiac', 'AllGestureWiimoteX', 'AllGestureWiimoteY',
'AllGestureWiimoteZ', 'ArrowHead', 'AsphaltObstacles', 'BME', 'Beef',
'BeetleFly', 'BirdChicken', 'CBF', 'Car', 'Chinatown',
'ChlorineConcentration', 'CinCECGTorso', 'Coffee', 'Computers',
'CricketX', 'CricketY', 'CricketZ', 'Crop', 'DiatomSizeReduction',
'DistalPhalanxOutlineAgeGroup', 'DistalPhalanxOutlineCorrect',
'DistalPhalanxTW', 'DodgerLoopDay', 'DodgerLoopGame',
'DodgerLoopWeekend', 'ECG200', 'ECG5000', 'ECGFiveDays',
'EOGHorizontalSignal', 'EOGVerticalSignal', 'Earthquakes',
'ElectricDevices', 'EthanolLevel', 'FaceAll', 'FaceFour', 'FacesUCR',
'FiftyWords', 'Fish', 'FordA', 'FordB', 'FreezerRegularTrain',
'FreezerSmallTrain', 'Fungi', 'GestureMidAirD1', 'GestureMidAirD2',
'GestureMidAirD3', 'GesturePebbleZ1', 'GesturePebbleZ2', 'GunPoint',
'GunPointAgeSpan', 'GunPointMaleVersusFemale',
'GunPointOldVersusYoung', 'Ham', 'HandOutlines', 'Haptics', 'Herring',
'HouseTwenty', 'InlineSkate', 'InsectEPGRegularTrain',
'InsectEPGSmallTrain', 'InsectWingbeatSound', 'ItalyPowerDemand',
'LargeKitchenAppliances', 'Lightning2', 'Lightning7', 'Mallat', 'Meat',
'MedicalImages', 'MelbournePedestrian', 'MiddlePhalanxOutlineAgeGroup',
'MiddlePhalanxOutlineCorrect', 'MiddlePhalanxTW',
'MixedShapesRegularTrain', 'MixedShapesSmallTrain', 'MoteStrain',
'NonInvasiveFetalECGThorax1', 'NonInvasiveFetalECGThorax2', 'OSULeaf',
'OliveOil', 'PLAID', 'PhalangesOutlinesCorrect', 'Phoneme',
'PickupGestureWiimoteZ', 'PigAirwayPressure', 'PigArtPressure',
'PigCVP', 'Plane', 'PowerCons', 'ProximalPhalanxOutlineAgeGroup',
'ProximalPhalanxOutlineCorrect', 'ProximalPhalanxTW',
'RefrigerationDevices', 'Rock', 'ScreenType', 'SemgHandGenderCh2',
'SemgHandMovementCh2', 'SemgHandSubjectCh2', 'ShakeGestureWiimoteZ',
'ShapeletSim', 'ShapesAll', 'SmallKitchenAppliances', 'SmoothSubspace',
'SonyAIBORobotSurface1', 'SonyAIBORobotSurface2', 'StarLightCurves',
'Strawberry', 'SwedishLeaf', 'Symbols', 'SyntheticControl',
'ToeSegmentation1', 'ToeSegmentation2', 'Trace', 'TwoLeadECG',
'TwoPatterns', 'UMD', 'UWaveGestureLibraryAll', 'UWaveGestureLibraryX',
'UWaveGestureLibraryY', 'UWaveGestureLibraryZ', 'Wafer', 'Wine',
'WordSynonyms', 'Worms', 'WormsTwoClass', 'Yoga'
])
def get_UCR_multivariate_list():
return sorted([
'ArticularyWordRecognition', 'AtrialFibrillation', 'BasicMotions',
'CharacterTrajectories', 'Cricket', 'DuckDuckGeese', 'ERing',
'EigenWorms', 'Epilepsy', 'EthanolConcentration', 'FaceDetection',
'FingerMovements', 'HandMovementDirection', 'Handwriting', 'Heartbeat',
'InsectWingbeat', 'JapaneseVowels', 'LSST', 'Libras', 'MotorImagery',
'NATOPS', 'PEMS-SF', 'PenDigits', 'PhonemeSpectra', 'RacketSports',
'SelfRegulationSCP1', 'SelfRegulationSCP2', 'SpokenArabicDigits',
'StandWalkJump', 'UWaveGestureLibrary'
])
# Cell
def get_UCR_univariate(sel_dataset, parent_dir='data/UCR', verbose=False, drop_na=False, check=True):
if check and sel_dataset not in get_UCR_univariate_list():
print('This dataset does not exist. Please select one from this list:')
print(get_UCR_univariate_list())
return None, None, None, None
if verbose: print('Dataset:', sel_dataset)
src_website = 'http://www.timeseriesclassification.com/Downloads/'
tgt_dir = Path(parent_dir) / sel_dataset
if verbose: print('Downloading and decompressing data...')
if not os.path.isdir(tgt_dir):
decompress_from_url(
src_website + sel_dataset + '.zip', target_dir=tgt_dir, verbose=verbose)
if verbose: print('...data downloaded and decompressed')
fname_train = sel_dataset + "_TRAIN.arff"
fname_test = sel_dataset + "_TEST.arff"
train_df = pd.DataFrame(arff.loadarff(os.path.join(tgt_dir, fname_train))[0])
test_df = pd.DataFrame(arff.loadarff(os.path.join(tgt_dir, fname_test))[0])
unique_cats = train_df.iloc[:, -1].unique()
mapping = dict(zip(unique_cats, np.arange(len(unique_cats))))
train_df = train_df.replace({train_df.columns.values[-1]: mapping})
test_df = test_df.replace({test_df.columns.values[-1]: mapping})
if drop_na:
train_df.dropna(axis=1, inplace=True)
test_df.dropna(axis=1, inplace=True)
X_train = train_df.iloc[:, :-1].values.astype(np.float32)
X_test = test_df.iloc[:, :-1].values.astype(np.float32)
y_train = train_df.iloc[:, -1].values.astype(int)
y_test = test_df.iloc[:, -1].values.astype(int)
X_train = To3dArray(X_train)
X_test = To3dArray(X_test)
if verbose:
print('Successfully extracted dataset\n')
print('X_train:', X_train.shape)
print('y_train:', y_train.shape)
print('X_valid:', X_test.shape)
print('y_valid:', y_test.shape, '\n')
return X_train, y_train, X_test, y_test
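# Example call (hedged): any name from get_UCR_univariate_list() works; the data is
# downloaded and decompressed on first use.
#
#   # X_train, y_train, X_test, y_test = get_UCR_univariate('GunPoint', verbose=True)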
def get_UCR_multivariate(sel_dataset, parent_dir='data/UCR', verbose=False, check=True):
if sel_dataset.lower() == 'mphoneme': sel_dataset = 'Phoneme'
if check and sel_dataset not in get_UCR_multivariate_list():
print('This dataset does not exist. Please select one from this list:')
print(get_UCR_multivariate_list())
return None, None, None, None
if verbose: print('Dataset:', sel_dataset)
src_website = 'http://www.timeseriesclassification.com/Downloads/'
tgt_dir = Path(parent_dir) / sel_dataset
if verbose: print('Downloading and decompressing data...')
if not os.path.isdir(tgt_dir):
decompress_from_url(
src_website + sel_dataset + '.zip', target_dir=tgt_dir, verbose=verbose)
if verbose: print('...data downloaded and decompressed')
if verbose: print('Extracting data...')
X_train_ = []
X_test_ = []
for i in range(10000):
if not os.path.isfile(
f'{parent_dir}/{sel_dataset}/{sel_dataset}Dimension'
+ str(i + 1) + '_TRAIN.arff'):
break
train_df = pd.DataFrame(
arff.loadarff(
f'{parent_dir}/{sel_dataset}/{sel_dataset}Dimension'
+ str(i + 1) + '_TRAIN.arff')[0])
unique_cats = train_df.iloc[:, -1].unique()
mapping = dict(zip(unique_cats, np.arange(len(unique_cats))))
train_df = train_df.replace({train_df.columns.values[-1]: mapping})
test_df = pd.DataFrame(
arff.loadarff(
f'{parent_dir}/{sel_dataset}/{sel_dataset}Dimension'
+ str(i + 1) + '_TEST.arff')[0])
test_df = test_df.replace({test_df.columns.values[-1]: mapping})
X_train_.append(train_df.iloc[:, :-1].values)
X_test_.append(test_df.iloc[:, :-1].values)
if verbose: print('...extraction complete')
X_train = np.stack(X_train_, axis=-1)
X_test = np.stack(X_test_, axis=-1)
    # Rearrange the arrays from (samples, time steps, channels) to (samples, channels, time steps)
X_train = np.transpose(X_train, (0, 2, 1))
X_test =
|
np.transpose(X_test, (0, 2, 1))
|
numpy.transpose
|
############################TESTS ON POTENTIALS################################
from __future__ import print_function, division
import os
import sys
import numpy
import pynbody
from galpy import potential
_TRAVIS= bool(os.getenv('TRAVIS'))
#Test whether the normalization of the potential works
def test_normalize_potential():
#Grab all of the potentials
pots= [p for p in dir(potential)
if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
and not 'evaluate' in p)]
pots.append('mockTwoPowerIntegerSphericalPotential')
pots.append('specialTwoPowerSphericalPotential')
pots.append('HernquistTwoPowerIntegerSphericalPotential')
pots.append('JaffeTwoPowerIntegerSphericalPotential')
pots.append('NFWTwoPowerIntegerSphericalPotential')
pots.append('specialMiyamotoNagaiPotential')
pots.append('specialPowerSphericalPotential')
pots.append('specialFlattenedPowerPotential')
pots.append('specialMN3ExponentialDiskPotentialPD')
pots.append('specialMN3ExponentialDiskPotentialSECH')
rmpots= ['Potential','MWPotential','MWPotential2014',
'MovingObjectPotential',
'interpRZPotential', 'linearPotential', 'planarAxiPotential',
'planarPotential', 'verticalPotential','PotentialError',
'SnapshotRZPotential','InterpSnapshotRZPotential']
if False: #_TRAVIS: #travis CI
rmpots.append('DoubleExponentialDiskPotential')
rmpots.append('RazorThinExponentialDiskPotential')
for p in rmpots:
pots.remove(p)
for p in pots:
#if not 'NFW' in p: continue #For testing the test
#Setup instance of potential
try:
tclass= getattr(potential,p)
except AttributeError:
tclass= getattr(sys.modules[__name__],p)
tp= tclass()
if not hasattr(tp,'normalize'): continue
tp.normalize(1.)
assert (tp.Rforce(1.,0.)+1.)**2. < 10.**-16., \
"Normalization of %s potential fails" % p
assert (tp.vcirc(1.)**2.-1.)**2. < 10.**-16., \
"Normalization of %s potential fails" % p
tp.normalize(.5)
if hasattr(tp,'toPlanar'):
ptp= tp.toPlanar()
else:
ptp= tp
assert (ptp.Rforce(1.,0.)+.5)**2. < 10.**-16., \
"Normalization of %s potential fails" % p
assert (ptp.vcirc(1.)**2.-0.5)**2. < 10.**-16., \
"Normalization of %s potential fails" % p
#Test whether the derivative of the potential is minus the force
def test_forceAsDeriv_potential():
#Grab all of the potentials
pots= [p for p in dir(potential)
if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
and not 'evaluate' in p)]
pots.append('mockTwoPowerIntegerSphericalPotential')
pots.append('specialTwoPowerSphericalPotential')
pots.append('HernquistTwoPowerIntegerSphericalPotential')
pots.append('JaffeTwoPowerIntegerSphericalPotential')
pots.append('NFWTwoPowerIntegerSphericalPotential')
pots.append('specialMiyamotoNagaiPotential')
pots.append('specialMN3ExponentialDiskPotentialPD')
pots.append('specialMN3ExponentialDiskPotentialSECH')
pots.append('specialPowerSphericalPotential')
pots.append('specialFlattenedPowerPotential')
pots.append('testMWPotential')
pots.append('testplanarMWPotential')
pots.append('testlinearMWPotential')
pots.append('mockInterpRZPotential')
pots.append('mockSnapshotRZPotential')
pots.append('mockInterpSnapshotRZPotential')
pots.append('mockCosmphiDiskPotentialT1')
pots.append('mockCosmphiDiskPotentialTm1')
pots.append('mockCosmphiDiskPotentialTm5')
pots.append('mockDehnenBarPotentialT1')
pots.append('mockDehnenBarPotentialTm1')
pots.append('mockDehnenBarPotentialTm5')
pots.append('mockEllipticalDiskPotentialT1')
pots.append('mockEllipticalDiskPotentialTm1')
pots.append('mockEllipticalDiskPotentialTm5')
pots.append('mockSteadyLogSpiralPotentialT1')
pots.append('mockSteadyLogSpiralPotentialTm1')
pots.append('mockSteadyLogSpiralPotentialTm5')
pots.append('mockTransientLogSpiralPotential')
pots.append('mockFlatEllipticalDiskPotential') #for evaluate w/ nonaxi lists
pots.append('mockMovingObjectPotential')
pots.append('mockMovingObjectExplSoftPotential')
rmpots= ['Potential','MWPotential','MWPotential2014',
'MovingObjectPotential',
'interpRZPotential', 'linearPotential', 'planarAxiPotential',
'planarPotential', 'verticalPotential','PotentialError',
'SnapshotRZPotential','InterpSnapshotRZPotential']
if False: #_TRAVIS: #travis CI
rmpots.append('DoubleExponentialDiskPotential')
rmpots.append('RazorThinExponentialDiskPotential')
for p in rmpots:
pots.remove(p)
Rs= numpy.array([0.5,1.,2.])
Zs= numpy.array([0.,.125,-.125,0.25,-0.25])
phis= numpy.array([0.,0.5,-0.5,1.,-1.,
numpy.pi,0.5+numpy.pi,
1.+numpy.pi])
#tolerances in log10
tol= {}
tol['default']= -8.
tol['DoubleExponentialDiskPotential']= -6. #these are more difficult
tol['RazorThinExponentialDiskPotential']= -6.
tol['mockInterpRZPotential']= -4.
for p in pots:
#if not 'NFW' in p: continue #For testing the test
#Setup instance of potential
try:
tclass= getattr(potential,p)
except AttributeError:
tclass= getattr(sys.modules[__name__],p)
tp= tclass()
if hasattr(tp,'normalize'): tp.normalize(1.)
#Set tolerance
if p in list(tol.keys()): ttol= tol[p]
else: ttol= tol['default']
#Radial force
for ii in range(len(Rs)):
for jj in range(len(Zs)):
dr= 10.**-8.
newR= Rs[ii]+dr
dr= newR-Rs[ii] #Representable number
if isinstance(tp,potential.linearPotential):
mpotderivR= (potential.evaluatelinearPotentials(Rs[ii],tp)
-potential.evaluatelinearPotentials(Rs[ii]+dr,
tp))/dr
tRforce= potential.evaluatelinearForces(Rs[ii],tp)
elif isinstance(tp,potential.planarPotential):
mpotderivR= (potential.evaluateplanarPotentials(Rs[ii],tp,phi=Zs[jj])-potential.evaluateplanarPotentials(Rs[ii]+dr,tp,phi=Zs[jj]))/dr
tRforce= potential.evaluateplanarRforces(Rs[ii],tp,
phi=Zs[jj])
else:
mpotderivR= (potential.evaluatePotentials(Rs[ii],Zs[jj],tp)
-potential.evaluatePotentials(Rs[ii]+dr,Zs[jj],
tp))/dr
tRforce= potential.evaluateRforces(Rs[ii],Zs[jj],tp)
if tRforce**2. < 10.**ttol:
assert mpotderivR**2. < 10.**ttol, \
"Calculation of the Radial force as the Radial derivative of the %s potential fails at (R,Z) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],numpy.fabs(tRforce-mpotderivR), numpy.fabs((tRforce-mpotderivR)/tRforce))
else:
assert (tRforce-mpotderivR)**2./tRforce**2. < 10.**ttol, \
"Calculation of the Radial force as the Radial derivative of the %s potential fails at (R,Z) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],numpy.fabs(tRforce-mpotderivR), numpy.fabs((tRforce-mpotderivR)/tRforce))
#Azimuthal force, if it exists
if isinstance(tp,potential.linearPotential): continue
for ii in range(len(Rs)):
for jj in range(len(phis)):
dphi= 10.**-8.
newphi= phis[jj]+dphi
dphi= newphi-phis[jj] #Representable number
if isinstance(tp,potential.planarPotential):
mpotderivphi= (tp(Rs[ii],phi=phis[jj])-tp(Rs[ii],phi=phis[jj]+dphi))/dphi
tphiforce= potential.evaluateplanarphiforces(Rs[ii],tp,
phi=phis[jj])
else:
mpotderivphi= (tp(Rs[ii],0.05,phi=phis[jj])-tp(Rs[ii],0.05,phi=phis[jj]+dphi))/dphi
tphiforce= potential.evaluatephiforces(Rs[ii],0.05,tp,
phi=phis[jj])
try:
if tphiforce**2. < 10.**ttol:
assert(mpotderivphi**2. < 10.**ttol)
else:
assert((tphiforce-mpotderivphi)**2./tphiforce**2. < 10.**ttol)
except AssertionError:
if isinstance(tp,potential.planarPotential):
raise AssertionError("Calculation of the azimuthal force as the azimuthal derivative of the %s potential fails at (R,phi) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],phis[jj],numpy.fabs(mpotderivphi),numpy.fabs((tphiforce-mpotderivphi)/tphiforce)))
else:
raise AssertionError("Calculation of the azimuthal force as the azimuthal derivative of the %s potential fails at (R,Z,phi) = (%.3f,0.05,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],phis[jj],numpy.fabs(mpotderivphi),numpy.fabs((tphiforce-mpotderivphi)/tphiforce)))
#Vertical force, if it exists
if isinstance(tp,potential.planarPotential) \
or isinstance(tp,potential.linearPotential): continue
for ii in range(len(Rs)):
for jj in range(len(Zs)):
dz= 10.**-8.
newZ= Zs[jj]+dz
dz= newZ-Zs[jj] #Representable number
mpotderivz= (tp(Rs[ii],Zs[jj])-tp(Rs[ii],Zs[jj]+dz))/dz
tzforce= potential.evaluatezforces(Rs[ii],Zs[jj],tp)
if tzforce**2. < 10.**ttol:
assert mpotderivz**2. < 10.**ttol, \
"Calculation of the vertical force as the vertical derivative of the %s potential fails at (R,Z) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],numpy.fabs(mpotderivz),numpy.fabs((tzforce-mpotderivz)/tzforce))
else:
assert (tzforce-mpotderivz)**2./tzforce**2. < 10.**ttol, \
"Calculation of the vertical force as the vertical derivative of the %s potential fails at (R,Z) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],numpy.fabs(mpotderivz),numpy.fabs((tzforce-mpotderivz)/tzforce))
#Test whether the second derivative of the potential is minus the derivative of the force
def test_2ndDeriv_potential():
#Grab all of the potentials
pots= [p for p in dir(potential)
if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
and not 'evaluate' in p)]
pots.append('mockTwoPowerIntegerSphericalPotential')
pots.append('specialTwoPowerSphericalPotential')
pots.append('HernquistTwoPowerIntegerSphericalPotential')
pots.append('JaffeTwoPowerIntegerSphericalPotential')
pots.append('NFWTwoPowerIntegerSphericalPotential')
pots.append('specialMiyamotoNagaiPotential')
pots.append('specialMN3ExponentialDiskPotentialPD')
pots.append('specialMN3ExponentialDiskPotentialSECH')
pots.append('specialPowerSphericalPotential')
pots.append('specialFlattenedPowerPotential')
pots.append('testMWPotential')
pots.append('testplanarMWPotential')
pots.append('testlinearMWPotential')
pots.append('mockInterpRZPotential')
pots.append('mockCosmphiDiskPotentialT1')
pots.append('mockCosmphiDiskPotentialTm1')
pots.append('mockCosmphiDiskPotentialTm5')
pots.append('mockDehnenBarPotentialT1')
pots.append('mockDehnenBarPotentialTm1')
pots.append('mockDehnenBarPotentialTm5')
pots.append('mockEllipticalDiskPotentialT1')
pots.append('mockEllipticalDiskPotentialTm1')
pots.append('mockEllipticalDiskPotentialTm5')
pots.append('mockSteadyLogSpiralPotentialT1')
pots.append('mockSteadyLogSpiralPotentialTm1')
pots.append('mockSteadyLogSpiralPotentialTm5')
pots.append('mockTransientLogSpiralPotential')
pots.append('mockFlatEllipticalDiskPotential') #for evaluate w/ nonaxi lists
rmpots= ['Potential','MWPotential','MWPotential2014',
'MovingObjectPotential',
'interpRZPotential', 'linearPotential', 'planarAxiPotential',
'planarPotential', 'verticalPotential','PotentialError',
'SnapshotRZPotential','InterpSnapshotRZPotential']
if False: #_TRAVIS: #travis CI
rmpots.append('DoubleExponentialDiskPotential')
rmpots.append('RazorThinExponentialDiskPotential')
for p in rmpots:
pots.remove(p)
Rs= numpy.array([0.5,1.,2.])
Zs= numpy.array([0.,.125,-.125,0.25,-0.25])
phis= numpy.array([0.,0.5,-0.5,1.,-1.,
numpy.pi,0.5+numpy.pi,
1.+numpy.pi])
#tolerances in log10
tol= {}
tol['default']= -8.
tol['DoubleExponentialDiskPotential']= -3. #these are more difficult
tol['RazorThinExponentialDiskPotential']= -6.
tol['mockInterpRZPotential']= -4.
for p in pots:
#if not 'NFW' in p: continue #For testing the test
#Setup instance of potential
try:
tclass= getattr(potential,p)
except AttributeError:
tclass= getattr(sys.modules[__name__],p)
tp= tclass()
if hasattr(tp,'normalize'): tp.normalize(1.)
#Set tolerance
if p in list(tol.keys()): ttol= tol[p]
else: ttol= tol['default']
#2nd radial
if hasattr(tp,'_R2deriv'):
for ii in range(len(Rs)):
for jj in range(len(Zs)):
if p == 'RazorThinExponentialDiskPotential' and numpy.fabs(Zs[jj]) > 0.: continue #Not implemented
dr= 10.**-8.
newR= Rs[ii]+dr
dr= newR-Rs[ii] #Representable number
if isinstance(tp,potential.linearPotential):
mRforcederivR= (tp.Rforce(Rs[ii])-tp.Rforce(Rs[ii]+dr))/dr
tR2deriv= tp.R2deriv(Rs[ii])
elif isinstance(tp,potential.planarPotential):
mRforcederivR= (tp.Rforce(Rs[ii],Zs[jj])-tp.Rforce(Rs[ii]+dr,Zs[jj]))/dr
tR2deriv= potential.evaluateplanarR2derivs(Rs[ii],tp,
phi=Zs[jj])
else:
mRforcederivR= (tp.Rforce(Rs[ii],Zs[jj])-tp.Rforce(Rs[ii]+dr,Zs[jj]))/dr
tR2deriv= potential.evaluateR2derivs(Rs[ii],Zs[jj],tp)
if tR2deriv**2. < 10.**ttol:
assert mRforcederivR**2. < 10.**ttol, \
"Calculation of the second Radial derivative of the potential as the Radial derivative of the %s Radial force fails at (R,Z) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],numpy.fabs(tR2deriv-mRforcederivR), numpy.fabs((tR2deriv-mRforcederivR)/tR2deriv))
else:
assert (tR2deriv-mRforcederivR)**2./tR2deriv**2. < 10.**ttol, \
"Calculation of the second Radial derivative of the potential as the Radial derivative of the %s Radial force fails at (R,Z) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],numpy.fabs(tR2deriv-mRforcederivR), numpy.fabs((tR2deriv-mRforcederivR)/tR2deriv))
#2nd azimuthal
if not isinstance(tp,potential.linearPotential) \
and hasattr(tp,'_phi2deriv'):
for ii in range(len(Rs)):
for jj in range(len(phis)):
dphi= 10.**-8.
newphi= phis[jj]+dphi
dphi= newphi-phis[jj] #Representable number
if isinstance(tp,potential.planarPotential):
mphiforcederivphi= (tp.phiforce(Rs[ii],phi=phis[jj])-tp.phiforce(Rs[ii],phi=phis[jj]+dphi))/dphi
tphi2deriv= tp.phi2deriv(Rs[ii],phi=phis[jj])
else:
mphiforcederivphi= (tp.phiforce(Rs[ii],0.05,phi=phis[jj])-tp.phiforce(Rs[ii],0.05,phi=phis[jj]+dphi))/dphi
tphi2deriv= tp.phi2deriv(Rs[ii],0.05,phi=phis[jj])
try:
if tphi2deriv**2. < 10.**ttol:
assert(mphiforcederivphi**2. < 10.**ttol)
else:
assert((tphi2deriv-mphiforcederivphi)**2./tphi2deriv**2. < 10.**ttol)
except AssertionError:
if isinstance(tp,potential.planarPotential):
raise AssertionError("Calculation of the second azimuthal derivative of the potential as the azimuthal derivative of the %s azimuthal force fails at (R,phi) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],phis[jj],numpy.fabs(tphi2deriv-mphiforcederivphi), numpy.fabs((tphi2deriv-mphiforcederivphi)/tphi2deriv)))
else:
raise AssertionError("Calculation of the second azimuthal derivative of the potential as the azimuthal derivative of the %s azimuthal force fails at (R,Z,phi) = (%.3f,0.05,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],phis[jj],numpy.fabs(tphi2deriv-mphiforcederivphi), numpy.fabs((tphi2deriv-mphiforcederivphi)/tphi2deriv)))
#mixed radial azimuthal
if not isinstance(tp,potential.linearPotential) \
and hasattr(tp,'_Rphideriv'):
for ii in range(len(Rs)):
for jj in range(len(phis)):
dphi= 10.**-8.
newphi= phis[jj]+dphi
dphi= newphi-phis[jj] #Representable number
if isinstance(tp,potential.planarPotential):
mRforcederivphi= (tp.Rforce(Rs[ii],phi=phis[jj])-tp.Rforce(Rs[ii],phi=phis[jj]+dphi))/dphi
tRphideriv= tp.Rphideriv(Rs[ii],phi=phis[jj])
else:
mRforcederivphi= (tp.Rforce(Rs[ii],0.05,phi=phis[jj])-tp.Rforce(Rs[ii],0.05,phi=phis[jj]+dphi))/dphi
tRphideriv= tp.Rphideriv(Rs[ii],0.05,phi=phis[jj])
try:
if tRphideriv**2. < 10.**ttol:
assert(mRforcederivphi**2. < 10.**ttol)
else:
assert((tRphideriv-mRforcederivphi)**2./tRphideriv**2. < 10.**ttol)
except AssertionError:
if isinstance(tp,potential.planarPotential):
raise AssertionError("Calculation of the mixed radial, azimuthal derivative of the potential as the azimuthal derivative of the %s Radial force fails at (R,phi) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],phis[jj],numpy.fabs(tRphideriv-mRforcederivphi), numpy.fabs((tRphideriv-mRforcederivphi)/tRphideriv)))
else:
raise AssertionError("Calculation of the second azimuthal derivative of the potential as the azimuthal derivative of the %s azimuthal force fails at (R,Z,phi) = (%.3f,0.05,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],phis[jj],numpy.fabs(tphi2deriv-mphiforcederivphi), numpy.fabs((tphi2deriv-mphiforcederivphi)/tphi2deriv)))
#2nd vertical
if not isinstance(tp,potential.planarPotential) \
and not isinstance(tp,potential.linearPotential) \
and hasattr(tp,'_z2deriv'):
for ii in range(len(Rs)):
for jj in range(len(Zs)):
if p == 'RazorThinExponentialDiskPotential': continue #Not implemented, or badly defined
if p == 'TwoPowerSphericalPotential': continue #Not implemented, or badly defined
if p == 'mockTwoPowerIntegerSphericalPotential': continue #Not implemented, or badly defined
if p == 'specialTwoPowerSphericalPotential': continue #Not implemented, or badly defined
if p == 'HernquistTwoPowerIntegerSphericalPotential': continue #Not implemented, or badly defined
if p == 'JaffeTwoPowerIntegerSphericalPotential': continue #Not implemented, or badly defined
if p == 'NFWTwoPowerIntegerSphericalPotential': continue #Not implemented, or badly defined
dz= 10.**-8.
newz= Zs[jj]+dz
dz= newz-Zs[jj] #Representable number
mzforcederivz= (tp.zforce(Rs[ii],Zs[jj])-tp.zforce(Rs[ii],Zs[jj]+dz))/dz
tz2deriv= potential.evaluatez2derivs(Rs[ii],Zs[jj],tp)
if tz2deriv**2. < 10.**ttol:
assert mzforcederivz**2. < 10.**ttol, \
"Calculation of the second vertical derivative of the potential as the vertical derivative of the %s vertical force fails at (R,Z) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],numpy.fabs(tz2deriv-mzforcederivz), numpy.fabs((tz2deriv-mzforcederivz)/tz2deriv))
else:
assert (tz2deriv-mzforcederivz)**2./tz2deriv**2. < 10.**ttol, \
"Calculation of the second vertical derivative of the potential as the vertical derivative of the %s vertical force fails at (R,Z) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],numpy.fabs(tz2deriv-mzforcederivz), numpy.fabs((tz2deriv-mzforcederivz)/tz2deriv))
#mixed radial vertical
if not isinstance(tp,potential.planarPotential) \
and not isinstance(tp,potential.linearPotential) \
and hasattr(tp,'_Rzderiv'):
for ii in range(len(Rs)):
for jj in range(len(Zs)):
# if p == 'RazorThinExponentialDiskPotential': continue #Not implemented, or badly defined
dz= 10.**-8.
newz= Zs[jj]+dz
dz= newz-Zs[jj] #Representable number
mRforcederivz= (tp.Rforce(Rs[ii],Zs[jj])-tp.Rforce(Rs[ii],Zs[jj]+dz))/dz
tRzderiv= potential.evaluateRzderivs(Rs[ii],Zs[jj],tp)
if tRzderiv**2. < 10.**ttol:
assert mRforcederivz**2. < 10.**ttol, \
"Calculation of the mixed radial vertical derivative of the potential as the vertical derivative of the %s radial force fails at (R,Z) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],numpy.fabs(tRzderiv-mRforcederivz), numpy.fabs((tRzderiv-mRforcederivz)/tRzderiv))
else:
assert (tRzderiv-mRforcederivz)**2./tRzderiv**2. < 10.**ttol, \
"Calculation of the mixed radial vertical derivative of the potential as the vertical derivative of the %s radial force fails at (R,Z) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],numpy.fabs(tRzderiv-mRforcederivz), numpy.fabs((tRzderiv-mRforcederivz)/tRzderiv))
#mixed radial, azimuthal
if not isinstance(tp,potential.linearPotential) \
and hasattr(tp,'_Rphideriv'):
for ii in range(len(Rs)):
for jj in range(len(phis)):
# if p == 'RazorThinExponentialDiskPotential': continue #Not implemented, or badly defined
dphi= 10.**-8.
newphi= phis[jj]+dphi
dphi= newphi-phis[jj] #Representable number
if isinstance(tp,potential.planarPotential):
mRforcederivphi= (tp.Rforce(Rs[ii],phi=phis[jj])\
-tp.Rforce(Rs[ii],phi=phis[jj]+dphi))/dphi
tRphideriv= potential.evaluateplanarPotentials(Rs[ii],tp,
phi=phis[jj],dR=1,dphi=1)
else:
mRforcederivphi= (tp.Rforce(Rs[ii],0.1,phi=phis[jj])\
-tp.Rforce(Rs[ii],0.1,phi=phis[jj]+dphi))/dphi
tRphideriv= potential.evaluatePotentials(Rs[ii],0.1,tp,
phi=phis[jj],dR=1,dphi=1)
if tRphideriv**2. < 10.**ttol:
assert mRforcederivphi**2. < 10.**ttol, \
"Calculation of the mixed radial azimuthal derivative of the potential as the azimuthal derivative of the %s radial force fails at (R,phi) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],phis[jj],numpy.fabs(tRphideriv-mRforcederivphi), numpy.fabs((tRphideriv-mRforcederivphi)/tRphideriv))
else:
assert (tRphideriv-mRforcederivphi)**2./tRphideriv**2. < 10.**ttol, \
"Calculation of the mixed radial azimuthal derivative of the potential as the azimuthal derivative of the %s radial force fails at (R,phi) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],phis[jj],numpy.fabs(tRphideriv-mRforcederivphi), numpy.fabs((tRphideriv-mRforcederivphi)/tRphideriv))
#Test whether the Poisson equation is satisfied if _dens and the relevant second derivatives are implemented
def test_poisson_potential():
#Grab all of the potentials
pots= [p for p in dir(potential)
if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
and not 'evaluate' in p)]
pots.append('mockTwoPowerIntegerSphericalPotential')
pots.append('specialTwoPowerSphericalPotential')
pots.append('HernquistTwoPowerIntegerSphericalPotential')
pots.append('JaffeTwoPowerIntegerSphericalPotential')
pots.append('NFWTwoPowerIntegerSphericalPotential')
pots.append('specialMiyamotoNagaiPotential')
pots.append('specialMN3ExponentialDiskPotentialPD')
pots.append('specialMN3ExponentialDiskPotentialSECH')
pots.append('specialFlattenedPowerPotential')
pots.append('specialPowerSphericalPotential')
pots.append('testMWPotential')
pots.append('testplanarMWPotential')
pots.append('testlinearMWPotential')
rmpots= ['Potential','MWPotential','MWPotential2014',
'MovingObjectPotential',
'interpRZPotential', 'linearPotential', 'planarAxiPotential',
'planarPotential', 'verticalPotential','PotentialError',
'SnapshotRZPotential','InterpSnapshotRZPotential']
if False: #_TRAVIS: #travis CI
rmpots.append('DoubleExponentialDiskPotential')
rmpots.append('RazorThinExponentialDiskPotential')
for p in rmpots:
pots.remove(p)
Rs= numpy.array([0.5,1.,2.])
Zs= numpy.array([0.,.125,-.125,0.25,-0.25])
phis= numpy.array([0.,0.5,-0.5,1.,-1.,
numpy.pi,0.5+numpy.pi,
1.+numpy.pi])
#tolerances in log10
tol= {}
tol['default']= -8.
tol['DoubleExponentialDiskPotential']= -3. #these are more difficult
#tol['RazorThinExponentialDiskPotential']= -6.
for p in pots:
#if not 'NFW' in p: continue #For testing the test
#if 'Isochrone' in p: continue #For testing the test
#Setup instance of potential
try:
tclass= getattr(potential,p)
except AttributeError:
tclass= getattr(sys.modules[__name__],p)
tp= tclass()
if hasattr(tp,'normalize'): tp.normalize(1.)
#Set tolerance
if p in list(tol.keys()): ttol= tol[p]
else: ttol= tol['default']
#2nd radial
if not hasattr(tp,'_dens') or not hasattr(tp,'_R2deriv') \
or not hasattr(tp,'_Rforce') or not hasattr(tp,'phi2deriv') \
or not hasattr(tp,'_z2deriv'):
continue
for ii in range(len(Rs)):
for jj in range(len(Zs)):
for kk in range(len(phis)):
tpoissondens= tp.dens(Rs[ii],Zs[jj],phi=phis[kk],
forcepoisson=True)
tdens= potential.evaluateDensities(Rs[ii],Zs[jj],tp,
phi=phis[kk],
forcepoisson=False)
if tdens**2. < 10.**ttol:
assert tpoissondens**2. < 10.**ttol, \
"Poisson equation relation between the derivatives of the potential and the implemented density is not satisfied for the %s potential at (R,Z,phi) = (%.3f,%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],phis[kk],numpy.fabs(tdens-tpoissondens), numpy.fabs((tdens-tpoissondens)/tdens))
else:
assert (tpoissondens-tdens)**2./tdens**2. < 10.**ttol, \
"Poisson equation relation between the derivatives of the potential and the implemented density is not satisfied for the %s potential at (R,Z,phi) = (%.3f,%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],phis[kk],numpy.fabs(tdens-tpoissondens), numpy.fabs((tdens-tpoissondens)/tdens))
return None
#Test whether the _evaluate function is correctly implemented in specifying derivatives
def test_evaluateAndDerivs_potential():
#Grab all of the potentials
pots= [p for p in dir(potential)
if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
and not 'evaluate' in p)]
pots.append('mockTwoPowerIntegerSphericalPotential')
pots.append('specialTwoPowerSphericalPotential')
pots.append('HernquistTwoPowerIntegerSphericalPotential')
pots.append('JaffeTwoPowerIntegerSphericalPotential')
pots.append('NFWTwoPowerIntegerSphericalPotential')
pots.append('specialMiyamotoNagaiPotential')
pots.append('specialMN3ExponentialDiskPotentialPD')
pots.append('specialMN3ExponentialDiskPotentialSECH')
pots.append('specialFlattenedPowerPotential')
pots.append('specialPowerSphericalPotential')
pots.append('mockCosmphiDiskPotentialT1')
pots.append('mockCosmphiDiskPotentialTm1')
pots.append('mockCosmphiDiskPotentialTm5')
pots.append('mockDehnenBarPotentialT1')
pots.append('mockDehnenBarPotentialTm1')
pots.append('mockDehnenBarPotentialTm5')
pots.append('mockEllipticalDiskPotentialT1')
pots.append('mockEllipticalDiskPotentialTm1')
pots.append('mockEllipticalDiskPotentialTm5')
pots.append('mockSteadyLogSpiralPotentialT1')
pots.append('mockSteadyLogSpiralPotentialTm1')
pots.append('mockSteadyLogSpiralPotentialTm5')
pots.append('mockTransientLogSpiralPotential')
pots.append('mockMovingObjectPotential')
rmpots= ['Potential','MWPotential','MWPotential2014',
'MovingObjectPotential',
'interpRZPotential', 'linearPotential', 'planarAxiPotential',
'planarPotential', 'verticalPotential','PotentialError',
'SnapshotRZPotential','InterpSnapshotRZPotential']
if False: #_TRAVIS: #travis CI
rmpots.append('DoubleExponentialDiskPotential')
rmpots.append('RazorThinExponentialDiskPotential')
for p in rmpots:
pots.remove(p)
#tolerances in log10
tol= {}
tol['default']= -12.
#tol['DoubleExponentialDiskPotential']= -3. #these are more difficult
#tol['RazorThinExponentialDiskPotential']= -6.
for p in pots:
#if 'Isochrone' in p: continue #For testing the test
#Setup instance of potential
try:
tclass= getattr(potential,p)
except AttributeError:
tclass= getattr(sys.modules[__name__],p)
tp= tclass()
if hasattr(tp,'normalize'): tp.normalize(1.)
#Set tolerance
if p in list(tol.keys()): ttol= tol[p]
else: ttol= tol['default']
#1st radial
if isinstance(tp,potential.linearPotential):
continue
elif isinstance(tp,potential.planarPotential):
tevaldr= tp(1.2,phi=0.1,dR=1)
trforce= tp.Rforce(1.2,phi=0.1)
else:
tevaldr= tp(1.2,0.1,phi=0.1,dR=1)
trforce= tp.Rforce(1.2,0.1,phi=0.1)
if not tevaldr is None:
if tevaldr**2. < 10.**ttol:
assert trforce**2. < 10.**ttol, \
"Calculation of radial derivative through _evaluate and Rforce inconsistent for the %s potential" % p
else:
assert (tevaldr+trforce)**2./tevaldr**2. < 10.**ttol, \
"Calculation of radial derivative through _evaluate and Rforce inconsistent for the %s potential" % p
#2nd radial
hasR2= True
from galpy.potential import PotentialError
if 'RazorThin' in p: R2z= 0.
else: R2z= 0.1
try:
if isinstance(tp,potential.planarPotential):
tp.R2deriv(1.2)
else:
tp.R2deriv(1.2,R2z)
except PotentialError:
hasR2= False
if hasR2:
if isinstance(tp,potential.planarPotential):
tevaldr2= tp(1.2,phi=0.1,dR=2)
tr2deriv= tp.R2deriv(1.2,phi=0.1)
else:
tevaldr2= tp(1.2,R2z,phi=0.1,dR=2)
tr2deriv= tp.R2deriv(1.2,R2z,phi=0.1)
if not tevaldr2 is None:
if tevaldr2**2. < 10.**ttol:
                    assert tr2deriv**2. < 10.**ttol, \
"Calculation of 2nd radial derivative through _evaluate and R2deriv inconsistent for the %s potential" % p
else:
assert (tevaldr2-tr2deriv)**2./tevaldr2**2. < 10.**ttol, \
"Calculation of 2nd radial derivative through _evaluate and R2deriv inconsistent for the %s potential" % p
#1st phi
if isinstance(tp,potential.planarPotential):
tevaldphi= tp(1.2,phi=0.1,dphi=1)
tphiforce= tp.phiforce(1.2,phi=0.1)
else:
tevaldphi= tp(1.2,0.1,phi=0.1,dphi=1)
tphiforce= tp.phiforce(1.2,0.1,phi=0.1)
if not tevaldphi is None:
if tevaldphi**2. < 10.**ttol:
assert tphiforce**2. < 10.**ttol, \
"Calculation of azimuthal derivative through _evaluate and phiforce inconsistent for the %s potential" % p
else:
assert (tevaldphi+tphiforce)**2./tevaldphi**2. < 10.**ttol, \
"Calculation of azimuthal derivative through _evaluate and phiforce inconsistent for the %s potential" % p
#2nd phi
hasphi2= True
try:
if isinstance(tp,potential.planarPotential):
tp.phi2deriv(1.2,phi=0.1)
else:
tp.phi2deriv(1.2,0.1,phi=0.1)
except (PotentialError,AttributeError):
hasphi2= False
if hasphi2 and hasattr(tp,'_phi2deriv'):
if isinstance(tp,potential.planarPotential):
tevaldphi2= tp(1.2,phi=0.1,dphi=2)
tphi2deriv= tp.phi2deriv(1.2,phi=0.1)
else:
tevaldphi2= tp(1.2,0.1,phi=0.1,dphi=2)
tphi2deriv= tp.phi2deriv(1.2,0.1,phi=0.1)
if not tevaldphi2 is None:
if tevaldphi2**2. < 10.**ttol:
                    assert tphi2deriv**2. < 10.**ttol, \
"Calculation of 2nd azimuthal derivative through _evaluate and phi2deriv inconsistent for the %s potential" % p
else:
assert (tevaldphi2-tphi2deriv)**2./tevaldphi2**2. < 10.**ttol, \
"Calculation of 2nd azimuthal derivative through _evaluate and phi2deriv inconsistent for the %s potential" % p
continue
#mixed radial,vertical
if isinstance(tp,potential.planarPotential):
tevaldrz= tp(1.2,0.1,phi=0.1,dR=1,dz=1)
trzderiv= tp.Rzderiv(1.2,0.1,phi=0.1)
else:
tevaldrz= tp(1.2,0.1,phi=0.1,dR=1,dz=1)
trzderiv= tp.Rzderiv(1.2,0.1,phi=0.1)
if not tevaldrz is None:
if tevaldrz**2. < 10.**ttol:
                assert trzderiv**2. < 10.**ttol, \
                    "Calculation of mixed radial,vertical derivative through _evaluate and Rzderiv inconsistent for the %s potential" % p
            else:
                assert (tevaldrz-trzderiv)**2./tevaldrz**2. < 10.**ttol, \
                    "Calculation of mixed radial,vertical derivative through _evaluate and Rzderiv inconsistent for the %s potential" % p
#Finally test that much higher derivatives are not implemented
try: tp(1.2,0.1,dR=4,dphi=10)
except NotImplementedError: pass
else: raise AssertionError('Higher-order derivative request in potential __call__ does not raise NotImplementedError')
return None
# Check that the masses are calculated correctly for spherical potentials
def test_mass_spher():
#PowerPotential close to Kepler should be very steep
pp= potential.PowerSphericalPotential(amp=2.,alpha=3.001)
kp= potential.KeplerPotential(amp=2.)
assert numpy.fabs(((pp.mass(10.)-kp.mass(10.)))/kp.mass(10.)) < 10.**-2., "Mass for PowerSphericalPotential close to KeplerPotential is not close to KeplerPotential's mass"
pp= potential.PowerSphericalPotential(amp=2.)
#mass = amp x r^(3-alpha)
tR= 1.
assert numpy.fabs(pp.mass(tR,forceint=True)-pp._amp*tR**(3.-pp.alpha)) < 10.**-10., 'Mass for PowerSphericalPotential not as expected'
tR= 2.
assert numpy.fabs(pp.mass(tR,forceint=True)-pp._amp*tR**(3.-pp.alpha)) < 10.**-10., 'Mass for PowerSphericalPotential not as expected'
tR= 20.
assert numpy.fabs(pp.mass(tR,forceint=True)-pp._amp*tR**(3.-pp.alpha)) < 10.**-10., 'Mass for PowerSphericalPotential not as expected'
#Test that for a cut-off potential, the mass far beyond the cut-off is
# 2pi rc^(3-alpha) gamma(1.5-alpha/2)
pp= potential.PowerSphericalPotentialwCutoff(amp=2.)
from scipy import special
expecMass= 2.*pp._amp*numpy.pi*pp.rc**(3.-pp.alpha)*special.gamma(1.5-pp.alpha/2.)
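    # (Hedged derivation of expecMass: assuming the density goes as
    #  amp * r^-alpha * exp(-(r/rc)^2), integrating 4*pi*r^2*rho from 0 to infinity gives
    #  4*pi*amp * 0.5 * rc^(3-alpha) * Gamma((3-alpha)/2)
    #  = 2*pi*amp * rc^(3-alpha) * Gamma(1.5 - alpha/2), the expression above.)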
tR= 5.
assert numpy.fabs((pp.mass(tR,forceint=True)-expecMass)/expecMass) < 10.**-6., 'Mass of PowerSphericalPotentialwCutoff far beyond the cut-off not as expected'
tR= 15.
assert numpy.fabs((pp.mass(tR,forceint=True)-expecMass)/expecMass) < 10.**-6., 'Mass of PowerSphericalPotentialwCutoff far beyond the cut-off not as expected'
tR= 50.
assert numpy.fabs((pp.mass(tR,forceint=True)-expecMass)/expecMass) < 10.**-6., 'Mass of PowerSphericalPotentialwCutoff far beyond the cut-off not as expected'
#Jaffe and Hernquist both have finite masses, NFW diverges logarithmically
jp= potential.JaffePotential(amp=2.,a=0.1)
hp= potential.HernquistPotential(amp=2.,a=0.1)
np= potential.NFWPotential(amp=2.,a=0.1)
tR= 10.
# Limiting behavior
jaffemass= jp._amp*(1.-jp.a/tR)
hernmass= hp._amp/2.*(1.-2.*hp.a/tR)
nfwmass= np._amp*(numpy.log(tR/np.a)-1.+np.a/tR)
assert numpy.fabs((jp.mass(tR,forceint=True)-jaffemass)/jaffemass) < 10.**-3., 'Limit mass for Jaffe potential not as expected'
    assert numpy.fabs((hp.mass(tR,forceint=True)-hernmass)/hernmass) < 10.**-3., 'Limit mass for Hernquist potential not as expected'
assert numpy.fabs((np.mass(tR,forceint=True)-nfwmass)/nfwmass) < 10.**-2., 'Limit mass for NFW potential not as expected'
tR= 200.
# Limiting behavior, add z, to test that too
jaffemass= jp._amp*(1.-jp.a/tR)
hernmass= hp._amp/2.*(1.-2.*hp.a/tR)
nfwmass= np._amp*(numpy.log(tR/np.a)-1.+np.a/tR)
assert numpy.fabs((jp.mass(tR,forceint=True)-jaffemass)/jaffemass) < 10.**-6., 'Limit mass for Jaffe potential not as expected'
    assert numpy.fabs((hp.mass(tR,forceint=True)-hernmass)/hernmass) < 10.**-6., 'Limit mass for Hernquist potential not as expected'
assert numpy.fabs((np.mass(tR,forceint=True)-nfwmass)/nfwmass) < 10.**-4., 'Limit mass for NFW potential not as expected'
tR, tz= 200., 10.
tr= numpy.sqrt(tR**2.+tz**2.)
# Limiting behavior, add z, to test that too
jaffemass= jp._amp*(1.-jp.a/tr)
hernmass= hp._amp/2.*(1.-2.*hp.a/tr)
nfwmass= np._amp*(numpy.log(tr/np.a)-1.+np.a/tr)
assert numpy.fabs((jp.mass(tR,z=tz,forceint=False)-jaffemass)/jaffemass) < 10.**-6., 'Limit mass for Jaffe potential not as expected'
    assert numpy.fabs((hp.mass(tR,z=tz,forceint=False)-hernmass)/hernmass) < 10.**-6., 'Limit mass for Hernquist potential not as expected'
assert numpy.fabs((np.mass(tR,z=tz,forceint=False)-nfwmass)/nfwmass) < 10.**-4., 'Limit mass for NFW potential not as expected'
return None
# Check that the masses are implemented correctly for spherical potentials
def test_mass_spher_analytic():
#TwoPowerSphericalPotentials all have explicitly implemented masses
jp= potential.JaffePotential(amp=2.)
hp= potential.HernquistPotential(amp=2.)
np= potential.NFWPotential(amp=2.)
tp= potential.TwoPowerSphericalPotential(amp=2.)
tR= 2.
assert numpy.fabs(jp.mass(tR,forceint=True)-jp.mass(tR)) < 10.**-10., 'Explicit mass does not agree with integral of the density for Jaffe potential'
assert numpy.fabs(hp.mass(tR,forceint=True)-hp.mass(tR)) < 10.**-10., 'Explicit mass does not agree with integral of the density for Hernquist potential'
assert numpy.fabs(np.mass(tR,forceint=True)-np.mass(tR)) < 10.**-10., 'Explicit mass does not agree with integral of the density for NFW potential'
assert numpy.fabs(tp.mass(tR,forceint=True)-tp.mass(tR)) < 10.**-10., 'Explicit mass does not agree with integral of the density for TwoPowerSpherical potential'
    assert numpy.fabs(tp.mass(tR,forceint=True)-tp.mass(numpy.sqrt(tR**2.-1**2.),z=1.)) < 10.**-10., 'Explicit mass does not agree with integral of the density for TwoPowerSpherical potential, for z not None'
return None
# Check that the masses are calculated correctly for axisymmetric potentials
def test_mass_axi():
#For Miyamoto-Nagai, we know that mass integrated over everything should be equal to amp, so
mp= potential.MiyamotoNagaiPotential(amp=1.)
assert numpy.fabs(mp.mass(200.,20.)-1.) < 0.01, 'Total mass of Miyamoto-Nagai potential w/ amp=1 is not equal to 1'
    #For a double-exponential disk potential, the
    # mass(R,z) = 4 pi x amp x hR^2 x hz x (1-(1+R/hR) x e^(-R/hR)) x (1-e^(-z/hz))
dp= potential.DoubleExponentialDiskPotential(amp=2.)
def dblexpmass(r,z,dp):
return 4.*numpy.pi*dp._amp*dp._hr**2.*dp._hz*(1.-(1.+r/dp._hr)*numpy.exp(-r/dp._hr))*(1.-numpy.exp(-z/dp._hz))
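    # (The 4*pi factor follows, under the assumption rho = amp * exp(-r/hR - |z|/hz), from
    #  2*pi*hR^2*(1-(1+r/hR)*e^(-r/hR)) for the radial integral times
    #  2*hz*(1-e^(-z/hz)) for the symmetric vertical integral.)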
tR,tz= 0.01,0.01
assert numpy.fabs((dp.mass(tR,tz,forceint=True)-dblexpmass(tR,tz,dp))/dblexpmass(tR,tz,dp)) < 10.**-10., 'Mass for DoubleExponentialDiskPotential incorrect'
tR,tz= 0.1,0.05
assert numpy.fabs((dp.mass(tR,tz,forceint=True)-dblexpmass(tR,tz,dp))/dblexpmass(tR,tz,dp)) < 10.**-10., 'Mass for DoubleExponentialDiskPotential incorrect'
tR,tz= 1.,0.1
assert numpy.fabs((dp.mass(tR,tz,forceint=True)-dblexpmass(tR,tz,dp))/dblexpmass(tR,tz,dp)) < 10.**-10., 'Mass for DoubleExponentialDiskPotential incorrect'
tR,tz= 5.,0.1
assert numpy.fabs((dp.mass(tR,tz,forceint=True)-dblexpmass(tR,tz,dp))/dblexpmass(tR,tz,dp)) < 10.**-10., 'Mass for DoubleExponentialDiskPotential incorrect'
tR,tz= 5.,1.
assert numpy.fabs((dp.mass(tR,tz,forceint=True)-dblexpmass(tR,tz,dp))/dblexpmass(tR,tz,dp)) < 10.**-10., 'Mass for DoubleExponentialDiskPotential incorrect'
tR,tz= 100.,100.
assert numpy.fabs((dp.mass(tR,tz,forceint=True)-dblexpmass(tR,tz,dp))/dblexpmass(tR,tz,dp)) < 10.**-6., 'Mass for DoubleExponentialDiskPotential incorrect'
#Test that nonAxi raises error
from galpy.orbit import Orbit
mop= potential.MovingObjectPotential(Orbit([1.,0.1,1.1,0.1,0.,0.]))
try: mop.mass(1.,0.)
except NotImplementedError: pass
else: raise AssertionError('mass for non-axisymmetric potential should have raised NotImplementedError, but did not')
return None
# Check that toVertical and toPlanar work
def test_toVertical_toPlanar():
#Grab all of the potentials
pots= [p for p in dir(potential)
if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
and not 'evaluate' in p)]
rmpots= ['Potential','MWPotential','MWPotential2014',
'MovingObjectPotential',
'interpRZPotential', 'linearPotential', 'planarAxiPotential',
'planarPotential', 'verticalPotential','PotentialError',
'SnapshotRZPotential','InterpSnapshotRZPotential']
if False: #_TRAVIS: #travis CI
rmpots.append('DoubleExponentialDiskPotential')
rmpots.append('RazorThinExponentialDiskPotential')
for p in rmpots:
pots.remove(p)
for p in pots:
#Setup instance of potential
try:
tclass= getattr(potential,p)
except AttributeError:
tclass= getattr(sys.modules[__name__],p)
tp= tclass()
if not hasattr(tp,'normalize'): continue #skip these
tp.normalize(1.)
if isinstance(tp,potential.linearPotential) or \
isinstance(tp,potential.planarPotential):
continue
tpp= tp.toPlanar()
assert isinstance(tpp,potential.planarPotential), \
"Conversion into planar potential of potential %s fails" % p
tlp= tp.toVertical(1.)
assert isinstance(tlp,potential.linearPotential), \
"Conversion into linear potential of potential %s fails" % p
def test_RZToplanarPotential():
lp= potential.LogarithmicHaloPotential(normalize=1.)
plp= potential.RZToplanarPotential(lp)
assert isinstance(plp,potential.planarPotential), 'Running an RZPotential through RZToplanarPotential does not produce a planarPotential'
#Check that a planarPotential through RZToplanarPotential is still planar
pplp= potential.RZToplanarPotential(lp)
assert isinstance(pplp,potential.planarPotential), 'Running a planarPotential through RZToplanarPotential does not produce a planarPotential'
try:
plp= potential.RZToplanarPotential('something else')
except potential.PotentialError:
pass
else:
raise AssertionError('Using RZToplanarPotential with a string rather than an RZPotential or a planarPotential did not raise PotentialError')
return None
# Sanity check the derivative of the rotation curve and the frequencies in the plane
def test_dvcircdR_omegac_epifreq_rl_vesc():
#Derivative of rotation curve
#LogarithmicHaloPotential: rotation everywhere flat
lp= potential.LogarithmicHaloPotential(normalize=1.)
assert lp.dvcircdR(1.)**2. < 10.**-16., \
"LogarithmicHaloPotential's rotation curve is not flat at R=1"
assert lp.dvcircdR(0.5)**2. < 10.**-16., \
"LogarithmicHaloPotential's rotation curve is not flat at R=0.5"
assert lp.dvcircdR(2.)**2. < 10.**-16., \
"LogarithmicHaloPotential's rotation curve is not flat at R=2"
#Kepler potential, vc = vc_0(R/R0)^-0.5 -> dvcdR= -0.5 vc_0 (R/R0)**-1.5
kp= potential.KeplerPotential(normalize=1.)
assert (kp.dvcircdR(1.)+0.5)**2. < 10.**-16., \
"KeplerPotential's rotation curve is not what it should be at R=1"
assert (kp.dvcircdR(0.5)+0.5**-0.5)**2. < 10.**-16., \
"KeplerPotential's rotation curve is not what it should be at R=0.5"
assert (kp.dvcircdR(2.)+0.5**2.5)**2. < 10.**-16., \
"KeplerPotential's rotation curve is not what it should be at R=2"
#Rotational frequency
assert (lp.omegac(1.)-1.)**2. < 10.**-16., \
"LogarithmicHalo's rotational frequency is off at R=1"
assert (lp.omegac(0.5)-2.)**2. < 10.**-16., \
"LogarithmicHalo's rotational frequency is off at R=0.5"
assert (lp.omegac(2.)-0.5)**2. < 10.**-16., \
"LogarithmicHalo's rotational frequency is off at R=2"
assert (lp.toPlanar().omegac(2.)-0.5)**2. < 10.**-16., \
"LogarithmicHalo's rotational frequency is off at R=2 through planarPotential"
#Epicycle frequency, flat rotation curve
assert (lp.epifreq(1.)-numpy.sqrt(2.)*lp.omegac(1.))**2. < 10.**-16., \
"LogarithmicHalo's epicycle and rotational frequency are inconsistent with kappa = sqrt(2) Omega at R=1"
assert (lp.epifreq(0.5)-numpy.sqrt(2.)*lp.omegac(0.5))**2. < 10.**-16., \
"LogarithmicHalo's epicycle and rotational frequency are inconsistent with kappa = sqrt(2) Omega at R=0.5"
assert (lp.epifreq(2.0)-numpy.sqrt(2.)*lp.omegac(2.0))**2. < 10.**-16., \
"LogarithmicHalo's epicycle and rotational frequency are inconsistent with kappa = sqrt(2) Omega at R=2"
assert (lp.toPlanar().epifreq(2.0)-numpy.sqrt(2.)*lp.omegac(2.0))**2. < 10.**-16., \
"LogarithmicHalo's epicycle and rotational frequency are inconsistent with kappa = sqrt(2) Omega at R=, through planar2"
#Epicycle frequency, Kepler
assert (kp.epifreq(1.)-kp.omegac(1.))**2. < 10.**-16., \
"KeplerPotential's epicycle and rotational frequency are inconsistent with kappa = Omega at R=1"
assert (kp.epifreq(0.5)-kp.omegac(0.5))**2. < 10.**-16., \
"KeplerPotential's epicycle and rotational frequency are inconsistent with kappa = Omega at R=0.5"
assert (kp.epifreq(2.)-kp.omegac(2.))**2. < 10.**-16., \
"KeplerPotential's epicycle and rotational frequency are inconsistent with kappa = Omega at R=2"
#Check radius of circular orbit, Kepler
assert (kp.rl(1.)-1.)**2. < 10.**-16., \
"KeplerPotential's radius of a circular orbit is wrong at Lz=1."
assert (kp.rl(0.5)-1./4.)**2. < 10.**-16., \
"KeplerPotential's radius of a circular orbit is wrong at Lz=0.5"
assert (kp.rl(2.)-4.)**2. < 10.**-16., \
"KeplerPotential's radius of a circular orbit is wrong at Lz=2."
#Check radius of circular orbit, PowerSphericalPotential with close-to-flat rotation curve
pp= potential.PowerSphericalPotential(alpha=1.8,normalize=1.)
assert (pp.rl(1.)-1.)**2. < 10.**-16., \
"PowerSphericalPotential's radius of a circular orbit is wrong at Lz=1."
assert (pp.rl(0.5)-0.5**(10./11.))**2. < 10.**-16., \
"PowerSphericalPotential's radius of a circular orbit is wrong at Lz=0.5"
assert (pp.rl(2.)-2.**(10./11.))**2. < 10.**-16., \
"PowerSphericalPotential's radius of a circular orbit is wrong at Lz=2."
#Check radius of circular orbit, PowerSphericalPotential with steeper rotation curve
pp= potential.PowerSphericalPotential(alpha=0.5,normalize=1.)
assert (pp.rl(1.)-1.)**2. < 10.**-16., \
"PowerSphericalPotential's radius of a circular orbit is wrong at Lz=1."
assert (pp.rl(0.0625)-0.0625**(4./7.))**2. < 10.**-16., \
"PowerSphericalPotential's radius of a circular orbit is wrong at Lz=0.0625"
assert (pp.rl(16.)-16.**(4./7.))**2. < 10.**-16., \
"PowerSphericalPotential's radius of a circular orbit is wrong at Lz=16."
#Escape velocity of Kepler potential
assert (kp.vesc(1.)**2.-2.)**2. < 10.**-16., \
"KeplerPotential's escape velocity is wrong at R=1"
assert (kp.vesc(0.5)**2.-2.*kp.vcirc(0.5)**2.)**2. < 10.**-16., \
"KeplerPotential's escape velocity is wrong at R=0.5"
assert (kp.vesc(2.)**2.-2.*kp.vcirc(2.)**2.)**2. < 10.**-16., \
"KeplerPotential's escape velocity is wrong at R=2"
assert (kp.toPlanar().vesc(2.)**2.-2.*kp.vcirc(2.)**2.)**2. < 10.**-16., \
"KeplerPotential's escape velocity is wrong at R=2, through planar"
# W/ different interface
assert (kp.vcirc(1.)-potential.vcirc(kp,1.))**2. < 10.**-16., \
"KeplerPotential's circular velocity does not agree between kp.vcirc and vcirc(kp)"
assert (kp.vcirc(1.)-potential.vcirc(kp.toPlanar(),1.))**2. < 10.**-16., \
"KeplerPotential's circular velocity does not agree between kp.vcirc and vcirc(kp.toPlanar)"
assert (kp.vesc(1.)-potential.vesc(kp,1.))**2. < 10.**-16., \
"KeplerPotential's escape velocity does not agree between kp.vesc and vesc(kp)"
assert (kp.vesc(1.)-potential.vesc(kp.toPlanar(),1.))**2. < 10.**-16., \
"KeplerPotential's escape velocity does not agree between kp.vesc and vesc(kp.toPlanar)"
return None
def test_vcirc_vesc_special():
#Test some special cases of vcirc and vesc
dp= potential.DehnenBarPotential()
try:
potential.plotRotcurve([dp])
except AttributeError: #should be raised
pass
else:
raise AssertionError("plotRotcurve for non-axisymmetric potential should have raised AttributeError, but didn't")
try:
potential.plotEscapecurve([dp])
except AttributeError: #should be raised
pass
else:
raise AssertionError("plotEscapecurve for non-axisymmetric potential should have raised AttributeError, but didn't")
lp= potential.LogarithmicHaloPotential(normalize=1.)
assert numpy.fabs(potential.calcRotcurve(lp,0.8)-lp.vcirc(0.8)) < 10.**-16., 'Circular velocity calculated with calcRotcurve not the same as that calculated with vcirc'
assert numpy.fabs(potential.calcEscapecurve(lp,0.8)-lp.vesc(0.8)) < 10.**-16., 'Escape velocity calculated with calcEscapecurve not the same as that calculated with vesc'
return None
def test_lindbladR():
lp= potential.LogarithmicHaloPotential(normalize=1.)
assert numpy.fabs(lp.lindbladR(0.5,'corotation')-2.) < 10.**-10., 'Location of co-rotation resonance is wrong for LogarithmicHaloPotential'
assert numpy.fabs(lp.omegac(lp.lindbladR(0.5,2))-2./(2.-numpy.sqrt(2.))*0.5) < 10.**-14., 'Location of m=2 resonance is wrong for LogarithmicHaloPotential'
assert numpy.fabs(lp.omegac(lp.lindbladR(0.5,-2))+2./(-2.-numpy.sqrt(2.))*0.5) < 10.**-14., 'Location of m=-2 resonance is wrong for LogarithmicHaloPotential'
#Also through general interface
assert numpy.fabs(lp.omegac(potential.lindbladR(lp,0.5,-2))+2./(-2.-numpy.sqrt(2.))*0.5) < 10.**-14., 'Location of m=-2 resonance is wrong for LogarithmicHaloPotential'
#Also for planar
assert numpy.fabs(lp.omegac(lp.toPlanar().lindbladR(0.5,-2))+2./(-2.-numpy.sqrt(2.))*0.5) < 10.**-14., 'Location of m=-2 resonance is wrong for LogarithmicHaloPotential'
#Test non-existent ones
mp= potential.MiyamotoNagaiPotential(normalize=1.,a=0.3)
assert mp.lindbladR(3.,2) is None, 'MiyamotoNagai w/ OmegaP=3 should not have an inner m=2 LindbladR'
assert mp.lindbladR(6.,'corotation') is None, 'MiyamotoNagai w/ OmegaP=6 should not have a corotation LindbladR'
#Test error
try:
lp.lindbladR(0.5,'wrong resonance')
except IOError:
pass
else:
raise AssertionError("lindbladR w/ wrong m input should have raised IOError, but didn't")
return None
def test_vterm():
lp= potential.LogarithmicHaloPotential(normalize=1.)
assert numpy.fabs(lp.vterm(30.,deg=True)-0.5*(lp.omegac(0.5)-1.)) < 10.**-10., 'vterm for LogarithmicHaloPotential at l=30 is incorrect'
assert numpy.fabs(lp.vterm(numpy.pi/3.,deg=False)-numpy.sqrt(3.)/2.*(lp.omegac(numpy.sqrt(3.)/2.)-1.)) < 10.**-10., 'vterm for LogarithmicHaloPotential at l=60 in rad is incorrect'
#Also using general interface
assert numpy.fabs(potential.vterm(lp,30.,deg=True)-0.5*(lp.omegac(0.5)-1.)) < 10.**-10., 'vterm for LogarithmicHaloPotential at l=30 is incorrect'
assert numpy.fabs(potential.vterm(lp,numpy.pi/3.,deg=False)-numpy.sqrt(3.)/2.*(lp.omegac(numpy.sqrt(3.)/2.)-1.)) < 10.**-10., 'vterm for LogarithmicHaloPotential at l=60 in rad is incorrect'
return None
def test_flattening():
#Simple tests: LogarithmicHalo
qs= [0.75,1.,1.25]
for q in qs:
lp= potential.LogarithmicHaloPotential(normalize=1.,q=q)
assert (lp.flattening(1.,0.001)-q)**2. < 10.**-16., \
"Flattening of LogarithmicHaloPotential w/ q= %f is not equal to q at (R,z) = (1.,0.001)" % q
assert (lp.flattening(1.,0.1)-q)**2. < 10.**-16., \
"Flattening of LogarithmicHaloPotential w/ q= %f is not equal to q at (R,z) = (1.,0.1)" % q
assert (lp.flattening(0.5,0.001)-q)**2. < 10.**-16., \
"Flattening of LogarithmicHaloPotential w/ q= %f is not equal to q at (R,z) = (0.5,0.001)" % q
assert (lp.flattening(0.5,0.1)-q)**2. < 10.**-16., \
"Flattening of LogarithmicHaloPotential w/ q= %f is not equal to q at (R,z) = (0.5,0.1)" % q
#One test with the general interface
assert (potential.flattening(lp,0.5,0.1)-q)**2. < 10.**-16., \
"Flattening of LogarithmicHaloPotential w/ q= %f is not equal to q at (R,z) = (0.5,0.1), through potential.flattening" % q
#Check some spherical potentials
kp= potential.KeplerPotential(normalize=1.)
assert (kp.flattening(1.,0.02)-1.)**2. < 10.**-16., \
"Flattening of KeplerPotential is not equal to 1 at (R,z) = (1.,0.02)"
np= potential.NFWPotential(normalize=1.,a=5.)
assert (np.flattening(1.,0.02)-1.)**2. < 10.**-16., \
"Flattening of NFWPotential is not equal to 1 at (R,z) = (1.,0.02)"
hp= potential.HernquistPotential(normalize=1.,a=5.)
assert (hp.flattening(1.,0.02)-1.)**2. < 10.**-16., \
"Flattening of HernquistPotential is not equal to 1 at (R,z) = (1.,0.02)"
#Disk potentials should be oblate everywhere
mp= potential.MiyamotoNagaiPotential(normalize=1.,a=0.5,b=0.05)
assert mp.flattening(1.,0.1) <= 1., \
"Flattening of MiyamotoNagaiPotential w/ a=0.5, b=0.05 is > 1 at (R,z) = (1.,0.1)"
assert mp.flattening(1.,2.) <= 1., \
"Flattening of MiyamotoNagaiPotential w/ a=0.5, b=0.05 is > 1 at (R,z) = (1.,2.)"
assert mp.flattening(3.,3.) <= 1., \
"Flattening of MiyamotoNagaiPotential w/ a=0.5, b=0.05 is > 1 at (R,z) = (3.,3.)"
return None
def test_verticalfreq():
#For spherical potentials, vertical freq should be equal to rotational freq
lp= potential.LogarithmicHaloPotential(normalize=1.,q=1.)
kp= potential.KeplerPotential(normalize=1.)
np= potential.NFWPotential(normalize=1.)
bp= potential.BurkertPotential(normalize=1.)
rs= numpy.linspace(0.2,2.,21)
for r in rs:
assert numpy.fabs(lp.verticalfreq(r)-lp.omegac(r)) < 10.**-10., \
'Verticalfreq for spherical potential does not equal rotational freq'
assert numpy.fabs(kp.verticalfreq(r)-kp.omegac(r)) < 10.**-10., \
'Verticalfreq for spherical potential does not equal rotational freq'
#Through general interface
assert numpy.fabs(potential.verticalfreq(np,r)-np.omegac(r)) < 10.**-10., \
'Verticalfreq for spherical potential does not equal rotational freq'
assert numpy.fabs(potential.verticalfreq([bp],r)-bp.omegac(r)) < 10.**-10., \
'Verticalfreq for spherical potential does not equal rotational freq'
#For Double-exponential disk potential, epi^2+vert^2-2*rot^2 =~ 0 (explicitly, because we use a Kepler potential)
if True: #not _TRAVIS:
dp= potential.DoubleExponentialDiskPotential(normalize=1.,hr=0.05,hz=0.01)
assert numpy.fabs(dp.epifreq(1.)**2.+dp.verticalfreq(1.)**2.-2.*dp.omegac(1.)**2.) < 10.**-6., 'epi^2+vert^2-2*rot^2 !=~ 0 for dblexp potential, very far from center'
#Closer to the center, this becomes the Poisson eqn.
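#(Poisson's equation for an axisymmetric potential gives kappa^2 + nu^2 - 2 Omega^2 = 4 pi G rho,
# so the combination vanishes where the density is negligible; galpy's natural units have G = 1)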
assert numpy.fabs(dp.epifreq(.125)**2.+dp.verticalfreq(.125)**2.-2.*dp.omegac(.125)**2.-4.*numpy.pi*dp.dens(0.125,0.))/4./numpy.pi/dp.dens(0.125,0.) < 10.**-3., 'epi^2+vert^2-2*rot^2 !=~ dens for dblexp potential'
return None
def test_planar_nonaxi():
dp= potential.DehnenBarPotential()
try:
potential.evaluateplanarPotentials(1.,dp)
except potential.PotentialError:
pass
else:
raise AssertionError('evaluateplanarPotentials for non-axisymmetric potential w/o specifying phi did not raise PotentialError')
try:
potential.evaluateplanarRforces(1.,dp)
except potential.PotentialError:
pass
else:
raise AssertionError('evaluateplanarRforces for non-axisymmetric potential w/o specifying phi did not raise PotentialError')
try:
potential.evaluateplanarphiforces(1.,dp)
except potential.PotentialError:
pass
else:
raise AssertionError('evaluateplanarphiforces for non-axisymmetric potential w/o specifying phi did not raise PotentialError')
try:
potential.evaluateplanarR2derivs(1.,dp)
except potential.PotentialError:
pass
else:
raise AssertionError('evaluateplanarR2derivs for non-axisymmetric potential w/o specifying phi did not raise PotentialError')
return None
def test_ExpDisk_special():
#Test some special cases for the ExponentialDisk potentials
#if _TRAVIS: return None
#Test that array input works
dp= potential.DoubleExponentialDiskPotential(normalize=1.)
rs= numpy.linspace(0.1,2.,11)
zs= numpy.ones_like(rs)*0.1
#Potential itself
dpevals= numpy.array([dp(r,z) for (r,z) in zip(rs,zs)])
assert numpy.all(numpy.fabs(dp(rs,zs)-dpevals) < 10.**-10.), \
'DoubleExponentialDiskPotential evaluation does not work as expected for array inputs'
#Rforce
dpevals= numpy.array([dp.Rforce(r,z) for (r,z) in zip(rs,zs)])
assert numpy.all(numpy.fabs(dp.Rforce(rs,zs)-dpevals) < 10.**-10.), \
'DoubleExponentialDiskPotential Rforce evaluation does not work as expected for array inputs'
#zforce
dpevals= numpy.array([dp.zforce(r,z) for (r,z) in zip(rs,zs)])
assert numpy.all(numpy.fabs(dp.zforce(rs,zs)-dpevals) < 10.**-10.), \
'DoubleExponentialDiskPotential zforce evaluation does not work as expected for array inputs'
#R2deriv
dpevals= numpy.array([dp.R2deriv(r,z) for (r,z) in zip(rs,zs)])
assert numpy.all(numpy.fabs(dp.R2deriv(rs,zs)-dpevals) < 10.**-10.), \
'DoubleExponentialDiskPotential R2deriv evaluation does not work as expected for array inputs'
#z2deriv
dpevals= numpy.array([dp.z2deriv(r,z) for (r,z) in zip(rs,zs)])
assert numpy.all(numpy.fabs(dp.z2deriv(rs,zs)-dpevals) < 10.**-10.), \
'DoubleExponentialDiskPotential z2deriv evaluation does not work as expected for array inputs'
#Rzderiv
dpevals= numpy.array([dp.Rzderiv(r,z) for (r,z) in zip(rs,zs)])
assert numpy.all(numpy.fabs(dp.Rzderiv(rs,zs)-dpevals) < 10.**-10.), \
'DoubleExponentialDiskPotential Rzderiv evaluation does not work as expected for array inputs'
#Check the PotentialError for z=/=0 evaluation of R2deriv of RazorThinDiskPotential
rp= potential.RazorThinExponentialDiskPotential(normalize=1.)
try: rp.R2deriv(1.,0.1)
except potential.PotentialError: pass
else: raise AssertionError("RazorThinExponentialDiskPotential's R2deriv did not raise AttributeError for z=/= 0 input")
return None
def test_MovingObject_density():
mp= mockMovingObjectPotential()
#Just test that the density far away from the object is close to zero
assert numpy.fabs(mp.dens(5.,0.)) < 10.**-8., 'Density far away from MovingObject is not close to zero'
return None
# Test that MWPotential is what it's supposed to be
def test_MWPotential2014():
pot= potential.MWPotential2014
V0, R0= 220., 8.
#Check the parameters of the bulge
assert pot[0].rc == 1.9/R0, "MWPotential2014's bulge cut-off radius is incorrect"
assert pot[0].alpha == 1.8, "MWPotential2014's bulge power-law exponent is incorrect"
assert numpy.fabs(pot[0].Rforce(1.,0.)+0.05) < 10.**-14., "MWPotential2014's bulge amplitude is incorrect"
#Check the parameters of the disk
assert numpy.fabs(pot[1]._a-3./R0) < 10.**-14., "MWPotential2014's disk scale length is incorrect"
assert numpy.fabs(pot[1]._b-0.28/R0) < 10.**-14., "MWPotential2014's disk scale height is incorrect"
assert numpy.fabs(pot[1].Rforce(1.,0.)+0.60) < 10.**-14., "MWPotential2014's disk amplitude is incorrect"
#Check the parameters of the halo
assert numpy.fabs(pot[2].a-16./R0) < 10.**-14., "MWPotential2014's halo scale radius is incorrect"
assert numpy.fabs(pot[2].Rforce(1.,0.)+0.35) < 10.**-14., "MWPotential2014's halo amplitude is incorrect"
return None
# Test that the virial setup of NFW works
def test_NFW_virialsetup_wrtmeanmatter():
from galpy.util import bovy_conversion
H, Om, overdens, wrtcrit= 71., 0.32, 201., False
ro, vo= 220., 8.
conc, mvir= 12., 1.1
np= potential.NFWPotential(conc=conc,mvir=mvir,vo=vo,ro=ro,
H=H,Om=Om,overdens=overdens,
wrtcrit=wrtcrit)
assert numpy.fabs(conc-np.conc(vo,ro,H=H,Om=Om,overdens=overdens,
wrtcrit=wrtcrit)) < 10.**-6., "NFWPotential virial setup's concentration does not work"
assert numpy.fabs(mvir*100./bovy_conversion.mass_in_1010msol(vo,ro)\
-np.mvir(vo,ro,H=H,Om=Om,overdens=overdens,
wrtcrit=wrtcrit)) < 10.**-6., "NFWPotential virial setup's virial mass does not work"
return None
def test_NFW_virialsetup_wrtcrit():
from galpy.util import bovy_conversion
H, Om, overdens, wrtcrit= 71., 0.32, 201., True
ro, vo= 220., 8.
conc, mvir= 12., 1.1
np= potential.NFWPotential(conc=conc,mvir=mvir,vo=vo,ro=ro,
H=H,Om=Om,overdens=overdens,
wrtcrit=wrtcrit)
assert numpy.fabs(conc-np.conc(vo,ro,H=H,Om=Om,overdens=overdens,
wrtcrit=wrtcrit)) < 10.**-6., "NFWPotential virial setup's concentration does not work"
assert numpy.fabs(mvir*100./bovy_conversion.mass_in_1010msol(vo,ro)\
-np.mvir(vo,ro,H=H,Om=Om,overdens=overdens,
wrtcrit=wrtcrit)) < 10.**-6., "NFWPotential virial setup's virial mass does not work"
return None
def test_conc_attributeerror():
pp= potential.PowerSphericalPotential(normalize=1.)
#This potential doesn't have a scale, so we cannot calculate the concentration
try: pp.conc(220.,8.)
except AttributeError: pass
else: raise AssertionError('conc function for potential w/o scale did not raise AttributeError')
return None
def test_mvir_attributeerror():
mp= potential.MiyamotoNagaiPotential(normalize=1.)
#Don't think I will ever implement the virial radius for this
try: mp.mvir(220.,8.)
except AttributeError: pass
else: raise AssertionError('mvir function for potential w/o rvir did not raise AttributeError')
return None
def test_LinShuReductionFactor():
#Test that the LinShuReductionFactor is implemented correctly, by comparing to figure 1 in Lin & Shu (1966)
from galpy.potential import LinShuReductionFactor, \
LogarithmicHaloPotential, omegac, epifreq
lp= LogarithmicHaloPotential(normalize=1.) #work in flat rotation curve
#nu^2 = 0.2, x=4 for m=2,sigmar=0.1
# w/ nu = m(OmegaP-omegac)/epifreq, x=sr^2*k^2/epifreq^2
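# equivalently: nu^2 = 0.2 => OmegaP = omegac + sqrt(0.2)*epifreq/m, and x = 4 => k = sqrt(4)*epifreq/sr,
# which is what the two assignments below evaluate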
R,m,sr = 0.9,2.,0.1
tepi, tomegac= epifreq(lp,R), omegac(lp,R)
OmegaP= tepi*numpy.sqrt(0.2)/m+tomegac #leads to nu^2 = 0.2
k= numpy.sqrt(4.)*tepi/sr
assert numpy.fabs(LinShuReductionFactor(lp,R,sr,m=m,k=k,OmegaP=OmegaP)-0.18) < 0.01, 'LinShuReductionFactor does not agree w/ Figure 1 from Lin & Shu (1966)'
#nu^2 = 0.8, x=10
OmegaP= tepi*
|
numpy.sqrt(0.8)
|
numpy.sqrt
|
# --------------------------------------------------------------------------------}
# --- Info
# --------------------------------------------------------------------------------{
# Tools for fatigue analysis
#
# Taken from:
# repository: wetb
# package: wetb.fatigue_tools,
# institution: DTU wind energy, Denmark
# main author: mmpe
'''
Created on 04/03/2013
@author: mmpe
'eq_load' calculates equivalent loads using one of the two rain flow counting methods
'cycle_matrix' calculates a matrix of cycles (binned on amplitude and mean value)
'eq_load_and_cycles' is used to calculate eq_loads of multiple time series (e.g. lifetime equivalent load)
The methods use the rainflow counting routines (see documentation at the top of each method):
- 'rainflow_windap': (Described in "Recommended Practices for Wind Turbine Testing - 3. Fatigue Loads",
2. edition 1990, Appendix A)
or
- 'rainflow_astm' (based on the c-implementation by <NAME> found at the MATLAB Central File Exchange
http://www.mathworks.com/matlabcentral/fileexchange/3026)
'''
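# A minimal usage sketch (assuming a 1D load signal; the example signal matches the docstrings below):
#
#   import numpy as np
#   signal = np.array([-2.0, 0.0, 1.0, 0.0, -3.0, 0.0, 5.0, 0.0, -1.0, 0.0,
#                      3.0, 0.0, -4.0, 0.0, 4.0, 0.0, -2.0])
#   # damage equivalent loads for Wohler exponents 3 and 10, with neq equivalent cycles
#   leq = eq_load(signal, m=[3, 10], neq=600, rainflow_func=rainflow_windap)
#   # Markow matrix of full cycles binned on amplitude and mean value
#   cycles, ampl_bin_mean, ampl_edges, mean_bin_mean, mean_edges = cycle_matrix(signal, 10, 10)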
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from future import standard_library
import warnings
standard_library.install_aliases()
import numpy as np
__all__ = ['rainflow_astm', 'rainflow_windap','eq_load','eq_load_and_cycles','cycle_matrix','cycle_matrix2']
def check_signal(signal):
# check input data validity
if not type(signal).__name__ == 'ndarray':
raise TypeError('signal must be ndarray, not: ' + type(signal).__name__)
elif len(signal.shape) not in (1, 2):
raise TypeError('signal must be 1D or 2D, not: ' + str(len(signal.shape)))
if len(signal.shape) == 2:
if signal.shape[1] > 1:
raise TypeError('signal must have one column only, not: ' + str(signal.shape[1]))
if np.min(signal) == np.max(signal):
raise TypeError("Signal contains no variation")
def rainflow_windap(signal, levels=255., thresshold=(255 / 50)):
"""Windap equivalent rainflow counting
Calculate the amplitude and mean values of half cycles in signal
The algorithm used by this routine is implemented directly as described in
"Recommended Practices for Wind Turbine Testing - 3. Fatigue Loads", 2. edition 1990, Appendix A
Parameters
----------
Signal : array-like
The raw signal
levels : int, optional
The signal is discretized into this number of levels.
255 is equivalent to the implementation in Windap
thresshold : int, optional
Cycles smaller than this thresshold are ignored
255/50 is equivalent to the implementation in Windap
Returns
-------
ampl : array-like
Peak to peak amplitudes of the half cycles
mean : array-like
Mean values of the half cycles
Examples
--------
>>> signal = np.array([-2.0, 0.0, 1.0, 0.0, -3.0, 0.0, 5.0, 0.0, -1.0, 0.0, 3.0, 0.0, -4.0, 0.0, 4.0, 0.0, -2.0])
>>> ampl, mean = rainflow_windap(signal)
"""
check_signal(signal)
#type <double> is required by <find_extreme> and <rainflow>
signal = signal.astype(np.double)
if np.all(np.isnan(signal)):
return None
offset = np.nanmin(signal)
signal -= offset
if np.nanmax(signal) > 0:
gain = np.nanmax(signal) / levels
signal = signal / gain
signal = np.round(signal).astype(int)
# If possible the module is compiled using cython otherwise the python implementation is used
#Convert to list of local minima/maxima where difference > thresshold
sig_ext = peak_trough(signal, thresshold)
#rainflow count
ampl_mean = pair_range_amplitude_mean(sig_ext)
ampl_mean = np.array(ampl_mean)
ampl_mean = np.round(ampl_mean / thresshold) * gain * thresshold
ampl_mean[:, 1] += offset
return ampl_mean.T
def rainflow_astm(signal):
"""Matlab equivalent rainflow counting
Calculate the amplitude and mean values of half cycles in signal
This implementation is based on the c-implementation by <NAME> found at
the MATLAB Central File Exchange http://www.mathworks.com/matlabcentral/fileexchange/3026
Parameters
----------
Signal : array-like
The raw signal
Returns
-------
ampl : array-like
peak to peak amplitudes of the half cycles (note that the matlab implementation
uses peak amplitude instead of peak to peak)
mean : array-like
Mean values of the half cycles
Examples
--------
>>> signal = np.array([-2.0, 0.0, 1.0, 0.0, -3.0, 0.0, 5.0, 0.0, -1.0, 0.0, 3.0, 0.0, -4.0, 0.0, 4.0, 0.0, -2.0])
>>> ampl, mean = rainflow_astm(signal)
"""
check_signal(signal)
# type <double> is required by <find_extreme> and <rainflow>
signal = signal.astype(np.double)
# Import find extremes and rainflow.
# If possible the module is compiled using cython otherwise the python implementation is used
# Remove points which are not local minima/maxima
sig_ext = find_extremes(signal)
# rainflow count
ampl_mean = np.array(rainflowcount(sig_ext))
return np.array(ampl_mean).T
def eq_load(signals, no_bins=46, m=[3, 4, 6, 8, 10, 12], neq=1, rainflow_func=rainflow_windap):
"""Equivalent load calculation
Calculate the equivalent loads for a list of Wohler exponent and number of equivalent loads
Parameters
----------
signals : list of tuples or array_like
- if list of tuples: list must have format [(sig1_weight, sig1),(sig2_weight, sig2),...] where\n
- sigx_weight is the weight of signal x\n
- sigx is signal x\n
- if array_like: The signal
no_bins : int, optional
Number of bins in rainflow count histogram
m : int, float or array-like, optional
Wohler exponent (default is [3, 4, 6, 8, 10, 12])
neq : int, float or array-like, optional
The equivalent number of load cycles (default is 1, but normally the time duration in seconds is used)
rainflow_func : {rainflow_windap, rainflow_astm}, optional
The rainflow counting function to use (default is rainflow_windap)
Returns
-------
eq_loads : array-like
List of lists of equivalent loads for the corresponding equivalent number(s) and Wohler exponents
Examples
--------
>>> signal = np.array([-2.0, 0.0, 1.0, 0.0, -3.0, 0.0, 5.0, 0.0, -1.0, 0.0, 3.0, 0.0, -4.0, 0.0, 4.0, 0.0, -2.0])
>>> eq_load(signal, no_bins=50, neq=[1, 17], m=[3, 4, 6], rainflow_func=rainflow_windap)
[[10.311095426959747, 9.5942535021382174, 9.0789213365013932], # neq = 1, m=[3,4,6]
[4.010099657859783, 4.7249689509841746, 5.6618639965313005]], # neq = 17, m=[3,4,6]
eq_load([(.4, signal), (.6, signal)], no_bins=50, neq=[1, 17], m=[3, 4, 6], rainflow_func=rainflow_windap)
[[10.311095426959747, 9.5942535021382174, 9.0789213365013932], # neq = 1, m=[3,4,6]
[4.010099657859783, 4.7249689509841746, 5.6618639965313005]], # neq = 17, m=[3,4,6]
"""
try:
return eq_load_and_cycles(signals, no_bins, m, neq, rainflow_func)[0]
except TypeError:
return [[np.nan] * len(np.atleast_1d(m))] * len(np.atleast_1d(neq))
def eq_load_and_cycles(signals, no_bins=46, m=[3, 4, 6, 8, 10, 12], neq=[10 ** 6, 10 ** 7, 10 ** 8], rainflow_func=rainflow_windap):
"""Calculate combined fatigue equivalent load
Parameters
----------
signals : list of tuples or array_like
- if list of tuples: list must have format [(sig1_weight, sig1),(sig2_weight, sig2),...] where\n
- sigx_weight is the weight of signal x\n
- sigx is signal x\n
- if array_like: The signal
no_bins : int, optional
Number of bins for rainflow counting
m : int, float or array-like, optional
Wohler exponent (default is [3, 4, 6, 8, 10, 12])
neq : int or array-like, optional
Equivalent number, default is [10^6, 10^7, 10^8]
rainflow_func : {rainflow_windap, rainflow_astm}, optional
The rainflow counting function to use (default is rainflow_windap)
Returns
-------
eq_loads : array-like
List of lists of equivalent loads for the corresponding equivalent number(s) and Wohler exponents
cycles : array_like
2d array with shape = (no_ampl_bins, 1)
ampl_bin_mean : array_like
mean amplitude of the bins
ampl_bin_edges : array_like
Edges of the amplitude bins
"""
cycles, ampl_bin_mean, ampl_bin_edges, _, _ = cycle_matrix(signals, no_bins, 1, rainflow_func)
if 0: #to be similar to windap
ampl_bin_mean = (ampl_bin_edges[:-1] + ampl_bin_edges[1:]) / 2
cycles, ampl_bin_mean = cycles.flatten(), ampl_bin_mean.flatten()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
eq_loads = [[((np.nansum(cycles * ampl_bin_mean ** _m) / _neq) ** (1. / _m)) for _m in np.atleast_1d(m)] for _neq in np.atleast_1d(neq)]
return eq_loads, cycles, ampl_bin_mean, ampl_bin_edges
def cycle_matrix(signals, ampl_bins=10, mean_bins=10, rainflow_func=rainflow_windap):
"""Markow load cycle matrix
Calculate the Markow load cycle matrix
Parameters
----------
Signals : array-like or list of tuples
- if array-like, the raw signal\n
- if list of tuples, list of (weight, signal), e.g. [(0.1,sig1), (0.8,sig2), (.1,sig3)]\n
ampl_bins : int or array-like, optional
if int, Number of amplitude value bins (default is 10)
if array-like, the bin edges for amplitude
mean_bins : int or array-like, optional
if int, Number of mean value bins (default is 10)
if array-like, the bin edges for mean values
rainflow_func : {rainflow_windap, rainflow_astm}, optional
The rainflow counting function to use (default is rainflow_windap)
Returns
-------
cycles : ndarray, shape(ampl_bins, mean_bins)
A bi-dimensional histogram of load cycles(full cycles). Amplitudes are\
histogrammed along the first dimension and mean values are histogrammed along the second dimension.
ampl_bin_mean : ndarray, shape(ampl_bins,)
The average cycle amplitude of the bins
ampl_edges : ndarray, shape(ampl_bins+1,)
The amplitude bin edges
mean_bin_mean : ndarray, shape(mean_bins,)
The average cycle mean of the bins
mean_edges : ndarray, shape(mean_bins+1,)
The mean bin edges
Examples
--------
>>> signal = np.array([-2.0, 0.0, 1.0, 0.0, -3.0, 0.0, 5.0, 0.0, -1.0, 0.0, 3.0, 0.0, -4.0, 0.0, 4.0, 0.0, -2.0])
>>> cycles, ampl_bin_mean, ampl_edges, mean_bin_mean, mean_edges = cycle_matrix(signal)
>>> cycles, ampl_bin_mean, ampl_edges, mean_bin_mean, mean_edges = cycle_matrix([(.4, signal), (.6,signal)])
"""
if isinstance(signals[0], tuple):
weights, ampls, means = np.array([(np.zeros_like(ampl)+weight,ampl,mean) for weight, signal in signals for ampl,mean in rainflow_func(signal[:]).T], dtype=np.float64).T
else:
ampls, means = rainflow_func(signals[:])
weights = np.ones_like(ampls)
if isinstance(ampl_bins, int):
ampl_bins = np.linspace(0, 1, num=ampl_bins + 1) * ampls[weights>0].max()
cycles, ampl_edges, mean_edges = np.histogram2d(ampls, means, [ampl_bins, mean_bins], weights=weights)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ampl_bin_sum = np.histogram2d(ampls, means, [ampl_bins, mean_bins], weights=weights * ampls)[0]
ampl_bin_mean = np.nanmean(ampl_bin_sum / np.where(cycles,cycles,np.nan),1)
mean_bin_sum =
|
np.histogram2d(ampls, means, [ampl_bins, mean_bins], weights=weights * means)
|
numpy.histogram2d
|
import pytest
import mxnet as mx
import numpy as np
from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples
from mxfusion.components.distributions.gp.kernels import RBF, Linear, Bias, White
from mxfusion.util.testutils import numpy_array_reshape, prepare_mxnet_array
# These test cases depend on GPy. Put them in a try/except.
try:
import GPy
def gpy_kernel_test(X, X_isSamples, X2, X2_isSamples, kernel_params, num_samples, dtype, mf_kernel_create, gpy_kernel_create):
X_mx = prepare_mxnet_array(X, X_isSamples, dtype)
X2_mx = prepare_mxnet_array(X2, X2_isSamples, dtype)
kern = mf_kernel_create()
kernel_params_mx = {kern.name + '_' + k:
prepare_mxnet_array(v[0], v[1], dtype) for k, v in
kernel_params.items()}
K_XX_mx = kern.K(mx.nd, X=X_mx, **kernel_params_mx)
K_XX2_mx = kern.K(mx.nd, X=X_mx, X2=X2_mx, **kernel_params_mx)
Kdiag_mx = kern.Kdiag(mx.nd, X=X_mx, **kernel_params_mx)
kern_gpy = gpy_kernel_create()
K_XX_gpy, K_XX2_gpy, Kdiag_gpy = [], [], []
for i in range(num_samples):
X_i = X[i] if X_isSamples else X
X2_i = X2[i] if X2_isSamples else X2
kernel_params_gpy = {k: v[0][i] if v[1] else v[0] for k, v in
kernel_params.items()}
for k, v in kernel_params_gpy.items():
setattr(kern_gpy, k, v)
K_XX_gpy.append(np.expand_dims(kern_gpy.K(X_i), axis=0))
K_XX2_gpy.append(np.expand_dims(kern_gpy.K(X_i, X2_i), axis=0))
Kdiag_gpy.append(np.expand_dims(kern_gpy.Kdiag(X_i), axis=0))
K_XX_gpy = np.vstack(K_XX_gpy)
K_XX2_gpy = np.vstack(K_XX2_gpy)
Kdiag_gpy = np.vstack(Kdiag_gpy)
assert np.issubdtype(K_XX_mx.dtype, dtype)
assert np.issubdtype(K_XX2_mx.dtype, dtype)
assert np.issubdtype(Kdiag_mx.dtype, dtype)
assert np.allclose(K_XX_gpy, K_XX_mx.asnumpy())
assert np.allclose(K_XX2_gpy, K_XX2_mx.asnumpy())
assert np.allclose(Kdiag_gpy, Kdiag_mx.asnumpy())
def gpy_comb_kernel_test(X, X_isSamples, X2, X2_isSamples, kernel_params, num_samples, dtype, mf_kernel_create, gpy_kernel_create):
X_mx = prepare_mxnet_array(X, X_isSamples, dtype)
X2_mx = prepare_mxnet_array(X2, X2_isSamples, dtype)
kern = mf_kernel_create()
kernel_params_mx = {kern.name + '_' + k + '_' + k2:
prepare_mxnet_array(v2[0], v2[1], dtype) for k, v in
kernel_params.items() for k2, v2 in v.items()}
K_XX_mx = kern.K(mx.nd, X=X_mx, **kernel_params_mx)
K_XX2_mx = kern.K(mx.nd, X=X_mx, X2=X2_mx, **kernel_params_mx)
Kdiag_mx = kern.Kdiag(mx.nd, X=X_mx, **kernel_params_mx)
kern_gpy = gpy_kernel_create()
K_XX_gpy, K_XX2_gpy, Kdiag_gpy = [], [], []
for i in range(num_samples):
X_i = X[i] if X_isSamples else X
X2_i = X2[i] if X2_isSamples else X2
for k, v in kernel_params.items():
kern_1 = getattr(kern_gpy, k)
for k2, v2 in v.items():
setattr(kern_1, k2, v2[0][i] if v2[1] else v2[0])
K_XX_gpy.append(np.expand_dims(kern_gpy.K(X_i), axis=0))
K_XX2_gpy.append(np.expand_dims(kern_gpy.K(X_i, X2_i), axis=0))
Kdiag_gpy.append(np.expand_dims(kern_gpy.Kdiag(X_i), axis=0))
K_XX_gpy = np.vstack(K_XX_gpy)
K_XX2_gpy = np.vstack(K_XX2_gpy)
Kdiag_gpy = np.vstack(Kdiag_gpy)
assert np.issubdtype(K_XX_mx.dtype, dtype)
assert np.issubdtype(K_XX2_mx.dtype, dtype)
assert np.issubdtype(Kdiag_mx.dtype, dtype)
assert np.allclose(K_XX_gpy, K_XX_mx.asnumpy())
assert np.allclose(K_XX2_gpy, K_XX2_mx.asnumpy())
assert np.allclose(Kdiag_gpy, Kdiag_mx.asnumpy())
@pytest.mark.usefixtures("set_seed")
class TestGPKernels(object):
@pytest.mark.parametrize("dtype, X, X_isSamples, X2, X2_isSamples, lengthscale, lengthscale_isSamples, variance, variance_isSamples, num_samples, input_dim, ARD", [
(np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(2)+1e-4, False, np.random.rand(1)+1e-4, False, 1, 2, True),
(np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(3,2)+1e-4, True, np.random.rand(1)+1e-4, False, 3, 2, True),
(np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(2)+1e-4, False, np.random.rand(3,1)+1e-4, True, 3, 2, True),
(np.float64, np.random.rand(3,5,2), True, np.random.rand(3,4,2), True, np.random.rand(2)+1e-4, False, np.random.rand(1)+1e-4, False, 3, 2, True),
(np.float64, np.random.rand(3,5,2), True, np.random.rand(3,4,2), True, np.random.rand(1)+1e-4, False, np.random.rand(1)+1e-4, False, 3, 2, False),
])
def test_RBF_kernel(self, dtype, X, X_isSamples, X2, X2_isSamples,
lengthscale, lengthscale_isSamples, variance,
variance_isSamples, num_samples, input_dim, ARD):
def create_rbf():
return RBF(input_dim, ARD, 1., 1., 'rbf', None, dtype)
def create_gpy_rbf():
return GPy.kern.RBF(input_dim=input_dim, ARD=ARD)
kernel_params = {'lengthscale': (lengthscale, lengthscale_isSamples),
'variance': (variance, variance_isSamples)}
gpy_kernel_test(X, X_isSamples, X2, X2_isSamples, kernel_params,
num_samples, dtype, create_rbf, create_gpy_rbf)
@pytest.mark.parametrize("dtype, X, X_isSamples, X2, X2_isSamples, variances, variances_isSamples, num_samples, input_dim, ARD", [
(np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(2)+1e-4, False, 1, 2, True),
(np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(3,2)+1e-4, True, 3, 2, True),
(np.float64, np.random.rand(3,5,2), True, np.random.rand(3,4,2), True, np.random.rand(2)+1e-4, False, 3, 2, True),
(np.float64, np.random.rand(3,5,2), True, np.random.rand(3,4,2), True, np.random.rand(1)+1e-4, False, 3, 2, False),
])
def test_Linear_kernel(self, dtype, X, X_isSamples, X2, X2_isSamples,
variances, variances_isSamples, num_samples, input_dim,
ARD):
def create_linear():
return Linear(input_dim, ARD, 1., 'linear', None, dtype)
def create_gpy_linear():
return GPy.kern.Linear(input_dim=input_dim, ARD=ARD)
kernel_params = {'variances': (variances, variances_isSamples)}
gpy_kernel_test(X, X_isSamples, X2, X2_isSamples, kernel_params,
num_samples, dtype, create_linear, create_gpy_linear)
@pytest.mark.parametrize("dtype, X, X_isSamples, X2, X2_isSamples, variance, variance_isSamples, num_samples, input_dim", [
(np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(1)+1e-4, False, 1, 2),
(np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(3,1)+1e-4, True, 3, 2),
(np.float64, np.random.rand(3,5,2), True, np.random.rand(3,4,2), True, np.random.rand(1)+1e-4, False, 3, 2)
])
def test_Bias_kernel(self, dtype, X, X_isSamples, X2, X2_isSamples,
variance, variance_isSamples, num_samples, input_dim):
def create_bias():
return Bias(input_dim, 1., 'bias', None, dtype)
def create_gpy_bias():
return GPy.kern.Bias(input_dim=input_dim)
kernel_params = {'variance': (variance, variance_isSamples)}
gpy_kernel_test(X, X_isSamples, X2, X2_isSamples, kernel_params,
num_samples, dtype, create_bias, create_gpy_bias)
@pytest.mark.parametrize("dtype, X, X_isSamples, X2, X2_isSamples, variance, variance_isSamples, num_samples, input_dim", [
(np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(1)+1e-4, False, 1, 2),
(np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(3,1)+1e-4, True, 3, 2),
(np.float64, np.random.rand(3,5,2), True, np.random.rand(3,4,2), True, np.random.rand(1)+1e-4, False, 3, 2)
])
def test_White_kernel(self, dtype, X, X_isSamples, X2, X2_isSamples,
variance, variance_isSamples, num_samples, input_dim):
def create_white():
return White(input_dim, 1., 'white', None, dtype)
def create_gpy_white():
return GPy.kern.White(input_dim=input_dim)
kernel_params = {'variance': (variance, variance_isSamples)}
gpy_kernel_test(X, X_isSamples, X2, X2_isSamples, kernel_params,
num_samples, dtype, create_white, create_gpy_white)
@pytest.mark.parametrize("dtype, X, X_isSamples, X2, X2_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, linear_variances, linear_variances_isSamples, num_samples, input_dim", [
(np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(2)+1e-4, False, np.random.rand(1)+1e-4, False, np.random.rand(2)+1e-4, False, 1, 2),
(np.float64, np.random.rand(3,5,2), True, np.random.rand(3,4,2), True, np.random.rand(2)+1e-4, False, np.random.rand(1)+1e-4, False, np.random.rand(2)+1e-4, False, 3, 2),
(np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(3,2)+1e-4, True, np.random.rand(1)+1e-4, False, np.random.rand(2)+1e-4, False, 3, 2),
(np.float64, np.random.rand(5,2), False, np.random.rand(4,2), False, np.random.rand(2)+1e-4, False, np.random.rand(1)+1e-4, False, np.random.rand(3,2)+1e-4, True, 3, 2),
(np.float64, np.random.rand(3,5,2), True, np.random.rand(3,4,2), True, np.random.rand(3,2)+1e-4, True,
|
np.random.rand(3,1)
|
numpy.random.rand
|
import numpy as np
import pickle as pl
import matplotlib.pyplot as plt
from mdtools import Isomorph
from mpl_toolkits.mplot3d import Axes3D
"""
PARAMETER A:
1. A0 affects the curvature of the isomorphic surface: a0 = 0 gives a flat surface and
a0 = 1.0 gives a surface that is more curved as T becomes smaller.
2. It also affects the position on the z-axis (a), with higher values of A0 shifting the
surface higher up. When A0 = 0, the figure collapses onto the rho-T plane.
3. The surfaces have the exact same footprint on the X-Y plane.
rho2 increases as we go down the isomorphic line
T2 also increases
A2 decreases
if a_r == 0, then all the a2 values are forced to zero, causing the surface to be flat
"""
def isomorphic_surface_array(rho_list, t_list, n_list, a_list, t2,
figname, save_fig=False, save_pickle=False):
"""
@param rho_list: reference densities rho0 used to generate the isomorphic lines
@param t_list: reference temperatures T0
@param n_list: pair potential strengths n
@param a_list: reference parameters A0
@param t2: temperatures over which the isomorphic points are generated
@param figname: name of the figure, also used for the saved PDF/pickle files
@param save_fig: if True, save the figure as a PDF
@param save_pickle: if True, pickle the figure so it can be reloaded later
@return: None
"""
# Generate the 3D plot canvas here
fig = plt.figure(figname)
ax = Axes3D(fig)
canvas = None
edg_col = ["none", "black", "cyan", "white"]
edg_it = 0
for a_r in a_list:
for t_r in t_list:
# Varying the reference density in the line
# Merging multiple rho lines creates a surface
for n in n_list:
rho_iso = np.empty((0, len(t2)))
t_iso = np.empty((0, len(t2)))
a_iso = np.empty((0, len(t2)))
label_title = fr"n: {n}, $T_0$: {t_r:.1f}, $A_0$: {a_r:.1f}"
for i, rho_r in enumerate(rho_list):
iso = Isomorph(rho_r, t_r, a_r, t2)
# Generate isomorphic line
rho2, a2 = iso.gen_line(n)
# Creating the 3D mesh grid
rho_iso = np.append(rho_iso, [rho2], axis=0)
t_iso = np.append(t_iso, [t2], axis=0)
a_iso = np.append(a_iso, [a2], axis=0)
surf = ax.plot_surface(rho_iso, t_iso, a_iso, label=label_title,
alpha=0.9, edgecolor=edg_col[edg_it])
# This is a fix for the bug that does not allow legends to be added to 3D surfaces
surf._facecolors2d = surf._facecolors3d
surf._edgecolors2d = surf._edgecolors3d
ax.legend(loc="best")
edg_it += 1
ax.set_xlabel(r'$\rho$')
ax.set_ylabel(r'T')
ax.set_zlabel(r'a')
# ax.view_init(elev=44, azim=-128.5)
if save_fig is True:
plt.savefig(f"{figname}.pdf")
# Save figures for loading at a later time
if save_pickle is True:
pl.dump(fig, open(f"{figname}.pickle", "wb"))
def plot_all_surfaces():
"""
Generates all the plots of the isomorphic surfaces for the MD fluid.
It also pickles the figures, so that they can be loaded at a later time.
"""
# Leave these unchanged
# Density reference
rho_list = np.linspace(0.2, 1, 10)
# Input temperature over which the isomorphic points are generated
t2 = np.linspace(0.2, 2, 20)
# PLOT VARIOUS TEMPERATURES T0
# Reference parameters
t_list = np.linspace(0.2, 1, 5)
a_list = [0.5]
n_list = [8]
isomorphic_surface_array(rho_list, t_list, n_list,
a_list, t2, "Isomorphs_with_varying_T0")
# PLOT VARIOUS PARAMETERS A0
a_list = np.linspace(0.5, 1, 3)
t_list = [0.5]
n_list = [8]
isomorphic_surface_array(rho_list, t_list, n_list,
a_list, t2, "Isomorphs_with_varying_A0")
# PLOT VARIOUS PAIR POTENTIAL STRENGTHS n
# TODO: increase color intensity cyan, light green, DISABLE SHADING, wireframe color and decrease thickness
a_list = [0.5]
t_list = [0.5]
n_list = list(range(8, 15, 2))
isomorphic_surface_array(rho_list, t_list, n_list,
a_list, t2, "Isomorphs_with_varying_n")
# PLOT VARIOUS A0 AND T0
t_list = np.linspace(0.2, 1, 3)
a_list =
|
np.linspace(0, 1, 3)
|
numpy.linspace
|
import numpy as np
def sigmoid(x):
return 1 / (1 + np.exp(-x))
X = np.array([1.0, 0.5])
W1 = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])
B1 = np.array([0.1, 0.2, 0.3])
print(W1.shape)
print(X.shape)
print(B1.shape)
A1 = np.dot(X, W1) + B1
Z1 = sigmoid(A1)
print(A1)
print(Z1)
W2 = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])
B2 =
|
np.array([0.1, 0.2])
|
numpy.array
|
# -*- coding: utf-8 -*-
import numpy as np
import torch
from albumentations.core.transforms_interface import BasicTransform
from numpy import cos, pi, sin
def atan2(y, x):
"""from 0 to 2 * pi"""
return np.arctan2(y, x) % (2 * pi)
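# e.g. np.arctan2(-1, 0) returns -pi/2, whereas atan2(-1, 0) above wraps it to 3*pi/2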
def donut(img, inplace=True):
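# draw a random annular sector (radii r_in..r_out, angles a1..a2) centered on the square image
# and set the pixels inside it to 2.0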
if not inplace:
img = np.copy(img)
size, _ = img.shape # square
radius = size / 2
r_in = np.random.random() * radius
r_out = np.random.random() * radius
r_in, r_out = min(r_in, r_out), max(r_in, r_out)
a1 = np.random.random() * 2 * pi
a2 = np.random.random() * 2 * pi
a1, a2 = min(a1, a2), max(a1, a2)
y0, x0 = size / 2, size / 2
y, x = np.ogrid[0: size, 0: size]
img[np.where((r_in ** 2 <= (y - y0) ** 2 + (x - x0) ** 2) & ((y - y0) ** 2 + (x - x0)
** 2 <= r_out ** 2) & (a1 <= atan2(y-y0, x-x0)) & (atan2(y-y0, x-x0) <= a2))] = 2.0
return img
def scratch(img, inplace=True):
if not inplace:
img =
|
np.copy(img)
|
numpy.copy
|
from datetime import date,datetime
from os import system,name
import numpy as np
LOCK = 1
EMPT = 0
BLK_EDGE = 10
BOARD_HEIGHT = 9
BOARD_WIDTH = 6
FILE_NAME = 'logfile.txt'
board = [
["JAN","FEB","MAR","APR","MAY","JUN"],
["JUL","AUG","SEP","OCT","NOV","DEC"],
["1" ,"2" ,"3" ,"4" ,"5" ,"6" ],
["7" ,"8" ,"9" ,"10" ,"11" ,"12" ],
["13" ,"14" ,"15" ,"16" ,"17" ,"18" ],
["19" ,"20" ,"21" ,"22" ,"23" ,"24" ],
["25" ,"26" ,"27" ,"28" ,"29" ,"30" ],
["31" ," " ," " ,"MON","TUE","WED"],
[" " ," " ,"THU","FRI","SAT","SUN"]
]
force_check = 0
bk1 = [
[LOCK,EMPT,EMPT],
[LOCK,LOCK,LOCK],
[LOCK,EMPT,EMPT],
[LOCK,EMPT,EMPT]
]
bk2 = [
[LOCK,EMPT],
[LOCK,LOCK],
[LOCK,EMPT],
[LOCK,EMPT],
[LOCK,EMPT]
]
bk3 = [
[LOCK,LOCK],
[LOCK,EMPT],
[LOCK,EMPT],
[LOCK,EMPT]
]
bk4 = [
[LOCK,LOCK],
[LOCK,EMPT],
[LOCK,EMPT],
[LOCK,EMPT],
[LOCK,EMPT]
]
bk5 = [
[LOCK,LOCK,LOCK],
[LOCK,EMPT,EMPT],
[LOCK,EMPT,EMPT],
[LOCK,EMPT,EMPT]
]
bk6 = [
[LOCK,EMPT],
[LOCK,LOCK],
[EMPT,LOCK],
[EMPT,LOCK]
]
bk7 = [
[LOCK,EMPT,LOCK],
[LOCK,LOCK,LOCK],
[LOCK,EMPT,EMPT]
]
bk8 = [
[LOCK,EMPT,EMPT],
[LOCK,LOCK,LOCK],
[EMPT,LOCK,EMPT],
[EMPT,LOCK,EMPT]
]
bk9 = [
[LOCK,EMPT],
[LOCK,LOCK],
[LOCK,LOCK]
]
def rotate(bk, times= 0):
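# positive 'times' rotates the block by that many quarter turns;
# negative 'times' mirrors the block (fliplr) and then rotates it by |times|-1 quarter turns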
res = np.array(bk)
if times < 0:
res = np.fliplr(res)
times += 1
res = np.rot90(res,abs(times))
return res
def position(bk,x,y):
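# place the block on the 9x6 board with its top-left corner at row x, column y (zero-padding elsewhere)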
res = np.array(bk)
row,column = res.shape
res = np.pad(res,((x,9-row-x),(y,6-y-column)))
return res
def clear():
# for windows
if name == 'nt':
_ = system('cls')
# for mac and linux(here, os.name is 'posix')
else:
_ = system('clear')
def printBk(bk):
for i in bk:
for j in i:
print(j,end=" ")
print()
def markDateBoard(daystr = ''):
today = date.today()
if daystr:
today = datetime.strptime(daystr, '%Y-%m-%d')
if today.month == 1:
global force_check
force_check = 1
nboard = np.zeros((BOARD_HEIGHT,BOARD_WIDTH),np.int8)
wdpos = [[7,3],[7,4],[7,5],[8,2],[8,3],[8,4],[8,5]]
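# mark the cells for the current month (rows 0-1), day of month (rows 2-7) and weekday (via wdpos) as LOCK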
nboard[(today.month - 1) // BOARD_WIDTH][(today.month - 1) % BOARD_WIDTH] = LOCK
nboard[(today.day - 1) // BOARD_WIDTH + 2][(today.day - 1) % BOARD_WIDTH] = LOCK
i,j = wdpos[today.weekday()]
nboard[i][j] = LOCK
return nboard
def validBoard(currBoard):
if
|
np.max(currBoard)
|
numpy.max
|
# -*- coding: utf-8 -*-
import argparse
import matplotlib.pyplot as plt
import numpy as np
import soundfile as sf
import torch
from torch_audioset.data.torch_input_processing import WaveformToInput as TorchTransform
from torch_audioset.params import YAMNetParams
from torch_audioset.yamnet.model import yamnet as torch_yamnet
from torch_audioset.yamnet.model import yamnet_category_metadata
def sf_load_from_int16(fname):
x, sr = sf.read(fname, dtype='int16', always_2d=True)
x = x / 2 ** 15
x = x.T.astype(np.float32)
return x, sr
if __name__ == '__main__':
# one wav file as argument.
parser = argparse.ArgumentParser()
parser.add_argument('wav')
args = parser.parse_args()
wav_fname = args.wav
waveforms, sr = sf_load_from_int16(wav_fname)
waveform = waveforms[0]
waveform_for_torch = torch.tensor(waveforms)
patches, spectrogram = TorchTransform().wavform_to_log_mel(waveform_for_torch, 16000)
pt_model = torch_yamnet(pretrained=False)
# Manually download the `yamnet.pth` file.
pt_model.load_state_dict(torch.load('./yamnet.pth'))
with torch.no_grad():
pt_model.eval()
# x = torch.from_numpy(patches)
# x = x.unsqueeze(1) # [5, 96, 64] -> [5, 1, 96, 64]
x = patches
pt_pred = pt_model(x, to_prob=True)
pt_pred = pt_pred.numpy()
scores = pt_pred
params = YAMNetParams()
class_names = [x['name'] for x in yamnet_category_metadata()]
# Visualize the results.
plt.figure(figsize=(10, 8))
# Plot the waveform.
plt.subplot(3, 1, 1)
plt.plot(waveform)
plt.xlim([0, len(waveform)])
# Plot the log-mel spectrogram (returned by the model).
plt.subplot(3, 1, 2)
extent = (0, spectrogram.shape[0], -0.5, spectrogram.shape[1] - 0.5)
plt.imshow(spectrogram.T, aspect='auto', interpolation='nearest', origin='lower', extent=extent)
plt.xlim([0, len(waveform) / sr / YAMNetParams.STFT_HOP_SECONDS])
# Plot and label the model output scores for the top-scoring classes.
mean_scores = np.mean(scores, axis=0)
top_N = 10
top_class_indices =
|
np.argsort(mean_scores)
|
numpy.argsort
|
import numpy as np
#import scipy.stats as stats
import torch
import torch.nn as nn
import torch.optim as optim
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
print("GPU available")
class RNN(nn.Module):
def __init__(self, in_size, h_size, n_layers, dropout=0):
super(RNN, self).__init__()
self.model = nn.GRU(in_size,hidden_size=h_size,num_layers=n_layers,
batch_first=True,dropout=dropout)
def forward(self, x):
out, _ = self.model(x, None)
return out[0][-1][None] #(1,h_size)
class MultiModel(nn.Module):
def __init__(self, mods, in_dims, out_dims, n_scores, dp=0.1):
super(MultiModel, self).__init__()
layers = []
mods = set(mods)
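# one encoder per selected modality: GRUs for the speech (128-d) and vision (2048-d) sequences,
# linear projections for the 768-d language embeddings; all map to in_dims features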
if 'spk_speech' in mods:
layers.append(RNN(128, in_dims, 1))
if 'spk_language' in mods:
layers.append(nn.Linear(768, in_dims))
if 'spk_vision' in mods:
layers.append(RNN(2048, in_dims, 1))
if 'lis_speech' in mods:
layers.append(RNN(128, in_dims, 1))
if 'lis_language' in mods:
layers.append(nn.Linear(768, in_dims))
if 'lis_vision' in mods:
layers.append(RNN(2048, in_dims, 1))
self.layers = nn.ModuleList(layers)
self.dp1 = nn.Dropout(dp, inplace=True)
self.relu1 = nn.ReLU(inplace=True)
self.fc1 = nn.Linear(in_dims*len(mods), out_dims)
self.dp2 = nn.Dropout(dp, inplace=True)
self.relu2 = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(out_dims, n_scores)
def forward(self, data):
feats = []
for layer, x in zip(self.layers, data):
feats.append(layer(x[None])) # (1, in_dims)
y = torch.cat(feats, dim=1) # (1, in_dims * len(mods))
y = self.dp1(y)
y = self.relu1(y)
y = self.fc1(y)
y = self.dp2(y)
y = self.relu2(y)
y = self.fc2(y)
return y #(1, n_scores)
class M3:
def __init__(self, mods, task, lr, in_dims, out_dims, n_scores, dp):
print('Using multi-modalities model, hyper-params:', lr, in_dims, out_dims)
self.mods = mods
self.task = task
if task == 'reg':
self.model = MultiModel(mods, in_dims, out_dims, n_scores, dp).to(device)
self.criterion = nn.MSELoss(reduction='sum')
elif task == 'cls':
self.model = MultiModel(mods, in_dims, out_dims, n_scores+1, dp).to(device)
self.criterion = nn.CrossEntropyLoss()
else:
self.model = MultiModel(mods, in_dims, out_dims, n_scores+1, dp).to(device)
self.criterion_reg = nn.MSELoss(reduction='sum')
self.criterion_cls = nn.CrossEntropyLoss()
self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
#self.optimizer = optim.SGD(self.model.parameters(), lr=lr)
def test(self, data):
self.model.eval()
X, Y = data
loss = 0
for x, y in zip(X, Y):
x = [torch.FloatTensor(i).to(device) for i in x]
with torch.no_grad():
if self.task == 'reg':
y = torch.FloatTensor(y).to(device)
pred = self.model(x).reshape(y.shape)
loss += self.criterion(pred, y).item()
elif self.task == 'cls':
y = torch.LongTensor(y).to(device)
pred = self.model(x)
loss += self.criterion(pred, y).item()
else:
y1 = torch.FloatTensor(y[:-1]).to(device)
y2 = torch.LongTensor(y[-1][None]).to(device)
pred = self.model(x)
pred1 = pred[:,:-2].reshape(y1.shape)
pred2 = pred[:,-2:]
loss1 = self.criterion_reg(pred1, y1).item()
loss2 = self.criterion_cls(pred2, y2).item()
loss += (loss1/9 + loss2*8/9)
return loss/Y.shape[0]
def train(self, train_data, eval_data, epoch, batch, path):
X, y = train_data
n = y.shape[0]
#y = y + (np.random.random(y.shape)-0.5)*0.5
for e in range(epoch):
self.model.train()
idx = np.arange(n)
|
np.random.shuffle(idx)
|
numpy.random.shuffle
|