import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data_src = "train.csv"
df = pd.read_csv(data_src, header=0)
# check dataset basic info
'''
<bound method DataFrame.info of PassengerId Survived Pclass ... Fare Cabin Embarked
0 1 0 3 ... 7.2500 NaN S
1 2 1 1 ... 71.2833 C85 C
2 3 1 3 ... 7.9250 NaN S
3 4 1 1 ... 53.1000 C123 S
4 5 0 3 ... 8.0500 NaN S
.. ... ... ... ... ... ... ...
886 887 0 2 ... 13.0000 NaN S
887 888 1 1 ... 30.0000 B42 S
888 889 0 3 ... 23.4500 NaN S
889 890 1 1 ... 30.0000 C148 C
890 891 0 3 ... 7.7500 NaN Q
[891 rows x 12 columns]>
'''
print(df.info)
# check dataset summary statistics
'''
PassengerId Survived Pclass ... SibSp Parch Fare
count 891.000000 891.000000 891.000000 ... 891.000000 891.000000 891.000000
mean 446.000000 0.383838 2.308642 ... 0.523008 0.381594 32.204208
std 257.353842 0.486592 0.836071 ... 1.102743 0.806057 49.693429
min 1.000000 0.000000 1.000000 ... 0.000000 0.000000 0.000000
25% 223.500000 0.000000 2.000000 ... 0.000000 0.000000 7.910400
50% 446.000000 0.000000 3.000000 ... 0.000000 0.000000 14.454200
75% 668.500000 1.000000 3.000000 ... 1.000000 0.000000 31.000000
max 891.000000 1.000000 3.000000 ... 8.000000 6.000000 512.329200
'''
print(df.describe())
# check first several rows
'''
PassengerId Survived Pclass ... Fare Cabin Embarked
0 1 0 3 ... 7.2500 NaN S
1 2 1 1 ... 71.2833 C85 C
2 3 1 3 ... 7.9250 NaN S
3 4 1 1 ... 53.1000 C123 S
4 5 0 3 ... 8.0500 NaN S
[5 rows x 12 columns]
'''
print(df.head())
# check overall survival rate
survived_rate = df['Survived'].sum() / df['PassengerId'].count()
print(survived_rate)
print("Pclass vs. survival rate:")
x = [df[(df.Pclass == 1)]['Pclass'].size,
df[(df.Pclass == 2)]['Pclass'].size,
df[(df.Pclass == 3)]['Pclass'].size
]
y = [df[(df.Pclass == 1) & (df.Survived == 1)]['Pclass'].size,
df[(df.Pclass == 2) & (df.Survived == 1)]['Pclass'].size,
df[(df.Pclass == 3) & (df.Survived == 1)]['Pclass'].size
]
print('1 Pclass number:'+str(x[0])+' '+' 1 Pclass survived:'+str(y[0])+' '+'1 Pclass survival rate:', float(y[0]) / x[0])
print('2 Pclass number:'+str(x[1])+' '+' 2 Pclass survived:'+str(y[1])+' '+'2 Pclass survival rate:', float(y[1]) / x[1])
print('3 Pclass number:'+str(x[2])+' '+' 3 Pclass survived:'+str(y[2])+' '+'3 Pclass survival rate:', float(y[2]) / x[2])
print("Gender VS Survive rate", ":")
male_survived = df[(df.Sex == 'male')]['Sex'].size
female_survived = df[(df.Sex == 'female')]['Sex'].size
print('male survive:', male_survived)
print('female survive:', female_survived)
Sex_survived_rate = (df.groupby(['Sex']).sum()/df.groupby(['Sex']).count())['Survived']
Sex_survived_rate.plot(kind='bar')
plt.title('Sex_survived_rate')
plt.show()
print("Age VS Survive rate", ":")
age_clean_date = df[~np.isnan(df['Age'])] # remove NaN
ages = np.arange(0, 81, 5)
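# The original snippet breaks off here. The following is a hypothetical sketch of
# how the age-group survival analysis could continue, using the bins defined above.
age_groups = pd.cut(age_clean_data['Age'], bins=ages)
age_survival_rate = age_clean_data.groupby(age_groups)['Survived'].mean()
age_survival_rate.plot(kind='bar')
plt.title('Age_survived_rate')
plt.show()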
import pandas as pd
import numpy as np
import h5py
import os
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from pandas_plink import read_plink1_bin
from ..utils import helper_functions
from . import encoding_functions as enc
def prepare_data_files(data_dir: str, genotype_matrix_name: str, phenotype_matrix_name: str, phenotype: str,
datasplit: str, n_outerfolds: int, n_innerfolds: int, test_set_size_percentage: int,
val_set_size_percentage: int, models, user_encoding: str, maf_percentage: int):
"""
Prepare all data files for a common format: genotype matrix, phenotype matrix and index file.
First check if genotype file is .h5 file (standard format of this framework):
- YES: First check if all required information is present in the file, raise Exception if not. Then check if index file exists:
- NO: Load genotype and create all required index files
- YES: Append all required data splits and maf-filters to index file
- NO: Load genotype and create all required files
:param data_dir: data directory where the phenotype and genotype matrix are stored
:param genotype_matrix_name: name of the genotype matrix including datatype ending
:param phenotype_matrix_name: name of the phenotype matrix including datatype ending
:param phenotype: name of the phenotype to predict
:param datasplit: datasplit to use. Options are: nested-cv, cv-test, train-val-test
:param n_outerfolds: number of outerfolds relevant for nested-cv
:param n_innerfolds: number of folds relevant for nested-cv and cv-test
:param test_set_size_percentage: size of the test set relevant for cv-test and train-val-test
:param val_set_size_percentage: size of the validation set relevant for train-val-test
:param models: models to consider
:param user_encoding: encoding specified by the user
:param maf_percentage: threshold for MAF filter as percentage value
"""
print('Check if all data files have the required format')
if os.path.isfile(data_dir + '/' + genotype_matrix_name.split('.')[0] + '.h5') and \
(genotype_matrix_name.split('.')[-1] != 'h5'):
print("Found same file name with ending .h5")
print("Assuming that the raw file was already prepared using our pipepline. Will continue with the .h5 file.")
genotype_matrix_name = genotype_matrix_name.split('.')[0] + '.h5'
suffix = genotype_matrix_name.split('.')[-1]
if suffix in ('h5', 'hdf5', 'h5py'):
# Genotype matrix has standard file format -> check information in the file
check_genotype_h5_file(data_dir=data_dir, genotype_matrix_name=genotype_matrix_name,
encodings=enc.get_encoding(models=models, user_encoding=user_encoding))
print('Genotype file available in required format, check index file now.')
# Check / create index files
if check_index_file(data_dir=data_dir, genotype_matrix_name=genotype_matrix_name,
phenotype_matrix_name=phenotype_matrix_name, phenotype=phenotype):
print('Index file ' + genotype_matrix_name.split('.')[0] + '-'
+ phenotype_matrix_name.split('.')[0] + '-' + phenotype + '.h5' + ' already exists.'
' Will append required filters and data splits now.')
append_index_file(data_dir=data_dir, genotype_matrix_name=genotype_matrix_name,
phenotype_matrix_name=phenotype_matrix_name, phenotype=phenotype,
maf_percentage=maf_percentage,
datasplit=datasplit, n_outerfolds=n_outerfolds, n_innerfolds=n_innerfolds,
test_set_size_percentage=test_set_size_percentage,
val_set_size_percentage=val_set_size_percentage)
print('Done checking data files. All required datasets are available.')
else:
print('Index file ' + genotype_matrix_name.split('.')[0] + '-' + phenotype_matrix_name.split('.')[0]
+ '-' + phenotype + '.h5' + ' does not fulfill requirements. '
'Will load genotype and phenotype matrix and create new index file.')
save_all_data_files(data_dir=data_dir, genotype_matrix_name=genotype_matrix_name,
phenotype_matrix_name=phenotype_matrix_name, phenotype=phenotype,
models=models, user_encoding=user_encoding, maf_percentage=maf_percentage,
datasplit=datasplit, n_outerfolds=n_outerfolds, n_innerfolds=n_innerfolds,
test_set_size_percentage=test_set_size_percentage,
val_set_size_percentage=val_set_size_percentage)
print('Done checking data files. All required datasets are available.')
else:
print('Genotype file not in required format. Will load genotype matrix and save as .h5 file. Will also create '
'required index file.')
save_all_data_files(data_dir=data_dir, genotype_matrix_name=genotype_matrix_name,
phenotype_matrix_name=phenotype_matrix_name, phenotype=phenotype,
models=models, user_encoding=user_encoding, maf_percentage=maf_percentage,
datasplit=datasplit, n_outerfolds=n_outerfolds, n_innerfolds=n_innerfolds,
test_set_size_percentage=test_set_size_percentage,
val_set_size_percentage=val_set_size_percentage)
print('Done checking data files. All required datasets are available.')
def check_genotype_h5_file(data_dir: str, genotype_matrix_name: str, encodings: list):
"""
Check .h5 genotype file. Should contain:
- sample_ids: vector with sample names of genotype matrix,
- snp_ids: vector with SNP identifiers of genotype matrix,
- X_{enc}: (samples x SNPs)-genotype matrix in enc encoding, where enc might refer to:
- '012': additive (number of minor alleles)
- 'raw': raw (alleles)
:param data_dir: data directory where the phenotype and genotype matrix are stored
:param genotype_matrix_name: name of the genotype matrix including datatype ending
:param encodings: list of needed encodings
"""
with h5py.File(data_dir + '/' + genotype_matrix_name, "r") as f:
keys = list(f.keys())
if {'sample_ids', 'snp_ids'}.issubset(keys):
# check if required encoding is available or can be created
for elem in encodings:
if f'X_{elem}' not in f and f'X_{enc.get_base_encoding(encoding=elem)}' not in f:
raise Exception('Genotype in ' + elem + ' encoding missing. Can not create required encoding. '
'See documentation for help')
else:
raise Exception('sample_ids and/or snp_ids are missing in ' + genotype_matrix_name)
def check_index_file(data_dir: str, genotype_matrix_name: str, phenotype_matrix_name: str, phenotype: str) -> bool:
"""
Check if index file is available and if the datasets 'y', 'matched_sample_ids', 'X_index', 'y_index' and
'ma_frequency' exist.
:param data_dir: data directory where the phenotype and genotype matrix are stored
:param genotype_matrix_name: name of the genotype matrix including datatype ending
:param phenotype_matrix_name: name of the phenotype matrix including datatype ending
:param phenotype: name of the phenotype to predict
:return: bool reflecting check result
"""
index_file = data_dir + '/' + genotype_matrix_name.split('.')[0] + '-' + phenotype_matrix_name.split('.')[0] \
+ '-' + phenotype + '.h5'
if os.path.isfile(index_file):
matched_datasets = ['y', 'matched_sample_ids', 'X_index', 'y_index', 'non_informative_filter', 'ma_frequency']
with h5py.File(index_file, 'a') as f:
if 'matched_data' in f and all(z in f['matched_data'] for z in matched_datasets):
return True
else:
return False
else:
return False
def save_all_data_files(data_dir: str, genotype_matrix_name: str, phenotype_matrix_name: str, phenotype: str,
models, user_encoding: str, maf_percentage: int,
datasplit: str, n_outerfolds: int, n_innerfolds: int,
test_set_size_percentage: int, val_set_size_percentage: int):
"""
Prepare and save all required data files:
- genotype matrix in unified format as .h5 file,
- phenotype matrix in unified format as .csv file,
- file containing maf filter and data split indices as .h5
:param data_dir: data directory where the phenotype and genotype matrix are stored
:param genotype_matrix_name: name of the genotype matrix including datatype ending
:param phenotype_matrix_name: name of the phenotype matrix including datatype ending
:param phenotype: name of the phenotype to predict
:param models: models to consider
:param user_encoding: encoding specified by the user
:param maf_percentage: threshold for MAF filter as percentage value
:param datasplit: datasplit to use. Options are: nested-cv, cv-test, train-val-test
:param n_outerfolds: number of outerfolds relevant for nested-cv
:param n_innerfolds: number of folds relevant for nested-cv and cv-test
:param test_set_size_percentage: size of the test set relevant for cv-test and train-val-test
:param val_set_size_percentage: size of the validation set relevant for train-val-test
"""
print('Load genotype file ' + data_dir + '/' + genotype_matrix_name)
X, X_ids = check_transform_format_genotype_matrix(data_dir=data_dir, genotype_matrix_name=genotype_matrix_name,
models=models, user_encoding=user_encoding)
print('Have genotype matrix. Load phenotype ' + phenotype + ' from ' + data_dir + '/' + phenotype_matrix_name)
y = check_and_load_phenotype_matrix(data_dir=data_dir,
phenotype_matrix_name=phenotype_matrix_name, phenotype=phenotype)
print('Have phenotype vector. Start matching genotype and phenotype.')
X, y, sample_ids, X_index, y_index = genotype_phenotype_matching(X=X, X_ids=X_ids, y=y)
print('Done matching genotype and phenotype. Create index file now.')
create_index_file(data_dir=data_dir, genotype_matrix_name=genotype_matrix_name,
phenotype_matrix_name=phenotype_matrix_name, phenotype=phenotype,
datasplit=datasplit, n_outerfolds=n_outerfolds, n_innerfolds=n_innerfolds,
test_set_size_percentage=test_set_size_percentage,
val_set_size_percentage=val_set_size_percentage,
maf_percentage=maf_percentage, X=X, y=y, sample_ids=sample_ids, X_index=X_index, y_index=y_index
)
def check_transform_format_genotype_matrix(data_dir: str, genotype_matrix_name: str, models, user_encoding: str) \
-> (np.array, np.array):
"""
Check the format of the specified genotype matrix.
Unified genotype matrix will be saved in subdirectory data and named NAME_OF_GENOTYPE_MATRIX.h5
Unified format of the .h5 file of the genotype matrix required for the further processes:
- mandatory:
- sample_ids: vector with sample names of genotype matrix,
- snp_ids: vector with SNP identifiers of genotype matrix,
- X_{enc}: (samples x SNPs)-genotype matrix in enc encoding, where enc might refer to:
- '012': additive (number of minor alleles)
- 'raw': raw (alleles)
- optional: genotype in additional encodings
Accepts .h5, .hdf5, .h5py, .csv, PLINK binary and PLINK files. .h5, .hdf5, .h5py files must satisfy the unified
format. If the genotype matrix contains constant SNPs, those will be removed and a new file will be saved.
Will open .csv, PLINK and binary PLINK files and generate required .h5 format.
:param data_dir: data directory where the phenotype and genotype matrix are stored
:param genotype_matrix_name: name of the genotype matrix including datatype ending
:param models: models to consider
:param user_encoding: encoding specified by the user
:return: genotype matrix (raw encoded if present, 012 encoded otherwise) and sample ids
"""
suffix = genotype_matrix_name.split('.')[-1]
encoding = enc.get_encoding(models=models, user_encoding=user_encoding)
if suffix in ('h5', 'hdf5', 'h5py'):
with h5py.File(data_dir + '/' + genotype_matrix_name, "r") as f:
sample_ids = f['sample_ids'][:].astype(str)
if 'X_raw' in f:
X = f['X_raw'][:]
elif 'X_012' in f:
X = f['X_012'][:]
else:
if suffix == 'csv':
sample_ids, snp_ids, X = check_genotype_csv_file(data_dir=data_dir,
genotype_matrix_name=genotype_matrix_name,
encodings=encoding)
elif suffix in ('bed', 'bim', 'fam'):
sample_ids, snp_ids, X = check_genotype_binary_plink_file(data_dir=data_dir,
genotype_matrix_name=genotype_matrix_name)
elif suffix in ('map', 'ped'):
sample_ids, snp_ids, X = check_genotype_plink_file(data_dir=data_dir,
genotype_matrix_name=genotype_matrix_name)
else:
raise Exception('Only accept .h5, .hdf5, .h5py, .csv, binary PLINK and PLINK genotype files. '
'See documentation for help.')
create_genotype_h5_file(data_dir=data_dir, genotype_matrix_name=genotype_matrix_name,
sample_ids=sample_ids, snp_ids=snp_ids, X=X)
return X, sample_ids
def check_genotype_csv_file(data_dir: str, genotype_matrix_name: str, encodings: list) \
-> (np.array, np.array, np.array):
"""
Load .csv genotype file. File must have the following structure:
First column must contain the sample ids, the column names should be the SNP ids.
The values should be the genotype matrix either in additive encoding or in raw encoding.
If the name of the first column is 'MarkerID', it is assumed that the rows contain the markers and the columns contain
the samples, and the genotype matrix will be transposed.
If the csv file contains the genotype in biallelic notation (i.e. 'AA', 'AT', ...), this function generates a
genotype matrix in iupac notation (i.e. 'A', 'W', ...).
:param data_dir: data directory where the phenotype and genotype matrix are stored
:param genotype_matrix_name: name of the genotype matrix including datatype ending
:param encodings: list of needed encodings
:return: sample ids, SNP ids and genotype in additive / raw encoding
"""
gt = pd.read_csv(data_dir + '/' + genotype_matrix_name, index_col=0)
if gt.index.name == 'MarkerID':
gt = gt.T
snp_ids = np.asarray(gt.columns.values)
sample_ids = np.asarray(gt.index)
X = np.asarray(gt.values)
enc_of_X = enc.check_encoding_of_genotype(X=X)
# if genotype in biallelic notation, will change to iupac notation
if enc_of_X == 'biallelic':
iupac_map = {"AA": "A", "GG": "G", "TT": "T", "CC": "C", "AG": "R", "GA": "R", "CT": "Y", "TC": "Y", "GC": "S",
"CG": "S", "AT": "W", "TA": "W", "GT": "K", "TG": "K", "AC": "M", "CA": "M"}
gt = gt.applymap(lambda x: x.decode() if isinstance(x, bytes) else x)
X = np.asarray(gt.replace(iupac_map).values)
enc_of_X = 'raw'
# check encoding of X, only accept additive or raw and check if required encoding can be created
for elem in encodings:
if elem != enc_of_X and enc.get_base_encoding(encoding=elem) != enc_of_X:
raise Exception('Genotype in ' + genotype_matrix_name + ' in wrong encoding. Can not create'
' required encoding. See documentation for help.')
return sample_ids, snp_ids, X
def check_genotype_binary_plink_file(data_dir: str, genotype_matrix_name: str) -> (np.array, np.array, np.array):
"""
Load binary PLINK file, .bim, .fam, .bed files with same prefix need to be in same folder.
Compute additive and raw encoding of genotype
:param data_dir: data directory where the phenotype and genotype matrix are stored
:param genotype_matrix_name: name of the genotype matrix including datatype ending
:return: sample ids, SNP ids and genotype in raw encoding
"""
gt_file = data_dir + '/' + genotype_matrix_name.split(".")[0]
gt = read_plink1_bin(gt_file + '.bed', gt_file + '.bim', gt_file + '.fam', ref="a0", verbose=False)
sample_ids = np.array(gt['fid'], dtype=str).flatten()
snp_ids = np.array(gt['snp']).flatten()
# get raw encoding
iupac_map = {b"AA": "A", b"GG": "G", b"TT": "T", b"CC": "C", b"AG": "R", b"GA": "R", b"CT": "Y", b"TC": "Y",
b"GC": "S", b"CG": "S", b"AT": "W", b"TA": "W", b"GT": "K", b"TG": "K", b"AC": "M", b"CA": "M"}
a_0 = np.array(gt.a1.values, dtype='S1')
a_2 = np.array(gt.a0.values, dtype='S1')
a_1 = []
for i in range(len(a_0)):
a_1.append(iupac_map[a_0[i] + a_2[i]])
a = np.stack((a_0, np.array(a_1), a_2))
col = np.arange(a.shape[1])
X_012 = np.array(gt.values)
X_raw = a[X_012.astype(int), col]
return sample_ids, snp_ids, X_raw
def check_genotype_plink_file(data_dir: str, genotype_matrix_name: str) -> (np.array, np.array, np.array):
"""
Load PLINK files, .map and .ped file with same prefix need to be in same folder.
Accepts GENOTYPENAME.ped and GENOTYPENAME.map as input
:param data_dir: data directory where the phenotype and genotype matrix are stored
:param genotype_matrix_name: name of the genotype matrix including datatype ending
:return: sample ids, SNP ids and genotype in raw encoding
"""
gt_file = data_dir + '/' + genotype_matrix_name.split(".")[0]
with open(gt_file + '.map', 'r') as f:
snp_ids = []
for line in f:
tmp = line.strip().split(" ")
snp_ids.append(tmp[1].strip())
snp_ids = np.array(snp_ids)
iupac_map = {"AA": "A", "GG": "G", "TT": "T", "CC": "C", "AG": "R", "GA": "R", "CT": "Y", "TC": "Y", "GC": "S",
"CG": "S", "AT": "W", "TA": "W", "GT": "K", "TG": "K", "AC": "M", "CA": "M"}
with open(gt_file + '.ped', 'r') as f:
sample_ids = []
X_raw = []
for line in f:
tmp = line.strip().split(" ")
sample_ids.append(int(tmp[1].strip()))
snps = []
j = 6
while j < len(tmp) - 1:
snps.append(iupac_map[tmp[j] + tmp[j + 1]])
j += 2
X_raw.append(snps)
sample_ids = np.array(sample_ids)
X_raw = np.array(X_raw)
return sample_ids, snp_ids, X_raw
def create_genotype_h5_file(data_dir: str, genotype_matrix_name: str,
sample_ids: np.array, snp_ids: np.array, X: np.array):
"""
Save genotype matrix in unified .h5 file.
Structure:
- sample_ids
- snp_ids
- X_raw (or X_012 if X_raw not available)
:param data_dir: data directory where the phenotype and genotype matrix are stored
:param genotype_matrix_name: name of the genotype matrix including datatype ending
:param sample_ids: array containing sample ids of genotype data
:param snp_ids: array containing snp ids of genotype data
:param X: matrix containing genotype either in raw or in additive encoding
"""
x_file = data_dir + '/' + genotype_matrix_name.split(".")[0] + '.h5'
print('Save unified genotype file ' + x_file)
with h5py.File(x_file, 'w') as f:
f.create_dataset('sample_ids', data=sample_ids.astype(bytes), chunks=True, compression="gzip")
f.create_dataset('snp_ids', data=snp_ids.astype(bytes), chunks=True, compression="gzip")
encoding = enc.check_encoding_of_genotype(X=X)
if encoding == 'raw':
f.create_dataset('X_raw', data=X.astype(bytes), chunks=True, compression="gzip", compression_opts=7)
elif encoding == '012':
f.create_dataset('X_012', data=X, chunks=True, compression="gzip", compression_opts=7)
else:
raise Exception('Genotype neither in raw nor in additive encoding. Cannot save .h5 genotype file.')
def check_and_load_phenotype_matrix(data_dir: str, phenotype_matrix_name: str, phenotype: str) -> pd.DataFrame:
"""
Check and load the specified phenotype matrix. Only accept .csv, .pheno, .txt files.
Sample ids need to be in first column, remaining columns should contain phenotypic values
with phenotype name as column name
:param data_dir: data directory where the phenotype and genotype matrix are stored
:param phenotype_matrix_name: name of the phenotype matrix including datatype ending
:param phenotype: name of the phenotype to predict
:return: DataFrame with sample_ids as index and phenotype values as single column without NAN values
"""
suffix = phenotype_matrix_name.split('.')[-1]
if suffix == "csv":
y = pd.read_csv(data_dir + '/' + phenotype_matrix_name)
elif suffix in ("pheno", "txt"):
y = pd.read_csv(data_dir + '/' + phenotype_matrix_name, sep=" ")
else:
raise Exception('Only accept .csv, .pheno, .txt phenotype files. See documentation for help')
y = y.sort_values(y.columns[0]).groupby(y.columns[0]).mean()
if phenotype not in y.columns:
raise Exception('Phenotype ' + phenotype + ' is not in phenotype file ' + phenotype_matrix_name +
'. See documentation for help')
else:
y = y[[phenotype]].dropna()
return y
def genotype_phenotype_matching(X: np.array, X_ids: np.array, y: pd.DataFrame) -> tuple:
"""
Match the handed over genotype and phenotype matrix for the phenotype specified by the user
:param X: genotype matrix in additive encoding
:param X_ids: sample ids of genotype matrix
:param y: pd.DataFrame containing sample ids of phenotype as index and phenotype values as single column
:return: matched genotype matrix, matched sample ids, index arrays for genotype and phenotype to redo matching
"""
y_ids = np.asarray(y.index, dtype=X_ids.dtype)
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy import special as sp_special
from scipy import stats as sp_stats
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
tfd = tfp.distributions
@test_util.run_all_in_graph_and_eager_modes
class BetaTest(tfp_test_util.TestCase):
def testSimpleShapes(self):
a = np.random.rand(3)
b = np.random.rand(3)
dist = tfd.Beta(a, b)
self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))
self.assertAllEqual([3], self.evaluate(dist.batch_shape_tensor()))
self.assertEqual(tf.TensorShape([]), dist.event_shape)
self.assertEqual(tf.TensorShape([3]), dist.batch_shape)
def testComplexShapes(self):
a = np.random.rand(3, 2, 2)
b = np.random.rand(3, 2, 2)
dist = tfd.Beta(a, b)
self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))
self.assertAllEqual([3, 2, 2], self.evaluate(dist.batch_shape_tensor()))
self.assertEqual(tf.TensorShape([]), dist.event_shape)
self.assertEqual(tf.TensorShape([3, 2, 2]), dist.batch_shape)
def testComplexShapesBroadcast(self):
a = np.random.rand(3, 2, 2)
b = np.random.rand(2, 2)
dist = tfd.Beta(a, b)
self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))
self.assertAllEqual([3, 2, 2], self.evaluate(dist.batch_shape_tensor()))
self.assertEqual(tf.TensorShape([]), dist.event_shape)
self.assertEqual(tf.TensorShape([3, 2, 2]), dist.batch_shape)
def testAlphaProperty(self):
a = [[1., 2, 3]]
b = [[2., 4, 3]]
dist = tfd.Beta(a, b)
self.assertEqual([1, 3], dist.concentration1.shape)
self.assertAllClose(a, self.evaluate(dist.concentration1))
def testBetaProperty(self):
a = [[1., 2, 3]]
b = [[2., 4, 3]]
dist = tfd.Beta(a, b)
self.assertEqual([1, 3], dist.concentration0.shape)
self.assertAllClose(b, self.evaluate(dist.concentration0))
def testPdfXProper(self):
a = [[1., 2, 3]]
b = [[2., 4, 3]]
dist = tfd.Beta(a, b, validate_args=True)
self.evaluate(dist.prob([.1, .3, .6]))
self.evaluate(dist.prob([.2, .3, .5]))
# Either condition can trigger.
with self.assertRaisesOpError("Sample must be positive."):
self.evaluate(dist.prob([-1., 0.1, 0.5]))
with self.assertRaisesOpError("Sample must be positive."):
self.evaluate(dist.prob([0., 0.1, 0.5]))
with self.assertRaisesOpError("Sample must be less than `1`."):
self.evaluate(dist.prob([.1, .2, 1.2]))
with self.assertRaisesOpError("Sample must be less than `1`."):
self.evaluate(dist.prob([.1, .2, 1.0]))
def testPdfTwoBatches(self):
a = [1., 2]
b = [1., 2]
x = [.5, .5]
dist = tfd.Beta(a, b)
pdf = dist.prob(x)
self.assertAllClose([1., 3. / 2], self.evaluate(pdf))
self.assertEqual((2,), pdf.shape)
def testPdfTwoBatchesNontrivialX(self):
a = [1., 2]
b = [1., 2]
x = [.3, .7]
dist = tfd.Beta(a, b)
pdf = dist.prob(x)
self.assertAllClose([1, 63. / 50], self.evaluate(pdf))
self.assertEqual((2,), pdf.shape)
def testPdfUniformZeroBatch(self):
# This is equivalent to a uniform distribution
a = 1.
b = 1.
x = np.array([.1, .2, .3, .5, .8], dtype=np.float32)
dist = tfd.Beta(a, b)
pdf = dist.prob(x)
self.assertAllClose([1.] * 5, self.evaluate(pdf))
self.assertEqual((5,), pdf.shape)
def testPdfAlphaStretchedInBroadcastWhenSameRank(self):
a = [[1., 2]]
b = [[1., 2]]
x = [[.5, .5], [.3, .7]]
dist = tfd.Beta(a, b)
pdf = dist.prob(x)
self.assertAllClose([[1., 3. / 2], [1., 63. / 50]], self.evaluate(pdf))
self.assertEqual((2, 2), pdf.shape)
def testPdfAlphaStretchedInBroadcastWhenLowerRank(self):
a = [1., 2]
b = [1., 2]
x = [[.5, .5], [.2, .8]]
pdf = tfd.Beta(a, b).prob(x)
self.assertAllClose([[1., 3. / 2], [1., 24. / 25]], self.evaluate(pdf))
self.assertEqual((2, 2), pdf.shape)
def testPdfXStretchedInBroadcastWhenSameRank(self):
a = [[1., 2], [2., 3]]
b = [[1., 2], [2., 3]]
x = [[.5, .5]]
pdf = tfd.Beta(a, b).prob(x)
self.assertAllClose([[1., 3. / 2], [3. / 2, 15. / 8]], self.evaluate(pdf))
self.assertEqual((2, 2), pdf.shape)
def testPdfXStretchedInBroadcastWhenLowerRank(self):
a = [[1., 2], [2., 3]]
b = [[1., 2], [2., 3]]
x = [.5, .5]
pdf = tfd.Beta(a, b).prob(x)
self.assertAllClose([[1., 3. / 2], [3. / 2, 15. / 8]], self.evaluate(pdf))
self.assertEqual((2, 2), pdf.shape)
def testLogPdfOnBoundaryIsFiniteWhenAlphaIsOne(self):
b = [[0.01, 0.1, 1., 2], [5., 10., 2., 3]]
pdf = self.evaluate(tfd.Beta(1., b).prob(0.))
self.assertAllEqual(np.ones_like(pdf, dtype=np.bool_), np.isfinite(pdf))
def testBetaMean(self):
a = [1., 2, 3]
b = [2., 4, 1.2]
dist = tfd.Beta(a, b)
self.assertEqual(dist.mean().shape, (3,))
expected_mean = sp_stats.beta.mean(a, b)
self.assertAllClose(expected_mean, self.evaluate(dist.mean()))
def testBetaVariance(self):
a = [1., 2, 3]
b = [2., 4, 1.2]
dist = tfd.Beta(a, b)
self.assertEqual(dist.variance().shape, (3,))
expected_variance = sp_stats.beta.var(a, b)
self.assertAllClose(expected_variance, self.evaluate(dist.variance()))
def testBetaMode(self):
a = np.array([1.1, 2, 3])
b = np.array([2., 4, 1.2])
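# The mode of a Beta(a, b) distribution is (a - 1) / (a + b - 2), which is
# well defined here because every entry of a and b is greater than 1.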
expected_mode = (a - 1) / (a + b - 2)
dist = tfd.Beta(a, b)
self.assertEqual(dist.mode().shape, (3,))
self.assertAllClose(expected_mode, self.evaluate(dist.mode()))
def testBetaModeInvalid(self):
a = np.array([1., 2, 3])
b = np.array([2., 4, 1.2])
dist = tfd.Beta(a, b, allow_nan_stats=False)
with self.assertRaisesOpError("Condition x < y.*"):
self.evaluate(dist.mode())
a = np.array([2., 2, 3])
b = np.array([1., 4, 1.2])
dist = tfd.Beta(a, b, allow_nan_stats=False)
with self.assertRaisesOpError("Condition x < y.*"):
self.evaluate(dist.mode())
def testBetaModeEnableAllowNanStats(self):
a = np.array([1., 2, 3])
b = np.array([2., 4, 1.2])
dist = tfd.Beta(a, b, allow_nan_stats=True)
expected_mode = (a - 1) / (a + b - 2)
expected_mode[0] = np.nan
self.assertEqual((3,), dist.mode().shape)
self.assertAllClose(expected_mode, self.evaluate(dist.mode()))
a = np.array([2., 2, 3])
b = np.array([1., 4, 1.2])
dist = tfd.Beta(a, b, allow_nan_stats=True)
expected_mode = (a - 1) / (a + b - 2)
expected_mode[0] = np.nan
self.assertEqual((3,), dist.mode().shape)
self.assertAllClose(expected_mode, self.evaluate(dist.mode()))
def testBetaEntropy(self):
a = [1., 2, 3]
b = [2., 4, 1.2]
dist = tfd.Beta(a, b)
self.assertEqual(dist.entropy().shape, (3,))
expected_entropy = sp_stats.beta.entropy(a, b)
self.assertAllClose(expected_entropy, self.evaluate(dist.entropy()))
def testBetaSample(self):
a = 1.
b = 2.
beta = tfd.Beta(a, b)
n = tf.constant(100000)
samples = beta.sample(n)
sample_values = self.evaluate(samples)
self.assertEqual(sample_values.shape, (100000,))
self.assertFalse(np.any(sample_values < 0.0))
import sys
import unittest
import copy
import numpy as np
from scipy.linalg import block_diag
import pyinduct as pi
import pyinduct.hyperbolic.feedforward as hff
import pyinduct.parabolic as parabolic
import pyinduct.simulation as sim
from pyinduct.tests import show_plots
import pyqtgraph as pg
class SimpleInput(sim.SimulationInput):
"""
the simplest input we can imagine
"""
def __init__(self):
super().__init__("SimpleInput")
def _calc_output(self, **kwargs):
return 0
class MonotonousInput(sim.SimulationInput):
"""
an input that ramps up
"""
def __init__(self):
super().__init__("MonotonousInput")
def _calc_output(self, **kwargs):
t = kwargs["time"]
extra_data = np.sin(t)
if np.isclose(t % 2, 0):
extra_data = np.nan
return dict(output=kwargs["time"], extra_data=extra_data)
class CorrectInput(sim.SimulationInput):
"""
a diligent input
"""
def __init__(self, output, limits=(0, 1), der_order=0):
super().__init__(self)
self.out = np.ones(der_order + 1) * output
self.t_min, self.t_max = limits
def _calc_output(self, **kwargs):
if "time" not in kwargs:
raise ValueError("mandatory key not found!")
if "weights" not in kwargs:
raise ValueError("mandatory key not found!")
if "weight_lbl" not in kwargs:
raise ValueError("mandatory key not found!")
return dict(output=self.out)
class AlternatingInput(sim.SimulationInput):
"""
a simple alternating input, composed of smooth transitions
"""
def _calc_output(self, **kwargs):
t = kwargs["time"] % 2
if t < 1:
res = self.tr_up(t)
else:
res = self.tr_down(t)
return dict(output=res - .5)
def __init__(self):
super().__init__(self)
self.tr_up = pi.SmoothTransition(states=(0, 1),
interval=(0, 1),
method="poly")
self.tr_down = pi.SmoothTransition(states=(1, 0),
interval=(1, 2),
method="poly")
class SimulationInputTest(unittest.TestCase):
def setUp(self):
pass
def test_abstract_funcs(self):
# raise type error since abstract method is not implemented
self.assertRaises(TypeError, sim.SimulationInput)
# method implemented, should work
u = SimpleInput()
def test_call_arguments(self):
a = np.eye(2, 2)
b = np.array([[0], [1]])
u = CorrectInput(output=1, limits=(0, 1))
ic = np.zeros((2, 1))
ss = sim.StateSpace({1: a}, {0: {1: b}}, input_handle=u)
# if caller provides correct kwargs no exception should be raised
res = sim.simulate_state_space(ss, ic, pi.Domain((0, 1), num=10))
def test_storage(self):
a = np.eye(2, 2)
b = np.array([[0], [1]])
u = MonotonousInput()
ic = np.zeros((2, 1))
ss = sim.StateSpace(a, b, input_handle=u)
# run simulation to fill the internal storage
domain = pi.Domain((0, 10), num=11)
bigger_domain = pi.Domain((-1, 11), num=13)
res = sim.simulate_state_space(ss, ic, domain)
# don't return any entries that aren't there
self.assertRaises(KeyError, u.get_results, domain, "Unknown Entry")
# default key is "output"
ed = u.get_results(domain)
ed_explicit = u.get_results(domain, result_key="output")
self.assertTrue(np.array_equal(ed, ed_explicit))
# return an np.ndarray as default
self.assertIsInstance(ed, np.ndarray)
# return EvalData if corresponding flag is set
self.assertIsInstance(u.get_results(domain, as_eval_data=True),
pi.EvalData)
# if data has to be extrapolated, just repeat the last values
res = u.get_results(bigger_domain)
self.assertEqual(res[0], res[1])
self.assertEqual(res[-2], res[-1])
# nan values in the data storage should be ignored
res = u.get_results(bigger_domain, result_key="extra_data")
# storage contains values
self.assertTrue(u._time_storage)
self.assertTrue(u._value_storage)
# clear it
u.clear_cache()
# storage should be empty
self.assertFalse(u._time_storage)
self.assertFalse(u._value_storage)
# double clearing should work
u.clear_cache()
class CanonicalFormTest(unittest.TestCase):
def setUp(self):
self.cf = sim.CanonicalForm()
self.u = SimpleInput()
def test_add_to(self):
a = np.eye(5)
self.cf.add_to(dict(name="E", order=0, exponent=1), a)
self.assertTrue(np.array_equal(self.cf.matrices["E"][0][1], a))
self.cf.add_to(dict(name="E", order=0, exponent=1), 5 * a)
self.assertTrue(np.array_equal(self.cf.matrices["E"][0][1], 6 * a))
b = np.eye(10)
self.assertRaises(ValueError,
self.cf.add_to,
dict(name="E", order=0, exponent=1), b)
self.cf.add_to(dict(name="E", order=2, exponent=1), b)
self.assertTrue(np.array_equal(self.cf.matrices["E"][2][1], b))
f = np.atleast_2d(np.array(range(5))).T
self.assertRaises(ValueError,
self.cf.add_to,
dict(name="E", order=0, exponent=1), f)
self.cf.add_to(dict(name="f"), f)
self.assertTrue(np.array_equal(self.cf.matrices["f"], f))
# try to add something with derivative or exponent to f: value should
# end up in f
self.cf.add_to(dict(name="f"), f)
self.assertTrue(np.array_equal(self.cf.matrices["f"], 2 * f))
c = np.atleast_2d(np.array(range(5))).T
# that one should be easy
self.cf.add_to(dict(name="G", order=0, exponent=1), c, column=0)
self.assertTrue(np.array_equal(self.cf.matrices["G"][0][1], c))
# here G01 has to be expanded
self.cf.add_to(dict(name="G", order=0, exponent=1), c, column=1)
self.assertTrue(np.array_equal(self.cf.matrices["G"][0][1],
np.hstack((c, c))))
# here G01 has to be expanded again
self.cf.add_to(dict(name="G", order=0, exponent=1), c, column=3)
self.assertTrue(np.array_equal(self.cf.matrices["G"][0][1],
np.hstack((c, c, np.zeros_like(c), c))))
# input derivatives can occur
self.cf.add_to(dict(name="G", order=1, exponent=1), c, column=0)
self.assertTrue(np.array_equal(self.cf.matrices["G"][1][1], c))
# expansion should still work
self.cf.add_to(dict(name="G", order=1, exponent=1), c, column=1)
self.assertTrue(np.array_equal(self.cf.matrices["G"][1][1],
np.hstack((c, c))))
class ParseTest(unittest.TestCase):
def setUp(self):
# scalars
self.scalars = pi.Scalars(np.vstack(list(range(3))))
# callbacks
self.u = pi.ConstantTrajectory(7)
u1 = CorrectInput(output=1)
u2 = CorrectInput(output=2)
self.u_vec = pi.SimulationInputVector([u1, u2])
self.u_dt = CorrectInput(output=1, der_order=1)
u1_dt = CorrectInput(output=1, der_order=1)
u2_dt = CorrectInput(output=2, der_order=1)
self.u_vec_dt = pi.SimulationInputVector([u1_dt, u2_dt])
# inputs
self.input = pi.Input(self.u)
self.vec_input_1 = pi.Input(self.u_vec, index=0)
self.vec_input_2 = pi.Input(self.u_vec, index=1)
self.input_dt = pi.Input(self.u_dt, order=1)
self.vec_input_dt_1 = pi.Input(self.u_vec_dt, index=0, order=1)
self.vec_input_dt_2 = pi.Input(self.u_vec_dt, index=1, order=1)
# scale function
def heavyside(z):
if z < 0.5:
return 0
elif z == 0.5:
return .5
else:
return 1
base = pi.Base(pi.Function(heavyside))
pi.register_base("heavyside_base", base)
# distributed base
nodes = pi.Domain((0, 1), num=3)
self.distributed_base = pi.LagrangeFirstOrder.cure_interval(nodes)
pi.register_base("distributed_base", self.distributed_base)
fractions = [pi.ComposedFunctionVector(f, s) for f, s in
zip(self.distributed_base, nodes)]
self.composed_base = pi.Base(fractions)
pi.register_base("composed_base", self.composed_base)
# lumped base
self.lumped_base = pi.Base([pi.ConstantFunction(1)])
pi.register_base("lumped_base", self.lumped_base)
# Test Functions
self.test_funcs = pi.TestFunction("distributed_base")
self.test_funcs_at0 = self.test_funcs(0)
self.test_funcs_at1 = self.test_funcs(1)
self.test_funcs_dz = self.test_funcs.derive(1)
self.test_funcs_dz_at1 = self.test_funcs_dz(1)
self.comp_test_funcs = pi.TestFunction("composed_base")
self.comp_test_funcs_at0 = self.comp_test_funcs(0)
self.comp_test_funcs_at1 = self.comp_test_funcs(1)
self.comp_test_funcs_dz = self.comp_test_funcs.derive(1)
self.comp_test_funcs_dz_at1 = self.comp_test_funcs_dz(1)
# Scalar Functions
self.scalar_func = pi.ScalarFunction("heavyside_base")
# Distributed / Field Variables
self.field_var = pi.FieldVariable("distributed_base")
self.field_var_at1 = self.field_var(1)
self.field_var_dz = self.field_var.derive(spat_order=1)
self.field_var_dz_at1 = self.field_var_dz(1)
self.field_var_ddt = self.field_var.derive(temp_order=2)
self.field_var_ddt_at0 = self.field_var_ddt(0)
self.field_var_ddt_at1 = self.field_var_ddt(1)
self.comp_field_var = pi.FieldVariable("composed_base")
self.comp_field_var_at1 = self.comp_field_var(1)
self.comp_field_var_dz = self.comp_field_var.derive(spat_order=1)
self.odd_weight_field_var = pi.FieldVariable(
"distributed_base", weight_label="special_weights")
# Field variable 2
self.lumped_var = pi.FieldVariable("lumped_base")
# ---------------------------------------------------------------------
# Construction of Equation Terms
# ---------------------------------------------------------------------
# inputs
self.input_term1 = pi.ScalarTerm(pi.Product(self.test_funcs_at1,
self.input))
self.input_term1_swapped = pi.ScalarTerm(pi.Product(self.input,
self.test_funcs_at1)
)
self.input_term2 = pi.ScalarTerm(pi.Product(self.test_funcs_dz_at1,
self.input))
self.input_term3 = pi.IntegralTerm(pi.Product(self.test_funcs,
self.input),
limits=(0, 1))
self.input_term3_swapped = pi.IntegralTerm(pi.Product(self.input,
self.test_funcs),
limits=(0, 1))
self.input_term3_scaled = pi.IntegralTerm(
pi.Product(pi.Product(self.scalar_func, self.test_funcs),
self.input),
limits=(0, 1))
self.input_term3_scaled_first_half = pi.IntegralTerm(
pi.Product(pi.Product(self.scalar_func, self.test_funcs),
self.input),
limits=(0, .5))
self.input_term3_scaled_second_half = pi.IntegralTerm(
pi.Product(pi.Product(self.scalar_func, self.test_funcs),
self.input),
limits=(.5, 1))
self.input_term_dt = pi.IntegralTerm(pi.Product(self.test_funcs,
self.input_dt),
limits=(0, 1))
self.input_term_vectorial1 = pi.ScalarTerm(
pi.Product(self.test_funcs_at0, self.vec_input_1))
self.input_term_vectorial2 = pi.ScalarTerm(
pi.Product(self.test_funcs_at1, self.vec_input_2))
self.input_term_vectorial_dt1 = pi.ScalarTerm(
pi.Product(self.test_funcs_at0, self.vec_input_dt_1))
self.input_term_vectorial_dt2 = pi.ScalarTerm(
pi.Product(self.test_funcs_at1, self.vec_input_dt_2))
# pure test function terms
self.func_term = pi.ScalarTerm(self.test_funcs_at1)
self.func_term_int = pi.IntegralTerm(pi.Product(self.test_funcs,
self.test_funcs),
limits=(0, 1))
self.comp_func_term = pi.ScalarTerm(self.comp_test_funcs_at1)
self.comp_func_term_int = pi.IntegralTerm(
pi.Product(self.comp_test_funcs, self.comp_test_funcs),
limits=(0, 1))
# pure field variable terms
self.field_term_at1 = pi.ScalarTerm(self.field_var_at1)
self.field_term_dz_at1 = pi.ScalarTerm(self.field_var_dz_at1)
self.field_term_ddt_at1 = pi.ScalarTerm(self.field_var_ddt_at1)
self.field_int = pi.IntegralTerm(self.field_var, limits=(0, 1))
self.field_int_half = pi.IntegralTerm(self.field_var, limits=(0, .5))
self.field_dz_int = pi.IntegralTerm(self.field_var_dz, (0, 1))
self.field_ddt_int = pi.IntegralTerm(self.field_var_ddt, (0, 1))
self.comp_field_term_at1 = pi.ScalarTerm(self.comp_field_var_at1)
self.comp_field_int = pi.IntegralTerm(self.comp_field_var,
limits=(0, 1))
self.comp_field_dz_int = pi.IntegralTerm(self.comp_field_var,
limits=(0, 1))
# products
self.prod_term_fs_at1 = pi.ScalarTerm(
pi.Product(self.field_var_at1, self.scalars))
self.prod_int_fs = pi.IntegralTerm(pi.Product(self.field_var,
self.scalars),
(0, 1))
self.prod_int_f_f = pi.IntegralTerm(pi.Product(self.field_var,
self.test_funcs),
(0, 1))
self.prod_int_f_f_swapped = pi.IntegralTerm(pi.Product(self.test_funcs,
self.field_var),
(0, 1))
self.prod_int_f_at1_f = pi.IntegralTerm(
pi.Product(self.field_var_at1, self.test_funcs), (0, 1))
self.prod_int_f_f_at1 = pi.IntegralTerm(
pi.Product(self.field_var, self.test_funcs_at1), (0, 1))
self.prod_term_f_at1_f_at1 = pi.ScalarTerm(
pi.Product(self.field_var_at1, self.test_funcs_at1))
self.prod_int_fddt_f = pi.IntegralTerm(
pi.Product(self.field_var_ddt, self.test_funcs), (0, 1))
self.prod_term_fddt_at0_f_at0 = pi.ScalarTerm(
pi.Product(self.field_var_ddt_at0, self.test_funcs_at0))
self.prod_term_f_at1_dphi_at1 = pi.ScalarTerm(
pi.Product(self.field_var_at1, self.test_funcs_dz_at1))
self.temp_int = pi.IntegralTerm(pi.Product(self.field_var_ddt,
self.test_funcs),
limits=(0, 1))
self.spat_int = pi.IntegralTerm(pi.Product(self.field_var_dz,
self.test_funcs_dz),
limits=(0, 1))
self.spat_int_asymmetric = pi.IntegralTerm(pi.Product(self.field_var_dz,
self.test_funcs),
limits=(0, 1))
self.prod_term_tf_at0_lv_at0 = pi.ScalarTerm(
pi.Product(self.test_funcs(0), self.lumped_var(0)))
self.prod_term_tf_at0_lv_at0_swapped = pi.ScalarTerm(
pi.Product(self.lumped_var(0), self.test_funcs(0)))
self.prod_int_sf_fv = pi.IntegralTerm(pi.Product(self.scalar_func,
self.field_var),
limits=(0, 1))
self.prod_int_sf_fv_swapped = pi.IntegralTerm(
pi.Product(self.field_var, self.scalar_func),
limits=(0, 1))
self.alternating_weights_term = pi.IntegralTerm(
self.odd_weight_field_var,
limits=(0, 1))
def test_Input_term(self):
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term2, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[0], [-2], [2]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term3, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[.25], [.5], [.25]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term3_swapped, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[.25], [.5], [.25]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term3_scaled, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[.0], [.25], [.25]]))
terms_fh = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term3_scaled_first_half, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms_fh["G"][0][1],
np.array([[.0], [.0], [.0]]))
terms_sh = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term3_scaled_second_half, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms_sh["G"][0][1],
np.array([[.0], [.25], [.25]]))
# vectorial inputs
terms = sim.parse_weak_formulation(sim.WeakFormulation(
[self.input_term_vectorial1, self.input_term_vectorial2],
name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[1, 0],
[0, 0],
[0, 1]]))
# time derivatives of inputs
terms = sim.parse_weak_formulation(sim.WeakFormulation(
self.input_term_dt,
name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][1][1]))
np.testing.assert_array_almost_equal(terms["G"][1][1],
np.array([[.25], [.5], [.25]]))
# time derivative of vectorial inputs
terms = sim.parse_weak_formulation(sim.WeakFormulation(
[self.input_term_vectorial_dt1, self.input_term_vectorial_dt2],
name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][1][1]))
np.testing.assert_array_almost_equal(terms["G"][1][1],
np.array([[1, 0],
[0, 0],
[0, 1]]))
def test_TestFunction_term(self):
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.func_term, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["f"]))
np.testing.assert_array_almost_equal(terms["f"],
np.array([[0], [0], [1]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.func_term_int, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["f"]))
"""
Wrapper for the MKL FFT routines. This implements very fast FFT on Intel
processors, much faster than the stock fftpack routines in numpy/scipy.
"""
from __future__ import division, print_function
import numpy as np
import ctypes as _ctypes
import os
from dftidefs import *
def load_libmkl():
r"""Loads the MKL library if it can be found in the library load path.
Raises
------
ValueError
If the MKL library cannot be found.
"""
if os.name == 'posix':
try:
lib_mkl = os.getenv('LIBMKL')
if lib_mkl is None:
raise ValueError('LIBMKL environment variable not found')
return _ctypes.cdll.LoadLibrary(lib_mkl)
except:
pass
try:
return _ctypes.cdll.LoadLibrary("libmkl_rt.dylib")
except:
raise ValueError('MKL Library not found')
else:
try:
return _ctypes.cdll.LoadLibrary("mkl_rt.dll")
except:
raise ValueError('MKL Library not found')
mkl = load_libmkl()
def mkl_rfft(a, n=None, axis=-1, norm=None, direction='forward', out=None, scrambled=False):
r"""Forward/backward 1D double-precision real-complex FFT.
Uses the Intel MKL libraries distributed with Anaconda Python.
Normalisation is different from Numpy!
By default, allocates new memory like 'a' for output data.
Returns the array containing output data.
See Also
--------
rfft, irfft
"""
if axis == -1:
axis = a.ndim-1
# This code only works for 1D and 2D arrays
assert a.ndim < 3
assert (axis < a.ndim and axis >= -1)
assert (direction == 'forward' or direction == 'backward')
# Convert input to complex data type if real (also memory copy)
if direction == 'forward' and a.dtype != np.float32 and a.dtype != np.float64:
if a.dtype == np.int64 or a.dtype == np.uint64:
a = np.array(a, dtype=np.float64)
else:
a = np.array(a, dtype=np.float32)
elif direction == 'backward' and a.dtype != np.complex128 and a.dtype != np.complex64:
if a.dtype == np.int64 or a.dtype == np.uint64 or a.dtype == np.float64:
a = np.array(a, dtype=np.complex128)
else:
a = np.array(a, dtype=np.complex64)
order = 'C'
if a.flags['F_CONTIGUOUS'] and not a.flags['C_CONTIGUOUS']:
order = 'F'
# Add zero padding or truncate if needed (incurs memory copy)
if n is not None:
m = n if direction == 'forward' else (n // 2 + 1)
if a.shape[axis] < m:
# pad axis with zeros
pad_width = np.zeros((a.ndim, 2), dtype=int)
pad_width[axis,1] = m - a.shape[axis]
a = np.pad(a, pad_width, mode='constant')
elif a.shape[axis] > m:
# truncate along axis
b = np.swapaxes(a, axis, 0)[:m,]
a = np.swapaxes(b, 0, axis).copy()
elif direction == 'forward':
n = a.shape[axis]
elif direction == 'backward':
n = 2*(a.shape[axis]-1)
# determine output type
if direction == 'backward':
out_type = np.float64
if a.dtype == np.complex64:
out_type = np.float32
elif direction == 'forward':
out_type = np.complex128
if a.dtype == np.float32:
out_type = np.complex64
# Configure output array
assert a is not out
if out is not None:
assert out.dtype == out_type
for i in range(a.ndim):
if i != axis:
assert a.shape[i] == out.shape[i]
if direction == 'forward':
assert (n // 2 + 1) == out.shape[axis]
else:
assert out.shape[axis] == n
assert not np.may_share_memory(a, out)
else:
size = list(a.shape)
size[axis] = n // 2 + 1 if direction == 'forward' else n
out = np.empty(size, dtype=out_type, order=order)
# Define length, number of transforms strides
length = _ctypes.c_int(n)
n_transforms = _ctypes.c_int(np.prod(a.shape) // a.shape[axis])
# For strides, the C type used *must* be long
strides = (_ctypes.c_long*2)(0, a.strides[axis] // a.itemsize)
if a.ndim == 2:
if axis == 0:
distance = _ctypes.c_int(a.strides[1] // a.itemsize)
out_distance = _ctypes.c_int(out.strides[1] // out.itemsize)
else:
distance = _ctypes.c_int(a.strides[0] // a.itemsize)
out_distance = _ctypes.c_int(out.strides[0] // out.itemsize)
double_precision = True
if (direction == 'forward' and a.dtype == np.float32) or (direction == 'backward' and a.dtype == np.complex64):
double_precision = False
# Create the description handle
Desc_Handle = _ctypes.c_void_p(0)
if not double_precision:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_SINGLE, DFTI_REAL, _ctypes.c_int(1), length)
else:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_DOUBLE, DFTI_REAL, _ctypes.c_int(1), length)
# set the storage type
mkl.DftiSetValue(Desc_Handle, DFTI_CONJUGATE_EVEN_STORAGE, DFTI_COMPLEX_COMPLEX)
# set normalization factor
if norm == 'ortho':
scale = _ctypes.c_double(1 / np.sqrt(n))
mkl.DftiSetValue(Desc_Handle, DFTI_FORWARD_SCALE, scale)
mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
elif norm is None:
scale = _ctypes.c_double(1. / n)
s = mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
# set all values if necessary
if a.ndim != 1:
mkl.DftiSetValue(Desc_Handle, DFTI_NUMBER_OF_TRANSFORMS, n_transforms)
mkl.DftiSetValue(Desc_Handle, DFTI_INPUT_DISTANCE, distance)
mkl.DftiSetValue(Desc_Handle, DFTI_OUTPUT_DISTANCE, out_distance)
mkl.DftiSetValue(Desc_Handle, DFTI_INPUT_STRIDES, _ctypes.byref(strides))
mkl.DftiSetValue(Desc_Handle, DFTI_OUTPUT_STRIDES, _ctypes.byref(strides))
if scrambled:
s = mkl.DftiSetValue(Desc_Handle, DFTI_ORDERING, DFTI_BACKWARD_SCRAMBLED)
if direction == 'forward':
fft_func = mkl.DftiComputeForward
elif direction == 'backward':
fft_func = mkl.DftiComputeBackward
else:
assert False
# Not-in-place FFT
mkl.DftiSetValue(Desc_Handle, DFTI_PLACEMENT, DFTI_NOT_INPLACE)
mkl.DftiCommitDescriptor(Desc_Handle)
fft_func(Desc_Handle, a.ctypes.data_as(_ctypes.c_void_p), out.ctypes.data_as(_ctypes.c_void_p) )
mkl.DftiFreeDescriptor(_ctypes.byref(Desc_Handle))
return out
def mkl_fft(a, n=None, axis=-1, norm=None, direction='forward', out=None, scrambled=False):
r"""Forward/backward 1D single- or double-precision FFT.
Uses the Intel MKL libraries distributed with Anaconda Python.
Normalisation is different from Numpy!
By default, allocates new memory like 'a' for output data.
Returns the array containing output data.
See Also
--------
fft, ifft
"""
# This code only works for 1D and 2D arrays
assert a.ndim < 3
assert axis < a.ndim and axis >= -1
# Add zero padding if needed (incurs memory copy)
'''
if n is not None and n != a.shape[axis]:
pad_width = np.zeros((a.ndim, 2), dtype=np.int)
pad_width[axis,1] = n - a.shape[axis]
a = np.pad(a, pad_width, mode='constant')
'''
if n is not None:
if a.shape[axis] < n:
# pad axis with zeros
pad_width = np.zeros((a.ndim, 2), dtype=int)
pad_width[axis,1] = n - a.shape[axis]
a = np.pad(a, pad_width, mode='constant')
elif a.shape[axis] > n:
# truncate along axis
b = np.swapaxes(a, axis, -1)[...,:n]
a = np.swapaxes(b, -1, axis).copy()
# Convert input to complex data type if real (also memory copy)
if a.dtype != np.complex128 and a.dtype != np.complex64:
if a.dtype == np.int64 or a.dtype == np.uint64 or a.dtype == np.float64:
a = np.array(a, dtype=np.complex128)
else:
a = np.array(a, dtype=np.complex64)
# Configure in-place vs out-of-place
inplace = False
if out is a:
inplace = True
elif out is not None:
assert out.dtype == a.dtype
assert a.shape == out.shape
assert not np.may_share_memory(a, out)
else:
out = np.empty_like(a)
# Define length, number of transforms strides
length = _ctypes.c_int(a.shape[axis])
n_transforms = _ctypes.c_int(np.prod(a.shape) // a.shape[axis])
# For strides, the C type used *must* be long
strides = (_ctypes.c_long*2)(0, a.strides[axis] // a.itemsize)
if a.ndim == 2:
if axis == 0:
distance = _ctypes.c_int(a.strides[1] // a.itemsize)
else:
distance = _ctypes.c_int(a.strides[0] // a.itemsize)
# Create the description handle
Desc_Handle = _ctypes.c_void_p(0)
if a.dtype == np.complex64:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_SINGLE, DFTI_COMPLEX, _ctypes.c_int(1), length)
elif a.dtype == np.complex128:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_DOUBLE, DFTI_COMPLEX, _ctypes.c_int(1), length)
# Set normalization factor
if norm == 'ortho':
if a.dtype == np.complex64:
scale = _ctypes.c_float(1 / np.sqrt(a.shape[axis]))
else:
scale = _ctypes.c_double(1 / np.sqrt(a.shape[axis]))
mkl.DftiSetValue(Desc_Handle, DFTI_FORWARD_SCALE, scale)
mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
elif norm is None:
if a.dtype == np.complex64:
scale = _ctypes.c_float(1. / a.shape[axis])
else:
scale = _ctypes.c_double(1. / a.shape[axis])
mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
# set all values if necessary
if a.ndim != 1:
mkl.DftiSetValue(Desc_Handle, DFTI_NUMBER_OF_TRANSFORMS, n_transforms)
mkl.DftiSetValue(Desc_Handle, DFTI_INPUT_DISTANCE, distance)
mkl.DftiSetValue(Desc_Handle, DFTI_OUTPUT_DISTANCE, distance)
mkl.DftiSetValue(Desc_Handle, DFTI_INPUT_STRIDES, _ctypes.byref(strides))
mkl.DftiSetValue(Desc_Handle, DFTI_OUTPUT_STRIDES, _ctypes.byref(strides))
if scrambled:
s = mkl.DftiSetValue(Desc_Handle, DFTI_ORDERING, DFTI_BACKWARD_SCRAMBLED)
DftiErrorMessage(s)
if direction == 'forward':
fft_func = mkl.DftiComputeForward
elif direction == 'backward':
fft_func = mkl.DftiComputeBackward
else:
assert False
if inplace:
# In-place FFT
mkl.DftiCommitDescriptor(Desc_Handle)
fft_func(Desc_Handle, a.ctypes.data_as(_ctypes.c_void_p) )
else:
# Not-in-place FFT
mkl.DftiSetValue(Desc_Handle, DFTI_PLACEMENT, DFTI_NOT_INPLACE)
mkl.DftiCommitDescriptor(Desc_Handle)
fft_func(Desc_Handle, a.ctypes.data_as(_ctypes.c_void_p), out.ctypes.data_as(_ctypes.c_void_p) )
mkl.DftiFreeDescriptor(_ctypes.byref(Desc_Handle))
return out
def proper_fft2(a, norm=None, direction='forward', mkl_dir=None, fft_nthreads=0):
r"""Forward/backward 2D single- or double-precision FFT.
Uses the Intel MKL libraries distributed with Enthought Python.
Normalisation is different from Numpy!
By default, allocates new memory like 'a' for output data.
Returns the array containing output data.
See Also
--------
fft2, ifft2
"""
# input must be complex; no other dtypes are accepted
if a.dtype != np.complex128 and a.dtype != np.complex64:
raise ValueError('prop_fftw: Unsupported data type. Must be complex64 or complex128.')
# Create the description handle
Desc_Handle = _ctypes.c_void_p(0)
dims = (_ctypes.c_int64*2)(*a.shape)
if a.dtype == np.complex64:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_SINGLE, DFTI_COMPLEX, _ctypes.c_int(2), dims)
elif a.dtype == np.complex128:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_DOUBLE, DFTI_COMPLEX, _ctypes.c_int(2), dims)
# Set normalization factor
if norm == 'ortho':
if a.dtype == np.complex64:
scale = _ctypes.c_float(1.0 / np.sqrt(np.prod(a.shape)))
else:
scale = _ctypes.c_double(1.0 / np.sqrt(np.prod(a.shape)))
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
import ctypes
import numpy
import h5py
from pyscf import lib
libao2mo = lib.load_library('libao2mo')
class load(object):
'''load 2e integrals from hdf5 file
Usage:
with load(erifile) as eri:
print(eri.shape)
'''
def __init__(self, eri, dataname='eri_mo'):
self.eri = eri
self.dataname = dataname
self.feri = None
def __enter__(self):
if isinstance(self.eri, str):
feri = self.feri = h5py.File(self.eri, 'r')
elif isinstance(self.eri, h5py.Group):
feri = self.eri
elif isinstance(getattr(self.eri, 'name', None), str):
feri = self.feri = h5py.File(self.eri.name, 'r')
elif isinstance(self.eri, numpy.ndarray):
return self.eri
else:
raise RuntimeError('Unknown eri type %s', type(self.eri))
if self.dataname is None:
return feri
else:
return feri[self.dataname]
def __exit__(self, type, value, traceback):
if self.feri is not None:
self.feri.close()
def restore(symmetry, eri, norb, tao=None):
r'''Convert the 2e integrals (in Chemist's notation) between different
level of permutation symmetry (8-fold, 4-fold, or no symmetry)
Args:
symmetry : int or str
code to present the target symmetry of 2e integrals
| 's8' or '8' or 8 : 8-fold symmetry
| 's4' or '4' or 4 : 4-fold symmetry
| 's1' or '1' or 1 : no symmetry
| 's2ij' or '2ij' : symmetric ij pair for (ij|kl) (TODO)
            | 's2kl' or '2kl' : symmetric kl pair for (ij|kl) (TODO)
            Note the 4-fold symmetry requires (ij|kl) == (ji|kl) == (ij|lk) == (ji|lk),
            while (ij|kl) == (kl|ij) is not required.
eri : ndarray
The symmetry of eri is determined by the size of eri and norb
norb : int
The symmetry of eri is determined by the size of eri and norb
Returns:
ndarray. The shape depends on the target symmetry.
| 8 : (norb*(norb+1)/2)*(norb*(norb+1)/2+1)/2
| 4 : (norb*(norb+1)/2, norb*(norb+1)/2)
| 1 : (norb, norb, norb, norb)
Examples:
>>> from pyscf import gto
>>> from pyscf.scf import _vhf
>>> from pyscf import ao2mo
>>> mol = gto.M(atom='O 0 0 0; H 0 1 0; H 0 0 1', basis='sto3g')
>>> eri = mol.intor('int2e')
>>> eri1 = ao2mo.restore(1, eri, mol.nao_nr())
>>> eri4 = ao2mo.restore(4, eri, mol.nao_nr())
>>> eri8 = ao2mo.restore(8, eri, mol.nao_nr())
>>> print(eri1.shape)
(7, 7, 7, 7)
    >>> print(eri4.shape)
(28, 28)
    >>> print(eri8.shape)
(406,)
'''
targetsym = _stand_sym_code(symmetry)
if targetsym not in ('8', '4', '1', '2kl', '2ij'):
raise ValueError('symmetry = %s' % symmetry)
if eri.dtype != numpy.double:
raise RuntimeError('Complex integrals not supported')
eri = numpy.asarray(eri, order='C')
npair = norb*(norb+1)//2
if eri.size == norb**4: # s1
if targetsym == '1':
return eri.reshape(norb,norb,norb,norb)
elif targetsym == '2kl':
eri = lib.pack_tril(eri.reshape(norb**2,norb,norb))
return eri.reshape(norb,norb,npair)
elif targetsym == '2ij':
eri = lib.pack_tril(eri.reshape(norb,norb,norb**2), axis=0)
return eri.reshape(npair,norb,norb)
else:
return _convert('1', targetsym, eri, norb)
elif eri.size == npair**2: # s4
if targetsym == '4':
return eri.reshape(npair,npair)
elif targetsym == '8':
return lib.pack_tril(eri.reshape(npair,npair))
elif targetsym == '2kl':
return lib.unpack_tril(eri, lib.SYMMETRIC, axis=0)
elif targetsym == '2ij':
return lib.unpack_tril(eri, lib.SYMMETRIC, axis=-1)
else:
return _convert('4', targetsym, eri, norb)
elif eri.size == npair*(npair+1)//2: # 8-fold
if targetsym == '8':
return eri.ravel()
elif targetsym == '4':
return lib.unpack_tril(eri.ravel(), lib.SYMMETRIC)
elif targetsym == '2kl':
return lib.unpack_tril(lib.unpack_tril(eri.ravel()), lib.SYMMETRIC, axis=0)
elif targetsym == '2ij':
return lib.unpack_tril(lib.unpack_tril(eri.ravel()), lib.SYMMETRIC, axis=-1)
else:
return _convert('8', targetsym, eri, norb)
elif eri.size == npair*norb**2 and eri.shape[0] == npair: # s2ij
if targetsym == '2ij':
return eri.reshape(npair,norb,norb)
elif targetsym == '8':
eri = lib.pack_tril(eri.reshape(npair,norb,norb))
return lib.pack_tril(eri)
elif targetsym == '4':
return lib.pack_tril(eri.reshape(npair,norb,norb))
elif targetsym == '1':
eri = lib.unpack_tril(eri.reshape(npair,norb**2), lib.SYMMETRIC, axis=0)
return eri.reshape(norb,norb,norb,norb)
elif targetsym == '2kl':
tril2sq = lib.square_mat_in_trilu_indices(norb)
            trilidx = numpy.tril_indices(norb)
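# --- Added usage sketch (assumes a working pyscf installation; the molecule
# and basis are only illustrative): pack the full integral array down to
# 8-fold symmetry with restore() and unpack it again.
def _restore_roundtrip_example():
    from pyscf import gto
    mol = gto.M(atom='O 0 0 0; H 0 1 0; H 0 0 1', basis='sto3g')
    norb = mol.nao_nr()
    eri_s1 = mol.intor('int2e')         # (norb, norb, norb, norb), no symmetry
    eri_s8 = restore(8, eri_s1, norb)   # packed to the 8-fold triangular form
    back = restore(1, eri_s8, norb)     # unpacked again
    assert numpy.allclose(eri_s1, back)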
import numpy as np
import main.utils as utils
import matplotlib.pyplot as plt
from scipy.io import loadmat
import cv2
import os
from random import sample
import pickle
# ---------------------------- Notes ----------------------------------
# Visualise the image-retrieval results.
# Requires: 1. paths to the database and query datasets  2. an xxx.npz file (storing the similarity matrices)
# 3. the optimal-path file pathid.pkl produced by SMCNTF
# 4. the similarity matrix xxx.mat produced by SeqSLAM
# Every line marked with (*) must be configured for your setup.
# The paper compares five methods in total: pairwise, MCN, SeqSLAM, SMCN and SMCNTF.
# ---------------------------- Notes ----------------------------------
# dataset root dir
root = 'E:/project/scut/graduation/datasets/gardens_point/' #(*)
# load xxx.npz
S_file = './vis3/gp_dlnr.npz' #(*)
S = np.load(S_file)
S_pw = S['S_pw']
S_mcn = S['S_mcn']
# use SeqSLAM2.0 toolbox from matlab, the similarity matrix has much nan value
S_seq = loadmat('../cp3_4/seqslam/gp_dlnr.mat')['S'] #(*)
tmp = np.isnan(S_seq)
S_seq[tmp] = np.max(S_seq[~tmp])
S_smcn = S['S_smcn']
S_smcntf = S['S_smcntf']
with open('../cp3_4/s_dd.pkl', 'rb') as f:
S_dd = pickle.load(f)
dbfile = 'day_left/' #(*) database file
qfile = 'night_right/' #(*) query file
dbn = len(os.listdir(root + dbfile))
qn = len(os.listdir(root + qfile))
qn = min(dbn, qn)
numPicToShow = 10 #(*)
if root.endswith('scut/'):
qn = qn // 4
picid = sample(range(0, qn), numPicToShow)
numpic = len(picid)
img_format = 'Image%03d.jpg' #(*)
err = 3 #(*)
saveName = './vis2/gp.png' #(*)
if root.endswith('robotcar/'):
db_gps = np.load(root + 'gps_snow.npy')
q_gps = np.load(root + 'gps_night.npy')
_, gt = utils.getGpsGT(db_gps, q_gps, err)
else:
gt = utils.getGroundTruthMatrix(qn, err)
gtl = []
for each in picid:
gtl.append(list(np.where(gt[:, each])[0]))
id_pw = np.argmax(S_pw[:, picid], axis=0)
id_dd = np.argmax(S_dd[:, picid], axis=0)
id_mcn = np.argmax(S_mcn[:, picid], axis=0)
id_smcn = np.argmax(S_smcn[:, picid], axis=0)
with open('./vis3/pathid.pkl', 'rb') as f: #(*)
id_smcntf = pickle.load(f)['gp']
id_smcntf = [id_smcntf[x] for x in picid]
id_seq = np.argmin(S_seq[:, picid], axis=0)
real_id_pw = np.copy(id_pw)
real_id_mcn = np.copy(id_mcn)
real_id_dd = np.copy(id_dd)
real_id_smcn = np.copy(id_smcn)
real_id_smcntf = np.copy(id_smcntf)
real_id_seq = np.copy(id_seq)
if root.endswith('scut/'):
real_id_pw *= 4
real_id_mcn *= 4
real_id_smcn *= 4
real_id_smcntf *= 4
real_id_seq *= 4
real_id_dd *= 4
numMethods = 6 #(*) how many methods to show
# The code below generally does not need to be configured
# --------------------------draw--------------------------------
pad = 7
img_size = 120
visImg = 255 * np.ones(((numMethods + 1)*(img_size+2*pad), numpic*(img_size + 2*pad), 3), dtype=np.uint8)
vboard = 0
hboard = 0
for i in range(numpic):
img = cv2.imread(root + dbfile + img_format % picid[i])
img = cv2.resize(img, (img_size, img_size))
visImg[vboard+pad:vboard+pad+img_size,
hboard+pad:hboard+pad+img_size :] = img
hboard += (2*pad+img_size)
vboard += (2*pad + img_size)
hboard = 0
for i in range(numpic):
img = cv2.imread(root + qfile + img_format % real_id_pw[i])
img = cv2.resize(img, (img_size, img_size))
if id_pw[i] in gtl[i]:
visImg[vboard:vboard + 2*pad + img_size,
hboard:hboard + 2*pad+img_size, :] = np.array([[[0, 255, 0]]])
else:
visImg[vboard:vboard + 2 * pad + img_size,
hboard:hboard + 2 * pad + img_size, :] = np.array([[[0, 0, 255]]])
visImg[vboard + pad:vboard + pad + img_size,
hboard + pad:hboard + pad + img_size:] = img
hboard += (2*pad+img_size)
vboard += (2*pad + img_size)
hboard = 0
for i in range(numpic):
img = cv2.imread(root + qfile + img_format % real_id_seq[i])
img = cv2.resize(img, (img_size, img_size))
if id_seq[i] in gtl[i]:
visImg[vboard:vboard + 2 * pad + img_size,
            hboard:hboard + 2 * pad + img_size, :] = np.array([[[0, 255, 0]]])
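# --- Added sketch: utils.getGroundTruthMatrix(qn, err) is used above but not
# shown here; a tolerance-based ground-truth matrix along these lines is
# assumed, where entry (i, j) is True when database frame i lies within `err`
# frames of query frame j.
def ground_truth_matrix_sketch(n, err):
    idx = np.arange(n)
    return np.abs(idx[:, None] - idx[None, :]) <= err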
# %%
import os
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
from collections import defaultdict
import json
import random
import time
import pickle
"""
CMU-MOSEI info
Train 16326 samples
Val 1871 samples
Test 4659 samples
CMU-MOSEI feature shapes
visual: (60, 35)
audio: (60, 74)
text: GLOVE->(60, 300)
label: (6) -> [happy, sad, anger, surprise, disgust, fear]
averaged from 3 annotators
unaligned:
text: (50, 300)
visual: (500, 35)
audio: (500, 74)
"""
emotion_dict = {4:0, 5:1, 6:2, 7:3, 8:4, 9:5}
class AlignedMoseiDataset(Dataset):
def __init__(self, data_path, data_type):
self.data_path = data_path
self.data_type = data_type
self.visual, self.audio, \
self.text, self.labels = self._get_data(self.data_type)
def _get_data(self, data_type):
data = torch.load(self.data_path)
data = data[data_type]
visual = data['src-visual']
audio = data['src-audio']
text = data['src-text']
labels = data['tgt']
return visual, audio, text, labels
def _get_text(self, index):
text = self.text[index]
text_mask = [1] * text.shape[0]
text_mask = np.array(text_mask)
return text, text_mask
def _get_visual(self, index):
visual = self.visual[index]
visual_mask = [1] * visual.shape[0]
visual_mask = np.array(visual_mask)
return visual, visual_mask
def _get_audio(self, index):
audio = self.audio[index]
audio[audio == -np.inf] = 0
audio_mask = [1] * audio.shape[0]
audio_mask = np.array(audio_mask)
return audio, audio_mask
def _get_labels(self, index):
label_list = self.labels[index]
label = np.zeros(6, dtype=np.float32)
filter_label = label_list[1:-1]
for emo in filter_label:
label[emotion_dict[emo]] = 1
return label
def _get_label_input(self):
labels_embedding = np.arange(6)
labels_mask = [1] * labels_embedding.shape[0]
labels_mask = np.array(labels_mask)
labels_embedding = torch.from_numpy(labels_embedding)
labels_mask = torch.from_numpy(labels_mask)
return labels_embedding, labels_mask
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
text, text_mask = self._get_text(index)
visual, visual_mask = self._get_visual(index)
audio, audio_mask = self._get_audio(index)
label = self._get_labels(index)
return text, text_mask, visual, visual_mask, \
audio, audio_mask, label
class UnAlignedMoseiDataset(Dataset):
def __init__(self, data_path, data_type):
self.data_path = data_path
self.data_type = data_type
self.visual, self.audio, \
self.text, self.labels = self._get_data(self.data_type)
def _get_data(self, data_type):
label_data = torch.load(self.data_path)
label_data = label_data[data_type]
with open('/amax/cmy/mosei_senti_data_noalign.pkl', 'rb') as f:
data = pickle.load(f)
data = data[data_type]
visual = data['vision']
audio = data['audio']
text = data['text']
audio = np.array(audio)
labels = label_data['tgt']
return visual, audio, text, labels
def _get_text(self, index):
text = self.text[index]
text_mask = [1] * text.shape[0]
text_mask = np.array(text_mask)
return text, text_mask
def _get_visual(self, index):
visual = self.visual[index]
visual_mask = [1] * 50
visual_mask = np.array(visual_mask)
return visual, visual_mask
def _get_audio(self, index):
audio = self.audio[index]
audio[audio == -np.inf] = 0
audio_mask = [1] * 50
audio_mask = np.array(audio_mask)
return audio, audio_mask
def _get_labels(self, index):
label_list = self.labels[index]
label = np.zeros(6, dtype=np.float32)
filter_label = label_list[1:-1]
for emo in filter_label:
label[emotion_dict[emo]] = 1
return label
def _get_label_input(self):
        labels_embedding = np.arange(6)
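# --- Added usage sketch: iterating the aligned dataset with a DataLoader.
# The .pt path is a placeholder; shapes follow the module docstring
# (text (60, 300), visual (60, 35), audio (60, 74), label (6,)).
def _mosei_loader_example(data_path='/path/to/mosei_aligned.pt'):
    train_set = AlignedMoseiDataset(data_path, 'train')
    loader = DataLoader(train_set, batch_size=32, shuffle=True)
    text, text_mask, visual, visual_mask, audio, audio_mask, label = next(iter(loader))
    print(text.shape, visual.shape, audio.shape, label.shape)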
# -*- coding:utf-8 -*-
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110- 1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# ----------------------------------------------------------
# Author: <NAME> (s-leger)
#
# ----------------------------------------------------------
bl_info = {
'name': 'PolyLib',
'description': 'Polygons detection from unordered splines',
'author': 's-leger',
'license': 'GPL',
'deps': 'shapely',
'version': (1, 1),
'blender': (2, 7, 8),
'location': 'View3D > Tools > Polygons',
'warning': '',
'wiki_url': 'https://github.com/s-leger/blenderPolygons/wiki',
'tracker_url': 'https://github.com/s-leger/blenderPolygons/issues',
'link': 'https://github.com/s-leger/blenderPolygons',
'support': 'COMMUNITY',
'category': '3D View'
}
import sys
import time
import bpy
import bgl
import numpy as np
from math import cos, sin, pi, atan2
import bmesh
# let shapely import raise ImportError when missing
import shapely.ops
import shapely.prepared
from shapely.geometry import Point as ShapelyPoint
from shapely.geometry import Polygon as ShapelyPolygon
try:
import shapely.speedups
if shapely.speedups.available:
shapely.speedups.enable()
except:
pass
from .bitarray import BitArray
from .pyqtree import _QuadTree
from mathutils import Vector, Matrix
from mathutils.geometry import intersect_line_plane, interpolate_bezier
from bpy_extras import view3d_utils
from bpy.types import Operator, PropertyGroup
from bpy.props import StringProperty, FloatProperty, PointerProperty, EnumProperty, IntProperty, BoolProperty
from bpy.app.handlers import persistent
from .materialutils import MaterialUtils
from .archipack_gl import (
FeedbackPanel,
GlCursorFence,
GlCursorArea,
GlLine,
GlPolyline
)
# module globals vars dict
vars_dict = {
# spacial tree for segments and points
'seg_tree': None,
'point_tree': None,
# keep track of shapely geometry selection sets
'select_polygons': None,
'select_lines': None,
'select_points': None
}
# module constants
# precision 1e-4 = 0.1mm
EPSILON = 1.0e-4
# Qtree params
MAX_ITEMS = 10
MAX_DEPTH = 20
class CoordSys(object):
"""
reference coordsys
world : matrix from local to world
invert: matrix from world to local
width, height: bonding region size
"""
def __init__(self, objs):
x = []
y = []
if len(objs) > 0:
if hasattr(objs[0], 'bound_box'):
for obj in objs:
pos = obj.location
x.append(obj.bound_box[0][0] + pos.x)
x.append(obj.bound_box[6][0] + pos.x)
y.append(obj.bound_box[0][1] + pos.y)
y.append(obj.bound_box[6][1] + pos.y)
elif hasattr(objs[0], 'bounds'):
for geom in objs:
x0, y0, x1, y1 = geom.bounds
x.append(x0)
x.append(x1)
y.append(y0)
y.append(y1)
else:
                raise Exception("CoordSys requires at least one object with a bounds or bound_box property to initialize")
else:
            raise Exception("CoordSys requires at least one object to initialize bounds")
x0 = min(x)
y0 = min(y)
x1 = max(x)
y1 = max(y)
width, height = x1 - x0, y1 - y0
midx, midy = x0 + width / 2.0, y0 + height / 2.0
# reference coordsys bounding box center
self.world = Matrix([
[1, 0, 0, midx],
[0, 1, 0, midy],
[0, 0, 1, 0],
[0, 0, 0, 1],
])
self.invert = self.world.inverted()
self.width = width
self.height = height
class Prolongement():
""" intersection of two segments outside segment (projection)
        c0 = end point on the current segment
        c1 = intersection point on the opposite segment
        id = opposite segment id
        t = param t on the opposite segment
        d = distance from the end point to the opposite segment
        insert = do we need to insert the point on the other segment
        use id, c1 and t to insert segment slices
"""
def __init__(self, c0, c1, id, t, d):
self.length = c0.distance(c1)
self.c0 = c0
self.c1 = c1
self.id = id
self.t = t
self.d = d
class Point():
def __init__(self, co, precision=EPSILON):
self.users = 0
self.co = tuple(co)
x, y, z = co
self.shapeIds = []
self.bounds = (x - precision, y - precision, x + precision, y + precision)
@property
def geom(self):
return ShapelyPoint(self.co)
def vect(self, point):
""" vector from this point to another """
return np.subtract(point.co, self.co)
def distance(self, point):
""" euclidian distance between points """
return np.linalg.norm(self.vect(point))
def add_user(self):
self.users += 1
class Segment():
def __init__(self, c0, c1, extend=EPSILON):
self.c0 = c0
self.c1 = c1
self._splits = []
self.available = True
# ensure uniqueness when merge
self.opposite = False
# this seg has an opposite
self.original = False
# source of opposite
x0, y0, z0 = c0.co
x1, y1, z1 = c1.co
self.bounds = (min(x0, x1) - extend, min(y0, y1) - extend, max(x0, x1) + extend, max(y0, y1) + extend)
@property
def splits(self):
return sorted(self._splits)
@property
def vect(self):
""" vector c0-c1"""
return np.subtract(self.c1.co, self.c0.co)
@property
def vect_2d(self):
v = self.vect
v[2] = 0
return v
def lerp(self, t):
return np.add(self.c0.co, np.multiply(t, self.vect))
def _point_sur_segment(self, point):
""" _point_sur_segment
point: Point
            t: param t of the intersection on the current segment
            d: lateral perpendicular distance
"""
vect = self.vect
dp = point.vect(self.c0)
dl = np.linalg.norm(vect)
        d = np.linalg.norm(np.cross(vect, dp)) / dl
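# --- Added sketch: the projection maths _point_sur_segment relies on, written
# with plain numpy and independent of the classes above. t is the parameter of
# the projection of p along the segment c0->c1 and d its lateral
# (perpendicular) distance.
def project_point_on_segment(c0, c1, p):
    v = np.subtract(c1, c0)
    dp = np.subtract(p, c0)
    dl = np.linalg.norm(v)
    t = np.dot(v, dp) / (dl * dl)
    d = np.linalg.norm(np.cross(v, dp)) / dl
    return t, d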
def read_e2ds(inpath,outname,config,nowave=False,molecfit=False,mode='HARPS',ignore_exp=[]):
"""This is the workhorse for reading in a time-series of archival 2D echelle
spectra and formatting these into the order-wise FITS format that Tayph uses.
The user should point this script to a folder (located at inpath) that contains
their pipeline-reduced echelle spectra. The script expects a certain data
format, depending on the instrument in question. It is designed to accept
pipeline products of the HARPS, HARPS-N, ESPRESSO and UVES instruments. In the
case of HARPS, HARPS-N and ESPRESSO these may be downloaded from the archive.
UVES is a bit special, because the 2D echelle spectra are not a standard
pipeline output. Typical use cases are explained further below.
This script formats the time series of 2D echelle spectra into 2D FITS images,
where each FITS file is the time-series of a single echelle order. If the
spectrograph has N orders, an order spans npx pixels, and M exposures
were taken during the time-series, there will be N FITS files, each measuring
M rows by npx columns. This script will read the headers of the pipeline
reduced data files to determine the date/time of each, the exposure time, the
barycentric correction (without applying it) and the airmass, and writes
these to an ASCII table along with the FITS files containing the spectral
orders.
A crucial functionality of this script is that it also acts as a wrapper
for the Molecfit telluric correction software. If installed properly, the
user can call this script with the molecfit keyword to let Molecfit loop
over the entire timeseries. To enable this functionality, the script
reads the full-width, 1D spectra that are output by the instrument pipelines
as well. Molecfit is applied to this time-series of 1D spectra, creating a
time-series of models of the telluric absorption spectrum that is saved along
with the 2D fits files. Tayph later interpolates these models onto the 2D
spectra. Molecfit is called once in GUI-mode, allowing the user to select the
relevant fitting regions and parameters, after which it is repeated
automatically for the entire time series.
Without Molecfit, this script finishes in a matter of seconds. However with
molecfit enabled, it can take many hours (so if I wish to telluric-correct
my data, I run this script overnight).
The processing of HARPS, HARPS-N and ESPRESSO data is executed in an almost
identical manner, because the pipeline-reduced products are almost identical.
To run on either of these instruments, the user simply downloads all pipeline
products of a given time-series, and extracts these in the same folder (meaning
ccfs, e2ds/s2d, s1d, blaze, wave files, etc.) This happens to be the standard
format when downloading pipeline-reduced data from the archive.
For UVES, the functionality is much more constricted because the pipeline
reduced data in the ESO archive is generally not of sufficient stability to
enable precise time-resolved spectroscopy. I designed this function therefore
to run on the pipeline-products produced by the Reflex (GUI) software. For this,
a user should download the raw UVES data of their time series, letting ESO's
calselector tool find the associated calibration files. This can easily be
many GBs worth of data for a given observing program. The user should then
reduce these data with the Reflex software. Reflex creates resampled, stitched
1D spectra as its primary output. However, we will elect to use the intermediate
pipeline products, which include the 2D extracted orders, located in Reflex's
working directory after the reduction process is completed.
A further complication of UVES data is that it can be used with different
dichroics and 'arms', leading to spectral coverage on the blue, redu and/or redl
chips. The user should take care that their time series contains only one of these
types at any time. If they are mixed, this script will throw an exception.
Set the nowave keyword to True if the dataset is HARPS or HARPSN, but it has
no wave files associated with it. This may happen if you downloaded ESO
Advanced Data Products, which include reduced science e2ds's but not reduced
wave e2ds's. The wavelength solution is still encoded in the fits header however,
so we take it from there, instead.
Set the ignore_exp keyword to a list of exposures (start counting at 0) that
need to be ignored when reading, e.g. because they are bad for some reason.
If you have set molecfit to True, this becomes an expensive parameter to
play with in terms of computing time, so its better to figure out which
exposures you'd wish to ignore first (by doing most of your analysis),
before actually running Molecfit, which is icing on the cake in many use-
cases in the optical.
The config parameter points to a configuration file (usually your generic
run definition file) that is only used to point the Molecfit wrapper to the
Molecfit installation on your system. If you are not using molecfit, you may
pass an empty string here.
"""
import os
import pdb
from astropy.io import fits
import astropy.constants as const
import numpy as np
import matplotlib.pyplot as plt
import sys
import tayph.util as ut
from tayph.vartests import typetest,dimtest
import tayph.tellurics as mol
import tayph.system_parameters as sp
import tayph.functions as fun
import copy
import scipy.interpolate as interp
import pickle
from pathlib import Path
import warnings
import glob
# molecfit = False
#First check the input:
inpath=ut.check_path(inpath,exists=True)
typetest(outname,str,'outname in read_HARPS_e2ds()')
typetest(nowave,bool,'nowave switch in read_HARPS_e2ds()')
typetest(molecfit,bool,'molecfit switch in read_HARPS_e2ds()')
typetest(ignore_exp,list,'ignore_exp in read_HARPS_e2ds()')
typetest(mode,str,'mode in read_HARPS_e2ds()')
if molecfit:
config = ut.check_path(config,exists=True)
if mode not in ['HARPS','HARPSN','HARPS-N','ESPRESSO','UVES-red','UVES-blue']:
raise ValueError("in read_HARPS_e2ds: mode needs to be set to HARPS, HARPSN, UVES-red, UVES-blue or ESPRESSO.")
filelist=os.listdir(inpath)#If mode == UVES, these are folders. Else, they are fits files.
N=len(filelist)
if len(filelist) == 0:
raise FileNotFoundError(f" in read_e2ds: input folder {str(inpath)} is empty.")
#The following variables define lists in which all the necessary data will be stored.
framename=[]
header=[]
s1dhdr=[]
type=[]
texp=np.array([])
date=[]
mjd=np.array([])
s1dmjd=np.array([])
npx=np.array([])
norders=np.array([])
e2ds=[]
s1d=[]
wave1d=[]
airmass=np.array([])
berv=np.array([])
wave=[]
blaze=[]
wavefile_used = []
outpath = Path('data/'+outname)
if os.path.exists(outpath) != True:
os.makedirs(outpath)
e2ds_count = 0
sci_count = 0
wave_count = 0
blaze_count = 0
s1d_count = 0
if mode=='HARPS-N': mode='HARPSN'
#MODE SWITCHING HERE:
if mode in ['HARPS','UVES-red','UVES-blue']:
catkeyword = 'HIERARCH ESO DPR CATG'
bervkeyword = 'HIERARCH ESO DRS BERV'
thfilekeyword = 'HIERARCH ESO DRS CAL TH FILE'
Zstartkeyword = 'HIERARCH ESO TEL AIRM START'
Zendkeyword = 'HIERARCH ESO TEL AIRM END'
if mode == 'HARPSN':
catkeyword = 'OBS-TYPE'
bervkeyword = 'HIERARCH TNG DRS BERV'
thfilekeyword = 'HIERARCH TNG DRS CAL TH FILE'
Zstartkeyword = 'AIRMASS'
Zendkeyword = 'AIRMASS'#These are the same because HARPSN doesnt have start and end keywords.
#Down there, the airmass is averaged, so there is no problem in taking the average of the same number.
#Here is the actual parsing of the list of files that were read above. The
#behaviour is different depending on whether this is HARPS, UVES or ESPRESSO
#data, so it switches with a big if-statement in which there is a forloop
#over the filelist in each case. The result is lists or np.arrays containing
#the 2D spectra, the 1D spectra, their 2D and 1D wavelength solutions, the
#headers, the MJDs, the BERVs and the airmasses, as well as (optionally) CCFs
#and blaze files, though these are not really used.
print(f'Read_e2ds is attempting to read a {mode} datafolder.')
if mode == 'UVES-red' or mode == 'UVES-blue':#IF we are UVES-like
for i in range(N):
print(filelist[i])
if (inpath/Path(filelist[i])).is_dir():
tmp_products = [i for i in (inpath/Path(filelist[i])).glob('resampled_science_*.fits')]
tmp_products1d = [i for i in (inpath/Path(filelist[i])).glob('red_science_*.fits')]
if mode == 'UVES-red' and len(tmp_products) != 2:
raise ValueError(f"in read_e2ds: When mode=UVES-red there should be 2 resampled_science files (redl and redu), but {len(tmp_products)} were detected.")
if mode == 'UVES-blue' and len(tmp_products) != 1:
                    raise ValueError(f"in read_e2ds: When mode=UVES-blue there should be 1 resampled_science file (blue), but {len(tmp_products)} were detected.")
if mode == 'UVES-red' and len(tmp_products1d) != 2:
raise ValueError(f"in read_e2ds: When mode=UVES-red there should be 2 red_science files (redl and redu), but {len(tmp_products1d)} were detected.")
if mode == 'UVES-blue' and len(tmp_products1d) != 1:
                    raise ValueError(f"in read_e2ds: When mode=UVES-blue there should be 1 red_science file (blue), but {len(tmp_products1d)} were detected.")
data_combined = []#This will store the two chips (redu and redl) in case of UVES_red, or simply the blue chip if otherwise.
wave_combined = []
wave1d_combined=[]
data1d_combined=[]
norders_tmp = 0
for tmp_product in tmp_products:
hdul = fits.open(tmp_product)
data = copy.deepcopy(hdul[0].data)
hdr = hdul[0].header
hdul.close()
del hdul[0].data
if not hdr['HIERARCH ESO PRO SCIENCE']:#Only add if it's actually a science product:#I force the user to supply only science exposures in the input folder. No BS allowed... UVES is hard enough as it is.
raise ValueError(f' in read_e2ds: UVES file {tmp_product} is not classified as a SCIENCE file, but should be. Remove it from the folder?')
wavedata=ut.read_wave_from_e2ds_header(hdr,mode='UVES')
data_combined.append(data)
wave_combined.append(wavedata)
norders_tmp+=np.shape(data)[0]
for tmp_product in tmp_products1d:
hdul = fits.open(tmp_product)
data_1d = copy.deepcopy(hdul[0].data)
hdr1d = hdul[0].header
hdul.close()
del hdul[0].data
if not hdr1d['HIERARCH ESO PRO SCIENCE']:#Only add if it's actually a science product:#I force the user to supply only science exposures in the input folder. No BS allowed... UVES is hard enough as it is.
raise ValueError(f' in read_e2ds: UVES file {tmp_product} is not classified as a SCIENCE file, but should be. Remove it from the folder?')
npx1d = hdr1d['NAXIS1']
wavedata = fun.findgen(npx1d)*hdr1d['CDELT1']+hdr1d['CRVAL1']
data1d_combined.append(data_1d)
wave1d_combined.append(wavedata)
if len(data_combined) < 1 or len(data_combined) > 2:#Double-checking that length here...
raise ValueError(f'in read_e2ds(): Expected 1 or 2 chips, but {len(data_combined)} files were somehow read.')
#The chips generally don't give the same size. Therefore I will pad the smaller one with NaNs to make it fit:
if len(data_combined) != len(data1d_combined):
raise ValueError(f'in read_e2ds(): The number of chips in the 1d and 2d spectra is not the same {len(data1d_combined)} vs {len(data_combined)}.')
if len(data_combined) == 2:
chip1 = data_combined[0]
chip2 = data_combined[1]
wave1 = wave_combined[0]
wave2 = wave_combined[1]
npx_1 = np.shape(chip1)[1]
npx_2 = np.shape(chip2)[1]
                    no_1 = np.shape(chip1)[0]
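# --- Added usage sketch: a minimal call of read_e2ds as described in its
# docstring. The input folder and output name are placeholders; the config
# path may be an empty string when molecfit is disabled.
def _read_e2ds_example():
    read_e2ds('data/raw/WASP-121/night1/', 'WASP-121-night1', '',
              nowave=False, molecfit=False, mode='HARPS', ignore_exp=[])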
import argparse
import math
import sys
from pathlib import Path
from typing import Optional
import PIL
import numpy as np
import pickle
from sources.dataset_creation_utils.create_iid_dataset_utils import get_full_iid_dataset_filename
from sources.dataset_creation_utils.get_iid_dataset_utils import get_full_iid_dataset
from sources.utils.set_random_seeds import DEFAULT_SEED
from sources.global_data_properties import LEAF_DATASET_NAME_LIST
def get_dataset_dist(data_dir, dataset_identifier):
dataset_dir = data_dir / dataset_identifier
total = []
for subdir in dataset_dir.iterdir():
with (subdir / "train.pickle").open("rb") as f:
data = pickle.load(f)
total.append(len(data["x"]))
total = np.array(total)
return total
def create_iid_clients(data_dir: Path, dataset_identifier: str, num_clients: Optional[int] = None,
empirical=False):
dist = get_dataset_dist(data_dir, dataset_identifier)
rng = np.random.default_rng(DEFAULT_SEED)
dataset_sizes = None
if empirical:
dataset_sizes = dist
if num_clients is not None:
dataset_sizes = rng.choice(dist, size=num_clients, replace=True)
dataset_sizes = list(map(int, dataset_sizes))
else:
avg = math.ceil(np.average(dist))
if num_clients is None:
dataset_sizes = np.full_like(dist, avg, dtype=np.int32)
else:
            dataset_sizes = np.full(num_clients, avg, dtype=np.int32)
#!/usr/bin/env python
"""
@package ion_functions.data.sfl_functions
@file ion_functions/data/sfl_functions.py
@author <NAME>, <NAME>
@brief Module containing Seafloor Properties related data-calculations.
"""
import numpy as np
import numexpr as ne
from scipy.interpolate import RectBivariateSpline
from ion_functions.data.generic_functions import replace_fill_with_nan
# used by def sfl_trhph_chloride
from ion_functions.data.sfl_functions_surface import tdat, sdat, cdat
# .............................................................................
# THSPH data products: THSPHHC, THSPHHS, THSPHPH (4 PH products) ..............
# .............................................................................
def sfl_thsph_ph(counts_ysz, counts_agcl, temperature, e2l_ysz, e2l_agcl,
arr_hgo, arr_agcl, arr_tac, arr_tbc1, arr_tbc2, arr_tbc3, chl):
"""
Description:
Calculates the THSPHPH-PH_L2 data product, one of the 4 THSPHPH data
products for the THSPH instruments. The PH data product algorithm
calculates pH assuming good chloride data is available from
TRHPH (TRHPHCC_L2) and a working AgCl reference electrode.
Implemented by:
2014-07-08: <NAME>. Initial Code.
2015-07-24: <NAME>iderio. Incorporated calculate_vent_pH function.
Usage:
pH = sfl_thsph_ph(counts_ysz, counts_agcl, temperature, e2l_ysz, e2l_agcl,
arr_hgo, arr_agcl, arr_tac, arr_tbc1, arr_tbc2, arr_tbc3, chl)
where
pH = vent fluid pH: THSPHPH-PH_L2) [unitless]
counts_ysz = raw data recorded by ysz electrode THSPHPH-YSZ_L0 [counts]
counts_agcl = raw data recorded by AgCl electrode THSPHPH-AGCL_L0 [counts]
temperature = temperature near sample inlet THSPHTE-TH_L1 [deg_C].
e2l_ysz = array of 5th degree polynomial coefficients to convert the
ysz electrode engineering values to lab calibrated values.
e2l_agcl = array of 5th degree polynomial coefficients to convert the
agcl electrode engineering values to lab calibrated values.
arr_hgo = array of 5th degree polynomial coefficients to calculate the
electrode material response to temperature.
arr_agcl = array of 5th degree polynomial coefficients to calculate the
AgCl electrode material response to temperature.
arr_tac = array containing the 5th degree polynomial coefficients to calculate tac (=tbc0).
arr_tbc1 = array containing the 5th degree polynomial coefficients to calculate tbc1.
arr_tbc2 = array containing the 5th degree polynomial coefficients to calculate tbc2.
arr_tbc3 = array containing the 5th degree polynomial coefficients to calculate tbc3.
chl = vent fluid chloride concentration from TRHPHCC_L2 [mmol kg-1].
References:
OOI (2014). Data Product Specification for Vent Fluid pH. Document Control
Number 1341-00190. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI>> Controlled >> 1000 System Level >>
1341-00190_Data_Product_Spec_THSPHPH_OOI.pdf)
"""
# calculate lab calibrated electrode response [V]
v_labcal_ysz = v_labcal(counts_ysz, e2l_ysz)
# AgCl reference electrode
v_labcal_agcl = v_labcal(counts_agcl, e2l_agcl)
# calculate chloride activity
act_chl = chloride_activity(temperature, arr_tac, arr_tbc1, arr_tbc2, arr_tbc3, chl)
pH = calculate_vent_pH(v_labcal_ysz, v_labcal_agcl, temperature, arr_hgo, arr_agcl, act_chl)
return pH
def sfl_thsph_ph_acl(counts_ysz, counts_agcl, temperature, e2l_ysz, e2l_agcl,
arr_hgo, arr_agcl, arr_tac, arr_tbc1, arr_tbc2, arr_tbc3):
"""
Description:
Calculates the THSPHPH-PH-ACL_L2 data product, one of the 4 THSPHPH
data products for the THSPH instruments. The PH-ACL data product
algorithm calculates pH assuming no good chloride data available from
TRHPH (TRHPHCC_L2) (assumes instead a pre-determined chloride concentration
which is set in the chloride_activity function). The data from the AgCl
reference electrode is also assumed to be good and used in this
calculation.
Implemented by:
2014-07-08: <NAME>. Initial Code.
2015-07-24: <NAME>. Incorporated calculate_vent_pH function.
Usage:
pH = sfl_thsph_ph_acl(counts_ysz, counts_agcl, temperature, e2l_ysz, e2l_agcl,
arr_hgo, arr_agcl, arr_tac, arr_tbc1, arr_tbc2, arr_tbc3)
where
pH = vent fluid pH: THSPHPH-PH-ACL_L2) [unitless]
counts_ysz = raw data recorded by ysz electrode THSPHPH-YSZ_L0 [counts]
counts_agcl = raw data recorded by AgCl electrode THSPHPH-AGCL_L0 [counts]
temperature = temperature near sample inlet THSPHTE-TH_L1 [deg_C].
e2l_ysz = array of 5th degree polynomial coefficients to convert the
ysz electrode engineering values to lab calibrated values.
e2l_agcl = array of 5th degree polynomial coefficients to convert the
agcl electrode engineering values to lab calibrated values.
arr_hgo = array of 5th degree polynomial coefficients to calculate the
electrode material response to temperature.
arr_agcl = array of 5th degree polynomial coefficients to calculate the
AgCl electrode material response to temperature.
arr_tac = array containing the 5th degree polynomial coefficients to calculate tac (=tbc0).
arr_tbc1 = array containing the 5th degree polynomial coefficients to calculate tbc1.
arr_tbc2 = array containing the 5th degree polynomial coefficients to calculate tbc2.
arr_tbc3 = array containing the 5th degree polynomial coefficients to calculate tbc3.
References:
OOI (2014). Data Product Specification for Vent Fluid pH. Document Control
Number 1341-00190. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI>> Controlled >> 1000 System Level >>
1341-00190_Data_Product_Spec_THSPHPH_OOI.pdf)
"""
# calculate lab calibrated electrode response [V]
v_labcal_ysz = v_labcal(counts_ysz, e2l_ysz)
# AgCl reference electrode
v_labcal_agcl = v_labcal(counts_agcl, e2l_agcl)
# chloride activity assuming the default value for chloride concentration
# set in the chloride_activity subroutine
act_chl = chloride_activity(temperature, arr_tac, arr_tbc1, arr_tbc2, arr_tbc3)
pH = calculate_vent_pH(v_labcal_ysz, v_labcal_agcl, temperature, arr_hgo, arr_agcl, act_chl)
return pH
def sfl_thsph_ph_noref(counts_ysz, temperature, arr_agclref, e2l_ysz, arr_hgo,
arr_agcl, arr_tac, arr_tbc1, arr_tbc2, arr_tbc3, chl):
"""
Description:
Calculates the THSPHPH-PH-NOREF_L2 data product, one of the 4 THSPHPH
data products for the THSPH instruments. The PH-NOREF data product
algorithm calculates pH assuming no good reference (AgCl) electrode data
(uses instead a theoretical value calculated from vent temperature) and
also uses (presumably good) chloride data from TRHPH (TRHPHCC_L2).
Implemented by:
2014-07-08: <NAME>. Initial Code.
2015-07-24: <NAME>. Incorporated calculate_vent_pH function.
Usage:
pH = sfl_thsph_ph_noref(counts_ysz, temperature, arr_agclref, e2l_ysz, arr_hgo,
arr_agcl, arr_tac, arr_tbc1, arr_tbc2, arr_tbc3, chl)
where
pH = vent fluid pH: THSPHPH-PH-NOREF_L2) [unitless]
counts_ysz = raw data recorded by ysz electrode THSPHPH-YSZ_L0 [counts]
temperature = temperature near sample inlet THSPHTE-TH_L1 [deg_C].
arr_agclref = array of 5th degree polynomial coefficients to calculate the
theoretical reference electrode potential, replacing measured
reference AgCl electrode potential values.
e2l_ysz = array of 5th degree polynomial coefficients to convert the
ysz electrode engineering values to lab calibrated values.
arr_hgo = array of 5th degree polynomial coefficients to calculate the
electrode material response to temperature.
arr_agcl = array of 5th degree polynomial coefficients to calculate the
AgCl electrode material response to temperature.
arr_tac = array containing the 5th degree polynomial coefficients to calculate tac (=tbc0).
arr_tbc1 = array containing the 5th degree polynomial coefficients to calculate tbc1.
arr_tbc2 = array containing the 5th degree polynomial coefficients to calculate tbc2.
arr_tbc3 = array containing the 5th degree polynomial coefficients to calculate tbc3.
chl = vent fluid chloride concentration from TRHPHCC_L2 [mmol kg-1].
References:
OOI (2014). Data Product Specification for Vent Fluid pH. Document Control
Number 1341-00190. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI>> Controlled >> 1000 System Level >>
1341-00190_Data_Product_Spec_THSPHPH_OOI.pdf)
"""
# calculate lab calibrated electrode response [V]
v_labcal_ysz = v_labcal(counts_ysz, e2l_ysz)
# theoretical reference value calculated from vent temperature
e_refcalc = eval_poly(temperature, arr_agclref)
# calculate chloride activity
act_chl = chloride_activity(temperature, arr_tac, arr_tbc1, arr_tbc2, arr_tbc3, chl)
pH = calculate_vent_pH(v_labcal_ysz, e_refcalc, temperature, arr_hgo, arr_agcl, act_chl)
return pH
def sfl_thsph_ph_noref_acl(counts_ysz, temperature, arr_agclref, e2l_ysz, arr_hgo,
arr_agcl, arr_tac, arr_tbc1, arr_tbc2, arr_tbc3):
"""
Description:
Calculates the THSPHPH-PH-NOREF-ACL_L2 data product, one of the 4 THSPHPH
data products for the THSPH instruments. The PH-NOREF-ACL data product
algorithm calculates pH assuming no good reference (AgCl) electrode data
(uses instead a theoretical value calculated from vent temperature) and
assuming no good chloride data from TRHPH (TRHPHCC_L2) (assumes instead a
pre-determined chloride concentration which is set in the chloride_activity
function).
Implemented by:
2014-07-08: <NAME>. Initial Code.
2015-07-24: <NAME>. Incorporated calculate_vent_pH function.
Usage:
pH = sfl_thsph_ph_noref_acl(counts_ysz, temperature, arr_agclref, e2l_ysz, arr_hgo,
arr_agcl, arr_tac, arr_tbc1, arr_tbc2, arr_tbc3)
where
pH = vent fluid pH: THSPHPH-PH-NOREF-ACL_L2) [unitless]
counts_ysz = raw data recorded by ysz electrode THSPHPH-YSZ_L0 [counts]
temperature = temperature near sample inlet THSPHTE-TH_L1 [deg_C].
arr_agclref = array of 5th degree polynomial coefficients to calculate the
theoretical reference electrode potential, replacing measured
reference AgCl electrode potential values.
e2l_ysz = array of 5th degree polynomial coefficients to convert the
ysz electrode engineering values to lab calibrated values.
arr_hgo = array of 5th degree polynomial coefficients to calculate the
electrode material response to temperature.
arr_agcl = array of 5th degree polynomial coefficients to calculate the
AgCl electrode material response to temperature.
arr_tac = array containing the 5th degree polynomial coefficients to calculate tac (=tbc0).
arr_tbc1 = array containing the 5th degree polynomial coefficients to calculate tbc1.
arr_tbc2 = array containing the 5th degree polynomial coefficients to calculate tbc2.
arr_tbc3 = array containing the 5th degree polynomial coefficients to calculate tbc3.
References:
OOI (2014). Data Product Specification for Vent Fluid pH. Document Control
Number 1341-00190. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI>> Controlled >> 1000 System Level >>
1341-00190_Data_Product_Spec_THSPHPH_OOI.pdf)
"""
# calculate lab calibrated electrode response [V]
v_labcal_ysz = v_labcal(counts_ysz, e2l_ysz)
# theoretical reference value calculated from vent temperature
e_refcalc = eval_poly(temperature, arr_agclref)
# chloride activity assuming the default value for chloride concentration
# set in the subroutine
act_chl = chloride_activity(temperature, arr_tac, arr_tbc1, arr_tbc2, arr_tbc3)
pH = calculate_vent_pH(v_labcal_ysz, e_refcalc, temperature, arr_hgo, arr_agcl, act_chl)
return pH
def calculate_vent_pH(e_ph, e_ref, temperature, arr_hgo, arr_agcl, act_chl):
"""
Description:
Worker function to calculate the vent fluid pH for the THSPH instruments. This
function is called by
sfl_thsph_ph
sfl_thsph_ph_acl
sfl_thsph_ph_noref
sfl_thsph_ph_noref_acl.
Implemented by:
2015-07-24: <NAME>. Initial Code.
Usage:
pH = calculate_vent_pH(e_ph, e_ref, temperature, arr_hgo, arr_agcl, act_chl)
where
pH = vent fluid pH
e_ph = intermediate pH potential uncorrected for reference
e_ref = reference pH potential, either measured or calculated
temperature = temperature near sample inlet THSPHTE-TH_L1 [deg_C].
arr_hgo = array of 5th degree polynomial coefficients to calculate the
electrode material response to temperature.
arr_agcl = array of 5th degree polynomial coefficients to calculate the
AgCl electrode material response to temperature.
act_chl = calculated chloride activity.
References:
OOI (2014). Data Product Specification for Vent Fluid pH. Document Control
Number 1341-00190. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI>> Controlled >> 1000 System Level >>
1341-00190_Data_Product_Spec_THSPHPH_OOI.pdf)
"""
# fill value local to this function to avoid python warnings when nans are encountered
# in boolean expressions. the masking will convert values derived from this local fill
# back to nans.
unphysical_pH_fill_value = -99999.0
# calculate intermediate quantities that depend upon temperature
e_nernst = nernst(temperature)
e_hgo = eval_poly(temperature, arr_hgo)
e_agcl = eval_poly(temperature, arr_agcl)
# calculate pH potential
e_phcalc = e_ph - e_ref
# check for unphysical values as specified in the DPS.
# logical indexing with boolean arrays is faster than integer indexing using np.where.
# ok to apply mask at end of calculation.
e_phcalc[np.isnan(e_phcalc)] = unphysical_pH_fill_value
bad_eph_mask = np.logical_or(np.less(e_phcalc, -0.7), np.greater(e_phcalc, 0.0))
# final data product calculation
act_chl[act_chl <= 0.0] = np.nan # trap out python warning
pH = (e_phcalc - e_agcl + e_hgo) / e_nernst + np.log10(act_chl)
# second check for unphysical values, as specified in the DPS
pH[np.isnan(pH)] = unphysical_pH_fill_value
bad_ph_mask = np.logical_or(np.less(pH, 3.0), np.greater(pH, 7.0))
# set all out-of-range values to fill values
pH[np.logical_or(bad_eph_mask, bad_ph_mask)] = np.nan
return pH
def sfl_thsph_sulfide(counts_hs, counts_ysz, temperature, e2l_hs, e2l_ysz, arr_hgo,
arr_logkfh2g, arr_eh2sg, arr_yh2sg):
"""
Description:
Calculates the THSPHHS_L2 data product (hydrogen sulfide concentration) for
the THSPH instruments from vent temperature and from data from its sulfide
and YSZ electrodes. Note that the chemical formula for hydrogen is H2, and
that for hydrogen sulfide is H2S; this could lead to confusion in the
variable and array names from the DPS if care is not taken. Note also that
this hydrogen sulfide DPA does use an intermediate data product and its
'calibration' coefficients (hydrogen fugacity) that are also used in the
hydrogen concentration DPA.
Implemented by:
2014-07-08: <NAME>. Initial Code.
Usage:
h2s = sfl_thsph_sulfide(counts_hs, counts_ysz, temperature, e2l_hs, e2l_ysz,
arr_hgo, arr_logkfh2g, arr_eh2sg, arr_yh2sg)
where
h2s = hydrogen sulfide concentration at the vent: THSPHHS_L2 [mmol kg-1]
counts_hs = raw data recorded by sulfide electrode THSPHHS_L0 [counts]
counts_ysz = raw data recorded by ysz electrode THSPHPH-YSZ_L0 [counts]
temperature = temperature near sample inlet THSPHTE-TH_L1 [deg_C].
e2l_hs = array of 5th degree polynomial coefficients to convert the
sulfide electrode engineering values to lab calibrated values.
e2l_ysz = array of 5th degree polynomial coefficients to convert the
ysz electrode engineering values to lab calibrated values.
arr_hgo = array of 5th degree polynomial coefficients to calculate the
electrode material response to temperature.
arr_logkfh2g = array of 5th degree polynomial coefficients to calculate the
equilibrium hydrogen fugacity as a function of temperature.
arr_eh2sg = array of 5th degree polynomial coefficients to calculate the
theoretical potential of gas phase hydrogen sulfide to temperature;
in the current DPS, highest degree term is first order, so pad
this array with entries of zero [0., 0., 0., 0., c1, c0].
arr_yh2sg = array of 5th degree polynomial coefficients to calculate the
fugacity/concentration quotient yh2sg from hydrogen fugacity.
References:
OOI (2014). Data Product Specification for Vent Fluid Hydrogen Sulfide
Concentration. Document Control Number 1341-00200.
https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI>> Controlled >> 1000 System Level >>
1341-00200_Data_Product_Spec_THSPHHS_OOI.pdf)
"""
# calculate lab calibrated electrode responses [V]
v_labcal_hs = v_labcal(counts_hs, e2l_hs)
v_labcal_ysz = v_labcal(counts_ysz, e2l_ysz)
# calculate intermediate products that depend upon temperature
e_nernst = nernst(temperature)
e_hgo = eval_poly(temperature, arr_hgo)
e_h2sg = eval_poly(temperature, arr_eh2sg)
log_kfh2g = eval_poly(temperature, arr_logkfh2g)
# y_h2sg depends on temperature because hydrogen fugacity depends on temperature
y_h2sg = eval_poly(log_kfh2g, arr_yh2sg)
# explicitly follow the DPS calculation for clarity:
# measured potential of the sulfide electrode [V]
e_h2s = v_labcal_ysz - v_labcal_hs
# (common) log of measured hydrogen sulfide fugacity
log_fh2sg = 2.0 * (e_h2s - e_hgo + e_h2sg) / e_nernst
# final data product, hydrogen sulfide concentration, [mmol/kg]
# in the DPS, this is 1000 * 10^( logfh2sg - log( yh2sg ) )
h2s = 1000.0 * (10.0 ** (log_fh2sg)) / y_h2sg
return h2s
def sfl_thsph_hydrogen(counts_h2, counts_ysz, temperature, e2l_h2, e2l_ysz, arr_hgo,
arr_logkfh2g):
"""
Description:
Calculates the THSPHHC_L2 data product (hydrogen concentration) for the THSPH
instruments from vent temperature and from data from its hydrogen and YSZ
electrodes.
Implemented by:
2014-07-08: <NAME>. Initial Code.
Usage:
h2 = sfl_thsph_hydrogen(counts_h2, counts_ysz, temperature, e2l_h2, e2l_ysz, arr_hgo,
arr_logkfh2g)
where
h2 = hydrogen concentration at the vent: THSPHHC_L2 [mmol kg-1]
counts_h2 = raw data recorded by hydrogen electrode THSPHHC_L0 [counts]
counts_ysz = raw data recorded by ysz electrode THSPHPH-YSZ_L0 [counts]
temperature = temperature near sample inlet THSPHTE-TH_L1 [deg_C].
e2l_h2 = array of 5th degree polynomial coefficients to convert the
hydrogen electrode engineering values to lab calibrated values.
e2l_ysz = array of 5th degree polynomial coefficients to convert the
ysz electrode engineering values to lab calibrated values.
arr_hgo = array of 5th degree polynomial coefficients to calculate the
electrode material response to temperature.
arr_logkfh2g = array of 5th degree polynomial coefficients to calculate the
equilibrium hydrogen fugacity as a function of temperature.
OOI (2014). Data Product Specification for Vent Fluid Hydrogen Concentration.
Document Control Number 1341-00210. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI>> Controlled >> 1000 System Level >>
1341-00210_Data_Product_Spec_THSPHHC_OOI.pdf)
"""
# calculate lab calibrated electrode responses [V]
v_labcal_h2 = v_labcal(counts_h2, e2l_h2)
v_labcal_ysz = v_labcal(counts_ysz, e2l_ysz)
# calculate intermediate products that depend upon temperature
e_nernst = nernst(temperature)
e_hgo = eval_poly(temperature, arr_hgo)
log_kfh2g = eval_poly(temperature, arr_logkfh2g)
# explicitly follow the DPS calculation for clarity:
# measured potential of the h2 electrode [V]
e_h2 = v_labcal_ysz - v_labcal_h2
# (common) log of measured hydrogen fugacity
log_fh2g = 2.0 * (e_h2 - e_hgo) / e_nernst
# final data product, hydrogen concentration, [mmol/kg]
h2 = 1000.0 * (10.0 ** (log_fh2g - log_kfh2g))
return h2
def chloride_activity(temperature, arr_tac, arr_tbc1, arr_tbc2, arr_tbc3, chloride=250.0):
"""
Description:
Subfunction to calculate the chloride activity as a function of temperature
needed by the THSPHPH_L2 data products for the THSPH instruments. The chloride
value can either come from the TRHPHCC_L2 data product or the default value
of 250.0 mmol/kg can be used.
Implemented by:
2014-07-08: <NAME>. Initial Code.
Usage:
act_chl = chloride_activity(temperature, arr_tac, arr_tbc1, arr_tbc2, arr_tbc3[, chloride])
where
act_chl = calculated chloride activity.
temperature = temperature near sample inlet THSPHTE-TH_L1 [deg_C].
arr_tac = array containing the 5th degree polynomial coefficients to calculate tac (=tbc0).
arr_tbc1 = array containing the 5th degree polynomial coefficients to calculate tbc1.
arr_tbc2 = array containing the 5th degree polynomial coefficients to calculate tbc2.
arr_tbc3 = array containing the 5th degree polynomial coefficients to calculate tbc3.
chloride [optional] = if specified, vent fluid chloride concentration from TRHPH
(TRHPHCC_L2) [mmol kg-1], else a value of 250.0 mmol/kg will be used.
"""
# find number of data packets to be processed;
# this also works if temperature is not an np.array.
nvalues = np.array([temperature]).shape[-1]
# change units of chloride from mmol/kg to mol/kg
chloride = chloride/1000.0
# if chloride is not given in the argument list,
# replicate its default value into a vector with
# the same number of elements as temperature;
# do so without using a conditional
nreps = nvalues / np.array([chloride]).shape[-1]
    chloride = np.tile(chloride, nreps)
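# --- Added sketch: nernst() is used throughout the THSPH products above but
# defined elsewhere in this module. The conventional Nernst slope (volts per
# decade of activity) is ln(10)*R*T/F with T in kelvin; this stand-in is
# illustrative, not the verified OOI implementation.
def nernst_sketch(temperature_degC):
    R = 8.314462        # gas constant [J mol-1 K-1]
    F = 96485.332       # Faraday constant [C mol-1]
    T_kelvin = temperature_degC + 273.15
    return np.log(10.0) * R * T_kelvin / F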
'''
Copyright (c) Microsoft Corporation, <NAME> and <NAME>.
Licensed under the MIT license.
Authors: <NAME> (<EMAIL>), <NAME> and <NAME>
'''
from torch.utils.data import Dataset
import numpy as np
import pandas as pd
import torch
from itertools import chain
import os
import pickle
# from snorkel_process import weak_supervision
class FakeNewsDataset(Dataset):
def __init__(self, file_name, tokenizer, is_weak, max_length, weak_type="", overwrite=False, balance_weak=False):
super(FakeNewsDataset, self).__init__()
tokenizer_name = type(tokenizer).__name__
# if tokenizer_name == "BertTokenizer" or tokenizer_name == "RobertaTokenizer":
pickle_file = file_name.replace(".csv", '_{}_{}.pkl'.format(max_length, tokenizer_name))
self.weak_label_count = 3
if os.path.exists(pickle_file) and overwrite is False:
load_data = pickle.load(open(pickle_file, "rb"))
for key, value in load_data.items():
setattr(self, key, value)
else:
save_data = {}
data = pd.read_csv(file_name)
# if tokenizer_name == "BertTokenizer" or tokenizer_name == "RobertaTokenizer":
self.news = [tokenizer.encode(i, max_length=max_length, pad_to_max_length=True) for i in data['news'].values.tolist()]
self.attention_mask = [[1] * (i.index(tokenizer.pad_token_id) if tokenizer.pad_token_id in i else len(i))
for i in self.news]
self.attention_mask = [mask + [0] * (max_length - len(mask)) for mask in self.attention_mask]
if is_weak:
self.weak_labels = []
assert "label" not in data.columns, "noise data should not contain the clean label"
self.weak_labels = data.iloc[:, list(range(1, len(data.columns)))].values.tolist()
save_data.update({"weak_labels": data.iloc[:, list(range(1, len(data.columns)))].values.tolist()})
else:
self.labels = data['label'].values.tolist()
save_data.update({"news": self.news, "attention_mask": self.attention_mask})
if is_weak is False:
save_data.update({"labels": self.labels})
pickle.dump(save_data, open(pickle_file, "wb"))
if is_weak:
if weak_type == "most_vote":
self.weak_labels = [1 if np.sum(i) > 1 else 0 for i in self.weak_labels]
elif weak_type == "flat":
self.weak_labels = list(chain.from_iterable(self.weak_labels))
self.news = list(chain.from_iterable([[i] * self.weak_label_count for i in self.news]))
self.attention_mask = list(
chain.from_iterable([[i] * self.weak_label_count for i in self.attention_mask]))
#"credit_label","polarity_label","bias_label"
elif weak_type.isdigit():
self.weak_labels = [i[int(weak_type)] for i in self.weak_labels]
else:
print("Default setting of dataset")
self.is_weak = is_weak
self.weak_type = weak_type
if self.is_weak and balance_weak:
self.__balance_helper()
if self.is_weak:
self.__instance_shuffle()
def __bert_tokenizer(self, tokenizer, max_length, data):
encode_output = [tokenizer.encode_plus(i, max_length=max_length, pad_to_max_length=True) for i in
data['news'].values.tolist()]
self.news = [i['input_ids'] for i in encode_output]
self.attention_mask = [i['attention_mask'] for i in encode_output]
self.token_type_ids = [i['token_type_ids'] for i in encode_output]
def __instance_shuffle(self):
index_array = np.array(list(range(len(self))))
        np.random.shuffle(index_array)
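# --- Added sketch: how the three weak_type modes above reshape one weak-label
# row from the 3 sources (credit, polarity, bias); values are illustrative.
def _weak_type_example():
    weak_row = [1, 0, 1]
    most_vote = 1 if np.sum(weak_row) > 1 else 0   # "most_vote": majority over sources
    flat = list(weak_row)                          # "flat": one training instance per source
    single = weak_row[int("2")]                    # weak_type="2": keep only the bias label
    return most_vote, flat, single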
"""
tests the pysat averaging code
"""
import pysat
import pandas as pds
from nose.tools import assert_raises, raises
import nose.tools
import pysat.instruments.pysat_testing
import numpy as np
import os
import sys
if sys.version_info[0] >= 3:
if sys.version_info[1] < 4:
import imp
re_load = imp.reload
else:
import importlib
re_load = importlib.reload
else:
re_load = reload
class TestBasics():
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat','testing', clean_level='clean')
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst
def test_basic_seasonal_average(self):
self.testInst.bounds = (pysat.datetime(2008,1,1), pysat.datetime(2008,2,1))
results = pysat.ssnl.avg.median2D(self.testInst, [0., 360., 24.], 'longitude',
[0., 24, 24], 'mlt', ['dummy1', 'dummy2', 'dummy3'])
dummy_val = results['dummy1']['median']
dummy_dev = results['dummy1']['avg_abs_dev']
dummy2_val = results['dummy2']['median']
dummy2_dev = results['dummy2']['avg_abs_dev']
dummy3_val = results['dummy3']['median']
dummy3_dev = results['dummy3']['avg_abs_dev']
dummy_x = results['dummy1']['bin_x']
dummy_y = results['dummy1']['bin_y']
# iterate over all y rows, value should be equal to integer value of mlt
# no variation in the median, all values should be the same
check = []
for i, y in enumerate(dummy_y[:-1]):
assert np.all(dummy_val[i, :] == y.astype(int))
assert np.all(dummy_dev[i, :] == 0)
for i, x in enumerate(dummy_x[:-1]):
assert np.all(dummy2_val[:, i] == x/15.)
assert np.all(dummy2_dev[:, i] == 0)
for i, x in enumerate(dummy_x[:-1]):
check.append(np.all(dummy3_val[:, i] == x/15.*1000. + dummy_y[:-1]) )
check.append(np.all(dummy3_dev[:, i] == 0))
# holds here because there are 32 days, no data is discarded,
# each day holds same amount of data
assert self.testInst.data['dummy1'].size*32 == sum([ sum(i) for i in results['dummy1']['count'] ])
assert np.all(check)
def test_basic_daily_mean(self):
self.testInst.bounds = (pysat.datetime(2008,1,1), pysat.datetime(2008,2,1))
ans = pysat.ssnl.avg.mean_by_day(self.testInst, 'dummy4')
        assert np.all(ans == 86399/2.)
# license: Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
# Licensed under the CC BY-NC-SA 4.0 license
# (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
# this file describes lots of different camera classes
import numpy as np
import pdb
import tensorflow as tf
from kinect_spec import *
from utils import *
import scipy.misc
import scipy.interpolate
from matplotlib import pyplot as plt
root_dir = '/userhome/dataset/original/FLAT'
class cam_baseline:
# baseline tof camera, uses square wave for emission and modulation
# other cameras can inherit from this class
    def __init__(self,cam):
        self.cam = {}  # initialise the parameter dict before copying user settings
        for key in cam.keys():
            self.cam[key] = cam[key]
# create the camera function
self.cam['T'] = 1e6
self.cam['fr'] = self.cam['T']/self.cam['exp']
self.cam['tabs'] = self.cam['dimt']
self.cam['amp_e'] = 4.
self.cam['off_e'] = 2.
self.cam['amp_m'] = 1.
self.cam['off_m'] = 1.
self.cam['phase'] = np.array([0, PI/2, PI, 3*PI/2])
self.vars = {}
self.dtype= tf.float32
# build computation graph
self.graph = tf.Graph()
self.session = tf.Session(graph = self.graph)
self.build_graph()
def build_graph(self):
# shorten the name
cam = self.cam
with self.graph.as_default():
# inputs
# impulse response
ipr_init = np.zeros((cam['dimy'],cam['dimx'],cam['dimt']))
self.ipr_in = tf.Variable(ipr_init,dtype=self.dtype)
ipr = tf.Variable(self.ipr_in,dtype=self.dtype)
# variables
# exposure time of each frame
exp = tf.Variable(cam['exp'],dtype=self.dtype)
# amplitude of lighting
amp_e = tf.Variable(cam['amp_e'],dtype=self.dtype)
# offset of lighting
off_e = tf.Variable(cam['off_e'],dtype=self.dtype)
# amplitude of modulation
amp_m = tf.Variable(cam['amp_m'],dtype=self.dtype)
# offset of modulation
off_m = tf.Variable(cam['off_m'],dtype=self.dtype)
# constants
tabs = cam['tabs'] # number of tabs/frs in impulse response
fr = cam['fr'] # length of one period in frame number
hf = (fr-1-1e-1)/2 # half of the period
# create the signals
idx = tf.constant(np.arange(fr),dtype=self.dtype)
f = amp_e*(0.5-tf.sign(idx-hf)/2)+off_e
g = tf.stack(
[\
amp_m*(0.5-tf.sign(idx-hf)/2)+off_m,
amp_m*(0.5-tf.sign(np.mod(idx-fr/2,fr)-hf)/2)+off_m,
amp_m*(0.5-tf.sign(np.mod(idx-fr/4,fr)-hf)/2)+off_m,
amp_m*(0.5-tf.sign(np.mod(idx-3*fr/4,fr)-hf)/2)+off_m,
],
axis=1
)
# manually conduct partial correlation
with tf.device('/cpu:0'):
# the partial correlation needs too large memory to GPU
# so we use CPU instead
cor = []
for i in np.arange(tabs):
cor.append(
tf.matmul(\
tf.expand_dims(
amp_e*(0.5-tf.sign(\
tf.constant(\
np.mod(np.arange(fr)-i,fr)-hf,\
dtype=self.dtype\
)
)/2)+off_e,\
axis=0),g
)
)
cor = tf.concat(cor,axis=0)
# compute the raw measurement
cor_exp = tf.tile(\
tf.expand_dims(tf.expand_dims(cor,0),0),
[cam['dimy'],cam['dimx'],1,1]
)
ipr_exp = tf.tile(\
tf.expand_dims(ipr,-1),[1,1,1,4]
)
meas = tf.reduce_sum(cor_exp * ipr_exp, 2)
# phase and depth
phase = \
(meas[:,:,2]-meas[:,:,3])/2/\
(\
tf.abs(meas[:,:,0]-meas[:,:,1])+\
tf.abs(meas[:,:,2]-meas[:,:,3])\
)
depth = phase * hf * exp * C /2
# save some data for debugging
self.vars['ipr'] = ipr
self.vars['g'] = g
self.vars['f'] = f
self.vars['cor'] = cor
self.vars['meas'] = meas
self.vars['phase'] = phase
self.vars['depth'] = depth
# input data
self.input_data = tf.group(\
ipr.assign(self.ipr_in)
)
#
init_op = tf.global_variables_initializer()
self.session.run(init_op)
return
def process(self,prop):
# process data
self.input_dict = {
self.ipr_in : prop,
}
self.session.run(self.input_data, self.input_dict)
res_dict = {
'f' : self.vars['f'],
'g' : self.vars['g'],
'cor' : self.vars['cor'],
'meas' : self.vars['meas'],
'phase' : self.vars['phase'],
'depth' : self.vars['depth'],
}
return self.session.run(res_dict)
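# A small numpy sketch of why the phase estimate above can be a simple ratio of
# tap differences: the correlation of the two square waves built in build_graph
# is piecewise linear in the delay, i.e. a triangle wave. The period length
# below is an arbitrary stand-in.
import numpy as np
fr = 100
idx = np.arange(fr)
f = (idx < fr / 2).astype(float)      # emission: square wave, high for half a period
g = (idx < fr / 2).astype(float)      # modulation: same square wave
corr = np.array([np.dot(np.roll(f, -d), g) for d in range(fr)])
# corr falls linearly from fr/2 to 0 and rises back: a triangular correlation,
# so differences between taps vary linearly with the travel delay.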
class cam_sin(cam_baseline):
# baseline tof camera, uses square wave for emission and modulation
# other cameras can inherit from this class
def __init__(self,cam):
self.cam = {}  # the dict must exist before copying the user-supplied parameters
for key in cam.keys():
self.cam[key] = cam[key]
# create the camera function
self.cam['T'] = 5e4
self.cam['fr'] = self.cam['T']/self.cam['exp']
self.cam['tabs'] = self.cam['dimt']
self.cam['amp_e'] = 1.
self.cam['off_e'] = 1.
self.cam['amp_m'] = 1.
self.cam['off_m'] = 1.
self.cam['phase'] = np.array([0, PI/2, PI, 3*PI/2])
self.vars = {}
self.dtype= tf.float32
# build computation graph
self.graph = tf.Graph()
self.session = tf.Session(graph = self.graph)
self.build_graph()
def build_graph(self):
# shorten the name
cam = self.cam
with self.graph.as_default():
# inputs
# impulse response
ipr_init = np.zeros((cam['dimy'],cam['dimx'],cam['dimt']))
self.ipr_in = tf.Variable(ipr_init,dtype=self.dtype)
ipr = tf.Variable(self.ipr_in,dtype=self.dtype)
# variables
# exposure time of each frame
exp = tf.Variable(cam['exp'],dtype=self.dtype)
# amplitude of lighting
off_e = tf.Variable(cam['off_e'],dtype=self.dtype)
amp_e = tf.Variable(cam['amp_e'],dtype=self.dtype)
off_m = tf.Variable(cam['off_m'],dtype=self.dtype)
amp_m = tf.Variable(cam['amp_m'],dtype=self.dtype)
# constants
tabs = cam['tabs'] # number of tabs/frs in impulse response
fr = cam['fr'] # length of one period in frame number
hf = (fr-1-1e-1)/2 # half of the period
# create the signals
idx = tf.constant(np.arange(fr),dtype=self.dtype)
f = amp_e*tf.sin(idx*2*PI/fr)+off_e
g = tf.stack(
[\
amp_m*tf.sin(idx*2*PI/fr)+off_m,
amp_m*tf.sin(np.mod(idx-fr/2,fr)*2*PI/fr)+off_m,
amp_m*tf.sin(np.mod(idx-fr/4,fr)*2*PI/fr)+off_m,
amp_m*tf.sin(np.mod(idx-3*fr/4,fr)*2*PI/fr)+off_m,
],
axis=1
)
# manually conduct partial correlation
with tf.device('/cpu:0'):
# the partial correlation needs too large memory to GPU
# so we use CPU instead
cor = []
for i in np.arange(tabs):
cor.append(
tf.matmul(\
tf.expand_dims(
amp_e*np.sin(np.mod(np.arange(fr)-i,fr)*2*PI/fr)+off_e,\
axis=0
),g
)
)
cor = tf.concat(cor,axis=0)
# compute the raw measurement
cor_exp = tf.tile(\
tf.expand_dims(tf.expand_dims(cor,0),0),
[cam['dimy'],cam['dimx'],1,1]
)
ipr_exp = tf.tile(\
tf.expand_dims(ipr,-1),[1,1,1,4]
)
meas = tf.reduce_sum(cor_exp * ipr_exp, 2)
# phase and depth
phase = tf.atan((meas[:,:,2]-meas[:,:,3])/(meas[:,:,0]-meas[:,:,1]))
ampl = tf.sqrt((meas[:,:,2]-meas[:,:,3])**2+(meas[:,:,0]-meas[:,:,1])**2)
depth = phase * cam['T']/2/PI * C /2
# save some data for debugging
self.vars['ipr'] = ipr
self.vars['g'] = g
self.vars['f'] = f
self.vars['cor'] = cor
self.vars['meas'] = meas
self.vars['phase'] = phase
self.vars['ampl'] = ampl
self.vars['depth'] = depth
# input data
self.input_data = tf.group(\
ipr.assign(self.ipr_in)
)
#
init_op = tf.global_variables_initializer()
self.session.run(init_op)
return
def process(self,prop):
# process data with the same input/output pattern as cam_baseline.process
self.input_dict = {
self.ipr_in : prop,
}
self.session.run(self.input_data, self.input_dict)
res_dict = {
'meas' : self.vars['meas'],
'phase' : self.vars['phase'],
'ampl' : self.vars['ampl'],
'depth' : self.vars['depth'],
}
return self.session.run(res_dict)
class cam_real(cam_baseline):
# baseline tof camera, uses square wave for emission and modulation
# other cameras can inherit from this class
def __init__(self,cam):
self.cam = {}  # the dict must exist before copying the user-supplied parameters
for key in cam.keys():
self.cam[key] = cam[key]
# create the camera function
self.cam['T'] = 1e6
self.cam['fr'] = self.cam['T']/self.cam['exp']
self.cam['tabs'] = self.cam['dimt']
self.cam['amp_e'] = 4.
self.cam['off_e'] = 2.
self.cam['amp_m'] = 1.
self.cam['off_m'] = 1.
self.cam['phase'] = np.array([0, PI/2, PI, 3*PI/2])
self.vars = {}
self.dtype= tf.float32
# build computation graph
self.graph = tf.Graph()
self.session = tf.Session(graph = self.graph)
self.build_graph()
def build_graph(self):
# shorten the name
cam = self.cam
with self.graph.as_default():
# inputs
# impulse response
ipr_init = np.zeros((cam['dimy'],cam['dimx'],cam['dimt']))
self.ipr_in = tf.Variable(ipr_init,dtype=self.dtype)
ipr = tf.Variable(self.ipr_in,dtype=self.dtype)
# variables
# exposure time of each frame
exp = tf.Variable(cam['exp'],dtype=self.dtype)
# amplitude of lighting
off_e = tf.Variable(cam['off_e'],dtype=self.dtype)
amp_e = tf.Variable(cam['amp_e'],dtype=self.dtype)
off_m = tf.Variable(cam['off_m'],dtype=self.dtype)
amp_m = tf.Variable(cam['amp_m'],dtype=self.dtype)
# constants
tabs = cam['tabs'] # number of tabs/frs in impulse response
fr = cam['fr'] # length of one period in frame number
hf = (fr-1-1e-1)/2 # half of the period
# create the signals
idx = tf.constant(np.arange(fr),dtype=self.dtype)
f = amp_e*tf.sin(idx*2*PI/fr)+off_e
g = tf.stack(
[\
amp_m*(0.5-tf.sign(idx-hf)/2)+off_m,
amp_m*(0.5-tf.sign(np.mod(idx-fr/2,fr)-hf)/2)+off_m,
amp_m*(0.5-tf.sign(np.mod(idx-fr/4,fr)-hf)/2)+off_m,
amp_m*(0.5-tf.sign(np.mod(idx-3*fr/4,fr)-hf)/2)+off_m,
],
axis=1
)
# manually conduct partial correlation
with tf.device('/cpu:0'):
# the partial correlation needs too large memory to GPU
# so we use CPU instead
cor = []
for i in np.arange(tabs):
cor.append(
tf.matmul(\
tf.expand_dims(
amp_e*np.sin(np.mod(np.arange(fr)-i,fr)*2*PI/fr)+off_e,\
axis=0
),g
)
)
cor = tf.concat(cor,axis=0)
# compute the raw measurement
cor_exp = tf.tile(\
tf.expand_dims(tf.expand_dims(cor,0),0),
[cam['dimy'],cam['dimx'],1,1]
)
ipr_exp = tf.tile(\
tf.expand_dims(ipr,-1),[1,1,1,4]
)
meas = tf.reduce_sum(cor_exp * ipr_exp, 2)
# phase and depth
phase = tf.atan((meas[:,:,2]-meas[:,:,3])/(meas[:,:,0]-meas[:,:,1]))
ampl = tf.sqrt((meas[:,:,2]-meas[:,:,3])**2+(meas[:,:,0]-meas[:,:,1])**2)
depth = phase * cam['T']/2/PI * C /2
# save some data for debugging
self.vars['ipr'] = ipr
self.vars['g'] = g
self.vars['f'] = f
self.vars['cor'] = cor
self.vars['meas'] = meas
self.vars['phase'] = phase
self.vars['ampl'] = ampl
self.vars['depth'] = depth
# input data
self.input_data = tf.group(\
ipr.assign(self.ipr_in)
)
#
init_op = tf.global_variables_initializer()
self.session.run(init_op)
return
class cam_real_fast(cam_baseline):
# baseline tof camera, uses square wave for emission and modulation
# other cameras can inherit from this class
def __init__(self,cam):
self.cam = {}
for key in cam.keys():
self.cam[key] = cam[key]
# create the camera function
self.cam['T'] = 1e6
self.cam['fr'] = self.cam['T']/self.cam['exp']
self.cam['tabs'] = self.cam['dimt']
self.cam['amp_e'] = 4.
self.cam['off_e'] = 2.
self.cam['amp_m'] = 1.
self.cam['off_m'] = 1.
self.cam['phase'] = np.array([0, PI/2, PI, 3*PI/2])
# create the camera function
self.cor = self.cam_func()
self.vars = {}
self.dtype= tf.float32
# build computation graph
self.graph = tf.Graph()
self.session = tf.Session(graph = self.graph)
self.build_graph()
def cam_func(self):
# precreate the camera function
exp = self.cam['exp']
# amplitude of lighting
off_e = self.cam['off_e']
amp_e = self.cam['amp_e']
off_m = self.cam['off_m']
amp_m = self.cam['amp_m']
phase = self.cam['phase']
# constants
tabs = self.cam['tabs'] # number of tabs/frs in impulse response
fr = self.cam['fr'] # length of one period in frame number
hf = (fr-1-1e-1)/2 # half of the period
# create the signals
idx = np.arange(fr)
f = amp_e*np.sin(idx*2*PI/fr)+off_e
g = np.stack(
[\
amp_m*(0.5-np.sign(np.mod(idx-fr*phase[i]/(2*PI),fr)-hf)/2)+off_m\
for i in range(len(phase))
],axis=1
)
# manually compute the correlation
cor = []
for i in np.arange(tabs):
cor.append(
np.matmul(\
np.expand_dims(
amp_e*np.sin(np.mod(np.arange(fr)-i,fr)*2*PI/fr)+off_e,\
axis=0
),g
)
)
cor = np.concatenate(cor,axis=0)
return cor
def build_graph(self):
# shorten the name
cam = self.cam
with self.graph.as_default():
# inputs
# impulse response
ipr_init = np.zeros((cam['dimy'],cam['dimx'],cam['dimt']))
self.ipr_in = tf.Variable(ipr_init,dtype=self.dtype)
ipr = tf.Variable(self.ipr_in,dtype=self.dtype)
# camera function
cor = tf.constant(self.cor, dtype=self.dtype)
# compute the raw measurement
cor_exp = tf.tile(\
tf.expand_dims(tf.expand_dims(cor,0),0),
[cam['dimy'],cam['dimx'],1,1]
)
ipr_exp = tf.tile(\
tf.expand_dims(ipr,-1),[1,1,1,4]
)
meas = tf.reduce_sum(cor_exp * ipr_exp, 2)
# phase and depth
Q = tf.zeros((cam['dimy'], cam['dimx']))
I = tf.zeros((cam['dimy'], cam['dimx']))
for i in range(len(cam['phase'])):
Q += meas[:,:,i] * tf.sin(cam['phase'][i].astype(np.float32))
I += meas[:,:,i] * np.cos(cam['phase'][i].astype(np.float32))
# the generalized form of phase stepping
phase = tf.atan(Q/I)
ampl = 2*tf.sqrt(Q**2+I**2)/len(cam['phase'])
depth = phase * cam['T']/2/PI * C /2
# save some data for debugging
self.vars['ipr'] = ipr
self.vars['cor'] = cor
self.vars['meas'] = meas
self.vars['phase'] = phase
self.vars['ampl'] = ampl
self.vars['depth'] = depth # unwrapped depth
# input data
self.input_data = tf.group(\
ipr.assign(self.ipr_in)
)
#
init_op = tf.global_variables_initializer()
self.session.run(init_op)
return
def process(self,prop):
# process data
self.input_dict = {
self.ipr_in : prop,
}
self.session.run(self.input_data, self.input_dict)
res_dict = {
'meas' : self.vars['meas'],
'phase' : self.vars['phase'],
'ampl' : self.vars['ampl'],
}
# phase unwrapping
result = self.session.run(res_dict)
result['phase'][np.where(result['phase']<0)] += PI
result['depth'] = result['phase'] * self.cam['T']/2/PI * C /2
return result
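# A single-pixel numpy sketch of the generalized phase-stepping decode used in
# the process() method above; the period T, the true phase and the ideal 4-tap
# samples are made-up numbers, not camera constants from this file.
import numpy as np
PI = np.pi
C = 3e8                                    # speed of light
T = 1e6                                    # modulation period (arbitrary units here)
phase_steps = np.array([0., PI/2, PI, 3*PI/2])
true_phase = 0.7                           # phase offset caused by the travel time
meas = np.cos(phase_steps - true_phase)    # ideal correlation samples at the 4 taps
Q = np.sum(meas * np.sin(phase_steps))
I = np.sum(meas * np.cos(phase_steps))
recovered_phase = np.arctan2(Q, I)         # equals true_phase up to numerical error
ampl = 2 * np.sqrt(Q**2 + I**2) / len(phase_steps)
depth = recovered_phase * T / (2 * PI) * C / 2   # same formula as in the classes above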
class cam_real_np(cam_baseline):
# baseline tof camera, uses square wave for emission and modulation
# other cameras can inherit from this class
def __init__(self,cam):
self.cam = {}  # the dict must exist before copying the user-supplied parameters
for key in cam.keys():
self.cam[key] = cam[key]
# create the camera function
self.cam['T'] = 1e6
self.cam['fr'] = self.cam['T']/self.cam['exp']
self.cam['tabs'] = self.cam['dimt']
self.cam['amp_e'] = 4.
self.cam['off_e'] = 2.
self.cam['amp_m'] = 1.
self.cam['off_m'] = 1.
self.cam['phase'] = np.array([0, PI/2, PI, 3*PI/2])
# create the camera function
self.cor = self.cam_func()
self.vars = {}
self.dtype= tf.float32
def cam_func(self):
# precreate the camera function
exp = self.cam['exp']
phase = self.cam['phase']
# amplitude of lighting
off_e = self.cam['off_e']
amp_e = self.cam['amp_e']
off_m = self.cam['off_m']
amp_m = self.cam['amp_m']
# constants
tabs = self.cam['tabs'] # number of tabs/frs in impulse response
fr = self.cam['fr'] # length of one period in frame number
hf = (fr-1-1e-1)/2 # half of the period
# create the signals
idx = np.arange(fr)
f = amp_e*np.sin(idx*2*PI/fr)+off_e
g = np.stack(
[\
amp_m*(0.5-np.sign(np.mod(idx-fr*phase[i]/(2*PI),fr)-hf)/2)+off_m\
for i in range(len(phase))
],axis=1
)
# manually compute the correlation
cor = []
for i in np.arange(tabs):
cor.append(
np.matmul(\
np.expand_dims(
amp_e*np.sin(np.mod(np.arange(fr)-i,fr)*2*PI/fr)+off_e,\
axis=0
),g
)
)
cor = np.concatenate(cor,axis=0)
return cor
def simulate_quads(self, ipr):
# this function simulates the impulse response
cam = self.cam
cor = self.cor
# find out the non-zero part
ipr_sum = np.sum(ipr, axis=(0,1))
idx = np.where(ipr_sum!=0)
ipr_s = ipr[:,:,idx[0]]
cor_s = cor[idx[0],:]
cor_exp = np.tile(\
np.expand_dims(np.expand_dims(cor_s,0),0),
[cam['dimy'],cam['dimx'],1,1]
)
ipr_exp = np.tile(\
np.expand_dims(ipr_s,-1),[1,1,1,4]
)
meas = np.sum(cor_exp * ipr_exp, 2)
# phase and depth
Q = np.zeros((cam['dimy'], cam['dimx']))
I = np.zeros((cam['dimy'], cam['dimx']))
for i in range(len(cam['phase'])):
Q += meas[:,:,i] * np.sin(cam['phase'][i])
I += meas[:,:,i] * np.cos(cam['phase'][i])
# the generalized form of phase stepping
phase = np.arctan2(Q,I)
ampl = 2*np.sqrt(Q**2+I**2)/len(cam['phase'])
depth = phase * cam['T']/2/PI * C /2
# save some data for debugging
self.vars['ipr'] = ipr
self.vars['cor'] = cor
self.vars['meas'] = meas
self.vars['phase'] = phase
self.vars['ampl'] = ampl
self.vars['depth'] = depth
return
def process(self,prop):
self.simulate_quads(prop)
res_dict = {
'meas' : self.vars['meas'],
'phase' : self.vars['phase'],
'ampl' : self.vars['ampl'],
'depth' : self.vars['depth'],
}
return res_dict
# not finished
# TODO: New Chinese Remainder Algorithm
class cam_real_mult(cam_baseline):
# baseline tof camera, uses square wave for emission and modulation
# other cameras can inherit from this class
def __init__(self,cam):
self.cam = {}  # the dict must exist before copying the user-supplied parameters
for key in cam.keys():
self.cam[key] = cam[key]
# create the camera function
self.cam['T'] = np.array([1.7e4, 2.5e4])
self.cam['fr'] = np.array([\
self.cam['T'][i]/self.cam['exp']
for i in range(len(self.cam['T']))
])
self.cam['tabs'] = np.array([\
self.cam['dimt']
for i in range(len(self.cam['T']))
])
self.cam['amp_e'] = [4.,4.]
self.cam['off_e'] = [2.,2.]
self.cam['amp_m'] = [1.,1.]
self.cam['off_m'] = [1.,1.]
self.cam['phase'] = np.array([\
[0, PI/2, PI, 3*PI/2]
for i in range(len(self.cam['T']))
])
# create the camera function
self.cor = [\
self.cam_func(i)\
for i in range(len(self.cam['T']))
]
self.vars = {}
self.dtype= tf.float32
# build computation graph
self.graph = tf.Graph()
self.session = tf.Session(graph = self.graph)
self.build_graph()
def cam_func(self, i):
# precreate the camera function
exp = self.cam['exp']
# amplitude of lighting
off_e = self.cam['off_e'][i]
amp_e = self.cam['amp_e'][i]
off_m = self.cam['off_m'][i]
amp_m = self.cam['amp_m'][i]
phase = self.cam['phase'][i]
# constants
tabs = self.cam['tabs'][i] # number of tabs/frs in impulse response
fr = self.cam['fr'][i] # length of one period in frame number
hf = (fr-1-1e-1)/2 # half of the period
# create the signals
idx = np.arange(fr)
f = amp_e*np.sin(idx*2*PI/fr)+off_e
g = np.stack(
[\
amp_m*(0.5-np.sign(np.mod(idx-fr*phase[i]/(2*PI),fr)-hf)/2)+off_m\
for i in range(len(phase))
],axis=1
)
# manually compute the correlation
cor = []
for i in np.arange(tabs):
cor.append(
np.matmul(\
np.expand_dims(
amp_e*np.sin(np.mod(np.arange(fr)-i,fr)*2*PI/fr)+off_e,\
axis=0
),g
)
)
cor = np.concatenate(cor,axis=0)
return cor
def build_graph(self):
# shorten the name
cam = self.cam
self.vars['meas_f'] = []
self.vars['phase_f'] = []
self.vars['ampl_f'] = []
with self.graph.as_default():
# inputs
# impulse response
ipr_init = np.zeros((cam['dimy'],cam['dimx'],cam['dimt']))
self.ipr_in = tf.Variable(ipr_init,dtype=self.dtype)
ipr = tf.Variable(self.ipr_in,dtype=self.dtype)
# camera function
for idx in range(len(self.cam['T'])):
cor = tf.constant(self.cor[idx], dtype=self.dtype)
# compute the raw measurement
cor_exp = tf.tile(\
tf.expand_dims(tf.expand_dims(cor,0),0),
[cam['dimy'],cam['dimx'],1,1]
)
ipr_exp = tf.tile(\
tf.expand_dims(ipr,-1),[1,1,1,4]
)
meas = tf.reduce_sum(cor_exp * ipr_exp, 2)
# phase and depth
Q = tf.zeros((cam['dimy'], cam['dimx']))
I = tf.zeros((cam['dimy'], cam['dimx']))
for i in range(len(cam['phase'][idx])):
Q += meas[:,:,i] * tf.sin(cam['phase'][idx][i].astype(np.float32))
I += meas[:,:,i] * np.cos(cam['phase'][idx][i].astype(np.float32))
# the generalized form of phase stepping
phase = tf.atan(Q/I)
ampl = 2*tf.sqrt(Q**2+I**2)/len(cam['phase'][idx])
#
self.vars['meas_f'].append(meas)
self.vars['phase_f'].append(phase)
self.vars['ampl_f'].append(ampl)
# new Chinese remainder theorem
depth = phase * cam['T']/2/PI * C /2
# save some data for debugging
self.vars['ipr'] = ipr
self.vars['cor'] = cor
self.vars['meas'] = meas
self.vars['phase'] = phase
self.vars['ampl'] = ampl
self.vars['depth'] = depth
# input data
self.input_data = tf.group(\
ipr.assign(self.ipr_in)
)
#
init_op = tf.global_variables_initializer()
self.session.run(init_op)
return
def process(self,prop):
# process data
self.input_dict = {
self.ipr_in : prop,
}
self.session.run(self.input_data, self.input_dict)
res_dict = {
'meas' : self.vars['meas'],
'phase' : self.vars['phase'],
'ampl' : self.vars['ampl'],
'depth' : self.vars['depth'],
}
return self.session.run(res_dict)
class kinect_sin:
# simulate kinect sensor, uses sin wave for emission and modulation
# linear camera, uses look up table for noise
# other cameras can inherit from this class
def __init__(self,cam):
self.cam = {}
for key in cam.keys():
self.cam[key] = cam[key]
self.cam = kinect_sin_spec(self.cam)
self.cor = self.cam['cor']
self.vars = {}
self.dtype= tf.float32
def process(self,ipr):
# find out the non-zero part
ipr_sum = np.sum(ipr, axis=(0,1))
idx = np.where(ipr_sum!=0)
ipr_s = ipr[:,:,idx[0]]
meas = []
for i in range(len(self.cam['T'])):
cor = self.cor[i]
cor_s = cor[idx[0],:]
cor_exp = np.tile(\
np.expand_dims(np.expand_dims(cor_s,0),0),
[self.cam['dimy'],self.cam['dimx'],1,1]
)
ipr_exp = np.tile(\
np.expand_dims(ipr_s,-1),[1,1,1,len(self.cam['phase'][0])]
)
meas.append(np.sum(cor_exp * ipr_exp, 2))
meas = np.concatenate(meas,axis=2)
fig = plt.figure()
for i in range(9):
ax = fig.add_subplot(3,3,i+1)
ax.imshow(meas[:,:,i])
plt.show()
# TODO: vignetting
result = {
'meas' : meas
}
return result
class kinect_real:
# simulate kinect sensor, uses sin wave for emission and modulation
# linear camera, uses look up table for noise
# other cameras can inherit from this class
def __init__(self,cam):
self.cam = {}
for key in cam.keys():
self.cam[key] = cam[key]
self.cam = kinect_real_spec(self.cam)
self.cor = self.cam['cor']
self.vars = {}
self.dtype= tf.float32
def process(self,ipr):
# find out the non-zero part
ipr_sum = np.sum(ipr, axis=(0,1))
idx = np.where(ipr_sum!=0)
ipr_s = ipr[:,:,idx[0]]
meas = []
for i in range(len(self.cam['T'])):
cor = np.transpose(self.cor[(i*3):(i*3+3),:])
cor_s = cor[idx[0],:]
cor_exp = np.tile(\
np.expand_dims(np.expand_dims(cor_s,0),0),
[self.cam['dimy'],self.cam['dimx'],1,1]
)
ipr_exp = np.tile(\
np.expand_dims(ipr_s,-1),[1,1,1,len(self.cam['phase'][0])]
)
meas.append(np.sum(cor_exp * ipr_exp, 2))
meas = np.concatenate(meas,axis=2)
result = {
'meas' : meas
}
return result
class kinect_real_tf:
# simulate kinect sensor, uses sin wave for emission and modulation
# linear camera, uses look up table for noise
# other cameras can inherit from this class
def __init__(self):
# kinect spec
self.cam = kinect_real_tf_spec()
# response graph
self.dtype= tf.float32
self.rg = self.res_graph()
self.dir = root_dir + '/params/kinect/'
# delay map
self.cam['delay'] = np.loadtxt(self.dir+'delay.txt',delimiter=',')
# vig map
self.cam['vig'] = np.loadtxt(self.dir+'vig.txt',delimiter=',')
# gains
self.cam['raw_max'] = 3500 # raw brightness that is mapped onto 'map_max' by the gain stage
self.cam['map_max'] = 3500 # this brightness will be the threshold for the kinect output
self.cam['lut_max'] = 3800 # upper end of the noise look-up table
self.cam['sat'] = 32767 # this is the saturated brightness
# noise sampling
self.cam['noise_samp'] = np.loadtxt(self.dir+'noise_samp_2000_notail.txt',delimiter=',')
self.cam['val_lut'] = np.arange(self.cam['noise_samp'].shape[1])-\
(self.cam['noise_samp'].shape[1]-1)/2
# gain and noise graph
self.gng = self.gain_noise_graph()
self.gg = self.gain_graph()
# initialize the session
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
return
def res_graph(self):
# shorten the name
cam = self.cam
# the fastest way currently to generate the raw measurement
ipr_s = tf.placeholder(\
self.dtype,
[None],
name='ipr_s'
)
ipr_idx = tf.placeholder(\
tf.int64,
[3, None],
name='ipr_idx'
)
# camera function
cha_num = 9
cor = tf.placeholder(\
self.dtype,
[cha_num,None],
name='cor',
)
meas = []
for i in range(cha_num):
# expand and compute measurement
cor_cha = cor[i,:]
cor_exp = tf.gather(cor_cha, ipr_idx[2,:])
# compute measurement
tmp = cor_exp * ipr_s
tmp = tf.SparseTensor(tf.transpose(ipr_idx), tmp, [cam['dimy'],cam['dimx'],tf.reduce_max(ipr_idx[2,:])])
tmp1 = tf.sparse_reduce_sum(tmp,2)
meas.append(tmp1)
#
meas = tf.stack(meas, 2)
return {'meas':meas,'ipr_s':ipr_s,'ipr_idx':ipr_idx,'cor':cor,'tmp':tmp,'tmp1':tmp1}
def res_delay_vig_graph(self):
# shorten the name
cam = self.cam
# the fastest way currently to generate the raw measurement
ipr_s = tf.placeholder(\
self.dtype,
[None],
name='ipr_s'
)
ipr_idx = tf.placeholder(\
tf.int64,
[3, None],
name='ipr_idx'
)
delay_idx = tf.placeholder(\
self.dtype,
[None],
name='delay_idx'
)
final_idx = tf.cast(ipr_idx[2,:],self.dtype)+delay_idx
vig = tf.constant(self.cam['vig'],self.dtype)
# camera function
cha_num = 9
cor = tf.placeholder(\
self.dtype,
[cha_num,None],
name='cor',
)
meas = []
for i in range(cha_num):
# expand and compute measurement
# cor_cha = cor[i,:]
# cor_exp = tf.gather(cor_cha, ipr_idx[2,:])
cor_exp = tf.py_func(self.f[i],[final_idx], tf.float64)
cor_exp = tf.cast(cor_exp, self.dtype)
# compute measurement
tmp = cor_exp * ipr_s
tmp = tf.SparseTensor(tf.transpose(ipr_idx), tmp, [cam['dimy'],cam['dimx'],tf.reduce_max(ipr_idx[2,:])])
tmp1 = tf.sparse_reduce_sum(tmp,2)
meas.append(tmp1/vig)
#
meas = tf.stack(meas, 2)
return {\
'meas':meas,
'ipr_s':ipr_s,
'ipr_idx':ipr_idx,
'delay_idx':delay_idx,
'cor':cor,
'tmp':tmp,
'tmp1':tmp1,
}
def res_delay_vig_motion_graph(self):
# shorten the name
cam = self.cam
# the fastest way currently to generate the raw measurement
ipr_s = tf.placeholder(\
self.dtype,
[9,None],
name='ipr_s'
)
ipr_idx = tf.placeholder(\
tf.int64,
[3, None],
name='ipr_idx'
)
delay_idx = tf.placeholder(\
self.dtype,
[9,None],
name='delay_idx'
)
vig = tf.constant(self.cam['vig'],self.dtype)
# camera function
cha_num = 9
cor = tf.placeholder(\
self.dtype,
[cha_num,None],
name='cor',
)
meas = []
for i in range(cha_num):
# expand and compute measurement
final_idx = tf.cast(ipr_idx[2,:],self.dtype)+delay_idx[i,:]
cor_exp = tf.py_func(self.f[i],[final_idx], tf.float64)
cor_exp = tf.cast(cor_exp, self.dtype)
# compute measurement
tmp = cor_exp * ipr_s[i,:]
tmp = tf.SparseTensor(tf.transpose(ipr_idx), tmp, [cam['dimy'],cam['dimx'],tf.reduce_max(ipr_idx[2,:])])
tmp1 = tf.sparse_reduce_sum(tmp,2)
meas.append(tmp1/vig)
#
meas = tf.stack(meas, 2)
return {\
'meas':meas,
'ipr_s':ipr_s,
'ipr_idx':ipr_idx,
'delay_idx':delay_idx,
'cor':cor,
'tmp':tmp,
'tmp1':tmp1,
}
def gain_noise_graph(self):
# shorten the name
cam = self.cam
# gain
raw_max = self.cam['raw_max']
map_max = self.cam['map_max']
lut_max = self.cam['lut_max']
# noise
noise_samp = tf.constant(\
self.cam['noise_samp'],
tf.int32,
)
val_lut = tf.constant(\
self.cam['val_lut'],
dtype=self.dtype,
)
# input
meas_i = tf.placeholder(\
self.dtype,
[cam['dimy'],cam['dimx'],9],
name='meas',
)
# adjust gain
meas = meas_i * map_max / raw_max
# add noise
msk = tf.less(tf.abs(meas),lut_max)
idx = tf.where(tf.abs(meas)<lut_max) # for modifying the values
hf = tf.cast((tf.shape(noise_samp)[1]-1)/2,self.dtype)
mean_idx = tf.cast(tf.boolean_mask(meas,msk)+hf, tf.int32)
samp_idx = tf.cast(tf.random_uniform(\
tf.shape(mean_idx),minval=0,maxval=self.cam['noise_samp'].shape[0],dtype=tf.int32\
),tf.int32)
idx_lut = tf.stack([samp_idx,mean_idx],1)
idx_n = tf.gather_nd(noise_samp, idx_lut)
noise = tf.gather(val_lut, idx_n, name='noise_samp')
# use sparse matrix to add noise
noise_s = tf.SparseTensor(idx, noise, tf.cast(tf.shape(meas),tf.int64))
noise_s = tf.sparse_tensor_to_dense(noise_s)
meas = tf.cast(noise_s, tf.int32)
# thresholding
idx_thre = tf.where(tf.abs(meas)<map_max)
flg = tf.ones(tf.shape(idx_thre[:,0]),tf.int32)
flg_s = tf.SparseTensor(idx_thre, flg, tf.cast(tf.shape(meas),tf.int64))
flg_s = tf.sparse_tensor_to_dense(flg_s)
meas = meas * flg_s + (1-flg_s)*map_max
# normalize to make max to equal to one
meas_o = meas / map_max
res_dict = {
'meas_i' : meas_i,
'meas_o' : meas_o,
'noise' : noise,
'mean_idx' : mean_idx,
'idx_n' : idx_n,
'idx_lut' : idx_lut,
'noise_s' : noise_s,
}
return res_dict
def gain_graph(self):
# shorten the name
cam = self.cam
# gain
raw_max = self.cam['raw_max']
map_max = self.cam['map_max']
lut_max = self.cam['lut_max']
# input
meas_i = tf.placeholder(\
self.dtype,
[cam['dimy'],cam['dimx'],9],
name='meas',
)
# adjust gain
meas = meas_i * map_max / raw_max
# thresholding
idx_thre = tf.where(tf.abs(meas)<map_max)
flg = tf.ones(tf.shape(idx_thre[:,0]),tf.int32)
flg_s = tf.SparseTensor(idx_thre, flg, tf.cast(tf.shape(meas),tf.int64))
flg_s = tf.sparse_tensor_to_dense(flg_s)
meas = tf.cast(meas, tf.int32) * flg_s + (1-flg_s)*map_max
# normalize to make max to equal to one
meas_o = meas / map_max
res_dict = {
'meas_i' : meas_i,
'meas_o' : meas_o,
}
return res_dict
def process(self,cam,ipr_idx,ipr_s,scenes,depth_true):
# camera function: find out the non-zero part
self.cam['dimt'] = cam['dimt']
self.cam['exp'] = cam['exp']
cor = compute_cor(self.cam)
# obtain the raw measurement
max_len = int(2e6) # max number for a GPU
meas = np.zeros((self.cam['dimy'],self.cam['dimx'],9))
for i in range(0,len(ipr_s),max_len):
end = min(len(ipr_s),i+max_len)
meas += self.sess.run(\
self.rg['meas'],
feed_dict={\
self.rg['ipr_s']:ipr_s[i:end],\
self.rg['ipr_idx']:np.array(ipr_idx)[:,i:end],\
self.rg['cor']:cor,\
}
)
# gain and noise
meas = self.sess.run(self.gng['meas_o'],feed_dict={self.gng['meas_i']:meas})
result = {
'meas' : meas
}
return result
def process_no_noise(self,cam,ipr_idx,ipr_s,scenes,depth_true):
# camera function: find out the non-zero part
self.cam['dimt'] = cam['dimt']
self.cam['exp'] = cam['exp']
cor = compute_cor(self.cam)
# obtain the raw measurement
max_len = int(2e6) # max number for a GPU
meas = np.zeros((self.cam['dimy'],self.cam['dimx'],9))
for i in range(0,len(ipr_s),max_len):
end = min(len(ipr_s),i+max_len)
meas += self.sess.run(\
self.rg['meas'],
feed_dict={\
self.rg['ipr_s']:ipr_s[i:end],\
self.rg['ipr_idx']:np.array(ipr_idx)[:,i:end],\
self.rg['cor']:cor,\
}
)
# gain and noise
meas = self.sess.run(self.gg['meas_o'],feed_dict={self.gg['meas_i']:meas})
result = {
'meas' : meas
}
return result
def process_gt(self,cam,depth_true):
raw_max = self.cam['raw_max']
# camera function
self.cam['dimt'] = cam['dimt']
self.cam['exp'] = cam['exp']
cor = compute_cor(self.cam)
# resize the true depth
t = depth_true / (C/2)
t_idx = t / self.cam['exp']
t_idx[np.where(depth_true<1e-4)] = np.nan
t_idx[np.where(t_idx>cor.shape[1])] = np.nan
t_idx = scipy.misc.imresize(t_idx,(cam['dimy'],cam['dimx']),mode='F')
# create the delay function
self.f = []
for i in range(cor.shape[0]):
self.f.append(scipy.interpolate.interp1d(np.arange(cor.shape[1]),cor[i,:]))
meas = [self.f[i](t_idx) for i in range(cor.shape[0])]
meas = np.stack(meas, 2)
# normalize and change the gain
meas /= self.cam['raw_max']
# deprecate the invalid part
meas[np.where(np.isnan(meas))] = 0
result = {
'meas': meas
}
return result
def process_gt_vig(self,cam,depth_true):
raw_max = self.cam['raw_max']
# camera function
self.cam['dimt'] = cam['dimt']
self.cam['exp'] = cam['exp']
cor = compute_cor(self.cam)
# resize the true depth
t = depth_true / (C/2)
t_idx = t / self.cam['exp']
t_idx[np.where(depth_true<1e-4)] = np.nan
t_idx[np.where(t_idx>cor.shape[1])] = np.nan
t_idx = scipy.misc.imresize(t_idx,(cam['dimy'],cam['dimx']),mode='F')
# create the delay function
self.f = []
for i in range(cor.shape[0]):
self.f.append(scipy.interpolate.interp1d(np.arange(cor.shape[1]),cor[i,:]))
meas = [self.f[i](t_idx)/self.cam['vig'] for i in range(cor.shape[0])]
meas = np.stack(meas, 2)
# # normalize based on the gain
# meas /= np.nanmax(np.abs(meas))
# normalize and change the gain
meas /= self.cam['raw_max']
# deprecate the invalid part
meas[np.where(np.isnan(meas))] = 0
result = {
'meas': meas
}
return result
def process_gt_vig_dist_surf(self,cam,ipr_idx,ipr_s,scenes,depth_true):
# camera function: find out the non-zero part
self.cam['dimt'] = cam['dimt']
self.cam['exp'] = cam['exp']
cor = compute_cor(self.cam)
# find the first nonzero time frame of each pixel
y=ipr_idx[0]
x=ipr_idx[1]
idx = y*self.cam['dimx']+x
idx_u, I = np.unique(idx, return_index=True)
ipr_idx = (ipr_idx[0][(I,)], ipr_idx[1][(I,)], ipr_idx[2][(I,)])
ipr_s = ipr_s[(I,)]
# obtain the raw measurement
max_len = int(2e6) # max number for a GPU
meas = np.zeros((self.cam['dimy'],self.cam['dimx'],9))
for i in range(0,len(ipr_s),max_len):
end = min(len(ipr_s),i+max_len)
meas += self.sess.run(\
self.rg['meas'],
feed_dict={\
self.rg['ipr_s']:ipr_s[i:end],\
self.rg['ipr_idx']:np.array(ipr_idx)[:,i:end],\
self.rg['cor']:cor,\
}
)
# vignetting
vig = np.tile(np.expand_dims(self.cam['vig'],-1),[1,1,9])
meas /= vig
# normalize and change the gain
meas /= self.cam['raw_max']
# deprecate the invalid part
meas[np.where(np.isnan(meas))] = 0
result = {
'meas': meas
}
return result
def process_gt_vig_dist_surf_mapmax(self,cam,ipr_idx,ipr_s,scenes,depth_true):
# camera function: find out the non-zero part
self.cam['dimt'] = cam['dimt']
self.cam['exp'] = cam['exp']
cor = compute_cor(self.cam)
# find the first nonzero time frame of each pixel
y=ipr_idx[0]
x=ipr_idx[1]
idx = y*self.cam['dimx']+x
idx_u, I = np.unique(idx, return_index=True)
ipr_idx = (ipr_idx[0][(I,)], ipr_idx[1][(I,)], ipr_idx[2][(I,)])
ipr_s = ipr_s[(I,)]
# obtain the raw measurement
max_len = int(2e6) # max number for a GPU
meas = np.zeros((self.cam['dimy'],self.cam['dimx'],9))
for i in range(0,len(ipr_s),max_len):
end = min(len(ipr_s),i+max_len)
meas += self.sess.run(\
self.rg['meas'],
feed_dict={\
self.rg['ipr_s']:ipr_s[i:end],\
self.rg['ipr_idx']:np.array(ipr_idx)[:,i:end],\
self.rg['cor']:cor,\
}
)
# vignetting
vig = np.tile(np.expand_dims(self.cam['vig'],-1),[1,1,9])
meas /= vig
# normalize and change the gain
meas /= self.cam['raw_max']
# deprecate the invalid part
meas[np.where(np.isnan(meas))] = 0
result = {
'meas': meas
}
return result
def process_one_bounce(self,cam,ipr_idx,ipr_s,scenes,depth_true):
# camera function: find out the non-zero part
self.cam['dimt'] = cam['dimt']
self.cam['exp'] = cam['exp']
cor = compute_cor(self.cam)
cam = nonlinear_adjust(self.cam,cor)
# find the first nonzero time frame of each pixel
y=ipr_idx[0]
x=ipr_idx[1]
idx = y*self.cam['dimx']+x
idx_u, I = np.unique(idx, return_index=True)
ipr_idx = (ipr_idx[0][(I,)], ipr_idx[1][(I,)], ipr_idx[2][(I,)])
ipr_s = ipr_s[(I,)]
# obtain the raw measurement
max_len = int(2e6) # max number for a GPU
meas = np.zeros((self.cam['dimy'],self.cam['dimx'],9))
for i in range(0,len(ipr_s),max_len):
end = min(len(ipr_s),i+max_len)
meas += self.sess.run(\
self.rg['meas'],
feed_dict={\
self.rg['ipr_s']:ipr_s[i:end],\
self.rg['ipr_idx']:np.array(ipr_idx)[:,i:end],\
self.rg['cor']:cor,\
}
)
# gain and noise
meas = self.sess.run(self.gg['meas_o'],feed_dict={self.gg['meas_i']:meas})
result = {
'meas': meas
}
return result
def process_one_bounce_noise(self,cam,ipr_idx,ipr_s,scenes,depth_true):
# camera function: find out the non-zero part
self.cam['dimt'] = cam['dimt']
self.cam['exp'] = cam['exp']
cor = compute_cor(self.cam)
cam = nonlinear_adjust(self.cam,cor)
# find the first nonzero time frame of each pixel
y=ipr_idx[0]
x=ipr_idx[1]
idx = y*self.cam['dimx']+x
idx_u, I = np.unique(idx, return_index=True)
ipr_idx = (ipr_idx[0][(I,)], ipr_idx[1][(I,)], ipr_idx[2][(I,)])
ipr_s = ipr_s[(I,)]
# obtain the raw measurement
max_len = int(2e6) # max number for a GPU
meas = np.zeros((self.cam['dimy'],self.cam['dimx'],9))
for i in range(0,len(ipr_s),max_len):
end = min(len(ipr_s),i+max_len)
meas += self.sess.run(\
self.rg['meas'],
feed_dict={\
self.rg['ipr_s']:ipr_s[i:end],\
self.rg['ipr_idx']:np.array(ipr_idx)[:,i:end],\
self.rg['cor']:cor,\
}
)
# gain and noise
meas = self.sess.run(self.gng['meas_o'],feed_dict={self.gng['meas_i']:meas})
result = {
'meas': meas
}
return result
#########################################################
# the four process functions below are what you need
def process_delay_vig_gain_noise(self,cam,ipr_idx,ipr_s,scenes,depth_true):
# camera function: find out the non-zero part
self.cam['dimt'] = cam['dimt']
self.cam['exp'] = cam['exp']
cor = compute_cor(self.cam)
# create the delay function
self.f = []
for i in range(cor.shape[0]):
self.f.append(scipy.interpolate.interp1d(np.arange(cor.shape[1]),cor[i,:]))
if not hasattr(self, 'rdvg'):
self.rdvg = self.res_delay_vig_graph()
# compute delay index and interpolates the correlation
delay_idx = self.cam['delay'][ipr_idx[0:2]]
delay_idx /= (C/2)
delay_idx /= self.cam['exp']
# obtain the raw measurement
max_len = int(1e7) # max number for a GPU
meas = np.zeros((self.cam['dimy'],self.cam['dimx'],9))
for i in range(0,len(ipr_s),max_len):
end = min(len(ipr_s),i+max_len)
meas += self.sess.run(\
self.rdvg['meas'],
feed_dict={\
self.rdvg['ipr_s']:ipr_s[i:end],\
self.rdvg['ipr_idx']: | np.array(ipr_idx) | numpy.array |
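# A numpy/scipy sketch of the ground-truth raw-channel generation used by the
# process_gt* methods above: each of the 9 correlation curves is interpolated
# at the fractional time-of-flight index of every pixel. The correlation table,
# exposure length and depth map below are random stand-ins, not the real
# camera function from the params folder.
import numpy as np
import scipy.interpolate
C = 3e8
exp = 1e-9                                     # assumed length of one time bin, seconds
cor = np.random.rand(9, 512)                   # 9 channels x 512 time bins (stand-in)
depth_true = np.random.uniform(0.5, 5.0, (424, 512))
t_idx = depth_true / (C / 2) / exp             # fractional index along the time axis
f = [scipy.interpolate.interp1d(np.arange(cor.shape[1]), cor[i, :])
     for i in range(cor.shape[0])]
meas = np.stack([fi(t_idx) for fi in f], axis=2)   # (424, 512, 9) raw channels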
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 20:22, 12/06/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
import concurrent.futures as parallel
from functools import partial
import numpy as np
from mealpy.optimizer import Optimizer
class BaseSMA(Optimizer):
"""
My modified version of: Slime Mould Algorithm (SMA)
(Slime Mould Algorithm: A New Method for Stochastic Optimization)
Link:
https://doi.org/10.1016/j.future.2020.03.055
https://www.researchgate.net/publication/340431861_Slime_mould_algorithm_A_new_method_for_stochastic_optimization
Notes:
+ Select 2 unique, random solutions to create the new solution (instead of creating it variable by variable) --> removes the third loop of the original version
+ Check bounds and update fitness after each individual move, instead of after the whole population has moved as in the original version
+ My version is not only faster but also better
"""
ID_WEI = 2
def __init__(self, problem, epoch=10000, pop_size=100, pr=0.03, **kwargs):
"""
Args:
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
pr (float): probability threshold (z in the paper), default = 0.03
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = True
self.epoch = epoch
self.pop_size = pop_size
self.pr = pr
def create_solution(self):
"""
Returns:
A new solution with 3 elements: position, fitness wrapper and weight vector
The general format: [position, [target, [obj1, obj2, ...]], weight]
## To get the position, fitness wrapper, target and obj list
## A[self.ID_POS] --> Return: position
## A[self.ID_FIT] --> Return: [target, [obj1, obj2, ...]]
## A[self.ID_FIT][self.ID_TAR] --> Return: target
## A[self.ID_FIT][self.ID_OBJ] --> Return: [obj1, obj2, ...]
"""
position = np.random.uniform(self.problem.lb, self.problem.ub)
fitness = self.get_fitness_position(position=position)
weight = np.zeros(self.problem.n_dims)
return [position, fitness, weight]
def create_child(self, idx, pop_copy, g_best, a, b):
# Update the Position of search agent
if np.random.uniform() < self.pr: # Eq.(2.7)
pos_new = | np.random.uniform(self.problem.lb, self.problem.ub) | numpy.random.uniform |
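# A short numpy sketch of the "2 unique and random solutions" note in the
# docstring above: for agent idx, two distinct other agents are drawn once and
# reused for the whole position update. The variable names and the commented
# update form are assumptions for illustration, not the library's exact code.
import numpy as np
pop_size = 100
idx = 7
others = np.setdiff1d(np.arange(pop_size), idx)
id_a, id_b = np.random.choice(others, 2, replace=False)   # two unique agents != idx
# the chosen agents would then enter an update of the form
# pos_new = g_best_pos + vb * (weight * pop[id_a][POS] - pop[id_b][POS])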
#!/usr/bin/env python3
import numpy as np
import pandas as pd
from sklearn import mixture
import matplotlib.pyplot as plt
import theano.tensor as T
from ilqr import iLQR
from ilqr.cost import QRCost
from ilqr.dynamics import constrain
from ilqr.dynamics import AutoDiffDynamics
def accel(X, u, m=1, M=5, L=2, g=9.80665, d=1):
temp = (u[0] + m * L * X[4]**2 * X[2])/(M + m)
num = g * X[2] - X[3] * temp
den = L * (4/3 - (m * X[3]**2)/(M + m))
ang_acc = num/den
lin_acc = temp - (m * L * ang_acc * X[3])/(M + m)
theta = T.arctan2(X[2], X[3])
next_theta = theta + X[4] * dt
return lin_acc, ang_acc, next_theta
def augment_state(X):
if X.ndim == 1:
x, x_dot, theta, theta_dot = X
else:
x = X[..., 0].reshape(-1, 1)
x_dot = X[..., 1].reshape(-1, 1)
theta = X[..., 2].reshape(-1, 1)
theta_dot = X[..., 3].reshape(-1, 1)
return np.hstack([x, x_dot, np.sin(theta), np.cos(theta), theta_dot])
def deaugment_state(X):
if X.ndim == 1:
x, x_dot, sin_theta, cos_theta, theta_dot = X
else:
x = X[..., 0].reshape(-1, 1)
x_dot = X[..., 1].reshape(-1, 1)
sin_theta = X[..., 2].reshape(-1, 1)
cos_theta = X[..., 3].reshape(-1, 1)
theta_dot = X[..., 4].reshape(-1, 1)
theta = np.arctan2(sin_theta, cos_theta)
return | np.hstack([x, x_dot, theta, theta_dot]) | numpy.hstack |
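# Quick numpy check that augment_state and deaugment_state above are inverses
# of each other (up to angle wrapping); the sample state is arbitrary.
import numpy as np
X = np.array([0.3, -0.1, 0.5, 2.0])    # [x, x_dot, theta, theta_dot]
X_aug = augment_state(X)               # -> [x, x_dot, sin(theta), cos(theta), theta_dot]
assert np.allclose(deaugment_state(X_aug), X)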
import unittest
import numpy as np
from feastruct.pre.material import Steel
from feastruct.pre.section import Section
import feastruct.fea.cases as cases
from feastruct.fea.frame_analysis import FrameAnalysis2D
from feastruct.solvers.linstatic import LinearStatic
class TestUDL(unittest.TestCase):
"""Tests problems related to 1D beam bending from the American Wood Council:
https://www.awc.org/pdf/codes-standards/publications/design-aids/AWC-DA6-BeamFormulas-0710.pdf
"""
def setUp(self):
self.steel = Steel()
self.elastic_modulus = self.steel.elastic_modulus
self.ixx = np.random.uniform(10e6, 200e6)
self.length = np.random.uniform(2e3, 10e3)
self.q = -np.random.uniform(1, 10)
self.pl = -np.random.uniform(5e3, 50e3)
def test_fig1(self):
"""Simple Beam – Uniformly Distributed Load"""
# create 2d frame analysis object
analysis = FrameAnalysis2D()
# create section
section = Section(ixx=self.ixx)
# create nodes
node_a = analysis.create_node(coords=[0])
node_b = analysis.create_node(coords=[self.length])
# create beam elements
element = analysis.create_element(
el_type='EB2-2D', nodes=[node_a, node_b], material=self.steel, section=section
)
# add supports
freedom_case = cases.FreedomCase()
freedom_case.add_nodal_support(node=node_a, val=0, dof=0)
freedom_case.add_nodal_support(node=node_a, val=0, dof=1)
freedom_case.add_nodal_support(node=node_b, val=0, dof=1)
# add loads
load_case = cases.LoadCase()
load_case.add_element_load(element.generate_udl(q=self.q))
# add analysis case
analysis_case = cases.AnalysisCase(freedom_case=freedom_case, load_case=load_case)
# linear static solver
LinearStatic(analysis=analysis, analysis_cases=[analysis_case]).solve()
# check displacements
def analytical_disp(x):
factor = self.q * x / 24 / self.elastic_modulus / self.ixx
l0 = self.length
return factor * (l0 * l0 * l0 - 2 * l0 * x * x + x * x * x)
# get displacements
displacements = element.get_displacements(11, analysis_case)
# loop through each station
for disp in displacements:
xi = disp[0]
x = self.length * xi
v = disp[2]
# check displacements
self.assertTrue(np.isclose(v, analytical_disp(x), atol=1e-06))
# check max displacement
l0 = self.length
v_max = 5 * self.q * l0 * l0 * l0 * l0 / 384 / self.elastic_modulus / self.ixx
# check value
self.assertTrue(np.isclose(abs(v_max), max(np.abs(displacements[:, 2]))))
# check position
self.assertTrue(np.isclose(0.5, displacements[np.abs(displacements[:, 2]).argmax(), 0],
atol=1e-06))
# check bending moments
def analytical_bmd(x):
return self.q * x / 2 * (self.length - x)
# get bmd
(xis, bmd) = element.get_bmd(11, analysis_case)
# loop through each station
for (i, m) in enumerate(bmd):
xi = xis[i]
x = self.length * xi
# check bending moment
self.assertTrue(np.isclose(m, analytical_bmd(x), atol=1e-06))
# check max bending moment
l0 = self.length
m_max = self.q * l0 * l0 / 8
# check value
self.assertTrue(np.isclose(abs(m_max), max(np.abs(bmd)), atol=1e-06))
# check position
self.assertTrue(np.isclose(0.5, xis[np.abs(bmd).argmax()], atol=1e-06))
# check shear force
def analytical_sfd(x):
return self.q * (x - self.length / 2)
# get sfd
(xis, sfd) = element.get_sfd(11, analysis_case)
# loop through each station
for (i, sf) in enumerate(sfd):
xi = xis[i]
x = self.length * xi
# check shear force
self.assertTrue(np.isclose(sf, analytical_sfd(x), atol=1e-06))
def test_fig2(self):
"""Simple Beam – Uniform Load Partially Distributed"""
a = self.length * np.random.uniform(0.1, 0.4)
c = self.length * np.random.uniform(0.1, 0.4)
b = self.length - a - c
# create 2d frame analysis object
analysis = FrameAnalysis2D()
# create section
section = Section(ixx=self.ixx)
# create nodes
node_a = analysis.create_node(coords=[0])
node_b = analysis.create_node(coords=[a])
node_c = analysis.create_node(coords=[a+b])
node_d = analysis.create_node(coords=[self.length])
# create beam elements
element_ab = analysis.create_element(
el_type='EB2-2D', nodes=[node_a, node_b], material=self.steel, section=section
)
element_bc = analysis.create_element(
el_type='EB2-2D', nodes=[node_b, node_c], material=self.steel, section=section
)
element_cd = analysis.create_element(
el_type='EB2-2D', nodes=[node_c, node_d], material=self.steel, section=section
)
# add supports
freedom_case = cases.FreedomCase()
freedom_case.add_nodal_support(node=node_a, val=0, dof=0)
sup1 = freedom_case.add_nodal_support(node=node_a, val=0, dof=1)
sup2 = freedom_case.add_nodal_support(node=node_d, val=0, dof=1)
# add loads
load_case = cases.LoadCase()
load_case.add_element_load(element_bc.generate_udl(q=self.q))
# add analysis case
analysis_case = cases.AnalysisCase(freedom_case=freedom_case, load_case=load_case)
# linear static solver
LinearStatic(analysis=analysis, analysis_cases=[analysis_case]).solve()
# check reactions
r1 = -sup1.get_reaction(analysis_case)
r2 = -sup2.get_reaction(analysis_case)
self.assertTrue(np.isclose(r1, self.q * b / 2 / self.length * (2 * c + b), atol=1e-06))
self.assertTrue(np.isclose(r2, self.q * b / 2 / self.length * (2 * a + b), atol=1e-06))
# check bending moments
def analytical_bmd_ab(x):
return r1 * x
def analytical_bmd_bc(x):
return r1 * x - self.q / 2 * (x - a) * (x - a)
def analytical_bmd_cd(x):
return r2 * (self.length - x)
# get bmds
(xis_ab, bmd_ab) = element_ab.get_bmd(11, analysis_case)
(xis_bc, bmd_bc) = element_bc.get_bmd(11, analysis_case)
(xis_cd, bmd_cd) = element_cd.get_bmd(11, analysis_case)
# element_ab - loop through each station
for (i, m) in enumerate(bmd_ab):
xi = xis_ab[i]
x = a * xi
# check bending moments
self.assertTrue(np.isclose(m, analytical_bmd_ab(x), atol=1e-06))
# element_bc - loop through each station
for (i, m) in enumerate(bmd_bc):
xi = xis_bc[i]
x = b * xi + a
# check bending moments
self.assertTrue(np.isclose(m, analytical_bmd_bc(x), atol=1e-06))
# element_cd - loop through each station
for (i, m) in enumerate(bmd_cd):
xi = xis_cd[i]
x = c * xi + a + b
# check bending moments
self.assertTrue(np.isclose(m, analytical_bmd_cd(x), atol=1e-06))
# check max bending moment
m_max = r1 * (a + r1 / 2 / self.q)
pos = a + r1 / self.q
x = 1 / b * (pos - a)
# check value
self.assertTrue(np.isclose(abs(m_max), max(np.abs(bmd_bc)), atol=1e-06))
# check position
self.assertTrue(np.isclose(x, xis_bc[np.abs(bmd_bc).argmax()], atol=1e-06))
# check shear force
def analytical_sfd_ab(x):
return -r1
def analytical_sfd_bc(x):
return -r1 + self.q * (x - a)
def analytical_sfd_cd(x):
return r2
# get sfds
(xis_ab, sfd_ab) = element_ab.get_sfd(11, analysis_case)
(xis_bc, sfd_bc) = element_bc.get_sfd(11, analysis_case)
(xis_cd, sfd_cd) = element_cd.get_sfd(11, analysis_case)
# element_ab - loop through each station
for (i, sf) in enumerate(sfd_ab):
xi = xis_ab[i]
x = a * xi
# check shear forces
self.assertTrue(np.isclose(sf, analytical_sfd_ab(x), atol=1e-06))
# element_bc - loop through each station
for (i, sf) in enumerate(sfd_bc):
xi = xis_bc[i]
x = b * xi + a
# check shear forces
self.assertTrue(np.isclose(sf, analytical_sfd_bc(x), atol=1e-06))
# element_cd - loop through each station
for (i, sf) in enumerate(sfd_cd):
xi = xis_cd[i]
x = c * xi + a + b
# check shear forces
self.assertTrue(np.isclose(sf, analytical_sfd_cd(x), atol=1e-06))
def test_fig3(self):
"""Simple Beam – Uniform Load Partially Distributed at One End"""
a = self.length * np.random.uniform(0.1, 0.9)
# create 2d frame analysis object
analysis = FrameAnalysis2D()
# create section
section = Section(ixx=self.ixx)
# create nodes
node_a = analysis.create_node(coords=[0])
node_b = analysis.create_node(coords=[a])
node_c = analysis.create_node(coords=[self.length])
# create beam elements
element_ab = analysis.create_element(
el_type='EB2-2D', nodes=[node_a, node_b], material=self.steel, section=section
)
element_bc = analysis.create_element(
el_type='EB2-2D', nodes=[node_b, node_c], material=self.steel, section=section
)
# add supports
freedom_case = cases.FreedomCase()
freedom_case.add_nodal_support(node=node_a, val=0, dof=0)
sup1 = freedom_case.add_nodal_support(node=node_a, val=0, dof=1)
sup2 = freedom_case.add_nodal_support(node=node_c, val=0, dof=1)
# add loads
load_case = cases.LoadCase()
load_case.add_element_load(element_ab.generate_udl(q=self.q))
# add analysis case
analysis_case = cases.AnalysisCase(freedom_case=freedom_case, load_case=load_case)
# linear static solver
LinearStatic(analysis=analysis, analysis_cases=[analysis_case]).solve()
# check reactions
r1 = -sup1.get_reaction(analysis_case)
r2 = -sup2.get_reaction(analysis_case)
self.assertTrue(np.isclose(r1, self.q * a / 2 / self.length * (2 * self.length - a),
atol=1e-06))
self.assertTrue(np.isclose(r2, self.q * a * a / 2 / self.length, atol=1e-06))
# check displacements
def analytical_disp_ab(x):
l0 = self.length
factor = self.q * x / 24 / self.elastic_modulus / self.ixx / l0
return factor * (a * a * (2 * l0 - a) * (2 * l0 - a) - 2 * a * x * x * (
2 * l0 - a) + l0 * x * x * x)
def analytical_disp_bc(x):
l0 = self.length
factor = self.q * a * a * (l0 - x) / 24 / self.elastic_modulus / self.ixx / l0
return factor * (4 * x * l0 - 2 * x * x - a * a)
# get displacements
displacements_ab = element_ab.get_displacements(11, analysis_case)
displacements_bc = element_bc.get_displacements(11, analysis_case)
# loop through each station
for disp in displacements_ab:
xi = disp[0]
x = a * xi
v = disp[2]
# check displacements
self.assertTrue(np.isclose(v, analytical_disp_ab(x), atol=1e-06))
# loop through each station
for disp in displacements_bc:
xi = disp[0]
x = (self.length - a) * xi + a
v = disp[2]
# check displacements
self.assertTrue(np.isclose(v, analytical_disp_bc(x), atol=1e-06))
# check bending moments
def analytical_bmd_ab(x):
return r1 * x - self.q * x * x / 2
def analytical_bmd_bc(x):
return r2 * (self.length - x)
# get bmds
(xis_ab, bmd_ab) = element_ab.get_bmd(11, analysis_case)
(xis_bc, bmd_bc) = element_bc.get_bmd(11, analysis_case)
# element_ab - loop through each station
for (i, m) in enumerate(bmd_ab):
xi = xis_ab[i]
x = a * xi
# check bending moments
self.assertTrue(np.isclose(m, analytical_bmd_ab(x), atol=1e-06))
# element_bc - loop through each station
for (i, m) in enumerate(bmd_bc):
xi = xis_bc[i]
x = (self.length - a) * xi + a
# check bending moments
self.assertTrue(np.isclose(m, analytical_bmd_bc(x), atol=1e-06))
# check max bending moment
m_max = r1 * r1 / 2 / self.q
pos = r1 / self.q
x = pos / a
# check value
self.assertTrue(np.isclose(abs(m_max), max(np.abs(bmd_ab)), atol=1e-06))
# check position
self.assertTrue(np.isclose(x, xis_ab[np.abs(bmd_ab).argmax()], atol=1e-06))
# check shear force
def analytical_sfd_ab(x):
return -r1 + self.q * x
def analytical_sfd_bc(x):
return r2
# get sfds
(xis_ab, sfd_ab) = element_ab.get_sfd(11, analysis_case)
(xis_bc, sfd_bc) = element_bc.get_sfd(11, analysis_case)
# element_ab - loop through each station
for (i, sf) in enumerate(sfd_ab):
xi = xis_ab[i]
x = a * xi
# check shear forces
self.assertTrue(np.isclose(sf, analytical_sfd_ab(x), atol=1e-06))
# element_bc - loop through each station
for (i, sf) in enumerate(sfd_bc):
xi = xis_bc[i]
x = (self.length - a) * xi + a
# check shear forces
self.assertTrue(np.isclose(sf, analytical_sfd_bc(x), atol=1e-06))
def test_fig4(self):
"""Simple Beam – Uniform Load Partially Distributed at Each End"""
a = self.length * np.random.uniform(0.1, 0.4)
c = self.length * np.random.uniform(0.1, 0.4)
b = self.length - a - c
q2 = -np.random.uniform(1, 10)
# create 2d frame analysis object
analysis = FrameAnalysis2D()
# create section
section = Section(ixx=self.ixx)
# create nodes
node_a = analysis.create_node(coords=[0])
node_b = analysis.create_node(coords=[a])
node_c = analysis.create_node(coords=[a+b])
node_d = analysis.create_node(coords=[self.length])
# create beam elements
element_ab = analysis.create_element(
el_type='EB2-2D', nodes=[node_a, node_b], material=self.steel, section=section
)
element_bc = analysis.create_element(
el_type='EB2-2D', nodes=[node_b, node_c], material=self.steel, section=section
)
element_cd = analysis.create_element(
el_type='EB2-2D', nodes=[node_c, node_d], material=self.steel, section=section
)
# add supports
freedom_case = cases.FreedomCase()
freedom_case.add_nodal_support(node=node_a, val=0, dof=0)
sup1 = freedom_case.add_nodal_support(node=node_a, val=0, dof=1)
sup2 = freedom_case.add_nodal_support(node=node_d, val=0, dof=1)
# add loads
load_case = cases.LoadCase()
load_case.add_element_load(element_ab.generate_udl(q=self.q))
load_case.add_element_load(element_cd.generate_udl(q=q2))
# add analysis case
analysis_case = cases.AnalysisCase(freedom_case=freedom_case, load_case=load_case)
# linear static solver
LinearStatic(analysis=analysis, analysis_cases=[analysis_case]).solve()
# check reactions
r1 = -sup1.get_reaction(analysis_case)
r1_ana = (self.q * a * (2 * self.length - a) + q2 * c * c) / (2 * self.length)
r2 = -sup2.get_reaction(analysis_case)
r2_ana = (q2 * c * (2 * self.length - c) + self.q * a * a) / (2 * self.length)
self.assertTrue(np.isclose(r1, r1_ana, atol=1e-06))
self.assertTrue(np.isclose(r2, r2_ana, atol=1e-06))
# check bending moments
def analytical_bmd_ab(x):
return r1 * x - self.q * 0.5 * x * x
def analytical_bmd_bc(x):
return r1 * x - self.q * a * 0.5 * (2 * x - a)
def analytical_bmd_cd(x):
return r2 * (self.length - x) - q2 * (self.length - x) * (self.length - x) * 0.5
# get bmds
(xis_ab, bmd_ab) = element_ab.get_bmd(11, analysis_case)
(xis_bc, bmd_bc) = element_bc.get_bmd(11, analysis_case)
(xis_cd, bmd_cd) = element_cd.get_bmd(11, analysis_case)
# element_ab - loop through each station
for (i, m) in enumerate(bmd_ab):
xi = xis_ab[i]
x = a * xi
# check bending moments
self.assertTrue(np.isclose(m, analytical_bmd_ab(x), atol=1e-06))
# element_bc - loop through each station
for (i, m) in enumerate(bmd_bc):
xi = xis_bc[i]
x = b * xi + a
# check bending moments
self.assertTrue(np.isclose(m, analytical_bmd_bc(x), atol=1e-06))
# element_cd - loop through each station
for (i, m) in enumerate(bmd_cd):
xi = xis_cd[i]
x = c * xi + a + b
# check bending moments
self.assertTrue(np.isclose(m, analytical_bmd_cd(x), atol=1e-06))
# check max bending moment
if abs(r1) < abs(self.q * a):
m_max = r1 * r1 / 2 / self.q
pos = r1 / self.q
x = pos / a
# check value
self.assertTrue(np.isclose(abs(m_max), max(np.abs(bmd_ab)), atol=1e-06))
# check position
self.assertTrue(np.isclose(x, xis_ab[np.abs(bmd_ab).argmax()], atol=1e-06))
if abs(r2) < abs(q2 * c):
m_max = r2 * r2 / 2 / q2
pos = self.length - r2 / q2
x = 1 / c * (pos - a - b)
# check value
self.assertTrue(np.isclose(abs(m_max), max(np.abs(bmd_cd)), atol=1e-06))
# check position
self.assertTrue(np.isclose(x, xis_cd[ | np.abs(bmd_cd) | numpy.abs |
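# Numerical illustration of the closed-form results the tests above compare
# against for a simply supported beam under a UDL; the numbers are arbitrary
# (units N and mm) and are not taken from the random values used in setUp.
import numpy as np
E, I, L, q = 200e3, 50e6, 5000.0, -5.0
v_max = 5 * q * L**4 / (384 * E * I)    # max deflection, at mid-span
m_max = q * L**2 / 8                    # max bending moment, at mid-span
r = -q * L / 2                          # reaction at each support
print(v_max, m_max, r)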
import numpy as np
import nibabel as ni
from scipy.io import loadmat
import os
from nitime.timeseries import TimeSeries
from mvpa_itab.conn.connectivity import z_fisher, glm, get_bold_signals
from mvpa_itab.conn.operations import flatten_matrix
import logging
from mvpa_itab.io.connectivity import load_matrices
from mvpa2.datasets.base import dataset_wizard
logger = logging.getLogger(__name__)
def load_fcmri_dataset(data, subjects, conditions, group, level, n_run=3):
attributes = []
samples = []
for ic, c in enumerate(conditions):
for isb, s in enumerate(subjects):
for i in range(n_run):
matrix = data[ic,isb,i,:]
fmatrix = flatten_matrix(matrix)
samples.append(fmatrix)
attributes.append([c, s, i, group[isb], level[isb]])
attributes = np.array(attributes)
ds = dataset_wizard(np.array(samples), targets=attributes.T[0], chunks=attributes.T[1])
ds.sa['run'] = attributes.T[2]
ds.sa['group'] = attributes.T[3]
ds.sa['level'] = np.int_(attributes.T[4])
ds.sa['meditation'] = attributes.T[0]
return ds
def load_mat_dataset(datapath, bands, conditions, networks=None):
target_list = []
sample_list = []
chunk_list = []
band_list = []
labels = np.loadtxt(os.path.join(datapath, 'roi_labels.txt'),
dtype=np.str_,
delimiter='\t')
#labels = labels.T[1]
subject_list_chunks = np.loadtxt(os.path.join(datapath, 'subj_list'),
dtype=np.str)
filelist = os.listdir(datapath)
filelist = [f for f in filelist if f.find('.mat') != -1]
#print filelist
mask = np.zeros(len(labels.T[0]))
if networks != None:
for n in networks:
mask += labels.T[-1] == n
else:
mask = np.ones(len(labels.T[0]), dtype=np.bool_)
mask_roi = np.meshgrid(mask, mask)[1] * np.meshgrid(mask, mask)[0]
for cond in conditions:
for band in bands:
filt_list = [f for f in filelist if f.find(cond) != -1 \
and f.find(band) != -1]
data = loadmat(os.path.join(datapath, filt_list[0]))
mat_ = data[data.keys()[0]]
#mat_[np.isinf(mat_)] = 0
il = np.tril_indices(mat_[0].shape[0])
masked_mat = mat_ * mask_roi[np.newaxis,:]
for m in masked_mat:
m[il] = 0
#samples = np.array([m[il] = 0 for m in masked_mat])
samples = np.array([m[np.nonzero(m)] for m in masked_mat])
targets = [cond for i in samples]
band_ = [band for i in samples]
target_list.append(targets)
sample_list.append(samples)
chunk_list.append(subject_list_chunks)
band_list.append(band_)
targets = np.hstack(target_list)
samples = np.vstack(sample_list)
chunks = np.hstack(chunk_list)
#zsamples = sc_zscore(samples, axis=1)
ds = dataset_wizard(samples, targets=targets, chunks=chunks)
ds.sa['band'] = np.hstack(band_list)
#zscore(ds, chunks_attr='band')
#zscore(ds, chunks_attr='chunks')
#zscore(ds, chunks_attr='band')
#print ds.shape
return ds
def load_correlation_matrix(path, pattern_):
"""
Takes a directory path that is expected to contain a set of txt files
with the connectivity matrices; the filenames share a common pattern
that is used to filter the file list in the folder.
The output is an array shaped subj x node x node.
"""
flist_conn = os.listdir(path)
flist_conn = [f for f in flist_conn if f.find(pattern_) != -1]
conn_data = []
for f in flist_conn:
data_ = np.genfromtxt(os.path.join(path, f))
conn_data.append(data_)
conn_data = np.array(conn_data)
return conn_data
class CorrelationLoader(object):
def load(self, path, filepattern, conditions=None):
# Check what we have in the path (subjdirs, subjfiles, singlefile)
subjects = os.listdir(path)
subjects = [s for s in subjects if s.find('configuration') == -1 \
and s.find('.') == -1]
result = []
for c in conditions:
s_list = []
for s in subjects:
sub_path = os.path.join(path, s)
filel = os.listdir(sub_path)
filel = [f for f in filel if f.find(c) != -1]
c_list = []
for f in filel:
matrix = np.loadtxt(os.path.join(sub_path, f))
c_list.append(matrix)
s_list.append(np.array(c_list))
result.append(np.array(s_list))
return np.array(result)
class RegressionDataset(object):
def __init__(self, X, y, group=None, conditions=None):
self.X = X
self.y = y
if group is not None:
if len(group) != len(y):
raise ValueError("Data mismatch: Check if \
data and group have the same numerosity!")
self.group = np.array(group)
if conditions is not None:
if len(conditions) != len(y):
raise ValueError("Data mismatch: Check if \
data and conditions have the same numerosity!")
self.conditions = conditions
def get_group(self, group_name):
if group_name not in np.unique(self.group):
raise ValueError("%s not included in loaded groups!",
group_name)
group_mask = self.group == group_name
rds = RegressionDataset(self.X[group_mask],
self.y[group_mask],
group=self.group[group_mask])
return rds
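# Minimal sketch of how RegressionDataset is meant to be used (the arrays and
# group labels below are invented purely for illustration):
#
#     X = np.random.randn(20, 5)            # 20 samples, 5 features
#     y = np.random.randn(20)
#     group = ['ctrl'] * 10 + ['med'] * 10
#     rds = RegressionDataset(X, y, group=group)
#     ctrl = rds.get_group('ctrl')          # sub-dataset holding the 10 'ctrl' samples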
class ConnectivityPreprocessing(object):
def __init__(self, path, subject, boldfile, brainmask, regressormask, subdir='fmri'):
self.path = path
self.subject = subject
self.subdir = subdir
self.bold = ni.load(os.path.join(path, subject, subdir, boldfile))
self.loadedSignals = False
self.brain_mask = ni.load(os.path.join(path, subject, subdir, brainmask))
self.mask = []
for mask_ in regressormask:
m = ni.load(os.path.join(path, subject, subdir, mask_))
self.mask.append(m)
def execute(self, gsr=True, filter_params={'ub': 0.08, 'lb':0.009}, tr=4.):
# Get timeseries
if not self.loadedSignals:
self._load_signals(tr, gsr, filter_params=filter_params)
elif self.loadedSignals['gsr']!=gsr or self.loadedSignals['filter_params']!=filter_params:
self._load_signals(tr, gsr, filter_params=filter_params)
beta = glm(self.fmri_ts.data.T, self.regressors.T)
residuals = self.fmri_ts.data.T - np.dot(self.regressors.T, beta)
ts_residual = TimeSeries(residuals.T, sampling_interval=tr)
'''
ub = filter_params['ub']
lb = filter_params['lb']
F = FilterAnalyzer(ts_residual, ub=ub, lb=lb)
'''
residual_4d = np.zeros_like(self.bold.get_data())
        residual_4d[self.brain_mask.get_data() > 0] = ts_residual.data
residual_4d[np.isnan(residual_4d)] = 0
self._save(residual_4d, gsr=gsr)
def _save(self, image, gsr=True):
gsr_string = ''
if gsr:
gsr_string = '_gsr'
filename = 'residual_filtered_first%s.nii.gz' % (gsr_string)
img = ni.Nifti1Image(image, self.bold.get_affine())
filepath = os.path.join(self.path, self.subject, self.subdir, filename)
ni.save(img, filepath)
def _load_signals(self, tr, gsr, filter_params=None):
regressor = []
self.fmri_ts = get_bold_signals(self.bold,
self.brain_mask,
tr,
ts_extraction='none',
filter_par=filter_params)
if gsr:
gsr_ts = get_bold_signals(self.bold,
self.brain_mask,
tr,
filter_par=filter_params)
regressor.append(gsr_ts.data)
for mask_ in self.mask:
ts_ = get_bold_signals(self.bold,
mask_,
tr,
filter_par=filter_params )
regressor.append(ts_.data)
self.loadedSignals = {'gsr':gsr, 'filter_params':filter_params}
        self.regressors = np.vstack(regressor)
#! /usr/bin/env python
from __future__ import print_function
#from builtins import range
import rospy
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from nav_msgs.msg import Odometry
from sensor_msgs.msg import Imu, LaserScan, Joy
from geometry_msgs.msg import Twist, Vector3Stamped
import numpy as np
from std_msgs.msg import Int32MultiArray, Float32MultiArray, Float32
import pandas
from geometry_msgs.msg import WrenchStamped
from scipy.signal import savgol_filter
import os
from datetime import datetime
motor_con = 4
def maprange(a, b, s):
(a1, a2), (b1, b2) = a, b
return b1 + ((s - a1) * (b2 - b1) / (a2 - a1))
def smooth(y, box_pts):
box = np.ones(box_pts)/box_pts
y_smooth = np.convolve(y, box, mode='same')
return y_smooth
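# Quick sketch of the two helpers above (input values are arbitrary examples):
#
#     maprange((0.0, 1.0), (1000, 2000), 0.5)      # -> 1500.0 (linear rescaling)
#     smooth(np.sin(np.linspace(0, 6, 100)), 5)    # boxcar-averaged copy, same length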
class Actions:
def __init__(self):
self.des_to_pub = Int32MultiArray()
self.des_cmd = np.array([350, 350, 120, 120], dtype=np.int32)
self.arm_bucket_pub = rospy.Publisher('/arm/des_cmd', Int32MultiArray, queue_size=10)
self.vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
self.vel_msg = Twist()
# assumption we are moving just in x-axis
self.vel_msg.linear.y = 0
self.vel_msg.linear.z = 0
self.vel_msg.angular.x = 0
self.vel_msg.angular.y = 0
self.vel_msg.angular.z = 0
def move(self, cmd): # cmd [velocity , arm , bucket ]
self.vel_msg.linear.x = cmd[0]
arm_cmd = cmd[1]
bucket_cmd = cmd[2]
self.des_to_pub.data = self.normalize_arm_cmd(arm_cmd, bucket_cmd)
self.arm_bucket_pub.publish(self.des_to_pub)
self.vel_pub.publish(self.vel_msg)
# rospy.loginfo([self.des_to_pub.data , self.vel_msg.linear.x])
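    # Hedged usage sketch: requires a running ROS master and an initialised node
    # (e.g. rospy.init_node('loader_teleop')); the command values are illustrative.
    #
    #     act = Actions()
    #     act.move([0.2, 400, 150])   # [linear x velocity, arm cmd, bucket cmd]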
def reset_move(self, cmd):
self.vel_msg.linear.x = cmd[0]
arm_cmd = cmd[1]
bucket_cmd = cmd[2]
self.des_to_pub.data = self.normalize_arm_cmd(arm_cmd, bucket_cmd)
self.arm_bucket_pub.publish(self.des_to_pub)
self.vel_pub.publish(self.vel_msg)
def normalize_arm_cmd(self,arm_cmd , bucket_cmd):
        des_cmd = np.array([arm_cmd, arm_cmd, bucket_cmd, bucket_cmd])
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_array_equal, assert_array_almost_equal_nulp)
import numpy as np
import pytest
import matplotlib.mlab as mlab
from matplotlib.cbook.deprecation import MatplotlibDeprecationWarning
def _stride_repeat(*args, **kwargs):
with pytest.warns(MatplotlibDeprecationWarning):
return mlab.stride_repeat(*args, **kwargs)
class TestStride:
def get_base(self, x):
y = x
while y.base is not None:
y = y.base
return y
def calc_window_target(self, x, NFFT, noverlap=0, axis=0):
"""
This is an adaptation of the original window extraction algorithm.
This is here to test to make sure the new implementation has the same
result.
"""
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
result = np.zeros((NFFT, n))
# do the ffts of the slices
for i in range(n):
result[:, i] = x[ind[i]:ind[i]+NFFT]
if axis == 1:
result = result.T
return result
@pytest.mark.parametrize('shape', [(), (10, 1)], ids=['0D', '2D'])
def test_stride_windows_invalid_input_shape(self, shape):
x = np.arange(np.prod(shape)).reshape(shape)
with pytest.raises(ValueError):
mlab.stride_windows(x, 5)
@pytest.mark.parametrize('n, noverlap',
[(0, None), (11, None), (2, 2), (2, 3)],
ids=['n less than 1', 'n greater than input',
'noverlap greater than n',
'noverlap equal to n'])
def test_stride_windows_invalid_params(self, n, noverlap):
x = np.arange(10)
with pytest.raises(ValueError):
mlab.stride_windows(x, n, noverlap)
@pytest.mark.parametrize('shape', [(), (10, 1)], ids=['0D', '2D'])
def test_stride_repeat_invalid_input_shape(self, shape):
x = np.arange(np.prod(shape)).reshape(shape)
with pytest.raises(ValueError):
_stride_repeat(x, 5)
@pytest.mark.parametrize('axis', [-1, 2],
ids=['axis less than 0',
'axis greater than input shape'])
def test_stride_repeat_invalid_axis(self, axis):
x = np.array(0)
with pytest.raises(ValueError):
_stride_repeat(x, 5, axis=axis)
def test_stride_repeat_n_lt_1_ValueError(self):
x = np.arange(10)
with pytest.raises(ValueError):
_stride_repeat(x, 0)
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
@pytest.mark.parametrize('n', [1, 5], ids=['n1', 'n5'])
def test_stride_repeat(self, n, axis):
x = np.arange(10)
y = _stride_repeat(x, n, axis=axis)
expected_shape = [10, 10]
expected_shape[axis] = n
yr = np.repeat(np.expand_dims(x, axis), n, axis=axis)
assert yr.shape == y.shape
assert_array_equal(yr, y)
assert tuple(expected_shape) == y.shape
assert self.get_base(y) is x
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
@pytest.mark.parametrize('n, noverlap',
[(1, 0), (5, 0), (15, 2), (13, -3)],
ids=['n1-noverlap0', 'n5-noverlap0',
'n15-noverlap2', 'n13-noverlapn3'])
def test_stride_windows(self, n, noverlap, axis):
x = np.arange(100)
y = mlab.stride_windows(x, n, noverlap=noverlap, axis=axis)
expected_shape = [0, 0]
expected_shape[axis] = n
expected_shape[1 - axis] = 100 // (n - noverlap)
yt = self.calc_window_target(x, n, noverlap=noverlap, axis=axis)
assert yt.shape == y.shape
assert_array_equal(yt, y)
assert tuple(expected_shape) == y.shape
assert self.get_base(y) is x
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
def test_stride_windows_n32_noverlap0_unflatten(self, axis):
n = 32
x = np.arange(n)[np.newaxis]
x1 = np.tile(x, (21, 1))
x2 = x1.flatten()
y = mlab.stride_windows(x2, n, axis=axis)
if axis == 0:
x1 = x1.T
assert y.shape == x1.shape
assert_array_equal(y, x1)
def test_stride_ensure_integer_type(self):
N = 100
x = np.full(N + 20, np.nan)
y = x[10:-10]
y[:] = 0.3
# previous to #3845 lead to corrupt access
y_strided = mlab.stride_windows(y, n=33, noverlap=0.6)
assert_array_equal(y_strided, 0.3)
# previous to #3845 lead to corrupt access
y_strided = mlab.stride_windows(y, n=33.3, noverlap=0)
assert_array_equal(y_strided, 0.3)
# even previous to #3845 could not find any problematic
# configuration however, let's be sure it's not accidentally
# introduced
y_strided = _stride_repeat(y, n=33.815)
assert_array_equal(y_strided, 0.3)
def _apply_window(*args, **kwargs):
with pytest.warns(MatplotlibDeprecationWarning):
return mlab.apply_window(*args, **kwargs)
class TestWindow:
def setup(self):
np.random.seed(0)
n = 1000
self.sig_rand = np.random.standard_normal(n) + 100.
self.sig_ones = np.ones(n)
def check_window_apply_repeat(self, x, window, NFFT, noverlap):
"""
This is an adaptation of the original window application algorithm.
This is here to test to make sure the new implementation has the same
result.
"""
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
result = np.zeros((NFFT, n))
if np.iterable(window):
windowVals = window
else:
windowVals = window(np.ones(NFFT, x.dtype))
# do the ffts of the slices
for i in range(n):
result[:, i] = windowVals * x[ind[i]:ind[i]+NFFT]
return result
def test_window_none_rand(self):
res = mlab.window_none(self.sig_ones)
assert_array_equal(res, self.sig_ones)
def test_window_none_ones(self):
res = mlab.window_none(self.sig_rand)
assert_array_equal(res, self.sig_rand)
def test_window_hanning_rand(self):
targ = np.hanning(len(self.sig_rand)) * self.sig_rand
res = mlab.window_hanning(self.sig_rand)
assert_allclose(targ, res, atol=1e-06)
def test_window_hanning_ones(self):
targ = np.hanning(len(self.sig_ones))
res = mlab.window_hanning(self.sig_ones)
assert_allclose(targ, res, atol=1e-06)
def test_apply_window_1D_axis1_ValueError(self):
x = self.sig_rand
window = mlab.window_hanning
with pytest.raises(ValueError):
_apply_window(x, window, axis=1, return_window=False)
def test_apply_window_1D_els_wrongsize_ValueError(self):
x = self.sig_rand
window = mlab.window_hanning(np.ones(x.shape[0]-1))
with pytest.raises(ValueError):
_apply_window(x, window)
def test_apply_window_0D_ValueError(self):
x = np.array(0)
window = mlab.window_hanning
with pytest.raises(ValueError):
_apply_window(x, window, axis=1, return_window=False)
def test_apply_window_3D_ValueError(self):
x = self.sig_rand[np.newaxis][np.newaxis]
window = mlab.window_hanning
with pytest.raises(ValueError):
_apply_window(x, window, axis=1, return_window=False)
def test_apply_window_hanning_1D(self):
x = self.sig_rand
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = _apply_window(x, window, return_window=True)
yt = window(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_1D_axis0(self):
x = self.sig_rand
window = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = window(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els_1D_axis0(self):
x = self.sig_rand
window = mlab.window_hanning(np.ones(x.shape[0]))
window1 = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = window1(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window(x[:, i])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els1_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning(np.ones(x.shape[0]))
window1 = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window1(x[:, i])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els2_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = _apply_window(x, window, axis=0, return_window=True)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window1*x[:, i]
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_els3_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = _apply_window(x, window, axis=0, return_window=True)
yt = _apply_window(x, window1, axis=0, return_window=False)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_2D_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
y = _apply_window(x, window, axis=1, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window(x[i, :])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_els1_axis1(self):
        x = np.random.standard_normal([10, 1000]) + 100.
"""Computes recombination rate
"""
import os
import os.path as osp
import numpy as np
import matplotlib.pyplot as plt
import pathlib
class RecRate(object):
"""Class to compute Badnell (radiative/dielectronic) recombination rates,
Draine (2011)'s recombination rates
"""
def __init__(self):
# read data
self._read_data()
def _read_data(self):
basedir = osp.join(pathlib.Path(__file__).parent.absolute(),
'../../data/microphysics')
self.fname_dr_C = os.path.join(basedir, 'badnell_dr_C.dat')
self.fname_dr_E = os.path.join(basedir, 'badnell_dr_E.dat')
self.fname_rr = os.path.join(basedir, 'badnell_rr.dat')
# Read dielectronic recombination rate data
with open(self.fname_dr_C, 'r') as fp:
lines1 = fp.readlines()
with open(self.fname_dr_E, 'r') as fp:
lines2 = fp.readlines()
i0 = 4
nline = len(lines1) - i0
if len(lines1) != len(lines2):
            raise ValueError('Check data file (lines1, lines2) = {0:d}, {1:d}'
                             .format(len(lines1), len(lines2)))
self.Zd = np.zeros(nline, dtype='uint8')
self.Nd = np.zeros(nline, dtype='uint8')
self.Md = np.zeros(nline, dtype='uint8')
self.Wd = np.zeros(nline, dtype='uint8')
self.Cd = np.zeros((nline, 9))
self.Ed = np.zeros((nline, 9))
self.nd = np.zeros(nline, dtype='uint8')
for i, (l1, l2) in enumerate(zip(lines1[i0:i0 + nline],
lines2[i0:i0 + nline])):
l1 = l1.split()
l2 = l2.split()
# Make sure that Z, N, M, W all match
if int(l1[0]) == int(l2[0]) and \
int(l1[1]) == int(l2[1]) and \
int(l1[2]) == int(l2[2]) and \
int(l1[3]) == int(l2[3]):
self.Zd[i] = int(l1[0])
self.Nd[i] = int(l1[1])
self.Md[i] = int(l1[2])
self.Wd[i] = int(l1[3])
for j, l1_ in enumerate(l1[4:]):
self.Cd[i, j] = float(l1_)
for j, l2_ in enumerate(l2[4:]):
self.Ed[i, j] = float(l2_)
self.nd[i] = j + 1
else:
print("Columns do not match!")
raise
del lines1, lines2
# Read radiative recombination rate data
with open(self.fname_rr, 'r') as fp:
lines = fp.readlines()
i0 = 4
nline = len(lines) - i0
self.Zr = np.zeros(nline, dtype='uint8')
self.Nr = np.zeros(nline, dtype='uint8')
self.Mr = np.zeros(nline, dtype='uint8')
self.Wr = np.zeros(nline, dtype='uint8')
self.Ar = np.zeros(nline)
self.Br = np.zeros(nline)
self.T0r = np.zeros(nline)
self.T1r = np.zeros(nline)
self.Cr = np.zeros(nline)
self.T2r = np.zeros(nline)
# Use modifed B for low-charge ions
self.modr = np.zeros(nline, dtype='bool')
for i, l1 in enumerate(lines[i0:i0 + nline]):
l1 = l1.split()
self.Zr[i] = int(l1[0])
self.Nr[i] = int(l1[1])
self.Mr[i] = int(l1[2])
self.Wr[i] = int(l1[3])
self.Ar[i] = float(l1[4])
self.Br[i] = float(l1[5])
self.T0r[i] = float(l1[6])
self.T1r[i] = float(l1[7])
try:
self.Cr[i] = float(l1[8])
self.T2r[i] = float(l1[9])
self.modr[i] = True
except IndexError:
self.modr[i] = False
def get_rr_rate(self, Z, N, T, M=1):
"""
Calculate radiative recombination rate coefficient
Parameters
----------
Z : int
Nuclear Charge
N : int
Number of electrons of the initial target ion
T : array of floats
Temperature [K]
M : int
Initial metastable levels (M=1 for the ground state) of the ground
            and metastable terms. The default value is 1.
Returns
-------
rr: array of floats
Radiative recombination coefficients [cm^3 s^-1]
"""
c1 = self.Zr == Z
c2 = self.Nr == N
c3 = self.Mr == M
idx = np.where(c1 & c2 & c3)
i = idx[0][0]
sqrtTT0 = np.sqrt(T/self.T0r[i])
sqrtTT1 = np.sqrt(T/self.T1r[i])
if self.modr[i]:
B = self.Br[i] + self.Cr[i]*np.exp(-self.T2r[i]/T)
else:
B = self.Br[i]
rr = self.Ar[i] / (sqrtTT0 * (1.0 + sqrtTT0)**(1.0 - B) * \
(1.0 + sqrtTT1)**(1.0 + B))
return rr
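    # The code above evaluates the Badnell radiative-recombination fit
    #   alpha_RR(T) = A / [ sqrt(T/T0) (1 + sqrt(T/T0))^(1-B) (1 + sqrt(T/T1))^(1+B) ],
    # with B made temperature dependent for low-charge ions. Usage sketch
    # (any (Z, N, M) combination present in the Badnell data files will work):
    #
    #     rec = RecRate()
    #     alpha_rr = rec.get_rr_rate(Z, N, np.logspace(3, 6))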
def get_dr_rate(self, Z, N, T, M=1):
"""
Calculate dielectronic recombination rate coefficient
Parameters
----------
Z : int
Nuclear Charge
N : int
Number of electrons of the initial target ion (before recombination)
T : array of floats
Temperature [K]
M : int
Initial metastable levels (M=1 for the ground state) of the ground
            and metastable terms. The default value is 1.
Returns
-------
rr: array of floats
Dielectronic recombination coefficients [cm^3 s^-1]
"""
c1 = self.Zd == Z
c2 = self.Nd == N
c3 = self.Md == M
idx = np.where(c1 & c2 & c3)
i = idx[0][0]
dr = 0.0
for m in range(self.nd[i]):
dr += self.Cd[i, m]*np.exp(-self.Ed[i, m]/T)
dr *= T**(-1.5)
return dr
def get_rec_rate(self, Z, N, T, M=1, kind='badnell'):
"""
Calculate radiative + dielectronic recombination rate coefficient
Parameters
----------
Z : int
Nuclear Charge
N : int
Number of electrons of the initial target ion (before recombination)
T : array of floats
Temperature [K]
M : int
Initial metastable levels (M=1 for the ground state) of the ground
            and metastable terms. The default value is 1.
kind : str
Set to 'badnell' to use fits Badnell fits or 'dr11' to use
Draine (2011)'s formula.
Returns
-------
rrate: array of floats
Recombination rate coefficient [cm^3 s^-1]
"""
if kind == 'badnell':
if Z == 1: # No dielectronic recombination
return self.get_rr_rate(Z, N, T, M=M)
else:
return self.get_rr_rate(Z, N, T, M=M) + \
self.get_dr_rate(Z, N, T, M=M)
elif kind == 'dr11':
if Z == 1:
return self.get_rec_rate_H_caseA(T)
else:
                raise ValueError('Z > 1 is not supported for dr11 recombination rate.')
@staticmethod
def get_rec_rate_H_caseA(T):
"""Compute case A recombination rate coefficient for H
Table 14.1 in Draine (2011)
"""
T4 = T*1e-4
return 4.13e-13*T4**(-0.7131 - 0.0115*np.log(T4))
@staticmethod
def get_rec_rate_H_caseB(T):
"""Compute case B recombination rate coefficient for H
Table 14.1 in Draine (2011)
"""
T4 = T*1e-4
return 2.54e-13*T4**(-0.8163 - 0.0208*np.log(T4))
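    # Worked check of the fit above: at T = 1e4 K we have T4 = 1, the base of the
    # power law is unity, and alpha_B = 2.54e-13 cm^3 s^-1, the familiar case B value:
    #
    #     RecRate.get_rec_rate_H_caseB(1.0e4)   # -> 2.54e-13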
@staticmethod
def get_alpha_gr(T, psi, Z):
# Parameters for Fit (14.37) to Grain Recombination Rate coefficients
# alpha_gr(X +) for selected ions. (Draine 2011)
C = dict()
C['H'] = np.array([12.25, 8.074e-6, 1.378, 5.087e2, 1.586e-2, 0.4723, 1.102e-5])
C['He']= np.array([5.572, 3.185e-7, 1.512, 5.115e3, 3.903e-7, 0.4956, 5.494e-7])
C['C'] = np.array([45.58, 6.089e-3, 1.128, 4.331e2, 4.845e-2, 0.8120, 1.333e-4])
C['Mg']= np.array([2.510, 8.116e-8, 1.864, 6.170e4, 2.169e-6, 0.9605, 7.232e-5])
C['S'] = np.array([3.064, 7.769e-5, 1.319, 1.087e2, 3.475e-1, 0.4790, 4.689e-2])
C['Ca']= np.array([1.636, 8.208e-9, 2.289, 1.254e5, 1.349e-9, 1.1506, 7.204e-4])
if Z == 1:
e = 'H'
elif Z == 2:
e = 'He'
elif Z == 6:
e = 'C'
elif Z == 12:
e = 'Mg'
elif Z == 16:
e = 'S'
elif Z == 20:
e = 'Ca'
return 1e-14*C[e][0]/(1.0 + C[e][1]*psi**C[e][2]*\
(1.0 + C[e][3]*T**C[e][4]*psi**(-C[e][5]-C[e][6]*np.log(T))))
@staticmethod
def get_rec_rate_grain(ne, G0, T, Z):
"""Compute grain assisted recombination coefficient
Ch 14.8 in Draine (2011)
"""
psi = G0*T**0.5/ne
return RecRate.get_alpha_gr(T, psi, Z)
def plt_rec_rate(self, Z, N, M=1):
        T = np.logspace(3, 6)
#!/usr/bin/env python3
import numpy as np
import re
from pkg_resources import resource_filename
from ..num.num_input import Num_input
from directdm.run import rge
#-----------------------#
# Conventions and Basis #
#-----------------------#
# The basis of operators in the DM-SM sector below the weak scale (5-flavor EFT) is given by
# dim.5 (2 operators)
#
# 'C51', 'C52',
# dim.6 (32 operators)
#
# 'C61u', 'C61d', 'C61s', 'C61c', 'C61b', 'C61e', 'C61mu', 'C61tau',
# 'C62u', 'C62d', 'C62s', 'C62c', 'C62b', 'C62e', 'C62mu', 'C62tau',
# 'C63u', 'C63d', 'C63s', 'C63c', 'C63b', 'C63e', 'C63mu', 'C63tau',
# 'C64u', 'C64d', 'C64s', 'C64c', 'C64b', 'C64e', 'C64mu', 'C64tau',
# dim.7 (129 operators)
#
# 'C71', 'C72', 'C73', 'C74',
# 'C75u', 'C75d', 'C75s', 'C75c', 'C75b', 'C75e', 'C75mu', 'C75tau',
# 'C76u', 'C76d', 'C76s', 'C76c', 'C76b', 'C76e', 'C76mu', 'C76tau',
# 'C77u', 'C77d', 'C77s', 'C77c', 'C77b', 'C77e', 'C77mu', 'C77tau',
# 'C78u', 'C78d', 'C78s', 'C78c', 'C78b', 'C78e', 'C78mu', 'C78tau',
# 'C79u', 'C79d', 'C79s', 'C79c', 'C79b', 'C79e', 'C79mu', 'C79tau',
# 'C710u', 'C710d', 'C710s', 'C710c', 'C710b', 'C710e', 'C710mu', 'C710tau',
# 'C711', 'C712', 'C713', 'C714',
# 'C715u', 'C715d', 'C715s', 'C715c', 'C715b', 'C715e', 'C715mu', 'C715tau',
# 'C716u', 'C716d', 'C716s', 'C716c', 'C716b', 'C716e', 'C716mu', 'C716tau',
# 'C717u', 'C717d', 'C717s', 'C717c', 'C717b', 'C717e', 'C717mu', 'C717tau',
# 'C718u', 'C718d', 'C718s', 'C718c', 'C718b', 'C718e', 'C718mu', 'C718tau',
# 'C719u', 'C719d', 'C719s', 'C719c', 'C719b', 'C719e', 'C719mu', 'C719tau',
# 'C720u', 'C720d', 'C720s', 'C720c', 'C720b', 'C720e', 'C720mu', 'C720tau',
# 'C721u', 'C721d', 'C721s', 'C721c', 'C721b', 'C721e', 'C721mu', 'C721tau',
# 'C722u', 'C722d', 'C722s', 'C722c', 'C722b', 'C722e', 'C722mu', 'C722tau',
# 'C723u', 'C723d', 'C723s', 'C723c', 'C723b', 'C723e', 'C723mu', 'C723tau',
# 'C725',
# dim.8 (12 operators)
#
# 'C81u', 'C81d', 'C81s', 'C82u', 'C82d', 'C82s'
# 'C83u', 'C83d', 'C83s', 'C84u', 'C84d', 'C84s'
# In total, we have 2+32+129+12=175 operators.
# In total, we have 2+32+129=163 operators w/o dim.8.
#-----------------------------#
# The QED anomalous dimension #
#-----------------------------#
def ADM_QED(nf):
""" Return the QED anomalous dimension in the DM-SM sector for nf flavor EFT """
Qu = 2/3
Qd = -1/3
Qe = -1
nc = 3
gamma_QED = np.array([[8/3*Qu*Qu*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qu*nc,\
8/3*Qu*Qd*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc],
[8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc,\
8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],
[8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc,\
8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],
[8/3*Qu*Qu*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qu*nc,\
8/3*Qu*Qd*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc],
[8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc,\
8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],
[8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu,\
8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe],
[8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu,\
8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe],
[8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu,\
8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe]])
gamma_QED_1 = np.zeros((2,163))
gamma_QED_2 = np.hstack((np.zeros((8,2)),gamma_QED,np.zeros((8,153))))
gamma_QED_3 = np.hstack((np.zeros((8,10)),gamma_QED,np.zeros((8,145))))
gamma_QED_4 = np.zeros((145,163))
gamma_QED = np.vstack((gamma_QED_1, gamma_QED_2, gamma_QED_3, gamma_QED_4))
if nf == 5:
return gamma_QED
elif nf == 4:
return np.delete(np.delete(gamma_QED, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 0)\
, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 1)
elif nf == 3:
return np.delete(np.delete(gamma_QED, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 0)\
, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 1)
else:
raise Exception("nf has to be 3, 4 or 5")
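# Quick dimensional sanity check (a sketch, not executed by the module): in the
# 5-flavor EFT the DM-SM basis listed at the top of this file has
# 2 + 32 + 129 = 163 operators, so
#
#     ADM_QED(5).shape == (163, 163)
#
# while the nf = 4 and nf = 3 cases delete the b-quark and (b,c)-quark rows/columns.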
def ADM_QED2(nf):
""" Return the QED anomalous dimension in the DM-SM sector for nf flavor EFT at alpha^2 """
# Mixing of Q_{11}^(7) into Q_{5,f}^(7) and Q_{12}^(7) into Q_{6,f}^(7), adapted from Hill et al. [1409.8290].
gamma_gf = -8
gamma_QED2_gf = np.array([5*[gamma_gf]])
gamma_QED2_1 = np.zeros((86,163))
gamma_QED2_2 = np.hstack((np.zeros((1,38)),gamma_QED2_gf,np.zeros((1,120))))
gamma_QED2_3 = np.hstack((np.zeros((1,46)),gamma_QED2_gf,np.zeros((1,112))))
gamma_QED2_4 = np.zeros((75,163))
gamma_QED2 = np.vstack((gamma_QED2_1, gamma_QED2_2, gamma_QED2_3, gamma_QED2_4))
if nf == 5:
return gamma_QED2
elif nf == 4:
return np.delete(np.delete(gamma_QED2, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 0)\
, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 1)
elif nf == 3:
return np.delete(np.delete(gamma_QED2, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 0)\
, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 1)
else:
raise Exception("nf has to be 3, 4 or 5")
#------------------------------#
# The QCD anomalous dimensions #
#------------------------------#
def ADM_QCD(nf):
""" Return the QCD anomalous dimension in the DM-SM sector for nf flavor EFT, when ADM starts at O(alphas) """
gamma_QCD_T = 32/3 * np.eye(5)
gt2qq = 64/9
gt2qg = -4/3
gt2gq = -64/9
gt2gg = 4/3*nf
gamma_twist2 = np.array([[gt2qq, 0, 0, 0, 0, 0, 0, 0, gt2qg],
[0, gt2qq, 0, 0, 0, 0, 0, 0, gt2qg],
[0, 0, gt2qq, 0, 0, 0, 0, 0, gt2qg],
[0, 0, 0, gt2qq, 0, 0, 0, 0, gt2qg],
[0, 0, 0, 0, gt2qq, 0, 0, 0, gt2qg],
[0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[gt2gq, gt2gq, gt2gq, gt2gq, gt2gq, 0, 0, 0, gt2gg]])
gamma_QCD_1 = np.zeros((70,163))
gamma_QCD_2 = np.hstack((np.zeros((5,70)), gamma_QCD_T, np.zeros((5,88))))
gamma_QCD_3 = np.zeros((3,163))
gamma_QCD_4 = np.hstack((np.zeros((5,78)), gamma_QCD_T, np.zeros((5,80))))
gamma_QCD_5 = np.zeros((71,163))
gamma_QCD_6 = np.hstack((np.zeros((9,154)), gamma_twist2))
gamma_QCD = [np.vstack((gamma_QCD_1, gamma_QCD_2, gamma_QCD_3,\
gamma_QCD_4, gamma_QCD_5, gamma_QCD_6))]
if nf == 5:
return gamma_QCD
elif nf == 4:
return np.delete(np.delete(gamma_QCD, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 1)\
, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 2)
elif nf == 3:
return np.delete(np.delete(gamma_QCD, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 1)\
, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 2)
else:
raise Exception("nf has to be 3, 4 or 5")
def ADM_QCD2(nf):
# CHECK ADM #
""" Return the QCD anomalous dimension in the DM-SM sector for nf flavor EFT, when ADM starts at O(alphas^2) """
# Mixing of Q_1^(7) into Q_{5,q}^(7) and Q_2^(7) into Q_{6,q}^(7), from Hill et al. [1409.8290].
# Note that we have different prefactors and signs.
cf = 4/3
gamma_gq = 8*cf # changed 2019-08-29, double check with RG solution
# Mixing of Q_3^(7) into Q_{7,q}^(7) and Q_4^(7) into Q_{8,q}^(7), from Hill et al. [1409.8290].
# Note that we have different prefactors and signs.
gamma_5gq = -8 # changed 2019-08-29, double check with RG solution
gamma_QCD2_gq = np.array([5*[gamma_gq]])
gamma_QCD2_5gq = np.array([5*[gamma_5gq]])
gamma_QCD2_1 = np.zeros((34,163))
gamma_QCD2_2 = np.hstack((np.zeros((1,38)),gamma_QCD2_gq,np.zeros((1,120))))
gamma_QCD2_3 = np.hstack((np.zeros((1,46)),gamma_QCD2_gq,np.zeros((1,112))))
gamma_QCD2_4 = np.hstack((np.zeros((1,54)),gamma_QCD2_5gq,np.zeros((1,104))))
gamma_QCD2_5 = np.hstack((np.zeros((1,62)),gamma_QCD2_5gq,np.zeros((1,96))))
gamma_QCD2_6 = np.zeros((125,163))
gamma_QCD2 = [np.vstack((gamma_QCD2_1, gamma_QCD2_2, gamma_QCD2_3,\
gamma_QCD2_4, gamma_QCD2_5, gamma_QCD2_6))]
if nf == 5:
return gamma_QCD2
elif nf == 4:
return np.delete(np.delete(gamma_QCD2, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 1)\
, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 2)
elif nf == 3:
return np.delete(np.delete(gamma_QCD2, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 1)\
, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 2)
else:
raise Exception("nf has to be 3, 4 or 5")
def ADM5(Ychi, dchi):
""" The dimension-five anomalous dimension
Return a numpy array with the anomalous dimension matrices for g1, g2, g3, and yt
The Higgs self coupling lambda is currently ignored.
Variables
---------
Ychi: The DM hypercharge, defined via the Gell-Mann - Nishijima relation Q = I_W^3 + Ychi/2.
dchi: The dimension of the electroweak SU(2) representation furnished by the DM multiplet.
"""
jj1 = (dchi**2-1)/4
# The beta functions for one multiplet
b1 = - 41/6 - Ychi**2 * dchi/3
b2 = 19/6 - 4*jj1*dchi/9
adm5_g1 = np.array([[5/2*Ychi**2-2*b1, 0, -6*Ychi, 0, 0, 0, 0, 0],
[-4*Ychi*jj1, Ychi**2/2, 0, 12*Ychi, 0, 0, 0, 0],
[0, 0, -3/2*(1+Ychi**2), 0, 0, 0, 0, 0],
[0, 0, 0, -3/2*(1+Ychi**2), 0, 0, 0, 0],
[0, 0, 0, 0, 5/2*Ychi**2-2*b1, 0, -6*Ychi, 0],
[0, 0, 0, 0, -4*Ychi*jj1, Ychi**2/2, 0, 12*Ychi],
[0, 0, 0, 0, 0, 0, -3/2*(1+Ychi**2), 0],
[0, 0, 0, 0, 0, 0, 0, -3/2*(1+Ychi**2)]])
adm5_g2 = np.array([[2*jj1, -4*Ychi, 0, -24, 0, 0, 0, 0],
[0, (10*jj1-8)-2*b2, 12*jj1, 0, 0, 0, 0, 0],
[0, 0, (-9/2-6*jj1), 0, 0, 0, 0, 0],
[0, 0, 0, (3/2-6*jj1), 0, 0, 0, 0],
[0, 0, 0, 0, 2*jj1, -4*Ychi, 0, -24],
[0, 0, 0, 0, 0, (10*jj1-8)-2*b2, 12*jj1, 0],
[0, 0, 0, 0, 0, 0, (-9/2-6*jj1), 0],
[0, 0, 0, 0, 0, 0, 0, (3/2-6*jj1)]])
adm5_g3 = np.zeros((8,8))
adm5_yc = np.diag([0,0,6,6,0,0,6,6])
adm5_ytau = np.diag([0,0,2,2,0,0,2,2])
adm5_yb = np.diag([0,0,6,6,0,0,6,6])
adm5_yt = np.diag([0,0,6,6,0,0,6,6])
adm5_lam = np.diag([0,0,3,1,0,0,3,1])
full_adm = np.array([adm5_g1, adm5_g2, adm5_g3, adm5_yc, adm5_ytau, adm5_yb, adm5_yt, adm5_lam])
if dchi == 1:
return np.delete(np.delete(full_adm, [1,3,5,7], 1), [1,3,5,7], 2)
else:
return full_adm
def ADM6(Ychi, dchi):
""" The dimension-five anomalous dimension
Return a numpy array with the anomalous dimension matrices for g1, g2, g3, ytau, yb, and yt
The running due to the Higgs self coupling lambda is currently ignored.
The operator basis is Q1-Q14 1st, 2nd, 3rd gen.; S1-S17 (mixing of gen: 1-1, 2-2, 3-3, 1-2, 1-3, 2-3),
S18-S24 1st, 2nd, 3rd gen., S25; D1-D4.
The explicit ordering of the operators, including flavor indices, is contained in the file
"directdm/run/operator_ordering.txt"
Variables
---------
Ychi: The DM hypercharge, defined via the Gell-Mann - Nishijima relation Q = I_W^3 + Ychi/2.
dchi: The dimension of the electroweak SU(2) representation furnished by the DM multiplet.
"""
scope = locals()
def load_adm(admfile):
with open(admfile, "r") as f:
adm = []
for line in f:
line = re.sub("\n", "", line)
line = line.split(",")
adm.append(list(map(lambda x: eval(x, scope), line)))
return adm
admg1 = load_adm(resource_filename("directdm", "run/full_adm_g1.py"))
admg2 = load_adm(resource_filename("directdm", "run/full_adm_g2.py"))
admg3 = np.zeros((207,207))
admyc = load_adm(resource_filename("directdm", "run/full_adm_yc.py"))
admytau = load_adm(resource_filename("directdm", "run/full_adm_ytau.py"))
admyb = load_adm(resource_filename("directdm", "run/full_adm_yb.py"))
admyt = load_adm(resource_filename("directdm", "run/full_adm_yt.py"))
admlam = np.zeros((207,207))
full_adm = np.array([np.array(admg1), np.array(admg2), admg3,\
np.array(admyc), np.array(admytau), np.array(admyb),\
np.array(admyt), np.array(admlam)])
if dchi == 1:
return np.delete(np.delete(full_adm, [0, 4, 8, 11, 14, 18, 22, 25, 28, 32, 36, 39,\
42, 44, 205, 206], 1),\
[0, 4, 8, 11, 14, 18, 22, 25, 28, 32, 36, 39,\
42, 44, 205, 206], 2)
else:
return full_adm
def ADM_QCD_dim8(nf):
""" Return the QCD anomalous dimension in the DM-SM sector at dim.8, for nf flavor EFT """
beta0 = rge.QCD_beta(nf, 1).trad()
gammam0 = rge.QCD_gamma(nf, 1).trad()
ADM8 = 2*(gammam0 - beta0) * np.eye(12)
return ADM8
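# Shape sketch (illustrative, not executed by the module): the dim.8 running is a
# single multiplicative factor, so the returned matrix is 2*(gamma_m0 - beta_0)
# times the 12 x 12 identity, one entry per dim.8 Wilson coefficient:
#
#     ADM_QCD_dim8(5).shape   # -> (12, 12)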
def ADM_SM_QCD(nf):
""" Return the QCD anomalous dimension in the SM-SM sector for nf flavor EFT, for a subset of SM dim.6 operators
The basis is spanned by a subset of 10*8 + 5*4 = 100 SM operators, with Wilson coefficients
['P61ud', 'P62ud', 'P63ud', 'P63du', 'P64ud', 'P65ud', 'P66ud', 'P66du',
'P61us', 'P62us', 'P63us', 'P63su', 'P64us', 'P65us', 'P66us', 'P66su',
'P61uc', 'P62uc', 'P63uc', 'P63cu', 'P64uc', 'P65uc', 'P66uc', 'P66cu',
'P61ub', 'P62ub', 'P63ub', 'P63bu', 'P64ub', 'P65ub', 'P66ub', 'P66bu',
'P61ds', 'P62ds', 'P63ds', 'P63sd', 'P64ds', 'P65ds', 'P66ds', 'P66sd',
'P61dc', 'P62dc', 'P63dc', 'P63cd', 'P64dc', 'P65dc', 'P66dc', 'P66cd',
'P61db', 'P62db', 'P63db', 'P63bd', 'P64db', 'P65db', 'P66db', 'P66bd',
'P61sc', 'P62sc', 'P63sc', 'P63cs', 'P64sc', 'P65sc', 'P66sc', 'P66cs',
'P61sb', 'P62sb', 'P63sb', 'P63bs', 'P64sb', 'P65sb', 'P66sb', 'P66bs',
'P61cb', 'P62cb', 'P63cb', 'P63bc', 'P64cb', 'P65cb', 'P66cb', 'P66bc',
'P61u', 'P62u', 'P63u', 'P64u',
'P61d', 'P62d', 'P63d', 'P64d',
'P61s', 'P62s', 'P63s', 'P64s',
'P61c', 'P62c', 'P63c', 'P64c',
'P61b', 'P62b', 'P63b', 'P64b']
"""
adm_qqp_qqp = np.array([[0, 0, 0, 0, 0, 12, 0, 0],
[0, 0, 0, 0, 12, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 12],
[0, 0, 0, 0, 0, 0, 12, 0],
[0, 8/3, 0, 0, - 19/3, 5, 0, 0],
[8/3, 0, 0, 0, 5, - 9, 0, 0],
[0, 0, 0, 8/3, 0, 0, - 23/3, 5],
[0, 0, 8/3, 0, 0, 0, 5, - 23/3]])
adm_qqp_qqpp = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 4/3, 0],
[0, 0, 0, 0, 0, 0, 0, 0]])
adm_qpq_qppq = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 4/3]])
adm_qqp_qppq = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 4/3],
[0, 0, 0, 0, 0, 0, 0, 0]])
adm_qpq_qqpp = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 4/3, 0]])
adm_q_q = np.array([[4, 4, 0, - 28/3],
[0, 0, 0, 44/3],
[0, 0, 44/9, 0],
[5/3, 13/3, 0, - 106/9]])
adm_qqp_q = np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 4/3],
[0, 0, 0, 0],
[0, 0, 4/9, 0],
[0, 0, 0, 0]])
adm_qpq_q = np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 4/3],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 4/9, 0]])
adm_q_qqp = np.array([[0, 0, 0, 0, 8/3, 0, 0, 0],
[0, 0, 0, 0, 8/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 8/3, 0],
[0, 0, 0, 0, 20/9, 0, 0, 0]])
adm_q_qpq = np.array([[0, 0, 0, 0, 8/3, 0, 0, 0],
[0, 0, 0, 0, 8/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 8/3],
[0, 0, 0, 0, 20/9, 0, 0, 0]])
adm_ud = np.hstack((adm_qqp_qqp, adm_qqp_qqpp, adm_qqp_qqpp,\
adm_qqp_qqpp, adm_qpq_qqpp, adm_qpq_qqpp,\
adm_qpq_qqpp, np.zeros((8, 24)), adm_qqp_q,\
adm_qpq_q, np.zeros((8,12))))
adm_us = np.hstack((adm_qqp_qqpp, adm_qqp_qqp, adm_qqp_qqpp,\
adm_qqp_qqpp, adm_qpq_qppq, np.zeros((8,16)),\
adm_qpq_qqpp, adm_qpq_qqpp, np.zeros((8, 8)),\
adm_qqp_q, np.zeros((8,4)), adm_qpq_q, np.zeros((8,8))))
adm_uc = np.hstack((adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqp,\
adm_qqp_qqpp, np.zeros((8,8)), adm_qpq_qppq,\
np.zeros((8,8)), adm_qpq_qppq, np.zeros((8, 8)),\
adm_qpq_qqpp, adm_qqp_q, np.zeros((8,8)),\
adm_qpq_q, np.zeros((8,4))))
adm_ub = np.hstack((adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqpp,\
adm_qqp_qqp, np.zeros((8,16)), adm_qpq_qppq,\
np.zeros((8,8)), adm_qpq_qppq, adm_qpq_qppq,\
adm_qqp_q, np.zeros((8,12)), adm_qpq_q))
adm_ds = np.hstack((adm_qqp_qppq, adm_qpq_qppq, np.zeros((8,16)),\
adm_qqp_qqp, adm_qqp_qqpp, adm_qqp_qqpp,\
adm_qpq_qqpp, adm_qpq_qqpp, np.zeros((8,8)),\
np.zeros((8,4)), adm_qqp_q, adm_qpq_q, np.zeros((8,8))))
adm_dc = np.hstack((adm_qqp_qppq, np.zeros((8,8)), adm_qpq_qppq,\
np.zeros((8,8)), adm_qqp_qqpp, adm_qqp_qqp, adm_qqp_qqpp,\
adm_qpq_qppq, np.zeros((8,8)), adm_qpq_qqpp,\
np.zeros((8,4)), adm_qqp_q, np.zeros((8,4)),\
adm_qpq_q, np.zeros((8,4))))
adm_db = np.hstack((adm_qqp_qppq, np.zeros((8,16)), adm_qpq_qppq,\
adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqp,\
np.zeros((8,8)), adm_qpq_qppq, adm_qpq_qppq,\
np.zeros((8,4)), adm_qqp_q, np.zeros((8,8)), adm_qpq_q))
adm_sc = np.hstack((np.zeros((8,8)), adm_qqp_qppq, adm_qpq_qppq,\
np.zeros((8,8)), adm_qqp_qppq, adm_qpq_qppq, np.zeros((8,8)),\
adm_qqp_qqp, adm_qqp_qqpp, adm_qpq_qqpp, np.zeros((8,8)),\
adm_qqp_q, adm_qpq_q, np.zeros((8,4))))
adm_sb = np.hstack((np.zeros((8,8)), adm_qqp_qppq, np.zeros((8,8)),\
adm_qpq_qppq, adm_qqp_qppq, np.zeros((8,8)), adm_qpq_qppq,\
adm_qqp_qqpp, adm_qqp_qqp, adm_qpq_qppq, np.zeros((8,8)),\
adm_qqp_q, np.zeros((8,4)), adm_qpq_q))
adm_cb = np.hstack((np.zeros((8,16)), adm_qqp_qppq, adm_qpq_qppq,\
np.zeros((8,8)), adm_qqp_qppq, adm_qpq_qppq,\
adm_qqp_qppq, adm_qpq_qppq, adm_qqp_qqp,\
np.zeros((8,12)), adm_qqp_q, adm_qpq_q))
adm_u = np.hstack((adm_q_qqp, adm_q_qqp, adm_q_qqp, adm_q_qqp,\
np.zeros((4,48)), adm_q_q, np.zeros((4,16))))
adm_d = np.hstack((adm_q_qpq, np.zeros((4,24)), adm_q_qqp, adm_q_qqp,\
adm_q_qqp, np.zeros((4,24)), np.zeros((4,4)),\
adm_q_q, np.zeros((4,12))))
adm_s = np.hstack((np.zeros((4,8)), adm_q_qpq, np.zeros((4,16)),\
adm_q_qpq, np.zeros((4,16)), adm_q_qqp,\
adm_q_qqp, np.zeros((4,8)),\
np.zeros((4,8)), adm_q_q, np.zeros((4,8))))
adm_c = np.hstack((np.zeros((4,16)), adm_q_qpq, np.zeros((4,16)),\
adm_q_qpq, np.zeros((4,8)),\
adm_q_qpq, np.zeros((4,8)), adm_q_qqp,\
np.zeros((4,12)), adm_q_q, np.zeros((4,4))))
adm_b = np.hstack((np.zeros((4,24)), adm_q_qpq, np.zeros((4,16)),\
adm_q_qpq, np.zeros((4,8)), adm_q_qpq,\
adm_q_qpq, np.zeros((4,16)), adm_q_q))
adm = np.vstack((adm_ud, adm_us, adm_uc, adm_ub, adm_ds,\
adm_dc, adm_db, adm_sc, adm_sb, adm_cb,\
adm_u, adm_d, adm_s, adm_c, adm_b))
if nf == 5:
return adm
elif nf == 4:
return np.delete(np.delete(adm, np.r_[np.s_[24:32], np.s_[48:56],\
np.s_[64:80], np.s_[96:100]], 0),\
np.r_[np.s_[24:32], np.s_[48:56],\
np.s_[64:80], np.s_[96:100]], 1)
else:
raise Exception("nf has to be 4 or 5")
def ADT_QCD(nf, input_dict=None):
""" Return the QCD anomalous dimension tensor for nf flavor EFT,
for double insertions of DM-SM and SM-SM operators
Our basis of operators below the electroweak scale includes a set of 12 dimension-eight operators,
with Wilson coefficients for Dirac DM
['C81u', 'C81d', 'C81s', 'C82u', 'C82d', 'C82s', 'C83u', 'C83d', 'C83s', 'C84u', 'C84d', 'C84s']
and by a subset of 10*8 = 80 SM operators, with Wilson coefficients
['P61ud', 'P62ud', 'P63ud', 'P63du', 'P64ud', 'P65ud', 'P66ud', 'P66du',
'P61us', 'P62us', 'P63us', 'P63su', 'P64us', 'P65us', 'P66us', 'P66su',
'P61uc', 'P62uc', 'P63uc', 'P63cu', 'P64uc', 'P65uc', 'P66uc', 'P66cu',
'P61ub', 'P62ub', 'P63ub', 'P63bu', 'P64ub', 'P65ub', 'P66ub', 'P66bu',
'P61ds', 'P62ds', 'P63ds', 'P63sd', 'P64ds', 'P65ds', 'P66ds', 'P66sd',
'P61dc', 'P62dc', 'P63dc', 'P63cd', 'P64dc', 'P65dc', 'P66dc', 'P66cd',
'P61db', 'P62db', 'P63db', 'P63bd', 'P64db', 'P65db', 'P66db', 'P66bd',
'P61sc', 'P62sc', 'P63sc', 'P63cs', 'P64sc', 'P65sc', 'P66sc', 'P66cs',
'P61sb', 'P62sb', 'P63sb', 'P63bs', 'P64sb', 'P65sb', 'P66sb', 'P66bs',
'P61cb', 'P62cb', 'P63cb', 'P63bc', 'P64cb', 'P65cb', 'P66cb', 'P66bc']
The anomalous dimension tensor defined below uses the following subset of the dim.6 DM-SM basis,
['C63u', 'C63d', 'C63s', 'C63c', 'C63b', 'C64u', 'C64d', 'C64s', 'C64c', 'C64b']
and the basis above.
Arguments
---------
nf -- the number of active flavors
input_dict (optional) -- a dictionary of hadronic input parameters
(default is Num_input().input_parameters)
"""
if input_dict is None:
ip = Num_input().input_parameters
# One should include a warning in case the dictionary
# does not contain all necessary keys
else:
ip = input_dict
mb = ip['mb_at_MZ']
mc = ip['mc_at_MZ']
ms = ip['ms_at_MZ']
md = ip['md_at_MZ']
mu = ip['mu_at_MZ']
# Create the ADT:
gamma_hat_P63cu_Q81u = np.hstack((np.zeros(3), -48 * mc**2/mu**2, np.zeros(6)))
gamma_hat_P63bu_Q81u = np.hstack((np.zeros(4), -48 * mb**2/mu**2, np.zeros(5)))
gamma_hat_P63cd_Q81d = np.hstack((np.zeros(3), -48 * mc**2/md**2, np.zeros(6)))
gamma_hat_P63bd_Q81d = np.hstack((np.zeros(4), -48 * mb**2/md**2, np.zeros(5)))
gamma_hat_P63cs_Q81s = np.hstack((np.zeros(3), -48 * mc**2/ms**2, np.zeros(6)))
gamma_hat_P63bs_Q81s = np.hstack((np.zeros(4), -48 * mb**2/ms**2, np.zeros(5)))
gamma_hat_P63cu_Q82u = np.hstack((np.zeros(8), -48 * mc**2/mu**2, np.zeros(1)))
gamma_hat_P63bu_Q82u = np.hstack((np.zeros(9), -48 * mb**2/mu**2))
gamma_hat_P63cd_Q82d = np.hstack((np.zeros(8), -48 * mc**2/md**2, np.zeros(1)))
gamma_hat_P63bd_Q82d = np.hstack((np.zeros(9), -48 * mb**2/md**2))
gamma_hat_P63cs_Q82s = np.hstack((np.zeros(8), -48 * mc**2/ms**2, np.zeros(1)))
gamma_hat_P63bs_Q82s = np.hstack((np.zeros(9), -48 * mb**2/ms**2))
gamma_hat_P62uc_Q83u = np.hstack((np.zeros(3), -48 * mc**2/mu**2, np.zeros(6)))
gamma_hat_P62ub_Q83u = np.hstack((np.zeros(4), -48 * mb**2/mu**2, np.zeros(5)))
gamma_hat_P62dc_Q83d = np.hstack((np.zeros(3), -48 * mc**2/md**2, np.zeros(6)))
gamma_hat_P62db_Q83d = np.hstack((np.zeros(4), -48 * mb**2/md**2, np.zeros(5)))
gamma_hat_P62sc_Q83s = np.hstack((np.zeros(3), -48 * mc**2/ms**2, np.zeros(6)))
    gamma_hat_P62sb_Q83s = np.hstack((np.zeros(4), -48 * mb**2/ms**2, np.zeros(5)))
#!/usr/bin/env python3
# mc_chain_sw_module.py
#------------------------------------------------------------------------------------------------#
# This software was written in 2016/17 #
# by <NAME> <<EMAIL>>/<<EMAIL>> #
# and <NAME> <<EMAIL>> ("the authors"), #
# to accompany the book "Computer Simulation of Liquids", second edition, 2017 ("the text"), #
# published by Oxford University Press ("the publishers"). #
# #
# LICENCE #
# Creative Commons CC0 Public Domain Dedication. #
# To the extent possible under law, the authors have dedicated all copyright and related #
# and neighboring rights to this software to the PUBLIC domain worldwide. #
# This software is distributed without any warranty. #
# You should have received a copy of the CC0 Public Domain Dedication along with this software. #
# If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. #
# #
# DISCLAIMER #
# The authors and publishers make no warranties about the software, and disclaim liability #
# for all uses of the software, to the fullest extent permitted by applicable law. #
# The authors and publishers do not recommend use of this software for any purpose. #
# It is made freely available, solely to clarify points made in the text. When using or citing #
# the software, you should not imply endorsement by the authors or publishers. #
#------------------------------------------------------------------------------------------------#
"""Monte Carlo, single chain, square wells."""
fast = True # Change this to replace NumPy potential evaluation with slower Python
def introduction():
"""Prints out introductory statements at start of run."""
print('Hard-sphere chain with fixed bond length')
print('Square-well attractive potential')
print('Diameter, sigma = 1')
if fast:
print('Fast NumPy potential routine')
else:
print('Slow Python potential routine')
def conclusion():
"""Prints out concluding statements at end of run."""
print('Program ends')
def regrow ( s, m_max, k_max, bond, q_range, r, q ):
"""Carries out single regrowth move, returning new r, q and indicator of success."""
# A short sequence of m atoms (m<=m_max) is deleted and regrown in the CBMC manner
# We randomly select which end of the chain to apply each of these operations to
# At each stage, k_max different atom positions are tried
# The bond length is fixed throughout
# Weights used in the regrowth are athermal, computed only on the basis of the
# hard-core overlap part of the non-bonded interactions: essentially they count non-overlaps
# Hence they are suitable for use in both NVT and Wang-Landau simulations
# r_old and r_new are used as working arrays
import numpy as np
from maths_module import random_vector
w_tol = 1.e-10 # Min weight tolerance
n, d = r.shape
assert d==3, 'Dimension error in regrow'
r_try = np.empty((k_max,3),dtype=np.float_)
w = np.empty(k_max,dtype=np.float_)
r_old = np.copy(r) # Store copy of r
q_old = q # Store old q
if m_max <= 0:
return r_old, q_old, False
m = 1+np.random.randint ( m_max ) # Number of atoms to regrow
c = np.random.randint ( 4 ) # Growth option
# PART 1: CONSTRUCT NEW CONFIGURATION WITH NEW WEIGHT
if c==0: # Remove from end and add to end
r[:n-m,:] = r_old[:n-m,:] # Copy first n-m atoms
elif c==1: # Remove from end and add to start
r[:n-m,:] = r_old[n-m-1::-1,:] # Copy and reverse first n-m atoms
elif c==2: # Remove from start and add to start
r[:n-m,:] = r_old[:m-1:-1,:] # Copy and reverse last n-m atoms
else: # Remove from start and add to end
r[:n-m,:] = r_old[m:,:] # Copy last n-m atoms
# Take the opportunity to place atom 0 at the origin
    r0 = np.copy(r[0,:])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Imaging
improve:
reinit, uncert,
rand_norm, rand_splitnorm, rand_pointing,
slice, slice_inv_sq, crop, rebin, groupixel
smooth, artifact, mask
Jy_per_pix_to_MJy_per_sr(improve):
header, image, wave
iuncert(improve):
unc
islice(improve):
image, wave, filenames, clean
icrop(improve):
header, image, wave
irebin(improve):
header, image, wave
igroupixel(improve):
header, image, wave
ismooth(improve):
header, image, wave
imontage(improve):
reproject, reproject_mc, coadd, clean
iswarp(improve):
footprint, combine, combine_mc, clean
iconvolve(improve):
spitzer_irs, choker, do_conv, image, wave,
filenames, clean
cupid(improve):
spec_build, sav_build,
header, image, wave
wmask, wclean, interfill, hextract, hswarp,
concatenate
"""
from tqdm import tqdm, trange
import os
import math
import numpy as np
from scipy.io import readsav
from scipy.interpolate import interp1d
from astropy import wcs
from astropy.io import ascii
from astropy.table import Table
from reproject import reproject_interp, reproject_exact, reproject_adaptive
from reproject.mosaicking import reproject_and_coadd
import subprocess as SP
import warnings
# warnings.filterwarnings("ignore", category=RuntimeWarning)
# warnings.filterwarnings("ignore", message="Skipping SYSTEM_VARIABLE record")
## Local
from utilities import InputError
from inout import (fitsext, csvext, ascext, fclean,
read_fits, write_fits, savext, write_hdf5,
# read_csv, write_csv, read_ascii,
)
from arrays import listize, closest, pix2sup, sup2pix
from maths import nanavg, bsplinterpol
from astrom import fixwcs, get_pc, pix2sr
##-----------------------------------------------
##
## <improve> based tools
##
##-----------------------------------------------
class improve:
'''
IMage PROcessing VEssel
'''
def __init__(self, filIN=None, header=None, image=None, wave=None,
wmod=0, verbose=False):
'''
self: filIN, wmod, hdr, w, cdelt, pc, cd, Ndim, Nx, Ny, Nw, im, wvl
'''
## INPUTS
self.filIN = filIN
self.wmod = wmod
self.verbose = verbose
## Read image/cube
if filIN is not None:
ds = read_fits(filIN)
self.hdr = ds.header
self.im = ds.data
self.wvl = ds.wave
else:
self.hdr = header
self.im = image
self.wvl = wave
if self.im is not None:
self.Ndim = self.im.ndim
if self.Ndim==3:
self.Nw, self.Ny, self.Nx = self.im.shape
## Nw=1 patch
if self.im.shape[0]==1:
self.Ndim = 2
elif self.Ndim==2:
self.Ny, self.Nx = self.im.shape
self.Nw = None
if self.hdr is not None:
hdr = self.hdr.copy()
ws = fixwcs(header=hdr, mode='red_dim')
self.hdred = ws.header # reduced header
self.w = ws.wcs
pcdelt = get_pc(wcs=ws.wcs)
self.cdelt = pcdelt.cdelt
self.pc = pcdelt.pc
self.cd = pcdelt.cd
if verbose==True:
print('<improve> file: ', filIN)
print('Raw size (pix): {} * {}'.format(self.Nx, self.Ny))
def reinit(self, filIN=None, header=None, image=None, wave=None,
wmod=0, verbose=False):
'''
Update init variables
'''
## INPUTS
self.filIN = filIN
self.wmod = wmod
self.verbose = verbose
## Read image/cube
if filIN is not None:
ds = read_fits(filIN)
self.hdr = ds.header
self.im = ds.data
self.wvl = ds.wave
else:
self.hdr = header
self.im = image
self.wvl = wave
self.Ndim = self.im.ndim
self.hdr['NAXIS'] = self.Ndim
if self.Ndim==3:
self.Nw, self.Ny, self.Nx = self.im.shape
## Nw=1 patch
if self.im.shape[0]==1:
self.Ndim = 2
del self.hdr['NAXIS3']
else:
self.hdr['NAXIS3'] = self.Nw
elif self.Ndim==2:
self.Ny, self.Nx = self.im.shape
self.Nw = None
self.hdr['NAXIS2'] = self.Ny
self.hdr['NAXIS1'] = self.Nx
hdr = self.hdr.copy()
ws = fixwcs(header=hdr, mode='red_dim')
self.hdred = ws.header # reduced header
self.w = ws.wcs
pcdelt = get_pc(wcs=ws.wcs)
self.cdelt = pcdelt.cdelt
self.pc = pcdelt.pc
self.cd = pcdelt.cd
if verbose==True:
print('<improve> file: ', filIN)
print('Image size (pix): {} * {}'.format(self.Nx, self.Ny))
def uncert(self, filOUT=None, filUNC=None, filWGT=None, wfac=1.,
BG_image=None, BG_weight=None, zerovalue=np.nan):
'''
        Estimate uncertainties from the scatter of a background region.
        The resulting error map is uniform, or weighted if a weight map is given.
------ INPUT ------
filOUT output uncertainty map (FITS)
filUNC input uncertainty map (FITS)
filWGT input weight map (FITS)
wfac multiplication factor for filWGT (Default: 1)
BG_image background image array used to generate unc map
BG_weight background weight array
zerovalue value used to replace zero value (Default: NaN)
------ OUTPUT ------
unc estimated unc map
'''
if filUNC is not None:
unc = read_fits(filUNC).data
else:
if BG_image is not None:
im = BG_image
Ny, Nx = BG_image.shape
else:
im = self.im
Ny = self.Ny
Nx = self.Nx
Nw = self.Nw
## sigma: std dev of (weighted) flux distribution of bg region
if BG_weight is not None:
if self.Ndim==3:
sigma = np.nanstd(im * BG_weight, axis=(1,2))
elif self.Ndim==2:
sigma = np.nanstd(im * BG_weight)
else:
if self.Ndim==3:
sigma = np.nanstd(im, axis=(1,2))
elif self.Ndim==2:
sigma = np.nanstd(im)
## wgt: weight map
if filWGT is not None:
wgt = read_fits(filWGT).data * wfac
else:
wgt = np.ones(self.im.shape) * wfac
## unc: weighted rms = root of var/wgt
if self.Ndim==3:
unc = []
for w in range(Nw):
                unc.append(np.sqrt(1./wgt[w,:,:]) * sigma[w])
unc = np.array(unc)
elif self.Ndim==2:
unc = np.sqrt(1./wgt) * sigma
## Replace zero values
unc[unc==0] = zerovalue
self.unc = unc
if filOUT is not None:
write_fits(filOUT, self.hdr, unc, self.wvl, self.wmod)
return unc
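    # Usage sketch (file names are placeholders): derive a per-pixel error map from
    # the scatter of a background region and write it next to the data cube.
    #
    #     imp = improve('obs_cube')
    #     unc = imp.uncert(filOUT='obs_cube_unc', BG_image=background_image)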
def rand_norm(self, filUNC=None, unc=None, sigma=1., mu=0.):
'''
        Add Gaussian noise: each pixel is shifted by a N(mu, sigma) draw scaled by its uncertainty
'''
if filUNC is not None:
unc = read_fits(filUNC).data
if unc is not None:
## unc should have the same dimension with im
theta = np.random.normal(mu, sigma, self.im.shape)
self.im += theta * unc
return self.im
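    # Monte-Carlo sketch: perturb the cube once with its uncertainty map; each call
    # draws a new Gaussian realisation (the file name below is a placeholder).
    #
    #     imp = improve('obs_cube')
    #     imp.rand_norm(filUNC='obs_cube_unc')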
def rand_splitnorm(self, filUNC=None, unc=None, sigma=1., mu=0.):
'''
Add random SN(0,lam,lam*tau) noise
------ INPUT ------
filUNC 2 FITS files for unc of left & right sides
unc 2 uncertainty ndarrays
------ OUTPUT ------
'''
if filUNC is not None:
unc = []
for f in filUNC:
unc.append(read_fits(f).data)
if unc is not None:
## unc[i] should have the same dimension with self.im
tau = unc[1]/unc[0]
peak = 1/(1+tau)
theta = np.random.normal(mu, sigma, self.im.shape) # ~N(0,1)
flag = np.random.random(self.im.shape) # ~U(0,1)
if self.Ndim==2:
for x in range(self.Nx):
for y in range(self.Ny):
if flag[y,x]<peak[y,x]:
self.im[y,x] += -abs(theta[y,x]) * unc[0][y,x]
else:
self.im[y,x] += abs(theta[y,x]) * unc[1][y,x]
elif self.Ndim==3:
for x in range(self.Nx):
for y in range(self.Ny):
for k in range(self.Nw):
if flag[k,y,x]<peak[k,y,x]:
self.im[k,y,x] += -abs(
theta[k,y,x]) * unc[0][k,y,x]
else:
self.im[k,y,x] += abs(
theta[k,y,x]) * unc[1][k,y,x]
return self.im
def rand_pointing(self, sigma=0, header=None, fill='med',
xscale=1, yscale=1, swarp=False, tmpdir=None):
'''
Add pointing uncertainty to WCS
------ INPUT ------
sigma pointing accuracy (arcsec)
header baseline
fill fill value of no data regions after shift
'med': axis median (default)
'avg': axis average
'near': nearest non-NaN value on the same axis
float: constant
xscale,yscale regrouped super pixel size
swarp use SWarp to perform position shifts
Default: False (not support supix)
------ OUTPUT ------
'''
if sigma>=0:
sigma /= 3600.
            d_ro = abs(np.random.normal(0., sigma)) # half-normal |N(0,sigma)|
d_phi = np.random.random() *2. * np.pi # U(0,2*pi)
# d_ro, d_phi = 0.0002, 4.5
# print('d_ro,d_phi = ', d_ro,d_phi)
## New header/WCS
if header is None:
header = self.hdr
wcs = fixwcs(header=header, mode='red_dim').wcs
Nx = header['NAXIS1']
Ny = header['NAXIS2']
newheader = header.copy()
newheader['CRVAL1'] += d_ro * np.cos(d_phi)
newheader['CRVAL2'] += d_ro * np.sin(d_phi)
newcs = fixwcs(header=newheader, mode='red_dim').wcs
## Convert world increment to pix increment
pix = wcs.all_world2pix(newheader['CRVAL1'], newheader['CRVAL2'], 1)
d_x = pix[0] - header['CRPIX1']
d_y = pix[1] - header['CRPIX2']
# print('Near CRPIXn increments: ', d_x, d_y)
# val1 = np.array(newcs.all_pix2world(0.5, 0.5, 1))
# d_x, d_y = wcs.all_world2pix(val1[np.newaxis,:], 1)[0] - 0.5
# print('Near (1,1) increments: ', d_x, d_y)
oldimage = self.im
## Resampling
if swarp:
## Set path of tmp files (SWarp use only)
if tmpdir is None:
path_tmp = os.getcwd()+'/tmp_swp/'
else:
path_tmp = tmpdir
if not os.path.exists(path_tmp):
os.makedirs(path_tmp)
## Works but can be risky since iswarp.combine included rand_pointing...
write_fits(path_tmp+'tmp_rand_shift',
newheader, self.im, self.wvl)
swp = iswarp(refheader=self.hdr, tmpdir=path_tmp)
rep = swp.combine(path_tmp+'tmp_rand_shift',
combtype='avg', keepedge=True)
self.im = rep.data
else:
if self.Ndim==3:
Nxs = math.ceil(Nx/xscale)
cube_supx = np.zeros((self.Nw,Ny,Nxs))
frac2 = d_x / xscale
f2 = math.floor(frac2)
frac1 = 1 - frac2
for xs in range(Nxs):
if frac2>=0:
x0 = sup2pix(0, xscale, Npix=Nx, origin=0)
else:
x0 = sup2pix(Nxs-1, xscale, Npix=Nx, origin=0)
if fill=='med':
fill_value = np.nanmedian(self.im,axis=2)
elif fill=='avg':
fill_value = np.nanmean(self.im,axis=2)
elif fill=='near':
fill_value = np.nanmean(self.im[:,:,x0[0]:x0[-1]+1],axis=2)
else:
fill_value = fill
if frac2>=0:
if xs>=f2:
x1 = sup2pix(xs-f2, xscale, Npix=Nx, origin=0)
cube_supx[:,:,xs] += (f2+frac1) * np.nanmean(self.im[:,:,x1[0]:x1[-1]+1],axis=2)
if xs>f2:
x2 = sup2pix(xs-f2-1, xscale, Npix=Nx, origin=0)
cube_supx[:,:,xs] += (frac2-f2) * np.nanmean(self.im[:,:,x2[0]:x2[-1]+1],axis=2)
else:
cube_supx[:,:,xs] += (frac2-f2) * fill_value
else:
cube_supx[:,:,xs] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super x = {}'.format(xs))
else:
if xs<=Nxs+f2:
x2 = sup2pix(xs-f2-1, xscale, Npix=Nx, origin=0)
cube_supx[:,:,xs] += (frac2-f2) * np.nanmean(self.im[:,:,x2[0]:x2[-1]+1],axis=2)
if xs<Nxs+f2:
x1 = sup2pix(xs-f2, xscale, Npix=Nx, origin=0)
cube_supx[:,:,xs] += (f2+frac1) * np.nanmean(self.im[:,:,x1[0]:x1[-1]+1],axis=2)
else:
cube_supx[:,:,xs] += (f2+frac1) * fill_value
else:
cube_supx[:,:,xs] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super x = {}'.format(xs))
Nys = math.ceil(Ny/yscale)
supcube = np.zeros((self.Nw,Nys,Nxs))
frac2 = d_y / yscale
f2 = math.floor(frac2)
frac1 = 1 - frac2
for ys in range(Nys):
if frac2>=0:
y0 = sup2pix(0, yscale, Npix=Ny, origin=0)
else:
y0 = sup2pix(Nys-1, yscale, Npix=Ny, origin=0)
if fill=='med':
fill_value = np.nanmedian(cube_supx,axis=1)
elif fill=='avg':
fill_value = np.nanmean(cube_supx,axis=1)
elif fill=='near':
fill_value = np.nanmean(cube_supx[:,y0[0]:y0[-1]+1,:],axis=1)
else:
fill_value = fill
if frac2>=0:
if ys>=f2:
y1 = sup2pix(ys-f2, yscale, Npix=Ny, origin=0)
supcube[:,ys,:] += (f2+frac1) * np.nanmean(cube_supx[:,y1[0]:y1[-1]+1,:],axis=1)
if ys>f2:
y2 = sup2pix(ys-f2-1, yscale, Npix=Ny, origin=0)
supcube[:,ys,:] += (frac2-f2) * np.nanmean(cube_supx[:,y2[0]:y2[-1]+1,:],axis=1)
else:
supcube[:,ys,:] += (frac2-f2) * fill_value
else:
supcube[:,ys,:] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super y = {}'.format(ys))
else:
if ys<=Nys+f2:
y2 = sup2pix(ys-f2-1, yscale, Npix=Ny, origin=0)
supcube[:,ys,:] += (frac2-f2) * np.nanmean(cube_supx[:,y2[0]:y2[-1]+1,:],axis=1)
if ys<Nys+f2:
y1 = sup2pix(ys-f2, yscale, Npix=Ny, origin=0)
                            supcube[:,ys,:] += (f2+frac1) * np.nanmean(cube_supx[:,y1[0]:y1[-1]+1,:],axis=1)
else:
supcube[:,ys,:] += (f2+frac1) * fill_value
else:
supcube[:,ys,:] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super y = {}'.format(ys))
for x in range(Nx):
for y in range(Ny):
xs = pix2sup(x, xscale, origin=0)
ys = pix2sup(y, yscale, origin=0)
self.im[:,y,x] = supcube[:,ys,xs]
elif self.Ndim==2:
Nxs = math.ceil(Nx/xscale)
cube_supx = np.zeros((Ny,Nxs))
frac2 = d_x / xscale
f2 = math.floor(frac2)
frac1 = 1 - frac2
for xs in range(Nxs):
if frac2>=0:
x0 = sup2pix(0, xscale, Npix=Nx, origin=0)
else:
x0 = sup2pix(Nxs-1, xscale, Npix=Nx, origin=0)
if fill=='med':
fill_value = np.nanmedian(self.im,axis=1)
elif fill=='avg':
fill_value = np.nanmean(self.im,axis=1)
elif fill=='near':
fill_value = np.nanmean(self.im[:,x0[0]:x0[-1]+1],axis=1)
else:
fill_value = fill
if frac2>=0:
if xs>=f2:
x1 = sup2pix(xs-f2, xscale, Npix=Nx, origin=0)
cube_supx[:,xs] += (f2+frac1) * np.nanmean(self.im[:,x1[0]:x1[-1]+1],axis=1)
if xs>f2:
x2 = sup2pix(xs-f2-1, xscale, Npix=Nx, origin=0)
cube_supx[:,xs] += (frac2-f2) * np.nanmean(self.im[:,x2[0]:x2[-1]+1],axis=1)
else:
cube_supx[:,xs] += (frac2-f2) * fill_value
else:
cube_supx[:,xs] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super x = {}'.format(xs))
else:
if xs<=Nxs+f2:
x2 = sup2pix(xs-f2-1, xscale, Npix=Nx, origin=0)
cube_supx[:,xs] += (frac2-f2) * np.nanmean(self.im[:,x2[0]:x2[-1]+1],axis=1)
if xs<Nxs+f2:
x1 = sup2pix(xs-f2, xscale, Npix=Nx, origin=0)
cube_supx[:,xs] += (f2+frac1) * np.nanmean(self.im[:,x1[0]:x1[-1]+1],axis=1)
else:
cube_supx[:,xs] += (f2+frac1) * fill_value
else:
cube_supx[:,xs] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super x = {}'.format(xs))
Nys = math.ceil(Ny/yscale)
supcube = np.zeros((Nys,Nxs))
frac2 = d_y / yscale
f2 = math.floor(frac2)
frac1 = 1 - frac2
for ys in range(Nys):
if frac2>=0:
y0 = sup2pix(0, yscale, Npix=Ny, origin=0)
else:
y0 = sup2pix(Nys-1, yscale, Npix=Ny, origin=0)
if fill=='med':
fill_value = np.nanmedian(cube_supx,axis=0)
elif fill=='avg':
fill_value = np.nanmean(cube_supx,axis=0)
elif fill=='near':
fill_value = | np.nanmean(cube_supx[y0[0]:y0[-1]+1,:],axis=0) | numpy.nanmean |
import random
from pprint import pprint as pp
import numpy as np
import quaternion
import qukf
class QUKFModel1(qukf.QuaternionUKFModel):
"""
The internal state:
additive:
world_position(3)
world_velocity(3)
body_acceleration(1), it's only moving forward
angular_velocity(3)
quaternions:
orientation
Measurements:
* angular
* acceleration
* position
* down
* north
"""
def f(self, dt, xa, xq):
w_position = xa[:3]
w_velocity = xa[3:6]
b_acceleration = xa[6]
angular = xa[7:10]
xq = qukf.quats_add_residuals(xq, angular * dt)
# world acceleration
a = qukf.quat_rotate(xq[0], np.array([0, 0, b_acceleration]), inverse=True)
w_velocity += a * dt
w_position += w_velocity * dt + 1/2 * dt**2 * a
return xa, xq
def h(self, xa, xq, hint=None):
if hint == 'angular':
return xa[7:10]
elif hint == 'acceleration':
return xa[6:7]
elif hint == 'position':
return xa[0:3]
elif hint == 'down':
return qukf.quat_rotate(xq[0], np.array([0, 0, -1]))
elif hint == 'north':
return qukf.quat_rotate(xq[0], np.array([0, 1, 0]))
else:
raise ValueError('Invalid hint {}'.format(hint))
def q(self, dt):
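        # Added note: 13 diagonal entries, i.e. 3 world position, 3 world velocity,
        # 1 body acceleration and 3 angular velocity for the additive state, plus 3
        # for the quaternion error-state of the orientation.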
return np.diag(
[5.] * 3 + [2.] * 3 + [1.] + [1.] * 3 + [1.] * 3) * dt
def r(self, hint=None):
if hint == 'angular':
return np.diag([1.] * 3)
elif hint == 'acceleration':
return np.diag([1.])
elif hint == 'position':
return np.diag([1.] * 3)
elif hint == 'down':
return np.diag([1.] * 3)
elif hint == 'north':
return np.diag([1.] * 3)
else:
raise ValueError('Invalid hint {}'.format(hint))
def parse_state(self, xa, xq):
d = {}
d['position'] = xa[0:3]
d['velocity'] = xa[3:6]
d['b_acceleration'] = xa[6:7]
d['angular'] = xa[7:10]
d['orientation'] = xq[0]
return d
class DummyDevice(object):
"""
It's a device controlled by 3D acceleration and rotation.
"""
def __init__(self):
self.w_position = np.zeros(3)
self.w_velocity = np.zeros(3)
self.b_acceleration = | np.zeros(1) | numpy.zeros |
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from copy import deepcopy
import os.path as osp
import glog as logging
import numpy as np
from six import iteritems
from tqdm import tqdm
import json
import cv2
from utils.object_detection_evaluation import DetectionEvaluation
from utils.image_grabber import ImageGrabber
#pylint: disable=invalid-name
image_provider = ImageGrabber()
num_classes = 3
ignored_id = -2
valid_labels = ['person', 'vehicle', 'non-vehicle']
labels_to_class_idx = {'__background__': -1,
'ignored': ignored_id,
'person': 0,
'vehicle': 1,
'non-vehicle': 2}
def parse_args():
""" Parse input parameters
"""
parser = argparse.ArgumentParser(description='')
parser.add_argument('ground_truth_file_path')
parser.add_argument('detections_file_path')
parser.add_argument('--remap', dest='classes_remap', choices=('all', 'pascal', 'coco'), default='all')
return parser.parse_args()
#pylint: disable=consider-using-enumerate
def prepare_ground_truth(ground_truth):
""" Prepare ground truth detections
"""
for gt in ground_truth:
to_delete = []
objects = gt['objects']
for i in range(len(objects)):
obj = objects[i]
if len(obj) == 0:
to_delete.append(i)
continue
# If object has label 'ignored', add the same box
# for each valid object label and set 'ignored' flag to True.
if obj['label'] == 'ignored':
to_delete.append(i)
for j in range(num_classes):
new_obj = deepcopy(obj)
new_obj['label'] = valid_labels[j]
new_obj['ignored'] = True
objects.append(new_obj)
else:
obj['difficult'] = obj.get('difficult', False) or obj.get('occluded', False)
if 'occluded' in obj:
del obj['occluded']
obj['ignored'] = False
for i in reversed(sorted(to_delete)):
del objects[i]
return None
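# Illustrative sketch (added, not part of the original evaluation script): shows how
# prepare_ground_truth() expands a single 'ignored' box into one ignored box per valid
# class label. The tiny annotation dict below is hypothetical example data.
def _example_prepare_ground_truth():
    gt = [{'image': 'img_000.jpg',
           'objects': [{'label': 'ignored', 'bbox': [0, 0, 10, 10]}]}]
    prepare_ground_truth(gt)
    # The 'ignored' entry is removed and replaced by num_classes copies,
    # one per label in valid_labels, each flagged with ignored=True.
    assert len(gt[0]['objects']) == num_classes
    assert all(obj['ignored'] for obj in gt[0]['objects'])
    return gt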
def add_detections(evaluator,
detections,
ground_truth,
verbose=True):
""" Add found detections to evaluator
"""
detections_image_paths_list = np.asarray([osp.basename(d['image']) for d in detections])
ground_truth_image_paths_list = np.asarray([osp.basename(gt['image']) for gt in ground_truth])
assert np.array_equal(detections_image_paths_list, ground_truth_image_paths_list)
for image_id, (det, gt) in tqdm(enumerate(zip(detections, ground_truth)),
desc='for every image', disable=not verbose,
total=len(detections)):
bboxes = []
labels = []
ignored_mask = []
for object_gt in gt['objects']:
bboxes.append(object_gt['bbox'])
labels.append(labels_to_class_idx.get(object_gt['label'], -1))
ignored_mask.append(object_gt['ignored'])
if len(bboxes) > 0:
bboxes = | np.asarray(bboxes, dtype=np.float32) | numpy.asarray |
# -*- coding: utf-8 -*-
"""
@author: truthless
"""
import os
import logging
import json
import numpy as np
import torch
import torch.nn as nn
from torch import optim
import torch.utils.data as data
from convlab2.util.train_util import to_device
from convlab2.policy.vector.dataset import ActStateDataset
from convlab2.policy.mle.multiwoz.loader import ActMLEPolicyDataLoaderMultiWoz
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class RewardEstimator(object):
def __init__(self, vector, pretrain = False):
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.json'), 'r') as f:
cfg = json.load(f)
self.irl = AIRL(cfg['gamma'], cfg['hi_dim'], vector.state_dim, vector.da_dim).to(device=DEVICE)
self.bce_loss = nn.BCEWithLogitsLoss()
self.step = 0
self.anneal = cfg['anneal']
self.irl_params = self.irl.parameters()
self.irl_optim = optim.RMSprop(self.irl_params, lr=cfg['lr_irl'])
self.weight_cliping_limit = cfg['clip']
self.save_dir = cfg['save_dir']
self.save_per_epoch = cfg['save_per_epoch']
self.optim_batchsz = cfg['batchsz']
self.irl.eval()
manager = ActEstimatorDataLoaderMultiWoz()
self.data_train = manager.create_dataset_irl('train', cfg['batchsz'])
self.irl_iter = iter(self.data_train)
if pretrain:
self.data_train = manager.create_dataset_irl('train', cfg['batchsz'])
self.data_valid = manager.create_dataset_irl('valid', cfg['batchsz'])
self.data_test = manager.create_dataset_irl('test', cfg['batchsz'])
self.irl_iter = iter(self.data_train)
self.irl_iter_valid = iter(self.data_valid)
self.irl_iter_test = iter(self.data_test)
def kl_divergence(self, mu, logvar, istrain):
klds = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp()).sum()
beta = min(self.step/self.anneal, 1) if istrain else 1
return beta*klds
def irl_loop(self, data_real, data_gen):
s_real, a_real, next_s_real = to_device(data_real)
s, a, next_s = data_gen
# train with real data
weight_real = self.irl(s_real, a_real, next_s_real)
loss_real = -weight_real.mean()
# train with generated data
weight = self.irl(s, a, next_s)
loss_gen = weight.mean()
return loss_real, loss_gen
def train_irl(self, batch, epoch):
self.irl.train()
input_s = torch.from_numpy( | np.stack(batch.state) | numpy.stack |
# Import folder where sorting algorithms
import sys
import unittest
import numpy as np
# For importing from different folders
# OBS: This is supposed to be run by automated testing,
# hence the path is relative to the folder we want to import from
sys.path.append("ML/algorithms/linearregression")
# If run from local:
# sys.path.append('../../ML/algorithms/linearregression')
from linear_regression_gradient_descent import LinearRegression
class TestLinearRegression_GradientDescent(unittest.TestCase):
def setUp(self):
# test cases we want to run
self.linearReg = LinearRegression()
self.X1 = np.array([[0, 1, 2]])
self.y1 = np.array([[1, 2, 3]])
self.W1_correct = | np.array([[1, 1]]) | numpy.array |
"""
Example of how to trace a Fresnel Zone Plate
by <NAME> and <NAME>.
1) The source, from get_beam(), is a collimated source of square cross section (800 microns),
monochromatic (1.54 A)
2) fresnel_zone_plane() calculates a fzp, centered at (0,0). It returns a new shadow3 beam
containing the beam after the fzp at the same plane of fzp.
The fzp parameters are: inner zone radius: 12.4 microns, diameter:
619 microns
focal distance (at nominal wavelength 1.54 A): 100 cm
3) main() does:
i) create the source with get_beam()
ii) Traces a FZP placed at the same source plane
   iii) retraces both the source and the focused source and displays both results.
       One can see how the FZP focuses the beam well
<EMAIL> - Written. Translated from macro_fresnelzonplate example in ShadowVUI
"""
import Shadow
import numpy
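# Added sanity check (not part of the original ShadowVUI translation): for a zone
# plate the first-zone radius R1 and the wavelength fix the focal length through
# f = R1**2 / lambda.  With R1 = 12.4 microns and lambda = 1.54 A (both in cm here)
# this gives ~99.8 cm, consistent with the 100 cm quoted in the docstring above.
def _fzp_nominal_focal_cm(r1_cm=12.4e-4, lambda_cm=1.54e-8):
    return r1_cm ** 2 / lambda_cm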
def get_beam():
#
# Python script to run shadow3. Created automatically with ShadowTools.make_python_script_from_list().
#
# write (1) or not (0) SHADOW files start.xx end.xx star.xx
iwrite = 0
#
# initialize shadow3 source (oe0) and beam
#
beam = Shadow.Beam()
oe0 = Shadow.Source()
oe1 = Shadow.OE()
#
# Define variables. See meaning of variables in:
# https://raw.githubusercontent.com/srio/shadow3/master/docs/source.nml
# https://raw.githubusercontent.com/srio/shadow3/master/docs/oe.nml
#
oe0.FSOUR = 1
oe0.HDIV1 = 1e-08
oe0.HDIV2 = 1e-08
oe0.IDO_VX = 0
oe0.IDO_VZ = 0
oe0.IDO_X_S = 0
oe0.IDO_Y_S = 0
oe0.IDO_Z_S = 0
oe0.PH1 = 1.54
oe0.VDIV1 = 1e-08
oe0.VDIV2 = 1e-08
oe0.WXSOU = 0.08
oe0.WZSOU = 0.08
#Run SHADOW to create the source
if iwrite:
oe0.write("start.00")
beam.genSource(oe0)
if iwrite:
oe0.write("end.00")
beam.write("begin.dat")
return beam
def fresnel_zone_plane(beam_in,
DDm = 618. , # FZP diameter in microns
nomlambdaA = 1.54 , # nominal wavelength in Angstrom
focal = 100. , # focal distance (cm)
R0m = 12.4 , # inner zone radius (microns)
):
"""
Fresnel zone plate. Simple calculation
Coded by <NAME> (<EMAIL>) and
<NAME> (<EMAIL>)
This shadow3 script calculates the effect of a Fresnel Zone Plate
It supposes the fzp is on top of a screen plane
centered on the optical axis.
    :param beam_in: input shadow3 beam
    :param DDm: FZP diameter in microns
    :param nomlambdaA: nominal wavelength in Angstrom
    :param focal: focal distance (cm)
    :param R0m: inner zone radius (microns)
    :return: a new shadow3 beam containing the rays after the FZP
"""
#
    # Widget_Control,/HourGlass ; create an hourglass icon during calculation
# change units to cm
#
DD = DDm*1.e-4 # cm
R0 = R0m*1.e-4 # cm
nomlambda = nomlambdaA*1.e-8 # cm
beam = beam_in.duplicate()
#
# reading Shadow file variables
#
#
lambda1 = beam.getshonecol(19) # lambda in Angstroms
x = beam.getshonecol(1)
z = beam.getshonecol(3)
xpin = beam.getshonecol(4)
zpin = beam.getshonecol(6)
#
#
# ;
#
Kmod = 2 * numpy.pi / lambda1 # wavevector modulus in Angstrom-1
r = numpy.sqrt(x**2. + z**2.) # distance to center
Kxin = Kmod * xpin
Kzin = Kmod * zpin
nrays = x.size
n = numpy.zeros(nrays)
d = | numpy.zeros(nrays) | numpy.zeros |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import imageio
import torch
import sys
sys.path.append('../')
from torch.utils.data import Dataset
from .data_utils import random_crop, get_nearest_pose_ids
from .llff_data_utils import load_llff_data, batch_parse_llff_poses
class LLFFTestDataset(Dataset):
def __init__(self, args, mode, scenes=(), random_crop=True, **kwargs):
self.folder_path = os.path.join(args.rootdir, 'data/nerf_llff_data/')
self.args = args
self.mode = mode # train / test / validation
self.num_source_views = args.num_source_views
self.random_crop = random_crop
self.render_rgb_files = []
self.render_intrinsics = []
self.render_poses = []
self.render_train_set_ids = []
self.render_depth_range = []
self.train_intrinsics = []
self.train_poses = []
self.train_rgb_files = []
all_scenes = os.listdir(self.folder_path)
if len(scenes) > 0:
if isinstance(scenes, str):
scenes = [scenes]
else:
scenes = all_scenes
print("loading {} for {}".format(scenes, mode))
for i, scene in enumerate(scenes):
scene_path = os.path.join(self.folder_path, scene)
_, poses, bds, render_poses, i_test, rgb_files = load_llff_data(scene_path, load_imgs=False, factor=4)
near_depth = np.min(bds)
far_depth = np.max(bds)
intrinsics, c2w_mats = batch_parse_llff_poses(poses)
i_test = np.arange(poses.shape[0])[::self.args.llffhold]
            i_train = np.array([j for j in np.arange(int(poses.shape[0]))
                                if j not in i_test])
if mode == 'train':
i_render = i_train
else:
i_render = i_test
self.train_intrinsics.append(intrinsics[i_train])
self.train_poses.append(c2w_mats[i_train])
self.train_rgb_files.append(np.array(rgb_files)[i_train].tolist())
num_render = len(i_render)
self.render_rgb_files.extend(np.array(rgb_files)[i_render].tolist())
self.render_intrinsics.extend([intrinsics_ for intrinsics_ in intrinsics[i_render]])
self.render_poses.extend([c2w_mat for c2w_mat in c2w_mats[i_render]])
self.render_depth_range.extend([[near_depth, far_depth]]*num_render)
self.render_train_set_ids.extend([i]*num_render)
def __len__(self):
return len(self.render_rgb_files) * 100000 if self.mode == 'train' else len(self.render_rgb_files)
def __getitem__(self, idx):
idx = idx % len(self.render_rgb_files)
rgb_file = self.render_rgb_files[idx]
rgb = imageio.imread(rgb_file).astype(np.float32) / 255.
render_pose = self.render_poses[idx]
intrinsics = self.render_intrinsics[idx]
depth_range = self.render_depth_range[idx]
train_set_id = self.render_train_set_ids[idx]
train_rgb_files = self.train_rgb_files[train_set_id]
train_poses = self.train_poses[train_set_id]
train_intrinsics = self.train_intrinsics[train_set_id]
img_size = rgb.shape[:2]
camera = np.concatenate((list(img_size), intrinsics.flatten(),
render_pose.flatten())).astype(np.float32)
if self.mode == 'train':
if rgb_file in train_rgb_files:
id_render = train_rgb_files.index(rgb_file)
else:
id_render = -1
subsample_factor = np.random.choice(np.arange(1, 4), p=[0.2, 0.45, 0.35])
num_select = self.num_source_views + np.random.randint(low=-2, high=2)
else:
id_render = -1
subsample_factor = 1
num_select = self.num_source_views
nearest_pose_ids = get_nearest_pose_ids(render_pose,
train_poses,
min(self.num_source_views*subsample_factor, 28),
tar_id=id_render,
angular_dist_method='dist')
nearest_pose_ids = np.random.choice(nearest_pose_ids, min(num_select, len(nearest_pose_ids)), replace=False)
assert id_render not in nearest_pose_ids
# occasionally include input image
if | np.random.choice([0, 1], p=[0.995, 0.005]) | numpy.random.choice |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils function for routing game experiment."""
# pylint:disable=too-many-lines,import-error,missing-function-docstring,protected-access,too-many-locals,invalid-name,too-many-arguments,too-many-branches,missing-class-docstring,too-few-public-methods
# pylint:disable=line-too-long
import random
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python import policy as policy_module
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import cfr
from open_spiel.python.algorithms import expected_game_score
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import external_sampling_mccfr as external_mccfr
from open_spiel.python.algorithms import fictitious_play
from open_spiel.python.algorithms import nfsp
from open_spiel.python.algorithms import noisy_policy
from open_spiel.python.games import dynamic_routing
from open_spiel.python.games import dynamic_routing_utils
from open_spiel.python.mfg.algorithms import distribution as distribution_module
from open_spiel.python.mfg.algorithms import fictitious_play as mean_field_fictitious_play_module
from open_spiel.python.mfg.algorithms import mirror_descent
from open_spiel.python.mfg.algorithms import nash_conv as nash_conv_module
from open_spiel.python.mfg.algorithms import policy_value
from open_spiel.python.mfg.games import dynamic_routing as mean_field_routing_game
import pyspiel
# pylint:enable=line-too-long
def create_games(origin,
destination,
num_vehicles,
graph,
max_time_step,
time_step_length=1.0,
departure_time=None):
if departure_time is not None:
raise NotImplementedError("To do.")
list_of_vehicles = [
dynamic_routing_utils.Vehicle(origin, destination)
for _ in range(num_vehicles)
]
game = dynamic_routing.DynamicRoutingGame(
{
"max_num_time_step": max_time_step,
"time_step_length": time_step_length
},
network=graph,
vehicles=list_of_vehicles)
seq_game = pyspiel.convert_to_turn_based(game)
od_demand = [
dynamic_routing_utils.OriginDestinationDemand(origin, destination, 0,
num_vehicles)
]
mfg_game = mean_field_routing_game.MeanFieldRoutingGame(
{
"max_num_time_step": max_time_step,
"time_step_length": time_step_length
},
network=graph,
od_demand=od_demand)
return game, seq_game, mfg_game
def create_braess_network(capacity):
graph_dict = {
"A": {
"connection": {
"B": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 0
}
},
"location": [0, 0]
},
"B": {
"connection": {
"C": {
"a": 1.0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 1.0
},
"D": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 2.0
}
},
"location": [1, 0]
},
"C": {
"connection": {
"D": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 0.25
},
"E": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 2.0
}
},
"location": [2, 1]
},
"D": {
"connection": {
"E": {
"a": 1,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 1.0
}
},
"location": [2, -1]
},
"E": {
"connection": {
"F": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 0.0
}
},
"location": [3, 0]
},
"F": {
"connection": {},
"location": [4, 0]
}
}
adjacency_list = {
key: list(value["connection"].keys())
for key, value in graph_dict.items()
}
bpr_a_coefficient = {}
bpr_b_coefficient = {}
capacity = {}
free_flow_travel_time = {}
for o_node, value_dict in graph_dict.items():
for d_node, section_dict in value_dict["connection"].items():
road_section = dynamic_routing_utils._nodes_to_road_section(
origin=o_node, destination=d_node)
bpr_a_coefficient[road_section] = section_dict["a"]
bpr_b_coefficient[road_section] = section_dict["b"]
capacity[road_section] = section_dict["capacity"]
free_flow_travel_time[road_section] = section_dict[
"free_flow_travel_time"]
node_position = {key: value["location"] for key, value in graph_dict.items()}
return dynamic_routing_utils.Network(
adjacency_list,
node_position=node_position,
bpr_a_coefficient=bpr_a_coefficient,
bpr_b_coefficient=bpr_b_coefficient,
capacity=capacity,
free_flow_travel_time=free_flow_travel_time)
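# Illustrative usage sketch (added): wire the Braess network above into the three game
# objects returned by create_games().  The origin/destination road sections "A->B" and
# "E->F" follow the "<origin>-><destination>" naming used by dynamic_routing_utils;
# adjust them if your network differs.  Requires open_spiel to be installed.
def _example_braess_games(num_vehicles=5):
    braess_graph = create_braess_network(capacity=float(num_vehicles))
    return create_games("A->B", "E->F", num_vehicles, braess_graph,
                        max_time_step=10, time_step_length=0.5)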
def create_augmented_braess_network(capacity):
graph_dict = {
"A": {
"connection": {
"B": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 0
}
},
"location": [0, 0]
},
"B": {
"connection": {
"C": {
"a": 1.0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 1.0
},
"D": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 2.0
}
},
"location": [1, 0]
},
"C": {
"connection": {
"D": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 0.25
},
"E": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 2.0
}
},
"location": [2, 1]
},
"D": {
"connection": {
"E": {
"a": 1,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 1.0
},
"G": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 0.0
}
},
"location": [2, -1]
},
"E": {
"connection": {
"F": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 0.0
}
},
"location": [3, 0]
},
"F": {
"connection": {},
"location": [4, 0]
},
"G": {
"connection": {},
"location": [3, -1]
}
}
adjacency_list = {
key: list(value["connection"].keys())
for key, value in graph_dict.items()
}
bpr_a_coefficient = {}
bpr_b_coefficient = {}
capacity = {}
free_flow_travel_time = {}
for o_node, value_dict in graph_dict.items():
for d_node, section_dict in value_dict["connection"].items():
road_section = dynamic_routing_utils._nodes_to_road_section(
origin=o_node, destination=d_node)
bpr_a_coefficient[road_section] = section_dict["a"]
bpr_b_coefficient[road_section] = section_dict["b"]
capacity[road_section] = section_dict["capacity"]
free_flow_travel_time[road_section] = section_dict[
"free_flow_travel_time"]
node_position = {key: value["location"] for key, value in graph_dict.items()}
return dynamic_routing_utils.Network(
adjacency_list,
node_position=node_position,
bpr_a_coefficient=bpr_a_coefficient,
bpr_b_coefficient=bpr_b_coefficient,
capacity=capacity,
free_flow_travel_time=free_flow_travel_time)
def create_series_parallel_network(num_network_in_series,
time_step_length=1,
capacity=1):
i = 0
origin = "A_0->B_0"
graph_dict = {}
while i < num_network_in_series:
tt_up = random.random() + time_step_length
tt_down = random.random() + time_step_length
graph_dict.update({
f"A_{i}": {
"connection": {
f"B_{i}": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": time_step_length
}
},
"location": [0 + 3 * i, 0]
},
f"B_{i}": {
"connection": {
f"C_{i}": {
"a": 1.0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": tt_up
},
f"D_{i}": {
"a": 1.0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": tt_down
}
},
"location": [1 + 3 * i, 0]
},
f"C_{i}": {
"connection": {
f"A_{i+1}": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": time_step_length
}
},
"location": [2 + 3 * i, 1]
},
f"D_{i}": {
"connection": {
f"A_{i+1}": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": time_step_length
}
},
"location": [2 + 3 * i, -1]
}
})
i += 1
graph_dict[f"A_{i}"] = {
"connection": {
"END": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": time_step_length
}
},
"location": [0 + 3 * i, 0]
}
graph_dict["END"] = {"connection": {}, "location": [1 + 3 * i, 0]}
time_horizon = int(3.0 * (num_network_in_series + 1) / time_step_length)
destination = f"A_{i}->END"
adjacency_list = {
key: list(value["connection"].keys())
for key, value in graph_dict.items()
}
bpr_a_coefficient = {}
bpr_b_coefficient = {}
capacity = {}
free_flow_travel_time = {}
for o_node, value_dict in graph_dict.items():
for d_node, section_dict in value_dict["connection"].items():
road_section = dynamic_routing_utils._nodes_to_road_section(
origin=o_node, destination=d_node)
bpr_a_coefficient[road_section] = section_dict["a"]
bpr_b_coefficient[road_section] = section_dict["b"]
capacity[road_section] = section_dict["capacity"]
free_flow_travel_time[road_section] = section_dict[
"free_flow_travel_time"]
node_position = {key: value["location"] for key, value in graph_dict.items()}
return dynamic_routing_utils.Network(
adjacency_list,
node_position=node_position,
bpr_a_coefficient=bpr_a_coefficient,
bpr_b_coefficient=bpr_b_coefficient,
capacity=capacity,
free_flow_travel_time=free_flow_travel_time
), origin, destination, time_horizon
def create_sioux_falls_network():
sioux_falls_adjacency_list = {}
sioux_falls_node_position = {}
bpr_a_coefficient = {}
bpr_b_coefficient = {}
capacity = {}
free_flow_travel_time = {}
content = open("./SiouxFalls_node.csv", "r").read()
for line in content.split("\n")[1:]:
row = line.split(",")
sioux_falls_node_position[row[0]] = [int(row[1]) / 1e5, int(row[2]) / 1e5]
sioux_falls_node_position[f"bef_{row[0]}"] = [
int(row[1]) / 1e5, int(row[2]) / 1e5
]
sioux_falls_node_position[f"aft_{row[0]}"] = [
int(row[1]) / 1e5, int(row[2]) / 1e5
]
sioux_falls_adjacency_list[f"bef_{row[0]}"] = [row[0]]
sioux_falls_adjacency_list[row[0]] = [f"aft_{row[0]}"]
sioux_falls_adjacency_list[f"aft_{row[0]}"] = []
bpr_a_coefficient[f"{row[0]}->aft_{row[0]}"] = 0.0
bpr_b_coefficient[f"{row[0]}->aft_{row[0]}"] = 1.0
capacity[f"{row[0]}->aft_{row[0]}"] = 0.0
free_flow_travel_time[f"{row[0]}->aft_{row[0]}"] = 0.0
bpr_a_coefficient[f"bef_{row[0]}->{row[0]}"] = 0.0
bpr_b_coefficient[f"bef_{row[0]}->{row[0]}"] = 1.0
capacity[f"bef_{row[0]}->{row[0]}"] = 0.0
free_flow_travel_time[f"bef_{row[0]}->{row[0]}"] = 0.0
content = open("./SiouxFalls_net.csv", "r").read()
for l in content.split("\n")[1:-1]:
_, origin, destination, a0, a1, a2, a3, a4 = l.split(",")
assert all(int(x) == 0 for x in [a1, a2, a3])
sioux_falls_adjacency_list[origin].append(destination)
road_section = f"{origin}->{destination}"
bpr_a_coefficient[road_section] = float(a4)
bpr_b_coefficient[road_section] = 4.0
capacity[road_section] = 1.0
free_flow_travel_time[road_section] = float(a0)
sioux_falls_od_demand = []
content = open("./SiouxFalls_od.csv", "r").read()
for line in content.split("\n")[1:-1]:
row = line.split(",")
sioux_falls_od_demand.append(
dynamic_routing_utils.OriginDestinationDemand(
f"bef_{row[0]}->{row[0]}", f"{row[1]}->aft_{row[1]}", 0,
float(row[2])))
return dynamic_routing_utils.Network(
sioux_falls_adjacency_list,
node_position=sioux_falls_node_position,
bpr_a_coefficient=bpr_a_coefficient,
bpr_b_coefficient=bpr_b_coefficient,
capacity=capacity,
free_flow_travel_time=free_flow_travel_time), sioux_falls_od_demand
def plot_network_n_player_game(g: dynamic_routing_utils.Network,
vehicle_locations=None):
"""Plot the network.
Args:
g: network to plot
vehicle_locations: vehicle location
"""
_, ax = plt.subplots()
o_xs, o_ys, d_xs, d_ys = g.return_list_for_matplotlib_quiver()
ax.quiver(
o_xs,
o_ys,
| np.subtract(d_xs, o_xs) | numpy.subtract |
"""
Module for computing the vector form factor for pi+pi.
"""
from dataclasses import dataclass
from typing import Union
import numpy as np
import numpy.typing as npt
from scipy.special import gamma # type:ignore
from hazma.vector_mediator.form_factors.utils import (
MPI_GEV,
breit_wigner_fw,
breit_wigner_gs,
dhhatds,
gamma_generator,
h,
hhat,
)
@dataclass(frozen=True)
class FormFactorPiPiParameters:
"""
Class for storing the parameters needed to compute the form factor for
V-pi-pi.
"""
omega_mag: complex
omega_phase: complex
omega_mass: complex
omega_width: complex
omega_weight: complex
mass: npt.NDArray[np.float64]
width: npt.NDArray[np.float64]
coup: npt.NDArray[np.complex128]
hres: npt.NDArray[np.float64]
h0: npt.NDArray[np.float64]
dh: npt.NDArray[np.float64]
def compute_pipi_form_factor_parameters(n_max: int = 2000) -> FormFactorPiPiParameters:
"""
Compute the parameters needed for computing the V-pi-pi form factor.
Parameters
----------
n_max: int
Number of resonances to include.
Returns
-------
params: FormFactorPiPiParameters
Parameters of the resonances for the V-pi-pi form factor.
"""
# Set up the parameters.
omega_mag = 0.00187 + 0j
omega_phase = 0.106 + 0j
omega_mass = 0.7824 + 0j
omega_width = 0.00833 + 0j
omega_wgt = 0j
mass = np.zeros(n_max, dtype=np.float64)
width = | np.zeros(n_max, dtype=np.float64) | numpy.zeros |
import numpy as np
import time
from astropy import wcs
from tabulate import tabulate
import astropy.io.fits as fits
import pandas as pd
def setdiff_nd(a1, a2):
"""
    Use numpy to compute the row-wise set difference of two 2-D arrays.
:param a1:
:param a2:
:return:
"""
# a1 = index_value
# a2 = np.array([point_ii_xy])
a1_rows = a1.view([('', a1.dtype)] * a1.shape[1])
a2_rows = a2.view([('', a2.dtype)] * a2.shape[1])
a3 = np.setdiff1d(a1_rows, a2_rows).view(a1.dtype).reshape(-1, a1.shape[1])
return a3
def get_xyz(data):
"""
    :param data: 3D data
    :return: 3D data coordinates (1-based),
        increasing successively along dimensions 1, 2 and 3
    :param data: 2D data
    :return: 2D data coordinates (1-based),
        increasing successively along dimensions 1 and 2
"""
nim = data.ndim
if nim == 3:
size_x, size_y, size_z = data.shape
x_arange = np.arange(1, size_x+1)
y_arange = np.arange(1, size_y+1)
z_arange = np.arange(1, size_z+1)
[xx, yy, zz] = np.meshgrid(x_arange, y_arange, z_arange, indexing='ij')
xyz = np.column_stack([zz.flatten(), yy.flatten(), xx.flatten()])
else:
size_x, size_y = data.shape
x_arange = np.arange(1, size_x + 1)
y_arange = np.arange(1, size_y + 1)
[xx, yy] = np.meshgrid(x_arange, y_arange, indexing='ij')
xyz = np.column_stack([yy.flatten(), xx.flatten()])
return xyz
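# Added illustration: get_xyz enumerates 1-based (y, x) coordinates of a 2-D array and
# setdiff_nd removes the rows of the second array from the first one.
def _example_coord_utils():
    coords = get_xyz(np.zeros((2, 2)))
    assert np.array_equal(coords, np.array([[1, 1], [2, 1], [1, 2], [2, 2]]))
    remaining = setdiff_nd(np.array([[1, 1], [2, 1]]), np.array([[2, 1]]))
    assert np.array_equal(remaining, np.array([[1, 1]]))
    return coords, remaining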
def kc_coord_3d(point_ii_xy, xm, ym, zm, r):
"""
    :param point_ii_xy: coordinates (x, y, z) of the current point
    :param xm: size_x
    :param ym: size_y
    :param zm: size_z
    :param r: neighborhood half-width (window of 2 * r + 1)
    :return:
        coordinates of the points in the r-neighborhood of point_ii_xy
"""
it = point_ii_xy[0]
jt = point_ii_xy[1]
kt = point_ii_xy[2]
xyz_min = np.array([[1, it - r], [1, jt - r], [1, kt - r]])
xyz_min = xyz_min.max(axis=1)
xyz_max = np.array([[xm, it + r], [ym, jt + r], [zm, kt + r]])
xyz_max = xyz_max.min(axis=1)
x_arange = np.arange(xyz_min[0], xyz_max[0] + 1)
y_arange = np.arange(xyz_min[1], xyz_max[1] + 1)
v_arange = np.arange(xyz_min[2], xyz_max[2] + 1)
[p_k, p_i, p_j] = np.meshgrid(x_arange, y_arange, v_arange, indexing='ij')
Index_value = np.column_stack([p_k.flatten(), p_i.flatten(), p_j.flatten()])
Index_value = setdiff_nd(Index_value, np.array([point_ii_xy]))
ordrho_jj = np.matmul(Index_value - 1, | np.array([[1], [xm], [ym * xm]]) | numpy.array |
import numpy as np
import itertools as it
import scipy as sp
import scipy.optimize  # make sp.optimize.newton available below
from mallows_model import *
def weighted_median(sample, ws):
"""
Parameters
----------
    sample: numpy array
        matrix of rankings
    ws: numpy array
        weight of each permutation
    Returns
    -------
    ranking
        weighted median ranking
"""
return borda(sample * ws[:, None])
def max_dist(n):
"""
Parameters
----------
n: int
length of permutations
Returns
-------
int
Maximum distance between permutations of given n length
"""
return n * (n - 1) // 2 # Integer division
def compose(s, p):
"""This function composes two given permutations
Parameters
----------
s: ndarray
The first permutation array
p: ndarray
The second permutation array
Returns
-------
ndarray
The composition of the permutations
"""
return np.array(s[p])
def compose_partial(partial, full):
"""This function composes a partial permutation with an other (full)
Parameters
----------
partial: ndarray
The partial permutation (should be filled with float)
full:
The full permutation (should be filled with integers)
Returns
-------
ndarray
The composition of the permutations
"""
# MANUEL: If full contains np.nan, then it cannot be filled with integers, because np.nan is float.
return [partial[i] if not np.isnan(i) else np.nan for i in full]
def inverse_partial(sigma):
"""This function computes the inverse of a given partial permutation
Parameters
----------
sigma: ndarray
A partial permutation array (filled with float)
Returns
-------
ndarray
The inverse of given partial permutation
"""
inv = np.full(len(sigma), np.nan)
for i,j in enumerate(sigma):
if not np.isnan(j):
inv[int(j)] = i
return inv
def inverse(s):
"""This function computes the inverse of a given permutation
Parameters
----------
s: ndarray
A permutation array
Returns
-------
ndarray
The inverse of given permutation
"""
return np.argsort(s)
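# Added illustration: composing a permutation with its inverse yields the identity.
def _example_compose_inverse():
    s = np.array([2, 0, 3, 1])
    assert np.array_equal(compose(s, inverse(s)), np.arange(4))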
def borda(rankings):
"""This function computes an average permutation given several permutations
Parameters
----------
rankings: ndarray
Matrix of several permutations
Returns
-------
ndarray
The 'average' permutation of permutations given
"""
# MANUEL: Using inverse instead of np.argsort clarifies the intention
consensus = inverse( # give the inverse of result --> sigma_0
inverse( # give the indexes to sort the sum vector --> sigma_0^-1
rankings.sum(axis=0) # sum the indexes of all permutations
)
) #borda
return consensus
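# Added illustration: Borda consensus of a small sample of rankings (each row stores
# the rank of every item, lower is better).
def _example_borda():
    rankings = np.array([[0, 1, 2],
                         [0, 2, 1],
                         [1, 0, 2]])
    assert np.array_equal(borda(rankings), np.array([0, 1, 2]))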
def borda_partial(rankings, w, k):
"""
Parameters
----------
Returns
-------
"""
a, b = rankings, w
a, b = np.nan_to_num(rankings,nan=k), w
aux = a * b
borda = np.argsort(np.argsort(np.nanmean(aux, axis=0))).astype(float)
mask = np.isnan(rankings).all(axis=0)
borda[mask]=np.nan
return borda
def expected_dist_mm(n, theta=None, phi=None):
"""Compute the expected distance, MM under the Kendall's-tau distance
Parameters
----------
n: int
Length of the permutation in the considered model
theta: float
        Real dispersion parameter (optional if phi is given)
    phi: float
        Real dispersion parameter (optional if theta is given)
    Returns
    -------
    float
        The expected distance under the MM
"""
theta, phi = check_theta_phi(theta, phi)
# MANUEL:
# rnge = np.array(range(1,n+1))
rnge = np.arange(1, n + 1)
# exp_j_theta = np.exp(-j * theta)
# exp_dist = (n * n.exp(-theta) / (1 - n.exp(-theta))) - np.sum(j * exp_j_theta / (1 - exp_j_theta)
expected_dist = n * np.exp(-theta) / (1-np.exp(-theta)) - np.sum(rnge * np.exp(-rnge*theta) / (1 - np.exp(-rnge*theta)))
return expected_dist
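# Added sketch: the expected Kendall distance shrinks as the dispersion parameter theta
# grows (stronger concentration around the consensus).  Relies on check_theta_phi
# imported from mallows_model above.
def _example_expected_dist():
    n = 5
    d_weak, d_strong = expected_dist_mm(n, theta=0.1), expected_dist_mm(n, theta=5.0)
    assert 0.0 < d_strong < d_weak < max_dist(n)
    return d_weak, d_strong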
def variance_dist_mm(n, theta=None, phi=None):
"""
Parameters
----------
Returns
-------
"""
theta, phi = check_theta_phi(theta, phi)
rnge = np.array(range(1,n+1))
variance = (phi*n)/(1-phi)**2 - np.sum((pow(phi,rnge) * rnge**2)/(1-pow(phi,rnge))**2)
return variance
def expected_v(n, theta=None, phi=None, k=None):#txapu integrar
"""This function computes the expected decomposition vector
Parameters
----------
n: int
Length of the permutation in the considered model
theta: float
        Real dispersion parameter (optional if phi is given)
    phi: float
        Real dispersion parameter (optional if theta is given)
k: int
Index to which the decomposition vector is needed ???
Returns
-------
ndarray
The expected decomposition vector
"""
theta, phi = check_theta_phi(theta, phi)
if k is None: k = n-1
if type(theta)!=list: theta = np.full(k, theta)
rnge = np.array(range(k))
expected_v = np.exp(-theta[rnge]) / (1-np.exp(-theta[rnge])) - (n-rnge) * np.exp(-(n-rnge)*theta[rnge]) / (1 - np.exp(-(n-rnge)*theta[rnge]))
return expected_v
def variance_v(n, theta=None, phi=None, k=None):
"""
Parameters
----------
Returns
-------
"""
theta, phi = check_theta_phi(theta, phi)
if k is None:
k = n-1
if type(phi)!=list:
phi = np.full(k, phi)
rnge = np.array(range(k))
var_v = phi[rnge]/(1-phi[rnge])**2 - (n-rnge)**2 * phi[rnge]**(n-rnge) / (1-phi[rnge]**(n-rnge))**2
return var_v
def expected_dist_top_k(n, k, theta=None, phi=None):
"""Compute the expected distance for top-k rankings, following
a MM under the Kendall's-tau distance
Parameters
----------
n: int
Length of the permutation in the considered model
theta: float
        Real dispersion parameter (optional if phi is given)
    phi: float
        Real dispersion parameter (optional if theta is given)
    Returns
    -------
    float
        The expected distance under the MM for top-k rankings
"""
theta, phi = check_theta_phi(theta, phi)
rnge = np.array(range(n-k+1,n+1))
expected_dist = k * np.exp(-theta) / (1-np.exp(-theta)) - np.sum(rnge * np.exp(-rnge*theta) / (1 - np.exp(-rnge*theta)))
return expected_dist
def variance_dist_top_k(n, k, theta=None, phi=None):
"""
Compute the variance of the distance for top-k rankings, following
a MM under the Kendall's-tau distance
Parameters
----------
Returns
-------
"""
theta, phi = check_theta_phi(theta, phi)
rnge = np.array(range(n-k+1,n+1))
variance = (phi*k)/(1-phi)**2 - np.sum((pow(phi,rnge) * rnge**2)/(1-pow(phi,rnge))**2)
return variance
def psi_mm(n, theta=None, phi=None):
"""This function computes the normalization constant psi
Parameters
----------
n: int
Length of the permutation in the considered model
theta: float
        Real dispersion parameter (optional if phi is given)
    phi: float
        Real dispersion parameter (optional if theta is given)
Returns
-------
float
The normalization constant psi
"""
rnge = np.array(range(2,n+1))
if theta is not None:
return np.prod((1-np.exp(-theta*rnge))/(1-np.exp(-theta)))
if phi is not None:
return np.prod((1-np.power(phi,rnge))/(1-phi))
theta, phi = check_theta_phi(theta, phi)
# def prob_mode(n, theta):
# """This function computes the probability mode
# Parameters for both Mallows and Generalized Mallows
# ----------
# n: int
# Length of the permutation in the considered model
# theta: float/int/list/numpy array (see theta, params)
# Real dispersion parameter
# Returns
# -------
# float
# The probability mode
# """
# psi = (1 - np.exp(( - n + np.arange(n-1) )*(theta)))/(1 - np.exp( -theta))
# psi = np.prod(psi)
# return np.prod(1.0/psi)
def prob(sigma, sigma0, theta=None,phi=None):
"""This function computes the probability of a permutation given a distance to the consensus
Parameters
----------
    sigma: ndarray
        The permutation whose probability is evaluated
    sigma0: ndarray
        The consensus (central) permutation
    theta: float
        Real dispersion parameter (optional if phi is given)
    phi: float
        Real dispersion parameter (optional if theta is given)
Returns
-------
float
Probability of the permutation
"""
theta, phi = check_theta_phi(theta, phi)
n = len(sigma)
# rnge = np.array(range(n-1))
psi = (1 - np.exp(( - n + np.arange(n-1) )*(theta)))/(1 - np.exp( -theta))
psi = np.prod(psi)
return np.exp(-theta * distance(sigma,sigma0)) / psi
def prob_sample(perms, sigma, theta=None, phi=None):
"""This function computes the probabilities for each permutation of a sample
of several permutations
Parameters
----------
perms: ndarray
The matrix of permutations
sigma: ndarray
Permutation mode
theta: float
        Real dispersion parameter (optional if phi is given)
    phi: float
        Real dispersion parameter (optional if theta is given)
Returns
-------
ndarray
Array of probabilities for each permutation given as input
"""
m, n = perms.shape
theta, phi = check_theta_phi(theta, phi)
rnge = np.array(range(n-1))
psi = (1 - np.exp(( - n + rnge )*(theta)))/(1 - np.exp( -theta))
psi = np.prod(psi)
return np.array([np.exp(-theta*distance(perm, sigma)) / psi for perm in perms])
def fit_mm(rankings, s0=None):
"""This function computes the consensus permutation and the MLE for the
dispersion parameter phi for MM models
Parameters
----------
rankings: ndarray
The matrix of permutations
s0: ndarray, optional
The consensus permutation (default value is None)
Returns
-------
tuple
The ndarray corresponding to s0 the consensus permutation and the
MLE for the dispersion parameter phi
"""
m, n = rankings.shape
if s0 is None: s0 = np.argsort(np.argsort(rankings.sum(axis=0))) #borda
dist_avg = np.mean(np.array([distance(s0, perm) for perm in rankings]))
try:
theta = sp.optimize.newton(mle_theta_mm_f, 0.01, fprime=mle_theta_mm_fdev, args=(n, dist_avg), tol=1.48e-08, maxiter=500, fprime2=None)
except:
if dist_avg == 0.0:
return s0, np.exp(-5)#=phi
print("Error in function: fit_mm. dist_avg=",dist_avg, dist_avg == 0.0)
print(rankings)
print(s0)
raise
return s0, np.exp(-theta)#=phi
# def fit_mm_phi(n, dist_avg):
# """Same as fit_mm but just returns phi ??? Also does not compute dist_avg
# but take it as a parameter
#
# Parameters
# ----------
# n: int
# Dimension of the permutations
# dist_avg: float
# Average distance of the sample (between the consensus and the
# permutations of the consensus)
#
# Returns
# -------
# float
# The MLE for the dispersion parameter phi
# """
# try:
# theta = sp.optimize.newton(mle_theta_mm_f, 0.01, fprime=mle_theta_mm_fdev, args=(n, dist_avg), tol=1.48e-08, maxiter=500, fprime2=None)
# except:
# if dist_avg == 0.0:
# return s0, np.exp(-5)#=phi
# print("error. fit_mm. dist_avg=",dist_avg, dist_avg == 0.0)
# print(rankings)
# print(s0)
# raise
# # theta = - np.log(phi)
# return np.exp(-theta)
def fit_gmm(rankings, s0=None):
"""This function computes the consensus permutation and the MLE for the
dispersion parameters theta_j for GMM models
Parameters
----------
rankings: ndarray
The matrix of permutations
s0: ndarray, optional
The consensus permutation (default value is None)
Returns
-------
tuple
The ndarray corresponding to s0 the consensus permutation and the
MLE for the dispersion parameters theta
"""
m, n = rankings.shape
if s0 is None:
s0 = np.argsort(np.argsort(rankings.sum(axis=0))) #borda
V_avg = np.mean(np.array([ranking_to_v(sigma)[:-1] for sigma in rankings]), axis = 0)
try:
theta = []
for j in range(1, n):
theta_j = sp.optimize.newton(mle_theta_j_gmm_f, 0.01, fprime=mle_theta_j_gmm_fdev, args=(n, j, V_avg[j-1]), tol=1.48e-08, maxiter=500, fprime2=None)
theta.append(theta_j)
except:
print("Error in function fit_gmm")
raise
return s0, theta
def mle_theta_mm_f(theta, n, dist_avg):
"""Computes the derivative of the likelihood
parameter
Parameters
----------
theta: float
The dispersion parameter
n: int
Dimension of the permutations
dist_avg: float
Average distance of the sample (between the consensus and the
permutations of the consensus)
Returns
-------
float
Value of the function for given parameters
"""
aux = 0
rnge = np.array(range(1,n))
aux = np.sum((n-rnge+1)*np.exp(-theta*(n-rnge+1))/(1-np.exp(-theta*(n-rnge+1))))
aux2 = (n-1) / (np.exp( theta ) - 1) - dist_avg
return aux2 - aux
def mle_theta_mm_fdev(theta, n, dist_avg):
"""This function computes the derivative of the function mle_theta_mm_f
given the dispersion parameter and the average distance
Parameters
----------
theta: float
The dispersion parameter
n: int
The dimension of the permutations
dist_avg: float
Average distance of the sample (between the consensus and the
permutations of the consensus)
Returns
-------
float
The value of the derivative of function mle_theta_mm_f for given
parameters
"""
aux = 0
rnge = np.array(range(1, n))
aux = np.sum((n-rnge+1)*(n-rnge+1)*np.exp(-theta*(n-rnge+1))/pow((1 - np.exp(-theta * (n-rnge+1))), 2))
aux2 = (- n + 1) * np.exp( theta ) / pow ((np.exp( theta ) - 1), 2)
return aux2 + aux
def mle_theta_j_gmm_f(theta_j, n, j, v_j_avg):
"""Computes the derivative of the likelihood
parameter theta_j in the GMM
Parameters
----------
theta: float
The jth dispersion parameter theta_j
n: int
Dimension of the permutations
j: int
The position of the theta_j in vector theta of dispersion parameters
v_j_avg: float
jth element of the average decomposition vector over the sample
Returns
-------
float
Value of the function for given parameters
"""
f_1 = np.exp( -theta_j ) / ( 1 - np.exp( -theta_j ) )
f_2 = - ( n - j + 1 ) * np.exp( - theta_j * ( n - j + 1 ) ) / ( 1 - np.exp( - theta_j * ( n - j + 1 ) ) )
return f_1 + f_2 - v_j_avg
def mle_theta_j_gmm_fdev(theta_j, n, j, v_j_avg):
"""This function computes the derivative of the function mle_theta_j_gmm_f
given the jth element of the dispersion parameter and the jth element of the
average decomposition vector
Parameters
----------
theta: float
The jth dispersion parameter theta_j
n: int
Dimension of the permutations
j: int
The position of the theta_j in vector theta of dispersion parameters
v_j_avg: float
jth element of the average decomposition vector over the sample
Returns
-------
float
The value of the derivative of function mle_theta_j_gmm_f for given
parameters
"""
fdev_1 = - np.exp( - theta_j ) / pow( ( 1 - np.exp( -theta_j ) ), 2 )
fdev_2 = pow( n - j + 1, 2 ) * np.exp( - theta_j * ( n - j + 1 ) ) / pow( 1 - np.exp( - theta_j * ( n - j + 1 ) ), 2 )
return fdev_1 + fdev_2
def likelihood_mm(perms, s0, theta):
"""This function computes the log-likelihood for MM model given a matrix of
permutation, the consensus permutation, and the dispersion parameter
Parameters
----------
perms: ndarray
A matrix of permutations
s0: ndarray
The consensus permutation
theta: float
The dispersion parameter
Returns
-------
float
Value of log-likelihood for given parameters
"""
m,n = perms.shape
rnge = np.array(range(2,n+1))
psi = 1.0 / np.prod((1-np.exp(-theta*rnge))/(1-np.exp(-theta)))
probs = np.array([np.log(np.exp(-distance(s0, perm)*theta)/psi) for perm in perms])
# print(probs,m,n)
return probs.sum()
def sample(m, n, k=None, theta=None, phi=None, s0=None):
"""This function generates m permutations (rankings) according
to Mallows Models (if the given parameters are m, n, k/None,
theta/phi: float, s0/None) or Generalized Mallows Models
(if the given parameters are m, theta/phi: ndarray, s0/None).
Moreover, the parameter k allows the function to generate top-k rankings only.
Parameters
----------
m: int
The number of rankings to generate
theta: float or ndarray, optional (if phi given)
The dispersion parameter theta
phi: float or ndarray, optional (if theta given)
The dispersion parameter phi
k: int
number of known positions of items for the rankings
s0: ndarray
The consensus ranking
Returns
-------
list
The rankings generated
"""
if k is not None and n is None:
# MANUEL: If we don't raise an error the program continues which makes debugging difficult.
raise ValueError("Error, n is not given!")
theta, phi = check_theta_phi(theta, phi)
if n is not None: #TODO, n should be always given
theta = np.full(n-1, theta)
n = len(theta) + 1 #TODO, n should be always given
if s0 is None:
s0 = np.array(range(n))
rnge = np.arange(n - 1)
psi = (1 - np.exp(( - n + rnge )*(theta[ rnge ])))/(1 - np.exp( -theta[rnge]))
vprobs = np.zeros((n,n))
for j in range(n-1):
vprobs[j][0] = 1.0/psi[j]
for r in range(1,n-j):
vprobs[j][r] = np.exp( -theta[j] * r ) / psi[j]
sample = []
vs = []
for samp in range(m):
v = [np.random.choice(n,p=vprobs[i,:]) for i in range(n-1)]
v += [0]
ranking = v_to_ranking(v, n)
sample.append(ranking)
sample = np.array([s[s0] for s in sample])
if k is not None:
sample_rankings = np.array([inverse(ordering) for ordering in sample])
sample_rankings = np.array([ran[s0] for ran in sample_rankings])
sample = np.array([[i if i in range(k) else np.nan for i in ranking] for
ranking in sample_rankings])
return sample.squeeze()
def v_to_ranking(v, n):
"""This function computes the corresponding permutation given
a decomposition vector
Parameters
----------
v: ndarray
Decomposition vector, same length as the permutation, last item must be 0
n: int
Length of the permutation
Returns
-------
ndarray
The permutation corresponding to the decomposition vectors
"""
rem = list(range(n))
rank = np.full(n, np.nan)
for i in range(len(v)):
rank[i] = rem[v[i]]
rem.pop(v[i])
return rank
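# Added illustration: v_to_ranking decodes a decomposition vector; v[i] is how many of
# the still-unplaced items are skipped at position i.
def _example_v_to_ranking():
    assert np.array_equal(v_to_ranking([2, 0, 1, 0], 4), np.array([2., 0., 3., 1.]))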
def ranking_to_v(sigma, k=None):
"""This function computes the corresponding decomposition vector given
a permutation
Parameters
----------
sigma: ndarray
A permutation
    k: int, optional
The index to perform the conversion for a partial
top-k list
Returns
-------
ndarray
The decomposition vector corresponding to the permutation. Will be
of length n and finish with 0
"""
n = len(sigma)
if k is not None:
sigma = sigma[:k]
sigma = np.concatenate((sigma,np.array([ | np.float(i) | numpy.float |
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2021 Apple Inc. All Rights Reserved.
#
# Trains a standard/sparse/quantized line/poly chain/bezier curve.
import random
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import curve_utils
import model_logging
import models.networks as models
import utils
from models import modules
from models.builder import Builder
from models.networks import resprune
from models.networks import utils as network_utils
from models.networks import vggprune
from models.quantized_modules import ConvBn2d
def sparse_module_updates(model, training=False, alpha=None, **regime_params):
current_iteration = regime_params["current_iteration"]
warmup_iterations = regime_params["warmup_iterations"]
if alpha is None:
alpha = curve_utils.alpha_sampling(**regime_params)
df = | np.max([1 - current_iteration / warmup_iterations, 0]) | numpy.max |
import os
import glob
import wget
import time
import subprocess
import shlex
import sys
import warnings
import random
from Bio.SeqUtils import seq1
from Bio.PDB.PDBParser import PDBParser
from Bio import AlignIO
from sklearn.base import TransformerMixin
from sklearn.preprocessing import StandardScaler, Normalizer , MinMaxScaler , RobustScaler
from sklearn.decomposition import PCA
sys.path.append('./ProFET/ProFET/feat_extract/')
import FeatureGen
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
import h5py
#PCA and scaler
class NDSRobust(TransformerMixin):
def __init__(self, **kwargs):
self._scaler = RobustScaler(copy=True, **kwargs)
self._orig_shape = None
def fit(self, X, **kwargs):
X = np.array(X)
# Save the original shape to reshape the flattened X later
# back to its original shape
if len(X.shape) > 1:
self._orig_shape = X.shape[1:]
X = self._flatten(X)
self._scaler.fit(X, **kwargs)
return self
def transform(self, X, **kwargs):
X = np.array(X)
X = self._flatten(X)
X = self._scaler.transform(X, **kwargs)
X = self._reshape(X)
return X
def inverse_transform(self, X, **kwargs):
X = np.array(X)
X = self._flatten(X)
X = self._scaler.inverse_transform(X, **kwargs)
X = self._reshape(X)
return X
def _flatten(self, X):
# Reshape X to <= 2 dimensions
if len(X.shape) > 2:
n_dims = np.prod(self._orig_shape)
X = X.reshape(-1, n_dims)
return X
def _reshape(self, X):
# Reshape X back to it's original shape
if len(X.shape) >= 2:
X = X.reshape(-1, *self._orig_shape)
return X
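# Added illustration: NDSRobust scales a stack of 2-D maps feature-wise while
# preserving the original (n_samples, ny, nx) shape.
def _example_ndsrobust():
    stack = np.random.rand(8, 4, 4)
    scaler = NDSRobust()
    scaled = scaler.fit(stack).transform(stack)
    assert scaled.shape == stack.shape
    return scaled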
#ndimensional PCA for arrays
class NDSPCA(TransformerMixin):
def __init__(self, **kwargs):
self._scaler = PCA(copy = True, **kwargs)
self._orig_shape = None
def fit(self, X, **kwargs):
X = np.array(X)
# Save the original shape to reshape the flattened X later
# back to its original shape
if len(X.shape) > 1:
self._orig_shape = X.shape[1:]
X = self._flatten(X)
self._scaler.fit(X, **kwargs)
self.explained_variance_ratio_ = self._scaler.explained_variance_ratio_
self.components_ =self._scaler.components_
return self
def transform(self, X, **kwargs):
X = np.array(X)
X = self._flatten(X)
X = self._scaler.transform(X, **kwargs)
return X
def inverse_transform(self, X, **kwargs):
X = np.array(X)
X = self._flatten(X)
X = self._scaler.inverse_transform(X, **kwargs)
X = self._reshape(X)
return X
def _flatten(self, X):
# Reshape X to <= 2 dimensions
if len(X.shape) > 2:
n_dims = np.prod(self._orig_shape)
X = X.reshape(-1, n_dims)
return X
def _reshape(self, X):
# Reshape X back to it's original shape
if len(X.shape) >= 2:
X = X.reshape(-1, *self._orig_shape)
return X
#fit the components of the output space
#stacked distmats (on the 1st axis)
def fit_y( y , components = 300 , FFT = True ):
if FFT == True:
        #go through a stack of structural distmats. these should be 0 padded to all fit in an array
y = np.stack([ np.fft.rfft2(y[i,:,:]) for i in range(y.shape[0])] )
print(y.shape)
y = np.hstack( [ np.real(y) , np.imag(y)] )
print(y.shape)
ndpca = NDSPCA(n_components=components)
ndpca.fit(y)
print('explained variance')
print(np.sum(ndpca.explained_variance_ratio_))
y = ndpca.transform(y)
scaler0 = RobustScaler( )
scaler0.fit(y)
return scaler0, ndpca
def transform_y(y, scaler0, ndpca, FFT = False):
if FFT == True:
y = np.stack([np.fft.rfft2(y[i,:,:]) for i in range(y.shape[0])])
print(y.shape)
y = np.hstack( [ np.real(y) , np.imag(y)] )
y = ndpca.transform(y)
print(y.shape)
y = scaler0.transform(y)
return y
def inverse_transform_y(y, scaler0, ndpca, FFT=False):
y = scaler0.inverse_transform(y)
y = ndpca.inverse_transform(y)
if FFT == True:
split = int(y.shape[1]/2)
y = np.stack([ np.fft.irfft2(y[i,:split,:] + 1j*y[i,split:,:]) for i in range(y.shape[0]) ] )
return y
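# Added sketch: fit the output-space transform on a random stack of (padded) distance
# matrices, then project it; shapes only, no real structures involved.
def _example_fit_transform_y():
    y = np.random.rand(10, 16, 16)
    scaler0, ndpca = fit_y(y, components=5, FFT=True)
    return transform_y(y, scaler0, ndpca, FFT=True)  # shape (10, 5)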
#fit the components of the in space
#stacked align voxels (on the 1st axis)
def fit_x(x, components = 300, FFT = True):
if FFT == True:
        #go through a stack of align voxels. these should be 0 padded to all fit in an array
x = np.stack([ np.fft.rfftn(x[i,:,:,:]) for i in range(x.shape[0])] )
print(x.shape)
x = np.hstack( [ np.real(x) , np.imag(x)] )
print(x.shape)
ndpca = NDSPCA(n_components=components)
ndpca.fit(x)
print('explained variance')
print(np.sum(ndpca.explained_variance_ratio_))
x = ndpca.transform(x)
scaler0 = RobustScaler( )
scaler0.fit(x)
return scaler0, ndpca
def transform_x(x, scaler0, ndpca, FFT = False):
if FFT == True:
x = np.stack([ np.fft.rfftn(x[i,:,:,:]) for i in range(x.shape[0])] )
print(x.shape)
x = np.hstack( [ np.real(x) , np.imag(x)] )
x = ndpca.transform(x)
print(x.shape)
x = scaler0.transform(x)
return x
#todo -- check the split is happening in the right dimension
def inverse_transform_x(x, scaler0, ndpca, FFT=False):
x = scaler0.inverse_transform(x)
x = ndpca.inverse_transform(x)
if FFT == True:
split = int(x.shape[1]/2)
x = np.stack([ np.fft.irfftn(x[i,:split,:,:] + 1j*x[i,split:,:,:]) for i in range(x.shape[0]) ] )
return x
#get align files
def runclustalo( infile , runIdentifier, path = 'clustalo' , outdir='./', args = '' , verbose = False):
if verbose == True:
print( infile , runIdentifier , path , outdir )
#i usually use filenames that reflect what the pipeline has done until that step
outfile= outdir+runIdentifier+infile+".aln.fasta"
#here we write the command as a string using all the args
args = path + ' -i '+ infile +' -o '+ outfile + ' ' +args
args = shlex.split(args)
if verbose == True:
print(args)
p = subprocess.Popen(args )
#return the opened process and the file it's creating
    #we can also use the communicate function later to grab stdout if we need to
return p , outfile
#TODO - add sequence to align
def alnFileToArray(filename, returnMsa = False):
alnfile = filename
msa = AlignIO.read(alnfile , format = 'fasta')
align_array = np.array([ list(rec.upper()) for rec in msa], np.character)
if returnMsa:
return align_array, msa
return align_array
def alnArrayLineToSequence(align_array, index):
seq = ''
for aa in align_array[index]:
seq += aa.decode('utf-8')
return seq
#generate align list
def generateAlignList(directory = 'alns', returnMsa = False):
aligns = list()
msas = list()
#read through align files to get align arrays list
for file in os.listdir(directory):
if file.endswith('.fasta'):
aligns.append(alnFileToArray(directory+'/'+file, returnMsa)[0])
if returnMsa:
msas.append(alnFileToArray(directory+'/'+file, returnMsa)[1])
if returnMsa:
return aligns, msas
return aligns
#find biggest align shape (for padding) - aligns is a list of arrays
def biggestAlignShape(aligns):
longestProts = 0
mostProts = 0
for aln in aligns:
if aln.shape[0] > mostProts:
mostProts = aln.shape[0]
if aln.shape[1] > longestProts:
longestProts = aln.shape[1]
return mostProts, longestProts
def rundssp( infile , runIdentifier, path = 'dssp' , outdir='./', args = '' , verbose = False):
if verbose == True:
print( infile , runIdentifier , path , outdir )
#i usually use filenames that reflect what the pipeline has done until that step
outfile= outdir+runIdentifier+infile+".dssp"
#here we write the command as a string using all the args
args = path + ' -i '+ infile +' -o '+ outfile + ' ' +args
args = shlex.split(args)
if verbose == True:
print(args)
p = subprocess.Popen(args)
#return the opened process and the file it's creating
    #we can also use the communicate function later to grab stdout if we need to
return p , outfile
def dssp2pandas(dsspstr):
#read the dssp file format into a pandas dataframe
start = False
lines = {}
count = 0
for l in dsspstr.split('\n'):
if '#' in l:
start = True
if start == True:
if count > 0:
lines[count] = dict(zip(header,l.split()))
else:
header = l.split()
count +=1
df = pd.DataFrame.from_dict( lines , orient = 'index')
return df
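#sketch: run dssp on a structure file and load the report straight into a dataframe.
#assumes the dssp binary is on the PATH; rundssp and dssp2pandas are defined above
def dsspFileToDataFrame(pdbfile, runIdentifier = 'run0', outdir = './'):
    p, outfile = rundssp(pdbfile, runIdentifier, outdir = outdir)
    p.wait() #block until dssp has finished writing the report
    with open(outfile) as handle:
        return dssp2pandas(handle.read())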
#structs is a dictionary of all the structures (which are then subdivided into chains)
def parsePDB(structs):
parser = PDBParser()
converter = {'ALA': 'A', 'ASX': 'B', 'CYS': 'C', 'ASP': 'D', 'GLU': 'E', 'PHE': 'F', 'GLY': 'G',
'HIS': 'H', 'ILE': 'I', 'LYS': 'K', 'LEU': 'L', 'MET': 'M', 'ASN': 'N', 'PRO': 'P',
'GLN': 'Q', 'ARG': 'R', 'SER': 'S', 'THR': 'T', 'SEC': 'U', 'VAL': 'V', 'TRP': 'W',
'XAA': 'X', 'TYR': 'Y', 'GLX': 'Z'}
structseqs={}
with open( 'structs.fast' , 'w') as fastout:
for s in structs:
Structure = PDBParser().get_structure(s, structs[s])
for model in Structure:
for chain in model:
res = chain.get_residues()
seq = ''.join([ converter[r.get_resname()] for r in res if r.get_resname() in converter ] )
                    fastout.write('>' + s + '|'+ chain.id +'\n')
                    fastout.write(str( seq ) +'\n' )
structseqs[ s + '|'+ chain.id ] = seq
return structseqs
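#sketch: parsePDB expects `structs` to map an identifier to a PDB file path.
#the directory layout below is only an illustration
def pdbDirToStructs(directory = './pdbs'):
    return { f.split('.')[0] : os.path.join(directory, f)
             for f in os.listdir(directory) if f.endswith('.pdb') }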
def generateProtFeatDict(sequence):
features = FeatureGen.Get_Protein_Feat(sequence)
return features
#generate complete set of dictionary keys generated by protFET
def protFeatKeys(align_array):
dictKeys = set()
for i in range(align_array.shape[0]):
sequence = alnArrayLineToSequence(align_array, i)
#sequence = str(msa[i].seq)
#temporary fix for ProtFeat not supporting B, Z, X
sequence = sequence.replace('B', 'D')
sequence = sequence.replace('Z', 'E')
sequence = sequence.replace('X', 'A')
sequence = sequence.replace('.', '')
sequence = sequence.replace('-','')
dictKeys = dictKeys.union(set(generateProtFeatDict(sequence).keys()) - dictKeys)
return dictKeys
#generate ProtFET array for given align (maxKeys: all keys of the feature dictionary, over the entire set)
def alignToProtFeat(align_array, dictKeys):
#generate 2d array of ProtFET features for each sequence in align
align_features = np.zeros((align_array.shape[0], len(dictKeys)), dtype=float)
missingFeatures = set()
for i in range(align_array.shape[0]):
sequence = alnArrayLineToSequence(align_array, i)
#temporary fix for ProtFeat not supporting B, Z, X
sequence = sequence.replace('B', 'D')
sequence = sequence.replace('Z', 'E')
sequence = sequence.replace('X', 'A')
sequence = sequence.replace('.', '')
sequence = sequence.replace('-','')
featuresDict = generateProtFeatDict(sequence)
missingFeatures = dictKeys - set(featuresDict.keys())
for newKey in missingFeatures:
featuresDict[newKey] = float(0)
features = np.array(list(featuresDict.values()))
align_features[i,:] = features
return align_features
#generate array of ProtFeat features for all aligns
def protFeatArrays(aligns):
maxKeys = set()
mostProts = biggestAlignShape(aligns)[0]
#build set of all keys used in the set
for i in range(len(aligns)):
maxKeys = maxKeys.union(protFeatKeys(aligns[i]) - maxKeys)
setFeatures = np.zeros((len(aligns), mostProts, len(maxKeys)))
for i in range(len(aligns)):
        #np.append returns a new array that was being discarded; fill the preallocated array instead
        setFeatures[i, :aligns[i].shape[0], :] = alignToProtFeat(aligns[i], maxKeys)
return setFeatures
def generateGapMatrix(align_array):
gap_array = np.array([[1 if (align_array[i][j] == b'.' or align_array[i][j] == b'-') else 0 for j in range(align_array.shape[1])] for i in range(align_array.shape[0])])
return gap_array
def generateAlignVoxel(align_array, propAmount = 12):
align_prop_array = np.zeros((align_array.shape[0], align_array.shape[1], propAmount + 1), dtype=float)
gap_array = generateGapMatrix(align_array)
for i in range(align_array.shape[0]):
        align_prop_array[i,:,:propAmount] = [[properties[prop][bstring] for prop in numerical] for bstring in align_array[i]]
        align_prop_array[i,:,propAmount] = gap_array[i,:]
return align_prop_array
#generate 4D array of stacked 3D voxels for FFT (and PCA)
def generateVoxelArray(aligns, propAmount = 12):
#find biggest align_array (the depth of the voxel is fixed by the number of properties)
mostProts, longestProts = biggestAlignShape(aligns)
#pad all aligns (with 'b'.) to be the same size
for i in range(len(aligns)):
padded = np.full((mostProts, longestProts), b'.')
padded[:aligns[i].shape[0],:aligns[i].shape[1]] = aligns[i]
aligns[i] = padded
#generate voxel array
voxels = np.zeros((len(aligns), mostProts, longestProts, propAmount + 1))
for i in range(len(aligns)):
voxels[i, :, :, :] = generateAlignVoxel(aligns[i])
return voxels
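#sketch tying the steps together: read every alignment in a directory and build the
#padded property voxel stack that the FFT/PCA helpers above operate on
def alignsDirToVoxels(directory = 'alns', propAmount = 12):
    aligns = generateAlignList(directory = directory)
    return generateVoxelArray(aligns, propAmount = propAmount)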
#builds a dictionary of distmats in the set - structs is a dictionary of all the structures (which are then subdivided into chains)
def PDBToDistmat(structs, show = False):
distances = {}
for s in structs:
Structure = PDBParser().get_structure(s, structs[s])
distances[s] = {}
for model in Structure:
for chain in model:
res = [r for r in chain.get_residues()]
distmat = [ [res2['CA'] - res1['CA'] if 'CA' in res1 and 'CA' in res2 and i > j else 0 for i,res1 in enumerate(res)] for j,res2 in enumerate(res)]
distmat = np.array(distmat)
distmat+= distmat.T
distances[s][chain] = distmat
if show:
for s in distances:
print(s)
for c in distances[s]:
sns.heatmap(distances[s][c])
plt.show()
return distances
#builds 3D array of all distmats in the set
def distmatDictToArray(distances):
#make list of proteins, containing list of distance arrays for each chain
protChainsList = list()
chainDistArrayList = list()
for protein in distances:
for chain in distances[protein]:
distArray = np.array(distances[protein][chain])
if np.sum(distArray) != 0: #if we leave empty chains, the pca's variance calculations don't work (division by 0)
chainDistArrayList.append(distArray)
protChainsList.append(chainDistArrayList)
chainDistArrayList = list()
#preserve original shape before flattening (not needed for now, but might be useful later)
chainAmounts = np.zeros(len(protChainsList), dtype=int)
for i in range(len(protChainsList)):
chainAmounts[i] = len(protChainsList[i])
#flatten 2D list into 1D list
arrayList = list()
[[arrayList.append(protChainsList[i][j]) for j in range(chainAmounts[i])] for i in range(len(protChainsList))]
#find size of the largest distmat
maxX, maxY = biggestAlignShape(arrayList)
#pad the arrays so they're all the same size
for i in range(len(arrayList)):
        padded = np.zeros((maxX, maxY))
import argparse
import numpy as np
from PIL import Image
from noize.noise import exponential, salt_and_pepper, rayleigh, gaussian, erlang, periodic
CMD_PER = "periodic"
CMD_SP = "salt-and-pepper"
CMD_GSS = "gaussian"
CMD_RAY = "rayleigh"
CMD_ER = "erlang"
CMD_EXP = "exponential"
CMD_UNF = "uniform"
def apply_cmd(args: argparse.Namespace) -> None:
img = Image.open(args.img)
if args.command == CMD_PER:
noisy_im = periodic(np.array(img), args.mode, args.angle, args.wavelength)
elif args.command == CMD_SP:
noisy_im = salt_and_pepper(np.array(img), args.probability, args.seed)
elif args.command == CMD_GSS:
noisy_im = gaussian(np.array(img), args.mean, args.var, args.seed)
elif args.command == CMD_RAY:
        # the original call was cut off here; the scale/seed argument names are assumed
        noisy_im = rayleigh(np.array(img), args.scale, args.seed)
# This file is part of siti_tools
#
# MIT License
#
# siti_tools, Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
from typing import Generator
import numpy as np
from enum import Enum
import av
class FileFormat(Enum):
YUV420P = 1
def read_yuv(
input_file: str,
width: int,
height: int,
file_format=FileFormat.YUV420P,
full_range=False,
) -> Generator[np.ndarray, None, None]:
"""Read a YUV420p file and yield the per-frame Y data
Args:
input_file (str): Input file path
width (int): Width in pixels
height (int): Height in pixels
file_format (str, optional): The input file format. Defaults to FileFormat.YUV420P.
full_range (bool, optional): Whether to assume full range input. Defaults to False.
Raises:
NotImplementedError: If a wrong file format is chosen
Yields:
np.ndarray: The frame data, integer
"""
# TODO: add support for other YUV types
if file_format != FileFormat.YUV420P:
raise NotImplementedError("Other file formats are not yet implemented!")
# get the number of frames
file_size = os.path.getsize(input_file)
num_frames = file_size // (width * height * 3 // 2)
with open(input_file, "rb") as in_f:
for _ in range(num_frames):
y_data = (
np.frombuffer(in_f.read((width * height)), dtype=np.uint8)
.reshape((height, width))
.astype("int")
)
# read U and V components, but skip
in_f.read((width // 2) * (height // 2) * 2)
# in case we need the data later, you can uncomment this:
# u_data = (
# np.frombuffer(in_f.read(((width // 2) * (height // 2))), dtype=np.uint8)
# .reshape((height // 2, width // 2))
# .astype("int")
# )
# v_data = (
# np.frombuffer(in_f.read(((width // 2) * (height // 2))), dtype=np.uint8)
# .reshape((height // 2, width // 2))
# .astype("int")
# )
if not full_range:
# check if we don't actually exceed minimum range
                if np.min(y_data) < 16 or np.max(y_data) > 235:
                    # reconstructed sanity check (the original was cut off here);
                    # 16 and 235 are the nominal limits of limited-range 8-bit Y data
                    raise RuntimeError(
                        "Input appears to exceed the limited range; "
                        "set full_range=True if the file is full range"
                    )
            yield y_data
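# Sketch: iterate over the luma planes of a 1280x720 YUV420p file and print the
# per-frame average; the file name and resolution are only an illustration.
def print_average_luma(path="input_1280x720.yuv"):
    for frame in read_yuv(path, 1280, 720):
        print(frame.mean())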
"""
Tests for the mgrit class
"""
import numpy as np
from pymgrit.core.mgrit import Mgrit
from pymgrit.heat.heat_1d import Heat1D
def rhs(x, t):
"""
Right-hand side of 1D heat equation example problem at a given space-time point (x,t)
:param x: spatial grid point
:param t: time point
:return: right-hand side of 1D heat equation example problem at point (x,t)
"""
return - np.sin(np.pi * x) * (np.sin(t) - 1 * np.pi ** 2 * np.cos(t))
def init_cond(x):
"""
Initial condition of 1D heat equation example,
u(x,0) = sin(pi*x)
:param x: spatial grid point
:return: initial condition of 1D heat equation example problem
"""
return np.sin(np.pi * x)
def test_split_into():
"""
Test the function split_into
"""
heat0 = Heat1D(x_start=0, x_end=2, nx=5, a=1, rhs=rhs, init_cond=init_cond, t_start=0, t_stop=2,
nt=2 ** 2 + 1)
result = np.array([4, 3, 3])
mgrit = Mgrit(problem=[heat0], transfer=[], nested_iteration=False)
np.testing.assert_equal(result, mgrit.split_into(10, 3))
def test_split_points():
"""
Test the function split points
"""
heat0 = Heat1D(x_start=0, x_end=2, nx=5, a=1, rhs=rhs, init_cond=init_cond, t_start=0, t_stop=2,
nt=2 ** 2 + 1)
result_proc0 = (4, 0)
result_proc1 = (3, 4)
result_proc2 = (3, 7)
mgrit = Mgrit(problem=[heat0], nested_iteration=False)
np.testing.assert_equal(result_proc0, mgrit.split_points(10, 3, 0))
np.testing.assert_equal(result_proc1, mgrit.split_points(10, 3, 1))
np.testing.assert_equal(result_proc2, mgrit.split_points(10, 3, 2))
def test_heat_equation_run():
"""
Test one run for the heat equation
"""
heat0 = Heat1D(x_start=0, x_end=2, nx=5, a=1, rhs=rhs, init_cond=init_cond, t_start=0, t_stop=2, nt=65)
heat1 = Heat1D(x_start=0, x_end=2, nx=5, a=1, rhs=rhs, init_cond=init_cond, t_start=0, t_stop=2, nt=17)
heat2 = Heat1D(x_start=0, x_end=2, nx=5, a=1, rhs=rhs, init_cond=init_cond, t_start=0, t_stop=2, nt=5)
problem = [heat0, heat1, heat2]
mgrit = Mgrit(problem=problem, cf_iter=1, nested_iteration=True, max_iter=2, random_init_guess=False)
res = mgrit.solve()
result_conv = np.array([0.00267692, 0.00018053])
np.testing.assert_almost_equal(result_conv, res['conv'])
def test_time_stepping():
heat0 = Heat1D(x_start=0, x_end=2, nx=5, a=1, rhs=rhs, init_cond=init_cond, t_start=0, t_stop=2, nt=65)
mgrit = Mgrit(problem=[heat0], cf_iter=1, nested_iteration=True, max_iter=2, random_init_guess=False)
res = mgrit.solve()
result_conv = np.array([])
np.testing.assert_almost_equal(result_conv, res['conv'])
def test_setup_points_and_comm_info():
"""
Test for the function setup_points_and_comm_info
"""
heat0 = Heat1D(x_start=0, x_end=2, nx=5, a=1, rhs=rhs, init_cond=init_cond, t_start=0, t_stop=2, nt=65)
heat1 = Heat1D(x_start=0, x_end=2, nx=5, a=1, rhs=rhs, init_cond=init_cond, t_start=0, t_stop=2, nt=17)
heat2 = Heat1D(x_start=0, x_end=2, nx=5, a=1, rhs=rhs, init_cond=init_cond, t_start=0, t_stop=2, nt=5)
problem = [heat0, heat1, heat2]
mgrit = Mgrit(problem=problem, cf_iter=1, nested_iteration=True, max_iter=2)
size = 7
cpts = []
comm_front = []
comm_back = []
block_size_this_lvl = []
index_local = []
index_local_c = []
index_local_f = []
first_is_c_point = []
first_is_f_point = []
last_is_c_point = []
last_is_f_point = []
send_to = []
get_from = []
for i in range(size):
mgrit.comm_time_size = size
mgrit.comm_time_rank = i
mgrit.int_start = 0 # First time points of process interval
mgrit.int_stop = 0 # Last time points of process interval
mgrit.cpts = [] # C-points per process and level corresponding to complete time interval
mgrit.comm_front = [] # Communication inside F-relax per MGRIT level
mgrit.comm_back = [] # Communication inside F-relax per MGRIT level
mgrit.block_size_this_lvl = [] # Block size per process and level with ghost point
mgrit.index_local_c = [] # Local indices of C-Points
mgrit.index_local_f = [] # Local indices of F-Points
mgrit.index_local = [] # Local indices of all points
mgrit.first_is_f_point = [] # Communication after C-relax
mgrit.first_is_c_point = [] # Communication after F-relax
mgrit.last_is_f_point = [] # Communication after F-relax
mgrit.last_is_c_point = [] # Communication after C-relax
mgrit.send_to = []
mgrit.get_from = []
for lvl in range(mgrit.lvl_max):
mgrit.t.append(np.copy(mgrit.problem[lvl].t))
mgrit.setup_points_and_comm_info(lvl=lvl)
cpts.append(mgrit.cpts)
comm_front.append(mgrit.comm_front)
comm_back.append(mgrit.comm_back)
block_size_this_lvl.append(mgrit.block_size_this_lvl)
index_local.append(mgrit.index_local)
index_local_c.append(mgrit.index_local_c)
index_local_f.append(mgrit.index_local_f)
first_is_c_point.append(mgrit.first_is_c_point)
first_is_f_point.append(mgrit.first_is_f_point)
last_is_c_point.append(mgrit.last_is_c_point)
last_is_f_point.append(mgrit.last_is_f_point)
send_to.append(mgrit.send_to)
get_from.append(mgrit.get_from)
    test_cpts = [[np.array([0, 4, 8]), np.array([0]), np.array([0])]]  # expected values for the remaining ranks/levels were truncated
import copy
from numbers import Real, Integral
import os
import numpy as np
import h5py
from scipy.interpolate import interp1d
from scipy.integrate import simps
from scipy.special import eval_legendre
import openmc
import openmc.mgxs
from openmc.checkvalue import check_type, check_value, check_greater_than, \
check_iterable_type, check_less_than, check_filetype_version
# Supported incoming particle MGXS angular treatment representations
_REPRESENTATIONS = ['isotropic', 'angle']
# Supported scattering angular distribution representations
_SCATTER_TYPES = ['tabular', 'legendre', 'histogram']
# List of MGXS indexing schemes
_XS_SHAPES = ["[G][G'][Order]", "[G]", "[G']", "[G][G']", "[DG]", "[DG][G]",
"[DG][G']", "[DG][G][G']"]
# Number of mu points for conversion between scattering formats
_NMU = 257
# Filetype name of the MGXS Library
_FILETYPE_MGXS_LIBRARY = 'mgxs'
# Current version of the MGXS Library Format
_VERSION_MGXS_LIBRARY = 1
class XSdata(object):
"""A multi-group cross section data set providing all the
multi-group data necessary for a multi-group OpenMC calculation.
Parameters
----------
name : str
Name of the mgxs data set.
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure
representation : {'isotropic', 'angle'}, optional
Method used in generating the MGXS (isotropic or angle-dependent flux
weighting). Defaults to 'isotropic'
temperatures : Iterable of float
Temperatures (in units of Kelvin) of the provided datasets. Defaults
to a single temperature at 294K.
num_delayed_groups : int
Number of delayed groups
Attributes
----------
name : str
Unique identifier for the xsdata object
atomic_weight_ratio : float
Atomic weight ratio of an isotope. That is, the ratio of the mass
of the isotope to the mass of a single neutron.
temperatures : numpy.ndarray
Temperatures (in units of Kelvin) of the provided datasets. Defaults
to a single temperature at 294K.
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure
num_delayed_groups : int
Num delayed groups
fissionable : bool
Whether or not this is a fissionable data set.
scatter_format : {'legendre', 'histogram', or 'tabular'}
Angular distribution representation (legendre, histogram, or tabular)
order : int
Either the Legendre order, number of bins, or number of points used to
describe the angular distribution associated with each group-to-group
transfer probability.
representation : {'isotropic', 'angle'}
Method used in generating the MGXS (isotropic or angle-dependent flux
weighting).
num_azimuthal : int
Number of equal width angular bins that the azimuthal angular domain is
subdivided into. This only applies when :attr:`XSdata.representation`
is "angle".
num_polar : int
Number of equal width angular bins that the polar angular domain is
subdivided into. This only applies when :attr:`XSdata.representation`
is "angle".
total : list of numpy.ndarray
Group-wise total cross section.
absorption : list of numpy.ndarray
Group-wise absorption cross section.
scatter_matrix : list of numpy.ndarray
Scattering moment matrices presented with the columns representing
incoming group and rows representing the outgoing group. That is,
down-scatter will be above the diagonal of the resultant matrix.
multiplicity_matrix : list of numpy.ndarray
Ratio of neutrons produced in scattering collisions to the neutrons
which undergo scattering collisions; that is, the multiplicity provides
the code with a scaling factor to account for neutrons produced in
(n,xn) reactions.
fission : list of numpy.ndarray
Group-wise fission cross section.
kappa_fission : list of numpy.ndarray
Group-wise kappa_fission cross section.
chi : list of numpy.ndarray
Group-wise fission spectra ordered by increasing group index (i.e.,
fast to thermal). This attribute should be used if making the common
approximation that the fission spectra does not depend on incoming
energy. If the user does not wish to make this approximation, then
this should not be provided and this information included in the
:attr:`XSdata.nu_fission` attribute instead.
chi_prompt : list of numpy.ndarray
Group-wise prompt fission spectra ordered by increasing group index
(i.e., fast to thermal). This attribute should be used if chi from
prompt and delayed neutrons is being set separately.
chi_delayed : list of numpy.ndarray
Group-wise delayed fission spectra ordered by increasing group index
(i.e., fast to thermal). This attribute should be used if chi from
prompt and delayed neutrons is being set separately.
nu_fission : list of numpy.ndarray
Group-wise fission production cross section vector (i.e., if ``chi`` is
provided), or is the group-wise fission production matrix.
prompt_nu_fission : list of numpy.ndarray
Group-wise prompt fission production cross section vector.
delayed_nu_fission : list of numpy.ndarray
Group-wise delayed fission production cross section vector.
beta : list of numpy.ndarray
Delayed-group-wise delayed neutron fraction cross section vector.
decay_rate : list of numpy.ndarray
Delayed-group-wise decay rate vector.
inverse_velocity : list of numpy.ndarray
Inverse of velocity, in units of sec/cm.
xs_shapes : dict of iterable of int
Dictionary with keys of _XS_SHAPES and iterable of int values with the
corresponding shapes where "Order" corresponds to the pn scattering
order, "G" corresponds to incoming energy group, "G'" corresponds to
outgoing energy group, and "DG" corresponds to delayed group.
Notes
-----
The parameters containing cross section data have dimensionalities which
depend upon the value of :attr:`XSdata.representation` as well as the
number of Legendre or other angular dimensions as described by
:attr:`XSdata.order`. The :attr:`XSdata.xs_shapes` are provided to obtain
the dimensionality of the data for each temperature.
The following are cross sections which should use each of the properties.
Note that some cross sections can be input in more than one shape so they
are listed multiple times:
[G][G'][Order]: scatter_matrix
[G]: total, absorption, fission, kappa_fission, nu_fission,
prompt_nu_fission, delayed_nu_fission, inverse_velocity
[G']: chi, chi_prompt, chi_delayed
[G][G']: multiplicity_matrix, nu_fission, prompt_nu_fission
[DG]: beta, decay_rate
[DG][G]: delayed_nu_fission, beta, decay_rate
[DG][G']: chi_delayed
[DG][G][G']: delayed_nu_fission
"""
def __init__(self, name, energy_groups, temperatures=[294.],
representation='isotropic', num_delayed_groups=0):
# Initialize class attributes
self.name = name
self.energy_groups = energy_groups
self.num_delayed_groups = num_delayed_groups
self.temperatures = temperatures
self.representation = representation
self._atomic_weight_ratio = None
self._fissionable = False
self._scatter_format = 'legendre'
self._order = None
self._num_polar = None
self._num_azimuthal = None
self._total = len(temperatures) * [None]
self._absorption = len(temperatures) * [None]
self._scatter_matrix = len(temperatures) * [None]
self._multiplicity_matrix = len(temperatures) * [None]
self._fission = len(temperatures) * [None]
self._nu_fission = len(temperatures) * [None]
self._prompt_nu_fission = len(temperatures) * [None]
self._delayed_nu_fission = len(temperatures) * [None]
self._kappa_fission = len(temperatures) * [None]
self._chi = len(temperatures) * [None]
self._chi_prompt = len(temperatures) * [None]
self._chi_delayed = len(temperatures) * [None]
self._beta = len(temperatures) * [None]
self._decay_rate = len(temperatures) * [None]
self._inverse_velocity = len(temperatures) * [None]
self._xs_shapes = None
def __deepcopy__(self, memo):
existing = memo.get(id(self))
# If this is the first time we have tried to copy this object, copy it
if existing is None:
clone = type(self).__new__(type(self))
clone._name = self.name
clone._energy_groups = copy.deepcopy(self.energy_groups, memo)
clone._num_delayed_groups = self.num_delayed_groups
clone._temperatures = copy.deepcopy(self.temperatures, memo)
clone._representation = self.representation
clone._atomic_weight_ratio = self._atomic_weight_ratio
clone._fissionable = self._fissionable
clone._scatter_format = self._scatter_format
clone._order = self._order
clone._num_polar = self._num_polar
clone._num_azimuthal = self._num_azimuthal
clone._total = copy.deepcopy(self._total, memo)
clone._absorption = copy.deepcopy(self._absorption, memo)
clone._scatter_matrix = copy.deepcopy(self._scatter_matrix, memo)
clone._multiplicity_matrix = \
copy.deepcopy(self._multiplicity_matrix, memo)
clone._fission = copy.deepcopy(self._fission, memo)
clone._nu_fission = copy.deepcopy(self._nu_fission, memo)
clone._prompt_nu_fission = \
copy.deepcopy(self._prompt_nu_fission, memo)
clone._delayed_nu_fission = \
copy.deepcopy(self._delayed_nu_fission, memo)
clone._kappa_fission = copy.deepcopy(self._kappa_fission, memo)
clone._chi = copy.deepcopy(self._chi, memo)
clone._chi_prompt = copy.deepcopy(self._chi_prompt, memo)
clone._chi_delayed = copy.deepcopy(self._chi_delayed, memo)
clone._beta = copy.deepcopy(self._beta, memo)
clone._decay_rate = copy.deepcopy(self._decay_rate, memo)
clone._inverse_velocity = \
copy.deepcopy(self._inverse_velocity, memo)
clone._xs_shapes = copy.deepcopy(self._xs_shapes, memo)
memo[id(self)] = clone
return clone
# If this object has been copied before, return the first copy made
else:
return existing
@property
def name(self):
return self._name
@property
def energy_groups(self):
return self._energy_groups
@property
def num_delayed_groups(self):
return self._num_delayed_groups
@property
def representation(self):
return self._representation
@property
def atomic_weight_ratio(self):
return self._atomic_weight_ratio
@property
def fissionable(self):
return self._fissionable
@property
def temperatures(self):
return self._temperatures
@property
def scatter_format(self):
return self._scatter_format
@property
def order(self):
return self._order
@property
def num_polar(self):
return self._num_polar
@property
def num_azimuthal(self):
return self._num_azimuthal
@property
def total(self):
return self._total
@property
def absorption(self):
return self._absorption
@property
def scatter_matrix(self):
return self._scatter_matrix
@property
def multiplicity_matrix(self):
return self._multiplicity_matrix
@property
def fission(self):
return self._fission
@property
def nu_fission(self):
return self._nu_fission
@property
def prompt_nu_fission(self):
return self._prompt_nu_fission
@property
def delayed_nu_fission(self):
return self._delayed_nu_fission
@property
def kappa_fission(self):
return self._kappa_fission
@property
def chi(self):
return self._chi
@property
def chi_prompt(self):
return self._chi_prompt
@property
def chi_delayed(self):
return self._chi_delayed
@property
def num_orders(self):
if self._order is not None:
if self._scatter_format in (None, 'legendre'):
return self._order + 1
else:
return self._order
@property
def xs_shapes(self):
if self._xs_shapes is None:
self._xs_shapes = {}
self._xs_shapes["[G]"] = (self.energy_groups.num_groups,)
self._xs_shapes["[G']"] = (self.energy_groups.num_groups,)
self._xs_shapes["[G][G']"] = (self.energy_groups.num_groups,
self.energy_groups.num_groups)
self._xs_shapes["[DG]"] = (self.num_delayed_groups,)
self._xs_shapes["[DG][G]"] = (self.num_delayed_groups,
self.energy_groups.num_groups)
self._xs_shapes["[DG][G']"] = (self.num_delayed_groups,
self.energy_groups.num_groups)
self._xs_shapes["[DG][G][G']"] = (self.num_delayed_groups,
self.energy_groups.num_groups,
self.energy_groups.num_groups)
self._xs_shapes["[G][G'][Order]"] \
= (self.energy_groups.num_groups,
self.energy_groups.num_groups, self.num_orders)
# If representation is by angle prepend num polar and num azim
if self.representation == 'angle':
for key, shapes in self._xs_shapes.items():
self._xs_shapes[key] \
= (self.num_polar, self.num_azimuthal) + shapes
return self._xs_shapes
@name.setter
def name(self, name):
check_type('name for XSdata', name, str)
self._name = name
@energy_groups.setter
def energy_groups(self, energy_groups):
check_type('energy_groups', energy_groups, openmc.mgxs.EnergyGroups)
if energy_groups.group_edges is None:
msg = 'Unable to assign an EnergyGroups object ' \
'with uninitialized group edges'
raise ValueError(msg)
self._energy_groups = energy_groups
@num_delayed_groups.setter
def num_delayed_groups(self, num_delayed_groups):
check_type('num_delayed_groups', num_delayed_groups, Integral)
check_less_than('num_delayed_groups', num_delayed_groups,
openmc.mgxs.MAX_DELAYED_GROUPS, equality=True)
check_greater_than('num_delayed_groups', num_delayed_groups, 0,
equality=True)
self._num_delayed_groups = num_delayed_groups
@representation.setter
def representation(self, representation):
check_value('representation', representation, _REPRESENTATIONS)
self._representation = representation
@atomic_weight_ratio.setter
def atomic_weight_ratio(self, atomic_weight_ratio):
check_type('atomic_weight_ratio', atomic_weight_ratio, Real)
check_greater_than('atomic_weight_ratio', atomic_weight_ratio, 0.0)
self._atomic_weight_ratio = atomic_weight_ratio
@temperatures.setter
def temperatures(self, temperatures):
check_iterable_type('temperatures', temperatures, Real)
self._temperatures = np.array(temperatures)
@scatter_format.setter
def scatter_format(self, scatter_format):
check_value('scatter_format', scatter_format, _SCATTER_TYPES)
self._scatter_format = scatter_format
@order.setter
def order(self, order):
check_type('order', order, Integral)
check_greater_than('order', order, 0, equality=True)
self._order = order
@num_polar.setter
def num_polar(self, num_polar):
check_type('num_polar', num_polar, Integral)
check_greater_than('num_polar', num_polar, 0)
self._num_polar = num_polar
@num_azimuthal.setter
def num_azimuthal(self, num_azimuthal):
check_type('num_azimuthal', num_azimuthal, Integral)
check_greater_than('num_azimuthal', num_azimuthal, 0)
self._num_azimuthal = num_azimuthal
def add_temperature(self, temperature):
"""This method re-sizes the attributes of this XSdata object so that it
        can accommodate an additional temperature. Note that the set_* methods
will still need to be executed.
Parameters
----------
temperature : float
Temperature (in units of Kelvin) of the provided dataset.
"""
check_type('temperature', temperature, Real)
        temp_store = self.temperatures.tolist()
        temp_store.append(temperature)
        self.temperatures = temp_store
self._total.append(None)
self._absorption.append(None)
self._scatter_matrix.append(None)
self._multiplicity_matrix.append(None)
self._fission.append(None)
self._nu_fission.append(None)
self._prompt_nu_fission.append(None)
self._delayed_nu_fission.append(None)
self._kappa_fission.append(None)
self._chi.append(None)
self._chi_prompt.append(None)
self._chi_delayed.append(None)
self._beta.append(None)
self._decay_rate.append(None)
self._inverse_velocity.append(None)
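        # Example flow (sketch; the group structure and values below are placeholders,
        # not real cross sections):
        #     xsdata = XSdata('fuel', groups, temperatures=[294.])
        #     xsdata.set_total([1.0, 2.0], temperature=294.)
        #     xsdata.add_temperature(600.)
        #     xsdata.set_total([1.1, 2.1], temperature=600.)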
def set_total(self, total, temperature=294.):
"""This method sets the cross section for this XSdata object at the
provided temperature.
Parameters
----------
total: np.ndarray
Total Cross Section
temperature : float
Temperature (in Kelvin) of the data. Defaults to room temperature
(294K).
See also
--------
openmc.mgxs_library.set_total_mgxs()
"""
# Get the accepted shapes for this xs
shapes = [self.xs_shapes["[G]"]]
# Convert to a numpy array so we can easily get the shape for checking
total = np.asarray(total)
check_value('total shape', total.shape, shapes)
check_type('temperature', temperature, Real)
check_value('temperature', temperature, self.temperatures)
i = np.where(self.temperatures == temperature)[0][0]
self._total[i] = total
def set_absorption(self, absorption, temperature=294.):
"""This method sets the cross section for this XSdata object at the
provided temperature.
Parameters
----------
absorption: np.ndarray
Absorption Cross Section
temperature : float
Temperature (in Kelvin) of the data. Defaults to room temperature
(294K).
See also
--------
openmc.mgxs_library.set_absorption_mgxs()
"""
# Get the accepted shapes for this xs
shapes = [self.xs_shapes["[G]"]]
# Convert to a numpy array so we can easily get the shape for checking
absorption = np.asarray(absorption)
check_value('absorption shape', absorption.shape, shapes)
check_type('temperature', temperature, Real)
check_value('temperature', temperature, self.temperatures)
i = np.where(self.temperatures == temperature)[0][0]
self._absorption[i] = absorption
def set_fission(self, fission, temperature=294.):
"""This method sets the cross section for this XSdata object at the
provided temperature.
Parameters
----------
fission: np.ndarray
Fission Cross Section
temperature : float
Temperature (in Kelvin) of the data. Defaults to room temperature
(294K).
See also
--------
openmc.mgxs_library.set_fission_mgxs()
"""
# Get the accepted shapes for this xs
shapes = [self.xs_shapes["[G]"]]
# Convert to a numpy array so we can easily get the shape for checking
fission = np.asarray(fission)
check_value('fission shape', fission.shape, shapes)
check_type('temperature', temperature, Real)
check_value('temperature', temperature, self.temperatures)
i = np.where(self.temperatures == temperature)[0][0]
self._fission[i] = fission
if np.sum(fission) > 0.0:
self._fissionable = True
def set_kappa_fission(self, kappa_fission, temperature=294.):
"""This method sets the cross section for this XSdata object at the
provided temperature.
Parameters
----------
kappa_fission: np.ndarray
Kappa-Fission Cross Section
temperature : float
Temperature (in Kelvin) of the data. Defaults to room temperature
(294K).
See also
--------
openmc.mgxs_library.set_kappa_fission_mgxs()
"""
# Get the accepted shapes for this xs
shapes = [self.xs_shapes["[G]"]]
# Convert to a numpy array so we can easily get the shape for checking
kappa_fission = np.asarray(kappa_fission)
check_value('kappa fission shape', kappa_fission.shape, shapes)
check_type('temperature', temperature, Real)
check_value('temperature', temperature, self.temperatures)
i = np.where(self.temperatures == temperature)[0][0]
self._kappa_fission[i] = kappa_fission
if np.sum(kappa_fission) > 0.0:
self._fissionable = True
def set_chi(self, chi, temperature=294.):
"""This method sets the cross section for this XSdata object at the
provided temperature.
Parameters
----------
chi: np.ndarray
Fission Spectrum
temperature : float
Temperature (in Kelvin) of the data. Defaults to room temperature
(294K).
See also
--------
openmc.mgxs_library.set_chi_mgxs()
"""
# Get the accepted shapes for this xs
shapes = [self.xs_shapes["[G']"]]
# Convert to a numpy array so we can easily get the shape for checking
chi = np.asarray(chi)
check_value('chi shape', chi.shape, shapes)
check_type('temperature', temperature, Real)
check_value('temperature', temperature, self.temperatures)
i = np.where(self.temperatures == temperature)[0][0]
self._chi[i] = chi
def set_chi_prompt(self, chi_prompt, temperature=294.):
"""This method sets the cross section for this XSdata object at the
provided temperature.
Parameters
----------
chi_prompt : np.ndarray
Prompt fission Spectrum
temperature : float
Temperature (in units of Kelvin) of the provided dataset. Defaults
to 294K
See also
--------
openmc.mgxs_library.set_chi_prompt_mgxs()
"""
# Get the accepted shapes for this xs
shapes = [self.xs_shapes["[G']"]]
# Convert to a numpy array so we can easily get the shape for checking
chi_prompt = np.asarray(chi_prompt)
check_value('chi prompt shape', chi_prompt.shape, shapes)
check_type('temperature', temperature, Real)
check_value('temperature', temperature, self.temperatures)
i = np.where(self.temperatures == temperature)[0][0]
self._chi_prompt[i] = chi_prompt
def set_chi_delayed(self, chi_delayed, temperature=294.):
"""This method sets the cross section for this XSdata object at the
provided temperature.
Parameters
----------
chi_delayed : np.ndarray
Delayed fission Spectrum
temperature : float
Temperature (in units of Kelvin) of the provided dataset. Defaults
to 294K
See also
--------
openmc.mgxs_library.set_chi_delayed_mgxs()
"""
# Get the accepted shapes for this xs
shapes = [self.xs_shapes["[G']"], self.xs_shapes["[DG][G']"]]
# Convert to a numpy array so we can easily get the shape for checking
        chi_delayed = np.asarray(chi_delayed)
import logging
from dataclasses import dataclass, replace
from typing import Tuple, Any, Optional
import numpy as np
from numpy import ndarray
logger = logging.getLogger(__name__)
@dataclass
class COOData:
indices: ndarray
data: ndarray
shape: Tuple[int, ...]
local_shape: Optional[Tuple[int, ...]]
@staticmethod
def _assemble_scipy_csr(
indices: ndarray,
data: ndarray,
shape: Tuple[int, ...],
local_shape: Optional[Tuple[int, ...]]
):
from scipy.sparse import coo_matrix
K = coo_matrix((data, (indices[0], indices[1])), shape=shape)
K.eliminate_zeros()
return K.tocsr()
def __radd__(self, other):
return self.__add__(other)
def tolocal(self, basis=None):
"""Return an array of local finite element matrices.
Parameters
----------
basis
Optionally, sum local facet matrices to form elemental matrices if
the corresponding :class:`skfem.assembly.FacetBasis` is provided.
"""
if self.local_shape is None:
raise NotImplementedError("Cannot build local matrices if "
"local_shape is not specified.")
assert len(self.local_shape) == 2
local = np.moveaxis(self.data.reshape(self.local_shape + (-1,),
order='C'), -1, 0)
if basis is not None:
out = np.zeros((basis.mesh.nfacets,) + local.shape[1:])
out[basis.find] = local
local = np.sum(out[basis.mesh.t2f], axis=0)
return local
def fromlocal(self, local):
"""Reverse of :meth:`COOData.tolocal`."""
return replace(
self,
data=np.moveaxis(local, 0, -1).flatten('C'),
)
def inverse(self):
"""Invert each elemental matrix."""
return self.fromlocal(np.linalg.inv(self.tolocal()))
def __add__(self, other):
if isinstance(other, int):
return self
return replace(
self,
indices=np.hstack((self.indices, other.indices)),
data=np.hstack((self.data, other.data)),
shape=tuple(max(self.shape[i],
other.shape[i]) for i in range(len(self.shape))),
local_shape=None,
)
def tocsr(self):
"""Return a sparse SciPy CSR matrix."""
return self._assemble_scipy_csr(
self.indices,
self.data,
self.shape,
self.local_shape,
)
def toarray(self) -> ndarray:
"""Return a dense numpy array."""
if len(self.shape) == 1:
from scipy.sparse import coo_matrix
return coo_matrix(
(self.data, (self.indices[0], np.zeros_like(self.indices[0]))),
shape=self.shape + (1,),
).toarray().T[0]
elif len(self.shape) == 2:
return self.tocsr().toarray()
# slow implementation for testing N-tensors
out = np.zeros(self.shape)
for itr in range(self.indices.shape[1]):
out[tuple(self.indices[:, itr])] += self.data[itr]
return out
def astuple(self):
return self.indices, self.data, self.shape
def todefault(self) -> Any:
"""Return the default data type.
Scalar for 0-tensor, numpy array for 1-tensor, scipy csr matrix for
2-tensor, self otherwise.
"""
if len(self.shape) == 0:
return np.sum(self.data, axis=0)
elif len(self.shape) == 1:
return self.toarray()
elif len(self.shape) == 2:
return self.tocsr()
return self
def dot(self, x, D=None):
"""Matrix-vector product.
Parameters
----------
x
The vector to multiply with.
D
Optionally, keep some DOFs unchanged. An array of DOF indices.
"""
y = self.data * x[self.indices[1]]
z = np.zeros_like(x)
        np.add.at(z, self.indices[0], y)
        if D is not None:
            # keep the DOFs listed in D unchanged, as described in the docstring
            z[D] = x[D]
        return z
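# Minimal sketch of using COOData directly: assemble a small 2x2 matrix from
# triplets and compare its dense form with a matrix-vector product.
def _coo_data_demo():
    indices = np.array([[0, 0, 1], [0, 1, 1]])
    data = np.array([2.0, -1.0, 3.0])
    A = COOData(indices=indices, data=data, shape=(2, 2), local_shape=None)
    return A.toarray(), A.dot(np.array([1.0, 1.0]))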
import random
import numpy as np
import matplotlib.pyplot as plt
from arms.bernoulli import BernoulliArm
from arms.normal import NormalArm
from matplotlib import rcParams
rcParams['font.family'] = ['Roboto']
for w in ["font.weight", "axes.labelweight", "axes.titleweight", "figure.titleweight"]:
rcParams[w] = 'regular'
def plot_regret(X, Y, cumulative_optimal_reward, cumulative_reward, average_reward_in_each_round, T, algo_name):
fig, axs = plt.subplots(2) # get two figures, top is regret, bottom is average reward in each round
fig.suptitle(f'Performance of {algo_name}')
fig.subplots_adjust(hspace=0.5)
axs[0].plot(X, Y, color='red', label='Regret of UCB')
axs[0].set(xlabel='round number', ylabel='Regret')
axs[0].grid(True)
axs[0].legend(loc='lower right')
axs[0].set_xlim(0, T)
axs[0].set_ylim(0, 1.1*(cumulative_optimal_reward - cumulative_reward))
axs[1].plot(X, average_reward_in_each_round, color='black', label='average reward')
axs[1].set(xlabel='round number', ylabel='Average Reward per round')
axs[1].grid(True)
axs[1].legend(loc='lower right')
axs[1].set_xlim(0, T)
axs[1].set_ylim(0, 1.0)
plt.savefig("./figures/prog3_figure.png")
plt.show()
'''Initialisation and Preprocess phase'''
# C candidate prices, N competitors (demand functions)
cand_prices = [5, 6, 7, 8]
T = 5 # rounds
total_iteration = 3
demand_list = [[110, 105, 100, 95], [120, 89, 85, 81], [100, 90, 60, 50]]
# competitors and prices are 0-index based
N = len(demand_list)
C = len(demand_list[0]) # len(cand_prices)
optimal_revenue = 700
demand_matrix = np.zeros(shape=(N, C, T + 1), dtype=int) # keeps appending every timestep
avg_dem_matrix = np.zeros(shape=(N, C), dtype=int) # dm: will be overwritten every time-step
revenue_matrix = np.zeros(shape=(N, C, T + 1), dtype=int) # keeps appending every timestep
avg_rev_matrix = np.zeros(shape=(N, C), dtype=int) # rm: will be overwritten every time-step
# the first element is 0 because time is 1-index based in MAB for-loop
observed_demand = [0, 112, 64, 88, 74, 80]
def update_optimal_prices():
opt_prices = []
for i in range(N):
idx = np.argmax(avg_rev_matrix[i])
opt_prices.append(cand_prices[idx])
return opt_prices
def update_avg_demand_revenue():
for i in range(N):
for j in range(C):
            avg_dem_matrix[i][j] = np.sum(demand_matrix[i][j])
"""
..
Copyright (c) 2016-2017, Magni developers.
All rights reserved.
See LICENSE.rst for further information.
Module providing public functions for the magni.imaging.measurements
subpackage.
Routine listings
----------------
lissajous_sample_image(h, w, scan_length, num_points, f_y=1., f_x=1.,
theta_y=0., theta_x=np.pi / 2)
Function for lissajous sampling an image.
lissajous_sample_surface(l, w, speed, sample_rate, time, f_y=1., f_x=1.,
theta_y=0., theta_x=np.pi / 2, speed_mode=0)
Function for lissajous sampling a surface.
"""
from __future__ import division
import numpy as np
from magni.imaging.measurements import _util
from magni.utils.validation import decorate_validation as _decorate_validation
from magni.utils.validation import validate_numeric as _numeric
__all__ = ['lissajous_sample_image', 'lissajous_sample_surface']
_min_l = _util.min_l
_min_w = _util.min_w
_min_speed = _util.min_speed
_min_sample_rate = _util.min_sample_rate
_min_time = _util.min_time
_min_scan_length = _util.min_scan_length
_min_num_points = _util.min_num_points
def lissajous_sample_image(h, w, scan_length, num_points, f_y=1., f_x=1.,
theta_y=0., theta_x=np.pi / 2):
"""
Sample an image using a lissajous pattern.
The coordinates (in units of pixels) resulting from sampling an image of
size `h` times `w` using a lissajous pattern are determined. The
`scan_length` determines the length of the path scanned whereas
`num_points` indicates the number of samples taken on that path.
Parameters
----------
h : int
The height of the area to scan in units of pixels.
w : int
The width of the area to scan in units of pixels.
scan_length : float
The length of the path to scan in units of pixels.
num_points : int
The number of samples to take on the scanned path.
f_y : float
The frequency of the y-sinusoid (the default value is 1.0).
f_x : float
The frequency of the x-sinusoid (the default value is 1.0).
theta_y : float
The starting phase of the y-sinusoid (the default is 0.0).
theta_x : float
The starting phase of the x-sinusoid (the default is pi / 2).
Returns
-------
coords : ndarray
The coordinates of the samples arranged into a 2D array, such that each
row is a coordinate pair (x, y).
Notes
-----
The orientation of the coordinate system is such that the width `w` is
measured along the x-axis whereas the height `h` is measured along the
y-axis.
Examples
--------
For example,
>>> import numpy as np
>>> from magni.imaging.measurements import lissajous_sample_image
>>> h = 10
>>> w = 10
>>> scan_length = 50.0
>>> num_points = 12
>>> np.set_printoptions(suppress=True)
>>> lissajous_sample_image(h, w, scan_length, num_points)
array([[ 5. , 9.5 ],
[ 1.40370042, 7.70492686],
[ 0.67656563, 3.75183526],
[ 3.39871123, 0.79454232],
[ 7.39838148, 1.19240676],
[ 9.48459832, 4.62800824],
[ 7.99295651, 8.36038857],
[ 4.11350322, 9.41181634],
[ 0.94130617, 6.94345168],
[ 1.0071768 , 2.92458128],
[ 4.25856283, 0.56150128],
[ 8.10147506, 1.7395012 ],
[ 9.4699986 , 5.51876059]])
"""
@_decorate_validation
def validate_input():
_numeric('h', 'integer', range_='[2;inf)')
_numeric('w', 'integer', range_='[2;inf)')
_numeric('scan_length', 'floating',
range_='[{};inf)'.format(_min_scan_length))
_numeric('num_points', 'integer',
range_='[{};inf)'.format(_min_num_points))
_numeric('f_y', 'floating', range_='(0;inf)')
_numeric('f_x', 'floating', range_='(0;inf)')
_numeric('theta_y', 'floating', range_='(-inf;inf)')
_numeric('theta_x', 'floating', range_='(-inf;inf)')
validate_input()
coords = lissajous_sample_surface(
float(h - 1), float(w - 1), scan_length, float(num_points), 1.,
f_y=f_y, f_x=f_x, theta_y=theta_y, theta_x=theta_x)
coords = coords + 0.5
return coords
def lissajous_sample_surface(l, w, speed, sample_rate, time, f_y=1., f_x=1.,
theta_y=0., theta_x=np.pi / 2, speed_mode=0):
"""
Sample a surface area using a lissajous pattern.
The coordinates (in units of meters) resulting from sampling an area of
size `l` times `w` using a lissajous pattern are determined. The scanned
path is determined from the probe `speed` and the scan `time`.
Parameters
----------
l : float
The length of the area to scan in units of meters.
w : float
The width of the area to scan in units of meters.
speed : float
The probe speed in units of meters/second.
sample_rate : float
The sample rate in units of Hertz.
time : float
The scan time in units of seconds.
f_y : float
The frequency of the y-sinusoid (the default value is 1.0).
f_x : float
The frequency of the x-sinusoid (the default value is 1.0).
theta_y : float
The starting phase of the y-sinusoid (the default is 0.0).
theta_x : float
The starting phase of the x-sinusoid (the default is pi / 2).
speed_mode : int
The speed mode used to select sampling points (the default is 0 which
implies that the speed argument determines the speed, and f_y and f_x
determine the ratio between the relative frequencies used).
Returns
-------
coords : ndarray
The coordinates of the samples arranged into a 2D array, such that each
row is a coordinate pair (x, y).
Notes
-----
The orientation of the coordinate system is such that the width `w` is
measured along the x-axis whereas the length `l` is measured along the
y-axis.
Generally, the lissajous sampling pattern does not provide constant speed,
and this cannot be compensated for without violating f_y, f_x, or both.
Therefore, `speed_mode` allows the user to determine how this issue is
handled: In `speed_mode` 0, constant speed equal to `speed` is ensured by
non-uniform sampling of a lissajous curve, whereby `f_y` and `f_x` are not
constant frequencies. In `speed_mode` 1, average speed equal to `speed` is
ensured by scaling `f_y` and `f_x` by the same constant. In `speed_mode` 2,
`f_y` and `f_x` are kept constant and the `speed` is only used to determine
the path length in combination with `time`.
Examples
--------
For example,
>>> import numpy as np
>>> from magni.imaging.measurements import lissajous_sample_surface
>>> l = 1e-6
>>> w = 1e-6
>>> speed = 7e-7
>>> sample_rate = 1.0
>>> time = 12.0
>>> np.set_printoptions(suppress=True)
>>> lissajous_sample_surface(l, w, speed, sample_rate, time)
array([[ 0.0000005 , 0.000001 ],
[ 0.00000001, 0.00000058],
[ 0.00000033, 0.00000003],
[ 0.00000094, 0.00000025],
[ 0.00000082, 0.00000089],
[ 0.00000017, 0.00000088],
[ 0.00000007, 0.00000024],
[ 0.00000068, 0.00000003],
[ 0.00000099, 0.0000006 ],
[ 0.00000048, 0.000001 ],
[ 0. , 0.00000057],
[ 0.00000035, 0.00000002],
[ 0.00000094, 0.00000027]])
"""
@_decorate_validation
def validate_input():
_numeric('l', 'floating', range_='[{};inf)'.format(_min_l))
_numeric('w', 'floating', range_='[{};inf)'.format(_min_w))
_numeric('speed', 'floating', range_='[{};inf)'.format(_min_speed))
_numeric('sample_rate', 'floating',
range_='[{};inf)'.format(_min_sample_rate))
_numeric('time', 'floating', range_='[{};inf)'.format(_min_time))
_numeric('f_y', 'floating', range_='(0;inf)')
_numeric('f_x', 'floating', range_='(0;inf)')
_numeric('theta_y', 'floating', range_='(-inf;inf)')
_numeric('theta_x', 'floating', range_='(-inf;inf)')
_numeric('speed_mode', 'integer', range_='[0;2]')
validate_input()
s_x = w / 2
s_y = l / 2
if speed_mode in (0, 1):
# The probe moves 4 * s_x * f_x and 4 * s_y * f_y pixels a second in
# the x-direction and y-direction, respectively, and the 2-norm of this
# is a lower bound on the distance per second. Thus, t is an upper
# bound on the scan time.
t = speed * time / np.sqrt((4 * s_x * f_x)**2 + (4 * s_y * f_y)**2)
# The above assumes that f_x * t and f_y * t are integral numbers and
# so t is increased to ensure the upper bound.
t = max(np.ceil(f_x * t) / f_x, np.ceil(f_y * t) / f_y)
# The distance between sampling points on the curve is chosen small
# enough to approximate the curve by straight line segments.
dt = 1 / (10**4 * max(f_x, f_y))
t = np.linspace(0, t, int(t / dt))
x = s_x * np.cos(2 * np.pi * f_x * t + theta_x) + s_x
y = s_y * np.cos(2 * np.pi * f_y * t + theta_y) + s_y
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
l = np.zeros(t.shape)
l[1:] = np.cumsum((dx**2 + dy**2)**(1 / 2))
if speed_mode == 0:
# Constant speed entails constant distance between samples.
            l_mode_0 = np.linspace(0, speed * time, int(sample_rate * time) + 1)  # np.linspace requires an integer sample count
t = np.interp(l_mode_0, l, t)
else: # speed_mode == 1
# The value of t where the desired scan length is reached.
t_end = np.argmax(l > speed * time) * dt
            t = np.linspace(0, t_end, int(sample_rate * time) + 1)
else: # speed_mode == 2
        t = np.linspace(0, time, int(sample_rate * time) + 1)
x = s_x * np.cos(2 * np.pi * f_x * t + theta_x) + s_x
y = s_y * np.cos(2 * np.pi * f_y * t + theta_y) + s_y
    return np.column_stack((x, y))
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# vispy: gallery 2
"""
Example demonstrating the use of textures in vispy.gloo.
Three textures are created and combined in the fragment shader.
"""
from vispy.gloo import Program, Texture2D, VertexBuffer
from vispy import app, dataio
from vispy.gloo import gl
import numpy as np
# Texture 1
im1 = dataio.crate()
# Texture with bumps (to multiply with im1)
im2 = np.ones((20, 20), 'float32')
im2[::3, ::3] = 0.5
# Texture with a plus sign (to subtract from im1)
im3 = np.zeros((30, 30), 'float32')
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 6 10:07:51 2021
@author: robin
"""
#%% tricky test function
"""
ZDT toolkit
x = {x1.. xn}
y = {x1.. xj} = {y1.. yj} #for simplicity, here, j = Int(n/2)
z = {x(j+1).. xn} = {z1.. zk}
Testing functions with the shape :
f1 : y -> f1(y)
f2 : y,z -> g(z)h(f1(y),g(z))
xbounds = [0,1]**n
"""
import numpy as np
import random
from smt.problems.problem import Problem
class ZDT(Problem):
def _initialize(self):
self.options.declare(
"ndim", 2, types=int
) # not necesary as I look at the shape of x
self.options.declare("name", "ZDT", types=str)
self.options.declare(
"type", 1, values=[1, 2, 3, 4, 5], types=int
) # one of the 5 test functions
def _setup(self):
self.xlimits[:, 1] = 1.0
def _evaluate(self, x, kx): # kx useless
"""
Arguments
---------
x : ndarray[ne, n_dim]
Evaluation points.
Returns
-------
[ndarray[ne, 1],ndarray[ne, 1]]
Functions values.
"""
ne, nx = x.shape
j = min(1, nx - 1) # if one entry then no bug
f1 = np.zeros((ne, 1), complex)
if self.options["type"] < 5:
f1[:, 0] = x[:, 0]
else:
f1[:, 0] = 1 - np.exp(-4 * x[:, 0]) * np.sin(6 * np.pi * x[:, 0]) ** 6
# g
g = np.zeros((ne, 1), complex)
if self.options["type"] < 4:
for i in range(ne):
g[i, 0] = 1 + 9 / (nx - j) * sum(x[i, j:nx])
elif self.options["type"] == 4:
for i in range(ne):
g[i, 0] = (
1
+ 10 * (nx - j)
                    + sum(x[i, j:nx] ** 2 - 10 * np.cos(4 * np.pi * x[i, j:nx]))  # same slice in both terms (ZDT4 definition)
)
else:
for i in range(ne):
g[i, 0] = 1 + 9 * (sum(x[i, j:nx]) / (nx - j)) ** 0.25
# h
h = np.zeros((ne, 1), complex)
if self.options["type"] == 1 or self.options["type"] == 4:
for i in range(ne):
                h[i, 0] = 1 - np.sqrt(f1[i, 0] / g[i, 0])
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 29 21:45:02 2021
@author: <NAME> -Spatial structure index value distribution of urban streetscape
"""
from mayavi import mlab
from tvtk.api import tvtk # python wrappers for the C++ vtk ecosystem
import numpy as np
from mayavi import mlab
from tvtk.api import tvtk
import matplotlib.pyplot as plt # only for manipulating the input image
import glob,os, pickle
label_mapping={
0:"pole",
1:"slight",
2:"bboard",
3:"tlight",
4:"car",
5:"truck",
6:"bicycle",
7:"motor",
8:"bus",
9:"tsignf",
10:"tsignb",
11:"road",
12:"sidewalk",
13:"curbcut",
14:"crosspln",
15:"bikelane",
16:"curb",
17:"fence",
18:"wall",
19:"building",
20:"person",
21:"rider",
22:"sky",
23:"vege",
24:"terrain",
25:"markings",
26:"crosszeb",
27:"Nan",
}
label_color={
0:(117,115,102), #"pole",
1:(212,209,156),#"slight",
2:(224,9,9),#"bboard",
3:(227,195,66),#"tlight",
4:(137,147,169),#"car",
5:(53,67,98),#"truck",
6:(185,181,51),#"bicycle",
7:(238,108,91),#"motor",
8:(247,5,5),#"bus",
9:(127,154,82),#"tsignf",
10:(193,209,167),#"tsignb",
11:(82,83,76),#"road",
12:(141,142,133),#"sidewalk",
13:(208,212,188),#"curbcut",
14:(98,133,145),#"crosspln",
15:(194,183,61),#"bikelane",
16:(141,139,115),#"curb",
17:(157,186,133),#"fence",
18:(114,92,127),#"wall",
19:(78,61,76),#"building",
20:(100,56,67),#"person",
21:(240,116,148),#"rider",
22:(32,181,191),#"sky",
23:(55,204,26),#"vege",
24:(84,97,82),#"terrain",
25:(231,24,126),#"markings",
26:(141,173,166),#"crosszeb",
27:(0,0,0),#"Nan",
}
def auto_sphere(image_file):
# create a figure window (and scene)
fig = mlab.figure(size=(600, 600))
# load and map the texture
img = tvtk.JPEGReader()
img.file_name = image_file
texture = tvtk.Texture(input_connection=img.output_port, interpolate=1)
# print(texture)
# (interpolate for a less raster appearance when zoomed in)
# use a TexturedSphereSource, a.k.a. getting our hands dirty
R = 1
Nrad = 180
# create the sphere source with a given radius and angular resolution
sphere = tvtk.TexturedSphereSource(radius=R, theta_resolution=Nrad,
phi_resolution=Nrad)
# print(sphere)
# assemble rest of the pipeline, assign texture
sphere_mapper = tvtk.PolyDataMapper(input_connection=sphere.output_port)
sphere_actor = tvtk.Actor(mapper=sphere_mapper, texture=texture)
fig.scene.add_actor(sphere_actor)
mlab.show()
def manual_sphere(image_file):
# caveat 1: flip the input image along its first axis
img = plt.imread(image_file) # shape (N,M,3), flip along first dim
outfile = image_file.replace('.jfif', '_flipped.jpg')
# flip output along first dim to get right chirality of the mapping
img = img[::-1,...]
plt.imsave(outfile, img)
image_file = outfile # work with the flipped file from now on
# parameters for the sphere
R = 1 # radius of the sphere
Nrad = 180 # points along theta and phi
phi = np.linspace(0, 2 * np.pi, Nrad) # shape (Nrad,)
theta = np.linspace(0, np.pi, Nrad) # shape (Nrad,)
phigrid,thetagrid = np.meshgrid(phi, theta) # shapes (Nrad, Nrad)
# compute actual points on the sphere
x = R * np.sin(thetagrid) * np.cos(phigrid)
y = R * np.sin(thetagrid) * np.sin(phigrid)
z = R * np.cos(thetagrid)
# create figure
mlab.figure(size=(600, 600))
# create meshed sphere
mesh = mlab.mesh(x,y,z)
mesh.actor.actor.mapper.scalar_visibility = False
mesh.actor.enable_texture = True # probably redundant assigning the texture later
# load the (flipped) image for texturing
img = tvtk.JPEGReader(file_name=image_file)
texture = tvtk.Texture(input_connection=img.output_port, interpolate=0, repeat=0)
# print(texture)
mesh.actor.actor.texture = texture
# tell mayavi that the mapping from points to pixels happens via a sphere
mesh.actor.tcoord_generator_mode = 'sphere' # map is already given for a spherical mapping
cylinder_mapper = mesh.actor.tcoord_generator
# caveat 2: if prevent_seam is 1 (default), half the image is used to map half the sphere
cylinder_mapper.prevent_seam = 0 # use 360 degrees, might cause seam but no fake data
#cylinder_mapper.center = np.array([0,0,0]) # set non-trivial center for the mapping sphere if necessary
def mpl_sphere(image_file):
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
img = plt.imread(image_file)
# define a grid matching the map size, subsample along with pixels
theta = np.linspace(0, np.pi, img.shape[0])
phi = np.linspace(0, 2*np.pi, img.shape[1])
print(img.shape)
print(theta.shape)
print(phi.shape)
#'''
    count = 180 # keep 180 points along theta and phi
theta_inds = np.linspace(0, img.shape[0] - 1, count).round().astype(int)
phi_inds = np.linspace(0, img.shape[1] - 1, count).round().astype(int)
# print(theta_inds)
theta = theta[theta_inds]
phi = phi[phi_inds]
print(theta.shape)
print(phi.shape)
img = img[np.ix_(theta_inds, phi_inds)]
print("_"*50)
print(img.shape)
#'''
theta,phi = np.meshgrid(theta, phi)
print(theta.shape,phi.shape)
R = 1
# sphere
x = R * np.sin(theta) * np.cos(phi)
y = R * np.sin(theta) * np.sin(phi)
z = R * np.cos(theta)
# create 3d Axes
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(x.T, y.T, z.T, facecolors=img/255, cstride=1, rstride=1) # we've already pruned ourselves
# make the plot more spherical
ax.axis('scaled')
plt.show()
def spherical_segs_pts_show(label_seg_fn,label_color):
from tqdm import tqdm
import pickle
import numpy as np
from skimage.io._plugins.pil_plugin import ndarray_to_pil, pil_to_ndarray
from PIL import Image,ImageOps
fig=mlab.figure(size=(600, 600))
print(label_seg_fn)
with open(label_seg_fn,'rb') as f:
label_seg=pickle.load(f).numpy()
print('\nseg shape={}'.format(label_seg.shape))
# define a grid matching the map size, subsample along with pixels
theta=np.linspace(0, np.pi, label_seg.shape[0])
phi=np.linspace(0, 2*np.pi, label_seg.shape[1])
print("theta shape={};phi shape={}".format(theta.shape,phi.shape))
theta,phi=np.meshgrid(theta, phi)
print("theta shape={};phi shape={}".format(theta.shape,phi.shape))
label_seg_color=np.array([label_color[v] for v in label_seg.flatten()]).reshape((label_seg.shape[0],label_seg.shape[1],3))
print("\nlabel_seg_color shape={}".format(label_seg_color.shape))
R=10
# sphere
x=R * np.sin(theta) * np.cos(phi)
y=R * np.sin(theta) * np.sin(phi)
z=R * np.cos(theta)
print("x,y,z shape={},{},{}".format(x.shape,y.shape,z.shape))
mask=label_seg==22
# print(len(np.extract(mask,x.T)),len(np.extract(mask,y.T)),len(np.extract(mask,z.T)),len(np.extract(mask,label_seg_color[:,:,0]/255)))
mlab.points3d(x.T, y.T, z.T, label_seg_color[:,:,0]/255,) #opacity=0.75,scale_factor=0.1
# mlab.points3d(np.extract(mask,x.T),np.extract(mask,y.T),np.extract(mask,z.T),)
theta_phi=np.dstack((theta,phi))
mlab.show()
def spherical_segs_object_changing(label_seg_path,label_color):
from tqdm import tqdm
import glob,os
import pickle
import numpy as np
from skimage.io._plugins.pil_plugin import ndarray_to_pil, pil_to_ndarray
from PIL import Image,ImageOps
# fig=mlab.figure(size=(600, 600))
label_seg_fns=glob.glob(os.path.join(label_seg_path,'*.pkl'))
# print(label_seg_fns)
for label_seg_fn in tqdm(label_seg_fns):
print(label_seg_fn)
with open(label_seg_fn,'rb') as f:
label_seg=pickle.load(f).numpy()
print('\nseg shape={}'.format(label_seg.shape))
# define a grid matching the map size, subsample along with pixels
theta=np.linspace(0, np.pi, label_seg.shape[0])
phi=np.linspace(0, 2*np.pi, label_seg.shape[1])
print("theta shape={};phi shape={}".format(theta.shape,phi.shape))
theta,phi=np.meshgrid(theta, phi)
print("theta shape={};phi shape={}".format(theta.shape,phi.shape))
label_seg_color=np.array([label_color[v] for v in label_seg.flatten()]).reshape((label_seg.shape[0],label_seg.shape[1],3))
print("\nlabel_seg_color shape={}".format(label_seg_color.shape))
R=10
# sphere
x=R * np.sin(theta) * np.cos(phi)
y=R * np.sin(theta) * np.sin(phi)
z=R * np.cos(theta)
print("x,y,z shape={},{},{}".format(x.shape,y.shape,z.shape))
mask=label_seg==22
# print(len(np.extract(mask,x.T)),len(np.extract(mask,y.T)),len(np.extract(mask,z.T)),len(np.extract(mask,label_seg_color[:,:,0]/255)))
# mlab.points3d(x.T, y.T, z.T, label_seg_color[:,:,0]/255,) #opacity=0.75,scale_factor=0.1
# mlab.show()
# mlab.points3d(np.extract(mask,x.T),np.extract(mask,y.T),np.extract(mask,z.T),)
theta_phi=np.dstack((theta,phi))
break
def fns_sort(fns_list):
from pathlib import Path
fns_dict={int(Path(p).stem.split('_')[-1]):p for p in fns_list}
fns_dict_key=list(fns_dict.keys())
fns_dict_key.sort()
fns_dict_sorted=[fns_dict[k] for k in fns_dict_key]
return fns_dict_sorted
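# fns_sort orders paths by the integer after the last underscore of the file stem,
# e.g. (hypothetical names) ['seg_10.pkl', 'seg_2.pkl', 'seg_1.pkl'] becomes
# ['seg_1.pkl', 'seg_2.pkl', 'seg_10.pkl'] - numeric rather than lexicographic order.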
def panorama_object_change(label_seg_path,label_color):
from tqdm import tqdm
import glob,os
import pickle
import numpy as np
from skimage.io._plugins.pil_plugin import ndarray_to_pil, pil_to_ndarray
from PIL import Image,ImageOps
from pathlib import Path
import pandas as pd
from sklearn import preprocessing
label_seg_fns=glob.glob(os.path.join(label_seg_path,'*.pkl'))
label_seg_fns_sorted=fns_sort(label_seg_fns)
pixels={}
# i=0
for label_seg_fn in tqdm(label_seg_fns_sorted):
# print(label_seg_fn)
with open(label_seg_fn,'rb') as f:
label_seg=pickle.load(f).numpy()
# print('\nseg shape={}'.format(label_seg.shape))
fn_stem=Path(label_seg_fn).stem
fn_key,fn_idx=fn_stem.split("_")
pixels[fn_stem]=label_seg.flatten()
# if i==10:break
# i+=1
img_pixels_df=pd.DataFrame.from_dict(pixels,orient='index')
pixels_diff=img_pixels_df.diff()
pixels_diff[pixels_diff!=0]=1
# print(img_pixels_df)
pixels_diff_sum=pixels_diff.sum(axis=0)
pixels_diff_array=np.array(pixels_diff_sum).reshape(label_seg.shape)
min_max_scaler=preprocessing.MinMaxScaler()
pixels_diff_array_standardization=min_max_scaler.fit_transform(pixels_diff_array)
img_object_change=Image.fromarray(np.uint8(pixels_diff_array_standardization * 255) , 'L')
img_object_change.save('./processed data/img_object_change.jpg')
with open('./processed data/pixels_diff_array_standardization.pkl','wb') as f:
pickle.dump(pixels_diff_array_standardization,f)
with open('./processed data/pixels_diff_array.pkl','wb') as f:
pickle.dump(pixels_diff_array,f)
return img_object_change,pixels_diff_array_standardization
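# Example call (hypothetical folder and colour table): count, per panorama pixel, how
# often the predicted label changes between consecutive frames and save the result as
# a grey-scale image under './processed data/':
#
#     img, change = panorama_object_change('./label_segs', label_color={0: (0, 0, 0), 22: (70, 130, 180)})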
def spherical_img_pts_show(panorama_fn,FOV=False):
from tqdm import tqdm
import pickle,math
import numpy as np
from skimage.io._plugins.pil_plugin import ndarray_to_pil, pil_to_ndarray
from PIL import Image,ImageOps
import numpy.ma as ma
from PIL import Image
img=plt.imread(panorama_fn)
print('\nseg shape={}'.format(img.shape))
# define a grid matching the map size, subsample along with pixels
theta=np.linspace(0, np.pi, img.shape[0])
phi=np.linspace(0, 2*np.pi, img.shape[1])
print("theta shape={};phi shape={}".format(theta.shape,phi.shape))
theta,phi=np.meshgrid(theta, phi)
theta=theta.T
phi=phi.T
print("theta shape={};phi shape={}".format(theta.shape,phi.shape))
theta_phi=np.dstack((theta,phi))
if FOV==True:
verticalFOV_limit_ofVisual_field=[50,90-(-70)]
horizontalFOV_visual_limit_field=[62,90-(-62)]
horizontal_offset=0
verticalFOV_limit_ofVisual_field_radians=[math.radians(d) for d in verticalFOV_limit_ofVisual_field]
horizontalFOV_visual_limit_field_radians=[math.radians(d) for d in horizontalFOV_visual_limit_field]
horizontal_offset_radians=math.radians(horizontal_offset)
print(verticalFOV_limit_ofVisual_field_radians,horizontalFOV_visual_limit_field_radians,horizontal_offset_radians)
mask=np.bitwise_and(theta>=verticalFOV_limit_ofVisual_field_radians[0], theta<=verticalFOV_limit_ofVisual_field_radians[1])
theta=theta[mask]
phi=phi[mask]
img=img[mask]
R=50
# sphere
x=R * np.sin(theta) * np.cos(phi)
y=R * np.sin(theta) * np.sin(phi)
z=R * np.cos(theta)
print("x,y,z shape={},{},{}".format(x.shape,y.shape,z.shape))
# print(img)
fig=mlab.figure(size=(600, 600),bgcolor=(1, 1, 1))
mlab.points3d(x, y, z, img/255,scale_factor=.25) #opacity=0.75,scale_factor=0.1
mlab.points3d(0, 0, 0,scale_factor=3,color=(1,0,0))
# Plot the equator and the tropiques
theta_equator=np.linspace(0, 2 * np.pi, 100)
veiw_scope_dic={}
for i,angle in enumerate([-math.radians(70), 0, math.radians(50)]):
x_equator=R * np.cos(theta_equator) * np.cos(angle)
y_equator=R * np.sin(theta_equator) * np.cos(angle)
z_equator=R * np.ones_like(theta_equator) * np.sin(angle)
mlab.plot3d(x_equator, y_equator, z_equator, color=(0, 0, 0),opacity=0.6, tube_radius=None)
veiw_scope_dic[i]=[x_equator,y_equator,z_equator]
    str_info={0:'lower limit of visual field:-70',1:'Standard line of sight:0',2:'Upper limit of visual field:+50'}
for k,v in str_info.items():
mlab.text(veiw_scope_dic[k][0][0], veiw_scope_dic[k][1][0], v, z=veiw_scope_dic[k][2][0],width=0.025 * len(v), name=v,color=(0,0,0))
vertical_label_radians=np.linspace(0, np.pi,14)
vertical_label_degree=["{:.2f}".format(90-math.degrees(radi)) for radi in vertical_label_radians]
phi_label=0
for idx in range(len(vertical_label_radians)):
theta_labe=vertical_label_radians[idx]
x_label=R * np.sin(theta_labe) * np.cos(phi_label)
y_label=R * np.sin(theta_labe) * np.sin(phi_label)
z_label=R * np.cos(theta_labe)
mlab.points3d(x_label, y_label, z_label,scale_factor=1,color=(0,0,0))
label=vertical_label_degree[idx]
mlab.text(x_label, y_label, label, z=z_label,width=0.02 * len(label), name=label,color=(0,0,0))
mlab.show()
def array_classifier(array,n_classes=9):
import mapclassify as mc
import numpy as np
import pandas as pd
from PIL import Image
from sklearn import preprocessing
array_shape=array.shape
array_flatten=array.flatten()
classifier=mc.NaturalBreaks(array_flatten,k=n_classes)
print(classifier)
classifications=pd.DataFrame(array).apply(classifier)
classifications_array=classifications.to_numpy().reshape(array_shape)
min_max_scaler=preprocessing.MinMaxScaler()
classifications_array_standardization=min_max_scaler.fit_transform(classifications_array)
classifications_object_change=Image.fromarray(np.uint8(classifications_array_standardization * 255) , 'L')
classifications_object_change.save('./processed data/classifications_object_change.jpg')
return classifications_array
def auto_sphere_label(image_file):
import math
# create a figure window (and scene)
fig = mlab.figure(size=(600, 600),bgcolor=(1, 1, 1))
# load and map the texture
img = tvtk.JPEGReader()
img.file_name = image_file
texture = tvtk.Texture(input_connection=img.output_port, interpolate=1)
# print(texture)
# (interpolate for a less raster appearance when zoomed in)
# use a TexturedSphereSource, a.k.a. getting our hands dirty
R = 50
Nrad = 180
# create the sphere source with a given radius and angular resolution
sphere = tvtk.TexturedSphereSource(radius=R, theta_resolution=Nrad,phi_resolution=Nrad)
# print(sphere)
# assemble rest of the pipeline, assign texture
sphere_mapper = tvtk.PolyDataMapper(input_connection=sphere.output_port)
sphere_actor = tvtk.Actor(mapper=sphere_mapper, texture=texture)
fig.scene.add_actor(sphere_actor)
# Plot the equator and the tropiques
theta_equator=np.linspace(0, 2 * np.pi, 100)
veiw_scope_dic={}
for i,angle in enumerate([-math.radians(70), 0, math.radians(50)]):
        x_equator=R * np.cos(theta_equator) * np.cos(angle)
# Equation numbers refer to <NAME>'s "Conjugate Bayesian analysis of the Gaussian
# distribution" note unless otherwise specified.
from dataclasses import dataclass
import numpy as np
from scipy.special import gammaln
from dpmmlearn.density import (multivariate_t_density, normal_density,
scaled_IX_density, t_density)
from dpmmlearn.utils import gammadln, is_pos_def, random_invwish
# from scipy.special import gamma
# from dpmmlearn.utils import gammad
class Prior():
"""
    In general, `Prior` objects represent the probabilistic graphical model:
psi -> theta -> [x]
where psi are hyperparameters of a probability,
theta are parameters of the model being constrained,
and [x] are data.
For example, for an InvGamma model, the data (x) are scalar samples, the model is to infer the
variance (theta) of a generative Gaussian distribution with known mean, and alpha, beta (psi)
are hyperparameters for the probability on the Gaussian variance.
We define the following probability distributions for each `Prior` object:
Likelihood: Pr([x] | theta). Note [x] is conditionally independent of psi given theta.
Prior: Pr(theta | psi).
Posterior: Pr(theta | x, psi) By Bayes theorem, equal to:
Pr(x | theta) Pr(theta | psi) / Pr (x | psi)
Predictive (or evidence): Pr(x | psi) = \\int Pr(x | theta) Pr(theta | psi) d(theta).
"""
def __init__(self, post=None, *args, **kwargs):
# Assume conjugate prior by default, i.e. that posterior is same form
# as prior
if post is None:
post = type(self)
self._post = post
def sample(self, size=None):
"""Return one or more samples of the model parameters from prior distribution."""
raise NotImplementedError
def like1(self, x, *args, **kwargs):
"""Return likelihood for single data element. Pr(x | theta). This is conditionally
independent of the hyperparameters psi. If more than one data element is passed, then the
likelihood will be returned for each element."""
raise NotImplementedError
def likelihood(self, X, *args, **kwargs):
# It's quite likely overriding this will yield faster results...
"""Returns Pr(X | theta). Does not broadcast over theta!"""
return np.prod(self.like1(X, *args, **kwargs))
# return np.exp(
# np.sum(np.log(self.like1(X, *args, **kwargs)))
# )
def lnlikelihood(self, X, *args, **kwargs):
"""Returns ln(Pr(X | theta)). Does not broadcast over theta!"""
lh = self.likelihood(X, *args, **kwargs)
if lh == 0:
return np.array(-np.inf)
else:
return np.log(lh)
def __call__(self, *args):
"""Returns Pr(theta | psi), i.e. the prior probability."""
return np.exp(self.lnprior(*args))
def lnprior(self, *args):
"""Returns lnPr(theta | psi), i.e. the prior probability."""
raise NotImplementedError
def _post_params(self, X):
"""Returns new hyperparameters psi' for updating prior->posterior. Can be sent to
constructor to initialize a new object."""
raise NotImplementedError
def create_post(self, X):
"""Returns new Prior object using updated hyperparameters psi, which is the posterior given
the data X."""
return self._post(*self._post_params(X))
def pred(self, x):
"""Prior predictive. Pr(x | params). Integrates out theta."""
raise NotImplementedError
@dataclass
class GaussianMeanKnownVariance(Prior):
"""Model univariate Gaussian with known variance and unknown mean.
This prior is for 1-d data.
Model parameters
----------------
mu : float
mean.
Prior parameters
----------------
mu_0 : float
prior mean.
sigsqr_0 : float
prior variance.
Fixed parameters
----------------
sigsqr : float
        Known variance. Treat as a prior parameter to make __init__() work with
        _post_params(), post(), post_pred(), etc., though note this never actually gets
        updated.
"""
mu_0: float
sigsqr_0: float
sigsqr: float
def __init__(self, mu_0, sigsqr_0, sigsqr):
self.mu_0 = mu_0
self.sigsqr_0 = sigsqr_0
self.sigsqr = sigsqr
self._norm1 = np.sqrt(2 * np.pi * self.sigsqr)
self._norm2 = np.sqrt(2 * np.pi * self.sigsqr_0)
super(GaussianMeanKnownVariance, self).__init__()
assert self.sigsqr_0 > 0
assert self.sigsqr > 0
def sample(self, size=None):
"""Return a sample `mu` or samples [mu1, mu2, ...] from distribution."""
if size is None:
return np.random.normal(self.mu_0, np.sqrt(self.sigsqr_0))
else:
return np.random.normal(
self.mu_0, np.sqrt(
self.sigsqr_0), size=size)
def like1(self, x, mu):
"""Returns likelihood Pr(x | mu), for a single data point.
"""
return np.exp(-0.5 * (x - mu)**2 / self.sigsqr) / self._norm1
def __call__(self, mu):
"""Returns Pr(mu), i.e., the prior."""
return np.exp(-0.5 * (mu - self.mu_0)**2 / self.sigsqr_0) / self._norm2
def lnprior(self, mu):
"""Returns lnPr(mu), i.e. the prior probability."""
return -0.5 * (mu - self.mu_0)**2 / self.sigsqr_0 - np.log(self._norm2)
def _post_params(self, X):
"""Recall X is [NOBS]."""
try:
n = len(X)
except TypeError:
n = 1
Xbar = np.mean(X)
sigsqr_n = 1. / (n / self.sigsqr + 1. / self.sigsqr_0)
mu_n = sigsqr_n * (self.mu_0 / self.sigsqr_0 + n * Xbar / self.sigsqr)
return mu_n, sigsqr_n, self.sigsqr
def pred(self, x):
"""Prior predictive. Pr(x)"""
sigsqr = self.sigsqr + self.sigsqr_0
return np.exp(-0.5 * (x - self.mu_0)**2 / sigsqr) / \
np.sqrt(2 * np.pi * sigsqr)
def evidence(self, X):
"""Fully marginalized likelihood Pr(X)"""
raise NotImplementedError
# FIXME!
# def evidence(self, D):
# """Fully marginalized likelihood Pr(D)"""
# try:
# n = len(D)
# except:
# n = 1
# # import ipdb; ipdb.set_trace()
# D = np.array(D)
# Xbar = np.sum(D)
# num = np.sqrt(self.sigsqr)
# den = (2*np.pi*self.sigsqr)**(n/2.0)*np.sqrt(n*self.sigsqr_0+self.sigsqr)
# exponent = -np.sum(D**2)/(2.0*self.sigsqr) - self.mu_0/(2.0*self.sigsqr_0)
# expnum = self.sigsqr_0*n**2*Xbar**2/self.sigsqr + self.sigsqr*self.mu_0**2/self.sigsqr_0
# expnum += 2.0*n*Xbar*self.mu_0
# expden = 2.0*(n*self.sigsqr_0+self.sigsqr)
# return num/den*np.exp(exponent+expnum/expden)
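# A minimal usage sketch (synthetic data, arbitrarily chosen hyperparameters) of the
# conjugate prior -> posterior update implemented by _post_params()/create_post():
def _demo_gaussian_mean_known_variance():
    prior = GaussianMeanKnownVariance(mu_0=0.0, sigsqr_0=4.0, sigsqr=1.0)
    X = np.random.normal(loc=1.5, scale=1.0, size=50)  # synthetic observations
    posterior = prior.create_post(X)  # same class, with updated mu_0 and sigsqr_0
    # the posterior mean is pulled from mu_0 towards the sample mean of X
    return posterior.mu_0, posterior.sigsqr_0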
@dataclass
class InvGamma(Prior):
"""Inverse Gamma distribution. Note this parameterization matches Murphy's, not wikipedia's.
This prior is for 1-d data.
Model parameters
----------------
var : float
variance.
Prior parameters
----------------
alpha : float
prior shape.
alpha must be > 0.
beta : float
prior scale.
beta must be > 0.
Fixed parameters
----------------
mu : float
        Known mean. Treat as a prior parameter to make __init__() work with
        _post_params(), post(), post_pred(), etc., though note this never actually gets
        updated.
"""
alpha: float
beta: float
mu: float
def __init__(self, alpha, beta, mu):
self.alpha = alpha
self.beta = beta
self.mu = mu
super(InvGamma, self).__init__()
assert self.alpha > 0
assert self.beta > 0
def sample(self, size=None):
return 1. / np.random.gamma(self.alpha, scale=self.beta, size=size)
def like1(self, x, var):
"""Returns likelihood Pr(x | var), for a single data point."""
return np.exp(-0.5 * (x - self.mu)**2 / var) / np.sqrt(2 * np.pi * var)
def __call__(self, var):
"""Returns Pr(var), i.e., the prior density."""
# al, be = self.alpha, self.beta
# return be**(-al) / gamma(al) * var**(-1. - al) * \
# np.exp(-1. / (be * var))
return np.exp(self.lnprior(var))
def lnprior(self, var):
"""Returns lnPr(var), i.e. the prior probability."""
al, be = self.alpha, self.beta
return np.log(be) * (-al) - gammaln(al) + np.log(var) * (-1. - al) + (-1. / (be * var))
def _post_params(self, X):
try:
n = len(X)
except TypeError:
n = 1
al_n = self.alpha + n / 2.0
be_n = 1. / (1. / self.beta + 0.5 * np.sum((np.array(X) - self.mu)**2))
return al_n, be_n, self.mu
def pred(self, x):
"""Prior predictive. Pr(x)"""
# Careful. Use 1/beta/alpha to match Murphy, not wikipedia!
return t_density(
2 *
self.alpha,
self.mu,
1. /
self.beta /
self.alpha,
x)
def evidence(self, X):
"""Fully marginalized likelihood Pr(X)"""
raise NotImplementedError
@dataclass
class InvGamma2D(Prior):
"""Inverse Gamma distribution, but for modeling 2D covariance matrices proportional to the
identity matrix.
This prior is for 2-d data.
Model parameters
----------------
var : float
variance.
Prior parameters
----------------
alpha : float
prior shape.
alpha must be > 0.
beta : float
prior scale.
beta must be > 0.
Fixed parameters
----------------
mu : array-like of shape (2, )
        Known mean. Treat as a prior parameter to make __init__() work with
        _post_params(), post(), post_pred(), etc., though note this never actually gets
        updated.
"""
alpha: float
beta: float
mu: np.array
def __init__(self, alpha, beta, mu):
self.alpha = alpha
self.beta = beta
self.mu = np.array(mu)
assert len(mu) == 2
super(InvGamma2D, self).__init__()
assert self.alpha > 0
assert self.beta > 0
def sample(self, size=None):
return 1. / np.random.gamma(self.alpha, scale=self.beta, size=size)
def like1(self, x, var):
"""Returns likelihood Pr(x | var), for a single data point."""
assert isinstance(x, np.ndarray)
assert x.shape[-1] == 2
return np.exp(-0.5 * np.sum((x - self.mu)**2,
axis=-1) / var) / (2 * np.pi * var)
def lnlikelihood(self, X, var):
"""Returns the log likelihood for data X"""
return -0.5 * np.sum((X - self.mu)**2) / var - \
X.shape[0] * np.log(2 * np.pi * var)
def __call__(self, var):
"""Returns Pr(var), i.e., the prior density."""
# al, be = self.alpha, self.beta
# return be**(-al) / gamma(al) * var**(-1. - al) * \
# np.exp(-1. / (be * var))
return np.exp(self.lnprior(var))
def lnprior(self, var):
"""Returns lnPr(var), i.e. the prior probability."""
al, be = self.alpha, self.beta
return -al * np.log(be) - gammaln(al) + (-1. - al) * np.log(var) + (-1. / (be * var))
def _post_params(self, X):
try:
n = len(X)
except TypeError:
n = 1
al_n = self.alpha + n # it's + n/2.0 in InvGamma, but in 2D it's + n.
# Same formula for beta.
be_n = 1. / (1. / self.beta + 0.5 * np.sum((np.array(X) - self.mu)**2))
return al_n, be_n, self.mu
def pred(self, x):
"""Prior predictive. Pr(x)"""
assert isinstance(x, np.ndarray)
assert x.shape[-1] == 2
# Generalized from InvGamma. Tested numerically.
return multivariate_t_density(
2 *
self.alpha,
self.mu,
1. /
self.beta /
self.alpha *
np.eye(2),
x)
def evidence(self, X):
"""Fully marginalized likelihood Pr(X)"""
raise NotImplementedError
@dataclass
class NormInvChi2(Prior):
"""Normal-Inverse-Chi-Square model for univariate Gaussian with params for mean and variance.
    This prior is for 1-d data; the model itself has two parameters (mean and variance).
Model parameters
----------------
mu : float
mean.
var : float
variance.
Prior parameters
----------------
mu_0 : float
prior mean.
kappa_0 : float
belief in mu_0.
kappa_0 must be > 0.
sigsqr_0 : float
prior variance.
nu_0 : float
belief in sigsqr_0.
nu_0 must be > 0.
"""
mu_0: float
kappa_0: float
sigsqr_0: float
nu_0: float
def __init__(self, mu_0, kappa_0, sigsqr_0, nu_0):
self.mu_0 = float(mu_0)
self.kappa_0 = float(kappa_0)
self.sigsqr_0 = float(sigsqr_0)
self.nu_0 = float(nu_0)
self.model_dtype = np.dtype([('mu', float), ('var', float)])
super(NormInvChi2, self).__init__()
assert self.kappa_0 > 0
assert self.sigsqr_0 > 0
assert self.nu_0 > 0
def sample(self, size=None):
if size is None:
var = 1. / \
np.random.chisquare(df=self.nu_0) * self.nu_0 * self.sigsqr_0
ret = np.zeros(1, dtype=self.model_dtype)
ret['mu'] = np.random.normal(
self.mu_0, np.sqrt(var / self.kappa_0))
ret['var'] = var
return ret[0]
else:
var = 1. / \
np.random.chisquare(df=self.nu_0, size=size) * self.nu_0 * self.sigsqr_0
ret = np.zeros(size, dtype=self.model_dtype)
ret['mu'] = (
np.random.normal(
self.mu_0,
np.sqrt(
1. /
self.kappa_0),
size=size) *
np.sqrt(var))
ret['var'] = var
return ret
def like1(self, *args):
"""Returns likelihood Pr(x | mu, var), for a single data point."""
if len(args) == 3:
x, mu, var = args
elif len(args) == 2:
x, theta = args
mu = theta['mu']
var = theta['var']
return np.exp(-0.5 * (x - mu)**2 / var) / np.sqrt(2 * np.pi * var)
def __call__(self, *args):
"""Returns Pr(mu, var), i.e., the prior density."""
if len(args) == 2:
mu, var = args
elif len(args) == 1:
mu = args[0]['mu']
var = args[0]['var']
return (normal_density(self.mu_0, var / self.kappa_0, mu) *
scaled_IX_density(self.nu_0, self.sigsqr_0, var))
def lnprior(self, *args):
"""Returns lnPr(mu, var), i.e. the prior probability."""
if len(args) == 2:
mu, var = args
elif len(args) == 1:
mu = args[0]['mu']
var = args[0]['var']
return np.log(normal_density(self.mu_0, var / self.kappa_0, mu) *
scaled_IX_density(self.nu_0, self.sigsqr_0, var))
def _post_params(self, X):
try:
n = len(X)
except TypeError:
n = 1
Xbar = np.mean(X)
kappa_n = self.kappa_0 + n
mu_n = (self.kappa_0 * self.mu_0 + n * Xbar) / kappa_n
nu_n = self.nu_0 + n
sigsqr_n = ((self.nu_0 * self.sigsqr_0 + np.sum((X - Xbar)**2) + n *
self.kappa_0 / (self.kappa_0 + n) * (self.mu_0 - Xbar)**2) / nu_n)
return mu_n, kappa_n, sigsqr_n, nu_n
def pred(self, x):
"""Prior predictive. Pr(x)"""
return t_density(
self.nu_0,
self.mu_0,
(1. +
self.kappa_0) *
self.sigsqr_0 /
self.kappa_0,
x)
def evidence(self, X):
"""Fully marginalized likelihood Pr(X)"""
mu_n, kappa_n, sigsqr_n, nu_n = self._post_params(X)
try:
n = len(X)
except BaseException:
n = 1
# return (gamma(nu_n / 2.0) / gamma(self.nu_0 / 2.0) * np.sqrt(self.kappa_0 / kappa_n) *
# (self.nu_0 * self.sigsqr_0)**(self.nu_0 / 2.0) /
# (nu_n * sigsqr_n)**(nu_n / 2.0) /
# np.pi**(n / 2.0))
return np.exp(
gammaln(nu_n / 2.0) - gammaln(self.nu_0 / 2.0) + 0.5 * np.log(self.kappa_0 / kappa_n)
+ np.log(self.nu_0 * self.sigsqr_0) * (self.nu_0 / 2.0)
- np.log(nu_n * sigsqr_n) * (nu_n / 2.0) - np.log(np.pi) * (n / 2.0)
)
def marginal_var(self, var):
"""Return Pr(var)"""
return scaled_IX_density(self.nu_0, self.sigsqr_0, var)
def marginal_mu(self, mu):
return t_density(
self.nu_0,
self.mu_0,
self.sigsqr_0 /
self.kappa_0,
mu)
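# A minimal sketch of the Normal-Inverse-Chi-Square updates defined above: posterior
# hyperparameters (mu_n, kappa_n, sigsqr_n, nu_n) and the fully marginalized evidence,
# evaluated on synthetic 1-d data:
def _demo_norm_inv_chi2():
    prior = NormInvChi2(mu_0=0.0, kappa_0=1.0, sigsqr_0=1.0, nu_0=1.0)
    X = np.random.normal(loc=2.0, scale=0.5, size=30)  # synthetic observations
    mu_n, kappa_n, sigsqr_n, nu_n = prior._post_params(X)
    return (mu_n, kappa_n, sigsqr_n, nu_n), prior.evidence(X)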
@ dataclass
class NormInvGamma(Prior):
"""Normal-Inverse-Gamma prior for univariate Gaussian with params for mean and variance.
Model parameters
----------------
mu : float
mean.
var : float
variance.
Prior parameters
----------------
    m_0 : float
        prior mean.
V_0 : float
variance scale.
V_0 must be > 0.
a_0 : float
gamma parameters (note these are a/b-like, not alpha/beta-like).
a_0 must be > 0.
b_0 : float
gamma parameters (note these are a/b-like, not alpha/beta-like).
b_0 must be > 0.
"""
m_0: float
V_0: float
a_0: float
b_0: float
def __init__(self, m_0, V_0, a_0, b_0):
self.m_0 = float(m_0)
self.V_0 = float(V_0)
self.a_0 = float(a_0)
self.b_0 = float(b_0)
self.model_dtype = np.dtype([('mu', float), ('var', float)])
super(NormInvGamma, self).__init__()
assert self.V_0 > 0
assert self.a_0 > 0
assert self.b_0 > 0
def sample(self, size=None):
if size is None:
var = 1. / np.random.gamma(self.a_0, scale=1. / self.b_0)
ret = np.zeros(1, dtype=self.model_dtype)
            ret['mu'] = np.random.normal(self.m_0, np.sqrt(self.V_0 * var))
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.coordinates.angle_utilities import angular_separation
from astropy.coordinates import SkyCoord
from astropy.time import Time
from regions import CircleSkyRegion
from gammapy.data.gti import GTI
from gammapy.datasets.map import MapEvaluator
from gammapy.irf import EDispKernel, PSFKernel
from gammapy.maps import Map, MapAxis, WcsGeom, RegionGeom
from gammapy.modeling import Parameter
from gammapy.modeling.models import (
BackgroundModel,
CompoundSpectralModel,
ConstantSpectralModel,
ConstantTemporalModel,
GaussianSpatialModel,
Models,
PointSpatialModel,
PowerLawNormSpectralModel,
PowerLawSpectralModel,
SkyModel,
SpatialModel,
TemplateSpatialModel,
create_fermi_isotropic_diffuse_model,
)
from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency
@pytest.fixture(scope="session")
def sky_model():
spatial_model = GaussianSpatialModel(
lon_0="3 deg", lat_0="4 deg", sigma="3 deg", frame="galactic"
)
spectral_model = PowerLawSpectralModel(
index=2, amplitude="1e-11 cm-2 s-1 TeV-1", reference="1 TeV"
)
temporal_model = ConstantTemporalModel()
return SkyModel(
spatial_model=spatial_model,
spectral_model=spectral_model,
temporal_model=temporal_model,
name="source-1",
)
@pytest.fixture(scope="session")
def gti():
start = [1, 3, 5] * u.day
stop = [2, 3.5, 6] * u.day
t_ref = Time(55555, format="mjd")
gti = GTI.create(start, stop, reference_time=t_ref)
return gti
@pytest.fixture(scope="session")
def diffuse_model():
axis = MapAxis.from_nodes([0.1, 100], name="energy_true", unit="TeV", interp="log")
m = Map.create(
npix=(4, 3), binsz=2, axes=[axis], unit="cm-2 s-1 MeV-1 sr-1", frame="galactic"
)
m.data += 42
spatial_model = TemplateSpatialModel(m, normalize=False)
return SkyModel(PowerLawNormSpectralModel(), spatial_model)
@pytest.fixture(scope="session")
def geom():
axis = MapAxis.from_edges(np.logspace(-1, 1, 3), unit=u.TeV, name="energy")
return WcsGeom.create(skydir=(0, 0), npix=(5, 4), frame="galactic", axes=[axis])
@pytest.fixture(scope="session")
def geom_true():
axis = MapAxis.from_edges(np.logspace(-1, 1, 4), unit=u.TeV, name="energy_true")
return WcsGeom.create(skydir=(0, 0), npix=(5, 4), frame="galactic", axes=[axis])
@pytest.fixture(scope="session")
def exposure(geom_true):
m = Map.from_geom(geom_true)
m.quantity = np.ones(geom_true.data_shape) * u.Quantity("100 m2 s")
m.data[1] *= 10
return m
@pytest.fixture(scope="session")
def background(geom):
m = Map.from_geom(geom)
m.quantity = np.ones(geom.data_shape) * 1e-7
return m
@pytest.fixture(scope="session")
def edisp(geom, geom_true):
e_reco = geom.axes["energy"].edges
e_true = geom_true.axes["energy_true"].edges
return EDispKernel.from_diagonal_response(energy_true=e_true, energy=e_reco)
@pytest.fixture(scope="session")
def psf(geom_true):
sigma = 0.5 * u.deg
return PSFKernel.from_gauss(geom_true, sigma)
@pytest.fixture(scope="session")
def evaluator(sky_model, exposure, psf, edisp, gti):
return MapEvaluator(sky_model, exposure, psf=psf, edisp=edisp, gti=gti)
@pytest.fixture(scope="session")
def diffuse_evaluator(diffuse_model, exposure, psf, edisp):
return MapEvaluator(diffuse_model, exposure, psf=psf, edisp=edisp)
@pytest.fixture(scope="session")
def sky_models(sky_model):
sky_model_2 = sky_model.copy(name="source-2")
sky_model_3 = sky_model.copy(name="source-3")
return Models([sky_model_2, sky_model_3])
@pytest.fixture(scope="session")
def sky_models_2(sky_model):
sky_model_4 = sky_model.copy(name="source-4")
sky_model_5 = sky_model.copy(name="source-5")
return Models([sky_model_4, sky_model_5])
def test_sky_model_init():
with pytest.raises(TypeError):
spatial_model = GaussianSpatialModel()
SkyModel(spectral_model=1234, spatial_model=spatial_model)
with pytest.raises(TypeError):
SkyModel(spectral_model=PowerLawSpectralModel(), spatial_model=1234)
def test_sky_model_spatial_none_io(tmpdir):
pwl = PowerLawSpectralModel()
model = SkyModel(spectral_model=pwl, name="test")
models = Models([model])
filename = tmpdir / "test-models-none.yaml"
models.write(filename)
models = Models.read(filename)
assert models["test"].spatial_model is None
def test_sky_model_spatial_none_evaluate(geom_true, gti):
pwl = PowerLawSpectralModel()
model = SkyModel(spectral_model=pwl, name="test")
data = model.evaluate_geom(geom_true, gti).to_value("cm-2 s-1 TeV-1")
assert data.shape == (3, 1, 1)
assert_allclose(data[0], 1.256774e-11, rtol=1e-6)
def test_skymodel_addition(sky_model, sky_models, sky_models_2, diffuse_model):
models = sky_model + sky_model.copy()
assert isinstance(models, Models)
assert len(models) == 2
models = sky_model + sky_models
assert isinstance(models, Models)
assert len(models) == 3
models = sky_models + sky_model
assert isinstance(models, Models)
assert len(models) == 3
models = sky_models + diffuse_model
assert isinstance(models, Models)
assert len(models) == 3
models = sky_models + sky_models_2
assert isinstance(models, Models)
assert len(models) == 4
models = sky_model + sky_models
assert isinstance(models, Models)
assert len(models) == 3
def test_background_model(background):
bkg1 = BackgroundModel(background)
bkg1.spectral_model.norm.value = 2.0
npred1 = bkg1.evaluate()
assert_allclose(npred1.data[0][0][0], background.data[0][0][0] * 2.0, rtol=1e-3)
assert_allclose(npred1.data.sum(), background.data.sum() * 2.0, rtol=1e-3)
bkg2 = BackgroundModel(background)
bkg2.spectral_model.norm.value = 2.0
bkg2.spectral_model.tilt.value = 0.2
bkg2.spectral_model.reference.quantity = "1000 GeV"
npred2 = bkg2.evaluate()
assert_allclose(npred2.data[0][0][0], 2.254e-07, rtol=1e-3)
assert_allclose(npred2.data.sum(), 7.352e-06, rtol=1e-3)
def test_background_model_io(tmpdir, background):
filename = str(tmpdir / "test-bkg-file.fits")
bkg = BackgroundModel(background, filename=filename)
bkg.spectral_model.norm.value = 2.0
bkg.map.write(filename, overwrite=True)
bkg_dict = bkg.to_dict()
bkg_read = bkg.from_dict(bkg_dict)
assert_allclose(
bkg_read.evaluate().data.sum(), background.data.sum() * 2.0, rtol=1e-3
)
assert bkg_read.filename == filename
class TestSkyModels:
@staticmethod
def test_parameters(sky_models):
parnames = [
"index",
"amplitude",
"reference",
"lon_0",
"lat_0",
"sigma",
"e",
"phi",
] * 2
assert sky_models.parameters.names == parnames
# Check that model parameters are references to the parts
p1 = sky_models.parameters["lon_0"]
p2 = sky_models[0].parameters["lon_0"]
assert p1 is p2
@staticmethod
def test_str(sky_models):
assert "Component 0" in str(sky_models)
assert "Component 1" in str(sky_models)
@staticmethod
def test_get_item(sky_models):
model = sky_models["source-2"]
assert model.name == "source-2"
model = sky_models["source-3"]
assert model.name == "source-3"
with pytest.raises(ValueError):
sky_models["spam"]
@staticmethod
def test_names(sky_models):
assert sky_models.names == ["source-2", "source-3"]
@requires_data()
def test_models_mutation(sky_model, sky_models, sky_models_2):
mods = sky_models
mods.insert(0, sky_model)
assert mods.names == ["source-1", "source-2", "source-3"]
mods.extend(sky_models_2)
assert mods.names == ["source-1", "source-2", "source-3", "source-4", "source-5"]
mod3 = mods[3]
mods.remove(mods[3])
assert mods.names == ["source-1", "source-2", "source-3", "source-5"]
mods.append(mod3)
assert mods.names == ["source-1", "source-2", "source-3", "source-5", "source-4"]
mods.pop(3)
assert mods.names == ["source-1", "source-2", "source-3", "source-4"]
with pytest.raises(ValueError, match="Model names must be unique"):
mods.append(sky_model)
with pytest.raises(ValueError, match="Model names must be unique"):
mods.insert(0, sky_model)
with pytest.raises(ValueError, match="Model names must be unique"):
mods.extend(sky_models_2)
with pytest.raises(ValueError, match="Model names must be unique"):
mods = sky_models + sky_models_2
class TestSkyModel:
@staticmethod
def test_repr(sky_model):
assert "SkyModel" in repr(sky_model)
@staticmethod
def test_str(sky_model):
assert "SkyModel" in str(sky_model)
@staticmethod
def test_parameters(sky_model):
# Check that model parameters are references to the spatial and spectral parts
p1 = sky_model.parameters["lon_0"]
p2 = sky_model.spatial_model.parameters["lon_0"]
assert p1 is p2
p1 = sky_model.parameters["amplitude"]
p2 = sky_model.spectral_model.parameters["amplitude"]
assert p1 is p2
@staticmethod
def test_evaluate_scalar(sky_model):
lon = 3 * u.deg
lat = 4 * u.deg
energy = 1 * u.TeV
q = sky_model.evaluate(lon, lat, energy)
assert q.unit == "cm-2 s-1 TeV-1 sr-1"
assert np.isscalar(q.value)
assert_allclose(q.to_value("cm-2 s-1 TeV-1 deg-2"), 1.76879232e-13)
@staticmethod
def test_evaluate_array(sky_model):
lon = 3 * u.deg * np.ones(shape=(3, 4))
lat = 4 * u.deg * np.ones(shape=(3, 4))
energy = [1, 1, 1, 1, 1] * u.TeV
q = sky_model.evaluate(lon, lat, energy[:, np.newaxis, np.newaxis])
assert q.shape == (5, 3, 4)
assert_allclose(q.to_value("cm-2 s-1 TeV-1 deg-2"), 1.76879232e-13)
@staticmethod
def test_processing(sky_model):
assert sky_model.apply_irf == {"exposure": True, "psf": True, "edisp": True}
out = sky_model.to_dict()
assert "apply_irf" not in out
sky_model.apply_irf["edisp"] = False
out = sky_model.to_dict()
assert out["apply_irf"] == {"exposure": True, "psf": True, "edisp": False}
sky_model.apply_irf["edisp"] = True
class Test_Template_with_cube:
@staticmethod
def test_evaluate_scalar(diffuse_model):
# Check pixel inside map
val = diffuse_model.evaluate(0 * u.deg, 0 * u.deg, 10 * u.TeV)
assert val.unit == "cm-2 s-1 MeV-1 sr-1"
assert val.shape == (1,)
        assert_allclose(val.value, 42)
import numpy as np
import quaternion
from scipy.integrate import odeint
from .enviroment import Enviroment
from .rocket import Rocket
class TrajectorySolver:
def __init__(
self,
rocket:Rocket,
dt=0.05,
max_t=1000.0,
cons_out=True):
self.state = 1
self.apogee_flag = False
self.rocket = rocket
self.dt = dt
self.max_t = max_t
self.cons = cons_out
self.solver_log = {}
self.t = np.r_[
np.arange(0.0,3.,self.dt/10),
np.arange(3., self.max_t, self.dt)
]
def solve(self):
u0 = np.r_[
self.rocket.x,
self.rocket.v,
quaternion.as_float_array(self.rocket.q),
self.rocket.omega
]
self.solver_log = {}
self.solution = odeint(self.__f_main, u0, self.t)
return self.solution
def add_solver_log(self, name:str, **kwargs):
self.solver_log[name] = kwargs
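    # Flight-state codes used by __f_main below (as implied by the transitions it logs):
    #   1   : on the launch rail, both lugs constrained
    #   1.1 : first lug has left the rail, second lug still constrained
    #   2   : free powered flight until motor cutoff (MECO)
    #   3   : coasting after MECO
    #   3.5 : drogue chute deployed (only if the rocket has one)
    #   4   : main parachute deployed
    #   5   : landed; the state derivative is forced to zero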
def __f_main(self, u, t):
rocket = self.rocket
env = rocket.enviroment
air = rocket.air
launcher = rocket.launcher
if self.state == 5:
return u*0.
# --------------------------
# extract vectors
# --------------------------
x = u[0:3]
v = u[3:6]
q = quaternion.as_quat_array(u[6:10])
omega = u[10:]
rocket.t = t
rocket.x = x
rocket.v = v
rocket.q = q
rocket.omega = omega
# ----------------------------
# Direction Cosine Matrix for input q
# ----------------------------
# Tbl = transform from local(fixed) coordinate to body coord.
# note: as_rotation_matrix is for vector rotation
# -> for coordinate rotation, input conj(q)
Tbl = quaternion.as_rotation_matrix(np.conj(q))
if self.state == 1 and launcher.is1stlugOff():
if self.cons:
print('------------------')
print('1stlug off at t=', t, '[s]')
print('altitude:', x[2], '[m]')
self.add_solver_log('1stlug_off', t=t, x=x, v=v, q=q, omega=omega)
self.state = 1.1
elif self.state == 1.1 and launcher.is2ndlugOff():
if self.cons:
print('------------------')
print('2ndlug off at t=', t, '[s]')
print('altitude:', x[2], '[m]')
self.add_solver_log('2ndlug_off', t=t, x=x, v=v, q=q, omega=omega)
self.state = 2
elif self.state <= 2 and t >= rocket.engine.thrust_cutoff_time:
if self.cons:
print('------------------')
print('MECO at t=', t, '[s]')
print('altitude:', x[2], '[m]')
self.add_solver_log('MECO', t=t, x=x, v=v, q=q, omega=omega)
self.state = 3
elif self.state == 3:
if rocket.hasDroguechute():
if rocket.isDroguechuteDeployed():
if self.cons:
print('------------------')
print('drogue chute deployed at t=', t, '[s]')
print('altitude:', x[2], '[m]')
self.add_solver_log('drogue', t=t, x=x, v=v, q=q, omega=omega)
                    self.state = 3.5  # drogue chute deployed
else:
if rocket.isParachuteDeployed():
if self.cons:
print('------------------')
print('main parachute deployed at t=', t, '[s]')
print('altitude:', x[2], '[m]')
self.add_solver_log('para', t=t, x=x, v=v, q=q, omega=omega)
self.state = 4
elif self.state == 3.5 and rocket.isParachuteDeployed():
if self.cons:
print('------------------')
print('main parachute deployed at t=', t, '[s]')
print('altitude:', x[2], '[m]')
self.add_solver_log('para', t=t, x=x, v=v, q=q, omega=omega)
self.state = 4
elif self.state > 1 and self.state < 5 and x[2] < 0.0 and t > rocket.engine.thrust_startup_time:
if self.cons:
print('------------------')
print('landing at t=', t, '[s]')
print('x:', x)
self.add_solver_log('landing', t=t, x=x, v=v, q=q, omega=omega)
self.state = 5
return u*0
        # dx_dt: rocket velocity as seen from the ground, expressed in the Earth-fixed frame
        # v is in the body frame, so rotate it into the Earth-fixed frame
dx_dt = np.dot(Tbl.T, v)
if self.apogee_flag is False and dx_dt[2] < 0.0:
if self.cons:
print('------------------')
print('apogee at t=', t, '[s]')
print('altitude:', x[2], '[m]')
rocket.t_apogee = t
self.add_solver_log('apogee', t=t, x=x, v=v, q=q, omega=omega)
self.apogee_flag = True
        # compute mass, center of gravity (CG) and moment of inertia (MOI)
mass = rocket.getMass(t)
CG = rocket.getCG(t)
MOI = rocket.getMOI(t)
        # time derivative of the moment of inertia
        # currently approximated by the average rate of change towards the MOI at the
        # next thrust sample point (the MOI changes on the thrust sampling interval)
dt = 1.0e-3
MOI_next = rocket.getMOI(t + dt)
dMOI_dt = (MOI_next - MOI)/dt
        # v_air: relative wind vector in the body frame
v_air = -v + np.dot(Tbl, air.wind(x[2]))
v_air_norm = np.linalg.norm(v_air)
if v_air_norm == 0:
alpha = 0.
else:
            # v_air[0]: axial (body-x) velocity component as seen from the ground
alpha = np.arccos(np.abs(v_air[0])/v_air_norm)
        # wind direction around the roll axis
phi = np.arctan2(-v_air[1], -v_air[2])
_, _, rho, sound_speed = air.standard_air(x[2])
mach = v_air_norm / sound_speed
#Cd = air.getCd(mach, alpha)
#Cl = air.getCl(mach, alpha)
#CP = air.getCP(mach, alpha)
Cd = rocket.getCd(mach, alpha)
Cl = rocket.getCl(mach, alpha)
CP = rocket.getCP(mach, alpha)
cosa = np.cos(alpha)
sina = np.sin(alpha)
air_coeff = np.array(
[(-Cl*sina + Cd*cosa),
(Cl*cosa + Cd*sina)*np.sin(phi),
(Cl*cosa + Cd*sina)*np.cos(phi)]
)
rocket_xarea = (rocket.diameter/2)**2 * np.pi
air_force = 0.5 * rho * v_air_norm**2.0 * rocket_xarea * (-1 * air_coeff)
air_moment_CG = np.cross(np.array([CG - CP, 0.0, 0.0]), air_force)
l = np.array([rocket.diameter, rocket.height, rocket.height])
air_moment_damping = 0.25 * rho * v_air_norm * rocket.Cm * (l**2) * rocket_xarea * omega
air_moment = air_moment_CG + air_moment_damping
        # gravitational acceleration
g = env.g(x[2])
#print('F_coriolis', env.Coriolis(v, Tbl))
if self.state <= 1.1:
            # state <= 1.1: while the launch lugs are still constrained by the launcher,
            # motion is only allowed along the body x axis (the rocket axis)
            # translational forces that are expressed in the body frame
            # TODO: implement the friction() function (launch-rail vibration/friction)
thrust_vec = np.array([rocket.engine.thrust(t), 0.0, 0.0])
F_body = air_force + thrust_vec
            # total acceleration
            dv_dt = -np.cross(omega, v)
"""Abstract baseclass for all distributions."""
import logging
import numpy
import chaospy
from .utils import check_dependencies
class Distribution(object):
"""Baseclass for all probability distributions."""
__array_priority__ = 9000
"""Numpy override variable."""
interpret_as_integer = False
"""
    Flag indicating that the return values from the methods sample and inv
    should be interpreted as integers instead of floating point.
"""
@property
def stochastic_dependent(self):
"""True if distribution contains stochastically dependent components."""
return any(len(deps) > 1 for deps in self._dependencies)
def __init__(
self,
parameters,
dependencies,
rotation=None,
exclusion=None,
repr_args=None,
):
"""
Distribution initializer.
In addition to assigning some object variables, also checks for
some consistency issues.
Args:
parameters (Optional[Distribution[str, Union[ndarray, Distribution]]]):
Collection of model parameters.
dependencies (Optional[Sequence[Set[int]]]):
Dependency identifiers. One collection for each dimension.
rotation (Optional[Sequence[int]]):
The order of which to resolve dependencies.
exclusion (Optional[Sequence[int]]):
Distributions that has been "taken out of play" and
therefore can not be reused other places in the
dependency hierarchy.
repr_args (Optional[Sequence[str]]):
Positional arguments to place in the object string
representation. The repr output will then be:
`<class name>(<arg1>, <arg2>, ...)`.
Raises:
StochasticallyDependentError:
                For dependency structures that can not later be
                rectified. This includes under-defined
                distributions, and inclusion of distributions that
                should be excluded.
"""
assert isinstance(parameters, dict)
self._parameters = parameters
self._dependencies = list(dependencies)
if rotation is None:
rotation = sorted(enumerate(self._dependencies), key=lambda x: len(x[1]))
rotation = [key for key, _ in rotation]
rotation = list(rotation)
assert len(set(rotation)) == len(dependencies)
assert min(rotation) == 0
assert max(rotation) == len(dependencies)-1
self._rotation = rotation
if exclusion is None:
exclusion = set()
self._exclusion = set(exclusion)
if repr_args is None:
repr_args = ("{}={}".format(key, self._parameters[key])
for key in sorted(self._parameters))
self._repr_args = list(repr_args)
self._mom_cache = {(0,)*len(dependencies): 1.}
self._ttr_cache = {}
self._indices = {}
self._all_dependencies = {dep for deps in self._dependencies for dep in deps}
if len(self._all_dependencies) < len(dependencies):
raise chaospy.StochasticallyDependentError(
"%s is an under-defined probability distribution." % self)
for key, param in list(parameters.items()):
if isinstance(param, Distribution):
if self._all_dependencies.intersection(param._exclusion):
raise chaospy.StochasticallyDependentError((
"%s contains dependencies that can not also exist "
"other places in the dependency hierarchy") % param)
self._exclusion.update(param._exclusion)
else:
self._parameters[key] = numpy.asarray(param)
def get_parameters(self, idx, cache, assert_numerical=True):
"""Get distribution parameters."""
del assert_numerical
out = self._parameters.copy()
assert isinstance(cache, dict)
if idx is not None:
assert not isinstance(idx, dict), idx
assert idx == int(idx), idx
assert "idx" not in out
assert "cache" not in out
out["cache"] = cache
out["idx"] = idx
return out
@property
def lower(self):
"""Lower bound for the distribution."""
cache = {}
out = numpy.zeros(len(self))
for idx in self._rotation:
out[idx] = self._get_lower(idx, cache=cache)
return out
def _get_lower(self, idx, cache):
"""In-processes function for getting lower bounds."""
if (idx, self) in cache:
return cache[idx, self][0]
if hasattr(self, "get_lower_parameters"):
parameters = self.get_lower_parameters(idx, cache)
else:
parameters = self.get_parameters(idx, cache, assert_numerical=False)
out = self._lower(**parameters)
assert not isinstance(out, Distribution), (self, out)
out = numpy.atleast_1d(out)
assert out.ndim == 1, (self, out, cache)
cache[idx, self] = (out, None)
return out
def _lower(self, **kwargs): # pragma: no cover
"""Backend lower bound."""
raise chaospy.UnsupportedFeature("lower not supported")
@property
def upper(self):
"""Upper bound for the distribution."""
cache = {}
out = numpy.zeros(len(self))
for idx in self._rotation:
out[idx] = self._get_upper(idx, cache=cache)
return out
def _get_upper(self, idx, cache):
"""In-processes function for getting upper bounds."""
if (idx, self) in cache:
return cache[idx, self][0]
if hasattr(self, "get_upper_parameters"):
parameters = self.get_upper_parameters(idx, cache)
else:
parameters = self.get_parameters(idx, cache, assert_numerical=False)
out = self._upper(**parameters)
assert not isinstance(out, Distribution), (self, out)
out = numpy.atleast_1d(out)
assert out.ndim == 1, (self, out, cache)
cache[idx, self] = (out, None)
size = max([elem[0].size for elem in cache.values()])
assert all([elem[0].size in (1, size) for elem in cache.values()])
return out
def _upper(self, **kwargs): # pragma: no cover
"""Backend upper bound."""
raise chaospy.UnsupportedFeature("lower not supported")
def fwd(self, x_data):
"""
Forward Rosenblatt transformation.
Args:
x_data (numpy.ndarray):
Location for the distribution function. ``x_data.shape`` must
be compatible with distribution shape.
Returns:
(numpy.ndarray):
Evaluated distribution function values, where
``out.shape==x_data.shape``.
"""
logger = logging.getLogger(__name__)
check_dependencies(self)
x_data = numpy.asfarray(x_data)
shape = x_data.shape
x_data = x_data.reshape(len(self), -1)
cache = {}
q_data = numpy.zeros(x_data.shape)
for idx in self._rotation:
q_data[idx] = self._get_fwd(x_data[idx], idx, cache)
indices = (q_data > 1) | (q_data < 0)
if numpy.any(indices): # pragma: no cover
logger.debug("%s.fwd: %d/%d outputs out of bounds",
self, numpy.sum(indices), len(indices))
q_data = numpy.clip(q_data, a_min=0, a_max=1)
q_data = q_data.reshape(shape)
return q_data
def _get_fwd(self, x_data, idx, cache):
"""In-process function for getting cdf-values."""
logger = logging.getLogger(__name__)
assert (idx, self) not in cache, "repeated evaluation"
lower = numpy.broadcast_to(self._get_lower(idx, cache=cache.copy()), x_data.shape)
upper = numpy.broadcast_to(self._get_upper(idx, cache=cache.copy()), x_data.shape)
parameters = self.get_parameters(idx, cache, assert_numerical=True)
ret_val = self._cdf(x_data, **parameters)
assert not isinstance(ret_val, Distribution), (self, ret_val)
out = numpy.zeros(x_data.shape)
out[:] = ret_val
indices = x_data < lower
if numpy.any(indices):
logger.debug("%s.fwd: %d/%d inputs below bounds",
self, numpy.sum(indices), len(indices))
out = numpy.where(indices, 0, out)
indices = x_data > upper
if numpy.any(indices):
logger.debug("%s.fwd: %d/%d inputs above bounds",
self, numpy.sum(indices), len(indices))
out = numpy.where(indices, 1, out)
assert numpy.all((out >= 0) | (out <= 1))
cache[idx, self] = (x_data, out)
assert out.ndim == 1, (self, out, cache)
return out
def cdf(self, x_data):
"""
Cumulative distribution function.
Note that chaospy only supports cumulative distribution functions for
stochastically independent distributions.
Args:
x_data (numpy.ndarray):
Location for the distribution function. Assumes that
``len(x_data) == len(distribution)``.
Returns:
(numpy.ndarray):
Evaluated distribution function values, where output has shape
``x_data.shape`` in one dimension and ``x_data.shape[1:]`` in
higher dimensions.
"""
check_dependencies(self)
if self.stochastic_dependent:
raise chaospy.StochasticallyDependentError(
"Cumulative distribution does not support dependencies.")
x_data = numpy.asarray(x_data)
if self.interpret_as_integer:
x_data = x_data+0.5
q_data = self.fwd(x_data)
if len(self) > 1:
q_data = numpy.prod(q_data, 0)
return q_data
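    # A small round-trip sketch (assuming a concrete 1-d subclass such as chaospy.Normal):
    #
    #     dist = chaospy.Normal(0, 1)
    #     u = dist.fwd([-1.0, 0.0, 1.0])   # forward Rosenblatt maps x into [0, 1]
    #     x = dist.inv(u)                  # inverse Rosenblatt recovers the original x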
def inv(self, q_data, max_iterations=100, tollerance=1e-5):
"""
Inverse Rosenblatt transformation.
If possible the transformation is done analytically. If not possible,
transformation is approximated using an algorithm that alternates
between Newton-Raphson and binary search.
Args:
q_data (numpy.ndarray):
Probabilities to be inverse. If any values are outside ``[0,
1]``, error will be raised. ``q_data.shape`` must be compatible
with distribution shape.
max_iterations (int):
If approximation is used, this sets the maximum number of
allowed iterations in the Newton-Raphson algorithm.
tollerance (float):
                If approximation is used, this sets the error tolerance level
required to define a sample as converged.
Returns:
(numpy.ndarray):
Inverted probability values where
``out.shape == q_data.shape``.
"""
logger = logging.getLogger(__name__)
check_dependencies(self)
q_data = numpy.asfarray(q_data)
assert numpy.all((q_data >= 0) & (q_data <= 1)), "sanitize your inputs!"
shape = q_data.shape
q_data = q_data.reshape(len(self), -1)
cache = {}
x_data = numpy.zeros(q_data.shape)
for idx in self._rotation:
x_data[idx] = self._get_inv(q_data[idx], idx, cache)
x_data = x_data.reshape(shape)
return x_data
def _get_inv(self, q_data, idx, cache):
"""In-process function for getting ppf-values."""
logger = logging.getLogger(__name__)
assert numpy.all(q_data <= 1) and numpy.all(q_data >= 0)
assert q_data.ndim == 1
if (idx, self) in cache:
return cache[idx, self][0]
lower = numpy.broadcast_to(self._get_lower(idx, cache=cache.copy()), q_data.shape)
upper = numpy.broadcast_to(self._get_upper(idx, cache=cache.copy()), q_data.shape)
try:
parameters = self.get_parameters(idx, cache, assert_numerical=True)
ret_val = self._ppf(q_data, **parameters)
except chaospy.UnsupportedFeature:
ret_val = chaospy.approximate_inverse(
self, idx, q_data, cache=cache)
assert not isinstance(ret_val, Distribution), (self, ret_val)
        out = numpy.zeros(q_data.shape)
from .. import ParallelSampler, sample_ellipsoid
import numpy as np
import sys
def log_probability_function(p):
r = emcee_pipeline.run_results(p)
return r.post, (r.prior, r.extra)
class EmceeSampler(ParallelSampler):
parallel_output = False
supports_resume = True
sampler_outputs = [("prior", float), ("post", float)]
def config(self):
global emcee_pipeline
emcee_pipeline = self.pipeline
if self.is_master():
import emcee
self.emcee = emcee
self.emcee_version = int(self.emcee.__version__[0])
# Parameters of the emcee sampler
self.nwalkers = self.read_ini("walkers", int, 2)
self.samples = self.read_ini("samples", int, 1000)
self.nsteps = self.read_ini("nsteps", int, 100)
assert self.nsteps>0, "You specified nsteps<=0 in the ini file - please set a positive integer"
assert self.samples>0, "You specified samples<=0 in the ini file - please set a positive integer"
random_start = self.read_ini("random_start", bool, False)
start_file = self.read_ini("start_points", str, "")
covmat_file = self.read_ini("covmat", str, "")
self.ndim = len(self.pipeline.varied_params)
#Starting positions and values for the chain
self.num_samples = 0
self.prob0 = None
self.blob0 = None
if start_file:
self.p0 = self.load_start(start_file)
self.output.log_info("Loaded starting position from %s", start_file)
elif self.distribution_hints.has_cov():
center = self.start_estimate()
cov = self.distribution_hints.get_cov()
self.p0 = sample_ellipsoid(center, cov, size=self.nwalkers)
self.output.log_info("Generating starting positions from covmat from earlier in pipeline")
elif covmat_file:
center = self.start_estimate()
cov = self.load_covmat(covmat_file)
self.output.log_info("Generating starting position from covmat in %s", covmat_file)
iterations_limit = 100000
n=0
p0 = []
for i in range(iterations_limit):
p = self.emcee.utils.sample_ellipsoid(center, cov)[0]
if np.isfinite(self.pipeline.prior(p)):
p0.append(p)
if len(p0)==self.nwalkers:
break
else:
raise ValueError("The covmat you used could not generate points inside the prior")
self.p0 = np.array(p0)
elif random_start:
self.p0 = [self.pipeline.randomized_start()
for i in range(self.nwalkers)]
self.output.log_info("Generating random starting positions from within prior")
else:
center_norm = self.pipeline.normalize_vector(self.start_estimate())
sigma_norm=np.repeat(1e-3, center_norm.size)
p0_norm = self.emcee.utils.sample_ball(center_norm, sigma_norm, size=self.nwalkers)
p0_norm[p0_norm<=0] = 0.001
p0_norm[p0_norm>=1] = 0.999
self.p0 = [self.pipeline.denormalize_vector(p0_norm_i) for p0_norm_i in p0_norm]
self.output.log_info("Generating starting positions in small ball around starting point")
#Finally we can create the sampler
self.ensemble = self.emcee.EnsembleSampler(self.nwalkers, self.ndim,
log_probability_function,
pool=self.pool)
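    # Starting-point strategy implemented in config(), in order of precedence:
    #   1. explicit 'start_points' file,
    #   2. covariance hint passed down from an earlier stage of the pipeline,
    #   3. 'covmat' file, sampled until every walker starts inside the prior,
    #   4. 'random_start' draws from the prior,
    #   5. otherwise a small ball (1e-3 in normalized units) around the start estimate.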
def resume(self):
if self.output.resumed:
data = np.genfromtxt(self.output._filename, invalid_raise=False)[:, :self.ndim]
num_samples = len(data) // self.nwalkers
self.p0 = data[-self.nwalkers:]
self.num_samples += num_samples
if self.num_samples >= self.samples:
print("You told me to resume the chain - it has already completed (with {} samples), so sampling will end.".format(len(data)))
print("Increase the 'samples' parameter to keep going.")
else:
print("Continuing emcee from existing chain - have {} samples already".format(len(data)))
def load_start(self, filename):
#Load the data and cut to the bits we need.
#This means you can either just use a test file with
#starting points, or an emcee output file.
data = np.genfromtxt(filename, invalid_raise=False)[-self.nwalkers:, :self.ndim]
if data.shape != (self.nwalkers, self.ndim):
raise RuntimeError("There are not enough lines or columns "
"in the starting point file %s" % filename)
return list(data)
def load_covmat(self, covmat_file):
        covmat = np.loadtxt(covmat_file)
# -*- coding: utf-8 -*-
"""
mslib.mswms.mpl_hsec_styles
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Matplotlib horizontal section styles.
In this module, the visualisation styles of the horizontal map products
that can be provided through the WMS are defined. The styles are classes
that are derived from MPLBasemapHorizontalSectionStyle (defined in
mpl_hsec.py). If you want to define a new product, copy an existing
implementation and modify it according to your needs.
A few notes:
1) The idea: Each product defines the data fields it requires as NetCDF-CF
compliant standard names in the variable 'required_datafields' (a list
of tuples (leveltype, variablename), where leveltype can be ml (model levels),
pl (pressure levels), or whatever you data source may provide. The data
driver invoked by the WSGI module is responsible for loading the data.
The superclass MPLBasemapHorizontalSectionStyle sets up the plot and
draws the map. What is left to do for the product class is to implement
specific post-processing actions on the data, and to do the visualisation
on the map.
2) If your product requires some sort of post-processing (e.g. the derivation
of potential temperature or any other parameter), place it in the
_prepare_datafields() method.
3) All visualisation commands go to the _plot_style() method. In this
method, you can assume that the data fields you have requested are available
as 2D arrays in the 'self.data' field.
4) All defined products MUST define a name (the WMS layer name) and a title.
5) If you want to provide different styles according to the WMS standard,
define the names of the styles in the 'styles' variable and check in
_plot_style() for the 'self.style' variable to know which style to deliver.
6) Your products should consider the 'self.noframe' variable to place a
legend and a title. If this variable is True (default WMS behaviour), plotting
anything outside the map axis will lead to erroneous plots. Look at the
provided styles to get a feeling of how title and legends can be best placed.
This file is part of mss.
:copyright: Copyright 2008-2014 Deutsches Zentrum fuer Luft- und Raumfahrt e.V.
:copyright: Copyright 2011-2014 <NAME> (mr)
:copyright: Copyright 2016-2021 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import warnings
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.axes_grid1.inset_locator
import matplotlib.colors
import mpl_toolkits.basemap
from matplotlib import patheffects
from mslib.mswms.mpl_hsec import MPLBasemapHorizontalSectionStyle
from mslib.mswms.utils import Targets, get_style_parameters, get_cbar_label_format, make_cbar_labels_readable
from mslib.utils import thermolib
from mslib.utils.units import convert_to
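# A minimal, illustrative skeleton of a new product following notes 1)-6) above
# (layer name, title and data fields below are placeholders only):
#
#     class HS_MyNewStyle(MPLBasemapHorizontalSectionStyle):
#         name = "MYPRODUCT"
#         title = "My Product (hPa)"
#         required_datafields = [("sfc", "air_pressure_at_sea_level", "hPa")]
#
#         def _plot_style(self):
#             cs = self.bm.contour(self.lonmesh, self.latmesh,
#                                  self.data["air_pressure_at_sea_level"])
#             self.bm.ax.clabel(cs, fontsize=8, fmt="%.0f")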
class HS_CloudsStyle_01(MPLBasemapHorizontalSectionStyle):
"""
Surface Field: CLOUDS
"""
name = "TCC"
title = "Cloud Cover (0-1)"
styles = [
("default", "Total Cloud Cover"),
("TOT", "Total Cloud Cover"),
("LOW", "Low Cloud Cover"),
("MED", "Medium Cloud Cover"),
("HIGH", "High Cloud Cover")]
# Variables with the highest number of dimensions first (otherwise
# MFDatasetCommonDims will throw an exception)!
required_datafields = [
('sfc', 'low_cloud_area_fraction', 'dimensionless'),
('sfc', 'medium_cloud_area_fraction', 'dimensionless'),
('sfc', 'high_cloud_area_fraction', 'dimensionless'),
('sfc', 'air_pressure_at_sea_level', 'hPa')]
def _plot_style(self):
"""
"""
bm = self.bm
ax = self.bm.ax
data = self.data
if self.style.lower() == "default":
self.style = "TOT"
if self.style in ["LOW", "TOT"]:
lcc = bm.contourf(self.lonmesh, self.latmesh, data['low_cloud_area_fraction'],
np.arange(0.2, 1.1, 0.1), cmap=plt.cm.autumn_r)
self.add_colorbar(lcc, "Cloud cover fraction in grid box (0-1)")
if self.style in ["MED", "TOT"]:
mcc = bm.contourf(self.lonmesh, self.latmesh, data['medium_cloud_area_fraction'],
np.arange(0.2, 1.1, 0.1), cmap=plt.cm.summer_r)
self.add_colorbar(mcc, width="2%" if self.style == "TOT" else "3%",
cb_format='' if self.style == "TOT" else "%.1f")
if self.style in ["HIGH", "TOT"]:
hcc = bm.contourf(self.lonmesh, self.latmesh, data['high_cloud_area_fraction'],
np.arange(0.2, 1.1, 0.1), cmap=plt.cm.Blues)
bm.contour(self.lonmesh, self.latmesh, data['high_cloud_area_fraction'],
[0.2], colors="blue", linestyles="dotted")
self.add_colorbar(hcc, width="1%" if self.style == "TOT" else "3%",
cb_format='' if self.style == "TOT" else "%.1f")
# Colors in python2.6/site-packages/matplotlib/colors.py
cs = bm.contour(self.lonmesh, self.latmesh, data['air_pressure_at_sea_level'],
np.arange(950, 1050, 4), colors="burlywood", linewidths=2)
ax.clabel(cs, fontsize=8, fmt='%.0f')
titlestring = "Total cloud cover (high, medium, low) (0-1)"
if self.style == "LOW":
titlestring = "Low cloud cover (0-1)"
elif self.style == "MED":
titlestring = "Medium cloud cover (0-1)"
elif self.style == "HIGH":
titlestring = "High cloud cover (0-1)"
titlestring += f'\nValid: {self.valid_time.strftime("%a %Y-%m-%d %H:%M UTC")}'
if self.uses_inittime_dimension():
time_step = self.valid_time - self.init_time
time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600
titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime("%a %Y-%m-%d %H:%M UTC")})'
if not self.noframe:
ax.set_title(titlestring,
horizontalalignment='left', x=0, fontsize=14)
else:
ax.text(bm.llcrnrx, bm.llcrnry, titlestring,
fontsize=10, bbox=dict(facecolor='white', alpha=0.6))
class HS_MSLPStyle_01(MPLBasemapHorizontalSectionStyle):
"""
Surface Field: Mean Sea Level Pressure
"""
name = "MSLP"
title = "Mean Sea Level Pressure (hPa)"
# Variables with the highest number of dimensions first (otherwise
# MFDatasetCommonDims will throw an exception)!
required_datafields = [
("sfc", "air_pressure_at_sea_level", "hPa"),
("sfc", "surface_eastward_wind", "knots"),
("sfc", "surface_northward_wind", "knots")]
def _plot_style(self):
bm = self.bm
ax = self.bm.ax
data = self.data
thick_contours = np.arange(952, 1050, 8)
thin_contours = [c for c in np.arange(952, 1050, 2)
if c not in thick_contours]
mslp = data['air_pressure_at_sea_level']
# Colors in python2.6/site-packages/matplotlib/colors.py
cs = bm.contour(self.lonmesh, self.latmesh, mslp,
thick_contours, colors="darkblue", linewidths=2)
ax.clabel(cs, fontsize=12, fmt='%.0f')
cs = bm.contour(self.lonmesh, self.latmesh, mslp,
thin_contours, colors="darkblue", linewidths=1)
        # Surface wind components; these are requested in knots via
        # required_datafields above, so no further conversion is needed here.
u = data['surface_eastward_wind']
v = data['surface_northward_wind']
# Transform wind vector field to fit map.
lons2 = ((self.lons + 180) % 360) - 180
lons2_ind = lons2.argsort()
udat, vdat, xv, yv = bm.transform_vector(u[:, lons2_ind], v[:, lons2_ind],
lons2[lons2_ind], self.lats,
16, 16, returnxy=True, masked=True)
# Plot wind barbs.
bm.barbs(xv, yv, udat, vdat,
barbcolor='firebrick', flagcolor='firebrick', pivot='middle',
linewidths=1)
# Find local minima and maxima.
# min_indices, min_values = local_minima(mslp.ravel(), window=50)
# #min_indices, min_values = local_minima(mslp, window=(50,50))
# minfits = minimum_filter(mslp, size=(50,50), mode="wrap")
# logging.debug("%s", minfits)
# #logging.debug("%s // %s // %s", min_values, lonmesh_.ravel()[min_indices],
# # self.latmesh_.ravel()[min_indices])
# bm.scatter(lonmesh.ravel()[min_indices], self.latmesh.ravel()[min_indices],
# s=20, c='blue', marker='s')
titlestring = "Mean sea level pressure (hPa) and surface wind"
titlestring += f'\nValid: {self.valid_time.strftime("%a %Y-%m-%d %H:%M UTC")}'
if self.uses_inittime_dimension():
time_step = self.valid_time - self.init_time
time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600
titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime("%a %Y-%m-%d %H:%M UTC")})'
if not self.noframe:
ax.set_title(titlestring,
horizontalalignment='left', x=0, fontsize=14)
else:
ax.text(bm.llcrnrx, bm.llcrnry, titlestring,
fontsize=10, bbox=dict(facecolor='white', alpha=0.6))
class HS_SEAStyle_01(MPLBasemapHorizontalSectionStyle):
"""
Surface Field: Solar Elevation Angle
"""
name = "SEA"
title = "Solar Elevation Angle (degrees)"
# Variables with the highest number of dimensions first (otherwise
# MFDatasetCommonDims will throw an exception)!
required_datafields = [
("sfc", "solar_elevation_angle", "degree")]
def _plot_style(self):
"""
"""
bm = self.bm
ax = self.bm.ax
data = self.data
thick_contours = np.arange(-10, 95, 5)
thin_contours = [c for c in np.arange(0, 90, 1)
if c not in thick_contours]
neg_thin_contours = [c for c in np.arange(-10, 0, 1)
if c not in thick_contours]
sea = data['solar_elevation_angle']
# Filled contour plot.
scs = bm.contourf(self.lonmesh, self.latmesh, sea,
np.arange(0, 91, 1), cmap=plt.cm.nipy_spectral)
self.add_colorbar(scs, label="Solar Elevation Angle (degrees)")
# Contour lines plot.
# Colors in python2.6/site-packages/matplotlib/colors.py
bm.contour(self.lonmesh, self.latmesh, sea,
thick_contours, colors="saddlebrown",
linewidths=3, linestyles="solid")
cs2 = bm.contour(self.lonmesh, self.latmesh, sea,
thin_contours, colors="white", linewidths=1)
cs2.clabel(cs2.levels, fontsize=14, fmt='%.0f')
cs3 = bm.contour(self.lonmesh, self.latmesh, sea,
neg_thin_contours, colors="saddlebrown",
linewidths=1, linestyles="solid")
cs3.clabel(fontsize=14, fmt='%.0f')
# Plot title.
titlestring = "Solar Elevation Angle "
titlestring += f"\nValid: {self.valid_time.strftime('%a %Y-%m-%d %H:%M UTC')}"
if not self.noframe:
ax.set_title(titlestring,
horizontalalignment='left', x=0, fontsize=14)
else:
ax.text(bm.llcrnrx, bm.llcrnry, titlestring,
fontsize=10, bbox=dict(facecolor='white', alpha=0.6))
class HS_SeaIceStyle_01(MPLBasemapHorizontalSectionStyle):
"""
Surface Field: Sea Ice Cover
"""
name = "CI"
title = "Sea Ice Cover Fraction (0-1)"
styles = [
("default", "pseudocolor plot"),
("PCOL", "pseudocolor plot"),
("CONT", "contour plot")]
# Variables with the highest number of dimensions first (otherwise
# MFDatasetCommonDims will throw an exception)!
required_datafields = [
("sfc", "sea_ice_area_fraction", 'dimensionless')]
def _plot_style(self):
"""
"""
bm = self.bm
ax = self.bm.ax
data = self.data
ice = data['sea_ice_area_fraction']
if self.style.lower() == "default":
self.style = "PCOL"
# Filled contour plot.
if self.style == "PCOL":
scs = bm.pcolormesh(self.lonmesh, self.latmesh, ice,
cmap=plt.cm.Blues,
norm=matplotlib.colors.Normalize(vmin=0.1, vmax=1.0),
shading="nearest", edgecolors='none')
else:
scs = bm.contourf(self.lonmesh, self.latmesh, ice,
np.arange(0.1, 1.1, .1), cmap=plt.cm.Blues)
self.add_colorbar(scs, label="Sea Ice Cover Fraction (0-1)")
# Plot title.
titlestring = "Sea Ice Cover"
titlestring += f'\nValid: {self.valid_time.strftime("%a %Y-%m-%d %H:%M UTC")}'
if self.uses_inittime_dimension():
time_step = self.valid_time - self.init_time
time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600
titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime("%a %Y-%m-%d %H:%M UTC")})'
if not self.noframe:
ax.set_title(titlestring,
horizontalalignment='left', x=0, fontsize=14)
else:
ax.text(bm.llcrnrx, bm.llcrnry, titlestring,
fontsize=10, bbox=dict(facecolor='white', alpha=0.6))
class HS_TemperatureStyle_ML_01(MPLBasemapHorizontalSectionStyle):
"""
Upper Air Field: Temperature
"""
name = "MLTemp01"
title = "Temperature (Model Level) (degC)"
# Variables with the highest number of dimensions first (otherwise
# MFDatasetCommonDims will throw an exception)!
required_datafields = [
("ml", "air_temperature", "degC")]
def _plot_style(self):
"""
"""
bm = self.bm
ax = self.bm.ax
data = self.data
cmin = -72
cmax = 42
thick_contours = np.arange(cmin, cmax, 6)
thin_contours = [c for c in np.arange(cmin, cmax, 2)
if c not in thick_contours]
tempC = data['air_temperature']
tc = bm.contourf(self.lonmesh, self.latmesh, tempC,
np.arange(cmin, cmax, 2), cmap=plt.cm.nipy_spectral)
self.add_colorbar(tc, "Temperature (degC)")
# Colors in python2.6/site-packages/matplotlib/colors.py
cs = bm.contour(self.lonmesh, self.latmesh, tempC,
[0], colors="red", linewidths=4)
cs = bm.contour(self.lonmesh, self.latmesh, tempC,
thick_contours, colors="saddlebrown", linewidths=2)
ax.clabel(cs, fontsize=14, fmt='%.0f')
cs = bm.contour(self.lonmesh, self.latmesh, tempC,
thin_contours, colors="saddlebrown", linewidths=1)
titlestring = f"Temperature (degC) at model level {self.level}"
titlestring += f'\nValid: {self.valid_time.strftime("%a %Y-%m-%d %H:%M UTC")}'
if self.uses_inittime_dimension():
time_step = self.valid_time - self.init_time
time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600
titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime("%a %Y-%m-%d %H:%M UTC")})'
if not self.noframe:
ax.set_title(titlestring,
horizontalalignment='left', x=0, fontsize=14)
else:
ax.text(bm.llcrnrx, bm.llcrnry, titlestring,
fontsize=10, bbox=dict(facecolor='white', alpha=0.6))
class HS_GenericStyle(MPLBasemapHorizontalSectionStyle):
"""
Pressure level version for Chemical Mixing ratios.
"""
styles = [
("auto", "auto colour scale"),
("autolog", "auto logcolour scale"), ]
def _plot_style(self):
bm = self.bm
ax = self.bm.ax
show_data = np.ma.masked_invalid(self.data[self.dataname])
# get cmin, cmax, cbar_log and cbar_format for level_key
cmin, cmax = Targets.get_range(self.dataname, self.level, self.name[-2:])
cmin, cmax, clevs, cmap, norm, ticks = get_style_parameters(
self.dataname, self.style, cmin, cmax, show_data)
tc = bm.contourf(self.lonmesh, self.latmesh, show_data, levels=clevs, cmap=cmap, extend="both", norm=norm)
for cont_data, cont_levels, cont_colour, cont_label_colour, cont_style, cont_lw, pe in self.contours:
cs_pv = ax.contour(self.lonmesh, self.latmesh, self.data[cont_data], cont_levels,
colors=cont_colour, linestyles=cont_style, linewidths=cont_lw)
cs_pv_lab = ax.clabel(cs_pv, colors=cont_label_colour, fmt='%.0f')
if pe:
plt.setp(cs_pv.collections, path_effects=[
patheffects.withStroke(linewidth=cont_lw + 2, foreground="w")])
plt.setp(cs_pv_lab, path_effects=[patheffects.withStroke(linewidth=1, foreground="w")])
# define position of the colorbar and the orientation of the ticks
if self.crs.lower() == "epsg:77774020":
cbar_location = 3
tick_pos = 'right'
else:
cbar_location = 4
tick_pos = 'left'
# Format for colorbar labels
cbar_label = self.title
cbar_format = get_cbar_label_format(self.style, np.median(np.abs(clevs)))
if not self.noframe:
cbar = self.fig.colorbar(tc, fraction=0.05, pad=0.08, shrink=0.7,
label=cbar_label, format=cbar_format, ticks=ticks)
cbar.set_ticks(clevs)
cbar.set_ticklabels(clevs)
else:
axins1 = mpl_toolkits.axes_grid1.inset_locator.inset_axes(
ax, width="3%", height="40%", loc=cbar_location)
self.fig.colorbar(tc, cax=axins1, orientation="vertical", format=cbar_format, ticks=ticks)
axins1.yaxis.set_ticks_position(tick_pos)
make_cbar_labels_readable(self.fig, axins1)
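# The factory below dynamically builds HS_GenericStyle subclasses: for a given
# entity (e.g. a chemical species or diagnostic known to Targets) and vertical
# coordinate it assembles name, title, units, required_datafields, overlay
# contours and the list of available styles, and registers the new class in
# the module globals so it can be picked up by name just like the hand-written
# styles. The loops following the definition instantiate it for all entities
# known to Targets on the 'al', 'ml', 'pl' and 'tl' coordinates, plus a few
# special cases (equivalent latitude, PV, N^2 and the tropopause altitude).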
def make_generic_class(name, entity, vert, add_data=None, add_contours=None,
fix_styles=None, add_styles=None, add_prepare=None):
if add_data is None:
add_data = [(vert, "ertel_potential_vorticity", "PVU")]
if add_contours is None:
add_contours = [("ertel_potential_vorticity", [2, 4, 8, 16], "dimgrey", "dimgrey", "solid", 2, True)]
class fnord(HS_GenericStyle):
name = f"{entity}_{vert}"
dataname = entity
title = Targets.TITLES.get(entity, entity)
long_name = entity
units, _ = Targets.get_unit(entity)
if units:
title += f" ({units})"
required_datafields = [(vert, entity, units)] + add_data
contours = add_contours
fnord.__name__ = name
fnord.styles = list(fnord.styles)
if Targets.get_thresholds(entity) is not None:
fnord.styles += [("nonlinear", "nonlinear colour scale")]
if all(_x is not None for _x in Targets.get_range(entity, None, vert)):
fnord.styles += [
("default", "fixed colour scale"),
("log", "fixed logarithmic colour scale")]
if add_styles is not None:
fnord.styles += add_styles
if fix_styles is not None:
fnord.styles = fix_styles
if add_prepare is not None:
fnord._prepare_datafields = add_prepare
globals()[name] = fnord
for vert in ["al", "ml", "pl", "tl"]:
for ent in Targets.get_targets():
make_generic_class(f"HS_GenericStyle_{vert.upper()}_{ent}", ent, vert)
make_generic_class(
f"HS_GenericStyle_{vert.upper()}_{'equivalent_latitude'}",
"equivalent_latitude", vert, [], [],
fix_styles=[("equivalent_latitude_nh", "northern hemisphere"),
("equivalent_latitude_sh", "southern hemisphere")])
make_generic_class(
f"HS_GenericStyle_{vert.upper()}_{'ertel_potential_vorticity'}",
"ertel_potential_vorticity", vert, [], [],
fix_styles=[("ertel_potential_vorticity_nh", "northern hemisphere"),
("ertel_potential_vorticity_sh", "southern hemisphere")])
make_generic_class(
f"HS_GenericStyle_{vert.upper()}_{'square_of_brunt_vaisala_frequency_in_air'}",
"square_of_brunt_vaisala_frequency_in_air", vert, [], [],
fix_styles=[("square_of_brunt_vaisala_frequency_in_air", "")])
make_generic_class(
"HS_GenericStyle_SFC_tropopause_altitude",
"tropopause_altitude", "sfc", [],
[("tropopause_altitude", np.arange(5, 20.1, 0.500), "yellow", "red", "solid", 0.5, False)],
fix_styles=[("tropopause_altitude", "tropopause_altitude")])
class HS_TemperatureStyle_PL_01(MPLBasemapHorizontalSectionStyle):
"""
Pressure level version of the temperature style.
"""
name = "PLTemp01"
title = "Temperature (degC) and Geopotential Height (m)"
# Variables with the highest number of dimensions first (otherwise
# MFDatasetCommonDims will throw an exception)!
required_datafields = [
("pl", "air_temperature", "degC"),
("pl", "geopotential_height", "m")]
def _plot_style(self):
"""
"""
bm = self.bm
ax = self.bm.ax
data = self.data
cmin = -72
cmax = 42
thick_contours = np.arange(cmin, cmax, 6)
thin_contours = [c for c in np.arange(cmin, cmax, 2)
if c not in thick_contours]
tempC = data['air_temperature']
tc = bm.contourf(self.lonmesh, self.latmesh, tempC,
np.arange(cmin, cmax, 2), cmap=plt.cm.nipy_spectral)
self.add_colorbar(tc, "Temperature (degC)")
# Colors in python2.6/site-packages/matplotlib/colors.py
with warnings.catch_warnings():
warnings.simplefilter("ignore")
cs = bm.contour(self.lonmesh, self.latmesh, tempC,
[0], colors="red", linewidths=4)
cs = bm.contour(self.lonmesh, self.latmesh, tempC,
thick_contours, colors="saddlebrown",
linewidths=2, linestyles="solid")
ax.clabel(cs, colors="black", fontsize=14, fmt='%.0f')
cs = bm.contour(self.lonmesh, self.latmesh, tempC,
thin_contours, colors="white",
linewidths=1, linestyles="solid")
# Plot geopotential height contours.
gpm = self.data["geopotential_height"]
geop_contours = np.arange(400, 28000, 40)
cs = bm.contour(self.lonmesh, self.latmesh, gpm,
geop_contours, colors="black", linewidths=1)
if cs.levels[0] in geop_contours[::2]:
lablevels = cs.levels[::2]
else:
lablevels = cs.levels[1::2]
ax.clabel(cs, lablevels, fontsize=10, fmt='%.0f')
titlestring = "Temperature (degC) and Geopotential Height (m) at " \
f"{self.level:.0f} hPa"
titlestring += f'\nValid: {self.valid_time.strftime("%a %Y-%m-%d %H:%M UTC")}'
if self.uses_inittime_dimension():
time_step = self.valid_time - self.init_time
time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600
titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime("%a %Y-%m-%d %H:%M UTC")})'
if not self.noframe:
ax.set_title(titlestring,
horizontalalignment='left', x=0, fontsize=14)
else:
ax.text(bm.llcrnrx, bm.llcrnry, titlestring,
fontsize=10, bbox=dict(facecolor='white', alpha=0.6))
class HS_GeopotentialWindStyle_PL(MPLBasemapHorizontalSectionStyle):
"""
Upper Air Field: Geopotential and Wind
"""
name = "PLGeopWind"
title = "Geopotential Height (m) and Horizontal Wind (m/s)"
styles = [
("default", "Wind Speed 10-85 m/s"),
("wind_10_105", "Wind Speed 10-105 m/s"),
("wind_10_65", "Wind Speed 10-65 m/s"),
("wind_20_55", "Wind Speed 20-55 m/s"),
("wind_15_55", "Wind Speed 15-55 m/s")]
# Variables with the highest number of dimensions first (otherwise
# MFDatasetCommonDims will throw an exception)!
required_datafields = [
("pl", "geopotential_height", "m"),
("pl", "eastward_wind", "m/s"),
("pl", "northward_wind", "m/s")]
def _plot_style(self):
"""
"""
bm = self.bm
ax = self.bm.ax
data = self.data
# Compute wind speed.
u = data["eastward_wind"]
v = data["northward_wind"]
wind = np.hypot(u, v)
# Plot wind contours.
# NOTE: Setting alpha=0.8 raises the transparency problem in the client
# (the imshow issue, see ../issues/transparency; surfaces with alpha
# values < 1 are mixed with grey). Hence, it is better to disable
# alpha blending here until a fix has been found. (mr 2011-02-01)
wind_contours = np.arange(10, 90, 5) # default wind contours
if self.style.lower() == "wind_10_65":
wind_contours = np.arange(10, 70, 5)
elif self.style.lower() == "wind_20_55":
wind_contours = np.arange(20, 60, 5)
elif self.style.lower() == "wind_15_55":
wind_contours = np.arange(15, 60, 5)
elif self.style.lower() == "wind_10_105":
wind_contours = np.arange(10, 110, 5)
cs = bm.contourf(self.lonmesh, self.latmesh, wind,
wind_contours, cmap=plt.cm.inferno_r)
self.add_colorbar(cs, "Wind Speed (m/s)")
# Plot geopotential height contours.
gpm = self.data["geopotential_height"]
gpm_interval = 20
if self.level <= 20:
gpm_interval = 120
elif self.level <= 100:
gpm_interval = 80
elif self.level <= 500:
gpm_interval = 40
geop_contours = np.arange(400, 55000, gpm_interval)
cs = bm.contour(self.lonmesh, self.latmesh, gpm,
geop_contours, colors="green", linewidths=2)
if cs.levels[0] in geop_contours[::2]:
lablevels = cs.levels[::2]
else:
lablevels = cs.levels[1::2]
ax.clabel(cs, lablevels, fontsize=14, fmt='%.0f')
# Convert wind data from m/s to knots for the wind barbs.
uk = convert_to(u, "m/s", "knots")
vk = convert_to(v, "m/s", "knots")
# Transform wind vector field to fit map.
lons2 = ((self.lons + 180) % 360) - 180
lons2_ind = lons2.argsort()
udat, vdat, xv, yv = bm.transform_vector(uk[:, lons2_ind], vk[:, lons2_ind],
lons2[lons2_ind], self.lats,
16, 16, returnxy=True, masked=True)
# Plot wind barbs.
bm.barbs(xv, yv, udat, vdat,
barbcolor='firebrick', flagcolor='firebrick', pivot='middle',
linewidths=0.5, length=6, zorder=1)
# Plot title.
titlestring = "Geopotential Height (m) and Horizontal Wind (m/s) " \
f"at {self.level:.0f} hPa"
titlestring += f'\nValid: {self.valid_time.strftime("%a %Y-%m-%d %H:%M UTC")}'
if self.uses_inittime_dimension():
time_step = self.valid_time - self.init_time
time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600
titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime("%a %Y-%m-%d %H:%M UTC")})'
if not self.noframe:
ax.set_title(titlestring,
horizontalalignment='left', x=0, fontsize=14)
else:
ax.text(bm.llcrnrx, bm.llcrnry, titlestring,
fontsize=10, bbox=dict(facecolor='white', alpha=0.6))
class HS_RelativeHumidityStyle_PL_01(MPLBasemapHorizontalSectionStyle):
"""
Upper Air Field: Relative Humidity
Relative humidity and geopotential on pressure levels.
"""
name = "PLRelHum01"
title = "Relative Humditiy (%) and Geopotential Height (m)"
# Variables with the highest number of dimensions first (otherwise
# MFDatasetCommonDims will throw an exception)!
required_datafields = [
("pl", "air_temperature", "K"),
("pl", "geopotential_height", "m"),
("pl", "specific_humidity", "kg/kg")]
def _prepare_datafields(self):
"""
Computes relative humidity from p, t, q.
"""
pressure = convert_to(self.level, self.get_elevation_units(), "Pa")
self.data["relative_humidity"] = thermolib.rel_hum(
pressure, self.data["air_temperature"], self.data["specific_humidity"])
def _plot_style(self):
"""
"""
bm = self.bm
ax = self.bm.ax
data = self.data
filled_contours = np.arange(70, 140, 15)
        thin_contours = np.arange(10, 140, 15)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 <NAME> <<EMAIL>>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for similarity algorithms (the similarities package).
"""
import logging
import unittest
import math
import os
import numpy
import scipy
from gensim import utils
from gensim.corpora import Dictionary
from gensim.models import word2vec
from gensim.models import doc2vec
from gensim.models import KeyedVectors
from gensim.models import TfidfModel
from gensim import matutils, similarities
from gensim.models import Word2Vec, FastText
from gensim.test.utils import (
datapath, get_tmpfile,
common_texts as TEXTS, common_dictionary as DICTIONARY, common_corpus as CORPUS,
)
from gensim.similarities import UniformTermSimilarityIndex
from gensim.similarities import WordEmbeddingSimilarityIndex
from gensim.similarities import SparseTermSimilarityMatrix
from gensim.similarities import LevenshteinSimilarityIndex
from gensim.similarities.docsim import _nlargest
from gensim.similarities.levenshtein import levdist, levsim
try:
from pyemd import emd # noqa:F401
PYEMD_EXT = True
except (ImportError, ValueError):
PYEMD_EXT = False
SENTENCES = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(TEXTS)]
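# Illustrative sketch, not part of the original test suite: the basic query
# pattern exercised by the similarity tests below. It only uses objects that
# are already imported above and is never called by the tests themselves.
def _example_query_sketch():
    """Build an index over the shared test corpus and query it with document 0."""
    index = similarities.MatrixSimilarity(CORPUS, num_features=len(DICTIONARY))
    sims = index[CORPUS[0]]  # dense vector of cosine similarities against all documents
    return matutils.full2sparse(sims)  # as a sparse list of (document_id, similarity) pairs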
@unittest.skip("skipping abstract base class")
class _TestSimilarityABC(unittest.TestCase):
"""
Base class for SparseMatrixSimilarity and MatrixSimilarity unit tests.
"""
def factoryMethod(self):
"""Creates a SimilarityABC instance."""
return self.cls(CORPUS, num_features=len(DICTIONARY))
def test_full(self, num_best=None, shardsize=100):
if self.cls == similarities.Similarity:
index = self.cls(None, CORPUS, num_features=len(DICTIONARY), shardsize=shardsize)
else:
index = self.cls(CORPUS, num_features=len(DICTIONARY))
if isinstance(index, similarities.MatrixSimilarity):
expected = numpy.array([
[0.57735026, 0.57735026, 0.57735026, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.40824831, 0.0, 0.40824831, 0.40824831, 0.40824831, 0.40824831, 0.40824831, 0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.40824831, 0.0, 0.0, 0.0, 0.81649661, 0.0, 0.40824831, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.57735026, 0.57735026, 0.0, 0.0, 0.57735026, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1., 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.70710677, 0.70710677, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.57735026, 0.57735026],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.57735026],
], dtype=numpy.float32)
# HACK: dictionary can be in different order, so compare in sorted order
self.assertTrue(numpy.allclose(sorted(expected.flat), sorted(index.index.flat)))
index.num_best = num_best
query = CORPUS[0]
sims = index[query]
expected = [(0, 0.99999994), (2, 0.28867513), (3, 0.23570226), (1, 0.23570226)][: num_best]
# convert sims to full numpy arrays, so we can use allclose() and ignore
# ordering of items with the same similarity value
expected = matutils.sparse2full(expected, len(index))
if num_best is not None: # when num_best is None, sims is already a numpy array
sims = matutils.sparse2full(sims, len(index))
self.assertTrue(numpy.allclose(expected, sims))
if self.cls == similarities.Similarity:
index.destroy()
def test_num_best(self):
if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
self.skipTest("pyemd not installed")
for num_best in [None, 0, 1, 9, 1000]:
            self.test_full(num_best=num_best)
def test_full2sparse_clipped(self):
vec = [0.8, 0.2, 0.0, 0.0, -0.1, -0.15]
expected = [(0, 0.80000000000000004), (1, 0.20000000000000001), (5, -0.14999999999999999)]
self.assertTrue(matutils.full2sparse_clipped(vec, topn=3), expected)
def test_scipy2scipy_clipped(self):
# Test for scipy vector/row
vec = [0.8, 0.2, 0.0, 0.0, -0.1, -0.15]
expected = [(0, 0.80000000000000004), (1, 0.20000000000000001), (5, -0.14999999999999999)]
vec_scipy = scipy.sparse.csr_matrix(vec)
vec_scipy_clipped = matutils.scipy2scipy_clipped(vec_scipy, topn=3)
self.assertTrue(scipy.sparse.issparse(vec_scipy_clipped))
self.assertTrue(matutils.scipy2sparse(vec_scipy_clipped), expected)
# Test for scipy matrix
vec = [0.8, 0.2, 0.0, 0.0, -0.1, -0.15]
expected = [(0, 0.80000000000000004), (1, 0.20000000000000001), (5, -0.14999999999999999)]
matrix_scipy = scipy.sparse.csr_matrix([vec] * 3)
matrix_scipy_clipped = matutils.scipy2scipy_clipped(matrix_scipy, topn=3)
self.assertTrue(scipy.sparse.issparse(matrix_scipy_clipped))
self.assertTrue([matutils.scipy2sparse(x) for x in matrix_scipy_clipped], [expected] * 3)
def test_empty_query(self):
index = self.factoryMethod()
if isinstance(index, similarities.WmdSimilarity) and not PYEMD_EXT:
self.skipTest("pyemd not installed")
query = []
try:
sims = index[query]
self.assertTrue(sims is not None)
except IndexError:
self.assertTrue(False)
def test_chunking(self):
if self.cls == similarities.Similarity:
index = self.cls(None, CORPUS, num_features=len(DICTIONARY), shardsize=5)
else:
index = self.cls(CORPUS, num_features=len(DICTIONARY))
query = CORPUS[:3]
sims = index[query]
expected = numpy.array([
[0.99999994, 0.23570226, 0.28867513, 0.23570226, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.23570226, 1.0, 0.40824831, 0.33333334, 0.70710677, 0.0, 0.0, 0.0, 0.23570226],
[0.28867513, 0.40824831, 1.0, 0.61237246, 0.28867513, 0.0, 0.0, 0.0, 0.0]
], dtype=numpy.float32)
self.assertTrue(numpy.allclose(expected, sims))
# test the same thing but with num_best
index.num_best = 3
sims = index[query]
expected = [
[(0, 0.99999994), (2, 0.28867513), (1, 0.23570226)],
[(1, 1.0), (4, 0.70710677), (2, 0.40824831)],
[(2, 1.0), (3, 0.61237246), (1, 0.40824831)]
]
self.assertTrue(numpy.allclose(expected, sims))
if self.cls == similarities.Similarity:
index.destroy()
def test_iter(self):
if self.cls == similarities.Similarity:
index = self.cls(None, CORPUS, num_features=len(DICTIONARY), shardsize=5)
else:
index = self.cls(CORPUS, num_features=len(DICTIONARY))
sims = [sim for sim in index]
expected = numpy.array([
[0.99999994, 0.23570226, 0.28867513, 0.23570226, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.23570226, 1.0, 0.40824831, 0.33333334, 0.70710677, 0.0, 0.0, 0.0, 0.23570226],
[0.28867513, 0.40824831, 1.0, 0.61237246, 0.28867513, 0.0, 0.0, 0.0, 0.0],
[0.23570226, 0.33333334, 0.61237246, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.70710677, 0.28867513, 0.0, 0.99999994, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.70710677, 0.57735026, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.70710677, 0.99999994, 0.81649655, 0.40824828],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.81649655, 0.99999994, 0.66666663],
[0.0, 0.23570226, 0.0, 0.0, 0.0, 0.0, 0.40824828, 0.66666663, 0.99999994]
], dtype=numpy.float32)
self.assertTrue(numpy.allclose(expected, sims))
if self.cls == similarities.Similarity:
index.destroy()
def test_persistency(self):
if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
self.skipTest("pyemd not installed")
fname = get_tmpfile('gensim_similarities.tst.pkl')
index = self.factoryMethod()
index.save(fname)
index2 = self.cls.load(fname)
if self.cls == similarities.Similarity:
# for Similarity, only do a basic check
self.assertTrue(len(index.shards) == len(index2.shards))
index.destroy()
else:
if isinstance(index, similarities.SparseMatrixSimilarity):
# hack SparseMatrixSim indexes so they're easy to compare
index.index = index.index.todense()
index2.index = index2.index.todense()
self.assertTrue(numpy.allclose(index.index, index2.index))
self.assertEqual(index.num_best, index2.num_best)
def test_persistency_compressed(self):
if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
self.skipTest("pyemd not installed")
fname = get_tmpfile('gensim_similarities.tst.pkl.gz')
index = self.factoryMethod()
index.save(fname)
index2 = self.cls.load(fname)
if self.cls == similarities.Similarity:
# for Similarity, only do a basic check
self.assertTrue(len(index.shards) == len(index2.shards))
index.destroy()
else:
if isinstance(index, similarities.SparseMatrixSimilarity):
# hack SparseMatrixSim indexes so they're easy to compare
index.index = index.index.todense()
index2.index = index2.index.todense()
self.assertTrue(numpy.allclose(index.index, index2.index))
self.assertEqual(index.num_best, index2.num_best)
def test_large(self):
if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
self.skipTest("pyemd not installed")
fname = get_tmpfile('gensim_similarities.tst.pkl')
index = self.factoryMethod()
# store all arrays separately
index.save(fname, sep_limit=0)
index2 = self.cls.load(fname)
if self.cls == similarities.Similarity:
# for Similarity, only do a basic check
self.assertTrue(len(index.shards) == len(index2.shards))
index.destroy()
else:
if isinstance(index, similarities.SparseMatrixSimilarity):
# hack SparseMatrixSim indexes so they're easy to compare
index.index = index.index.todense()
index2.index = index2.index.todense()
self.assertTrue(numpy.allclose(index.index, index2.index))
self.assertEqual(index.num_best, index2.num_best)
def test_large_compressed(self):
if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
self.skipTest("pyemd not installed")
fname = get_tmpfile('gensim_similarities.tst.pkl.gz')
index = self.factoryMethod()
# store all arrays separately
index.save(fname, sep_limit=0)
index2 = self.cls.load(fname, mmap=None)
if self.cls == similarities.Similarity:
# for Similarity, only do a basic check
self.assertTrue(len(index.shards) == len(index2.shards))
index.destroy()
else:
if isinstance(index, similarities.SparseMatrixSimilarity):
# hack SparseMatrixSim indexes so they're easy to compare
index.index = index.index.todense()
index2.index = index2.index.todense()
self.assertTrue(numpy.allclose(index.index, index2.index))
self.assertEqual(index.num_best, index2.num_best)
def test_mmap(self):
if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
self.skipTest("pyemd not installed")
fname = get_tmpfile('gensim_similarities.tst.pkl')
index = self.factoryMethod()
# store all arrays separately
index.save(fname, sep_limit=0)
# same thing, but use mmap to load arrays
index2 = self.cls.load(fname, mmap='r')
if self.cls == similarities.Similarity:
# for Similarity, only do a basic check
self.assertTrue(len(index.shards) == len(index2.shards))
index.destroy()
else:
if isinstance(index, similarities.SparseMatrixSimilarity):
# hack SparseMatrixSim indexes so they're easy to compare
index.index = index.index.todense()
index2.index = index2.index.todense()
self.assertTrue(numpy.allclose(index.index, index2.index))
self.assertEqual(index.num_best, index2.num_best)
def test_mmap_compressed(self):
if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
self.skipTest("pyemd not installed")
fname = get_tmpfile('gensim_similarities.tst.pkl.gz')
index = self.factoryMethod()
# store all arrays separately
index.save(fname, sep_limit=0)
# same thing, but use mmap to load arrays
self.assertRaises(IOError, self.cls.load, fname, mmap='r')
class TestMatrixSimilarity(_TestSimilarityABC):
def setUp(self):
self.cls = similarities.MatrixSimilarity
class TestWmdSimilarity(_TestSimilarityABC):
def setUp(self):
self.cls = similarities.WmdSimilarity
self.w2v_model = Word2Vec(TEXTS, min_count=1).wv
def factoryMethod(self):
# Override factoryMethod.
return self.cls(TEXTS, self.w2v_model)
@unittest.skipIf(PYEMD_EXT is False, "pyemd not installed")
def test_full(self, num_best=None):
# Override testFull.
index = self.cls(TEXTS, self.w2v_model)
index.num_best = num_best
query = TEXTS[0]
sims = index[query]
if num_best is not None:
# Sparse array.
for i, sim in sims:
                # Note that similarities are greater than zero, since WmdSimilarity
                # scores are computed as 1 / (1 + word-mover distance).
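                # e.g. a distance of 3.0 gives 1 / (1 + 3.0) = 0.25, so all
                # values lie in the interval (0, 1].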
self.assertTrue(numpy.alltrue(sim > 0.0))
else:
            self.assertTrue(sims[0] == 1.0)  # Similarity of a document with itself is 1.0.
self.assertTrue(numpy.alltrue(sims[1:] > 0.0))
self.assertTrue(numpy.alltrue(sims[1:] < 1.0))
@unittest.skipIf(PYEMD_EXT is False, "pyemd not installed")
def test_non_increasing(self):
''' Check that similarities are non-increasing when `num_best` is not
`None`.'''
# NOTE: this could be implemented for other similarities as well (i.e.
# in _TestSimilarityABC).
index = self.cls(TEXTS, self.w2v_model, num_best=3)
query = TEXTS[0]
sims = index[query]
sims2 = numpy.asarray(sims)[:, 1] # Just the similarities themselves.
        # The difference of adjacent elements should be less than or equal to zero.
        cond = sum(numpy.diff(sims2) <= 0) == len(sims2) - 1
self.assertTrue(cond)
@unittest.skipIf(PYEMD_EXT is False, "pyemd not installed")
def test_chunking(self):
# Override testChunking.
index = self.cls(TEXTS, self.w2v_model)
query = TEXTS[:3]
sims = index[query]
for i in range(3):
            self.assertTrue(numpy.alltrue(sims[i, i] == 1.0))  # Similarity of a document with itself is 1.0.
# test the same thing but with num_best
index.num_best = 3
sims = index[query]
for sims_temp in sims:
for i, sim in sims_temp:
self.assertTrue(numpy.alltrue(sim > 0.0))
self.assertTrue(numpy.alltrue(sim <= 1.0))
@unittest.skipIf(PYEMD_EXT is False, "pyemd not installed")
def test_iter(self):
# Override testIter.
index = self.cls(TEXTS, self.w2v_model)
for sims in index:
self.assertTrue(numpy.alltrue(sims >= 0.0))
self.assertTrue(numpy.alltrue(sims <= 1.0))
class TestSoftCosineSimilarity(_TestSimilarityABC):
def setUp(self):
self.cls = similarities.SoftCosineSimilarity
self.tfidf = TfidfModel(dictionary=DICTIONARY)
similarity_matrix = scipy.sparse.identity(12, format="lil")
similarity_matrix[DICTIONARY.token2id["user"], DICTIONARY.token2id["human"]] = 0.5
similarity_matrix[DICTIONARY.token2id["human"], DICTIONARY.token2id["user"]] = 0.5
self.similarity_matrix = SparseTermSimilarityMatrix(similarity_matrix)
def factoryMethod(self):
return self.cls(CORPUS, self.similarity_matrix)
def test_full(self, num_best=None):
# Single query
index = self.cls(CORPUS, self.similarity_matrix, num_best=num_best)
query = DICTIONARY.doc2bow(TEXTS[0])
sims = index[query]
if num_best is not None:
# Sparse array.
for i, sim in sims:
self.assertTrue(numpy.alltrue(sim <= 1.0))
self.assertTrue(numpy.alltrue(sim >= 0.0))
else:
self.assertAlmostEqual(1.0, sims[0]) # Similarity of a document with itself is 1.0.
self.assertTrue(numpy.alltrue(sims[1:] >= 0.0))
self.assertTrue(numpy.alltrue(sims[1:] < 1.0))
# Corpora
for query in (
CORPUS, # Basic text corpus.
self.tfidf[CORPUS]): # Transformed corpus without slicing support.
index = self.cls(query, self.similarity_matrix, num_best=num_best)
sims = index[query]
if num_best is not None:
# Sparse array.
for result in sims:
for i, sim in result:
self.assertTrue(numpy.alltrue(sim <= 1.0))
self.assertTrue(numpy.alltrue(sim >= 0.0))
else:
for i, result in enumerate(sims):
self.assertAlmostEqual(1.0, result[i]) # Similarity of a document with itself is 1.0.
self.assertTrue(numpy.alltrue(result[:i] >= 0.0))
self.assertTrue(numpy.alltrue(result[:i] < 1.0))
self.assertTrue(numpy.alltrue(result[i + 1:] >= 0.0))
self.assertTrue(numpy.alltrue(result[i + 1:] < 1.0))
def test_non_increasing(self):
""" Check that similarities are non-increasing when `num_best` is not `None`."""
# NOTE: this could be implemented for other similarities as well (i.e. in _TestSimilarityABC).
index = self.cls(CORPUS, self.similarity_matrix, num_best=5)
query = DICTIONARY.doc2bow(TEXTS[0])
sims = index[query]
sims2 = numpy.asarray(sims)[:, 1] # Just the similarities themselves.
# The difference of adjacent elements should be less than or equal to zero.
cond = sum(numpy.diff(sims2) <= 0) == len(sims2) - 1
self.assertTrue(cond)
def test_chunking(self):
index = self.cls(CORPUS, self.similarity_matrix)
query = [DICTIONARY.doc2bow(document) for document in TEXTS[:3]]
sims = index[query]
for i in range(3):
self.assertTrue(numpy.alltrue(sims[i, i] == 1.0)) # Similarity of a document with itself is 1.0.
# test the same thing but with num_best
index.num_best = 5
sims = index[query]
for i, chunk in enumerate(sims):
expected = i
self.assertAlmostEqual(expected, chunk[0][0], places=2)
expected = 1.0
self.assertAlmostEqual(expected, chunk[0][1], places=2)
def test_iter(self):
index = self.cls(CORPUS, self.similarity_matrix)
for sims in index:
self.assertTrue(numpy.alltrue(sims >= 0.0))
self.assertTrue(numpy.alltrue(sims <= 1.0))
class TestSparseMatrixSimilarity(_TestSimilarityABC):
def setUp(self):
self.cls = similarities.SparseMatrixSimilarity
def test_maintain_sparsity(self):
"""Sparsity is correctly maintained when maintain_sparsity=True"""
num_features = len(DICTIONARY)
index = self.cls(CORPUS, num_features=num_features)
dense_sims = index[CORPUS]
index = self.cls(CORPUS, num_features=num_features, maintain_sparsity=True)
sparse_sims = index[CORPUS]
self.assertFalse(scipy.sparse.issparse(dense_sims))
self.assertTrue(scipy.sparse.issparse(sparse_sims))
numpy.testing.assert_array_equal(dense_sims, sparse_sims.todense())
def test_maintain_sparsity_with_num_best(self):
"""Tests that sparsity is correctly maintained when maintain_sparsity=True and num_best is not None"""
num_features = len(DICTIONARY)
index = self.cls(CORPUS, num_features=num_features, maintain_sparsity=False, num_best=3)
dense_topn_sims = index[CORPUS]
index = self.cls(CORPUS, num_features=num_features, maintain_sparsity=True, num_best=3)
scipy_topn_sims = index[CORPUS]
self.assertFalse(scipy.sparse.issparse(dense_topn_sims))
self.assertTrue(scipy.sparse.issparse(scipy_topn_sims))
self.assertEqual(dense_topn_sims, [matutils.scipy2sparse(v) for v in scipy_topn_sims])
class TestSimilarity(_TestSimilarityABC):
def setUp(self):
self.cls = similarities.Similarity
def factoryMethod(self):
# Override factoryMethod.
return self.cls(None, CORPUS, num_features=len(DICTIONARY), shardsize=5)
def test_sharding(self):
for num_best in [None, 0, 1, 9, 1000]:
for shardsize in [1, 2, 9, 1000]:
                self.test_full(num_best=num_best, shardsize=shardsize)
def test_reopen(self):
"""test re-opening partially full shards"""
index = similarities.Similarity(None, CORPUS[:5], num_features=len(DICTIONARY), shardsize=9)
_ = index[CORPUS[0]] # noqa:F841 forces shard close
index.add_documents(CORPUS[5:])
query = CORPUS[0]
sims = index[query]
expected = [(0, 0.99999994), (2, 0.28867513), (3, 0.23570226), (1, 0.23570226)]
expected = matutils.sparse2full(expected, len(index))
self.assertTrue(numpy.allclose(expected, sims))
index.destroy()
def test_mmap_compressed(self):
pass
        # this test does not apply here, because there are no arrays to be mmapped
def test_chunksize(self):
index = self.cls(None, CORPUS, num_features=len(DICTIONARY), shardsize=5)
expected = [sim for sim in index]
index.chunksize = len(index) - 1
sims = [sim for sim in index]
self.assertTrue(numpy.allclose(expected, sims))
index.destroy()
def test_nlargest(self):
sims = ([(0, 0.8), (1, 0.2), (2, 0.0), (3, 0.0), (4, -0.1), (5, -0.15)],)
expected = [(0, 0.8), (1, 0.2), (5, -0.15)]
self.assertTrue(_nlargest(3, sims), expected)
class TestWord2VecAnnoyIndexer(unittest.TestCase):
def setUp(self):
try:
import annoy # noqa:F401
except ImportError as e:
raise unittest.SkipTest("Annoy library is not available: %s" % e)
from gensim.similarities.annoy import AnnoyIndexer
self.indexer = AnnoyIndexer
def test_word2vec(self):
model = word2vec.Word2Vec(TEXTS, min_count=1)
index = self.indexer(model, 10)
self.assertVectorIsSimilarToItself(model.wv, index)
self.assertApproxNeighborsMatchExact(model.wv, model.wv, index)
self.assertIndexSaved(index)
self.assertLoadedIndexEqual(index, model)
def test_fast_text(self):
class LeeReader:
def __init__(self, fn):
self.fn = fn
def __iter__(self):
with utils.open(self.fn, 'r', encoding="latin_1") as infile:
for line in infile:
yield line.lower().strip().split()
model = FastText(LeeReader(datapath('lee.cor')), bucket=5000)
index = self.indexer(model, 10)
self.assertVectorIsSimilarToItself(model.wv, index)
self.assertApproxNeighborsMatchExact(model.wv, model.wv, index)
self.assertIndexSaved(index)
self.assertLoadedIndexEqual(index, model)
def test_annoy_indexing_of_keyed_vectors(self):
from gensim.similarities.annoy import AnnoyIndexer
keyVectors_file = datapath('lee_fasttext.vec')
model = KeyedVectors.load_word2vec_format(keyVectors_file)
index = AnnoyIndexer(model, 10)
self.assertEqual(index.num_trees, 10)
self.assertVectorIsSimilarToItself(model, index)
self.assertApproxNeighborsMatchExact(model, model, index)
def test_load_missing_raises_error(self):
from gensim.similarities.annoy import AnnoyIndexer
test_index = AnnoyIndexer()
self.assertRaises(IOError, test_index.load, fname='test-index')
def assertVectorIsSimilarToItself(self, wv, index):
vector = wv.get_normed_vectors()[0]
label = wv.index_to_key[0]
approx_neighbors = index.most_similar(vector, 1)
word, similarity = approx_neighbors[0]
self.assertEqual(word, label)
self.assertAlmostEqual(similarity, 1.0, places=2)
def assertApproxNeighborsMatchExact(self, model, wv, index):
vector = wv.get_normed_vectors()[0]
approx_neighbors = model.most_similar([vector], topn=5, indexer=index)
exact_neighbors = model.most_similar(positive=[vector], topn=5)
approx_words = [neighbor[0] for neighbor in approx_neighbors]
exact_words = [neighbor[0] for neighbor in exact_neighbors]
self.assertEqual(approx_words, exact_words)
def assertAllSimilaritiesDisableIndexer(self, model, wv, index):
vector = wv.get_normed_vectors()[0]
approx_similarities = model.most_similar([vector], topn=None, indexer=index)
exact_similarities = model.most_similar(positive=[vector], topn=None)
self.assertEqual(approx_similarities, exact_similarities)
self.assertEqual(len(approx_similarities), len(wv.vectors))
def assertIndexSaved(self, index):
fname = get_tmpfile('gensim_similarities.tst.pkl')
index.save(fname)
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(fname + '.d'))
def assertLoadedIndexEqual(self, index, model):
from gensim.similarities.annoy import AnnoyIndexer
fname = get_tmpfile('gensim_similarities.tst.pkl')
index.save(fname)
index2 = AnnoyIndexer()
index2.load(fname)
index2.model = model
self.assertEqual(index.index.f, index2.index.f)
self.assertEqual(index.labels, index2.labels)
self.assertEqual(index.num_trees, index2.num_trees)
class TestDoc2VecAnnoyIndexer(unittest.TestCase):
def setUp(self):
try:
import annoy # noqa:F401
except ImportError as e:
raise unittest.SkipTest("Annoy library is not available: %s" % e)
from gensim.similarities.annoy import AnnoyIndexer
self.model = doc2vec.Doc2Vec(SENTENCES, min_count=1)
self.index = AnnoyIndexer(self.model, 300)
self.vector = self.model.dv.get_normed_vectors()[0]
def test_document_is_similar_to_itself(self):
approx_neighbors = self.index.most_similar(self.vector, 1)
doc, similarity = approx_neighbors[0]
self.assertEqual(doc, 0)
self.assertAlmostEqual(similarity, 1.0, places=2)
def test_approx_neighbors_match_exact(self):
approx_neighbors = self.model.dv.most_similar([self.vector], topn=5, indexer=self.index)
exact_neighbors = self.model.dv.most_similar([self.vector], topn=5)
approx_words = [neighbor[0] for neighbor in approx_neighbors]
exact_words = [neighbor[0] for neighbor in exact_neighbors]
self.assertEqual(approx_words, exact_words)
def test_save(self):
fname = get_tmpfile('gensim_similarities.tst.pkl')
self.index.save(fname)
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(fname + '.d'))
def test_load_not_exist(self):
from gensim.similarities.annoy import AnnoyIndexer
self.test_index = AnnoyIndexer()
self.assertRaises(IOError, self.test_index.load, fname='test-index')
def test_save_load(self):
from gensim.similarities.annoy import AnnoyIndexer
fname = get_tmpfile('gensim_similarities.tst.pkl')
self.index.save(fname)
self.index2 = AnnoyIndexer()
self.index2.load(fname)
self.index2.model = self.model
self.assertEqual(self.index.index.f, self.index2.index.f)
self.assertEqual(self.index.labels, self.index2.labels)
self.assertEqual(self.index.num_trees, self.index2.num_trees)
class TestWord2VecNmslibIndexer(unittest.TestCase):
def setUp(self):
try:
import nmslib # noqa:F401
except ImportError as e:
raise unittest.SkipTest("NMSLIB library is not available: %s" % e)
from gensim.similarities.nmslib import NmslibIndexer
self.indexer = NmslibIndexer
def test_word2vec(self):
model = word2vec.Word2Vec(TEXTS, min_count=1)
index = self.indexer(model)
self.assertVectorIsSimilarToItself(model.wv, index)
self.assertApproxNeighborsMatchExact(model.wv, model.wv, index)
self.assertIndexSaved(index)
self.assertLoadedIndexEqual(index, model)
def test_fasttext(self):
class LeeReader:
def __init__(self, fn):
self.fn = fn
def __iter__(self):
with utils.open(self.fn, 'r', encoding="latin_1") as infile:
for line in infile:
yield line.lower().strip().split()
model = FastText(LeeReader(datapath('lee.cor')), bucket=5000)
index = self.indexer(model)
self.assertVectorIsSimilarToItself(model.wv, index)
self.assertApproxNeighborsMatchExact(model.wv, model.wv, index)
self.assertIndexSaved(index)
self.assertLoadedIndexEqual(index, model)
def test_indexing_keyedvectors(self):
from gensim.similarities.nmslib import NmslibIndexer
keyVectors_file = datapath('lee_fasttext.vec')
model = KeyedVectors.load_word2vec_format(keyVectors_file)
index = NmslibIndexer(model)
self.assertVectorIsSimilarToItself(model, index)
self.assertApproxNeighborsMatchExact(model, model, index)
def test_load_missing_raises_error(self):
from gensim.similarities.nmslib import NmslibIndexer
self.assertRaises(IOError, NmslibIndexer.load, fname='test-index')
def assertVectorIsSimilarToItself(self, wv, index):
vector = wv.get_normed_vectors()[0]
label = wv.index_to_key[0]
approx_neighbors = index.most_similar(vector, 1)
word, similarity = approx_neighbors[0]
self.assertEqual(word, label)
self.assertAlmostEqual(similarity, 1.0, places=2)
def assertApproxNeighborsMatchExact(self, model, wv, index):
vector = wv.get_normed_vectors()[0]
approx_neighbors = model.most_similar([vector], topn=5, indexer=index)
exact_neighbors = model.most_similar([vector], topn=5)
approx_words = [word_id for word_id, similarity in approx_neighbors]
exact_words = [word_id for word_id, similarity in exact_neighbors]
self.assertEqual(approx_words, exact_words)
def assertIndexSaved(self, index):
fname = get_tmpfile('gensim_similarities.tst.pkl')
index.save(fname)
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(fname + '.d'))
def assertLoadedIndexEqual(self, index, model):
from gensim.similarities.nmslib import NmslibIndexer
fname = get_tmpfile('gensim_similarities.tst.pkl')
index.save(fname)
index2 = NmslibIndexer.load(fname)
index2.model = model
self.assertEqual(index.labels, index2.labels)
self.assertEqual(index.index_params, index2.index_params)
self.assertEqual(index.query_time_params, index2.query_time_params)
class TestDoc2VecNmslibIndexer(unittest.TestCase):
def setUp(self):
try:
import nmslib # noqa:F401
except ImportError as e:
raise unittest.SkipTest("NMSLIB library is not available: %s" % e)
from gensim.similarities.nmslib import NmslibIndexer
self.model = doc2vec.Doc2Vec(SENTENCES, min_count=1)
self.index = NmslibIndexer(self.model)
self.vector = self.model.dv.get_normed_vectors()[0]
def test_document_is_similar_to_itself(self):
approx_neighbors = self.index.most_similar(self.vector, 1)
doc, similarity = approx_neighbors[0]
self.assertEqual(doc, 0)
self.assertAlmostEqual(similarity, 1.0, places=2)
def test_approx_neighbors_match_exact(self):
approx_neighbors = self.model.dv.most_similar([self.vector], topn=5, indexer=self.index)
exact_neighbors = self.model.dv.most_similar([self.vector], topn=5)
approx_tags = [tag for tag, similarity in approx_neighbors]
exact_tags = [tag for tag, similarity in exact_neighbors]
self.assertEqual(approx_tags, exact_tags)
def test_save(self):
fname = get_tmpfile('gensim_similarities.tst.pkl')
self.index.save(fname)
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(fname + '.d'))
def test_load_not_exist(self):
from gensim.similarities.nmslib import NmslibIndexer
self.assertRaises(IOError, NmslibIndexer.load, fname='test-index')
def test_save_load(self):
from gensim.similarities.nmslib import NmslibIndexer
fname = get_tmpfile('gensim_similarities.tst.pkl')
self.index.save(fname)
self.index2 = NmslibIndexer.load(fname)
self.index2.model = self.model
self.assertEqual(self.index.labels, self.index2.labels)
self.assertEqual(self.index.index_params, self.index2.index_params)
self.assertEqual(self.index.query_time_params, self.index2.query_time_params)
class TestUniformTermSimilarityIndex(unittest.TestCase):
def setUp(self):
self.documents = [[u"government", u"denied", u"holiday"], [u"holiday", u"slowing", u"hollingworth"]]
self.dictionary = Dictionary(self.documents)
def test_most_similar(self):
"""Test most_similar returns expected results."""
# check that the topn works as expected
index = UniformTermSimilarityIndex(self.dictionary)
results = list(index.most_similar(u"holiday", topn=1))
self.assertLess(0, len(results))
self.assertGreaterEqual(1, len(results))
results = list(index.most_similar(u"holiday", topn=4))
self.assertLess(1, len(results))
self.assertGreaterEqual(4, len(results))
# check that the term itself is not returned
index = UniformTermSimilarityIndex(self.dictionary)
terms = [term for term, similarity in index.most_similar(u"holiday", topn=len(self.dictionary))]
self.assertFalse(u"holiday" in terms)
# check that the term_similarity works as expected
index = UniformTermSimilarityIndex(self.dictionary, term_similarity=0.2)
similarities = numpy.array([
similarity for term, similarity in index.most_similar(u"holiday", topn=len(self.dictionary))])
self.assertTrue(numpy.all(similarities == 0.2))
class TestSparseTermSimilarityMatrix(unittest.TestCase):
def setUp(self):
self.documents = [
[u"government", u"denied", u"holiday"],
[u"government", u"denied", u"holiday", u"slowing", u"hollingworth"]]
self.dictionary = Dictionary(self.documents)
self.tfidf = TfidfModel(dictionary=self.dictionary)
zero_index = UniformTermSimilarityIndex(self.dictionary, term_similarity=0.0)
self.index = UniformTermSimilarityIndex(self.dictionary, term_similarity=0.5)
self.identity_matrix = SparseTermSimilarityMatrix(zero_index, self.dictionary)
self.uniform_matrix = SparseTermSimilarityMatrix(self.index, self.dictionary)
self.vec1 = self.dictionary.doc2bow([u"government", u"government", u"denied"])
self.vec2 = self.dictionary.doc2bow([u"government", u"holiday"])
def test_empty_dictionary(self):
with self.assertRaises(ValueError):
SparseTermSimilarityMatrix(self.index, [])
def test_type(self):
"""Test the type of the produced matrix."""
matrix = SparseTermSimilarityMatrix(self.index, self.dictionary).matrix
self.assertTrue(isinstance(matrix, scipy.sparse.csc_matrix))
def test_diagonal(self):
"""Test the existence of ones on the main diagonal."""
matrix = SparseTermSimilarityMatrix(self.index, self.dictionary).matrix.todense()
self.assertTrue(numpy.all(numpy.diag(matrix) == numpy.ones(matrix.shape[0])))
def test_order(self):
"""Test the matrix order."""
matrix = SparseTermSimilarityMatrix(self.index, self.dictionary).matrix.todense()
self.assertEqual(matrix.shape[0], len(self.dictionary))
self.assertEqual(matrix.shape[1], len(self.dictionary))
def test_dtype(self):
"""Test the dtype parameter of the matrix constructor."""
matrix = SparseTermSimilarityMatrix(self.index, self.dictionary, dtype=numpy.float32).matrix.todense()
self.assertEqual(numpy.float32, matrix.dtype)
matrix = SparseTermSimilarityMatrix(self.index, self.dictionary, dtype=numpy.float64).matrix.todense()
self.assertEqual(numpy.float64, matrix.dtype)
def test_nonzero_limit(self):
"""Test the nonzero_limit parameter of the matrix constructor."""
matrix = SparseTermSimilarityMatrix(self.index, self.dictionary, nonzero_limit=100).matrix.todense()
self.assertGreaterEqual(101, numpy.max(numpy.sum(matrix != 0, axis=0)))
matrix = SparseTermSimilarityMatrix(self.index, self.dictionary, nonzero_limit=4).matrix.todense()
self.assertGreaterEqual(5, numpy.max(numpy.sum(matrix != 0, axis=0)))
matrix = SparseTermSimilarityMatrix(self.index, self.dictionary, nonzero_limit=1).matrix.todense()
self.assertGreaterEqual(2, numpy.max(numpy.sum(matrix != 0, axis=0)))
matrix = SparseTermSimilarityMatrix(self.index, self.dictionary, nonzero_limit=0).matrix.todense()
self.assertEqual(1, numpy.max(numpy.sum(matrix != 0, axis=0)))
self.assertTrue(numpy.all(matrix == numpy.eye(matrix.shape[0])))
def test_symmetric(self):
"""Test the symmetric parameter of the matrix constructor."""
matrix = SparseTermSimilarityMatrix(self.index, self.dictionary).matrix.todense()
self.assertTrue(numpy.all(matrix == matrix.T))
matrix = SparseTermSimilarityMatrix(
self.index, self.dictionary, nonzero_limit=1).matrix.todense()
expected_matrix = numpy.array([
[1.0, 0.5, 0.0, 0.0, 0.0],
[0.5, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0]])
self.assertTrue(numpy.all(expected_matrix == matrix))
matrix = SparseTermSimilarityMatrix(
self.index, self.dictionary, nonzero_limit=1, symmetric=False).matrix.todense()
expected_matrix = numpy.array([
[1.0, 0.5, 0.5, 0.5, 0.5],
[0.5, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0]])
self.assertTrue(numpy.all(expected_matrix == matrix))
def test_dominant(self):
"""Test the dominant parameter of the matrix constructor."""
negative_index = UniformTermSimilarityIndex(self.dictionary, term_similarity=-0.5)
matrix = SparseTermSimilarityMatrix(
negative_index, self.dictionary, nonzero_limit=2).matrix.todense()
expected_matrix = numpy.array([
[1.0, -.5, -.5, 0.0, 0.0],
[-.5, 1.0, 0.0, -.5, 0.0],
[-.5, 0.0, 1.0, 0.0, 0.0],
[0.0, -.5, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0]])
self.assertTrue(numpy.all(expected_matrix == matrix))
matrix = SparseTermSimilarityMatrix(
negative_index, self.dictionary, nonzero_limit=2, dominant=True).matrix.todense()
expected_matrix = numpy.array([
[1.0, -.5, 0.0, 0.0, 0.0],
[-.5, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0]])
self.assertTrue(numpy.all(expected_matrix == matrix))
def test_tfidf(self):
"""Test the tfidf parameter of the matrix constructor."""
matrix = SparseTermSimilarityMatrix(
self.index, self.dictionary, nonzero_limit=1).matrix.todense()
expected_matrix = numpy.array([
[1.0, 0.5, 0.0, 0.0, 0.0],
[0.5, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0]])
self.assertTrue(numpy.all(expected_matrix == matrix))
matrix = SparseTermSimilarityMatrix(
self.index, self.dictionary, nonzero_limit=1, tfidf=self.tfidf).matrix.todense()
expected_matrix = numpy.array([
[1.0, 0.0, 0.0, 0.5, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0]])
self.assertTrue(numpy.all(expected_matrix == matrix))
def test_encapsulation(self):
"""Test the matrix encapsulation."""
# check that a sparse matrix will be converted to a CSC format
expected_matrix = numpy.array([
[1.0, 2.0, 3.0],
[0.0, 1.0, 4.0],
[0.0, 0.0, 1.0]])
matrix = SparseTermSimilarityMatrix(scipy.sparse.csc_matrix(expected_matrix)).matrix
self.assertTrue(isinstance(matrix, scipy.sparse.csc_matrix))
self.assertTrue(numpy.all(matrix.todense() == expected_matrix))
matrix = SparseTermSimilarityMatrix(scipy.sparse.csr_matrix(expected_matrix)).matrix
self.assertTrue(isinstance(matrix, scipy.sparse.csc_matrix))
self.assertTrue(numpy.all(matrix.todense() == expected_matrix))
def test_inner_product_zerovector_zerovector_default(self):
"""Test the inner product between two zero vectors with the default normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product([], []))
def test_inner_product_zerovector_zerovector_false_maintain(self):
"""Test the inner product between two zero vectors with the (False, 'maintain') normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product([], [], normalized=(False, 'maintain')))
def test_inner_product_zerovector_zerovector_false_true(self):
"""Test the inner product between two zero vectors with the (False, True) normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product([], [], normalized=(False, True)))
def test_inner_product_zerovector_zerovector_maintain_false(self):
"""Test the inner product between two zero vectors with the ('maintain', False) normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product([], [], normalized=('maintain', False)))
def test_inner_product_zerovector_zerovector_maintain_maintain(self):
"""Test the inner product between two zero vectors with the ('maintain', 'maintain') normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product([], [], normalized=('maintain', 'maintain')))
def test_inner_product_zerovector_zerovector_maintain_true(self):
"""Test the inner product between two zero vectors with the ('maintain', True) normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product([], [], normalized=('maintain', True)))
def test_inner_product_zerovector_zerovector_true_false(self):
"""Test the inner product between two zero vectors with the (True, False) normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product([], [], normalized=(True, False)))
def test_inner_product_zerovector_zerovector_true_maintain(self):
"""Test the inner product between two zero vectors with the (True, 'maintain') normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product([], [], normalized=(True, 'maintain')))
def test_inner_product_zerovector_zerovector_true_true(self):
"""Test the inner product between two zero vectors with the (True, True) normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product([], [], normalized=(True, True)))
def test_inner_product_zerovector_vector_default(self):
"""Test the inner product between a zero vector and a vector with the default normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product([], self.vec2))
def test_inner_product_zerovector_vector_false_maintain(self):
"""Test the inner product between a zero vector and a vector with the (False, 'maintain') normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product([], self.vec2, normalized=(False, 'maintain')))
def test_inner_product_zerovector_vector_false_true(self):
"""Test the inner product between a zero vector and a vector with the (False, True) normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product([], self.vec2, normalized=(False, True)))
def test_inner_product_zerovector_vector_maintain_false(self):
"""Test the inner product between a zero vector and a vector with the ('maintain', False) normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product([], self.vec2, normalized=('maintain', False)))
def test_inner_product_zerovector_vector_maintain_maintain(self):
"""Test the inner product between a zero vector and a vector with the ('maintain', 'maintain') normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product([], self.vec2, normalized=('maintain', 'maintain')))
def test_inner_product_zerovector_vector_maintain_true(self):
"""Test the inner product between a zero vector and a vector with the ('maintain', True) normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product([], self.vec2, normalized=('maintain', True)))
def test_inner_product_zerovector_vector_true_false(self):
"""Test the inner product between a zero vector and a vector with the (True, False) normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product([], self.vec2, normalized=(True, False)))
def test_inner_product_zerovector_vector_true_maintain(self):
"""Test the inner product between a zero vector and a vector with the (True, 'maintain') normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product([], self.vec2, normalized=(True, 'maintain')))
def test_inner_product_zerovector_vector_true_true(self):
"""Test the inner product between a zero vector and a vector with the (True, True) normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product([], self.vec2, normalized=(True, True)))
def test_inner_product_vector_zerovector_default(self):
"""Test the inner product between a vector and a zero vector with the default normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product(self.vec1, []))
def test_inner_product_vector_zerovector_false_maintain(self):
"""Test the inner product between a vector and a zero vector with the (False, 'maintain') normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product(self.vec1, [], normalized=(False, 'maintain')))
def test_inner_product_vector_zerovector_false_true(self):
"""Test the inner product between a vector and a zero vector with the (False, True) normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product(self.vec1, [], normalized=(False, True)))
def test_inner_product_vector_zerovector_maintain_false(self):
"""Test the inner product between a vector and a zero vector with the ('maintain', False) normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product(self.vec1, [], normalized=('maintain', False)))
def test_inner_product_vector_zerovector_maintain_maintain(self):
"""Test the inner product between a vector and a zero vector with the ('maintain', 'maintain') normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product(self.vec1, [], normalized=('maintain', 'maintain')))
def test_inner_product_vector_zerovector_maintain_true(self):
"""Test the inner product between a vector and a zero vector with the ('maintain', True) normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product(self.vec1, [], normalized=('maintain', True)))
def test_inner_product_vector_zerovector_true_false(self):
"""Test the inner product between a vector and a zero vector with the (True, False) normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product(self.vec1, [], normalized=(True, False)))
def test_inner_product_vector_zerovector_true_maintain(self):
"""Test the inner product between a vector and a zero vector with the (True, 'maintain') normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product(self.vec1, [], normalized=(True, 'maintain')))
def test_inner_product_vector_zerovector_true_true(self):
"""Test the inner product between a vector and a zero vector with the (True, True) normalization."""
self.assertEqual(0.0, self.uniform_matrix.inner_product(self.vec1, [], normalized=(True, True)))
def test_inner_product_vector_vector_default(self):
"""Test the inner product between two vectors with the default normalization."""
expected_result = 0.0
expected_result += 2 * 1.0 * 1 # government * s_{ij} * government
expected_result += 2 * 0.5 * 1 # government * s_{ij} * holiday
expected_result += 1 * 0.5 * 1 # denied * s_{ij} * government
expected_result += 1 * 0.5 * 1 # denied * s_{ij} * holiday
result = self.uniform_matrix.inner_product(self.vec1, self.vec2)
self.assertAlmostEqual(expected_result, result, places=5)
def test_inner_product_vector_vector_false_maintain(self):
"""Test the inner product between two vectors with the (False, 'maintain') normalization."""
expected_result = self.uniform_matrix.inner_product(self.vec1, self.vec2)
expected_result /= math.sqrt(self.uniform_matrix.inner_product(self.vec2, self.vec2))
expected_result *= math.sqrt(self.identity_matrix.inner_product(self.vec2, self.vec2))
result = self.uniform_matrix.inner_product(self.vec1, self.vec2, normalized=(False, 'maintain'))
self.assertAlmostEqual(expected_result, result, places=5)
def test_inner_product_vector_vector_false_true(self):
"""Test the inner product between two vectors with the (False, True) normalization."""
expected_result = self.uniform_matrix.inner_product(self.vec1, self.vec2)
expected_result /= math.sqrt(self.uniform_matrix.inner_product(self.vec2, self.vec2))
result = self.uniform_matrix.inner_product(self.vec1, self.vec2, normalized=(False, True))
self.assertAlmostEqual(expected_result, result, places=5)
def test_inner_product_vector_vector_maintain_false(self):
"""Test the inner product between two vectors with the ('maintain', False) normalization."""
expected_result = self.uniform_matrix.inner_product(self.vec1, self.vec2)
expected_result /= math.sqrt(self.uniform_matrix.inner_product(self.vec1, self.vec1))
expected_result *= math.sqrt(self.identity_matrix.inner_product(self.vec1, self.vec1))
result = self.uniform_matrix.inner_product(self.vec1, self.vec2, normalized=('maintain', False))
self.assertAlmostEqual(expected_result, result, places=5)
def test_inner_product_vector_vector_maintain_maintain(self):
"""Test the inner product between two vectors with the ('maintain', 'maintain') normalization."""
expected_result = self.uniform_matrix.inner_product(self.vec1, self.vec2)
expected_result /= math.sqrt(self.uniform_matrix.inner_product(self.vec1, self.vec1))
expected_result *= math.sqrt(self.identity_matrix.inner_product(self.vec1, self.vec1))
expected_result /= math.sqrt(self.uniform_matrix.inner_product(self.vec2, self.vec2))
expected_result *= math.sqrt(self.identity_matrix.inner_product(self.vec2, self.vec2))
result = self.uniform_matrix.inner_product(self.vec1, self.vec2, normalized=('maintain', 'maintain'))
self.assertAlmostEqual(expected_result, result, places=5)
def test_inner_product_vector_vector_maintain_true(self):
"""Test the inner product between two vectors with the ('maintain', True) normalization."""
expected_result = self.uniform_matrix.inner_product(self.vec1, self.vec2)
expected_result /= math.sqrt(self.uniform_matrix.inner_product(self.vec1, self.vec1))
expected_result *= math.sqrt(self.identity_matrix.inner_product(self.vec1, self.vec1))
expected_result /= math.sqrt(self.uniform_matrix.inner_product(self.vec2, self.vec2))
result = self.uniform_matrix.inner_product(self.vec1, self.vec2, normalized=('maintain', True))
self.assertAlmostEqual(expected_result, result, places=5)
def test_inner_product_vector_vector_true_false(self):
"""Test the inner product between two vectors with the (True, False) normalization."""
expected_result = self.uniform_matrix.inner_product(self.vec1, self.vec2)
expected_result /= math.sqrt(self.uniform_matrix.inner_product(self.vec1, self.vec1))
result = self.uniform_matrix.inner_product(self.vec1, self.vec2, normalized=(True, False))
self.assertAlmostEqual(expected_result, result, places=5)
def test_inner_product_vector_vector_true_maintain(self):
"""Test the inner product between two vectors with the (True, 'maintain') normalization."""
expected_result = self.uniform_matrix.inner_product(self.vec1, self.vec2)
expected_result /= math.sqrt(self.uniform_matrix.inner_product(self.vec1, self.vec1))
expected_result /= math.sqrt(self.uniform_matrix.inner_product(self.vec2, self.vec2))
expected_result *= math.sqrt(self.identity_matrix.inner_product(self.vec2, self.vec2))
result = self.uniform_matrix.inner_product(self.vec1, self.vec2, normalized=(True, 'maintain'))
self.assertAlmostEqual(expected_result, result, places=5)
def test_inner_product_vector_vector_true_true(self):
"""Test the inner product between two vectors with the (True, True) normalization."""
expected_result = self.uniform_matrix.inner_product(self.vec1, self.vec2)
expected_result /= math.sqrt(self.uniform_matrix.inner_product(self.vec1, self.vec1))
expected_result /= math.sqrt(self.uniform_matrix.inner_product(self.vec2, self.vec2))
result = self.uniform_matrix.inner_product(self.vec1, self.vec2, normalized=(True, True))
self.assertAlmostEqual(expected_result, result, places=5)
def test_inner_product_vector_corpus_default(self):
"""Test the inner product between a vector and a corpus with the default normalization."""
expected_result = 0.0
expected_result += 2 * 1.0 * 1 # government * s_{ij} * government
expected_result += 2 * 0.5 * 1 # government * s_{ij} * holiday
expected_result += 1 * 0.5 * 1 # denied * s_{ij} * government
expected_result += 1 * 0.5 * 1 # denied * s_{ij} * holiday
expected_result = numpy.full((1, 2), expected_result)
result = self.uniform_matrix.inner_product(self.vec1, [self.vec2] * 2)
self.assertTrue(isinstance(result, numpy.ndarray))
self.assertTrue(numpy.allclose(expected_result, result))
def test_inner_product_vector_corpus_false_maintain(self):
"""Test the inner product between a vector and a corpus with the (False, 'maintain') normalization."""
expected_result = self.uniform_matrix.inner_product(self.vec1, self.vec2)
expected_result /= math.sqrt(self.uniform_matrix.inner_product(self.vec2, self.vec2))
expected_result *= math.sqrt(self.identity_matrix.inner_product(self.vec2, self.vec2))
expected_result = numpy.full((1, 2), expected_result)
result = self.uniform_matrix.inner_product(self.vec1, [self.vec2] * 2, normalized=(False, 'maintain'))
self.assertTrue(isinstance(result, numpy.ndarray))
self.assertTrue(numpy.allclose(expected_result, result))
def test_inner_product_vector_corpus_false_true(self):
"""Test the inner product between a vector and a corpus with the (False, True) normalization."""
expected_result = self.uniform_matrix.inner_product(self.vec1, self.vec2)
expected_result /= math.sqrt(self.uniform_matrix.inner_product(self.vec2, self.vec2))
expected_result = numpy.full((1, 2), expected_result)
result = self.uniform_matrix.inner_product(self.vec1, [self.vec2] * 2, normalized=(False, True))
self.assertTrue(isinstance(result, numpy.ndarray))
self.assertTrue(numpy.allclose(expected_result, result))
def test_inner_product_vector_corpus_maintain_false(self):
"""Test the inner product between a vector and a corpus with the ('maintain', False) normalization."""
expected_result = self.uniform_matrix.inner_product(self.vec1, self.vec2)
expected_result /= math.sqrt(self.uniform_matrix.inner_product(self.vec1, self.vec1))
expected_result *= math.sqrt(self.identity_matrix.inner_product(self.vec1, self.vec1))
expected_result = numpy.full((1, 2), expected_result)
result = self.uniform_matrix.inner_product(self.vec1, [self.vec2] * 2, normalized=('maintain', False))
self.assertTrue(isinstance(result, numpy.ndarray))
self.assertTrue(numpy.allclose(expected_result, result))
def test_inner_product_vector_corpus_maintain_maintain(self):
"""Test the inner product between a vector and a corpus with the ('maintain', 'maintain') normalization."""
expected_result = self.uniform_matrix.inner_product(self.vec1, self.vec2)
expected_result /= math.sqrt(self.uniform_matrix.inner_product(self.vec1, self.vec1))
expected_result *= math.sqrt(self.identity_matrix.inner_product(self.vec1, self.vec1))
expected_result /= math.sqrt(self.uniform_matrix.inner_product(self.vec2, self.vec2))
expected_result *= math.sqrt(self.identity_matrix.inner_product(self.vec2, self.vec2))
expected_result = numpy.full((1, 2), expected_result)
result = self.uniform_matrix.inner_product(self.vec1, [self.vec2] * 2, normalized=('maintain', 'maintain'))
self.assertTrue(isinstance(result, numpy.ndarray))
self.assertTrue(numpy.allclose(expected_result, result))
import numpy as np
import time
def multi_list(a, b):
    # element-wise product of two equal-length sequences using a plain Python loop
    c = []
    for i in range(len(a)):
        c.append(a[i] * b[i])
    return c
def multi_array(a, b):
    # element-wise product using NumPy vectorization
    c = a * b
    return c
a=range(1000)
b=range(1000)
time_start=time.time()
for i in range(10000):
c=multi_list(a,b)
time_end=time.time()
print('multi list totally cost',time_end-time_start)
a = np.array(a, np.int16)
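# The array half of the benchmark is truncated in the source. A minimal sketch of the
# presumed continuation, mirroring the list-based timing above (assumed, not original code):
b = np.array(b, np.int16)
time_start = time.time()
for i in range(10000):
    c = multi_array(a, b)
time_end = time.time()
print('multi array totally cost', time_end - time_start)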
import numpy as np
from baselines.ecbp.agents.buffer.ps_learning_process import PSLearningProcess
# from baselines.ecbp.agents.graph.build_graph_mer_attention import *
from baselines.ecbp.agents.graph.build_graph_mer_bvae_attention import *
import logging
from multiprocessing import Pipe
import os
from baselines.ecbp.agents.psmp_learning_target_agent import PSMPLearnTargetAgent
import cv2
class BVAEAttentionAgent(PSMPLearnTargetAgent):
def __init__(self, encoder_func,decoder_func, exploration_schedule, obs_shape, vector_input=True, lr=1e-4, buffer_size=1000000,
num_actions=6, latent_dim=32,
gamma=0.99, knn=4, eval_epsilon=0.1, queue_threshold=5e-5, batch_size=32, density=True, trainable=True,
num_neg=10, tf_writer=None):
self.conn, child_conn = Pipe()
self.replay_buffer = np.empty((buffer_size + 10,) + obs_shape, np.float32 if vector_input else np.uint8)
self.ec_buffer = PSLearningProcess(num_actions, buffer_size, latent_dim*2, obs_shape, child_conn, gamma,
density=density)
self.obs = None
self.z = None
self.cur_capacity = 0
self.ind = -1
self.writer = tf_writer
self.sequence = []
self.gamma = gamma
self.queue_threshold = queue_threshold
self.num_actions = num_actions
self.exploration_schedule = exploration_schedule
self.latent_dim = latent_dim
self.knn = knn
self.steps = 0
self.batch_size = batch_size
self.rmax = 100000
self.logger = logging.getLogger("ecbp")
self.log("psmp learning agent here")
self.eval_epsilon = eval_epsilon
self.train_step = 4
self.alpha = 1
self.burnin = 2000
self.burnout = 10000000000
self.update_target_freq = 10000
self.buffer_capacity = 0
self.trainable = trainable
self.num_neg = num_neg
self.loss_type = ["attention"]
input_type = U.Float32Input if vector_input else U.Uint8Input
# input_type = U.Uint8Input
self.hash_func, self.unmask_z_func,self.train_func, self.eval_func, self.norm_func, self.attention_func, self.value_func, self.reconstruct_func,self.update_target_func = build_train_mer_bvae_attention(
input_type=input_type,
obs_shape=obs_shape,
encoder_func=encoder_func,
decoder_func=decoder_func,
num_actions=num_actions,
optimizer=tf.train.AdamOptimizer(learning_rate=lr, epsilon=1e-4),
gamma=gamma,
grad_norm_clipping=10,
latent_dim=latent_dim,
loss_type=self.loss_type,
batch_size=batch_size,
num_neg=num_neg,
c_loss_type="sqmargin",
)
self.finds = [0, 0]
self.ec_buffer.start()
def train(self):
# sample
# self.log("begin training")
# print("training",self.writer)
noise = np.random.randn(9,self.batch_size,self.latent_dim)
samples = self.send_and_receive(4, (self.batch_size, self.num_neg))
samples_u = self.send_and_receive(4, (self.batch_size, self.num_neg))
samples_v = self.send_and_receive(4, (self.batch_size, self.num_neg))
index_u, _, _, _, value_u, _, _, _ = samples_u
index_v, _, _, _, value_v, _, _, _ = samples_v
index_tar, index_pos, index_neg, reward_tar, value_tar, action_tar, neighbours_index, neighbours_value = samples
if len(index_tar) < self.batch_size:
return
obs_tar = [self.replay_buffer[ind] for ind in index_tar]
obs_pos = [self.replay_buffer[ind] for ind in index_pos]
obs_neg = [self.replay_buffer[ind] for ind in index_neg]
obs_neighbour = [self.replay_buffer[ind] for ind in neighbours_index]
obs_u = [self.replay_buffer[ind] for ind in index_u]
obs_v = [self.replay_buffer[ind] for ind in index_v]
# print(obs_tar[0].shape)
if "regression" in self.loss_type:
value_original = self.norm_func(np.array(obs_tar))
value_tar = np.array(value_tar)
self.log(value_original, "value original")
self.log(value_tar, "value tar")
value_original = np.array(value_original).squeeze() / self.alpha
assert value_original.shape == np.array(value_tar).shape  # shape comparison assumed; the original line was truncated
from matplotlib import pyplot as plt
from matplotlib import cm
import csv
import os
import glob
import numpy as np
import gc
import statistics as st
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
##########################################################################################################
#writeCSV : write CSV tables (input must be a list of lists, or a single list for one row)
##########################################################################################################
def writeCSV(list2w, filePath):
with open(filePath, 'w') as f:
writer = csv.writer(f)
if type(list2w[0]) == list:
writer.writerows(list2w)
else:
writer.writerow(list2w)
f.close()
##########################################################################################################
#cm2inch : convert centimeters into inches for plots
##########################################################################################################
def cm2inch(value):
return value/2.54
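# Example (illustrative, matching the figure sizing used later in this module):
# cm2inch(28) ~= 11.02 and cm2inch(20) ~= 7.87, so
# fig.set_size_inches(cm2inch(28), cm2inch(20)) yields an approximately 28 cm x 20 cm figure.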
##########################################################################################################
#getCoastlineGeometry : load the Christchurch Coastline geometry
##########################################################################################################
def getCoastlineGeometry():
full_path_c = "./data/_postprocessing/Coastline.csv"
Coast_poly = []
with open(full_path_c) as csvfile:
readCSV = csv.reader(csvfile)
for row in readCSV:
Coast_poly.append([row[0], row[1]])
csvfile.close()
return Coast_poly
##########################################################################################################
#getPortHillsGeometry : load the Christchurch PortHills geometry
##########################################################################################################
def getPortHillsGeometry():
full_path_ph = "./data/_postprocessing/PortHill_Boundary.csv"
PH_poly = []
with open(full_path_ph) as csvfile:
readCSV = csv.reader(csvfile)
for row in readCSV:
PH_poly.append([row[0], row[1]])
csvfile.close()
return PH_poly
##########################################################################################################
#getPipeGeometry : load the network geometry
##########################################################################################################
def getPipeGeometry(network_name):
pipegeo_path = "./gen/networks/" + network_name + "/_post_network_pipegeometry.csv"
points_loc = []
with open(pipegeo_path, encoding = "ISO-8859-1") as csvfile:
readCSV = csv.reader(csvfile, delimiter=';')
for row in readCSV:
pipe_raw_loc = row[1:-1]
pipe_pts_loc_E = []
pipe_pts_loc_N = []
for i in range(len(pipe_raw_loc)):
if i%2 == 0: #Easting
pipe_pts_loc_E.append(float(pipe_raw_loc[i]))
else: #Northing
pipe_pts_loc_N.append(float(pipe_raw_loc[i]))
pipe_pts_loc = [pipe_pts_loc_E, pipe_pts_loc_N]
points_loc.append(pipe_pts_loc)
csvfile.close()
return points_loc
##########################################################################################################
#getPumpGeometry : load the network geometry
##########################################################################################################
def getPumpGeometry(network_name):
pumpgeo_path = "./gen/networks/" + network_name + "/_post_network_pump.csv"
pump_loc = []
pump_day = []
with open(pumpgeo_path, encoding = "ISO-8859-1") as csvfile:
readCSV = csv.reader(csvfile, delimiter=';')
for row in readCSV:
pump_day.append(int(row[3]))
pump_pts_loc_E = 0
pump_pts_loc_N = 0
b_loc = row[1:3]
for i in range(len(b_loc)):
if i == 0: #Easting
pump_pts_loc_E = float(b_loc[i])
else: #Northing
pump_pts_loc_N = float(b_loc[i])
pumpst_loc = [pump_pts_loc_E, pump_pts_loc_N]
pump_loc.append(pumpst_loc)
csvfile.close()
return [pump_loc, pump_day]
##########################################################################################################
#getCommunityGeometry : load the community geometry
##########################################################################################################
def getCommunityGeometry(commgeo_name, axis_limit):
commgeo_path = "./gen/communities/" + commgeo_name + "/_post_community_geometry.csv"
build_area = []
build_loc = []
build_pop = []
build_type = []
build_util = []
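# Assumed intent: adapt_coeff shrinks the building marker size as the plotted map extent grows
# (empirical linear fit in the square root of the extent); it multiplies sqrt(building area) below.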
adapt_coeff = np.sqrt(max(axis_limit[1]-axis_limit[0], axis_limit[3]-axis_limit[2]))*(-0.03)+5.25
with open(commgeo_path, encoding = "ISO-8859-1") as csvfile:
readCSV = csv.reader(csvfile, delimiter=';')
for row in readCSV:
build_pts_loc_E = 0
build_pts_loc_N = 0
build_area.append(adapt_coeff*np.sqrt(float(row[1])))
build_pop.append(float(row[2]))
build_type.append(row[3].strip(' '))
build_util.append(float(row[4]))
b_loc = [row[5], row[6]]
for i in range(len(b_loc)):
if i == 0: #Easting
build_pts_loc_E = float(b_loc[i])
else: #Northing
build_pts_loc_N = float(b_loc[i])
building_loc = [build_pts_loc_E, build_pts_loc_N]
build_loc.append(building_loc)
csvfile.close()
return [build_loc, build_area, build_pop, build_type, build_util]
##########################################################################################################
#getBuildingLocation : extract location of buildings
##########################################################################################################
def getBuildingLocation(raw_res):
n = []
for i in range(len(raw_res[0])):
n.append(raw_res[0][i])
return n
##########################################################################################################
#getBuildingArea : extract vector of building area
##########################################################################################################
def getBuildingArea(raw_res):
n = []
for i in range(len(raw_res[1])):
n.append(raw_res[1][i])
return n
##########################################################################################################
#getBuildingPop : extract vector of building population
##########################################################################################################
def getBuildingPop(raw_res):
n = []
for i in range(len(raw_res[2])):
n.append(raw_res[2][i])
return n
##########################################################################################################
#getBuildingType : extract vector of building type
##########################################################################################################
def getBuildingType(raw_res):
n = []
for i in range(len(raw_res[3])):
n.append(raw_res[3][i])
return n
##########################################################################################################
#getBuildingUtility : extract vector of building utility
##########################################################################################################
def getBuildingUtility(raw_res):
n = []
for i in range(len(raw_res[4])):
n.append(raw_res[4][i])
return n
##########################################################################################################
#getFailureGeometry : load the location of the pipe failures
##########################################################################################################
def getFailureGeometry(failgeo_name):
failgeo_path = "./data/failure_collection/" + failgeo_name + ".csv"
failure_loc = []
with open(failgeo_path, encoding = "ISO-8859-1") as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
loc = []
loc.append(float(row[0]))
loc.append(float(row[1]))
failure_loc.append(loc)
csvfile.close()
return failure_loc
##########################################################################################################
#getRawResultPipeFailureSingleSim : load the raw results for number of failures and failure per segment from 1 simulation
##########################################################################################################
def getRawResultPipeFailureSingleSim(sim_path, i_fx, pipe_nbr_break, i_sim):
failure_path = sim_path + "/_post_network_pipefailure.csv"
total_nbr_break = 0
i = 0
with open(failure_path, encoding = "ISO-8859-1") as csvfile:
readCSV = csv.reader(csvfile, delimiter=';')
for row in readCSV:
pipe_break = int(row[i_fx+1])
total_nbr_break = total_nbr_break + pipe_break
if i_sim == 0:
if pipe_break > 0:
pipe_nbr_break.append(1)
else:
pipe_nbr_break.append(0)
else:
if pipe_break > 0:
pipe_nbr_break[i] = pipe_nbr_break[i] + 1
i = i+1
csvfile.close()
return [total_nbr_break, pipe_nbr_break]
##########################################################################################################
#getRawResultPipeConnectSingleSim : load the raw results for pipe connectivity from 1 simulation
##########################################################################################################
def getRawResultPipeConnectSingleSim(sim_path, i_fx, pipe_connect, i_sim):
pipeconnect_path = sim_path + "/_post_network_pipeconnection.csv"
i = 0
with open(pipeconnect_path, encoding = "ISO-8859-1") as csvfile:
readCSV = csv.reader(csvfile, delimiter=';')
for row in readCSV:
pipe_connected = int(row[i_fx+1])
if i_sim == 0:
if pipe_connected < 0:
pipe_connect.append(1)
else:
pipe_connect.append(0)
else:
if pipe_connected < 0:
pipe_connect[i] = pipe_connect[i] + 1
i = i+1
csvfile.close()
return pipe_connect
##########################################################################################################
#getRawResultBuildingConnectSingleSim : load the raw results for building connectivity from 1 simulation
##########################################################################################################
def getRawResultBuildingConnectSingleSim(sim_path, i_fx, build_pop, build_util, build_type, build_connect, i_sim):
buildingconnect_path = sim_path + "/_post_community_buildingconnection.csv"
i = 0
n_build_disconnect = 0
n_people_disconnect = 0
n_business = 0
n_medical = 0
n_critical = 0
n_school = 0
utility = 0
with open(buildingconnect_path, encoding = "ISO-8859-1") as csvfile:
readCSV = csv.reader(csvfile, delimiter=';')
for row in readCSV:
build_connected = int(row[i_fx+1])
if i_sim == 0:
if build_connected > 0:
build_connect.append(0)
else:
build_connect.append(1)
n_build_disconnect = n_build_disconnect + 1
n_people_disconnect = n_people_disconnect + build_pop[i]
if build_type[i] == 'business':
n_business = n_business + 1
elif build_type[i] == 'medical':
n_medical = n_medical + 1
elif build_type[i] == 'critical':
n_critical = n_critical + 1
elif build_type[i] == 'school':
n_school = n_school + 1
utility = utility + build_util[i]
else:
if build_connected <= 0:
build_connect[i] = build_connect[i] + 1
n_build_disconnect = n_build_disconnect + 1
n_people_disconnect = n_people_disconnect + build_pop[i]
if build_type[i] == 'business':
n_business = n_business + 1
elif build_type[i] == 'medical':
n_medical = n_medical + 1
elif build_type[i] == 'critical':
n_critical = n_critical + 1
elif build_type[i] == 'school':
n_school = n_school + 1
utility = utility + build_util[i]
i = i+1
csvfile.close()
return [build_connect, n_build_disconnect, n_people_disconnect, n_business, n_medical, n_critical, n_school, utility]
##########################################################################################################
#getRawResultPipeFailure : load the raw results for pipeline failrues from all simulations
##########################################################################################################
def getRawResultPipeFailure(root_sim_path, i_fx, str_folder):
n_sim = len(glob.glob1(root_sim_path, str_folder))
simul_name = glob.glob1(root_sim_path, str_folder)
pipe_nbr_break = []
sim_break = []
for sim in range(n_sim):
if 'day_' in str_folder:
sim_path = root_sim_path + '/' + str_folder
else:
sim_path = root_sim_path + simul_name[sim]
result_sim = getRawResultPipeFailureSingleSim(sim_path, i_fx, pipe_nbr_break, sim)
pipe_nbr_break = result_sim[1]
sim_break.append(result_sim[0])
pipe_nbr_break[:] = [x/n_sim for x in pipe_nbr_break]
return [sim_break, pipe_nbr_break]
##########################################################################################################
#getRawResultPipeNumberFailure : extract matrix of failure per pipe
##########################################################################################################
def getRawResultPipeNumberFailure(raw_res):
n = []
for i in range(len(raw_res)):
n.append(raw_res[i][1])
return n
##########################################################################################################
#getRawResultNumberFailure : extract vector of failures on the network
##########################################################################################################
def getRawResultNumberFailure(raw_res):
n = []
for i in range(len(raw_res)):
n.append(raw_res[i][0])
return n
##########################################################################################################
#getRawResultPipeConnectivity : load the raw results for pipeline connectivity from all simulations
##########################################################################################################
def getRawResultPipeConnectivity(root_sim_path, i_fx, str_folder):
sgmt_connect = []
n_sim = len(glob.glob1(root_sim_path, str_folder))
simul_name = glob.glob1(root_sim_path, str_folder)
for sim in range(n_sim):
if 'day_' in str_folder:
sim_path = root_sim_path + '/' + str_folder
else:
sim_path = root_sim_path + simul_name[sim]
sgmt_connect = getRawResultPipeConnectSingleSim(sim_path, i_fx, sgmt_connect, sim)
sgmt_connect[:] = [x/n_sim for x in sgmt_connect]
return sgmt_connect
##########################################################################################################
#getRawResultBuildingConnectivity : load the raw results for building connectivity from all simulations
##########################################################################################################
def getRawResultBuildingConnectivity(root_sim_path, i_fx, str_folder, build_pop, build_util, build_type):
build_connect = []
n_build_disconnect = []
n_people_disconnect = []
n_business = []
n_medical = []
n_critical = []
n_school = []
utility = []
n_sim = len(glob.glob1(root_sim_path, str_folder))
simul_name = glob.glob1(root_sim_path, str_folder)
for sim in range(n_sim):
if 'day_' in str_folder:
sim_path = root_sim_path + '/' + str_folder
else:
sim_path = root_sim_path + simul_name[sim]
result_sim = getRawResultBuildingConnectSingleSim(sim_path, i_fx, build_pop, build_util, build_type, build_connect, sim)
build_connect = result_sim[0]
n_build_disconnect.append(result_sim[1])
n_people_disconnect.append(result_sim[2])
n_business.append(result_sim[3])
n_medical.append(result_sim[4])
n_critical.append(result_sim[5])
n_school.append(result_sim[6])
utility.append(result_sim[7])
build_connect[:] = [x/n_sim for x in build_connect]
return [build_connect, n_build_disconnect, n_people_disconnect, n_business, n_medical, n_critical, n_school, utility]
##########################################################################################################
#getResultNumberDisconnectBuilding : extract vector of number of disconncted buidlings
##########################################################################################################
def getResultNumberDisconnectBuilding(raw_res):
n = []
for i in range(len(raw_res)):
n.append(raw_res[i][1])
return n
##########################################################################################################
#getResultDisconnectBuilding : extract matrix of building connectivity
##########################################################################################################
def getResultDisconnectBuilding(raw_res):
n = []
for i in range(len(raw_res)):
n.append(raw_res[i][0])
return n
##########################################################################################################
#getResultDisconnectPeople : extract vector disconnected people
##########################################################################################################
def getResultDisconnectPeople(cnnt_build, pop):
n = []
for i in range(len(cnnt_build)):
n_per_sim = 0
for j in range(len(pop)):
if cnnt_build[i][j] == 0:
n_per_sim = n_per_sim + pop[j]
n.append(n_per_sim)
return n
##########################################################################################################
#evaluateProbaPipeFailure : evaluate the probability of failure for 1 scenario based on loaded simulations
##########################################################################################################
def evaluateProbaPipeFailure(pipe_failure):
P_f = []
n_pipe = len(pipe_failure[0])
n_sim = len(pipe_failure)
for i in range(n_pipe):
P_f_pipe = 0
for j in range(n_sim):
if pipe_failure[j][i]>0:
P_f_pipe = P_f_pipe + 1
P_f.append(P_f_pipe/n_sim)
return P_f
##########################################################################################################
#evaluateProbaPipeConnect : evaluate the probability of disconnection (pipe) for 1 scenario based on loaded simulations
##########################################################################################################
def evaluateProbaPipeConnect(pipe_connect):
P_f_disc = []
n_pipe = len(pipe_connect[0])
n_sim = len(pipe_connect)
for i in range(n_pipe):
P_f_disc_pipe = 0
for j in range(n_sim):
P_f_disc_pipe = P_f_disc_pipe + pipe_connect[j][i]
P_f_disc.append(1-(P_f_disc_pipe/n_sim))
return P_f_disc
##########################################################################################################
#evaluateProbaBuildingConnect : evaluate the probability of disconnection (building) for 1 scenario based on loaded simulations
##########################################################################################################
def evaluateProbaBuildingConnect(building_connect):
P_f_disc = []
n_pipe = len(building_connect[0])
n_sim = len(building_connect)
for i in range(n_pipe):
P_f_disc_build = 0
for j in range(n_sim):
P_f_disc_build = P_f_disc_build + building_connect[j][i]
P_f_disc.append(1-(P_f_disc_build/n_sim))
return P_f_disc
##########################################################################################################
#getNumberFragilityFunction : determine the number of fragility funcitons used to assess the pipe network
##########################################################################################################
def getNumberFragilityFunction(root_sim_path):
simul_name = glob.glob1(root_sim_path,"sim_*")
sim_path = root_sim_path + simul_name[0]
failure_path = sim_path + "/_post_network_pipefailure.csv"
with open(failure_path, encoding = "ISO-8859-1") as csvfile:
readCSV = csv.reader(csvfile, delimiter=';')
for row in readCSV:
n_fx = len(row)-1
break
csvfile.close()
return n_fx
##########################################################################################################
#getAxisLimit : determine the axis limits based on pipe geometry
##########################################################################################################
def getAxisLimit(network_name):
pipegeo_path = "./gen/networks/" + network_name + "/_post_network_pipegeometry.csv"
x_max_ = -1
x_min_ = 1000000000
y_max_ = -1
y_min_ = 1000000000
with open(pipegeo_path, encoding = "ISO-8859-1") as csvfile:
readCSV = csv.reader(csvfile, delimiter=';')
points_loc = []
for row in readCSV:
pipe_raw_loc = row[1:-1]
for i in range(len(pipe_raw_loc)):
if i%2 == 0: #Easting
if x_max_<float(pipe_raw_loc[i]):
x_max_ = float(pipe_raw_loc[i])
if x_min_>float(pipe_raw_loc[i]):
x_min_ = float(pipe_raw_loc[i])
else: #Northing
if y_max_<float(pipe_raw_loc[i]):
y_max_ = float(pipe_raw_loc[i])
if y_min_>float(pipe_raw_loc[i]):
y_min_ = float(pipe_raw_loc[i])
#Update x, y, max and min
x_max = x_max_ + (x_max_-x_min_)*0.1
x_min = x_min_ - (x_max_-x_min_)*0.1
y_max = y_max_ + (y_max_-y_min_)*0.1
y_min = y_min_ - (y_max_-y_min_)*0.1
if x_min < 1554000:
x_min = 1554000
if x_max > 1585000:
x_max = 1585000
if y_min < 5174000:
y_min = 5174000
if y_max > 5191000:
y_max = 5191000
csvfile.close()
return [x_min, x_max, y_min, y_max]
##########################################################################################################
#sturgesRule : compute the appropriate number of bins for a histogram using the Sturges rule
##########################################################################################################
def sturgesRule(n):
n_bin_hist = int(np.ceil(np.log2(n))+1)
return n_bin_hist
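# Worked example (illustrative): for n = 1000 Monte Carlo simulations,
# sturgesRule(1000) = int(np.ceil(np.log2(1000)) + 1) = 11 bins.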
##########################################################################################################
#getStatisticsNumberFailure : return basic statistics for number of failures from 1 scenario
##########################################################################################################
def getStatisticsNumberFailure(n_failure):
mean_f = np.mean(n_failure, dtype=np.float64)
med_f = st.median(n_failure)
std_f = np.std(n_failure, dtype=np.float64)
return [mean_f, med_f, std_f]
##########################################################################################################
#getTopography : return patch collection representing the topography
##########################################################################################################
def getTopography():
coastline = getCoastlineGeometry()
portHills = getPortHillsGeometry()
solid_shape = []
solid_shape.append(Polygon(portHills, True))
solid_shape.append(Polygon(coastline, True))
p = PatchCollection(solid_shape, alpha=0.5)
return p
##########################################################################################################
#getTopographyColor : return vector of color for topography
##########################################################################################################
def getTopographyColor():
return ['#8b4513', '#deb887']
##########################################################################################################
#getDiscreteColorbar : return a colorbar object using user-defined parameters
##########################################################################################################
def getDiscreteColorbar(str_colormap_name, n_inter, v_max, ax):
cmap = cm.get_cmap(str_colormap_name, n_inter)
norm = plt.Normalize(vmin = 0, vmax = v_max)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array([])
bounds = np.linspace(0, v_max, n_inter+1)
return [sm, bounds]
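# Typical usage (as in the plotting helpers below; shown here only as an illustrative sketch):
# [sm, bounds] = getDiscreteColorbar('jet', 9, max(P_f), ax)
# ax.plot(x, y, color=sm.to_rgba(P_f[i]))            # colour each asset by its probability
# cbar = plt.colorbar(sm, ax=ax); cbar.set_ticks(bounds, update_ticks=True)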
##########################################################################################################
#plotFailureHistogram : plot the histogram of failure number for 1 scenario using 1 fragility function set
##########################################################################################################
def plotFailureHistogram(n_failure, ax=None):
if ax is None:
ax = plt.gca()
n_sim = len(n_failure)
n_bin = sturgesRule(n_sim)
[mean_f, med_f, std_f] = getStatisticsNumberFailure(n_failure)
str_text = "Mean: " + str(float("{0:.2f}".format(mean_f))) + "\nMedian: " + str(float("{0:.2f}".format(med_f))) + "\nStd: " + str(float("{0:.2f}".format(std_f)))
y, x, _ = ax.hist(n_failure, bins=n_bin)
ax.grid(True)
ax.text(0.8*(max(n_failure)-min(n_failure)), 0.8*y.max(), str_text, fontsize=11)
ax.set_xlabel('Number of pipe failures')
ax.set_ylabel('Number of occurrences')
return ax
##########################################################################################################
#plotDisconnectBuildingHistogram : plot the histogram of buildings for 1 scenario using 1 fragility function set
##########################################################################################################
def plotDisconnectBuildingHistogram(n_b, ax=None):
if ax is None:
ax = plt.gca()
n_sim = len(n_b)
n_bin = sturgesRule(n_sim)
[mean_f, med_f, std_f] = getStatisticsNumberFailure(n_b)
str_text = "Mean: " + str(float("{0:.2f}".format(mean_f))) + "\nMedian: " + str(float("{0:.2f}".format(med_f))) + "\nStd: " + str(float("{0:.2f}".format(std_f)))
y, x, _ = ax.hist(n_b, bins=n_bin)
ax.grid(True)
ax.text(0.8*(max(n_b)-min(n_b)), 0.8*y.max(), str_text, fontsize=11)
ax.set_xlabel('Number of disconnected buildings')
ax.set_ylabel('Number of occurrences')
return ax
##########################################################################################################
#plotDisconnectPeopleHistogram : plot the histogram of people for 1 scenario using 1 fragility function set
##########################################################################################################
def plotDisconnectPeopleHistogram(n_p, ax=None):
if ax is None:
ax = plt.gca()
n_sim = len(n_p)
n_bin = sturgesRule(n_sim)
[mean_f, med_f, std_f] = getStatisticsNumberFailure(n_p)
str_text = "Mean: " + str(float("{0:.2f}".format(mean_f))) + "\nMedian: " + str(float("{0:.2f}".format(med_f))) + "\nStd: " + str(float("{0:.2f}".format(std_f)))
y, x, _ = ax.hist(n_p, bins=n_bin)
ax.grid(True)
ax.text(0.8*(max(n_p)-min(n_p)), 0.8*y.max(), str_text, fontsize=11)
ax.set_xlabel('Number of disconnected people')
ax.set_ylabel('Number of occurrences')
return ax
##########################################################################################################
#plotDisconnectBusinessHistogram : plot the histogram of business buildings for 1 scenario using 1 fragility function set
##########################################################################################################
def plotDisconnectBusinessHistogram(n_p, ax=None):
if ax is None:
ax = plt.gca()
n_sim = len(n_p)
n_bin = sturgesRule(n_sim)
[mean_f, med_f, std_f] = getStatisticsNumberFailure(n_p)
str_text = "Mean: " + str(float("{0:.2f}".format(mean_f))) + "\nMedian: " + str(float("{0:.2f}".format(med_f))) + "\nStd: " + str(float("{0:.2f}".format(std_f)))
y, x, _ = ax.hist(n_p, bins=n_bin)
ax.grid(True)
ax.text(0.8*(max(n_p)-min(n_p)), 0.8*y.max(), str_text, fontsize=11)
ax.set_xlabel('Number of disconnected business buildings')
ax.set_ylabel('Number of occurrences')
return ax
##########################################################################################################
#plotDisconnectMedicalHistogram : plot the histogram of medical buildings for 1 scenario using 1 fragility function set
##########################################################################################################
def plotDisconnectMedicalHistogram(n_p, ax=None):
if ax is None:
ax = plt.gca()
n_sim = len(n_p)
n_bin = sturgesRule(n_sim)
[mean_f, med_f, std_f] = getStatisticsNumberFailure(n_p)
str_text = "Mean: " + str(float("{0:.2f}".format(mean_f))) + "\nMedian: " + str(float("{0:.2f}".format(med_f))) + "\nStd: " + str(float("{0:.2f}".format(std_f)))
y, x, _ = ax.hist(n_p, bins=n_bin)
ax.grid(True)
ax.text(0.8*(max(n_p)-min(n_p)), 0.8*y.max(), str_text, fontsize=11)
ax.set_xlabel('Number of disconnected medical buildings')
ax.set_ylabel('Number of occurrences')
return ax
##########################################################################################################
#plotDisconnectCriticalHistogram : plot the histogram of critical buildings for 1 scenario using 1 fragility function set
##########################################################################################################
def plotDisconnectCriticalHistogram(n_p, ax=None):
if ax is None:
ax = plt.gca()
n_sim = len(n_p)
n_bin = sturgesRule(n_sim)
[mean_f, med_f, std_f] = getStatisticsNumberFailure(n_p)
str_text = "Mean: " + str(float("{0:.2f}".format(mean_f))) + "\nMedian: " + str(float("{0:.2f}".format(med_f))) + "\nStd: " + str(float("{0:.2f}".format(std_f)))
y, x, _ = ax.hist(n_p, bins=n_bin)
ax.grid(True)
ax.text(0.8*(max(n_p)-min(n_p)), 0.8*y.max(), str_text, fontsize=11)
ax.set_xlabel('Number of disconnected critical buildings')
ax.set_ylabel('Number of occurrences')
return ax
##########################################################################################################
#plotDisconnectSchoolHistogram : plot the histogram of school buildings for 1 scenario using 1 fragility function set
##########################################################################################################
def plotDisconnectSchoolHistogram(n_p, ax=None):
if ax is None:
ax = plt.gca()
n_sim = len(n_p)
n_bin = sturgesRule(n_sim)
[mean_f, med_f, std_f] = getStatisticsNumberFailure(n_p)
str_text = "Mean: " + str(float("{0:.2f}".format(mean_f))) + "\nMedian: " + str(float("{0:.2f}".format(med_f))) + "\nStd: " + str(float("{0:.2f}".format(std_f)))
y, x, _ = ax.hist(n_p, bins=n_bin)
ax.grid(True)
ax.text(0.8*(max(n_p)-min(n_p)), 0.8*y.max(), str_text, fontsize=11)
ax.set_xlabel('Number of disconnected school buildings')
ax.set_ylabel('Number of occurrences')
return ax
##########################################################################################################
#plotDisconnectUtilityHistogram : plot the histogram of utility for 1 scenario using 1 fragility function set
##########################################################################################################
def plotDisconnectUtilityHistogram(n_p, ax=None):
if ax is None:
ax = plt.gca()
n_sim = len(n_p)
n_bin = sturgesRule(n_sim)
[mean_f, med_f, std_f] = getStatisticsNumberFailure(n_p)
str_text = "Mean: " + str(float("{0:.2f}".format(mean_f))) + "\nMedian: " + str(float("{0:.2f}".format(med_f))) + "\nStd: " + str(float("{0:.2f}".format(std_f)))
y, x, _ = ax.hist(n_p, bins=n_bin)
ax.grid(True)
ax.text(0.8*(max(n_p)-min(n_p)), 0.8*y.max(), str_text, fontsize=11)
ax.set_xlabel('Disconnected utility')
ax.set_ylabel('Number of occurrences')
return ax
##########################################################################################################
#plotPipeProbaFailure : plot the network assets with their probability of failure
##########################################################################################################
def plotPipeProbaFailure(pipe_loc, P_f, plot_lim, ax):
if ax is None:
ax = plt.gca()
topography = getTopography()
topo_color = getTopographyColor()
ax.add_collection(topography)
topography.set_color(topo_color)
[sm, bounds] = getDiscreteColorbar('jet', 9, max(P_f), ax)
for i in range(len(pipe_loc)):
im = ax.plot(pipe_loc[i][0][:], pipe_loc[i][1][:], color=sm.to_rgba(P_f[i]))
ax.grid(True)
ax.set_xlim(plot_lim[0], plot_lim[1])
ax.set_ylim(plot_lim[2], plot_lim[3])
ax.set_xlabel('NZTM2000 Easting, [m]')
ax.set_ylabel('NZTM2000 Northing, [m]')
cbar = plt.colorbar(sm, ax=ax)
cbar.set_ticks(bounds, update_ticks=True)
cbar.set_ticklabels(np.round(bounds, 2), update_ticks=True)
cbar.ax.set_ylabel('Probability of failure', rotation=90)
return ax
##########################################################################################################
#plotPipeProbaDisconnect : plot the network assets with their probability of disconnection
##########################################################################################################
def plotPipeProbaDisconnect(pipe_loc, pump_loc, P_disc, pump_day, plot_lim, ax):
if ax is None:
ax = plt.gca()
topography = getTopography()
topo_color = getTopographyColor()
ax.add_collection(topography)
topography.set_color(topo_color)
[sm, bounds] = getDiscreteColorbar('jet', 9, max(P_disc), ax)
for i in range(len(pipe_loc)):
im = ax.plot(pipe_loc[i][0][:], pipe_loc[i][1][:], color=sm.to_rgba(P_disc[i]))
for i in range(len(pump_loc)):
im = ax.scatter(pump_loc[i][0], pump_loc[i][1], marker='^', color='k', s=120)
ax.grid(True)
ax.set_xlim(plot_lim[0], plot_lim[1])
ax.set_ylim(plot_lim[2], plot_lim[3])
ax.set_xlabel('NZTM2000 Easting, [m]')
ax.set_ylabel('NZTM2000 Northing, [m]')
cbar = plt.colorbar(sm, ax=ax)
cbar.set_ticks(bounds, update_ticks=True)
cbar.set_ticklabels(np.round(bounds, 2), update_ticks=True)
cbar.ax.set_ylabel('Probability of service reduction', rotation=90)
return ax
##########################################################################################################
#plotBuildingProbaDisconnect : plot the network assets with their probability of disconnection
##########################################################################################################
def plotBuildingProbaDisconnect(pipe_loc, pump_loc, build_loc, build_area, P_disc, pump_day, plot_lim, ax):
if ax is None:
ax = plt.gca()
topography = getTopography()
topo_color = getTopographyColor()
ax.add_collection(topography)
topography.set_color(topo_color)
[sm, bounds] = getDiscreteColorbar('jet', 9, max(P_disc), ax)
for i in range(len(pipe_loc)):
im = ax.plot(pipe_loc[i][0][:], pipe_loc[i][1][:], color='k')
for i in range(len(pump_loc)):
if pump_day[i] < 1:
im = ax.scatter(pump_loc[i][0], pump_loc[i][1], marker='^', color='k', s=120, zorder=2)
else:
im = ax.scatter(pump_loc[i][0], pump_loc[i][1], marker='^', facecolors='none', edgecolors='k', s=120, zorder=2)
for i in range(len(build_loc)):
im = ax.scatter(build_loc[i][0], build_loc[i][1], s=build_area[i], color=sm.to_rgba(P_disc[i]))
ax.grid(True)
ax.set_xlim(plot_lim[0], plot_lim[1])
ax.set_ylim(plot_lim[2], plot_lim[3])
ax.set_xlabel('NZTM2000 Easting, [m]')
ax.set_ylabel('NZTM2000 Northing, [m]')
cbar = plt.colorbar(sm, ax=ax)
cbar.set_ticks(bounds, update_ticks=True)
cbar.set_ticklabels(np.round(bounds, 2), update_ticks=True)
cbar.ax.set_ylabel('Probability of service reduction', rotation=90)
return ax
##########################################################################################################
#plotSinglePredictiveScenario : save the required plots for a particular scenario
##########################################################################################################
def plotSinglePredictiveScenario(GM_name, network_name, community_name, lvl, plotResult, exportResult):
#Level of analysis managed by lvl:
#0: distribution of failure number
#1: proba of failure of assets
#2: proba of disconnection of assets
#3: proba of disconnection of buildings
#Initialize all potentially used variables
pipe_loc = []
pump_loc = []
pump_day = []
build_loc = []
build_area = []
build_pop = []
build_type = []
build_util = []
n_failure = []
P_f = []
P_disc = []
P_b_disc = []
plot_lim = []
GIS_build_res = []  # initialized here so the final return is defined even when lvl < 3 or exportResult is False
#Change font
plt.rcParams["font.family"] = "serif"
#Determine simulation folder path and number of fragility functions used
str_folder = 'sim_*'
sim_root_folder = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/simulations/'
n_fx = getNumberFragilityFunction(sim_root_folder)
#Load required geometries
if lvl >= 1:
plot_lim = getAxisLimit(network_name)
pipe_loc = getPipeGeometry(network_name)
if lvl >= 2:
pump_data = getPumpGeometry(network_name)
pump_loc = pump_data[0]
pump_day = pump_data[1]
if lvl >= 3:
build_loc = getBuildingLocation(getCommunityGeometry(community_name, plot_lim))
build_area = getBuildingArea(getCommunityGeometry(community_name, plot_lim))
build_pop = getBuildingPop(getCommunityGeometry(community_name, plot_lim))
build_type = getBuildingType(getCommunityGeometry(community_name, plot_lim))
build_util = getBuildingUtility(getCommunityGeometry(community_name, plot_lim))
#For each fragility function
for i in range(n_fx):
#Failure number histogram
if lvl >= 0:
pipe_f_results = getRawResultPipeFailure(sim_root_folder, i, str_folder)
if plotResult:
fig_path = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/n_failure_dist_' + str(i) + '.pdf'
fig, (ax1) = plt.subplots(1, 1, sharex=False, sharey=False)
fig.set_size_inches(cm2inch(28), cm2inch(20))
plotFailureHistogram(pipe_f_results[0], ax=ax1)
fig.tight_layout()
fig.savefig(fig_path, dpi=600)
fig.clf()
plt.close()
gc.collect()
if exportResult:
csv_path = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/n_failure_data_' + str(i) + '.csv'
writeCSV(pipe_f_results[0], csv_path)
#Pipeline proba of failure
if lvl >= 1:
if plotResult:
fig_path = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/failure_proba_' + str(i) + '.pdf'
fig, (ax1) = plt.subplots(1, 1, sharex=False, sharey=False)
fig.set_size_inches(cm2inch(28), cm2inch(20))
plotPipeProbaFailure(pipe_loc, pipe_f_results[1], plot_lim, ax1)
fig.tight_layout()
fig.savefig(fig_path, dpi=600)
fig.clf()
plt.close()
gc.collect()
#Proba of pipe disconnection
if lvl >= 2:
if plotResult:
P_disc = getRawResultPipeConnectivity(sim_root_folder, i, str_folder)
fig_path = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/pipe_disconnect_proba' + str(i) + '.pdf'
fig, (ax1) = plt.subplots(1, 1, sharex=False, sharey=False)
fig.set_size_inches(cm2inch(28), cm2inch(20))
plotPipeProbaDisconnect(pipe_loc, pump_loc, P_disc, pump_day, plot_lim, ax1)
fig.tight_layout()
fig.savefig(fig_path, dpi=600)
fig.clf()
plt.close()
gc.collect()
#Proba of building disconnection
if lvl == 3:
#GIS representation
build_res = getRawResultBuildingConnectivity(sim_root_folder, i, str_folder, build_pop, build_util, build_type)
if plotResult:
fig_path = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/building_disconnect_proba' + str(i) + '.pdf'
fig, (ax1) = plt.subplots(1, 1, sharex=False, sharey=False)
fig.set_size_inches(cm2inch(28), cm2inch(20))
plotBuildingProbaDisconnect(pipe_loc, pump_loc, build_loc, build_area, build_res[0], pump_day, plot_lim, ax1)
fig.tight_layout()
fig.savefig(fig_path, dpi=600)
fig.clf()
plt.close()
gc.collect()
#Histogram of disconnected building number
fig_path = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/n_building_disconnect' + str(i) + '.pdf'
fig, (ax1) = plt.subplots(1, 1, sharex=False, sharey=False)
fig.set_size_inches(cm2inch(28), cm2inch(20))
plotDisconnectBuildingHistogram(build_res[1], ax1)
fig.tight_layout()
fig.savefig(fig_path, dpi=600)
fig.clf()
plt.close()
gc.collect()
#Histogram of disconnected business building number
fig_path = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/n_business_disconnect' + str(i) + '.pdf'
fig, (ax1) = plt.subplots(1, 1, sharex=False, sharey=False)
fig.set_size_inches(cm2inch(28), cm2inch(20))
plotDisconnectBusinessHistogram(build_res[3], ax1)
fig.tight_layout()
fig.savefig(fig_path, dpi=600)
fig.clf()
plt.close()
gc.collect()
#Histogram of disconnected medical building number
fig_path = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/n_medical_disconnect' + str(i) + '.pdf'
fig, (ax1) = plt.subplots(1, 1, sharex=False, sharey=False)
fig.set_size_inches(cm2inch(28), cm2inch(20))
plotDisconnectMedicalHistogram(build_res[4], ax1)
fig.tight_layout()
fig.savefig(fig_path, dpi=600)
fig.clf()
plt.close()
gc.collect()
#Histogram of disconnected critical building number
fig_path = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/n_critical_disconnect' + str(i) + '.pdf'
fig, (ax1) = plt.subplots(1, 1, sharex=False, sharey=False)
fig.set_size_inches(cm2inch(28), cm2inch(20))
plotDisconnectCriticalHistogram(build_res[5], ax1)
fig.tight_layout()
fig.savefig(fig_path, dpi=600)
fig.clf()
plt.close()
gc.collect()
#Histogram of disconnected people
fig_path = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/n_population_disconnect' + str(i) + '.pdf'
fig, (ax1) = plt.subplots(1, 1, sharex=False, sharey=False)
fig.set_size_inches(cm2inch(28), cm2inch(20))
plotDisconnectPeopleHistogram(build_res[2], ax1)
fig.tight_layout()
fig.savefig(fig_path, dpi=600)
fig.clf()
plt.close()
gc.collect()
#Histogram of disconnected school buildings
fig_path = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/n_school_disconnect' + str(i) + '.pdf'
fig, (ax1) = plt.subplots(1, 1, sharex=False, sharey=False)
fig.set_size_inches(cm2inch(28), cm2inch(20))
plotDisconnectPeopleHistogram(build_res[6], ax1)
fig.tight_layout()
fig.savefig(fig_path, dpi=600)
fig.clf()
plt.close()
gc.collect()
#Histogram of lost utility
fig_path = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/utility_disconnect' + str(i) + '.pdf'
fig, (ax1) = plt.subplots(1, 1, sharex=False, sharey=False)
fig.set_size_inches(cm2inch(28), cm2inch(20))
plotDisconnectUtilityHistogram(build_res[7], ax1)
fig.tight_layout()
fig.savefig(fig_path, dpi=600)
fig.clf()
plt.close()
gc.collect()
if exportResult:
#GIS
GIS_build_res = []
for j in range(len(build_loc)):
GIS_build_res.append([build_loc[j][0], build_loc[j][1], build_res[0][j]])
csv_path = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/Pdisc_building_data_' + str(i) + '.csv'
writeCSV(GIS_build_res, csv_path)
#N buildings
csv_path = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/n_building_data_' + str(i) + '.csv'
writeCSV(build_res[1], csv_path)
#N businesses
csv_path = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/n_business_data_' + str(i) + '.csv'
writeCSV(build_res[3], csv_path)
#N medical
csv_path = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/n_medical_data_' + str(i) + '.csv'
writeCSV(build_res[4], csv_path)
#N critical
csv_path = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/n_critical_data_' + str(i) + '.csv'
writeCSV(build_res[5], csv_path)
#Population
csv_path = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/population_data_' + str(i) + '.csv'
writeCSV(build_res[2], csv_path)
#Utility
csv_path = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/utility_data_' + str(i) + '.csv'
writeCSV(build_res[7], csv_path)
#N schools
csv_path = './gen/MCS/predictions/' + network_name + '/' + GM_name + '/n_school_data_' + str(i) + '.csv'
writeCSV(build_res[6], csv_path)
return GIS_build_res
##########################################################################################################
#getMaxHistoricalRecoveryTime : get the maximum recovery time
##########################################################################################################
def getMaxHistoricalRecoveryTime(root_sim_path):
n_day = 0
n_sim = len(glob.glob1(root_sim_path,"sim_*"))
simul_name = glob.glob1(root_sim_path,"sim_*")
for i in range(n_sim):
root_reco_path = root_sim_path + simul_name[i]
if n_day < len(glob.glob1(root_reco_path,"day_*")):
n_day = len(glob.glob1(root_reco_path,"day_*"))
return n_day
##########################################################################################################
#getMaxRecoveryTime : get the maximum recovery time
##########################################################################################################
def getMaxRecoveryTime(root_sim_path):
n_day = 0
n_sim = len(glob.glob1(root_sim_path,"sim_*"))
simul_name = glob.glob1(root_sim_path,"sim_*")
for i in range(n_sim):
root_reco_path = root_sim_path + simul_name[i] + '/recovery/'
if n_day < len(glob.glob1(root_reco_path,"day_*")):
n_day = len(glob.glob1(root_reco_path,"day_*"))
return n_day
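#Note: both functions above scan the "sim_*" folders and return the largest number of
#"day_*" subfolders found; getMaxRecoveryTime looks inside each simulation's
#"/recovery/" subfolder, while getMaxHistoricalRecoveryTime looks in the simulation
#folder directly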
##########################################################################################################
#getRawResultBuildingConnectSingleSimRecovery : load the raw results for building connectivity from 1 simulation
##########################################################################################################
def getRawResultBuildingConnectSingleSimRecovery(sim_path, i_fx, build_pop, build_util, build_type, build_connect, i_sim):
buildingconnect_path = sim_path + "/_post_community_buildingconnection.csv"
i = 0
n_build_disconnect = 0
n_people_disconnect = 0
n_business = 0
n_medical = 0
n_critical = 0
n_school = 0
utility = 0
with open(buildingconnect_path, encoding = "ISO-8859-1") as csvfile:
readCSV = csv.reader(csvfile, delimiter=';')
for row in readCSV:
build_connected = int(row[i_fx+1])
if i_sim == 0:
if build_connected > 0:
build_connect.append(0)
else:
build_connect.append(1)
n_build_disconnect = n_build_disconnect + 1
n_people_disconnect = n_people_disconnect + build_pop[i]
if build_type[i] == 'business':
n_business = n_business + 1
elif build_type[i] == 'medical':
n_medical = n_medical + 1
elif build_type[i] == 'critical':
n_critical = n_critical + 1
elif build_type[i] == 'school':
n_school = n_school + 1
utility = utility + build_util[i]
else:
if build_connected <= 0:
build_connect[i] = build_connect[i] + 1
n_build_disconnect = n_build_disconnect + 1
n_people_disconnect = n_people_disconnect + build_pop[i]
if build_type[i] == 'business':
n_business = n_business + 1
elif build_type[i] == 'medical':
n_medical = n_medical + 1
elif build_type[i] == 'critical':
n_critical = n_critical + 1
elif build_type[i] == 'school':
n_school = n_school + 1
utility = utility + build_util[i]
i = i+1
csvfile.close()
return [build_connect, n_build_disconnect, n_people_disconnect, n_business, n_medical, n_critical, n_school, utility]
##########################################################################################################
#getBuildingHistoricalRecoveryRawResults : get the raw results of building recovery
##########################################################################################################
def getBuildingHistoricalRecoveryRawResults(root_sim_path, build_pop, build_util, build_type, n_day):
n_sim = len(glob.glob1(root_sim_path,"sim_*"))
simul_name = glob.glob1(root_sim_path,"sim_*")
build_reco = []
n_build_reco = []
n_people_reco = []
n_people_med = []
n_people_mean = []
n_people_std = []
n_business_med = []
n_business_mean = []
n_business_std = []
n_medical_med = []
n_medical_mean = []
n_medical_std = []
n_critical_med = []
n_critical_mean = []
n_critical_std = []
n_school_med = []
n_school_mean = []
n_school_std = []
utility_med = []
utility_mean = []
utility_std = []
n_build_med = []
n_build_mean = []
n_build_std = []
    #Loop over days
for i in range(n_day):
#Initialize n_people_day and n_build_day
n_build_day = []
n_people_day = []
n_business_day = []
n_medical_day = []
n_critical_day = []
n_school_day = []
utility_day = []
        #Loop over simulations
for j in range(n_sim):
#Compile simulation - day name
folder_str = root_sim_path + simul_name[j]
#Check existence
if os.path.exists(folder_str + '/day_' + str(i)):
#Gather results
result_1d1s = getRawResultBuildingConnectivity(folder_str, 0, 'day_' + str(i), build_pop, build_util, build_type)
#Accumulate status of buildings
if not build_reco:
build_reco = result_1d1s[0]
else:
for k in range(len(build_reco)):
build_reco[k] = build_reco[k] + result_1d1s[0][k]
                #Get number of buildings disconnected
                n_build_day.append(result_1d1s[1][0])
                #Get number of people disconnected
                n_people_day.append(result_1d1s[2][0])
#Get number of business buildings disconnected
n_business_day.append(result_1d1s[3][0])
#Get number of medical buildings disconnected
n_medical_day.append(result_1d1s[4][0])
#Get number of critical buildings disconnected
n_critical_day.append(result_1d1s[5][0])
                #Get number of school buildings disconnected
n_school_day.append(result_1d1s[6][0])
                #Get lost utility
utility_day.append(result_1d1s[7][0])
else:
#Number of people and buildings disconnected == 0
n_people_day.append(0)
n_build_day.append(0)
n_business_day.append(0)
n_medical_day.append(0)
n_critical_day.append(0)
n_school_day.append(0)
utility_day.append(0)
        #Compute statistics for day i of the number of people and buildings disconnected
stat_people = getStatisticsNumberFailure(n_people_day)
stat_build = getStatisticsNumberFailure(n_build_day)
stat_business = getStatisticsNumberFailure(n_business_day)
stat_medical = getStatisticsNumberFailure(n_medical_day)
stat_critical = getStatisticsNumberFailure(n_critical_day)
stat_school = getStatisticsNumberFailure(n_school_day)
stat_utility = getStatisticsNumberFailure(utility_day)
#Append stats
n_people_med.append(stat_people[1])
n_people_mean.append(stat_people[0])
n_people_std.append(stat_people[2])
n_build_med.append(stat_build[1])
n_build_mean.append(stat_build[0])
n_build_std.append(stat_build[2])
n_business_med.append(stat_business[1])
n_business_mean.append(stat_business[0])
n_business_std.append(stat_business[2])
n_medical_med.append(stat_medical[1])
n_medical_mean.append(stat_medical[0])
n_medical_std.append(stat_medical[2])
n_critical_med.append(stat_critical[1])
n_critical_mean.append(stat_critical[0])
n_critical_std.append(stat_critical[2])
n_school_med.append(stat_school[1])
n_school_mean.append(stat_school[0])
n_school_std.append(stat_school[2])
utility_med.append(stat_utility[1])
utility_mean.append(stat_utility[0])
utility_std.append(stat_utility[2])
#Divide accumulated status of buildings by n_sim to get average disconnect time
build_reco[:] = [x/n_sim for x in build_reco]
#Build result list
n_people_reco = [n_people_mean, n_people_med, n_people_std]
n_build_reco = [n_build_mean, n_build_med, n_build_std]
n_business_reco = [n_business_mean, n_business_med, n_business_std]
n_medical_reco = [n_medical_mean, n_medical_med, n_medical_std]
n_critical_reco = [n_critical_mean, n_critical_med, n_critical_std]
n_school_reco = [n_school_mean, n_school_med, n_school_std]
utility_reco = [utility_mean, utility_med, utility_std]
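    #Returned layout: [0] per-building average disconnect time; [1]-[7] per-day
    #[mean, median, std] series for buildings, people, businesses, medical,
    #critical, schools and utility (in that order)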
return [build_reco, n_build_reco, n_people_reco, n_business_reco, n_medical_reco, n_critical_reco, n_school_reco, utility_reco]
##########################################################################################################
#getBuildingRecoveryRawResults : get the raw results of building recovery
##########################################################################################################
def getBuildingRecoveryRawResults(root_sim_path, build_pop, build_util, build_type, i_fx, n_day):
n_sim = len(glob.glob1(root_sim_path,"sim_*"))
simul_name = glob.glob1(root_sim_path,"sim_*")
build_reco = []
n_build_reco = []
n_people_reco = []
n_people_med = []
n_people_mean = []
n_people_std = []
n_business_med = []
n_business_mean = []
n_business_std = []
n_medical_med = []
n_medical_mean = []
n_medical_std = []
n_critical_med = []
n_critical_mean = []
n_critical_std = []
n_school_med = []
n_school_mean = []
n_school_std = []
utility_med = []
utility_mean = []
utility_std = []
n_build_med = []
n_build_mean = []
n_build_std = []
    #Loop over days
for i in range(n_day):
#Initialize n_people_day and n_build_day
n_build_day = []
n_people_day = []
n_business_day = []
n_medical_day = []
n_critical_day = []
n_school_day = []
utility_day = []
        #Loop over simulations
for j in range(n_sim):
#Compile simulation - day name
folder_str = root_sim_path + simul_name[j] + '/recovery'
            #Check existence
if os.path.exists(folder_str + '/day_' + str(i)):
#Gather results
result_1d1s = getRawResultBuildingConnectivity(folder_str, 0, 'day_' + str(i), build_pop, build_util, build_type)
#Accumulate status of buildings
if not build_reco:
build_reco = result_1d1s[0]
else:
for k in range(len(build_reco)):
build_reco[k] = build_reco[k] + result_1d1s[0][k]
                #Get number of buildings disconnected
                n_build_day.append(result_1d1s[1][0])
                #Get number of people disconnected
                n_people_day.append(result_1d1s[2][0])
#Get number of business buildings disconnected
n_business_day.append(result_1d1s[3][0])
#Get number of medical buildings disconnected
n_medical_day.append(result_1d1s[4][0])
#Get number of critical buildings disconnected
n_critical_day.append(result_1d1s[5][0])
                #Get number of school buildings disconnected
n_school_day.append(result_1d1s[6][0])
                #Get lost utility
utility_day.append(result_1d1s[7][0])
else:
#Number of people and buildings disconnected == 0
n_people_day.append(0)
n_build_day.append(0)
n_business_day.append(0)
n_medical_day.append(0)
n_critical_day.append(0)
n_school_day.append(0)
utility_day.append(0)
        #Compute statistics for day i of the number of people and buildings disconnected
stat_people = getStatisticsNumberFailure(n_people_day)
stat_build = getStatisticsNumberFailure(n_build_day)
stat_business = getStatisticsNumberFailure(n_business_day)
stat_medical = getStatisticsNumberFailure(n_medical_day)
stat_critical = getStatisticsNumberFailure(n_critical_day)
stat_school = getStatisticsNumberFailure(n_school_day)
stat_utility = getStatisticsNumberFailure(utility_day)
#Append stats
n_people_med.append(stat_people[1])
n_people_mean.append(stat_people[0])
n_people_std.append(stat_people[2])
n_build_med.append(stat_build[1])
n_build_mean.append(stat_build[0])
n_build_std.append(stat_build[2])
n_business_med.append(stat_business[1])
n_business_mean.append(stat_business[0])
n_business_std.append(stat_business[2])
n_medical_med.append(stat_medical[1])
n_medical_mean.append(stat_medical[0])
n_medical_std.append(stat_medical[2])
n_critical_med.append(stat_critical[1])
n_critical_mean.append(stat_critical[0])
n_critical_std.append(stat_critical[2])
n_school_med.append(stat_school[1])
n_school_mean.append(stat_school[0])
n_school_std.append(stat_school[2])
utility_med.append(stat_utility[1])
utility_mean.append(stat_utility[0])
utility_std.append(stat_utility[2])
#Divide accumulated status of buildings by n_sim to get average disconnect time
build_reco[:] = [x/n_sim for x in build_reco]
#Build result list
n_people_reco = [n_people_mean, n_people_med, n_people_std]
n_build_reco = [n_build_mean, n_build_med, n_build_std]
n_business_reco = [n_business_mean, n_business_med, n_business_std]
n_medical_reco = [n_medical_mean, n_medical_med, n_medical_std]
n_critical_reco = [n_critical_mean, n_critical_med, n_critical_std]
n_school_reco = [n_school_mean, n_school_med, n_school_std]
utility_reco = [utility_mean, utility_med, utility_std]
return [build_reco, n_build_reco, n_people_reco, n_business_reco, n_medical_reco, n_critical_reco, n_school_reco, utility_reco]
##########################################################################################################
#getNumberBuildingRecoveryResults : get the results of building recovery
##########################################################################################################
def getNumberBuildingRecoveryResults(raw_res, n_day):
mean = []
std = []
med = []
for i in range(n_day):
day = []
for j in range(len(raw_res[0])):
if i < len(raw_res[0][j]):
day.append(raw_res[0][j][i])
else:
day.append(0)
mean.append(np.mean(day, dtype=np.float64))
med.append(st.median(day))
std.append(np.std(day, dtype=np.float64))
return [mean, med, std]
##########################################################################################################
#getNumberPeopleRecoveryResults : get the results of people recovery
##########################################################################################################
def getNumberPeopleRecoveryResults(raw_res, n_day):
mean = []
std = []
med = []
for i in range(n_day):
day = []
for j in range(len(raw_res[2])):
if i < len(raw_res[2][j]):
day.append(raw_res[2][j][i])
else:
day.append(0)
mean.append( | np.mean(day, dtype=np.float64) | numpy.mean |
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
# #########################################################################
# Copyright (c) 2015, UChicago Argonne, LLC. All rights reserved. #
# #
# Copyright 2015. UChicago Argonne, LLC. This software was produced #
# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
# U.S. Department of Energy. The U.S. Government has rights to use, #
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
# modified to produce derivative works, such modified software should #
# be clearly marked, so as not to confuse it with the version available #
# from ANL. #
# #
# Additionally, redistribution and use in source and binary forms, with #
# or without modification, are permitted provided that the following #
# conditions are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of UChicago Argonne, LLC, Argonne National #
# Laboratory, ANL, the U.S. Government, nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #########################################################################
'''
Author: <NAME>
This script uses the technique described by Xianbo Shi in
https://doi.org/10.1364/OE.22.014041
'''
#There is a problem in the scaling of z for tilted gratings
# %%
import numpy as np
import matplotlib as mpl
mpl.rcParams['image.interpolation']='none'
import matplotlib.pyplot as plt
import itertools
import scipy.constants as constants
import skimage.transform
hc = constants.value('inverse meter-electron volt relationship') # hc
def _checkerboard(shape,transmission=1.0, phase=np.pi):
checkerboard = np.ones(shape)*0j
checkerboard[0:shape[0] // 2,
0:shape[1] // 2] = transmission*np.exp(1j*phase)
checkerboard[shape[0] // 2:shape[0],
shape[1] // 2:shape[1]] = transmission*np.exp(1j*phase)
return checkerboard
def _mesh(shape, transmission=1.0, phase=np.pi, inverseDutyCycle=2):
mesh = np.ones(shape)*0j
mesh[0:shape[0] // inverseDutyCycle,
0:shape[1] // inverseDutyCycle] = transmission*np.exp(1j*phase)
return mesh
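# Note: _checkerboard and _mesh above return complex transmission functions
# t(x, y) = |t|*exp(1j*phase); only the explicitly assigned blocks receive the value
# transmission*exp(1j*phase), while the remaining area keeps the 0j from np.ones(shape)*0j.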
# %% create grating
periodX = periodY = 4.8e-6
Lx = Ly = periodX
phase_gr = np.pi/2 # TODO:
global wavelength
wavelength = hc/8e3
npoints = 100
if npoints % 2 == 0:
npoints += 1
yy, xx = np.mgrid[0:Lx:npoints*1j, 0:Ly:npoints*1j]
grProfile = _checkerboard(xx.shape,
transmission=1.00,
phase=phase_gr)
#grProfile = _mesh(xx.shape,
# transmission=1.00,
# phase=phase_gr,
# inverseDutyCycle=2)
# rotate CB 45 deg
#grProfile = np.concatenate((grProfile, grProfile), axis=0)
#grProfile = np.concatenate((grProfile, grProfile), axis=1)
#grProfile.real = skimage.transform.rotate(grProfile.real, 45, mode='wrap')
#grProfile.imag = skimage.transform.rotate(grProfile.imag, 45, mode='wrap')
#
#
#grProfile = np.roll(np.roll(grProfile, 20, axis=1), 20, axis=0)
#
#grProfile = grProfile[int(npoints*(1-np.sqrt(2)/4)):int(npoints*(1+np.sqrt(2)/4)),
# int(npoints*(1-np.sqrt(2)/4)):int(npoints*(1+np.sqrt(2)/4))]
#
#periodX = periodY = 4.8e-6*np.sqrt(2)/2
#Lx = Ly = periodX
#
#yy, xx = np.mgrid[0:Lx:npoints*1j, 0:Ly:npoints*1j]
#
#grProfile = np.concatenate((grProfile, grProfile), axis=0)
#grProfile = np.concatenate((grProfile, grProfile), axis=1)
#
#Lx = Ly = 4*periodX
#yy, xx = np.mgrid[0:Lx:npoints*1j, 0:Ly:npoints*1j]
t_distance = 2*periodX**2/wavelength
dist4all = t_distance/2 # TODO:
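# Illustrative sanity check (numbers are not from the original script): at 8 keV the
# wavelength is hc/8e3 ~ 1.55e-10 m, so t_distance = 2*(4.8e-6)**2/1.55e-10 ~ 0.30 m
# and dist4all ~ 0.15 m.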
titleStr = 'CB, {:.2f}'.format(phase_gr/np.pi) + r'$\times \pi$, '
# %% rebinning detector
#
#import scipy.ndimage
#
#
#grProfile_average_i = scipy.ndimage.uniform_filter(np.imag(grProfile), size=12,
# output=None, mode='wrap',
# origin=0)
#
#grProfile_average_r = scipy.ndimage.uniform_filter(np.real(grProfile), size=12,
# output=None, mode='wrap',
# origin=0)*144
#
#grProfile = grProfile_average_r[::12,::12] + 1j*grProfile_average_i[::12,::12]
#
#npoints = grProfile.shape[0]
#
#yy, xx = np.mgrid[0:Lx:npoints*1j, 0:Ly:npoints*1j]
# %% plot grating
def _extent(xx, yy, multFactor):
return [xx[0, 0]*multFactor, xx[-1, -1]*multFactor,
yy[0, 0]*multFactor, yy[-1, -1]*multFactor]
fig = plt.figure()
ax1 = plt.subplot(121)
ax1.imshow(np.real(grProfile), vmax=1, vmin=-1, extent=_extent(xx, yy, 1/periodX))
ax1.set_title(titleStr + 'Real part')
ax2 = plt.subplot(122)
ax2.imshow(np.imag(grProfile), vmax=1, vmin=-1, extent=_extent(xx, yy, 1/periodX))
ax2.set_title(titleStr + 'Imaginary part')
plt.show(block=True)
# %% Fourier Optics propagation
import sys
sys.path.append('/home/grizolli/workspace/pythonWorkspace/wgTools')
import myFourierLib as wgfo
grProfile2 = grProfile
dist4fop = dist4all
u2_Summerfield = wgfo.propTF_RayleighSommerfeld(grProfile2,
xx[-1, -1] - xx[0,0],
yy[-1, -1] - yy[0,0],
wavelength, dist4fop)
# %% plot Fourier Optics propagation
plt.figure()
plt.imshow( | np.abs(u2_Summerfield) | numpy.abs |
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
from pandas.api import types as ptypes
import cudf
from cudf.api import types as types
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, True),
(pd.CategoricalDtype, True),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), True),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, True),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), True),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), True),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
# TODO: Currently creating an empty Series of list type ignores the
# provided type and instead makes a float64 Series.
(cudf.Series([[1, 2], [3, 4, 5]]), False),
# TODO: Currently creating an empty Series of struct type fails because
# it uses a numpy utility that doesn't understand StructDtype.
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_categorical_dtype(obj, expect):
assert types.is_categorical_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, True),
(int, True),
(float, True),
(complex, True),
(str, False),
(object, False),
# NumPy types.
(np.bool_, True),
(np.int_, True),
(np.float64, True),
(np.complex128, True),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), True),
(np.int_(), True),
(np.float64(), True),
(np.complex128(), True),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), True),
(np.dtype("int"), True),
(np.dtype("float"), True),
(np.dtype("complex"), True),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), True),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), True),
(np.array([], dtype=np.complex128), True),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), True),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), True),
(pd.Series(dtype="complex"), True),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, True),
(cudf.Decimal64Dtype, True),
(cudf.Decimal32Dtype, True),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), True),
(cudf.Decimal64Dtype(5, 2), True),
(cudf.Decimal32Dtype(5, 2), True),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), True),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), True),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), True),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_numeric_dtype(obj, expect):
assert types.is_numeric_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, True),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, True),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), True),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_integer_dtype(obj, expect):
assert types.is_integer_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), True),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_integer(obj, expect):
assert types.is_integer(obj) == expect
# TODO: Temporarily ignoring all cases of "object" until we decide what to do.
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, True),
# (object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, True),
(np.unicode_, True),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), True),
(np.unicode_(), True),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), True),
(np.dtype("unicode"), True),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
# (np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), True),
(np.array([], dtype=np.unicode_), True),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
# (np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), True),
(pd.Series(dtype="unicode"), True),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
# (pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), True),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_string_dtype(obj, expect):
assert types.is_string_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, True),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), True),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), True),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), True),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), True),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), True),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_datetime_dtype(obj, expect):
assert types.is_datetime_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, True),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), True),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), True),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_list_dtype(obj, expect):
assert types.is_list_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
( | np.str_() | numpy.str_ |
__all__ = ["DistinctEvaluator"]
from dataclasses import dataclass
from typing import Sequence, Optional, List, Dict
import torch
import numpy as np
from nltk import ngrams
import utils
from utils import TensorMap
from datasets import VocabSet
from ...evaluator import DialogEvaluator
@dataclass
class DistinctEvaluator(DialogEvaluator):
vocabs: VocabSet
ngrams: Sequence[int] = frozenset({1, 2})
_values: Dict[int, List[float]] = utils.private_field(default_factory=dict)
_values_spkr: Dict[str, Dict[int, List[float]]] = \
utils.private_field(default_factory=dict)
def reset(self):
self._values.clear()
self._values_spkr.clear()
@staticmethod
def compute_distinct(tokens, n):
if len(tokens) == 0:
return 0.0
vocab = set(ngrams(tokens, n))
return len(vocab) / len(tokens)
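    # Worked example (illustrative): for tokens ["a", "b", "a", "b"] and n=2 the
    # bigrams are ("a","b"), ("b","a"), ("a","b"); 2 unique / 4 tokens = 0.5.
    # Note the denominator is the token count, not the n-gram count.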
def compute(self, samples: Sequence, spkr=None):
return {i: [self.compute_distinct(turn.text, i)
for sample in samples for turn in sample.output.turns
if spkr is None or turn.speaker == spkr]
for i in self.ngrams}
def update(self, samples: Sequence) -> Optional[TensorMap]:
res = self.compute(samples)
for i, values in res.items():
if i not in self._values:
self._values[i] = list()
self._values[i].extend(values)
for spkr in self.vocabs.speaker.f2i:
if spkr == "<unk>":
continue
if spkr not in self._values_spkr:
self._values_spkr[spkr] = dict()
res = self.compute(samples, spkr)
for i, values in res.items():
if i not in self._values_spkr[spkr]:
self._values_spkr[spkr][i] = list()
self._values_spkr[spkr][i].extend(values)
return
def get(self) -> TensorMap:
stats = {f"dist-{i}": torch.tensor( | np.mean(vs) | numpy.mean |
import cv2
import numpy as np
import argparse
import os
'''
Script to apply color transfer from a source reference image to a target input image
Theory:
https://www.scss.tcd.ie/Rozenn.Dahyot/pdf/pitie08bookchapter.pdf
https://www.cse.cuhk.edu.hk/leojia/all_final_papers/color_cvpr05.PDF
http://www.inf.ed.ac.uk/teaching/courses/vis/lecture_notes/lecture6.pdf
'''
def read_image(image):
if isinstance(image, str):
# read images as BGR
return cv2.imread(image, cv2.IMREAD_COLOR)
elif isinstance(image, np.ndarray):
# use np image
return image
#elif pil .Image...:
else:
        raise ValueError("Unexpected image type. Only a file path or a np.ndarray is supported")
def scale_img(source=None, target=None):
"""
Scale a source image to the same size as a target image
"""
#raise ValueError("source and target shapes must be equal")
#expand source to target size
width = int(target.shape[1])
height = int(target.shape[0])
dim = (width, height)
return cv2.resize(source, dim, interpolation = cv2.INTER_AREA)
def expand_img(image=None):
# expand dimensions if grayscale
if len(image.shape) < 3:
return image[:,:,np.newaxis]
else:
return image
def _imstats(image, calc='direct'):
"""
Calculate mean and standard deviation of an image along each channel.
    Computing the statistics per channel or on the full array gives only a
    very small difference and does not change the results.
Parameters:
-------
image: NumPy array OpenCV image
    calc: how to perform the calculation (differences are minimal,
        only included for completeness)
Returns:
-------
Mean (mu) and standard deviations (sigma)
"""
if calc == 'reshape':
# reshape image from (H x W x 3) to (3 x HW) for vectorized operations
image = image.astype("float32").reshape(-1, 3).T
# calculate mean
mu = np.mean(image, axis=1, keepdims=False)
# calculate standard deviation
sigma = np.std(image, axis=1, keepdims=False)
elif calc == 'direct':
# calculate mean
mu = np.mean(image, axis=(0, 1), keepdims=True)
# calculate standard deviation
sigma = np.std(image, axis=(0, 1), keepdims=True)
elif calc == 'split':
# compute the mean and standard deviation of each channel independently
(l, a, b) = cv2.split(image)
(lMean, lStd) = (l.mean(), l.std())
(aMean, aStd) = (a.mean(), a.std())
(bMean, bStd) = (b.mean(), b.std())
mu = [lMean, aMean, bMean]
sigma = [lStd, aStd, bStd]
# return the color statistics
return (mu, sigma)
def _scale_array(arr, clip=True, new_range=(0, 255)):
"""
Trim NumPy array values to be in [0, 255] range with option of
clipping or scaling.
Parameters:
-------
arr: array to be trimmed to new_range (default: [0, 255] range)
clip: if True, array will be limited with np.clip.
if False then input array will be min-max scaled to
range [max([arr.min(), 0]), min([arr.max(), 255])]
by default
new_range: range to be used for scaling
Returns:
-------
NumPy array that has been scaled to be in [0, 255] range
"""
if clip:
# scaled = arr.copy()
# scaled[scaled < 0] = 0
# scaled[scaled > 255] = 255
scaled = np.clip(arr, new_range[0], new_range[1])
# scaled = np.clip(arr, 0, 255)
else:
        scale_range = (max([arr.min(), new_range[0]]), min([arr.max(), new_range[1]]))
        # scale into the clamped range computed above (matches the documented behavior)
        scaled = _min_max_scale(arr, new_range=scale_range)
return scaled
def _min_max_scale(arr, new_range=(0, 255)):
"""
Perform min-max scaling to a NumPy array
Parameters:
-------
arr: NumPy array to be scaled to [new_min, new_max] range
new_range: tuple of form (min, max) specifying range of
transformed array
Returns:
-------
NumPy array that has been scaled to be in
[new_range[0], new_range[1]] range
"""
# get array's current min and max
mn = arr.min()
mx = arr.max()
# check if scaling needs to be done to be in new_range
if mn < new_range[0] or mx > new_range[1]:
# perform min-max scaling
scaled = (new_range[1] - new_range[0]) * (arr - mn) / (mx - mn) + new_range[0]
else:
# return array if already in range
scaled = arr
return scaled
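# Worked example (illustrative): with new_range=(0, 255), an array spanning
# [-10, 300] is mapped linearly so that -10 -> 0 and 300 -> 255.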
def im2double(im):
if im.dtype == 'uint8':
out = im.astype('float') / 255
elif im.dtype == 'uint16':
out = im.astype('float') / 65535
elif im.dtype == 'float':
out = im
else:
assert False
out = np.clip(out, 0, 1)
return out
def bgr2ycbcr(img, only_y=True):
'''bgr version of matlab rgb2ycbcr
    The Python OpenCV (cv2) conversion cv2.COLOR_BGR2YCrCb uses
    different parameters than MATLAB's color conversion.
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# convert
if only_y:
# mat = [24.966, 128.553, 65.481])
# rlt = np.dot(img_ , mat)/ 255.0 + 16.0
rlt = np.dot(img_ , [24.966, 128.553, 65.481]) / 255.0 + 16.0
else:
# mat = np.array([[24.966, 128.553, 65.481],[112, -74.203, -37.797], [-18.214, -93.786, 112.0]])
# mat = mat.T/255.0
# offset = np.array([[[16, 128, 128]]])
# rlt = np.dot(img_, mat) + offset
# rlt = np.clip(rlt, 0, 255)
## rlt = np.rint(rlt).astype('uint8')
rlt = np.matmul(img_ , [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
[65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def ycbcr2rgb_(img):
'''same as matlab ycbcr2rgb
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# convert
rlt = np.matmul(img_ , [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
[0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
# xform = np.array([[1, 0, 1.402], [1, -0.34414, -.71414], [1, 1.772, 0]])
# img_[:, :, [1, 2]] -= 128
# rlt = img_.dot(xform.T)
np.putmask(rlt, rlt > 255, 255)
np.putmask(rlt, rlt < 0, 0)
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def ycbcr2rgb(img, only_y=True):
'''
bgr version of matlab ycbcr2rgb
    The Python OpenCV (cv2) conversion cv2.COLOR_YCrCb2BGR uses
    different parameters than MATLAB's color conversion.
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
# convert
mat = np.array([[24.966, 128.553, 65.481],[112, -74.203, -37.797], [-18.214, -93.786, 112.0]])
mat = np.linalg.inv(mat.T) * 255
offset = np.array([[[16, 128, 128]]])
rlt = np.dot((img_ - offset), mat)
rlt = np.clip(rlt, 0, 255)
## rlt = np.rint(rlt).astype('uint8')
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def replace_channels(source=None, target=None, ycbcr = True, hsv = False, transfersv = False):
"""
Extracts channels from source img and replaces the same channels
from target, then returns the converted image.
Args:
target: bgr numpy array of input image.
source: bgr numpy array of reference image.
ycbcr: replace the color channels (Cb and Cr)
hsv: replace the hue channel
transfersv: if using hsv option, can also transfer the
mean/std of the S and V channels
Returns:
target: transfered bgr numpy array of input image.
"""
target = read_image(target)
source = read_image(source)
if source.shape != target.shape:
source = scale_img(source, target)
if ycbcr:
# ycbcr_in = bgr2ycbcr(target, only_y=False)
ycbcr_in = cv2.cvtColor(target, cv2.COLOR_BGR2YCR_CB)
# if keep_y:
y_in, _, _ = cv2.split(ycbcr_in)
# ycbcr_ref = bgr2ycbcr(source, only_y=False)
ycbcr_ref = cv2.cvtColor(source, cv2.COLOR_BGR2YCR_CB)
# if histo_match:
# ycbcr_ref = histogram_matching(reference=ycbcr_ref, image=ycbcr_in)
# ycbcr_out = stats_transfer(target=ycbcr_in, source=ycbcr_ref)
# if keep_y:
_, cb_out, cr_out = cv2.split(ycbcr_ref)
ycbcr_out = cv2.merge([y_in, cb_out, cr_out])
# target = ycbcr2rgb(ycbcr_out)
target = cv2.cvtColor(ycbcr_out, cv2.COLOR_YCR_CB2BGR)
if hsv:
hsv_in = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)
_, s_in, v_in = cv2.split(hsv_in)
# h_in, s_in, v_in = cv2.split(hsv_in)
hsv_ref = cv2.cvtColor(source, cv2.COLOR_BGR2HSV)
h_out, _, _ = cv2.split(hsv_ref)
if transfersv:
hsv_out = stats_transfer(target=hsv_in, source=hsv_ref)
_, s_out, v_out = cv2.split(hsv_out)
hsv_out = cv2.merge([h_out, s_out, v_out])
else:
hsv_out = cv2.merge([h_out, s_in, v_in])
target = cv2.cvtColor(hsv_out, cv2.COLOR_HSV2BGR)
return target.astype('uint8')
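# Hedged usage sketch (file names are illustrative, not part of the original script):
# out = replace_channels(source='reference.png', target='input.png', ycbcr=True)
# keeps the target's luma (Y) and takes Cb/Cr from the (resized) reference.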
def hue_transfer(source=None, target=None):
""" Extracts hue from source img and applies mean and
std transfer from target, then returns image with converted y.
Args:
target: bgr numpy array of input image.
source: bgr numpy array of reference image.
Returns:
img_arr_out: transfered bgr numpy array of input image.
"""
target = read_image(target)
source = read_image(source)
hsv_in = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)
_, s_in, v_in = cv2.split(hsv_in)
# h_in, s_in, v_in = cv2.split(hsv_in)
hsv_ref = cv2.cvtColor(source, cv2.COLOR_BGR2HSV)
hsv_out = stats_transfer(target=hsv_in, source=hsv_ref)
h_out, _, _ = cv2.split(hsv_out)
# h_out, s_out, v_out = cv2.split(hsv_out)
hsv_out = cv2.merge([h_out, s_in, v_in])
# hsv_out = cv2.merge([h_in, s_out, v_out])
img_arr_out = cv2.cvtColor(hsv_out, cv2.COLOR_HSV2BGR)
return img_arr_out.astype('uint8')
def luminance_transfer(source=None, target=None):
""" Extracts luminance from source img and applies mean and
std transfer from target, then returns image with converted y.
Args:
target: bgr numpy array of input image.
source: bgr numpy array of reference image.
Returns:
img_arr_out: transfered bgr numpy array of input image.
"""
target = read_image(target)
source = read_image(source)
# ycbcr_in = bgr2ycbcr(target, only_y=False)
ycbcr_in = cv2.cvtColor(target, cv2.COLOR_BGR2YCR_CB)
_, cb_in, cr_in = cv2.split(ycbcr_in)
# ycbcr_ref = bgr2ycbcr(source, only_y=False)
ycbcr_ref = cv2.cvtColor(source, cv2.COLOR_BGR2YCR_CB)
ycbcr_out = stats_transfer(target=ycbcr_in, source=ycbcr_ref)
y_out, _, _ = cv2.split(ycbcr_out)
ycbcr_out = cv2.merge([y_out, cb_in, cr_in])
# img_arr_out = ycbcr2rgb(ycbcr_out)
img_arr_out = cv2.cvtColor(ycbcr_out, cv2.COLOR_YCR_CB2BGR)
return img_arr_out.astype('uint8')
def ycbcr_transfer(source=None, target=None, keep_y=True, histo_match=False):
""" Convert img from rgb space to ycbcr space, apply mean and
std transfer, then convert back.
Args:
target: bgr numpy array of input image.
source: bgr numpy array of reference image.
keep_y: option to keep the original target y channel unchanged.
histo_match: option to do histogram matching before transfering the
image statistics (if combined with keep_y, only color channels
are modified).
Returns:
img_arr_out: transfered bgr numpy array of input image.
"""
target = read_image(target)
source = read_image(source)
# ycbcr_in = bgr2ycbcr(target, only_y=False)
ycbcr_in = cv2.cvtColor(target, cv2.COLOR_BGR2YCR_CB)
if keep_y:
y_in, _, _ = cv2.split(ycbcr_in)
# ycbcr_ref = bgr2ycbcr(source, only_y=False)
ycbcr_ref = cv2.cvtColor(source, cv2.COLOR_BGR2YCR_CB)
if histo_match:
ycbcr_ref = histogram_matching(reference=ycbcr_ref, image=ycbcr_in)
ycbcr_out = stats_transfer(target=ycbcr_in, source=ycbcr_ref)
if keep_y:
_, cb_out, cr_out = cv2.split(ycbcr_out)
ycbcr_out = cv2.merge([y_in, cb_out, cr_out])
# img_arr_out = ycbcr2rgb(ycbcr_out)
img_arr_out = cv2.cvtColor(ycbcr_out, cv2.COLOR_YCR_CB2BGR)
return img_arr_out.astype('uint8')
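# Hedged usage sketch (file names are illustrative, not part of the original script):
# out = ycbcr_transfer(source='reference.png', target='input.png',
#                      keep_y=True, histo_match=True)
# with keep_y=True only the Cb/Cr statistics are adapted, preserving the target's luminance.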
def lab_transfer(source=None, target=None):
""" Convert img from rgb space to lab space, apply mean and
std transfer, then convert back.
Args:
target: bgr numpy array of input image.
source: bgr numpy array of reference image.
Returns:
img_arr_out: transfered bgr numpy array of input image.
"""
target = read_image(target)
source = read_image(source)
lab_in = cv2.cvtColor(target, cv2.COLOR_BGR2LAB)
lab_ref = cv2.cvtColor(source, cv2.COLOR_BGR2LAB)
lab_out = stats_transfer(target=lab_in, source=lab_ref)
img_arr_out = cv2.cvtColor(lab_out, cv2.COLOR_LAB2BGR)
return img_arr_out.astype('uint8')
def stats_transfer(source=None, target=None):
""" Adapt target's (mean, std) to source's (mean, std).
img_o = (img_i - mean(img_i)) / std(img_i) * std(img_r) + mean(img_r).
Args:
target: bgr numpy array of input image.
source: bgr numpy array of reference image.
Returns:
        img_arr_out: transferred bgr numpy array of input image.
"""
target = read_image(target)
source = read_image(source)
mean_in, std_in = _imstats(target)
mean_ref, std_ref = _imstats(source)
img_arr_out = (target - mean_in) / std_in * std_ref + mean_ref
# clip
img_arr_out = _scale_array(img_arr_out)
return img_arr_out.astype('uint8')
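# --- Example (illustrative sketch, not part of the original module) ---
# Shows how the transfer helpers above can be combined. The file names are
# placeholders, and read_image() is assumed to accept file paths.
def _example_color_transfer():
    graded_lab = lab_transfer(source='reference.png', target='input.png')
    graded_y = luminance_transfer(source='reference.png', target='input.png')
    cv2.imwrite('out_lab.png', graded_lab)
    cv2.imwrite('out_y.png', graded_y)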
def _match_cumulative_cdf(source, template):
"""
Return modified source array so that the cumulative density function of
its values matches the cumulative density function of the template.
"""
src_values, src_unique_indices, src_counts = np.unique(source.ravel(),
return_inverse=True,
return_counts=True)
tmpl_values, tmpl_counts = np.unique(template.ravel(), return_counts=True)
# calculate normalized quantiles for each array
src_quantiles = np.cumsum(src_counts) / source.size
tmpl_quantiles = np.cumsum(tmpl_counts) / template.size
# use linear interpolation of cdf to find new pixel values = interp(image, bins, cdf)
interp_a_values = np.interp(src_quantiles, tmpl_quantiles, tmpl_values)
# reshape to original image shape and return
return interp_a_values[src_unique_indices].reshape(source.shape)
def histogram_matching(reference=None, image=None, clip=None):
"""
Adjust an image so that its cumulative histogram matches that of another.
The adjustment is applied separately for each channel.
(https://en.wikipedia.org/wiki/Histogram_matching)
Parameters
----------
image : ndarray
Input image. Can be gray-scale or in color.
reference : ndarray
Image to match histogram of. Must have the same number of channels as
image.
Returns
-------
matched : ndarray
Transformed input image.
Raises
------
ValueError
Thrown when the number of channels in the input image and the reference
differ.
References
----------
.. [1] http://paulbourke.net/miscellaneous/equalisation/
.. [2] https://github.com/scikit-image/scikit-image/blob/master/skimage/exposure/histogram_matching.py
"""
image = read_image(image) # target
reference = read_image(reference) # ref
# expand dimensions if grayscale
image = expand_img(image)
reference = expand_img(reference)
if image.ndim != reference.ndim:
raise ValueError('Image and reference must have the same number '
'of channels.')
if image.shape[-1] != reference.shape[-1]:
raise ValueError('Number of channels in the input image and '
'reference image must match!')
matched = np.empty(image.shape, dtype=image.dtype)
for channel in range(image.shape[-1]):
matched_channel = _match_cumulative_cdf(image[..., channel],
reference[..., channel])
matched[..., channel] = matched_channel
if clip:
matched = _scale_array(matched, clip=clip)
return matched.astype("uint8")
def SOTransfer(source, target, steps=10, batch_size=5, reg_sigmaXY=16.0, reg_sigmaV=5.0, clip=False):
"""
Color Transform via Sliced Optimal Transfer, ported by @iperov
https://dcoeurjo.github.io/OTColorTransfer
source - any float range any channel image
target - any float range any channel image, same shape as src
steps - number of solver steps
batch_size - solver batch size
reg_sigmaXY - apply regularization and sigmaXY of filter, otherwise set to 0.0
reg_sigmaV - sigmaV of filter
    returns     - transferred image, scaled/clipped and cast to uint8
"""
source = read_image(source).astype("float32")
target = read_image(target).astype("float32")
if not np.issubdtype(source.dtype, np.floating):
raise ValueError("source value must be float")
if not np.issubdtype(target.dtype, np.floating):
raise ValueError("target value must be float")
# expand dimensions if grayscale
target = expand_img(image=target)
source = expand_img(image=source)
#expand source to target size if smaller
if source.shape != target.shape:
source = scale_img(source, target)
target_dtype = target.dtype
h,w,c = target.shape
new_target = target.copy()
for step in range (steps):
advect = np.zeros ((h*w,c), dtype=target_dtype)
for batch in range (batch_size):
dir = np.random.normal(size=c).astype(target_dtype)
dir /= np.linalg.norm(dir)
projsource = np.sum(new_target*dir, axis=-1).reshape((h*w))
projtarget = np.sum(source*dir, axis=-1).reshape((h*w))
idSource = np.argsort(projsource)
idTarget = np.argsort(projtarget)
a = projtarget[idTarget]-projsource[idSource]
for i_c in range(c):
advect[idSource,i_c] += a * dir[i_c]
new_target += advect.reshape((h,w,c)) / batch_size
new_target = _scale_array(new_target, clip=clip)
if reg_sigmaXY != 0.0:
target_diff = new_target-target
new_target = target + cv2.bilateralFilter (target_diff, 0, reg_sigmaV, reg_sigmaXY)
#new_target = _scale_array(new_target, clip=clip)
return new_target.astype("uint8")
class Regrain:
def __init__(self, smoothness=1):
'''
Regraining post-process to match color of resulting image and
gradient of the source image.
Automated colour grading using colour distribution transfer.
<NAME> , <NAME> and <NAME> (2007) Computer Vision and Image
Understanding.
https://github.com/frcs/colour-transfer/blob/master/regrain.m
Parameters:
smoothness (default=1, smoothness>=0): sets the fidelity of the
original gradient field. e.g. smoothness = 0 implies resulting
image = graded image.
'''
self.nbits = [4, 16, 32, 64, 64, 64]
self.smoothness = smoothness
self.level = 0
# self.eps = 2.2204e-16
def regrain(self, source=None, target=None):
'''
Keep gradient of target and color of source.
https://github.com/frcs/colour-transfer/blob/master/regrain.m
Resulting image = regrain(I_original, I_graded, [self.smoothness])
'''
source = read_image(source) # ref
target = read_image(target) # target
#expand source to target size if smaller
if source.shape != target.shape:
source = scale_img(source, target)
target = target / 255.
source = source / 255.
img_arr_out = np.copy(target)
img_arr_out = self.regrain_rec(img_arr_out, target, source, self.nbits, self.level)
# clip
img_arr_out = _scale_array(img_arr_out, new_range=(0,1))
img_arr_out = (255. * img_arr_out).astype('uint8')
return img_arr_out
def regrain_rec(self, img_arr_out, target, source, nbits, level):
'''
Direct translation of matlab code.
https://github.com/frcs/colour-transfer/blob/master/regrain.m
'''
[h, w, _] = target.shape
h2 = (h + 1) // 2
w2 = (w + 1) // 2
if len(nbits) > 1 and h2 > 20 and w2 > 20:
#Note: could use matlab-like bilinear imresize instead, cv2 has no antialias
resize_arr_in = cv2.resize(target, (w2, h2), interpolation=cv2.INTER_LINEAR)
resize_arr_col = cv2.resize(source, (w2, h2), interpolation=cv2.INTER_LINEAR)
resize_arr_out = cv2.resize(img_arr_out, (w2, h2), interpolation=cv2.INTER_LINEAR)
resize_arr_out = self.regrain_rec(resize_arr_out, resize_arr_in, resize_arr_col, nbits[1:], level+1)
img_arr_out = cv2.resize(resize_arr_out, (w, h), interpolation=cv2.INTER_LINEAR)
img_arr_out = self.solve(img_arr_out, target, source, nbits[0], level)
return img_arr_out
def solve(self, img_arr_out, target, source, nbit, level, eps=1e-6):
'''
Direct translation of matlab code.
https://github.com/frcs/colour-transfer/blob/master/regrain.m
'''
[width, height, c] = target.shape
first_pad_0 = lambda arr : np.concatenate((arr[:1, :], arr[:-1, :]), axis=0)
first_pad_1 = lambda arr : np.concatenate((arr[:, :1], arr[:, :-1]), axis=1)
last_pad_0 = lambda arr : np.concatenate((arr[1:, :], arr[-1:, :]), axis=0)
last_pad_1 = lambda arr : np.concatenate((arr[:, 1:], arr[:, -1:]), axis=1)
delta_x= last_pad_1(target) - first_pad_1(target)
delta_y = last_pad_0(target) - first_pad_0(target)
delta = np.sqrt((delta_x**2 + delta_y**2).sum(axis=2, keepdims=True))
psi = 256*delta/5
psi[psi > 1] = 1
phi = 30. * 2**(-level) / (1 + 10*delta/self.smoothness)
phi1 = (last_pad_1(phi) + phi) / 2
phi2 = (last_pad_0(phi) + phi) / 2
phi3 = (first_pad_1(phi) + phi) / 2
phi4 = (first_pad_0(phi) + phi) / 2
rho = 1/5.
for i in range(nbit):
den = psi + phi1 + phi2 + phi3 + phi4
num = (np.tile(psi, [1, 1, c])*source
+ np.tile(phi1, [1, 1, c])*(last_pad_1(img_arr_out) - last_pad_1(target) + target)
+ np.tile(phi2, [1, 1, c])*(last_pad_0(img_arr_out) - last_pad_0(target) + target)
+ | np.tile(phi3, [1, 1, c]) | numpy.tile |
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
from . import images
from gym import Env, spaces
from time import time
import numpy as np
from copy import copy
import colorsys
import pygame
from pygame.transform import scale
class MinesweeperEnv(Env):
def __init__(self, grid_shape=(10, 15), bombs_density=0.1, n_bombs=None, impact_size=3, max_time=999, chicken=False):
self.grid_shape = grid_shape
self.grid_size = np.prod(grid_shape)
self.n_bombs = max(1, int(bombs_density * self.grid_size)) if n_bombs is None else n_bombs
self.n_bombs = min(self.grid_size - 1, self.n_bombs)
self.flaged_bombs = 0
self.flaged_empty = 0
self.max_time = max_time
if impact_size % 2 == 0:
raise ValueError('Impact_size must be an odd number !')
self.impact_size = impact_size
# Define constants
self.HIDDEN = 0
self.REVEAL = 1
self.FLAG = 2
self.BOMB = self.impact_size ** 2
# Setting up gym Env conventions
nvec_observation = (self.BOMB + 2) * np.ones(self.grid_shape)
self.observation_space = spaces.MultiDiscrete(nvec_observation)
nvec_action = np.array(self.grid_shape + (2,))
self.action_space = spaces.MultiDiscrete(nvec_action)
        # Initialize state
self.state = np.zeros(self.grid_shape + (2,), dtype=np.uint8)
## Setup bombs places
idx = np.indices(self.grid_shape).reshape(2, -1)
bombs_ids = np.random.choice(range(self.grid_size), size=self.n_bombs, replace=False)
self.bombs_positions = idx[0][bombs_ids], idx[1][bombs_ids]
## Place numbers
self.semi_impact_size = (self.impact_size-1)//2
bomb_impact = np.ones((self.impact_size, self.impact_size), dtype=np.uint8)
for bombs_id in bombs_ids:
bomb_x, bomb_y = idx[0][bombs_id], idx[1][bombs_id]
x_min, x_max, dx_min, dx_max = self.clip_index(bomb_x, 0)
y_min, y_max, dy_min, dy_max = self.clip_index(bomb_y, 1)
bomb_region = self.state[x_min:x_max, y_min:y_max, 0]
bomb_region += bomb_impact[dx_min:dx_max, dy_min:dy_max]
## Place bombs
self.state[self.bombs_positions + (0,)] = self.BOMB
self.start_time = time()
self.time_left = int(time() - self.start_time)
# Setup rendering
self.pygame_is_init = False
self.chicken = chicken
self.done = False
self.score = 0
def get_observation(self):
observation = copy(self.state[:, :, 1])
revealed = observation == 1
flaged = observation == 2
observation += self.impact_size ** 2 + 1
observation[revealed] = copy(self.state[:, :, 0][revealed])
observation[flaged] -= 1
return observation
def reveal_around(self, coords, reward, done, without_loss=False):
if not done:
x_min, x_max, _, _ = self.clip_index(coords[0], 0)
y_min, y_max, _, _ = self.clip_index(coords[1], 1)
region = self.state[x_min:x_max, y_min:y_max, :]
unseen_around = np.sum(region[..., 1] == 0)
if unseen_around == 0:
if not without_loss:
reward -= 0.001
return
flags_around = | np.sum(region[..., 1] == 2) | numpy.sum |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 09:11:28 2019
@author: bressler
"""
import SBCcode as sbc
from os import listdir
from os.path import isfile,join
import numpy as np
import matplotlib.pyplot as plt
import scipy
from random import randrange
import random
# Global variable because I'm a physicist not a developer:
CONVERSION_TO_CHARGE = (125.0/128)*(1/50.0)*(1/1000.0)*(1/(1.602e-19))
def total_area(trace,t):
""" Gets total area of trace with time array t"""
return scipy.integrate.trapz(trace,x=t)*CONVERSION_TO_CHARGE
def get_pulse(trace, t, dt, locale, pk_loc, std):
tPulse = []
pulse = []
tracemaxi = list(trace).index(max(trace))
#print("tracemaxi: %d"%tracemaxi)
for i in range(len(t)):
if trace[i] < std and i > tracemaxi:
break
#print(np.abs(t[i]-pk_loc))
elif trace[i]>=std and np.fabs(t[i]-pk_loc) <= locale:
tPulse.append(t[i])
pulse.append(trace[i])
return [pulse,tPulse]
def stitchTraces(ch0Trace,ch1Trace):
j = list(ch0Trace).index(128)
multiplier = 128/ch1Trace[j]
ch1Trace = [x*multiplier for x in ch1Trace]
for i in range(len(ch0Trace)):
if ch0Trace[i] ==128:
ch0Trace[i] = ch1Trace[i]
return ch0Trace
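# Note (added explanation, hypothetical usage): channel 0 saturates at the ADC
# ceiling of 128, so stitchTraces() rescales channel 1 (presumably a lower-gain
# copy of the same PMT signal) to agree at the first saturated sample, then
# substitutes it into every saturated sample of channel 0, e.g.:
#   stitched = stitchTraces(np.fabs(traces[i][0]), np.fabs(traces[i][1]))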
def SBC_pulse_integrator_bressler(trace,dt):
"""
takes:
trace - flipped (and stitched, if desired) PMT trace
dt - time step
returns: (as a list)
ret - area of pulse
Npeaks - number of peaks scipy found in the trace
totIntegral - total area under trace
pk_times - times of the peaks scipy found
"""
baseline = np.mean(trace[0:50])
baseline_std = np.std(trace[0:50])
trace = trace - baseline
trace = trace[0:-100]
pk_ind = scipy.signal.find_peaks(trace,5)
#print(pk_ind)
pk_times = [pk*dt for pk in pk_ind[0]]
pk_vals = [trace[k] for k in pk_ind[0]]
Npeaks = len(pk_vals)
tPMT = np.arange(len(trace))*dt
totIntegral = total_area(trace,tPMT)
if Npeaks == 1:
[pulse,tPulse] = get_pulse(trace, tPMT, dt, 0.5e-7, pk_times[0], baseline_std)
ret = 0
startind = 0
for j in range(len(tPulse)-1):
dist = tPulse[j+1] - tPulse[j]
if dist>dt+1e-9:
#print("break in t array at %d"%j)
ret += scipy.integrate.trapz(pulse[startind:j+1],tPulse[startind:j+1])*CONVERSION_TO_CHARGE
#print("ret inside pulse_integrator: %f"%ret)
startind = j+1
elif j == len(tPulse) - 2:
#print("end of pulse condition, j = %d, t = %e"%(j,tPulse[j]))
ret += scipy.integrate.trapz(pulse[startind:j+1],tPulse[startind:j+1])*CONVERSION_TO_CHARGE
elif Npeaks ==0:
[pulse,tPulse] = get_pulse(trace, tPMT, dt, 0.5e-7, 200*dt, baseline_std)
ret = 0
startind = 0
#print(t)
for j in range(len(tPulse)-1):
dist = tPulse[j+1] - tPulse[j]
if dist>dt+1e-9:
#print("break in t array at %d"%j)
ret += scipy.integrate.trapz(pulse[startind:j+1],tPulse[startind:j+1])*CONVERSION_TO_CHARGE
#print("ret inside pulse_integrator: %f"%ret)
startind = j+1
elif j == len(tPulse) - 2:
#print(j)
ret += scipy.integrate.trapz(pulse[startind:],tPulse[startind:])*CONVERSION_TO_CHARGE
elif Npeaks == 2:
if np.abs(pk_times[0]-pk_times[1])>=2e-7:
[firstPulse, tFirstPulse] = get_pulse(trace, tPMT, dt, 0.5e-7, pk_times[0], baseline_std)
ret = 0
startind = 0
#print(t)
for j in range(len(tFirstPulse)-1):
dist = tFirstPulse[j+1] - tFirstPulse[j]
if dist>dt + 1e-9:
#print("break in t array at %d"%j)
ret += scipy.integrate.trapz(firstPulse[startind:j+1],tFirstPulse[startind:j+1])*CONVERSION_TO_CHARGE
#print("ret inside pulse_integrator: %f"%ret)
startind = j+1
elif j == len(tFirstPulse) - 2:
#print(j)
ret += scipy.integrate.trapz(firstPulse[startind:],tFirstPulse[startind:])*CONVERSION_TO_CHARGE
[secondPulse, tSecondPulse] = get_pulse(trace,tPMT,dt, 0.5e-7, pk_times[1],baseline_std)
startind = 0
#print(t)
for j in range(len(tSecondPulse)-1):
dist = tSecondPulse[j+1] - tSecondPulse[j]
if dist>dt+1e-9:
#print("break in t array at %d"%j)
ret += scipy.integrate.trapz(secondPulse[startind:j+1],tSecondPulse[startind:j+1])*CONVERSION_TO_CHARGE
#print("ret inside pulse_integrator: %f"%ret)
startind = j+1
elif j == len(tSecondPulse) - 2:
#print(j)
ret += scipy.integrate.trapz(secondPulse[startind:],tSecondPulse[startind:])*CONVERSION_TO_CHARGE
"""
if randrange(100) == 1 :
plt.figure()
#plt.title("baseline=%s"%str(baseline_std))
plt.plot(tPMT,trace)
plt.plot(tFirstPulse,firstPulse,linewidth=3)
plt.plot(tSecondPulse,secondPulse,linewidth=3)
plt.show
"""
else:
#print('-1')
Npeaks = -1
integral_t0_index = np.argmax(np.diff(trace)>4)
integral_t0 = tPMT[integral_t0_index]
p,t = get_pulse(trace,tPMT,dt, 5e-7,integral_t0,baseline_std)
ret = 0
startind = 0
#print(t)
for j in range(len(t)-1):
dist = t[j+1] - t[j]
if dist>dt+1e-9:
#print("break in t array at %d"%j)
ret += scipy.integrate.trapz(p[startind:j+1],t[startind:j+1])*CONVERSION_TO_CHARGE
#print("ret inside pulse_integrator: %f"%ret)
startind = j+1
elif j == len(t) - 2:
#print(j)
ret += scipy.integrate.trapz(p[startind:],t[startind:])*CONVERSION_TO_CHARGE
else:
integral_t0_index = np.argmax(np.diff(trace)>4)
integral_t0 = tPMT[integral_t0_index]
p,t = get_pulse(trace, tPMT, dt, 5e-7, integral_t0, baseline_std)
ret = 0
startind = 0
#print(t)
for j in range(len(t)-1):
dist = t[j+1] - t[j]
if dist>dt+1e-9:
#print("break in t array at %d"%j)
ret += scipy.integrate.trapz(p[startind:j+1],t[startind:j+1])*CONVERSION_TO_CHARGE
#print("ret inside pulse_integrator: %f"%ret)
startind = j+1
elif j == len(t) - 2:
#print(j)
ret += scipy.integrate.trapz(p[startind:],t[startind:])*CONVERSION_TO_CHARGE
"""
if random.random()<0.001:
plt.figure()
plt.plot(tPMT,trace)
plt.plot(t,p)
plt.xlabel('time (s)')
plt.ylabel('signal (ADC units)')
plt.show()
"""
return [ret,Npeaks,totIntegral,pk_times]
def main():
run = '20170709_8'
runpath = "/bluearc/storage/SBC-17-data/"+run+'/'
event = 0
e = sbc.DataHandling.GetSBCEvent.GetEvent(runpath,event)
tr = e["PMTtraces"]
trac = tr["traces"]
dt = tr["dt"]
for i in range(len(trac)):
trace = np.fabs(trac[i][0])
rawtrace = trac[i][0]
stitched = False
if max(trace) == 128:
trace = stitchTraces(trace, np.fabs(trac[i][1]))
stitchedtrace = stitchTraces(np.fabs(trac[i][0]), np.fabs(trac[i][1]))
stitched=True
else:
abstrace = np.fabs(trac[i][0])
b = | np.mean(trace[0:50]) | numpy.mean |
from __future__ import print_function, division
# from config.settings import mkdir
import os
import random as rn
import numpy as np
from torch.utils.data import Dataset
import random
import matplotlib.pyplot as plt
from os import listdir
import pickle
# https://github.com/gorchard/event-Python/tree/78dd3b0a7fc508d551cecdbf93b959dc2d265765
class NMNIST:
def __init__(self, root, object_classes, height, width, nr_events_window=-1, augmentation=False, mode='training',
event_representation='histogram', shuffle=True, proc_rate=100):
"""
Creates an iterator over the N_MNIST dataset.
:param root: path to dataset root
:param object_classes: list of string containing objects or 'all' for all classes
:param height: height of dataset image
:param width: width of dataset image
:param nr_events_window: number of events in a sliding window histogram, -1 corresponds to all events
:param augmentation: flip, shift and random window start for training
:param mode: 'training', 'testing' or 'validation'
:param event_representation: 'histogram' or 'event_queue'
"""
self.mode = mode
if mode == 'training':
mode = 'Train'
elif mode == 'testing':
mode = 'Test'
if mode == 'validation':
mode = 'Val'
root = os.path.join(root, mode)
# self.object_classes = listdir(root)
self.object_classes = ['0','1','2','3','4','5','6','7','8','9']
# self.object_classes = ['0']
self.width = width
self.height = height
self.augmentation = augmentation
self.nr_events_window = nr_events_window
self.nr_classes = len(self.object_classes)
self.event_representation = event_representation
self.files = []
self.labels = []
for i, object_class in enumerate(self.object_classes):
new_files = [os.path.join(root, object_class, f) for f in listdir(os.path.join(root, object_class))]
self.files += new_files
self.labels += [i] * len(new_files)
self.nr_samples = len(self.labels)
self.proc_rate = proc_rate
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
if self.mode=='testing':
return self.getitem_seq(idx)
else:
return self.getitem_rand(idx)
def getitem_seq(self, idx):
label = self.labels[idx]
with open(self.files[idx], "rb") as fp: #Pickling
batch_inputs = pickle.load(fp)
histograms = []
for b, event in enumerate(batch_inputs):
event[:,0] -= 1
event[:,1] -= 1
histogram = self.generate_input_representation(event, (self.height, self.width), no_t=True)
histograms.append(histogram)
histograms = np.stack(histograms, axis=3)
return batch_inputs[0], batch_inputs[0], label, histograms, histograms
def getitem_rand(self, idx):
"""
returns events and label, loading events from aedat
:param idx:
:return: x,y,t,p, label
"""
label = self.labels[idx]
filename = self.files[idx]
events = read_dataset(filename)
nr_events = events.shape[0]
window_start0 = 0
window_start1 = min(nr_events,self.proc_rate)
window_end0 = nr_events
window_end1 = nr_events
if self.augmentation:
# events = random_shift_events(events, max_shift=1, resolution=(self.height, self.width))
window_start0 = random.randrange(0, max(1, nr_events - self.nr_events_window-self.proc_rate))
window_start1 = min(nr_events, window_start0+self.proc_rate)
if self.nr_events_window != -1:
# Catch case if number of events in batch is lower than number of events in window.
window_end0 = min(nr_events, window_start0 + self.nr_events_window)
window_end1 = min(nr_events, window_start1 + self.nr_events_window)
# First Events
events0 = events[window_start0:window_end0, :]
histogram0= self.generate_input_representation(events0, (self.height, self.width))
events1 = events[window_start1:window_end1, :]
histogram1= self.generate_input_representation(events1, (self.height, self.width))
if 0:
plt.imshow(255*histogram0[:,:,0]/np.max(histogram0))
plt.savefig('sample/nmnist/histogram0_' + str(idx)+ '.png')
plt.imshow(255*histogram1[:,:,0]/np.max(histogram1))
plt.savefig('sample/nmnist/histogram1_' + str(idx)+ '.png')
return events0, events1, label, histogram0, histogram1
def generate_input_representation(self, events, shape, no_t=False):
"""
Events: N x 4, where cols are x, y, t, polarity, and polarity is in {0,1}. x and y correspond to image
coordinates u and v.
"""
if self.event_representation == 'histogram':
return self.generate_event_histogram(events, shape, no_t=no_t)
elif self.event_representation == 'event_queue':
return self.generate_event_queue(events, shape)
@staticmethod
def generate_event_histogram(events, shape, no_t=False):
"""
Events: N x 4, where cols are x, y, t, polarity, and polarity is in {0,1}. x and y correspond to image
coordinates u and v.
"""
H, W = shape
if no_t:
x, y, p = events.T
else:
x, y, t, p = events.T
x = x.astype(np.int)
y = y.astype(np.int)
# if 1:
# x[x>(W-1)]= W-1
# y[y>(H-1)]= H-1
img_pos = np.zeros((H * W,), dtype="float32")
img_neg = | np.zeros((H * W,), dtype="float32") | numpy.zeros |
from multiprocessing import Process, Pipe, Queue
import numpy as np
from chainer import cuda
from generate_data import generate_nice
from sinkhorn import sinkhorn_fb
from sdtw import soft_dtw, soft_dtw_grad
from wdtw import gradient_descent
GPU_COUNT = 7
GPU_PRIORITY = [2, 6, 0, 1, 4, 5, 3]
GPU_PRIORITY_REVERSE = list( | np.argsort(GPU_PRIORITY) | numpy.argsort |
import scipy.misc
import numpy as np
import os
from glob import glob
import imageio
import tensorflow as tf
import tensorflow.contrib.slim as slim
from keras.datasets import cifar10, mnist
import matplotlib.pyplot as plt
import pickle
class ImageData:
def __init__(self, load_size, channels, crop_pos='center', zoom_range=0.0):
self.load_size = load_size
self.channels = channels
self.crop_pos = crop_pos
self.zoom_range = zoom_range
def image_processing(self, filename):
x = tf.io.read_file(filename)
x_decode = tf.image.decode_jpeg(x, channels=self.channels)
s = tf.shape(x_decode)
w, h = s[0], s[1]
# height, width, channel = x_decode.eval(session=self.sess).shape
c = tf.minimum(w, h)
zoom_factor = 0.15
c_ = tf.cast(tf.cast(c, dtype=tf.float32) * (1 - tf.random.uniform(shape=[])*zoom_factor), dtype=tf.int32)
if self.crop_pos == 'random':
print('crop random')
k = tf.random.uniform(shape=[])
l = tf.random.uniform(shape=[])
w_start = tf.cast(tf.cast((w - c_), dtype=tf.float32) * k, dtype=tf.int32)
h_start = tf.cast(tf.cast((h - c_), dtype=tf.float32) * l, dtype=tf.int32)
else:
w_start = (w - c_) // 2
h_start = (h - c_) // 2
img = x_decode[w_start:w_start + c_, h_start:h_start + c_]
img = tf.image.resize_images(img, [self.load_size, self.load_size])
img = tf.cast(img, tf.float32) / 127.5 - 1
return img
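# --- Example (illustrative sketch, not part of the original module) ---
# Typical use of ImageData in a tf.data input pipeline; the file list and
# hyper-parameters are placeholders.
def _example_image_pipeline(filenames, batch_size=16):
    loader = ImageData(load_size=64, channels=3, crop_pos='random')
    dataset = (tf.data.Dataset.from_tensor_slices(filenames)
               .map(loader.image_processing)
               .shuffle(1000)
               .batch(batch_size))
    return dataset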
def load_mnist(size=64):
(train_data, train_labels), (test_data, test_labels) = mnist.load_data()
train_data = normalize(train_data)
test_data = normalize(test_data)
x = np.concatenate((train_data, test_data), axis=0)
# y = np.concatenate((train_labels, test_labels), axis=0).astype(np.int)
seed = 777
np.random.seed(seed)
np.random.shuffle(x)
# np.random.seed(seed)
# np.random.shuffle(y)
# x = np.expand_dims(x, axis=-1)
x = np.asarray([scipy.misc.imresize(x_img, [size, size]) for x_img in x])
x = np.expand_dims(x, axis=-1)
return x
def load_cifar10(size=64) :
(train_data, train_labels), (test_data, test_labels) = cifar10.load_data()
train_data = normalize(train_data)
test_data = normalize(test_data)
x = np.concatenate((train_data, test_data), axis=0)
# y = np.concatenate((train_labels, test_labels), axis=0).astype(np.int)
seed = 777
np.random.seed(seed)
np.random.shuffle(x)
# np.random.seed(seed)
# np.random.shuffle(y)
x = np.asarray([scipy.misc.imresize(x_img, [size, size]) for x_img in x])
return x
def load_data(dataset_name, size=64) :
x = glob(f'{dataset_name}/*/*.jpg')
x.extend(glob(f'{dataset_name}/*.jpg'))
x.extend(glob(f'{dataset_name}/*/*.png'))
x.extend(glob(f'{dataset_name}/*.png'))
print(x)
return x
def preprocessing(x, size):
x = scipy.misc.imread(x, mode='RGB')
x = scipy.misc.imresize(x, [size, size])
x = normalize(x)
return x
def normalize(x) :
return x/127.5 - 1
def save_images(images, size, image_path):
return imsave(inverse_transform(images), size, image_path)
def save_image(image, image_path):
image = inverse_transform(image)
image = to_uint8(image)
imageio.imwrite(image_path, image)
def save_images_plt(images, size, image_path, mode=None):
images = inverse_transform(images)
images = to_uint8(images)
if mode == 'sample':
h = 10
else:
h = 21.6
img_dir = '/'.join(image_path.split('/')[:-1])+'/'+image_path.split('/')[-1][:-4]
print(img_dir)
if not os.path.isdir(img_dir):
os.makedirs(img_dir)
w = size[0]/size[1] * h
plt.figure(figsize=(w,h), dpi=100)
n_rows = size[1]
n_cols = size[0]
for i in range(images.shape[0]):
plt.subplot(n_rows, n_cols, i+1)
image = images[i]
if mode != 'sample':
img_path = f'{img_dir}/{i:03d}.png'
imageio.imwrite(img_path, image)
if image.shape[2] == 1:
plt.imshow(image.reshape((image.shape[0], image.shape[1])), cmap='gray')
else:
plt.imshow(image)
plt.axis('off')
plt.tight_layout()
is_exist = os.path.isfile(image_path)
i = 1
image_path_temp = image_path
while is_exist == True:
image_path = image_path_temp[:-4] + f' ({i:02d})'+image_path_temp[-4:]
is_exist = os.path.isfile(image_path)
i+=1
plt.savefig(image_path)
plt.close()
def merge(images, size):
h, w = images.shape[1], images.shape[2]
if (images.shape[3] in (3,4)):
c = images.shape[3]
img = np.zeros((h * size[0], w * size[1], c))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w, :] = image
return img
elif images.shape[3]==1:
img = | np.zeros((h * size[0], w * size[1])) | numpy.zeros |
from __future__ import division, print_function
import numpy as np
import os
from scipy.stats import multivariate_normal
import sys
import struct
try:
import sounddevice as sd
have_sounddevice = True
except:
have_sounddevice = False
from .stft import stft
from .acoustics import mfcc
class CircularGaussianEmission:
def __init__(self, nstates, odim=1, examples=None):
''' Initialize the Gaussian emission object '''
# The emissions parameters
self.K = nstates
if examples is None:
# Initialize to random components
self.O = odim
self.mu = np.random.normal(size=(self.K, self.O))
self.Sigma = | np.ones((self.K, self.O)) | numpy.ones |
from scipy.stats import loguniform, uniform
import numpy as np
from dask import compute
from gillespy2 import Model, Species, Reaction, Parameter, RateRule, AssignmentRule, FunctionDefinition
from gillespy2 import VariableSSACSolver
def minmax_normalize(data, min_=None, max_=None):
data_ = np.copy(data)
if min_ is None:
min_ = np.min(data_)
min_ = np.expand_dims(min_,axis=[0,1])
if max_ is None:
max_ = np.max(data_)
max_ = np.expand_dims(max_,axis=[0,1])
min_[np.where(min_ == max_)] = 0
return (data_ - min_) / (max_ - min_), min_, max_
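# Example (illustrative): normalise training data to [0, 1] and reuse the same
# global min/max so that held-out data is scaled consistently, e.g.:
#   train_n, mn, mx = minmax_normalize(train_data)
#   test_n, _, _ = minmax_normalize(test_data, min_=mn, max_=mx)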
def checkprior(theta, prior):
flag = prior.pdf(theta) > 0
print(f'inside of initial prior: {sum(flag)}')
print(f'out of total: {len(theta)}')
return theta[flag,:]
def loguniform_prior(Ndata=2_500, log=True):
a0, b0 = 0.002, 2
a1, b1 = 0.002, 2
k1 = loguniform.rvs(a0,b0,size=Ndata)
k2 = loguniform.rvs(a1,b1,size=Ndata)
k3 = loguniform.rvs(a0,b0,size=Ndata)
theta = np.vstack((k1,k2,k3)).T
if log:
return np.log(theta)
return theta
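# Example (illustrative): draw 1000 rate-constant triples from the log-uniform
# prior and keep only rows supported by a prior object exposing .pdf(), e.g.:
#   theta = loguniform_prior(Ndata=1000, log=False)   # shape (1000, 3)
#   theta = checkprior(theta, some_prior)             # some_prior is a placeholder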
from scipy.stats import uniform
class uniform_prior:
def __init__(self, left = [0.002,0.002,0.002], right =[2,2,2]):
self.left = np.asarray(left)
self.right = np.asarray(right)
self.m = (self.left+self.right)/2
self.var = (self.right-self.left)**2/12
self.S = np.diag(self.var)
def gen(self, Ndata=2_500):
"""
generates random samples from the uniform prior, for 3 parameters.
param:
Ndata, number of samples
left, left boundary
right, right boundary
output:
theta, random samples of size (Ndata,3)
"""
print("*** USED PRIOR ***")
left = np.log(self.left)
right = | np.log(self.right) | numpy.log |
# This file is a part of the HiRISE DTM Importer for Blender
#
# Copyright (C) 2017 Arizona Board of Regents on behalf of the Planetary Image
# Research Laboratory, Lunar and Planetary Laboratory at the University of
# Arizona.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Objects for creating 3D models in Blender"""
import bpy
import bmesh
import numpy as np
from .triangulate import Triangulate
class BTerrain:
"""
Functions for creating Blender meshes from DTM objects
This class contains functions that convert DTM objects to Blender meshes.
Its main responsiblity is to triangulate a mesh from the elevation data in
the DTM. Additionally, it attaches some metadata to the object and creates
a UV map for it so that companion ortho-images drape properly.
This class provides two public methods: `new()` and `reload()`.
`new()` creates a new object[1] and attaches a new mesh to it.
`reload()` replaces the mesh that is attached to an already existing
object. This allows us to retain the location and orientation of the parent
object's coordinate system but to reload the terrain at a different
resolution.
Notes
----------
[1] If you're unfamiliar with Blender, one thing that will help you in
reading this code is knowing the difference between 'meshes' and
'objects'. A mesh is just a collection of vertices, edges and
faces. An object may have a mesh as a child data object and
contains additional information, e.g. the location and orientation
of the coordinate system its child-meshes are reckoned in terms of.
"""
@staticmethod
def new(dtm, name='Terrain'):
"""
Loads a new terrain
Parameters
----------
dtm : DTM
name : str, optional
The name that will be assigned to the new object, defaults
to 'Terrain' (and, if an object named 'Terrain' already
exists, Blender will automatically extend the name of the
new object to something like 'Terrain.001')
Returns
----------
obj : bpy_types.Object
"""
bpy.ops.object.add(type="MESH")
obj = bpy.context.object
obj.name = name
# Fill the object data with a Terrain mesh
obj.data = BTerrain._mesh_from_dtm(dtm)
# Add some meta-information to the object
metadata = BTerrain._create_metadata(dtm)
BTerrain._setobjattrs(obj, **metadata)
# Center the mesh to its origin and create a UV map for draping
# ortho images.
BTerrain._center(obj)
return obj
@staticmethod
def reload(obj, dtm):
"""
Replaces an exisiting object's terrain mesh
This replaces an object's mesh with a new mesh, transferring old
materials over to the new mesh. This is useful for reloading DTMs
at different resolutions but maintaining textures/location/rotation.
Parameters
-----------
obj : bpy_types.Object
An already existing Blender object
dtm : DTM
Returns
----------
obj : bpy_types.Object
"""
old_mesh = obj.data
new_mesh = BTerrain._mesh_from_dtm(dtm)
# Copy any old materials to the new mesh
for mat in old_mesh.materials:
new_mesh.materials.append(mat.copy())
# Swap out the old mesh for the new one
obj.data = new_mesh
# Update out-dated meta-information
metadata = BTerrain._create_metadata(dtm)
BTerrain._setobjattrs(obj, **metadata)
# Center the mesh to its origin and create a UV map for draping
# ortho images.
BTerrain._center(obj)
return obj
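    # Example (illustrative, assuming a DTM instance `dtm` is available from the
    # add-on's importer):
    #   obj = BTerrain.new(dtm)             # first import
    #   obj = BTerrain.reload(obj, dtm_hi)  # swap in a re-sampled terrain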
@staticmethod
def _mesh_from_dtm(dtm, name='Terrain'):
"""
Creates a Blender *mesh* from a DTM
Parameters
----------
dtm : DTM
name : str, optional
The name that will be assigned to the new mesh, defaults
to 'Terrain' (and, if an object named 'Terrain' already
exists, Blender will automatically extend the name of the
new object to something like 'Terrain.001')
Returns
----------
mesh : bpy_types.Mesh
Notes
----------
* We are switching coordinate systems from the NumPy to Blender.
Numpy: Blender:
+ ----> (0, j) ^ (0, y)
| |
| |
v (i, 0) + ----> (x, 0)
"""
# Create an empty mesh
mesh = bpy.data.meshes.new(name)
# Get the xy-coordinates from the DTM, see docstring notes
y, x = np.indices(dtm.data.shape).astype('float64')
x *= dtm.mesh_scale
y *= -1 * dtm.mesh_scale
# Create an array of 3D vertices
vertices = np.dstack([x, y, dtm.data]).reshape((-1, 3))
# Drop vertices with NaN values (used in the DTM to represent
# areas with no data)
vertices = vertices[~ | np.isnan(vertices) | numpy.isnan |
import keras
import numpy as np
import sys
import tensorflow as tf
import cv2
def random_crop(x,dn):
dx = np.random.randint(dn,size=1)[0]
dy = | np.random.randint(dn,size=1) | numpy.random.randint |
import numpy as num
from random import randrange
from scipy.sparse.linalg import gmres
import matplotlib.pyplot as plt
import math
import datetime
def gen_matrix(n1) :
a1 = ''
for i in range(n1):
for j in range(n1):
a1 += str(randrange(n1*10))
a1 += ' '
if i != n1-1:
a1 += ';'
a1 += ' '
return num.matrix(a1)
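# Example (illustrative, not part of the original script): build a random dense
# system and solve it with SciPy's reference GMRES; gmres_algorithm() below
# follows a similar interface with an explicit initial guess and iteration cap.
# A random draw may be ill-conditioned, in which case info != 0.
def _example_scipy_gmres(n1=5):
    A = gen_matrix(n1)
    b = num.ones(n1)
    x, info = gmres(A, b)
    return x, info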
def _plot_graph ():
plt.plot(range(len(g_1)) , g_1 , color='black')
plt.xlabel('N')
plt.ylabel('error')
plt.title('plot')
def gmres_algorithm (A , b , x0 , error , max_iter ):
res = b - num.asarray(num.dot(A,x0)).reshape(-1) # residual error
#print ("res " , res)
x_pred = []
q_ = [0] * max_iter
x_pred.append(res)
q_[0] = res / num.linalg.norm(res)
#print("q_ " , q_)
h_ = num.zeros((max_iter + 1, max_iter))
for k in range(min(max_iter , A.shape[0])) :
y_out = num.asarray(num.dot(A,q_[k])).reshape(-1)
#print (" y_out : " , y_out)
for j in range(k+1) :
h_[j , k] = num.dot(q_[j],y_out)
y_out = y_out - h_[j , k] * q_[j]
#print ("y_out : " , y_out)
h_[k+1 , k] = num.linalg.norm(y_out)
if (h_[k + 1, k] != 0 and k != max_iter - 1):
q_[k+1] = y_out / h_[k+1 , k]
b_ = num.zeros(max_iter + 1)
b_[0] = num.linalg.norm(res)
c_ = num.linalg.lstsq(h_ , b_)[0]
prod_ = num.asarray(num.dot(num.asarray(q_).transpose() , c_))
if (k == max_iter - 1) :
print('q_ ' + str(num.asarray(q_).shape) + ' c_shape = ' + str(c_.shape) + ' prod_ = ' + str(prod_.shape))
x_pred.append(prod_ + x0)
#print ("h_ : " , h_)
#print ("b_ : " , b_)
#print ("x_pred " , prod_ + x0 )
x_temp_ = (num.linalg.norm(b - num.dot(A ,(prod_ + x0)).reshape(-1)) / | num.linalg.norm(b) | numpy.linalg.norm |
# Copyright (c) 2020, <NAME>, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of <NAME>, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME>, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
This script yields the values for the illustrative example in
.. seealso::
[1] <NAME>, <NAME>, <NAME>, "Assessing Transferability from Simulation to Reality for Reinforcement
Learning", PAMI, 2021
"""
import os
import os.path as osp
import numpy as np
from matplotlib import pyplot as plt
from scipy import special
import pyrado
from pyrado import set_seed
from pyrado.environments.one_step.catapult import CatapultExample
from pyrado.plotting.curve import draw_curve_from_data
from pyrado.utils.argparser import get_argparser
def calc_E_n_Jhat(n, th):
r"""
Calculate $E_\\xi[ \hat{J}_n(\theta) ]$ approximated by $sum_{i=1}^n p(\\xi_i) \hat{J}_n(\theta)$.
:param n: number of domains $n$ to approximate the expectation
:param th: (arbitrary) policy parameter, might be estimated using n domain parameters, but does not have to be
:return: approximation of $E_\\xi[ \hat{J}_n(\theta) ]$
"""
E_n_Jhat_th = 0
for i in range(n + 1):
# i is the number of Venus draws
binom_coeff = special.binom(n, i)
E_n_Jhat_th += binom_coeff * pow(psi, i) * pow(1 - psi, n - i) * env.est_expec_return(th, n - i, i)
return E_n_Jhat_th
def calc_E_n_Jhat_th_opt(n):
r"""
Calculate $E_\\xi[ \hat{J}_n(\theta^*) ]$ approximated by $sum_{i=1}^n p(\\xi_i) \hat{J}_n(\theta^*)$.
:param n: number of domains $n$ to approximate the expectation
:return: approximation of $E_\\xi[ \hat{J}_n(\theta^*) ]$
"""
E_n_Jhat_th_opt = 0
for i in range(n + 1):
# i is the number of Venus draws
binom_coeff = special.binom(n, i)
E_n_Jhat_th_opt += binom_coeff * pow(psi, i) * pow(1 - psi, n - i) * env.opt_est_expec_return(n - i, i)
return E_n_Jhat_th_opt
def check_E_n_Jhat(th_n_opt, n):
"""
Check the influence of the number of domains $n$ used for the expectation operator.
:param th_n_opt: optimal policy parameter determined from n domains
:param n: number of domains $n$ used for determining the policy parameters
"""
# "Manual" expectation using n=3 domain parameters
E_3_Jhat_n_opt = (
1 * pow(psi, 3) * env.est_expec_return(th_n_opt, 0, 3)
+ 3 * pow(psi, 2) * pow(1 - psi, 1) * env.est_expec_return(th_n_opt, 1, 2)
+ 3 * pow(psi, 1) * pow(1 - psi, 2) * env.est_expec_return(th_n_opt, 2, 1)
+ 1 * pow(1 - psi, 3) * env.est_expec_return(th_n_opt, 3, 0)
)
print(f"E_3_Jhat_{n}_opt: {E_3_Jhat_n_opt}")
# Expectation using n=50 domain parameters
E_3_Jhat_n_opt = calc_E_n_Jhat(3, th_n_opt)
print(f"E_3_Jhat_{n}_opt: {E_3_Jhat_n_opt}")
# Expectation using n=50 domain parameters
E_50_Jhat_n_opt = calc_E_n_Jhat(50, th_n_opt)
print(f"E_50_Jhat_{n}_opt: {E_50_Jhat_n_opt}")
# Expectation using n=500 domain parameters
E_500_Jhat_n_opt = calc_E_n_Jhat(500, th_n_opt)
print(f"E_500_Jhat_{n}_opt: {E_500_Jhat_n_opt}")
if __name__ == "__main__":
# Parse command line arguments
args = get_argparser().parse_args()
# Set up the example
ex_dir = osp.join(pyrado.EVAL_DIR, "illustrative_example")
env = CatapultExample(m=1.0, g_M=3.71, k_M=1000.0, x_M=0.5, g_V=8.87, k_V=3000.0, x_V=1.5)
psi = 0.7 # true probability of drawing Venus
num_samples = 100
num_iter = 30
noise_th_scale = 0.15
set_seed(args.seed)
fig_size = tuple([0.75 * x for x in pyrado.figsize_thesis_1percol_18to10])
th_true_opt = env.opt_policy_param(1 - psi, psi) # true probabilities instead of counts
J_true_opt = env.opt_est_expec_return(1 - psi, psi) # true probabilities instead of counts
print(f"th_true_opt: {th_true_opt}")
print(f"J_true_opt: {J_true_opt}\n")
# Initialize containers
n_M_hist = np.empty((num_samples, num_iter))
n_V_hist = np.empty((num_samples, num_iter))
th_n_opt_hist = np.empty((num_samples, num_iter))
th_c_hist = np.empty((num_samples, num_iter))
Jhat_th_n_opt_hist = | np.empty((num_samples, num_iter)) | numpy.empty |
import tempfile
import numpy as np
import h5py
from pyscf import lib
#einsum = np.einsum
einsum = lib.einsum
# This is restricted (R)CCSD
# Ref: Hirata et al., J. Chem. Phys. 120, 2581 (2004)
### Eqs. (37)-(39) "kappa"
def cc_Foo(t1,t2,eris):
nocc, nvir = t1.shape
foo = eris.fock[:nocc,:nocc]
Fki = foo.copy()
Fki += 2*einsum('kcld,ilcd->ki',eris.ovov,t2)
Fki += -einsum('kdlc,ilcd->ki',eris.ovov,t2)
Fki += 2*einsum('kcld,ic,ld->ki',eris.ovov,t1,t1)
Fki += -einsum('kdlc,ic,ld->ki',eris.ovov,t1,t1)
return Fki
def cc_Fvv(t1,t2,eris):
nocc, nvir = t1.shape
fvv = eris.fock[nocc:,nocc:]
Fac = fvv.copy()
Fac += -2*einsum('kcld,klad->ac',eris.ovov,t2)
Fac += einsum('kdlc,klad->ac',eris.ovov,t2)
Fac += -2*einsum('kcld,ka,ld->ac',eris.ovov,t1,t1)
Fac += einsum('kdlc,ka,ld->ac',eris.ovov,t1,t1)
return Fac
def cc_Fov(t1,t2,eris):
nocc, nvir = t1.shape
fov = eris.fock[:nocc,nocc:]
Fkc = fov.copy()
Fkc += 2*einsum('kcld,ld->kc',eris.ovov,t1)
Fkc += -einsum('kdlc,ld->kc',eris.ovov,t1)
return Fkc
### Eqs. (40)-(41) "lambda"
def Loo(t1,t2,eris):
nocc, nvir = t1.shape
fov = eris.fock[:nocc,nocc:]
Lki = cc_Foo(t1,t2,eris) + einsum('kc,ic->ki',fov,t1)
Lki += 2*einsum('kilc,lc->ki',eris.ooov,t1)
Lki += -einsum('likc,lc->ki',eris.ooov,t1)
return Lki
def Lvv(t1,t2,eris):
nocc, nvir = t1.shape
fov = eris.fock[:nocc,nocc:]
Lac = cc_Fvv(t1,t2,eris) - einsum('kc,ka->ac',fov,t1)
eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocc*nvir,-1)).reshape(nocc,nvir,nvir,nvir)
Lac += 2*einsum('kdac,kd->ac',eris_ovvv,t1)
Lac += -einsum('kcad,kd->ac',eris_ovvv,t1)
return Lac
### Eqs. (42)-(45) "chi"
def cc_Woooo(t1,t2,eris):
Wklij = np.array(eris.oooo).transpose(0,2,1,3).copy()
Wklij += einsum('kilc,jc->klij',eris.ooov,t1)
Wklij += einsum('ljkc,ic->klij',eris.ooov,t1)
Wklij += einsum('kcld,ijcd->klij',eris.ovov,t2)
Wklij += einsum('kcld,ic,jd->klij',eris.ovov,t1,t1)
return Wklij
def cc_Wvvvv(t1,t2,eris):
## Incore
#Wabcd = np.array(eris.vvvv).transpose(0,2,1,3)
#Wabcd += -einsum('kdac,kb->abcd',eris.ovvv,t1)
#Wabcd += -einsum('kcbd,ka->abcd',eris.ovvv,t1)
## HDF5
if t1.dtype == np.complex: ds_type = 'c16'
else: ds_type = 'f8'
_tmpfile1 = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
fimd = h5py.File(_tmpfile1.name)
nocc,nvir = t1.shape
Wabcd = fimd.create_dataset('vvvv', (nvir,nvir,nvir,nvir), ds_type)
# avoid transpose inside loop
eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocc*nvir,-1)).reshape(nocc,nvir,nvir,nvir)
ovvv = np.array(eris_ovvv).transpose(0,2,1,3)
for a in range(nvir):
# Wabcd[a] = eris.vvvv[a].transpose(1,0,2)
# Wabcd[a] += -einsum('kdc,kb->bcd',eris_ovvv[:,:,a,:],t1)
# #Wabcd[a] += -einsum('kcbd,k->bcd',eris_ovvv,t1[:,a])
# Wabcd[a] += -einsum('k,kbcd->bcd',t1[:,a],ovvv)
w_vvv = einsum('kdc,kb->bcd',eris_ovvv[:,:,a,:],-t1)
w_vvv -= einsum('k,kbcd->bcd',t1[:,a],ovvv)
a0 = a*(a+1)//2
w_vvv[:,:a+1] += lib.unpack_tril(eris.vvvv[a0:a0+a+1]).transpose(1,0,2)
for i in range(a+1,nvir):
w_vvv[:,i] += lib.unpack_tril(eris.vvvv[i*(i+1)//2+a])
Wabcd[a] = w_vvv
return Wabcd
def cc_Wvoov(t1,t2,eris):
nocc, nvir = t1.shape
eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocc*nvir,-1)).reshape(nocc,nvir,nvir,nvir)
Wakic = np.array(eris.ovvo).transpose(1,3,0,2)
Wakic -= einsum('likc,la->akic',eris.ooov,t1)
Wakic += einsum('kcad,id->akic',eris_ovvv,t1)
Wakic -= 0.5*einsum('ldkc,ilda->akic',eris.ovov,t2)
Wakic -= einsum('ldkc,id,la->akic',eris.ovov,t1,t1)
Wakic += einsum('ldkc,ilad->akic',eris.ovov,t2)
Wakic += -0.5*einsum('lckd,ilad->akic',eris.ovov,t2)
return Wakic
def cc_Wvovo(t1,t2,eris):
nocc, nvir = t1.shape
eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocc*nvir,-1)).reshape(nocc,nvir,nvir,nvir)
Wakci = np.array(eris.oovv).transpose(2,0,3,1)
Wakci -= einsum('kilc,la->akci',eris.ooov,t1)
Wakci += einsum('kdac,id->akci',eris_ovvv,t1)
Wakci -= 0.5*einsum('lckd,ilda->akci',eris.ovov,t2)
Wakci -= einsum('lckd,id,la->akci',eris.ovov,t1,t1)
return Wakci
def Wooov(t1,t2,eris):
Wklid = np.asarray(eris.ooov).transpose(0,2,1,3) + einsum('ic,kcld->klid',t1,eris.ovov)
return Wklid
def Wvovv(t1,t2,eris):
nocc, nvir = t1.shape
eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocc*nvir,-1)).reshape(nocc,nvir,nvir,nvir)
Walcd = np.asarray(eris_ovvv).transpose(2,0,3,1) - einsum('ka,kcld->alcd',t1,eris.ovov)
return Walcd
def W1ovvo(t1,t2,eris):
Wkaci = np.array(eris.ovvo).transpose(3,1,2,0)
Wkaci += 2*einsum('kcld,ilad->kaci',eris.ovov,t2)
Wkaci += -einsum('kcld,liad->kaci',eris.ovov,t2)
Wkaci += -einsum('kdlc,ilad->kaci',eris.ovov,t2)
return Wkaci
def W2ovvo(t1,t2,eris):
nocc, nvir = t1.shape
eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocc*nvir,-1)).reshape(nocc,nvir,nvir,nvir)
Wkaci = einsum('la,lkic->kaci',-t1,Wooov(t1,t2,eris))
Wkaci += einsum('kcad,id->kaci',eris_ovvv,t1)
return Wkaci
def Wovvo(t1,t2,eris):
Wkaci = W1ovvo(t1,t2,eris) + W2ovvo(t1,t2,eris)
return Wkaci
def W1ovov(t1,t2,eris):
Wkbid = np.array(eris.oovv).transpose(0,2,1,3)
Wkbid += -einsum('kcld,ilcb->kbid',eris.ovov,t2)
return Wkbid
def W2ovov(t1,t2,eris):
nocc, nvir = t1.shape
eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocc*nvir,-1)).reshape(nocc,nvir,nvir,nvir)
Wkbid = einsum('klid,lb->kbid',Wooov(t1,t2,eris),-t1)
Wkbid += einsum('kcbd,ic->kbid',eris_ovvv,t1)
return Wkbid
def Wovov(t1,t2,eris):
return W1ovov(t1,t2,eris) + W2ovov(t1,t2,eris)
def Woooo(t1,t2,eris):
Wklij = np.array(eris.oooo).transpose(0,2,1,3).copy()
Wklij += einsum('kcld,ijcd->klij',eris.ovov,t2)
Wklij += einsum('kcld,ic,jd->klij',eris.ovov,t1,t1)
Wklij += einsum('kild,jd->klij',eris.ooov,t1)
Wklij += einsum('ljkc,ic->klij',eris.ooov,t1)
return Wklij
def Wvvvv(t1,t2,eris):
## Incore
#Wabcd = np.array(eris.vvvv).transpose(0,2,1,3)
#Wabcd += einsum('kcld,klab->abcd',eris.ovov,t2)
#Wabcd += einsum('kcld,ka,lb->abcd',eris.ovov,t1,t1)
#Wabcd += -einsum('ldac,lb->abcd',eris.ovvv,t1)
#Wabcd += -einsum('kcbd,ka->abcd',eris.ovvv,t1)
## HDF5
if t1.dtype == np.complex: ds_type = 'c16'
else: ds_type = 'f8'
_tmpfile1 = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
fimd = h5py.File(_tmpfile1.name)
nocc,nvir = t1.shape
Wabcd = fimd.create_dataset('vvvv', (nvir,nvir,nvir,nvir), ds_type)
eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocc*nvir,-1)).reshape(nocc,nvir,nvir,nvir)
for a in range(nvir):
#Wabcd[a] = eris.vvvv[a].transpose(1,0,2)
#Wabcd[a] += -einsum('ldc,lb->bcd',eris_ovvv[:,:,a,:],t1)
#Wabcd[a] += -einsum('kcbd,k->bcd',eris_ovvv,t1[:,a])
#Wabcd[a] += einsum('kcld,klb->bcd',eris.ovov,t2[:,:,a,:])
#Wabcd[a] += einsum('kcld,k,lb->bcd',eris.ovov,t1[:,a],t1)
w_vvv = einsum('ldc,lb->bcd',eris_ovvv[:,:,a,:],-t1)
w_vvv += einsum('kcbd,k->bcd',eris_ovvv,-t1[:,a])
w_vvv += einsum('kcld,klb->bcd',eris.ovov,t2[:,:,a,:])
w_vvv += einsum('kcld,k,lb->bcd',eris.ovov,t1[:,a],t1)
a0 = a*(a+1)//2
w_vvv[:,:a+1] += lib.unpack_tril(eris.vvvv[a0:a0+a+1]).transpose(1,0,2)
for i in range(a+1,nvir):
w_vvv[:,i] += lib.unpack_tril(eris.vvvv[i*(i+1)//2+a])
Wabcd[a] = w_vvv
return Wabcd
def Wvvvo(t1,t2,eris,_Wvvvv=None):
    nocc, nvir = t1.shape
eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocc*nvir,-1)).reshape(nocc,nvir,nvir,nvir)
Wabcj = np.array(eris_ovvv).transpose(3,1,2,0).conj()
# Check if t1=0 (HF+MBPT(2))
# einsum will check, but don't make vvvv if you can avoid it!
if np.any(t1):
if _Wvvvv is None:
_Wvvvv = Wvvvv(t1,t2,eris)
for a in range(nvir):
Wabcj[a] += einsum('bcd,jd->bcj',_Wvvvv[a],t1)
Wabcj += -einsum('alcj,lb->abcj',W1ovov(t1,t2,eris).transpose(1,0,3,2),t1)
Wabcj += -einsum('kbcj,ka->abcj',W1ovvo(t1,t2,eris),t1)
Wabcj += 2*einsum('ldac,ljdb->abcj',eris_ovvv,t2)
Wabcj += -einsum('ldac,ljbd->abcj',eris_ovvv,t2)
Wabcj += -einsum('lcad,ljdb->abcj',eris_ovvv,t2)
Wabcj += -einsum('kcbd,jkda->abcj',eris_ovvv,t2)
Wabcj += einsum('ljkc,lkba->abcj',eris.ooov,t2)
Wabcj += einsum('ljkc,lb,ka->abcj',eris.ooov,t1,t1)
Wabcj += -einsum('kc,kjab->abcj',cc_Fov(t1,t2,eris),t2)
return Wabcj
def Wovoo(t1,t2,eris):
nocc, nvir = t1.shape
eris_ovvv = lib.unpack_tril( | np.asarray(eris.ovvv) | numpy.asarray |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Operator construction, including OpPrimitives and singletons. """
import unittest
from test.aqua import QiskitAquaTestCase
import itertools
import scipy
from scipy.stats import unitary_group
import numpy as np
from ddt import ddt, data
from qiskit import QiskitError
from qiskit.aqua import AquaError
from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector
from qiskit.extensions.exceptions import ExtensionError
from qiskit.quantum_info import Operator, Pauli, Statevector
from qiskit.circuit.library import CZGate, ZGate
from qiskit.aqua.operators import (
X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn,
CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp,
SummedOp, OperatorBase, Zero
)
from qiskit.aqua.operators import MatrixOperator
# pylint: disable=invalid-name
@ddt
class TestOpConstruction(QiskitAquaTestCase):
"""Operator Construction tests."""
def test_pauli_primitives(self):
""" from to file test """
newop = X ^ Y ^ Z ^ I
self.assertEqual(newop.primitive, Pauli(label='XYZI'))
kpower_op = (Y ^ 5) ^ (I ^ 3)
self.assertEqual(kpower_op.primitive, Pauli(label='YYYYYIII'))
kpower_op2 = (Y ^ I) ^ 4
self.assertEqual(kpower_op2.primitive, Pauli(label='YIYIYIYI'))
# Check immutability
self.assertEqual(X.primitive, Pauli(label='X'))
self.assertEqual(Y.primitive, Pauli(label='Y'))
self.assertEqual(Z.primitive, Pauli(label='Z'))
self.assertEqual(I.primitive, Pauli(label='I'))
def test_composed_eval(self):
""" Test eval of ComposedOp """
self.assertAlmostEqual(Minus.eval('1'), -.5 ** .5)
def test_evals(self):
""" evals test """
# pylint: disable=no-member
# TODO: Think about eval names
self.assertEqual(Z.eval('0').eval('0'), 1)
self.assertEqual(Z.eval('1').eval('0'), 0)
self.assertEqual(Z.eval('0').eval('1'), 0)
self.assertEqual(Z.eval('1').eval('1'), -1)
self.assertEqual(X.eval('0').eval('0'), 0)
self.assertEqual(X.eval('1').eval('0'), 1)
self.assertEqual(X.eval('0').eval('1'), 1)
self.assertEqual(X.eval('1').eval('1'), 0)
self.assertEqual(Y.eval('0').eval('0'), 0)
self.assertEqual(Y.eval('1').eval('0'), -1j)
self.assertEqual(Y.eval('0').eval('1'), 1j)
self.assertEqual(Y.eval('1').eval('1'), 0)
with self.assertRaises(ValueError):
Y.eval('11')
with self.assertRaises(ValueError):
(X ^ Y).eval('1111')
with self.assertRaises(ValueError):
Y.eval((X ^ X).to_matrix_op())
# Check that Pauli logic eval returns same as matrix logic
self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('0').eval('0'), 1)
self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('1').eval('0'), 0)
self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('0').eval('1'), 0)
self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('1').eval('1'), -1)
self.assertEqual(PrimitiveOp(X.to_matrix()).eval('0').eval('0'), 0)
self.assertEqual(PrimitiveOp(X.to_matrix()).eval('1').eval('0'), 1)
self.assertEqual(PrimitiveOp(X.to_matrix()).eval('0').eval('1'), 1)
self.assertEqual(PrimitiveOp(X.to_matrix()).eval('1').eval('1'), 0)
self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('0').eval('0'), 0)
self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('1').eval('0'), -1j)
self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('0').eval('1'), 1j)
self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('1').eval('1'), 0)
pauli_op = Z ^ I ^ X ^ Y
mat_op = PrimitiveOp(pauli_op.to_matrix())
full_basis = list(map(''.join, itertools.product('01', repeat=pauli_op.num_qubits)))
for bstr1, bstr2 in itertools.product(full_basis, full_basis):
# print('{} {} {} {}'.format(bstr1, bstr2, pauli_op.eval(bstr1, bstr2),
# mat_op.eval(bstr1, bstr2)))
np.testing.assert_array_almost_equal(pauli_op.eval(bstr1).eval(bstr2),
mat_op.eval(bstr1).eval(bstr2))
gnarly_op = SummedOp([(H ^ I ^ Y).compose(X ^ X ^ Z).tensor(Z),
PrimitiveOp(Operator.from_label('+r0I')),
3 * (X ^ CX ^ T)], coeff=3 + .2j)
gnarly_mat_op = PrimitiveOp(gnarly_op.to_matrix())
full_basis = list(map(''.join, itertools.product('01', repeat=gnarly_op.num_qubits)))
for bstr1, bstr2 in itertools.product(full_basis, full_basis):
np.testing.assert_array_almost_equal(gnarly_op.eval(bstr1).eval(bstr2),
gnarly_mat_op.eval(bstr1).eval(bstr2))
def test_circuit_construction(self):
""" circuit construction test """
hadq2 = H ^ I
cz = hadq2.compose(CX).compose(hadq2)
qc = QuantumCircuit(2)
qc.append(cz.primitive, qargs=range(2))
ref_cz_mat = PrimitiveOp(CZGate()).to_matrix()
np.testing.assert_array_almost_equal(cz.to_matrix(), ref_cz_mat)
def test_io_consistency(self):
""" consistency test """
new_op = X ^ Y ^ I
label = 'XYI'
# label = new_op.primitive.to_label()
self.assertEqual(str(new_op.primitive), label)
np.testing.assert_array_almost_equal(new_op.primitive.to_matrix(),
Operator.from_label(label).data)
self.assertEqual(new_op.primitive, Pauli(label=label))
x_mat = X.primitive.to_matrix()
y_mat = Y.primitive.to_matrix()
i_mat = np.eye(2, 2)
np.testing.assert_array_almost_equal(new_op.primitive.to_matrix(),
np.kron(np.kron(x_mat, y_mat), i_mat))
hi = np.kron(H.to_matrix(), I.to_matrix())
hi2 = Operator.from_label('HI').data
hi3 = (H ^ I).to_matrix()
np.testing.assert_array_almost_equal(hi, hi2)
np.testing.assert_array_almost_equal(hi2, hi3)
xy = np.kron(X.to_matrix(), Y.to_matrix())
xy2 = Operator.from_label('XY').data
xy3 = (X ^ Y).to_matrix()
np.testing.assert_array_almost_equal(xy, xy2)
np.testing.assert_array_almost_equal(xy2, xy3)
# Check if numpy array instantiation is the same as from Operator
matrix_op = Operator.from_label('+r')
np.testing.assert_array_almost_equal(PrimitiveOp(matrix_op).to_matrix(),
PrimitiveOp(matrix_op.data).to_matrix())
# Ditto list of lists
np.testing.assert_array_almost_equal(PrimitiveOp(matrix_op.data.tolist()).to_matrix(),
PrimitiveOp(matrix_op.data).to_matrix())
# TODO make sure this works once we resolve endianness mayhem
# qc = QuantumCircuit(3)
# qc.x(2)
# qc.y(1)
# from qiskit import BasicAer, QuantumCircuit, execute
# unitary = execute(qc, BasicAer.get_backend('unitary_simulator')).result().get_unitary()
# np.testing.assert_array_almost_equal(new_op.primitive.to_matrix(), unitary)
def test_to_matrix(self):
"""to matrix text """
np.testing.assert_array_equal(X.to_matrix(), Operator.from_label('X').data)
np.testing.assert_array_equal(Y.to_matrix(), Operator.from_label('Y').data)
np.testing.assert_array_equal(Z.to_matrix(), Operator.from_label('Z').data)
op1 = Y + H
np.testing.assert_array_almost_equal(op1.to_matrix(), Y.to_matrix() + H.to_matrix())
op2 = op1 * .5
np.testing.assert_array_almost_equal(op2.to_matrix(), op1.to_matrix() * .5)
op3 = (4 - .6j) * op2
np.testing.assert_array_almost_equal(op3.to_matrix(), op2.to_matrix() * (4 - .6j))
op4 = op3.tensor(X)
np.testing.assert_array_almost_equal(op4.to_matrix(),
np.kron(op3.to_matrix(), X.to_matrix()))
op5 = op4.compose(H ^ I)
np.testing.assert_array_almost_equal(op5.to_matrix(), np.dot(op4.to_matrix(),
(H ^ I).to_matrix()))
op6 = op5 + PrimitiveOp(Operator.from_label('+r').data)
np.testing.assert_array_almost_equal(
op6.to_matrix(), op5.to_matrix() + Operator.from_label('+r').data)
param = Parameter("α")
m = | np.array([[0, -1j], [1j, 0]]) | numpy.array |
import cPickle as pickle
import numpy as np
import mxnet as mx
fr = open('/media/E/models/detectron/res18_1mlp_fpn64_320.pkl')
# fr=open('/media/E/models/detectron/res18_1mlp_fpn64_512.pkl')
# fr=open('/home/long/github/detectron/detectron-output/res18_1mlp_fpn64_512/train/fisher_train_221:fisher_val_221/generalized_rcnn/model_final.pkl')
# fr =open('/media/E/models/resnet/resnet18caffe2.pkl')
#fr =open('/home/long/github/detectron/detectron-output/fish_1mlp_fpn64_512/train/fisher_train_221:fisher_val_221/generalized_rcnn/model_final.pkl')
#fr =open('/home/long/github/detectron/detectron-output/fish_1mlp_fpn128_512/train/fisher_train_221:fisher_val_221/generalized_rcnn/model_final.pkl')
# fr = open('/media/E/models/detectron/compactfishfasterfpn50.pkl')
# fr = open('/home/long/github/detectron/detectron-output/fisherall/train/fisher_train:fisher_val/generalized_rcnn/model_final.pkl')
# fr = open('/home/long/github/detectron/detectron-output/lighthead/train/coco_2014_train:coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
# fr = open('/media/E/models/detectron/e2e_faster_rcnn_R-50-FPN_2x.pkl')
# fr = open('/home/long/github/detectron/models/detectron/ImageNetPretrained/R-50.pkl')
# fr = open('/home/long/github/MobileNet-Caffe/mobilenet.caffemodel')
# fr = open('/home/long/github/MobileNet-Caffe/mobilenet-0000.params')
#
inf = pickle.load(fr)
lines = fr.readlines()
fr.close()
blobs = inf['blobs']
# blobs=inf
# track the extreme blob values without shadowing the built-in max/min
max_val = 0
min_val = 0
for k, v in blobs.items():
    if np.max(v) > max_val:
        max_val = np.max(v)
    if np.min(v) < min_val:
        min_val = np.min(v)
print('blob value range: [{}, {}]'.format(min_val, max_val))
import numpy as np
import scipy.sparse
import tensorflow as tf
class ProductFn:
"""Abstract class. Instances can be passed to function `fsvd`.
    An instance of (a concrete implementation of) this class would hold an implicit
    matrix `M`, such that this class is able to multiply it with another matrix
`m` (by implementing function `dot`).
Attribute `T` should evaluate to a `ProductFn` with implicit matrix being
transpose of `M`.
`shape` attribute must evaluate to shape of `M`
"""
def dot(self, m):
raise NotImplementedError(
'dot: must be able to multiply (implicit) matrix by another matrix `m`.')
@property
def T(self):
raise NotImplementedError(
'T: must return instance of ProductFn that is transpose of this one.')
@property
def shape(self):
raise NotImplementedError(
'shape: must return shape of implicit matrix.')
## Functional TF implementation of Truncated Singular Value Decomposition
# The algorithm is based on Halko et al 2009 and their recommendations, with
# some ideas adopted from code of scikit-learn.
def fsvd(fn, k, n_redundancy=None, n_iter=10):
"""Functional TF Randomized SVD based on Halko et al 2009
Args:
fn: Instance of a class implementing ProductFn. Should hold implicit matrix
`M` with (arbitrary) shape. Then, it must be that `fn.shape == (r, c)`,
and `fn.dot(M1)` where `M1` has shape `(c, s)` must return `M @ M1` with
shape `(r, s)`. Further, `fn.T.dot(M2)` where M2 has shape `(r, h)` must
      return `M.T @ M2` with shape `(c, h)`.
k: rank of decomposition. Returns (approximate) top-k singular values in S
and their corresponding left- and right- singular vectors in U, V, such
that, `tf.matmul(U * S, V, transpose_b=True)` is the best rank-k
approximation of matrix `M` (implicitly) stored in `fn`.
n_redundancy: rank of "randomized" decomposition of Halko. The analysis of
Halko provides that if n_redundancy == k, then the rank-k SVD approximation
      is, in expectation, no worse (in Frobenius norm) than twice that of the
      "true" rank-k SVD of the (implicit) matrix represented by fn.
However, n_redundancy == k is too slow when k is large. Default sets it
to min(k, 30).
n_iter: Number of iterations. >=4 gives good results (with 4 passes over the
data). We set to 10 (slower than 4) to ensure close approximation accuracy.
The error decays exponentially with n_iter.
Returns:
U, s, V, s.t. tf.matmul(U*s, V, transpose_b=True) is a rank-k approximation
of fn.
"""
if n_redundancy is None:
n_redundancy = min(k, 30)
n_random = k + n_redundancy
n_samples, n_features = fn.shape
transpose = n_samples < n_features
if transpose:
# This is faster
fn = fn.T
Q = tf.random.normal(shape=(fn.shape[1], n_random))
for i in range(n_iter):
# Halko says it is more accurate (but slower) to do QR decomposition here.
# TODO: Provide a faster (but less accurate) version.
Q, _ = tf.linalg.qr(fn.dot(Q))
Q, _ = tf.linalg.qr(fn.T.dot(Q))
Q, _ = tf.linalg.qr(fn.dot(Q))
B = tf.transpose(fn.T.dot(Q))
s, Uhat, V = tf.linalg.svd(B)
del B
U = tf.matmul(Q, Uhat)
U, V = _sign_correction(u=U, v=V, u_based_decision=not transpose)
if transpose:
return V[:, :k], s[:k], U[:, :k]
else:
return U[:, :k], s[:k], V[:, :k]
def _sign_correction(u, v, u_based_decision=True):
M = u if u_based_decision else v
max_abs_cols = tf.argmax(tf.abs(M), axis=0)
signs = tf.sign(tf.gather_nd(M, tf.stack([max_abs_cols, tf.range(M.shape[1], dtype=tf.int64)], axis=1)))
return u*signs, v*signs
# End of: Functional TF implementation of Truncated Singular Value Decomposition
##
#### ProductFn implementations.
class SparseMatrixPF(ProductFn):
"""The "implicit" matrix comes directly from a scipy.sparse.csr_matrix
This is the most basic version: i.e., this really only extends TensorFlow to
run "sparse SVD" on a matrix. The given `scipy.sparse.csr_matrix` will be
converted to `tf.sparse.SparseTensor`.
"""
def __init__(self, csr_mat=None, precomputed_tfs=None, T=None):
"""Constructs matrix from csr_mat (or alternatively, tf.sparse.tensor).
Args:
csr_mat: instance of scipy.sparse.csr_mat (or any other sparse matrix
class). This matrix will only be read once and converted to
tf.sparse.SparseTensor.
precomputed_tfs: (optional) matrix (2D) instance of tf.sparse.SparseTensor.
if not given, will be initialized from `csr_mat`.
T: (do not provide) if given, must be instance of ProductFn with implicit
matrix as the transpose of this one. If not provided (recommended) it
will be automatically (lazily) computed.
"""
if precomputed_tfs is None and csr_mat is None:
raise ValueError('Require at least one of csr_mat or precomputed_tfs')
if precomputed_tfs is None:
rows, cols = csr_mat.nonzero()
values = np.array(csr_mat[rows, cols], dtype='float32')[0]
precomputed_tfs = tf.sparse.SparseTensor(
tf.stack([np.array(rows, dtype='int64'), np.array(cols, dtype='int64')], axis=1),
values,
csr_mat.shape)
self._shape = precomputed_tfs.shape
self.csr_mat = csr_mat
self.tfs = precomputed_tfs # tensorflow sparse tensor.
self._t = T
def dot(self, v):
return tf.sparse.sparse_dense_matmul(self.tfs, v)
@property
def T(self):
"""Returns ProductFn with implicit matrix being transpose of this one."""
if self._t is None:
self._t = SparseMatrixPF(
self.csr_mat.T if self.csr_mat is not None else None,
precomputed_tfs=tf.sparse.transpose(self.tfs),
T=self)
return self._t
@property
def shape(self):
return self._shape
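# Illustrative usage sketch (added example, not part of the original module):
# run `fsvd` on a random sparse matrix wrapped in `SparseMatrixPF` and return
# the Frobenius norm of the rank-k reconstruction error. Sizes, density and
# rank are arbitrary example values.
def _example_sparse_fsvd(rows=1000, cols=500, rank=32):
    mat = scipy.sparse.random(rows, cols, density=0.01, format='csr', dtype='float32')
    u, s, v = fsvd(SparseMatrixPF(mat), k=rank)
    # Best rank-k reconstruction implied by the returned factors.
    approx = tf.matmul(u * s, v, transpose_b=True)
    return float(tf.norm(tf.convert_to_tensor(mat.toarray()) - approx))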
class BlockWisePF(ProductFn):
"""Product that concatenates, column-wise, one or more (implicit) matrices.
    Constructor takes one or more ProductFn instances, all of which must contain
    the same number of rows (e.g., = r) but can have different numbers of columns
    (e.g., c1, c2, c3, ...). As expected, the resulting shape will have the same
    number of rows as the input matrices, and the number of columns is the sum
    of the numbers of columns of the inputs (shape = (r, c1+c2+c3+...)).
"""
def __init__(self, fns, T=None, concat_axis=1):
"""Concatenate (implicit) matrices stored in `fns`, column-wise.
Args:
fns: list. Each entry must be an instance of class implementing ProductFn.
T: (do not provide) if given, must be instance of ProductFn with implicit
matrix as the transpose of this one. If not provided (recommended) it
will be automatically (lazily) computed.
concat_axis: fixed to 1 (i.e. concatenates column-wise).
"""
self.fns = fns
self._t = T
self.concat_axis = concat_axis
@property
def shape(self):
size_other_axis = self.fns[0].shape[1 - self.concat_axis]
for fn in self.fns[1:]:
assert fn.shape[1 - self.concat_axis] == size_other_axis
total = sum([fn.shape[self.concat_axis] for fn in self.fns])
myshape = [0, 0]
myshape[self.concat_axis] = total
myshape[1 - self.concat_axis] = size_other_axis
return tuple(myshape)
def dot(self, v):
if self.concat_axis == 0:
dots = [fn.dot(v) for fn in self.fns]
return tf.concat(dots, axis=self.concat_axis)
else:
dots = []
offset = 0
for fn in self.fns:
fn_columns = fn.shape[1]
dots.append(fn.dot(v[offset:offset+fn_columns]))
offset += fn_columns
return tf.reduce_sum(dots, axis=0)
@property
def T(self):
"""Returns ProductFn with implicit matrix being transpose of this one."""
if self._t is None:
fns_T = [fn.T for fn in self.fns]
self._t = BlockWisePF(fns_T, T=self, concat_axis=1 - self.concat_axis)
return self._t
class DenseMatrixPF(ProductFn):
"""Product function where implicit matrix is Dense tensor.
On its own, this is not needed as one could just run tf.linalg.svd directly
    on the implicit matrix. However, this is useful when a dense matrix is to be
    concatenated (column-wise) next to a SparseMatrixPF (or any other implicit matrix)
implementing ProductFn.
"""
def __init__(self, m, T=None):
"""
Args:
m: tf.Tensor (dense 2d matrix). This will be the "implicit" matrix.
T: (do not provide) if given, must be instance of ProductFn with implicit
matrix as the transpose of this one. If not provided (recommended) it
will be automatically (lazily) computed.
"""
self.m = m
self._t = T
def dot(self, v):
return tf.matmul(self.m, v)
@property
def shape(self):
return self.m.shape
@property
def T(self):
"""Returns ProductFn with implicit matrix being transpose of this one."""
if self._t is None:
self._t = DenseMatrixPF(tf.transpose(self.m), T=self)
return self._t
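# Illustrative sketch (added example): column-wise concatenation of a sparse
# adjacency block and a dense feature block via BlockWisePF, factorised jointly
# without materialising the concatenated matrix. Sizes and rank are arbitrary.
def _example_blockwise_fsvd(n=200, f=8, rank=16):
    adj = scipy.sparse.random(n, n, density=0.05, format='csr', dtype='float32')
    feats = tf.random.normal((n, f))
    combined = BlockWisePF([SparseMatrixPF(adj), DenseMatrixPF(feats)])
    # U has shape (n, rank); V has shape (n + f, rank) for the implicit [adj | feats].
    return fsvd(combined, k=rank)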
class WYSDeepWalkPF(ProductFn):
"""ProductFn for matrix approximating Watch Your Step derivation of DeepWalk.
"""
def __init__(self, csr_adj, window=10, mult_degrees=False,
Q=None, neg_sample_coef=None,
tfs_unnormalized=None, tfs_normalized=None, tfs_degrees=None,
T=None):
"""Constructs (implicit) matrix as approximating WYS derivation of DeepWalk.
    The implicit matrix looks like:
        M = sum_{i=1..C} q_i * T^i
    where T is the row-normalized transition matrix and q_i is the i-th entry of
    the context-distribution vector `Q`.
Optionally (following WYS codebase):
M := M * degrees # only if `mult_degrees` is set.
Args:
csr_adj: Binary adjacency matrix as scipy.sparse.csr_mat (or any other
scipy.sparse matrix class). Read only once and converted to tensorflow.
window: Context window size (hyperparameter is C in WYS & our paper).
      mult_degrees: If set, the implicit matrix will be multiplied by the diagonal
        matrix of node degrees. Effectively, this starts a number of walks from
        each node proportional to its degree.
Q: Context distribution. Vector of size `C=window` that will be used for
looking up q_1, ..., q_C. Entries should be positive but need not add
        to one. In the paper, the entries are referred to as c_1, ..., c_C.
neg_sample_coef: Scalar coefficient of the `(1-A)` term in implicit matrix
`M`.
tfs_unnormalized: Optional. If given, it must be a 2D matrix of type
`tf.sparse.Tensor` containing the adjacency matrix (i.e. must equal
to csr_adj, but with type tf). If not given, it will be constructed
from `csr_adj`.
tfs_normalized: Optional. If given, it must be a 2D matrix of type
`tf.sparse.Tensor` containing the row-normalized transition matrix i.e.
each row should sum to one. If not given, it will be computed.
tfs_degrees: Optional. It will be computed if tfs_normalized is to be
computed. If given, it must be a tf.sparse.SparseTensor diagonal matrix
containing node degrees along the diagonal.
"""
self.mult_degrees = mult_degrees
self.neg_sample_coef = neg_sample_coef
self._t = T # Transpose
self.window = window
self.csr_mat = csr_adj
if Q is None:
Q = window - tf.range(window, dtype='float32') # Default of deepwalk per WYS
self.Q = Q
rows, cols = csr_adj.nonzero()
n, _ = csr_adj.shape
if tfs_unnormalized is None:
tfs_unnormalized = tf.sparse.SparseTensor(
tf.stack([np.array(rows, dtype='int64'), np.array(cols, dtype='int64')], axis=1),
tf.ones(len(rows), dtype=tf.float32),
(n, n))
self.tfs_unnormalized = tfs_unnormalized
if tfs_normalized is None:
# Normalize
degrees = np.array(csr_adj.sum(axis=1))[:, 0]
degrees = np.clip(degrees, 1, None)
inv_degrees = scipy.sparse.diags(1.0/degrees)
csr_normalized = inv_degrees.dot(csr_adj)
tfs_normalized = tf.sparse.SparseTensor(
tf.stack([np.array(rows, dtype='int64'), | np.array(cols, dtype='int64') | numpy.array |
"""Methods for adaptively setting algorithmic parameters of transitions."""
from abc import ABC, abstractmethod
from math import exp, log
import numpy as np
from mici.errors import IntegratorError, AdaptationError
from mici.matrices import PositiveDiagonalMatrix, DensePositiveDefiniteMatrix
class Adapter(ABC):
"""Abstract adapter for implementing schemes to adapt transition parameters.
Adaptation schemes are assumed to be based on updating a collection of adaptation
variables (collectively termed the adapter state here) after each chain transition
based on the sampled chain state and/or statistics of the transition such as an
acceptance probability statistic. After completing a chain of one or more adaptive
transitions, the final adapter state may be used to perform a final update to the
transition parameters.
"""
@abstractmethod
def initialize(self, chain_state, transition):
"""Initialize adapter state prior to starting adaptive transitions.
Args:
chain_state (mici.states.ChainState): Initial chain state adaptive
transition will be started from. May be used to calculate initial
adapter state but should not be mutated by method.
transition (mici.transitions.Transition): Markov transition being adapted.
Attributes of the transition or child objects may be updated in-place by
the method.
Returns:
adapt_state (Dict[str, Any]): Initial adapter state.
"""
@abstractmethod
def update(self, adapt_state, chain_state, trans_stats, transition):
"""Update adapter state after sampling transition being adapted.
Args:
adapt_state (Dict[str, Any]): Current adapter state. Entries will be updated
in-place by the method.
chain_state (mici.states.ChainState): Current chain state following sampling
from transition being adapted. May be used to calculate adapter state
updates but should not be mutated by method.
trans_stats (Dict[str, numeric]): Dictionary of statistics associated with
transition being adapted. May be used to calculate adapter state updates
but should not be mutated by method.
transition (mici.transitions.Transition): Markov transition being adapted.
Attributes of the transition or child objects may be updated in-place by
the method.
"""
@abstractmethod
def finalize(self, adapt_states, chain_states, transition, rngs):
"""Update transition parameters based on final adapter state or states.
Optionally, if multiple adapter states are available, e.g. from a set of
        independent adaptive chains, then the adaptation information from all the
chains may be combined to set the transition parameter(s).
Args:
adapt_states (Dict[str, Any] or List[Dict[str, Any]]): Final adapter state
or a list of per chain adapter states. Arrays / buffers associated with
the adapter state entries may be recycled to reduce memory usage - if so
the corresponding entries will be removed from the adapter state
dictionary / dictionaries.
chain_states (ChainState or List[mici.states.ChainState]): Final state of
chain (or states of chains) in current sampling stage. May be updated
in-place if transition parameters altered by adapter require updating
any state components.
            transition (mici.transitions.Transition): Markov transition being adapted.
Attributes of the transition or child objects will be updated in-place
by the method.
rngs (numpy.random.Generator or List[numpy.random.Generator]): Random number
generator for the chain or a list of per-chain random number generators.
Used to resample any components of states needing to be updated due to
adaptation if required.
"""
@property
@abstractmethod
def is_fast(self):
"""Whether the adapter is 'fast' or 'slow'.
An adapter which requires only local information to adapt the transition
parameters should be classified as fast while one which requires more
global information and so more chain iterations should be classified
as slow i.e. `is_fast == False`.
"""
class DualAveragingStepSizeAdapter(Adapter):
"""Dual averaging integrator step size adapter.
    Implementation of the dual averaging step size adaptation algorithm described in
[1], a modified version of the stochastic optimisation scheme of [2]. By default the
adaptation is performed to control the `accept_prob` statistic of an integration
transition to be close to a target value but the statistic adapted on can be altered
by changing the `adapt_stat_func`.
References:
        1. Hoffman, M.D. and Gelman, A., 2014. The No-U-turn sampler: adaptively setting
path lengths in Hamiltonian Monte Carlo. Journal of Machine Learning Research,
15(1), pp.1593-1623.
2. Nesterov, Y., 2009. Primal-dual subgradient methods for convex problems.
Mathematical programming 120(1), pp.221-259.
"""
is_fast = True
def __init__(
self,
adapt_stat_target=0.8,
adapt_stat_func=None,
log_step_size_reg_target=None,
log_step_size_reg_coefficient=0.05,
iter_decay_coeff=0.75,
iter_offset=10,
max_init_step_size_iters=100,
):
"""
Args:
adapt_stat_target (float): Target value for the transition statistic
being controlled during adaptation.
adapt_stat_func (Callable[[Dict[str, numeric]], numeric]): Function which
given a dictionary of transition statistics outputs the value of the
statistic to control during adaptation. By default this is set to a
function which simply selects the 'accept_stat' value in the statistics
dictionary.
log_step_size_reg_target (float or None): Value to regularize the controlled
output (logarithm of the integrator step size) towards. If `None` set to
`log(10 * init_step_size)` where `init_step_size` is the initial
'reasonable' step size found by a coarse search as recommended in
Hoffman and Gelman (2014). This has the effect of giving the dual
averaging algorithm a tendency towards testing step sizes larger than
the initial value, with typically integrating with a larger step size
having a lower computational cost.
log_step_size_reg_coefficient (float): Coefficient controlling amount of
regularisation of controlled output (logarithm of the integrator step
size) towards `log_step_size_reg_target`. Defaults to 0.05 as
recommended in Hoffman and Gelman (2014).
iter_decay_coeff (float): Coefficient controlling exponent of decay in
schedule weighting stochastic updates to smoothed log step size
estimate. Should be in the interval (0.5, 1] to ensure asymptotic
convergence of adaptation. A value of 1 gives equal weight to the whole
history of updates while setting to a smaller value increasingly highly
weights recent updates, giving a tendency to 'forget' early updates.
Defaults to 0.75 as recommended in Hoffman and Gelman (2014).
iter_offset (int): Offset used for the iteration based weighting of the
adaptation statistic error estimate. Should be set to a non-negative
value. A value > 0 has the effect of stabilising early iterations.
Defaults to the value of 10 as recommended in Hoffman and Gelman (2014).
max_init_step_size_iters (int): Maximum number of iterations to use in
initial search for a reasonable step size with an `AdaptationError`
exception raised if a suitable step size is not found within this many
iterations.
"""
self.adapt_stat_target = adapt_stat_target
if adapt_stat_func is None:
def adapt_stat_func(stats):
return stats["accept_stat"]
self.adapt_stat_func = adapt_stat_func
self.log_step_size_reg_target = log_step_size_reg_target
self.log_step_size_reg_coefficient = log_step_size_reg_coefficient
self.iter_decay_coeff = iter_decay_coeff
self.iter_offset = iter_offset
self.max_init_step_size_iters = max_init_step_size_iters
def initialize(self, chain_state, transition):
integrator = transition.integrator
system = transition.system
adapt_state = {
"iter": 0,
"smoothed_log_step_size": 0.0,
"adapt_stat_error": 0.0,
}
init_step_size = self._find_and_set_init_step_size(
chain_state, system, integrator
)
if self.log_step_size_reg_target is None:
adapt_state["log_step_size_reg_target"] = log(10 * init_step_size)
else:
adapt_state["log_step_size_reg_target"] = self.log_step_size_reg_target
return adapt_state
def _find_and_set_init_step_size(self, state, system, integrator):
"""Find initial step size by coarse search using single step statistics.
Adaptation of Algorithm 4 in Hoffman and Gelman (2014).
Compared to the Hoffman and Gelman algorithm, this version makes two changes:
1. The absolute value of the change in Hamiltonian over a step being larger or
smaller than log(2) is used to determine whether the step size is too big
or small as opposed to the value of the equivalent Metropolis accept
probability being larger or smaller than 0.5. Although a negative change in
the Hamiltonian over a step of magnitude more than log(2) will lead to an
accept probability of 1 for the forward move, the corresponding reversed
move will have an accept probability less than 0.5, and so a change in the
Hamiltonian over a step of magnitude more than log(2) irrespective of the
sign of the change is indicative of the minimum acceptance probability over
both forward and reversed steps being less than 0.5.
2. To allow for integrators for which an integrator step may fail due to e.g.
a convergence error in an iterative solver, the step size is also
considered to be too big if any of the step sizes tried in the search
result in a failed integrator step, with in this case the step size always
being decreased on subsequent steps irrespective of the initial Hamiltonian
error, until a integrator step successfully completes and the absolute
value of the change in Hamiltonian is below the threshold of log(2)
(corresponding to a minimum acceptance probability over forward and
reversed steps of 0.5).
"""
init_state = state.copy()
h_init = system.h(init_state)
if np.isnan(h_init):
raise AdaptationError("Hamiltonian evaluating to NaN at initial state.")
integrator.step_size = 1
delta_h_threshold = log(2)
for s in range(self.max_init_step_size_iters):
try:
state = integrator.step(init_state)
delta_h = abs(h_init - system.h(state))
if s == 0 or np.isnan(delta_h):
step_size_too_big = np.isnan(delta_h) or delta_h > delta_h_threshold
if (step_size_too_big and delta_h <= delta_h_threshold) or (
not step_size_too_big and delta_h > delta_h_threshold
):
return integrator.step_size
elif step_size_too_big:
integrator.step_size /= 2
else:
integrator.step_size *= 2
except IntegratorError:
step_size_too_big = True
integrator.step_size /= 2
raise AdaptationError(
f"Could not find reasonable initial step size in "
f"{self.max_init_step_size_iters} iterations (final step size "
f"{integrator.step_size}). A very large final step size may "
f"indicate that the target distribution is improper such that the "
f"negative log density is flat in one or more directions while a "
f"very small final step size may indicate that the density function"
f" is insufficiently smooth at the point initialized at."
)
def update(self, adapt_state, chain_state, trans_stats, transition):
adapt_state["iter"] += 1
error_weight = 1 / (self.iter_offset + adapt_state["iter"])
adapt_state["adapt_stat_error"] *= 1 - error_weight
adapt_state["adapt_stat_error"] += error_weight * (
self.adapt_stat_target - self.adapt_stat_func(trans_stats)
)
smoothing_weight = (1 / adapt_state["iter"]) ** self.iter_decay_coeff
log_step_size = adapt_state["log_step_size_reg_target"] - (
adapt_state["adapt_stat_error"]
* adapt_state["iter"] ** 0.5
/ self.log_step_size_reg_coefficient
)
adapt_state["smoothed_log_step_size"] *= 1 - smoothing_weight
adapt_state["smoothed_log_step_size"] += smoothing_weight * log_step_size
transition.integrator.step_size = exp(log_step_size)
def finalize(self, adapt_states, chain_states, transition, rngs):
if isinstance(adapt_states, dict):
transition.integrator.step_size = exp(
adapt_states["smoothed_log_step_size"]
)
else:
transition.integrator.step_size = sum(
exp(adapt_state["smoothed_log_step_size"])
for adapt_state in adapt_states
) / len(adapt_states)
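# Standalone numerical sketch (added for illustration, not part of mici): the same
# dual averaging recursion implemented in `update`/`finalize` above, applied to a
# synthetic sequence of acceptance statistics. Constants mirror the class defaults;
# `reg_target` corresponds to log(10 * init_step_size) with init_step_size = 1.
def _dual_averaging_demo(accept_stats, target=0.8, reg_target=log(10.0),
                         reg_coefficient=0.05, decay_coeff=0.75, offset=10):
    smoothed_log_step_size, stat_error = 0.0, 0.0
    for i, accept_stat in enumerate(accept_stats, start=1):
        error_weight = 1 / (offset + i)
        stat_error = (1 - error_weight) * stat_error + error_weight * (target - accept_stat)
        smoothing_weight = (1 / i) ** decay_coeff
        log_step_size = reg_target - stat_error * i ** 0.5 / reg_coefficient
        smoothed_log_step_size = (
            (1 - smoothing_weight) * smoothed_log_step_size
            + smoothing_weight * log_step_size
        )
    return exp(smoothed_log_step_size)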
class OnlineVarianceMetricAdapter(Adapter):
"""Diagonal metric adapter using online variance estimates.
Uses Welford's algorithm [1] to stably compute an online estimate of the sample
variances of the chain state position components during sampling. If online
estimates are available from multiple independent chains, the final variance
estimate is calculated from the per-chain statistics using the parallel / batched
incremental variance algorithm described by Chan et al. [2]. The variance estimates
are optionally regularized towards a common scalar value, with increasing weight for
small number of samples, to decrease the effect of noisy estimates for small sample
sizes, following the approach in Stan [3]. The metric matrix representation is set
to a diagonal matrix with diagonal elements corresponding to the reciprocal of the
(regularized) variance estimates.
References:
        1. Welford, B.P., 1962. Note on a method for calculating corrected sums of
           squares and products. Technometrics, 4(3), pp. 419–420.
        2. Chan, T.F., Golub, G.H. and LeVeque, R.J., 1979. Updating formulae and a
           pairwise algorithm for computing sample variances. Technical Report
           STAN-CS-79-773, Department of Computer Science, Stanford University.
        3. Carpenter, B., Gelman, A., Hoffman, M.D., Lee, D., Goodrich, B., Betancourt,
           M., Brubaker, M., Guo, J., Li, P. and Riddell, A., 2017. Stan: A probabilistic
programming language. Journal of Statistical Software, 76(1).
"""
is_fast = False
def __init__(self, reg_iter_offset=5, reg_scale=1e-3):
"""
Args:
reg_iter_offset (int): Iteration offset used for calculating iteration
dependent weighting between regularisation target and current covariance
estimate. Higher values cause stronger regularisation during initial
iterations. A value of zero corresponds to no regularisation; this
should only be used if the sample covariance is guaranteed to be
positive definite.
reg_scale (float): Positive scalar defining value variance estimates are
regularized towards.
"""
self.reg_iter_offset = reg_iter_offset
self.reg_scale = reg_scale
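    # Sketch of the Welford recursion referenced in the class docstring (the online
    # update itself lives in this class's `update` method): with running mean m_t and
    # sum of squared deviations M2_t, a new position sample x gives
    #   m_t  = m_{t-1} + (x - m_{t-1}) / t
    #   M2_t = M2_{t-1} + (x - m_{t-1}) * (x - m_t)
    # so that M2_t / t is the variance estimate after t samples, which `finalize`
    # then regularizes towards `reg_scale`, with more weight on the regularizer for
    # small t.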
def initialize(self, chain_state, transition):
return {
"iter": 0,
"mean": | np.zeros_like(chain_state.pos) | numpy.zeros_like |
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
"""
General module describing crystalline material properties
NOTE:
    1. Currently supported lattices:
None
cubic, bcc, fcc
hexagonal, hex, hcp
orthorhombic, ortho
tetragonal, tet
    2. When comparing orientation-related quantities, it is better to restrict
        them to the same reference frame, such as the sample or lab frame.
"""
import numpy as np
from dataclasses import dataclass
from hexomap.npmath import normalize
from hexomap.orientation import Quaternion
from hexomap.orientation import Orientation
from hexomap.orientation import sym_operator
def in_fundamental_zone(o: "Orientation", lattice: str) -> bool:
"""
Description
-----------
    Check if the orientation is in its fundamental zone by checking its
Rodrigues representation.
Parameter
---------
o: Orientation
Orientation represents a certain attitude
lattice: str
Lattice symmetry
Returns
-------
bool
NOTE:
migrated from DAMASK.orientation module
"""
r = | np.absolute(o.as_rodrigues.as_array) | numpy.absolute |
import itertools
import os
import shutil
import sys
import glob
from collections import defaultdict
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, \
accuracy_score, cohen_kappa_score
from sklearn.metrics import f1_score
from matplotlib import gridspec
import seaborn as sns
CLASSES = ['W', 'N1', 'N2', 'N3', 'REM']
def get_basename_(path):
name = os.path.basename(os.path.normpath(path))
    # cut off the leading number used for ordering
if len(name)>1 and name[1] == '_':
name = name.split("_")[-1]
return name
def cm_figure_(prediction, truth, classes, configuration_name):
classes = classes.copy()
cm = confusion_matrix(truth, prediction, labels=range(len(classes)))
num_classes = cm.shape[0]
per_class_metrics = np.array(
precision_recall_fscore_support(truth, prediction, beta=1.0,
labels=range(
len(classes)))).T.round(2)
cm_norm = cm.astype('float') * 1 / (cm.sum(axis=1)[:, np.newaxis]+1e-7)
cm_norm = np.nan_to_num(cm_norm, copy=True)
fig = plt.figure(figsize=(3, 2), dpi=320, facecolor='w',
edgecolor='k')
ax = fig.add_subplot(1, 1, 1)
im = ax.imshow(
np.concatenate((cm_norm, np.zeros((len(classes), 4))), axis=1),
cmap='Oranges')
classes += ['PR', 'RE', 'F1', 'S']
xtick_marks = np.arange(len(classes))
ytick_marks = np.arange(len(classes) - 4)
ax.set_xlabel('Predicted', fontsize=5, weight='bold')
ax.set_xticks(xtick_marks)
c = ax.set_xticklabels(classes, fontsize=5, ha='center')
#ax.xaxis.set_label_position('top')
#ax.xaxis.tick_top()
ax.set_ylabel('True Label', fontsize=5, weight='bold')
ax.set_yticks(ytick_marks)
ax.set_yticklabels(classes[:-4], fontsize=5, va='center')
ax.yaxis.set_label_position('left')
ax.yaxis.tick_left()
ax.set_title(configuration_name, fontsize=5, horizontalalignment='center')
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
ax.text(j, i, '{}\n({:.2f})'.format(cm[i, j], cm_norm[i, j]),
horizontalalignment="center", fontsize=5,
verticalalignment='center', color="black")
for i, j in itertools.product(range(cm.shape[0]),
range(cm.shape[1], cm.shape[1] + 4)):
val = per_class_metrics[i, j - num_classes]
ax.text(j, i, val if j != cm.shape[1] + 3 else int(val),
horizontalalignment="center", fontsize=5,
verticalalignment='center', color="black")
return fig
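# Minimal usage sketch (added example): build a confusion-matrix figure for random
# predictions over the five sleep stages in CLASSES; values here are arbitrary.
def _example_cm_figure(n_epochs=500, seed=0):
    rng = np.random.RandomState(seed)
    truth = rng.randint(0, len(CLASSES), size=n_epochs)
    prediction = rng.randint(0, len(CLASSES), size=n_epochs)
    return cm_figure_(prediction, truth, list(CLASSES), "random baseline")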
def heatmap(data, row_labels, col_labels, ax=None,
cbar_kw={}, cbarlabel="", xlabel=None, ylabel=None, **kwargs):
"""
Create a heatmap from a numpy array and two lists of labels.
Arguments:
data : A 2D numpy array of shape (N,M)
row_labels : A list or array of length N with the labels
for the rows
col_labels : A list or array of length M with the labels
for the columns
Optional arguments:
ax : A matplotlib.axes.Axes instance to which the heatmap
is plotted. If not provided, use current axes or
create a new one.
cbar_kw : A dictionary with arguments to
:meth:`matplotlib.Figure.colorbar`.
cbarlabel : The label for the colorbar
All other arguments are directly passed on to the imshow call.
"""
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, **kwargs)
# Create colorbar
#cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
#cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
# ... and label them with the respective list entries.
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=False, bottom=True,
labeltop=False, labelbottom=True)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=0, ha="center",
rotation_mode="anchor")
# Turn spines off and create white grid.
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
ax.tick_params(which="minor", bottom=False, left=False)
return im
def annotate_heatmap(im, data=None, valfmt="{x:.2f}",
textcolors=["black", "white"],
threshold=None, **textkw):
"""
A function to annotate a heatmap.
Arguments:
im : The AxesImage to be labeled.
Optional arguments:
data : Data used to annotate. If None, the image's data is used.
valfmt : The format of the annotations inside the heatmap.
This should either use the string format method, e.g.
"$ {x:.2f}", or be a :class:`matplotlib.ticker.Formatter`.
textcolors : A list or array of two color specifications. The first is
used for values below a threshold, the second for those
above.
threshold : Value in data units according to which the colors from
textcolors are applied. If None (the default) uses the
middle of the colormap as separation.
Further arguments are passed on to the created text labels.
"""
import matplotlib
if not isinstance(data, (list, np.ndarray)):
data = im.get_array()
# Normalize the threshold to the images color range.
if threshold is not None:
threshold = im.norm(threshold)
else:
threshold = im.norm(data.max())/2.
# Set default alignment to center, but allow it to be
# overwritten by textkw.
kw = dict(horizontalalignment="center",
verticalalignment="center", fontsize=8)
kw.update(textkw)
# Get the formatter in case a string is supplied
if isinstance(valfmt, str):
valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)
# Loop over the data and create a `Text` for each "pixel".
# Change the text's color depending on the data.
texts = []
for i in range(data.shape[0]):
for j in range(data.shape[1]):
if data[i, j] <=1:
kw.update(color=textcolors[im.norm(data[i, j]) > threshold])
text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)
else:
text = im.axes.text(j, i, "{:d}".format(int(data[i, j]), None),
**kw)
texts.append(text)
return texts
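# Small usage sketch (added example): render an annotated per-class score matrix
# with the two helpers above; the score values are made up for illustration.
def _example_heatmap():
    scores = np.array([[0.91, 0.45, 0.88, 0.86, 0.82],
                       [0.89, 0.38, 0.90, 0.84, 0.80]])
    fig, ax = plt.subplots(figsize=(4, 2), dpi=120)
    im = heatmap(scores, ['precision', 'recall'], CLASSES, ax=ax, cmap='YlGn')
    annotate_heatmap(im, valfmt="{x:.2f}")
    return fig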
def table_plot_(table, yticks, xticks, agg_table: bool = True):
num_yticks = len(yticks)
# m.configs]
aggs = np.stack([np.mean(table, 0), np.std(table, 0)], axis=0)
#fig = plt.figure(figsize=(8.27, 11.69), dpi=320, facecolor='w',
# edgecolor='k')
fig = plt.figure(figsize=(len(xticks), .5*len(yticks)), dpi=120,
facecolor='w',
edgecolor='k')
gs = gridspec.GridSpec(num_yticks + 4, len(xticks))
ax1 = fig.add_subplot(gs[:num_yticks, :])
# plt.suptitle(PREFIX, fontsize=12)
# ax1 = plt.subplot(211)#fig.add_subplot(2, 1, 1)
ax1.imshow(table[:num_yticks], cmap='YlGn', aspect="auto")
for i, j in itertools.product(range(num_yticks),
range(table.shape[1])):
ax1.text(j, i, '{:.3f}'.format(table[i, j]),
horizontalalignment="center", fontsize=10,
verticalalignment='center', color="black")
ytick_marks = np.arange(num_yticks)
ax1.set_yticks(ytick_marks)
ax1.set_yticklabels(yticks)
ax1.set_xticklabels([])
if agg_table:
ax2 = fig.add_subplot(gs[num_yticks + 1:, :])
ax2.imshow(aggs, cmap='YlGn', aspect="auto")
# ax2.set_aspect('equal', 'box')
# plt.imshow(table,cmap='Oranges')
for i, j in itertools.product(range(aggs.shape[0]),
range(aggs.shape[1])):
ax2.text(j, i, '{:.3f}'.format(aggs[i, j]),
horizontalalignment="center", fontsize=10,
verticalalignment='center', color="black")
ytick_marks = np.arange(2)
ax2.set_yticks(ytick_marks)
ax2.set_yticklabels(['mean', 'std'])
ax1 = ax2
xtick_marks = np.arange(len(xticks))
ax1.set_xticks(xtick_marks)
ax1.set_xticklabels(xticks, rotation=60)
return fig
def table_plot_folded_(table, yticks, xticks, agg_table: bool = False):
yticks = [y.replace("WESA_","").replace("_MLready.npz","") for y in yticks]
num_yticks = (len(yticks)+1) //2
max_yticks = len(yticks)
xticks = xticks + xticks
# m.configs]
min_val = min([min(t) for t in table])
max_val = max([max(t) for t in table])
aggs = np.stack([np.mean(table, 0), np.std(table, 0)], axis=0)
#fig = plt.figure(figsize=(8.27, 11.69), dpi=320, facecolor='w',
# edgecolor='k')
fig = plt.figure(figsize=(len(xticks), .5*num_yticks), dpi=120,
facecolor='w',
edgecolor='k')
gs = gridspec.GridSpec(num_yticks + 4, len(xticks))
ax1 = fig.add_subplot(gs[:num_yticks, :(len(xticks)//2)])
ax1.imshow(table[:num_yticks], cmap='YlGn', aspect="auto", vmin=min_val, vmax=max_val)
for i, j in itertools.product(range(num_yticks),
range(table.shape[1])):
ax1.text(j, i, '{:.3f}'.format(table[i, j]),
horizontalalignment="center", fontsize=10,
verticalalignment='center', color="black")
ax2 = fig.add_subplot(gs[:num_yticks, (len(xticks)//2):])
ax2.imshow(table[num_yticks:], cmap='YlGn', aspect="auto", vmin=min_val, vmax=max_val)
for i, j in itertools.product(range(num_yticks, max_yticks),
range(table.shape[1])):
ax2.text(j, i-num_yticks, '{:.3f}'.format(table[i, j]),
horizontalalignment="center", fontsize=10,
verticalalignment='center', color="black")
ytick_marks = np.arange(num_yticks)
ax1.set_yticks(ytick_marks)
ax1.set_yticklabels(yticks[:num_yticks])
ax1.set_xticklabels([])
#plt.draw()
#yax = ax1.get_yaxis()
#pad = max(T.label.get_window_extent().width for T in yax.majorTicks)
#yax.set_tick_params(pad=pad)
ytick_marks = np.arange(num_yticks)
ax2.set_yticks(ytick_marks)
ax2.set_yticklabels(yticks[num_yticks:])
ax2.set_xticklabels([])
if agg_table:
ax3 = fig.add_subplot(gs[num_yticks + 1:, :])
ax3.imshow(aggs, cmap='YlGn', aspect="auto")
# ax2.set_aspect('equal', 'box')
# plt.imshow(table,cmap='Oranges')
for i, j in itertools.product(range(aggs.shape[0]),
range(aggs.shape[1])):
ax3.text(j, i, '{:.3f}'.format(aggs[i, j]),
horizontalalignment="center", fontsize=10,
verticalalignment='center', color="black")
ytick_marks = np.arange(2)
ax3.set_yticks(ytick_marks)
ax3.set_yticklabels(['mean', 'std'])
xtick_marks = np.arange(len(xticks) // 2)
ax3.set_xticks(xtick_marks)
ax3.set_xticklabels(xticks, rotation=60)
#ax1 = ax2
xtick_marks = np.arange(len(xticks)//2)
ax1.set_xticks(xtick_marks)
ax1.set_xticklabels(xticks, rotation=60)
ax1.tick_params(labelbottom=False, labeltop=True, labelleft=True, labelright=False,
bottom=False, top=True, left=True, right=False)
ax2.set_xticks(xtick_marks)
ax2.set_xticklabels(xticks, rotation=60)
ax2.tick_params(labelbottom=False, labeltop=True, labelleft=False, labelright=True,
bottom=False, top=True, left=False, right=True)
return fig
class Model(object):
def __init__(self, path):
self.name = get_basename_(path)
self.path = path
print(f"model {self.name}")
self.configs = [Configurations(p) for p in sorted(glob.glob(path + '/*'))]
class Runs(object):
def __init__(self, path):
self.name = get_basename_(path)
print(f"runs: {self.name}")
self.path = path
self.subjects = sorted(glob.glob(path + '/*'))
class Configurations(object):
def __init__(self, path):
self.name = get_basename_(path)
self.path = path
print(f"config: {self.name}")
self.runs = [Runs(p) for p in sorted(glob.glob(path + '/*'))]
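# Assumed on-disk layout for these wrapper classes, inferred from the glob patterns
# above (directory names are placeholders):
#
#   <evaluation_path>/<model>/<config>/<run>/<subject_result_file>
#
# Evaluation globs models, each Model globs its Configurations, each Configurations
# globs its Runs, and each Runs lists the per-subject result files consumed by
# read_subject_file in the Evaluation methods below.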
class Evaluation(object):
def __init__(self, path):
self.path = path
self.models = [Model(p) for p in sorted(glob.glob(path + '/*'))]
def cm(self):
for i, model in enumerate(self.models):
runs = []
for config in model.configs:
runs.append(config.name)
truth = []
prediction = []
run = config.runs[0]
for path in run.subjects:
result = self.read_subject_file(path)
truth.append(result['y_true'])
prediction.append(result['y_pred'])
truth = list(itertools.chain.from_iterable(truth))
prediction = list(itertools.chain.from_iterable(prediction))
cm = confusion_matrix(truth, prediction,
labels=range(5))
cm_norm = cm.astype('float') * 1 / (
cm.sum(axis=1)[:, np.newaxis] + 1e-7)
cm_norm = np.nan_to_num(cm_norm, copy=True)
fig, (ax2) = plt.subplots(1, 1,
figsize=(2.5,2.5),
dpi=200) #
plt.subplots_adjust(hspace=.05)
fig.suptitle(get_basename_(model.name),
fontsize=8, weight="bold",y=0.93)
per_class_metrics = np.array(
precision_recall_fscore_support(truth, prediction, beta=1.0,
labels=range(
5))).round(
2)
#im = heatmap(per_class_metrics, ['PR', 'RE', 'F1', 'S'],
# ('W', 'N1', 'N2', 'N3', 'REM'),
# ax=ax1, cmap="YlGn", vmin=0,vmax=1e10,
# aspect='auto')
#texts = annotate_heatmap(im, valfmt="{x:.2f} ")
im = heatmap(cm_norm, ('W', 'N1', 'N2', 'N3', 'REM'),
('W', 'N1', 'N2', 'N3', 'REM'),
ax=ax2, cmap="YlGn", aspect='auto',
xlabel="Predicted Label", ylabel="True Label")
texts = annotate_heatmap(im, valfmt="{x:.2f} ")
#ax2.get_shared_x_axes().join(ax1, ax2)
#ax1.tick_params(axis="x", labelbottom=0)
#ax1.tick_params(
# axis='x', # changes apply to the x-axis
# which='both', # both major and minor ticks are affected
# bottom=False, # ticks along the bottom edge are off
# top=False, # ticks along the top edge are off
# labelbottom=False) # labels along the bottom edge are off
try:
plt.savefig("cv_plots/cv_cm_" + model.name + ".eps", dpi=300, transparent=True, bbox_inches="tight")
except:
print("Failed saving plot.")
def boxplot(self, xlabel=None, ymin=.4):
models = []
rows = []
for i, model in enumerate(self.models):
models.append(model.name)
configs = []
for config in model.configs:
configs.append(config.name)
if len(config.runs) == 0: continue
run = config.runs[0]
for path in run.subjects:
result = self.read_subject_file(path)
acc = result['acc']/100
rows.append([get_basename_(path), model.name, config.name,
acc])
df = pd.DataFrame(rows, columns=['subject', 'model', 'config',
'accuracy'])
fig, ax = plt.subplots(figsize=(6,4), dpi=120)
#ax.set_title("Subject-wise accuracy", fontsize=14)
ax = sns.boxplot(x="config", y="accuracy", hue="model", data=df,
#palette="Set3",
order=[c.name for c in self.models[0].configs])
ax.tick_params(labelsize=10)
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=10)
else:
ax.set_xlabel("")
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15),
fancybox=True, shadow=True, ncol=5, fontsize=10)
ax.set_axisbelow(True)
ax.yaxis.grid(color='gray', linestyle='dashed')
ax.set_ylim(ymin=ymin, ymax=1)
ax.set_ylabel('subject accuracy', fontsize=10)
def bar(self, xlabel=None, ymin=0.4):
models = []
means = []
stds = []
rows = []
for i, model in enumerate(self.models):
models.append(model.name)
runs = []
model_mean = []
model_std = []
for config in model.configs:
runs.append(config.name)
accs = np.array([])
for j, run in enumerate(config.runs):
truth = []
prediction = []
for path in run.subjects:
result = self.read_subject_file(path)
truth.append(result['y_true'])
prediction.append(result['y_pred'])
truth = list(itertools.chain.from_iterable(truth))
prediction = list(itertools.chain.from_iterable(prediction))
acc = accuracy_score(truth, prediction)
f1m = f1_score(truth, prediction, average='macro')
_, _, f1c, _ = precision_recall_fscore_support(truth,
prediction,
beta=1.0,
labels=range(
5))
kappa = cohen_kappa_score(truth, prediction)
rows.append(
[model.name, config.name, acc, f1m, kappa] + list(f1c))
accs = np.append(accs, acc)
model_mean.append(np.mean(accs))
model_std.append(np.std(accs))
means.append(model_mean)
stds.append(model_std)
cols = ['model', 'config',
'accuracy', 'f1m', 'kappa', 'W',
'N1', 'N2', 'N3', 'R']
df = pd.DataFrame(rows, columns=cols)
fig, ax = plt.subplots(figsize=(6, 4), dpi=120)
res = df.groupby(['model', 'config'], as_index=False)[cols].mean()
print(res.round(3).to_latex())
ax.set_title("Overall accuracy")
ax = sns.barplot(x="config", y="accuracy", hue="model", data=df,
#palette="Set3",
order=[c.name for c in self.models[0].configs])
ax.tick_params(labelsize=10)
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=10)
else:
ax.set_xlabel("")
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15),
fancybox=True, shadow=True, ncol=5, fontsize=10)
ax.set_axisbelow(True)
ax.yaxis.grid(color='gray', linestyle='dashed')
ax.set_ylim(ymin=ymin, ymax=1)
ax.set_ylabel('accuracy', fontsize=10)
def hypnogram(self, index=0, models=None, config=None, start=None,
end=None):
models = self.models if models is None else [m for m in self.models
if m.name in models]
if len(models) == 0: raise ValueError("no matching models found!")
f, axarr = plt.subplots(len(models), 1, squeeze=False,
sharex=True, sharey=True,
figsize=(10, 3.5 * len(models)), dpi=320)
plt.yticks(range(5), ['W', 'N1', 'N2', 'N3', 'REM'], fontsize=10)
for i, model in enumerate(models):
cfg = model.configs[0] if config is None else\
next((item for item in model.configs if item.name == config),
None)
if cfg is None:
raise ValueError(f"config {config} not found")
run = cfg.runs[0]
path = run.subjects[index]
subject = get_basename_(path)
f.suptitle(f"{subject}", fontsize=12)
result = self.read_subject_file(path)
# only part of record
if start is None and end is None:
end = len(result['y_pred'])
start = 0
axarr[i, 0].set_xlim(xmin=start, xmax=end)
axarr[i, 0].plot(range(len(result['y_pred'])), result['y_pred'],
label="prediction")
axarr[i, 0].set_ylim(ymin=0.0)
#axarr[i, 0].plot(range(len(result['y_true'])), result[
# 'y_true'], alpha=0.9, label="truth", linestyle=':')
wrong = np.argwhere(np.not_equal(result['y_true'], result[
'y_pred']))
axarr[i, 0].plot(wrong, result['y_true'][wrong], '.',
label="error")
acc = result['acc']
#axarr[i, 0].set_title(f"{model.name} ({cfg.name}) - "
axarr[i, 0].set_title(f"{model.name} [ACC: {acc:.2f}%]",
fontsize=10)
# f"[{acc:.2f}%]", fontsize=10)
if 'attention' in result.keys():
ax2 = axarr[i, 0].twinx()
# same x-axis
color = 'tab:green'
ax2.set_ylabel('attention', color=color, fontsize=10)
attention = result['attention']
ax2.plot(range(len(attention)), attention, color=color)
ax2.tick_params(axis='y', labelcolor=color)
ax2.set_ylim(0.0, 1)
if 'drop' in result.keys():
dropped = np.argwhere(result['drop'])
for d in dropped:
axarr[i, 0].axvspan(d-0.5, d+0.5, alpha=0.2, color='red')
axarr[i, 0].legend(loc='upper center', bbox_to_anchor=(0.5, -0.15),
fancybox=True, shadow=True, ncol=5, fontsize=12)
axarr[i, 0].set_xlabel("epoch", fontsize=10)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
def table(self, folded=False):
table = []
for i, model in enumerate(self.models):
for config in model.configs:
column = []
run = config.runs[0]
for path in run.subjects:
result = self.read_subject_file(path)
column.append(result['acc'])
table.append(column)
table = np.vstack(table).T
subjects = [get_basename_(p) for p in run.subjects]
xticks = [m.name + '-' + r.name for m in self.models for r in m.configs]
if folded:
table_plot_folded_(table, subjects, xticks)
else:
table_plot_(table, subjects, xticks)
try:
plt.savefig("cv_plots/cv_tab_" + model.name + ".eps", dpi=300, transparent=True, bbox_inches="tight")
except:
print("Failed saving plot.")
def att_subject_table(self):
att_models = []
table = []
for i, model in enumerate(self.models):
for config in model.configs:
column = []
run = config.runs[0]
for path in run.subjects:
result = self.read_subject_file(path)
if not 'attention' in result.keys():
continue
column.append(np.mean(result['attention']))
if column != []:
table.append(column)
att_models.append(model.name + f"({config.name})")
table = np.vstack(table).T
subjects = [get_basename_(p) for p in run.subjects]
#xticks = [m.name + '-' + r.name for m in self.models for r in
# m.configs]
table_plot_(table, subjects, att_models)
def att_table(self):
att_models = []
table = []
for i, model in enumerate(self.models):
for config in model.configs:
print(model.name)
column = [[],[],[],[],[]]
run = config.runs[0]
for path in run.subjects:
result = self.read_subject_file(path)
if not 'attention' in result.keys():
continue
att_per_label = zip(result['y_pred'], result['attention'])
assert(not np.isnan(np.min(result['attention'])))
for label, a in att_per_label:
column[label].append(a)
if column != [[],[],[],[],[]]:
column = [np.mean(np.array(av)) if av != [] else 0 for av
in column]
table.append(column)
att_models.append(model.name)
table = np.vstack(table)
table_plot_(table, att_models, ['W', 'N1', "N2", "N3", "REM"],
agg_table=False)
def extract_experts(self):
def get_acc(prediction, truth):
wrong = np.argwhere(np.not_equal(truth, prediction))
acc = 100 * (1 - (len(wrong) / len(truth)))
return acc
for i, model in enumerate(self.models):
configs = []
true_label_dict = None
for config in model.configs:
experts = None
soft_votes_dict = defaultdict(lambda : [])
hard_votes_dict = defaultdict(lambda : [])
true_label_dict = {}
configs.append(config.name)
accs = np.array([])
if len(config.runs) == 0: continue
run = config.runs[0]
# print("run: ", run.name)
for path in run.subjects:
result = self.read_subject_file(path)
subject = get_basename_(path)
expert_base_path = os.path.join(self.path, os.path.basename(
config.path))
if experts is None:
experts = result['expert_channels']
for expert in experts:
os.makedirs(
os.path.join(self.path, 'Expert-' +
expert, os.path.basename(config.path), 'Expert-' +
expert))
voting_models = ['SOFT-V', 'MAJ-V']
for new_model in voting_models:
path = os.path.join(self.path, new_model, os.path.basename(
config.path), os.path.basename(
config.path))
if os.path.exists(path) and os.path.isdir(path):
shutil.rmtree(path)
for new_model in voting_models:
os.makedirs(os.path.join(self.path, new_model, os.path.basename(
config.path), os.path.basename(
config.path)))
for i in range(result['y_experts'].shape[1]):
y_expert_prob = result['y_experts'][:, i, :]
y_expert_pred = np.argmax(y_expert_prob, 1)
expert = result['expert_channels'][i]
y_true = result['y_true']
true_label_dict[subject] = y_true
a = result['a'][:, i]
drop = None
if 'drop_channels' in result.keys():
drop = result['drop_channels'][:, i]
hard_votes_dict[subject].append(y_expert_pred)
soft_votes_dict[subject].append(y_expert_prob)
wrong = np.argwhere(np.not_equal(y_true, y_expert_pred))
acc = 100*(1-wrong.shape[0]/len(y_expert_pred))
savepath = os.path.join(self.path, 'Expert-' +
expert, os.path.basename(config.path), 'Expert-' +
expert, subject)
savedict = {'y_true': y_true, 'y_pred': y_expert_pred,
'acc': acc, 'attention': a}
if drop is not None:
savedict['drop'] = drop
np.savez(savepath, **savedict)
for subject, predictions in soft_votes_dict.items():
soft_votes = np.array(predictions)
soft_vote = np.mean(soft_votes, axis=0)
soft_vote = | np.argmax(soft_vote, axis=1) | numpy.argmax |
#!/usr/bin/env python
# encoding: utf-8
"""
towerstruc.py
Created by <NAME> on 2012-01-20.
Copyright (c) NREL. All rights reserved.
HISTORY: 2012 created
-7/2014: R.D. Bugs found in the call to shellBucklingEurocode from towerwithFrame3DD. Fixed.
Also set_as_top added.
-10/2014: R.D. Merged back with some changes Andrew did on his end.
-12/2014: A.N. fixed some errors from the merge (redundant drag calc). pep8 compliance. removed several unneccesary variables and imports (including set_as_top)
- 6/2015: A.N. major rewrite. removed pBEAM. can add spring stiffness anywhere. can add mass anywhere.
can use different material props throughout.
- 7/2015 : R.D. modified to use commonse modules.
- 1/2018 : G.B. modified for easier use with other modules, reducing user input burden, and shifting more to commonse
"""
from __future__ import print_function
import numpy as np
from openmdao.api import Component, Group, Problem, IndepVarComp
from commonse.WindWaveDrag import AeroHydroLoads, CylinderWindDrag, CylinderWaveDrag
from commonse.environment import WindBase, WaveBase, LinearWaves, TowerSoil, PowerWind, LogWind
from commonse.tube import CylindricalShellProperties
from commonse.utilities import assembleI, unassembleI, nodal2sectional
from commonse import gravity, eps, NFREQ
from commonse.vertical_cylinder import CylinderDiscretization, CylinderMass, CylinderFrame3DD
#from fusedwind.turbine.tower import TowerFromCSProps
#from fusedwind.interface import implement_base
import commonse.UtilizationSupplement as Util
# -----------------
# Components
# -----------------
class TowerDiscretization(Component):
def __init__(self):
super(TowerDiscretization, self).__init__()
        self.add_param('hub_height', val=0.0, units='m', desc='desired hub height of turbine')
self.add_param('z_end', val=0.0, units='m', desc='Last node point on tower')
self.add_output('height_constraint', val=0.0, units='m', desc='mismatch between tower height and desired hub_height')
def solve_nonlinear(self, params, unknowns, resids):
unknowns['height_constraint'] = params['hub_height'] - params['z_end']
def linearize(self, params, unknowns, resids):
J = {}
J['height_constraint','hub_height'] = 1
J['height_constraint','z_end'] = -1
return J
class TowerMass(Component):
def __init__(self, nPoints):
super(TowerMass, self).__init__()
self.add_param('cylinder_mass', val=np.zeros(nPoints-1), units='kg', desc='Total cylinder mass')
self.add_param('cylinder_cost', val=0.0, units='USD', desc='Total cylinder cost')
self.add_param('cylinder_center_of_mass', val=0.0, units='m', desc='z-position of center of mass of cylinder')
self.add_param('cylinder_section_center_of_mass', val=np.zeros(nPoints-1), units='m', desc='z position of center of mass of each can in the cylinder')
self.add_param('cylinder_I_base', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia of cylinder about base [xx yy zz xy xz yz]')
self.add_output('tower_raw_cost', val=0.0, units='USD', desc='Total tower cost')
self.add_output('tower_mass', val=0.0, units='kg', desc='Total tower mass')
self.add_output('tower_center_of_mass', val=0.0, units='m', desc='z-position of center of mass of tower')
self.add_output('tower_section_center_of_mass', val=np.zeros(nPoints-1), units='m', desc='z position of center of mass of each can in the tower')
self.add_output('tower_I_base', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia of tower about base [xx yy zz xy xz yz]')
def solve_nonlinear(self, params, unknowns, resids):
unknowns['tower_raw_cost'] = params['cylinder_cost']
unknowns['tower_mass'] = params['cylinder_mass'].sum()
unknowns['tower_center_of_mass'] = params['cylinder_center_of_mass']
unknowns['tower_section_center_of_mass'] = params['cylinder_section_center_of_mass']
unknowns['tower_I_base'] = params['cylinder_I_base']
def linearize(self, params, unknowns, resids):
npts = len(params['cylinder_section_center_of_mass'])
zeroPts = np.zeros(npts)
zero6 = np.zeros(6)
J = {}
        J['tower_mass','cylinder_mass'] = np.ones(len(params['cylinder_mass']))
J['tower_mass','cylinder_cost'] = 0.0
J['tower_mass','cylinder_center_of_mass'] = 0.0
J['tower_mass','cylinder_section_center_of_mass'] = zeroPts
J['tower_mass','cylinder_I_base'] = zero6
        J['tower_raw_cost','cylinder_mass'] = np.zeros(len(params['cylinder_mass']))
J['tower_raw_cost','cylinder_cost'] = 1.0
J['tower_raw_cost','cylinder_center_of_mass'] = 0.0
J['tower_raw_cost','cylinder_section_center_of_mass'] = zeroPts
J['tower_raw_cost','cylinder_I_base'] = zero6
J['tower_center_of_mass','cylinder_mass'] = 0.0
J['tower_center_of_mass','cylinder_cost'] = 0.0
J['tower_center_of_mass','cylinder_center_of_mass'] = 1.0
J['tower_center_of_mass','cylinder_section_center_of_mass'] = zeroPts
J['tower_center_of_mass','cylinder_I_base'] = zero6
J['tower_section_center_of_mass','cylinder_mass'] = 0.0
J['tower_section_center_of_mass','cylinder_cost'] = 0.0
J['tower_section_center_of_mass','cylinder_center_of_mass'] = 0.0
J['tower_section_center_of_mass','cylinder_section_center_of_mass'] = np.eye(npts)
J['tower_section_center_of_mass','cylinder_I_base'] = np.zeros((npts,6))
        J['tower_I_base','cylinder_mass'] = np.zeros((6, npts))
J['tower_I_base','cylinder_cost'] = 0.0
J['tower_I_base','cylinder_center_of_mass'] = 0.0
J['tower_I_base','cylinder_section_center_of_mass'] = np.zeros((6,npts))
J['tower_I_base','cylinder_I_base'] = np.eye(len(params['cylinder_I_base']))
return J
class TurbineMass(Component):
def __init__(self):
super(TurbineMass, self).__init__()
self.add_param('hubH', val=0.0, units='m', desc='Hub-height')
self.add_param('rna_mass', val=0.0, units='kg', desc='Total tower mass')
self.add_param('rna_I', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia of rna about tower top [xx yy zz xy xz yz]')
self.add_param('rna_cg', np.zeros((3,)), units='m', desc='xyz-location of rna cg relative to tower top')
self.add_param('tower_mass', val=0.0, units='kg', desc='Total tower mass')
self.add_param('tower_center_of_mass', val=0.0, units='m', desc='z-position of center of mass of tower')
self.add_param('tower_I_base', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia of tower about base [xx yy zz xy xz yz]')
self.add_output('turbine_mass', val=0.0, units='kg', desc='Total mass of tower+rna')
self.add_output('turbine_center_of_mass', val=np.zeros((3,)), units='m', desc='xyz-position of tower+rna center of mass')
self.add_output('turbine_I_base', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia of tower about base [xx yy zz xy xz yz]')
# Derivatives
self.deriv_options['type'] = 'fd'
self.deriv_options['form'] = 'central'
self.deriv_options['step_calc'] = 'relative'
self.deriv_options['step_size'] = 1e-5
def solve_nonlinear(self, params, unknowns, resids):
unknowns['turbine_mass'] = params['rna_mass'] + params['tower_mass']
cg_rna = params['rna_cg'] + | np.array([0.0, 0.0, params['hubH']]) | numpy.array |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import networkx as nx
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import cm
import msprime
from sklearn.decomposition import PCA
from scipy.spatial.distance import pdist, squareform
import pickle as pkl
import os
class GenotypeSimulator(object):
"""Class for simulating genotypes under the coalescent
    given a habitat, a directed graph over which individuals
    migrate
Arguments
---------
hab : Habitat
habitat object
sim_path: str
path to simulation pkl file
chrom_length: float
length of chrom to simulate
mu: float
mutation rate
n_samp: int
n haploid samples per deme
n_rep: int
        number of independent regions to simulate from
eps: float
min derived allele frequency for filtering out rare variants
Attributes
----------
hab : Habitat
habitat object
chrom_length: float
length of chrom to simulate
mu: float
mutation rate
n_samp: int
n haploid samples per deme
n_rep: int
        number of independent regions to simulate from
eps: float
min derived allele frequency for filtering out rare variants
y : array
n x p genotype matrix
tree_sequences :
        genealogies object
n : int
number of individuals
p : int
number of snps
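    Examples
    --------
    Minimal usage sketch (illustrative only; assumes ``hab`` is an existing
    ``Habitat`` instance and the pickle path below is arbitrary)::
        sim = GenotypeSimulator(hab, "genotypes.pkl", n_samp=10, n_rep=1000)
        print(sim.y.shape)  # (number of individuals, number of SNPs)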
"""
def __init__(self, hab, sim_path, chrom_length=1, mu=1e-3, n_e=1,
n_samp=10, n_rep=1e4, eps=.05):
# habitat object
self.hab = hab
# choromosome length
self.chrom_length = chrom_length
# mutation rate
self.mu = mu
# effective sizes
self.n_e = n_e
# number of haploids per deme
self.n_samp = n_samp
        # number of independent chunks to simulate
self.n_rep = n_rep
# min derived allele frequency to filter out
self.eps = eps
# if the simulation was already performed extract genotypes
if os.path.exists(sim_path):
with open(sim_path, 'rb') as geno:
self.y = pkl.load(geno)
# otherwise run the simulation
else:
# simulate geneologies from the defined model
self._simulate_trees()
self._simulate_genotypes()
with open(sim_path, 'wb') as geno:
pkl.dump(self.y, geno)
# number of snps
self.n, self.p = self.y.shape
# node ids for each individual
self.v = np.repeat(self.hab.v, int(self.n / self.hab.d)).T
# spatial positions for each individual
self.s = np.vstack([np.repeat(self.hab.s[:,0], int(self.n / self.hab.d)),
np.repeat(self.hab.s[:,1], int(self.n / self.hab.d))]).T
def _simulate_trees(self):
"""Simulate trees under the coalescent migration model
defined in the habitat with constant population
sizes
"""
# simulate trees
population_configurations = [msprime.PopulationConfiguration(sample_size=self.n_samp) for _ in range(self.hab.d)]
self.tree_sequences = msprime.simulate(population_configurations=population_configurations,
migration_matrix=self.hab.m.tolist(),
length=self.chrom_length,
mutation_rate=self.mu,
num_replicates=self.n_rep,
Ne=self.n_e)
def _simulate_genotypes(self):
"""Extract trees and simulate mutations in each
independent region to obtain a genotype matrix
"""
# extract mutations
genotypes = []
# loop through each region
for i,tree_sequence in enumerate(self.tree_sequences):
if i % 250 == 0:
print('extracting tree {}'.format(i))
shape = tree_sequence.get_num_mutations(), tree_sequence.get_sample_size()
            g = np.empty(shape, dtype="u1")
import argparse
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import time
from scipy import stats
from sklearn.metrics import r2_score
import math
# Force using CPU globally by hiding GPU(s)
tf.config.set_visible_devices([], 'GPU')
# import edl
import evidential_deep_learning as edl
import data_loader
import trainers
import models
from models.toy.h_params import h_params
import itertools
tf.config.threading.set_intra_op_parallelism_threads(1)
import random
data_name = 'flight_delay'
original_data_path = '../flight_delay_data/'
results_path = './Results_DER/'+data_name + '_DER_results.txt'
save_loss_history = False
save_loss_history_path = './Results_DER/loss_history/'
plot_loss_history = False
plot_loss_history_path = './Results_DER/loss_curves/'
parser = argparse.ArgumentParser()
parser.add_argument("--num-trials", default=1, type=int,
help="Number of trials to repreat training for \
statistically significant results.")
parser.add_argument("--num-epochs", default=100, type=int)
parser.add_argument('--datasets', nargs='+', default=["flight_delay"],
choices=['flight_delay'])
dataset = data_name
# learning_rate = h_params[dataset]["learning_rate"]
# batch_size = h_params[dataset]["batch_size"]
learning_rate = 1e-4
batch_size = 512
neurons = 100
### New flight delay data loader for customized train/test data, same as in the PI3NN method
xTrain, yTrain, yTrain_scale, test_data_list = data_loader.load_flight_delays('../flight_delay_data/')
# '''choose the train/test dataset '''
x_train = xTrain
y_train = yTrain
y_scale = yTrain_scale
test_idx = 0 # [0, 1, 2, 3] for test 1,2,3,4
x_test = test_data_list[test_idx][0]
y_test = test_data_list[test_idx][1]
seed = 12345
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
args = parser.parse_args()
args.datasets[0] = data_name
training_schemes = [trainers.Evidential]
datasets = args.datasets
print('--- Printing datasets:')
print(datasets)
num_trials = args.num_trials
print('num_trials:{}'.format(num_trials))
# num_trials = 3
num_epochs = args.num_epochs
dev = "/cpu:0" # for small datasets/models cpu is faster than gpu
"""" ================================================"""
RMSE = np.zeros((len(datasets), len(training_schemes), num_trials))
NLL = np.zeros((len(datasets), len(training_schemes), num_trials))
PICP_arr = np.zeros(num_trials)
MPIW_arr = np.zeros(num_trials)
R2_arr = np.zeros(num_trials)
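# Illustrative helper (not called by the training loop below): given targets and
# lower/upper prediction bounds, compute PICP (interval coverage) and MPIW (mean
# interval width) with their standard definitions. Variable names are examples only.
def compute_picp_mpiw(y_true, y_low, y_up):
    y_true = np.asarray(y_true).reshape(-1)
    y_low = np.asarray(y_low).reshape(-1)
    y_up = np.asarray(y_up).reshape(-1)
    covered = (y_true >= y_low) & (y_true <= y_up)
    picp = np.mean(covered)        # fraction of targets falling inside the interval
    mpiw = np.mean(y_up - y_low)   # mean prediction interval width
    return picp, mpiw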
for di, dataset in enumerate(datasets):
# print(di)
# print(dataset)
for ti, trainer_obj in enumerate(training_schemes):
for n in range(num_trials):
print('*********************************************')
print('--- data: {}, trial: {}'.format(data_name, n+1))
print('*********************************************')
# batch_size = h_params[dataset]["batch_size"]
num_iterations = num_epochs * x_train.shape[0]//batch_size
print('num_epochs: {}, num_x_data: {}, batch_size: {}, total iters {} = {} * {} // {}'.format(num_epochs, x_train.shape[0], batch_size, num_iterations, num_epochs, x_train.shape[0], batch_size))
done = False
while not done:
with tf.device(dev):
model_generator = models.get_correct_model(dataset="toy", trainer=trainer_obj)
model, opts = model_generator.create(input_shape=x_train.shape[1:], num_neurons=neurons, tf_seed=seed)
trainer = trainer_obj(model, opts, dataset, learning_rate=learning_rate)
model, rmse, nll, loss = trainer.train(x_train, y_train, x_test, y_test, y_scale, batch_size=batch_size, iters=num_iterations,
verbose=True, data_name=data_name, rnd_seed=seed, trial_num=n,
bool_plot_loss=False, bool_save_loss=True,
save_loss_path=save_loss_history_path,
plot_loss_path=plot_loss_history_path)
''' Evaluate the PICP and MPIW for each trial '''
### taken from the 'plot_ng' function from the original evidential regression code
x_test_input_tf = tf.convert_to_tensor(x_test, tf.float32)
outputs = model(x_test_input_tf)
mu, v, alpha, beta = tf.split(outputs, 4, axis=1)
epistemic_var = np.sqrt(beta / (v * (alpha - 1)))
epistemic_var = np.minimum(epistemic_var, 1e3)
y_pred_U = mu.numpy() + epistemic_var * 1.96
y_pred_L = mu.numpy() - epistemic_var * 1.96
# print('y_pred_U: {}'.format(y_pred_U))
# print('y_pred_L: {}'.format(y_pred_L))
''' Do same thing for training data in order to do OOD analysis '''
x_train_input_tf = tf.convert_to_tensor(x_train, tf.float32)
outputs_train = model(x_train_input_tf)
mu_train, v_train, alpha_train, beta_train = tf.split(outputs_train, 4, axis=1)
epistemic_var_train = np.sqrt(beta_train / (v_train * (alpha_train - 1)))
epistemic_var_train = np.minimum(epistemic_var_train, 1e3)
y_pred_U_train = mu_train.numpy() + epistemic_var_train * 1.96
y_pred_L_train = mu_train.numpy() - epistemic_var_train * 1.96
                    if np.isnan(y_pred_U).any() or np.isnan(y_pred_L).any():
from abc import ABC, abstractmethod
from sigpipes import features
from sigpipes.sigcontainer import SigContainer, DPath
from sigpipes.sigfuture import SigFuture, SignalSpace
from sigpipes.auxtools import seq_wrap
from sigpipes.auxtools import TimeUnit
import gzip
from typing import Sequence, Union, Iterable, Optional, MutableMapping, Any, Mapping
import collections.abc
import sys
import fractions
from pathlib import Path
import numpy as np
import scipy.signal as sig
import scipy.fftpack as fft
from deprecated import deprecated
class SigOperator:
"""
Base abstract class of signal operators.
"""
def apply(self, container: SigContainer) -> Any:
raise NotImplementedError("Abstract method")
def prepare_container(self, container: SigContainer) -> SigContainer:
"""
Prepare container at the beginning of apply method.
(this method must be called at the first line of `apply` method)
Args:
container: prepared signal container
"""
return container
def __ror__(self, container: Union[SigContainer, Sequence[SigContainer], "SigOperator"]
) -> Any:
"""
Pipe operator for streamlining of signal operators
Args:
container: left operand i.e signal container (input), sequence of containers
(multiple inputs) or another signal operator (formation of compound operators).
Returns:
            - for container as input: container, sequence of containers,
              or other data structures (only consumers)
            - for sequence of containers as input: sequence of containers,
              sequence of other data structures (only consumers)
- for signal operators in both operands: compound signal operator
"""
if isinstance(container, SigContainer):
container.d["log"].append(self.log())
return self.apply(container)
elif isinstance(container, collections.abc.Sequence):
return [c | self for c in container]
elif isinstance(container, SigOperator):
return CompoundSigOperator(container, self)
elif isinstance(container, SigFuture):
if isinstance(self, ParallelSigOperator):
return self.par_apply(container)
else:
return SigFuture(container, fn=self.apply,
sigspace=self.sigspace_transformation(container.sigspace),
node_description=self.log())
else:
raise TypeError("Unsupported left operand of pipe")
def __or__(self, other):
return CompoundSigOperator(self, other)
def log(self):
"""
Identification of operation for logging purposes.
Returns:
Simple (and if possible short) identification.
"""
return self.__class__.__name__
def sigspace_transformation(self, sigspace:SignalSpace) -> SignalSpace:
return sigspace
class ParallelSigOperator(ABC):
@abstractmethod
def par_apply(self, future: SigFuture) -> SigFuture:
pass
class Identity(SigOperator):
"""
Base class for operators which do not modify container.
"""
def apply(self, container: SigContainer) -> SigContainer:
container = self.prepare_container(container)
return container
def log(self):
return "#" + self.__class__.__name__
class MaybeConsumerOperator(Identity):
"""
    Abstract class for operators which can work as final consumers, i.e. they can produce different
    representations of signal data, e.g. dataframes, matplotlib figures, etc.
"""
pass
class CompoundSigOperator(SigOperator):
def __init__(self, left_operator: SigOperator, right_operator: SigOperator) -> None:
self.left = left_operator
self.right = right_operator
def apply(self, container: SigContainer):
container = self.prepare_container(container)
return container | self.left | self.right
def log(self):
return "#COMP"
class Print(Identity):
"""
Operator which prints debug text representation into text output
"""
def __init__(self, output=">", header=True):
"""
Args:
output: file like object or name of output file (">" is stdout out, ">2" stderr)
header: the header with log-id is printed
"""
self.output = output
self.header = header
def apply(self, container: SigContainer) -> SigContainer:
container = self.prepare_container(container)
if self.output == ">":
f = sys.stdout
elif self.output == ">2":
f = sys.stderr
elif isinstance(self.output, str):
            f = open(self.output, "wt") # open in apply, because the file objects are not picklable
else:
f = self.output
if self.header:
print(container.id, file=f)
print("-"*40, file=f)
print(str(container), file=f)
return container
class SigModifierOperator(SigOperator):
"""
Abstract class for operators which modify signal data.
"""
def prepare_container(self, container: SigContainer) -> SigContainer:
return SigContainer(container.d.deepcopy(shared_folders=["annotations"],
empty_folders=["meta"]))
class Sample(SigModifierOperator):
"""
Sample (continuous interval) of signal (for all channels)
"""
def __init__(self, start: Union[int, float, np.timedelta64],
end: Union[int, float, np.timedelta64]):
"""
Args:
start: start point of sample. integer: sample number, float: time in seconds,
np.timedelta64: time represented by standard time representation of numpy)
end: end point of sample (see `start` for interpretation)
"""
self.start = start
self.end = end
def apply(self, container: SigContainer) -> SigContainer:
container = self.prepare_container(container)
fs = container.d["signals/fs"]
lag = container.lag
start = TimeUnit.to_sample(self.start, fs, TimeUnit.time_unit_mapper(self.start), lag)
end = TimeUnit.to_sample(self.end, fs, TimeUnit.time_unit_mapper(self.end), lag)
container.d["signals/data"] = container.d["signals/data"][:, start:end]
container.d["signals/lag"] = lag - start
if "annotations" in container.d:
adict = container.d["annotations"]
newdict = SigContainer.cut_annots(adict, start, end)
adict.update(newdict)
return container
def log(self):
return f"SAMP@{str(self.start)}@{str(self.end)}"
class ChannelSelect(SigOperator):
"""
Selection of limited subset of channels.
"""
def __init__(self, selector: Sequence[int]) -> None:
"""
Args:
selector: sequence of (integer) indexes of channels
"""
self.selector = selector
def prepare_container(self, container: SigContainer) -> SigContainer:
return SigContainer(container.d.deepcopy(shared_folders=["annotations"],
empty_folders=["signals"]))
def apply(self, container: SigContainer) -> SigContainer:
nc = self.prepare_container(container)
nc.d["signals/data"] = container.d["signals/data"][self.selector, :]
nc.d["signals/channels"] = np.array(container.d["signals/channels"])[self.selector].tolist()
nc.d["signals/units"] = np.array(container.d["signals/units"])[self.selector].tolist()
nc.d["signals/fs"] = container.d["signals/fs"]
if "meta" in nc.d:
nc.d.map(lambda a: a[self.selector], root="meta")
return nc
def log(self):
return f"CHSEL@{','.join(str(s) for s in self.selector)}"
class MetaProducerOperator(SigOperator):
"""
    Abstract class for operators which produce metadata (i.e. data inferred from signals)
"""
def prepare_container(self, container: SigContainer) -> SigContainer:
        return SigContainer(container.d.deepcopy(["signals", "annotations"]))
class FeatureExtractor(MetaProducerOperator):
def __init__(self, features_dict: Mapping[str, Union[bool,float,Sequence[float]]] = None,
*, wamp_threshold: Union[float, Sequence[float]] = (),
zc_diff_threshold: float = (), zc_mul_threshold = (),
sc_threshold: float = ()):
self.feature_dict = features_dict if features_dict is not None else {feature: True for feature
in features.NON_THRESHOLD}
if wamp_threshold:
self.feature_dict["WAMP"] = wamp_threshold
if zc_diff_threshold and zc_mul_threshold:
self.feature_dict["ZC"] = zip(zc_diff_threshold, zc_mul_threshold)
if sc_threshold:
self.feature_dict["SC"] = sc_threshold
def apply(self, container: SigContainer) -> SigContainer:
container = self.prepare_container(container)
n = container.sample_count
data = container.d["signals/data"]
fkeys = {key for key in self.feature_dict.keys() if self.feature_dict[key]}
thresholds = {key : value for key,value in self.feature_dict.items() if key in features.WITH_THRESHOLD}
fdict = features.features(data, fkeys, thresholds)
path = "meta/features"
container.d.make_folder(path)
container.d[path].update(fdict)
return container
class FeatureExtraction(MetaProducerOperator):
"""
Extraction of basic features of signal.
"""
def __init__(self, *, wamp_threshold: Union[float, Sequence[float]] = (),
zc_diff_threshold: float = 0.0, zc_mul_threshold = 0.0,
sc_threshold: float = 0.0):
"""
Args:
wamp_threshold: threshold value (or sequence of values) foe WAMP feature
"""
self.wamp_threshold = seq_wrap(wamp_threshold)
self.target = "features"
self.zc_diff_threshold = zc_diff_threshold
self.zc_mul_threshold = zc_mul_threshold
self.sc_threshold = sc_threshold
def apply(self, container: SigContainer) -> SigContainer:
container = self.prepare_container(container)
n = container.sample_count
data = container.d["signals/data"]
absum = np.sum(np.abs(data), axis=1)
container.d[f"meta/{self.target}/IEMG"] = absum
container.d[f"meta/{self.target}/MAV"] = absum / n
data1 = np.abs(data[:, :n//4])
data2 = np.abs(data[:, n//4:3*n//4+1])
data3 = np.abs(data[:, 3*n//4+1:])
wsum = np.sum(data2, axis=1)
container.d[f"meta/{self.target}/MMAV1"] = (
(0.5 * np.sum(data1, axis=1) + wsum + 0.5 * np.sum(data3, axis=1)) / n)
koef1 = 4 * np.arange(1, n//4 + 1, dtype=np.float64) / n
koef3 = 4 * (np.arange(3*n//4 + 2, n+1, dtype=np.float64) - n) / n
container.d[f"meta/{self.target}/MMAV2"] = (
(np.sum(koef1 * data1, axis=1) + wsum + np.sum(koef3 * data3, axis=1)) / n)
qsum = np.sum(data * data, axis=1)
container.d[f"meta/{self.target}/SSI"] = qsum
container.d[f"meta/{self.target}/VAR"] = qsum / (n-1)
container.d[f"meta/{self.target}/RMS"] = np.sqrt(qsum / n)
df = np.abs(data[:, :-1] - data[:, 1:])
container.d[f"meta/{self.target}/WL"] = np.sum(df, axis=1)
container.d.make_folder(f"meta/{self.target}/WAMP")
container.d[f"meta/{self.target}/WAMP"].update(
{str(t): np.sum(np.where(df >= t, 1, 0), axis=1) for t in self.wamp_threshold})
container.d[f"meta/{self.target}/LOG"] = np.exp(np.sum(np.log(np.abs(data)), axis=1) / n)
container.d[f"meta/{self.target}/ZC"] = np.sum(
np.where(np.logical_and(data[:, :-1] * data[:, 1:] >= self.zc_mul_threshold,
df >= self.zc_diff_threshold), 1, 0), axis=1)
container.d[f"meta/{self.target}/SC"] = np.sum(
np.where((data[:, 1:-1] - data[:, :-2]) * (data[:, 1:-1] - data[:, 2:])
>= self.sc_threshold, 1, 0), axis=1)
return container
def log(self) -> str:
return f"FEX"
class SplitterOperator(SigOperator):
"""
    Abstract class for splitters (i.e. operators which split a container into several containers
    (segments) that can be processed independently as a sequence of containers).
"""
def container_factory(self, container: SigContainer, a: int, b: int, splitter_id: str) -> SigContainer:
c = SigContainer(container.d.deepcopy(empty_folders=["meta", "annotations"]))
c.d["signals/data"] = c.d["signals/data"][:, a:b]
newlog = list(c.d["log"])
newlog.append(f"{splitter_id}@{a}-{b}")
c.d["log"] = newlog
if "annotations" in container.d:
c.d["annotations"].update(SigContainer.cut_annots(container.d["annotations"], a, b))
return c
class SampleSplitter(SplitterOperator):
"""
    Split signal data into several containers at points defined by sample indices
    or absolute times. Only inner intervals are included!
    The returned data can be processed independently as a sequence of containers.
"""
def __init__(self, points: Sequence[Union[int, float, np.timedelta64]]) -> None:
self.points = points
def apply(self, container: SigContainer) -> Sequence[SigContainer]:
container = self.prepare_container(container)
fs = container.d["signals/fs"]
limits = [TimeUnit.to_sample(point, fs, TimeUnit.time_unit_mapper(point))
for point in self.points]
limits.sort()
return [self.container_factory(container, a, b, "SPL")
for a, b in zip(limits, limits[1:])]
class MarkerSplitter(SplitterOperator):
"""
    Split signal data into several containers at points defined by annotations (markers).
    The returned data can be processed independently as a sequence of containers.
"""
def __init__(self, annotation_spec: str, left_outer_segments: bool = False,
right_outer_segment: bool = False) -> None:
"""
Args:
annotation_spec: specification of splitting annotations (annotator)
left_outer_segments: true = signal before the first splitting annotation is included
right_outer_segment: true = signal after the last splitting annotation is included
"""
self.aspec = annotation_spec
self.left_segment = left_outer_segments
self.right_segment = right_outer_segment
def apply(self, container: SigContainer) -> Sequence[SigContainer]:
container = self.prepare_container(container)
limits = container.get_annotation_positions(self.aspec, TimeUnit.SAMPLE,
container.d["signals/fs"])
if self.left_segment and limits[0] != 0:
limits = np.insert(limits, 0, 0)
if self.right_segment and limits[-1] != container.sample_count - 1:
limits = np.append(limits, [container.sample_count])
return [self.container_factory(container, a, b, f"MSPL@{self.aspec}]")
for a, b in zip(limits, limits[1:])]
class ChannelSplitter(SigOperator):
def __init__(self, channels: Sequence[int] = None):
self.channels = channels
def apply(self, container: SigContainer) -> Sequence[SigContainer]:
container = self.prepare_container(container)
containers = []
        #TODO: take the selected channels into account
for i in range(container.channel_count):
c = SigContainer(container.d.deepcopy(["annotations"], empty_folders=["signals", "meta"]))
c.d["signals/data"] = container.d["signals/data"][i, :].reshape(1,container.sample_count)
c.d["signals/channels"] = [container.d["signals/channels"][i]]
c.d["signals/units"] = [container.d["signals/units"][i]]
c.d["signals/fs"] = container.d["signals/fs"]
c.d["log"] = list(container.d["log"])
c.d["log"].append(f"C{i}")
containers.append(c)
return containers
def log(self):
return "#ChannelSplit"
class SimpleBranching(SigOperator):
"""
    Abstract class for branching operators, i.e. operators bifurcating the stream into two or more branches
which are initially identical (based on the same container).
"""
def __init__(self, *branches):
self.branches = branches
@staticmethod
def container_factory(container: SigContainer):
nc = SigContainer(container.d.deepcopy())
nc.d["log"] = list(nc.d["log"])
return nc
class Tee(SimpleBranching, ParallelSigOperator):
"""
    Tee branching operator. For each parameter of the constructor the container is duplicated
    and processed by the pipeline passed in that parameter (i.e. all pipelines have the same source,
    but they are independent). Only the original container is returned (i.e. only one stream continues).
"""
def __init__(self, *branches):
"""
Args:
*branches: one or more parameters in the form of signals operators (including whole
pipelines in the form of compound operator)
"""
super().__init__(*branches)
def apply(self, container: SigContainer) -> SigContainer:
container = self.prepare_container(container)
for branch in self.branches:
copy = SimpleBranching.container_factory(container)
copy | branch
return container
def par_apply(self, future: SigFuture) -> SigFuture:
for branch in self.branches:
(future | branch).done()
return future
def log(self):
return "#TEE"
@deprecated(reason='new united Fork operator')
class VariantsSplitter(SimpleBranching):
pass
class Fork(SimpleBranching, ParallelSigOperator):
"""
    Alternative branching operator. For each parameter of the constructor the container is duplicated
    and processed by the pipeline passed in that parameter (i.e. all pipelines have the same source,
    but they are independent).
    A list of containers is returned, optionally including the original container, together with all
    processed duplicates.
"""
def __init__(self, *alternatives, original=False):
"""
Args:
*alternatives: one or more parameters in the form of signals operators (including whole
pipelines in the form of compound operator)
"""
super().__init__(*alternatives)
self.original = original
def apply(self, container: SigContainer) -> Sequence[SigContainer]:
container = self.prepare_container(container)
if self.original:
acontainer = [container]
else:
acontainer = []
for branch in self.branches:
copy = SimpleBranching.container_factory(container)
acontainer.append(copy | branch)
return acontainer
def par_apply(self, future: SigFuture) -> SigFuture:
if self.original:
acontainer = [future]
else:
acontainer = []
for branch in self.branches:
acontainer.append(future | branch)
return acontainer
def log(self):
return "#FORK"
@deprecated(reason='new united Fork operator')
class AltOptional(VariantsSplitter):
"""
    Alternative branching operator. For each parameter of the constructor the container is duplicated
    and processed by the pipeline passed in that parameter (i.e. all pipelines have the same source,
    but they are independent).
    A list of containers is returned, including the original container and all processed
    duplicates.
"""
def __init__(self, *alternatives):
"""
Args:
*branches: one or more parameters in the form of signals operators (including whole
pipelines in the form of compound operator)
"""
super().__init__(*alternatives)
def apply(self, container: SigContainer) -> Sequence[SigContainer]:
container = self.prepare_container(container)
acontainer = [container]
for branch in self.branches:
copy = SimpleBranching.container_factory(container)
acontainer.append(copy | branch)
return acontainer
def log(self):
return "#ALTOPT"
@deprecated(reason='new united Fork operator')
class Alternatives(VariantsSplitter):
"""
    Alternative branching operator. For each parameter of the constructor the container is duplicated
    and processed by the pipeline passed in that parameter (i.e. all pipelines have the same source, but they are
    independent). A list of containers is returned, including all processed duplicates.
"""
def __init__(self, *alternatives):
super().__init__(*alternatives)
def apply(self, container: SigContainer) -> Sequence[SigContainer]:
container = self.prepare_container(container)
acontainer = []
for branch in self.branches:
copy = SimpleBranching.container_factory(container)
acontainer.append(copy | branch)
return acontainer
def log(self):
return "#ALT"
class UfuncOnSignals(SigModifierOperator):
"""
Application of unary numpy ufunc on signals.
Examples:
container | UfuncOnSignals(np.abs)
"""
def __init__(self, ufunc):
self.ufunc = ufunc
def apply(self, container: SigContainer) -> SigContainer:
container = self.prepare_container(container)
container.d["signals/data"] = self.ufunc(container.d["signals/data"])
return container
def log(self):
if hasattr(self.ufunc, "__name__"):
return f"UF@{self.ufunc.__name__}"
else:
return "UF"
class Scale(SigModifierOperator):
"""
Scale signal by scalar.
Examples:
container | Scale(-1.0)
"""
def __init__(self, scalar: float):
self.scalar = scalar
def apply(self, container: SigContainer) -> SigContainer:
container = self.prepare_container(container)
container.d["signals/data"] = self.scalar * container.d["signals/data"]
return container
def log(self):
return f"{self.scalar}x"
class MVNormalization(SigModifierOperator):
"""
Mean and variance normalization
"""
def __init__(self, mean: Optional[float] = 0.0, variance: Optional[float] = 1.0):
self.mean = mean
self.variance = variance
def apply(self, container: SigContainer) -> SigContainer:
if self.mean is not None:
mean = np.mean(container.d["signals/data"], axis=1).reshape(container.channel_count, 1)
if self.mean == 0:
container.d["signals/data"] -= mean
else:
container.d["signals/data"] -= mean - self.mean
if self.variance is not None:
            variance = np.var(container.d["signals/data"], axis=1).reshape(container.channel_count, 1)
            # divide by the standard deviation so that the resulting variance equals self.variance
            if self.variance == 1.0:
                container.d["signals/data"] /= np.sqrt(variance)
            else:
                container.d["signals/data"] /= np.sqrt(variance / self.variance)
return container
def log(self):
return f"MVNorm@{self.mean},{self.variance}"
class RangeNormalization(SigModifierOperator):
"""
Normalize signal to range <a,b>.
"""
def __init__(self, min=0, max=1.0):
assert min < max
self.min = min
self.max = max
def apply(self, container: SigContainer) -> Any:
dmax = np.max(container.signals, axis=1).reshape(container.channel_count, 1)
dmin = np.min(container.signals, axis=1).reshape(container.channel_count, 1)
drange = (dmax - dmin).reshape(container.channel_count, 1)
range = self.max - self.min
container.d["signals/data"] = self.min + range * (container.signals - dmin) / drange
container.d["signals/units"] = ["unit"] * container.channel_count
return container
def log(self):
return f"RangeNorm@{self.min},{self.max}"
class Convolution(SigModifierOperator):
"""
Convolution of signal data (all signals)
"""
def __init__(self, v: Sequence[float]):
self.v = np.array(v, dtype=np.float)
        self.sum = np.sum(self.v)
import numpy as np
def multNumMat(num, mat):
matResult = []
for i in range(len(mat)):
matResCurrentLine = []
for j in range(len(mat[i])):
matResCurrentLine.append(num * mat[i][j])
matResult.append(matResCurrentLine)
return np.array(matResult)
def printMat(mat):
for i in range(len(mat)):
print(mat[i])
def createMat(name):
matData = []
while True:
try:
# Ex: 2 3; 1 2; 3 1..
ordMat = input(f'\nOrdem da matriz {name.upper()}: ').split()
if len(ordMat) != 2:
raise Exception
break
except Exception:
print('\nOrdem inválida\nTente novamente')
for i in range(int(ordMat[0])):
while True:
try:
# Ex: 2 3; 1 2 1; 12 3 22 3211..
matCurrentLine = input(
f'Matriz {name.upper()}, linha {i+1}: ').split()
if len(matCurrentLine) != int(ordMat[1]):
raise Exception
matData.append([int(j) for j in matCurrentLine])
break
except Exception:
print('\nNúmero de colunas inválido\nTente novamente\n')
    return np.array(matData)
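# Illustrative sketch (not executed automatically): scaling a small matrix by a scalar
# and printing the result; the values below are arbitrary examples.
def _example_usage():
    mat = np.array([[1, 2], [3, 4]])
    printMat(multNumMat(3, mat))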
#!/usr/bin/env python
import subprocess
import datetime
from argparse import ArgumentParser
import numpy as np
import fitsio
from data_collection.RA_DEC_MatchingClassModule import RA_DEC_MatchingClass
# settings.
fpn_QSO_cat = "/global/cfs/cdirs/desi/target/analysis/RF/Catalogs/DR16Q_red.fits"
fpn_var_cat = "/global/cfs/cdirs/desi/target/analysis/RF/Catalogs/Str82_variability_wise_bdt_qso_star_DR7_BOSS_-50+60.fits"
radius4matching = 1.4/3600. # [deg]
NNVar_th = 0.5
# reading arguments.
parser = ArgumentParser()
parser.add_argument('-i', '--infits', type=str, default=None, metavar='INFITS', help='input fits')
parser.add_argument('-o', '--outfits', type=str, default=None, metavar='OUTFITS', help='output fits')
parser.add_argument('-r', '--release', type=str, default=None, metavar='RELEASE', help='release ("dr7","dr8s", "dr8n")')
parser.add_argument('-rd', '--radec', type=str, default='0,360,-90,90', metavar='RADEC', help='ramin,ramax,decmin,decmax')
parser.add_argument('-s', '--selcrit', type=str, default=None, metavar='SELCRIT', help='selection criterion ("qso", "stars", "test")')
parser.add_argument('-l', '--logfile', type=str, default='none', metavar='LOGFILE', help='log file')
arg = parser.parse_args()
INFITS, OUTFITS, RELEASE, RADEC, SELCRIT, LOGFILE = arg.infits, arg.outfits, arg.release, arg.radec, arg.selcrit, arg.logfile
# RADEC.
RAMIN, RAMAX, DECMIN, DECMAX = np.array(RADEC.split(',')).astype('float')
# print()
print('[start: '+datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")+']')
# print()
# reading.
hdu = fitsio.FITS(INFITS)[1]
ra = hdu['RA'][:]
dec = hdu['DEC'][:]
if (RAMAX < RAMIN):
keep_radec = ((ra > RAMIN) | (ra < RAMAX)) & (dec > DECMIN) & (dec < DECMAX)
else:
keep_radec = (ra > RAMIN) & (ra < RAMAX) & (dec > DECMIN) & (dec < DECMAX)
ra = ra[keep_radec]
dec = dec[keep_radec]
ramin = np.min(ra)
ramax = np.max(ra)
decmax = np.max(dec)
decmin = np.min(dec)
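# Illustrative helper (a sketch of the matching pattern used repeatedly below; it is
# not called by this script): match catalogue positions against reference positions
# and return the indices of the valid nearest-neighbour pairs.
def match_radec(cat_ra, cat_dec, ref_ra, ref_dec, radius):
    matcher = RA_DEC_MatchingClass()
    matcher.LoadRA_DEC_CatalogData(cat_ra, cat_dec)
    matcher.LoadRA_DEC4MatchingData(ref_ra, ref_dec)
    matcher(radius, 1)  # "1": keep only the single nearest neighbour
    res = matcher.nNeighResInd
    valid = res[0] > -1
    return res[0][valid], res[1][valid]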
# QSO.
if (SELCRIT == 'qso'):
QSO_hdu = fitsio.FITS(fpn_QSO_cat)[1]
QSO_ra = QSO_hdu['RA'][:]
QSO_dec = QSO_hdu['DEC'][:]
if (ramax < ramin):
QSO_keep_radec = ((QSO_ra > ramin) | (QSO_ra < ramax)) & (QSO_dec > decmin) & (QSO_dec < decmax)
else:
QSO_keep_radec = (QSO_ra > ramin) & (QSO_ra < ramax) & (QSO_dec > decmin) & (QSO_dec < decmax)
if np.any(QSO_keep_radec):
QSO_ra = QSO_ra[QSO_keep_radec]
QSO_dec = QSO_dec[QSO_keep_radec]
RA_DEC_MatchingObj = RA_DEC_MatchingClass()
RA_DEC_MatchingObj.LoadRA_DEC_CatalogData(ra, dec)
RA_DEC_MatchingObj.LoadRA_DEC4MatchingData(QSO_ra, QSO_dec)
        RA_DEC_MatchingObj(radius4matching, 1) # "1": keep only the single nearest neighbour.
res = RA_DEC_MatchingObj.nNeighResInd
valid_res = res[0] > -1
        if np.any(valid_res): # optional.
hdu_temp = hdu[:][keep_radec][res[0][valid_res]]
QSO_hdu_temp = QSO_hdu[:][QSO_keep_radec][res[1][valid_res]]
else:
hdu_temp = np.zeros(0, dtype=hdu[:].dtype)
QSO_hdu_temp = np.zeros(0, dtype=QSO_hdu[:].dtype)
else:
hdu_temp = np.zeros(0, dtype=hdu[:].dtype)
QSO_hdu_temp = np.zeros(0, dtype=QSO_hdu[:].dtype)
newhdu = fitsio.FITS(OUTFITS, 'rw')
newhdu.write(hdu_temp)
newhdu[1].insert_column('ra_SDSS', QSO_hdu_temp['RA'])
newhdu[1].insert_column('dec_SDSS', QSO_hdu_temp['DEC'])
newhdu[1].insert_column('zred', QSO_hdu_temp['Z'])
print('[INFO] IS_NORTH SET AS FALSE -> NO QSO IN THE NORTH PHOTOMETRY')
newhdu[1].insert_column('IS_NORTH', np.zeros(QSO_hdu_temp['Z'].size, dtype=bool))
newhdu.close()
# STARS
elif (SELCRIT == 'stars'):
    # WARNING: depending on the release, the same variable name is not kept;
    # for dr8 it is 'PSF '.
keep_PSF = (hdu['TYPE'][:][keep_radec] == 'PSF')
hdu_temp = hdu[:][keep_radec][keep_PSF]
    # Remove objects that are both *known* AND variable.
if np.any(keep_PSF):
var_hdu = fitsio.FITS(fpn_var_cat)[1]
var_ra = var_hdu['RA'][:]
var_dec = var_hdu['DEC'][:]
if (ramax < ramin):
var_keep_radec = ((var_ra > ramin) | (var_ra < ramax)) & (var_dec > decmin) & (var_dec < decmax)
else:
var_keep_radec = (var_ra > ramin) & (var_ra < ramax) & (var_dec > decmin) & (var_dec < decmax)
if np.any(var_keep_radec):
ra = hdu_temp['RA']
dec = hdu_temp['DEC']
var_ra = var_ra[var_keep_radec]
var_dec = var_dec[var_keep_radec]
RA_DEC_MatchingObj = RA_DEC_MatchingClass()
RA_DEC_MatchingObj.LoadRA_DEC_CatalogData(ra, dec)
RA_DEC_MatchingObj.LoadRA_DEC4MatchingData(var_ra, var_dec)
            RA_DEC_MatchingObj(radius4matching, 1) # "1": keep only the single nearest neighbour.
res = RA_DEC_MatchingObj.nNeighResInd
valid_res = res[0] > -1
if np.any(valid_res):
rej_data_ind = res[0][valid_res]
var_hdu_temp = var_hdu[:][var_keep_radec][res[1][valid_res]]
rej_var = var_hdu_temp['NNVariability'] > NNVar_th
hdu_temp = np.delete(hdu_temp, rej_data_ind[rej_var])
    # Remove known QSOs.
if len(hdu_temp) > 0:
QSO_hdu = fitsio.FITS(fpn_QSO_cat)[1]
QSO_ra = QSO_hdu['RA'][:]
QSO_dec = QSO_hdu['DEC'][:]
if (ramax < ramin):
QSO_keep_radec = ((QSO_ra > ramin) | (QSO_ra < ramax)) & (QSO_dec > decmin) & (QSO_dec < decmax)
else:
QSO_keep_radec = (QSO_ra > ramin) & (QSO_ra < ramax) & (QSO_dec > decmin) & (QSO_dec < decmax)
if np.any(QSO_keep_radec):
ra = hdu_temp['RA']
dec = hdu_temp['DEC']
QSO_ra = QSO_ra[QSO_keep_radec]
QSO_dec = QSO_dec[QSO_keep_radec]
RA_DEC_MatchingObj = RA_DEC_MatchingClass()
RA_DEC_MatchingObj.LoadRA_DEC_CatalogData(ra, dec)
RA_DEC_MatchingObj.LoadRA_DEC4MatchingData(QSO_ra, QSO_dec)
            RA_DEC_MatchingObj(radius4matching, 1) # "1": keep only the single nearest neighbour.
res = RA_DEC_MatchingObj.nNeighResInd
valid_res = res[0] > -1
rej_data_ind = res[0][valid_res]
hdu_temp = np.delete(hdu_temp, rej_data_ind)
newhdu = fitsio.FITS(OUTFITS, 'rw')
newhdu.write(hdu_temp)
print('[INFO] IS_NORTH SET AS FALSE -> NO STARS IN THE NORTH PHOTOMETRY')
newhdu[1].insert_column('IS_NORTH', np.zeros(hdu_temp.size, dtype=bool))
newhdu.close()
# TEST SAMPLE.
elif (SELCRIT == 'test'):
hdu_temp = hdu[:][keep_radec]
    # Identify known QSOs.
QSO_hdu = fitsio.FITS(fpn_QSO_cat)[1]
QSO_ra = QSO_hdu['RA'][:]
QSO_dec = QSO_hdu['DEC'][:]
if (ramax < ramin):
QSO_keep_radec = ((QSO_ra > ramin) | (QSO_ra < ramax)) & (QSO_dec > decmin) & (QSO_dec < decmax)
else:
QSO_keep_radec = (QSO_ra > ramin) & (QSO_ra < ramax) & (QSO_dec > decmin) & (QSO_dec < decmax)
QSO_hdu_temp = QSO_hdu[:][QSO_keep_radec]
    if np.any(QSO_keep_radec):
from collections import defaultdict
import json
import re
import sys
import time
import matplotlib.pyplot as plt
from itertools import permutations
import numpy as np
import pandas as pd
from scipy.cluster.hierarchy import fcluster, linkage
from scipy.spatial.distance import pdist
from scipy.stats import lognorm
import seaborn as sns
from sklearn.cluster import DBSCAN
import statsmodels.nonparametric.api as smnp
#############################################################################
### Parameters
### Theoretical scale markers
### PYT = Pythagorean tuning
### EQ{N} = N-Tone Equal Temperament
### JI = Just intonation
### CHINA = Shi-er-lu
### The rest are sourced from Rechberger, Herman
PYT_INTS = np.array([0., 90.2, 203.9, 294.1, 407.8, 498.1, 611.7, 702., 792.2, 905., 996.1, 1109.8, 1200.])
EQ5_INTS = np.linspace(0, 1200, num=6, endpoint=True, dtype=float)
EQ7_INTS = np.linspace(0, 1200, num=8, endpoint=True, dtype=float)
EQ9_INTS = np.linspace(0, 1200, num=10, endpoint=True, dtype=float)
EQ10_INTS = np.linspace(0, 1200, num=11, endpoint=True, dtype=float)
EQ12_INTS = np.linspace(0, 1200, num=13, endpoint=True, dtype=float)
EQ24_INTS = np.linspace(0, 1200, num=25, endpoint=True, dtype=float)
EQ53_INTS = np.linspace(0, 1200, num=54, endpoint=True, dtype=float)
JI_INTS = np.array([0., 111.7, 203.9, 315.6, 386.3, 498.1, 590.2, 702., 813.7, 884.4, 1017.6, 1088.3, 1200.])
SLENDRO = np.array([263., 223., 253., 236., 225.])
""" Functions to fit and analyse Coulomb peaks """
import warnings
import copy
import numpy as np
import scipy.optimize as opt
import scipy.ndimage
import scipy.ndimage.measurements
import qtt.measurements.scans
import qtt.data
import qtt.pgeometry as pgeometry
import matplotlib.pyplot as plt
from qtt.algorithms.generic import issorted
from qtt.algorithms.functions import logistic
from qtt.algorithms.generic import nonmaxsuppts
# %% Functions related to detection of Coulomb peaks
def gauss(x, p):
""" Gaussian function with parameters
Args:
x (array or float): input variable
p (array): parameters [mean, std. dev., amplitude]
Returns:
array or float: calculated Gaussian
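    Example:
        A quick sketch (arbitrary parameter values)::
            x = np.linspace(-5, 5, 201)
            y = gauss(x, [0.0, 1.0, 1.0])  # mean 0, std. dev. 1, amplitude 1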
"""
return p[2] * 1.0 / (p[1] * np.sqrt(2 * np.pi)) * np.exp(-(x - p[0]) ** 2 / (2 * p[1] ** 2))
def analyseCoulombPeaks(all_data, fig=None, verbose=1, parameters=None):
""" Find Coulomb peaks in a 1D dataset
Args:
all_data (DataSet): The data to analyse.
fig (int or None): Figure handle to the plot.
parameters (dict): dictionary with parameters that is passed to subfunctions
Returns:
peaks (list): fitted peaks
"""
x_data, y_data = qtt.data.dataset1Ddata(all_data)
return analyseCoulombPeaksArray(x_data, y_data, fig=fig, verbose=verbose, parameters=parameters)
def analyseCoulombPeaksArray(x_data, y_data, fig=None, verbose=1, parameters=None):
""" Find Coulomb peaks in arrays of data. This is very similar to analyseCoulombPeaks,
but takes arrays of data as input. Hence the y_data can for example be either the
I, Q or any combination of both obtained with RF reflectometry.
Args:
x_data (1D array): The data of varied parameter.
y_data (1D array): The signal data.
fig (None or int): figure handle
parameters (dict): dictionary with parameters that is passed to subfunctions
Returns:
(list of dict): The detected peaks.
"""
if parameters is None:
parameters = {}
sampling_rate = parameters.get('sampling_rate', (x_data[-1] - x_data[0]) / (x_data.size - 1))
return coulombPeaks(x_data, y_data, verbose=verbose, fig=fig, plothalf=True, sampling_rate=sampling_rate,
parameters=parameters)
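# Illustrative sketch (not part of the qtt API): analysing a single synthetic peak;
# the peak position, width and noise level are arbitrary example values.
def _example_analyse_synthetic_peak():
    x_data = np.linspace(-20, 20, 400)
    y_data = gauss(x_data, [0.0, 3.0, 1.0]) + 0.01 * np.random.rand(x_data.size)
    return analyseCoulombPeaksArray(x_data, y_data, fig=None)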
def fitCoulombPeaks(x_data, y_data, lowvalue=None, verbose=1, fig=None, sampling_rate=1):
""" Fit Coulumb peaks in a measurement series.
Args:
x_data (1D array): The data of varied parameter.
y_data (1D array): The signal data.
sampling_rate (float): The sampling rate in mV/pixel.
Returns:
(list): A list with detected peaks.
"""
minval = np.percentile(y_data, 5) + 0.1 * (np.percentile(y_data, 95) - np.percentile(y_data, 5))
local_maxima, _ = nonmaxsuppts(y_data, d=int(12 / sampling_rate), minval=minval)
fit_data = fitPeaks(x_data, y_data, local_maxima, verbose=verbose >= 2, fig=fig)
if lowvalue is None:
lowvalue = np.percentile(y_data, 1)
highvalue = np.percentile(y_data, 99)
peaks = []
for ii, f in enumerate(fit_data):
p = local_maxima[ii]
peak = dict({'p': p, 'x': x_data[p], 'y': y_data[p], 'gaussfit': f})
peak['halfvaluelow'] = (y_data[p] - lowvalue) / 2 + lowvalue
peak['height'] = (y_data[p] - lowvalue)
if peak['height'] < .1 * (highvalue - lowvalue):
peak['valid'] = 0
else:
peak['valid'] = 1
peak['lowvalue'] = lowvalue
peak['type'] = 'peak'
peaks.append(peak)
if verbose:
print('fitCoulombPeaks: peak %d: position %.2f max %.2f valid %d' %
(ii, peak['x'], peak['y'], peak['valid']))
return peaks
# %%
def plotPeaks(x, y, peaks, showPeaks=True, plotLabels=False, fig=10, plotScore=False, plotsmooth=True, plothalf=False,
plotbottom=False, plotmarker='.-b'):
""" Plot detected peaks
Args:
x (array): independent variable data
y (array): dependent variable data
peaks (list): list of peaks to plot
showPeaks, plotLabels, plotScore, plothalf (bool): plotting options
Returns:
dictionary: graphics handles
"""
    kk = np.ones(3)
import numpy as np
from ._base_metric import _BaseMetric
from .. import _timing
from functools import partial
from .. import utils
from ..utils import TrackEvalException
class TrackMAP(_BaseMetric):
"""Class which implements the TrackMAP metrics"""
@staticmethod
def get_default_metric_config():
"""Default class config values"""
default_config = {
'USE_AREA_RANGES': True, # whether to evaluate for certain area ranges
'AREA_RANGES': [[0 ** 2, 32 ** 2], # additional area range sets for which TrackMAP is evaluated
[32 ** 2, 96 ** 2], # (all area range always included), default values for TAO
[96 ** 2, 1e5 ** 2]], # evaluation
'AREA_RANGE_LABELS': ["area_s", "area_m", "area_l"], # the labels for the area ranges
'USE_TIME_RANGES': True, # whether to evaluate for certain time ranges (length of tracks)
'TIME_RANGES': [[0, 3], [3, 10], [10, 1e5]], # additional time range sets for which TrackMAP is evaluated
# (all time range always included) , default values for TAO evaluation
'TIME_RANGE_LABELS': ["time_s", "time_m", "time_l"], # the labels for the time ranges
'IOU_THRESHOLDS': np.arange(0.5, 0.96, 0.05), # the IoU thresholds
'RECALL_THRESHOLDS': np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01) + 1), endpoint=True),
# recall thresholds at which precision is evaluated
'MAX_DETECTIONS': 0, # limit the maximum number of considered tracks per sequence (0 for unlimited)
'PRINT_CONFIG': True
}
return default_config
def __init__(self, config=None):
super().__init__()
self.config = utils.init_config(config, self.get_default_metric_config(), self.get_name())
self.num_ig_masks = 1
self.lbls = ['all']
self.use_area_rngs = self.config['USE_AREA_RANGES']
if self.use_area_rngs:
self.area_rngs = self.config['AREA_RANGES']
self.area_rng_lbls = self.config['AREA_RANGE_LABELS']
self.num_ig_masks += len(self.area_rng_lbls)
self.lbls += self.area_rng_lbls
self.use_time_rngs = self.config['USE_TIME_RANGES']
if self.use_time_rngs:
self.time_rngs = self.config['TIME_RANGES']
self.time_rng_lbls = self.config['TIME_RANGE_LABELS']
self.num_ig_masks += len(self.time_rng_lbls)
self.lbls += self.time_rng_lbls
self.array_labels = self.config['IOU_THRESHOLDS']
self.rec_thrs = self.config['RECALL_THRESHOLDS']
self.maxDet = self.config['MAX_DETECTIONS']
self.float_array_fields = ['AP_' + lbl for lbl in self.lbls] + ['AR_' + lbl for lbl in self.lbls]
self.fields = self.float_array_fields
self.summary_fields = self.float_array_fields
@_timing.time
def eval_sequence(self, data):
"""Calculates GT and Tracker matches for one sequence for TrackMAP metrics. Adapted from
https://github.com/TAO-Dataset/"""
# Initialise results to zero for each sequence as the fields are only defined over the set of all sequences
res = {}
for field in self.fields:
res[field] = [0 for _ in self.array_labels]
gt_ids, dt_ids = data['gt_track_ids'], data['dt_track_ids']
if len(gt_ids) == 0 and len(dt_ids) == 0:
for idx in range(self.num_ig_masks):
res[idx] = None
return res
# get track data
gt_tr_areas = data.get('gt_track_areas', None) if self.use_area_rngs else None
gt_tr_lengths = data.get('gt_track_lengths', None) if self.use_time_rngs else None
gt_tr_iscrowd = data.get('gt_track_iscrowd', None)
dt_tr_areas = data.get('dt_track_areas', None) if self.use_area_rngs else None
dt_tr_lengths = data.get('dt_track_lengths', None) if self.use_time_rngs else None
is_nel = data.get('not_exhaustively_labeled', False)
# compute ignore masks for different track sets to eval
gt_ig_masks = self._compute_track_ig_masks(len(gt_ids), track_lengths=gt_tr_lengths, track_areas=gt_tr_areas,
iscrowd=gt_tr_iscrowd)
dt_ig_masks = self._compute_track_ig_masks(len(dt_ids), track_lengths=dt_tr_lengths, track_areas=dt_tr_areas,
is_not_exhaustively_labeled=is_nel, is_gt=False)
boxformat = data.get('boxformat', 'xywh')
ious = self._compute_track_ious(data['dt_tracks'], data['gt_tracks'], iou_function=data['iou_type'],
boxformat=boxformat)
for mask_idx in range(self.num_ig_masks):
gt_ig_mask = gt_ig_masks[mask_idx]
# Sort gt ignore last
gt_idx = np.argsort([g for g in gt_ig_mask], kind="mergesort")
gt_ids = [gt_ids[i] for i in gt_idx]
ious_sorted = ious[:, gt_idx] if len(ious) > 0 else ious
num_thrs = len(self.array_labels)
num_gt = len(gt_ids)
num_dt = len(dt_ids)
# Array to store the "id" of the matched dt/gt
gt_m = np.zeros((num_thrs, num_gt)) - 1
dt_m = np.zeros((num_thrs, num_dt)) - 1
gt_ig = np.array([gt_ig_mask[idx] for idx in gt_idx])
dt_ig = np.zeros((num_thrs, num_dt))
for iou_thr_idx, iou_thr in enumerate(self.array_labels):
if len(ious_sorted) == 0:
break
for dt_idx, _dt in enumerate(dt_ids):
iou = min([iou_thr, 1 - 1e-10])
# information about best match so far (m=-1 -> unmatched)
# store the gt_idx which matched for _dt
m = -1
for gt_idx, _ in enumerate(gt_ids):
# if this gt already matched continue
if gt_m[iou_thr_idx, gt_idx] > 0:
continue
# if _dt matched to reg gt, and on ignore gt, stop
if m > -1 and gt_ig[m] == 0 and gt_ig[gt_idx] == 1:
break
# continue to next gt unless better match made
if ious_sorted[dt_idx, gt_idx] < iou - np.finfo('float').eps:
continue
# if match successful and best so far, store appropriately
iou = ious_sorted[dt_idx, gt_idx]
m = gt_idx
# No match found for _dt, go to next _dt
if m == -1:
continue
# if gt to ignore for some reason update dt_ig.
# Should not be used in evaluation.
dt_ig[iou_thr_idx, dt_idx] = gt_ig[m]
# _dt match found, update gt_m, and dt_m with "id"
dt_m[iou_thr_idx, dt_idx] = gt_ids[m]
gt_m[iou_thr_idx, m] = _dt
dt_ig_mask = dt_ig_masks[mask_idx]
dt_ig_mask = np.array(dt_ig_mask).reshape((1, num_dt)) # 1 X num_dt
dt_ig_mask = np.repeat(dt_ig_mask, num_thrs, 0) # num_thrs X num_dt
# Based on dt_ig_mask ignore any unmatched detection by updating dt_ig
dt_ig = np.logical_or(dt_ig, np.logical_and(dt_m == -1, dt_ig_mask))
# store results for given video and category
res[mask_idx] = {
"dt_ids": dt_ids,
"gt_ids": gt_ids,
"dt_matches": dt_m,
"gt_matches": gt_m,
"dt_scores": data['dt_track_scores'],
"gt_ignore": gt_ig,
"dt_ignore": dt_ig,
}
return res
def combine_sequences(self, all_res):
"""Combines metrics across all sequences. Computes precision and recall values based on track matches.
Adapted from https://github.com/TAO-Dataset/
"""
num_thrs = len(self.array_labels)
num_recalls = len(self.rec_thrs)
# -1 for absent categories
precision = -np.ones(
(num_thrs, num_recalls, self.num_ig_masks)
)
recall = -np.ones((num_thrs, self.num_ig_masks))
for ig_idx in range(self.num_ig_masks):
ig_idx_results = [res[ig_idx] for res in all_res.values() if res[ig_idx] is not None]
# Remove elements which are None
if len(ig_idx_results) == 0:
continue
# Append all scores: shape (N,)
# limit considered tracks for each sequence if maxDet > 0
if self.maxDet == 0:
dt_scores = np.concatenate([res["dt_scores"] for res in ig_idx_results], axis=0)
dt_idx = np.argsort(-dt_scores, kind="mergesort")
dt_m = np.concatenate([e["dt_matches"] for e in ig_idx_results],
axis=1)[:, dt_idx]
dt_ig = np.concatenate([e["dt_ignore"] for e in ig_idx_results],
axis=1)[:, dt_idx]
elif self.maxDet > 0:
dt_scores = np.concatenate([res["dt_scores"][0:self.maxDet] for res in ig_idx_results], axis=0)
dt_idx = np.argsort(-dt_scores, kind="mergesort")
dt_m = np.concatenate([e["dt_matches"][:, 0:self.maxDet] for e in ig_idx_results],
axis=1)[:, dt_idx]
dt_ig = np.concatenate([e["dt_ignore"][:, 0:self.maxDet] for e in ig_idx_results],
axis=1)[:, dt_idx]
else:
raise Exception("Number of maximum detections must be >= 0, but is set to %i" % self.maxDet)
gt_ig = np.concatenate([res["gt_ignore"] for res in ig_idx_results])
# num gt anns to consider
num_gt = np.count_nonzero(gt_ig == 0)
if num_gt == 0:
continue
tps = np.logical_and(dt_m != -1, np.logical_not(dt_ig))
fps = np.logical_and(dt_m == -1, np.logical_not(dt_ig))
tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)
fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)
for iou_thr_idx, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
tp = np.array(tp)
fp = np.array(fp)
num_tp = len(tp)
rc = tp / num_gt
if num_tp:
recall[iou_thr_idx, ig_idx] = rc[-1]
else:
recall[iou_thr_idx, ig_idx] = 0
# np.spacing(1) ~= eps
pr = tp / (fp + tp + np.spacing(1))
pr = pr.tolist()
# Ensure precision values are monotonically decreasing
for i in range(num_tp - 1, 0, -1):
if pr[i] > pr[i - 1]:
pr[i - 1] = pr[i]
# find indices at the predefined recall values
rec_thrs_insert_idx = np.searchsorted(rc, self.rec_thrs, side="left")
pr_at_recall = [0.0] * num_recalls
try:
for _idx, pr_idx in enumerate(rec_thrs_insert_idx):
pr_at_recall[_idx] = pr[pr_idx]
except IndexError:
pass
precision[iou_thr_idx, :, ig_idx] = (np.array(pr_at_recall))
res = {'precision': precision, 'recall': recall}
# compute the precision and recall averages for the respective alpha thresholds and ignore masks
for lbl in self.lbls:
res['AP_' + lbl] = np.zeros((len(self.array_labels)), dtype=np.float)
res['AR_' + lbl] = np.zeros((len(self.array_labels)), dtype=np.float)
for a_id, alpha in enumerate(self.array_labels):
for lbl_idx, lbl in enumerate(self.lbls):
p = precision[a_id, :, lbl_idx]
if len(p[p > -1]) == 0:
mean_p = -1
else:
mean_p = np.mean(p[p > -1])
res['AP_' + lbl][a_id] = mean_p
res['AR_' + lbl][a_id] = recall[a_id, lbl_idx]
return res
def combine_classes_class_averaged(self, all_res):
"""Combines metrics across all classes by averaging over the class values"""
res = {}
for field in self.fields:
res[field] = np.zeros((len(self.array_labels)), dtype=np.float)
field_stacked = np.array([res[field] for res in all_res.values()])
for a_id, alpha in enumerate(self.array_labels):
values = field_stacked[:, a_id]
if len(values[values > -1]) == 0:
mean = -1
else:
mean = np.mean(values[values > -1])
res[field][a_id] = mean
return res
def combine_classes_det_averaged(self, all_res):
"""Combines metrics across all classes by averaging over the detection values"""
res = {}
for field in self.fields:
res[field] = np.zeros((len(self.array_labels)), dtype=np.float)
field_stacked = np.array([res[field] for res in all_res.values()])
for a_id, alpha in enumerate(self.array_labels):
values = field_stacked[:, a_id]
if len(values[values > -1]) == 0:
mean = -1
else:
                    mean = np.mean(values[values > -1])
import argparse
import json
import pickle
from torch.utils.data import DataLoader
import numpy as np
from nnlib.nnlib import training, metrics, callbacks, utils
from nnlib.nnlib.data_utils.wrappers import SubsetDataWrapper, LabelSubsetWrapper, ResizeImagesWrapper
from nnlib.nnlib.data_utils.base import get_loaders_from_datasets, get_input_shape
from modules.data_utils import load_data_from_arguments, SwitchableRandomSampler, TurnOnTrainShufflingCallback
import methods
def mnist_ld_schedule(lr, beta, iteration):
if iteration % 100 == 0:
lr = lr * 0.9
beta = min(4000, max(100, 10 * np.exp(iteration / 100)))
return lr, beta
def cifar_resnet50_ld_schedule(lr, beta, iteration):
if iteration % 300 == 0:
lr = lr * 0.9
beta = min(16000, max(100, 10 * np.exp(iteration / 300)))
return lr, beta
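# Illustrative sketch (not used by the code below): stepping a schedule the way a
# Langevin-dynamics training loop would, to inspect how lr and beta evolve over
# iterations; the starting values here are arbitrary.
def _example_schedule_trace(num_iterations=500, lr=1e-3, beta=100.0):
    trace = []
    for iteration in range(num_iterations):
        lr, beta = mnist_ld_schedule(lr, beta, iteration)
        trace.append((lr, beta))
    return trace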
def load_data(args):
all_examples, _, _, _ = load_data_from_arguments(args, build_loaders=False)
# select labels if needed
if args.which_labels is not None:
all_examples = LabelSubsetWrapper(all_examples, which_labels=args.which_labels)
# resize if needed
if args.resize_to_imagenet:
all_examples = ResizeImagesWrapper(all_examples, size=(224, 224))
# select 2n examples (tilde{z})
assert len(all_examples) >= 2 * args.n
np.random.seed(args.seed)
include_indices = np.random.choice(range(len(all_examples)), size=2 * args.n, replace=False)
all_examples = SubsetDataWrapper(all_examples, include_indices=include_indices)
return all_examples
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config', '-c', type=str, required=True)
parser.add_argument('--device', '-d', default='cuda', help='specifies the main device')
parser.add_argument('--all_device_ids', nargs='+', type=str, default=None,
help="If not None, this list specifies devices for multiple GPU training. "
"The first device should match with the main device (args.device).")
parser.add_argument('--batch_size', '-b', type=int, default=256)
parser.add_argument('--epochs', '-e', type=int, default=400)
parser.add_argument('--stopping_param', type=int, default=2**30)
parser.add_argument('--save_iter', '-s', type=int, default=10)
parser.add_argument('--vis_iter', '-v', type=int, default=10)
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--S_seed', type=int, default=42)
parser.add_argument('--exp_name', type=str, required=True)
parser.add_argument('--deterministic', action='store_true', dest='deterministic')
parser.add_argument('--shuffle_train_only_after_first_epoch', action='store_true',
dest="shuffle_train_only_after_first_epoch")
# data parameters
parser.add_argument('--dataset', '-D', type=str, default='corrupt4_mnist')
parser.add_argument('--data_augmentation', '-A', action='store_true', dest='data_augmentation')
parser.set_defaults(data_augmentation=False)
parser.add_argument('--error_prob', type=float, default=0.0)
    parser.add_argument('--n', '-n', type=int, required=True, help='Number of training examples')
parser.add_argument('--which_labels', nargs='+', default=None, type=int)
parser.add_argument('--clean_validation', action='store_true', default=False)
parser.add_argument('--resize_to_imagenet', action='store_true', default=False)
# hyper-parameters
parser.add_argument('--model_class', '-m', type=str, default='StandardClassifier')
parser.add_argument('--load_from', type=str, default=None)
parser.add_argument('--optimizer', type=str, default='adam', choices=['adam', 'sgd'])
parser.add_argument('--weight_decay', type=float, default=0.0)
parser.add_argument('--lr', type=float, default=1e-3, help='Learning rate')
parser.add_argument('--momentum', type=float, default=0.0, help='momentum')
parser.add_argument('--ld_lr', type=float, help='initial learning rate of Langevin dynamics')
parser.add_argument('--ld_beta', type=float, help='initial inverse temperature of LD')
parser.add_argument('--ld_track_grad_variance', dest='ld_track_grad_variance', action='store_true')
parser.add_argument('--ld_track_every_iter', type=int, default=1)
args = parser.parse_args()
print(args)
# Load data
all_examples = load_data(args)
# select the train/val split (S)
    np.random.seed(args.S_seed)
# --------------------------------------------------------
# Deep Iterative Matching Network
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by <NAME>, <NAME>
# --------------------------------------------------------
from __future__ import print_function, division
import numpy as np
import os, sys
cur_path = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(1, os.path.join(cur_path, ".."))
from lib.pair_matching.RT_transform import *
from lib.utils.mkdir_if_missing import mkdir_if_missing
import random
from lib.render_glumpy.render_py_light_modelnet import Render_Py_Light_ModelNet
import cv2
classes = [
"airplane",
"bed",
"bench",
"bookshelf",
"car",
"chair",
"guitar",
"laptop",
"mantel", #'dresser',
"piano",
"range_hood",
"sink",
"stairs",
"stool",
"tent",
"toilet",
"tv_stand",
"door",
"glass_box",
"wardrobe",
"plant",
"xbox",
"bathtub",
"table",
"monitor",
"sofa",
"night_stand",
]
# print(classes)
# config for renderer
width = 640
height = 480
K = np.array([[572.4114, 0, 325.2611], [0, 573.57043, 242.04899], [0, 0, 1]]) # LM
ZNEAR = 0.25
ZFAR = 6.0
depth_factor = 1000
modelnet_root = os.path.join(cur_path, "../data/ModelNet")
modelnet40_root = os.path.join(modelnet_root, "ModelNet40")
import argparse
def parse_args():
parser = argparse.ArgumentParser(description="renderer")
parser.add_argument("--model_path", required=True, help="model path")
parser.add_argument("--texture_path", required=True, help="texture path")
parser.add_argument("--seed", required=True, type=int, help="seed")
args = parser.parse_args()
return args
args = parse_args()
model_path = args.model_path
texture_path = args.texture_path
random.seed(args.seed)
np.random.seed(args.seed)
def angle_axis_to_quat(angle, rot_axis):
angle = angle % (2 * np.pi)
# print(angle)
q = np.zeros(4)
q[0] = np.cos(0.5 * angle)
q[1:] = np.sin(0.5 * angle) * rot_axis
if q[0] < 0:
q *= -1
# print('norm of q: ', LA.norm(q))
q = q / np.linalg.norm(q)
# print('norm of q: ', LA.norm(q))
return q
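# Illustrative sketch (arbitrary values): the unit quaternion for a quarter turn about
# the z-axis, i.e. approximately [cos(pi/4), 0, 0, sin(pi/4)].
def _example_quarter_turn_quat():
    return angle_axis_to_quat(np.pi / 2, np.array([0.0, 0.0, 1.0]))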
def angle(u, v):
c = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)) # -> cosine of the angle
    rad = np.arccos(np.clip(c, -1, 1))
    return rad
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
import warnings
from math import sqrt, pi, exp, log, floor
from abc import ABCMeta, abstractmethod
import numpy as np
from .. import constants as const
from ..config import ConfigurationItem
from ..utils.misc import isiterable
from ..utils.exceptions import AstropyUserWarning
from .. import units as u
from . import parameters
# Originally authored by <NAME> (<EMAIL>),
# and modified by <NAME> (<EMAIL>) and <NAME> (<EMAIL>).
# Many of these adapted from Hogg 1999, astro-ph/9905116
# and Linder 2003, PRL 90, 91301
__all__ = ["FLRW", "LambdaCDM", "FlatLambdaCDM", "wCDM", "FlatwCDM",
"Flatw0waCDM", "w0waCDM", "wpwaCDM", "w0wzCDM", "get_current",
"set_current", "WMAP5", "WMAP7", "WMAP9", "Planck13"]
__doctest_requires__ = {'*': ['scipy.integrate']}
# Constants
# Mpc in km
Mpc_km = (1 * u.Mpc).to(u.km)
arcsec_in_radians = 1 / 3600. * pi / 180
arcmin_in_radians = 1 / 60. * pi / 180
# Radiation parameter over c^2 in cgs
a_B_c2 = 4 * const.sigma_sb.cgs.value / const.c.cgs.value ** 3
# Boltzmann constant in eV / K
kB_evK = const.k_B.decompose().to(u.eV / u.K)
DEFAULT_COSMOLOGY = ConfigurationItem(
'default_cosmology', 'no_default',
'The default cosmology to use. Note this is only read on import, '
'so changing this value at runtime has no effect.')
class CosmologyError(Exception):
pass
class Cosmology(object):
""" Placeholder for when a more general Cosmology class is
implemented. """
pass
class FLRW(Cosmology):
""" A class describing an isotropic and homogeneous
(Friedmann-Lemaitre-Robertson-Walker) cosmology.
This is an abstract base class -- you can't instantiate
examples of this class, but must work with one of its
subclasses such as `LambdaCDM` or `wCDM`.
Parameters
----------
H0 : float or scalar astropy.units.Quantity
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
Tcmb0 : float or scalar astropy.units.Quantity
Temperature of the CMB z=0. If a float, must be in [K]. Default: 2.725.
Setting this to zero will turn off both photons and neutrinos (even
massive ones)
Neff : float
Effective number of Neutrino species. Default 3.04.
m_nu : astropy.units.Quantity
Mass of each neutrino species. If this is a scalar Quantity, then all
neutrino species are assumed to have that mass. Otherwise, the mass of
each species. The actual number of neutrino species (and hence the
number of elements of m_nu if it is not scalar) must be the floor of
Neff. Usually this means you must provide three neutrino masses unless
you are considering something like a sterile neutrino.
name : str
Optional name for this cosmological object.
Notes
-----
Class instances are static -- you can't change the values
of the parameters. That is, all of the attributes above are
read only.
"""
__metaclass__ = ABCMeta
def __init__(self, H0, Om0, Ode0, Tcmb0=2.725, Neff=3.04,
m_nu=u.Quantity(0.0, u.eV), name=None):
# all densities are in units of the critical density
self._Om0 = float(Om0)
if self._Om0 < 0.0:
raise ValueError("Matter density can not be negative")
self._Ode0 = float(Ode0)
self._Neff = float(Neff)
if self._Neff < 0.0:
raise ValueError("Effective number of neutrinos can "
"not be negative")
self.name = name
# Tcmb may have units
if isinstance(Tcmb0, u.Quantity):
if not Tcmb0.isscalar:
raise ValueError("Tcmb0 is a non-scalar quantity")
self._Tcmb0 = Tcmb0.to(u.K)
else:
self._Tcmb0 = float(Tcmb0) * u.K
# Hubble parameter at z=0, km/s/Mpc
if isinstance(H0, u.Quantity):
if not H0.isscalar:
raise ValueError("H0 is a non-scalar quantity")
self._H0 = H0.to(u.km / u.s / u.Mpc)
else:
self._H0 = float(H0) * u.km / u.s / u.Mpc
# 100 km/s/Mpc * h = H0 (so h is dimensionless)
self._h = self._H0.value / 100.
# Hubble distance
self._hubble_distance = (const.c / self._H0).to(u.Mpc)
# H0 in s^-1
H0_s = self._H0.to(1.0 / u.s)
# Hubble time
self._hubble_time = (1. / H0_s).to(u.Gyr)
# critical density at z=0 (grams per cubic cm)
self._critical_density0 = (3. * H0_s ** 2 /
(8. * pi * const.G.cgs)).cgs
# Load up neutrino masses.
self._nneutrinos = floor(self._Neff)
# We are going to share Neff between the neutrinos equally.
# In detail this is not correct, but it is a standard assumption
        # because properly calculating it is a) complicated b) depends
        # on the details of the massive neutrinos (e.g., their weak
# interactions, which could be unusual if one is considering sterile
# neutrinos)
self._massivenu = False
if self._nneutrinos > 0 and self._Tcmb0.value > 0:
self._neff_per_nu = self._Neff / self._nneutrinos
if not isinstance(m_nu, u.Quantity):
raise ValueError("m_nu must be a Quantity")
m_nu = m_nu.to(u.eV, equivalencies=u.mass_energy())
# Now, figure out if we have massive neutrinos to deal with,
# and, if so, get the right number of masses
            # It is worth the effort to keep track of massless ones separately
# (since they are quite easy to deal with, and a common use case
# is to set only one neutrino to have mass)
if m_nu.isscalar:
# Assume all neutrinos have the same mass
if m_nu.value == 0:
self._nmasslessnu = self._nneutrinos
self._nmassivenu = 0
else:
self._massivenu = True
self._nmasslessnu = 0
self._nmassivenu = self._nneutrinos
self._massivenu_mass = (m_nu.value *
np.ones(self._nneutrinos))
else:
# Make sure we have the right number of masses
# -unless- they are massless, in which case we cheat a little
if m_nu.value.min() < 0:
raise ValueError("Invalid (negative) neutrino mass"
" encountered")
if m_nu.value.max() == 0:
self._nmasslessnu = self._nneutrinos
self._nmassivenu = 0
else:
self._massivenu = True
if len(m_nu) != self._nneutrinos:
raise ValueError("Unexpected number of neutrino masses")
# Segregate out the massless ones
try:
# Numpy < 1.6 doesn't have count_nonzero
self._nmasslessnu = np.count_nonzero(m_nu.value == 0)
except AttributeError:
self._nmasslessnu = len(np.nonzero(m_nu.value == 0)[0])
self._nmassivenu = self._nneutrinos - self._nmasslessnu
w = np.nonzero(m_nu.value > 0)[0]
self._massivenu_mass = m_nu[w]
# Compute photon density, Tcmb, neutrino parameters
# Tcmb0=0 removes both photons and neutrinos, is handled
# as a special case for efficiency
if self._Tcmb0.value > 0:
# Compute photon density from Tcmb
self._Ogamma0 = a_B_c2 * self._Tcmb0.value ** 4 /\
self._critical_density0.value
# Compute Neutrino temperature
# The constant in front is (4/11)^1/3 -- see any
# cosmology book for an explanation -- for example,
# Weinberg 'Cosmology' p 154 eq (3.1.21)
self._Tnu0 = 0.7137658555036082 * self._Tcmb0
# Compute Neutrino Omega and total relativistic component
# for massive neutrinos
if self._massivenu:
nu_y = self._massivenu_mass / (kB_evK * self._Tnu0)
self._nu_y = nu_y.value
self._Onu0 = self._Ogamma0 * self.nu_relative_density(0)
else:
# This case is particularly simple, so do it directly
# The 0.2271... is 7/8 (4/11)^(4/3) -- the temperature
# bit ^4 (blackbody energy density) times 7/8 for
# FD vs. BE statistics.
self._Onu0 = 0.22710731766 * self._Neff * self._Ogamma0
else:
self._Ogamma0 = 0.0
self._Tnu0 = u.Quantity(0.0, u.K)
self._Onu0 = 0.0
# Compute curvature density
self._Ok0 = 1.0 - self._Om0 - self._Ode0 - self._Ogamma0 - self._Onu0
def _namelead(self):
""" Helper function for constructing __repr__"""
if self.name is None:
return "{0:s}(".format(self.__class__.__name__)
else:
return "{0:s}(name=\"{1:s}\", ".format(self.__class__.__name__,
self.name)
def __repr__(self):
retstr = "{0:s}H0={1:.3g}, Om0={2:.3g}, Ode0={3:.3g}, "\
"Tcmb0={4:.4g}, Neff={5:.3g}, m_nu={6:s})"
return retstr.format(self._namelead(), self._H0, self._Om0, self._Ode0,
self._Tcmb0, self._Neff, self.m_nu)
# Set up a set of properties for H0, Om0, Ode0, Ok0, etc. for user access.
# Note that we don't let these be set (so, obj.Om0 = value fails)
@property
def H0(self):
""" Return the Hubble constant as an astropy.units.Quantity at z=0"""
return self._H0
@property
def Om0(self):
""" Omega matter; matter density/critical density at z=0"""
return self._Om0
@property
def Ode0(self):
""" Omega dark energy; dark energy density/critical density at z=0"""
return self._Ode0
@property
def Ok0(self):
""" Omega curvature; the effective curvature density/critical density
at z=0"""
return self._Ok0
@property
def Tcmb0(self):
""" Temperature of the CMB as astropy.units.Quantity at z=0"""
return self._Tcmb0
@property
def Tnu0(self):
""" Temperature of the neutrino background as astropy.units.Quantity at z=0"""
return self._Tnu0
@property
def Neff(self):
""" Number of effective neutrino species"""
return self._Neff
@property
def has_massive_nu(self):
""" Does this cosmology have at least one massive neutrino species?"""
if self._Tnu0.value == 0:
return False
return self._massivenu
@property
def m_nu(self):
""" Mass of neutrino species"""
if self._Tnu0.value == 0:
return None
if not self._massivenu:
# Only massless
return u.Quantity(np.zeros(self._nmasslessnu), u.eV)
if self._nmasslessnu == 0:
# Only massive
return u.Quantity(self._massivenu_mass, u.eV)
# A mix -- the most complicated case
return u.Quantity(np.append(np.zeros(self._nmasslessnu),
self._massivenu_mass.value), u.eV)
@property
def h(self):
""" Dimensionless Hubble constant: h = H_0 / 100 [km/sec/Mpc]"""
return self._h
@property
def hubble_time(self):
""" Hubble time as astropy.units.Quantity"""
return self._hubble_time
@property
def hubble_distance(self):
""" Hubble distance as astropy.units.Quantity"""
return self._hubble_distance
@property
def critical_density0(self):
""" Critical density as astropy.units.Quantity at z=0"""
return self._critical_density0
@property
def Ogamma0(self):
""" Omega gamma; the density/critical density of photons at z=0"""
return self._Ogamma0
@property
def Onu0(self):
""" Omega nu; the density/critical density of neutrinos at z=0"""
return self._Onu0
@abstractmethod
def w(self, z):
""" The dark energy equation of state.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
w : ndarray, or float if input scalar
The dark energy equation of state
Notes
------
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the
pressure at redshift z and :math:`\\rho(z)` is the density
at redshift z, both in units where c=1.
This must be overridden by subclasses.
"""
raise NotImplementedError("w(z) is not implemented")
def Om(self, z):
""" Return the density parameter for non-relativistic matter
at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
Om : ndarray, or float if input scalar
The density of non-relativistic matter relative to the critical
density at each redshift.
"""
if isiterable(z):
z = np.asarray(z)
return self._Om0 * (1. + z) ** 3 * self.inv_efunc(z) ** 2
def Ok(self, z):
""" Return the equivalent density parameter for curvature
at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
Ok : ndarray, or float if input scalar
The equivalent density parameter for curvature at each redshift.
"""
if self._Ok0 == 0:
# Common enough case to be worth checking
return np.zeros_like(z)
if isiterable(z):
z = np.asarray(z)
return self._Ok0 * (1. + z) ** 2 * self.inv_efunc(z) ** 2
def Ode(self, z):
""" Return the density parameter for dark energy at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
Ode : ndarray, or float if input scalar
            The density of dark energy relative to the critical
density at each redshift.
"""
if self._Ode0 == 0:
return np.zeros_like(z)
return self._Ode0 * self.de_density_scale(z) * self.inv_efunc(z) ** 2
def Ogamma(self, z):
""" Return the density parameter for photons at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
Ogamma : ndarray, or float if input scalar
The energy density of photons relative to the critical
density at each redshift.
"""
if self._Ogamma0 == 0:
# Common enough case to be worth checking (although it clearly
# doesn't represent any real universe)
return np.zeros_like(z)
if isiterable(z):
z = np.asarray(z)
return self._Ogamma0 * (1. + z) ** 4 * self.inv_efunc(z) ** 2
def Onu(self, z):
""" Return the density parameter for massless neutrinos at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
Onu : ndarray, or float if input scalar
            The energy density of neutrinos relative to the critical
density at each redshift. Note that this includes their
kinetic energy (if they have mass), so it is not equal to
the commonly used :math:`\\sum \\frac{m_{\\nu}}{94 eV}`,
which does not include kinetic energy.
"""
if self._Onu0 == 0:
# Common enough case to be worth checking (although it clearly
# doesn't represent any real universe)
return np.zeros_like(z)
if isiterable(z):
z = np.asarray(z)
return self.Ogamma(z) * self.nu_relative_density(z)
def Tcmb(self, z):
""" Return the CMB temperature at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
Tcmb : astropy.units.Quantity
The temperature of the CMB in K.
"""
if isiterable(z):
z = np.asarray(z)
return self._Tcmb0 * (1.0 + z)
def Tnu(self, z):
""" Return the neutrino temperature at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
Tnu : astropy.units.Quantity
The temperature of the cosmic neutrino background in K.
"""
if isiterable(z):
z = np.asarray(z)
return self._Tnu0 * (1.0 + z)
def nu_relative_density(self, z):
""" Neutrino density function relative to the energy density in
photons.
Parameters
----------
z : array like
Redshift
Returns
-------
f : ndarray, or float if z is scalar
The neutrino density scaling factor relative to the density
in photons at each redshift
Notes
-----
The density in neutrinos is given by
.. math::
\\rho_{\\nu} \\left(a\\right) = 0.2271 \\, N_{eff} \\,
f\\left(m_{\\nu} a / T_{\\nu 0} \\right) \\,
\\rho_{\\gamma} \\left( a \\right)
where
.. math::
f \\left(y\\right) = \\frac{120}{7 \\pi^4}
\\int_0^{\\infty} \\, dx \\frac{x^2 \\sqrt{x^2 + y^2}}
{e^x + 1}
assuming that all neutrino species have the same mass.
If they have different masses, a similar term is calculated
for each one. Note that f has the asymptotic behavior :math:`f(0) = 1`.
This method returns :math:`0.2271 f` using an
analytical fitting formula given in Komatsu et al. 2011, ApJS 192, 18.
"""
# See Komatsu et al. 2011, eq 26 and the surrounding discussion
# However, this is modified to handle multiple neutrino masses
# by computing the above for each mass, then summing
prefac = 0.22710731766 # 7/8 (4/11)^4/3 -- see any cosmo book
        # The massive and massless contribution must be handled separately
# But check for common cases first
if not self._massivenu:
return prefac * self._Neff * np.ones_like(z)
p = 1.83
invp = 1.0 / 1.83
if np.isscalar(z):
curr_nu_y = self._nu_y / (1.0 + z) # only includes massive ones
rel_mass_per = (1.0 + (0.3173 * curr_nu_y) ** p) ** invp
rel_mass = rel_mass_per.sum() + self._nmasslessnu
return prefac * self._neff_per_nu * rel_mass
else:
z = np.asarray(z)
retarr = np.empty_like(z)
for i, redshift in enumerate(z):
curr_nu_y = self._nu_y / (1.0 + redshift)
rel_mass_per = (1.0 + (0.3173 * curr_nu_y) ** p) ** invp
rel_mass = rel_mass_per.sum() + self._nmasslessnu
retarr[i] = prefac * self._neff_per_nu * rel_mass
return retarr
def _w_integrand(self, ln1pz):
""" Internal convenience function for w(z) integral."""
# See Linder 2003, PRL 90, 91301 eq (5)
# Assumes scalar input, since this should only be called
# inside an integral
z = exp(ln1pz) - 1.0
return 1.0 + self.w(z)
def de_density_scale(self, z):
""" Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
I : ndarray, or float if input scalar
The scaling of the energy density of dark energy with redshift.
Notes
-----
The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`,
and is given by
.. math::
            I = \\exp \\left( 3 \\int_{a}^1 \\frac{ da^{\\prime} }{ a^{\\prime} }
\\left[ 1 + w\\left( a^{\\prime} \\right) \\right] \\right)
It will generally helpful for subclasses to overload this method if
the integral can be done analytically for the particular dark
energy equation of state that they implement.
"""
# This allows for an arbitrary w(z) following eq (5) of
# Linder 2003, PRL 90, 91301. The code here evaluates
# the integral numerically. However, most popular
# forms of w(z) are designed to make this integral analytic,
# so it is probably a good idea for subclasses to overload this
# method if an analytic form is available.
#
# The integral we actually use (the one given in Linder)
# is rewritten in terms of z, so looks slightly different than the
# one in the documentation string, but it's the same thing.
from scipy.integrate import quad
if isiterable(z):
z = np.asarray(z)
ival = np.array([quad(self._w_integrand, 0, log(1 + redshift))[0]
for redshift in z])
return np.exp(3 * ival)
else:
ival = quad(self._w_integrand, 0, log(1 + z))[0]
return exp(3 * ival)
def efunc(self, z):
""" Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
E : ndarray, or float if input scalar
The redshift scaling of the Hubble constant.
Notes
-----
The return value, E, is defined such that :math:`H(z) = H_0 E`.
It is not necessary to override this method, but if de_density_scale
takes a particularly simple form, it may be advantageous to.
"""
if isiterable(z):
z = np.asarray(z)
Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0
if self._massivenu:
Or = self._Ogamma0 * (1 + self.nu_relative_density(z))
else:
Or = self._Ogamma0 + self._Onu0
zp1 = 1.0 + z
return np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) +
Ode0 * self.de_density_scale(z))
def inv_efunc(self, z):
"""Inverse of efunc.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
E : ndarray, or float if input scalar
The redshift scaling of the inverse Hubble constant.
"""
# Avoid the function overhead by repeating code
if isiterable(z):
z = np.asarray(z)
Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0
if self._massivenu:
Or = self._Ogamma0 * (1 + self.nu_relative_density(z))
else:
Or = self._Ogamma0 + self._Onu0
zp1 = 1.0 + z
return 1.0 / np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) +
Ode0 * self.de_density_scale(z))
def _tfunc(self, z):
""" Integrand of the lookback time.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
I : ndarray, or float if input scalar
The integrand for the lookback time
References
----------
Eqn 30 from Hogg 1999.
"""
if isiterable(z):
zp1 = 1.0 + np.asarray(z)
else:
zp1 = 1. + z
return 1.0 / (zp1 * self.efunc(z))
def _xfunc(self, z):
""" Integrand of the absorption distance.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
X : ndarray, or float if input scalar
The integrand for the absorption distance
References
----------
See Hogg 1999 section 11.
"""
if isiterable(z):
zp1 = 1.0 + np.asarray(z)
else:
zp1 = 1. + z
return zp1 ** 2 / self.efunc(z)
def H(self, z):
""" Hubble parameter (km/s/Mpc) at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
H : astropy.units.Quantity
Hubble parameter at each input redshift.
"""
return self._H0 * self.efunc(z)
def scale_factor(self, z):
""" Scale factor at redshift `z`.
The scale factor is defined as :math:`a = 1 / (1 + z)`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
a : ndarray, or float if input scalar
Scale factor at each input redshift.
"""
if isiterable(z):
z = np.asarray(z)
return 1. / (1. + z)
def lookback_time(self, z):
""" Lookback time in Gyr to redshift `z`.
The lookback time is the difference between the age of the
Universe now and the age at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
t : astropy.units.Quantity
Lookback time in Gyr to each input redshift.
"""
from scipy.integrate import quad
if not isiterable(z):
return self._hubble_time * quad(self._tfunc, 0, z)[0]
out = np.array([quad(self._tfunc, 0, redshift)[0] for redshift in z])
return self._hubble_time * np.array(out)
def age(self, z):
""" Age of the universe in Gyr at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
t : astropy.units.Quantity
The age of the universe in Gyr at each input redshift.
"""
from scipy.integrate import quad
if not isiterable(z):
return self._hubble_time * quad(self._tfunc, z, np.inf)[0]
out = [quad(self._tfunc, redshift, np.inf)[0] for redshift in z]
return self._hubble_time * np.array(out)
def critical_density(self, z):
""" Critical density in grams per cubic cm at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
rho : astropy.units.Quantity
Critical density in g/cm^3 at each input redshift.
"""
return self._critical_density0 * (self.efunc(z)) ** 2
def comoving_distance(self, z):
""" Comoving line-of-sight distance in Mpc at a given
redshift.
The comoving distance along the line-of-sight between two
objects remains constant with time for objects in the Hubble
flow.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
        d : astropy.units.Quantity
Comoving distance in Mpc to each input redshift.
"""
from scipy.integrate import quad
if not isiterable(z):
return self._hubble_distance * quad(self.inv_efunc, 0, z)[0]
out = [quad(self.inv_efunc, 0, redshift)[0] for redshift in z]
return self._hubble_distance * np.array(out)
def comoving_transverse_distance(self, z):
""" Comoving transverse distance in Mpc at a given redshift.
This value is the transverse comoving distance at redshift `z`
corresponding to an angular separation of 1 radian. This is
the same as the comoving distance if omega_k is zero (as in
the current concordance lambda CDM model).
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
d : astropy.units.Quantity
Comoving transverse distance in Mpc at each input redshift.
Notes
-----
This quantity also called the 'proper motion distance' in some
texts.
"""
Ok0 = self._Ok0
dc = self.comoving_distance(z)
if Ok0 == 0:
return dc
sqrtOk0 = sqrt(abs(Ok0))
dh = self._hubble_distance
if Ok0 > 0:
return dh / sqrtOk0 * np.sinh(sqrtOk0 * dc.value / dh.value)
else:
return dh / sqrtOk0 * np.sin(sqrtOk0 * dc.value / dh.value)
def angular_diameter_distance(self, z):
""" Angular diameter distance in Mpc at a given redshift.
This gives the proper (sometimes called 'physical') transverse
distance corresponding to an angle of 1 radian for an object
at redshift `z`.
Weinberg, 1972, pp 421-424; Weedman, 1986, pp 65-67; Peebles,
1993, pp 325-327.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
d : astropy.units.Quantity
Angular diameter distance in Mpc at each input redshift.
"""
if isiterable(z):
z = np.asarray(z)
return self.comoving_transverse_distance(z) / (1. + z)
def luminosity_distance(self, z):
""" Luminosity distance in Mpc at redshift `z`.
This is the distance to use when converting between the
bolometric flux from an object at redshift `z` and its
bolometric luminosity.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
d : astropy.units.Quantity
Luminosity distance in Mpc at each input redshift.
References
----------
Weinberg, 1972, pp 420-424; Weedman, 1986, pp 60-62.
"""
if isiterable(z):
z = np.asarray(z)
return (1. + z) * self.comoving_transverse_distance(z)
def angular_diameter_distance_z1z2(self, z1, z2):
""" Angular diameter distance between objects at 2 redshifts.
Useful for gravitational lensing.
Parameters
----------
z1, z2 : array_like, shape (N,)
            Input redshifts. z2 must be larger than z1.
Returns
-------
d : astropy.units.Quantity, shape (N,) or single if input scalar
The angular diameter distance between each input redshift
pair.
Raises
------
CosmologyError
If omega_k is < 0.
Notes
-----
This method only works for flat or open curvature
(omega_k >= 0).
"""
# does not work for negative curvature
Ok0 = self._Ok0
if Ok0 < 0:
raise CosmologyError('Ok0 must be >= 0 to use this method.')
outscalar = False
if not isiterable(z1) and not isiterable(z2):
outscalar = True
z1 = np.atleast_1d(z1)
z2 = np.atleast_1d(z2)
if z1.size != z2.size:
raise ValueError('z1 and z2 must be the same size.')
if (z1 > z2).any():
            raise ValueError('z2 must be greater than z1')
# z1 < z2
if (z2 < z1).any():
z1, z2 = z2, z1
dm1 = self.comoving_transverse_distance(z1).value
dm2 = self.comoving_transverse_distance(z2).value
dh_2 = self._hubble_distance.value ** 2
if Ok0 == 0:
# Common case worth checking
out = (dm2 - dm1) / (1. + z2)
else:
out = ((dm2 * np.sqrt(1. + Ok0 * dm1 ** 2 / dh_2) -
dm1 * np.sqrt(1. + Ok0 * dm2 ** 2 / dh_2)) /
(1. + z2))
if outscalar:
return u.Quantity(out[0], u.Mpc)
return u.Quantity(out, u.Mpc)
def absorption_distance(self, z):
""" Absorption distance at redshift `z`.
This is used to calculate the number of objects with some
cross section of absorption and number density intersecting a
sightline per unit redshift path.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
d : float or ndarray
Absorption distance (dimensionless) at each input redshift.
References
----------
Hogg 1999 Section 11. (astro-ph/9905116)
Bahcall, <NAME>. and <NAME>. 1969, ApJ, 156L, 7B
"""
from scipy.integrate import quad
if not isiterable(z):
return quad(self._xfunc, 0, z)[0]
out = np.array([quad(self._xfunc, 0, redshift)[0] for redshift in z])
return out
def distmod(self, z):
""" Distance modulus at redshift `z`.
The distance modulus is defined as the (apparent magnitude -
absolute magnitude) for an object at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
distmod : astropy.units.Quantity
Distance modulus at each input redshift, in magnitudes
"""
# Remember that the luminosity distance is in Mpc
val = 5. * np.log10(self.luminosity_distance(z).value * 1.e5)
return u.Quantity(val, u.mag)
def comoving_volume(self, z):
""" Comoving volume in cubic Mpc at redshift `z`.
This is the volume of the universe encompassed by redshifts
less than `z`. For the case of omega_k = 0 it is a sphere of
radius `comoving_distance(z)` but it is less intuitive if
omega_k is not 0.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
V : astropy.units.Quantity
Comoving volume in :math:`Mpc^3` at each input redshift.
"""
Ok0 = self._Ok0
if Ok0 == 0:
return 4. / 3. * pi * self.comoving_distance(z) ** 3
dh = self._hubble_distance.value # .value for speed
dm = self.comoving_transverse_distance(z).value
term1 = 4. * pi * dh ** 3 / (2. * Ok0) * u.Mpc ** 3
term2 = dm / dh * np.sqrt(1 + Ok0 * (dm / dh) ** 2)
term3 = sqrt(abs(Ok0)) * dm / dh
if Ok0 > 0:
return term1 * (term2 - 1. / sqrt(abs(Ok0)) * np.arcsinh(term3))
else:
return term1 * (term2 - 1. / sqrt(abs(Ok0)) * np.arcsin(term3))
def kpc_comoving_per_arcmin(self, z):
""" Separation in transverse comoving kpc corresponding to an
arcminute at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
d : astropy.units.Quantity
The distance in comoving kpc corresponding to an arcmin at each
input redshift.
"""
return (self.comoving_transverse_distance(z).to(u.kpc) *
arcmin_in_radians / u.arcmin)
def kpc_proper_per_arcmin(self, z):
""" Separation in transverse proper kpc corresponding to an
arcminute at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
d : astropy.units.Quantity
The distance in proper kpc corresponding to an arcmin at each
input redshift.
"""
return (self.angular_diameter_distance(z).to(u.kpc) *
arcmin_in_radians / u.arcmin)
def arcsec_per_kpc_comoving(self, z):
""" Angular separation in arcsec corresponding to a comoving kpc
at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
theta : astropy.units.Quantity
The angular separation in arcsec corresponding to a comoving kpc
at each input redshift.
"""
return u.arcsec / (self.comoving_transverse_distance(z).to(u.kpc) *
arcsec_in_radians)
def arcsec_per_kpc_proper(self, z):
""" Angular separation in arcsec corresponding to a proper kpc at
redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
theta : astropy.units.Quantity
The angular separation in arcsec corresponding to a proper kpc
at each input redshift.
"""
return u.arcsec / (self.angular_diameter_distance(z).to(u.kpc) *
arcsec_in_radians)
class LambdaCDM(FLRW):
"""FLRW cosmology with a cosmological constant and curvature.
This has no additional attributes beyond those of FLRW.
Parameters
----------
H0 : float or astropy.units.Quantity
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of the cosmological constant in units of the
critical density at z=0.
Tcmb0 : float or astropy.units.Quantity
Temperature of the CMB z=0. If a float, must be in [K]. Default: 2.725.
Neff : float
Effective number of Neutrino species. Default 3.04.
m_nu : astropy.units.Quantity
Mass of each neutrino species. If this is a scalar Quantity, then all
neutrino species are assumed to have that mass. Otherwise, the mass of
each species. The actual number of neutrino species (and hence the
number of elements of m_nu if it is not scalar) must be the floor of
Neff. Usually this means you must provide three neutrino masses unless
you are considering something like a sterile neutrino.
name : str
Optional name for this cosmological object.
Examples
--------
>>> from astropy.cosmology import LambdaCDM
>>> cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
def __init__(self, H0, Om0, Ode0, Tcmb0=2.725, Neff=3.04,
m_nu=u.Quantity(0.0, u.eV), name=None):
FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name)
def w(self, z):
"""Returns dark energy equation of state at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
w : ndarray, or float if input scalar
The dark energy equation of state
Notes
------
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the
pressure at redshift z and :math:`\\rho(z)` is the density
at redshift z, both in units where c=1. Here this is
:math:`w(z) = -1`.
"""
return -1.0 * np.ones_like(z)
def de_density_scale(self, z):
""" Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
I : ndarray, or float if input scalar
The scaling of the energy density of dark energy with redshift.
Notes
-----
The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`,
and in this case is given by :math:`I = 1`.
"""
return np.ones_like(z)
def efunc(self, z):
""" Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
E : ndarray, or float if input scalar
            The redshift scaling of the Hubble constant.
Notes
-----
The return value, E, is defined such that :math:`H(z) = H_0 E`.
"""
if isiterable(z):
z = np.asarray(z)
# We override this because it takes a particularly simple
# form for a cosmological constant
Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0
if self._massivenu:
Or = self._Ogamma0 * (1 + self.nu_relative_density(z))
else:
Or = self._Ogamma0 + self._Onu0
zp1 = 1.0 + z
return np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + Ode0)
def inv_efunc(self, z):
r""" Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
E : ndarray, or float if input scalar
The inverse redshift scaling of the Hubble constant.
Notes
-----
The return value, E, is defined such that :math:`H_z = H_0 /
E`.
"""
if isiterable(z):
z = np.asarray(z)
Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0
if self._massivenu:
Or = self._Ogamma0 * (1 + self.nu_relative_density(z))
else:
Or = self._Ogamma0 + self._Onu0
zp1 = 1.0 + z
return 1.0 / np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + Ode0)
class FlatLambdaCDM(LambdaCDM):
"""FLRW cosmology with a cosmological constant and no curvature.
This has no additional attributes beyond those of FLRW.
Parameters
----------
H0 : float or astropy.units.Quantity
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Tcmb0 : float or astropy.units.Quantity
Temperature of the CMB z=0. If a float, must be in [K]. Default: 2.725.
Neff : float
Effective number of Neutrino species. Default 3.04.
m_nu : astropy.units.Quantity
Mass of each neutrino species. If this is a scalar Quantity, then all
neutrino species are assumed to have that mass. Otherwise, the mass of
each species. The actual number of neutrino species (and hence the
number of elements of m_nu if it is not scalar) must be the floor of
Neff. Usually this means you must provide three neutrino masses unless
you are considering something like a sterile neutrino.
name : str
Optional name for this cosmological object.
Examples
--------
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
def __init__(self, H0, Om0, Tcmb0=2.725, Neff=3.04,
m_nu=u.Quantity(0.0, u.eV), name=None):
FLRW.__init__(self, H0, Om0, 0.0, Tcmb0, Neff, m_nu, name=name)
# Do some twiddling after the fact to get flatness
self._Ode0 = 1.0 - self._Om0 - self._Ogamma0 - self._Onu0
self._Ok0 = 0.0
def efunc(self, z):
""" Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
E : ndarray, or float if input scalar
            The redshift scaling of the Hubble constant.
Notes
-----
The return value, E, is defined such that :math:`H(z) = H_0 E`.
"""
if isiterable(z):
z = np.asarray(z)
# We override this because it takes a particularly simple
# form for a cosmological constant
Om0, Ode0 = self._Om0, self._Ode0
if self._massivenu:
Or = self._Ogamma0 * (1 + self.nu_relative_density(z))
else:
Or = self._Ogamma0 + self._Onu0
zp1 = 1.0 + z
return np.sqrt(zp1 ** 3 * (Or * zp1 + Om0) + Ode0)
def inv_efunc(self, z):
r"""Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
E : ndarray, or float if input scalar
The inverse redshift scaling of the Hubble constant.
Notes
-----
The return value, E, is defined such that :math:`H_z = H_0 / E`.
"""
if isiterable(z):
z = np.asarray(z)
Om0, Ode0 = self._Om0, self._Ode0
if self._massivenu:
Or = self._Ogamma0 * (1 + self.nu_relative_density(z))
else:
Or = self._Ogamma0 + self._Onu0
zp1 = 1.0 + z
return 1.0 / np.sqrt(zp1 ** 3 * (Or * zp1 + Om0) + Ode0)
def __repr__(self):
retstr = "{0:s}H0={1:.3g}, Om0={2:.3g}, Tcmb0={3:.4g}, "\
"Neff={4:.3g}, m_nu={5:s})"
return retstr.format(self._namelead(), self._H0, self._Om0,
self._Tcmb0, self._Neff, self.m_nu)
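# Minimal usage sketch (added for illustration; numbers are approximate and assume the
# default Tcmb0/Neff settings):
#
#   cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
#   cosmo.age(0)        # ~13.5 Gyr
#   cosmo.distmod(0.5)  # ~42.3 mag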
class wCDM(FLRW):
"""FLRW cosmology with a constant dark energy equation of state
and curvature.
This has one additional attribute beyond those of FLRW.
Parameters
----------
H0 : float or astropy.units.Quantity
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
w0 : float
Dark energy equation of state at all redshifts. This is
pressure/density for dark energy in units where c=1. A cosmological
constant has w0=-1.0.
Tcmb0 : float or astropy.units.Quantity
Temperature of the CMB z=0. If a float, must be in [K]. Default: 2.725.
Neff : float
Effective number of Neutrino species. Default 3.04.
m_nu : astropy.units.Quantity
Mass of each neutrino species. If this is a scalar Quantity, then all
neutrino species are assumed to have that mass. Otherwise, the mass of
each species. The actual number of neutrino species (and hence the
number of elements of m_nu if it is not scalar) must be the floor of
Neff. Usually this means you must provide three neutrino masses unless
you are considering something like a sterile neutrino.
name : str
Optional name for this cosmological object.
Examples
--------
>>> from astropy.cosmology import wCDM
>>> cosmo = wCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
def __init__(self, H0, Om0, Ode0, w0=-1., Tcmb0=2.725,
Neff=3.04, m_nu=u.Quantity(0.0, u.eV), name=None):
FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name)
self._w0 = float(w0)
@property
def w0(self):
""" Dark energy equation of state"""
return self._w0
def w(self, z):
"""Returns dark energy equation of state at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
w : ndarray, or float if input scalar
The dark energy equation of state
Notes
------
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the
pressure at redshift z and :math:`\\rho(z)` is the density
at redshift z, both in units where c=1. Here this is
:math:`w(z) = w_0`.
"""
return self._w0 * np.ones_like(z)
def de_density_scale(self, z):
""" Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
I : ndarray, or float if input scalar
The scaling of the energy density of dark energy with redshift.
Notes
-----
The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`,
and in this case is given by
:math:`I = \\left(1 + z\\right)^{3\\left(1 + w_0\\right)}`
"""
if isiterable(z):
z = np.asarray(z)
return (1.0 + z) ** (3 * (1 + self._w0))
def efunc(self, z):
""" Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
E : ndarray, or float if input scalar
            The redshift scaling of the Hubble constant.
Notes
-----
The return value, E, is defined such that :math:`H(z) = H_0 E`.
"""
if isiterable(z):
z = np.asarray(z)
Om0, Ode0, Ok0, w0 = self._Om0, self._Ode0, self._Ok0, self._w0
if self._massivenu:
Or = self._Ogamma0 * (1 + self.nu_relative_density(z))
else:
Or = self._Ogamma0 + self._Onu0
zp1 = 1.0 + z
return np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) +
Ode0 * zp1 ** (3.0 * (1 + w0)))
def inv_efunc(self, z):
r""" Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
E : ndarray, or float if input scalar
The inverse redshift scaling of the Hubble constant.
Notes
-----
The return value, E, is defined such that :math:`H_z = H_0 / E`.
"""
if isiterable(z):
z = np.asarray(z)
Om0, Ode0, Ok0, w0 = self._Om0, self._Ode0, self._Ok0, self._w0
if self._massivenu:
Or = self._Ogamma0 * (1 + self.nu_relative_density(z))
else:
Or = self._Ogamma0 + self._Onu0
zp1 = 1.0 + z
return 1.0 / np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) +
Ode0 * zp1 ** (3 * (1 + w0)))
def __repr__(self):
retstr = "{0:s}H0={1:.3g}, Om0={2:.3g}, Ode0={3:.3g}, w0={4:.3g}, "\
"Tcmb0={5:.4g}, Neff={6:.3g}, m_nu={7:s})"
return retstr.format(self._namelead(), self._H0, self._Om0,
self._Ode0, self._w0, self._Tcmb0, self._Neff,
self.m_nu)
class FlatwCDM(wCDM):
"""FLRW cosmology with a constant dark energy equation of state
and no spatial curvature.
This has one additional attribute beyond those of FLRW.
Parameters
----------
H0 : float or astropy.units.Quantity
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
w0 : float
Dark energy equation of state at all redshifts. This is
pressure/density for dark energy in units where c=1. A cosmological
constant has w0=-1.0.
Tcmb0 : float or astropy.units.Quantity
Temperature of the CMB z=0. If a float, must be in [K]. Default: 2.725.
Neff : float
Effective number of Neutrino species. Default 3.04.
m_nu : astropy.units.Quantity
Mass of each neutrino species. If this is a scalar Quantity, then all
neutrino species are assumed to have that mass. Otherwise, the mass of
each species. The actual number of neutrino species (and hence the
number of elements of m_nu if it is not scalar) must be the floor of
Neff. Usually this means you must provide three neutrino masses unless
you are considering something like a sterile neutrino.
name : str
Optional name for this cosmological object.
Examples
--------
>>> from astropy.cosmology import FlatwCDM
>>> cosmo = FlatwCDM(H0=70, Om0=0.3, w0=-0.9)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
def __init__(self, H0, Om0, w0=-1., Tcmb0=2.725,
Neff=3.04, m_nu=u.Quantity(0.0, u.eV), name=None):
FLRW.__init__(self, H0, Om0, 0.0, Tcmb0, Neff, m_nu, name=name)
self._w0 = float(w0)
# Do some twiddling after the fact to get flatness
self._Ode0 = 1.0 - self._Om0 - self._Ogamma0 - self._Onu0
self._Ok0 = 0.0
def efunc(self, z):
""" Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
E : ndarray, or float if input scalar
            The redshift scaling of the Hubble constant.
Notes
-----
The return value, E, is defined such that :math:`H(z) = H_0 E`.
"""
if isiterable(z):
z = np.asarray(z)
Om0, Ode0, w0 = self._Om0, self._Ode0, self._w0
if self._massivenu:
Or = self._Ogamma0 * (1 + self.nu_relative_density(z))
else:
Or = self._Ogamma0 + self._Onu0
zp1 = 1.0 + z
return np.sqrt(zp1 ** 3 * (Or * zp1 + Om0) +
Ode0 * zp1 ** (3.0 * (1 + w0)))
def inv_efunc(self, z):
r""" Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
E : ndarray, or float if input scalar
The inverse redshift scaling of the Hubble constant.
Notes
-----
The return value, E, is defined such that :math:`H_z = H_0 / E`.
"""
if isiterable(z):
z = np.asarray(z)
Om0, Ode0, Ok0, w0 = self._Om0, self._Ode0, self._Ok0, self._w0
if self._massivenu:
Or = self._Ogamma0 * (1 + self.nu_relative_density(z))
else:
Or = self._Ogamma0 + self._Onu0
zp1 = 1.0 + z
return 1.0 / np.sqrt(zp1 ** 3 * (Or * zp1 + Om0) +
Ode0 * zp1 ** (3 * (1 + w0)))
def __repr__(self):
retstr = "{0:s}H0={1:.3g}, Om0={2:.3g}, w0={3:.3g}, Tcmb0={4:.4g}, "\
"Neff={5:.3g}, m_nu={6:s})"
return retstr.format(self._namelead(), self._H0, self._Om0, self._w0,
self._Tcmb0, self._Neff, self.m_nu)
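# Consistency sketch (added for illustration): with w0 = -1 the constant-w models reduce
# to the cosmological-constant case, because de_density_scale(z) = (1 + z)**(3*(1 + w0))
# is identically 1. For example:
#
#   wCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-1.0).comoving_distance(0.5)
#   LambdaCDM(H0=70, Om0=0.3, Ode0=0.7).comoving_distance(0.5)
#
# should return the same value.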
class w0waCDM(FLRW):
"""FLRW cosmology with a CPL dark energy equation of state and curvature.
The equation for the dark energy equation of state uses the
CPL form as described in Chevallier & Polarski Int. J. Mod. Phys.
D10, 213 (2001) and Linder PRL 90, 91301 (2003):
:math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`.
Parameters
----------
H0 : float or astropy.units.Quantity
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
w0 : float
Dark energy equation of state at z=0 (a=1). This is pressure/density
for dark energy in units where c=1.
wa : float
Negative derivative of the dark energy equation of state with respect
to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0.
Tcmb0 : float or astropy.units.Quantity
Temperature of the CMB z=0. If a float, must be in [K]. Default: 2.725.
Neff : float
Effective number of Neutrino species. Default 3.04.
m_nu : astropy.units.Quantity
Mass of each neutrino species. If this is a scalar Quantity, then all
neutrino species are assumed to have that mass. Otherwise, the mass of
each species. The actual number of neutrino species (and hence the
number of elements of m_nu if it is not scalar) must be the floor of
Neff. Usually this means you must provide three neutrino masses unless
you are considering something like a sterile neutrino.
name : str
Optional name for this cosmological object.
Examples
--------
>>> from astropy.cosmology import w0waCDM
>>> cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.2)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
def __init__(self, H0, Om0, Ode0, w0=-1., wa=0., Tcmb0=2.725,
Neff=3.04, m_nu=u.Quantity(0.0, u.eV), name=None):
FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name)
self._w0 = float(w0)
self._wa = float(wa)
@property
def w0(self):
""" Dark energy equation of state at z=0"""
return self._w0
@property
def wa(self):
""" Negative derivative of dark energy equation of state w.r.t. a"""
return self._wa
def w(self, z):
"""Returns dark energy equation of state at redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
w : ndarray, or float if input scalar
The dark energy equation of state
Notes
------
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the
pressure at redshift z and :math:`\\rho(z)` is the density
at redshift z, both in units where c=1. Here this is
:math:`w(z) = w_0 + w_a (1 - a) = w_0 + w_a \\frac{z}{1+z}`.
"""
if isiterable(z):
z = np.asarray(z)
return self._w0 + self._wa * z / (1.0 + z)
def de_density_scale(self, z):
""" Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
I : ndarray, or float if input scalar
The scaling of the energy density of dark energy with redshift.
Notes
-----
The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`,
and in this case is given by
.. math::
I = \\left(1 + z\\right)^{3 \\left(1 + w_0 + w_a\\right)}
            \\exp \\left(-3 w_a \\frac{z}{1+z}\\right)
"""
if isiterable(z):
z = | np.asarray(z) | numpy.asarray |
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import sys
"""
"""
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data(path="mnist.npz")
inptsz = 28 * 28
hidsz = 100
outsz = 10
#Activation functions
def ReLU(x):
return (x > 0) * x
def dReLU(x):
return 1 * (x > 0)
def softmax(x):
    x = x - np.max(x)  # shift for numerical stability without mutating the caller's array
    return np.exp(x) / np.sum(np.exp(x))
def sigmoid(x):
return 1 /(1 + np.exp(-x))
def dsigmoid(x):
return x * (1 -x)
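# Added sketch: a finite-difference check of the sigmoid derivative. Note that dsigmoid
# above expects the *sigmoid output* as its argument, i.e. it is used as dsigmoid(sigmoid(z)).
def _check_dsigmoid(z=0.3, eps=1e-5):
    numeric = (sigmoid(z + eps) - sigmoid(z - eps)) / (2 * eps)
    analytic = dsigmoid(sigmoid(z))
    assert abs(numeric - analytic) < 1e-6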
#Initializing learnable parameters
W_1 = 0.2 * np.random.random((hidsz, inptsz)) - 0.1
W_2 = 0.2 * np.random.random((outsz,hidsz)) - 0.1
b_1 = np.random.randn(hidsz, 1) + 0.01
b_2 = np.random.randn(outsz, 1) + 0.01
lr = 0.001 #Learning rate
#One_hot_Encoding train and test labels
one_hot_labels = np.zeros((len(y_train), 10, 1))
for i, l in enumerate(y_train):
one_hot_labels[i][l] = 1
y_train = one_hot_labels
test_labels = np.zeros((len(y_test),10,1))
for i,l in enumerate(y_test):
test_labels[i][l] = 1
# flattening data
y_test = test_labels
x = np.mean(x_train)
x_train = x_train.reshape(60000, 28*28, 1) - x
x_test = x_test.reshape(10000, 28*28, 1) - x
lmda = 0.005
plt.figure()
#Updating parameters
for i in range(1000):
dW_2 = np.zeros(W_2.shape)
db_2 = np.zeros(b_2.shape)
dW_1 = np.zeros(W_1.shape)
db_1 = np.zeros(b_1.shape)
loss = 0.0
reg_loss = 0.0
count = 0
for j in range(10000):
#Forward Propagation
z_1 = np.dot(W_1, x_train[j]) + b_1
a_1 = sigmoid(z_1)
z_2 = | np.dot(W_2, a_1) | numpy.dot |
from numba import jit, njit
import numpy as np
@njit
def quantile(x, q):
"""
Return, roughly, the q-th quantile of univariate data set x.
Not exact, skips linear interpolation. Works fine for large
samples.
"""
k = len(x)
x.sort()
return x[int(q * k)]
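# Added usage sketch: on a large standard-normal sample the 0.5 quantile should be close
# to 0 and the 0.975 quantile close to 1.96 (note that quantile() sorts x in place).
#
#   x = np.random.randn(100000)
#   quantile(x, 0.5)     # ~0.0
#   quantile(x, 0.975)   # ~1.96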
@njit
def lininterp_1d(grid, vals, x):
"""
Linearly interpolate (grid, vals) to evaluate at x.
Here grid must be regular (evenly spaced).
Based on linear interpolation code written by @albop.
Parameters
----------
grid and vals are numpy arrays, x is a float
Returns
-------
a float, the interpolated value
"""
a, b, G = np.min(grid), np.max(grid), len(grid)
s = (x - a) / (b - a)
q_0 = max(min(int(s * (G - 1)), (G - 2)), 0)
v_0 = vals[q_0]
v_1 = vals[q_0 + 1]
λ = s * (G - 1) - q_0
return (1 - λ) * v_0 + λ * v_1
@njit
def lininterp_2d(x_grid, y_grid, vals, s):
"""
Fast 2D interpolation. Uses linear extrapolation for points outside the
grid.
Based on linear interpolation code written by @albop.
Parameters
----------
x_grid: np.ndarray
grid points for x, one dimensional
y_grid: np.ndarray
grid points for y, one dimensional
vals: np.ndarray
vals[i, j] = f(x[i], y[j])
s: np.ndarray
2D point at which to evaluate
"""
nx = len(x_grid)
ny = len(y_grid)
ax, bx = x_grid[0], x_grid[-1]
ay, by = y_grid[0], y_grid[-1]
s_0 = s[0]
s_1 = s[1]
# (s_1, ..., sn_d) : normalized evaluation point (in [0,1] inside the grid)
s_0 = (s_0 - ax) / (bx - ax)
s_1 = (s_1 - ay) / (by - ay)
# q_k : index of the interval "containing" s_k
q_0 = max(min(int(s_0 *(nx - 1)), (nx - 2) ), 0)
q_1 = max(min(int(s_1 *(ny - 1)), (ny - 2) ), 0)
# lam_k : barycentric coordinate in interval k
lam_0 = s_0 * (nx-1) - q_0
lam_1 = s_1 * (ny-1) - q_1
# v_ij: values on vertices of hypercube "containing" the point
v_00 = vals[(q_0), (q_1)]
v_01 = vals[(q_0), (q_1+1)]
v_10 = vals[(q_0+1), (q_1)]
v_11 = vals[(q_0+1), (q_1+1)]
# interpolated/extrapolated value
out = (1-lam_0) * ((1-lam_1) * (v_00) + \
(lam_1) * (v_01)) + (lam_0) * ((1-lam_1) * (v_10) \
+ (lam_1) * (v_11))
return out
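# Added usage sketch: interpolating the affine function f(x, y) = x + 2*y on a regular
# grid is exact under bilinear interpolation, so an off-grid evaluation recovers it.
#
#   xg = np.linspace(0.0, 1.0, 11)
#   yg = np.linspace(0.0, 1.0, 11)
#   vals = xg[:, None] + 2.0 * yg[None, :]
#   lininterp_2d(xg, yg, vals, np.array([0.33, 0.71]))   # ~1.75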
@njit
def lininterp_3d(x_grid, y_grid, z_grid, vals, s):
"""
Fast 3D interpolation. Uses linear extrapolation for points outside the
grid. Note that the grid must be regular (i.e., evenly spaced).
Based on linear interpolation code written by @albop.
Parameters
----------
x_grid: np.ndarray
grid points for x, one dimensional regular grid
y_grid: np.ndarray
grid points for y, one dimensional regular grid
z_grid: np.ndarray
grid points for z, one dimensional regular grid
vals: np.ndarray
vals[i, j, k] = f(x[i], y[j], z[k])
s: np.ndarray
3D point at which to evaluate function
"""
d = 3
smin = (x_grid[0], y_grid[0], z_grid[0])
smax = (x_grid[-1], y_grid[-1], z_grid[-1])
order_0 = len(x_grid)
order_1 = len(y_grid)
order_2 = len(z_grid)
# (s_1, ..., s_d) : evaluation point
s_0 = s[0]
s_1 = s[1]
s_2 = s[2]
# normalized evaluation point (in [0,1] inside the grid)
s_0 = (s_0-smin[0])/(smax[0]-smin[0])
s_1 = (s_1-smin[1])/(smax[1]-smin[1])
s_2 = (s_2-smin[2])/(smax[2]-smin[2])
# q_k : index of the interval "containing" s_k
q_0 = max( min( int(s_0 *(order_0-1)), (order_0-2) ), 0 )
q_1 = max( min( int(s_1 *(order_1-1)), (order_1-2) ), 0 )
q_2 = max( min( int(s_2 *(order_2-1)), (order_2-2) ), 0 )
# lam_k : barycentric coordinate in interval k
lam_0 = s_0*(order_0-1) - q_0
lam_1 = s_1*(order_1-1) - q_1
lam_2 = s_2*(order_2-1) - q_2
# v_ij: values on vertices of hypercube "containing" the point
v_000 = vals[(q_0), (q_1), (q_2)]
v_001 = vals[(q_0), (q_1), (q_2+1)]
v_010 = vals[(q_0), (q_1+1), (q_2)]
v_011 = vals[(q_0), (q_1+1), (q_2+1)]
v_100 = vals[(q_0+1), (q_1), (q_2)]
v_101 = vals[(q_0+1), (q_1), (q_2+1)]
v_110 = vals[(q_0+1), (q_1+1), (q_2)]
v_111 = vals[(q_0+1), (q_1+1), (q_2+1)]
# interpolated/extrapolated value
output = (1-lam_0)*((1-lam_1)*((1-lam_2)*(v_000) + (lam_2)*(v_001)) + (lam_1)*((1-lam_2)*(v_010) + (lam_2)*(v_011))) + (lam_0)*((1-lam_1)*((1-lam_2)*(v_100) + (lam_2)*(v_101)) + (lam_1)*((1-lam_2)*(v_110) + (lam_2)*(v_111)))
return output
@njit
def lininterp_4d(u_grid, v_grid, w_grid, x_grid, vals, s):
"""
Fast 4D interpolation. Uses linear extrapolation for points outside the
grid. Note that the grid must be regular (i.e., evenly spaced).
Based on linear interpolation code written by @albop.
Parameters
----------
u_grid: np.ndarray
grid points for u, one dimensional regular grid
v_grid: np.ndarray
grid points for v, one dimensional regular grid
w_grid: np.ndarray
grid points for w, one dimensional regular grid
x_grid: np.ndarray
grid points for x, one dimensional regular grid
vals: np.ndarray
vals[i, j, k, l] = f(u[i], v[j], w[k], x[l])
s: np.ndarray
4D point at which to evaluate function
"""
d = 4
smin = (u_grid[0], v_grid[0], w_grid[0], x_grid[0])
smax = (u_grid[-1], v_grid[-1], w_grid[-1], x_grid[-1])
order_0 = len(u_grid)
order_1 = len(v_grid)
order_2 = len(w_grid)
order_3 = len(x_grid)
# (s_1, ..., s_d) : evaluation point
s_0 = s[0]
s_1 = s[1]
s_2 = s[2]
s_3 = s[3]
# (s_1, ..., sn_d) : normalized evaluation point (in [0,1] inside the grid)
s_0 = (s_0-smin[0])/(smax[0]-smin[0])
s_1 = (s_1-smin[1])/(smax[1]-smin[1])
s_2 = (s_2-smin[2])/(smax[2]-smin[2])
s_3 = (s_3-smin[3])/(smax[3]-smin[3])
# q_k : index of the interval "containing" s_k
q_0 = max( min( int(s_0 *(order_0-1)), (order_0-2) ), 0 )
q_1 = max( min( int(s_1 *(order_1-1)), (order_1-2) ), 0 )
q_2 = max( min( int(s_2 *(order_2-1)), (order_2-2) ), 0 )
q_3 = max( min( int(s_3 *(order_3-1)), (order_3-2) ), 0 )
# lam_k : barycentric coordinate in interval k
lam_0 = s_0*(order_0-1) - q_0
lam_1 = s_1*(order_1-1) - q_1
lam_2 = s_2*(order_2-1) - q_2
lam_3 = s_3*(order_3-1) - q_3
# v_ij: values on vertices of hypercube "containing" the point
v_0000 = vals[(q_0), (q_1), (q_2), (q_3)]
v_0001 = vals[(q_0), (q_1), (q_2), (q_3+1)]
v_0010 = vals[(q_0), (q_1), (q_2+1), (q_3)]
v_0011 = vals[(q_0), (q_1), (q_2+1), (q_3+1)]
v_0100 = vals[(q_0), (q_1+1), (q_2), (q_3)]
v_0101 = vals[(q_0), (q_1+1), (q_2), (q_3+1)]
v_0110 = vals[(q_0), (q_1+1), (q_2+1), (q_3)]
v_0111 = vals[(q_0), (q_1+1), (q_2+1), (q_3+1)]
v_1000 = vals[(q_0+1), (q_1), (q_2), (q_3)]
v_1001 = vals[(q_0+1), (q_1), (q_2), (q_3+1)]
v_1010 = vals[(q_0+1), (q_1), (q_2+1), (q_3)]
v_1011 = vals[(q_0+1), (q_1), (q_2+1), (q_3+1)]
v_1100 = vals[(q_0+1), (q_1+1), (q_2), (q_3)]
v_1101 = vals[(q_0+1), (q_1+1), (q_2), (q_3+1)]
v_1110 = vals[(q_0+1), (q_1+1), (q_2+1), (q_3)]
v_1111 = vals[(q_0+1), (q_1+1), (q_2+1), (q_3+1)]
# interpolated/extrapolated value
output = (1-lam_0)*((1-lam_1)*((1-lam_2)*((1-lam_3)*(v_0000) + (lam_3)*(v_0001)) + (lam_2)*((1-lam_3)*(v_0010) + (lam_3)*(v_0011))) + (lam_1)*((1-lam_2)*((1-lam_3)*(v_0100) + (lam_3)*(v_0101)) + (lam_2)*((1-lam_3)*(v_0110) + (lam_3)*(v_0111)))) + (lam_0)*((1-lam_1)*((1-lam_2)*((1-lam_3)*(v_1000) + (lam_3)*(v_1001)) + (lam_2)*((1-lam_3)*(v_1010) + (lam_3)*(v_1011))) + (lam_1)*((1-lam_2)*((1-lam_3)*(v_1100) + (lam_3)*(v_1101)) + (lam_2)*((1-lam_3)*(v_1110) + (lam_3)*(v_1111))))
return output
def test_lininterp():
"""
Make sure that we can at least represent linear functions exactly.
"""
intercept = 1.0
a, b, c, d = 0.1, 0.2, 0.3, 0.4
@njit
def f1(x):
return intercept + a * x
nx = 10
x_grid = np.linspace(-1, 1, nx)
vals = np.empty(nx)
for i in range(nx):
vals[i] = f1(x_grid[i])
for q in range(10):
t = np.random.randn()
p = f1(t)
p_hat = lininterp_1d(x_grid, vals, t)
assert np.isclose(p, p_hat)
@njit
def f2(x, y):
return intercept + a * x + b * y
nx, ny = 10, 10
x_grid = np.linspace(-1, 1, nx)
y_grid = np.linspace(-1, 1, ny)
vals = np.empty((nx, ny))
for i in range(nx):
for j in range(ny):
vals[i, j] = f2(x_grid[i], y_grid[j])
for q in range(10):
t = np.random.randn(2)
p = f2(*t)
p_hat = lininterp_2d(x_grid, y_grid, vals, t)
assert np.allclose(p, p_hat)
@njit
def f3(x, y, z):
return intercept + a * x + b * y + c * z
nx, ny, nz = 10, 10, 10
x_grid = np.linspace(-1, 1, nx)
y_grid = | np.linspace(-1, 1, ny) | numpy.linspace |
# coding=utf-8
import talib
from talib import MA_Type  # used for the BBANDS matype argument below
import numpy as np
'''
TA-Lib unified dispatch function
name:    indicator name
price_h: high prices
price_l: low prices
price_c: close prices
price_v: volumes
price_o: open prices
'''
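# Added usage sketch (assumes equal-length OHLCV sequences of floats; the variable
# names below are hypothetical):
#
#   atr = ta('ATR', highs, lows, closes, volumes, opens)
#   bbands_upper = ta('BBANDS', highs, lows, closes, volumes, opens)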
def ta(name, price_h, price_l, price_c, price_v, price_o):
# function 'MAX'/'MAXINDEX'/'MIN'/'MININDEX'/'MINMAX'/'MINMAXINDEX'/'SUM' is missing
if name == 'AD':
return talib.AD(np.array(price_h), np.array(price_l), np.array(price_c), np.asarray(price_v, dtype='float'))
if name == 'ADOSC':
return talib.ADOSC(np.array(price_h), np.array(price_l), np.array(price_c), np.asarray(price_v, dtype='float'),
fastperiod=2, slowperiod=10)
if name == 'ADX':
return talib.ADX(np.array(price_h), np.array(price_l), np.asarray(price_c, dtype='float'), timeperiod=14)
if name == 'ADXR':
return talib.ADXR(np.array(price_h), np.array(price_l), np.asarray(price_c, dtype='float'), timeperiod=14)
if name == 'APO':
return talib.APO(np.asarray(price_c, dtype='float'), fastperiod=12, slowperiod=26, matype=0)
if name == 'AROON':
AROON_DWON, AROON2_UP = talib.AROON(np.array(price_h), np.asarray(price_l, dtype='float'), timeperiod=90)
return (AROON_DWON, AROON2_UP)
if name == 'AROONOSC':
return talib.AROONOSC(np.array(price_h), np.asarray(price_l, dtype='float'), timeperiod=14)
if name == 'ATR':
return talib.ATR(np.array(price_h), np.array(price_l), np.asarray(price_c, dtype='float'), timeperiod=14)
if name == 'AVGPRICE':
return talib.AVGPRICE(np.array(price_o), np.array(price_h), np.array(price_l), np.asarray(price_c, dtype='float'))
if name == 'BBANDS':
BBANDS1, BBANDS2, BBANDS3 = talib.BBANDS(np.asarray(price_c, dtype='float'), matype=MA_Type.T3)
return BBANDS1
if name == 'BETA':
return talib.BETA(np.array(price_h), np.asarray(price_l, dtype='float'), timeperiod=5)
if name == 'BOP':
return talib.BOP(np.array(price_o), np.array(price_h), np.array(price_l), np.asarray(price_c, dtype='float'))
if name == 'CCI':
return talib.CCI(np.array(price_h), np.array(price_l), np.asarray(price_c, dtype='float'), timeperiod=14)
if name == 'CDL2CROWS':
return talib.CDL2CROWS(np.array(price_o), np.array(price_h), np.array(price_l), np.asarray(price_c, dtype='float'))
if name == 'CDL3BLACKCROWS':
return talib.CDL3BLACKCROWS(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDL3INSIDE':
return talib.CDL3INSIDE(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDL3LINESTRIKE':
return talib.CDL3LINESTRIKE(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDL3OUTSIDE':
return talib.CDL3OUTSIDE(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDL3STARSINSOUTH':
return talib.CDL3STARSINSOUTH(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDL3WHITESOLDIERS':
return talib.CDL3WHITESOLDIERS(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLABANDONEDBABY':
return talib.CDLABANDONEDBABY(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'), penetration=0)
if name == 'CDLADVANCEBLOCK':
return talib.CDLADVANCEBLOCK(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLBELTHOLD':
return talib.CDLBELTHOLD(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLBREAKAWAY':
return talib.CDLBREAKAWAY(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLCLOSINGMARUBOZU':
return talib.CDLCLOSINGMARUBOZU(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLCONCEALBABYSWALL':
return talib.CDLCONCEALBABYSWALL(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLCOUNTERATTACK':
return talib.CDLCOUNTERATTACK(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLDARKCLOUDCOVER':
return talib.CDLDARKCLOUDCOVER(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'), penetration=0)
if name == 'CDLDOJI':
return talib.CDLDOJI(np.array(price_o), np.array(price_h), np.array(price_l), np.asarray(price_c, dtype='float'))
if name == 'CDLDOJISTAR':
return talib.CDLDOJISTAR(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLDRAGONFLYDOJI':
return talib.CDLDRAGONFLYDOJI(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLENGULFING':
return talib.CDLENGULFING(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLEVENINGDOJISTAR':
return talib.CDLEVENINGDOJISTAR(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'), penetration=0)
if name == 'CDLEVENINGSTAR':
return talib.CDLEVENINGSTAR(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'), penetration=0)
if name == 'CDLGAPSIDESIDEWHITE':
return talib.CDLGAPSIDESIDEWHITE(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLGRAVESTONEDOJI':
return talib.CDLGRAVESTONEDOJI(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLHAMMER':
return talib.CDLHAMMER(np.array(price_o), np.array(price_h), np.array(price_l), np.asarray(price_c, dtype='float'))
if name == 'CDLHANGINGMAN':
return talib.CDLHANGINGMAN(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLHARAMI':
return talib.CDLHARAMI(np.array(price_o), np.array(price_h), np.array(price_l), np.asarray(price_c, dtype='float'))
if name == 'CDLHARAMICROSS':
return talib.CDLHARAMICROSS(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLHIGHWAVE':
return talib.CDLHIGHWAVE(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLHIKKAKE':
return talib.CDLHIKKAKE(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLHIKKAKEMOD':
return talib.CDLHIKKAKEMOD(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLHOMINGPIGEON':
return talib.CDLHOMINGPIGEON(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLIDENTICAL3CROWS':
return talib.CDLIDENTICAL3CROWS(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLINNECK':
return talib.CDLINNECK(np.array(price_o), np.array(price_h), np.array(price_l), np.asarray(price_c, dtype='float'))
if name == 'CDLINVERTEDHAMMER':
return talib.CDLINVERTEDHAMMER(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLKICKING':
return talib.CDLKICKING(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLKICKINGBYLENGTH':
return talib.CDLKICKINGBYLENGTH(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLLADDERBOTTOM':
return talib.CDLLADDERBOTTOM(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLLONGLEGGEDDOJI':
return talib.CDLLONGLEGGEDDOJI(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLLONGLINE':
return talib.CDLLONGLINE(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLMARUBOZU':
return talib.CDLMARUBOZU(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLMATCHINGLOW':
return talib.CDLMATCHINGLOW(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLMATHOLD':
return talib.CDLMATHOLD(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLMORNINGDOJISTAR':
return talib.CDLMORNINGDOJISTAR(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'), penetration=0)
if name == 'CDLMORNINGSTAR':
return talib.CDLMORNINGSTAR(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'), penetration=0)
if name == 'CDLONNECK':
return talib.CDLONNECK(np.array(price_o), np.array(price_h), np.array(price_l), np.asarray(price_c, dtype='float'))
if name == 'CDLPIERCING':
return talib.CDLPIERCING(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLRICKSHAWMAN':
return talib.CDLRICKSHAWMAN(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLRISEFALL3METHODS':
return talib.CDLRISEFALL3METHODS(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLSEPARATINGLINES':
return talib.CDLSEPARATINGLINES(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLSHOOTINGSTAR':
return talib.CDLSHOOTINGSTAR(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLSHORTLINE':
return talib.CDLSHORTLINE(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLSPINNINGTOP':
return talib.CDLSPINNINGTOP(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLSTALLEDPATTERN':
return talib.CDLSTALLEDPATTERN(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLSTICKSANDWICH':
return talib.CDLSTICKSANDWICH(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLTAKURI':
return talib.CDLTAKURI(np.array(price_o), np.array(price_h), np.array(price_l), np.asarray(price_c, dtype='float'))
if name == 'CDLTASUKIGAP':
return talib.CDLTASUKIGAP(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLTHRUSTING':
return talib.CDLTHRUSTING(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLTRISTAR':
return talib.CDLTRISTAR(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLUNIQUE3RIVER':
return talib.CDLUNIQUE3RIVER(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLUPSIDEGAP2CROWS':
return talib.CDLUPSIDEGAP2CROWS(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
if name == 'CDLXSIDEGAP3METHODS':
return talib.CDLXSIDEGAP3METHODS(np.array(price_o), np.array(price_h), np.array(price_l),
np.asarray(price_c, dtype='float'))
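# Usage sketch (illustrative only; the short price lists below are made-up
# placeholder data -- real indicator values need longer series):
#
#   highs  = [10.2, 10.5, 10.4, 10.8, 11.0]
#   lows   = [ 9.8, 10.0, 10.1, 10.3, 10.6]
#   closes = [10.0, 10.4, 10.3, 10.7, 10.9]
#   vols   = [1000, 1200,  900, 1500, 1100]
#   opens  = [ 9.9, 10.1, 10.4, 10.4, 10.8]
#   atr = ta('ATR', highs, lows, closes, vols, opens)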
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 14 19:50:56 2020
@author: hiroyasu
"""
import cvxpy as cp
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import control
import SCPmulti as scp
import pickle
import TrainRNN as trnn
import torch
import pandas as pd
DT = scp.DT
TSPAN = scp.TSPAN
M = scp.M
II = scp.II
L = scp.L
bb = scp.bb
FMIN = scp.FMIN
FMAX = scp.FMAX
RungeNum = scp.RungeNum
AA = scp.AA
Robs = scp.Robs
Rsafe = scp.Rsafe
XOBSs = scp.XOBSs
XXd0 = np.load('data/params/desired_n/Xhis.npy')
UUd0 = np.load('data/params/desired_n/Uhis.npy')
dratio = 0.2
d_over = np.sqrt(3)*dratio
X0 = scp.X0
Xf = scp.Xf
n_input = trnn.n_input
n_hidden = trnn.n_hidden
n_output = trnn.n_output
n_layers = trnn.n_layers
class Spacecraft:
def __init__(self,XXd0,UUd0,tspan=TSPAN,dt=DT,runge_num=RungeNum,m=M,I=II,l=L,b=bb,A=AA,fmin=FMIN,fmax=FMAX):
self.XXd = XXd0
self.UUd = UUd0
self.tspan = tspan
self.dt = dt
self.runge_num = runge_num
self.h = dt/runge_num
self.m = m
self.I = I
self.l = l
self.b = b
self.A = A
self.fmin = fmin
self.fmax = fmax
self.net = trnn.RNN(n_input,n_hidden,n_output,n_layers)
self.net.load_state_dict(torch.load('data/trained_nets/RNNLorenz.pt'))
self.net.eval()
self.YC = np.load('data/trained_nets/Y_params.npy')
self.ns = 6
self.RNN = 1
def GetP(self,X):
Xnet = self.Np2Var(X)
if self.RNN == 1:
cP = self.net(Xnet.view(1,1,-1))
cP = cP.data.numpy()*self.YC
P = self.cP2P(cP[0,0,:])
else:
cP = self.net(Xnet)
cP = cP.data.numpy()
P = self.cP2P(cP)
return P
def GetK(self,X):
P = self.GetP(X)
B = self.GetB(X)
K = B.T@P
return K
def Np2Var(self,X):
X = X.astype(np.float32)
X = torch.from_numpy(X)
return X
def cP2P(self,cP):
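# Unpack the 21 entries of cP, diagonal by diagonal, into an upper-triangular
# 6x6 factor and return P = factor.T @ factor, which is symmetric positive
# semidefinite by construction.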
cPnp = 0
for i in range(self.ns):
lb = i*(i+1)/2
lb = int(lb)
ub = (i+1)*(i+2)/2
ub = int(ub)
Di = cP[lb:ub]
Di = np.diag(Di,self.ns-(i+1))
cPnp += Di
P = (cPnp.T)@cPnp
return P
def GetPdot(self,X,Ud,Xdp1):
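# Finite-difference estimate of dP/dt along the trajectory:
# Pdot ~= sum_i (P(X + dX_i * e_i) - P(X)) / |dX_i| * dx_i/dt,
# where dX comes from the next desired state and dx/dt from the dynamics.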
dX = Xdp1-X
P = self.GetP(X)
Pdot = 0
dXdt = self.dynamics(0,X,Ud)
for i in range(self.ns):
dx = np.zeros(self.ns)
dx[i] = dX[i]
Pdot += (self.GetP(X+dx)-P)/np.linalg.norm(dx)*dXdt[i]
return Pdot
def dynamics(self,t,states,inputs):
A = self.A
B = self.GetB(states)
Xv = np.transpose(np.array([states]))
Uv = np.transpose(np.array([inputs]))
dXdt = A.dot(Xv)+B.dot(Uv)
dXdt = dXdt[:,0]
return dXdt
def GetB(self,states):
m = self.m
I = self.I
l = self.l
b = self.b
th = states[2]
T = np.array([[np.cos(th)/m,np.sin(th)/m,0],[-np.sin(th)/m,np.cos(th)/m,0],[0,0,1/2./I]])
H = np.array([[-1,-1,0,0,1,1,0,0],[0,0,-1,-1,0,0,1,1],[-l,l,-b,b,-l,l,-b,b]])
B = np.vstack((np.zeros((3,8)),T@H))
return B
def rk4(self,t,X,U):
h = self.h
k1 = self.dynamics(t,X,U)
k2 = self.dynamics(t+h/2.,X+k1*h/2.,U)
k3 = self.dynamics(t+h/2.,X+k2*h/2.,U)
k4 = self.dynamics(t+h,X+k3*h,U)
return t+h,X+h*(k1+2.*k2+2.*k3+k4)/6.
def one_step_sim(self,t,X,U):
runge_num = self.runge_num
for num in range(0, runge_num):
t,X = self.rk4(t,X,U)
return t,X
def GetCCM(self,alp):
dt = self.dt
epsilon = 0.
XX = self.XXd
N = XX.shape[0]-1
I = np.identity(6)
WW = {}
for i in range(N+1):
WW[i] = cp.Variable((6,6),PSD=True)
nu = cp.Variable(nonneg=True)
chi = cp.Variable(nonneg=True)
constraints = [chi*I-WW[0] >> epsilon*I,WW[0]-I >> epsilon*I]
for k in range(N):
Xk = XX[k,:]
Ax = self.A
Bx = self.GetB(Xk)
Wk = WW[k]
Wkp1 = WW[k+1]
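# Discrete-time CCM condition in the dual metric W = M^-1 (with margin epsilon):
# -(W_{k+1}-W_k)/dt + A W_k + W_k A^T - 2*nu*B B^T <= -2*alpha*W_k,
# rearranged so the left-hand side of the LMI below is positive semidefinite.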
constraints += [-2*alp*Wk-(-(Wkp1-Wk)/dt+Ax@[email protected]*nu*[email protected]) >> epsilon*I]
constraints += [chi*I-Wkp1 >> epsilon*I,Wkp1-I >> epsilon*I]
prob = cp.Problem(cp.Minimize(chi),constraints)
prob.solve(solver=cp.MOSEK)
cvx_status = prob.status
print(cvx_status)
WWout = {}
MMout = {}
for i in range(N+1):
WWout[i] = WW[i].value/nu.value
MMout[i] = np.linalg.inv(WWout[i])
chi = chi.value
nu = nu.value
cvx_optval = chi/alp
return cvx_status,cvx_optval,WWout,MMout,chi,nu
def CLFQP(self,X,Xd,Xdp1,M,Ud,alp):
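# Min-norm CLF-QP controller: minimize ||U - Ud||^2 + p^2 subject to the
# Lyapunov decrease condition d/dt(e^T M e) <= -2*alpha*e^T M e + p with
# e = X - Xd, where the slack p >= 0 keeps the QP feasible under the
# thruster bounds fmin <= U_i <= fmax.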
dt = self.dt
U = cp.Variable((8,1))
Ud = np.array([Ud]).T
p = cp.Variable((1,1))
fmin = self.fmin
fmax = self.fmax
A = self.A
Bx = self.GetB(X)
Bxd = self.GetB(Xd)
evec = np.array([X-Xd]).T
Mdot = self.GetPdot(X,Ud,Xdp1)
constraints = [evec.T@(Mdot+M@A+A.T@M)@evec+2*evec.T@M@Bx@U-2*evec.T@M@Bxd@Ud <= -2*alp*evec.T@M@evec+p]
for i in range(8):
constraints += [U[i,0] <= fmax, U[i,0] >= fmin]
prob = cp.Problem(cp.Minimize(cp.sum_squares(U-Ud)+p**2),constraints)
prob.solve()
cvx_status = prob.status
U = U.value
U = np.ravel(U)
return U
def FinalTrajectory(self,MM,alp,XXdRCT,UUdRCT):
dt = self.dt
XXd = self.XXd
UUd = self.UUd
N = UUd.shape[0]
X0 = XXd[0,:]
B = self.GetB(X0)
t = 0
t1 = 0
t2 = 0
X1 = X0
X2 = X0
X3 = X0
Xd = XXd[0,:]
XdRCT = XXdRCT[0,:]
this = np.zeros(N+1)
X1his = np.zeros((N+1,X0.size))
X2his = np.zeros((N+1,X0.size))
X3his = np.zeros((N+1,X0.size))
this[0] = t
X1his[0,:] = X1
U1his = np.zeros((N,B.shape[1]))
X2his[0,:] = X2
U2his = np.zeros((N,B.shape[1]))
X3his[0,:] = X3
U3his = np.zeros((N,B.shape[1]))
U1hisN = np.zeros(N)
U2hisN = np.zeros(N)
U3hisN = np.zeros(N)
U1hisND = np.zeros(N)
U2hisND = np.zeros(N)
U3hisND = np.zeros(N)
dnMs = np.zeros(N)
dnUs = np.zeros(N)
dnXs = np.zeros(N)
for i in range(N):
M = MM[i]
A = self.A
Bx3 = self.GetB(X3)
Q = 2.4*np.identity(6)
R = 1*np.identity(8)
K,P,E = control.lqr(A,Bx3,Q,R)
P1 = self.GetP(X1)
U3 = UUd[i,:]-Bx3.T@P@(X3-Xd)
for j in range(8):
if U3[j] >= self.fmax:
U3[j] = self.fmax
elif U3[j] <= self.fmin:
U3[j] = self.fmin
#U1 = self.CLFQP(X1,Xd,XXd[i+1,:],P,UUd[i,:],alp)
U1 = self.CLFQP(X1,XdRCT,XXdRCT[i+1,:],P1,UUdRCT[i,:],alp)
U2 = self.CLFQP(X2,XdRCT,XXdRCT[i+1,:],M,UUdRCT[i,:],alp)
t,X1 = self.one_step_sim(t,X1,U1)
t1,X2 = self.one_step_sim(t1,X2,U2)
t2,X3 = self.one_step_sim(t2,X3,U3)
Xd = XXd[i+1,:]
XdRCT = XXdRCT[i+1,:]
d1 = np.random.choice(np.array([-1,1]),1)[0]
d2 = np.random.choice(np.array([-1,1]),1)[0]
d3 = np.random.choice(np.array([-1,1]),1)[0]
d1 = np.array([0,0,0,d1,d2,d3])*2
#d1 = 0
#d1 = np.hstack((np.zeros(3),(np.random.rand(3)*2-1)))*dratio*20
#d1 = (np.random.rand(6)*2-1)*0.1
X1 = X1+d1*dt
X2 = X2+d1*dt
X3 = X3+d1*dt
this[i+1] = t
X1his[i+1,:] = X1
U1his[i,:] = U1
X2his[i+1,:] = X2
U2his[i,:] = U2
X3his[i+1,:] = X3
U3his[i,:] = U3
U1hisN[i] = np.linalg.norm(U1)
U2hisN[i] = np.linalg.norm(U2)
U3hisN[i] = np.linalg.norm(U3)
U1hisND[i] = np.linalg.norm(U1-UUd[i,:])
U2hisND[i] = np.linalg.norm(U2-UUdRCT[i,:])
U3hisND[i] = np.linalg.norm(U3-UUdRCT[i,:])
dnMs[i] = np.linalg.norm(M-P1,ord=2)
dnUs[i] = np.linalg.norm(U1-U2)
dnXs[i] = np.linalg.norm(X1-X2)
return this,X1his,U1his,X2his,U2his,X3his,U3his,U1hisN,U2hisN,U3hisN,U1hisND,U2hisND,U3hisND,dnMs,dnUs,dnXs
def Rot2d(th):
R = np.array([[np.cos(th),-np.sin(th)],[np.sin(th),np.cos(th)]])
return R
def GetTubePlot(XXdRCT,alp,chi,d_over,th):
Rtube = d_over*np.sqrt(chi)/alp
xxdRCT = XXdRCT[:,0:2]
dxxdRCT = np.diff(xxdRCT,axis=0)
for i in range(dxxdRCT.shape[0]):
dxxdRCT[i,:] = Rot2d(th)@(dxxdRCT[i,:]/np.linalg.norm(dxxdRCT[i,:]))
return xxdRCT[0:dxxdRCT.shape[0],:]+dxxdRCT*Rtube
def SaveDict(filename,var):
output = open(filename,'wb')
pickle.dump(var,output)
output.close()
pass
def LoadDict(filename):
pkl_file = open(filename,'rb')
varout = pickle.load(pkl_file)
pkl_file.close()
return varout
if __name__ == "__main__":
sc = Spacecraft(XXd0,UUd0)
alp = np.load('data/params/alpha_mmicro/alp.npy')
XXdRCT = np.load('data/params/desiredRCT/XXdRCT.npy')
UUdRCT = np.load('data/params/desiredRCT/UUdRCT.npy')
np.random.seed(seed=32)
cvx_status,cvx_optval,WW,MM,chi,nu = sc.GetCCM(alp)
this,X1his,U1his,X2his,U2his,X3his,U3his,U1hisN,U2hisN,U3hisN,U1hisND,U2hisND,U3hisND,dnMs,dnUs,dnXs = sc.FinalTrajectory(MM,alp,XXdRCT,UUdRCT)
xxTube1 = GetTubePlot(XXdRCT,alp,chi,d_over,np.pi/2)
xxTube2 = GetTubePlot(XXdRCT,alp,chi,d_over,-np.pi/2)
Nplot = xxTube1.shape[0]
plt.figure()
plt.plot(X1his[:,0],X1his[:,1])
plt.plot(X2his[:,0],X2his[:,1])
plt.plot(X3his[:,0],X3his[:,1])
#plt.plot(xxTube1[:,0],xxTube1[:,1])
#plt.plot(xxTube2[:,0],xxTube2[:,1])
plt.plot(XXdRCT[:,0],XXdRCT[:,1],'--k')
plt.fill_between(xxTube1[:,0],xxTube1[:,1],np.zeros(Nplot),facecolor='black',alpha=0.2)
plt.fill_between(xxTube2[:,0],xxTube2[:,1],np.zeros(Nplot),facecolor='white')
plt.fill_between(np.linspace(19.347,22,100),16.9*np.ones(100),np.zeros(100),facecolor='white')
plt.fill_between(np.linspace(-1.03228,1.03228,100),-0.2*np.ones(100),np.zeros(100),facecolor='black',alpha=0.2)
for i in range(XOBSs.shape[0]):
x,y=[],[]
for _x in np.linspace(0,2*np.pi):
x.append(Robs*np.cos(_x)+XOBSs[i,0])
y.append(Robs*np.sin(_x)+XOBSs[i,1])
plt.plot(x,y,'k')
plt.axes().set_aspect('equal')
plt.show()
data1 = {'score': U1hisN}
data2 = {'score': U2hisN}
data3 = {'score': U3hisN}
# Create dataframe
df1 = pd.DataFrame(data1)
df2 = pd.DataFrame(data2)
df3 = pd.DataFrame(data3)
dfU1hisN = df1.rolling(window=50).mean()
dfU2hisN = df2.rolling(window=50).mean()
dfU3hisN = df3.rolling(window=50).mean()
plt.figure()
plt.plot(this[0:500],np.cumsum(U1hisN**2)*DT)
plt.plot(this[0:500],np.cumsum(U2hisN**2)*DT)
plt.plot(this[0:500],np.cumsum(U3hisN**2)*DT)
plt.show()
plt.figure()
plt.plot(this[0:500],U1hisND)
plt.plot(this[0:500],U2hisND)
plt.plot(this[0:500],U3hisND)
plt.show()
plt.figure()
plt.plot(this[0:500],U1his)
plt.plot(this[0:500],U2his)
plt.plot(this[0:500],U3his)
plt.show()
this,X1his,U1his,X2his,U2his,X3his,U3his,U1hisN,U2hisN,U3hisN,U1hisND,U2hisND,U3hisND
np.save('data/simulation/this.npy',this)
np.save('data/simulation/X1his.npy',X1his)
np.save('data/simulation/U1his.npy',U1his)
np.save('data/simulation/X2his.npy',X2his)
np.save('data/simulation/U2his.npy',U2his)
np.save('data/simulation/X3his.npy',X3his)
import numpy as np
import matplotlib.pyplot as plt
import math
import py_local_maxima
from timeit import timeit
# Set up what we'll be benchmarking. Tests are tuples consisting of:
# 1. A user-identifiable string for what the test is doing
# 2. The command to run, as a string
_tests = [
('CPU Max Filter',
'py_local_maxima.cpu.detect_maximum_filter(image, neighborhood)'),
('CPU skimage Implementation',
'py_local_maxima.cpu.detect_skimage(image, neighborhood)'),
]
# Add GPU tests conditionally
try:
import pycuda # type: ignore (silence warning about module import)
_tests.extend([
('GPU Naive Max Filter',
'py_local_maxima.gpu.detect_naive(image, neighborhood)'),
])
except ImportError:
pass
_setup = 'import py_local_maxima'
def _print_benchmark(algorithm_name, timer, neighborhood_shape):
template = '{0:30}: {1:5f} seconds per run ({2} neighborhood)'
print(str.format(template, algorithm_name, timer, neighborhood_shape))
def benchmark(image, neighborhood=np.ones((3, 3))):
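# The body of benchmark() is not shown above. A minimal sketch of what such a
# loop could look like -- the repetition count and the globals-based injection
# of `image`/`neighborhood` are assumptions, not the original code:
#
#   for name, command in _tests:
#       runs = 10
#       timer = timeit(command, setup=_setup, number=runs,
#                      globals={'image': image, 'neighborhood': neighborhood}) / runs
#       _print_benchmark(name, timer, neighborhood.shape)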
import streamlit as st
# To make things easier later, we're also importing numpy and pandas for
# working with sample data.
import numpy as np
import pandas as pd
from scipy.integrate import *
import scipy.optimize
import matplotlib.pyplot as plt
from functools import partial
import os, sys
st.sidebar.markdown("## Parameters used in the simulation")
st.sidebar.markdown("Enter your own custom values to run the model")
je = float(st.sidebar.text_input('Current density j_e [10^10 A/m^2]', 10))
periSampl = 1000 #
class Parameters:
gamma = 2.2128e5
alpha = float(st.sidebar.text_input('Gilbert damping constant', 1))
K1 = float(st.sidebar.text_input('Anisotropy constant K_1 [J/m^3]', 1.5 * 9100))
Js = float(st.sidebar.text_input('Saturation magnetization Js [T]', 0.65))
RAHE = float(st.sidebar.text_input('Anomalous Hall effect coefficient', 0.65))
d = float(st.sidebar.text_input('FM layer thickness [nm]', (0.6+1.2+1.1) * 1e-9))
frequency = float(st.sidebar.text_input('AC frequency [Hz]', 0.1e9))
currentd = je * 1e10
hbar = 1.054571e-34
e = 1.602176634e-19
mu0 = 4 * 3.1415927 * 1e-7
easy_axis = np.array([0,0,1])
p_axis = np.array([0,-1,0])
from sklearn.metrics import roc_auc_score, accuracy_score
import os
import numpy as np
def ana(temp_data, step=-1):
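# Group filenames into runs: a name whose stem ends in '_0' starts a new group
# and later files are appended to the current group; `step` (if not -1)
# truncates every group to its first `step` entries.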
store = []
for f in temp_data:
if f[:-4].split('_')[-1] == '0':
store.append([])
store[-1].append(f)
if step != -1:
store = [i[:step] for i in store]
return store
def cal_auc(label_file, prob_file, dir_pre=None):
if dir_pre:
label_file = dir_pre + label_file
prob_file = dir_pre + prob_file
label = np.load(label_file)
prob = np.load(prob_file)
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/15_interp_latent.ipynb (unless otherwise specified).
__all__ = ['CosineSearch', 'InterpEmbeddings']
# Cell
import numpy as np
import pandas as pd
from typing import Dict, List, Any
from forgebox.html import DOM
# Cell
class CosineSearch:
"""
Build an index search on cosine distance
cos = CosineSearch(base_array)
idx_order = cos(vec)
"""
def __init__(self, base: np.ndarray):
"""
base: np.ndarray, embedding matrix of shape:
(num_items, hidden_size)
"""
assert len(base.shape) == 2,\
f"Base array has to be 2 dimentional, input is {len(base.shape)}"
self.base = base
self.base_norm = self.calc_base_norm(self.base)
self.normed_base = self.base/self.base_norm[:, None]
self.dim = self.base.shape[1]
def __repr__(self):
return f"[Consine Similarity Search] ({len(self)} items)"
def __len__(self): return self.base.shape[0]
@staticmethod
def calc_base_norm(base: np.ndarray) -> np.ndarray:
return np.sqrt(np.power(base, 2).sum(1))
def search(self, vec: np.ndarray, return_similarity: bool = False):
if return_similarity:
similarity = (vec * self.normed_base /
(np.power(vec, 2).sum())).sum(1)
order = similarity.argsort()[::-1]
return order, similarity[order]
return self(vec)
def __call__(self, vec: np.ndarray) -> np.ndarray:
"""
Return base-row indices ordered from the closest vector to the furthest.
vec: a 1-dimensional query vector; the first returned index is the
closest base vector, the last is the furthest.
"""
return (vec * self.normed_base).sum(1).argsort()[::-1]
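# Usage sketch (illustrative, with made-up shapes): rank 1000 random 64-d base
# vectors by cosine similarity to a query vector.
#
#   base = np.random.randn(1000, 64)
#   cos = CosineSearch(base)
#   query = np.random.randn(64)
#   order = cos(query)                                  # indices, closest first
#   order, sims = cos.search(query, return_similarity=True)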
# Cell
class InterpEmbeddings:
"""
interp = InterpEmbeddings(embedding_matrix, vocab_dict)
interp.search("computer")
# visualize the embedding with tensorboard
interp.visualize_in_tb()
"""
def __init__(
self,
embedding_matrix: np.ndarray,
vocab: Dict[int, str]
):
"""
embedding_matrix: np.ndarray, embedding matrix of shape:
(num_items, hidden_size)
"""
self.base = embedding_matrix
self.cosine = CosineSearch(embedding_matrix)
self.vocab = vocab
self.c2i = dict((v, k) for k, v in vocab.items())
def __repr__(self) -> str:
cls = self.__class__.__name__
return f"{cls} with\n\t{self.cosine}"
def search(
self,
category: str,
top_k: int = 20,
) -> pd.DataFrame:
"""
Search for similar tokens using the embedding matrix and the vocabulary dictionary.
"""
token_id = self.c2i.get(category)
if token_id is None:
match_list = []
for token_id, token in self.vocab.items():
if category.lower() in str(token).lower():
match_list.append({"token": token, "token_id": token_id})
if len(match_list)==0:
raise KeyError(
f"[UnpackAI] category: {category} not in vocabulary")
else:
match_df = pd.DataFrame(match_list)
DOM("Search with the following categories","h3")()
display(match_df)
token_ids = list(match_df.token_id)
else:
DOM(f"Search with token id {token_id}","h3")()
token_ids = [token_id,]
# combine multiple tokens into 1
vec = self.base[token_ids].mean(0)
# distance search
closest, similarity = self.cosine.search(vec, return_similarity=True)
closest = closest[:top_k]
similarity = similarity[:top_k]
tokens = list(self.vocab.get(idx) for idx in closest)
return pd.DataFrame({
"tokens": tokens,
"idx": closest,
"similarity": similarity})
def visualize_in_tb(
self,
log_dir:str="./logs",
selection: np.ndarray=None,
first_k:int=500,
) -> None:
"""
Visualize the embedding in tensorboard
For now this function is only supported on colab
"""
# This is rarely executed within a notebook session, so the heavyweight
# imports are kept local to this method; importing other functions from
# this module then does not require these libraries to be installed.
from torch.utils.tensorboard import SummaryWriter
# this version's pd has vc for quick value counts
from forgebox.imports import pd
import tensorflow as tf
import tensorboard as tb
import os
# possible tensorflow version error
tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
os.system(f"rm -rf {log_dir}")
writer = SummaryWriter(log_dir=log_dir,)
self.i2c = dict((v,k) for k,v in self.c2i.items())
tokens = list(self.i2c.get(i) for i in range(len(self.i2c)))
if selection is None:
vecs = self.base[:first_k]
tokens = tokens[:first_k]
else:
selection = np.array(selection)
import math
import warnings
from copy import copy, deepcopy
from datetime import datetime
from typing import Mapping, MutableMapping, MutableSequence, Optional
import numpy as np # type: ignore
import pytest # type: ignore
from rads.rpn import (
ABS,
ACOS,
ACOSD,
ACOSH,
ADD,
AND,
ASIN,
ASIND,
ASINH,
ATAN,
ATAN2,
ATAND,
ATANH,
AVG,
BOXCAR,
BTEST,
CEIL,
CEILING,
COS,
COSD,
COSH,
D2R,
DIF,
DIV,
DUP,
DXDY,
EQ,
EXCH,
EXP,
FLOOR,
FMOD,
GAUSS,
GE,
GT,
HYPOT,
IAND,
INRANGE,
INV,
IOR,
ISAN,
ISNAN,
LE,
LOG,
LOG10,
LT,
MAX,
MIN,
MUL,
NAN,
NE,
NEG,
NINT,
OR,
PI,
POP,
POW,
R2,
R2D,
RINT,
SIN,
SIND,
SINH,
SQR,
SQRT,
SUB,
SUM,
TAN,
TAND,
TANH,
YMDHMS,
CompleteExpression,
E,
Expression,
Literal,
StackUnderflowError,
Token,
Variable,
token,
)
from rads.typing import FloatOrArray
GOLDEN_RATIO = math.log((1 + math.sqrt(5)) / 2)
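# Natural log of the golden ratio phi; chosen because sinh of it is exactly 1/2,
# cosh of it is sqrt(5)/2 and tanh of it is sqrt(5)/5, the closed-form values
# asserted in the hyperbolic-function tests below.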
class TestLiteral:
def test_init(self):
Literal(3)
Literal(3.14)
with pytest.raises(TypeError):
Literal("not a number") # type: ignore
def test_pops(self):
assert Literal(3).pops == 0
def test_puts(self):
assert Literal(3).puts == 1
def test_value(self):
assert Literal(3).value == 3
assert Literal(3.14).value == 3.14
def test_call(self):
stack: MutableSequence[FloatOrArray] = []
environment: MutableMapping[str, FloatOrArray] = {}
assert Literal(3.14)(stack, environment) is None
assert Literal(2.71)(stack, environment) is None
assert stack == [3.14, 2.71]
assert environment == {}
def test_eq(self):
assert Literal(3.14) == Literal(3.14)
assert not Literal(3.14) == Literal(2.71)
assert not Literal(3.14) == 3.14
def test_ne(self):
assert Literal(3.14) != Literal(2.71)
assert not Literal(3.14) != Literal(3.14)
assert Literal(3.14) != 3.14
def test_lt(self):
assert Literal(2.71) < Literal(3.14)
assert not Literal(3.14) < Literal(2.71)
with pytest.raises(TypeError):
Literal(2.71) < 3.14
with pytest.raises(TypeError):
2.71 < Literal(3.14)
def test_le(self):
assert Literal(2.71) <= Literal(3.14)
assert Literal(3.14) <= Literal(3.14)
assert not Literal(3.14) <= Literal(2.71)
with pytest.raises(TypeError):
Literal(2.71) <= 3.14
with pytest.raises(TypeError):
2.71 <= Literal(3.14)
def test_gt(self):
assert Literal(3.14) > Literal(2.71)
assert not Literal(2.71) > Literal(3.14)
with pytest.raises(TypeError):
Literal(3.14) > 2.71
with pytest.raises(TypeError):
3.14 > Literal(2.71)
def test_ge(self):
assert Literal(3.14) >= Literal(2.71)
assert Literal(3.14) >= Literal(3.14)
assert not Literal(2.71) >= Literal(3.14)
with pytest.raises(TypeError):
Literal(3.14) >= 2.71
with pytest.raises(TypeError):
3.14 >= Literal(2.71)
def test_repr(self):
assert repr(Literal(3)) == "Literal(3)"
assert repr(Literal(3.14)) == "Literal(3.14)"
def test_str(self):
assert str(Literal(3)) == "3"
assert str(Literal(3.14)) == "3.14"
def test_pi(self):
assert PI.value == pytest.approx(np.pi)
def test_e(self):
assert E.value == pytest.approx(np.e)
class TestVariable:
def test_init(self):
Variable("alt")
with pytest.raises(ValueError):
Variable("3")
with pytest.raises(ValueError):
Variable("3name")
with pytest.raises(TypeError):
Variable(3) # type: ignore
with pytest.raises(TypeError):
Variable(3.14) # type: ignore
def test_pops(self):
assert Variable("alt").pops == 0
def test_puts(self):
assert Variable("alt").puts == 1
def test_name(self):
assert Variable("alt").name == "alt"
def test_call(self):
stack: MutableSequence[FloatOrArray] = []
environment = {"alt": np.array([1, 2, 3]), "dry_tropo": 4, "wet_tropo": 5}
assert Variable("wet_tropo")(stack, environment) is None
assert Variable("alt")(stack, environment) is None
assert len(stack) == 2
assert stack[0] == 5
assert np.all(stack[1] == np.array([1, 2, 3]))
assert len(environment) == 3
assert "alt" in environment
assert "dry_tropo" in environment
assert "wet_tropo" in environment
assert np.all(environment["alt"] == np.array([1, 2, 3]))
assert environment["dry_tropo"] == 4
assert environment["wet_tropo"] == 5
with pytest.raises(KeyError):
assert Variable("alt")(stack, {}) is None
assert len(stack) == 2
assert stack[0] == 5
assert np.all(stack[1] == np.array([1, 2, 3]))
def test_eq(self):
assert Variable("alt") == Variable("alt")
assert not Variable("alt") == Variable("dry_tropo")
assert not Variable("alt") == "alt"
def test_ne(self):
assert Variable("alt") != Variable("dry_tropo")
assert not Variable("alt") != Variable("alt")
assert Variable("alt") != "alt"
def test_repr(self):
assert repr(Variable("alt")) == "Variable('alt')"
def test_str(self):
assert str(Variable("alt")) == "alt"
def contains_array(stack: MutableSequence[FloatOrArray]) -> bool:
for item in stack:
if isinstance(item, np.ndarray):
return True
return False
def contains_nan(stack: MutableSequence[FloatOrArray]) -> bool:
for item in stack:
try:
if math.isnan(item):
return True
except TypeError:
pass
return False
def assert_token(
operator: Token,
pre_stack: MutableSequence[FloatOrArray],
post_stack: MutableSequence[FloatOrArray],
environment: Optional[Mapping[str, FloatOrArray]] = None,
*,
approx: bool = False,
rtol: float = 1e-15,
atol: float = 1e-16,
) -> None:
"""Assert that a token modifies the stack properly.
Parameters
----------
operator
Operator to test.
pre_stack
Stack state before calling the operator.
post_stack
Desired stack state after calling the operator.
environment
Optional dictionary like object providing the environment for
variable lookup.
approx
Set to true to use approximate equality instead of exact.
rtol
Relative tolerance. Only used if :paramref:`approx` is True.
atol
Absolute tolerance. Only used if :paramref:`approx` is True.
Raises
------
AssertionError
If the operator does not produce the proper post stack state or the
environment parameter is changed.
"""
if not environment:
environment = {"dont_touch": 5}
original_environment = deepcopy(environment)
stack = pre_stack
operator(stack, environment)
# environment should be unchanged
assert environment == original_environment
# check stack
if approx or contains_nan(post_stack) or contains_array(post_stack):
assert len(stack) == len(post_stack)
for a, b in zip(stack, post_stack):
if isinstance(a, np.ndarray) or isinstance(b, np.ndarray):
if approx:
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, equal_nan=True
)
else:
np.testing.assert_equal(a, b)
else:
if math.isnan(b):
assert math.isnan(a)
elif approx:
assert a == pytest.approx(b, rel=rtol, abs=atol)
else:
assert a == b
else:
assert stack == post_stack
class TestSUBOperator:
def test_repr(self):
assert repr(SUB) == "SUB"
def test_pops(self):
assert SUB.pops == 2
def test_puts(self):
assert SUB.puts == 1
def test_no_copy(self):
assert copy(SUB) is SUB
assert deepcopy(SUB) is SUB
def test_call(self):
assert_token(SUB, [2, 4], [-2])
assert_token(SUB, [2, np.array([4, 1])], [np.array([-2, 1])])
assert_token(SUB, [np.array([4, 1]), 2], [np.array([2, -1])])
assert_token(SUB, [np.array([4, 1]), np.array([1, 4])], [np.array([3, -3])])
# extra stack elements
assert_token(SUB, [0, 2, 4], [0, -2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SUB([], {})
with pytest.raises(StackUnderflowError):
SUB([1], {})
class TestADDOperator:
def test_repr(self):
assert repr(ADD) == "ADD"
def test_pops(self):
assert ADD.pops == 2
def test_puts(self):
assert ADD.puts == 1
def test_no_copy(self):
assert copy(ADD) is ADD
assert deepcopy(ADD) is ADD
def test_call(self):
assert_token(ADD, [2, 4], [6])
assert_token(ADD, [2, np.array([4, 1])], [np.array([6, 3])])
assert_token(ADD, [np.array([4, 1]), 2], [np.array([6, 3])])
assert_token(ADD, [np.array([4, 1]), np.array([1, 4])], [np.array([5, 5])])
# extra stack elements
assert_token(ADD, [0, 2, 4], [0, 6])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ADD([], {})
with pytest.raises(StackUnderflowError):
ADD([1], {})
class TestMULOperator:
def test_repr(self):
assert repr(MUL) == "MUL"
def test_pops(self):
assert MUL.pops == 2
def test_puts(self):
assert MUL.puts == 1
def test_no_copy(self):
assert copy(MUL) is MUL
assert deepcopy(MUL) is MUL
def test_call(self):
assert_token(MUL, [2, 4], [8])
assert_token(MUL, [2, np.array([4, 1])], [np.array([8, 2])])
assert_token(MUL, [np.array([4, 1]), 2], [np.array([8, 2])])
assert_token(MUL, [np.array([4, 1]), np.array([1, 4])], [np.array([4, 4])])
# extra stack elements
assert_token(MUL, [0, 2, 4], [0, 8])
# not enough stack elements
with pytest.raises(StackUnderflowError):
MUL([], {})
with pytest.raises(StackUnderflowError):
MUL([1], {})
class TestPOPOperator:
def test_repr(self):
assert repr(POP) == "POP"
def test_pops(self):
assert POP.pops == 1
def test_puts(self):
assert POP.puts == 0
def test_no_copy(self):
assert copy(POP) is POP
assert deepcopy(POP) is POP
def test_call(self):
assert_token(POP, [1], [])
assert_token(POP, [1, 2], [1])
# not enough stack elements
with pytest.raises(StackUnderflowError):
POP([], {})
class TestNEGOperator:
def test_repr(self):
assert repr(NEG) == "NEG"
def test_pops(self):
assert NEG.pops == 1
def test_puts(self):
assert NEG.puts == 1
def test_no_copy(self):
assert copy(NEG) is NEG
assert deepcopy(NEG) is NEG
def test_call(self):
assert_token(NEG, [2], [-2])
assert_token(NEG, [-2], [2])
assert_token(NEG, [np.array([4, -1])], [np.array([-4, 1])])
assert_token(NEG, [np.array([-4, 1])], [np.array([4, -1])])
# extra stack elements
assert_token(NEG, [0, 2], [0, -2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
NEG([], {})
class TestABSOperator:
def test_repr(self):
assert repr(ABS) == "ABS"
def test_pops(self):
assert ABS.pops == 1
def test_puts(self):
assert ABS.puts == 1
def test_no_copy(self):
assert copy(ABS) is ABS
assert deepcopy(ABS) is ABS
def test_call(self):
assert_token(ABS, [2], [2])
assert_token(ABS, [-2], [2])
assert_token(ABS, [np.array([4, -1])], [np.array([4, 1])])
assert_token(ABS, [np.array([-4, 1])], [np.array([4, 1])])
# extra stack elements
assert_token(ABS, [0, -2], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ABS([], {})
class TestINVOperator:
def test_repr(self):
assert repr(INV) == "INV"
def test_pops(self):
assert INV.pops == 1
def test_puts(self):
assert INV.puts == 1
def test_no_copy(self):
assert copy(INV) is INV
assert deepcopy(INV) is INV
def test_call(self):
assert_token(INV, [2], [0.5])
assert_token(INV, [-2], [-0.5])
assert_token(INV, [np.array([4, -1])], [np.array([0.25, -1])])
assert_token(INV, [np.array([-4, 1])], [np.array([-0.25, 1])])
# extra stack elements
assert_token(INV, [0, 2], [0, 0.5])
# not enough stack elements
with pytest.raises(StackUnderflowError):
INV([], {})
class TestSQRTOperator:
def test_repr(self):
assert repr(SQRT) == "SQRT"
def test_pops(self):
assert SQRT.pops == 1
def test_puts(self):
assert SQRT.puts == 1
def test_no_copy(self):
assert copy(SQRT) is SQRT
assert deepcopy(SQRT) is SQRT
def test_call(self):
assert_token(SQRT, [4], [2])
assert_token(SQRT, [np.array([4, 16])], [np.array([2, 4])])
# extra stack elements
assert_token(SQRT, [0, 4], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SQRT([], {})
class TestSQROperator:
def test_repr(self):
assert repr(SQR) == "SQR"
def test_pops(self):
assert SQR.pops == 1
def test_puts(self):
assert SQR.puts == 1
def test_no_copy(self):
assert copy(SQR) is SQR
assert deepcopy(SQR) is SQR
def test_call(self):
assert_token(SQR, [2], [4])
assert_token(SQR, [-2], [4])
assert_token(SQR, [np.array([4, -1])], [np.array([16, 1])])
assert_token(SQR, [np.array([-4, 1])], [np.array([16, 1])])
# extra stack elements
assert_token(SQR, [0, -2], [0, 4])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SQR([], {})
class TestEXPOperator:
def test_repr(self):
assert repr(EXP) == "EXP"
def test_pops(self):
assert EXP.pops == 1
def test_puts(self):
assert EXP.puts == 1
def test_no_copy(self):
assert copy(EXP) is EXP
assert deepcopy(EXP) is EXP
def test_call(self):
assert_token(EXP, [math.log(1)], [1.0], approx=True)
assert_token(EXP, [math.log(2)], [2.0], approx=True)
assert_token(
EXP, [np.array([np.log(4), np.log(1)])], [np.array([4.0, 1.0])], approx=True
)
# extra stack elements
assert_token(EXP, [0, np.log(1)], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
EXP([], {})
class TestLOGOperator:
def test_repr(self):
assert repr(LOG) == "LOG"
def test_pops(self):
assert LOG.pops == 1
def test_puts(self):
assert LOG.puts == 1
def test_no_copy(self):
assert copy(LOG) is LOG
assert deepcopy(LOG) is LOG
def test_call(self):
assert_token(LOG, [math.e], [1.0], approx=True)
assert_token(LOG, [math.e ** 2], [2.0], approx=True)
assert_token(LOG, [math.e ** -2], [-2.0], approx=True)
assert_token(
LOG,
[np.array([np.e ** 4, np.e ** -1])],
[np.array([4.0, -1.0])],
approx=True,
)
assert_token(
LOG,
[np.array([np.e ** -4, np.e ** 1])],
[np.array([-4.0, 1.0])],
approx=True,
)
# extra stack elements
assert_token(LOG, [0, np.e], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
LOG([], {})
class TestLOG10Operator:
def test_repr(self):
assert repr(LOG10) == "LOG10"
def test_pops(self):
assert LOG10.pops == 1
def test_puts(self):
assert LOG10.puts == 1
def test_no_copy(self):
assert copy(LOG10) is LOG10
assert deepcopy(LOG10) is LOG10
def test_call(self):
assert_token(LOG10, [10], [1.0], approx=True)
assert_token(LOG10, [10 ** 2], [2.0], approx=True)
assert_token(LOG10, [10 ** -2], [-2.0], approx=True)
assert_token(
LOG10, [np.array([10 ** 4, 10 ** -1])], [np.array([4.0, -1.0])], approx=True
)
assert_token(
LOG10, [np.array([10 ** -4, 10 ** 1])], [np.array([-4.0, 1.0])], approx=True
)
# extra stack elements
assert_token(LOG10, [0, 10], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
LOG10([], {})
class TestSINOperator:
def test_repr(self):
assert repr(SIN) == "SIN"
def test_pops(self):
assert SIN.pops == 1
def test_puts(self):
assert SIN.puts == 1
def test_no_copy(self):
assert copy(SIN) is SIN
assert deepcopy(SIN) is SIN
def test_call(self):
assert_token(SIN, [0.0], [0.0], approx=True)
assert_token(SIN, [math.pi / 6], [1 / 2], approx=True)
assert_token(SIN, [math.pi / 4], [1 / math.sqrt(2)], approx=True)
assert_token(SIN, [math.pi / 3], [math.sqrt(3) / 2], approx=True)
assert_token(SIN, [math.pi / 2], [1.0], approx=True)
assert_token(
SIN,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
assert_token(
SIN,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
# extra stack elements
assert_token(SIN, [0, math.pi / 2], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
SIN([], {})
class TestCOSOperator:
def test_repr(self):
assert repr(COS) == "COS"
def test_pops(self):
assert COS.pops == 1
def test_puts(self):
assert COS.puts == 1
def test_no_copy(self):
assert copy(COS) is COS
assert deepcopy(COS) is COS
def test_call(self):
assert_token(COS, [0.0], [1.0], approx=True)
assert_token(COS, [math.pi / 6], [math.sqrt(3) / 2], approx=True)
assert_token(COS, [math.pi / 4], [1 / math.sqrt(2)], approx=True)
assert_token(COS, [math.pi / 3], [1 / 2], approx=True)
assert_token(COS, [math.pi / 2], [0.0], approx=True)
assert_token(
COS,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
assert_token(
COS,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
# extra stack elements
assert_token(COS, [0, math.pi / 2], [0, 0.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
COS([], {})
class TestTANOperator:
def test_repr(self):
assert repr(TAN) == "TAN"
def test_pops(self):
assert TAN.pops == 1
def test_puts(self):
assert TAN.puts == 1
def test_no_copy(self):
assert copy(TAN) is TAN
assert deepcopy(TAN) is TAN
def test_call(self):
assert_token(TAN, [0.0], [0.0], approx=True)
assert_token(TAN, [math.pi / 6], [1 / math.sqrt(3)], approx=True)
assert_token(TAN, [math.pi / 4], [1.0], approx=True)
assert_token(TAN, [math.pi / 3], [math.sqrt(3)], approx=True)
assert_token(
TAN,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
assert_token(
TAN,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
# extra stack elements
assert_token(TAN, [0, math.pi / 4], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
TAN([], {})
class TestSINDOperator:
def test_repr(self):
assert repr(SIND) == "SIND"
def test_pops(self):
assert SIND.pops == 1
def test_puts(self):
assert SIND.puts == 1
def test_no_copy(self):
assert copy(SIND) is SIND
assert deepcopy(SIND) is SIND
def test_call(self):
assert_token(SIND, [0], [0.0], approx=True)
assert_token(SIND, [30], [1 / 2], approx=True)
assert_token(SIND, [45], [1 / math.sqrt(2)], approx=True)
assert_token(SIND, [60], [math.sqrt(3) / 2], approx=True)
assert_token(SIND, [90], [1.0], approx=True)
assert_token(
SIND,
[np.array([0, 30, 45, 60, 90])],
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
assert_token(
SIND,
[-np.array([0, 30, 45, 60, 90])],
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
# extra stack elements
assert_token(SIND, [0, 90], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
SIND([], {})
class TestCOSDOperator:
def test_repr(self):
assert repr(COSD) == "COSD"
def test_pops(self):
assert COSD.pops == 1
def test_puts(self):
assert COSD.puts == 1
def test_no_copy(self):
assert copy(COSD) is COSD
assert deepcopy(COSD) is COSD
def test_call(self):
assert_token(COSD, [0], [1.0], approx=True)
assert_token(COSD, [30], [math.sqrt(3) / 2], approx=True)
assert_token(COSD, [45], [1 / math.sqrt(2)], approx=True)
assert_token(COSD, [60], [1 / 2], approx=True)
assert_token(COSD, [90], [0.0], approx=True)
assert_token(
COSD,
[np.array([0, 30, 45, 60, 90])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
assert_token(
COSD,
[-np.array([0, 30, 45, 60, 90])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
# extra stack elements
assert_token(COSD, [0, 90], [0, 0.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
COSD([], {})
class TestTANDOperator:
def test_repr(self):
assert repr(TAND) == "TAND"
def test_pops(self):
assert TAND.pops == 1
def test_puts(self):
assert TAND.puts == 1
def test_no_copy(self):
assert copy(TAND) is TAND
assert deepcopy(TAND) is TAND
def test_call(self):
assert_token(TAND, [0], [0], approx=True)
assert_token(TAND, [30], [1 / math.sqrt(3)], approx=True)
assert_token(TAND, [45], [1.0], approx=True)
assert_token(TAND, [60], [math.sqrt(3)], approx=True)
assert_token(
TAND,
[np.array([0, 30, 45, 60])],
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
assert_token(
TAND,
[-np.array([0, 30, 45, 60])],
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
# extra stack elements
assert_token(TAND, [0, 45], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
TAND([], {})
class TestSINHOperator:
def test_repr(self):
assert repr(SINH) == "SINH"
def test_pops(self):
assert SINH.pops == 1
def test_puts(self):
assert SINH.puts == 1
def test_no_copy(self):
assert copy(SINH) is SINH
assert deepcopy(SINH) is SINH
def test_call(self):
assert_token(SINH, [0.0], [0.0], approx=True)
assert_token(SINH, [GOLDEN_RATIO], [0.5], approx=True)
assert_token(
SINH, [np.array([0.0, GOLDEN_RATIO])], [np.array([0.0, 0.5])], approx=True
)
# extra stack elements
assert_token(SINH, [0, GOLDEN_RATIO], [0, 0.5], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
SINH([], {})
class TestCOSHOperator:
def test_repr(self):
assert repr(COSH) == "COSH"
def test_pops(self):
assert COSH.pops == 1
def test_puts(self):
assert COSH.puts == 1
def test_no_copy(self):
assert copy(COSH) is COSH
assert deepcopy(COSH) is COSH
def test_call(self):
assert_token(COSH, [0.0], [1.0], approx=True)
assert_token(COSH, [GOLDEN_RATIO], [math.sqrt(5) / 2], approx=True)
assert_token(
COSH,
[np.array([0.0, GOLDEN_RATIO])],
[np.array([1.0, np.sqrt(5) / 2])],
approx=True,
)
# extra stack elements
assert_token(COSH, [0, GOLDEN_RATIO], [0, math.sqrt(5) / 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
COSH([], {})
class TestTANHOperator:
def test_repr(self):
assert repr(TANH) == "TANH"
def test_pops(self):
assert TANH.pops == 1
def test_puts(self):
assert TANH.puts == 1
def test_no_copy(self):
assert copy(TANH) is TANH
assert deepcopy(TANH) is TANH
def test_call(self):
assert_token(TANH, [0.0], [0.0], approx=True)
assert_token(TANH, [GOLDEN_RATIO], [math.sqrt(5) / 5], approx=True)
assert_token(
TANH,
[np.array([0.0, GOLDEN_RATIO])],
[np.array([0.0, np.sqrt(5) / 5])],
approx=True,
)
# extra stack elements
assert_token(TANH, [0, GOLDEN_RATIO], [0, math.sqrt(5) / 5], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
TANH([], {})
class TestASINOperator:
def test_repr(self):
assert repr(ASIN) == "ASIN"
def test_pops(self):
assert ASIN.pops == 1
def test_puts(self):
assert ASIN.puts == 1
def test_no_copy(self):
assert copy(ASIN) is ASIN
assert deepcopy(ASIN) is ASIN
def test_call(self):
assert_token(ASIN, [0.0], [0.0], approx=True)
assert_token(ASIN, [1 / 2], [math.pi / 6], approx=True)
assert_token(ASIN, [1 / math.sqrt(2)], [math.pi / 4], approx=True)
assert_token(ASIN, [math.sqrt(3) / 2], [math.pi / 3], approx=True)
assert_token(ASIN, [1.0], [math.pi / 2], approx=True)
assert_token(
ASIN,
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
assert_token(
ASIN,
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
# extra stack elements
assert_token(ASIN, [0, 1.0], [0, math.pi / 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ASIN([], {})
class TestACOSOperator:
def test_repr(self):
assert repr(ACOS) == "ACOS"
def test_pops(self):
assert ACOS.pops == 1
def test_puts(self):
assert ACOS.puts == 1
def test_no_copy(self):
assert copy(ACOS) is ACOS
assert deepcopy(ACOS) is ACOS
def test_call(self):
assert_token(ACOS, [1.0], [0.0], approx=True)
assert_token(ACOS, [math.sqrt(3) / 2], [math.pi / 6], approx=True)
assert_token(ACOS, [1 / math.sqrt(2)], [math.pi / 4], approx=True)
assert_token(ACOS, [1 / 2], [math.pi / 3], approx=True)
assert_token(ACOS, [0.0], [math.pi / 2], approx=True)
assert_token(
ACOS,
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
# extra stack elements
assert_token(ACOS, [0, 0.0], [0, math.pi / 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ACOS([], {})
class TestATANOperator:
def test_repr(self):
assert repr(ATAN) == "ATAN"
def test_pops(self):
assert ATAN.pops == 1
def test_puts(self):
assert ATAN.puts == 1
def test_no_copy(self):
assert copy(ATAN) is ATAN
assert deepcopy(ATAN) is ATAN
def test_call(self):
assert_token(ATAN, [0.0], [0.0], approx=True)
assert_token(ATAN, [1 / math.sqrt(3)], [math.pi / 6], approx=True)
assert_token(ATAN, [1.0], [math.pi / 4], approx=True)
assert_token(ATAN, [math.sqrt(3)], [math.pi / 3], approx=True)
assert_token(
ATAN,
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
approx=True,
)
assert_token(
ATAN,
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
approx=True,
)
# extra stack elements
assert_token(ATAN, [0, 1.0], [0, math.pi / 4], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ATAN([], {})
class TestASINDOperator:
def test_repr(self):
assert repr(ASIND) == "ASIND"
def test_pops(self):
assert ASIND.pops == 1
def test_puts(self):
assert ASIND.puts == 1
def test_no_copy(self):
assert copy(ASIND) is ASIND
assert deepcopy(ASIND) is ASIND
def test_call(self):
assert_token(ASIND, [0.0], [0], approx=True)
assert_token(ASIND, [1 / 2], [30], approx=True)
assert_token(ASIND, [1 / math.sqrt(2)], [45], approx=True)
assert_token(ASIND, [math.sqrt(3) / 2], [60], approx=True)
assert_token(ASIND, [1.0], [90], approx=True)
assert_token(
ASIND,
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
[np.array([0, 30, 45, 60, 90])],
approx=True,
)
assert_token(
ASIND,
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
[-np.array([0, 30, 45, 60, 90])],
approx=True,
)
# extra stack elements
assert_token(ASIND, [0, 1.0], [0, 90], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ASIND([], {})
class TestACOSDOperator:
def test_repr(self):
assert repr(ACOSD) == "ACOSD"
def test_pops(self):
assert ACOSD.pops == 1
def test_puts(self):
assert ACOSD.puts == 1
def test_no_copy(self):
assert copy(ACOSD) is ACOSD
assert deepcopy(ACOSD) is ACOSD
def test_call(self):
assert_token(ACOSD, [1.0], [0], approx=True)
assert_token(ACOSD, [math.sqrt(3) / 2], [30], approx=True)
assert_token(ACOSD, [1 / math.sqrt(2)], [45], approx=True)
assert_token(ACOSD, [1 / 2], [60], approx=True)
assert_token(ACOSD, [0.0], [90], approx=True)
assert_token(
ACOSD,
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
[np.array([0, 30, 45, 60, 90])],
approx=True,
)
# extra stack elements
assert_token(ACOSD, [0, 0.0], [0, 90], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ACOSD([], {})
class TestATANDOperator:
def test_repr(self):
assert repr(ATAND) == "ATAND"
def test_pops(self):
assert ATAND.pops == 1
def test_puts(self):
assert ATAND.puts == 1
def test_no_copy(self):
assert copy(ATAND) is ATAND
assert deepcopy(ATAND) is ATAND
def test_call(self):
assert_token(ATAND, [0.0], [0], approx=True)
assert_token(ATAND, [1 / math.sqrt(3)], [30], approx=True)
assert_token(ATAND, [1.0], [45], approx=True)
assert_token(ATAND, [math.sqrt(3)], [60], approx=True)
assert_token(
ATAND,
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
[np.array([0, 30, 45, 60])],
approx=True,
)
assert_token(
ATAND,
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
[-np.array([0, 30, 45, 60])],
approx=True,
)
# extra stack elements
assert_token(ATAND, [0, 1.0], [0, 45], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ATAND([], {})
class TestASINHOperator:
def test_repr(self):
assert repr(ASINH) == "ASINH"
def test_pops(self):
assert ASINH.pops == 1
def test_puts(self):
assert ASINH.puts == 1
def test_no_copy(self):
assert copy(ASINH) is ASINH
assert deepcopy(ASINH) is ASINH
def test_call(self):
assert_token(ASINH, [0.0], [0.0], approx=True)
assert_token(ASINH, [0.5], [GOLDEN_RATIO], approx=True)
assert_token(
ASINH, [np.array([0.0, 0.5])], [np.array([0.0, GOLDEN_RATIO])], approx=True
)
# extra stack elements
assert_token(ASINH, [0, 0.5], [0, GOLDEN_RATIO], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ASINH([], {})
class TestACOSHOperator:
def test_repr(self):
assert repr(ACOSH) == "ACOSH"
def test_pops(self):
assert ACOSH.pops == 1
def test_puts(self):
assert ACOSH.puts == 1
def test_no_copy(self):
assert copy(ACOSH) is ACOSH
assert deepcopy(ACOSH) is ACOSH
def test_call(self):
assert_token(ACOSH, [1.0], [0.0], approx=True)
assert_token(ACOSH, [math.sqrt(5) / 2], [GOLDEN_RATIO], approx=True)
assert_token(
ACOSH,
[np.array([1.0, np.sqrt(5) / 2])],
[np.array([0.0, GOLDEN_RATIO])],
approx=True,
)
# extra stack elements
assert_token(ACOSH, [0, math.sqrt(5) / 2], [0, GOLDEN_RATIO], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ACOSH([], {})
class TestATANHOperator:
def test_repr(self):
assert repr(ATANH) == "ATANH"
def test_pops(self):
assert ATANH.pops == 1
def test_puts(self):
assert ATANH.puts == 1
def test_no_copy(self):
assert copy(ATANH) is ATANH
assert deepcopy(ATANH) is ATANH
def test_call(self):
assert_token(ATANH, [0.0], [0.0], approx=True)
assert_token(ATANH, [math.sqrt(5) / 5], [GOLDEN_RATIO], approx=True)
assert_token(
ATANH,
[np.array([0.0, np.sqrt(5) / 5])],
[np.array([0.0, GOLDEN_RATIO])],
approx=True,
)
# extra stack elements
assert_token(ATANH, [0, math.sqrt(5) / 5], [0, GOLDEN_RATIO], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ATANH([], {})
class TestISNANOperator:
def test_repr(self):
assert repr(ISNAN) == "ISNAN"
def test_pops(self):
assert ISNAN.pops == 1
def test_puts(self):
assert ISNAN.puts == 1
def test_no_copy(self):
assert copy(ISNAN) is ISNAN
assert deepcopy(ISNAN) is ISNAN
def test_call(self):
assert_token(ISNAN, [2], [False])
assert_token(ISNAN, [float("nan")], [True])
assert_token(ISNAN, [np.array([4, np.nan])], [np.array([False, True])])
assert_token(ISNAN, [np.array([np.nan, 1])], [np.array([True, False])])
# extra stack elements
assert_token(ISNAN, [0, float("nan")], [0, True])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ISNAN([], {})
class TestISANOperator:
def test_repr(self):
assert repr(ISAN) == "ISAN"
def test_pops(self):
assert ISAN.pops == 1
def test_puts(self):
assert ISAN.puts == 1
def test_no_copy(self):
assert copy(ISAN) is ISAN
assert deepcopy(ISAN) is ISAN
def test_call(self):
assert_token(ISAN, [2], [True])
assert_token(ISAN, [float("nan")], [False])
assert_token(ISAN, [np.array([4, np.nan])], [np.array([True, False])])
assert_token(ISAN, [np.array([np.nan, 1])], [np.array([False, True])])
# extra stack elements
assert_token(ISAN, [0, 2], [0, True])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ISAN([], {})
class TestRINTOperator:
def test_repr(self):
assert repr(RINT) == "RINT"
def test_pops(self):
assert RINT.pops == 1
def test_puts(self):
assert RINT.puts == 1
def test_no_copy(self):
assert copy(RINT) is RINT
assert deepcopy(RINT) is RINT
def test_call(self):
assert_token(RINT, [1.6], [2])
assert_token(RINT, [2.4], [2])
assert_token(RINT, [-1.6], [-2])
assert_token(RINT, [-2.4], [-2])
assert_token(RINT, [np.array([1.6, 2.4])], [np.array([2, 2])])
assert_token(RINT, [np.array([-1.6, -2.4])], [np.array([-2, -2])])
# extra stack elements
assert_token(RINT, [0, 1.6], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
RINT([], {})
class TestNINTOperator:
def test_repr(self):
assert repr(NINT) == "NINT"
def test_pops(self):
assert NINT.pops == 1
def test_puts(self):
assert NINT.puts == 1
def test_no_copy(self):
assert copy(NINT) is NINT
assert deepcopy(NINT) is NINT
def test_call(self):
assert_token(NINT, [1.6], [2])
assert_token(NINT, [2.4], [2])
assert_token(NINT, [-1.6], [-2])
assert_token(NINT, [-2.4], [-2])
assert_token(NINT, [np.array([1.6, 2.4])], [np.array([2, 2])])
assert_token(NINT, [np.array([-1.6, -2.4])], [np.array([-2, -2])])
# extra stack elements
assert_token(NINT, [0, 1.6], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
NINT([], {})
class TestCEILOperator:
def test_repr(self):
assert repr(CEIL) == "CEIL"
def test_pops(self):
assert CEIL.pops == 1
def test_puts(self):
assert CEIL.puts == 1
def test_no_copy(self):
assert copy(CEIL) is CEIL
assert deepcopy(CEIL) is CEIL
def test_call(self):
assert_token(CEIL, [1.6], [2])
assert_token(CEIL, [2.4], [3])
assert_token(CEIL, [-1.6], [-1])
assert_token(CEIL, [-2.4], [-2])
assert_token(CEIL, [np.array([1.6, 2.4])], [np.array([2, 3])])
assert_token(CEIL, [np.array([-1.6, -2.4])], [np.array([-1, -2])])
# extra stack elements
assert_token(CEIL, [0, 1.2], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
CEIL([], {})
class TestCEILINGOperator:
def test_repr(self):
assert repr(CEILING) == "CEILING"
def test_pops(self):
assert CEILING.pops == 1
def test_puts(self):
assert CEILING.puts == 1
def test_no_copy(self):
assert copy(CEILING) is CEILING
assert deepcopy(CEILING) is CEILING
def test_call(self):
assert_token(CEILING, [1.6], [2])
assert_token(CEILING, [2.4], [3])
assert_token(CEILING, [-1.6], [-1])
assert_token(CEILING, [-2.4], [-2])
assert_token(CEILING, [np.array([1.6, 2.4])], [np.array([2, 3])])
assert_token(CEILING, [np.array([-1.6, -2.4])], [np.array([-1, -2])])
# extra stack elements
assert_token(CEILING, [0, 1.2], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
CEILING([], {})
class TestFLOOROperator:
def test_repr(self):
assert repr(FLOOR) == "FLOOR"
def test_pops(self):
assert FLOOR.pops == 1
def test_puts(self):
assert FLOOR.puts == 1
def test_no_copy(self):
assert copy(FLOOR) is FLOOR
assert deepcopy(FLOOR) is FLOOR
def test_call(self):
assert_token(FLOOR, [1.6], [1])
assert_token(FLOOR, [2.4], [2])
assert_token(FLOOR, [-1.6], [-2])
assert_token(FLOOR, [-2.4], [-3])
assert_token(FLOOR, [np.array([1.6, 2.4])], [np.array([1, 2])])
assert_token(FLOOR, [np.array([-1.6, -2.4])], [np.array([-2, -3])])
# extra stack elements
assert_token(FLOOR, [0, 1.8], [0, 1])
# not enough stack elements
with pytest.raises(StackUnderflowError):
FLOOR([], {})
class TestD2ROperator:
def test_repr(self):
assert repr(D2R) == "D2R"
def test_pops(self):
assert D2R.pops == 1
def test_puts(self):
assert D2R.puts == 1
def test_no_copy(self):
assert copy(D2R) is D2R
assert deepcopy(D2R) is D2R
def test_call(self):
assert_token(D2R, [0], [0.0], approx=True)
assert_token(D2R, [30], [math.pi / 6], approx=True)
assert_token(D2R, [45], [math.pi / 4], approx=True)
assert_token(D2R, [60], [math.pi / 3], approx=True)
assert_token(D2R, [90], [math.pi / 2], approx=True)
assert_token(
D2R,
[np.array([0, 30, 45, 60, 90])],
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
assert_token(
D2R,
[-np.array([0, 30, 45, 60, 90])],
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
# extra stack elements
assert_token(D2R, [0, 90], [0, math.pi / 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
D2R([], {})
class TestR2DOperator:
def test_repr(self):
assert repr(R2D) == "R2D"
def test_pops(self):
assert R2D.pops == 1
def test_puts(self):
assert R2D.puts == 1
def test_no_copy(self):
assert copy(R2D) is R2D
assert deepcopy(R2D) is R2D
def test_call(self):
assert_token(R2D, [0.0], [0], approx=True)
assert_token(R2D, [math.pi / 6], [30], approx=True)
assert_token(R2D, [math.pi / 4], [45], approx=True)
assert_token(R2D, [math.pi / 3], [60], approx=True)
assert_token(R2D, [math.pi / 2], [90], approx=True)
assert_token(
R2D,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([0, 30, 45, 60, 90])],
approx=True,
)
assert_token(
R2D,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[-np.array([0, 30, 45, 60, 90])],
approx=True,
)
# extra stack elements
assert_token(R2D, [0, math.pi / 2], [0, 90], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
R2D([], {})
class TestYMDHMSOperator:
def test_repr(self):
assert repr(YMDHMS) == "YMDHMS"
def test_pops(self):
assert YMDHMS.pops == 1
def test_puts(self):
assert YMDHMS.puts == 1
def test_no_copy(self):
assert copy(YMDHMS) is YMDHMS
assert deepcopy(YMDHMS) is YMDHMS
def test_call(self):
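        # YMDHMS packs a time given in seconds since the 1985-01-01 epoch into YYMMDDHHMMSS.ffffff form.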
epoch = datetime(1985, 1, 1, 0, 0, 0, 0)
date1 = datetime(2008, 7, 4, 12, 19, 19, 570865)
date2 = datetime(2019, 6, 26, 12, 31, 6, 930575)
seconds1 = (date1 - epoch).total_seconds()
seconds2 = (date2 - epoch).total_seconds()
assert_token(YMDHMS, [seconds1], [80704121919.570865], approx=True)
assert_token(YMDHMS, [seconds2], [190626123106.930575], approx=True)
assert_token(
YMDHMS,
[np.array([seconds1, seconds2])],
[np.array([80704121919.570865, 190626123106.930575])],
approx=True,
)
# extra stack elements
assert_token(YMDHMS, [0, seconds1], [0, 80704121919.570865], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
YMDHMS([], {})
class TestSUMOperator:
def test_repr(self):
assert repr(SUM) == "SUM"
def test_pops(self):
assert SUM.pops == 1
def test_puts(self):
assert SUM.puts == 1
def test_no_copy(self):
assert copy(SUM) is SUM
assert deepcopy(SUM) is SUM
def test_call(self):
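        # SUM reduces x to the sum of its non-NaN elements (0 if every element is NaN).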
assert_token(SUM, [2], [2])
assert_token(SUM, [-2], [-2])
assert_token(SUM, [float("nan")], [0])
assert_token(SUM, [np.array([4, -1])], [3])
assert_token(SUM, [np.array([-4, 1])], [-3])
assert_token(SUM, [np.array([1, np.nan, 3])], [4])
assert_token(SUM, [np.array([np.nan])], [0])
# extra stack elements
assert_token(SUM, [0, 2], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SUM([], {})
class TestDIFOperator:
def test_repr(self):
assert repr(DIF) == "DIF"
def test_pops(self):
assert DIF.pops == 1
def test_puts(self):
assert DIF.puts == 1
def test_no_copy(self):
assert copy(DIF) is DIF
assert deepcopy(DIF) is DIF
def test_call(self):
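        # DIF yields x[i] - x[i-1]; the first element of the result is NaN.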
assert_token(DIF, [2], [np.array([np.nan])])
assert_token(DIF, [np.array([1, 2])], [np.array([np.nan, 1])])
assert_token(DIF, [np.array([1, 2, 5])], [np.array([np.nan, 1, 3])])
assert_token(
DIF, [np.array([1, np.nan, 5])], [np.array([np.nan, np.nan, np.nan])]
)
# extra stack elements
assert_token(DIF, [0, 2], [0, np.array([np.nan])])
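        # not enough stack elements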
with pytest.raises(StackUnderflowError):
DIF([], {})
class TestDUPOperator:
def test_repr(self):
assert repr(DUP) == "DUP"
def test_pops(self):
assert DUP.pops == 1
def test_puts(self):
assert DUP.puts == 2
def test_no_copy(self):
assert copy(DUP) is DUP
assert deepcopy(DUP) is DUP
def test_call(self):
assert_token(DUP, [2], [2, 2])
assert_token(DUP, [np.array([4, -1])], [np.array([4, -1]), np.array([4, -1])])
# extra stack elements
assert_token(DUP, [0, 2], [0, 2, 2])
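        # not enough stack elements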
with pytest.raises(StackUnderflowError):
DUP([], {})
class TestDIVOperator:
def test_repr(self):
assert repr(DIV) == "DIV"
def test_pops(self):
assert DIV.pops == 2
def test_puts(self):
assert DIV.puts == 1
def test_no_copy(self):
assert copy(DIV) is DIV
assert deepcopy(DIV) is DIV
def test_call(self):
assert_token(DIV, [10, 2], [5])
assert_token(DIV, [10, np.array([2, 5])], [np.array([5, 2])])
assert_token(DIV, [np.array([10, 4]), 2], [np.array([5, 2])])
assert_token(DIV, [np.array([8, 16]), np.array([2, 4])], [np.array([4, 4])])
# extra stack elements
assert_token(DIV, [0, 10, 2], [0, 5])
# not enough stack elements
with pytest.raises(StackUnderflowError):
DIV([], {})
with pytest.raises(StackUnderflowError):
DIV([1], {})
class TestPOWOperator:
def test_repr(self):
assert repr(POW) == "POW"
def test_pops(self):
assert POW.pops == 2
def test_puts(self):
assert POW.puts == 1
def test_no_copy(self):
assert copy(POW) is POW
assert deepcopy(POW) is POW
def test_call(self):
assert_token(POW, [1, 2], [1])
assert_token(POW, [2, 2], [4])
assert_token(POW, [2, 4], [16])
assert_token(POW, [2, np.array([1, 2, 3])], [np.array([2, 4, 8])])
assert_token(POW, [np.array([1, 2, 3]), 2], [np.array([1, 4, 9])])
assert_token(POW, [np.array([2, 3]), np.array([5, 6])], [np.array([32, 729])])
# extra stack elements
assert_token(POW, [0, 2, 4], [0, 16])
# not enough stack elements
with pytest.raises(StackUnderflowError):
POW([], {})
with pytest.raises(StackUnderflowError):
POW([1], {})
class TestFMODOperator:
    def test_repr(self):
        assert repr(FMOD) == "FMOD"
    def test_pops(self):
        assert FMOD.pops == 2
    def test_puts(self):
        assert FMOD.puts == 1
def test_no_copy(self):
assert copy(FMOD) is FMOD
assert deepcopy(FMOD) is FMOD
def test_call(self):
assert_token(FMOD, [1, 2], [1])
assert_token(FMOD, [2, 10], [2])
assert_token(FMOD, [12, 10], [2])
assert_token(FMOD, [13, np.array([10, 100])], [np.array([3, 13])])
assert_token(FMOD, [np.array([7, 15]), 10], [np.array([7, 5])])
assert_token(FMOD, [np.array([7, 15]), np.array([10, 5])], [np.array([7, 0])])
# extra stack elements
assert_token(FMOD, [0, 12, 10], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
FMOD([], {})
with pytest.raises(StackUnderflowError):
FMOD([1], {})
class TestMINOperator:
def test_repr(self):
assert repr(MIN) == "MIN"
def test_pops(self):
assert MIN.pops == 2
def test_puts(self):
assert MIN.puts == 1
def test_no_copy(self):
assert copy(MIN) is MIN
assert deepcopy(MIN) is MIN
def test_call(self):
assert_token(MIN, [2, 3], [2])
assert_token(MIN, [3, 2], [2])
assert_token(MIN, [2, np.array([1, 3])], [np.array([1, 2])])
assert_token(MIN, [np.array([1, 3]), 2], [np.array([1, 2])])
assert_token(MIN, [np.array([2, 3]), np.array([3, 2])], [np.array([2, 2])])
        # extra stack elements
assert_token(MIN, [0, 2, 3], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
MIN([], {})
with pytest.raises(StackUnderflowError):
MIN([1], {})
class TestMAXOperator:
def test_repr(self):
assert repr(MAX) == "MAX"
def test_pops(self):
assert MAX.pops == 2
def test_puts(self):
assert MAX.puts == 1
def test_no_copy(self):
assert copy(MAX) is MAX
assert deepcopy(MAX) is MAX
def test_call(self):
assert_token(MAX, [2, 3], [3])
assert_token(MAX, [3, 2], [3])
assert_token(MAX, [2, np.array([1, 3])], [np.array([2, 3])])
assert_token(MAX, [np.array([1, 3]), 2], [np.array([2, 3])])
assert_token(MAX, [np.array([2, 3]), np.array([3, 2])], [np.array([3, 3])])
        # extra stack elements
assert_token(MAX, [0, 2, 3], [0, 3])
# not enough stack elements
with pytest.raises(StackUnderflowError):
MAX([], {})
with pytest.raises(StackUnderflowError):
MAX([1], {})
class TestATAN2Operator:
def test_repr(self):
assert repr(ATAN2) == "ATAN2"
def test_pops(self):
assert ATAN2.pops == 2
def test_puts(self):
assert ATAN2.puts == 1
def test_no_copy(self):
assert copy(ATAN2) is ATAN2
assert deepcopy(ATAN2) is ATAN2
def test_call(self):
# NOTE: second parameter is x, first is y
assert_token(ATAN2, [0, 1], [0], approx=True)
assert_token(ATAN2, [1, math.sqrt(3)], [math.pi / 6], approx=True)
assert_token(ATAN2, [1, 1], [math.pi / 4], approx=True)
assert_token(ATAN2, [math.sqrt(3), 1], [math.pi / 3], approx=True)
assert_token(ATAN2, [1, 0], [math.pi / 2], approx=True)
assert_token(
ATAN2, [math.sqrt(3), -1], [math.pi / 2 + math.pi / 6], approx=True
)
assert_token(ATAN2, [1, -1], [math.pi / 2 + math.pi / 4], approx=True)
assert_token(
ATAN2, [1, -math.sqrt(3)], [math.pi / 2 + math.pi / 3], approx=True
)
assert_token(ATAN2, [0, -1], [math.pi / 2 + math.pi / 2], approx=True)
assert_token(
ATAN2,
[
np.array([0, 1, 1, np.sqrt(3), 1, np.sqrt(3), 1, 1, 0]),
np.array([1, np.sqrt(3), 1, 1, 0, -1, -1, -np.sqrt(3), -1]),
],
[
np.array(
[
0.0,
np.pi / 6,
np.pi / 4,
np.pi / 3,
np.pi / 2,
np.pi / 2 + np.pi / 6,
np.pi / 2 + np.pi / 4,
np.pi / 2 + np.pi / 3,
np.pi / 2 + np.pi / 2,
]
)
],
approx=True,
)
# extra stack elements
assert_token(ATAN2, [0, 1, 1], [0, math.pi / 4], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ATAN2([], {})
class TestHYPOTOperator:
def test_repr(self):
assert repr(HYPOT) == "HYPOT"
def test_pops(self):
assert HYPOT.pops == 2
def test_puts(self):
assert HYPOT.puts == 1
def test_no_copy(self):
assert copy(HYPOT) is HYPOT
assert deepcopy(HYPOT) is HYPOT
def test_call(self):
assert_token(HYPOT, [1, 1], [math.sqrt(2)], approx=True)
assert_token(HYPOT, [math.sqrt(3), 1], [2], approx=True)
assert_token(
HYPOT,
[1, np.array([np.sqrt(3), 1])],
[np.array([2, np.sqrt(2)])],
approx=True,
)
assert_token(
HYPOT,
[np.array([np.sqrt(3), 1]), 1],
[np.array([2, np.sqrt(2)])],
approx=True,
)
assert_token(
HYPOT,
[np.array([np.sqrt(3), 1]), np.array([1, 1])],
[np.array([2, np.sqrt(2)])],
approx=True,
)
# extra stack elements
assert_token(HYPOT, [0, math.sqrt(3), 1], [0, 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
HYPOT([], {})
with pytest.raises(StackUnderflowError):
HYPOT([1], {})
class TestR2Operator:
def test_repr(self):
assert repr(R2) == "R2"
def test_pops(self):
assert R2.pops == 2
def test_puts(self):
assert R2.puts == 1
def test_no_copy(self):
assert copy(R2) is R2
assert deepcopy(R2) is R2
def test_call(self):
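        # R2 computes x**2 + y**2.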
assert_token(R2, [2, 3], [13])
assert_token(R2, [2, np.array([3, 4])], [np.array([13, 20])])
assert_token(R2, [np.array([3, 4]), 2], [np.array([13, 20])])
assert_token(R2, [np.array([1, 2]), np.array([3, 4])], [np.array([10, 20])])
# extra stack elements
assert_token(R2, [0, 2, 3], [0, 13], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
R2([], {})
with pytest.raises(StackUnderflowError):
R2([1], {})
class TestEQOperator:
def test_repr(self):
assert repr(EQ) == "EQ"
def test_pops(self):
assert EQ.pops == 2
def test_puts(self):
assert EQ.puts == 1
def test_no_copy(self):
assert copy(EQ) is EQ
assert deepcopy(EQ) is EQ
def test_call(self):
assert_token(EQ, [2, 2], [True])
assert_token(EQ, [2, 3], [False])
assert_token(
EQ, [2, np.array([1, np.nan, 2])], [np.array([False, False, True])]
)
assert_token(
EQ, [np.array([1, np.nan, 2]), 2], [np.array([False, False, True])]
)
assert_token(
EQ,
[np.array([1, np.nan, 3, 3]), np.array([1, np.nan, 2, 3])],
[np.array([True, False, False, True])],
)
# extra stack elements
assert_token(EQ, [0, 2, 2], [0, True])
# not enough stack elements
with pytest.raises(StackUnderflowError):
EQ([], {})
with pytest.raises(StackUnderflowError):
EQ([1], {})
class TestNEOperator:
def test_repr(self):
assert repr(NE) == "NE"
def test_pops(self):
assert NE.pops == 2
def test_puts(self):
assert NE.puts == 1
def test_no_copy(self):
assert copy(NE) is NE
assert deepcopy(NE) is NE
def test_call(self):
assert_token(NE, [2, 2], [False])
assert_token(NE, [2, 3], [True])
assert_token(NE, [2, np.array([1, np.nan, 2])], [np.array([True, True, False])])
assert_token(NE, [np.array([1, np.nan, 2]), 2], [np.array([True, True, False])])
assert_token(
NE,
[np.array([1, np.nan, 3, 3]), np.array([1, np.nan, 2, 3])],
[np.array([False, True, True, False])],
)
# extra stack elements
assert_token(NE, [0, 2, 2], [0, False])
# not enough stack elements
with pytest.raises(StackUnderflowError):
NE([], {})
with pytest.raises(StackUnderflowError):
NE([1], {})
class TestLTOperator:
def test_repr(self):
assert repr(LT) == "LT"
def test_pops(self):
assert LT.pops == 2
def test_puts(self):
assert LT.puts == 1
def test_no_copy(self):
assert copy(LT) is LT
assert deepcopy(LT) is LT
def test_call(self):
assert_token(LT, [2, 3], [True])
assert_token(LT, [2, 2], [False])
assert_token(LT, [3, 2], [False])
assert_token(LT, [2, np.array([1, 2, 3])], [np.array([False, False, True])])
assert_token(LT, [np.array([1, 2, 3]), 2], [np.array([True, False, False])])
assert_token(
LT,
[np.array([1, 2, 3]), np.array([3, 2, 1])],
[np.array([True, False, False])],
)
# extra stack elements
assert_token(LT, [0, 2, 3], [0, True])
# not enough stack elements
with pytest.raises(StackUnderflowError):
LT([], {})
with pytest.raises(StackUnderflowError):
LT([1], {})
class TestLEOperator:
def test_repr(self):
assert repr(LE) == "LE"
def test_pops(self):
assert LE.pops == 2
def test_puts(self):
assert LE.puts == 1
def test_no_copy(self):
assert copy(LE) is LE
assert deepcopy(LE) is LE
    def test_call(self):
assert_token(LE, [2, 3], [True])
assert_token(LE, [2, 2], [True])
assert_token(LE, [3, 2], [False])
assert_token(LE, [2, np.array([1, 2, 3])], [np.array([False, True, True])])
assert_token(LE, [np.array([1, 2, 3]), 2], [np.array([True, True, False])])
assert_token(
LE,
[np.array([1, 2, 3]), np.array([3, 2, 1])],
[np.array([True, True, False])],
)
        # extra stack elements
assert_token(LE, [0, 2, 3], [0, True])
# not enough stack elements
with pytest.raises(StackUnderflowError):
LE([], {})
with pytest.raises(StackUnderflowError):
LE([1], {})
class TestGTOperator:
def test_repr(self):
assert repr(GT) == "GT"
def test_pops(self):
assert GT.pops == 2
def test_puts(self):
assert GT.puts == 1
def test_no_copy(self):
assert copy(GT) is GT
assert deepcopy(GT) is GT
def test_call(self):
assert_token(GT, [2, 3], [False])
assert_token(GT, [2, 2], [False])
assert_token(GT, [3, 2], [True])
assert_token(GT, [2, np.array([1, 2, 3])], [np.array([True, False, False])])
assert_token(GT, [np.array([1, 2, 3]), 2], [np.array([False, False, True])])
assert_token(
GT,
[np.array([1, 2, 3]), np.array([3, 2, 1])],
[np.array([False, False, True])],
)
# extra stack elements
assert_token(GT, [0, 2, 3], [0, False])
# not enough stack elements
with pytest.raises(StackUnderflowError):
GT([], {})
with pytest.raises(StackUnderflowError):
GT([1], {})
class TestGEOperator:
def test_repr(self):
assert repr(GE) == "GE"
def test_pops(self):
assert GE.pops == 2
def test_puts(self):
assert GE.puts == 1
def test_no_copy(self):
assert copy(GE) is GE
assert deepcopy(GE) is GE
def test_call(self):
assert_token(GE, [2, 3], [False])
assert_token(GE, [2, 2], [True])
assert_token(GE, [3, 2], [True])
assert_token(GE, [2, np.array([1, 2, 3])], [np.array([True, True, False])])
assert_token(GE, [np.array([1, 2, 3]), 2], [np.array([False, True, True])])
assert_token(
GE,
[np.array([1, 2, 3]), np.array([3, 2, 1])],
[np.array([False, True, True])],
)
# extra stack elements
assert_token(GE, [0, 2, 3], [0, False])
# not enough stack elements
with pytest.raises(StackUnderflowError):
GE([], {})
with pytest.raises(StackUnderflowError):
GE([1], {})
class TestNANOperator:
def test_repr(self):
assert repr(NAN) == "NAN"
def test_pops(self):
assert NAN.pops == 2
def test_puts(self):
assert NAN.puts == 1
def test_no_copy(self):
assert copy(NAN) is NAN
assert deepcopy(NAN) is NAN
def test_call(self):
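        # NAN replaces x with NaN wherever x equals y.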
assert_token(NAN, [2, 2], [float("nan")])
assert_token(NAN, [2, 3], [2])
assert_token(NAN, [2, np.array([2, 3])], [np.array([np.nan, 2])])
assert_token(NAN, [np.array([2, 3]), 2], [np.array([np.nan, 3])])
assert_token(
NAN, [np.array([1, 2, 3]), np.array([3, 2, 1])], [np.array([1, np.nan, 3])]
)
# as float
assert_token(
NAN,
[np.array([1.0, 2.0, 3.0]), np.array([3, 2, 1])],
[np.array([1, np.nan, 3])],
approx=True,
)
# extra stack elements
assert_token(NAN, [0, 2, 2], [0, float("nan")])
# not enough stack elements
with pytest.raises(StackUnderflowError):
NAN([], {})
with pytest.raises(StackUnderflowError):
NAN([1], {})
class TestANDOperator:
def test_repr(self):
assert repr(AND) == "AND"
def test_pops(self):
assert AND.pops == 2
def test_puts(self):
assert AND.puts == 1
def test_no_copy(self):
assert copy(AND) is AND
assert deepcopy(AND) is AND
def test_call(self):
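        # AND keeps x where it is non-NaN and falls back to y where x is NaN.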
assert_token(AND, [2, 3], [2])
assert_token(AND, [float("nan"), 3], [3])
assert_token(AND, [float("nan"), np.array([2, 3])], [np.array([2, 3])])
assert_token(AND, [np.array([np.nan, 3]), 2], [np.array([2, 3])])
assert_token(
AND,
[np.array([10, np.nan, 30]), np.array([1, 2, 3])],
[np.array([10, 2, 30])],
)
# extra stack elements
assert_token(AND, [0, float("nan"), 3], [0, 3])
# not enough stack elements
with pytest.raises(StackUnderflowError):
AND([], {})
with pytest.raises(StackUnderflowError):
AND([1], {})
class TestOROperator:
def test_repr(self):
assert repr(OR) == "OR"
def test_pops(self):
assert OR.pops == 2
def test_puts(self):
assert OR.puts == 1
def test_no_copy(self):
assert copy(OR) is OR
assert deepcopy(OR) is OR
def test_call(self):
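        # OR returns x, masked to NaN wherever y is NaN.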
assert_token(OR, [2, 3], [2])
assert_token(OR, [2, float("nan")], [float("nan")])
assert_token(OR, [2, np.array([3, np.nan])], [np.array([2, np.nan])])
assert_token(OR, [np.array([2, 3]), np.nan], [np.array([np.nan, np.nan])])
assert_token(
OR,
[np.array([1, 2, 3]), np.array([10, np.nan, 30])],
[np.array([1, np.nan, 3])],
)
# as float
assert_token(
OR,
[np.array([1.0, 2.0, 3.0]), np.array([10, np.nan, 30])],
[np.array([1, np.nan, 3])],
)
# extra stack elements
assert_token(OR, [0, 2, float("nan")], [0, float("nan")])
# not enough stack elements
with pytest.raises(StackUnderflowError):
OR([], {})
with pytest.raises(StackUnderflowError):
OR([1], {})
class TestIANDOperator:
def test_repr(self):
assert repr(IAND) == "IAND"
def test_pops(self):
assert IAND.pops == 2
def test_puts(self):
assert IAND.puts == 1
def test_no_copy(self):
assert copy(IAND) is IAND
assert deepcopy(IAND) is IAND
def test_call(self):
assert_token(IAND, [5, 3], [1])
assert_token(IAND, [15, 21], [5])
assert_token(IAND, [21, 15], [5])
assert_token(IAND, [15, np.array([9, 21, 35])], [np.array([9, 5, 3])])
assert_token(IAND, [np.array([9, 21, 35]), 15], [np.array([9, 5, 3])])
assert_token(
IAND,
[np.array([9, 21, 35]), np.array([3, 15, 127])],
[np.array([1, 5, 35])],
)
# extra stack elements
assert_token(IAND, [0, 15, 21], [0, 5])
# floats are not supported
with pytest.raises(TypeError):
IAND([1.0, 2], {})
with pytest.raises(TypeError):
IAND([1, 2.0], {})
with pytest.raises(TypeError):
IAND([1, np.array([2.0, 3.0])], {})
with pytest.raises(TypeError):
IAND([np.array([2.0, 3.0]), 1], {})
# not enough stack elements
with pytest.raises(StackUnderflowError):
IAND([], {})
with pytest.raises(StackUnderflowError):
IAND([1], {})
class TestIOROperator:
def test_repr(self):
assert repr(IOR) == "IOR"
def test_pops(self):
assert IOR.pops == 2
def test_puts(self):
assert IOR.puts == 1
def test_no_copy(self):
assert copy(IOR) is IOR
assert deepcopy(IOR) is IOR
def test_call(self):
assert_token(IOR, [5, 3], [7])
assert_token(IOR, [15, 21], [31])
assert_token(IOR, [21, 15], [31])
assert_token(IOR, [15, np.array([9, 21, 35])], [np.array([15, 31, 47])])
assert_token(IOR, [np.array([9, 21, 35]), 15], [np.array([15, 31, 47])])
assert_token(
IOR,
[np.array([9, 21, 35]), np.array([3, 15, 127])],
[np.array([11, 31, 127])],
)
# extra stack elements
assert_token(IOR, [0, 15, 21], [0, 31])
# floats are not supported
with pytest.raises(TypeError):
IOR([1.0, 2], {})
with pytest.raises(TypeError):
IOR([1, 2.0], {})
with pytest.raises(TypeError):
IOR([1, np.array([2.0, 3.0])], {})
with pytest.raises(TypeError):
IOR([np.array([2.0, 3.0]), 1], {})
# not enough stack elements
with pytest.raises(StackUnderflowError):
IOR([], {})
with pytest.raises(StackUnderflowError):
IOR([1], {})
class TestBTESTOperator:
def test_repr(self):
assert repr(BTEST) == "BTEST"
def test_pops(self):
assert BTEST.pops == 2
def test_puts(self):
assert BTEST.puts == 1
def test_no_copy(self):
assert copy(BTEST) is BTEST
assert deepcopy(BTEST) is BTEST
def test_call(self):
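        # BTEST checks whether bit y of x is set.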
assert_token(BTEST, [9, 0], [True])
assert_token(BTEST, [9, 1], [False])
assert_token(BTEST, [9, 2], [False])
assert_token(BTEST, [9, 3], [True])
assert_token(BTEST, [9, 4], [False])
assert_token(
BTEST,
[9, np.array([0, 1, 2, 3, 4])],
[np.array([True, False, False, True, False])],
)
assert_token(BTEST, [np.array([1, 3, 5]), 1], [np.array([False, True, False])])
assert_token(
BTEST,
[np.array([1, 3, 5]), np.array([1, 2, 0])],
[np.array([False, False, True])],
)
# extra stack elements
assert_token(BTEST, [0, 9, 3], [0, True])
# floats are not supported
with pytest.raises(TypeError):
BTEST([1.0, 2], {})
with pytest.raises(TypeError):
BTEST([1, 2.0], {})
with pytest.raises(TypeError):
BTEST([1, np.array([2.0, 3.0])], {})
with pytest.raises(TypeError):
BTEST([np.array([2.0, 3.0]), 1], {})
# not enough stack elements
with pytest.raises(StackUnderflowError):
BTEST([], {})
with pytest.raises(StackUnderflowError):
BTEST([1], {})
class TestAVGOperator:
def test_repr(self):
assert repr(AVG) == "AVG"
def test_pops(self):
assert AVG.pops == 2
def test_puts(self):
assert AVG.puts == 1
def test_no_copy(self):
assert copy(AVG) is AVG
assert deepcopy(AVG) is AVG
def test_call(self):
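        # AVG is the element-wise mean of x and y, ignoring NaN operands.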
assert_token(AVG, [5, 11], [8])
assert_token(AVG, [float("nan"), 11], [11])
assert_token(AVG, [5, float("nan")], [5])
assert_token(AVG, [3, np.array([7, np.nan, 11])], [np.array([5, 3, 7])])
assert_token(AVG, [np.nan, np.array([1, 2, 3])], [np.array([1, 2, 3])])
assert_token(AVG, [np.array([7, np.nan, 11]), 3], [np.array([5, 3, 7])])
assert_token(AVG, [np.array([1, 2, 3]), np.nan], [np.array([1, 2, 3])])
assert_token(
AVG,
[np.array([3, np.nan, 11]), np.array([7, 2, np.nan])],
[np.array([5, 2, 11])],
)
# extra stack elements
assert_token(AVG, [0, 5, 11], [0, 8])
# not enough stack elements
with pytest.raises(StackUnderflowError):
AVG([], {})
with pytest.raises(StackUnderflowError):
AVG([1], {})
class TestDXDYOperator:
def test_repr(self):
assert repr(DXDY) == "DXDY"
def test_pops(self):
assert DXDY.pops == 2
def test_puts(self):
assert DXDY.puts == 1
def test_no_copy(self):
assert copy(DXDY) is DXDY
assert deepcopy(DXDY) is DXDY
def test_call(self):
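        # DXDY is the central-difference derivative dx/dy; the endpoints are NaN.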
assert_token(DXDY, [5, 11], [float("nan")])
assert_token(DXDY, [3, np.array([5, 11])], [np.array([np.nan, np.nan])])
assert_token(DXDY, [3, np.array([5, 7, 11])], [np.array([np.nan, 0, np.nan])])
assert_token(
DXDY, [3, np.array([5, 7, 8, 11])], [np.array([np.nan, 0, 0, np.nan])]
)
with warnings.catch_warnings(): # divide by zero in numpy
warnings.simplefilter("ignore")
assert_token(DXDY, [np.array([5, 11]), 3], [np.array([np.nan, np.nan])])
assert_token(
DXDY, [np.array([5, 7, 11]), 3], [np.array([np.nan, np.inf, np.nan])]
)
assert_token(
DXDY,
[np.array([5, 7, 8, 11]), 3],
[np.array([np.nan, np.inf, np.inf, np.nan])],
)
assert_token(
DXDY,
[np.array([0, 4, 9, 8]), np.array([5, 7, 8, 11])],
[np.array([np.nan, 3, 1, np.nan])],
)
# extra stack elements
assert_token(DXDY, [0, 5, 11], [0, float("nan")])
# not enough stack elements
with pytest.raises(StackUnderflowError):
DXDY([], {})
with pytest.raises(StackUnderflowError):
DXDY([1], {})
class TestEXCHOperator:
    def test_repr(self):
assert repr(EXCH) == "EXCH"
def test_pops(self):
assert EXCH.pops == 2
def test_puts(self):
assert EXCH.puts == 2
def test_no_copy(self):
assert copy(EXCH) is EXCH
assert deepcopy(EXCH) is EXCH
def test_call(self):
assert_token(EXCH, [5, 11], [11, 5])
assert_token(EXCH, [3, np.array([5, 11])], [np.array([5, 11]), 3])
assert_token(EXCH, [np.array([5, 11]), 3], [3, np.array([5, 11])])
assert_token(
EXCH,
[np.array([1, 2]), np.array([3, 4])],
[np.array([3, 4]), np.array([1, 2])],
)
# extra stack elements
assert_token(EXCH, [0, 5, 11], [0, 11, 5])
# not enough stack elements
with pytest.raises(StackUnderflowError):
EXCH([], {})
with pytest.raises(StackUnderflowError):
EXCH([1], {})
class TestINRANGEOperator:
def test_repr(self):
assert repr(INRANGE) == "INRANGE"
def test_pops(self):
assert INRANGE.pops == 3
def test_puts(self):
assert INRANGE.puts == 1
def test_no_copy(self):
assert copy(INRANGE) is INRANGE
assert deepcopy(INRANGE) is INRANGE
def test_call(self):
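        # INRANGE tests y <= x <= z (bounds inclusive).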
assert_token(INRANGE, [0, 1, 3], [False])
assert_token(INRANGE, [1, 1, 3], [True])
assert_token(INRANGE, [2, 1, 3], [True])
assert_token(INRANGE, [3, 1, 3], [True])
assert_token(INRANGE, [4, 1, 3], [False])
assert_token(
INRANGE,
[np.array([0, 1, 2, 3, 4]), 1, 3],
[np.array([False, True, True, True, False])],
)
assert_token(
INRANGE, [2, np.array([1, 2, 3]), 3], [np.array([True, True, False])]
)
assert_token(
INRANGE, [2, 1, np.array([1, 2, 3])], [np.array([False, True, True])]
)
assert_token(
INRANGE,
[np.array([1, 2, 3]), np.array([1, 2, 3]), np.array([1, 2, 3])],
[np.array([True, True, True])],
)
# extra stack elements
assert_token(INRANGE, [0, 2, 1, 3], [0, True])
# not enough stack elements
with pytest.raises(StackUnderflowError):
INRANGE([], {})
with pytest.raises(StackUnderflowError):
INRANGE([1], {})
with pytest.raises(StackUnderflowError):
INRANGE([1, 2], {})
class TestBOXCAROperator:
def test_repr(self):
assert repr(BOXCAR) == "BOXCAR"
def test_pops(self):
assert BOXCAR.pops == 3
def test_puts(self):
assert BOXCAR.puts == 1
def test_no_copy(self):
assert copy(BOXCAR) is BOXCAR
assert deepcopy(BOXCAR) is BOXCAR
def test_call(self):
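        # BOXCAR applies a running mean of length z along dimension y of x.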
# returns value if scalar
assert_token(BOXCAR, [1, 2, 3], [1])
# simple
assert_token(
BOXCAR,
[np.array([0, 1, 2, 3, 4]), 0, 3],
[np.array([1 / 3, 1, 2, 3, 11 / 3])],
approx=True,
)
# window size of 1 should return original array
assert_token(
BOXCAR,
[np.array([0, 1, 2, 3, 4]), 0, 1],
[np.array([0, 1, 2, 3, 4])],
approx=True,
)
# with nan it should base result on non nan values in window
assert_token(
BOXCAR,
[np.array([0, np.nan, 2, 3, 4]), 0, 3],
[np.array([0, np.nan, 2.5, 3, 11 / 3])],
approx=True,
)
# multi-dimensional x
assert_token(
BOXCAR,
[np.array([[0, 1, 2, 3, 4], [0, 10, 20, 30, 40]]), 1, 3],
[np.array([[1 / 3, 1, 2, 3, 11 / 3], [10 / 3, 10, 20, 30, 110 / 3]])],
approx=True,
)
assert_token(
BOXCAR,
[
np.array(
[[0, 1, 2, 3, 4], [0, 10, 20, 30, 40], [0, 100, 200, 300, 400]]
),
0,
3,
],
[
np.array(
[[0, 4, 8, 12, 16], [0, 37, 74, 111, 148], [0, 70, 140, 210, 280]]
)
],
approx=True,
)
# extra stack elements
assert_token(BOXCAR, [0, 1, 2, 3], [0, 1])
# y must be a scalar
with pytest.raises(ValueError):
BOXCAR([np.array([1, 2, 3]), np.array([1, 2]), 3], {})
# z must be a scalar
with pytest.raises(ValueError):
BOXCAR([np.array([1, 2, 3]), 2, np.array([1, 2])], {})
# x must have dimension y
with pytest.raises(IndexError):
BOXCAR([np.array([1, 2, 3]), 1, 3], {})
# not enough stack elements
with pytest.raises(StackUnderflowError):
BOXCAR([], {})
with pytest.raises(StackUnderflowError):
BOXCAR([1], {})
with pytest.raises(StackUnderflowError):
BOXCAR([1, 2], {})
class TestGAUSSOperator:
def test_repr(self):
assert repr(GAUSS) == "GAUSS"
def test_pops(self):
assert GAUSS.pops == 3
def test_puts(self):
assert GAUSS.puts == 1
def test_no_copy(self):
assert copy(GAUSS) is GAUSS
assert deepcopy(GAUSS) is GAUSS
def test_call(self):
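        # GAUSS applies a Gaussian-weighted running mean along dimension y of x (z sets the filter width).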
# returns value if scalar
assert_token(GAUSS, [1, 2, 3], [1])
# simple
assert_token(
GAUSS,
[np.array([0, 1, 2, 3, 4]), 0, 3],
[np.array([1.06300638, 1.5089338, 2.0, 2.4910662, 2.93699362])],
approx=True,
rtol=1e-6,
atol=1e-6,
)
# with nan it should base result on non nan values in window
assert_token(
GAUSS,
[np.array([0, np.nan, 2, 3, 4]), 0, 3],
            [np.array([1.07207303, np.nan, 2.14390036, 2.66876589, 3.10693751])],
            approx=True,
            rtol=1e-6,
            atol=1e-6,
        )