# Dataset columns: prompt (string, 19 to 879k chars) | completion (string, 3 to 53.8k chars) | api (string, 8 to 59 chars)
import numpy
import struct
import warnings
from .compat import structured_cast
# from .logger import logger
from ..lib.arraybase import set_or_add_to_structured, to_structured
from ..lib.iterable import split
try:
import xxhash
_HAS_XXHASH = True
except ImportError:
_HAS_XXHASH = False
import hashlib
# Pure numpy implementation of hashmaps
# This file implements low level classes, but as a user you should try to use:
# unique, unique_count, search, get_dense_labels_map, factorize or Hashmap
UINT64 = numpy.uint64
INV_PHI = UINT64(11400714819323198485) # 1<<64/phi
STEP_MULT = UINT64(11223344556677889999) # arbitrary, could be optimized
_DEFAULT = object()
class _BaseHashmap(object):
"""
template for HashMap keys => values
* hashmaps gives efficient O(n) search in un-sorted set of keys or map keys => values
* hashmaps without values are called hashtables
* this implementation relies on Fibonacci hashing
* we support any dtype for the keys and values, by implementing these sub-classes:
* - vanilla hash for uint64 keys
* - python's tuple-hash for struct of uint64 fields
* - xxhash or sha1 for bytes objects (or encoded str objects), or struct of them
* - python's object hash for any object
* for the first two, the keys are casted into uint64 or uint64-tuple
* when possible for struct of a few tiny field, we view them as a single uint64
"""
@classmethod
def new(cls, keys, values=None, cast_dtype=None, view_dtype=None, empty=_DEFAULT):
"""
:param array keys: (n,) key-dtype array
:param array? values: (n,) val-dtype array
:param dtype? cast_dtype: dtype to cast ``keys``
:param dtype? view_dtype: dtype to view ``keys``
:param int? empty: empty value (default: 0)
"""
original_dtype = keys.dtype
cast_dtype = numpy.dtype(cast_dtype or keys.dtype)
view_dtype = numpy.dtype(view_dtype or keys.dtype)
_keys = cls._cast(keys, cast_dtype, view_dtype)
# keys = structured_cast(keys, dtype)
empty = cls._choose_empty_value(_keys, view_dtype, empty)
n, = _keys.shape
log_size = (cls.init_space_ratio(n) * n - 1).bit_length()
size = 1 << log_size
table = numpy.full(size, empty, dtype=view_dtype)
values_table = numpy.zeros(
size, dtype=values.dtype) if values is not None else None
hashmap = cls(table, values_table, empty,
log_size, original_dtype, cast_dtype)
hashmap._set_initial(_keys, values)
return hashmap
def __init__(self, table, values, empty, log_size, original_dtype, cast_dtype, can_resize=True):
""" low-level constructor, use .new instead """
self._table = table
self.values = values
self._empty = empty
self.log_size = log_size
self.can_resize = can_resize
self.shift = UINT64(64 - self.log_size)
self.n_used = (self._table != self._empty).sum()
self.original_dtype = original_dtype
self.cast_dtype = cast_dtype
@property
def size(self):
return self._table.size
@property
def nbytes(self):
summed = self._table.nbytes
if self.values is not None:
summed += self.values.nbytes
return summed
def set_many(self, keys, values=None):
"""
:param array keys: (n,) key-dtype array
:param array? values: (n,) val-dtype array
"""
_keys = self._cast(keys, self.cast_dtype, self._table.dtype)
if values is not None and self.values.dtype.names:
# align fields
values = values[[k for k in self.values.dtype.names]]
if _keys.size > 0 and (_keys == self._empty).any():
self._change_empty(_keys)
n, = _keys.shape
if self.min_space_ratio(n) * (self.n_used + n) > self.size:
self._resize(self.n_used + n)
# step=0
step = UINT64(0)
indexes = self._shifted_hash(_keys, step)
done = False
max_steps = self.max_steps(n)
for _ in range(max_steps):
available = self._table[indexes] == self._empty
available_indexes = indexes[available]
self._table[available_indexes] = _keys[available]
collisions = self._table[indexes] != _keys
if values is not None:
self.values[indexes[~collisions]] = values[~collisions]
if not collisions.any():
done = True
break
# next step: work only in `collisions`
step += UINT64(1)
_keys = _keys[collisions]
if values is not None:
values = values[collisions]
indexes = self._shifted_hash(_keys, step)
if not done:
raise RuntimeError(f'could not set_many within {max_steps} steps')
self.n_used = (self._table != self._empty).sum()
def lookup(self, keys):
"""
Search keys in hashtable (do not confuse with ``get_many`` of hashmap)
:param array keys: (n,) key-dtype array
:returns: tuple(
indexes: (n,) uint64 array,
found: (n,) bool array,
)
"""
_keys = self._cast(keys, self.cast_dtype, self._table.dtype)
if _keys.size > 0 and (_keys == self._empty).any():
self._change_empty(_keys)
n, = _keys.shape
# working idx in all (lazy build at first collisions)
idx_in_all = None
# step=0
step = UINT64(0)
all_indexes = self._shifted_hash(_keys, step)
indexes = all_indexes
table_values = self._table[indexes]
all_found = table_values != self._empty
found = all_found
done = False
max_steps = self.max_steps(n)
for _ in range(max_steps):
collisions = found & (table_values != _keys)
if not collisions.any():
done = True
break
# next step: work only in `collisions`
step += UINT64(1)
_keys = _keys[collisions]
if idx_in_all is None:
idx_in_all = numpy.where(collisions)[0]
else:
idx_in_all = idx_in_all[collisions]
indexes = self._shifted_hash(_keys, step)
all_indexes[idx_in_all] = indexes
table_values = self._table[indexes]
found = table_values != self._empty
all_found[idx_in_all] = found
if not done:
raise RuntimeError(f'could not lookup within {max_steps} steps')
return all_indexes, all_found
def contains(self, keys):
"""
:param array keys: (n,) key-dtype array
:returns: (n,) bool array
"""
_, found = self.lookup(keys)
return found
def get_many(self, keys):
"""
:param array keys: (n,) key-dtype array
:returns: tuple(
values: (n,) val-dtype array,
found: (n,) bool array,
)
"""
if self.values is None:
raise ValueError(
'`get_many` is only available when values is not None, use `lookup`')
indexes, found = self.lookup(keys)
values = self.values[indexes]
return values, found
def unique_keys(self, return_table_mask=False, return_values=False):
""" :returns: (
(n,) key-dtype array,
[if return_table_mask] (m,) bool array with n "1s"
[if return_values] (n,) val-dtype array,
)
"""
has_key = self._table != self._empty
_keys = self._table[has_key]
keys = self._cast_back(_keys)
if not return_table_mask and not return_values:
return keys
out = (keys,)
if return_table_mask:
out = out + (has_key,)
if return_values:
out = out + (self.values[has_key],)
return out
def keys_hash(self):
""" returns order-invarient hash of keys (not __hash__ because we don't look at values) """
# combine raw hash (pre-shift) by global sum
has_key = self._table != self._empty
_keys = self._table[has_key]
# compute raw uint64 hash
_keys_hsh = self._hash(_keys, UINT64(0))
# aggregate
hsh_uint64 = numpy.bitwise_xor.reduce(_keys_hsh)
return int(hsh_uint64.view(numpy.int64)) # return as int
@classmethod
def init_space_ratio(cls, n):
""" multiplier to set table size """
return 4 if n < (1<<26) else 2
@classmethod
def min_space_ratio(cls, n):
""" when to trigger resize """
return 3 if n < (1<<26) else 1.5
@classmethod
def max_steps(cls, n):
""" prevent infinite loop by a cap on the nb of steps (heuristic but very large) """
return max(64, n // (32 * cls.init_space_ratio(n)))
def _resize(self, n):
log_size = int(self.init_space_ratio(n) * n - 1).bit_length()
has_value = self._table != self._empty
_keys = self._table[has_value]
if self.values is not None:
values = self.values[has_value]
else:
values = None
if values is not None:
self.values = numpy.zeros(self.size, dtype=values.dtype)
# re-allocate tables
self.log_size = log_size
self.shift = UINT64(64 - self.log_size)
new_size = 1 << log_size
self._table = numpy.full(
new_size, self._empty, dtype=self._table.dtype)
if values is not None:
self.values = numpy.zeros(new_size, dtype=values.dtype)
self._set_initial(_keys, values)
def _set_initial(self, _keys, values):
n, = _keys.shape
# step=0
step = UINT64(0)
indexes = self._shifted_hash(_keys, step)
self._table[indexes] = _keys
if values is not None:
self.values[indexes] = values
done = False
max_steps = self.max_steps(n)
for _ in range(max_steps):
collisions = self._table[indexes] != _keys
if not collisions.any():
done = True
break
# next step: work only in `collisions`
step += UINT64(1)
_keys = _keys[collisions]
if values is not None:
values = values[collisions]
# TOOPTIMIZE re-use computed hashes
indexes = self._shifted_hash(_keys, step)
available = self._table[indexes] == self._empty
available_indexes = indexes[available]
self._table[available_indexes] = _keys[available]
if values is not None:
self.values[available_indexes] = values[available]
if not done:
raise RuntimeError(f'could not _set_initial within {max_steps} steps')
self.n_used = (self._table != self._empty).sum()
def _change_empty(self, new_keys):
# edge case: the empty value we set clashes with a new key
_uniq_keys = self._table[self._table != self._empty]
all_keys = numpy.r_[_uniq_keys, new_keys]
new_empty = self._choose_empty_value(all_keys, self._table.dtype)
self._table[self._table == self._empty] = new_empty
self._empty = new_empty
@classmethod
def _hash(cls, _keys, step):
raise NotImplementedError()
def _shifted_hash(self, _keys, step):
_hash = self._hash(_keys, step)
_hash >>= self.shift
return _hash
@classmethod
def _fibonacci_hash_uint64(cls, _keys, step, copy=True):
if copy:
_keys = _keys.copy()
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', r'overflow encountered in ulong(long)?_scalars')
_keys += STEP_MULT * step
_keys *= INV_PHI
return _keys
@classmethod
def _choose_empty_value(cls, _keys, dtype, empty=_DEFAULT):
raise NotImplementedError()
@classmethod
def _cast(cls, keys, cast_dtype, view_dtype):
if keys.dtype != cast_dtype:
keys = structured_cast(keys, cast_dtype)
if keys.dtype != view_dtype:
if not keys.dtype.hasobject and not view_dtype.hasobject:
keys = keys.view(view_dtype)
else:
# HACK! numpy doesn't allow views with object, so we use a workaround
# warning: SegFault if `keys.dtype` has offsets, but we clean in structured_cast
keys = numpy.ndarray(keys.shape, view_dtype, keys.data)
return keys
def _cast_back(self, keys):
if keys.dtype != self.cast_dtype:
if not keys.dtype.hasobject and not self.cast_dtype.hasobject:
keys = keys.view(self.cast_dtype)
else:
# HACK! numpy doesn't allow views with object, so we use a workaround
# warning: SegFault if `keys.dtype` has offsets, but we clean in structured_cast
keys = numpy.ndarray(keys.shape, self.cast_dtype, keys.data)
if keys.dtype != self.original_dtype:
keys = structured_cast(keys, self.original_dtype)
return keys
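# A standalone sketch of the Fibonacci hashing step mentioned in the _BaseHashmap docstring
# (illustration only, not used by the classes in this module; the key and table size are
# made-up values): multiply the 64-bit key by floor(2**64 / phi) and keep the top
# `log_size` bits as the slot index, which spreads consecutive keys across the table.
def _fibonacci_hash_example(key=123456789, log_size=8):
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')  # the uint64 multiplication is meant to wrap around
        slot = (UINT64(key) * INV_PHI) >> UINT64(64 - log_size)
    return int(slot)  # a slot index in a table of size 1 << log_size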
class UInt64Hashmap(_BaseHashmap):
"""
a mapping from uint64 to arbitrary values in a numpy array
consider using the higher-level ``Hashmap`` instead
"""
@classmethod
def new(cls, keys, values=None, cast_dtype=None, view_dtype=None, empty=_DEFAULT):
"""
:param array keys: (n,) uint64 array
:param array? values: (n,) val-dtype array
:param dtype? cast_dtype: dtype to cast ``keys`` (default: uint64)
:param dtype? view_dtype: dtype to view ``keys`` (default: uint64)
:param object? empty: empty value (default: row of 0)
"""
cast_dtype = cast_dtype or UINT64
view_dtype = view_dtype or UINT64
return super().new(keys, values, cast_dtype, view_dtype, empty)
@classmethod
def _hash(cls, _keys, step):
# (_keys << 21) ^ (_keys >> 33)
_keys_cpy = _keys << 21
_keys_cpy ^= _keys >> 33
return cls._fibonacci_hash_uint64(_keys_cpy, step, copy=False)
@classmethod
def _choose_empty_value(cls, _keys, dtype, empty=_DEFAULT):
# empty defined by user
if empty is not _DEFAULT:
return empty
# use zero if keys are strictly positive
zero = UINT64(0)
if zero not in _keys:
return zero
# otherwise pick a random number
while True:
empty = numpy.random.randint(
low=1<<62, high=1<<63, dtype='uint64')
if empty not in _keys:
return empty
class UInt64StructHashmap(_BaseHashmap):
"""
a mapping from uint64-struct to arbitrary values in a numpy array
can be used on any structured dtypes without ``'O'`` by using views
consider using the higher-level ``Hashmap`` instead
"""
# almost INV_PHI but different one (used in xxhash.c)
_PRIME_1 = UINT64(11400714785074694791)
_PRIME_2 = UINT64(14029467366897019727)
_PRIME_5 = UINT64(2870177450012600261)
@classmethod
def new(cls, keys, values=None, cast_dtype=None, view_dtype=None, empty=_DEFAULT):
"""
:param array keys: (n,) uint64-struct array
:param array? values: (n,) val-dtype array
:param dtype? cast_dtype: dtype to cast ``keys`` (default: uint64 for each field)
:param dtype? view_dtype: dtype to view ``keys`` (default: uint64 for each field)
:param object? empty: empty value (default: row of 0)
"""
cast_dtype = cast_dtype or [(name, 'uint64')
for name in keys.dtype.names]
view_dtype = view_dtype or [(name, 'uint64')
for name in keys.dtype.names]
return super().new(keys, values, cast_dtype, view_dtype, empty)
@classmethod
def _hash(cls, _keys, step):
""" use Python's algorithm for tuple to get consistent values """
n, = _keys.shape
n_cols = len(_keys.dtype)
acc = numpy.full(n, cls._PRIME_5)
buf = numpy.empty_like(acc)
for col in sorted(_keys.dtype.names):
# acc += _keys[col] * cls._PRIME_2
buf[:] = _keys[col]
buf *= cls._PRIME_2
acc += buf
# acc = (acc << 31) | (acc >> 33)
buf[:] = acc
buf >>= 33
acc <<= 31
acc |= buf
#
acc *= cls._PRIME_1
acc += UINT64(n_cols) ^ (cls._PRIME_5 ^ UINT64(3527539))
return cls._fibonacci_hash_uint64(acc, step, copy=False)
@classmethod
def _choose_empty_value(cls, _keys, dtype, empty=_DEFAULT):
# empty defined by user
if empty is not _DEFAULT:
return empty
# use zeros if keys are strictly positive
wrapper = numpy.zeros(1, dtype=dtype)
empty = wrapper[0]
if empty not in _keys:
return empty
# otherwise pick random numbers
d = len(dtype) or None
while True:
rdm = numpy.random.randint(low=1<<62, high=1<<63, size=d, dtype='uint64')
if d:
rdm = tuple(rdm)
wrapper[0] = rdm
empty = wrapper[0]
if empty not in _keys:
return empty
class ObjectHashmap(_BaseHashmap):
"""
a mapping from arbitrary keys to arbitrary values in a numpy array
internally uses python ``hash``, so hashes are not consistent across runs (not even for str or bytes)
consider using the higher-level ``Hashmap`` instead
"""
@classmethod
def new(cls, keys, values=None, cast_dtype=None, view_dtype=None, empty=_DEFAULT):
"""
:param array keys: (n,) object array
:param array? values: (n,) val-dtype array
:param dtype? cast_dtype: dtype to cast ``keys`` (default: keys.type)
:param dtype? view_dtype: dtype to view ``keys`` (default: keys.type)
:param object? empty: empty value (default: row of 0)
"""
cast_dtype = cast_dtype or keys.dtype
view_dtype = view_dtype or cast_dtype
return super().new(keys, values, cast_dtype, view_dtype, empty)
@classmethod
def _hash(cls, _keys, step):
n = _keys.shape[0]
hashes = numpy.fromiter((cls._hash_single_obj(obj) for obj in _keys),
count=n, dtype=UINT64)
return cls._fibonacci_hash_uint64(hashes, step, copy=False)
@classmethod
def _hash_single_obj(cls, obj):
try:
return hash(obj)
except TypeError:
# cast single numpy array to bytes
if isinstance(obj, numpy.ndarray):
return hash(obj.tobytes())
# cast all numpy arrays in tuple/void to bytes
if isinstance(obj, (tuple, numpy.void)):
obj_ = tuple((a.tobytes() if isinstance(a, numpy.ndarray) else a)
for a in tuple(obj))
return hash(obj_)
raise
@classmethod
def _choose_empty_value(cls, _keys, dtype, empty=_DEFAULT):
return UInt64StructHashmap._choose_empty_value(_keys, dtype, empty)
class BytesObjectHashmap(ObjectHashmap):
"""
hashmap for bytes-string keys encoded as object
internally uses xxhash or hashlib to get consistent hashes
consider using the higher-level ``Hashmap`` instead
"""
if _HAS_XXHASH:
@classmethod
def _hash_single_obj(cls, obj):
return xxhash.xxh3_64_intdigest(obj)
else:
@classmethod
def _hash_single_obj(cls, obj):
sha1 = hashlib.sha1()
sha1.update(obj)
return struct.unpack('<Q', sha1.digest()[:8])[0]
class StrObjectHashmap(BytesObjectHashmap):
"""
hashmap for unicode-string keys encoded as object
internally uses xxhash or hashlib to get consistent hashes
consider using the higher-level ``Hashmap`` instead
"""
@classmethod
def _hash_single_obj(cls, obj):
return super()._hash_single_obj(obj.encode(errors='ignore'))
class BytesObjectTupleHashmap(BytesObjectHashmap):
"""
hashmap keyed by tuples whose items are either non-object scalars or bytes/unicode strings, encoded as object
internally uses xxhash or hashlib to get consistent hashes
consider using the higher-level ``Hashmap`` instead
"""
@classmethod
def _hash_single_obj(cls, obj_tuple):
h = xxhash.xxh3_64() if _HAS_XXHASH else hashlib.sha1()
for obj in obj_tuple:
if isinstance(obj, str):
obj = obj.encode(errors='ignore')
h.update(obj)
return struct.unpack('<Q', h.digest()[:8])[0]
def Hashmap(keys, values=None):
"""
factory function that selects between the uint64/struct/object hashmaps based on the dtype of the arguments
:param array keys: (n,) key-dtype array
:param array? values: (n,) val-dtype array
"""
# switch type from keys
cls, cast_dtype, view_dtype = _get_optimal_cast(keys)
# build hashmap
return cls.new(keys, values, cast_dtype, view_dtype)
def _get_optimal_cast(keys, allow_object_hashmap=False):
"""
select best hashmap type to fit ``dtype``
:param array keys:
:param bool? allow_object_hashmap:
:returns: cls, cast_dtype, view_dtype
"""
dtype = keys.dtype
kind = dtype.kind
names = dtype.names
# scalar inputs (or strings of at most 8 bytes) can be viewed as uint64
if kind in 'buifcSUV' and dtype.itemsize <= 8 and not names:
if kind == 'b':
kind = 'u'
# how many units of `kind` we need to get 8 bytes, e.g. 2 for 'U'
inner_dtype_len = 8 // numpy.dtype(f'{kind}1').itemsize
cast_dtype = f'{kind}{inner_dtype_len}'
view_dtype = UINT64
return UInt64Hashmap, numpy.dtype(cast_dtype), numpy.dtype(view_dtype)
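# A hedged usage sketch of the ``Hashmap`` factory above, with made-up keys and values.
# uint64 keys take the UInt64Hashmap path; the other branches of _get_optimal_cast are not
# shown in this excerpt. The last line illustrates the trick used above of viewing short
# fixed-width strings as a single uint64.
if __name__ == '__main__':
    demo_keys = numpy.array([3, 1, 4, 1, 5], dtype='uint64')
    demo_values = numpy.array([30, 10, 40, 10, 50], dtype='int64')
    demo_map = Hashmap(demo_keys, demo_values)
    values, found = demo_map.get_many(numpy.array([1, 9], dtype='uint64'))
    print(values, found)  # the value for key 1 is found, key 9 is not
    # 'S8' strings occupy exactly 8 bytes, so they can be reinterpreted as uint64 keys
    print(numpy.array([b'abc', b'de'], dtype='S8').view(numpy.uint64))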
import numpy as np
import tensorflow as tf
import cv2
import tqdm
from network_sn import Network
import load
import random
IMAGE_SIZE = 128
LOCAL_SIZE = 64
HOLE_MIN = 24
HOLE_MAX = 48
LEARNING_RATE = 5e-4
BATCH_SIZE = 16
PRETRAIN_EPOCH = 100
HOGO = 100
BETA1 = 0.9
BETA2 = 0.999
RETAIN = True
def train():
val_g = 5000
x = tf.placeholder(
tf.float32, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3], name="x")
mask = tf.placeholder(
tf.float32, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 1], name="mask")
local_x = tf.placeholder(
tf.float32, [BATCH_SIZE, LOCAL_SIZE, LOCAL_SIZE, 3], name="local_x")
is_training = tf.placeholder(tf.bool, [], name="is_training")
alpha_G = tf.placeholder(tf.float32, name="alpha")
start_point = tf.placeholder(tf.int32, [BATCH_SIZE, 4], name="start_point")
model = Network(x, mask, local_x, is_training, batch_size=BATCH_SIZE,
alpha_G=alpha_G, start_point=start_point)
sess = tf.Session()
global_step = tf.Variable(0, name='global_step', trainable=False)
epoch = tf.Variable(0, name='epoch', trainable=False)
opt = tf.train.AdamOptimizer(
learning_rate=LEARNING_RATE, beta1=BETA1, beta2=BETA2)
g_train_op = opt.minimize(
model.g_loss, global_step=global_step, var_list=model.g_variables)
g_second_train_op = opt.minimize(
model.mixed_loss, global_step=global_step, var_list=model.g_variables)
d_train_op = opt.minimize(
model.d_loss, global_step=global_step, var_list=model.d_variables)
"""
dl_train_op = opt.minimize(
model.d_loss, global_step=global_step, var_list=model.dl_variables)
dg_train_op = opt.minimize(
model.d_loss, global_step=global_step, var_list=model.dg_variables)
dc_train_op = opt.minimize(
model.d_loss, global_step=global_step, var_list=model.dc_variables)
"""
epochlog = "./epoch_log.txt"
f = open(epochlog, "a")
if input("make new epoch_log? Y/N\n").strip() != "N":
print("make new")
f.close()
f = open(epochlog, "w")
itelog = "./ite_log.txt"
iteite = open(itelog, "a")
if input("make new ite_log? Y/N\n").strip() != "N":
print("make new")
iteite.close()
iteite = open(itelog, "w")
init_op = tf.global_variables_initializer()
sess.run(init_op)
if tf.train.get_checkpoint_state('./backup'):
saver = tf.train.Saver()
if RETAIN:
saver.restore(sess, './backup/latest')
else:
saver.restore(sess, './backup/pretrained')
x_train, x_test = load.load()
x_train = np.array([a / 127.5 - 1 for a in x_train])
x_test = np.array([a / 127.5 - 1 for a in x_test])
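# A small standalone sketch (separate from the training script above) of the normalization
# applied to x_train/x_test: uint8 pixels in [0, 255] are mapped to [-1, 1] with
# a / 127.5 - 1, and mapped back for visualization by inverting that affine transform.
def _normalize_demo():
    img = np.random.randint(0, 256, size=(IMAGE_SIZE, IMAGE_SIZE, 3), dtype=np.uint8)
    normed = img.astype(np.float32) / 127.5 - 1.0   # same mapping as above, range [-1, 1]
    restored = np.clip(np.round((normed + 1.0) * 127.5), 0, 255).astype(np.uint8)
    return np.array_equal(img, restored)            # True: the mapping round-trips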
"""
/*
* Copyright (C) 2019-2021 University of South Florida
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
import os
# Import dependencies
from collections import defaultdict
from pathlib import Path
import haversine as hs
import numpy as np
import pandas as pd
from haversine import Unit
from src.gt_merger import constants
from src.gt_merger.args import get_parser
from src.gt_merger.preprocess import preprocess_gt_data, preprocess_oba_data, is_valid_oba_dataframe, \
is_valid_gt_dataframe
# -------------------------------------------
def main():
# Verify if the OBA input file exists
if not os.path.isfile(command_line_args.obaFile):
print("OBA data file not found:", command_line_args.obaFile)
exit()
# Verify if GT input file exists
if not os.path.isfile(command_line_args.gtFile):
print("Ground truth data file not found:", command_line_args.gtFile)
exit()
# Verify if there is a list of devices
if command_line_args.deviceList:
# Verify if the list of devices file exists
if os.path.isfile(command_line_args.deviceList):
with open(command_line_args.deviceList) as f:
list_of_devices = f.readline().split(",")
list_of_devices = [s.strip() for s in list_of_devices]
else:
print("File with white list of devices not found:", command_line_args.deviceList)
exit()
else:
list_of_devices = []
# Verify if the data folder exists
if not os.path.isdir(command_line_args.outputDir):
print("Data folder not found, trying to create it in the current working directory:",
command_line_args.outputDir)
try:
os.makedirs(command_line_args.outputDir, exist_ok=True)
except OSError:
print("There was an error while creating the data folder:", command_line_args.outputDir)
exit()
# Create sub-folders for output an logs
path_logs = os.path.join(command_line_args.outputDir, constants.FOLDER_LOGS)
if not os.path.isdir(path_logs):
try:
os.mkdir(path_logs)
except OSError:
print("There was an error while creating the sub folder for logs:", path_logs)
exit()
path_output = os.path.join(command_line_args.outputDir, constants.FOLDER_MERGED_DATA)
if not os.path.isdir(path_output):
try:
os.mkdir(path_output)
except OSError:
print("There was an error while creating the sub-folder for output files:", path_logs)
exit()
# Create path OS independent for excel file
excel_path = Path(command_line_args.gtFile)
# Load ground truth data to a dataframe
gt_data = pd.read_excel(excel_path)
# Validate gt dataframe
if not is_valid_gt_dataframe(gt_data):
print("Ground truth data frame is empty or does not have the required columns.")
exit()
# Preprocess ground truth data
gt_data, data_gt_dropped = preprocess_gt_data(gt_data, command_line_args.removeStillMode)
print("Ground truth data preprocessed.")
# Save data to be dropped to a csv file
dropped_file_path = os.path.join(command_line_args.outputDir, constants.FOLDER_LOGS,
constants.GT_DROPPED_DATA_FILE_NAME)
data_gt_dropped.to_csv(path_or_buf=dropped_file_path, index=False)
# Create path OS independent for csv file
csv_path = Path(command_line_args.obaFile)
# Load OBA data
oba_data = pd.read_csv(csv_path)
# Validate oba dataframe
if not is_valid_oba_dataframe(oba_data):
print("OBA data frame is empty or does not have the required columns.")
exit()
# If a device whitelist was provided, keep only the listed devices
if list_of_devices:
oba_data = oba_data[oba_data["User ID"].isin(list_of_devices)]
# Preprocess OBA data
oba_data, data_csv_dropped = preprocess_oba_data(oba_data, command_line_args.minActivityDuration,
command_line_args.minTripLength, command_line_args.removeStillMode)
print("OBA data preprocessed.")
print(oba_data.info())
print(gt_data.info())
# Data preprocessing IS OVER
# Save oba dropped data to a csv file
dropped_file_path = os.path.join(command_line_args.outputDir, constants.FOLDER_LOGS,
constants.OBA_DROPPED_DATA_FILE_NAME)
data_csv_dropped.to_csv(path_or_buf=dropped_file_path, index=False)
if command_line_args.iterateOverTol:
first_tol = 30000
save_to_path = os.path.join(constants.FOLDER_MERGED_DATA, "batch")
else:
save_to_path = os.path.join(constants.FOLDER_MERGED_DATA)
first_tol = constants.TOLERANCE
for tol in range(first_tol, command_line_args.tolerance + 1, constants.CALCULATE_EVERY_N_SECS):
print("TOLERANCE:", str(tol))
# merge dataframes one to one or one to many according to the commandline parameter
if command_line_args.mergeOneToOne:
merged_data_frame, num_matches_df = merge(gt_data, oba_data, tol)
else:
merged_data_frame, num_matches_df, unmatched_oba_trips_df = merge_to_many(gt_data, oba_data, tol)
# Save unmatched oba records to csv
unmatched_file_path = os.path.join(command_line_args.outputDir, save_to_path,
"oba_records_without_match_on_GT.csv")
unmatched_oba_trips_df.to_csv(path_or_buf=unmatched_file_path, index=False)
# Calculate difference
merged_data_frame['Time_Difference'] = merged_data_frame.apply(
lambda x: (x['Activity Start Date and Time* (UTC)'] - x['GT_DateTimeOrigUTC_Backup']) / np.timedelta64(1, 's'), axis=1)
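# A hedged, generic illustration of time-tolerance matching. This is NOT the project's
# merge()/merge_to_many(), which are defined outside this excerpt; it only shows the kind
# of nearest-within-tolerance pairing the tolerance loop above iterates over, using
# pandas.merge_asof. Column names follow the line above; it assumes both timestamp columns
# are datetime64 and that the tolerance is given in milliseconds.
def _tolerance_match_example(gt_df, oba_df, tol_ms):
    gt_sorted = gt_df.sort_values('GT_DateTimeOrigUTC_Backup')
    oba_sorted = oba_df.sort_values('Activity Start Date and Time* (UTC)')
    return pd.merge_asof(gt_sorted, oba_sorted,
                         left_on='GT_DateTimeOrigUTC_Backup',
                         right_on='Activity Start Date and Time* (UTC)',
                         direction='nearest',
                         tolerance=pd.Timedelta(milliseconds=tol_ms))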
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 28 16:21:46 2021
@author: jiayingweng
"""
import numpy as np
import scipy.linalg as la
__all__ = ['generateX', 'generateY']
def generateX(n, p, covstr):
"""
Generate X for simulation
Args:
n (int): sample size
p (int): number of dimension of X
covstr (0-3): covariance structure
Returns:
X: n times p array
"""
## generate X
if covstr == 0:
covx = np.eye(p)
elif covstr == 1:
v = 0.5 ** np.arange(p)
covx = la.toeplitz(v)
elif covstr == 2:
offdiag = 0.2
covx = np.ones((p,p)) * offdiag
covx = covx + np.eye(p) * (1-offdiag)
elif covstr == 3:
v = 0.8 ** np.arange(p)
covx = la.toeplitz(v)
L = np.linalg.cholesky(covx)
Z = np.random.randn(p,n)
X = (L @ Z).T
return(X)
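# A quick usage sketch of generateX (sizes chosen arbitrarily for illustration): draw an
# n x p design matrix with the AR(1)-type covariance (covstr=1) and check that the sample
# covariance is close to the 0.5**|i-j| Toeplitz target used above.
def _generateX_example(n=2000, p=5):
    X = generateX(n, p, covstr=1)
    target = la.toeplitz(0.5 ** np.arange(p))
    sample_cov = np.cov(X, rowvar=False)
    return np.max(np.abs(sample_cov - target))  # small for large n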
def generateY(X, M):
"""
Generate Y based on X
Args:
X: input covariate
M: model 1-7 uni; 10-15 multi
Returns:
Y: outcome
d: structural dimension
p: the dimension of Y
b: the true beta
"""
[n,p] = X.shape
## generate Y
if M == 1: # Qian M1
d = 1
q = 1
b = np.zeros((p,d))
y = np.zeros((n,q))
index = np.arange(5)
b[index,:] = 1
y[:,0] = np.exp(X @ b[:,0]) + np.random.randn(n)
elif M == 2: # Qian M2
d = 2
q = 1
b = np.zeros((p,d))
y = np.zeros((n,q))
index1 = np.arange(4) #np.random.randint(p, size = 5)
index2 = np.arange(p-4,p)
b[index1,0] = 1
b[index2, 1] = 1
y[:,0] = np.sign(X @ b[:,0]) * np.log( np.abs( X @ b[:,1] + 5 ) ) + 0.2 * np.random.randn(n)
elif M == 3: # Tan AOS Model 1
d = 1
q = 1
b = np.zeros((p,d))
y = np.zeros((n,q))
index = np.arange(5)
b[index,:] = 1
y[:,0] = np.sin(X @ b[:,0]) ** 2 + X @ b[:,0] + np.random.randn(n)
elif M == 4: # Tan AOS Model 2
d = 1
q = 1
b = np.zeros((p,d))
y = np.zeros((n,q))
index = np.arange(5)
b[index,:] = 1
y[:,0] = 2 * np.tanh(X @ b[:,0]) + np.random.randn(n)
elif M == 5: # <NAME>
d = 1
q = 1
b = np.zeros((p,d))
index = np.arange(1)
b[index,:] = 1
X = 1/4 * np.sqrt(0.1) * ( np.random.randn(p,n) + 1) + 1/2 * np.sqrt(0.1) * ( np.random.randn(p,n) + 2 ) + 1/4 * np.sqrt(10) * (np.random.randn(p,n) + 1)
X = X.T
y = np.abs( np.sin( X @ b[:,0] ) ) + 0.2 * np.random.randn(n)
elif M == 6:
d = 2
q = 1
b = np.zeros((p,d))
import numpy as np
from numpy import pi
from numpy import tan,arctan,sin,cos
from VREP import vrep
from time import sleep
import signal
import time
import sys
import rospy
from std_msgs.msg import String, Float32, Header
from geometry_msgs.msg import Pose, Twist
from nav_msgs.msg import Odometry
import transforms3d
from KalmanFilters.EKF import ExtendedKalmanFilter
running=True
r=0.5*6.3407e-01
d=0.755
l=2.5772
desiredSteeringAngle=0
desiredSpeed=0/(r)
#State-Variables
odom=Odometry()
body_name='nakedAckermannSteeringCar'
joint_names = ['nakedCar_steeringLeft','nakedCar_steeringRight']
throttle_joint = ['nakedCar_motorLeft','nakedCar_motorRight']
Lr=1.2888
Lf=1.2884
deltaTime = 10./1000.
elapsedTime=0
def h(x):
return x
def f(x,u):
xDot=np.array(np.zeros((1,3)))
beta=arctan((Lr/(Lf+Lr))*tan(u[1]))
xDot[0,0]=u[0]*cos(x[2]+beta)
xDot[0,1]=u[0]*sin(x[2]+beta)
xDot[0,2]=(u[0]/Lr)*sin(beta)
return (x+ xDot*deltaTime)[0]
def jacobianF(x,u):
beta=arctan((Lr/(Lf+Lr))*tan(u[1]))
return np.array(np.eye(3))+np.array([[0,0,-u[0]*sin(x[2]+beta)*(u[0]/Lr)*sin(beta)],[0,0,u[0]*cos(x[2]+beta)*(u[0]/Lr)*sin(beta)],[0,0,0]])*deltaTime
def jacobianH(x):
return np.array([[1, 0, 0],[0, 1, 0],[0, 0, 1]])
F=jacobianF
H=jacobianH
P=np.zeros(3)
p_var=5e-3
o_var=5e-8
Q = np.eye(3)
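# A standalone sketch (illustration only; it reuses Lr, Lf and deltaTime from above, the
# state and control values are made up) of one Euler step of the kinematic bicycle model
# implemented in f(): beta = arctan(Lr/(Lf+Lr) * tan(steering)), the position advances
# along heading + beta, and the heading rate is v/Lr * sin(beta).
def _bicycle_step_example():
    state = np.array([0.0, 0.0, 0.0])    # x [m], y [m], heading [rad]
    control = np.array([2.0, 0.1])       # v [m/s], steering angle [rad]
    return f(state, control)             # state after one deltaTime step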
import pandas as pd
import QuantLib2 as ql
import numpy as np
import math
def timestamp_to_qldate(ts):
def _timestamp_to_qldate(t):
t = pd.Timestamp(t)
return ql.Date(t.day, t.month, t.year)
try:
return [_timestamp_to_qldate(t) for t in ts]
except TypeError:
return _timestamp_to_qldate(ts)
def annual_to_continuous(y):
    return math.log(1 + y)  # natural log of (1 + annual yield); the math module has no 'ln'
def ql_array(iterable):
A = ql.Array(len(iterable))
for i,b in enumerate(iterable):
A[i] = float(b)
return A
# Nelson, Svensson, Siegel yield curve using Quantlib Kappa substitution
_maturities = np.arange(0.0, 121.0/12.0, 1.0/12.0)
_maturities[0] = .001
def nss_yield(params):
[Beta0, Beta1, Beta2, Beta3, Kappa1, Kappa2] = [p for p in params]
return [Beta0 + Beta1 * (1 - np.exp(-n * Kappa1)) / (n * Kappa1) + \
Beta2 * ((1 - np.exp(-n * Kappa1))/(n * Kappa1) - np.exp(-n * Kappa1)) + \
Beta3 * ((1 - np.exp(-n * Kappa2))/(n * Kappa2) - np.exp(-n * Kappa2)) for n in _maturities]
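# A hedged usage sketch of the NSS curve above. The parameter values are illustrative
# assumptions (roughly a 3% long rate with a downward short-end tilt), not calibrated numbers.
def _nss_example():
    params = [0.03, -0.01, 0.02, 0.005, 1.5, 0.3]  # Beta0, Beta1, Beta2, Beta3, Kappa1, Kappa2
    curve = nss_yield(params)
    return list(zip(_maturities[:3], curve[:3]))   # (maturity in years, zero yield) pairs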
def par_yield_semiannual(yc, mat_date, debug=False):
dc = ql.SimpleDayCounter() # We use a simple day counter for these theoretical rate constructions.
calc_date = yc.referenceDate()
if dc.yearFraction(calc_date, mat_date) < 1/30:
return yc.zeroRate(mat_date, yc.dayCounter(), ql.Annual).rate()
schedule = ql.Schedule(calc_date,
mat_date,
ql.Period(6, ql.Months),
ql.NullCalendar(),
ql.Unadjusted,
ql.Unadjusted,
ql.DateGeneration.Backward, False)
discounts = np.array([yc.discount(d) for d in schedule])
if schedule[0] != calc_date: # schedule[0] is just today so we won't use it
print("Schedule[0] wasn't today!")
first_coupon_date = schedule[1]
discounts = discounts[1:]
accrfrac = (1 - dc.yearFraction(calc_date, first_coupon_date) * 2.0)
par_yield = 2 * (1 - discounts[-1]) / (np.sum(discounts))
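# A standalone sketch of the par-yield identity evaluated above (illustration only, with
# made-up discount factors): the semi-annual par coupon c solves
# (c / 2) * sum(DF_i) + DF_n = 1, hence c = 2 * (1 - DF_n) / sum(DF_i).
def _par_yield_identity_example():
    dfs = np.array([0.99, 0.975, 0.958, 0.94])    # hypothetical semi-annual discount factors
    c = 2.0 * (1.0 - dfs[-1]) / np.sum(dfs)
    price = (c / 2.0) * np.sum(dfs) + dfs[-1]     # a bond paying coupon c reprices to par
    return c, price                               # price == 1.0 up to float rounding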
# -*- coding: utf-8 -*-
"""
Function for parameter estimation.
"""
import numpy as np
import pandas as pd
def param_est(self, df, z_msl=500, lat=-43.6, lon=172, TZ_lon=173, z_u=2, time_int='days', K_rs=0.16, a_s=0.25, b_s=0.5, alb=0.23):
"""
Function to estimate the parameters necessary to calculate reference ET (ETo) from the `FAO 56 paper <http://www.fao.org/docrep/X0490E/X0490E00.htm>`_ [1]_ using a minimum of T_min and T_max for daily estimates and T_mean and RH_mean for hourly, but optionally utilising the maximum number of available met parameters. The function prioritizes the estimation of specific parameters based on the available input data.
Parameters
----------
df : DataFrame
Input Metereological data (see Notes section).
z_msl : float or int
Elevation of the met station above mean sea level (m) (only needed if P is not in df).
lat : float or int
The latitude of the met station (dec deg) (only needed if R_s or R_n are not in df).
lon : float or int
The longitude of the met station (dec deg) (only needed if calculating ETo hourly)
TZ_lon : float or int
The longitude of the center of the time zone (dec deg) (only needed if calculating ETo hourly).
z_u : float or int
The height of the wind speed measurement (m).
time_int : str
The time interval of the input and output (either 'days' or 'hours').
K_rs : float
Rs calc coefficient (0.16 for inland stations, 0.19 for coastal stations)
a_s : float
Rs calc coefficient
b_s : float
Rs calc coefficient
alb : float
Albedo (should be fixed for the reference crop)
Returns
-------
DataFrame
Notes
--------
The input data must be a DataFrame whose column names follow the met parameter names below. At a minimum it must contain T_min and T_max for daily estimates, or T_mean and RH_mean for hourly estimates, and it can contain any/all of the following:
R_n
Net radiation (MJ/m2)
R_s
Incoming shortwave radiation (MJ/m2)
G
Net soil heat flux (MJ/m2)
T_min
Minimum Temperature (deg C)
T_max
Maximum Temperature (deg C)
T_mean
Mean Temperature (deg C)
T_dew
Dew point temperature (deg C)
RH_min
Minimum relative humidity
RH_max
Maximum relative humidity
RH_mean
Mean relative humidity
n_sun
Number of sunshine hours per day
U_z
Wind speed at height z (m/s)
P
Atmospheric pressure (kPa)
e_a
Actual Vapour pressure derrived from RH
Parameter estimation values indicate the quality of the inputs to the ETo equations: a 0 (or nothing) means no parameter estimation was necessary (all measurement data was available), a 1 means the parameter was estimated from the best available inputs, and values up to 3 indicate progressively worse estimations. Reading the digits from the right, they refer to U_z, G, R_n, R_s, e_a, T_mean, and P, respectively.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (1998). Crop evapotranspiration-Guidelines for computing crop water requirements-FAO Irrigation and drainage paper 56. FAO, Rome, 300(9), D05109.
"""
met_names = np.array(['R_n', 'R_s', 'G', 'T_min', 'T_max', 'T_mean', 'T_dew', 'RH_min', 'RH_max', 'RH_mean', 'n_sun', 'U_z', 'P', 'e_a'])
if time_int == 'days':
self.time_int = 'D'
elif time_int == 'hours':
self.time_int = 'H'
####################################
##### Set up the DataFrame and estimated values series
new_cols = met_names[~np.in1d(met_names, df.columns)]
new_df = pd.DataFrame(np.nan, index=df.index, columns=new_cols)
self.ts_param = pd.concat([df, new_df], axis=1).copy()
self.est_val = pd.Series(0, index=self.ts_param.index, name='est_val')
####################################
###### Calculations
######
### Time index
if type(df.index) is not pd.DatetimeIndex:
raise ValueError('DataFrame must have a datetime index!')
# Create the Day of the year vector
Day = df.index.dayofyear
######
## Atmospheric components
# Air Pressure
self.est_val.loc[self.ts_param['P'].isnull()] = self.est_val.loc[self.ts_param['P'].isnull()] + 1000000
self.ts_param.loc[self.ts_param['P'].isnull(), 'P'] = 101.3*((293 - 0.0065*z_msl)/293)**5.26
# Psychrometric constant
self.ts_param['gamma'] = (0.665*10**-3)*self.ts_param['P']
######
## Temperature and humidity components
self.est_val.loc[self.ts_param['T_mean'].isnull()] = self.est_val.loc[self.ts_param['T_mean'].isnull()] + 100000
self.ts_param.loc[self.ts_param['T_mean'].isnull(), 'T_mean'] = (self.ts_param.loc[self.ts_param['T_mean'].isnull(), 'T_max'] + self.ts_param.loc[self.ts_param['T_mean'].isnull(), 'T_min'])/2
## Vapor pressures
if time_int == 'days':
self.ts_param['e_max'] = 0.6108*np.exp(17.27*self.ts_param['T_max']/(self.ts_param['T_max']+237.3))
self.ts_param['e_min'] = 0.6108*np.exp(17.27*self.ts_param['T_min']/(self.ts_param['T_min']+237.3))
self.ts_param['e_s'] = (self.ts_param['e_max']+self.ts_param['e_min'])/2
self.ts_param['delta'] = 4098*(0.6108*np.exp(17.27*self.ts_param['T_mean']/(self.ts_param['T_mean'] + 237.3)))/((self.ts_param['T_mean'] + 237.3)**2)
# e_a if dewpoint temperature is known
self.ts_param.loc[self.ts_param['e_a'].isnull(), 'e_a'] = 0.6108*np.exp(17.27*self.ts_param.loc[self.ts_param['e_a'].isnull(), 'T_dew']/(self.ts_param.loc[self.ts_param['e_a'].isnull(), 'T_dew'] + 237.3))
# e_a if min and max temperatures and humidities are known
self.est_val.loc[self.ts_param['T_dew'].isnull()] = self.est_val.loc[self.ts_param['T_dew'].isnull()] + 10000
self.ts_param['e_a'].loc[self.ts_param['e_a'].isnull()] = (self.ts_param['e_min'][self.ts_param['e_a'].isnull()] * self.ts_param.loc[self.ts_param['e_a'].isnull(), 'RH_max']/100 + self.ts_param['e_max'][self.ts_param['e_a'].isnull()] * self.ts_param.loc[self.ts_param['e_a'].isnull(), 'RH_min']/100)/2
# self.ts_param['e_a'] if only mean humidity is known
self.est_val.loc[self.ts_param['e_a'].isnull()] = self.est_val.loc[self.ts_param['e_a'].isnull()] + 10000
self.ts_param['e_a'].loc[self.ts_param['e_a'].isnull()] = self.ts_param.loc[self.ts_param['e_a'].isnull(), 'RH_mean']/100*(self.ts_param['e_max'][self.ts_param['e_a'].isnull()] + self.ts_param['e_min'][self.ts_param['e_a'].isnull()])/2
# e_a if humidity is not known
self.est_val.loc[self.ts_param['e_a'].isnull()] = self.est_val.loc[self.ts_param['e_a'].isnull()] + 10000
self.ts_param['e_a'].loc[self.ts_param['e_a'].isnull()] = 0.6108*np.exp(17.27*self.ts_param.loc[self.ts_param['e_a'].isnull(), 'T_min']/(self.ts_param.loc[self.ts_param['e_a'].isnull(), 'T_min'] + 237.3))
elif time_int == 'hours':
self.ts_param['e_mean'] = 0.6108*np.exp(17.27*self.ts_param['T_mean']/(self.ts_param['T_mean']+237.3))
self.ts_param.loc[self.ts_param['e_a'].isnull(), 'e_a'] = self.ts_param.loc[self.ts_param['e_a'].isnull(), 'e_mean']*self.ts_param.loc[self.ts_param['e_a'].isnull(), 'RH_mean']/100
else:
raise ValueError('time_int must be either days or hours.')
######
## Raditation components
# R_a
phi = lat*np.pi/180
delta = 0.409*np.sin(2*np.pi*Day/365-1.39)
d_r = 1+0.033*np.cos(2*np.pi*Day/365)
w_s = np.arccos(-np.tan(phi)*np.tan(delta))
if time_int == 'days':
self.ts_param['R_a'] = 24*60/np.pi*0.082*d_r*(w_s*np.sin(phi)*np.sin(delta) + np.cos(phi)*np.cos(delta)*np.sin(w_s))
elif time_int == 'hours':
hour_vec = df.index.hour
b = (2*np.pi*(Day - 81))/364
S_c = 0.1645*np.sin(2*b) - 0.1255*np.cos(b) - 0.025*np.sin(b)
w = np.pi/12*(((hour_vec+0.5) + 0.6666667*(TZ_lon - lon) + S_c) - 12)
w_1 = w - (np.pi*hour_vec)/24
w_2 = w + (np.pi*hour_vec)/24
self.ts_param['R_a'] = 12*60/np.pi*0.082*d_r*((w_2 - w_1)*np.sin(phi)*np.sin(delta) + np.cos(phi)*np.cos(delta)*(np.sin(w_2) - np.sin(w_1)))
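# A standalone numeric sketch of two FAO-56 building blocks used above (illustrative
# inputs: T = 20 degC and z_msl = 500 m): the saturation vapour pressure e0(T) and the
# psychrometric constant gamma derived from the pressure-for-elevation formula.
def _fao56_building_blocks_example(T=20.0, z_msl=500.0):
    e0 = 0.6108 * np.exp(17.27 * T / (T + 237.3))       # kPa, same formula as e_max/e_min above
    P = 101.3 * ((293 - 0.0065 * z_msl) / 293) ** 5.26  # kPa, as in the pressure fallback above
    gamma = 0.665e-3 * P                                # kPa per degC
    return e0, P, gamma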
import os
import tempfile
import numpy as np
import scipy.ndimage.measurements as meas
from functools import reduce
import warnings
import sys
sys.path.append(os.path.abspath(r'../lib'))
import NumCppPy as NumCpp # noqa E402
####################################################################################
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
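# A quick illustrative check of the helper above (not one of the NumCpp tests): factors()
# collects each divisor pair (i, n // i) for i up to sqrt(n).
def _factors_example():
    assert factors(12) == {1, 2, 3, 4, 6, 12}
    return factors(28)  # {1, 2, 4, 7, 14, 28}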
####################################################################################
def test_seed():
np.random.seed(1)
####################################################################################
def test_abs():
randValue = np.random.randint(-100, -1, [1, ]).astype(np.double).item()
assert NumCpp.absScaler(randValue) == np.abs(randValue)
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.absScaler(value), 9) == np.round(np.abs(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.absArray(cArray), np.abs(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.absArray(cArray), 9), np.round(np.abs(data), 9))
####################################################################################
def test_add():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
####################################################################################
def test_alen():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.alen(cArray) == shape.rows
####################################################################################
def test_all():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
####################################################################################
def test_allclose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
tolerance = 1e-5
data1 = np.random.randn(shape.rows, shape.cols)
data2 = data1 + tolerance / 10
data3 = data1 + 1
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
assert NumCpp.allclose(cArray1, cArray2, tolerance) and not NumCpp.allclose(cArray1, cArray3, tolerance)
####################################################################################
def test_amax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
####################################################################################
def test_amin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
####################################################################################
def test_angle():
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.angleScaler(value), 9) == np.round(np.angle(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.angleArray(cArray), 9), np.round(np.angle(data), 9))
####################################################################################
def test_any():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
####################################################################################
def test_append():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.append(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
numRows = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + numRows, shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.append(data1, data2, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
    numCols = np.random.randint(1, 100, [1, ]).item()
    shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + numCols)
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.append(data1, data2, axis=1))
####################################################################################
def test_arange():
start = np.random.randn(1).item()
stop = np.random.randn(1).item() * 100
step = np.abs(np.random.randn(1).item())
if stop < start:
step *= -1
data = np.arange(start, stop, step)
assert np.array_equal(np.round(NumCpp.arange(start, stop, step).flatten(), 9), np.round(data, 9))
####################################################################################
def test_arccos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
####################################################################################
def test_arccosh():
value = np.abs(np.random.rand(1).item()) + 1
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) + 1
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
####################################################################################
def test_arcsin():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
####################################################################################
def test_arcsinh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
####################################################################################
def test_arctan():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
####################################################################################
def test_arctan2():
xy = np.random.rand(2) * 2 - 1
assert np.round(NumCpp.arctan2Scaler(xy[1], xy[0]), 9) == np.round(np.arctan2(xy[1], xy[0]), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayX = NumCpp.NdArray(shape)
cArrayY = NumCpp.NdArray(shape)
xy = np.random.rand(*shapeInput, 2) * 2 - 1
xData = xy[:, :, 0].reshape(shapeInput)
yData = xy[:, :, 1].reshape(shapeInput)
cArrayX.setArray(xData)
cArrayY.setArray(yData)
assert np.array_equal(np.round(NumCpp.arctan2Array(cArrayY, cArrayX), 9), np.round(np.arctan2(yData, xData), 9))
####################################################################################
def test_arctanh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
####################################################################################
def test_argmax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
####################################################################################
def test_argmin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
####################################################################################
def test_argsort():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]): # noqa
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]):
allPass = False
break
assert allPass
####################################################################################
def test_argwhere():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
####################################################################################
def test_around():
value = np.abs(np.random.rand(1).item()) * np.random.randint(1, 10, [1, ]).item()
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert NumCpp.aroundScaler(value, numDecimalsRound) == np.round(value, numDecimalsRound)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert np.array_equal(NumCpp.aroundArray(cArray, numDecimalsRound), np.round(data, numDecimalsRound))
####################################################################################
def test_array_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, shapeInput)
data2 = np.random.randint(1, 100, shapeInput)
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
cArray3 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
####################################################################################
def test_array_equiv():
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
data1 = np.random.randint(1, 100, shapeInput1)
data3 = np.random.randint(1, 100, shapeInput3)
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
cArray3 = NumCpp.NdArrayComplexDouble(shape3)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
imag3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data3 = real3 + 1j * imag3
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
####################################################################################
def test_asarray():
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
####################################################################################
def test_astype():
    shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToUint32(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.uint32))
assert cArrayCast.dtype == np.uint32
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex128))
assert cArrayCast.dtype == np.complex128
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex64))
assert cArrayCast.dtype == np.complex64
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToDouble(cArray).getNumpyArray()
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=np.ComplexWarning)
        assert np.array_equal(cArrayCast, data.astype(np.double))
assert cArrayCast.dtype == np.double
####################################################################################
def test_average():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
####################################################################################
def test_averageWeighted():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
    cWeights = NumCpp.NdArray(1, shape.rows)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
    cWeights = NumCpp.NdArray(1, shape.rows)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
    imag = np.random.randint(1, 100, [shape.rows, shape.cols])
# Implement code for Locality Sensitive Hashing here!
import numpy as np
from collections import defaultdict, Counter
import utils
def gen_hash(length, bucket_width=None, hashing_type='min'):
if hashing_type == 'min':
mapper = min_hash_mapper(length)
return gen_min_hash(length, mapper)
elif hashing_type == 'hamming':
return gen_hamming_hash(length)
elif hashing_type == 'e2lsh':
assert bucket_width is not None, "E2LSH hash requires a bucket width"
return gen_e2lsh_hash(length, bucket_width=bucket_width)
def gen_hamming_hash(length):
c = np.random.choice(length, 1)[0]
return lambda x: x[c]
def gen_hash_band(r, length, bucket_width=None, hashing_type='min'):
b = [gen_hash(length, hashing_type=hashing_type, bucket_width=bucket_width) for _ in range(r)]
return lambda x: [f(x) for f in b]
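# Example (assumed usage, not part of the original module): band binary (or
# discrete) feature vectors with b bands of r hamming hashes each; items that
# share a bucket in any band become candidate near-duplicates. The helper name
# lsh_candidates is hypothetical.
def lsh_candidates(vectors, b=20, r=5):
    length = len(vectors[0])
    buckets = defaultdict(set)
    for band in range(b):
        h = gen_hash_band(r, length, hashing_type='hamming')
        for i, v in enumerate(vectors):
            buckets[(band, tuple(h(v)))].add(i)
    return buckets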
def min_hash_mapper(length):
    return np.random.choice(int(np.log2(length))**2, length)
def gen_min_hash(length, mapper):
    order = np.arange(length)
import math
import numpy as np
from pandas import DataFrame
from sklearn.base import BaseEstimator, TransformerMixin
'''
Add new feature - distance to the nearest city
'''
class CityDistance(BaseEstimator, TransformerMixin):
r = 6371
def __init__(self, cities: DataFrame):
self.cities = cities
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
d_lat_1 = X['latitude']
d_lon_1 = X['longitude']
X['dist_city'] = 0
for row in self.cities.itertuples():
d_lat_2 = row.latitude
d_lon_2 = row.longitude
d_lat = to_rad(np.subtract(d_lat_2, d_lat_1))
d_lon = to_rad(np.subtract(d_lon_2, d_lon_1))
a = np.sin(d_lat / 2) * np.sin(d_lat / 2) + \
np.cos(to_rad(d_lat_1)) * np.cos(to_rad(d_lat_2)) * \
np.sin(d_lon / 2) * np.sin(d_lon / 2)
            c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
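# NOTE: to_rad is referenced above but is not part of this excerpt; a minimal
# degree-to-radian helper consistent with the haversine computation would be:
def to_rad(deg):
    # assumed helper, not from the original file: convert degrees to radians
    return np.deg2rad(deg)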
# evaluate dynamic classifier selection DCS-LA using local class accuracy
from numpy import mean
from numpy import std
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from deslib.dcs.lca import LCA
# define dataset
X, y = make_classification(n_samples=10000, n_features=20, n_informative=15, n_redundant=5, random_state=7)
# define the model
model = LCA()  # uses the default neighborhood size k; see the LCA class for the default value
# define the evaluation procedure
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
# evaluate the model
n_scores = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1)
# report performance
print('Mean Accuracy: %.3f (%.3f)' % (mean(n_scores), std(n_scores)))
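# Example follow-up (assumed usage, not from the original snippet): fit the
# selector on all data and classify a single new observation.
model.fit(X, y)
row = X[0].tolist()
yhat = model.predict([row])
print('Predicted Class: %d' % yhat[0])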
#python code for 2D LDC implementation
# python package dependencies
import math
import numpy as np
import matplotlib.pyplot as plt
import time
def ldc2D(Re=100,N=101, Num_ts = 50000, omega = 1.8, visTs=10000):
# density and kinematic viscosity of glycol
rho_p = 965.3 # kg/m^3
nu_p = 0.06/rho_p
# set Geometry
Lx = 1.0 # cavity length in the x-direction
Ly = 1.0 # cavity length in the y-direction
# Based on the Reynolds number, characteristic length and fluid viscosity,
# compute the corresponding lid velocity:
L_o = Ly # characteristic length
V_o = Re*nu_p/L_o # corresponding characteristic velocity
T_o = L_o/V_o
# convert to dimensionless units
T_d = 1.0
L_d = 1.0
U_d = (T_o/L_o)*V_o
nu_d = 1./float(Re)
# convert to LBM units
dx = 1./(float(N) - 1.)
dt = (dx**2.)*float(Re)*((1./3.)*((1./omega)-0.5))
u_lbm = (dt/dx)*U_d
# get conversion factors from lattice units to physical units
u_conv_fact = (dx/dt)*(L_o/T_o) #multiply LBM velocity by u_conv_fact to get physical velocity
t_conv_fact = (dt*T_o) # multiply LBM time step by t_conv_fact to get physical time
l_conv_fact = dx*L_o # multiply LBM lattice location by l_conv_fact to get physical distance
p_conv_fact = ((l_conv_fact/t_conv_fact)**2.)*(1./3.) # multiply LBM density by p_conv_fact to get pressure
rho_lbm = rho_p # not prescribed, but I have never had a problem with doing it this way.
    print(f'Lid velocity = {V_o:5.4f} m/sec')
    print(f'LBM flow Mach number = {u_lbm:5.4f}')
    print(f'Physical time simulated = {t_conv_fact*float(Num_ts):6.3f} seconds')
numSpd = 9
Ny = int(math.ceil((Ly/L_o)*(float(N))))
Nx = int(math.ceil((Lx/L_o)*float(N)))
nnodes = Nx*Ny
# need to identify which lattice points are on stationary and moving boundaries - to do so, we
# define the geometry:
x_left = 0.; x_right = x_left + Lx
y_bottom = 0.; y_top = y_bottom + Ly
x_space = np.linspace(x_left,x_right,Nx,dtype=np.float64)
y_space = np.linspace(y_bottom,y_top,Ny,dtype=np.float64)
xx,yy = np.meshgrid(x_space,y_space)
gcoord = np.zeros((2,nnodes))
gcoord[0][:] = np.reshape(xx,nnodes) # x-coordinate values
gcoord[1][:] = np.reshape(yy,nnodes) # y-coordinate values
# find which node numbers are along the line y == 0
bottom_nodes = np.argwhere(gcoord[1][:]<dx/2.).flatten()
left_nodes = np.argwhere(gcoord[0][:]<dx/2.).flatten()
right_nodes = np.argwhere(gcoord[0][:]>(Lx - dx/2.)).flatten()
moving_nodes = np.argwhere(gcoord[1][:]>(Ly - dx/2.)).flatten()
solid_nodes = np.unique(np.concatenate((bottom_nodes,left_nodes,right_nodes)))
# removes any previously identified solid nodes from the moving node list.
moving_nodes = np.setxor1d(moving_nodes,np.intersect1d(moving_nodes,solid_nodes))
# check against other code
node_type = np.zeros(nnodes).astype(np.int32)
node_type[solid_nodes]=1
node_type[moving_nodes]=2
#print str(node_type)
# initialize data structures to hold the density distribution functions
fIn = np.ones((numSpd,nnodes),dtype=np.float64)
w = np.array([[4./9.], [1./9.],[1./9.],[1./9.],[1./9.],
[1./36.],[1./36.],[1./36.],[1./36.]])
for spd in range(numSpd):
fIn[spd][:]*=rho_lbm*w[spd]
fOut = np.copy(fIn)
ex = np.array([0.,1.,0.,-1.,0.,1.,-1.,-1.,1.])
ey = np.array([0.,0.,1.,0.,-1.,1.,1.,-1.,-1.])
bb_spd = [0,3,4,1,2,7,8,5,6];
fEq = np.zeros_like(fIn)
snl = solid_nodes
vnl = moving_nodes
u = u_lbm
# make a stream target matrix - incorporate periodicity to the lattice here.
    stm = np.zeros((numSpd,nnodes),dtype=np.int32)
ind = np.arange(nnodes)
ind = np.reshape(ind,(Ny,Nx))
for spd in range(numSpd):
tInd = np.roll(ind,-int(ex[spd]),axis=1)
tInd = np.roll(tInd,-int(ey[spd]),axis=0)
tInd = np.reshape(tInd,(1,nnodes))
stm[spd][:] = tInd
# commence time stepping
t0 = time.time()
for t in range(Num_ts):
if t%50 == 0:
            print(f'Commencing time step {t}')
# compute density
rho = np.sum(fIn,axis=0)
# compute velocity
ux = np.dot(np.transpose(ex),fIn)/rho
uy = np.dot(np.transpose(ey),fIn)/rho
# set microscopic Dirichlet-type BC
        ux_t = ux[vnl]; uy_t = uy[vnl]
        dux = u - ux_t; duy = 0. - uy_t  # lid velocity deficit (avoids shadowing the lattice spacing dx)
        for spd in range(1, numSpd):
            cu = 3.*(ex[spd]*dux + ey[spd]*duy)
            fIn[spd][vnl] += w[spd]*(rho[vnl]*cu)
# set macroscopic Dirichlet-type boundary conditions
ux[snl]=0.; uy[snl]=0.
ux[vnl]=u; uy[vnl]=0.
# compute Equilibrium density distribution
fEq = np.zeros_like(fIn)
for spd in range(numSpd):
cu = 3.*(ex[spd]*ux + ey[spd]*uy)
fEq[spd][:]=w[spd]*rho*(1.+cu + (0.5)*cu*cu -
(3./2.)*(ux*ux + uy*uy))
# collide
fOut = fIn - (fIn - fEq)*omega
# bounce-back the solid nodes
for spd in range(numSpd):
fOut[spd][snl]=fIn[bb_spd[spd]][snl]
# stream
for spd in range(numSpd):
fIn[spd][stm[spd][:]]=fOut[spd][:]
# write fIn data out after time step
#np.save('step1_gold',fIn)
if t%visTs==0 and t>0:
# do some visualization of the output
uMag = np.sqrt(ux*ux + uy*uy)
fig,(ax1,ax2) = plt.subplots(nrows=2,figsize=(6,10))
ax1.imshow(np.reshape(uMag,(Ny,Nx)),extent=[0,Nx,0,Ny])
ax1.set_title('Velocity')
pressure = rho*p_conv_fact
pressure -= pressure[int(nnodes/2)] # make pressure relative
ax2.imshow(np.reshape(pressure,(Ny,Nx)),extent=[0,Nx,0,Ny])
ax2.set_title('Density')
plt.tight_layout()
plt.show()
t1 = time.time();
elapsedTime = t1 - t0;
LPU = float(nnodes)*float(Num_ts)
LPU_sec = LPU/float(elapsedTime)
    print(f'Lattice point updates per second = {LPU_sec:g}.')
uMag = np.sqrt(ux*ux + uy*uy)
fig,(ax1,ax2) = plt.subplots(nrows=2,figsize=(6,10))
ax1.imshow(np.reshape(uMag,(Ny,Nx)),extent=[0,Nx,0,Ny])
ax1.set_title('Velocity')
    ax2.imshow(np.reshape(rho,(Ny,Nx)),extent=[0,Nx,0,Ny])
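# Example driver (assumed usage; not part of the original excerpt): a short,
# coarse run to sanity-check the solver before a full-resolution simulation.
if __name__ == "__main__":
    ldc2D(Re=100, N=51, Num_ts=2000, omega=1.8, visTs=1000)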
from ...utils import gradient_diffusion
import numpy as np
import skimage.morphology as mp
from skimage import measure as ms
def gvf_tracking(I, Mask, K=1000, Diffusions=10, Mu=5, Lambda=5, Iterations=10,
dT=0.05):
"""
Performs gradient-field tracking to segment smoothed images of cell nuclei.
Takes as input a smoothed intensity or Laplacian-of-Gaussian filtered image
and a foreground mask, and groups pixels by tracking them to mutual
gradient sinks. Typically requires merging of sinks (seeds) as a post
processing steps.
Parameters
----------
I : array_like
Smoothed intensity or log-filtered response where nuclei regions have
larger intensity values than background.
Mask : array_like
Binary mask where foreground objects have value 1, and background
objects have value 0. Used to restrict influence of background vectors
on diffusion process and to reduce tracking computations.
K : float
Number of steps to check for tracking cycle. Default value = 1000.
Mu : float
        Weight parameter from Navier-Stokes diffusion - weights divergence and
Laplacian terms. Default value = 5.
Lambda : float
Weight parameter from Navier-Stokes diffusion - used to weight
divergence. Default value = 5.
Iterations : float
Number of time-steps to use in Navier-Stokes diffusion. Default value =
10.
dT : float
Timestep to be used in Navier-Stokes diffusion. Default value = 0.05.
Returns
-------
Segmentation : array_like
Label image where positive values correspond to foreground pixels that
share mutual sinks.
Sinks : array_like
N x 2 array containing the (x,y) locations of the tracking sinks. Each
row is an (x,y) pair - in that order.
See Also
--------
histomicstk.utils.gradient_diffusion,
histomicstk.segmentation.label.shuffle
References
----------
.. [#] G. Li et al "3D cell nuclei segmentation based on gradient flow
tracking" in BMC Cell Biology,vol.40,no.8, 2007.
"""
# get image shape
M = I.shape[0]
N = I.shape[1]
# calculate gradient
    dy, dx = np.gradient(I)
# -*- coding: utf-8 -*-
# Compatible with Python 3.8
# Copyright (C) 2020-2021 <NAME>
# mailto: <EMAIL>
r"""Orca related routines."""
from time import time
import warnings
import numpy as np
from scipy.constants import physical_constants, c, hbar, epsilon_0, mu_0
from scipy.sparse import spdiags
from scipy.sparse import eye as sp_eye
from matplotlib import pyplot as plt
from sympy import oo
from orca_memories.misc import (time_bandwith_product,
vapour_number_density, rayleigh_range,
ffftfreq, iffftfft, interpolator, sinc,
hermite_gauss, num_integral, build_Z_mesh,
build_t_mesh, build_mesh_fdm, harmonic,
rel_error, glo_error, get_range)
from orca_memories.fdm import (derivative_operator,
fdm_derivative_operators, bfmt, bfmtf,
set_block,
solve_fdm)
from orca_memories.graphical import plot_solution
def set_parameters_ladder(custom_parameters=None, fitted_couplings=True,
calculate_atom=False):
r"""Set the parameters for a ladder memory.
Only completely independent parameters are taken from settings.py.
The rest are derived from them.
"""
#########################################################################
# We set the default values of independent parameters
if True:
ignore_lower_f = False; ignore_lower_f = True
verbose = 1
a0 = physical_constants["Bohr radius"][0]
e_charge = physical_constants["elementary charge"][0]
kB = physical_constants["Boltzmann constant"][0]
# The number of time steps Nt, and the number of z points Nz.
Nt = 1020
Nz = 50
# The number of velocity groups to consider (better an odd number)
Nv = 1
# The number of standard deviations to consider on either side
# of the velocity distribution.
Nsigma = 4
# The data for the time discretization.
# The total time of the simulation (in s).
T = 8e-9
# T = 16e-9
# The time step.
# dt = T/(Nt-1)
# The data for the spacial discretization.
# Cell length (in m).
L = 0.072
######################
# The temperature of the cell.
Temperature = 90.0 + 273.15
################################################
# The characteristics of the beams:
# The waists of the beams (in m):
w1 = 280e-6
w2 = 320e-6
# The full widths at half maximum of the gaussian envelope of
# the powers spectra (in Hz).
sigma_power1 = 1.0e9
sigma_power2 = 1.0e9
sigma_power1 = 0.807222536902e9
sigma_power2 = 0.883494520871e9
# This corresponds to 300 ps.
sigma_power1 = 1.47090400101768e9
sigma_power2 = 1.47090400101768e9
# The time of arrival of the beams
t0s = 1.1801245283489222e-09
t0w = t0s
t0r = t0w + 3.5e-9
wr_ratio = 1.0
# t_cutoff = t0r+D/2/c+tau1
t_cutoff = 3.0e-9
######################
# The detuning of the signal field (in Hz):
delta1 = -2*np.pi*9e9
# The detuning of the control field (in Hz):
# This is the two-photon transition condition.
delta2 = -delta1
# We choose an atom:
element = "Cs"; isotope = 133; n_atom = 6
# Control pulse energy.
energy_pulse2 = 50e-12 # Joules.
# The default flags.
USE_HG_CTRL = False
USE_HG_SIG = False
USE_SQUARE_SIG = False
USE_SQUARE_CTRL = False
nshg = 0; nwhg = 0; nrhg = 0
nssquare = 1; nwsquare = 1; nrsquare = 1
################################################
# We set the default values of the independent parameters.
pms = {"e_charge": e_charge,
"hbar": hbar,
"c": c,
"epsilon_0": epsilon_0,
"kB": kB,
"element": element,
"isotope": isotope,
"Nt": Nt,
"Nz": Nz,
"Nv": Nv,
"T": T,
"L": L,
"Temperature": Temperature,
"Nsigma": Nsigma,
"delta1": delta1,
"sigma_power1": sigma_power1,
"sigma_power2": sigma_power2,
"w1": w1,
"w2": w2,
"t0s": t0s,
"t0w": t0w,
"t0r": t0r,
"energy_pulse2": energy_pulse2,
"wr_ratio": wr_ratio,
"t_cutoff": t_cutoff,
"element": element,
"isotope": isotope,
"verbose": verbose,
"USE_HG_SIG": USE_HG_SIG,
"USE_HG_CTRL": USE_HG_CTRL,
"USE_SQUARE_SIG": USE_SQUARE_SIG,
"USE_SQUARE_CTRL": USE_SQUARE_CTRL,
"nshg": nshg, "nwhg": nwhg, "nrhg": nrhg,
"nssquare": nssquare, "nwsquare": nwsquare, "nrsquare": nrsquare,
"ntauw": 1.0, "N": 101,
"pumping": 0.0,
"with_focusing": False,
"rep_rate": 80e6}
# NOTE: if an independent parameter is added here, it must also
# be added in the next block of code to update it.
#########################################################################
# We replace independent parameters by custom ones if given.
if True:
if custom_parameters is None:
custom_parameters = {}
pm_names_ind = pms.keys()
pm_names_dep = ["mass", "gamma21", "gamma32", "omega21", "omega32",
"omega_laser1", "omega_laser2", "delta2", "r1", "r2",
"taus", "tauw", "taur", "energy_pulse1"]
for i in custom_parameters:
if (i not in pm_names_ind) and (i not in pm_names_dep):
raise ValueError(str(i)+" is not a valid parameter name.")
# We replace "oo" by oo.
aux = ["nssquare", "nwsquare", "nrsquare"]
for key in aux:
if key in custom_parameters and custom_parameters[key] == "oo":
custom_parameters[key] = oo
        # Quick code generation for the following block.
# for name in pm_names_ind:
# line1 = 'if "{}" in custom_parameters.keys():'
# print(line1.format(name))
# line2 = ' pms["{}"] = custom_parameters["{}"]'
# print(line2.format(name, name))
# line3 = ' {} = custom_parameters["{}"]'
# print(line3.format(name, name))
if True:
if "e_charge" in custom_parameters.keys():
pms["e_charge"] = custom_parameters["e_charge"]
e_charge = custom_parameters["e_charge"]
# if "hbar" in custom_parameters.keys():
# pms["hbar"] = custom_parameters["hbar"]
# hbar = custom_parameters["hbar"]
# if "c" in custom_parameters.keys():
# pms["c"] = custom_parameters["c"]
# c = custom_parameters["c"]
# if "epsilon_0" in custom_parameters.keys():
# pms["epsilon_0"] = custom_parameters["epsilon_0"]
# epsilon_0 = custom_parameters["epsilon_0"]
if "kB" in custom_parameters.keys():
pms["kB"] = custom_parameters["kB"]
kB = custom_parameters["kB"]
if "element" in custom_parameters.keys():
pms["element"] = custom_parameters["element"]
element = custom_parameters["element"]
if "isotope" in custom_parameters.keys():
pms["isotope"] = custom_parameters["isotope"]
isotope = custom_parameters["isotope"]
if "Nt" in custom_parameters.keys():
pms["Nt"] = custom_parameters["Nt"]
Nt = custom_parameters["Nt"]
if "Nz" in custom_parameters.keys():
pms["Nz"] = custom_parameters["Nz"]
Nz = custom_parameters["Nz"]
if "Nv" in custom_parameters.keys():
pms["Nv"] = custom_parameters["Nv"]
Nv = custom_parameters["Nv"]
if "T" in custom_parameters.keys():
pms["T"] = custom_parameters["T"]
T = custom_parameters["T"]
if "L" in custom_parameters.keys():
pms["L"] = custom_parameters["L"]
L = custom_parameters["L"]
if "Temperature" in custom_parameters.keys():
pms["Temperature"] = custom_parameters["Temperature"]
Temperature = custom_parameters["Temperature"]
if "Nsigma" in custom_parameters.keys():
pms["Nsigma"] = custom_parameters["Nsigma"]
Nsigma = custom_parameters["Nsigma"]
if "delta1" in custom_parameters.keys():
pms["delta1"] = custom_parameters["delta1"]
delta1 = custom_parameters["delta1"]
if "sigma_power1" in custom_parameters.keys():
pms["sigma_power1"] = custom_parameters["sigma_power1"]
sigma_power1 = custom_parameters["sigma_power1"]
if "sigma_power2" in custom_parameters.keys():
pms["sigma_power2"] = custom_parameters["sigma_power2"]
sigma_power2 = custom_parameters["sigma_power2"]
if "w1" in custom_parameters.keys():
pms["w1"] = custom_parameters["w1"]
w1 = custom_parameters["w1"]
if "w2" in custom_parameters.keys():
pms["w2"] = custom_parameters["w2"]
w2 = custom_parameters["w2"]
if "t0s" in custom_parameters.keys():
pms["t0s"] = custom_parameters["t0s"]
t0s = custom_parameters["t0s"]
if "t0w" in custom_parameters.keys():
pms["t0w"] = custom_parameters["t0w"]
t0w = custom_parameters["t0w"]
if "t0r" in custom_parameters.keys():
pms["t0r"] = custom_parameters["t0r"]
t0r = custom_parameters["t0r"]
if "energy_pulse2" in custom_parameters.keys():
pms["energy_pulse2"] = custom_parameters["energy_pulse2"]
energy_pulse2 = custom_parameters["energy_pulse2"]
if "wr_ratio" in custom_parameters.keys():
pms["wr_ratio"] = custom_parameters["wr_ratio"]
wr_ratio = custom_parameters["wr_ratio"]
if "t_cutoff" in custom_parameters.keys():
pms["t_cutoff"] = custom_parameters["t_cutoff"]
t_cutoff = custom_parameters["t_cutoff"]
if "element" in custom_parameters.keys():
pms["element"] = custom_parameters["element"]
element = custom_parameters["element"]
if "isotope" in custom_parameters.keys():
pms["isotope"] = custom_parameters["isotope"]
isotope = custom_parameters["isotope"]
if "verbose" in custom_parameters.keys():
pms["verbose"] = custom_parameters["verbose"]
verbose = custom_parameters["verbose"]
if "USE_HG_SIG" in custom_parameters.keys():
pms["USE_HG_SIG"] = custom_parameters["USE_HG_SIG"]
USE_HG_SIG = custom_parameters["USE_HG_SIG"]
if "USE_HG_CTRL" in custom_parameters.keys():
pms["USE_HG_CTRL"] = custom_parameters["USE_HG_CTRL"]
USE_HG_CTRL = custom_parameters["USE_HG_CTRL"]
if "USE_SQUARE_SIG" in custom_parameters.keys():
pms["USE_SQUARE_SIG"] = custom_parameters["USE_SQUARE_SIG"]
USE_SQUARE_SIG = custom_parameters["USE_SQUARE_SIG"]
if "USE_SQUARE_CTRL" in custom_parameters.keys():
pms["USE_SQUARE_CTRL"] = custom_parameters["USE_SQUARE_CTRL"]
USE_SQUARE_CTRL = custom_parameters["USE_SQUARE_CTRL"]
if "nshg" in custom_parameters.keys():
pms["nshg"] = custom_parameters["nshg"]
nshg = custom_parameters["nshg"]
if "nwhg" in custom_parameters.keys():
pms["nwhg"] = custom_parameters["nwhg"]
nwhg = custom_parameters["nwhg"]
if "nrhg" in custom_parameters.keys():
pms["nrhg"] = custom_parameters["nrhg"]
nrhg = custom_parameters["nrhg"]
if "nssquare" in custom_parameters.keys():
pms["nssquare"] = custom_parameters["nssquare"]
nssquare = custom_parameters["nssquare"]
if "nwsquare" in custom_parameters.keys():
pms["nwsquare"] = custom_parameters["nwsquare"]
nwsquare = custom_parameters["nwsquare"]
if "nrsquare" in custom_parameters.keys():
pms["nrsquare"] = custom_parameters["nrsquare"]
nrsquare = custom_parameters["nrsquare"]
if "N" in custom_parameters.keys():
pms["N"] = custom_parameters["N"]
nrsquare = custom_parameters["N"]
if "ntauw" in custom_parameters.keys():
pms["ntauw"] = custom_parameters["ntauw"]
nrsquare = custom_parameters["ntauw"]
if "pumping" in custom_parameters.keys():
pms["pumping"] = custom_parameters["pumping"]
if "with_focusing" in custom_parameters.keys():
pms["with_focusing"] = custom_parameters["with_focusing"]
if "rep_rate" in custom_parameters.keys():
pms["rep_rate"] = custom_parameters["rep_rate"]
#########################################################################
if calculate_atom:
from fast import State, Transition, make_list_of_states, Atom
from fast import calculate_boundaries, Integer
from fast import calculate_matrices
# from fast import fancy_r_plot, fancy_matrix_plot
from fast import vapour_number_density
# from matplotlib import pyplot
atom = Atom(element, isotope)
mass = atom.mass
n_atom = atom.ground_state_n
n_atomic0 = vapour_number_density(Temperature, element)
g = State(element, isotope, n_atom, 0, 1/Integer(2))
e = State(element, isotope, n_atom, 1, 3/Integer(2))
l = State(element, isotope, n_atom, 2, 5/Integer(2))
fine_states = [g, e, l]
magnetic_states = make_list_of_states(fine_states,
"magnetic", verbose=0)
bounds = calculate_boundaries(fine_states, magnetic_states)
g_index = bounds[0][0][1]-1
e_index = bounds[0][1][1]-1
l_index = bounds[1][6][1]-1
g = magnetic_states[g_index]
e = magnetic_states[e_index]
l = magnetic_states[l_index]
if verbose >= 1:
            print()
print("Calculating atomic properties ...")
print("We are choosing the couplings of")
print(magnetic_states[g_index], magnetic_states[e_index],)
print(magnetic_states[l_index])
print("as a basis to estimate the values of gamma_ij, r^l.")
# We calculate the matrices for the given states.
omega, gamma, r = calculate_matrices(magnetic_states, 1.0)
# We get the parameters for the simplified scheme.
# The couplings.
r1 = r[2][e_index][g_index]
r2 = r[2][l_index][e_index]
r1 = r1*a0
r2 = r2*a0
# The decay frequencies.
gamma21 = gamma[e_index][g_index]
gamma32 = gamma[l_index][e_index]
# print gamma21, gamma32
# We determine which fraction of the population is in the lower
# and upper ground states. The populations will be approximately
# those of a thermal state. At room temperature the populations
# of all Zeeman states will be approximately equal.
fs = State(element, isotope, n_atom, 0, 1/Integer(2)).fperm
# lower_fraction = (2*fs[0]+1)/(2*fs[0]+1.0 + 2*fs[1]+1.0)
upper_fraction = (2*fs[1]+1)/(2*fs[0]+1.0 + 2*fs[1]+1.0)
if ignore_lower_f:
g_index = bounds[0][0][1]-1
e_index = bounds[1][3][1]-1
g = magnetic_states[g_index]
e = magnetic_states[e_index]
n_atomic0 = upper_fraction*n_atomic0
else:
g_index = bounds[0][0][1]-1
e_index = bounds[0][1][1]-1
l_index = bounds[1][6][1]-1
g = magnetic_states[g_index]
e = magnetic_states[e_index]
l = magnetic_states[l_index]
omega21 = Transition(e, g).omega
omega32 = Transition(l, e).omega
else:
if (element, isotope) == ("Rb", 85):
gamma21, gamma32 = (38107518.888, 3102649.47106)
if ignore_lower_f:
omega21, omega32 = (2.4141820325e+15, 2.42745336743e+15)
else:
omega21, omega32 = (2.41418319096e+15, 2.42745220897e+15)
r1, r2 = (2.23682340192e-10, 5.48219440757e-11)
mass = 1.40999341816e-25
if ignore_lower_f:
n_atomic0 = 1.8145590576e+18
else:
n_atomic0 = 3.11067267018e+18
elif (element, isotope) == ("Rb", 87):
gamma21, gamma32 = (38107518.888, 3102649.47106)
if ignore_lower_f:
omega21, omega32 = (2.41417295963e+15, 2.42745419204e+15)
else:
omega21, omega32 = (2.41417562114e+15, 2.42745153053e+15)
r1, r2 = (2.23682340192e-10, 5.48219440757e-11)
r1, r2 = (1.58167299508e-10, 4.47619298768e-11)
mass = 1.44316087206e-25
if ignore_lower_f:
n_atomic0 = 1.94417041886e+18
else:
n_atomic0 = 3.11067267018e+18
elif (element, isotope) == ("Cs", 133):
gamma21, gamma32 = (32886191.8978, 14878582.8074)
if ignore_lower_f:
omega21, omega32 = (2.20993141261e+15, 2.05306420003e+15)
else:
omega21, omega32 = (2.20993425498e+15, 2.05306135765e+15)
r1, r2 = (2.37254506627e-10, 1.54344650829e-10)
r1, r2 = (1.67764270425e-10, 1.26021879628e-10)
mass = 2.2069469161e-25
if ignore_lower_f:
n_atomic0 = 4.72335166533e+18
else:
n_atomic0 = 8.39706962725e+18
# We calculate dependent parameters
if True:
# The frequencies of the optical fields.
omega_laser1 = delta1 + omega21
omega_laser2 = delta2 + omega32
######################
# The energies of the photons.
energy_phot1 = hbar*omega_laser1
# The energies of the pulses.
energy_pulse1 = 1*energy_phot1 # Joules.
delta1 = pms["delta1"]
delta2 = -delta1
omega_laser1 = delta1 + omega21
omega_laser2 = delta2 + omega32
if USE_SQUARE_CTRL:
tauw = time_bandwith_product(nwsquare)/sigma_power2
taur = time_bandwith_product(nrsquare)/sigma_power2
else:
tauw = time_bandwith_product(1) / sigma_power2
taur = time_bandwith_product(1) / sigma_power2
if USE_SQUARE_SIG:
taus = time_bandwith_product(nssquare)/sigma_power1
else:
taus = time_bandwith_product(1) / sigma_power1
# We make a few checks
if pms["Nv"] == 2:
raise ValueError("Nv = 2 is a very bad choice.")
pms.update({"mass": mass,
"gamma21": gamma21,
"gamma32": gamma32,
"omega21": omega21,
"omega32": omega32,
"omega_laser1": omega_laser1,
"omega_laser2": omega_laser2,
"delta2": delta2,
"r1": r1,
"r2": r2,
"energy_pulse1": energy_pulse1,
"energy_pulse2": energy_pulse2,
"taus": taus,
"tauw": tauw,
"taur": taur})
cond1 = "r1" not in custom_parameters
cond2 = "r2" not in custom_parameters
if fitted_couplings and cond1 and cond2:
pms.update({"r1": pms["r1"]*0.2556521})
pms.update({"r2": pms["r2"]*0.72474758})
# We force any custom dependent parameters.
for name in pm_names_dep:
if name in custom_parameters:
if pms["verbose"] >= 1:
mes = "WARNING: parameter " + name
mes += " may be inconsistent with independent parameters."
print(mes)
pms.update({name: custom_parameters[name]})
return pms
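# Usage sketch -- the enclosing parameter-builder function and its name are not
# shown in this excerpt, so the call below is illustrative only (assumed name
# and values):
#   pms = set_parameters_ladder({"delta1": 2*np.pi*6e9, "verbose": 0})
#   print_params(pms)
# Custom independent parameters are folded into `pms` first; the dependent ones
# (tauw, taus, omega_laser1, ...) are then recomputed from them.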
def print_params(params):
r"""Print parameters."""
# Nt = params["Nt"]
# Nz = params["Nz"]
# T = params["T"]
L = params["L"]
# hbar = params["hbar"]
# epsilon_0 = params["epsilon_0"]
# e_charge = params["e_charge"]
# c = params["c"]
# r2 = params["r2"]
t0s = params["t0s"]
t0w = params["t0w"]
t0r = params["t0r"]
sigma_power1 = params["sigma_power1"]
sigma_power2 = params["sigma_power2"]
taus = params["taus"]
tauw = params["tauw"]
Xi = calculate_Xi(params)
w1 = params["w1"]
w2 = params["w2"]
delta1 = params["delta1"]
delta2 = params["delta2"]
energy_pulse2 = params["energy_pulse2"]
rep_rate = params["rep_rate"]
Temperature = params["Temperature"]
pumping = params["pumping"]
n = vapour_number_density(params)
kappa = calculate_kappa(params)
ZRs, ZRc = rayleigh_range(params)
Ecrit = calculate_pulse_energy(params)
# print("Grid size: %i x %i = %i points" % (Nt, Nz, Nt*Nz))
# print("Spacetime size: %2.3f ns, %2.3f cm" % (T*1e9, D*100))
print("Atom: {}{}".format(params["element"], params["isotope"]))
print("delta1: %2.3f GHz" % (delta1/2/np.pi*1e-9))
print("delta2: %2.3f GHz" % (delta2/2/np.pi*1e-9))
print("Rabi frequency: %2.3f GHz" % (Xi/2/np.pi*1e-9))
aux = (sigma_power1*1e-9, sigma_power2*1e-9)
print("Signal & Control bandwidth: %2.3f GHz, %2.3f GHz" % aux)
aux = (taus*1e9, tauw*1e9)
print("Signal & Control duration: %2.3f ns, %2.3f ns" % aux)
aux = (w1*1e6, w2*1e6)
print("Signal & Control waists: %2.3f um, %2.3f um" % aux)
aux = (2*ZRs*100, 2*ZRc*100)
print("Signal & Control double Rayleigh range: %2.3f cm, %2.3f cm" % aux)
print("Control pulse energy : {:10.3f} nJ".format(energy_pulse2*1e9))
print("Critical pulse energy: {:10.3f} nJ".format(Ecrit*1e9))
print("Average control power: {:10.3f} W".format(energy_pulse2*rep_rate))
print("Critical average control power: {:10.3f} W".format(Ecrit*rep_rate))
aux = [t0s*1e9, t0w*1e9, t0r*1e9]
print("t0s: {:2.3f} ns, t0w: {:2.3f} ns, t0r: {:2.3f} ns".format(*aux))
print("L: {:2.3f} cm".format(L*100))
print("Temperature: {:6.2f} °C".format(Temperature-273.15))
print("n: {:.2e} m^-3 ".format(n))
print("kappa: {:.2e} sqrt((m s)^-1)".format(kappa))
print("Pumping: {}".format(pumping))
def calculate_Gamma21(params):
r"""Get the complex detuning."""
gamma21 = params["gamma21"]
delta1 = params["delta1"]
return gamma21/2 - 1j*delta1
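# Quick numerical check (made-up values): with gamma21 = 2*pi*6.07 MHz and
# delta1 = 2*pi*1 GHz,
#   Gamma21 = gamma21/2 - 1j*delta1 ~= 1.9e7 - 6.3e9j  [rad/s]
# i.e. the real part is half the decay rate and the imaginary part tracks the
# one-photon detuning.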
def calculate_Gamma32(params):
r"""Get the complex detuning."""
gamma32 = params["gamma32"]
delta1 = params["delta1"]
delta2 = params["delta2"]
return gamma32/2 - 1j*(delta1+delta2)
def calculate_Xi(params):
r"""Calculate the effective (time averaged) Rabi frequency."""
energy_pulse2 = params["energy_pulse2"]
hbar = params["hbar"]
e_charge = params["e_charge"]
r2 = params["r2"]
w2 = params["w2"]
m = params["nwsquare"]
sigma_power2 = params["sigma_power2"]
tbp = time_bandwith_product(m)
T_Xi = tbp/sigma_power2
Xi = 4 * e_charge**2*r2**2 * energy_pulse2 * c * mu_0
Xi = Xi/(hbar**2*w2**2*np.pi*T_Xi)
Xi = np.sqrt(np.float64(Xi))
return Xi
def calculate_Xitz(params, Xit, tau2, Z):
r"""Calculate the Rabi frequency as a function of tau and z."""
Xi0 = calculate_Xi(params)
w2 = params["w2"]
tauw = params["tauw"]
with_focusing = params["with_focusing"]
Nt2 = len(tau2)
Nz = len(Z)
if with_focusing:
zRS, zRXi = rayleigh_range(params)
wz = w2*np.sqrt(1 + (Z/zRXi)**2)
wz = np.outer(np.ones(Nt2), wz)
else:
wz = w2*np.ones((Nt2, Nz))
if Xit == "square":
Xi = Xi0*np.ones((Nt2, Nz))
else:
Xi = Xi0*np.sqrt(tauw)*np.outer(Xit(tau2), np.ones(Nz))
return Xi*w2/wz
def calculate_power(params, Xi):
r"""Calculate the power of the given Rabi frequency."""
hbar = params["hbar"]
e_charge = params["e_charge"]
r2 = params["r2"]
w2 = params["w2"]
wz = w2
    dim = len(np.array(Xi))
#! /usr/bin/env python
import argparse
import sys
import os
try:
import pandas as pd
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.dates import DateFormatter
plt.style.use('seaborn-whitegrid')
except ImportError:
    print('This script needs pandas, numpy and matplotlib.')
    print('Looks like at least one of these modules is missing.')
    print('Please install these modules first and then retry.')
sys.exit(-1)
# Define the labels/units for beautification
axisunits = {'vmem':'kb', 'pss':'kb', 'rss':'kb', 'swap':'kb',
'utime':'sec', 'stime':'sec', 'wtime':'sec',
'rchar':'b','wchar':'b',
'read_bytes':'b','write_bytes':'b',
'rx_packets':'1', 'tx_packets':'1',
'rx_bytes':'b','tx_bytes':'b',
'nprocs':'1', 'nthreads':'1' }
axisnames = {'vmem':'Memory',
'pss':'Memory',
'rss':'Memory',
'swap':'Memory',
'utime':'CPU-time',
'stime':'CPU-time',
'wtime':'Wall-time',
'rchar':'I/O',
'wchar':'I/O',
'read_bytes':'I/O',
'write_bytes':'I/O',
'rx_packets':'Network',
'tx_packets':'Network',
'rx_bytes':'Network',
'tx_bytes':'Network',
'nprocs':'Count',
'nthreads':'Count'}
legendnames = {'vmem':'Virtual Memory',
'pss':'Proportional Set Size',
'rss':'Resident Set Size',
'swap':'Swap Size',
'utime':'User CPU-time',
'stime':'System CPU-time',
'wtime':'Wall-time',
'rchar':'I/O Read (rchar)',
'wchar':'I/O Written (wchar)',
'read_bytes':'I/O Read (read_bytes)',
'write_bytes':'I/O Written (write_bytes)',
'rx_packets':'Network Received (packets)',
'tx_packets':'Network Transmitted (packets)',
'rx_bytes':'Network Received (bytes)',
'tx_bytes':'Network Transmitted (bytes)',
'nprocs':'Number of Processes',
'nthreads':'Number of Threads'}
multipliers = {'SEC': 1.,
'MIN': 60.,
'HOUR': 60.*60.,
'B': 1.,
'KB': 1024.,
'MB': 1024.*1024.,
'GB': 1024.*1024.*1024.,
'1': 1.}
# A few basic functions for labels/ conversions
def get_axis_label(nom, denom = None):
label = axisnames[nom]
if denom:
        label = r'$\Delta$' + label + r'/$\Delta$' + axisnames[denom]
return label
def get_multiplier(label, unit):
return multipliers[axisunits[label].upper()]/multipliers[unit]
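# Worked example: 'pss' samples are stored in kb, so plotting them with
# --yunit MB uses
#   get_multiplier('pss', 'MB') == multipliers['KB'] / multipliers['MB'] == 1.0 / 1024
# and a raw value of 2048 kb is drawn as 2.0 MB.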
# Main function
if '__main__' in __name__:
# Parse the user input
parser = argparse.ArgumentParser(description = 'Configurable plotting script')
parser.add_argument('--input', type = str, default = 'prmon.txt',
help = 'PrMon TXT output that will be used as input' )
parser.add_argument('--xvar', type = str, default = 'wtime',
help = 'name of the variable to be plotted in the x-axis')
parser.add_argument('--xunit', nargs = '?', default = 'SEC',
choices=['SEC', 'MIN', 'HOUR', 'B', 'KB', 'MB', 'GB', '1'],
help = 'unit of the variable to be plotted in the x-axis')
parser.add_argument('--yvar', type = str, default = 'pss',
help = 'name(s) of the variable to be plotted in the y-axis'
                        ' (comma separated list is accepted)')
parser.add_argument('--yunit', nargs = '?', default = 'MB',
choices=['SEC', 'MIN', 'HOUR', 'B', 'KB', 'MB', 'GB', '1'],
help = 'unit of the variable to be plotted in the y-axis')
parser.add_argument('--stacked', dest = 'stacked', action = 'store_true',
help = 'stack plots if specified')
parser.add_argument('--diff', dest = 'diff', action = 'store_true',
help = 'plot the ratio of the discrete differences of '
' the elements for yvars and xvars if specified')
parser.add_argument('--otype', nargs = '?', default = 'png',
choices=['png', 'pdf', 'svg'],
help = 'format of the output image')
parser.set_defaults(stacked = False)
parser.set_defaults(diff = False)
args = parser.parse_args()
# Check the input file exists
if not os.path.exists(args.input):
        print('Input file %s does not exist'%(args.input))
sys.exit(-1)
# Load the data
data = pd.read_table(args.input, sep = '\t')
data['Time'] = pd.to_datetime(data['Time'], unit = 's')
# Check the variables are in data
if args.xvar not in list(data):
print('Variable %s is not available in data'%(args.xvar))
sys.exit(-1)
ylist = args.yvar.split(',')
for carg in ylist:
if carg not in list(data):
print('Variable %s is not available in data'%(carg))
sys.exit(-1)
# Labels and output information
xlabel = args.xvar
ylabel = ''
for carg in ylist:
if ylabel: ylabel += '_'
ylabel += carg.lower()
if args.diff: ylabel = 'diff_' + ylabel
output = 'PrMon_%s_vs_%s.%s'%(xlabel,ylabel,args.otype)
# Calculate the multipliers
xmultiplier = get_multiplier(xlabel, args.xunit)
ymultiplier = get_multiplier(ylist[0], args.yunit)
# Here comes the figure and data extraction
fig, ax1 = plt.subplots()
xdata = np.array(data[xlabel])*xmultiplier
ydlist = []
for carg in ylist:
if args.diff:
num = np.array(data[carg].diff())*ymultiplier
denom = np.array(data[xlabel].diff())*xmultiplier
ratio = np.where(denom != 0, num/denom, np.nan)
ydlist.append(ratio)
else:
            ydlist.append(np.array(data[carg])*ymultiplier)
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import time
import math
import random
from tqdm import trange
class spinLattice:
def __init__(self, rows, cols, J, mu, type, k):
self.rows = rows
self.cols = cols
self.J = J
self.mu = mu
self.mu_matrixA = np.empty(1)
self.mu_matrixB = np.empty(1)
self.couplingType = type
self.latticeA = np.empty(1)
self.latticeB = np.empty(1)
self.lattice = np.empty(1)
self.k = k
def initLattice(self):
M = self.rows
N = self.cols
# Create randomized lattice
L = np.random.rand(M, N)
# Assign points to high spin (HS) or low spin (LS) state
L[L < 0.5] = -1
L[L >= 0.5] = 1
self.lattice = L
return L
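    # Usage sketch (illustrative arguments; `type` is just a label stored as
    # couplingType):
    #   sl = spinLattice(10, 10, J=1.0, mu=1.0, type="line", k=1.0)
    #   L = sl.initLattice()   # 10x10 array of -1 (LS) / +1 (HS) spins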
def initLineCoupledLattice(self):
"""
        Initialize two randomized M x N sub-lattices (A and B), couple them
        line-to-line, and store their concatenation as the M x 2N lattice.
        :return: the combined lattice (array of -1/+1 spins)
"""
M = self.rows
N = self.cols
# Create randomized lattice
self.latticeA = np.random.rand(M, N)
self.latticeB = np.random.rand(M, N)
# Assign points to high spin (HS) or low spin (LS) state
self.latticeA[self.latticeA < 0.5] = -1
self.latticeA[self.latticeA >= 0.5] = 1
self.latticeB[self.latticeB < 0.5] = -1
self.latticeB[self.latticeB >= 0.5] = 1
self.lattice = np.concatenate((self.latticeA, self.latticeB), axis=1)
return self.lattice
def initPlaneCoupledLattice(self):
"""
        Initialize two randomized M x N sub-lattices (A and B) for the
        plane-coupled configuration.
"""
M = self.rows
N = self.cols
# Create randomized lattice
self.latticeA = np.random.rand(M, N)
        self.latticeB = np.random.rand(M, N)
"""
Created on 10:25 at 08/07/2021/
@author: bo
"""
import argparse
import os
import numpy as np
import pickle
import data.rruff as rruff
from sklearn.metrics import roc_curve, auc
from scipy.special import expit, softmax
import const
import test
import vis_utils as vis_utils
import data.prepare_data as pdd
import matplotlib
import matplotlib.ticker as ticker
# matplotlib.use("pgf")
# matplotlib.rcParams.update({
# "pgf.texsystem": "pdflatex",
# 'text.usetex': True,
# })
matplotlib.rcParams.update({
'font.family': 'serif',
"font.size": 7,
"legend.fontsize": 7,
"xtick.labelsize": 7,
"ytick.labelsize": 7,
"legend.title_fontsize": 7,
"axes.titlesize": 7,
})
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter, StrMethodFormatter, NullFormatter
import matplotlib.ticker as mticker
TEXTWIDTH = 6.75133
def give_args():
"""This function is used to give the argument"""
parser = argparse.ArgumentParser(description='Reproduce figures in the paper')
parser.add_argument('--dir2read_exp', type=str, default="../exp_data/exp_group/")
parser.add_argument('--dir2read_data', type=str, default="../data_group/")
parser.add_argument('--dir2save', type=str, default="figures/")
parser.add_argument('--index', type=str, default="figure_1", help="which figure or table do you want to produce?")
parser.add_argument("--save", type=const.str2bool, default=False, help="whether to save the image or not")
parser.add_argument("--pdf_pgf", type=str, default="pgf", help="in what kind of format will I save the image?")
return parser.parse_args()
# ------------------------------------------------------------------------------------
def set_size(width, fraction=1, enlarge=0):
"""
Args:
width: inches
fraction: float
"""
# Width of figure (in pts)
fig_width_in = width * fraction
golden_ratio = (5 ** .5 - 1) / 2
if enlarge != 0:
golden_ratio *= enlarge
fig_height_in = fig_width_in * golden_ratio
fig_dim = (fig_width_in, fig_height_in)
return fig_dim
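# Worked example: set_size(TEXTWIDTH, fraction=0.5) returns a width of
# 6.75133 * 0.5 ~= 3.38 in and, via the golden ratio (~0.618), a height of
# ~2.09 in, i.e. half the LaTeX text width with pleasing proportions.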
def give_figure_specify_size(fraction, enlarge=0):
fig = plt.figure()
fig.set_size_inches(set_size(TEXTWIDTH, fraction, enlarge))
return fig
# -------------- First figure --------------------#
def give_data_augmentation_example(tds_dir_use="../exp_data/eerst_paper_figures/",
save=False, pdf_pgf="pgf", data_path="../data_group/"):
args = const.give_args_test(raman_type="excellent_unoriented")
args["pre_define_tt_filenames"] = False
tr_data, _, _, label_name_tr = test.get_data(args, None, read_twin_triple="cls", dir2read=data_path)
show_data_augmentation_example(args, tr_data[0], tr_data[1], label_name_tr,
tds_dir_use, save, pdf_pgf)
def show_data_augmentation_example(args, tr_spectrum, tr_label, label_name_tr,
tds_dir_use="../exp_data/eerst_paper_figures/",
save=False, pdf_pgf="pdf"):
"""Illustrate the data augmentation process
Args:
args: the arguments that can tell me the maximum and minimum wavenumber
tr_spectrum: [num_spectra, wavenumbers]
tr_label: [num_spectra]
label_name_tr: corresponding names for each class in the tr label
tds_dir_use: the directory to save the data.
save: bool, whether to save the figure
"""
select_index = np.where(label_name_tr == "AlumNa")[0] #AlumNa
tr_select = tr_spectrum[np.where(tr_label == select_index)[0]]
u_spectrum = tr_select[np.random.choice(len(tr_select), 1)[0]]
std_s_spectrum = rruff.calc_std(u_spectrum, 10)
rand_noise = np.random.normal(0, 3, [3, len(u_spectrum)]) # 5 before
generate = abs(np.expand_dims(u_spectrum, 0) + rand_noise * np.expand_dims(std_s_spectrum, 0))
generate = generate / np.max(generate, axis=-1, keepdims=True)
wavenumber = np.arange(args["max_wave"])[args["min_wave"]:]
text_use = ["%s" % label_name_tr[select_index][0], "Synthetic"]
fig = give_figure_specify_size(0.5, 1.1)
ax = fig.add_subplot(111)
for i, s_c in enumerate(["r", "g"]):
ax.plot([], [], color=s_c)
ax.plot(wavenumber, u_spectrum, 'r', lw=0.8)
ax.text(250, 0.5, text_use[0])
for i, s in enumerate(generate):
ax.plot(wavenumber, s + i + 1, 'g', lw=0.8)
ax.text(250, 0.5 + i + 1, text_use[-1])
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.set_xlabel("Wavenumber (cm" + r"$^{-1})$")
ax.set_ylabel("Intensity (a.u.)")
if save:
plt.savefig(
tds_dir_use + "/augmentation_example_on_RRUFF_%s.%s" % (label_name_tr[select_index][0],
pdf_pgf),
pad_inches=0, bbox_inches='tight')
# --------------------------- second & third figure ------------------------------#
def show_example_spectra(tds_dir="../exp_data/eerst_paper_figures/", save=False, pdf_pgf="pgf",
data_path="../data_group/"):
"""This function shows the example spectra from each dataset. It should also show the distribution of the classes
"""
dataset = ["RRUFF", "RRUFF", "ORGANIC", "ORGANIC", "BACTERIA"]
raman_type = ["raw", "excellent_unoriented", "organic_target_raw", "organic_target", "bacteria_reference_finetune"]
color_group = ['r', 'g']
fig = give_figure_specify_size(0.5, 3.0)
ax_global = vis_utils.ax_global_get(fig)
ax_global.set_xticks([])
ax_global.set_yticks([])
im_index = 0
title_group = ["Mineral (r)", "Mineral (p)", "Organic (r)", "Organic (p)", "Bacteria"]
tr_frequency_count = []
for s_data, s_raman in zip(dataset, raman_type):
ax = fig.add_subplot(5, 1, im_index + 1)
args = const.give_args_test(raman_type=s_raman)
args["pre_define_tt_filenames"] = False
if s_data == "RRUFF" or s_data == "ORGANIC":
tr_data, _, _, label_name_tr = test.get_data(args, None, read_twin_triple="cls", dir2read=data_path)
else:
tr_data, _, _, _, label_name_tr = test.get_data(args, None, read_twin_triple="cls", dir2read=data_path)
tr_spectra, tr_label = tr_data
unique_label, unique_count = np.unique(tr_label, return_counts=True)
if s_data == "RRUFF":
tr_frequency_count.append(unique_count)
if s_data == "RRUFF":
class_name = "Beryl"
select_label = np.where(label_name_tr == class_name)[0]
index = np.where(tr_label == select_label)[0]
else:
select_label = unique_label[np.argmax(unique_count)]
if s_data == "ORGANIC":
select_label = 1
class_name = label_name_tr[select_label]
if s_data == "ORGANIC":
class_name = "Benzidine"
index = np.where(tr_label == select_label)[0]
if len(index) > 15:
index = np.random.choice(index, 5, replace=False)
_spectra = tr_spectra[index]
if s_data == "RRUFF":
wavenumber = np.arange(args["max_wave"])[args["min_wave"]:]
ax.set_xlim((0, 1500))
elif s_data == "BACTERIA":
wavenumber = np.load("../bacteria/wavenumbers.npy")
elif s_data == "ORGANIC":
wavenumber = np.linspace(106.62457839661, 3416.04065695651, np.shape(tr_spectra)[1])
for j, s in enumerate(_spectra):
ax.plot(wavenumber, s, alpha=0.8, lw=0.8)
ax.set_title(title_group[im_index] + ": " + class_name)
im_index += 1
        if s_raman == "bacteria_reference_finetune":
ax.set_xlabel("Wavenumber (cm" + r"$^{-1})$")
ax_global.set_ylabel("Intensity (a.u.)\n\n")
plt.subplots_adjust(hspace=0.47)
if save:
plt.savefig(tds_dir + "/example_spectra.%s" % pdf_pgf, pad_inches=0, bbox_inches='tight')
title_group = ["Mineral (r)", "Mineral (p)"]
fig = give_figure_specify_size(0.5, 0.8)
ax_global = vis_utils.ax_global_get(fig)
ax_global.set_xticks([])
ax_global.set_yticks([])
max_count = np.max([np.max(np.unique(v, return_counts=True)[1]) for v in tr_frequency_count])
for i, s in enumerate(tr_frequency_count):
ax = fig.add_subplot(1, 2, i + 1)
ax.hist(s, bins=np.max(s), ec="white", lw=0.4)
ax.set_yscale("symlog")
ax.set_ylim((0, max_count))
if i == 1:
ax.yaxis.set_ticks_position('none')
ax.yaxis.set_major_formatter(plt.NullFormatter())
else:
ax.yaxis.set_major_formatter(StrMethodFormatter('{x:.0f}'))
ax.set_title(title_group[i])
plt.subplots_adjust(wspace=0.04)
ax_global.set_xlabel("\n\n Number of spectra per class")
ax_global.set_ylabel("Number of classes \n\n")
if save:
plt.savefig(tds_dir + "/class_distribution_on_RRUFF.%s" % pdf_pgf, pad_inches=0, bbox_inches='tight')
# -------------------- figure 4 --------------------------
def give_uncertainty_distribution_figure_with_confidence_interval(tds_dir="../exp_data/eerst_paper_figures/",
save=False,
pdf_pgf="pgf",
path_init="../", use_nll_or_prob="prob",
data_path="../data_group/", strategy="sigmoid"):
_, rruff_raw_avg, rruff_raw_std = get_multiple_rruff_uncertainty("raw", path_init,
use_nll_or_prob=use_nll_or_prob,
data_path=data_path, strategy=strategy)
_, rruff_pre_avg, rruff_pre_std = get_multiple_rruff_uncertainty("excellent_unoriented", path_init,
use_nll_or_prob=use_nll_or_prob,
data_path=data_path, strategy=strategy)
_, organic_raw_avg, organic_raw_std = get_multiple_organic_uncertainty("organic_target_raw", data_path=data_path, path_init=path_init,
use_nll_or_prob="prob", strategy=strategy)
_, organic_pre_avg, organic_pre_std = get_multiple_organic_uncertainty("organic_target", data_path=data_path, path_init=path_init,
use_nll_or_prob="prob", strategy=strategy)
_, bacteria_avg, bacteria_std = get_multiple_bacteria_uncertainty(path_init,
use_nll_or_prob=use_nll_or_prob,
data_path=data_path, strategy=strategy)
color_use = ["r", "g", "b", "orange", "m"]
title_group = "Correct match (%)"
dataset = ["Mineral (r)", "Mineral (p)", "Organic (r)", "Organic (p)", "Bacteria"]
fig = give_figure_specify_size(0.5, 1.25)
ax = fig.add_subplot(111)
for j, stat in enumerate([[rruff_raw_avg, rruff_raw_std],
[rruff_pre_avg, rruff_pre_std],
[organic_raw_avg, organic_raw_std],
[organic_pre_avg, organic_pre_std],
[bacteria_avg, bacteria_std]]):
if strategy != "none":
plot_fillx_filly(stat[0][0]*100, stat[1][0],
stat[0][1]*100, stat[1][1], ax, color_use=color_use[j])
else:
plot_fillx_filly(stat[0][0], stat[1][0], stat[0][1]*100, stat[1][1],
ax, color_use=color_use[j])
ax.legend(dataset, loc='best', handlelength=1.1, handletextpad=0.5,
borderpad=0.25) # bbox_to_anchor=(1.0, 0.8), loc="upper left",
if strategy == "softmax" or strategy == "sigmoid":
ax.plot([0, 100], [0, 100], ls='--', color='black')
ax.set_xlim((0, 100))
ax.set_ylim((0, 100))
ax.set_ylabel(title_group)
ax.yaxis.set_major_formatter(FuncFormatter(form3))
ax.set_xlabel("Similarity score")
if save:
plt.savefig(tds_dir + "/uncertainty_distribution_for_the_test_dataset_with_confidence_interval_%s.%s" % (strategy, pdf_pgf),
pad_inches=0, bbox_inches='tight')
def motivation_for_conformal_prediction_bacteria(save=False, pdf_pgf="pgf",
path_init="../exp_data/exp_group/",
path2save="../exp_data/eerst_paper_figures/",
data_path="../data_group/"):
dataset = ["BACTERIA"]
output_bacteria = motivation_for_conformal_prediction(dataset[0], select_length=1, show=False, path_init=path_init,
data_path=data_path)
two_select_index = np.where(np.array([len(v) for v in output_bacteria[4]]) == 2)[0]
fig = give_figure_specify_size(1.1, 0.8)
ax_global = vis_utils.ax_global_get(fig)
ax_global.set_xticks([])
ax_global.set_yticks([])
ax = fig.add_subplot(2, 2, 1)
_show_motivation_for_conformal_prediction(*output_bacteria, select_index=579, ax=ax, save=False, pdf_pgf="None",
path2save=path2save)
ax = fig.add_subplot(2, 2, 3)
_show_motivation_for_conformal_prediction(*output_bacteria, select_index=two_select_index[5], ax=ax, save=False, pdf_pgf="None",
path2save=path2save)
ax = fig.add_subplot(1, 2, 2)
_show_motivation_for_conformal_prediction(*output_bacteria, select_index=463, ax=ax, save=False, pdf_pgf="None",
path2save=path2save)
plt.subplots_adjust(wspace=0.04)
ax_global.set_xlabel("\nWavenumber (cm" + r"$^{-1}$" + ")")
ax_global.set_ylabel("Intensity (a.u.) \n")
return output_bacteria
def motivation_for_conformal_prediction_multiple_datasets(save=False, pdf_pgf="pgf",
path_init="../exp_data/exp_group/",
path2save="../exp_data/eerst_paper_figures/",
data_path="../data_group/"):
dataset = ["RRUFF_excellent_unoriented",
"RRUFF_raw",
"BACTERIA"]
fig = give_figure_specify_size(1.1, 0.8)
ax_global = vis_utils.ax_global_get(fig)
ax_global.set_xticks([])
ax_global.set_yticks([])
output_rruff_r = motivation_for_conformal_prediction(dataset[1], select_length=1, show=False, path_init=path_init,
data_path=data_path)
output_rruff_p = motivation_for_conformal_prediction(dataset[0], select_length=1, show=False, path_init=path_init,
data_path=data_path)
output_bacteria = motivation_for_conformal_prediction(dataset[2], select_length=1, show=False, path_init=path_init,
data_path=data_path)
ax = fig.add_subplot(2, 3, 1)
_show_motivation_for_conformal_prediction(*output_bacteria, select_index=579, ax=ax, save=False, pdf_pgf="None",
path2save=path2save)
ax = fig.add_subplot(2, 3, 4)
_show_motivation_for_conformal_prediction(*output_rruff_p, select_index=25, ax=ax, save=False, pdf_pgf="None",
path2save=path2save)
ax = fig.add_subplot(1, 3, 2)
_show_motivation_for_conformal_prediction(*output_rruff_r, select_index=145, ax=ax, save=False, pdf_pgf="None",
path2save=path2save)
ax = fig.add_subplot(1, 3, 3)
_show_motivation_for_conformal_prediction(*output_bacteria, select_index=463, ax=ax, save=False, pdf_pgf="None",
path2save=path2save)
plt.subplots_adjust(wspace=0.04)
ax_global.set_xlabel("\nWavenumber (cm" + r"$^{-1}$" + ")")
ax_global.set_ylabel("Intensity (a.u.) \n")
if save:
plt.savefig(path2save + "conformal_motivation.%s" % pdf_pgf, pad_inches=0, bbox_inches='tight')
def _calc_motivation_for_conformal_prediction(alpha_use=0.05, use_original_weight="original",
dataset="BACTERIA",
path_init="../exp_data/exp_group/",
data_path="../data_group/"):
if dataset == "BACTERIA":
wavenumbers = np.load("../bacteria/wavenumbers.npy")
raman_type = "bacteria_random_reference_finetune"
args = const.give_args_test(raman_type=raman_type)
args["pre_define_tt_filenames"] = False
tr_data, val_data, tt_data, _, label_name_tr = test.get_data(args, None, read_twin_triple="cls",
print_info=False, dir2read=data_path)
tr_spectra, tt_spectra = tr_data[0], tt_data[0]
tr_label_group = [tr_data[1], tr_data[1]]
val_label, tt_label = val_data[1], tt_data[1]
path2load = path_init + "bacteria_reference_finetune/tds/"
s_split = 1
path = path2load + [v for v in os.listdir(path2load) if "split_%d" % s_split in v and ".txt" not in v][0] + "/"
val_prediction = pickle.load(open(path + "validation_prediction.obj", "rb"))
tt_prediction = pickle.load(open(path + "test_prediction.obj", "rb"))
elif "RRUFF" in dataset:
raman_type = dataset.split("RRUFF_")[1]
dataset = "RRUFF"
args = const.give_args_test(raman_type=raman_type)
wavenumbers = np.arange(args["max_wave"])[args["min_wave"]:]
args["pre_define_tt_filenames"] = False
tr_data, tt_data, _, label_name_tr = test.get_data(args, None, read_twin_triple="cls",
print_info=False, dir2read=data_path)
[_, reference_val_label], [_, val_label] = pdd.get_fake_reference_and_test_data(tr_data, 1, data=dataset)
tr_label_group = [reference_val_label, tr_data[1]]
tr_spectra, tt_spectra = tr_data[0], tt_data[0]
tt_label = tt_data[1]
path2load = path_init + "%s/tds/" % raman_type
s_split = 1
path = path2load + [v for v in os.listdir(path2load) if "split_%d" % s_split in v and '.txt' not in v][0] + "/"
val_prediction = pickle.load(open(path + "validation_prediction.obj", "rb"))
tt_prediction = pickle.load(open(path + "test_prediction.obj", "rb"))
if use_original_weight == "original":
val_pred_en, tt_pred_en = val_prediction[0]["ensemble_avg"], tt_prediction[0]["ensemble_avg"]
else:
val_pred_en, tt_pred_en = val_prediction[1]["ensemble_avg"], tt_prediction[1]["ensemble_avg"]
val_pred_baseon_cls, _ = test.reorganize_similarity_score(val_pred_en, tr_label_group[0])
tt_pred_baseon_cls, tt_corr_tr_index = test.reorganize_similarity_score(tt_pred_en, tr_label_group[1])
val_prediction_score = give_calibration_single_score_prediction(val_pred_baseon_cls, True, val_label)
threshold = np.quantile(val_prediction_score, alpha_use)
tt_top1 = np.argmax(tt_pred_baseon_cls, axis=-1)
accu = [v == q for v, q in zip(tt_top1, tt_label)]
tt_prediction, \
tt_accuracy = give_test_prediction_baseon_single_score_threshold(tt_pred_baseon_cls,
True, tt_label,
threshold)
tt_pred_softmax = softmax(tt_pred_baseon_cls, axis=-1)
tt_correct_or_wrong = [1 if tt_label[i] in v else 0 for i, v in enumerate(tt_prediction)]
return tr_label_group, [val_label, tt_label], [tr_spectra, tt_spectra], \
tt_pred_softmax, tt_prediction, tt_correct_or_wrong, tt_corr_tr_index, label_name_tr, wavenumbers
def _show_motivation_for_conformal_prediction(tr_label_group, tt_label,
tr_spectra, tt_spectra,
tt_prediction, tt_pred_baseon_cls_softmax,
tt_corr_tr_index,
label_name,
wavenumbers, select_index, ax, save, pdf_pgf, path2save):
"""Args
select_index: a single index
save: bool variable
"""
_tr_corr_index = np.where(tr_label_group[1] == tt_label[select_index])[0]
if len(tt_prediction[select_index]) >= 3:
height = 1.5
elif len(tt_prediction[select_index]) == 2:
height = 1.2
else:
height = 1.0
if not ax:
fig = give_figure_specify_size(0.5, height)
ax = fig.add_subplot(111)
color_input = 'r'
color_group = ['g', 'b', 'orange', "c", "tab:blue"]
select_prediction = tt_prediction[select_index]
score = tt_pred_baseon_cls_softmax[select_index]
score_select = score[select_prediction]
score_select_sort_index = np.argsort(score_select)[::-1]
select_prediction = select_prediction[score_select_sort_index]
score_select_sorted = score_select[score_select_sort_index]
input_name = "Input: %s" % label_name[tt_label[select_index]]
scale = 1.4
ax.plot(wavenumbers, tt_spectra[select_index] + len(select_prediction) * scale, color=color_input)
if len(label_name) == 30:
x_loc = 450
else:
x_loc = 100
ax.text(x_loc, len(select_prediction) * scale + 0.95, input_name, color=color_input)
for i, s in enumerate(select_prediction):
if s == tt_label[select_index]:
color_use = color_input
else:
color_use = color_group[i]
_tr_corr_index = tt_corr_tr_index[select_index][s]
match_name = "Match: %s (p=%.2f)" % (label_name[s], score_select_sorted[i])
ax.plot(wavenumbers, tr_spectra[_tr_corr_index] + (len(select_prediction) - i - 1) * scale,
color=color_use)
ax.text(x_loc, (len(select_prediction) - i - 1) * scale + 1, match_name, color=color_use)
ax.yaxis.set_major_formatter(plt.NullFormatter())
if save:
_name = label_name[tt_label[select_index]]
plt.savefig(path2save + "conformal_motivation_%s_%d.%s" % (_name, select_index, pdf_pgf),
pad_inches=0, bbox_inches='tight')
def motivation_for_conformal_prediction(dataset="RRUFF_excellent_unoriented",
select_length=3, path_init="../", show=False, save=False,
pdf_pgf="pgf", data_path="../data_group/"):
if dataset == "RRUFF_excellent_unoriented":
alpha_use = 0.01
elif dataset == "RRUFF_raw":
alpha_use = 0.0005
elif dataset == "BACTERIA":
alpha_use = 0.05
tr_label_group, [val_label, tt_label], [tr_spectra, tt_spectra], \
tt_pred_softmax, tt_prediction, tt_correct_or_wrong, \
tt_corr_tr_index, label_name, wavenumbers = _calc_motivation_for_conformal_prediction(alpha_use=alpha_use,
dataset=dataset,
path_init=path_init,
data_path=data_path)
def filter_index(select_length):
tt_index = []
for i, v in enumerate(tt_prediction):
prob_subset = tt_pred_softmax[i, v]
prob_subset_sort_index = np.argsort(prob_subset)[::-1]
_pred_label = np.array(v)[prob_subset_sort_index]
if len(v) == select_length and tt_correct_or_wrong[i] == 1 and _pred_label[-1] == tt_label[i]:
tt_index.append(i)
return tt_index
if select_length != 0:
tt_index = filter_index(select_length)
select_index = np.random.choice(tt_index, 1)
else:
if dataset == "RRUFF_raw":
select_index = [191, 182, 145]
elif dataset == "RRUFF_excellent_unoriented":
select_index = [25, 594, 312, 1213, 53]
elif dataset == "BACTERIA":
select_index = [463]
if show:
for _select_index in select_index:
_show_motivation_for_conformal_prediction(tr_label_group, tt_label,
tr_spectra, tt_spectra,
tt_prediction, tt_pred_softmax,
tt_corr_tr_index,
label_name, wavenumbers, _select_index, ax=None, save=save,
pdf_pgf=pdf_pgf, path2save=None)
return tr_label_group, tt_label, tr_spectra, tt_spectra, tt_prediction, tt_pred_softmax, tt_corr_tr_index, \
label_name, wavenumbers
def give_conformal_prediction_for_bacteria_paper(path_init="../",
use_original_weight="original",
tds_dir=None, save=False, pdf_pgf="pdf",
data_path="../data_group/",
apply_softmax="none"):
alpha_group = np.linspace(0, 0.20, 10)
path2load, split_version = get_path_for_conformal(path_init, "bacteria_reference_finetune")
stat_bacteria = main_plot_for_scoring_rule(path2load, split_version,
"bacteria_random_reference_finetune",
"BACTERIA", use_original_weight,
alpha_group, show=False, data_path=data_path, apply_softmax=apply_softmax)
fig = give_figure_specify_size(1.0, 0)
title_group = ["Bacteria: 82.71"]
loc = [[0.80, 0.92]]
orig_perf = [82.71]
orig_perf = [v - 1 for v in orig_perf]
for i, stat in enumerate([stat_bacteria]):
stat_avg = np.mean(stat, axis=0)
ax = fig.add_subplot(2, 2, 1)
x_axis = 100 - alpha_group * 100
ax.plot(x_axis, stat_avg[:, 0] * 100, color='r', marker='.')
ax.plot(x_axis, x_axis, color='g', ls=':')
ax.yaxis.set_major_formatter(FuncFormatter(form3))
ax.set_xlim(np.min(x_axis), np.max(x_axis))
ax.set_ylim(np.min(x_axis), np.max(x_axis))
ax.set_ylabel("Empirical coverage (%)")
ax.xaxis.set_major_formatter(plt.NullFormatter())
# plt.axis('square')
ax.set_title(title_group[i])
ax = fig.add_subplot(2, 2, 3)
ax.plot(x_axis, stat_avg[:, 1], color='b', marker='.')
# plt.axis('square')
# ax.set_yscale("symlog")
ax.set_ylabel("Average set size")
ax.set_xlabel("Theoretical coverage (1 - " + r'$\alpha$' + ")" + "(%)")
ax.yaxis.set_major_formatter(FuncFormatter(form3))
ax.xaxis.set_major_formatter(FuncFormatter(form3))
dataset = ["BACTERIA"]
output_bacteria = motivation_for_conformal_prediction(dataset[0], select_length=1, show=False, path_init=path_init,
data_path=data_path)
two_select_index = np.where(np.array([len(v) for v in output_bacteria[4]]) == 2)[0]
# fig = give_figure_specify_size(1.1, 0.8)
# ax_global = vis_utils.ax_global_get(fig)
# ax_global.set_xticks([])
# ax_global.set_yticks([])
# ax = fig.add_subplot(3, 2, 2)
# _show_motivation_for_conformal_prediction(*output_bacteria, select_index=579, ax=ax, save=False, pdf_pgf="None",
# path2save=None)
# ax.xaxis.set_major_formatter(plt.NullFormatter())
ax = fig.add_subplot(2, 2, 2)
_show_motivation_for_conformal_prediction(*output_bacteria, select_index=two_select_index[-4], ax=ax, save=False, pdf_pgf="None",
path2save=None)
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.set_title("Example prediction set")
ax.set_ylabel("Intensity (a.u.)")
ax = fig.add_subplot(2, 2, 4)
_show_motivation_for_conformal_prediction(*output_bacteria, select_index=463, ax=ax, save=False, pdf_pgf="None",
path2save=None)
ax.set_ylabel("Intensity (a.u.)")
ax.set_xlabel("Wavenumber")
# plt.subplots_adjust(wspace=0.23)
# ax_global.set_xlabel("\nWavenumber (cm" + r"$^{-1}$" + ")")
# ax_global.set_ylabel("Intensity (a.u.) \n")
plt.subplots_adjust(hspace=0.1, wspace=0.2)
if save:
if pdf_pgf == "pdf":
plt.savefig(tds_dir + "/correlation_between_alpha_and_accuracy_and_set_size_%s.pdf" % apply_softmax,
pad_inches=0, bbox_inches='tight')
elif pdf_pgf == "pgf":
plt.savefig(tds_dir + "/correlation_between_alpha_and_accuracy_and_set_size.pgf",
pad_inches=0, bbox_inches='tight')
def give_conformal_prediction_for_multiple_datasets(path_init="../",
use_original_weight="weighted",
tds_dir=None, save=False, pdf_pgf="pdf",
data_path="../data_group/"):
# rruff raw
alpha_group_group = []
alpha_group = np.linspace(0, 0.03, 10)
alpha_group_group.append(alpha_group)
path2load, split_version = get_path_for_conformal(path_init, "raw")
stat_rruff_raw = main_plot_for_scoring_rule(path2load, split_version,
"raw", "RRUFF", use_original_weight,
alpha_group, show=False, data_path=data_path)
alpha_group = np.linspace(0, 0.05, 10)
alpha_group_group.append(alpha_group)
path2load, split_version = get_path_for_conformal(path_init, "excellent_unoriented")
stat_rruff_preprocess = main_plot_for_scoring_rule(path2load, split_version,
"excellent_unoriented", "RRUFF",
"original", alpha_group, show=False, data_path=data_path)
alpha_group = np.linspace(0, 0.011, 10)
alpha_group_group.append(alpha_group)
path2load, split_version = get_path_for_conformal(path_init, "organic_target_raw")
stat_organic_raw = main_plot_for_scoring_rule(path2load, split_version, "organic_target_raw", "ORGANIC",
"original", alpha_group, show=False, data_path=data_path)
alpha_group = np.linspace(0, 0.04, 10)
alpha_group_group.append(alpha_group)
path2load, split_version = get_path_for_conformal(path_init, "organic_target")
stat_organic = main_plot_for_scoring_rule(path2load, split_version, "organic_target", "ORGANIC",
"original", alpha_group, show=False, data_path=data_path)
alpha_group = np.linspace(0, 0.20, 10)
alpha_group_group.append(alpha_group)
path2load, split_version = get_path_for_conformal(path_init, "bacteria_reference_finetune")
stat_bacteria = main_plot_for_scoring_rule(path2load, split_version,
"bacteria_random_reference_finetune",
"BACTERIA", use_original_weight,
alpha_group, show=False, data_path=data_path)
fig = give_figure_specify_size(0.5, 4.0)
ax_global = vis_utils.ax_global_get(fig)
ax_global.set_xticks([])
ax_global.set_yticks([])
ax_global.spines['top'].set_visible(False)
ax_global.spines['right'].set_visible(False)
ax_global.spines['bottom'].set_visible(False)
ax_global.spines['left'].set_visible(False)
title_group = ["Mineral (r): 94.48", "Mineral (p): 91.86", "Organic (r): 98.26", "Organic (p): 98.26",
"Bacteria: 82.71"]
loc = [[0.97, 0.958], [0.95, 0.95], [0.989, 0.987], [0.96, 0.987], [0.80, 0.92]]
orig_perf = [94.48, 91.86, 98.26, 98.26, 82.71]
orig_perf = [v - 1 for v in orig_perf]
for i, stat in enumerate([stat_rruff_raw, stat_rruff_preprocess,
stat_organic_raw, stat_organic, stat_bacteria]):
stat_avg = np.mean(stat, axis=0)
ax = fig.add_subplot(len(title_group), 1, i + 1)
vis_utils.show_twinx(alpha_group_group[i] * 100, stat_avg[:, 0] * 100, stat_avg[:, 1],
ax=ax)
ax.set_title(title_group[i])
ax.set_ylim(bottom=orig_perf[i])
ax.set_yticks(np.linspace(orig_perf[i], 100, 4))
ax.yaxis.set_major_formatter(FuncFormatter(form3))
ax.xaxis.set_major_formatter(FuncFormatter(form3))
ax_global.set_ylabel("Empirical coverage (%) \n\n\n", color='r')
ax_global_t = ax_global.twinx()
ax_global_t.set_yticks([])
ax_global_t.spines['top'].set_visible(False)
ax_global_t.spines['right'].set_visible(False)
ax_global_t.spines['bottom'].set_visible(False)
ax_global_t.spines['left'].set_visible(False)
# ax_global_t.grid(None)
ax_global_t.set_ylabel("\n\n\n Average set size", color='g')
ax_global.set_xlabel("\n \n Theoretical coverage (1 - " + r'$\alpha$' + ")" + "(%)")
plt.subplots_adjust(hspace=0.47)
if save:
if pdf_pgf == "pdf":
plt.savefig(tds_dir + "/correlation_between_alpha_and_accuracy_and_set_size.pdf",
pad_inches=0, bbox_inches='tight')
elif pdf_pgf == "pgf":
plt.savefig(tds_dir + "/correlation_between_alpha_and_accuracy_and_set_size.pgf",
pad_inches=0, bbox_inches='tight')
def give_qualitative_result_allinone(path_init, tds_dir="../exp_data/eerst_paper_figures/",
save=False, pdf_pgf="pdf", data_path="../data_group/"):
fig = give_figure_specify_size(1.2, 0.5)
ax_global = vis_utils.ax_global_get(fig)
ax_global.set_xticks([])
ax_global.set_yticks([])
dataset_names = ["Mineral (r)", "Mineral (p)", "Organic", "Bacteria"]
for i in range(4):
ax_g_0 = fig.add_subplot(2, 4, i + 1)
ax_g_1 = fig.add_subplot(2, 4, i + 1 + 4)
if i == 0:
give_qualitative_result_rruff_raw(path_init, [ax_g_0, ax_g_1], data_path=data_path)
elif i == 1:
give_qualitative_result_rruff_preprocess(path_init, [ax_g_0, ax_g_1], data_path=data_path)
elif i == 2:
give_qualitative_result_organic(path_init, [ax_g_0, ax_g_1], data_path=data_path)
elif i == 3:
give_qualitative_result_bacteria(path_init, [ax_g_0, ax_g_1], data_path=data_path)
if i == 0:
ax_g_0.set_ylabel("Correct")
ax_g_1.set_ylabel("Wrong")
ax_g_0.set_title(dataset_names[i])
ax_global.set_xlabel("\n Wavenumber (cm" + r"$^{-1})$")
ax_global.set_ylabel("Intensity (a.u.)\n\n")
plt.subplots_adjust(wspace=0.05, hspace=0.05)
if save:
plt.savefig(tds_dir + "/qualitative_result.%s" % pdf_pgf, pad_inches=0, bbox_inches='tight')
def form3(x, pos):
    """ This function returns a string with 1 decimal place, given the input x"""
    return '%.1f' % x
def find_the_best_threshold_and_evaluate_accuracy(val_prediction, tt_ensemble,
selected_index,
reference_label_val,
val_label, reference_label_tt, tt_label, predicted_label_tt,
voting_number):
"""This function finds the best threshold (uncertainty) based on the validation dataset. Then we group
the test predictions to low-uncertainty and high-uncertainty group and evaluate the matching accuracy under
each group
Args:
val_prediction: [original_val, weighted_val]
tt_ensemble: [original_tt_ensemble, weighted_tt_ensemble]
selected_index: [selected index for original, selected index for the weighted]
reference_label_val: the ground truth for the validation dataset
val_label: the ground truth for the validation dataset
reference_label_tt: the ground truth for the test dataset
tt_label: the ground truth for the test data
predicted_label_tt: the predicted label (it needs to be result after
applying majority voting for the bacteria dataset)
voting_number: the majority voting numbers
"""
keys = list(val_prediction[0].keys())
val_original_ensemble, \
val_weighted_ensemble = np.zeros_like(val_prediction[0][keys[0]]), np.zeros_like(val_prediction[0][keys[0]])
val_ensemble = [val_original_ensemble, val_weighted_ensemble]
for i, s_stat in enumerate(val_prediction):
for j, key in enumerate(s_stat.keys()):
if j in selected_index[i]:
val_ensemble[i] += s_stat[key]
val_ensemble = [v / len(selected_index[0]) for v in val_ensemble]
val_pred_baseon_class = [test.reorganize_similarity_score(v, reference_label_val)[0] for v in
val_ensemble]
if len(voting_number) == 0:
val_prediction = [reference_label_val[np.argmax(v, axis=-1)] for v in val_ensemble]
else:
val_prediction = []
for i, s_val_pred in enumerate(val_ensemble):
_, _pred_label = vis_utils.majority_voting(s_val_pred, reference_label_val,
val_label, voting_number[i])
val_prediction.append(_pred_label)
val_threshold = []
for i in range(2):
correct_or_wrong = np.array([0 if v == q else 1 for v, q in zip(val_prediction[i], val_label)])
if i == 0:
norm_pred = softmax(val_pred_baseon_class[i], axis=-1)
else:
norm_pred = val_pred_baseon_class[i]
selected_predict = norm_pred[np.arange(len(val_label)), val_prediction[i]]
_nll = -np.log(selected_predict)
fpr, tpr, thresholds = roc_curve(correct_or_wrong, _nll)
optimal_idx = np.argmax(tpr - fpr)
optimal_threshold = thresholds[optimal_idx]
val_threshold.append(optimal_threshold)
stat_baseon_uncertainty = np.zeros([2, 4])
for i in range(2):
tt_pred_baseon_class, _ = test.reorganize_similarity_score(tt_ensemble[i],
reference_label_tt)
if i == 0:
tt_pred_baseon_class = softmax(tt_pred_baseon_class, axis=-1)
select_predict = tt_pred_baseon_class[np.arange(len(tt_label)), predicted_label_tt[i]]
_nll = -np.log(select_predict)
correct_or_wrong = np.array([0 if v == q else 1 for v, q in zip(predicted_label_tt[i], tt_label)])
high_uncertainty_index = np.where(_nll >= val_threshold[i])[0]
high_uncertainty_correct = len(high_uncertainty_index) - np.sum(correct_or_wrong[high_uncertainty_index])
low_uncertainty_index = np.where(_nll < val_threshold[i])[0]
low_uncertainty_correct = len(low_uncertainty_index) - np.sum(correct_or_wrong[low_uncertainty_index])
stat_baseon_uncertainty[i, :] = [low_uncertainty_correct, len(low_uncertainty_index),
high_uncertainty_correct, len(high_uncertainty_index)]
return stat_baseon_uncertainty, val_threshold
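# Note: the uncertainty threshold above is picked on the validation set by
# maximizing Youden's J statistic (J = TPR - FPR) along the ROC curve, i.e.
#   fpr, tpr, thresholds = roc_curve(correct_or_wrong, nll_scores)
#   threshold = thresholds[np.argmax(tpr - fpr)]
# which is exactly what the loop over the two weighting schemes does.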
def _give_uncertainty_distribution_for_single_dataset(dataset, raman_type,
num_select, voting_number, uncertainty, prediction_status,
split_version=100, qualitative_study=False, path_init="../",
get_similarity=False, data_path="../data_group/", strategy="sigmoid"):
path2load = path_init + "/%s/" % raman_type + "/tds/"
folder2read = [v for v in os.listdir(path2load) if os.path.isdir(path2load + v) and "split_%d" % split_version in v]
dir2load_data = path_init + "/%s/" % raman_type
dir2load_data = [dir2load_data + "/" + v + "/data_splitting/" for v in os.listdir(dir2load_data) if
"tds" not in v and "version_%d" % split_version in v][0]
folder2read = folder2read[0]
original_weight_stat = ["original", "weighted"]
folder2read = path2load + folder2read
val_prediction = pickle.load(open(folder2read + "/validation_prediction.obj", "rb"))
tt_prediction = pickle.load(open(folder2read + "/test_prediction.obj", "rb"))
original_val, weighted_val = val_prediction
original_tt, weighted_tt = tt_prediction
args = const.give_args_test(raman_type=raman_type)
args["pre_define_tt_filenames"] = True
validation_accuracy = np.zeros([len(list(original_val.keys())) - 1, 2])
if dataset == "RRUFF" or dataset == "ORGANIC":
if dataset == "RRUFF":
tr_data, tt_data, _, label_name_tr = test.get_data(args, dir2load_data, read_twin_triple="cls",
print_info=False, dir2read=data_path)
else:
tr_data, tt_data, _, label_name_tr = test.get_data(args, dir2load_data, read_twin_triple="cls",
print_info=False, dir2read=data_path)
fake_val_reference, fake_val_data = pdd.get_fake_reference_and_test_data(tr_data, 1, data=dataset)
reference_val_label, val_label = fake_val_reference[1], fake_val_data[1]
for j, key in enumerate(list(original_val.keys())[:-1]):
_val_pred = original_val[key]
if strategy == "sigmoid" or strategy == "sigmoid_softmax":
_val_pred = expit(_val_pred)
_correct = np.sum(fake_val_reference[1][np.argmax(_val_pred, axis=-1)] == fake_val_data[1]) / len(
fake_val_data[0])
validation_accuracy[j, 0] = _correct
for j, key in enumerate(list(weighted_val.keys())[:-1]):
_val_pred = weighted_val[key]
_correct = np.sum(fake_val_reference[1][np.argmax(_val_pred, axis=-1)] == fake_val_data[1]) / len(
fake_val_data[0])
validation_accuracy[j, 1] = _correct
else:
tr_data, val_data, tt_data, _, label_name_tr = test.get_data(args, None, read_twin_triple="cls",
print_info=False, dir2read=data_path)
reference_val_label, val_label = tr_data[1], val_data[1]
for m, stat in enumerate([original_val, weighted_val]):
for j, key in enumerate(list(stat.keys())[:-1]):
if m == 0:
if strategy == "sigmoid" or strategy == "sigmoid_softmax":
_val_pred = expit(stat[key])
else:
_val_pred = stat[key]
_correct = np.sum(tr_data[1][np.argmax(_val_pred, axis=-1)] == val_data[1]) / len(val_data[1])
validation_accuracy[j, m] = _correct
original_select = np.argsort(validation_accuracy[:, 0])[-num_select:]
weighted_select = np.argsort(validation_accuracy[:, 1])[-num_select:]
for j, key in enumerate(list(original_tt.keys())):
if j == 0:
original_tt_ensemble = np.zeros_like(original_tt[key])
if strategy == "sigmoid" or strategy == "sigmoid_softmax":
original_tt[key] = expit(original_tt[key])
if j in original_select:
original_tt_ensemble += original_tt[key]
original_tt_ensemble /= len(original_select)
for j, key in enumerate(list(weighted_tt.keys())):
if j == 0:
weighted_tt_ensemble = np.zeros_like(weighted_tt[key])
if j in weighted_select:
weighted_tt_ensemble += weighted_tt[key]
weighted_tt_ensemble /= len(weighted_select)
predicted_label_on_test_data = []
correspond_tr_index = []
for j, single_stat in enumerate([original_tt_ensemble, weighted_tt_ensemble]):
if dataset != "BACTERIA":
_pred_label = tr_data[1][np.argmax(single_stat, axis=-1)]
accuracy = np.sum(_pred_label == np.array(tt_data[1])) / len(tt_data[0])
else:
accuracy, _pred_label = vis_utils.majority_voting(single_stat, tr_data[1],
tt_data[1], voting_number[j])
pred_baseon_class, corr_tr_index = test.reorganize_similarity_score(single_stat,
tr_data[1])
if strategy == "softmax":
pred_baseon_class = softmax(pred_baseon_class, axis=-1)
_nll_prediction = pred_baseon_class[np.arange(len(tt_data[0])), _pred_label]
print("NLL prediction", np.max(_nll_prediction), np.min(_nll_prediction))
_nll_score = _nll_prediction
if split_version == 100:
uncertainty.update({"%s_%s_%s" % (dataset, raman_type, original_weight_stat[j]): _nll_score})
else:
uncertainty.update({"%s_%s_%s_version_%d" % (dataset, raman_type, original_weight_stat[j],
split_version): _nll_score})
_pred_stat = np.concatenate([np.expand_dims(tt_data[1], axis=-1),
np.expand_dims(_pred_label, axis=-1)], axis=-1)
if split_version == 100:
prediction_status.update({"%s_%s_%s" % (dataset, raman_type, original_weight_stat[j]): _pred_stat})
else:
prediction_status.update({"%s_%s_%s_version_%d" % (dataset, raman_type, original_weight_stat[j],
split_version): _pred_stat})
print("%s + %s + %s : %.4f" % (dataset, raman_type, original_weight_stat[j], accuracy))
predicted_label_on_test_data.append(_pred_label)
correspond_tr_index.append(corr_tr_index)
accuracy_baseon_uncertainty, \
optimal_threshold = find_the_best_threshold_and_evaluate_accuracy([original_val, weighted_val],
[original_tt_ensemble, weighted_tt_ensemble],
[original_select, weighted_select],
reference_val_label,
val_label,
tr_data[1], tt_data[1],
predicted_label_on_test_data, voting_number)
if not qualitative_study:
return uncertainty, prediction_status, accuracy_baseon_uncertainty, optimal_threshold
else:
if not get_similarity:
return uncertainty, prediction_status, correspond_tr_index, \
optimal_threshold, tr_data, tt_data, label_name_tr, np.arange(args["max_wave"])[args["min_wave"]:]
else:
return original_val, original_tt_ensemble, original_select, \
reference_val_label, val_label, tr_data[1], tt_data[1]
def give_original_weight_uncertainty(uncertainty, prediction_status, dataset, use_nll_or_prob="nll"):
stat_orig, stat_weight = {}, {}
min_value = 0
high_value = [6 if use_nll_or_prob == "nll" else 1][0]
if dataset == "RRUFF_R":
num_bins = 5 # 8
elif dataset == "RRUFF_P":
num_bins = 5
elif dataset == "ORGANIC":
num_bins=3
else:
num_bins = 7
uncertainty_array, prediction_array = [], []
for key in uncertainty.keys():
predict_prob = uncertainty[key]
print(key, np.max(predict_prob), np.min(predict_prob))
_stat = group_uncertainty_and_prediction(predict_prob,
prediction_status[key],
min_value, high_value, num_bins, False)
if "weight" in key:
stat_weight[key] = _stat
else:
stat_orig[key] = _stat
prediction_array.append(prediction_status[key])
uncertainty_array.append(predict_prob)
if dataset == "RRUFF_Rs" or dataset == "RRUFF_Ps" or dataset == "ORGANICs":
return stat_weight
else:
return stat_orig, prediction_array, uncertainty_array
def give_avg_std_for_uncertainty(stat_weight):
stat = [[] for _ in range(3)]
max_dim = np.max([np.shape(stat_weight[key])[1] for key in stat_weight.keys()])
for key in stat_weight.keys():
_value = stat_weight[key]
if np.shape(_value)[1] < max_dim:
_value = np.concatenate([_value, np.zeros([len(_value), max_dim - np.shape(_value)[1]])],
axis=-1)
for j in range(3):
stat[j].append(_value[j])
for j, v in enumerate(stat):
stat[j] = np.array(v)
tot = stat[1] + stat[2]
tot[tot == 0] = 1
stat_c_percent = stat[1] / tot
stat_w_percent = stat[2] / tot
percent_stat = [stat_c_percent, stat_w_percent]
stat_avg, stat_std = [], []
for j in range(3):
if j == 0:
x_avg = np.sum(stat[0], axis=0) / np.sum(stat[0] != 0, axis=0)
else:
_divide = np.sum(percent_stat[j - 1] != 0, axis=0)
_divide[_divide == 0] = 1
x_avg = np.sum(percent_stat[j - 1], axis=0) / _divide
stat_avg.append(x_avg)
x_std = np.zeros_like(x_avg)
for m in range(np.shape(stat[0])[1]):
if j == 0:
v = stat[j][:, m]
else:
v = percent_stat[j - 1][:, m]
if len(v[v != 0]) > 0:
if np.sum(v[v != 0]) != 0:
                    x_std[m] = 1.95 * np.std(v[v != 0]) / np.sqrt(np.sum(v != 0))
"""Copyright (c) 2018, <NAME>
2021, <NAME>"""
import warnings
import numba
import numpy as np
import pandas as pd
def idx_at_times(index_surv, times, steps='pre', assert_sorted=True):
"""Gives index of `index_surv` corresponding to `time`, i.e.
    `index_surv[idx_at_times(index_surv, times)]` gives the values of `index_surv`
    closest to `times`.
Arguments:
index_surv {np.array} -- Durations of survival estimates
times {np.array} -- Values one want to match to `index_surv`
Keyword Arguments:
steps {str} -- Round 'pre' (closest value higher) or 'post'
(closest value lower) (default: {'pre'})
assert_sorted {bool} -- Assert that index_surv is monotone (default: {True})
Returns:
np.array -- Index of `index_surv` that is closest to `times`
"""
if assert_sorted:
assert pd.Series(index_surv).is_monotonic_increasing, "Need 'index_surv' to be monotonic increasing"
if steps == 'pre':
idx = np.searchsorted(index_surv, times)
elif steps == 'post':
idx = np.searchsorted(index_surv, times, side='right') - 1
return idx.clip(0, len(index_surv) - 1)
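# Example (sketch):
#   index_surv = np.array([0., 1., 2.])
#   idx_at_times(index_surv, np.array([0.5]), steps='pre')   # -> array([1])
#   idx_at_times(index_surv, np.array([0.5]), steps='post')  # -> array([0])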
@numba.njit
def _group_loop(n, surv_idx, durations, events, di, ni):
idx = 0
for i in range(n):
idx += durations[i] != surv_idx[idx]
di[idx] += events[i]
ni[idx] += 1
return di, ni
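# _group_loop walks the duration-sorted samples once and, for each unique time
# in `surv_idx`, accumulates the number of events (di) and of observations (ni);
# kaplan_meier below converts ni into at-risk counts (n minus the cumulative
# number of observations seen before each time).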
def kaplan_meier(durations, events, start_duration=0):
"""A very simple Kaplan-Meier fitter. For a more complete implementation
see `lifelines`.
Arguments:
durations {np.array} -- durations array
        events {np.array} -- events array 0/1
Keyword Arguments:
        start_duration {int} -- Time at which the estimates start. (default: {0})
Returns:
pd.Series -- Kaplan-Meier estimates.
"""
n = len(durations)
assert n == len(events)
if start_duration > durations.min():
warnings.warn(f"start_duration {start_duration} is larger than minimum duration {durations.min()}. "
"If intentional, consider changing start_duration when calling kaplan_meier.")
order = np.argsort(durations)
durations = durations[order]
events = events[order]
surv_idx = np.unique(durations)
ni = np.zeros(len(surv_idx), dtype='int')
di = np.zeros_like(ni)
di, ni = _group_loop(n, surv_idx, durations, events, di, ni)
ni = n - ni.cumsum()
ni[1:] = ni[:-1]
ni[0] = n
survive = 1 - di / ni
zero_survive = survive == 0
if zero_survive.any():
        i = np.argmax(zero_survive)
import gc
import math
import logging
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
import torch.nn.functional as F
import pyro
from itertools import combinations
from sklearn.metrics import roc_auc_score, average_precision_score
import pickle
class GAug(object):
def __init__(self, adj_matrix, features, labels, tvt_nids, cuda=-1, hidden_size=128, emb_size=64, n_layers=2, epochs=200, seed=-1, lr=1e-2, weight_decay=5e-4, dropout=0.5, gae=False, beta=0.5, temperature=0.2, log=True, name='debug', warmup=3, gnnlayer_type='gcn', jknet=False, alpha=1, sample_type='add_sample', feat_norm='no', batch_size=15000):
self.lr = lr
self.weight_decay = weight_decay
self.n_epochs = epochs
self.gae = gae
self.beta = beta
self.warmup = warmup
self.feat_norm = feat_norm
self.batch_size = batch_size
# create a logger, logs are saved to GAug-[name].log when name is not None
if log:
self.logger = self.get_logger(name)
else:
# disable logger if wanted
# logging.disable(logging.CRITICAL)
self.logger = logging.getLogger()
# config device (force device to cpu when cuda is not available)
if not torch.cuda.is_available():
cuda = -1
self.device = torch.device(f'cuda:{cuda}' if cuda>=0 else 'cpu')
# log all parameters to keep record
all_vars = locals()
self.log_parameters(all_vars)
# fix random seeds if needed
if seed > 0:
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# load data
self.load_data(adj_matrix, features, labels, tvt_nids, gnnlayer_type)
# setup the model
self.model = GAug_model(self.features.size(1),
hidden_size,
emb_size,
self.out_size,
n_layers,
F.relu,
dropout,
self.device,
gnnlayer_type,
temperature=temperature,
gae=gae,
jknet=jknet,
alpha=alpha,
sample_type=sample_type)
def load_data(self, adj_matrix, features, labels, tvt_nids, gnnlayer_type):
""" preprocess data """
# features (torch.FloatTensor)
if isinstance(features, torch.FloatTensor):
self.features = features
else:
self.features = torch.FloatTensor(features)
# normalize feature matrix if needed
if self.feat_norm == 'row':
self.features = F.normalize(self.features, p=1, dim=1)
elif self.feat_norm == 'col':
self.features = self.col_normalization(self.features)
else:
pass
# original adj_matrix for training vgae (torch.FloatTensor)
assert sp.issparse(adj_matrix)
if not isinstance(adj_matrix, sp.coo_matrix):
adj_matrix = sp.coo_matrix(adj_matrix)
adj_matrix.setdiag(1)
self.adj_orig = sp.csr_matrix(adj_matrix)
# normalized adj_matrix used as input for ep_net (torch.sparse.FloatTensor)
degrees = np.array(adj_matrix.sum(1))
        degree_mat_inv_sqrt = sp.diags(np.power(degrees, -0.5).flatten())
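# Hedged sketch (not part of the original source): the line above is building the
# symmetric GCN normalization D^{-1/2} (A + I) D^{-1/2}. A hypothetical standalone
# helper doing the same computation on a scipy.sparse adjacency that already has
# self-loops on its diagonal could look like this.
def _normalize_adj_sym(adj):
    degrees = np.array(adj.sum(1)).flatten()
    d_inv_sqrt = sp.diags(np.power(degrees, -0.5))
    return d_inv_sqrt @ adj @ d_inv_sqrt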
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from numpy import testing as npt
from ...tests.helper import pytest, assert_quantity_allclose as assert_allclose
from ... import units as u
from ...utils import minversion
"""
These are the tests for coordinate matching.
Note that this requires scipy.
"""
try:
import scipy
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
if HAS_SCIPY and minversion(scipy, '0.12.0', inclusive=False):
OLDER_SCIPY = False
else:
OLDER_SCIPY = True
@pytest.mark.skipif(str('not HAS_SCIPY'))
def test_matching_function():
from .. import ICRS
from ..matching import match_coordinates_3d
#this only uses match_coordinates_3d because that's the actual implementation
cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree)
ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree)
idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog)
npt.assert_array_equal(idx, [3, 1])
    npt.assert_array_almost_equal(d2d.degree, [0, 0.1])
import abc
from dbac_lib import dbac_util
import numpy as np
import pandas as pd
import os
import logging
import json
logger = logging.getLogger(__name__)
DB_NAMES = ['cub200', 'awa2']
_DB_SPLIT_KEYS = ['train_exps', 'test_exps', 'train_imgs', 'val_imgs', 'test_imgs', 'valid_prims',
'train_combs', 'test_combs']
DB_IMAGE_SPLITS = ['discarded', 'train', 'val', 'test']
DB_EXP_SPLITS = ['train', 'test']
DB_COMB_SPLITS = ['train', 'test']
class IDataset(metaclass=abc.ABCMeta):
def __init__(self, name, root_path):
# dataset name
self.name = name
# path to dataset root directory
self.root_path = root_path
# Placeholders
# array of labels names [M]
self.labels_names = None
# array of images path [N]
self.images_path = None
# array of labels [NXM]
self.labels = None
# array of labels group names [G]
self.labels_group_names = None
# array of labels groups [M] => [G]
self.labels_group = None
# Placeholder for the split file
# boolean array of valid primitives [M]
self.valid_primitives = None
# Valid Expression
# array of valid expressions (op, p1, p2) [E]
self.expressions = None
# array of expressions split [E] (0,1)
self.expressions_split = None
# array for split the images in train, val and test [N]
self.images_split = None
# Place holders for combinations
# Combinations of expressions ((),()...) [C]
self.combinations = None
# Combinations splits [C] (0, 1, 2)
self.combinations_split = None
@abc.abstractmethod
def load_split(self, split_file, comb_file=None):
raise NotImplementedError()
@staticmethod
def factory(name, root_path):
db = None
if name == DB_NAMES[0]:
db = CUB200(root_path)
elif name == DB_NAMES[1]:
db = AWA2(root_path)
else:
raise ValueError("Dataset {} in directory {} is not defined.".format(name, root_path))
return db
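# Hedged usage sketch (not part of the original source): the paths below are
# placeholders, and factory() accepts only the names listed in DB_NAMES.
def _example_factory():
    db = IDataset.factory('cub200', '/path/to/CUB_200_2011')
    db.load_split('/path/to/split.json')
    return db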
class CUB200(IDataset):
def __init__(self, root_path):
super().__init__(DB_NAMES[0], root_path)
# read general info
df_att = pd.read_csv(os.path.join(self.root_path, 'attributes/attributes.txt'), sep='\s+',
names=['att_id', 'att_name'])
df_att_ant = pd.read_csv(os.path.join(self.root_path, 'attributes/image_attribute_labels.txt'), sep='\s+',
names=['img_id', 'att_id', 'is_pres', 'cert_id', 'time'])
df_images = pd.read_csv(os.path.join(self.root_path, 'images.txt'), sep='\s+', names=['img_id', 'img_path'])
df_labels = pd.read_csv(os.path.join(self.root_path, 'classes.txt'), sep='\s+', names=['cls_id', 'cls_name'])
df_is_train = pd.read_csv(os.path.join(self.root_path, 'train_test_split.txt'), sep='\s+',
names=['img_id', 'is_train'])
df_data = pd.read_csv(os.path.join(self.root_path, 'image_class_labels.txt'), sep='\s+',
names=['img_id', 'cls_id'])
# merge informations
df_data = pd.merge(df_images, df_data, on='img_id', how='left')
df_data = pd.merge(df_data, df_labels, on='cls_id', how='left')
df_data = pd.merge(df_data, df_is_train, on='img_id', how='left')
df_data_att = pd.merge(df_att_ant, df_att, on='att_id', how='left')
df_data_att = df_data_att.loc[(df_data_att['is_pres'] == 1) & (df_data_att['cert_id'] > 2)]
# Fill placeholders
self.labels_group_names = np.array(['class', 'attribute'], np.str)
self.labels_group = np.hstack([np.ones(df_labels['cls_name'].size, np.int) * 0,
np.ones(df_att['att_name'].size, np.int) * 1])
self.labels_names = np.hstack([df_labels['cls_name'].values.astype(np.str),
df_att['att_name'].values.astype(np.str)])
self.images_path = []
self.labels = np.zeros((df_data.shape[0], self.labels_names.size), np.bool)
for i, (_, row) in enumerate(df_data.iterrows()):
self.images_path.append(os.path.join(self.root_path, 'images', row['img_path']))
labels = list(df_data_att.loc[(df_data_att['img_id'] == row['img_id'])]['att_name'].values)
labels.append(row['cls_name'])
labels = [np.where(self.labels_names == label)[0][0] for label in labels]
self.labels[i, labels] = 1.0
self.images_path = np.array(self.images_path, np.str)
logger.info("Dataset {} with {} images and {} labels read from {}".format(
self.name, self.images_path.size, self.labels_names.size, self.root_path))
def load_split(self, split_file, comb_file=None):
# read json of partition
split_dic = None
with open(split_file, 'r') as f:
split_dic = json.load(f)
# fill placeholders
self.valid_primitives = np.zeros_like(self.labels_names, dtype=np.bool)
self.valid_primitives[split_dic[_DB_SPLIT_KEYS[5]]] = 1
self.expressions = np.vstack([split_dic[_DB_SPLIT_KEYS[0]], split_dic[_DB_SPLIT_KEYS[1]]])
self.expressions_split = np.hstack([np.zeros(len(split_dic[_DB_SPLIT_KEYS[0]]), dtype=np.int),
np.ones(len(split_dic[_DB_SPLIT_KEYS[1]]), dtype=np.int)])
self.images_split = np.zeros(self.images_path.size, dtype=np.int)
self.images_split[split_dic[_DB_SPLIT_KEYS[2]]] = 1
self.images_split[split_dic[_DB_SPLIT_KEYS[3]]] = 2
self.images_split[split_dic[_DB_SPLIT_KEYS[4]]] = 3
if comb_file:
comb_dic = None
with open(comb_file, 'r') as f:
comb_dic = json.load(f)
self.combinations = np.vstack([np.array(comb_dic[_DB_SPLIT_KEYS[6]], dtype=object),
np.array(comb_dic[_DB_SPLIT_KEYS[7]], dtype=object)])
self.combinations_split = np.hstack([0 * np.ones(len(comb_dic[_DB_SPLIT_KEYS[6]]), dtype=np.int),
1 * np.ones(len(comb_dic[_DB_SPLIT_KEYS[7]]), dtype=np.int)])
class AWA2(IDataset):
def __init__(self, root_path):
super().__init__(DB_NAMES[1], root_path)
# Read Informations
df_cls = pd.read_csv(os.path.join(self.root_path, 'classes.txt'), sep='\s+', names=['dummy', 'cls_name'])
df_att = pd.read_csv(os.path.join(self.root_path, 'predicates.txt'), sep='\s+', names=['dummy', 'att_name'])
att_mat = np.loadtxt(os.path.join(self.root_path, 'predicate-matrix-binary.txt'))
images_path, labels = [], []
for label_idx, cls_name in enumerate(df_cls['cls_name']):
for img_path in dbac_util.list_pictures(os.path.join(self.root_path, 'JPEGImages', cls_name)):
images_path.append(img_path)
labels.append(att_mat[label_idx])
# Fill placeholders
self.labels_group_names = np.array(['attribute'], np.str)
self.labels_names = df_att['att_name'].values.astype(np.str)
self.labels = np.vstack(labels).astype(np.bool)
        self.labels_group = np.zeros(self.labels.shape[1], dtype=np.int)
# From Caoxiang's CoilPy
# copied 11 Jan 2021
import numpy as np
class FourSurf(object):
'''
toroidal surface in Fourier representation
R = \sum RBC cos(mu-nv) + RBS sin(mu-nv)
Z = \sum ZBC cos(mu-nv) + ZBS sin(mu-nv)
'''
def __init__(self, xm=[], xn=[], rbc=[], zbs=[], rbs=[], zbc=[]):
"""Initialization with Fourier harmonics.
Parameters:
xm -- list or numpy array, array of m index (default: [])
xn -- list or numpy array, array of n index (default: [])
rbc -- list or numpy array, array of radial cosine harmonics (default: [])
zbs -- list or numpy array, array of z sine harmonics (default: [])
rbs -- list or numpy array, array of radial sine harmonics (default: [])
zbc -- list or numpy array, array of z cosine harmonics (default: [])
"""
self.xm = np.atleast_1d(xm)
self.xn = np.atleast_1d(xn)
self.rbc = np.atleast_1d(rbc)
self.rbs = np.atleast_1d(rbs)
self.zbc = np.atleast_1d(zbc)
self.zbs = np.atleast_1d(zbs)
self.mn = len(self.xn)
return
@classmethod
def read_focus_input(cls, filename, Mpol=9999, Ntor=9999):
"""initialize surface from the FOCUS format input file 'plasma.boundary'
Parameters:
filename -- string, path + name to the FOCUS input boundary file
Mpol -- maximum truncated poloidal mode number (default: 9999)
          Ntor -- maximum truncated toroidal mode number (default: 9999)
Returns:
fourier_surface class
"""
with open(filename, 'r') as f:
line = f.readline() #skip one line
line = f.readline()
num = int(line.split()[0]) #harmonics number
nfp = int(line.split()[1]) #number of field periodicity
nbn = int(line.split()[2]) #number of Bn harmonics
xm = []
xn = []
rbc = []
rbs = []
zbc = []
zbs = []
line = f.readline() #skip one line
line = f.readline() #skip one line
for i in range(num):
line = f.readline()
line_list = line.split()
n = int(line_list[0])
m = int(line_list[1])
if abs(m)>Mpol or abs(n)>Ntor:
continue
xm.append(m)
xn.append(n)
rbc.append(float(line_list[2]))
rbs.append(float(line_list[3]))
zbc.append(float(line_list[4]))
zbs.append(float(line_list[5]))
return cls(xm=np.array(xm), xn=np.array(xn)*nfp,
rbc=np.array(rbc), rbs=np.array(rbs),
zbc=np.array(zbc), zbs=np.array(zbs))
@classmethod
def read_spec_input(cls, filename, Mpol=9999, Ntor=9999):
"""initialize surface from the SPEC input file '*.sp'
Parameters:
filename -- string, path + name to the FOCUS input boundary file
Mpol -- maximum truncated poloidal mode number (default: 9999)
          Ntor -- maximum truncated toroidal mode number (default: 9999)
Returns:
fourier_surface class
"""
import FortranNamelist.namelist as nml
from misc import vmecMN
spec = nml.NamelistFile(filename)
# spec['physicslist'] =
Mpol = min(Mpol, spec['physicslist']['MPOL'])
Ntor = min(Ntor, spec['physicslist']['NTOR'])
xm, xn = vmecMN(Mpol, Ntor)
return
@classmethod
def read_spec_output(cls, spec_out, ns=-1):
"""initialize surface from the ns-th interface SPEC output
Parameters:
spec_out -- SPEC class, SPEC hdf5 results
ns -- integer, the index of SPEC interface (default: -1)
Returns:
fourier_surface class
"""
# check if spec_out is in correct format
#if not isinstance(spec_out, SPEC):
# raise TypeError("Invalid type of input data, should be SPEC type.")
# get required data
xm = spec_out.output.im
xn = spec_out.output.in1
rbc = spec_out.output.Rbc[ns,:]
zbs = spec_out.output.Zbs[ns,:]
if spec_out.input.physics.Istellsym:
# stellarator symmetry enforced
rbs = np.zeros_like(rbc)
zbc = np.zeros_like(rbc)
else:
rbs = spec_out.output.Rbs[ns,:]
zbc = spec_out.output.Zbc[ns,:]
return cls(xm=xm, xn=xn, rbc=rbc, rbs=rbs, zbc=zbc, zbs=zbs)
@classmethod
def read_vmec_output(cls, woutfile, ns=-1):
"""initialize surface from the ns-th interface SPEC output
Parameters:
woutfile -- string, path + name to the wout file from VMEC output
ns -- integer, the index of VMEC nested flux surfaces (default: -1)
Returns:
fourier_surface class
"""
import xarray as ncdata # read netcdf file
vmec = ncdata.open_dataset(woutfile)
xm = vmec['xm'].values
xn = vmec['xn'].values
rmnc = vmec['rmnc'].values
zmns = vmec['zmns'].values
rbc = rmnc[ns,:]
zbs = zmns[ns,:]
if vmec['lasym__logical__'].values:
            # asymmetric equilibrium: non-stellarator-symmetric harmonics are present
zmnc = vmec['zmnc'].values
rmns = vmec['rmns'].values
rbs = rmns[ns,:]
zbc = zmnc[ns,:]
else :
rbs = np.zeros_like(rbc)
zbc = np.zeros_like(rbc)
return cls(xm=xm, xn=xn, rbc=rbc, rbs=rbs, zbc=zbc, zbs=zbs)
@classmethod
def read_winding_surfce(cls, filename, Mpol=9999, Ntor=9999):
"""initialize surface from the NESCOIL format input file 'nescin.xxx'
Parameters:
filename -- string, path + name to the NESCOIL input boundary file
Mpol -- maximum truncated poloidal mode number (default: 9999)
          Ntor -- maximum truncated toroidal mode number (default: 9999)
Returns:
fourier_surface class
"""
with open(filename, 'r') as f:
line = ''
while "phip_edge" not in line:
line = f.readline()
line = f.readline()
nfp = int(line.split()[0])
#print "nfp:",nfp
line = ''
while "Current Surface" not in line:
line = f.readline()
line = f.readline()
line = f.readline()
#print "Number of Fourier modes in coil surface from nescin file: ",line
num = int(line)
xm = []
xn = []
rbc = []
rbs = []
zbc = []
zbs = []
line = f.readline() #skip one line
line = f.readline() #skip one line
for i in range(num):
line = f.readline()
line_list = line.split()
m = int(line_list[0])
n = int(line_list[1])
if abs(m)>Mpol or abs(n)>Ntor:
continue
xm.append(m)
xn.append(n)
rbc.append(float(line_list[2]))
zbs.append(float(line_list[3]))
rbs.append(float(line_list[4]))
zbc.append(float(line_list[5]))
# NESCOIL uses mu+nv, minus sign is added
return cls(xm=np.array(xm), xn=-np.array(xn)*nfp,
rbc=np.array(rbc), rbs=np.array(rbs),
zbc=np.array(zbc), zbs=np.array(zbs))
def rz(self, theta, zeta, normal=False):
""" get r,z position of list of (theta, zeta)
Parameters:
theta -- float array_like, poloidal angle
zeta -- float array_like, toroidal angle value
normal -- logical, calculate the normal vector or not (default: False)
Returns:
r, z -- float array_like
r, z, [rt, zt], [rz, zz] -- if normal
"""
assert len(np.atleast_1d(theta)) == len(np.atleast_1d(zeta)), "theta, zeta should be equal size"
# mt - nz (in matrix)
_mtnz = np.matmul( np.reshape(self.xm, (-1,1)), np.reshape(theta, (1,-1)) ) \
- np.matmul( np.reshape(self.xn, (-1,1)), np.reshape( zeta, (1,-1)) )
_cos = np.cos(_mtnz)
_sin = np.sin(_mtnz)
r = np.matmul( np.reshape(self.rbc, (1,-1)), _cos ) \
+ np.matmul( np.reshape(self.rbs, (1,-1)), _sin )
z = np.matmul( np.reshape(self.zbc, (1,-1)), _cos ) \
+ np.matmul( np.reshape(self.zbs, (1,-1)), _sin )
if not normal :
return (r.ravel(), z.ravel())
else:
rt = np.matmul( np.reshape(self.xm * self.rbc, (1,-1)), -_sin ) \
+ np.matmul( np.reshape(self.xm * self.rbs, (1,-1)), _cos )
zt = np.matmul( np.reshape(self.xm * self.zbc, (1,-1)), -_sin ) \
+ np.matmul( np.reshape(self.xm * self.zbs, (1,-1)), _cos )
rz = np.matmul( np.reshape(-self.xn * self.rbc, (1,-1)), -_sin ) \
+ np.matmul( np.reshape(-self.xn * self.rbs, (1,-1)), _cos )
zz = np.matmul( np.reshape(-self.xn * self.zbc, (1,-1)), -_sin ) \
             + np.matmul( np.reshape(-self.xn * self.zbs, (1,-1)), _cos )
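# Illustrative usage sketch (not part of the original source): a circular-cross-section
# torus, R = R0 + a*cos(theta), Z = a*sin(theta), needs only the (m=0, n=0) and
# (m=1, n=0) harmonics; rbs/zbc are passed explicitly as zeros so all arrays align.
def _example_circular_torus():
    R0, a = 3.0, 1.0
    surf = FourSurf(xm=[0, 1], xn=[0, 0],
                    rbc=[R0, a], zbs=[0.0, a],
                    rbs=[0.0, 0.0], zbc=[0.0, 0.0])
    theta = np.linspace(0.0, 2 * np.pi, 9)
    zeta = np.zeros_like(theta)
    r, z = surf.rz(theta, zeta)  # r == R0 + a*cos(theta), z == a*sin(theta)
    return r, z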
import helpers
import numpy as np
from waveform import Waveform
class Chord:
def __init__(self, root=None, quality=None, name='', notes=None):
'''
Parameters
----------
root : str
The root note of this chord. eg) 'C#4'
quality : str
The quality of this chord. eg) 'major'
notes : list of Note
Used instead of root/quality if given
'''
self.root = root
self.quality = quality
self.name = name
self.notes = []
if notes is None:
# Assign notes using root/quality
root_note = helpers.get_note_by_name(self.root)
for offset in helpers.qualities[self.quality]:
note_number = root_note.number + offset
note = helpers.get_note_by_number(note_number)
self.notes.append(note)
# Generate name if not already given
if name == '':
self.name = f'{root} {quality}'
else:
# Or just use notes if they're given
self.notes = notes
def get_waveform(self, sample_rate=44100, duration=4):
'''
Parameters
----------
sample_rate : int
How many points will represent the waveform per second
duration : float
How long, in seconds, the waveform will be
Returns
-------
Waveform
The waveform for this chord
'''
total_samples = sample_rate * duration
step = duration / total_samples
index = 0
for note in self.notes:
t = np.arange(0, duration, step)
x = np.sin(2 * np.pi * note.frequency * t)
if index == 0:
points = np.sin(x) * note.velocity
else:
                points += np.sin(x) * note.velocity
import numpy as np
from scipy.stats import skew, kurtosis
def calc_stat(arr, stat_name, axis=None):
"""
Parameters:
-----------
arr: ndarray
the input array
stat_name: str
the name of the statistics.
"max", "min", "mean", "var", "std"
axis: int, optional
the axis over which the statistics is calculated
Returns:
--------
out: ndarray
"""
if stat_name == "all":
out = np.array([np.amin(arr, axis), np.amax(arr, axis), np.mean(arr, axis), np.var(arr, axis), np.sum(arr, axis)])
elif stat_name == "min":
out = np.amin(arr, axis)
elif stat_name == "max":
out = np.amax(arr, axis)
elif stat_name == "var":
out = np.var(arr, axis)
elif stat_name == "mean":
out = np.mean(arr, axis)
elif stat_name == "std":
out = np.std(arr, axis)
else: # stat_name == "sum":
out = np.sum(arr, axis)
return out
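# Illustrative usage sketch (not part of the original source).
def _example_calc_stat():
    arr = np.arange(12, dtype=float).reshape(3, 4)
    col_means = calc_stat(arr, "mean", axis=0)  # shape (4,)
    row_stats = calc_stat(arr, "all", axis=1)   # rows: min, max, mean, var, sum
    return col_means, row_stats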
def calc_stats(arr, stat_names, axis=None):
out = []
for s in stat_names:
if s == "all":
out = np.array([np.amin(arr, axis), np.amax(arr, axis), np.mean(arr, axis), np.var(arr, axis), np.sum(arr, axis)])
return out
if s == "moments":
out = [np.mean(arr, axis), np.var(arr, axis)]
if type(axis) == tuple:
sk = np.apply_over_axes(skew, arr, axis)
k = np.apply_over_axes(kurtosis, arr, axis)
out.append(sk.flatten())
out.append(k.flatten())
else:
out.extend([skew(arr, axis), kurtosis(arr, axis)])
return np.array(out)
if s == "min":
out.append(np.amin(arr, axis))
if s == "max":
out.append(np.amax(arr, axis))
if s == "mean":
out.append(np.mean(arr, axis))
if s == "var":
out.append(np.var(arr, axis))
if s == "std":
out.append(np.std(arr, axis))
if s == "sum":
            out.append(np.sum(arr, axis))
import re
from numpy import array, clip, dot, float32
from numpy.linalg import inv
from vispy.app import Canvas, run, Timer, use_app
from vispy.gloo import Program
from vispy.visuals import LinePlotVisual, TextVisual
from functions import functions
from newton_method import newton_method
def unpack_double(double):
vector = array([double, 0.0], dtype=float32)
vector[1] = double - vector[0]
return vector
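# Illustrative sketch (not part of the original source): a float64 value is split
# into a high/low pair of float32s so the shader can emulate double precision;
# summing the two parts in float64 recovers the value far more accurately than a
# single float32 would.
def _example_unpack_double():
    d = 1.0 / 3.0
    hi, lo = unpack_double(d)
    residual = d - (float(hi) + float(lo))  # much smaller than float32 epsilon
    return hi, lo, residual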
class FractalCanvas(Canvas):
@property
def fragment_shader(self):
replacements = {
'#define FUNCTION(z) (VECTOR2(0.0, 0.0))': f'#define FUNCTION(z) ({self.functions[self.function_index].function_gl})',
'#define DERIVATIVE(z) (VECTOR2(1.0, 0.0))': f'#define DERIVATIVE(z) ({self.functions[self.function_index].derivative_gl})',
'#define ROOTS (VECTOR2[](VECTOR2(0.0, 0.0)))': f'#define ROOTS (VECTOR2[]({self.functions[self.function_index].roots_gl}))'
}
return re.compile('|'.join(re.escape(key) for key in replacements.keys())).sub(lambda match: replacements[match.group(0)], self.fragment_shader_template)
@property
def function_info(self):
return f'f(z) = {self.functions[self.function_index].function_py.replace(" ** ", "^").replace("*", "·")}\n' + \
f'f\'(z) = {self.functions[self.function_index].derivative_py.replace(" ** ", "^").replace("*", "·")}'
@property
def pixel_to_complex_transform(self):
return array([
[self.scale / self.size[0], 0.0, self.center[0] - 0.5 * self.scale],
[0.0, -self.scale / self.size[0], 0.5 * self.size[1] / self.size[0] * self.scale + self.center[1]],
[0.0, 0.0, 1.0]])
@property
def complex_to_pixel_transform(self):
return inv(self.pixel_to_complex_transform)
# noinspection PyShadowingNames
def __init__(self, vertex_shader, fragment_shader_template, functions, *args, **kwargs):
super().__init__(*args, **kwargs)
self.vertex_shader = vertex_shader
self.fragment_shader_template = fragment_shader_template
self.functions = functions
self.function_index = 0
self.program = Program(self.vertex_shader, self.fragment_shader)
self.program['position'] = [(-1.0, -1.0), (-1.0, 1.0), (1.0, 1.0), (-1.0, -1.0), (1.0, 1.0), (1.0, -1.0)]
self.program['resolution'] = self.size
self.center = array([0.0, 0.0])
self.program['center'] = array([*unpack_double(self.center[0]), *unpack_double(self.center[1])])
self.center_min, self.center_max = array([-10.0, -10.0]), array([10.0, 10.0])
self.scale = 2.5
self.program['scale'] = unpack_double(self.scale)
self.scale_min, self.scale_max = 10.0 ** -10.0, 10.0 ** 2.0
self.line = LinePlotVisual(array([[-10, -10]]), color='white')
self.position_text = TextVisual('', color='white', font_size=10, anchor_x='right', anchor_y='top')
self.iterations_text = TextVisual('', color='white', font_size=10, anchor_x='left', anchor_y='top')
self.info_text = TextVisual(self.function_info, pos=(5, 5), color='white', font_size=10, anchor_x='left', anchor_y='bottom')
if use_app().backend_name == 'PyQt5':
self._backend.leaveEvent = self.on_mouse_exit
self.timer = Timer(connect=self.update, start=True)
self.show()
def on_draw(self, event):
self.program.draw()
self.line.draw()
self.position_text.draw()
self.iterations_text.draw()
self.info_text.draw()
def on_resize(self, event):
self.program['resolution'] = self.size
self.line.transforms.configure(canvas=self, viewport=(0, 0, *self.size))
self.position_text.transforms.configure(canvas=self, viewport=(0, 0, *self.size))
self.iterations_text.transforms.configure(canvas=self, viewport=(0, 0, *self.size))
self.info_text.transforms.configure(canvas=self, viewport=(0, 0, *self.size))
def on_mouse_exit(self, event):
self.on_mouse_handler('mouse_exit', event)
def on_mouse_move(self, event):
self.on_mouse_handler('mouse_move', event)
def on_mouse_release(self, event):
self.on_mouse_handler('mouse_release', event)
def on_mouse_wheel(self, event):
self.on_mouse_handler('mouse_wheel', event)
def on_mouse_handler(self, event_type, event):
if event_type == 'mouse_move' or event_type == 'mouse_wheel':
if event.type == 'mouse_wheel':
self.zoom(0.9 if event.delta[1] > 0.0 else 1.0 / 0.9, event.pos)
self.newton_method(event.pos)
if event.is_dragging and event.buttons[0] == 1:
new_position_complex = dot(self.pixel_to_complex_transform, array([[event.pos[0]], [event.pos[1]], [1.0]]))
old_position_complex = dot(self.pixel_to_complex_transform, array([[event.last_event.pos[0]], [event.last_event.pos[1]], [1.0]]))
self.translate((new_position_complex - old_position_complex)[:2].flatten())
elif event_type == 'mouse_release':
if event.last_event.is_dragging:
return
old_function_index = self.function_index
self.function_index = (self.function_index + (1 if event.button == 1 else (-1 if event.button == 2 else 0))) % len(self.functions)
new_function_index = self.function_index
if new_function_index != old_function_index:
self.program.set_shaders(vert=self.vertex_shader, frag=self.fragment_shader)
self.newton_method(event.pos)
self.info_text.text = self.function_info
elif event_type == 'mouse_exit':
self.line.set_data(array([[-10, -10]]))
self.position_text.pos = (0, 0)
self.iterations_text.pos = (0, 0)
def newton_method(self, position_pixel):
position_complex = dot(self.pixel_to_complex_transform, array([[position_pixel[0]], [position_pixel[1]], [1.0]]))
z_0 = complex(*position_complex[:2].flatten())
z_n, iterations = newton_method(z=z_0, function_string=self.functions[self.function_index].function_py, derivative_string=self.functions[self.function_index].derivative_py)
# noinspection PyTypeChecker
        self.line.set_data(array([dot(self.complex_to_pixel_transform, array([[z[0]], [z[1]], [1.0]]))[:2].flatten() for z in z_n]))
#! /usr/bin/env python
"""Calculate gradients on a raster grid.
Gradient calculators for raster grids
+++++++++++++++++++++++++++++++++++++
.. autosummary::
~landlab.grid.raster_gradients.calc_grad_at_link
~landlab.grid.raster_gradients.calc_grad_across_cell_faces
~landlab.grid.raster_gradients.calc_grad_across_cell_corners
"""
from collections import deque
import numpy as np
from landlab.core.utils import make_optional_arg_into_id_array, radians_to_degrees
from landlab.grid import gradients
from landlab.utils.decorators import use_field_name_or_array
@use_field_name_or_array("node")
def calc_diff_at_d8(grid, node_values, out=None):
"""Calculate differences of node values over links and diagonals.
Calculates the difference in quantity *node_values* at each link in the
grid.
Parameters
----------
grid : ModelGrid
A ModelGrid.
node_values : ndarray or field name
Values at grid nodes.
out : ndarray, optional
Buffer to hold the result.
Returns
-------
ndarray
Differences across links.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4), xy_spacing=(4, 3))
>>> z = [
... [60.0, 60.0, 60.0, 60.0],
... [60.0, 60.0, 0.0, 0.0],
... [60.0, 0.0, 0.0, 0.0],
... ]
>>> grid.calc_diff_at_d8(z)
array([ 0., 0., 0., 0., 0., -60., -60., 0., -60., 0., 0.,
-60., 0., 0., -60., 0., 0., 0., 0., -60., 0., -60.,
-60., -60., 0., -60., 0., 0., 0.])
LLCATS: LINF GRAD
"""
if out is None:
out = np.empty(grid.number_of_d8)
node_values = np.asarray(node_values)
return np.subtract(
node_values[grid.nodes_at_d8[:, 1]],
node_values[grid.nodes_at_d8[:, 0]],
out=out,
)
@use_field_name_or_array("node")
def calc_diff_at_diagonal(grid, node_values, out=None):
"""Calculate differences of node values over diagonals.
Calculates the difference in quantity *node_values* at each link in the
grid.
Parameters
----------
grid : ModelGrid
A ModelGrid.
node_values : ndarray or field name
Values at grid nodes.
out : ndarray, optional
Buffer to hold the result.
Returns
-------
ndarray
Differences across links.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4), xy_spacing=(4, 3))
>>> z = [
... [5.0, 5.0, 5.0, 5.0],
... [5.0, 5.0, 0.0, 0.0],
... [5.0, 0.0, 0.0, 0.0],
... ]
>>> grid.calc_diff_at_diagonal(z)
array([ 0., 0., -5., 0., -5., -5., -5., 0., -5., 0., 0., 0.])
LLCATS: LINF GRAD
"""
if out is None:
out = np.empty(grid.number_of_diagonals)
node_values = np.asarray(node_values)
return np.subtract(
node_values[grid.nodes_at_diagonal[:, 1]],
node_values[grid.nodes_at_diagonal[:, 0]],
out=out,
)
def calc_grad_at_d8(grid, node_values, out=None):
"""Calculate gradients over all diagonals and links.
Parameters
----------
grid : RasterModelGrid
A grid.
node_values : array_like or field name
Values at nodes.
out : ndarray, optional
Buffer to hold result. If `None`, create a new array.
Examples
--------
>>> from landlab import RasterModelGrid
>>> import numpy as np
>>> grid = RasterModelGrid((3, 4), xy_spacing=(4, 3))
>>> z = [
... [60.0, 60.0, 60.0, 60.0],
... [60.0, 60.0, 0.0, 0.0],
... [60.0, 0.0, 0.0, 0.0],
... ]
>>> grid.calc_grad_at_d8(z)
array([ 0., 0., 0., 0., 0., -20., -20., 0., -15., 0., 0.,
-20., 0., 0., -15., 0., 0., 0., 0., -12., 0., -12.,
-12., -12., 0., -12., 0., 0., 0.])
LLCATS: LINF GRAD
"""
grads = calc_diff_at_d8(grid, node_values, out=out)
grads /= grid.length_of_d8[: grid.number_of_d8]
return grads
def calc_grad_at_diagonal(grid, node_values, out=None):
"""Calculate gradients over all diagonals.
Parameters
----------
grid : RasterModelGrid
A grid.
node_values : array_like or field name
Values at nodes.
out : ndarray, optional
Buffer to hold result. If `None`, create a new array.
Examples
--------
>>> from landlab import RasterModelGrid
>>> import numpy as np
>>> grid = RasterModelGrid((3, 4), xy_spacing=(4, 3))
>>> z = [
... [5.0, 5.0, 5.0, 5.0],
... [5.0, 5.0, 0.0, 0.0],
... [5.0, 0.0, 0.0, 0.0],
... ]
>>> grid.calc_grad_at_diagonal(z)
array([ 0., 0., -1., 0., -1., -1., -1., 0., -1., 0., 0., 0.])
LLCATS: LINF GRAD
"""
grads = calc_diff_at_diagonal(grid, node_values, out=out)
grads /= grid.length_of_diagonal[: grid.number_of_diagonals]
return grads
@use_field_name_or_array("node")
def calc_grad_at_link(grid, node_values, out=None):
"""Calculate gradients in node_values at links.
Parameters
----------
grid : RasterModelGrid
A grid.
node_values : array_like or field name
Values at nodes.
out : ndarray, optional
Buffer to hold result. If `None`, create a new array.
Returns
-------
ndarray
Gradients of the nodes values for each link.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 3))
>>> node_values = [0., 0., 0.,
... 1., 3., 1.,
... 2., 2., 2.]
>>> grid.calc_grad_at_link(node_values)
array([ 0., 0., 1., 3., 1., 2., -2., 1., -1., 1., 0., 0.])
>>> out = np.empty(grid.number_of_links, dtype=float)
>>> rtn = grid.calc_grad_at_link(node_values, out=out)
>>> rtn is out
True
>>> out
array([ 0., 0., 1., 3., 1., 2., -2., 1., -1., 1., 0., 0.])
>>> grid = RasterModelGrid((3, 3), xy_spacing=(2, 1))
>>> grid.calc_grad_at_link(node_values)
array([ 0., 0., 1., 3., 1., 1., -1., 1., -1., 1., 0., 0.])
>>> _ = grid.add_field("elevation", node_values, at="node")
>>> grid.calc_grad_at_link('elevation')
array([ 0., 0., 1., 3., 1., 1., -1., 1., -1., 1., 0., 0.])
LLCATS: LINF GRAD
"""
grads = gradients.calc_diff_at_link(grid, node_values, out=out)
grads /= grid.length_of_link[: grid.number_of_links]
return grads
@use_field_name_or_array("node")
def calc_grad_across_cell_faces(grid, node_values, *args, **kwds):
"""calc_grad_across_cell_faces(grid, node_values, [cell_ids], out=None)
Get gradients across the faces of a cell.
Calculate gradient of the value field provided by *node_values* across
each of the faces of the cells of a grid. The returned gradients are
ordered as right, top, left, and bottom.
Note that the returned gradients are masked to exclude neighbor nodes which
are closed. Beneath the mask is the value -1.
Parameters
----------
grid : RasterModelGrid
Source grid.
node_values : array_like or field name
Quantity to take the gradient of defined at each node.
cell_ids : array_like, optional
If provided, cell ids to measure gradients. Otherwise, find gradients
for all cells.
out : array_like, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
Returns
-------
(N, 4) Masked ndarray
Gradients for each face of the cell.
Examples
--------
Create a grid with two cells.
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> x = np.array([0., 0., 0., 0.,
... 0., 0., 1., 1.,
... 3., 3., 3., 3.])
A decrease in quantity across a face is a negative gradient.
>>> grid.calc_grad_across_cell_faces(x) # doctest: +NORMALIZE_WHITESPACE
masked_array(data =
[[ 1. 3. 0. 0.]
[ 0. 2. -1. -1.]],
mask =
False,
fill_value = 1e+20)
>>> grid = RasterModelGrid((3, 4), xy_spacing=(1, 2))
>>> grid.calc_grad_across_cell_faces(x) # doctest: +NORMALIZE_WHITESPACE
masked_array(data =
[[ 1. 1.5 0. 0. ]
[ 0. 1. -1. -0.5]],
mask =
False,
fill_value = 1e+20)
LLCATS: FINF GRAD
"""
padded_node_values = np.empty(node_values.size + 1, dtype=float)
padded_node_values[-1] = grid.BAD_INDEX
padded_node_values[:-1] = node_values
cell_ids = make_optional_arg_into_id_array(grid.number_of_cells, *args)
node_ids = grid.node_at_cell[cell_ids]
neighbors = grid.active_adjacent_nodes_at_node[node_ids]
if grid.BAD_INDEX != -1:
neighbors = np.where(neighbors == grid.BAD_INDEX, -1, neighbors)
values_at_neighbors = padded_node_values[neighbors]
masked_neighbor_values = np.ma.array(
values_at_neighbors, mask=neighbors == grid.BAD_INDEX
)
values_at_nodes = node_values[node_ids].reshape(len(node_ids), 1)
out = np.subtract(masked_neighbor_values, values_at_nodes, **kwds)
out[:, (0, 2)] /= grid.dx
out[:, (1, 3)] /= grid.dy
return out
@use_field_name_or_array("node")
def calc_grad_across_cell_corners(grid, node_values, *args, **kwds):
"""calc_grad_across_cell_corners(grid, node_values, [cell_ids], out=None)
Get gradients to diagonally opposite nodes.
Calculate gradient of the value field provided by *node_values* to
the values at diagonally opposite nodes. The returned gradients are
ordered as upper-right, upper-left, lower-left and lower-right.
Parameters
----------
grid : RasterModelGrid
Source grid.
node_values : array_like or field name
Quantity to take the gradient of defined at each node.
cell_ids : array_like, optional
If provided, cell ids to measure gradients. Otherwise, find gradients
for all cells.
out : array_like, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
Returns
-------
(N, 4) ndarray
Gradients to each diagonal node.
Examples
--------
Create a grid with two cells.
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> x = np.array([1., 0., 0., 1.,
... 0., 0., 1., 1.,
... 3., 3., 3., 3.])
A decrease in quantity to a diagonal node is a negative gradient.
>>> from math import sqrt
>>> grid.calc_grad_across_cell_corners(x) * sqrt(2.)
array([[ 3., 3., 1., 0.],
[ 2., 2., -1., 0.]])
>>> grid = RasterModelGrid((3, 4), xy_spacing=(4, 3))
>>> grid.calc_grad_across_cell_corners(x)
array([[ 0.6, 0.6, 0.2, 0. ],
[ 0.4, 0.4, -0.2, 0. ]])
LLCATS: CNINF GRAD
"""
cell_ids = make_optional_arg_into_id_array(grid.number_of_cells, *args)
node_ids = grid.node_at_cell[cell_ids]
values_at_diagonals = node_values[grid.diagonal_adjacent_nodes_at_node[node_ids]]
values_at_nodes = node_values[node_ids].reshape(len(node_ids), 1)
out = np.subtract(values_at_diagonals, values_at_nodes, **kwds)
np.divide(out, np.sqrt(grid.dy**2.0 + grid.dx**2.0), out=out)
return out
@use_field_name_or_array("node")
def calc_grad_along_node_links(grid, node_values, *args, **kwds):
"""calc_grad_along_node_links(grid, node_values, [cell_ids], out=None)
Get gradients along links touching a node.
Calculate gradient of the value field provided by *node_values* across
each of the faces of the nodes of a grid. The returned gradients are
ordered as right, top, left, and bottom. All returned values follow our
standard sign convention, where a link pointing N or E and increasing in
value is positive, a link pointing S or W and increasing in value is
negative.
Note that the returned gradients are masked to exclude neighbor nodes which
are closed. Beneath the mask is the value -1.
Parameters
----------
grid : RasterModelGrid
Source grid.
node_values : array_like or field name
Quantity to take the gradient of defined at each node.
node_ids : array_like, optional
If provided, node ids to measure gradients. Otherwise, find gradients
for all nodes.
out : array_like, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
Returns
-------
(N, 4) Masked ndarray
Gradients for each link of the node. Ordering is E,N,W,S.
Examples
--------
Create a grid with nine nodes.
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 3))
>>> x = np.array([0., 0., 0.,
... 0., 1., 2.,
... 2., 2., 2.])
A decrease in quantity across a face is a negative gradient.
>>> grid.calc_grad_along_node_links(x) # doctest: +NORMALIZE_WHITESPACE
masked_array(data =
[[-- -- -- --]
[-- 1.0 -- --]
[-- -- -- --]
[1.0 -- -- --]
[1.0 1.0 1.0 1.0]
[-- -- 1.0 --]
[-- -- -- --]
[-- -- -- 1.0]
[-- -- -- --]],
mask =
[[ True True True True]
[ True False True True]
[ True True True True]
[False True True True]
[False False False False]
[ True True False True]
[ True True True True]
[ True True True False]
[ True True True True]],
fill_value = 1e+20)
>>> grid = RasterModelGrid((3, 3), xy_spacing=(4, 2))
>>> grid.calc_grad_along_node_links(x) # doctest: +NORMALIZE_WHITESPACE
masked_array(data =
[[-- -- -- --]
[-- 0.5 -- --]
[-- -- -- --]
[0.25 -- -- --]
[0.25 0.5 0.25 0.5]
[-- -- 0.25 --]
[-- -- -- --]
[-- -- -- 0.5]
[-- -- -- --]],
mask =
[[ True True True True]
[ True False True True]
[ True True True True]
[False True True True]
[False False False False]
[ True True False True]
[ True True True True]
[ True True True False]
[ True True True True]],
fill_value = 1e+20)
LLCATS: NINF LINF GRAD
"""
padded_node_values = np.empty(node_values.size + 1, dtype=float)
padded_node_values[-1] = grid.BAD_INDEX
padded_node_values[:-1] = node_values
node_ids = make_optional_arg_into_id_array(grid.number_of_nodes, *args)
neighbors = grid.active_adjacent_nodes_at_node[node_ids]
values_at_neighbors = padded_node_values[neighbors]
masked_neighbor_values = np.ma.array(
values_at_neighbors, mask=values_at_neighbors == grid.BAD_INDEX
)
values_at_nodes = node_values[node_ids].reshape(len(node_ids), 1)
out = np.ma.empty_like(masked_neighbor_values, dtype=float)
np.subtract(masked_neighbor_values[:, :2], values_at_nodes, out=out[:, :2], **kwds)
np.subtract(values_at_nodes, masked_neighbor_values[:, 2:], out=out[:, 2:], **kwds)
out[:, (0, 2)] /= grid.dx
out[:, (1, 3)] /= grid.dy
return out
def calc_unit_normals_at_cell_subtriangles(grid, elevs="topographic__elevation"):
"""Calculate unit normals on a cell.
Calculate the eight unit normal vectors <a, b, c> to the eight
subtriangles of a four-cornered (raster) cell.
Parameters
----------
grid : RasterModelGrid
A grid.
elevs : str or ndarray, optional
Field name or array of node values.
Returns
-------
(n_ENE, n_NNE, n_NNW, n_WNW, n_WSW, n_SSW, n_SSE, n_ESE) :
each a num-cells x length-3 array
Len-8 tuple of the eight unit normal vectors <a, b, c> for the eight
subtriangles in the cell. Order is from north of east, counter
clockwise to south of east (East North East, North North East, North
North West, West North West, West South West, South South West, South
South East, East South East).
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((3, 3))
>>> z = mg.node_x ** 2
>>> eight_tris = mg.calc_unit_normals_at_cell_subtriangles(z)
>>> type(eight_tris) is tuple
True
>>> len(eight_tris)
8
>>> eight_tris[0].shape == (mg.number_of_cells, 3)
True
>>> eight_tris # doctest: +NORMALIZE_WHITESPACE
(array([[-0.9486833 , 0. , 0.31622777]]),
array([[-0.9486833 , 0. , 0.31622777]]),
array([[-0.70710678, 0. , 0.70710678]]),
array([[-0.70710678, 0. , 0.70710678]]),
array([[-0.70710678, 0. , 0.70710678]]),
array([[-0.70710678, 0. , 0.70710678]]),
array([[-0.9486833 , 0. , 0.31622777]]),
array([[-0.9486833 , 0. , 0.31622777]]))
LLCATS: CINF GRAD
"""
    # identify the grid neighbors at each location
node_at_cell = grid.node_at_cell
# calculate unit normals at all nodes.
(
n_ENE,
n_NNE,
n_NNW,
n_WNW,
n_WSW,
n_SSW,
n_SSE,
n_ESE,
) = _calc_subtriangle_unit_normals_at_node(grid, elevs=elevs)
# return only those at cell.
return (
n_ENE[node_at_cell, :],
n_NNE[node_at_cell, :],
n_NNW[node_at_cell, :],
n_WNW[node_at_cell, :],
n_WSW[node_at_cell, :],
n_SSW[node_at_cell, :],
n_SSE[node_at_cell, :],
n_ESE[node_at_cell, :],
)
def _calc_subtriangle_unit_normals_at_node(grid, elevs="topographic__elevation"):
"""Private Function: Calculate unit normals on subtriangles at all nodes.
Calculate the eight unit normal vectors <a, b, c> to the eight
subtriangles of a four-cornered (raster) cell. Unlike
calc_unit_normals_at_node_subtriangles, this function also
calculated unit normals at the degenerate part-cells around the
boundary.
On the grid boundaries where the cell is not fully defined, the unit normal
is given as <nan, nan, nan>.
Parameters
----------
grid : RasterModelGrid
A grid.
elevs : str or ndarray, optional
Field name or array of node values.
Returns
-------
(n_ENE, n_NNE, n_NNW, n_WNW, n_WSW, n_SSW, n_SSE, n_ESE) :
each a num-nodes x length-3 array
Len-8 tuple of the eight unit normal vectors <a, b, c> for the eight
subtriangles in the cell. Order is from north of east, counter
clockwise to south of east (East North East, North North East, North
North West, West North West, West South West, South South West, South
South East, East South East).
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.grid.raster_gradients import(
... _calc_subtriangle_unit_normals_at_node
... )
>>> mg = RasterModelGrid((3, 3))
>>> z = mg.node_x ** 2
>>> eight_tris = _calc_subtriangle_unit_normals_at_node(mg, z)
>>> type(eight_tris) is tuple
True
>>> len(eight_tris)
8
>>> eight_tris[0].shape == (mg.number_of_nodes, 3)
True
>>> eight_tris[0] # doctest: +NORMALIZE_WHITESPACE
array([[-0.70710678, 0. , 0.70710678],
[-0.9486833 , 0. , 0.31622777],
[ nan, nan, nan],
[-0.70710678, 0. , 0.70710678],
[-0.9486833 , 0. , 0.31622777],
[ nan, nan, nan],
[ nan, nan, nan],
[ nan, nan, nan],
[ nan, nan, nan]])
LLCATS: CINF GRAD
"""
try:
z = grid.at_node[elevs]
except TypeError:
z = elevs
# cell has center node I
# orthogonal neighbors P, R, T, V, counter clockwise from East
    # diagonal neighbors Q, S, U, W, counter clockwise from North East
# There are 8 subtriangles that can be defined with the following corners
# (starting from the central node, and progressing counter-clockwise).
# ENE: IPQ
# NNE: IQR
# NNW: IRS
# WNW: IST
# WSW: ITU
# SSW: IUV
# SSE: IVW
# ESE: IWP
# There are thus 8 vectors, IP, IQ, IR, IS, IT, IU, IV, IW
# initialized difference matricies for cross product
diff_xyz_IP = np.empty((grid.number_of_nodes, 3)) # East
# ^this is the vector (xP-xI, yP-yI, zP-yI)
diff_xyz_IQ = np.empty((grid.number_of_nodes, 3)) # Northeast
diff_xyz_IR = np.empty((grid.number_of_nodes, 3)) # North
diff_xyz_IS = np.empty((grid.number_of_nodes, 3)) # Northwest
diff_xyz_IT = np.empty((grid.number_of_nodes, 3)) # West
diff_xyz_IU = np.empty((grid.number_of_nodes, 3)) # Southwest
diff_xyz_IV = np.empty((grid.number_of_nodes, 3)) # South
diff_xyz_IW = np.empty((grid.number_of_nodes, 3)) # Southeast
    # identify the grid neighbors at each location
node_at_cell = np.arange(grid.number_of_nodes)
P = grid.adjacent_nodes_at_node[node_at_cell, 0]
Q = grid.diagonal_adjacent_nodes_at_node[node_at_cell, 0]
R = grid.adjacent_nodes_at_node[node_at_cell, 1]
S = grid.diagonal_adjacent_nodes_at_node[node_at_cell, 1]
T = grid.adjacent_nodes_at_node[node_at_cell, 2]
U = grid.diagonal_adjacent_nodes_at_node[node_at_cell, 2]
V = grid.adjacent_nodes_at_node[node_at_cell, 3]
W = grid.diagonal_adjacent_nodes_at_node[node_at_cell, 3]
# get x, y, z coordinates for each location
x_I = grid.node_x[node_at_cell]
y_I = grid.node_y[node_at_cell]
z_I = z[node_at_cell]
x_P = grid.node_x[P]
y_P = grid.node_y[P]
z_P = z[P]
x_Q = grid.node_x[Q]
y_Q = grid.node_y[Q]
z_Q = z[Q]
x_R = grid.node_x[R]
y_R = grid.node_y[R]
z_R = z[R]
x_S = grid.node_x[S]
y_S = grid.node_y[S]
z_S = z[S]
x_T = grid.node_x[T]
y_T = grid.node_y[T]
z_T = z[T]
x_U = grid.node_x[U]
y_U = grid.node_y[U]
z_U = z[U]
x_V = grid.node_x[V]
y_V = grid.node_y[V]
z_V = z[V]
x_W = grid.node_x[W]
y_W = grid.node_y[W]
z_W = z[W]
# calculate vectors by differencing
diff_xyz_IP[:, 0] = x_P - x_I
diff_xyz_IP[:, 1] = y_P - y_I
diff_xyz_IP[:, 2] = z_P - z_I
diff_xyz_IQ[:, 0] = x_Q - x_I
diff_xyz_IQ[:, 1] = y_Q - y_I
diff_xyz_IQ[:, 2] = z_Q - z_I
diff_xyz_IR[:, 0] = x_R - x_I
diff_xyz_IR[:, 1] = y_R - y_I
diff_xyz_IR[:, 2] = z_R - z_I
diff_xyz_IS[:, 0] = x_S - x_I
diff_xyz_IS[:, 1] = y_S - y_I
diff_xyz_IS[:, 2] = z_S - z_I
diff_xyz_IT[:, 0] = x_T - x_I
diff_xyz_IT[:, 1] = y_T - y_I
diff_xyz_IT[:, 2] = z_T - z_I
diff_xyz_IU[:, 0] = x_U - x_I
diff_xyz_IU[:, 1] = y_U - y_I
diff_xyz_IU[:, 2] = z_U - z_I
diff_xyz_IV[:, 0] = x_V - x_I
diff_xyz_IV[:, 1] = y_V - y_I
diff_xyz_IV[:, 2] = z_V - z_I
diff_xyz_IW[:, 0] = x_W - x_I
diff_xyz_IW[:, 1] = y_W - y_I
diff_xyz_IW[:, 2] = z_W - z_I
# calculate cross product to get unit normal
# cross product is orthogonal to both vectors, and is the normal
# n = <a, b, c>, where plane is ax + by + cz = d
nhat_ENE = np.cross(diff_xyz_IP, diff_xyz_IQ) # <a, b, c>
nhat_NNE = np.cross(diff_xyz_IQ, diff_xyz_IR)
nhat_NNW = np.cross(diff_xyz_IR, diff_xyz_IS)
nhat_WNW = np.cross(diff_xyz_IS, diff_xyz_IT)
nhat_WSW = np.cross(diff_xyz_IT, diff_xyz_IU)
nhat_SSW = np.cross(diff_xyz_IU, diff_xyz_IV)
nhat_SSE = np.cross(diff_xyz_IV, diff_xyz_IW)
    nhat_ESE = np.cross(diff_xyz_IW, diff_xyz_IP)
import scipy.io
import scipy.stats
import numpy as np
from EasyTL import EasyTL
import time
if __name__ == "__main__":
datadir = r"D:\Datasets\EasyTL\amazon_review"
str_domain = ["books", "dvd", "elec", "kitchen"]
list_acc = []
for i in range(len(str_domain)):
for j in range(len(str_domain)):
if i == j:
continue
print("{} - {}".format(str_domain[i], str_domain[j]))
mat1 = scipy.io.loadmat(datadir + "/{}_400.mat".format(str_domain[i]))
Xs = mat1["fts"]
Ys = mat1["labels"]
mat2 = scipy.io.loadmat(datadir + "/{}_400.mat".format(str_domain[j]))
Xt = mat2["fts"]
Yt = mat2["labels"]
Ys += 1
Yt += 1
Xs = Xs / np.tile(np.sum(Xs,axis=1).reshape(-1,1), [1, Xs.shape[1]])
Xs = scipy.stats.mstats.zscore(Xs);
Xt = Xt / np.tile(np.sum(Xt,axis=1).reshape(-1,1), [1, Xt.shape[1]])
Xt = scipy.stats.mstats.zscore(Xt);
Xs[np.isnan(Xs)] = 0
Xt[np.isnan(Xt)] = 0
t0 = time.time()
Acc1, _ = EasyTL(Xs,Ys,Xt,Yt,"raw")
t1 = time.time()
print("Time Elapsed: {:.2f} sec".format(t1 - t0))
Acc2, _ = EasyTL(Xs,Ys,Xt,Yt)
t2 = time.time()
print("Time Elapsed: {:.2f} sec".format(t2 - t1))
print('EasyTL(c) Acc: {:.1f} % || EasyTL Acc: {:.1f} %'.format(Acc1*100, Acc2*100))
list_acc.append([Acc1,Acc2])
acc = np.array(list_acc)
    avg = np.mean(acc, axis=0)
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from pyiree.tf.support import tf_test_utils
import tensorflow.compat.v2 as tf
class Conv2dModule(tf.Module):
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 1], tf.float32),
tf.TensorSpec([1, 1, 1, 1], tf.float32),
])
def conv2d_1451x1111_valid(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf.function(input_signature=[
tf.TensorSpec([2, 4, 5, 1], tf.float32),
tf.TensorSpec([1, 1, 1, 1], tf.float32),
])
def conv2d_2451x1111_valid(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 1], tf.float32),
tf.TensorSpec([2, 3, 1, 1], tf.float32),
])
def conv2d_1451x2311_valid(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 1], tf.float32),
tf.TensorSpec([2, 3, 1, 1], tf.float32),
])
def conv2d_1451x2311_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([2, 4, 5, 1], tf.float32),
tf.TensorSpec([2, 3, 1, 1], tf.float32),
])
def conv2d_2451x2311_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 2], tf.float32),
tf.TensorSpec([3, 2, 2, 1], tf.float32),
])
def conv2d_1452x3221_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 1], tf.float32),
tf.TensorSpec([1, 1, 1, 2], tf.float32),
])
def conv2d_1451x1112_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 2], tf.float32),
tf.TensorSpec([1, 1, 2, 2], tf.float32),
])
def conv2d_1452x1122_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 2], tf.float32),
tf.TensorSpec([2, 2, 2, 3], tf.float32),
])
def conv2d_1452x2223_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 2], tf.float32),
tf.TensorSpec([2, 2, 2, 3], tf.float32),
])
def conv2d_1452x2223_valid(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf.function(input_signature=[
tf.TensorSpec([2, 4, 5, 2], tf.float32),
tf.TensorSpec([2, 2, 2, 3], tf.float32),
])
def conv2d_2452x2223_valid(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf_test_utils.compile_module(Conv2dModule)
class ConvTest(tf_test_utils.SavedModelTestCase):
def test_id_batch_size_1(self):
i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])
k = np.ones([1, 1, 1, 1], dtype=np.float32)
r = self.get_module().conv2d_1451x1111_valid(i, k)
r.print().assert_all_close()
def test_id_batch_size_2(self):
i = np.arange(40, dtype=np.float32).reshape([2, 4, 5, 1])
k = np.ones([1, 1, 1, 1], dtype=np.float32)
r = self.get_module().conv2d_2451x1111_valid(i, k)
r.print().assert_all_close()
def test_asym_kernel(self):
i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])
k = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)
r = self.get_module().conv2d_1451x2311_valid(i, k)
r.print().assert_all_close()
def test_padding(self):
i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])
k = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)
r = self.get_module().conv2d_1451x2311_same(i, k)
r.print().assert_all_close()
def test_batched_padding(self):
i = np.arange(40, dtype=np.float32).reshape([2, 4, 5, 1])
k = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)
r = self.get_module().conv2d_2451x2311_same(i, k)
r.print().assert_all_close()
def test_feature_reduce(self):
i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])
k = np.ones([3, 2, 2, 1], dtype=np.float32)
r = self.get_module().conv2d_1452x3221_same(i, k)
r.print().assert_all_close()
def test_feature_inflate(self):
i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])
k = np.arange(2, dtype=np.float32).reshape([1, 1, 1, 2])
r = self.get_module().conv2d_1451x1112_same(i, k)
r.print().assert_all_close()
def test_feature_mix(self):
    i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])
"""
CMSC733 Spring 2019: Classical and Deep Learning Approaches for
Geometric Computer Vision
Project 1: Autopano
Author(s):
<NAME> (<EMAIL>)
Graduate Student in M.Eng Robotics,
University of Maryland, College Park
<NAME> (<EMAIL>)
Graduate Student in M.Eng Robotics,
University of Maryland, College Park
"""
# Dependencies:
# opencv, do (pip install opencv-python)
# skimage, do (apt install python-skimage)
import cv2
import os
import sys
import glob
import random
from skimage import data, exposure, img_as_float
import matplotlib.pyplot as plt
from Network.Network import *
from Misc.MiscUtils import *
from Misc.DataUtils import *
from Wrapper import *
import numpy as np
import pandas as pd
import time
import argparse
import shutil
import string
import math as m
from tqdm import tqdm
from Misc.TFSpatialTransformer import *
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# Don't generate pyc codes
sys.dont_write_bytecode = True
'''-------------GPU Verification-------------'''
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
print("\n-->> TotalGPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
if (len(tf.config.experimental.list_physical_devices('GPU')) > 0) :
print("\n<<<<<<<<<<--------------------Preparing To Run on GPU-------------------->>>>>>>>>>")
else:
print("\n<<<<<<<<<<--------------------NO GPU FOUND !!!!-------------------->>>>>>>>>>")
def GenerateBatch(BasePath, DirNamesTrain, TrainLabels, ImageSize, MiniBatchSize, NumTestSamples):
"""
Inputs:
BasePath - Path to COCO folder without "/" at the end
DirNamesTrain - Variable with Subfolder paths to train files
NOTE that Train can be replaced by Val/Test for generating batch corresponding to validation (held-out testing in this case)/testing
TrainLabels - Labels corresponding to Train
NOTE that TrainLabels can be replaced by Val/TestLabels for generating batch corresponding to validation (held-out testing in this case)/testing
ImageSize - Size of the Image
MiniBatchSize is the size of the MiniBatch
    Outputs:
    patch_batches - Batch of stacked (Patch A, Patch B) pairs
    corner_1_batches - Batch of Patch A corner coordinates
    patch_2_batches - Batch of Patch B images
    image_IA_batches - Batch of original images I_A
    p_indices_batch - Batch of pixel-index grids for each patch
    """
image_IA_batches = []
corner_1_batches = []
patch_2_batches = []
patch_batches = []
for n in range(NumTestSamples):
index = n
p_1_dir = BasePath + os.sep + "Patch_A/" + DirNamesTrain[index, 0]
p_2_dir = BasePath + os.sep + "Patch_B/" + DirNamesTrain[index, 0]
image_IA_dir = BasePath + os.sep + "Image_IA/" + DirNamesTrain[index, 0]
p_1 = cv2.imread(p_1_dir, cv2.IMREAD_GRAYSCALE)
p_2 = cv2.imread(p_2_dir, cv2.IMREAD_GRAYSCALE)
image_IA = cv2.imread(image_IA_dir, cv2.IMREAD_GRAYSCALE)
if(p_1 is None) or (p_2 is None):
print("\nPatch empty moving on ..")
continue
p_1 = np.float32(p_1)
p_2 = np.float32(p_2)
image_IA = np.float32(image_IA)
p_pair = np.dstack((p_1, p_2))
o_corner = TrainLabels[index, :, :, 0]
patch_batches.append(p_pair)
corner_1_batches.append(o_corner)
patch_2_batches.append(p_2.reshape(128, 128, 1))
image_IA_batches.append(image_IA.reshape(image_IA.shape[0], image_IA.shape[1], 1))
patch_batches = np.array(patch_batches)
corner_1_batches = np.array(corner_1_batches)
patch_2_batches = np.array(patch_2_batches)
image_IA_batches = np.array(image_IA_batches)
p_indices_batch = []
for i in range(corner_1_batches.shape[0]):
x_min, y_min = corner_1_batches[i, 0, 0], corner_1_batches[i, 0, 1]
x_max, y_max = corner_1_batches[i, 3, 0], corner_1_batches[i, 3, 1]
X_, Y_ = np.mgrid[x_min : x_max, y_min : y_max]
p_indices_batch.append(np.dstack((Y_, X_)))
return patch_batches, corner_1_batches, patch_2_batches, image_IA_batches, p_indices_batch
def testUnsupervised(ImgPH, LabelPH, DirNamesTrain, ImageSize, TrainLabels, CornerPH, Patch2PH, patchIndicesPH, SavePath, ModelPath, BasePath, NumTestSamples):
if(not (os.path.isdir(SavePath))):
print(SavePath, "\nCreating Results Dir ...")
os.makedirs(SavePath)
_, h_4_batch, _ = unsuperHomographyModel(LabelPH, ImgPH, CornerPH, patchIndicesPH)
Saver = tf.train.Saver()
with tf.Session() as sess:
Saver.restore(sess, ModelPath)
print('Number of parameters in this model are %d ' % np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))
patch_batches, corner_1_batches, _, image_IA_batches, p_indices_batch = GenerateBatch(BasePath, DirNamesTrain, TrainLabels, ImageSize, _, NumTestSamples)
FeedDict = {LabelPH: patch_batches, CornerPH: corner_1_batches, ImgPH: image_IA_batches, patchIndicesPH: p_indices_batch}
pred_h = sess.run(h_4_batch, FeedDict)
np.save(SavePath+"predicted_H.npy", pred_h)
def testSupervised(LabelPH, ModelPath, SavePath):
"""
    Inputs:
    LabelPH is the stacked patch-pair placeholder
    ModelPath - Path to load trained model from
    SavePath - Path to save the corner visualization
    Outputs:
    Visualization of ground-truth and predicted corners written to SavePath
"""
image_IA_dir= "../Data/Val/"
image_name = os.listdir(image_IA_dir)
ran_index = random.randint(0, len(image_name) - 1)
a = image_name[ran_index]
ad = cv2.imread(image_IA_dir + a, 0)
random_Image = cv2.resize(ad, (320, 240), interpolation = cv2.INTER_AREA)
pA, pB, _, _, coors = patchCreation(random_Image)
patch_batch = np.dstack((pA, pB))
h_4_points = superHomographyModel(LabelPH)
Saver = tf.train.Saver()
with tf.Session() as sess:
Saver.restore(sess, ModelPath)
print('Number of parameters in this model are %d ' % np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))
patch_batch = np.array(patch_batch).reshape(1, 128, 128, 2)
# patch_batch = tf.reshape(patch_batch, shape=(1, 128, 128, 2))
FeedDict = {LabelPH: patch_batch}
Predicted = sess.run(h_4_points, FeedDict)
pA_new = coors[0] + Predicted.reshape(4, 2)
h_4_points_new = coors[1] - pA_new
cv2.polylines(ad, np.int32([coors[0]]), True,(0, 255, 0), 3)
cv2.polylines(ad, np.int32([coors[1]]), True,(255, 0, 0), 5)
cv2.polylines(ad, np.int32([pA_new]), True,(0, 0, 255), 5)
plt.figure()
plt.imshow(ad)
plt.show()
cv2.imwrite(SavePath + "Stacked" + ".png", ad)
def main():
"""
Inputs:
None
Outputs:
Prints out the confusion matrix with accuracy
"""
# Parse Command Line arguments
Parser = argparse.ArgumentParser()
Parser.add_argument('--ModelPath', dest='ModelPath', default='../Checkpoints/unsupervised/9model.ckpt', help='Path to load latest model from, Default:ModelPath')
Parser.add_argument('--CheckPointPath', dest='CheckPointPath', default= '../Checkpoints/unsupervised/', help='Path to load latest model from, Default:CheckPointPath')
Parser.add_argument('--BasePath', dest='BasePath', default='../Data/Validated_', help='Path to load images from, Default:BasePath')
Parser.add_argument('--SavePath', dest='SavePath', default='./Results/', help='Path to save the results, Default: ./Results/')
Parser.add_argument('--ModelType', default='Unsup', help='Model type, Supervised or Unsupervised? Choose from Sup and Unsup, Default:Unsup')
Args = Parser.parse_args()
ModelPath = Args.ModelPath
BasePath = Args.BasePath
CheckPointPath = Args.CheckPointPath
SavePath = Args.SavePath
ModelType = Args.ModelType
# Plot Confusion Matrix
# LabelsTrue, LabelsPred = ReadLabels(LabelsPath, LabelsPathPred)
# ConfusionMatrix(LabelsTrue, LabelsPred)
if ModelType == 'Unsup':
DirNamesTrain, SaveCheckPoint, ImageSize, _, TrainLabels, NumClasses = SetupAll(BasePath, CheckPointPath)
NumTestSamples = 100
TrainLabels = np.load(BasePath + "/points_list.npy")
CornerPH = tf.placeholder(tf.float32, shape = (NumTestSamples, 4, 2))
LabelPH = tf.placeholder(tf.float32, shape = (NumTestSamples, 128, 128 ,2))
Patch2PH = tf.placeholder(tf.float32, shape = (NumTestSamples, 128, 128, 1))
ImgPH = tf.placeholder(tf.float32, shape = (NumTestSamples, 240, 320, 1))
patchIndicesPH = tf.placeholder(tf.int32, shape = (NumTestSamples, 128, 128 , 2))
testUnsupervised(ImgPH, LabelPH, DirNamesTrain, ImageSize, TrainLabels, CornerPH, Patch2PH, patchIndicesPH, SavePath + "unsupervised/", ModelPath, BasePath, NumTestSamples)
ran_index = np.random.randint(0, NumTestSamples - 1, size = 5)
for eachIndex in ran_index:
TrainLabels = np.load(BasePath + "/points_list.npy")
pred_y = np.load(SavePath + "unsupervised/predicted_H.npy")
ground_t_h_4 = pd.read_csv(BasePath + "/h_4_list.csv", index_col = False)
ground_t_h_4 = ground_t_h_4.to_numpy()
patch_list = pd.read_csv(BasePath + "/images_list.csv")
patch_list = patch_list.to_numpy()
pA_corners = TrainLabels[eachIndex, :, :, 0]
image_IA_dir = BasePath + "/Image_IA/" + patch_list[eachIndex,0]
image_IA = cv2.imread(image_IA_dir)
rm = ground_t_h_4[eachIndex].reshape(2, 4).T
first_corners = np.array(pA_corners)
import numpy
import matplotlib.pyplot as plt
from qtpy.QtCore import Qt, Signal, Slot, QStringListModel
from qtpy.QtGui import QIcon, QBrush, QColor
from qtpy.QtWidgets import (QFileDialog, QHBoxLayout, QLabel, QPushButton,
QVBoxLayout, QWidget, QCompleter, QLineEdit, QMessageBox, QToolButton)
from cnapy.flux_vector_container import FluxVectorContainer
class ModeNavigator(QWidget):
"""A navigator widget"""
def __init__(self, appdata, central_widget):
QWidget.__init__(self)
self.appdata = appdata
self.central_widget = central_widget
self.current = 0
self.mode_type = 0 # EFM or some sort of flux vector
self.scenario = {}
self.setFixedHeight(70)
self.layout = QVBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.save_button = QPushButton()
self.save_button.setIcon(QIcon(":/icons/save.png"))
self.save_button_connection = None
self.clear_button = QPushButton()
self.clear_button.setIcon(QIcon(":/icons/clear.png"))
self.prev_button = QPushButton("<")
self.next_button = QPushButton(">")
self.label = QLabel()
self.reaction_participation_button = QPushButton("Reaction participation")
self.size_histogram_button = QPushButton("Size histogram")
l1 = QHBoxLayout()
self.title = QLabel("Mode Navigation")
self.selector = SelectorLineEdit(self)
self.selector.setPlaceholderText("Select...")
self.selector.setClearButtonEnabled(True)
self.completion_list = QStringListModel()
self.completer = CustomCompleter(self)
self.completer.setModel(self.completion_list)
self.completer.setCaseSensitivity(Qt.CaseInsensitive)
self.selector.setCompleter(self.completer)
l12 = QHBoxLayout()
l12.setAlignment(Qt.AlignRight)
l12.addWidget(self.save_button)
l12.addWidget(self.clear_button)
l1.addWidget(self.title)
l1.addWidget(self.selector)
l1.addLayout(l12)
l2 = QHBoxLayout()
l2.addWidget(self.prev_button)
l2.addWidget(self.label)
l2.addWidget(self.next_button)
l2.addWidget(self.reaction_participation_button)
l2.addWidget(self.size_histogram_button)
self.layout.addLayout(l1)
self.layout.addLayout(l2)
self.setLayout(self.layout)
self.prev_button.clicked.connect(self.prev)
self.next_button.clicked.connect(self.next)
self.clear_button.clicked.connect(self.clear)
self.selector.returnPressed.connect(self.apply_selection)
self.selector.findChild(QToolButton).triggered.connect(self.reset_selection) # findChild(QToolButton) retrieves the clear button
self.size_histogram_button.clicked.connect(self.size_histogram)
self.central_widget.broadcastReactionID.connect(self.selector.receive_input)
def update(self):
txt = str(self.current + 1) + "/" + \
str(len(self.appdata.project.modes))
if self.num_selected < len(self.appdata.project.modes):
txt = txt + " (" + str(self.num_selected) + " selected)"
if isinstance(self.appdata.project.modes, FluxVectorContainer):
if self.appdata.project.modes.irreversible.shape != ():
if self.appdata.project.modes.irreversible[self.current]:
txt = txt + " irreversible"
else:
txt = txt + " reversible"
if self.appdata.project.modes.unbounded.shape != ():
if self.appdata.project.modes.unbounded[self.current]:
txt = txt + " unbounded"
else:
txt = txt + " bounded"
self.label.setText(txt)
def save_mcs(self):
dialog = QFileDialog(self)
filename: str = dialog.getSaveFileName(
directory=self.appdata.work_directory, filter="*.npz")[0]
if not filename or len(filename) == 0:
return
self.appdata.project.modes.save(filename)
def save_efm(self):
dialog = QFileDialog(self)
filename: str = dialog.getSaveFileName(
directory=self.appdata.work_directory, filter="*.npz")[0]
if not filename or len(filename) == 0:
return
self.appdata.project.modes.save(filename)
def save_sd(self):
dialog = QFileDialog(self)
filename: str = dialog.getSaveFileName(
directory=self.appdata.work_directory, filter="*.sds")[0]
if not filename or len(filename) == 0:
return
elif len(filename)<=4 or filename[-4:] != '.sds':
filename += '.sds'
self.appdata.project.sd_solutions.save(filename)
def update_completion_list(self):
reac_id = self.appdata.project.cobra_py_model.reactions.list_attr("id")
self.completion_list.setStringList(reac_id+["!"+str(r) for r in reac_id])
def set_to_mcs(self):
self.mode_type = 1
self.title.setText("MCS Navigation")
if self.save_button_connection is not None:
self.save_button.clicked.disconnect(self.save_button_connection)
self.save_button_connection = self.save_button.clicked.connect(self.save_mcs)
self.save_button.setToolTip("save minimal cut sets")
self.clear_button.setToolTip("clear minimal cut sets")
self.select_all()
self.update_completion_list()
def set_to_efm(self):
self.mode_type = 0 # EFM or some sort of flux vector
self.title.setText("Mode Navigation")
if self.save_button_connection is not None:
self.save_button.clicked.disconnect(self.save_button_connection)
self.save_button_connection = self.save_button.clicked.connect(self.save_efm)
self.save_button.setToolTip("save modes")
self.clear_button.setToolTip("clear modes")
self.select_all()
self.update_completion_list()
def set_to_strain_design(self):
self.mode_type = 2
self.title.setText("Strain Design Navigation")
if self.save_button_connection is not None:
self.save_button.clicked.disconnect(self.save_button_connection)
self.save_button_connection = self.save_button.clicked.connect(self.save_sd)
self.save_button.setToolTip("save strain designs")
self.clear_button.setToolTip("clear strain designs")
self.select_all()
self.update_completion_list()
def clear(self):
self.mode_type = 0 # EFM or some sort of flux vector
self.appdata.project.modes.clear()
self.appdata.recreate_scenario_from_history()
self.selector.accept_signal_input = False
self.hide()
self.modeNavigatorClosed.emit()
def display_mode(self):
self.appdata.modes_coloring = True
self.update()
self.changedCurrentMode.emit(self.current)
self.appdata.modes_coloring = False
def prev(self):
while True:
if self.current == 0:
self.current = len(self.appdata.project.modes)-1
else:
self.current -= 1
if self.selection[self.current]:
break
self.display_mode()
def next(self):
while True:
if self.current == len(self.appdata.project.modes)-1:
self.current = 0
else:
self.current += 1
if self.selection[self.current]:
break
self.display_mode()
def select_all(self):
self.selection = numpy.ones(len(self.appdata.project.modes), dtype=bool)
self.num_selected = len(self.appdata.project.modes)
self.selector.setText("")
def reset_selection(self):
self.selector.accept_signal_input = False
self.selection[:] = True # select all
self.num_selected = len(self.appdata.project.modes)
self.update()
def apply_selection(self):
must_occur = []
must_not_occur = []
self.selector.accept_signal_input = False
selector_text = self.selector.text().strip()
if len(selector_text) == 0:
self.reset_selection()
else:
try:
for r in map(str.strip, selector_text.split(',')):
if r[0] == "!":
must_not_occur.append(r[1:].lstrip())
else:
must_occur.append(r)
self.select(must_occur=must_occur, must_not_occur=must_not_occur)
except (ValueError, IndexError): # some ID was not found / an empty ID was encountered
QMessageBox.critical(self, "Cannot apply selection", "Check the selection for mistakes.")
if self.num_selected == 0:
QMessageBox.information(self, "Selection not applied", "This selection is empty and was therefore not applied.")
self.reset_selection()
else:
self.current = 0
if self.selection[self.current]:
self.display_mode()
else:
self.next()
def select(self, must_occur=None, must_not_occur=None):
self.selection[:] = True # reset selection
if self.appdata.window.centralWidget().mode_navigator.mode_type <=1:
if must_occur is not None:
for r in must_occur:
r_idx = self.appdata.project.modes.reac_id.index(r)
for i, selected in enumerate(self.selection):
if selected and self.appdata.project.modes.fv_mat[i, r_idx] == 0:
self.selection[i] = False
if must_not_occur is not None:
for r in must_not_occur:
r_idx = self.appdata.project.modes.reac_id.index(r)
for i, selected in enumerate(self.selection):
if selected and self.appdata.project.modes.fv_mat[i, r_idx] != 0:
self.selection[i] = False
elif self.appdata.window.centralWidget().mode_navigator.mode_type == 2:
if must_occur is not None:
for r in must_occur:
for i, selected in enumerate(self.selection):
s = self.appdata.project.modes[i]
if selected and (r not in s or numpy.any(numpy.isnan(s[r])) or numpy.all(s[r] == 0)):
self.selection[i] = False
if must_not_occur is not None:
for r in must_not_occur:
for i, selected in enumerate(self.selection):
s = self.appdata.project.modes[i]
if selected and r in s and not (numpy.any(numpy.isnan(s[r])) or numpy.all(s[r] == 0)):
self.selection[i] = False
if self.appdata.window.sd_sols and self.appdata.window.sd_sols.__weakref__: # if dialog exists
for i in range(self.appdata.window.sd_sols.sd_table.rowCount()):
r_sd_idx = int(self.appdata.window.sd_sols.sd_table.item(i,0).text())-1
if self.selection[r_sd_idx]:
self.appdata.window.sd_sols.sd_table.item(i,0).setForeground(QBrush(QColor(0, 0, 0)))
self.appdata.window.sd_sols.sd_table.item(i,1).setForeground(QBrush(QColor(0, 0, 0)))
if self.appdata.window.sd_sols.sd_table.columnCount() == 3:
self.appdata.window.sd_sols.sd_table.item(i,2).setForeground(QBrush(QColor(0, 0, 0)))
else:
self.appdata.window.sd_sols.sd_table.item(i,0).setForeground(QBrush(QColor(200, 200, 200)))
self.appdata.window.sd_sols.sd_table.item(i,1).setForeground(QBrush(QColor(200, 200, 200)))
if self.appdata.window.sd_sols.sd_table.columnCount() == 3:
self.appdata.window.sd_sols.sd_table.item(i,2).setForeground(QBrush(QColor(200, 200, 200)))
self.num_selected = numpy.sum(self.selection)
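# Minimal standalone sketch (toy data, not the widget above) of the selection rule used by
# apply_selection/select: a comma-separated list of reaction IDs keeps only the modes in which
# each ID carries flux, and a leading "!" requires zero flux instead.
import numpy

def select_modes(selector_text, reac_id, fv_mat):
    must_occur, must_not_occur = [], []
    for token in map(str.strip, selector_text.split(',')):
        (must_not_occur if token.startswith('!') else must_occur).append(token.lstrip('!').strip())
    selection = numpy.ones(fv_mat.shape[0], dtype=bool)
    for r in must_occur:
        selection &= fv_mat[:, reac_id.index(r)] != 0
    for r in must_not_occur:
        selection &= fv_mat[:, reac_id.index(r)] == 0
    return selection

reac_id = ['R1', 'R2', 'R3']
fv_mat = numpy.array([[1.0, 0.0, 2.0],
                      [0.0, 1.5, 0.0],
                      [3.0, 1.0, 0.0]])
print(select_modes('R1, !R2', reac_id, fv_mat))   # -> [ True False False]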
import json
import copy
import numpy as np # contains helpful math functions like numpy.exp()
import numpy.random as random # see numpy.random module
# import random # alternative to numpy.random module
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
map = mpimg.imread("map.png") # US States & Capitals map
# List of 30 US state capitals and corresponding coordinates on the map
with open('capitals.json', 'r') as capitals_file:
capitals = json.load(capitals_file)
capitals_list = list(capitals.items())
def show_path(path, starting_city, w=12, h=8):
"""Plot a TSP path overlaid on a map of the US States & their capitals."""
x, y = list(zip(*path))
_, (x0, y0) = starting_city
plt.imshow(map)
plt.plot(x0, y0, 'y*', markersize=15) # y* = yellow star for starting point
plt.plot(x + x[:1], y + y[:1]) # include the starting point at the end of path
plt.axis("off")
fig = plt.gcf()
fig.set_size_inches([w, h])
def simulated_annealing(problem, schedule):
"""The simulated annealing algorithm, a version of stochastic hill climbing
where some downhill moves are allowed. Downhill moves are accepted readily
early in the annealing schedule and then less often as time goes on. The
schedule input determines the value of the temperature T as a function of
time. [Norvig, AIMA Chapter 3]
Parameters
----------
problem : Problem
An optimization problem, already initialized to a random starting state.
The Problem class interface must implement a callable method
"successors()" which returns states in the neighborhood of the current
state, and a callable function "get_value()" which returns a fitness
score for the state. (See the `TravelingSalesmanProblem` class below
for details.)
schedule : callable
A function mapping time to "temperature". "Time" is equivalent in this
case to the number of loop iterations.
Returns
-------
Problem
An approximate solution state of the optimization problem
Notes
-----
(1) DO NOT include the MAKE-NODE line from the AIMA pseudocode
(2) Modify the termination condition to return when the temperature
falls below some reasonable minimum value (e.g., 1e-10) rather than
testing for exact equality to zero
See Also
--------
AIMA simulated_annealing() pseudocode
https://github.com/aimacode/aima-pseudocode/blob/master/md/Simulated-Annealing.md
"""
t=1
delt_E=0
current_state=problem.copy()
while True:
T=schedule(t)
# print(t)
# print(T)
# print("T",T,"t",t)
if T<=1e-10:
# Return the current state
return current_state
else:
# print("Problem is ",problem)
next_state=random.choice(current_state.successors())
delt_E=next_state.get_value()-current_state.get_value()
if delt_E > 0:
# print("IN the if delt_E>0")
current_state = next_state
else:
current_state = next_state if random.rand() < np.exp(delt_E / T) else current_state
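# Hedged sketch (not part of the assignment code above): one common choice for the `schedule`
# argument is exponential decay, T(t) = alpha0 * decay**t, which falls below the 1e-10 stopping
# threshold after finitely many iterations. The loop above is cut off here and is assumed to
# increment t once per iteration; the TravelingSalesmanProblem class from the docstring is not
# shown, so only the schedule and the acceptance probability are illustrated.
import numpy as np

def exp_schedule(t, alpha0=1e4, decay=0.95):
    """Temperature after t iterations of exponential cooling."""
    return alpha0 * (decay ** t)

# Acceptance probability of a downhill move (delta_E = -5) early vs. late in the run:
for t_demo in (1, 100, 500):
    T_demo = exp_schedule(t_demo)
    print(t_demo, T_demo, np.exp(-5.0 / T_demo) if T_demo > 1e-10 else 0.0)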
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from scipy.sparse import csr_matrix, identity, kron
from scipy.sparse.linalg import eigs, eigsh
import itertools
from scipy.linalg import block_diag, eig, expm, eigh
from scipy.sparse import save_npz, load_npz, csr_matrix, csc_matrix
import scipy.sparse as sp
from scipy.special import binom
import yaml
import copy
import warnings
import os
import time
from .Hamiltonians import DisplacedAnharmonicOscillator, PolymerVibrations, Polymer, DiagonalizeHamiltonian, LadderOperators
from .general_Liouvillian_classes import LiouvillianConstructor
class OpenPolymer(Polymer,LiouvillianConstructor):
def __init__(self,site_energies,site_couplings,dipoles):
"""Extends Polymer object to an open systems framework,
using the Lindblad formalism to describe bath coupling
"""
super().__init__(site_energies,site_couplings,dipoles)
# Values that need to be set
self.optical_dephasing_gamma = 0
self.optical_relaxation_gamma = 0
self.site_to_site_dephasing_gamma = 0
self.site_to_site_relaxation_gamma = 0
self.exciton_relaxation_gamma = 0
self.exciton_exciton_dephasing_gamma = 0
self.kT = 0
def optical_dephasing_operator(self):
total_deph = self.occupied_list[0].copy()
for i in range(1,len(self.occupied_list)):
total_deph += self.occupied_list[i]
return total_deph
def optical_dephasing_instructions(self):
O = self.optical_dephasing_operator()
gamma = self.optical_dephasing_gamma
return self.make_Lindblad_instructions(gamma,O)
def optical_dephasing_Liouvillian(self):
instructions = self.optical_dephasing_instructions()
return self.make_Liouvillian(instructions)
def boltzmann_factors(self,E1,E2):
if E1 == E2:
return 0.5,0.5
if E1 < E2:
return self.boltzmann_factors_ordered_inputs(E1,E2)
else:
E1_to_E2, E2_to_E1 = self.boltzmann_factors_ordered_inputs(E2,E1)
return E2_to_E1, E1_to_E2
def boltzmann_factors_ordered_inputs(self,E1,E2):
"""E1 must be less than E2"""
if self.kT == 0:
return 1, 0
Z = np.exp(-E1/self.kT) + np.exp(-E2/self.kT)
if np.isclose(Z,0):
E2_to_E1 = 1
E1_to_E2 = 0
else:
E2_to_E1 = np.exp(-E1/self.kT)/Z
E1_to_E2 = np.exp(-E2/self.kT)/Z
return E2_to_E1, E1_to_E2
def optical_relaxation_instructions(self):
eg = 0
ins_list = []
gamma = self.optical_relaxation_gamma
for n in range(len(self.energies)):
en = self.energies[n]
bg, bn = self.boltzmann_factors(eg,en)
O = self.up_list[n]
instructions2 = self.make_Lindblad_instructions(gamma * bg,O.T)
ins_list += instructions2
if np.isclose(bn,0):
pass
else:
instructions1 = self.make_Lindblad_instructions(gamma * bn,O)
ins_list += instructions1
return ins_list
def optical_relaxation_Liouvillian(self):
inst_list = self.optical_relaxation_instructions()
L = self.make_Liouvillian(inst_list)
return L
def site_to_site_relaxation_instructions(self):
nm = itertools.combinations(range(len(self.energies)),2)
i = 0
ins_list = []
gamma = self.site_to_site_relaxation_gamma
for n,m in nm:
en = self.energies[n]
em = self.energies[m]
bn,bm = self.boltzmann_factors(en,em)
O = self.exchange_list[i]
instructions1 = self.make_Lindblad_instructions(gamma * bn,O)
instructions2 = self.make_Lindblad_instructions(gamma * bm,O.T)
ins_list += instructions1
ins_list += instructions2
i+=1
return ins_list
def site_to_site_relaxation_Liouvillian(self):
inst_list = self.site_to_site_relaxation_instructions()
L = self.make_Liouvillian(inst_list)
return L
def site_to_site_dephasing_operator_list(self):
s_deph_list = []
for (i,j) in itertools.combinations(range(self.num_sites),2):
s_deph_list.append(self.occupied_list[i] - self.occupied_list[j])
return s_deph_list
def all_site_dephasing_instructions(self):
s_deph_list = self.site_to_site_dephasing_operator_list()
Lindblad_instruction_list = []
gamma = self.site_to_site_dephasing_gamma
for O in s_deph_list:
Lindblad_instruction_list += self.make_Lindblad_instructions(gamma,O)
return Lindblad_instruction_list
def all_site_dephasing_Liouvillian(self):
inst_list = self.all_site_dephasing_instructions()
L = self.make_Liouvillian(inst_list)
return L/(2*self.num_sites)
def set_electronic_dissipation_instructions(self):
inst_list = []
if self.optical_dephasing_gamma != 0:
inst_list += self.optical_dephasing_instructions()
if self.site_to_site_dephasing_gamma != 0:
inst_list += self.all_site_dephasing_instructions()
if self.site_to_site_relaxation_gamma != 0:
inst_list += self.site_to_site_relaxation_instructions()
if self.optical_relaxation_gamma != 0:
inst_list += self.optical_relaxation_instructions()
self.electronic_dissipation_instructions = inst_list
def make_manifold_hamiltonian_instructions(self,ket_manifold,bra_manifold):
Hket = self.get_electronic_hamiltonian(manifold_num = ket_manifold)
Hbra = self.get_electronic_hamiltonian(manifold_num = bra_manifold)
return self.make_commutator_instructions2(-1j*Hket,-1j*Hbra)
def make_total_Liouvillian(self):
drho = self.make_Liouvillian(self.make_manifold_hamiltonian_instructions('all','all'))
if self.num_sites > 1:
drho += self.all_exciton_dephasing_Liouvillian()
drho += self.exciton_relaxation_Liouvillian()
# drho += self.optical_relaxation_Liouvillian()
drho += self.optical_dephasing_Liouvillian()
self.L = drho
def eigfun(self,L,*,check_eigenvectors = True,invert = True,populations_only = False):
eigvals, eigvecs = np.linalg.eig(L)
eigvals = np.round(eigvals,12)
sort_indices = eigvals.argsort()
eigvals.sort()
eigvecs = eigvecs[:,sort_indices]
for i in range(eigvals.size):
max_index = np.argmax(np.abs(eigvecs[:,i]))
if np.real(eigvecs[max_index,i]) < 0:
eigvecs[:,i] *= -1
if eigvals[i] == 0:
# eigenvalues of 0 correspond to thermal distributions,
# which should have unit trace in the Hamiltonian space
if populations_only:
trace_norm = eigvecs[:,i].sum()
eigvecs[:,i] = eigvecs[:,i] / trace_norm
else:
shape = int(np.sqrt(eigvals.size))
trace_norm = eigvecs[:,i].reshape(shape,shape).trace()
if np.isclose(trace_norm,0):
pass
else:
eigvecs[:,i] = eigvecs[:,i] / trace_norm
if invert:
eigvecs_left = np.linalg.pinv(eigvecs)
else:
eigvals_left, eigvecs_left = np.linalg.eig(L.T)
eigvals_left = np.round(eigvals_left,12)
sort_indices_left = eigvals_left.argsort()
eigvals_left.sort()
eigvecs_left = eigvecs_left[:,sort_indices_left]
eigvecs_left = eigvecs_left.T
for i in range(eigvals_left.size):
norm = np.dot(eigvecs_left[i,:],eigvecs[:,i])
eigvecs_left[i,:] *= 1/norm
if check_eigenvectors:
LV = L.dot(eigvecs)
D = eigvecs_left.dot(LV)
if np.allclose(D,np.diag(eigvals),rtol=1E-10,atol=1E-10):
pass
else:
warnings.warn('Using eigenvectors to diagonalize Liouvillian does not result in the expected diagonal matrix to tolerance, largest deviation is {}'.format(np.max(np.abs(D - np.diag(eigvals)))))
self.eigenvalues = eigvals
self.eigenvectors = {'left':eigvecs_left,'right':eigvecs}
return eigvals, eigvecs, eigvecs_left
def save_L(self,dirname):
save_npz(os.path.join(dirname,'L.npz'),csr_matrix(self.L))
def save_L_by_manifold(self):
np.savez(os.path.join(self.base_path,'L.npz'),**self.L_by_manifold)
def save_eigsystem(self,dirname):
np.savez(os.path.join(dirname,'right_eigenvectors.npz'),all_manifolds = self.eigenvectors['right'])
np.savez(os.path.join(dirname,'left_eigenvectors.npz'),all_manifolds = self.eigenvectors['left'])
np.savez(os.path.join(dirname,'eigenvalues.npz'),all_manifolds = self.eigenvalues)
def save_mu(self,dirname,*,mask=True):
evl = self.eigenvectors['left']
ev = self.eigenvectors['right']
II = np.eye(self.mu.shape[0])
mu_ket = np.kron(self.mu,II.T)
mu_bra = np.kron(II,self.mu.T)
mu_mask_tol = 10
mu_ket_t = np.dot(np.dot(evl,mu_ket),ev)
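# Hedged standalone illustration (the LiouvillianConstructor conventions used by the class above
# are not shown here, so ordering/normalization may differ): the Lindblad dissipator
# D[O]rho = O rho O^dag - 1/2 {O^dag O, rho} can be written as a superoperator acting on the
# row-major flattening of rho via Kronecker products, which is the usual way such "instructions"
# are assembled into a Liouvillian matrix.
import numpy as np

def lindblad_superoperator(O):
    OdO = O.conj().T @ O
    I = np.eye(O.shape[0])
    return (np.kron(O, O.conj())
            - 0.5 * (np.kron(OdO, I) + np.kron(I, OdO.T)))

# Numerical check against the direct matrix expression for a 2-level lowering operator.
O = np.array([[0.0, 1.0], [0.0, 0.0]])
rho = np.array([[0.3, 0.1 + 0.2j], [0.1 - 0.2j, 0.7]])
direct = O @ rho @ O.conj().T - 0.5 * (O.conj().T @ O @ rho + rho @ O.conj().T @ O)
via_super = (lindblad_superoperator(O) @ rho.flatten()).reshape(2, 2)
assert np.allclose(direct, via_super)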
# take a raw inputs file for aprox21 and convert to the aprox19
# nuclei. This means getting rid of Cr56 and Fe56 (by lumping them
# into Ni56)
import numpy as np
import matplotlib.pyplot as plt
def find_r_for_rho(r, rho, rho_want):
idx = np.where(rho < rho_want)
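# Hedged sketch (the original function is cut off above): one way to turn the np.where mask into
# a radius is to take the first grid point where the density drops below the target; np.interp on
# the reversed, monotonically increasing profile is a smoother alternative. The profile below is
# synthetic, not model data.
import numpy as np

r_demo = np.linspace(1e7, 1e9, 200)                    # assumed radius grid [cm]
rho_demo = 1e7 * np.exp(-r_demo / 1e8)                 # assumed monotonically decreasing density
rho_want_demo = 1e4

idx_demo = np.where(rho_demo < rho_want_demo)[0][0]    # first zone past the threshold
r_coarse = r_demo[idx_demo]
r_interp = np.interp(rho_want_demo, rho_demo[::-1], r_demo[::-1])
print(r_coarse, r_interp)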
'''
Module : StreamProcessor
Authors: <NAME> (<EMAIL>)
<NAME> (<EMAIL>)
<NAME> (<EMAIL>)
'''
import pandas as pd
from functools import wraps
import time as time
import numpy as np
import random
from libscores import *
class Utils:
"""
Generic utility functions that our model would require
"""
@staticmethod
def random_sample_in_order(X,y,removeperc,seed=1):
if removeperc==0:
return X,y
num_train_samples = len(X)
rem_samples=int(num_train_samples*removeperc)
np.random.seed(seed)
skip = sorted(random.sample(range(num_train_samples),num_train_samples-rem_samples))
print('AutoGBT[Utils]:Random sample length:',num_train_samples-rem_samples)
return X[skip,:],y[skip,:]
"""
A function to perform majority downsampling. in case of class-imbalance,
pick all examples from minority class and include random samples from
majority class to make it balanced at a specific ratio
"""
@staticmethod
def majority_undersample(X,y,frac=1.0,seed=1):
MINORITY_THRESHOLD = 20000
## warn if too many samples are present
y=y.reshape(len(y))
class_0_freq = len(y[y==0])
class_1_freq = len(y[y==1])
majority_class = 0
if class_1_freq>class_0_freq:
majority_class = 1
minority_count = class_0_freq
else:
minority_count = class_1_freq
minority_class = int(not majority_class)
if minority_count > MINORITY_THRESHOLD:
print('AutoGBT[Utils]:Minority samples exceed threshold=',\
MINORITY_THRESHOLD,'total minority samples=',minority_count)
### do downsampling as per remove percent ###
indices = np.array(range(len(y)))
majority_ind = indices[y==majority_class]
minority_index = indices[y==minority_class]
np.random.seed(seed)
if int(minority_count*frac) > len(majority_ind):
size = len(majority_ind)
else:
size = int(minority_count*frac)
majority_index = np.random.choice(indices[y==majority_class],size=size,replace=False)
sorted_index = sorted(np.concatenate([minority_index,majority_index]))
print('AutoGBT[Utils]:Sampled data size:',len(sorted_index))
return X[sorted_index],y[sorted_index]
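# Hedged toy demonstration (separate from the Utils class above, which is what the pipeline
# actually uses): the effect of 1:frac majority undersampling on a skewed label distribution.
# The class counts below are made up.
import numpy as np

rng = np.random.RandomState(1)
y_demo = np.array([0] * 95 + [1] * 5)                  # 95:5 imbalance, class 1 is the minority
X_demo = rng.rand(100, 3)
frac_demo = 3.0
minority_idx = np.flatnonzero(y_demo == 1)
majority_idx = rng.choice(np.flatnonzero(y_demo == 0),
                          size=min(int(len(minority_idx) * frac_demo), (y_demo == 0).sum()),
                          replace=False)
keep = np.sort(np.concatenate([minority_idx, majority_idx]))
print(np.bincount(y_demo[keep]))                       # -> [15  5], i.e. a 3:1 ratio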
def simple_time_tracker(log_fun):
def _simple_time_tracker(fn):
@wraps(fn)
def wrapped_fn(*args, **kwargs):
start_time = time.time()
try:
result = fn(*args, **kwargs)
finally:
elapsed_time = time.time() - start_time
# log the result
log_fun({
'function_name': fn.__name__,
'total_time': elapsed_time,
})
return result
return wrapped_fn
return _simple_time_tracker
def _log(message):
print('[SimpleTimeTracker] {function_name} {total_time:.3f}'.format(**message))
from sklearn.model_selection import train_test_split
from hyperopt import hp, tpe, STATUS_OK, Trials
from hyperopt.fmin import fmin
from hyperopt import space_eval
import lightgbm as lgbm
class AutoHyperOptimizer:
"""
A wrapper for hyperopt to automatically tune hyper-parameters of our model
Idea : We use basic SMBO to get to best hyper parameters using a
directed search near the neighborhood of a fixed set of hyper-parameters.
A search window is defined for each hyper-parameter considering the nature
of the hyper-parameter.Each set of hyper-parameters is eavluated in a cross-validation
setting on a small fraction of data to determine the fitness. Hyperopt attempts
to find hyper-parameters that minimize (1.0-AUC) on the validation data.
We finallty compare the cross-validation AUC of the model trained with
fixed hyper-parameter set with the AUC of the model trained using hyper-parameters
returned by hyperopt, and choose the one with higher AUC as the optimal hyper-parameter
set.
"""
def __init__(self,max_samples=50000,max_evaluations=25,seed=1,parameter_space={}):
self.max_samples = max_samples
self.max_evaluations = max_evaluations
self.test_size = 0.25 ## fraction of data used for internal validation
self.shuffle = False
self.best_params = {}
self.seed = seed
self.param_space = parameter_space
def gbc_objective(self,space):
print('AutoGBT[AutoHyperOptimizer]:Parameter space:',space)
model = lgbm.LGBMClassifier(random_state=self.seed,min_data=1, min_data_in_bin=1)
model.set_params(**space)
model.fit(self.Xe_train,self.ys_train)
mypreds = model.predict_proba(self.Xe_test)[:,1]
auc = auc_metric(self.ys_test.reshape(-1,1),mypreds.reshape(-1,1))
print('AutoGBT[AutoHyperOptimizer] auc=',auc)
return{'loss': (1-auc), 'status': STATUS_OK }
def fit(self,X,y,indicator):
'''
indicator=1 means we intend to do just sampling and one-time fitting
for evaluating a fixed set of hyper-parameters,
0 means run hyperopt to search in the neighborhood of the seed
hyper-parameters to see if model quality is improving.
'''
num_samples = len(X)
print('AutoGBT[AutoHyperOptimizer]:Total samples passed for '\
'hyperparameter tuning:',num_samples)
if num_samples>self.max_samples:
removeperc = 1.0 - (float(self.max_samples)/num_samples)
print('AutoGBT[AutoHyperOptimizer]:Need to downsample for managing time: '\
'I will remove data percentage',removeperc)
XFull,yFull = Utils.random_sample_in_order(X,y.reshape(-1,1),removeperc)
print('AutoGBT[AutoHyperOptimizer]:downsampled data length',len(XFull))
else:
XFull = X
yFull = y
self.Xe_train, self.Xe_test, self.ys_train, self.ys_test = \
train_test_split(XFull, yFull.ravel(),test_size = self.test_size, random_state=self.seed,shuffle=True)
if indicator == 1:
## just fit lightgbm once to obtain the AUC w.r.t a fixed set of hyper-parameters ##
model = lgbm.LGBMClassifier(random_state=self.seed,min_data=1, min_data_in_bin=1)
model.set_params(**self.param_space)
model.fit(self.Xe_train,self.ys_train)
mypreds = model.predict_proba(self.Xe_test)[:,1]
auc = auc_metric(self.ys_test.reshape(-1,1),mypreds.reshape(-1,1))
return auc
else:
trials = Trials()
best = fmin(fn=self.gbc_objective,space=self.param_space,algo=tpe.suggest,trials=trials,max_evals=self.max_evaluations)
params = space_eval(self.param_space, best)
print('AutoGBT[AutoHyperOptimizer]:Best hyper-parameters',params)
self.best_params = params
return params, 1-np.min([x['loss'] for x in trials.results])
#return the best hyper-param with the corresponding AUC
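# Hedged toy run (independent of the class above) of the hyperopt mechanics it relies on:
# fmin/tpe minimizes the returned 'loss', so reporting 1-AUC makes hyperopt maximize AUC.
# A synthetic quadratic stands in here for the LightGBM validation score.
import numpy as np
from hyperopt import hp, tpe, fmin, Trials, STATUS_OK, space_eval

def toy_objective(space):
    pseudo_auc = 1.0 - (space['learning_rate'] - 0.01) ** 2   # peaks at learning_rate = 0.01
    return {'loss': 1.0 - pseudo_auc, 'status': STATUS_OK}

toy_space = {'learning_rate': hp.uniform('learning_rate', 0.001, 0.1)}
toy_trials = Trials()
toy_best = fmin(fn=toy_objective, space=toy_space, algo=tpe.suggest,
                trials=toy_trials, max_evals=20)
print(space_eval(toy_space, toy_best),
      1 - min(t['loss'] for t in toy_trials.results))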
from collections import Counter
class GenericStreamPreprocessor:
"""
Our generic pre-processing pipeline that uses frequency encoder idea. Categorical and
Multi-categorical features are encoded with their running frequencies
Pipeline also handlees Datetime columns. Min non-zero value in such columns would
be subtracted to make comparison meaningful. Additional derived features
(e.g. day of week, time of day etc.) are also generated from such columns
"""
def __init__(self):
self.categorical_cols=[]
self.date_cols=[]
self.redundant_catcols = []
self.ohe_cols = []
self.frequency_encode = True
self.date_encode = True
self.colMins = {}
self.featureMap = {}
self.dateMap = {}
self.ohe_col_threshold = 30
## no. of unique values to decide if the column need tobe one-hot encoded ##
## we didnt finally use OHE as it didnt appear to generalize our pipeline well ##
self.rows_processed = 0
self.freqMap = {}
def set_date_cols(self,cols):
self.date_cols = cols
for col in cols:
self.dateMap[col]=0.0
def set_categorical_cols(self,cols):
self.categorical_cols = cols
for col in cols:
self.featureMap[col]={}
def set_frequency_encode(self,flag=True):
self.frequency_encode = flag
def set_date_encode(self,flag=True):
self.date_encode = flag
def set_ohe_col_threshold(self,threshold=30):
self.ohe_col_threshold = threshold
def get_ohe_col_threshold(self):
return self.ohe_col_threshold
def print_config(self):
print ('AutoGBT[GenericStreamPreprocessor]:date encoding:',\
self.date_encode,'columns=',self.date_cols)
print ('AutoGBT[GenericStreamPreprocessor]:frequency encoding:',\
self.frequency_encode,'columns=',self.categorical_cols)
@simple_time_tracker(_log)
def partial_fit(self,X):
"""
Update frequency count of all categorical/multi-categorical values
Maintain a map of minimum values in each date column for encoding
"""
for col in range(X.shape[1]):
if col in self.categorical_cols and self.frequency_encode ==True :
if X.shape[0] > 200000:
## count using pandas if it is a large dataset
curr_featureMap = dict(pd.value_counts(X[:,col]))
self.featureMap[col] = dict(Counter(self.featureMap[col]) + Counter(curr_featureMap))
print('AutoGBT[GenericStreamPreprocessor]:using pandas count ' \
'for faster results..updating feature count map for column:',col)
else:
val,freq = np.unique(X[:,col],return_counts=True)
curr_featureMap = dict(zip(val,freq))
self.featureMap[col] = dict(Counter(self.featureMap[col]) + Counter(curr_featureMap))
print('AutoGBT[GenericStreamPreprocessor]:using numpy unique count..'\
'updating feature count map for column:',col, len(self.featureMap[col]))
elif col in self.date_cols and self.date_encode == True:
## find minimum non-zero value corresponding to each date columns ##
date_col = X[:,col].astype(float)
non_zero_idx = np.nonzero(date_col)[0]
if(len(non_zero_idx) > 0):
if self.dateMap[col]==0:
self.dateMap[col] = np.min(date_col[non_zero_idx])
else:
self.dateMap[col] = np.min([self.dateMap[col],np.min(date_col[non_zero_idx])])
self.rows_processed = self.rows_processed + len(X)
print('AutoGBT[GenericStreamPreprocessor]:featuremap size:',len(self.featureMap))
@simple_time_tracker(_log)
def prepareFrequencyEncodingMap(self):
for col in self.categorical_cols:
keys = self.featureMap[col].keys()
vals = np.array(list(self.featureMap[col].values())).astype(float)
self.freqMap[col] = dict(zip(keys,vals))
@simple_time_tracker(_log)
def transform(self,X):
result = []
for col in range(X.shape[1]):
if col in self.categorical_cols:
### DO FREQUENCY ENCODING ####
freq_encoded_col = np.vectorize(self.freqMap[col].get)(X[:,col])
result.append(freq_encoded_col)
elif col in self.date_cols:
transformed_date_col = X[:,col].astype(float) - self.dateMap[col]
result.append(transformed_date_col)
else: ### it must be a numeric feature
result.append(X[:,col])
### add dynamic date difference features and other generated features ###
for i in range(len(self.date_cols)):
for j in range(i+1,len(self.date_cols)):
if len(np.nonzero(X[:,i]))>0 and len(np.nonzero(X[:,j]))>0:
print('AutoGBT[GenericStreamPreprocessor]:datediff from nonzero cols:',i,j)
result.append(X[:,i]-X[:,j])
dates = pd.DatetimeIndex(X[:,i])
## get the date column
dayofweek = dates.dayofweek.values
dayofyear = dates.dayofyear.values
month = dates.month.values
weekofyear = dates.weekofyear.values
day = dates.day.values
hour = dates.hour.values
minute = dates.minute.values
year = dates.year.values
result.append(dayofweek)
result.append(dayofyear)
result.append(month)
result.append(weekofyear)
result.append(year)
result.append(day)
result.append(hour)
result.append(minute)
return np.array(result).T
def get_ohe_candidate_columns(self):
ohe_candidates = []
for col in self.categorical_cols:
unique_categories = len(self.featureMap[col])
if unique_categories>1 and unique_categories <= self.ohe_col_threshold:
ohe_candidates.append(col)
return ohe_candidates
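# Hedged toy example (standalone; the class above is what the real pipeline uses): frequency
# encoding replaces each categorical value by how often it has been seen so far, which is what
# partial_fit / prepareFrequencyEncodingMap / transform do incrementally across batches.
import numpy as np
from collections import Counter

seen = Counter()
for batch in (np.array(['a', 'b', 'a']), np.array(['a', 'c'])):   # two incoming batches
    vals, freqs = np.unique(batch, return_counts=True)
    seen.update(dict(zip(vals, freqs)))
freq_map_demo = {k: float(v) for k, v in seen.items()}            # {'a': 3.0, 'b': 1.0, 'c': 1.0}
encoded = np.vectorize(freq_map_demo.get)(np.array(['a', 'c', 'b']))
print(encoded)                                                     # -> [3. 1. 1.]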
class StreamSaveRetrainPredictor:
"""
A Save-Retrain model to combat concept-drift using a two level sampling strategy,
and by using a generic stream processing pipeline.
Idea: for each incoming batch of data along with label, do a majority undersampling
and maintain the raw data in a buffer (level-1 sampling strategy). Model training is
performed using a lazy strategy (just prior to making predictions) subject to
the availability of time budget. This way, most recent data is utilized by
the pre-processing pipeline in performing frequency encoding, datetime column normalization
etc., to minimze the effect of changes in the underlying data distribution. Automatic
hyper-parameter tuning is performed using hyperopt SMBO when the very first batch
of data is encountered. For large datasets,a level-2 downsampling strategty is applied on
accumulated training set to keep model training time within the budget.
"""
def __init__(self):
self.batch=0
self.max_train_data=400000
self.min_train_per_batch = 5000
self.clf=''
self.best_hyperparams = {}
self.stream_processor = GenericStreamPreprocessor()
self.XFull = []
self.yFull = []
self.ohe = None
self.ohe_cols = None
### if 80% time budget on a dataset is already spent, donot refit the model - just predict with the existing model###
self.dataset_budget_threshold = 0.8
### Set the delta region for parameter exploration for hyperopt ###
### Explore in a small window of hyper-parameters nearby to see if model quality improves ###
self.delta_n_estimators = 50
self.delta_learning_rate = 0.005
self.delta_max_depth = 1
self.delta_feature_fraction = 0.1
self.delta_bagging_fraction = 0.1
self.delta_bagging_freq = 1
self.delta_num_leaves = 20
self.current_train_X = {}
self.current_train_y = []
## max number of function evaluation for hyperopt ##
self.max_evaluation = 30
def partial_fit(self,F,y,datainfo,timeinfo):
self.current_train_X = F
self.current_train_y = y
date_cols = datainfo['loaded_feat_types'][0]
numeric_cols = datainfo['loaded_feat_types'][1]
categorical_cols = datainfo['loaded_feat_types'][2]
multicategorical_cols = datainfo['loaded_feat_types'][3]
## date time coulumn indices ###
time_cols = np.arange(0,date_cols)
## categorical and multi-categorical column indices ###
cols = np.arange(date_cols+numeric_cols,date_cols+numeric_cols+categorical_cols+multicategorical_cols)
print('AutoGBT[StreamSaveRetrainPredictor]:date-time columns:',time_cols)
print('AutoGBT[StreamSaveRetrainPredictor]:categorical columns:',cols)
### extract numerical features first
X=F['numerical']
### replace missing values with zeros ###
X = np.nan_to_num(X)
print('AutoGBT[StreamSaveRetrainPredictor]:Numeric Only data shape:',X.shape)
if categorical_cols > 0:
### replace missing values with string 'nan' ###
CAT = F['CAT'].fillna('nan').values
X = np.concatenate((X,CAT),axis=1)
## append categorical features
del CAT
if multicategorical_cols > 0:
### replace missing values with string 'nan' ###
MV = F['MV'].fillna('nan').values
X = np.concatenate((X,MV),axis=1)
### append multi-categorical features ###
del MV
print('AutoGBT[StreamSaveRetrainPredictor]:Feature Matrix Shape:',X.shape)
### INITIALIZE OUR STREAM PROCESSOR PIPELINE###
if len(self.stream_processor.categorical_cols)==0:
print('AutoGBT[StreamSaveRetrainPredictor]:initializing categorical columns:')
self.stream_processor.set_categorical_cols(cols)
if len(self.stream_processor.date_cols)==0:
print('AutoGBT[StreamSaveRetrainPredictor]:initializing date-time columns:')
self.stream_processor.set_date_cols(time_cols)
#### END INITIALIZATION ###
if self.stream_processor.rows_processed == 0:
### we are seeing the first batch of data; process it to make frequency encoder ready ###
self.stream_processor.partial_fit(X)
print('AutoGBT[StreamSaveRetrainPredictor]:partial fit of X for first time..')
train_X,train_y = Utils.majority_undersample(X,y,frac=3.0)
### level-1 of our sampling strategy - sample 1:3 always to handle skewed data ##
print('AutoGBT[StreamSaveRetrainPredictor]:Level-1 Sampling: undersampling and '\
'saving raw data for training:length=',len(train_X))
self.batch = self.batch + 1.0
if len(self.XFull) == 0:
### first time
self.XFull = train_X
self.yFull = train_y
else:
## we have history, so concatenate to it ##
self.XFull=np.concatenate((self.XFull,train_X),axis=0)
self.yFull=np.concatenate((self.yFull,train_y),axis=0)
num_train_samples = len(self.XFull)
print('AutoGBT[StreamSaveRetrainPredictor]:Total accumulated training '\
'data in raw form:',num_train_samples)
def predict(self,F,datainfo,timeinfo):
### extract numerical data first
X=F['numerical']
## replace nan to 0 ##
X = np.nan_to_num(X)
date_cols = datainfo['loaded_feat_types'][0]
numeric_cols = datainfo['loaded_feat_types'][1]
categorical_cols = datainfo['loaded_feat_types'][2]
multicategorical_cols = datainfo['loaded_feat_types'][3]
if categorical_cols >0:
### replace missing values with string 'nan' ###
CAT = F['CAT'].fillna('nan').values
## append categorical features
X = np.concatenate((X,CAT),axis=1)
del CAT
if multicategorical_cols > 0:
### replace missing values with string 'nan' ###
MV = F['MV'].fillna('nan').values
### append multi-categorical features
X = np.concatenate((X,MV),axis=1)
del MV
dataset_spenttime=time.time()-timeinfo[1]
print('AutoGBT[StreamSaveRetrainPredictor]:Dataset Budget threshhold:',self.dataset_budget_threshold ,'safe limit =', \
datainfo['time_budget']*self.dataset_budget_threshold)
## a safe limit for time budget is calculated ##
if dataset_spenttime < datainfo['time_budget']*self.dataset_budget_threshold:
### if sufficient time budget exist considering the safe limit, then continue model update ###
print('AutoGBT[StreamSaveRetrainPredictor]:Sufficient budget available to update the model')
### update the stream processor with new data ###
self.stream_processor.partial_fit(X)
print('AutoGBT[StreamSaveRetrainPredictor]:partial fit of X in predict function..total rows processed:',self.stream_processor.rows_processed)
self.stream_processor.prepareFrequencyEncodingMap()
print('AutoGBT[StreamSaveRetrainPredictor]:FrequencyEncoding Map Prepared')
num_train_samples = len(self.XFull)
print('AutoGBT[StreamSaveRetrainPredictor]:About to transform full training data:',num_train_samples)
XTrain = []
yTrain = []
if num_train_samples>self.max_train_data:
removeperc = 1.0 - (float(self.max_train_data)/num_train_samples)
print('AutoGBT[StreamSaveRetrainPredictor]:Level-2 Sampling...'\
'Too much training data..I need to subsample:remove',removeperc)
XTrain,yTrain = Utils.random_sample_in_order(self.XFull,self.yFull.reshape(-1,1),removeperc)
print('AutoGBT[StreamSaveRetrainPredictor]:downsampled training data length=',len(XTrain))
else:
XTrain = self.XFull
yTrain = self.yFull
XTrain_transformed = self.stream_processor.transform(XTrain)
print('AutoGBT[StreamSaveRetrainPredictor]:Training transformed shape:',XTrain_transformed.shape)
### we didnt find the best hyper-parameters yet
if len(self.best_hyperparams)==0:
#Evaluate at run-time 2 promising choices for Hyper-parameters:
#Choice1->Fixed set of hyper-parameters, Choice2-> promising solution near a fixed set, found using hyperopt
param_choice_fixed = {'n_estimators':600,\
'learning_rate':0.01,\
'num_leaves':60,\
'feature_fraction':0.6,\
'bagging_fraction':0.6,\
'bagging_freq':2,\
'boosting_type':'gbdt',\
'objective':'binary',\
'metric':'auc'}
#Get the AUC for the fixed hyperparameter on the internal validation set
autohyper = AutoHyperOptimizer(parameter_space=param_choice_fixed)
best_score_choice1 = autohyper.fit(XTrain_transformed,yTrain.ravel(),1)
print("---------------------------------------------------------------------------------------------------")
print("AutoGBT[StreamSaveRetrainPredictor]:Fixed hyperparameters:",param_choice_fixed)
print("AutoGBT[StreamSaveRetrainPredictor]:Best scores obtained from Fixed hyperparameter only is:",best_score_choice1)
print("---------------------------------------------------------------------------------------------------")
#Get the AUC for the fixed hyperparameter+Hyperopt combination on the internal validation set
#Step:1-Define the search space for Hyperopt to be a small delta region over the initial set of fixed hyperparameters
n_estimators_low = 50 if (param_choice_fixed['n_estimators'] - self.delta_n_estimators)<50 else param_choice_fixed['n_estimators'] - self.delta_n_estimators
n_estimators_high = param_choice_fixed['n_estimators'] + self.delta_n_estimators
learning_rate_low = np.log(0.001)
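# Hedged sketch of where the truncated code above is headed: a hyperopt search space that only
# explores a small window around the fixed hyper-parameter set, as described in the
# AutoHyperOptimizer docstring. The exact bounds and step sizes below are assumptions, not the
# original values, and integer-valued parameters would need to be cast back to int before being
# passed to LGBMClassifier.
import numpy as np
from hyperopt import hp

fixed = {'n_estimators': 600, 'learning_rate': 0.01, 'num_leaves': 60,
         'feature_fraction': 0.6, 'bagging_fraction': 0.6, 'bagging_freq': 2}
neighborhood_space = {
    'n_estimators': hp.quniform('n_estimators', max(50, fixed['n_estimators'] - 50),
                                fixed['n_estimators'] + 50, 25),
    'learning_rate': hp.loguniform('learning_rate',
                                   np.log(max(0.001, fixed['learning_rate'] - 0.005)),
                                   np.log(fixed['learning_rate'] + 0.005)),
    'num_leaves': hp.quniform('num_leaves', fixed['num_leaves'] - 20, fixed['num_leaves'] + 20, 5),
    'feature_fraction': hp.uniform('feature_fraction', fixed['feature_fraction'] - 0.1,
                                   fixed['feature_fraction'] + 0.1),
    'bagging_fraction': hp.uniform('bagging_fraction', fixed['bagging_fraction'] - 0.1,
                                   fixed['bagging_fraction'] + 0.1),
    'bagging_freq': hp.quniform('bagging_freq', 1, fixed['bagging_freq'] + 1, 1),
}
# A dict like this could then serve as `parameter_space` for AutoHyperOptimizer and be explored
# through fmin/tpe in gbc_objective, exactly as the docstring describes.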
import pandas as pd
import numpy as np
from collections import defaultdict
from sklearn.preprocessing import scale
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture, BayesianGaussianMixture
from sklearn import metrics
import hdbscan
from scipy.cluster import hierarchy
from fastcluster import linkage
from fancyimpute import KNN
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# plt.style.use('seaborn-white')
class Preprocessing:
def __init__(self, csv_path, varlist=None, verbose=False):
'''
csv_path -- path to the csv file containing the raw dataset
varlist -- list of column names (strings) to keep; all other columns are dropped
'''
# import the csv dataset as a pandas DataFrame
self.df = pd.read_csv(csv_path)
# change index (row labels)
self.df = self.df.set_index('Country Code', verify_integrity=True)
# only keep the variables(columns) selected by user
if varlist:
varlist = ['Country Name'] + varlist
self.df = self.df[varlist]
# convert all columns but Country Names to numeric type
self.df.iloc[:, 1:] = \
self.df.iloc[:, 1:].apply(pd.to_numeric, errors='coerce')
# report poor features and selected_countries
if verbose:
feature_miss = self.df.isnull().sum()
country_miss = self.df.isnull().sum(axis=1)
feature_miss = \
feature_miss[feature_miss != 0].sort_values(ascending=False)
country_miss = \
country_miss[country_miss != 0].sort_values(ascending=False)
print('MISSING VALUES FOR EACH FEATURE:')
print(feature_miss, '\n')
print('MISSING VALUES FOR EACH COUNTRY:')
print(country_miss)
# def drop_poor_columns(self, p):
# ''' Drop the columns of self.df with more than p (%) missing values'''
#
# # create df with a the count of missing values for each column
# missing_df = pd.DataFrame(self.df.isnull().sum())
# # extract the names of columns with more than p (%) missing values
# poor_columns = missing_df.loc[missing_df[0] > p*len(self.df)].index
# # drop sparse columns
# self.df.drop(poor_columns, axis=1, inplace=True)
# return self.df, poor_columns
def dropPoorFeatures(self, axis, p):
'''
Drop the rows/columns of self.df with more than p (%) missing values
axis -- indicate whether to drop rows (axis=0) or columns(axis=1)
'''
# create df with the count of missing values for each row/column
missing_df = pd.DataFrame(self.df.isnull().sum(axis=int(not axis)))
# extract the names of rows/columns with more than p (%) missing values
if axis == 0:
length = len(self.df.columns)
else:
length = len(self.df)
poor_features = missing_df.loc[missing_df[0] > p*length].index
# drop sparse rows/columns
self.df.drop(poor_features, axis=axis, inplace=True)
return self.df, poor_features
def imputeKNN(self):
# df is my data frame with the missings. I keep only floats
self.country_names = self.df['Country Name'].values
df_numeric = self.df.select_dtypes(include=[np.float64]).values
# impute missing values
df_filled_KNN = pd.DataFrame(
KNN(k=2, verbose=False).complete(df_numeric))
df_filled_KNN.insert(
loc=0, column='Country Names', value=self.country_names)
df_filled_KNN.columns = self.df.columns
df_filled_KNN.index = self.df.index
return df_filled_KNN
def exportCSV(self, path, impute=False):
if not impute:
# export the cleaned dataframe to a csv file
self.df.to_csv(path)
else:
# impute the missing values before exporting to csv
self.df_filled_KNN = self.imputeKNN()
self.df_filled_KNN.to_csv(path)
def heatmap(df, links):
'''
Plot a matrix dataset as a hierarchically-clustered heatmap,
using given linkages.
'''
cmap = sns.cubehelix_palette(
as_cmap=True, start=.5, rot=-.75, light=.9)
sns.clustermap(
data=df, row_linkage=links, col_cluster=False, cmap=cmap)
class Clustering:
def __init__(self, csv_path, verbose=False):
self.df = pd.read_csv(csv_path)
# change index (row labels)
self.df = self.df.set_index('Country Code', verify_integrity=True)
# df.info(verbose=False)
# store country full names (for plots) before removing the feature
self.country_names = self.df['Country Name'].values
self.df = self.df.drop(['Country Name'], axis=1)
# scale the dataset to be distributed as a standard Gaussian
cols = self.df.columns
ind = self.df.index
self.df = pd.DataFrame(scale(self.df))
self.df.columns = cols
self.df.index = ind
# create disctionary of clusters
self.clusterings = defaultdict(lambda: np.array(0))
self.clusterings_labels = defaultdict(lambda: np.array(0))
# print general info
if verbose:
print('The imported dataset has the following characteristics:')
print(self.df.info(verbose=False))
def getPC(self):
'''
Calculate the principal components (PC) and create a new DataFrame
by projecting the datapoints on the PC space.
'''
self.pca = PCA()
self.pca_loadings = pd.DataFrame(
PCA().fit(self.df).components_.T, index=self.df.columns)
self.df_pc = pd.DataFrame(
self.pca.fit_transform(self.df), index=self.df.index)
# plot the cumulated proportion of variance explained by the PC
print('CUMULATIVE PROPORTION OF VARIANCE EXPLAINED BY PCs')
plt.figure(figsize=(7, 5))
plt.plot(range(1, len(self.pca.components_)+1),
self.pca.explained_variance_ratio_, '-o',
label='Individual component')
plt.plot(range(1, len(self.pca.components_)+1),
np.cumsum(self.pca.explained_variance_ratio_), '-s',
label='Cumulative')
plt.ylabel('Proportion of Variance Explained')
plt.xlabel('Principal Component')
plt.xlim(0.75, 4.25)
plt.ylim(0, 1.05)
plt.xticks(range(1, len(self.pca.components_)+1))
plt.legend(loc=2)
def plotAlongPC(self, pc1=0, pc2=1, xlim=[-5, 5], ylim=[-5, 5],
loadings=True, clustering=None):
'''
Plot the countries along the two principal components given in input:
pc1[int] (usually = 0, indicating the first PC) and pc2[int]
'''
fig, ax1 = plt.subplots(figsize=(9, 7))
ax1.set_xlim(xlim[0], xlim[1])
ax1.set_ylim(ylim[0], ylim[1])
if clustering is not None:
# build a generator of colors
NUM_COLORS = len(self.clusterings[clustering])
clist = np.random.uniform(low=0, high=1, size=(NUM_COLORS, 4))
# plot countries along PCs coloring them according to their cluster
labels = self.clusterings_labels[clustering]
for i, country in enumerate(self.df_pc.index):
ax1.annotate(country,
(self.df_pc[pc1].loc[country],
-self.df_pc[pc2].loc[country]),
ha='center',
color=clist[labels[i]],
fontweight='bold')
else:
# plot countries along PCs
for i in self.df_pc.index:
ax1.annotate(i,
(self.df_pc[pc1].loc[i],
-self.df_pc[pc2].loc[i]),
ha='center',
color='b',
fontweight='bold')
# Plot reference lines
ax1.hlines(0, -5, 5, linestyles='dotted', colors='grey')
ax1.vlines(0, -5, 5, linestyles='dotted', colors='grey')
pc1_string = 'Principal Component ' + str(pc1)
pc2_string = 'Principal Component ' + str(pc2)
ax1.set_xlabel(pc1_string)
ax1.set_ylabel(pc2_string)
if loadings:
# Plot Principal Component loading vectors, using a second y-axis.
ax2 = ax1.twinx().twiny()
ax2.set_ylim(-1, 1)
ax2.set_xlim(-1, 1)
ax2.tick_params(axis='y', colors='orange')
# ax2.set_xlabel('Principal Component loading vectors',
# color='orange')
# Plot labels for vectors.
# 'a' is an offset parameter to separate arrow tip and text.
a = 1.07
for i in self.pca_loadings[[pc1, pc2]].index:
ax2.annotate(i,
(self.pca_loadings[pc1].loc[i]*a,
-self.pca_loadings[pc2].loc[i]*a),
color='orange')
# Plot vectors
for k in range(len(self.pca_loadings.columns)):
ax2.arrow(0, 0, self.pca_loadings[pc1][k],
-self.pca_loadings[pc2][k],
width=0.002, color='black')
return
def plotDendrogram(self, links, threshold, metric, method):
plt.figure(figsize=(15, 9))
den_title = 'METHOD: ' + str(method) + ' METRIC: ' + str(metric)
plt.title(den_title)
den = hierarchy.dendrogram(links,
orientation='right',
labels=self.country_names,
color_threshold=threshold,
leaf_font_size=10)
plt.vlines(threshold, 0,
plt.gca().yaxis.get_data_interval()[1],
colors='r', linestyles='dashed')
return den
def clustersTable(self, clustering):
'''
Clustering is an array of cluster labels, one for each country
'''
lis = sorted(
list(zip(clustering, self.country_names)), key=lambda x: x[0])
groups = set(map(lambda x: x[0], lis))
table = pd.DataFrame(list(
zip(groups, [[y[1] for y in lis if y[0] == x] for x in groups])))
table.columns = ['Cluster', '']
table.set_index('Cluster', inplace=True, verify_integrity=False)
return table
def saveClustering(self, cluster_labels, clustering_name):
# save clusterings into a dict and rename its columns
self.clusterings[clustering_name] = \
self.clustersTable(cluster_labels)
self.clusterings[clustering_name].columns = [clustering_name]
self.clusterings_labels[clustering_name] = cluster_labels
def hierarchicalClustering(
self, metric, method, threshold=None, on_PC=0, show_heatmap=False):
'''
Show figures of clusters retrieved through the hierachical method
and return an array with the cluster index of each country.
metric -- [str] used for assigning distances to data:
'euclidean', 'ćorrelation', 'cosine', 'seuclidean'...
method -- [str] the type of linkage used for agglomerating the nodes
'average','complete','ward'...(check fastcluster full list)
threshold -- [int] threshold distance for separing clusters,
in the hierachical tree.
on_PC -- [int] apply clustering by using data projections
on the first on_PC principal components
show_heatmap -- [bool] additionally plot a hierarchically-clustered heatmap of the data
'''
if on_PC > 0:
df = self.df_pc.iloc[:, :on_PC+1]
else:
df = self.df
if method == 'all':
method = ['average',
'complete',
'single',
'weighted',
'centroid', # only for Euclidean data
'median', # only for Euclidean data
'ward', # only for Euclidean data
]
elif type(method) != list:
method = list([method])
metric = str(metric)
for met in method:
# set up the linking tool
links = linkage(df, metric=metric, method=met)
self.link = links
# plot dendrogram
self.plotDendrogram(links, threshold, metric, met)
if show_heatmap:
heatmap(df, links)
labels = hierarchy.fcluster(links, threshold, criterion='distance')
# save clusters
self.saveClustering(
labels, 'hc_'+str(met)+'_'+str(metric)+'_'+str(threshold))
# self.hierarchical_classes = get_hierarchical_classes(den)
# plt.savefig('tree2.png')
def hdbscan(self, min_cluster_size=2, on_PC=0):
'''compute clusters using HDBSCAN algorithm'''
if on_PC > 0:
df = self.df_pc.iloc[:, :on_PC+1]
else:
df = self.df
clusterer = hdbscan.HDBSCAN(min_cluster_size=min_cluster_size)
clusterer.fit_predict(df)
# save clusters
self.saveClustering(clusterer.labels_, 'hdbscan')
def bayesianGaussianMixture(self, n_components, covariance_type='full',
n_init=50, on_PC=0):
'''
Compute Bayesian Gaussian Mixture clustering.
Note: in this case, the number of components effectively used
can be < n_componentss (at most, n_components).
'''
if on_PC > 0:
df = self.df_pc.iloc[:, :on_PC+1]
else:
df = self.df
clusterer = BayesianGaussianMixture(n_components,
covariance_type=covariance_type,
n_init=n_init)
labels = clusterer.fit(df).predict(df)
# save clusters
self.saveClustering(labels, 'bayesian gm' + str(n_components))
def gaussianMixture(self, n_components, covariance_type='full',
n_init=50, on_PC=0):
'''compute Gaussian Mixture clustering'''
if on_PC > 0:
df = self.df_pc.iloc[:, :on_PC+1]
else:
df = self.df
clusterer = GaussianMixture(n_components,
covariance_type=covariance_type,
n_init=n_init)
labels = clusterer.fit(df).predict(df)
# save clusters
self.saveClustering(labels, 'gm' + str(n_components))
def gmBIC(self, n_min, n_max, covariance_type='full',
n_init=50, on_PC=0):
if on_PC > 0:
df = self.df_pc.iloc[:, :on_PC+1]
else:
df = self.df
'''compute Bayesian Information Criterion'''
n_components = np.arange(n_min, n_max)
models = [
GaussianMixture(n, covariance_type=covariance_type, n_init=n_init)
for n in n_components]
bics = [model.fit(df).bic(df) for model in models]
bics = np.array(bics)
# store the optimal number of gaussian components and the resulting BIC
self.min_BIC = [bics.argmin()+n_min, bics.min()]
print('the minimum BIC is achieved with \
%i gaussian components' % self.min_BIC[0])
fig, ax = plt.subplots(num='Bayesian Information Criterion')
plt.plot(n_components, bics)
def kmeans(self, n_clusters=2, on_PC=0, n_init=50, evaluate=True):
'''compute clusters using KMeans algorithm'''
if on_PC > 0:
df = self.df_pc.iloc[:, :on_PC+1]
else:
df = self.df
# re-initialize seed for random initial centroids' position
np.random.seed(42)
clusterer = KMeans(n_clusters=n_clusters, n_init=n_init)
clusterer.fit_predict(df)
# save clusters
self.saveClustering(clusterer.labels_, 'kmeans' + str(n_clusters))
# compute Silhouette and Calinski-Harabaz Score
if evaluate:
benchClustering(clusterer, 'kmeans', df)
def multipleKmeans(self, k_min, k_max, on_PC=0, n_init=50):
if on_PC > 0:
df = self.df_pc.iloc[:, :on_PC+1]
else:
df = self.df
ks = np.arange(k_min, k_max)
silh = np.zeros(k_max - k_min)
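# Hedged sketch of what the truncated multipleKmeans loop above is presumably doing: fit KMeans
# for each k and record a quality score (here the silhouette coefficient) so the best k can be
# read off the curve. The toy blobs below are assumptions, not project data.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import silhouette_score

X_toy, _ = make_blobs(n_samples=300, centers=4, random_state=42)
k_min_demo, k_max_demo = 2, 8
silh_demo = np.zeros(k_max_demo - k_min_demo)
for j, k in enumerate(range(k_min_demo, k_max_demo)):
    labels = KMeans(n_clusters=k, n_init=10, random_state=42).fit_predict(X_toy)
    silh_demo[j] = silhouette_score(X_toy, labels)
print(k_min_demo + int(np.argmax(silh_demo)), silh_demo.round(3))   # best k by silhouette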
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from ...algorithm import Algorithm
from ...algorithm_selector import COMPRESSION_ALGORITHMS
from ....graph import model_utils as mu
from ....graph import node_utils as nu
from ....utils.logger import get_logger
logger = get_logger(__name__)
@COMPRESSION_ALGORITHMS.register('OutlierChannelSplitting')
class OutlierChannelSplitting(Algorithm):
name = 'OutlierChannelSplitting'
@property
def change_original_model(self):
return True
def __init__(self, config, engine):
super().__init__(config, engine)
self.weights_expansion_ratio = config.get('weights_expansion_ratio', 0.01)
def run(self, model):
""" this function applies outlier channel splitting procedure
:param model: model to apply the algorithm on
:return result model
"""
conv_nodes_list = self.get_conv_nodes(model)
for conv_node in conv_nodes_list:
weights_node = nu.get_weights_for_node(conv_node)
weights = nu.get_node_value(weights_node)
ocs_weights, ocs_channels_idxs = self.split_weights(weights)
if self.add_input_channels_for_conv_node(conv_node, ocs_channels_idxs):
nu.set_node_value(weights_node, ocs_weights)
logger.debug('Node {}: Channels {} were splitted'.
format(conv_node.fullname, ','.join(str(idx) for idx in ocs_channels_idxs)))
model.clean_up()
return model
def split_weights(self, weights):
num_input_channels = weights.shape[1]
# calculates the channels number for splitting
num_ocs_channels = int(np.ceil(self.weights_expansion_ratio * num_input_channels))
if num_ocs_channels == 0:
return weights, []
ocs_weights = np.copy(weights)
ocs_channels_idxs = []
map_ocs_channels_idxs = {}
for count in range(num_ocs_channels):
# find channel with max value
axis = (0,) + tuple(range(2, ocs_weights.ndim))
max_per_channel = np.max(np.abs(ocs_weights), axis=axis)
split_channel_idx = np.argmax(max_per_channel)
# Split channel
split_channel = ocs_weights[:, split_channel_idx:split_channel_idx + 1, ...]
split_channel_half = split_channel * 0.5
split_channel_zero = np.zeros_like(split_channel)
split_threshold = max_per_channel[split_channel_idx] * 0.5
abs_split_channel = np.abs(split_channel)
ocs_ch_0 = np.where(abs_split_channel > split_threshold, split_channel_half, split_channel)
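# Hedged toy illustration (separate from the algorithm class above): the core idea of outlier
# channel splitting is that halving an outlier weight channel and duplicating the corresponding
# input channel leaves the layer output unchanged while shrinking the weight range that has to be
# quantized. The shapes and values below are made up.
import numpy as np

w = np.array([0.1, -0.2, 4.0, 0.3])         # channel 2 is the outlier
x = np.array([1.0, 2.0, 3.0, 4.0])          # one input value per channel
split_idx = int(np.argmax(np.abs(w)))
w_split = np.concatenate([w, [w[split_idx] * 0.5]])
w_split[split_idx] *= 0.5                   # original channel keeps half, the copy gets the other half
x_split = np.concatenate([x, [x[split_idx]]])    # duplicate the matching input channel
assert np.isclose(w @ x, w_split @ x_split)
print(np.abs(w).max(), np.abs(w_split).max())    # 4.0 -> 2.0: smaller dynamic range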
def CovidPlots():
# Function to reproduce the interactive plots from:
# https://hectoramirez.github.io/covid/COVID19.html
# The code is explained in:
# https://github.com/hectoramirez/Covid19
import os
import pandas as pd
import numpy as np
import datetime
import plotly.express as px
import plotly as plty
import seaborn as sns
#
sns.set()
sns.set_style("whitegrid")
custom_style = {
'grid.color': '0.8',
'grid.linestyle': '--',
'grid.linewidth': 0.5,
}
sns.set_style(custom_style)
os.chdir('/Users/hramirez/GitHub/Covid19/Automated')
# ========================================================================================= import
WORLD_CONFIRMED_URL = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
WORLD_DEATHS_URL = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
WORLD_RECOVERED_URL = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv'
world_confirmed = pd.read_csv(WORLD_CONFIRMED_URL)
world_deaths = pd.read_csv(WORLD_DEATHS_URL)
world_recovered = pd.read_csv(WORLD_RECOVERED_URL)
sets = [world_confirmed, world_deaths, world_recovered]
# yesterday's date
yesterday = pd.to_datetime(world_confirmed.columns[-1]).date()
today_date = str(pd.to_datetime(yesterday).date() + datetime.timedelta(days=1))
# print('\nAccording to the latest input, the data was updated on ' + today_date + '.')
# ========================================================================================= clean
def drop_neg(df):
# Drop negative entries
idx_l = df[df.iloc[:, -1] < 0].index.tolist()
for i in idx_l:
df.drop([i], inplace=True)
return df.reset_index(drop=True)
sets = [drop_neg(i) for i in sets]
for i in range(3):
sets[i].rename(columns={'Country/Region': 'Country', 'Province/State': 'State'}, inplace=True)
sets[i][['State']] = sets[i][['State']].fillna('')
sets[i].fillna(0, inplace=True)
# Change dates to datetime format
sets[i].columns = sets[i].columns[:4].tolist() + [pd.to_datetime(sets[i].columns[j]).date()
for j in range(4, len(sets[i].columns))]
sets_grouped = []
cases = ['confirmed cases', 'deaths', 'recovered cases']
for i in range(3):
o = sets[i].groupby('Country').sum()
o.rename(index={'US': 'United States'}, inplace=True)
sets_grouped.append(o)
# ========================================================================================= top countries
def bokehB(dataF, case):
# Bokeh bar plots. The function takes a dataframe, dataF, as the one provided by the raw data
# (dates as columns, countries as rows). It first takes the last column as yesterday's date.
from bokeh.io import output_file, show, output_notebook, save
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.palettes import Viridis as palette
from bokeh.transform import factor_cmap
df = dataF.iloc[:, -1].sort_values(ascending=False).head(20).to_frame()
df['totals'] = df.iloc[:, -1]
df.drop(df.columns[0], axis=1, inplace=True)
# get continent names
import country_converter as coco
continent = coco.convert(names=df.index.to_list(), to='Continent')
df['Continent'] = continent
cont_cat = len(df['Continent'].unique())
source = ColumnDataSource(df)
select_tools = ['save']
tooltips = [
('Country', '@Country'), ('Totals', '@totals{0,000}')
]
p = figure(x_range=df.index.tolist(), plot_width=840, plot_height=600,
x_axis_label='Country',
y_axis_label='Totals',
title="Top Countries with {} as of ".format(case) + today_date,
tools=select_tools)
p.vbar(x='Country', top='totals', width=0.9, alpha=0.7, source=source,
legend_field="Continent",
color=factor_cmap('Continent', palette=palette[cont_cat], factors=df.Continent.unique()))
p.xgrid.grid_line_color = None
p.y_range.start = 0
p.xaxis.major_label_orientation = 1
p.left[0].formatter.use_scientific = False
p.add_tools(HoverTool(tooltips=tooltips))
output_file('top_{}.html'.format(case))
return save(p, 'top_{}.html'.format(case))
def bokehB_mort(num=100):
# Bokeh bar plots. The function already includes the confirmed and deaths dataframes,
# and operates over them to calculate the mortality rate depending on num (number of
# minimum deaths to consider for a country). The rest is equivalent to the BokehB()
# function.
from bokeh.io import output_file, show, output_notebook, save
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.palettes import Viridis as palette
from bokeh.transform import factor_cmap
# top countries by death rate with at least num deaths
top_death = sets_grouped[1][yesterday].sort_values(ascending=False)
top_death = top_death[top_death > num]
# Inner join to the confirmed set, compute mortality rate and take top 20
df_mort = pd.concat([sets_grouped[0][yesterday], top_death], axis=1, join='inner')
mort_rate = round(df_mort.iloc[:, 1] / df_mort.iloc[:, 0] * 100, 2)
mort_rate = mort_rate.sort_values(ascending=False).to_frame().head(20)
# take yesterday's data
df = mort_rate.iloc[:, -1].sort_values(ascending=False).head(20).to_frame()
df['totals'] = df.iloc[:, -1]
df.drop(df.columns[0], axis=1, inplace=True)
import country_converter as coco
continent = coco.convert(names=df.index.to_list(), to='Continent')
df['Continent'] = continent
cont_cat = len(df['Continent'].unique())
source = ColumnDataSource(df)
select_tools = ['save']
tooltips = [
('Country', '@Country'), ('Rate', '@totals{0.00}%')
]
p = figure(x_range=df.index.tolist(), plot_width=840, plot_height=600,
x_axis_label='Country',
y_axis_label='Rate (%)',
title="Mortality rate of countries with at least {} deaths " \
"as of ".format(num) + today_date,
tools=select_tools)
p.vbar(x='Country', top='totals', width=0.9, alpha=0.7, source=source,
legend_field="Continent",
fill_color=factor_cmap('Continent', palette=palette[cont_cat], factors=df.Continent.unique()))
p.xgrid.grid_line_color = None
p.y_range.start = 0
p.xaxis.major_label_orientation = 1
p.left[0].formatter.use_scientific = False
p.add_tools(HoverTool(tooltips=tooltips))
output_file('top_mortality.html')
return save(p, 'top_mortality.html')
for i in range(3):
bokehB(sets_grouped[i], cases[i])
bokehB_mort(100)
# ========================================================================================= daily cases
roll = 7
def daily(n_top=15):
# compute daily values for the n_top countries
dfs = [df.sort_values(by=yesterday, ascending=False).iloc[:n_top, 2:].diff(axis=1).T
for df in sets_grouped]
# replace negative values by the previous day value
for df in dfs:
for i in df.columns:
idx = df.loc[df[i] < 0, i].index
df.loc[idx, i] = df.loc[idx - datetime.timedelta(days=1), i].tolist()
return dfs
def replace_outliers(series):
# Calculate the absolute difference of each timepoint from the series mean
absolute_differences_from_mean = np.abs(series - np.mean(series))
# Calculate a mask for the differences that are > 5 standard deviations from zero
this_mask = absolute_differences_from_mean > (np.std(series) * 5)
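# --- Editor's sketch (assumption): a standalone version of the outlier
# replacement used above -- points lying more than n_std standard deviations
# from the mean are swapped for the series median. The helper name is
# illustrative, not the author's.
import numpy as np
import pandas as pd

def replace_outliers_simple(series, n_std=5):
    diff = np.abs(series - np.mean(series))
    mask = diff > n_std * np.std(series)
    return pd.Series(np.where(mask, np.median(series), series), index=series.index)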
import os
import requests
import time
import json
import io
import numpy as np
import pandas as pd
import paavo_queries as paavo_queries
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
## NOTE: Table 9_koko access is forbidden from the API for some reason.
# url to the API
MAIN_PAAVO_URL = 'http://pxnet2.stat.fi/PXWeb/api/v1/en/Postinumeroalueittainen_avoin_tieto/'
def paavo_url(level, table):
"""Helper to make url to the paavo API"""
return MAIN_PAAVO_URL + str(level) + '/' + table
def fetch_csv(url, destination_directory, file_name, query={"query": [], "response": {"format": "csv"}}):
"""Fetch a single file from PXweb API. File name should end with '.csv'"""
response = requests.post(url, json=query, stream=True, allow_redirects=True)
if not os.path.exists(destination_directory):
os.makedirs(destination_directory)
destination_file = os.path.join(destination_directory, file_name)
if response.status_code == 200:
open(destination_file, 'wb').write(response.content)
print('Downloaded ' + file_name + ' from ' + url)
else:
print('Could not download ' + file_name + ' from ' + url)
print('HTTP/1.1 ' + str(response.status_code))
time.sleep(1)
def fetch_paavo(destination_directory):
"""Fetch the whole Paavo directory"""
# Getting levels from Paavo database
levels = []
response = requests.post('http://pxnet2.stat.fi/PXWeb/api/v1/en/Postinumeroalueittainen_avoin_tieto/')
response_texts = json.loads(response.text)
for response_text in response_texts:
levels.append(str(response_text['id']))
paavo_directory = os.path.join(destination_directory, 'paavo_raw')
for level in levels:
response = requests.post('http://pxnet2.stat.fi/PXWeb/api/v1/en/Postinumeroalueittainen_avoin_tieto/' + str(level))
response_texts = json.loads(response.text)
table_data = {}
for response_text in response_texts:
table_data[response_text['id']] = str(response_text['text']).split('. ')[-1].replace("'", "").replace(" ", "_")
for (id, name) in table_data.items():
url = paavo_url(level, id)
file_name = name + '.csv'
fetch_csv(url, paavo_directory, file_name)
def fetch_dataframe(url, query={"query": [], "response": {"format": "csv"}}):
"""Download a table from PXweb API to a DataFrame"""
response = requests.post(url, json=query, stream=True, allow_redirects=True)
if response.status_code == 200:
byte_data = io.BytesIO(response.content)
df = pd.read_csv(byte_data, sep=',', encoding='iso-8859-1')
print('Downloaded data from ' + url)
return df
else:
print('Could not download from ' + url)
print('HTTP/1.1 ' + str(response.status_code))
return pd.DataFrame()
time.sleep(0.2)
def paavo_data():
"""Download the whole paavo directory to a dictionary with names as keys and dataframes as values"""
data = {}
# Getting levels from paavo database
levels = []
response = requests.post('http://pxnet2.stat.fi/PXWeb/api/v1/en/Postinumeroalueittainen_avoin_tieto/')
response_texts = json.loads(response.text)
for response_text in response_texts:
levels.append(str(response_text['id']))
for level in levels:
response = requests.post(
'http://pxnet2.stat.fi/PXWeb/api/v1/en/Postinumeroalueittainen_avoin_tieto/' + str(level))
response_texts = json.loads(response.text)
table_data = {}
for response_text in response_texts:
table_data[response_text['id']] = str(response_text['text']).split('. ')[-1].replace("'", "").replace(" ", "_")
for (id, name) in table_data.items():
url = paavo_url(level, id)
df = fetch_dataframe(url)
if not df.empty:
data[name] = df
time.sleep(1)
return data
def fetch_paavo_density_and_area(density_file_destination, area_file_destination):
def clean_df(df):
# Drop Finland row
df.drop(index=0, inplace=True)
# Extract postal code
df.rename(columns={df.columns[0]: 'Postal code'}, inplace=True)
df['Postal code'] = df['Postal code'].apply(lambda x: x.split(' ')[0])
# Replace '.' with 0 and set Postal code as index
df.replace({'.': 0}, inplace=True)
df.set_index('Postal code', inplace=True)
# Change data type of all columns to integer
for column in df.columns:
df[column] = df[column].astype(int)
return df
url_2013 = 'http://pxnet2.stat.fi/PXWeb/api/v1/en/Postinumeroalueittainen_avoin_tieto/2015/paavo_9_koko_2015.px/'
url_2014 = 'http://pxnet2.stat.fi/PXWeb/api/v1/en/Postinumeroalueittainen_avoin_tieto/2016/paavo_9_koko_2016.px/'
url_2015 = 'http://pxnet2.stat.fi/PXWeb/api/v1/en/Postinumeroalueittainen_avoin_tieto/2017/paavo_9_koko_2017.px/'
url_2016 = 'http://pxnet2.stat.fi/PXWeb/api/v1/en/Postinumeroalueittainen_avoin_tieto/2018/paavo_9_koko_2018.px/'
url_2017 = 'http://pxnet2.stat.fi/PXWeb/api/v1/en/Postinumeroalueittainen_avoin_tieto/2019/paavo_9_koko_2019.px/'
dfs = {}
years = np.array([[2014], [2015], [2016], [2017]])
# Download and clean each dataframe
dfs[2013] = clean_df(fetch_dataframe(url_2013, paavo_queries.surface_population_query))
dfs[2014] = clean_df(fetch_dataframe(url_2014, paavo_queries.surface_population_query))
dfs[2015] = clean_df(fetch_dataframe(url_2015, paavo_queries.surface_population_query))
dfs[2016] = clean_df(fetch_dataframe(url_2016, paavo_queries.surface_population_query))
dfs[2017] = clean_df(fetch_dataframe(url_2017, paavo_queries.surface_population_query))
# Change column labels
for (year, df) in dfs.items():
pop_str = 'Population (' + str(year) +')'
area_str = 'Surface area (' + str(year) + ')'
density_str = 'Density (' + str(year) +')'
if year > 2013:
df.rename(columns={df.columns[0]: area_str, df.columns[1]: pop_str}, inplace=True)
df.insert(2, density_str, df[pop_str] / df[area_str])
df.replace({0.0: np.nan}, inplace=True)
else:
df.rename(columns={df.columns[0]: pop_str}, inplace=True)
df.replace({0.0: np.nan}, inplace=True)
# Merge dataframe using Postal code index, manually adding density and surface area columns for 2013
main_table = dfs[2014]
main_table = main_table.merge(dfs[2013], how='left', on='Postal code')
main_table = main_table.merge(dfs[2015], how='left', on='Postal code')
main_table = main_table.merge(dfs[2016], how='left', on='Postal code')
main_table = main_table.merge(dfs[2017], how='left', on='Postal code')
main_table.insert(0, 'Density (2013)', np.nan)
main_table.insert(0, 'Surface area (2013)', np.nan)
densities = main_table[['Density (2014)', 'Density (2015)', 'Density (2016)', 'Density (2017)']]
# Linear regression on density. If density is negative, drop the latest density and retry. If there is only 1 usable density, copy it to the 2013 density
for index, row in densities.iterrows():
y = row.to_numpy()
valid_index = np.where(y >= 0)
valid_years = years[valid_index]
y = y[valid_index]
density_prediction = -1.0
while len(y) > 1 and density_prediction < 0:
reg = LinearRegression().fit(valid_years, y)
density_prediction = reg.predict([[2013]])
if density_prediction < 0:
y = y[:-1]
valid_years = valid_years[:-1]
if len(y) > 1:
main_table.at[index, 'Density (2013)'] = density_prediction
elif len(y) == 1:
main_table.at[index, 'Density (2013)'] = y[0]
else:
continue
# Calculate surface area using density and population
for index, row in main_table.iterrows():
if np.isnan(row['Population (2013)']):
continue
elif row['Population (2013)'] > 0 and row['Density (2013)'] > 0:
main_table.at[index, 'Surface area (2013)'] = round(row['Population (2013)']/row['Density (2013)'])
elif row['Population (2013)'] == 0 and row['Density (2013)'] == 0:
main_table.at[index, 'Surface area (2013)'] = row['Surface area (2014)']
main_table = main_table.fillna(0)
# Results
densities = main_table[['Density (2013)', 'Density (2014)', 'Density (2015)', 'Density (2016)', 'Density (2017)']]
areas = main_table[['Surface area (2013)', 'Surface area (2014)', 'Surface area (2015)', 'Surface area (2016)', 'Surface area (2017)']]
# Export to tsv files
densities.to_csv(density_file_destination, sep='\t')
areas.to_csv(area_file_destination, sep='\t')
def fetch_paavo_housing(destination_directory, postal_code_file, density_file):
def postal_standardize(df):
df= df.astype({'Postal code': str})
for i in list(df.index):
df.at[i, 'Postal code'] = '0' * (5-len(df.at[i,'Postal code']))+ df.at[i, 'Postal code']
return df
def postal_merge(left, right):
return left.merge(right, how='left', on='Postal code')
def get_mean_simple(df, n):
"""Calculate housing prices for groups of postal codes with the same first 6-n digits"""
df_n = pd.DataFrame(df['Postal code'].apply(lambda x: x[:(1 - n)]))
df_n.rename(columns={df_n.columns[0]: 'Postal code'}, inplace=True)
df_n = df_n.join(df[['Total value', 'Number']].copy())
df_n = df_n.groupby("Postal code", as_index=False).agg("sum")
df_n['Mean'] = df_n['Total value'] / df_n['Number']
df_n.drop(['Total value', 'Number'], axis=1, inplace=True)
# df_n.set_index('Postal code', inplace=True)
return df_n
def impute_simple(df, df_n):
"""Impute using the results above"""
df_ni = df_n.set_index('Postal code')
for code in list(df_n['Postal code']):
df_rows = np.array(df[df['Postal code'].str.startswith(code)].index)
for i in df_rows:
if df.at[i, 'Mean'] == 0 or np.isnan(df.at[i, 'Mean']):
df.at[i, 'Mean'] = df_ni.at[code, 'Mean']
return df
def impute_with_density(df, postal_df):
"""Impute with respect to density using a linear model"""
def postal_truncate(n):
df_n = postal_df.copy()
df_n['Postal code'] = df_n['Postal code'].apply(lambda x: x[:(1-n)])
df_n.drop_duplicates(subset='Postal code', inplace=True)
return df_n
def impute_price(df_, n):
truncated_postal = postal_truncate(n)
for code in truncated_postal['Postal code']:
sub_df = df_[df_['Postal code'].str.startswith(code)]
good_df = sub_df[sub_df['Mean'] != 0]
bad_df = sub_df[sub_df['Mean'] == 0]
if len(good_df.index) >= 7:
good_df = good_df.nsmallest(15, 'Mean')
X = good_df['Density']
y = good_df['Mean']
X = sm.add_constant(X.values)
model = sm.OLS(y, X).fit()
for i in bad_df.index:
if df_.at[i, 'Mean'] <= 0 or np.isnan(df_.at[i, 'Mean']):
df_.at[i, 'Mean'] = int(model.predict([1, df_.at[i, 'Density']])[0])
return df_
for i in range(3,6):
df = impute_price(df, i)
return df
main_table = postal_standardize(pd.read_csv(postal_code_file, sep='\t'))
density = postal_standardize(pd.read_csv(density_file, sep='\t'))
density = density.fillna(0)
postal_code = main_table.copy()
year_list = list(range(2005, 2018))
base_query = paavo_queries.ts_housing_query['query']
for year in year_list:
for quarter in range(5):
# Construct the json query
new_query = [{"code": "Vuosi", "selection": {"filter": "item", "values": [str(year)]}}, {"code": "Neljännes", "selection": {"filter": "item", "values": [str(quarter)]}}] + base_query
quarter_query = {"query": new_query, "response": {"format": "csv"}}
if quarter == 0:
mean_label = 'Housing price (' + str(year) + ')'
else:
mean_label = str(year) + 'Q' +str(quarter)
# Get the data table for the quarter
quarter_frame = postal_standardize(fetch_dataframe(paavo_queries.housing_url, query= quarter_query))
# Leave only Postal code and house price
quarter_frame = quarter_frame[['Postal code', 'Mean', 'Number']]
# Replace missing value '.' with '0'
quarter_frame.replace({'.': '0'}, inplace=True)
# Change mean to housing price and convert to float, number to Int
quarter_frame['Mean'] = quarter_frame['Mean'].astype(int)
quarter_frame['Number'] = quarter_frame['Number'].astype(int)
# Calculate the total housing value for each row
quarter_frame['Total value'] = quarter_frame['Mean'] * quarter_frame['Number']
# Get the complete postal code
quarter_frame = postal_merge(postal_code, quarter_frame)
# Change the numbers of houses where the prices are hidden to 0 so that the calculation of the group mean is not affected
for code in list(quarter_frame.index):
if quarter_frame.at[code, 'Mean'] == 0 or np.isnan(quarter_frame.at[code, 'Mean']):
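# --- Editor's sketch (assumption, not the source): a toy run of the
# prefix-group pooling that get_mean_simple/impute_simple implement above --
# postal codes sharing a truncated prefix pool their totals, and the pooled
# mean is available as a fallback for codes with no sales of their own.
import pandas as pd

toy = pd.DataFrame({'Postal code': ['00100', '00120', '00130'],
                    'Total value': [300000.0, 0.0, 450000.0],
                    'Number': [2, 0, 3]})
toy['Prefix'] = toy['Postal code'].str[:3]
pooled = toy.groupby('Prefix', as_index=False)[['Total value', 'Number']].sum()
pooled['Mean'] = pooled['Total value'] / pooled['Number']     # prefix '001' -> 150000.0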
""" Color based K-means"""
import numpy as np
import cv2
import os
import glob
from glob import glob
from PIL import Image
from matplotlib import pyplot as plt
import pdb
heatMap_image_path = '/Users/monjoysaha/Downloads/CT_lung_segmentation-master/check/test/'
save_path = '/Users/monjoysaha/Downloads/CT_lung_segmentation-master/check/only_GGO/'
g= glob(heatMap_image_path + "/*.png")
#
for image in g:
fname_image = os.path.basename(image)
img = cv2.imread(image)
Z = np.float32(img.reshape((-1,3)))
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
K = 4
_,labels,centers = cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
labels = labels.reshape((img.shape[:-1]))
reduced = np.uint8(centers)[labels]
for i, c in enumerate(centers):
mask = cv2.inRange(labels, i, i)
mask = np.dstack([mask]*3) # Make it 3 channel
ex_img = cv2.bitwise_and(img, mask)
ex_reduced = cv2.bitwise_and(reduced, mask)
hsv = cv2.cvtColor(ex_reduced, cv2.COLOR_BGR2HSV)
lower_red = np.array([110,50,50])
upper_red = np.array([130,255,255])
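# --- Editor's sketch (assumption): a self-contained demo of the HSV band mask
# built above, on a synthetic image instead of the heat-map files. Note that in
# OpenCV's HSV space a hue of 110-130 selects blue tones (pure red sits near
# hue 0/180), so the bounds above presumably rely on how the heat maps encode
# the region of interest.
import numpy as np
import cv2

img = np.zeros((32, 32, 3), dtype=np.uint8)
img[8:24, 8:24] = (255, 0, 0)                                  # hue ~120 after BGR -> HSV
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
band = cv2.inRange(hsv, np.array([110, 50, 50]), np.array([130, 255, 255]))
assert band[16, 16] == 255 and band[0, 0] == 0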
import visualization.panda.world as wd
import modeling.geometric_model as gm
import modeling.collision_model as cm
import grasping.planning.antipodal as gpa
import numpy as np
import basis.robot_math as rm
import robot_sim.robots.fr5.fr5 as fr5
import manipulation.pick_place_planner as ppp
import motion.probabilistic.rrt_connect as rrtc
import motion.optimization_based.incremental_nik as inik
import matplotlib.pyplot as plt
import snsplot
snsplot.set()
def spiral(start_angle, start_radius, linear_vel, radius_per_turn, max_radius, granularity=0.1):
r, phi = start_radius, start_angle
xpt, ypt = [], []
while r <= max_radius:
xpt.append(r * np.cos(phi))
ypt.append(r * np.sin(phi))
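# --- Editor's sketch (assumption, not the source's continuation): a complete
# Archimedean-spiral sampler in the same spirit, stepping the angle so each
# step covers roughly linear_vel * granularity of arc length. Assumes
# start_radius > 0 and radius_per_turn > 0 so the loop terminates.
import numpy as np

def spiral_xy(start_angle, start_radius, linear_vel, radius_per_turn,
              max_radius, granularity=0.1):
    r, phi = max(start_radius, 1e-3), start_angle
    xpt, ypt = [], []
    while r <= max_radius:
        xpt.append(r * np.cos(phi))
        ypt.append(r * np.sin(phi))
        dphi = linear_vel * granularity / r          # arc length ~ r * dphi
        phi += dphi
        r += radius_per_turn * dphi / (2 * np.pi)    # grow by radius_per_turn per full turn
    return np.array(xpt), np.array(ypt)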
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
2020
@author: jmmauricio
"""
import numpy as np
from pydae.tools import get_v,get_i,get_s
import json
from collections import namedtuple
import numba
class grid(object):
def __init__(self,syst):
#def bokeh_tools(data):
self.syst = syst
self.s_radio_scale = 0.01
self.s_radio_max = 20
self.s_radio_min = 1
with np.load('matrices.npz') as data:
Y_primitive = data['Y_primitive']
A_conect = data['A_conect']
nodes_list = data['nodes_list']
node_sorter = data['node_sorter']
Y_vv = data['Y_vv']
Y_vi = data['Y_vi']
N_v = int(data['N_v'])
self.nodes_list = nodes_list
self.Y_primitive = Y_primitive
self.A_conect = A_conect
self.node_sorter = node_sorter
self.Y_vv = Y_vv
self.Y_vi = Y_vi
self.N_v = N_v
json_file = 'grid_data.json'
json_file = json_file
json_data = open(json_file).read().replace("'",'"')
data = json.loads(json_data)
self.buses = data['buses']
if 'transformers' in data:
self.transformers = data['transformers']
else:
self.transformers = []
self.lines = data['lines']
self.loads = data['loads']
if 'vscs' in data:
self.vscs = data['vscs']
else: self.vscs = []
def dae2vi(self):
'''
For obtaining line currents from node voltages after power flow is solved.
Returns
-------
None.
'''
n2a = {'1':'a','2':'b','3':'c','4':'n'}
a2n = {'a':1,'b':2,'c':3,'n':4}
V_node_list = []
I_node_list = [0.0]*len(self.nodes_list)
self.I_node_list = I_node_list
for item in self.nodes_list:
bus_name,phase_name = item.split('.')
#i = get_i(self.syst,bus_name,phase_name=n2a[phase_name],i_type='phasor',dq_name='ri')
#I_node_list += [i]
v = get_v(self.syst,bus_name,phase_name=n2a[phase_name],v_type='phasor',dq_name='ri')
V_node_list += [v]
V_node = np.array(V_node_list).reshape(len(V_node_list),1)
V_known = np.copy(V_node[:self.N_v])
V_unknown = np.copy(V_node[self.N_v:])
I_unknown = self.Y_vv @ V_known + self.Y_vi @ V_unknown
#self.I_node = I_node
self.V_node = V_node
self.I_unknown = I_unknown
self.I_known = np.array(I_node_list).reshape(len(I_node_list),1)
self.I_node = np.vstack((self.I_unknown,self.I_known))
for load in self.loads:
bus_name = load['bus']
if load['type'] == '3P+N':
for ph in ['a','b','c','n']:
idx = list(self.nodes_list).index(f"{load['bus']}.{a2n[ph]}")
i_ = get_i(self.syst,'load_' + bus_name,phase_name=ph,i_type='phasor',dq_name='ri')
self.I_node[idx] += i_
if load['type'] == '1P+N':
ph = load['bus_nodes'][0]
idx = list(self.nodes_list).index(f"{load['bus']}.{ph}")
i_ = get_i(self.syst,'load_' + bus_name,phase_name=n2a[str(ph)],i_type='phasor',dq_name='ri')
self.I_node[idx] += i_
ph = load['bus_nodes'][1]
idx = list(self.nodes_list).index(f"{load['bus']}.{ph}")
i_ = get_i(self.syst,'load_' + bus_name,phase_name=n2a[str(ph)],i_type='phasor',dq_name='ri')
self.I_node[idx] += i_
for vsc in self.vscs:
bus_name = vsc['bus_ac']
phases = ['a','b','c','n']
if vsc['type'] == 'ac3ph3wvdcq' or vsc['type'] == 'ac3ph3wpq':
phases = ['a','b','c']
for ph in phases:
idx = list(self.nodes_list).index(f"{vsc['bus_ac']}.{a2n[ph]}")
i_ = get_i(self.syst,'vsc_' + bus_name,phase_name=ph,i_type='phasor',dq_name='ri')
self.I_node[idx] += i_
if not vsc['type'] == 'ac3ph3wvdcq' or vsc['type'] == 'ac3ph3wpq':
bus_name = vsc['bus_dc']
for ph in ['a','n']:
idx = list(self.nodes_list).index(f"{vsc['bus_dc']}.{a2n[ph]}")
i_ = get_i(self.syst,'vsc_' + bus_name,phase_name=ph,i_type='phasor',dq_name='r')
self.I_node[idx] += i_
I_lines = self.Y_primitive @ self.A_conect.T @ self.V_node
self.I_lines = I_lines
def get_v(self):
'''
Compute phase-neutral and phase-phase voltages from power flow solution and put values
in buses dictionary.
'''
res = {}
V_sorted = []
I_sorted = []
S_sorted = []
start_node = 0
self.V_results = self.V_node
# self.I_results = self.I_node
V_sorted = self.V_node[self.node_sorter]
I_sorted = self.I_node[self.node_sorter]
nodes2string = ['v_an','v_bn','v_cn','v_gn']
for bus in self.buses:
N_nodes = bus['N_nodes']
# for node in range(5):
# bus_node = '{:s}.{:s}'.format(str(bus['bus']),str(node))
# if bus_node in self.nodes:
# V = self.V_results[self.nodes.index(bus_node)][0]
# V_sorted += [V]
# nodes_in_bus += [node]
# for node in range(5):
# bus_node = '{:s}.{:s}'.format(str(bus['bus']),str(node))
# if bus_node in self.nodes:
# I = self.I_results[self.nodes.index(bus_node)][0]
# I_sorted += [I]
if N_nodes==3: # if 3 phases
v_ag = V_sorted[start_node+0,0]
v_bg = V_sorted[start_node+1,0]
v_cg = V_sorted[start_node+2,0]
i_a = I_sorted[start_node+0,0]
i_b = I_sorted[start_node+1,0]
i_c = I_sorted[start_node+2,0]
s_a = (v_ag)*np.conj(i_a)
s_b = (v_bg)*np.conj(i_b)
s_c = (v_cg)*np.conj(i_c)
start_node += 3
bus.update({'v_an':np.abs(v_ag),
'v_bn':np.abs(v_bg),
'v_cn':np.abs(v_cg),
'v_ng':0.0})
bus.update({'deg_an':np.angle(v_ag, deg=True),
'deg_bn':np.angle(v_bg, deg=True),
'deg_cn':np.angle(v_cg, deg=True),
'deg_ng':np.angle(0, deg=True)})
bus.update({'v_ab':np.abs(v_ag-v_bg),
'v_bc':np.abs(v_bg-v_cg),
'v_ca':np.abs(v_cg-v_ag)})
bus.update({'p_a':s_a.real,
'p_b':s_b.real,
'p_c':s_c.real})
bus.update({'q_a':s_a.imag,
'q_b':s_b.imag,
'q_c':s_c.imag})
tup = namedtuple('tup',['v_ag', 'v_bg', 'v_cg'])
res.update({bus['bus']:tup(v_ag,v_bg,v_cg)})
if N_nodes==4: # if 3 phases + neutral
v_ag = V_sorted[start_node+0,0]
v_bg = V_sorted[start_node+1,0]
v_cg = V_sorted[start_node+2,0]
v_ng = V_sorted[start_node+3,0]
i_a = I_sorted[start_node+0,0]
i_b = I_sorted[start_node+1,0]
i_c = I_sorted[start_node+2,0]
i_n = I_sorted[start_node+3,0]
v_an = v_ag-v_ng
v_bn = v_bg-v_ng
v_cn = v_cg-v_ng
s_a = (v_an)*np.conj(i_a)
s_b = (v_bn)*np.conj(i_b)
s_c = (v_cn)*np.conj(i_c)
bus.update({'v_an':np.abs(v_an),
'v_bn':np.abs(v_bn),
'v_cn':np.abs(v_cn),
'v_ng':np.abs(v_ng)})
bus.update({'deg_an':np.angle(v_ag-v_ng, deg=True),
'deg_bn':np.angle(v_bg-v_ng, deg=True),
'deg_cn':np.angle(v_cg-v_ng, deg=True),
'deg_ng':np.angle(v_ng, deg=True)})
bus.update({'v_ab':np.abs(v_ag-v_bg),
'v_bc':np.abs(v_bg-v_cg),
'v_ca':np.abs(v_cg-v_ag)})
bus.update({'p_a':s_a.real,
'p_b':s_b.real,
'p_c':s_c.real})
bus.update({'q_a':s_a.imag,
'q_b':s_b.imag,
'q_c':s_c.imag})
start_node += 4
tup = namedtuple('tup',['v_ag', 'v_bg', 'v_cg', 'v_ng','v_an', 'v_bn', 'v_cn'])
res.update({bus['bus']:tup(v_ag,v_bg,v_cg,v_ng,v_an,v_bn,v_cn)})
self.V = np.array(V_sorted).reshape(len(V_sorted),1)
self.res = res
return 0 #self.V
def get_i(self):
'''
Compute line currents from power flow solution and put values
in transformers and lines dictionaries.
'''
I_lines =self.I_lines
it_single_line = 0
for trafo in self.transformers:
if 'conductors_j' in trafo:
cond_1 = trafo['conductors_j']
else:
cond_1 = trafo['conductors_1']
if 'conductors_k' in trafo:
cond_2 = trafo['conductors_k']
else:
cond_2 = trafo['conductors_2']
I_1a = (I_lines[it_single_line,0])
I_1b = (I_lines[it_single_line+1,0])
I_1c = (I_lines[it_single_line+2,0])
I_1n = (I_lines[it_single_line+3,0])
I_2a = (I_lines[it_single_line+cond_1+0,0])
I_2b = (I_lines[it_single_line+cond_1+1,0])
I_2c = (I_lines[it_single_line+cond_1+2,0])
if cond_1>3: I_1n = (I_lines[it_single_line+cond_1+3,0])
if cond_2>3: I_2n = (I_lines[it_single_line+cond_2+3,0])
#I_n = (I_lines[it_single_line+3,0])
if cond_1 <=3:
I_1n = I_1a+I_1b+I_1c
if cond_2 <=3:
I_2n = I_2a+I_2b+I_2c
it_single_line += cond_1 + cond_2
trafo.update({'i_1a_m':np.abs(I_1a)})
trafo.update({'i_1b_m':np.abs(I_1b)})
trafo.update({'i_1c_m':np.abs(I_1c)})
trafo.update({'i_1n_m':np.abs(I_1n)})
trafo.update({'i_2a_m':np.abs(I_2a)})
trafo.update({'i_2b_m':np.abs(I_2b)})
trafo.update({'i_2c_m':np.abs(I_2c)})
trafo.update({'i_2n_m':np.abs(I_2n)})
trafo.update({'deg_1a':np.angle(I_1a, deg=True)})
trafo.update({'deg_1b':np.angle(I_1b, deg=True)})
trafo.update({'deg_1c':np.angle(I_1c, deg=True)})
trafo.update({'deg_1n':np.angle(I_1n, deg=True)})
trafo.update({'deg_2a':np.angle(I_2a, deg=True)})
trafo.update({'deg_2b':np.angle(I_2b, deg=True)})
trafo.update({'deg_2c':np.angle(I_2c, deg=True)})
trafo.update({'deg_2n':np.angle(I_2n, deg=True)})
self.I_lines = I_lines
for line in self.lines:
if line['type'] == 'z':
N_conductors = len(line['bus_j_nodes'])
if N_conductors == 3:
I_a = (I_lines[it_single_line,0])
I_b = (I_lines[it_single_line+1,0])
I_c = (I_lines[it_single_line+2,0])
#I_n = (I_lines[it_single_line+3,0])
I_n = I_a+I_b+I_c
alpha = np.exp(2.0/3*np.pi*1j)
i_z = 1/3*(I_a+I_b+I_c)
i_p = 1.0/3.0*(I_a + I_b*alpha + I_c*alpha**2)
i_n = 1.0/3.0*(I_a + I_b*alpha**2 + I_c*alpha)
it_single_line += N_conductors
line.update({'i_j_a_m':np.abs(I_a)})
line.update({'i_j_b_m':np.abs(I_b)})
line.update({'i_j_c_m':np.abs(I_c)})
line.update({'i_j_n_m':np.abs(I_n)})
line.update({'deg_j_a':np.angle(I_a, deg=True)})
line.update({'deg_j_b':np.angle(I_b, deg=True)})
line.update({'deg_j_c':np.angle(I_c, deg=True)})
line.update({'deg_j_n':np.angle(I_n, deg=True)})
line.update({'i_k_a_m':np.abs(I_a)})
line.update({'i_k_b_m':np.abs(I_b)})
line.update({'i_k_c_m':np.abs(I_c)})
line.update({'i_k_n_m':np.abs(I_n)})
line.update({'deg_k_a':np.angle(I_a, deg=True)})
line.update({'deg_k_b':np.angle(I_b, deg=True)})
line.update({'deg_k_c':np.angle(I_c, deg=True)})
line.update({'deg_k_n':np.angle(I_n, deg=True)})
line.update({'i_z':np.abs(i_z)})
line.update({'i_p':np.abs(i_p)})
line.update({'i_n':np.abs(i_n)})
if N_conductors == 4:
I_a = (I_lines[it_single_line,0])
I_b = (I_lines[it_single_line+1,0])
I_c = (I_lines[it_single_line+2,0])
I_n = (I_lines[it_single_line+3,0])
it_single_line += N_conductors
line.update({'i_j_a_m':np.abs(I_a)})
line.update({'i_j_b_m':np.abs(I_b)})
line.update({'i_j_c_m':np.abs(I_c)})
line.update({'i_j_n_m':np.abs(I_n)})
line.update({'deg_j_a':np.angle(I_a, deg=True)})
line.update({'deg_j_b':np.angle(I_b, deg=True)})
line.update({'deg_j_c':np.angle(I_c, deg=True)})
line.update({'deg_j_n':np.angle(I_n, deg=True)})
line.update({'i_k_a_m':np.abs(I_a)})
line.update({'i_k_b_m':np.abs(I_b)})
line.update({'i_k_c_m':np.abs(I_c)})
line.update({'i_k_n_m':np.abs(I_n)})
line.update({'deg_k_a':np.angle(I_a, deg=True)})
line.update({'deg_k_b':np.angle(I_b, deg=True)})
line.update({'deg_k_c':np.angle(I_c, deg=True)})
line.update({'deg_k_n':np.angle(I_n, deg=True)})
if line['type'] == 'pi':
N_conductors = len(line['bus_j_nodes'])
if N_conductors == 3:
I_j_a = I_lines[it_single_line+0,0]+I_lines[it_single_line+3,0]
I_j_b = I_lines[it_single_line+1,0]+I_lines[it_single_line+4,0]
I_j_c = I_lines[it_single_line+2,0]+I_lines[it_single_line+5,0]
I_k_a = I_lines[it_single_line+0,0]-I_lines[it_single_line+6,0]
I_k_b = I_lines[it_single_line+1,0]-I_lines[it_single_line+7,0]
I_k_c = I_lines[it_single_line+2,0]-I_lines[it_single_line+8,0]
#I_n = (I_lines[it_single_line+3,0])
I_j_n = I_j_a+I_j_b+I_j_c
I_k_n = I_k_a+I_k_b+I_k_c
alpha = np.exp(2.0/3*np.pi*1j)
i_z = 1/3*(I_j_a+I_j_b+I_j_c)
i_p = 1.0/3.0*(I_j_a + I_j_b*alpha + I_j_c*alpha**2)
i_n = 1.0/3.0*(I_j_a + I_j_b*alpha**2 + I_j_c*alpha)
it_single_line += N_conductors*3
line.update({'i_j_a_m':np.abs(I_j_a)})
line.update({'i_j_b_m':np.abs(I_j_b)})
line.update({'i_j_c_m':np.abs(I_j_c)})
line.update({'i_j_n_m':np.abs(I_j_n)})
line.update({'deg_j_a':np.angle(I_j_a, deg=True)})
line.update({'deg_j_b':np.angle(I_j_b, deg=True)})
line.update({'deg_j_c':np.angle(I_j_c, deg=True)})
line.update({'deg_j_n':np.angle(I_j_n, deg=True)})
line.update({'i_k_a_m':np.abs(I_k_a)})
line.update({'i_k_b_m':np.abs(I_k_b)})
line.update({'i_k_c_m':np.abs(I_k_c)})
line.update({'i_k_n_m':np.abs(I_k_n)})
line.update({'deg_k_a':np.angle(I_k_a, deg=True)})
line.update({'deg_k_b':np.angle(I_k_b, deg=True)})
line.update({'deg_k_c':np.angle(I_k_c, deg=True)})
line.update({'deg_k_n':np.angle(I_k_n, deg=True)})
line.update({'i_z':np.abs(i_z)})
line.update({'i_p':np.abs(i_p)})
line.update({'i_n':np.abs(i_n)})
if N_conductors == 4:
I_j_a = I_lines[it_single_line+0,0]+I_lines[it_single_line+3,0]
I_j_b = I_lines[it_single_line+1,0]+I_lines[it_single_line+4,0]
I_j_c = I_lines[it_single_line+2,0]+I_lines[it_single_line+5,0]
I_k_a = I_lines[it_single_line+0,0]-I_lines[it_single_line+6,0]
I_k_b = I_lines[it_single_line+1,0]-I_lines[it_single_line+7,0]
I_k_c = I_lines[it_single_line+2,0]-I_lines[it_single_line+8,0]
I_j_n = I_lines[it_single_line+3,0]
I_k_n = I_lines[it_single_line+3,0]
#I_n = (I_lines[it_single_line+3,0])
I_j_n = I_j_a+I_j_b+I_j_c
I_k_n = I_k_a+I_k_b+I_k_c
alpha = np.exp(2.0/3*np.pi*1j)
i_z = 1/3*(I_j_a+I_j_b+I_j_c)
i_p = 1.0/3.0*(I_j_a + I_j_b*alpha + I_j_c*alpha**2)
i_n = 1.0/3.0*(I_j_a + I_j_b*alpha**2 + I_j_c*alpha)
it_single_line += N_conductors*3
line.update({'i_j_a_m':np.abs(I_j_a)})
line.update({'i_j_b_m':np.abs(I_j_b)})
line.update({'i_j_c_m':np.abs(I_j_c)})
line.update({'i_j_n_m':np.abs(I_j_n)})
line.update({'deg_j_a':np.angle(I_j_a, deg=True)})
line.update({'deg_j_b':np.angle(I_j_b, deg=True)})
line.update({'deg_j_c':np.angle(I_j_c, deg=True)})
line.update({'deg_j_n':np.angle(I_j_n, deg=True)})
line.update({'i_k_a_m':np.abs(I_k_a)})
line.update({'i_k_b_m':np.abs(I_k_b)})
line.update({'i_k_c_m':np.abs(I_k_c)})
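# --- Editor's note (sketch, not from the source): the zero/positive/negative
# sequence currents computed repeatedly above (i_z, i_p, i_n) are the standard
# symmetrical-component transform; written once as a standalone helper it is:
import numpy as np

def sym_components(i_a, i_b, i_c):
    alpha = np.exp(2j * np.pi / 3)
    i_zero = (i_a + i_b + i_c) / 3.0
    i_pos = (i_a + alpha * i_b + alpha**2 * i_c) / 3.0
    i_neg = (i_a + alpha**2 * i_b + alpha * i_c) / 3.0
    return i_zero, i_pos, i_neg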
from skimage import filters, io
import numpy as np
import cv2
from scipy import signal
from matplotlib import pyplot as plt
def get_ROI(raw_image):
"""
Get the ROI of the input raw image.
:param raw_image: grayscale image.
:return: grayscale image.
"""
float_image = np.float32(raw_image)
shape_height, shape_width = raw_image.shape
otsu_threshold = filters.threshold_otsu(float_image)
otsu_mask = float_image < otsu_threshold
int_mask = np.ones_like(float_image) * otsu_mask
kernel = np.ones((5, 5), np.int64)
from typing import Tuple
import numpy as np
import torch
from prettytable import PrettyTable
from sklearn.metrics import roc_auc_score
from torch.utils.data import DataLoader
from tqdm import tqdm
from datasets.base import VideoAnomalyDetectionDataset
from models.base import BaseModule
from models.loss_functions import LSALoss
from utils import normalize
from utils import novelty_score
class ResultsAccumulator:
"""
Accumulates results in a buffer for a sliding window
results computation. Employed to get frame-level scores
from clip-level scores.
` In order to recover the anomaly score of each
frame, we compute the mean score of all clips in which it
appears`
"""
def __init__(self, time_steps):
# type: (int) -> None
"""
Class constructor.
:param time_steps: the number of frames each clip holds.
"""
# These buffers rotate.
self._buffer = np.zeros(shape=(time_steps,), dtype=np.float32)
self._counts = np.zeros(shape=(time_steps,))
def push(self, score):
# type: (float) -> None
"""
Pushes the score of a clip into the buffer.
:param score: the score of a clip
"""
# Update buffer and counts
self._buffer += score
self._counts += 1
def get_next(self):
# type: () -> float
"""
Gets the next frame (the first in the buffer) score,
computed as the mean of the clips in which it appeared,
and rolls the buffers.
:return: the averaged score of the frame exiting the buffer.
"""
# Return first in buffer
ret = self._buffer[0] / self._counts[0]
# Roll time backwards
self._buffer = np.roll(self._buffer, shift=-1)
self._counts = np.roll(self._counts, shift=-1)
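# --- Editor's note (assumption, not the source's continuation): get_next
# typically finishes by clearing the slot that just rotated to the end
# (self._buffer[-1] = 0; self._counts[-1] = 0) and returning ret. A minimal
# standalone illustration of the same clip-to-frame averaging:
import numpy as np

buf = np.zeros(4)
cnt = np.zeros(4)
for clip_score in (0.2, 0.4, 0.6):      # three overlapping clips cover frame 0
    buf += clip_score
    cnt += 1
frame_score = buf[0] / cnt[0]           # mean score of the clips containing the frame
assert np.isclose(frame_score, 0.4)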
"""Testing module for the `Satellite` class."""
from celest.encounter.groundposition import GroundPosition
from celest.encounter._window_handling import Window, Windows
from celest.satellite.coordinate import Coordinate
from celest.satellite.satellite import Satellite
from celest.satellite.time import Time
from unittest import TestCase
import numpy as np
import unittest
class TestSatellite(TestCase):
def setUp(self):
"""Test fixure for test method execution."""
fname = "tests/test_data/coordinate_validation_set.txt"
data = np.loadtxt(fname=fname, delimiter="\t", skiprows=1)
# Copyright 2017 the pycolab Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Get to the top to win, but watch out for the fiery shockwaves of death.
Command-line usage: `shockwave.py <level>`, where `<level>` is an optional
integer argument that is either -1 (selecting a randomly-generated map) or
0 (selecting the map hard-coded in this module).
Tip: Try hiding in the blue bunkers.
Keys: up, left, right.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
import sys
import numpy as np
from scipy import ndimage
from gym_pycolab import ascii_art
from gym_pycolab import human_ui
from gym_pycolab import things as plab_things
from gym_pycolab.prefab_parts import sprites as prefab_sprites
# Just one level for now.
LEVELS = [
['^^^^^^^^^^^^^^^',
' ',
' + +',
' == ++ == +',
' +',
'======= +',
' + +',
' + ++ ',
'+ == ',
'+ + ',
' = ',
' +++ P ++ '],
]
COLOURS = {'+': (0, 0, 999), # Blue background. Safe from fire here.
'P': (0, 999, 0), # The green player.
' ': (500, 500, 500), # Exposed areas where the player might die.
'^': (700, 700, 700), # Permanent safe zone.
'=': (999, 600, 200), # Impassable wall.
'@': (999, 0, 0)} # The fiery shockwave.
def random_level(height=12, width=12, safety_density=0.15):
"""Returns a random level."""
level = np.full((height, width), ' ', dtype='|S1')
# Add some safe areas.
level[np.random.random_sample(level.shape) < safety_density] = '+'
# Place walls on random, but not consecutive, rows. Also not on the top or
# bottom rows.
valid_rows = set(range(1, height))
while valid_rows:
row = np.random.choice(list(valid_rows))
n_walls = np.random.randint(2, width - 1 - 2)
mask = np.zeros((width,), dtype=np.bool)
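# --- Editor's sketch (assumption, not the source's continuation): one way the
# wall placement typically continues -- a contiguous run of '=' cells is written
# into the chosen row, and neighbouring rows leave the pool so wall rows are
# never adjacent. `level`, `row` and `valid_rows` refer to the variables in
# random_level above.
import numpy as np

width, n_walls = 12, 4
wall_mask = np.zeros((width,), dtype=bool)
start = np.random.randint(0, width - n_walls + 1)
wall_mask[start:start + n_walls] = True
# level[row, wall_mask] = '='
# valid_rows -= {row - 1, row, row + 1}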
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
'''Analysis file.'''
import sys
import os.path
import tensorflow as tf
from absl import app
from absl import flags
from absl import gfile
import cPickle as pickle
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pylab
import matplotlib.pyplot as plt
import numpy as np, h5py
import scipy.io as sio
from scipy import ndimage
import random
import re # regular expression matching
FLAGS = flags.FLAGS
flags.DEFINE_string('folder_name', 'experiment4', 'folder where to store all the data')
flags.DEFINE_string('save_location',
'/home/bhaishahster/',
'where to store logs and outputs?');
flags.DEFINE_string('data_location',
'/home/bhaishahster/data_breakdown/',
'where to take data from?')
flags.DEFINE_integer('n_b_in_c', 10, 'number of batches in one chunk of data')
flags.DEFINE_integer('np_randseed', 23, 'numpy RNG seed')
flags.DEFINE_integer('randseed', 65, 'python RNG seed')
flags.DEFINE_integer('ratio_SU', 2, 'ratio of subunits/cells')
flags.DEFINE_string('model_id', 'poisson', 'which model to fit')
FLAGS = flags.FLAGS
def main(argv):
print('\nCode started')
np.random.seed(FLAGS.np_randseed)
random.seed(FLAGS.randseed)
## Load data summary
filename = FLAGS.data_location + 'data_details.mat'
summary_file = gfile.Open(filename, 'r')
data_summary = sio.loadmat(summary_file)
cells = np.squeeze(data_summary['cells'])
if FLAGS.model_id == 'poisson' or FLAGS.model_id == 'logistic' or FLAGS.model_id == 'hinge':
cells_choose = (cells ==3287) | (cells ==3318 ) | (cells ==3155) | (cells ==3066)
if FLAGS.model_id == 'poisson_full':
cells_choose = np.array(np.ones(np.shape(cells)), dtype='bool')
n_cells = np.sum(cells_choose)
tot_spks = np.squeeze(data_summary['tot_spks'])
total_mask = np.squeeze(data_summary['totalMaskAccept_log']).T
tot_spks_chosen_cells = tot_spks[cells_choose]
chosen_mask = np.array(np.sum(total_mask[cells_choose,:],0)>0, dtype='bool')
print(np.shape(chosen_mask))
print(np.sum(chosen_mask))
stim_dim = np.sum(chosen_mask)
print('\ndataset summary loaded')
# use stim_dim, chosen_mask, cells_choose, tot_spks_chosen_cells, n_cells
# decide the number of subunits to fit
n_su = FLAGS.ratio_SU*n_cells
#batchsz = [100, 500, 1000, 100, 500, 1000, 100, 500, 1000, 1000, 1000, 5000, 10000, 5000, 10000]
#n_b_in_c = [10, 2, 1, 10, 2, 1, 10, 2, 1, 1, 1, 1, 1, 1, 1 ]
#step_sz = [0.0001, 0.0001, 0.0001, 0.01, 0.01, 0.01 , 1, 1, 1, 10, 100, 10, 10, 1, 1 ]
batchsz = [100, 500, 1000, 5000, 1000, 100, 500, 1000, 5000, 10000, 100, 500, 1000, 5000, 10000, 100, 500, 1000, 5000, 10000]
n_b_in_c = [10, 2, 1, 1, 1, 10, 2, 1, 1, 1, 10, 2, 1, 1, 1, 10, 2, 1, 1, 1 ]
step_sz = [0.1, 0.1, 0.1, 0.1, 0.1, 1 , 1, 1, 1, 1, 5, 5, 5, 5, 5, 10, 10, 10, 10, 10 ]
with tf.Session() as sess:
# Learn population model!
stim = tf.placeholder(tf.float32, shape=[None, stim_dim], name='stim')
resp = tf.placeholder(tf.float32, name='resp')
data_len = tf.placeholder(tf.float32, name='data_len')
# get filename
if FLAGS.model_id == 'poisson' or FLAGS.model_id == 'poisson_full':
w = tf.Variable(np.array(0.01 * np.random.randn(stim_dim, n_su), dtype='float32'))
a = tf.Variable(np.array(0.1 * np.random.rand(n_cells, 1, n_su), dtype='float32'))
if FLAGS.model_id == 'logistic' or FLAGS.model_id == 'hinge':
w = tf.Variable(np.array(0.01 * np.random.randn(stim_dim, n_su), dtype='float32'))
a = tf.Variable(np.array(0.01 * np.random.rand(n_su, n_cells), dtype='float32'))
b_init = np.random.randn(n_cells) #np.log((np.sum(response,0))/(response.shape[0]-np.sum(response,0)))
b = tf.Variable(b_init,dtype='float32')
plt.figure()
for icnt, ibatchsz in enumerate(batchsz):
in_b_in_c = n_b_in_c[icnt]
istep_sz = np.array(step_sz[icnt],dtype='double')
print(icnt)
if FLAGS.model_id == 'poisson':
short_filename = ('data_model=ASM_pop_batch_sz='+ str(ibatchsz) + '_n_b_in_c' + str(in_b_in_c) +
'_step_sz'+ str(istep_sz)+'_bg')
else:
short_filename = ('data_model='+ str(FLAGS.model_id) +'_batch_sz='+ str(ibatchsz) + '_n_b_in_c' + str(in_b_in_c) +
'_step_sz'+ str(istep_sz)+'_bg')
parent_folder = FLAGS.save_location + FLAGS.folder_name + '/'
save_location = parent_folder +short_filename + '/'
print(gfile.IsDirectory(save_location))
print(save_location)
save_filename = save_location + short_filename
#determine filelist
file_list = gfile.ListDirectory(save_location)
save_filename = save_location + short_filename
print('\nLoading: ', save_filename)
bin_files = []
meta_files = []
for file_n in file_list:
if re.search(short_filename + '.', file_n):
if re.search('.meta', file_n):
meta_files += [file_n]
else:
bin_files += [file_n]
#print(bin_files)
print(len(meta_files), len(bin_files), len(file_list))
# get latest iteration
iterations = np.array([])
for file_name in bin_files:
try:
iterations = np.append(iterations, int(file_name.split('/')[-1].split('-')[-1]))
except:
print('Could not load filename: ' + file_name)
iterations.sort()
print(iterations)
iter_plot = iterations[-1]
print(int(iter_plot))
# load tensorflow variables
saver_var = tf.train.Saver(tf.all_variables())
restore_file = save_filename + '-' + str(int(iter_plot))
saver_var.restore(sess, restore_file)
a_eval = a.eval()
print(np.exp(np.squeeze(a_eval)))
#print(np.shape(a_eval))
# get 2D region to plot
mask2D = np.reshape(chosen_mask, [40, 80])
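# --- Editor's sketch (assumption): the checkpoint-selection idea above in
# isolation -- iteration numbers are parsed from the filename suffix and the
# largest one is the checkpoint to restore.
import numpy as np

bin_files = ['model-100', 'model-2500', 'model-900']            # illustrative names
iters = np.sort(np.array([int(f.split('-')[-1]) for f in bin_files]))
latest = iters[-1]                                              # 2500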
import numpy as np
# Photon history bits (see photon.h for source)
NO_HIT = 0x1 << 0
BULK_ABSORB = 0x1 << 1
SURFACE_DETECT = 0x1 << 2
SURFACE_ABSORB = 0x1 << 3
RAYLEIGH_SCATTER = 0x1 << 4
REFLECT_DIFFUSE = 0x1 << 5
REFLECT_SPECULAR = 0x1 << 6
SURFACE_REEMIT = 0x1 << 7
SURFACE_TRANSMIT = 0x1 << 8
BULK_REEMIT = 0x1 << 9
CHERENKOV = 0x1 << 10
SCINTILLATION = 0x1 << 11
NAN_ABORT = 0x1 << 31
class Steps(object):
def __init__(self,x,y,z,t,dx,dy,dz,ke,edep,qedep):
self.x = x
self.y = y
self.z = z
self.t = t
self.dx = dx
self.dy = dy
self.dz = dz
self.ke = ke
self.edep = edep
self.qedep = qedep
class Vertex(object):
def __init__(self, particle_name, pos, dir, ke, t0=0.0, pol=None, steps=None, children=None, trackid=-1, pdgcode=-1):
'''Create a particle vertex.
particle_name: string
Name of particle, following the GEANT4 convention.
Examples: e-, e+, gamma, mu-, mu+, pi0
pos: array-like object, length 3
Position of particle vertex (mm)
dir: array-like object, length 3
Normalized direction vector
ke: float
Kinetic energy (MeV)
t0: float
Initial time of particle (ns)
pol: array-like object, length 3
Normalized polarization vector. By default, set to None,
and the particle is treated as having a random polarization.
'''
self.particle_name = particle_name
self.pos = pos
self.dir = dir
self.pol = pol
self.ke = ke
self.t0 = t0
self.steps = steps
self.children = children
self.trackid = trackid
self.pdgcode = pdgcode
def __str__(self):
return 'Vertex('+self.particle_name+',ke='+str(self.ke)+',steps='+str(True if self.steps else False)+')'
__repr__ = __str__
class Photons(object):
def __init__(self, pos=np.empty((0,3)), dir=np.empty((0,3)), pol=np.empty((0,3)), wavelengths=np.empty((0)), t=None, last_hit_triangles=None, flags=None, weights=None, evidx=None, channel=None):
'''Create a new list of n photons.
pos: numpy.ndarray(dtype=numpy.float32, shape=(n,3))
Position 3-vectors (mm)
dir: numpy.ndarray(dtype=numpy.float32, shape=(n,3))
Direction 3-vectors (normalized)
pol: numpy.ndarray(dtype=numpy.float32, shape=(n,3))
Polarization direction 3-vectors (normalized)
wavelengths: numpy.ndarray(dtype=numpy.float32, shape=n)
Photon wavelengths (nm)
t: numpy.ndarray(dtype=numpy.float32, shape=n)
Photon times (ns)
last_hit_triangles: numpy.ndarray(dtype=numpy.int32, shape=n)
ID number of last intersected triangle. -1 if no triangle hit in last step
If set to None, a default array filled with -1 is created
flags: numpy.ndarray(dtype=numpy.uint32, shape=n)
Bit-field indicating the physics interaction history of the photon. See
history bit constants in chroma.event for definition.
weights: numpy.ndarray(dtype=numpy.float32, shape=n)
Survival probability for each photon. Used by
photon propagation code when computing likelihood functions.
evidx: numpy.ndarray(dtype=numpy.uint32, shape=n)
Index of the event in a GPU batch
'''
self.pos = np.asarray(pos, dtype=np.float32)
self.dir = np.asarray(dir, dtype=np.float32)
self.pol = np.asarray(pol, dtype=np.float32)
self.wavelengths = np.asarray(wavelengths, dtype=np.float32)
if t is None:
self.t = np.zeros(len(pos), dtype=np.float32)
else:
self.t = np.asarray(t, dtype=np.float32)
if last_hit_triangles is None:
self.last_hit_triangles = np.empty(len(pos), dtype=np.int32)
self.last_hit_triangles.fill(-1)
else:
self.last_hit_triangles = np.asarray(last_hit_triangles,
dtype=np.int32)
if flags is None:
self.flags = np.zeros(len(pos), dtype=np.uint32)
else:
self.flags = np.asarray(flags, dtype=np.uint32)
if weights is None:
self.weights = np.ones(len(pos), dtype=np.float32)
else:
self.weights = np.asarray(weights, dtype=np.float32)
if evidx is None:
self.evidx = np.zeros(len(pos), dtype=np.uint32)
else:
self.evidx = np.asarray(evidx, dtype=np.uint32)
if channel is None:
self.channel = np.zeros(len(pos), dtype=np.uint32)
else:
self.channel = np.asarray(channel, dtype=np.uint32)
def join(photon_list,concatenate=True):
'''Concatenates many photon objects together efficiently'''
if concatenate: #internally lists
pos = np.concatenate([p.pos for p in photon_list])
dir = np.concatenate([p.dir for p in photon_list])
pol = np.concatenate([p.pol for p in photon_list])
wavelengths = np.concatenate([p.wavelengths for p in photon_list])
t = np.concatenate([p.t for p in photon_list])
last_hit_triangles = np.concatenate([p.last_hit_triangles for p in photon_list])
flags = np.concatenate([p.flags for p in photon_list])
weights = np.concatenate([p.weights for p in photon_list])
evidx = np.concatenate([p.evidx for p in photon_list])
channel = np.concatenate([p.channel for p in photon_list])
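# --- Editor's usage sketch (assumption): given the Photons class above, join()
# builds one bundle from many; the per-field concatenation behaves like this.
import numpy as np

p1 = Photons(pos=np.zeros((2, 3)), dir=np.tile([0., 0., 1.], (2, 1)),
             pol=np.tile([1., 0., 0.], (2, 1)), wavelengths=np.full(2, 400.0))
p2 = Photons(pos=np.ones((3, 3)), dir=np.tile([0., 0., 1.], (3, 1)),
             pol=np.tile([1., 0., 0.], (3, 1)), wavelengths=np.full(3, 500.0))
stacked_pos = np.concatenate([p.pos for p in (p1, p2)])
assert stacked_pos.shape == (5, 3)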
"""
Created on Thu Jan 26 17:04:11 2017
@author: <NAME>, <EMAIL>
"""
#%matplotlib inline
import numpy as np
import pandas as pd
import dicom
import os
import scipy.ndimage as ndimage
import matplotlib.pyplot as plt
import scipy.ndimage # added for scaling
import cv2
import time
import glob
from skimage import measure, morphology, segmentation
import SimpleITK as sitk
RESIZE_SPACING = [2,2,2] # z, y, x (x & y MUST be the same)
RESOLUTION_STR = "2x2x2"
img_rows = 448
img_cols = 448 # global values
DO_NOT_USE_SEGMENTED = True
#STAGE = "stage1"
STAGE_DIR_BASE = "../input/%s/" # on one cluster we had input_shared
LUNA_MASKS_DIR = "../luna/data/original_lung_masks/"
luna_subset = 0 # initial
LUNA_BASE_DIR = "../luna/data/original_lungs/subset%s/" # added on AWS; data as well
LUNA_DIR = LUNA_BASE_DIR % luna_subset
CSVFILES = "../luna/data/original_lungs/CSVFILES/%s"
LUNA_ANNOTATIONS = CSVFILES % "annotations.csv"
LUNA_CANDIDATES = CSVFILES % "candidates.csv"
# Load the scans in given folder path (loads the most recent acquisition)
def load_scan(path):
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
#slices.sort(key = lambda x: int(x.InstanceNumber))
acquisitions = [x.AcquisitionNumber for x in slices]
vals, counts = np.unique(acquisitions, return_counts=True)
vals = vals[::-1] # reverse order so the later acquisitions are first (np.unique seems to always return them ordered 1, 2, etc.)
counts = counts[::-1]
## take the acquisition that has more entries; if these are identical take the later entry
acq_val_sel = vals[np.argmax(counts)]
##acquisitions = sorted(np.unique(acquisitions), reverse=True)
if len(vals) > 1:
print ("WARNING ##########: MULTIPLE acquisitions & counts, acq_val_sel, path: ", vals, counts, acq_val_sel, path)
slices2= [x for x in slices if x.AcquisitionNumber == acq_val_sel]
slices = slices2
## ONE path includes 2 acquisitions (2 sets); take the latter acquisition only, which typically is better than the first/previous ones.
## example of the '../input/stage1/b8bb02d229361a623a4dc57aa0e5c485'
#slices.sort(key = lambda x: int(x.ImagePositionPatient[2])) # from v 8, BUG should be float
slices.sort(key = lambda x: float(x.ImagePositionPatient[2])) # from v 9
try:
slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
except:
slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
for s in slices:
s.SliceThickness = slice_thickness
return slices
def get_3d_data_slices(slices): # get data in Hounsfield Units
slices.sort(key = lambda x: float(x.ImagePositionPatient[2])) # from v 9
image = np.stack([s.pixel_array for s in slices])
image = image.astype(np.int16) # ensure int16 (it may be here uint16 for some images )
image[image == -2000] = 0 # correcting cylindrical bound entries to 0
# Convert to Hounsfield units (HU)
# The intercept is usually -1024
for slice_number in range(len(slices)): # from v 8
intercept = slices[slice_number].RescaleIntercept
slope = slices[slice_number].RescaleSlope
if slope != 1: # added 16 Jan 2016, evening
image[slice_number] = slope * image[slice_number].astype(np.float64)
image[slice_number] = image[slice_number].astype(np.int16)
image[slice_number] += np.int16(intercept)
return np.array(image, dtype=np.int16)
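# --- Editor's worked example (assumption): the rescale applied above for a
# single stored pixel value -- HU = slope * stored_value + intercept, with the
# usual CT intercept of -1024.
raw_value, slope, intercept = 1200, 1, -1024
hu = slope * raw_value + intercept        # 176 HU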
def get_pixels_hu(slices):
image = np.stack([s.pixel_array for s in slices])
image = image.astype(np.int16)
# Set outside-of-scan pixels to 0
# The intercept is usually -1024, so air is approximately 0
image[image == -2000] = 0
# Convert to Hounsfield units (HU)
### slope can differ per slice -- so do it individually (case in point black_tset, slices 95 vs 96)
### Changes/correction - 31.01.2017
for slice_number in range(len(slices)):
intercept = slices[slice_number].RescaleIntercept
slope = slices[slice_number].RescaleSlope
if slope != 1:
image[slice_number] = slope * image[slice_number].astype(np.float64)
image[slice_number] = image[slice_number].astype(np.int16)
image[slice_number] += np.int16(intercept)
return np.array(image, dtype=np.int16)
MARKER_INTERNAL_THRESH = -400
MARKER_FRAME_WIDTH = 9 # 9 seems OK for the half special case ...
def generate_markers(image):
#Creation of the internal Marker
useTestPlot = False
if useTestPlot:
timg = image
plt.imshow(timg, cmap='gray')
plt.show()
add_frame_vertical = True
if add_frame_vertical: # add frame for potentially closing the lungs that touch the edge, but only vertically
fw = MARKER_FRAME_WIDTH # frame width (it looks that 2 is the minimum width for the algorithms implemented here, namely the first 2 operations for the marker_internal)
xdim = image.shape[1]
#ydim = image.shape[0]
img2 = np.copy(image)
#y3 = ydim // 3
img2 [:, 0] = -1024
img2 [:, 1:fw] = 0
img2 [:, xdim-1:xdim] = -1024
img2 [:, xdim-fw:xdim-1] = 0
marker_internal = img2 < MARKER_INTERNAL_THRESH
else:
marker_internal = image < MARKER_INTERNAL_THRESH # was -400
useTestPlot = False
if useTestPlot:
timg = marker_internal
plt.imshow(timg, cmap='gray')
plt.show()
correct_edges2 = False ## NOT a good idea - no added value
if correct_edges2:
marker_internal[0,:] = 0
marker_internal[:,0] = 0
#marker_internal[:,1] = True
#marker_internal[:,2] = True
marker_internal[511,:] = 0
marker_internal[:,511] = 0
marker_internal = segmentation.clear_border(marker_internal, buffer_size=0)
marker_internal_labels = measure.label(marker_internal)
areas = [r.area for r in measure.regionprops(marker_internal_labels)]
areas.sort()
if len(areas) > 2:
for region in measure.regionprops(marker_internal_labels):
if region.area < areas[-2]:
for coordinates in region.coords:
marker_internal_labels[coordinates[0], coordinates[1]] = 0
marker_internal = marker_internal_labels > 0
#Creation of the external Marker
external_a = ndimage.binary_dilation(marker_internal, iterations=10) # was 10
external_b = ndimage.binary_dilation(marker_internal, iterations=55) # was 55
marker_external = external_b ^ external_a
#Creation of the Watershed Marker matrix
#marker_watershed = np.zeros((512, 512), dtype=np.int) # origi
marker_watershed = np.zeros((marker_external.shape), dtype=np.int)
marker_watershed += marker_internal * 255
marker_watershed += marker_external * 128
return marker_internal, marker_external, marker_watershed
# Some of the starting code is taken from ArnavJain, since it's more readable than my own
def generate_markers_3d(image):
#Creation of the internal Marker
marker_internal = image < -400
marker_internal_labels = np.zeros(image.shape).astype(np.int16)
for i in range(marker_internal.shape[0]):
marker_internal[i] = segmentation.clear_border(marker_internal[i])
marker_internal_labels[i] = measure.label(marker_internal[i])
#areas = [r.area for r in measure.regionprops(marker_internal_labels)]
areas = [r.area for i in range(marker_internal.shape[0]) for r in measure.regionprops(marker_internal_labels[i])]
for i in range(marker_internal.shape[0]):
areas = [r.area for r in measure.regionprops(marker_internal_labels[i])]
areas.sort()
if len(areas) > 2:
for region in measure.regionprops(marker_internal_labels[i]):
if region.area < areas[-2]:
for coordinates in region.coords:
marker_internal_labels[i, coordinates[0], coordinates[1]] = 0
marker_internal = marker_internal_labels > 0
#Creation of the external Marker
# 3x3 structuring element with connectivity 1, used by default
struct1 = ndimage.generate_binary_structure(2, 1)
struct1 = struct1[np.newaxis,:,:] # expand by z axis .
external_a = ndimage.binary_dilation(marker_internal, structure=struct1, iterations=10)
external_b = ndimage.binary_dilation(marker_internal, structure=struct1, iterations=55)
marker_external = external_b ^ external_a
#Creation of the Watershed Marker matrix
#marker_watershed = np.zeros((512, 512), dtype=np.int) # origi
marker_watershed = np.zeros((marker_external.shape), dtype=np.int)
marker_watershed += marker_internal * 255
marker_watershed += marker_external * 128
return marker_internal, marker_external, marker_watershed
BINARY_CLOSING_SIZE = 7 #was 7 before final; 5 for disk seems sufficient - for safety let's go with 6 or even 7
def seperate_lungs(image):
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers(image)
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, 1)
sobel_filtered_dy = ndimage.sobel(image, 0)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
#Reducing the image created by the Watershed algorithm to its outline
outline = ndimage.morphological_gradient(watershed, size=(3,3))
outline = outline.astype(bool)
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
#Perform the Black-Hat
outline += ndimage.black_tophat(outline, structure=blackhat_struct)
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
structure = morphology.disk(BINARY_CLOSING_SIZE) # better , 5 seems sufficient, we use 7 for safety/just in case
lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, iterations=3) # was structure=np.ones((5,5))
### NOTE if no iterations, i.e. the default of 1, we get holes within the lungs for disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
segmented = np.where(lungfilter == 1, image, -2000*np.ones((512, 512))) ### was -2000
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
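# Usage sketch (sanity check only; the batch pipeline below uses seperate_lungs_cv2 instead):
# run the full watershed segmentation on one hypothetical 512x512 HU slice and confirm that
# everything outside the lungfilter has been forced to -2000 HU.
demoSeperateLungs = False
if demoSeperateLungs:
    ct_slice = np.full((512, 512), 40, dtype=np.int16)      # hypothetical soft-tissue slice
    ct_slice[150:350, 100:400] = -800                       # hypothetical lung-like air region
    seg, lungf, outl, ws, grad, mi, me, mw = seperate_lungs(ct_slice)
    print(seg.shape, (seg == -2000).mean())                 # fraction of pixels masked out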
def rescale_n(n,reduce_factor):
return max( 1, int(round(n / reduce_factor)))
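# Example values (comments only): rescale_n keeps kernel sizes / iteration counts roughly
# proportional when the slice is smaller than the original 512x512 grid, e.g.
#   rescale_n(8, 1.0) -> 8   (512x512 input, unchanged)
#   rescale_n(8, 2.0) -> 4   (256x256 input)
#   rescale_n(3, 4.0) -> 1   (never drops below 1)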
def seperate_lungs_cv2(image): # for increased speed
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers(image)
#image_size = image.shape[0]
reduce_factor = 512 / image.shape[0]
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, 1)
sobel_filtered_dy = ndimage.sobel(image, 0)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
useTestPlot = False
if useTestPlot:
timg = sobel_gradient
plt.imshow(timg, cmap='gray')
plt.show()
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
if useTestPlot:
timg = marker_external
plt.imshow(timg, cmap='gray')
plt.show()
#Reducing the image created by the Watershed algorithm to its outline
#wsize = rescale_n(3,reduce_factor) # THIS IS TOO SMALL, dynamically adjusting the size for the watershed algorithm
outline = ndimage.morphological_gradient(watershed, size=(3,3)) # original (3,3), (wsize, wsize) is too small to create an outline
outline = outline.astype(bool)
outline_u = outline.astype(np.uint8) #added
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
use_reduce_factor = True
if use_reduce_factor:
blackhat_struct = ndimage.iterate_structure(blackhat_struct, rescale_n(8,reduce_factor)) # dynamically adjust the number of iterations; original was 8
else:
blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
blackhat_struct_cv2 = blackhat_struct.astype(np.uint8)
#Perform the Black-Hat
#outline += ndimage.black_tophat(outline, structure=blackhat_struct) # original slow
#outline1 = outline + (cv2.morphologyEx(outline_u, cv2.MORPH_BLACKHAT, kernel=blackhat_struct_cv2)).astype(np.bool)
#outline2 = outline + ndimage.black_tophat(outline, structure=blackhat_struct)
#np.array_equal(outline1,outline2) # True
outline += (cv2.morphologyEx(outline_u, cv2.MORPH_BLACKHAT, kernel=blackhat_struct_cv2)).astype(bool) # fast
if useTestPlot:
timg = outline
plt.imshow(timg, cmap='gray')
plt.show()
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
if useTestPlot:
timg = lungfilter
plt.imshow(timg, cmap='gray')
plt.show()
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
structure2 = morphology.disk(2) # used to fill the gaps/holes close to the border (otherwise the large structure would create a gap by the edge)
if use_reduce_factor:
structure3 = morphology.disk(rescale_n(BINARY_CLOSING_SIZE,reduce_factor)) # dynamically adjust; better, 5 seems sufficient, we use 7 for safety/just in case
else:
structure3 = morphology.disk(BINARY_CLOSING_SIZE) # fixed size; 5 seems sufficient, we use 7 for safety/just in case
##lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, ORIGINAL iterations=3) # was structure=np.ones((5,5))
lungfilter2 = ndimage.morphology.binary_closing(lungfilter, structure=structure2, iterations=3) # ADDED
lungfilter3 = ndimage.morphology.binary_closing(lungfilter, structure=structure3, iterations=3)
lungfilter = np.bitwise_or(lungfilter2, lungfilter3)
### NOTE if no iterations, i.e. the default of 1, we get holes within the lungs for disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
#image.shape
#segmented = np.where(lungfilter == 1, image, -2000*np.ones((512, 512)).astype(np.int16)) # was -2000 someone suggested 30
segmented = np.where(lungfilter == 1, image, -2000*np.ones(image.shape).astype(np.int16)) # was -2000 someone suggested 30
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def seperate_lungs_3d(image):
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers_3d(image)
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, axis=2)
sobel_filtered_dy = ndimage.sobel(image, axis=1)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
#Reducing the image created by the Watershed algorithm to its outline
outline = ndimage.morphological_gradient(watershed, size=(1,3,3))
outline = outline.astype(bool)
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
blackhat_struct = blackhat_struct[np.newaxis,:,:]
#Perform the Black-Hat
outline += ndimage.black_tophat(outline, structure=blackhat_struct) # very long time
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
structure = morphology.disk(BINARY_CLOSING_SIZE) # better , 5 seems sufficient, we use 7 for safety/just in case
structure = structure[np.newaxis,:,:]
lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, iterations=3) # was structure=np.ones((5,5))
### NOTE if no iterations, i.e. the default of 1, we get holes within the lungs for disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
segmented = np.where(lungfilter == 1, image, -2000*np.ones(marker_internal.shape))
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def get_slice_location(dcm):
return float(dcm[0x0020, 0x1041].value)
def thru_plane_position(dcm):
"""Gets spatial coordinate of image origin whose axis
is perpendicular to image plane.
"""
orientation = tuple((float(o) for o in dcm.ImageOrientationPatient))
position = tuple((float(p) for p in dcm.ImagePositionPatient))
rowvec, colvec = orientation[:3], orientation[3:]
normal_vector = np.cross(rowvec, colvec)
slice_pos = np.dot(position, normal_vector)
return slice_pos
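# Usage sketch (assumptions: load_scan defined earlier in this file returns the pydicom
# datasets of one folder, and demo_path is a hypothetical placeholder): sorting by the
# projection onto the slice normal is more robust than sorting by InstanceNumber when the
# ordering tags are unreliable.
demoSortByPlane = False
if demoSortByPlane:
    demo_path = "../input/stage1/PATIENT_ID"                # hypothetical patient folder
    dicom_slices = load_scan(demo_path)
    dicom_slices.sort(key=thru_plane_position)
    gaps = np.diff([thru_plane_position(s) for s in dicom_slices])
    print(np.round(gaps, 3))                                # should be (nearly) constant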
def resample(image, scan, new_spacing=[1,1,1]):
# Determine current pixel spacing
spacing = map(float, ([scan[0].SliceThickness] + scan[0].PixelSpacing))
spacing = np.array(list(spacing))
#scan[2].SliceThickness
resize_factor = spacing / new_spacing
new_real_shape = image.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize_factor = new_shape / image.shape
new_spacing = spacing / real_resize_factor
image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest') ### early orig modified
return image, new_spacing
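# Worked example of the resampling arithmetic above (hypothetical numbers): a scan with
# 2.5mm slice thickness and 0.7mm pixel spacing, shape (100, 512, 512), resampled to 1x1x1mm:
#   resize_factor      = [2.5, 0.7, 0.7] / [1, 1, 1]       = [2.5, 0.7, 0.7]
#   new_real_shape     = [100, 512, 512] * resize_factor   = [250, 358.4, 358.4]
#   new_shape          = [250, 358, 358]   (rounded)
#   real_resize_factor = new_shape / [100, 512, 512], so the returned new_spacing is close
#   to, but not exactly, [1, 1, 1] -- which is why the function returns it alongside the image.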
def segment_all(stage, part=0, processors=1, showSummaryPlot=True): # stage added to simplify the stage1 and stage2 calculations
count = 0
STAGE_DIR = STAGE_DIR_BASE % stage
folders = glob.glob(''.join([STAGE_DIR,'*']))
if len(folders) == 0:
print ("ERROR, check directory, no folders found in: ", STAGE_DIR )
for folder in folders:
count += 1
if count % processors == part: # do this part in this process, otherwise skip
path = folder
slices = load_scan(path)
image_slices = get_3d_data_slices(slices)
#mid = len(image_slices) // 2
#img_sel = mid
useTestPlot = False
if useTestPlot:
print("Shape before segmenting\t", image_slices.shape)
plt.hist(image_slices.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
start = time.time()
resampleImages = True
if resampleImages:
image_resampled, spacing = resample(image_slices, slices, RESIZE_SPACING) # let's start with this small resolution for working out the system (then perhaps 2, 0.667, 0.667)
print("Shape_before_&_after_resampling\t", image_slices.shape,image_resampled.shape)
if useTestPlot:
plt.imshow(image_slices[image_slices.shape[0]//2], cmap=plt.cm.bone)
plt.show()
plt.imshow(image_resampled[image_resampled.shape[0]//2], cmap=plt.cm.bone)
np.max(image_slices)
np.max(image_resampled)
np.min(image_slices)
np.min(image_resampled)
plt.show()
image_slices = image_resampled
shape = image_slices.shape
l_segmented = np.zeros(shape).astype(np.int16)
l_lungfilter = np.zeros(shape).astype(bool)
l_outline = np.zeros(shape).astype(bool)
l_watershed = np.zeros(shape).astype(np.int16)
l_sobel_gradient = np.zeros(shape).astype(np.float32)
l_marker_internal = np.zeros(shape).astype(bool)
l_marker_external = np.zeros(shape).astype(bool)
l_marker_watershed = np.zeros(shape).astype(np.int16)
# start = time.time()
i=0
for i in range(shape[0]):
l_segmented[i], l_lungfilter[i], l_outline[i], l_watershed[i], l_sobel_gradient[i], l_marker_internal[i], l_marker_external[i], l_marker_watershed[i] = seperate_lungs_cv2(image_slices[i])
print("Rescale & Seg time, and path: ", ((time.time() - start)), path )
if useTestPlot:
plt.hist(image_slices.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
plt.hist(l_segmented.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
img_sel_i = shape[0] // 2
# Show some slice in the middle
plt.imshow(image_slices[img_sel_i], cmap=plt.cm.gray)
plt.show()
# Show some slice in the middle
plt.imshow(l_segmented[img_sel_i], cmap='gray')
plt.show()
path_rescaled = path.replace(stage, ''.join([stage, "_", RESOLUTION_STR]), 1)
path_segmented = path.replace(stage, ''.join([stage, "_segmented_", RESOLUTION_STR]), 1)
path_segmented_crop = path.replace(stage, ''.join([stage, "_segmented_", RESOLUTION_STR, "_crop"]), 1)
np.savez_compressed (path_rescaled, image_slices)
np.savez_compressed (path_segmented, l_segmented)
mask = l_lungfilter.astype(np.int8)
regions = measure.regionprops(mask) # this measures the largest region and is a bug when the mask is not the largest region !!!
bb = regions[0].bbox
#print(bb)
zlen = bb[3] - bb[0]
ylen = bb[4] - bb[1]
xlen = bb[5] - bb[2]
dx = 0 # could be reduced
## have to keep dx small as, for instance in at least one image, the lungs stretch right to the border even without cropping
## namely for '../input/stage1/be57c648eb683a31e8499e278a89c5a0'
crop_max_ratio_z = 0.6 # 0.8 is too big; see make_submit2(45, 1)
crop_max_ratio_y = 0.4
crop_max_ratio_x = 0.6
bxy_min = np.min(bb[1:3])
bxy_max = np.max(bb[4:6])
mask_shape= mask.shape
image_shape = l_segmented.shape
mask_volume = zlen*ylen*xlen / (mask_shape[0] * mask_shape[1] * mask_shape[2])
mask_volume_thresh = 0.08 # anything below is too small (maybe just one half of the lung or something else very small)
mask_volume_check = mask_volume > mask_volume_thresh
# print ("Mask Volume: ", mask_volume )
### DO NOT allow the mask to touch x & y ---> if it does it is likely a wrong one as for:
## folders[3] , path = '../input/stage1/9ba5fbcccfbc9e08edcfe2258ddf7
maskOK = False
if bxy_min >0 and bxy_max < 512 and mask_volume_check and zlen/mask_shape[0] > crop_max_ratio_z and ylen/mask_shape[1] > crop_max_ratio_y and xlen/mask_shape[2] > crop_max_ratio_x:
## square crop and at least dx elements on both sides on x & y
bxy_min = np.min(bb[1:3])
bxy_max = np.max(bb[4:6])
if bxy_min == 0 or bxy_max == 512:
# Mask too big, auto-correct
print("The following mask likely too big, autoreducing by:", dx)
bxy_min = np.max((bxy_min, dx))
bxy_max = np.min ((bxy_max, mask_shape[1] - dx))
image = l_segmented[bb[0]:bb[3], bxy_min:bxy_max, bxy_min:bxy_max]
mask = mask[bb[0]:bb[3], bxy_min:bxy_max, bxy_min:bxy_max]
#maskOK = True
print ("Shape, cropped, bbox ", mask_shape, mask.shape, bb)
elif bxy_min> 0 and bxy_max < 512 and mask_volume_check and zlen/mask.shape[0] > crop_max_ratio_z:
## cut on z at least
image = l_segmented[bb[0]:bb[3], dx: image_shape[1] - dx, dx: image_shape[2] - dx]
#mask = mask[bb[0]:bb[3], dx: mask_shape[1] - dx, dx: mask_shape[2] - dx]
print("Mask too small, NOT auto-cropping x-y: shape, cropped, bbox, ratios, violume:", mask_shape, image.shape, bb, path, zlen/mask_shape[0], ylen/mask_shape[1], xlen/mask_shape[2], mask_volume)
else:
image = l_segmented[0:mask_shape[0], dx: image_shape[1] - dx, dx: image_shape[2] - dx]
#mask = mask[0:mask_shape[0], dx: mask_shape[1] - dx, dx: mask_shape[2] - dx]
print("Mask wrong, NOT auto-cropping: shape, cropped, bbox, ratios, volume:", mask_shape, image.shape, bb, path, zlen/mask_shape[0], ylen/mask_shape[1], xlen/mask_shape[2], mask_volume)
if showSummaryPlot:
img_sel_i = shape[0] // 2
# Show some slice in the middle
useSeparatePlots = False
if useSeparatePlots:
plt.imshow(image_slices[img_sel_i], cmap=plt.cm.gray)
plt.show()
# Show some slice in the middle
plt.imshow(l_segmented[img_sel_i], cmap='gray')
plt.show()
else:
f, ax = plt.subplots(1, 2, figsize=(6,3))
ax[0].imshow(image_slices[img_sel_i],cmap=plt.cm.bone)
ax[1].imshow(l_segmented[img_sel_i],cmap=plt.cm.bone)
plt.show()
# Show some slice in the middle
#plt.imshow(image[image.shape[0] // 2], cmap='gray') # don't show it for simpler review
#plt.show()
np.savez_compressed(path_segmented_crop, image)
#print("Mask count: ", count)
#print ("Shape: ", image.shape)
return part, processors, count
# the following 3 functions to read LUNA files are from: https://www.kaggle.com/arnavkj95/data-science-bowl-2017/candidate-generation-and-luna16-preprocessing/notebook
'''
This function reads a '.mhd' file using SimpleITK and returns the image array,
origin and spacing of the image.
'''
def load_itk(filename):
# Reads the image using SimpleITK
itkimage = sitk.ReadImage(filename)
# Convert the image to a numpy array first and then shuffle the dimensions to get axis in the order z,y,x
ct_scan = sitk.GetArrayFromImage(itkimage)
# Read the origin of the ct_scan, will be used to convert the coordinates from world to voxel and vice versa.
origin = np.array(list(reversed(itkimage.GetOrigin())))
# Read the spacing along each dimension
spacing = np.array(list(reversed(itkimage.GetSpacing())))
return ct_scan, origin, spacing
'''
This function is used to convert the world coordinates to voxel coordinates using
the origin and spacing of the ct_scan
'''
def world_2_voxel(world_coordinates, origin, spacing):
stretched_voxel_coordinates = np.absolute(world_coordinates - origin)
voxel_coordinates = stretched_voxel_coordinates / spacing
return voxel_coordinates
'''
This function is used to convert the voxel coordinates to world coordinates using
the origin and spacing of the ct_scan.
'''
def voxel_2_world(voxel_coordinates, origin, spacing):
stretched_voxel_coordinates = voxel_coordinates * spacing
world_coordinates = stretched_voxel_coordinates + origin
return world_coordinates
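# Round-trip sanity check (illustrative numbers only): world_2_voxel and voxel_2_world are
# inverses as long as world - origin is non-negative elementwise (the np.absolute in
# world_2_voxel drops the sign otherwise).
demoCoordRoundTrip = False
if demoCoordRoundTrip:
    origin = np.array([-200.0, -150.0, -150.0])             # hypothetical z,y,x origin in mm
    spacing = np.array([2.0, 0.7, 0.7])                     # hypothetical z,y,x spacing in mm
    world = np.array([-100.0, -10.0, 25.0])
    vox = world_2_voxel(world, origin, spacing)
    print(vox, voxel_2_world(vox, origin, spacing))         # second vector matches `world`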
def seq(start, stop, step=1):
n = int(round((stop - start)/float(step)))
if n > 1:
return([start + step*i for i in range(n+1)])
else:
return([])
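# Example values (comments only): seq behaves like a small inclusive arange helper and
# returns [] for degenerate ranges.
#   seq(-2, 2, 1)      -> [-2, -1, 0, 1, 2]
#   seq(-1.5, 1.5, 1)  -> [-1.5, -0.5, 0.5, 1.5]
#   seq(0, 0.5, 1)     -> []   (n <= 1)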
'''
This function is used to create spherical regions in binary masks
at the given locations and radius.
'''
#image = lung_img
#spacing = new_spacing
def draw_circles(image,cands,origin,spacing):
#make empty matrix, which will be filled with the mask
image_mask = np.zeros(image.shape, dtype=np.int16)
#run over all the nodules in the lungs
for ca in cands.values:
#get the middle x-, y- and z- world coordinates of the nodule
#radius = np.ceil(ca[4])/2 ## original: replaced the ceil with a very minor increase of 1% ....
radius = (ca[4])/2 + 0.51 * spacing[0] # increase by roughly half of the spacing in the z direction (trying to capture a wider region/border for learning and to address the rough net)
coord_x = ca[1]
coord_y = ca[2]
coord_z = ca[3]
image_coord = np.array((coord_z,coord_y,coord_x))
#determine voxel coordinate given the worldcoordinate
image_coord = world_2_voxel(image_coord,origin,spacing)
#determine the range of the nodule
#noduleRange = seq(-radius, radius, RESIZE_SPACING[0]) # original, uniform spacing
noduleRange_z = seq(-radius, radius, spacing[0])
noduleRange_y = seq(-radius, radius, spacing[1])
noduleRange_x = seq(-radius, radius, spacing[2])
#x = y = z = -2
#create the mask
for x in noduleRange_x:
for y in noduleRange_y:
for z in noduleRange_z:
coords = world_2_voxel(np.array((coord_z+z,coord_y+y,coord_x+x)),origin,spacing)
#if (np.linalg.norm(image_coord-coords) * RESIZE_SPACING[0]) < radius: ### original (constrained to a uniform RESIZE)
if (np.linalg.norm((image_coord-coords) * spacing)) < radius:
image_mask[int(np.round(coords[0])),int(np.round(coords[1])),int(np.round(coords[2]))] = int(1)
return image_mask
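# Usage sketch (assumptions: the .mhd path and seriesuid are hypothetical placeholders, and
# `cands` is the per-series slice of the LUNA annotations CSV whose columns are indexed as
# ca[1]=coordX, ca[2]=coordY, ca[3]=coordZ, ca[4]=diameter_mm in the loop above):
demoDrawCircles = False
if demoDrawCircles:
    lung_img, origin, spacing = load_itk("hypothetical.mhd")
    annotations = pd.read_csv(LUNA_ANNOTATIONS)
    cands = annotations[annotations.seriesuid == "hypothetical-seriesuid"]
    nodule_mask = draw_circles(lung_img, cands, origin, spacing)
    print(nodule_mask.shape, nodule_mask.sum())             # number of voxels inside the spheres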
'''
This function takes the path to a '.mhd' file as input and
is used to create the nodule masks and segmented lungs after
rescaling to 1mm size in all directions. It saves them in the .npz
format. It also takes the list of nodule locations in that CT Scan as
input.
'''
def load_scans_masks(luna_subset, useAll, use_unsegmented=True):
#luna_subset = "[0-6]"
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
annotations = pd.read_csv(LUNA_ANNOTATIONS)
annotations.head()
sids = []
scans = []
masks = []
cnt = 0
skipped = 0
for file in files:
imagePath = file
seriesuid = file[file.rindex('/')+1:] # everything after the last slash
seriesuid = seriesuid[:len(seriesuid)-len(".mhd")] # cut out the suffix to get the uid
path = imagePath[:len(imagePath)-len(".mhd")] # cut out the suffix to get the uid
if use_unsegmented:
path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
else:
path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
cands = annotations[seriesuid == annotations.seriesuid] # select the annotations for the current series
#useAll = True
if (len(cands) > 0 or useAll):
sids.append(seriesuid)
if use_unsegmented:
scan_z = np.load(''.join((path_segmented + '_lung' + '.npz')))
else:
scan_z = np.load(''.join((path_segmented + '_lung_seg' + '.npz')))
#scan_z.keys()
scan = scan_z['arr_0']
mask_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
mask = mask_z['arr_0']
scans.append(scan)
masks.append(mask)
cnt += 1
else:
print("Skipping non-nodules entry ", seriesuid)
skipped += 1
print ("Summary: cnt & skipped: ", cnt, skipped)
return scans, masks, sids
def load_scans_masks_or_blanks(luna_subset, useAll, use_unsegmented=True):
#luna_subset = "[0-6]"
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
annotations = pd.read_csv(LUNA_ANNOTATIONS)
annotations.head()
candidates = pd.read_csv(LUNA_CANDIDATES)
candidates_false = candidates[candidates["class"] == 0] # only select the false candidates
candidates_true = candidates[candidates["class"] == 1] # only select the true candidates
sids = []
scans = []
masks = []
blankids = [] # class/id whether scan is with nodule or without, 0 - with, 1 - without
cnt = 0
skipped = 0
#file=files[7]
for file in files:
imagePath = file
seriesuid = file[file.rindex('/')+1:] # everything after the last slash
seriesuid = seriesuid[:len(seriesuid)-len(".mhd")] # cut out the suffix to get the uid
path = imagePath[:len(imagePath)-len(".mhd")] # cut out the suffix to get the uid
if use_unsegmented:
path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
else:
path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
cands = annotations[seriesuid == annotations.seriesuid] # select the annotations for the current series
ctrue = candidates_true[seriesuid == candidates_true.seriesuid]
cfalse = candidates_false[seriesuid == candidates_false.seriesuid]
#useAll = True
blankid = 1 if (len(cands) == 0 and len(ctrue) == 0 and len(cfalse) > 0) else 0
skip_nodules_entirely = False # was False
use_only_nodules = False # was True
if skip_nodules_entirely and blankid ==0:
## manual switch to generate extra data for the corrupted set
print("Skipping nodules (skip_nodules_entirely) ", seriesuid)
skipped += 1
elif use_only_nodules and (len(cands) == 0):
## manual switch to generate only nodule data due to lack of time and repeated time pressures
print("Skipping blanks (use_only_nodules) ", seriesuid)
skipped += 1
else: # NORMAL operations
if (len(cands) > 0 or
(blankid >0) or
useAll):
sids.append(seriesuid)
blankids.append(blankid)
if use_unsegmented:
scan_z = np.load(''.join((path_segmented + '_lung' + '.npz')))
else:
scan_z = np.load(''.join((path_segmented + '_lung_seg' + '.npz')))
#scan_z.keys()
scan = scan_z['arr_0']
#mask_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
mask_z = np.load(''.join((path_segmented + '_nodule_mask_wblanks' + '.npz')))
mask = mask_z['arr_0']
testPlot = False
if testPlot:
maskcheck_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
maskcheck = maskcheck_z['arr_0']
f, ax = plt.subplots(1, 2, figsize=(10,5))
ax[0].imshow(np.sum(np.abs(maskcheck), axis=0),cmap=plt.cm.gray)
ax[1].imshow(np.sum(np.abs(mask), axis=0),cmap=plt.cm.gray)
#ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
scans.append(scan)
masks.append(mask)
cnt += 1
else:
print("Skipping non-nodules and non-blank entry ", seriesuid)
skipped += 1
print ("Summary: cnt & skipped: ", cnt, skipped)
return scans, masks, sids, blankids
#return scans, masks, sids # not yet, old style
def load_scans_masks_no_nodules(luna_subset, use_unsegmented=True): # load only the ones that do not contain nodules
#luna_subset = "[0-6]"
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
annotations = pd.read_csv(LUNA_ANNOTATIONS)
annotations.head()
sids = []
scans = []
masks = []
cnt = 0
skipped = 0
for file in files:
imagePath = file
seriesuid = file[file.rindex('/')+1:] # everything after the last slash
seriesuid = seriesuid[:len(seriesuid)-len(".mhd")] # cut out the suffix to get the uid
path = imagePath[:len(imagePath)-len(".mhd")] # cut out the suffix to get the uid
if use_unsegmented:
path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
else:
path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
cands = annotations[seriesuid == annotations.seriesuid] # select the annotations for the current series
#useAll = True
if (len(cands)):
print("Skipping entry with nodules ", seriesuid)
skipped += 1
else:
sids.append(seriesuid)
if use_unsegmented:
scan_z = np.load(''.join((path_segmented + '_lung' + '.npz')))
else:
scan_z = np.load(''.join((path_segmented + '_lung_seg' + '.npz')))
#scan_z.keys()
scan = scan_z['arr_0']
mask_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
mask = mask_z['arr_0']
scans.append(scan)
masks.append(mask)
cnt += 1
print ("Summary: cnt & skipped: ", cnt, skipped)
return scans, masks, sids
MIN_BOUND = -1000.0
MAX_BOUND = 400.0
def normalize(image):
image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
image[image>1] = 1.
image[image<0] = 0.
return image
PIXEL_MEAN = 0.028 ## for LUNA subset 0 and our preprocessing: nodule-only slices gave 0.028, all slices gave 0.020421744071562546 (the tutorial used 0.25)
def zero_center(image):
image = image - PIXEL_MEAN
return image
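# Worked example of normalize/zero_center (with MIN_BOUND=-1000, MAX_BOUND=400, PIXEL_MEAN=0.028):
#   -1000 HU (air)      -> normalize 0.000 -> zero_center -0.028
#    -400 HU (lung-ish) -> normalize 0.429 -> zero_center  0.401
#       0 HU (water)    -> normalize 0.714 -> zero_center  0.686
#     400 HU and above  -> normalize 1.000 -> zero_center  0.972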
def load_scans(path): # function used for testing
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
slices.sort(key=lambda x: int(x.InstanceNumber))
return np.stack([s.pixel_array for s in slices])
def get_scans(df,scans_list):
scans=np.stack([load_scans(scan_folder+df.id[i_scan[0]])[i_scan[1]] for i_scan in scans_list])
scans=process_scans(scans)
view_scans(scans)
return(scans)
def process_scans(scans): # used for tesing
scans1=np.zeros((scans.shape[0],1,img_rows,img_cols))
for i in range(scans.shape[0]):
img=scans[i,:,:]
img = 255.0 / np.amax(img) * img
img =img.astype(np.uint8)
img =cv2.resize(img, (img_rows, img_cols))
scans1[i,0,:,:]=img
return (scans1)
only_with_nudels = True
def convert_scans_and_masks(scans, masks, only_with_nudels):
flattened1 = [val for sublist in scans for val in sublist[1:-1]] # skip one element at the beginning and at the end
scans1 = np.stack(flattened1)
flattened1 = [val for sublist in masks for val in sublist[1:-1]] # skip one element at the beginning and at the end
masks1 = np.stack(flattened1) # 10187
#only_with_nudels = True
if only_with_nudels:
nudels_pix_count = np.sum(masks1, axis = (1,2))
scans1 = scans1[nudels_pix_count>0]
masks1 = masks1[nudels_pix_count>0] # 493 -- circa 5% with nodules, the others without
#nudels2 = np.where(masks1 == 1, scans1, -4000*np.ones(( masks1.shape[1], masks1.shape[2))) ### was -2000
#nudels1 = np.where(masks1 == 1, scans1, masks1 - 4000) ### was -2000
#nudles1_rf = nudels1.flatten()
#nudles1_rf = nudles1_rf[nudles1_rf > -4000]
scans = normalize(scans1)
useTestPlot = False
if useTestPlot:
plt.hist(scans1.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
#for i in range(scans.shape[0]):
for i in range(20):
print ('scan '+str(i))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
#np.mean(scans) # 0.028367 / 0.0204
#np.min(scans) # 0
#np.max(scans) #
scans = zero_center(scans)
masks = np.copy(masks1)
## if needed do the resize here ....
img_rows = scans.shape[1] ### redefine img_rows/ cols and add resize if needed
img_cols = scans.shape[2]
scans1=np.zeros((scans.shape[0],1,img_rows,img_cols))
for i in range(scans.shape[0]):
img=scans[i,:,:]
###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
scans1[i,0,:,:]=img
masks1=np.zeros((masks.shape[0],1,img_rows,img_cols))
for i in range(masks.shape[0]):
img=masks[i,:,:]
###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
masks1[i,0,:,:]=img
return scans1, masks1
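# Shape sketch (illustrative numbers): for a list of two scans with 30 and 40 slices each,
# convert_scans_and_masks drops the first and last slice of every scan and stacks the rest,
# so scans1/masks1 come out as (28 + 38, 1, img_rows, img_cols) before the nodule filter
# (and with fewer rows afterwards, since only_with_nudels keeps only slices whose mask is
# non-empty).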
#scans = [scans[i]]
#masks = [masks[i]]
def convert_scans_and_masks_xd_ablanks(scans, masks, blankids, only_with_nudels, dim=3):
# reuse scan to reduce memory footprint
dim_orig = dim
add_blank_spacing_size = dim * 8 #### use 4 for [0 - 3] and 8 for [4 - 7] ???initial trial (should perhaps be just dim ....)
#skip = dim // 2 # old
skip_low = dim // 2 # dim should be odd -- it is recalculated anyway to this end
skip_high = dim -skip_low - 1
do_not_allow_even_dim = False ## even dims are allowed now ...
if do_not_allow_even_dim:
dim = 2 * skip_low + 1
skip_low = dim // 2
skip_high = dim -skip_low - 1
if dim != dim_orig:
print ("convert_scans_and_masks_x: Dim must be uneven, corrected from .. to:", dim_orig, dim)
work = [] # 3 layers
#scan = scans[0]
for scan in scans: ##TEMP
tmp = []
#i = 1
#for i in range(1, scan.shape[0]-1, 3): # SKIP EVERY 3
for i in range(skip_low, scan.shape[0]-skip_high):
#img1 = scan[i-1]
#img2 = scan[i]
#img3 = scan[i+1]
#rgb = np.stack((img1, img2, img3))
rgb = np.stack(scan[i-skip_low:i+skip_high+1])
tmp.append(rgb)
work.append(np.array(tmp))
#flattened1 = [val for sublist in work for val in sublist ] # NO skipping as we have already cut the first and the last layer
#scans1 = np.stack(flattened1)
scans1 = np.stack([val for sublist in work for val in sublist ]) # NO skipping as we have already cut the first and the last layer
work = []
### ADD artificial mask pixels every add_blank_spacing layers for each blank id ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
blanks_per_axis = 4 # skip border
crop = 16
dx = (img_cols - 2 * crop) // (blanks_per_axis + 2)
dy = (img_rows - 2 * crop) // (blanks_per_axis + 2)
for mask in masks:
if (np.sum(mask) < 0):
## we have a blank
### ADD artificial mask pixels every add_blank_spacing layers for each blank id ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
for i in range(skip_low, mask.shape[0]-skip_high, add_blank_spacing_size):
for ix in range(blanks_per_axis):
xpos = crop + (ix+1)*dx + dx //2
for iy in range(blanks_per_axis):
ypos = crop + (iy+1)*dy + dy //2
#print (xpos, ypos)
mask[skip_low, ypos, xpos] = -1 # negative pixel to be picked up below and corrected back to none
#for k in range(len(blankids)):
# if blankids[k] > 0:
# mask = masks[k]
# ## add the blanls
# for i in range(skip_low, mask.shape[0]-skip_high, add_blank_spacing_size):
# mask[skip_low, 0, 0] = -1 # negative pixel to be picked up below and corrected back to none
use_3d_mask = True ##
if use_3d_mask:
work = [] # 3 layers
#mask = masks[0]
for mask in masks:
tmp = []
#i = 0
for i in range(skip_low, mask.shape[0]-skip_high):
#img1 = mask[i-1]
#img2 = mask[i]
#img3 = mask[i+1]
#rgb = np.stack((img1, img2, img3))
rgb = np.stack(mask[i-skip_low:i+skip_high+1])
tmp.append(rgb)
work.append(np.array(tmp))
masks1 = np.stack([val for sublist in work for val in sublist ] )# NO skipping as we have already cut the first and the last layer
else:
masks1 = np.stack([val for sublist in masks for val in sublist[skip_low:-skip_high]] ) # skip one element at the beginning and at the end
#masks1 = np.stack(flattened1) # 10187
#only_with_nudels = True
if only_with_nudels:
if use_3d_mask:
nudels_pix_count = np.sum(masks1[:,skip_low], axis = (1,2)) ## and added for the potential blanks; modified so that the centre slice of the 3D mask is used as the mask
else:
nudels_pix_count = np.sum(masks1, axis = (1,2))
scans1 = scans1[nudels_pix_count != 0]
masks1 = masks1[nudels_pix_count != 0]
#blank_mask_factor = np.sign(nudels_pix_count)[nudels_pix_count != 0]
#sum(blank_mask_factor)
#blank_mask_factor[blank_mask_factor <0] = 0
#mask1_orig = masks1
#np.sum(mask1_orig)
#np.min(masks1)
#masks1 = masks1[nudels_pix_count != 0] * blank_mask_factor # 493 -- circa 5 % with nudeles oters without; 232 if we skip over every 3 layers and use a 3d mask
masks1[masks1 < 0] = 0 # 493 -- circa 5% with nodules, the others without; 232 if we skip over every 3 layers and use a 3d mask
#masks1[nudels_pix_count < 0] = 0 # making empty mask for balancing training set
#nudels2 = np.where(masks1 == 1, scans1, -4000*np.ones(( masks1.shape[1], masks1.shape[2))) ### was -2000
#nudels1 = np.where(masks1 == 1, scans1, masks1 - 4000) ### was -2000
#nudles1_rf = nudels1.flatten()
#nudles1_rf = nudles1_rf[nudles1_rf > -4000]
scans1 = normalize(scans1)
useTestPlot = False
if useTestPlot:
plt.hist(scans1.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
#for i in range(scans.shape[0]):
for i in range(20):
print ('scan '+str(i))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
#np.mean(scans) # 0.028367 / 0.0204
#np.min(scans) # 0
#np.max(scans) #
scans1 = zero_center(scans1)
#masks = np.copy(masks1)
## if needed do the resize here .... (img_rows and img_cols are global values defined externally)
#img_rows = scans.shape[1] ### redefine img_rows/ cols and add resize if needed
#img_cols = scans.shape[2]
# scans already are in the tensor mode with 3 rgb elements ....
#scans1 = scans ## no change
#scans1=np.zeros((scans.shape[0],3,img_rows,img_cols))
#for i in range(scans.shape[0]):
# img=scans[i,:,:]
# ###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
# scans1[i,0,:,:]=img
if use_3d_mask:
done = 1 # nothing to do
else:
masks = np.copy(masks1)
masks1=np.zeros((masks.shape[0],1,img_rows,img_cols))
for i in range(masks.shape[0]):
img=masks[i,:,:]
###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
masks1[i,0,:,:]=img
return scans1, masks1
#scans = [scans[j]]
#masks = [masks[j]]
def convert_scans_and_masks_xd3(scans, masks, only_with_nudels, dim=3, crop=16, blanks_per_axis = 4, add_blank_spacing_size=0, add_blank_layers = 0):
# reuse scan to reduce memory footprint
dim_orig = dim
#add_blank_spacing_size = 0 # dim *4 # dim # was dim ### set to 0 for version_16 #### initial trial (should perhaps be just dim ....), if 0 - do not add ...
#add_blank_layers = 0 # was 4
#skip = dim // 2 # old
skip_low = dim // 2 # dim should be odd -- it is recalculated anyway to this end
skip_high = dim -skip_low - 1
do_not_allow_even_dim = False ## even dims are allowed now ...
if do_not_allow_even_dim:
dim = 2 * skip_low + 1
skip_low = dim // 2
skip_high = dim -skip_low - 1
if dim != dim_orig:
print ("convert_scans_and_masks_x: Dim must be uneven, corrected from .. to:", dim_orig, dim)
work = [] # 3 layers
#scan = scans[0]
for scan in scans: ##TEMP
tmp = []
#i = 1
#for i in range(1, scan.shape[0]-1, 3): # SKIP EVERY 3
for i in range(skip_low, scan.shape[0]-skip_high):
#img1 = scan[i-1]
#img2 = scan[i]
#img3 = scan[i+1]
#rgb = np.stack((img1, img2, img3))
rgb = np.stack(scan[i-skip_low:i+skip_high+1])
tmp.append(rgb)
work.append(np.array(tmp))
#flattened1 = [val for sublist in work for val in sublist ] # NO skipping as we have already cut the first and the last layer
#scans1 = np.stack(flattened1)
scans1 = np.stack([val for sublist in work for val in sublist ]) # NO skipping as we have already cut the first and the last layer
work = []
##blanks_per_axis = 6 # cover all slice
##crop = 44
dxrange = scans[0].shape[-1] - 2 * crop
dyrange = scans[0].shape[-2] - 2 * crop
#dx = (img_cols - 2 * crop) // (blanks_per_axis)
#dy = (img_rows - 2 * crop) // (blanks_per_axis)
#dx = dxrange // (blanks_per_axis+1)
#dy = dyrange // (blanks_per_axis+1)
### ADD artificial mask pixels every add_blank_spacing layers for each blank id ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
if add_blank_spacing_size > 0:
for mask in masks:
if (np.min(mask) < 0):
## we have a blank
### ADD artificial mask pixels every add_blank_spacing layers for each blank id ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
for i in range(skip_low+(add_blank_spacing_size//2), mask.shape[0]-skip_high, add_blank_spacing_size):
mask[i, np.random.randint(0,dyrange), np.random.randint(0,dxrange)] = -1 # negative pixel to be picked up below and corrected back to none
if add_blank_layers > 0:
for mask in masks:
if (np.min(mask) < 0):
dzrange = mask.shape[0]-dim
## we have a blank
### ADD artificial mask pixels every add_blank_spacing layers for each blank id ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
for k in range(add_blank_layers):
i = np.random.randint(0, dzrange) + skip_low
#print ("dz position, random, mask.shape ", i, mask.shape)
mask[i, np.random.randint(0,dyrange), np.random.randint(0,dxrange)] = -1 # negative pixel to be picked up below and corrected back to none
#mask = masks[0]
add_random_blanks_in_blanks = False ## NO need for the extra random blank pixels now, 20170327
if add_random_blanks_in_blanks:
for mask in masks:
if (np.min(mask) < 0):
## we have a blank
### ADD artificial mask pixels every add_blank_spacing layers for each blank id ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
#zlow = skip_low
#zhigh = mask.shape[0]-skip_high
pix_sum = np.sum(mask, axis=(1,2))
idx_blanks = np.min(mask, axis=(1,2)) < 0 ## don't use it - let's vary the position across the space
for iz in range(mask.shape[0]):
if (np.min(mask[iz])) < 0:
for ix in range(blanks_per_axis):
#xpos = crop + (ix)*dx + dx //2
for iy in range(blanks_per_axis):
#ypos = crop + (iy)*dy + dy //2
xpos = crop + np.random.randint(0,dxrange)
ypos = crop + np.random.randint(0,dyrange)
#print (iz, xpos, ypos)
#mask[idx_blanks, ypos, xpos] = -1 # negative pixel to be picked up below and corrected back to none
mask[iz, ypos, xpos] = -1
use_3d_mask = True ##
if use_3d_mask:
work = [] # 3 layers
#mask = masks[0]
for mask in masks:
tmp = []
#i = 0
for i in range(skip_low, mask.shape[0]-skip_high):
#img1 = mask[i-1]
#img2 = mask[i]
#img3 = mask[i+1]
#rgb = np.stack((img1, img2, img3))
rgb = np.stack(mask[i-skip_low:i+skip_high+1])
tmp.append(rgb)
work.append(np.array(tmp))
masks1 = np.stack([val for sublist in work for val in sublist ] )# NO skipping as we have already cut the first and the last layer
else:
masks1 = np.stack([val for sublist in masks for val in sublist[skip_low:-skip_high]] ) # skip one element at the beginning and at the end
#masks1 = np.stack(flattened1) # 10187
#only_with_nudels = True
if only_with_nudels:
if use_3d_mask:
#nudels_pix_count = np.sum(np.abs(masks1[:,skip_low]), axis = (1,2)) ## CHANGE IT WED - use ANY i.e. remove skip_low abd added for the potential blanks; modified that the centre mask be mask!
nudels_pix_count = np.sum(np.abs(masks1), axis = (1,2,3)) ## USE ANY (March 1): any non-zero voxel in the whole 3D mask counts (skip_low no longer used), so potential blanks are kept as well
else:
nudels_pix_count = np.sum(np.abs(masks1), axis = (1,2))
scans1 = scans1[nudels_pix_count != 0]
masks1 = masks1[nudels_pix_count != 0]
#blank_mask_factor = np.sign(nudels_pix_count)[nudels_pix_count != 0]
#sum(blank_mask_factor)
#blank_mask_factor[blank_mask_factor <0] = 0
#mask1_orig = masks1
#np.sum(mask1_orig)
#np.min(masks1)
#masks1 = masks1[nudels_pix_count != 0] * blank_mask_factor # 493 -- circa 5 % with nudeles oters without; 232 if we skip over every 3 layers and use a 3d mask
#masks1[masks1 < 0] = 0 # !!!!!!!!!!!!!! in GRID version do NOT do that - do it in the key version 493 -- circa 5 % with nudeles oters without; 232 if we skip over every 3 layers and use a 3d mask
#masks1[nudels_pix_count < 0] = 0 # making empty mask for balancing training set
#nudels2 = np.where(masks1 == 1, scans1, -4000*np.ones(( masks1.shape[1], masks1.shape[2))) ### was -2000
#nudels1 = np.where(masks1 == 1, scans1, masks1 - 4000) ### was -2000
#nudles1_rf = nudels1.flatten()
#nudles1_rf = nudles1_rf[nudles1_rf > -4000]
scans1 = normalize(scans1)
### after this scans1 becomes float64 ....
useTestPlot = False
if useTestPlot:
plt.hist(scans1.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
#for i in range(scans.shape[0]):
for i in range(20):
print ('scan '+str(i))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
#np.mean(scans) # 0.028367 / 0.0204
#np.min(scans) # 0
#np.max(scans) #
scans1 = zero_center(scans1)
#masks = np.copy(masks1)
scans1 = scans1.astype(np.float32) # make it float32 (no point carrying float64; Keras operates on float32, and the originals were int)
## if needed do the resize here .... (img_rows and img_cols are global values defined externally)
#img_rows = scans.shape[1] ### redefine img_rows/ cols and add resize if needed
#img_cols = scans.shape[2]
# scans already are in the tensor mode with 3 rgb elements ....
#scans1 = scans ## no change
#scans1=np.zeros((scans.shape[0],3,img_rows,img_cols))
#for i in range(scans.shape[0]):
# img=scans[i,:,:]
# ###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
# scans1[i,0,:,:]=img
if use_3d_mask:
done = 1 # nothing to do
else:
masks = np.copy(masks1)
masks1=np.zeros((masks.shape[0],1,img_rows,img_cols))
for i in range(masks.shape[0]):
img=masks[i,:,:]
###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
masks1[i,0,:,:]=img
return scans1, masks1
def convert_scans_and_masks_3d(scans, masks, only_with_nudels):
# reuse scan to reduce memory footprint
work = [] # 3 layers
#scan = scans[0]
for scan in scans:
tmp = []
#i = 0
#for i in range(1, scan.shape[0]-1, 3): # SKIP EVERY 3
for i in range(1, scan.shape[0]-1):
img1 = scan[i-1]
img2 = scan[i]
img3 = scan[i+1]
rgb = np.stack((img1, img2, img3))
tmp.append(rgb)
work.append(np.array(tmp))
#flattened1 = [val for sublist in work for val in sublist ] # NO skipping as we have already cut the first and the last layer
#scans1 = np.stack(flattened1)
scans1 = np.stack([val for sublist in work for val in sublist ]) # NO skipping as we have already cut the first and the last layer
work = []
use_3d_mask = False
if use_3d_mask:
work = [] # 3 layers
#mask = masks[0]
for mask in masks:
tmp = []
#i = 0
for i in range(1, mask.shape[0]-1, 3): # SKIP EVERY 3
img1 = mask[i-1]
img2 = mask[i]
img3 = mask[i+1]
rgb = np.stack((img1, img2, img3))
tmp.append(rgb)
work.append(np.array(tmp))
masks1 = np.stack([val for sublist in work for val in sublist ] )# NO skipping as we have already cut the first and the last layer
else:
masks1 = np.stack([val for sublist in masks for val in sublist[1:-1]] ) # skip one element at the beginning and at the end
#masks1 = np.stack(flattened1) # 10187
#only_with_nudels = True
if only_with_nudels:
if use_3d_mask:
nudels_pix_count = np.sum(masks1, axis = (1,2,3))
else:
nudels_pix_count = np.sum(masks1, axis = (1,2))
scans1 = scans1[nudels_pix_count>0]
masks1 = masks1[nudels_pix_count>0] # 493 -- circa 5% with nodules, the others without; 232 if we skip over every 3 layers and use a 3d mask
#nudels2 = np.where(masks1 == 1, scans1, -4000*np.ones(( masks1.shape[1], masks1.shape[2))) ### was -2000
#nudels1 = np.where(masks1 == 1, scans1, masks1 - 4000) ### was -2000
#nudles1_rf = nudels1.flatten()
#nudles1_rf = nudles1_rf[nudles1_rf > -4000]
scans1 = normalize(scans1)
useTestPlot = False
if useTestPlot:
plt.hist(scans1.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
#for i in range(scans.shape[0]):
for i in range(20):
print ('scan '+str(i))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
#np.mean(scans) # 0.028367 / 0.0204
#np.min(scans) # 0
#np.max(scans) #
scans1 = zero_center(scans1)
#masks = np.copy(masks1)
## if needed do the resize here .... (img_rows and img_cols are global values defined externally)
#img_rows = scans.shape[1] ### redefine img_rows/ cols and add resize if needed
#img_cols = scans.shape[2]
# scans already are in the tensor mode with 3 rgb elements ....
#scans1 = scans ## no change
#scans1=np.zeros((scans.shape[0],3,img_rows,img_cols))
#for i in range(scans.shape[0]):
# img=scans[i,:,:]
# ###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
# scans1[i,0,:,:]=img
if use_3d_mask:
done = 1 # nothing to do
else:
masks = np.copy(masks1)
masks1=np.zeros((masks.shape[0],1,img_rows,img_cols))
for i in range(masks.shape[0]):
img=masks[i,:,:]
###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
masks1[i,0,:,:]=img
return scans1, masks1
def view_scans(scans):
#%matplotlib inline
for i in range(scans.shape[0]):
print ('scan '+str(i))
plt.imshow(scans[i,0,:,:], cmap=plt.cm.gray)
plt.show()
def view_scans_widget(scans):
#%matplotlib tk
for i in range(scans.shape[0]):
plt.figure(figsize=(7,7))
plt.imshow(scans[i,0,:,:], cmap=plt.cm.gray)
plt.show()
def get_masks(scans,masks_list):
#%matplotlib inline
scans1=scans.copy()
maxv=255
masks=np.zeros(shape=(scans.shape[0],1,img_rows,img_cols))
for i_m in range(len(masks_list)):
for i in range(-masks_list[i_m][3],masks_list[i_m][3]+1):
for j in range(-masks_list[i_m][3],masks_list[i_m][3]+1):
masks[masks_list[i_m][0],0,masks_list[i_m][2]+i,masks_list[i_m][1]+j]=1
for i1 in range(-masks_list[i_m][3],masks_list[i_m][3]+1):
scans1[masks_list[i_m][0],0,masks_list[i_m][2]+i1,masks_list[i_m][1]+masks_list[i_m][3]]=maxv=255
scans1[masks_list[i_m][0],0,masks_list[i_m][2]+i1,masks_list[i_m][1]-masks_list[i_m][3]]=maxv=255
scans1[masks_list[i_m][0],0,masks_list[i_m][2]+masks_list[i_m][3],masks_list[i_m][1]+i1]=maxv=255
scans1[masks_list[i_m][0],0,masks_list[i_m][2]-masks_list[i_m][3],masks_list[i_m][1]+i1]=maxv=255
for i in range(scans.shape[0]):
print ('scan '+str(i))
f, ax = plt.subplots(1, 2,figsize=(10,5))
ax[0].imshow(scans1[i,0,:,:],cmap=plt.cm.gray)
ax[1].imshow(masks[i,0,:,:],cmap=plt.cm.gray)
plt.show()
return(masks)
def augmentation(scans,masks,n):
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=25, # was 25
width_shift_range=0.3, # ws 0.3; was 0.1# tried 0.01
height_shift_range=0.3, # was 0.3; was 0.1 # tried 0.01
horizontal_flip=True,
vertical_flip=True,
zoom_range=False)
i=0
scans_g=scans.copy()
for batch in datagen.flow(scans, batch_size=1, seed=1000):
scans_g=np.vstack([scans_g,batch])
i += 1
if i > n:
break
i=0
masks_g=masks.copy()
for batch in datagen.flow(masks, batch_size=1, seed=1000):
masks_g=np.vstack([masks_g,batch])
i += 1
if i > n:
break
return((scans_g,masks_g))
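# Usage sketch: the identical seed=1000 in both datagen.flow calls above is what keeps every
# augmented scan aligned with its augmented mask. Hypothetical mini-batch (channels-first,
# as used elsewhere in this file):
demoAugmentation = False
if demoAugmentation:
    scans_small = np.random.rand(8, 1, 64, 64).astype(np.float32)
    masks_small = (np.random.rand(8, 1, 64, 64) > 0.95).astype(np.float32)
    scans_aug, masks_aug = augmentation(scans_small, masks_small, 8)
    print(scans_small.shape, scans_aug.shape)               # grows to roughly twice the samples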
def hu_to_pix (hu):
return (hu - MIN_BOUND) / (MAX_BOUND - MIN_BOUND) - PIXEL_MEAN
def pix_to_hu (pix):
return (pix + PIXEL_MEAN) * (MAX_BOUND - MIN_BOUND) + MIN_BOUND
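# Quick inverse check: pix_to_hu undoes hu_to_pix exactly (unlike normalize, hu_to_pix does
# not clip), so values survive a round trip up to float rounding.
demoHuRoundTrip = False
if demoHuRoundTrip:
    for hu in (-1000.0, -400.0, 0.0, 400.0):
        print(hu, hu_to_pix(hu), pix_to_hu(hu_to_pix(hu)))  # last column equals the first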
from scipy import stats
def eliminate_incorrectly_segmented(scans, masks):
skip = dim // 2 # To Change see below ...
sxm = scans * masks
near_air_thresh = (-900 - MIN_BOUND) / (MAX_BOUND - MIN_BOUND) - PIXEL_MEAN # version 3 # -750 gives one more (for 0_3, d4); -600 gives 15 more than -900
#near_air_thresh # 0.08628 for -840; 0.067 for -867; 0.1148 for -800
cnt = 0
for i in range(sxm.shape[0]):
#sx = sxm[i,skip]
sx = sxm[i]
mx = masks[i]
if np.sum(mx) > 0: # only check non-blanks ...(keep blanks)
sx_max = np.max(sx)
if (sx_max) <= near_air_thresh:
cnt += 1
print ("Entry, count # and max: ", i, cnt, sx_max)
print (stats.describe(sx, axis=None))
#plt.imshow(sx, cmap='gray')
plt.imshow(sx[0,skip], cmap='gray') # selecting the mid entry
plt.show()
s_eliminate = np.max(sxm, axis=(1,2,3,4)) <= near_air_thresh # 3d
s_preserve = np.max(sxm, axis=(1,2,3,4)) > near_air_thresh #3d
s_eliminate_sum = sum(s_eliminate)
s_preserve_sum = sum(s_preserve)
print ("Eliminate, preserve =", s_eliminate_sum, s_preserve_sum)
masks = masks[s_preserve]
scans = scans[s_preserve]
del(sxm)
return scans, masks
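# Worked check of the near-air threshold above (assuming MIN_BOUND=-1000, MAX_BOUND=400 and
# PIXEL_MEAN=0.028 as defined earlier): -900 HU maps to (-900 + 1000) / 1400 - 0.028 ~= 0.0434,
# so a cube whose masked voxels never rise above ~0.043 is treated as near-air and eliminated.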
# the following 3 functions to read LUNA files are from: https://www.kaggle.com/arnavkj95/data-science-bowl-2017/candidate-generation-and-luna16-preprocessing/notebook
'''
This funciton reads a '.mhd' file using SimpleITK and return the image array,
origin and spacing of the image.
'''
def load_itk(filename):
# Reads the image using SimpleITK
itkimage = sitk.ReadImage(filename)
# Convert the image to a numpy array first and then shuffle the dimensions to get axis in the order z,y,x
ct_scan = sitk.GetArrayFromImage(itkimage)
# Read the origin of the ct_scan, will be used to convert the coordinates from world to voxel and vice versa.
origin = np.array(list(reversed(itkimage.GetOrigin())))
# Read the spacing along each dimension
spacing = np.array(list(reversed(itkimage.GetSpacing())))
return ct_scan, origin, spacing
'''
This function is used to convert the world coordinates to voxel coordinates using
the origin and spacing of the ct_scan
'''
def world_2_voxel(world_coordinates, origin, spacing):
stretched_voxel_coordinates = np.absolute(world_coordinates - origin)
voxel_coordinates = stretched_voxel_coordinates / spacing
return voxel_coordinates
'''
This function is used to convert the voxel coordinates to world coordinates using
the origin and spacing of the ct_scan.
'''
def voxel_2_world(voxel_coordinates, origin, spacing):
stretched_voxel_coordinates = voxel_coordinates * spacing
world_coordinates = stretched_voxel_coordinates + origin
return world_coordinates
def seq(start, stop, step=1):
n = int(round((stop - start)/float(step)))
if n > 1:
return([start + step*i for i in range(n+1)])
else:
return([])
'''
This function is used to create spherical regions in binary masks
at the given locations and radius.
'''
def draw_circles(image,cands,origin,spacing):
#make empty matrix, which will be filled with the mask
image_mask = np.zeros(image.shape, dtype=np.int16)
#run over all the nodules in the lungs
for ca in cands.values:
#get middel x-,y-, and z-worldcoordinate of the nodule
#radius = np.ceil(ca[4])/2 ## original: replaced the ceil with a very minor increase of 1% ....
radius = (ca[4])/2 + 0.51 * spacing[0] # increasing by circa half of distance in z direction .... (trying to capture wider region/border for learning ... and adress the rough net .
coord_x = ca[1]
coord_y = ca[2]
coord_z = ca[3]
image_coord = np.array((coord_z,coord_y,coord_x))
#determine voxel coordinate given the worldcoordinate
image_coord = world_2_voxel(image_coord,origin,spacing)
#determine the range of the nodule
#noduleRange = seq(-radius, radius, RESIZE_SPACING[0]) # original, uniform spacing
noduleRange_z = seq(-radius, radius, spacing[0])
noduleRange_y = seq(-radius, radius, spacing[1])
noduleRange_x = seq(-radius, radius, spacing[2])
#x = y = z = -2
#create the mask
for x in noduleRange_x:
for y in noduleRange_y:
for z in noduleRange_z:
coords = world_2_voxel(np.array((coord_z+z,coord_y+y,coord_x+x)),origin,spacing)
#if (np.linalg.norm(image_coord-coords) * RESIZE_SPACING[0]) < radius: ### original (contrained to a uniofrm RESIZE)
if (np.linalg.norm((image_coord-coords) * spacing)) < radius:
image_mask[int(np.round(coords[0])),int(np.round(coords[1])),int(np.round(coords[2]))] = int(1)
return image_mask
'''
This function takes the path to a '.mhd' file as input and
is used to create the nodule masks and segmented lungs after
rescaling to 1mm size in all directions. It saved them in the .npz
format. It also takes the list of nodule locations in that CT Scan as
input.
'''
def load_scans_masks_or_blanks(luna_subset, useAll, use_unsegmented=True):
#luna_subset = "[0-6]"
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
annotations = pd.read_csv(LUNA_ANNOTATIONS)
annotations.head()
candidates = pd.read_csv(LUNA_CANDIDATES)
candidates_false = candidates[candidates["class"] == 0] # only select the false candidates
candidates_true = candidates[candidates["class"] == 1] # only select the false candidates
sids = []
scans = []
masks = []
blankids = [] # class/id whether scan is with nodule or without, 0 - with, 1 - without
cnt = 0
skipped = 0
#file=files[7]
for file in files:
imagePath = file
seriesuid = file[file.rindex('/')+1:] # everything after the last slash
seriesuid = seriesuid[:len(seriesuid)-len(".mhd")] # cut out the suffix to get the uid
path = imagePath[:len(imagePath)-len(".mhd")] # cut out the suffix to get the uid
if use_unsegmented:
path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
else:
path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
cands = annotations[seriesuid == annotations.seriesuid] # select the annotations for the current series
ctrue = candidates_true[seriesuid == candidates_true.seriesuid]
cfalse = candidates_false[seriesuid == candidates_false.seriesuid]
blankid = 1 if (len(cands) == 0 and len(ctrue) == 0 and len(cfalse) > 0) else 0
skip_nodules_entirely = False # was False
use_only_nodules = False
if skip_nodules_entirely and blankid ==0:
## manual switch to generate extra data for the corrupted set
print("Skipping nodules (skip_nodules_entirely) ", seriesuid)
skipped += 1
elif use_only_nodules and (len(cands) == 0):
## manual switch to generate only nodules data due lack of time and repeat etc time pressures
print("Skipping blanks (use_only_nodules) ", seriesuid)
skipped += 1
else: # NORMAL operations
if (len(cands) > 0 or
(blankid >0) or
useAll):
sids.append(seriesuid)
blankids.append(blankid)
if use_unsegmented:
scan_z = np.load(''.join((path_segmented + '_lung' + '.npz')))
else:
scan_z = np.load(''.join((path_segmented + '_lung_seg' + '.npz')))
scan = scan_z['arr_0']
mask_z = np.load(''.join((path_segmented + '_nodule_mask_wblanks' + '.npz')))
mask = mask_z['arr_0']
testPlot = False
if testPlot:
maskcheck_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
maskcheck = maskcheck_z['arr_0']
f, ax = plt.subplots(1, 2, figsize=(10,5))
ax[0].imshow(np.sum(np.abs(maskcheck), axis=0),cmap=plt.cm.gray)
ax[1].imshow(np.sum(np.abs(mask), axis=0),cmap=plt.cm.gray)
#ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
scans.append(scan)
masks.append(mask)
cnt += 1
else:
print("Skipping non-nodules and non-blank entry ", seriesuid)
skipped += 1
print ("Summary: cnt & skipped: ", cnt, skipped)
return scans, masks, sids, blankids
MIN_BOUND = -1000.0
MAX_BOUND = 400.0
def normalize(image):
image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
image[image>1] = 1.
image[image<0] = 0.
return image
PIXEL_MEAN = 0.028 ## for LUNA subset 0 and our preprocessing, only with nudels was 0.028, all was 0.020421744071562546 (in the tutorial they used 0.25)
def zero_center(image):
image = image - PIXEL_MEAN
return image
def convert_scans_and_masks_xd3(scans, masks, only_with_nudels, dim=3, crop=16, blanks_per_axis = 4, add_blank_spacing_size=0, add_blank_layers = 0):
# reuse scan to reduce memory footprint
dim_orig = dim
skip_low = dim // 2 # dim shoudl be uneven -- it is recalculated anyway to this end
skip_high = dim -skip_low - 1
do_not_allow_even_dim = False ## now we allow odd numbers ...
if do_not_allow_even_dim:
dim = 2 * skip_low + 1
skip_low = dim // 2
skip_high = dim -skip_low - 1
if dim != dim_orig:
print ("convert_scans_and_masks_x: Dim must be uneven, corrected from .. to:", dim_orig, dim)
work = []
for scan in scans:
tmp = []
for i in range(skip_low, scan.shape[0]-skip_high):
#img1 = scan[i-1]
#img2 = scan[i]
#img3 = scan[i+1]
#rgb = np.stack((img1, img2, img3))
rgb = np.stack(scan[i-skip_low:i+skip_high+1])
tmp.append(rgb)
work.append(np.array(tmp))
scans1 = np.stack([val for sublist in work for val in sublist ]) # NO skipping as we have already cut the first and the last layer
work = []
dxrange = scans[0].shape[-1] - 2 * crop
dyrange = scans[0].shape[-2] - 2 * crop
if add_blank_spacing_size > 0:
for mask in masks:
if (np.min(mask) < 0):
## we have a blank
### ADD ariticial mask pixel every add_blank_spacing layers for each blankids ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
for i in range(skip_low+(add_blank_spacing_size//2), mask.shape[0]-skip_high, add_blank_spacing_size):
mask[i, np.random.randint(0,dyrange), np.random.randint(0,dxrange)] = -1 # negative pixel to be picked up below and corrected back to none
if add_blank_layers > 0:
for mask in masks:
if (np.min(mask) < 0):
dzrange = mask.shape[0]-dim
## we have a blank
### ADD ariticial mask pixel every add_blank_spacing layers for each blankids ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
for k in range(add_blank_layers):
i = np.random.randint(0, dzrange) + skip_low
#print ("dz position, random, mask.shape ", i, mask.shape)
mask[i, np.random.randint(0,dyrange), np.random.randint(0,dxrange)] = -1 # negative pixel to be picked up below and corrected back to none
add_random_blanks_in_blanks = False ## NO need for the extra random blank pixels now, 20170327
if add_random_blanks_in_blanks:
for mask in masks:
if (np.min(mask) < 0):
## we have a blank
### ADD ariticial mask pixel every add_blank_spacing layers for each blankids ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
#zlow = skip_low
#zhigh = mask.shape[0]-skip_high
pix_sum = np.sum(mask, axis=(1,2))
idx_blanks = np.min(mask, axis=(1,2)) < 0 ## don't use it - let's vary the position across the space
for iz in range(mask.shape[0]):
if (np.min(mask[iz])) < 0:
for ix in range(blanks_per_axis):
#xpos = crop + (ix)*dx + dx //2
for iy in range(blanks_per_axis):
#ypos = crop + (iy)*dy + dy //2
xpos = crop + np.random.randint(0,dxrange)
ypos = crop + np.random.randint(0,dyrange)
#print (iz, xpos, ypos)
#mask[idx_blanks, ypos, xpos] = -1 # negative pixel to be picked up below and corrected back to none
mask[iz, ypos, xpos] = -1
use_3d_mask = True ##
if use_3d_mask:
work = [] # 3 layers
for mask in masks:
tmp = []
#i = 0
for i in range(skip_low, mask.shape[0]-skip_high):
rgb = np.stack(mask[i-skip_low:i+skip_high+1])
tmp.append(rgb)
work.append(np.array(tmp))
masks1 = np.stack([val for sublist in work for val in sublist ] )# NO skipping as we have already cut the first and the last layer
else:
masks1 = np.stack([val for sublist in masks for val in sublist[skip_low:-skip_high]] ) # skip one element at the beginning and at the end
if only_with_nudels:
if use_3d_mask:
            nudels_pix_count = np.sum(np.abs(masks1), axis = (1,2,3))  ## absolute sum over the whole 3D slab so that blanks (marked with -1) are also counted
else:
nudels_pix_count = np.sum(np.abs(masks1), axis = (1,2))
scans1 = scans1[nudels_pix_count != 0]
masks1 = masks1[nudels_pix_count != 0]
scans1 = normalize(scans1)
useTestPlot = False
if useTestPlot:
plt.hist(scans1.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
for i in range(20):
print ('scan '+str(i))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
scans1 = zero_center(scans1)
    scans1 = scans1.astype(np.float32)  # cast to float32 (not float64): Keras operates on float32, and the originals were int
if use_3d_mask:
done = 1 # nothing to do
else:
masks = np.copy(masks1)
masks1=np.zeros((masks.shape[0],1,img_rows,img_cols))
for i in range(masks.shape[0]):
img=masks[i,:,:]
###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
masks1[i,0,:,:]=img
return scans1, masks1
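# Illustrative sketch (not from the original run scripts): calling
# convert_scans_and_masks_xd3 on one small fake scan/mask pair, just to show
# the expected input (lists of 3D arrays) and output (stacked dim-slab arrays).
run_convert_example = False
if run_convert_example:
    fake_scan = np.random.uniform(-1000, 400, size=(8, 64, 64))
    fake_mask = np.zeros((8, 64, 64))
    fake_mask[4, 32, 32] = 1  # one hypothetical nodule pixel
    ex_scans, ex_masks = convert_scans_and_masks_xd3([fake_scan], [fake_mask],
                                                     only_with_nudels=True, dim=3, crop=16)
    print("converted shapes:", ex_scans.shape, ex_masks.shape)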
def eliminate_incorrectly_segmented(scans, masks):
    skip = dim // 2  # NOTE: relies on a module-level 'dim' defined elsewhere in this script
    sxm = scans * masks
    near_air_thresh = (-900 - MIN_BOUND) / (MAX_BOUND - MIN_BOUND) - PIXEL_MEAN # version 3; -750 gives one more (for 0_3, d4); -600 gives 15 more than -900
#near_air_thresh #0.08628 for -840 # 0.067 # for -867; 0.1148 for -800
cnt = 0
for i in range(sxm.shape[0]):
#sx = sxm[i,skip]
sx = sxm[i]
mx = masks[i]
if np.sum(mx) > 0: # only check non-blanks ...(keep blanks)
sx_max = np.max(sx)
if (sx_max) <= near_air_thresh:
cnt += 1
print ("Entry, count # and max: ", i, cnt, sx_max)
print (stats.describe(sx, axis=None))
#plt.imshow(sx, cmap='gray')
plt.imshow(sx[0,skip], cmap='gray') # selecting the mid entry
plt.show()
s_eliminate = np.max(sxm, axis=(1,2,3,4)) <= near_air_thresh # 3d
s_preserve = np.max(sxm, axis=(1,2,3,4)) > near_air_thresh #3d
s_eliminate_sum = sum(s_eliminate)
s_preserve_sum = sum(s_preserve)
print ("Eliminate, preserve =", s_eliminate_sum, s_preserve_sum)
masks = masks[s_preserve]
scans = scans[s_preserve]
del(sxm)
return scans, masks
def grid_data(source, grid=32, crop=16, expand=12):
gridsize = grid + 2 * expand
stacksize = source.shape[0]
height = source.shape[3] # should be 224 for our data
width = source.shape[4]
gridheight = (height - 2 * crop) // grid # should be 6 for our data
gridwidth = (width - 2 * crop) // grid
cells = []
for j in range(gridheight):
for i in range (gridwidth):
cell = source[:,:,:, crop+j*grid-expand:crop+(j+1)*grid+expand, crop+i*grid-expand:crop+(i+1)*grid+expand]
cells.append(cell)
cells = np.vstack (cells)
return cells, gridwidth, gridheight
def data_from_grid (cells, gridwidth, gridheight, grid=32):
height = cells.shape[3] # should be 224 for our data
width = cells.shape[4]
crop = (width - grid ) // 2 ## for simplicity we are assuming the same crop (and grid) vertically and horizontally
dspacing = gridwidth * gridheight
layers = cells.shape[0] // dspacing
if crop > 0: # do NOT crop with 0 as we get empty cells ...
cells = cells[:,:,:,crop:-crop,crop:-crop]
if crop > 2*grid:
print ("data_from_grid Warning, unusually large crop (> 2*grid); crop, & grid, gridwith, gridheight: ", (crop, grid, gridwidth, gridheight))
shape = cells.shape
new_shape_1_dim = shape[0]// (gridwidth * gridheight) # ws // 36 -- Improved on 20170306
new_shape = (gridwidth * gridheight, new_shape_1_dim, ) + tuple([x for x in shape][1:]) # was 36, Improved on 20170306
cells = np.reshape(cells, new_shape)
cells = np.moveaxis(cells, 0, -3)
shape = cells.shape
new_shape2 = tuple([x for x in shape[0:3]]) + (gridheight, gridwidth,) + tuple([x for x in shape[4:]])
cells = np.reshape(cells, new_shape2)
cells = cells.swapaxes(-2, -3)
shape = cells.shape
combine_shape =tuple([x for x in shape[0:3]]) + (shape[-4]*shape[-3], shape[-2]*shape[-1],)
cells = np.reshape(cells, combine_shape)
return cells
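# Illustrative round-trip sketch (not from the original training scripts):
# grid_data() cuts a 5D stack (N, C, D, H, W) into grid cells and
# data_from_grid() reassembles them into the central (H - 2*crop) region.
# The array sizes below are made up purely for demonstration.
run_grid_example = False
if run_grid_example:
    demo = np.random.rand(1, 1, 1, 96, 96)
    demo_cells, gw, gh = grid_data(demo, grid=32, crop=16, expand=0)
    demo_recon = data_from_grid(demo_cells, gw, gh, grid=32)
    print("cells:", demo_cells.shape, "reconstructed:", demo_recon.shape)
    print("matches central crop:", np.array_equal(demo_recon, demo[:, :, :, 16:80, 16:80]))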
def data_from_grid_by_proximity (cells, gridwidth, gridheight, grid=32):
    # disperse the sequential data into layers and then use data_from_grid
dspacing = gridwidth * gridheight
layers = cells.shape[0] // dspacing
shape = cells.shape
new_shape_1_dim = shape[0]// (gridwidth * gridheight) # ws // 36 -- Improved on 20170306
    ### NOTE that we invert the order of shapes below to get the required proximity-type ordering
new_shape = (new_shape_1_dim, gridwidth * gridheight, ) + tuple([x for x in shape][1:]) # was 36, Improved on 20170306
# swap ordering of axes
cells = np.reshape(cells, new_shape)
cells = cells.swapaxes(0, 1)
cells = np.reshape(cells, shape)
cells = data_from_grid (cells, gridwidth, gridheight, grid)
return cells
def find_voxels(dim, grid, images3, images3_seg, pmasks3, nodules_threshold=0.999, voxelscountmax = 1000, mid_mask_only = True, find_blanks_also = True, centralcutonly=True):
zsel = dim // 2
sstart = 0
send = images3.shape[0]
if mid_mask_only:
pmav = pmasks3[:,0,dim // 2] # using the mid mask
pmav.shape
else:
pmav = pmasks3[:,0] ### NOTE this variant has NOT been tested fully YET
run_UNNEEDED_code = False
ims = images3[sstart:send,0,zsel] # selecting the zsel cut for nodules calc ...
ims_seg = images3_seg[sstart:send,0,zsel]
ims.shape
#pms = pmasks3[sstart:send,0,0]
pms = pmav[sstart:send]
images3.shape
thresh = nodules_threshold # for testing , set it here and skip the loop
segment = 2 # for compatibility of the naming convention
    # threshold the predicted masks ...
#for thresh in [0.5, 0.9, 0.9999]:
#for thresh in [0.5, 0.75, 0.9, 0.95, 0.98, 0.99, 0.999, 0.9999, 0.99999, 0.999999, 0.9999999]:
    for thresh in [nodules_threshold]: # just this one -- keeping the loop for a while
if find_blanks_also:
idx = np.abs(pms) > thresh
else:
idx = pms > thresh
idx.shape
nodls = np.zeros(pms.shape).astype(np.int16)
nodls[idx] = 1
nx = nodls[idx]
nodules_pixels = ims[idx] # flat
nodules_hu = pix_to_hu(nodules_pixels)
part_name = ''.join([str(segment), '_', str(thresh)])
### DO NOT do them here
use_corrected_nodules = True # do it below from 20170311
if not use_corrected_nodules:
df = hu_describe(nodules_hu, uid=uid, part=part_name)
add_projections = False
axis = 1
nodules_projections = []
for axis in range(3):
nodls_projection = np.max(nodls, axis=axis)
naxis_name = ''.join(["naxis_", str(axis),"_", part_name])
if add_projections:
df[naxis_name] = np.sum(nodls_projection)
nodules_projections.append(nodls_projection)
idx.shape
## find the individual nodules ... as per the specified probabilities
labs, labs_num = measure.label(idx, return_num = True, neighbors = 8 , background = 0) # label the nodules in 3d, allow for diagonal connectivity
voxels = []
vmasks = []
if labs_num > 0 and labs.shape[0] >1: # checking for height > 1 is needed as measure.regionprops fails when it is not, for instance for shape (1, 20, 20) we get ValueError: Label and intensity image must have the same shape.
print("Befpre measure.regionprops, labs & intensity shapes: ", labs.shape, ims.shape)
regprop = measure.regionprops(labs, intensity_image=ims) # probkem here on 20170327
voxel_volume = np.product(RESIZE_SPACING)
areas = [rp.area for rp in regprop] # this is in cubic mm now (i.e. should really be called volume)
volumes = [rp.area * voxel_volume for rp in regprop]
diameters = [2 * (3* volume / (4 * np.pi ))**0.3333 for volume in volumes]
labs_ids = [rp.label for rp in regprop]
#ls = [rp.label for rp in regprop]
max_val = np.max(areas)
max_index = areas.index(max_val)
max_label = regprop[max_index].label
bboxes = [r.bbox for r in regprop]
idl = labs == regprop[max_index].label # 400
nodules_pixels = ims[idl]
nodules_hu = pix_to_hu(nodules_pixels)
if run_UNNEEDED_code:
nodules_hu_reg = []
for rp in regprop:
idl = labs == rp.label
nodules_pixels = ims[idl]
nodules_hu = pix_to_hu(nodules_pixels)
nodules_hu_reg.append(nodules_hu) # NOTE some are out of interest, i.e. are equal all (or near all) to MAX_BOUND (400)
dfn = pd.DataFrame(
{
"area": areas,
"diameter": diameters,
"bbox": bboxes
},
index=labs_ids)
nodules_count = len(dfn) # 524 for file 1 of part 8 ..
max_nodules_count = voxelscountmax
n=0
for n in range(max_nodules_count):
                if n < len(dfn): # use the nodule data, otherwise leave empty
bb = dfn.iloc[n]["bbox"]
zmin = bb[0]
zmax = bb[3]
zlen = bb[3] - bb[0]
ylen = bb[4] - bb[1]
xlen = bb[5] - bb[2]
xmin = np.max([bb[2] - np.max([(grid - xlen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
xmax = np.min([xmin + grid, ims.shape[2]]) ## do not beyond the right side
xmin = xmax - grid
if (xmax - xmin) != grid:
print ("ERROR in calculating the cut-offs ..., xmin, xmax =", xmin, xmax)
ymin = np.max([bb[1] - np.max([(grid - ylen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
ymax = np.min([ymin + grid, ims.shape[1]]) ## do not beyond the right side
ymin = ymax - grid
if (ymax - ymin) != grid:
print ("ERROR in calculating the cut-offs ..., ymin, ymax =", ymin, ymax)
zmin_sel = zmin
zmax_sel = zmax
if centralcutonly: #include only one voxel representation
zmin_sel = zmin + zlen // 2
zmax_sel = zmin_sel + 1
iz=zmin_sel # for testing
for iz in range(zmin_sel,zmax_sel):
voxel = images3[iz,:,:, ymin:ymax, xmin:xmax]
vmask = pmasks3[iz,:,:, ymin:ymax, xmin:xmax]
voxels.append(voxel)
vmasks.append(vmask)
testPlot = False
if testPlot:
print ('scan '+str(iz))
f, ax = plt.subplots(1, 8, figsize=(24,3))
ax[0].imshow(nodls[iz,ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[1].imshow(ims[iz,ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[2].imshow(images3_amp[iz,0, dim//2, ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[3].imshow(voxel[0,dim//2],cmap=plt.cm.gray)
ax[4].imshow(voxel[0,dim],cmap=plt.cm.gray)
ax[5].imshow(voxel[0,dim+1],cmap=plt.cm.gray)
ax[6].imshow(voxel[0,dim+2],cmap=plt.cm.gray)
ax[7].imshow(voxel[0,dim+3],cmap=plt.cm.gray)
if len(voxels) > 0:
voxel_stack = np.stack(voxels)
vmask_stack = np.stack(vmasks)
else:
print_warning = False
if print_warning:
print("WARNING, find_voxels, not single voxel found even though expected")
voxel_stack = []
vmask_stack = []
if testPlot:
print ('voxels count ', len(voxel_stack))
for ii in range(0,len(voxel_stack),len(voxel_stack)//10):
f, ax = plt.subplots(1, 2, figsize=(6,3))
ax[0].imshow(voxel_stack[ii, 0, dim // 2],cmap=plt.cm.gray)
ax[1].imshow(vmask_stack[ii, 0, dim // 2],cmap=plt.cm.gray)
return voxel_stack, vmask_stack
def measure_voxels(labs, ims):
#print("Befpre measure.regionprops, labs & intensity shapes: ", labs.shape, ims.shape)
    regprop = measure.regionprops(labs, intensity_image=ims)  # problem seen here on 20170327
voxel_volume = np.product(RESIZE_SPACING)
areas = [rp.area for rp in regprop] # this is in cubic mm now (i.e. should really be called volume)
volumes = [rp.area * voxel_volume for rp in regprop]
diameters = [2 * (3* volume / (4 * np.pi ))**0.3333 for volume in volumes]
labs_ids = [rp.label for rp in regprop]
#ls = [rp.label for rp in regprop]
max_val = np.max(areas)
max_index = areas.index(max_val)
max_label = regprop[max_index].label
bboxes = [r.bbox for r in regprop]
#max_ls = ls[max_index]
idl = labs == regprop[max_index].label # 400
nodules_pixels = ims[idl]
nodules_hu = pix_to_hu(nodules_pixels)
run_UNNEEDED_code = False
if run_UNNEEDED_code:
nodules_hu_reg = []
for rp in regprop:
idl = labs == rp.label
nodules_pixels = ims[idl]
nodules_hu = pix_to_hu(nodules_pixels)
nodules_hu_reg.append(nodules_hu) # NOTE some are out of interest, i.e. are equal all (or near all) to MAX_BOUND (400)
dfn = pd.DataFrame(
{
#"zcenter": zcenters,
#"ycenter": ycenters,
#"xcenter": xcenters,
"area": areas,
"diameter": diameters,
#"irreg_vol": irreg_vol,
#"irreg_shape": irreg_shape,
#"nodules_hu": nodules_hu_reg,
"bbox": bboxes
},
index=labs_ids)
return dfn
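# Illustrative sketch of measure_voxels() on a tiny fake labelled volume.
# It assumes the module-level RESIZE_SPACING, pix_to_hu and the pandas /
# skimage.measure imports used elsewhere in this script are available.
run_measure_example = False
if run_measure_example:
    fake_ims = np.random.rand(4, 20, 20)
    fake_idx = np.zeros((4, 20, 20), dtype=bool)
    fake_idx[1:3, 5:9, 5:9] = True  # one hypothetical blob
    fake_labs, _ = measure.label(fake_idx, return_num=True, background=0)
    df_demo = measure_voxels(fake_labs, fake_ims)
    print(df_demo[["area", "diameter"]])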
def find_voxels_and_blanks(dim, grid, images3, images3_seg, pmasks3, nodules_threshold=0.999, voxelscountmax = 1000, find_blanks_also = True, centralcutonly=True, diamin=2, diamax=10):
if np.sum(pmasks3) > 0:
centralcutonly = False # override centralcut for True nodule masks
zsel = dim // 2 if centralcutonly else range(0,dim)
pmav = pmasks3[:,0,zsel]
ims = images3[:,0,zsel] # selecting the zsel cut for nodules calc ...
ims_seg = images3_seg[:,0,zsel]
sstart = 0
send = images3.shape[0]
pms = pmav[sstart:send]
run_UNNEEDED_code = False
thresh = nodules_threshold # for testing , set it here and skip the loop
segment = 2 # for compatibility of the naming convention
    for thresh in [nodules_threshold]: # just this one -- keeping the loop for a while
if find_blanks_also:
idx = np.abs(pms) > thresh
else:
idx = pms > thresh
idx.shape
nodls = np.zeros(pms.shape).astype(np.int16)
nodls[idx] = 1
nx = nodls[idx]
        volume = np.sum(nodls) # a check calculation; also counted as a count within hu_describe
nodules_pixels = ims[idx] # flat
nodules_hu = pix_to_hu(nodules_pixels)
part_name = ''.join([str(segment), '_', str(thresh)])
### DO NOT do them here
use_corrected_nodules = True # do it below from 20170311
if not use_corrected_nodules:
df = hu_describe(nodules_hu, uid=uid, part=part_name)
add_projections = False
if add_projections:
nodules_projections = []
for axis in range(3):
#sxm_projection = np.max(sxm, axis = axis)
nodls_projection = np.max(nodls, axis=axis)
naxis_name = ''.join(["naxis_", str(axis),"_", part_name])
if add_projections:
df[naxis_name] = np.sum(nodls_projection)
nodules_projections.append(nodls_projection)
voxels = []
vmasks = []
if not centralcutonly:
for k in range(idx.shape[0]):
if np.sum(idx[k]) > 0:
## find the nodules and take a cut
labs, labs_num = measure.label(idx[k], return_num = True, neighbors = 8 , background = 0) # label the nodules in 3d, allow for diagonal connectivity
dfn = measure_voxels(labs, ims[k])
nodules_count_0 = len(dfn)
## CUT out anything that is outside of the specified diam range
dfn = dfn[(dfn["diameter"] >= diamin) & ((dfn["diameter"] < diamax))] # CUT OUT anything that is less than 3 mm (essentially less than 7 voxels for 2x2x2
nodules_count = len(dfn) # 524 for file 1 of part 8 ..
max_nodules_count = voxelscountmax
n=0
for n in range(max_nodules_count):
                        if n < len(dfn): # use the nodule data, otherwise leave empty
bb = dfn.iloc[n]["bbox"]
zmin = bb[0]
zmax = bb[3]
zlen = bb[3] - bb[0]
ylen = bb[4] - bb[1]
xlen = bb[5] - bb[2]
xmin = np.max([bb[2] - np.max([(grid - xlen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
xmax = np.min([xmin + grid, ims.shape[-1]]) ## do not beyond the right side
xmin = xmax - grid
if (xmax - xmin) != grid:
print ("ERROR in calculating the cut-offs ..., xmin, xmax =", xmin, xmax)
ymin = np.max([bb[1] - np.max([(grid - ylen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
ymax = np.min([ymin + grid, ims.shape[-2]]) ## do not beyond the right side
ymin = ymax - grid
if (ymax - ymin) != grid:
print ("ERROR in calculating the cut-offs ..., ymin, ymax =", ymin, ymax)
# here simply takje the entire voxel we have
#images3.shape
voxel = images3[k,:,:, ymin:ymax, xmin:xmax]
vmask = pmasks3[k,:,:, ymin:ymax, xmin:xmax]
voxels.append(voxel)
vmasks.append(vmask)
#voxel.shape
else:# essentially taking the central cuts of the blanks
## find the individual nodules ... as per the specified probabilities
labs, labs_num = measure.label(idx, return_num = True, neighbors = 8 , background = 0) # label the nodules in 3d, allow for diagonal connectivity
if labs_num > 0 and labs.shape[0] >1: # checking for height > 1 is needed as measure.regionprops fails when it is not, for instance for shape (1, 20, 20) we get ValueError: Label and intensity image must have the same shape.
#labs_num_to_store = 5
dfn = measure_voxels(labs, ims)
nodules_count = len(dfn) # 524 for file 1 of part 8 ..
max_nodules_count = voxelscountmax
n=0
for n in range(max_nodules_count):
                    if n < len(dfn): # use the nodule data, otherwise leave empty
bb = dfn.iloc[n]["bbox"]
zmin = bb[0]
zmax = bb[3]
zlen = bb[3] - bb[0]
ylen = bb[4] - bb[1]
xlen = bb[5] - bb[2]
xmin = np.max([bb[2] - np.max([(grid - xlen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
xmax = np.min([xmin + grid, ims.shape[-1]]) ## do not beyond the right side
xmin = xmax - grid
if (xmax - xmin) != grid:
print ("ERROR in calculating the cut-offs ..., xmin, xmax =", xmin, xmax)
ymin = np.max([bb[1] - np.max([(grid - ylen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
ymax = np.min([ymin + grid, ims.shape[-2]]) ## do not beyond the right side
ymin = ymax - grid
if (ymax - ymin) != grid:
print ("ERROR in calculating the cut-offs ..., ymin, ymax =", ymin, ymax)
zmin_sel = zmin
zmax_sel = zmax
if centralcutonly: #include only one voxel representation
zmin_sel = zmin + zlen // 2
zmax_sel = zmin_sel + 1
iz=zmin_sel # for testing
for iz in range(zmin_sel,zmax_sel):
voxel = images3[iz,:,:, ymin:ymax, xmin:xmax]
vmask = pmasks3[iz,:,:, ymin:ymax, xmin:xmax]
voxels.append(voxel)
vmasks.append(vmask)
testPlot = False
if testPlot:
print ('scan '+str(iz))
f, ax = plt.subplots(1, 8, figsize=(24,3))
ax[0].imshow(nodls[iz,ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[1].imshow(ims[iz,ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[2].imshow(images3_amp[iz,0, dim//2, ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[3].imshow(voxel[0,dim//2],cmap=plt.cm.gray)
ax[4].imshow(voxel[0,dim],cmap=plt.cm.gray)
ax[5].imshow(voxel[0,dim+1],cmap=plt.cm.gray)
ax[6].imshow(voxel[0,dim+2],cmap=plt.cm.gray)
ax[7].imshow(voxel[0,dim+3],cmap=plt.cm.gray)
if len(voxels) > 0:
voxel_stack = np.stack(voxels)
vmask_stack = np.stack(vmasks)
else:
print_warning = False
if print_warning:
print("WARNING, find_voxels, not single voxel found even though expected")
voxel_stack = []
vmask_stack = []
#print("Nodules, voxels_aggregated: ", len(dfn), len(voxel_stack))
#np.savez_compressed(path_voxels_variant, voxel_stack)
testPlot = False
if testPlot:
print ('voxels count ', len(voxel_stack))
for ii in range(0,len(voxel_stack),len(voxel_stack)//10):
#plt.imshow(voxel_stack[ii,0,dim // 2], cmap=plt.cm.gray)
#plt.show()
f, ax = plt.subplots(1, 2, figsize=(6,3))
ax[0].imshow(voxel_stack[ii, 0, dim // 2],cmap=plt.cm.gray)
ax[1].imshow(vmask_stack[ii, 0, dim // 2],cmap=plt.cm.gray)
return voxel_stack, vmask_stack
def shuffle_scans_masks(scans, masks, seed):
np.random.seed(seed)
index_shuf = np.arange(len(scans))
np.random.shuffle(index_shuf)
scans = scans[index_shuf]
masks = masks[index_shuf]
return scans, masks
def create_unet_training_files (dim, recreate_grid8_March_data=True): # version with backward compatibility
grid8_March_data_str = "a" if recreate_grid8_March_data else "" # used for the the original data/approach
# the main procedure to create training files for the nodule identifier (consolidated version, with backward compatibility for grid 8)
create_main_grid = True
if create_main_grid:
diamins_2_10 = not recreate_grid8_March_data # backward compatible option
if diamins_2_10:
grids = [10, 20]
diamins = [2, 10]
diamaxs = [10, 100]
crops2 = [7, 2] # not used in this option, added for flow
else:
grids = [20, 40]
diamins = [2, 2]
diamaxs = [100, 100]
crops2 = [2, 12] # added to recreate_grid8_March_data
else:
## created separately -- as an addition - for extra augmentation
grids = [10]
diamins = [2]
diamaxs = [5]
crops2 = [7]
create_missing_grid_file = False
if create_missing_grid_file:
grids = [20]
diamins = [19]
diamaxs = [99]
crops2 = [2]
resolution_str = RESOLUTION_STR
grid=20
centralcutonly = True
    grid_multiple = 1 # do not aggregate any of the grids/data created -- save verbatim
grid_dest = grid * grid_multiple
eliminate_blanks_for_mid_extra_cut = False # typically False, only true for the extra data
if eliminate_blanks_for_mid_extra_cut:
crop=12 # leading to 200x200 image cut 10 x 10 times
model_grid_name = "8g10"
else:
        crop=22  # providing a 9x20 by 9x20 grid
model_grid_name = "8g9" #"16g3" # was 16g9
dim = dim
include_ba_partial_height = dim//2
grid_passes = 1 # was 10 # gp10 standard must be 1
if grid_passes > 1:
model_grid_name = "8g10x%s" % grid_passes
elif grid_passes < 1:
grid_passes = 1
print ("grid_passes, include_ba_partial_height, model_grid_name: ", grid_passes, include_ba_partial_height, model_grid_name)
data_generation=True
testPrint = False
if data_generation: # DO ONE BY ONE as convert_scans_and_masks_xd requires a lot of memory and can swap ...
exclude_blanks = False if create_main_grid else True # replaces the manual change done in the interactive mode
include_below_above_nodule = False
if not include_below_above_nodule:
ba0 = dim //2 - include_ba_partial_height
ba1 = np.min([dim //2 + include_ba_partial_height + 1, dim])
split_into_nodules_and_blanks = True
for pt in range(0,3): # splitting into 2 parts due to memory needs
np.random.seed(1000+pt)
scans_all_grid = []
masks_all_grid = []
scans_all_grid2 = []
masks_all_grid2 = []
scans_all_grid3 = []
masks_all_grid3 = []
if pt == 0:
istart = 4*pt
iend = 4*(pt+1)
elif pt == 1:
istart = 4*pt
iend = 4*(pt+1)
iend += 1 # increase by 1 to cover 9
else:
istart = 9
iend = 10
for i in range(istart, iend):
scans_all = []
masks_all = []
sids_all = []
scans_all2 = []
masks_all2 = []
sids_all2 = []
scans_all3 = []
masks_all3 = []
sids_all3 = []
print ("\n\n################################# EXECUTING subset ", i)
scans, masks, sids, blankids = load_scans_masks_or_blanks(i, useAll = False, use_unsegmented=DO_NOT_USE_SEGMENTED)
if include_below_above_nodule:
                    only_with_nudels = True # may be False or True; kept True here so we do not lose the info
else:
only_with_nudels = True # could be True ...
for j in range(len(scans)):
extra_test=False
if extra_test:
mtemp = masks[j]
np.sum(mtemp)
np.min(mtemp)
idx = np.sum(masks[j], axis=(1,2)) != 0 # at this stage, with this more memory friendly version there should be only items with nodules
idx_nodules = np.sum(masks[j], axis=(1,2)) > 0
idx_blanks = np.sum(masks[j], axis=(1,2)) < 0
print ("Masks, with nodules and blanks: ", np.sum(idx_nodules), np.sum(idx_blanks))
                    blanks_per_axis = 0  # we now randomly position these
                    scans1 = [scans[j]]
                    masks1 = [masks[j]]
                    use_standard_convert = True if recreate_grid8_March_data else False # added for backward compatibility
if use_standard_convert:
scans1, masks1 = convert_scans_and_masks_xd3 (scans1, masks1, only_with_nudels = only_with_nudels, dim=dim, crop=crop, blanks_per_axis = blanks_per_axis,
add_blank_spacing_size=1, add_blank_layers = 0) # as per March data generation
if not include_below_above_nodule:
### take the centrale values
idx = np.sum(np.abs(masks1[:,ba0:ba1]), axis=(-1,-2, -3)) != 0 #dim // 2
idx_nodules = np.sum(masks1[:,ba0:ba1], axis=(-1,-2, -3)) > 0
idx_blanks = np.sum(masks1[:,ba0:ba1], axis=(-1,-2, -3)) < 0
else:
idx = np.sum(np.abs(masks1), axis=(-1,-2,-3)) != 0
idx_nodules = np.sum(masks1, axis=(-1,-2,-3)) > 0
idx_blanks = np.sum(masks1, axis=(-1,-2,-3)) < 0
count_nodules = np.sum(idx_nodules)
count_blanks = np.sum(idx_blanks)
count_all = np.sum(idx, axis=0)
print ("sidj, Total masks, and with nodules and blanks: ", sids[j], len(idx), count_nodules, count_blanks)
if (count_nodules == 0):
# cut down the blanks only to the centrally located, whatever the include_below_above_nodule
idx_blanks = np.sum(masks1[:,dim // 2], axis=(-1,-2)) < 0
count_blanks = np.sum(idx_blanks)
print("Selecting only the central blanks, count of: ", count_blanks)
masks1 = masks1[idx_blanks]
scans1 = scans1[idx_blanks]
elif not include_below_above_nodule:
#print("Not including the below and above nodules' entries, beyond partial_height of , remaining count: ", include_ba_partial_height, count_all)
print("Using ba partial_height; remaining count: ", count_all)
masks1 = masks1[idx]
scans1 = scans1[idx]
else:
print("Keeping all entries of: ", count_all )
else:
## just convert into 3d rep and find the vosel in the entire space
scans1, masks1 = convert_scans_and_masks_xd3 (scans1, masks1, only_with_nudels = False, dim=dim, crop=crop, blanks_per_axis = blanks_per_axis,
add_blank_spacing_size=0, add_blank_layers = 0)
scans1 = scans1[:, np.newaxis] # do NOT change these as we iterate by different grids now 20170327
masks1 = masks1[:, np.newaxis] # do NOT change these as we iterate by different grids now 20170327
for ig in range(len(grids)):
grid_masks = []
grid_scans = []
grid = grids[ig]
crop12 = crops2[ig]
if exclude_blanks and np.sum(masks1) <0:
print("Completely excluding blanks & gridding of them ...")
scans1_c = []
masks1_c = []
else:
for gpass in range(grid_passes):
if grid_passes != 1:
shift = grid // grid_passes
                                    shifting_gridwith = img_cols // grid - 1 # minus 1 to accommodate the shift
crop_top_left = (img_cols - (shifting_gridwith+1)*grid) // 2 + gpass*shift
crop_bottom_right = crop_top_left + shifting_gridwith*grid
masks1_c = masks1[:,:,:,crop_top_left:crop_bottom_right,crop_top_left:crop_bottom_right]
scans1_c = scans1[:,:,:,crop_top_left:crop_bottom_right,crop_top_left:crop_bottom_right]
if recreate_grid8_March_data:
grid_masks1, gridwidth, gridheight = grid_data(masks1_c, grid=grid, crop=0, expand=0 )
grid_scans1, gridwidth, gridheight = grid_data(scans1_c, grid=grid, crop=0, expand=0)
else:
#### NOTE the following has NOT been tested
print("WARNING: grid_passes option has NOT been tested working with the find_voxels procedure")
grid_scans1, grid_masks1 = find_voxels_and_blanks(dim, grid, scans1_c, scans1_c, masks1_c, nodules_threshold=0.999, voxelscountmax = 1000,
find_blanks_also = True, centralcutonly = centralcutonly, diamin=diamins[ig], diamax=diamaxs[ig])
else: # just a single standard pass - no shifting grid
if recreate_grid8_March_data:
grid_masks1, gridwidth, gridheight = grid_data(masks1, grid=grid, crop=crop12, expand=0 )
grid_scans1, gridwidth, gridheight = grid_data(scans1, grid=grid, crop=crop12, expand=0)
else:
grid_scans1, grid_masks1 = find_voxels_and_blanks(dim, grid, scans1, scans1, masks1, nodules_threshold=0.999, voxelscountmax = 1000,
find_blanks_also = True, centralcutonly = centralcutonly, diamin=diamins[ig], diamax=diamaxs[ig])
testPlot = False
if testPlot:
for ii in range(0, len(grid_scans1)): # was 2, 20
print ('gridscans1 scan/cut '+str(ii))
f, ax = plt.subplots(1, 2, figsize=(8,4))
ax[0].imshow(grid_scans1[ii,0,dim // 2],cmap=plt.cm.gray)
#ax[1].imshow(masks_pred[ii,0,0],cmap=plt.cm.gray)
ax[1].imshow(grid_masks1[ii,0,dim // 2] ,cmap=plt.cm.gray)
#ax[2].imshow(np.abs(masks_pred[ii,0,0] - masks_pred_prev[ii,0,0]) ,cmap=plt.cm.gray)
#ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
if len(grid_masks1) > 0:
idx_blanks = np.sum(grid_masks1[:,:,dim // 2], axis=(-1,-2, -3)) < 0
idx = np.sum(np.abs(grid_masks1), axis=(1,2,3,4)) != 0
if not include_below_above_nodule:
idx_nodules = np.sum(grid_masks1[:,:,ba0:ba1], axis=(1,2,3,4)) > 0
else:
                                        idx_nodules = np.sum(grid_masks1, axis=(1,2,3,4)) > 0 # this may be inaccurate when blanks are present somewhere
# cut down the blanks only to the centrally located
if testPrint:
print ("Total masks (after grid), and with nodules and blanks: ", len(idx), np.sum(idx_nodules), np.sum(idx_blanks))
idx_nodules_central_blanks = idx_nodules | idx_blanks
if exclude_blanks:
if testPrint:
print("Not including blanks ....")
grid_masks1 = grid_masks1[idx_nodules]
grid_scans1 = grid_scans1[idx_nodules]
else:
grid_masks1 = grid_masks1[idx_nodules_central_blanks] # ONLY keep the masks and scans with nodules(central)
grid_scans1 = grid_scans1[idx_nodules_central_blanks]
if testPrint:
print ("Total masks (after another central blanks cut): ", len(grid_masks1))
grid_masks.append(grid_masks1)
grid_scans.append(grid_scans1)
if len(grid_masks):
masks1_c = np.concatenate(grid_masks)
scans1_c = np.concatenate(grid_scans)
else:
masks1_c = []
scans1_c = []
print ("=== Grid, Sub-total masks1 : ", (grid, len(masks1_c)))
if (len(masks1_c) > 0):
if ig == 0:
scans_all.append(scans1_c)
masks_all.append(masks1_c)
sids_all.append(sids[j]) # ????
elif ig == 1:
scans_all2.append(scans1_c)
masks_all2.append(masks1_c)
sids_all2.append(sids[j]) # ????
elif ig == 2:
scans_all3.append(scans1_c)
masks_all3.append(masks1_c)
sids_all3.append(sids[j]) # ???
else:
print("Warning: 4 separate grids are not implemented for automatic data generation")
## end of the grid_and_limit_data LOOP --------------------------------------------------------
scans = np.concatenate(scans_all) #e.g. [0:4])
masks = np.concatenate(masks_all) #[0:4])
if len(grids) > 1:
scans2 = np.concatenate(scans_all2)
masks2 = np.concatenate(masks_all2)
if len(grids) > 2:
scans3 = np.concatenate(scans_all3)
masks3 = np.concatenate(masks_all3)
################### end o the scans loop #### ######################################################
ig =0
for ig in range(len(grids)):
if ig == 0:
scansx = scans
masksx = masks
elif ig == 1:
scansx = scans2
masksx = masks2
elif ig == 2:
scansx = scans3
masksx = masks3
                # select only non-zero grids (essentially decimating the data; for subset 1: from 17496 down to 1681)
                idx = np.sum(np.abs(masksx), axis=(1,2,3,4)) != 0
# -*- coding: utf-8 -*-
"""
This file contains MLTools class and all developed methods.
"""
# Python2 support
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pickle
class MLTools(object):
"""
A Python implementation of several methods needed for machine learning
classification/regression.
    Attributes:
        last_training_pattern (numpy.ndarray): the last pattern seen during
            training, kept so that future targets can be predicted.
        has_trained (boolean): True once a training step has been performed.
        cv_best_rmse (float): best RMSE found by cross-validation, or a
            placeholder string if cross-validation has not been run.
    """
def __init__(self):
self.last_training_pattern = []
self.has_trained = False
self.cv_best_rmse = "Not cross-validated"
#################################################
########### Methods to be overridden ############
#################################################
def _local_train(self, training_patterns, training_expected_targets,
params):
"""
Should be overridden.
"""
return None
def _local_test(self, testing_patterns, testing_expected_targets,
predicting):
"""
Should be overridden.
"""
return None
# ########################
# Public Methods
# ########################
def _ml_search_param(self, database, dataprocess, path_filename, save,
cv, min_f):
"""
Should be overridden.
"""
return None
def _ml_print_parameters(self):
"""
Should be overridden.
"""
return None
def _ml_predict(self, horizon=1):
"""
Predict next targets based on previous training.
Arguments:
horizon (int): number of predictions.
Returns:
numpy.ndarray: a column vector containing all predicted targets.
"""
if not self.has_trained:
print("Error: Train before predict.")
return
# Create first new pattern
new_pattern = np.hstack([self.last_training_pattern[2:],
self.last_training_pattern[0]])
# Create a fake target (1)
new_pattern = np.insert(new_pattern, 0, 1).reshape(1, -1)
predicted_targets = np.zeros((horizon, 1))
for t_counter in range(horizon):
te_errors = self.test(new_pattern, predicting=True)
predicted_value = te_errors.predicted_targets
predicted_targets[t_counter] = predicted_value
# Create a new pattern including the actual predicted value
new_pattern = np.hstack([new_pattern[0, 2:],
np.squeeze(predicted_value)])
# Create a fake target
new_pattern = np.insert(new_pattern, 0, 1).reshape(1, -1)
return predicted_targets
def _ml_train(self, training_matrix, params):
"""
wr
"""
training_patterns = training_matrix[:, 1:]
training_expected_targets = training_matrix[:, 0]
training_predicted_targets = \
self._local_train(training_patterns,
training_expected_targets,
params)
training_errors = Error(training_expected_targets,
training_predicted_targets,
regressor_name=self.regressor_name)
# Save last pattern for posterior predictions
self.last_training_pattern = training_matrix[-1, :]
self.has_trained = True
return training_errors
def _ml_test(self, testing_matrix, predicting=False):
""" wr
"""
testing_patterns = testing_matrix[:, 1:]
testing_expected_targets = testing_matrix[:, 0].reshape(-1, 1)
testing_predicted_targets = self._local_test(testing_patterns,
testing_expected_targets,
predicting)
testing_errors = Error(testing_expected_targets,
testing_predicted_targets,
regressor_name=self.regressor_name)
return testing_errors
def _ml_train_iterative(self, database_matrix, params=[],
sliding_window=168, k=1):
"""
Training method used by Fred 09 paper.
"""
# Number of dimension/lags/order
p = database_matrix.shape[1] - 1
# Amount of training/testing procedures
number_iterations = database_matrix.shape[0] + p - k - sliding_window + 1
print("Number of iterations: ", number_iterations)
# Training set size
tr_size = sliding_window - p - 1
# Sum -z_i value to every input pattern, Z = r_t-(p-1)-k
z = database_matrix[0:-k, 1].reshape(-1, 1) * np.ones((1, p))
database_matrix[k:, 1:] = database_matrix[k:, 1:] - z
pr_target = []
ex_target = []
for i in range(number_iterations):
# Train with sliding window training dataset
self._ml_train(database_matrix[k+i:k+i+tr_size-1, :], params)
# Predicted target with training_data - z_i ( r_t+1 )
pr_t = self._ml_predict(horizon=1)
# Sum z_i value to get r'_t+1 = r_t+1 + z_i
pr_t = pr_t[0][0] + z[i, 0]
pr_target.append(pr_t)
# Expected target
ex_target.append(database_matrix[k+i+tr_size, 0])
pr_result = Error(expected=ex_target, predicted=pr_target)
return pr_result
def save_regressor(self, file_name):
"""
Save current classifier/regressor to file_name file.
"""
try:
# First save all class attributes
file = file_name
with open(file, 'wb') as f:
pickle.dump(self, f, protocol=pickle.HIGHEST_PROTOCOL)
except:
print("Error while saving ", file_name)
return
else:
print("Saved model as: ", file_name)
def load_regressor(self, file_name):
"""
Load classifier/regressor to memory.
"""
        try:
            # Load the pickled object; rebinding 'self' below only changes the
            # local name, so callers must use the returned object.
            file = file_name
            with open(file, 'rb') as f:
                self = pickle.load(f)
except:
print("Error while loading ", file_name)
return
return self
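# Illustrative sketch (not part of the library): persisting and reloading a
# trained regressor with save_regressor()/load_regressor(). "SomeRegressor"
# is a hypothetical MLTools subclass that implements _local_train/_local_test.
run_persistence_example = False
if run_persistence_example:
    regressor = SomeRegressor()               # hypothetical concrete subclass
    # ... train it via its public training method ...
    regressor.save_regressor("regressor.pkl")
    restored = MLTools().load_regressor("regressor.pkl")
    print("reloaded object trained:", restored.has_trained)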
class Error(object):
"""
Error is a class that saves expected and predicted values to calculate
error metrics.
Attributes:
regressor_name (str): Deprecated.
expected_targets (numpy.ndarray): array of expected values.
predicted_targets (numpy.ndarray): array of predicted values.
dict_errors (dict): a dictionary containing all calculated errors
and their values.
"""
available_error_metrics = ["rmse", "mse", "mae", "me", "mpe", "mape",
"std", "hr", "hr+", "hr-", "accuracy"]
def __init__(self, expected, predicted, regressor_name=""):
if type(expected) is list:
expected = np.array(expected)
if type(predicted) is list:
predicted = np.array(predicted)
expected = expected.flatten()
predicted = predicted.flatten()
self.regressor_name = regressor_name
self.expected_targets = expected
self.predicted_targets = predicted
self.dict_errors = {}
for error in self.available_error_metrics:
self.dict_errors[error] = "Not calculated"
def _calc(self, name, expected, predicted):
"""
a
"""
if self.dict_errors[name] == "Not calculated":
if name == "mae":
error = expected - predicted
self.dict_errors[name] = np.mean(np.fabs(error))
elif name == "me":
error = expected - predicted
self.dict_errors[name] = error.mean()
elif name == "mse":
error = expected - predicted
self.dict_errors[name] = (error ** 2).mean()
elif name == "rmse":
error = expected - predicted
self.dict_errors[name] = np.sqrt((error ** 2).mean())
elif name == "mpe":
if np.count_nonzero(expected != 0) == 0:
self.dict_errors[name] = np.nan
else:
# Remove all indexes that have 0, so I can calculate
# relative error
find_zero = expected != 0
_et = np.extract(find_zero, expected)
_pt = np.extract(find_zero, predicted)
relative_error = (_et - _pt) / _et
self.dict_errors[name] = 100 * relative_error.mean()
elif name == "mape":
if np.count_nonzero(expected != 0) == 0:
self.dict_errors[name] = np.nan
else:
# Remove all indexes that have 0, so I can calculate
# relative error
find_zero = expected != 0
_et = np.extract(find_zero, expected)
_pt = np.extract(find_zero, predicted)
relative_error = (_et - _pt) / _et
self.dict_errors[name] = \
100 * np.fabs(relative_error).mean()
elif name == "std":
error = expected - predicted
self.dict_errors[name] = np.std(error)
elif name == "hr":
_c = expected * predicted
if np.count_nonzero(_c != 0) == 0:
self.dict_errors[name] = np.nan
else:
self.dict_errors[name] = np.count_nonzero(_c > 0) / \
np.count_nonzero(_c != 0)
elif name == "hr+":
_a = expected
_b = predicted
if np.count_nonzero(_b > 0) == 0:
self.dict_errors[name] = np.nan
else:
self.dict_errors[name] = \
np.count_nonzero((_a > 0) * (_b > 0)) / \
np.count_nonzero(_b > 0)
elif name == "hr-":
_a = expected
_b = predicted
if np.count_nonzero(_b < 0) == 0:
self.dict_errors[name] = np.nan
else:
self.dict_errors[name] = \
np.count_nonzero((_a < 0) * (_b < 0)) / \
np.count_nonzero(_b < 0)
elif name == "accuracy":
_a = expected.astype(int)
                _b = np.round(predicted)
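# Illustrative sketch: building an Error object from expected/predicted lists
# and asking for a single cached metric via the internal _calc helper.
run_error_example = False
if run_error_example:
    err = Error(expected=[1.0, 2.0, 3.0], predicted=[1.1, 1.9, 3.2])
    err._calc("rmse", err.expected_targets, err.predicted_targets)
    print("RMSE:", err.dict_errors["rmse"])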
import numpy as np
import scipy.sparse as sp
import torch
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)), dtype=np.int32)
return labels_onehot
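# Illustrative sketch: encode_onehot() maps string class labels to one-hot rows.
# The labels below are made up; column order follows set() iteration order.
run_onehot_example = False
if run_onehot_example:
    demo_labels = ["Neural_Networks", "Rule_Learning", "Neural_Networks"]
    print(encode_onehot(demo_labels))  # shape (3, 2), dtype int32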
def load_data(path="./data/cora/", dataset="cora"):
"""Load citation network dataset (cora only for now)"""
print('Loading {} dataset...'.format(dataset))
idx_features_labels = np.genfromtxt("{}{}.content".format(path, dataset), dtype=np.dtype(str))
features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
labels = encode_onehot(idx_features_labels[:, -1])
# build graph
idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
idx_map = {j: i for i, j in enumerate(idx)}
edges_unordered = np.genfromtxt("{}{}.cites".format(path, dataset), dtype=np.int32)
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())), dtype=np.int32).reshape(edges_unordered.shape)
    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                        shape=(labels.shape[0], labels.shape[0]), dtype=np.float32)
import math
import numpy as np
import hes5
from numpy import number
import os.path
from numba import jit
# suppresses annoying performance warnings about np.dot() being
# faster on contiguous arrays. should look at fixing it but this
# is good for now
from numba.core.errors import NumbaPerformanceWarning
import warnings
warnings.simplefilter('ignore', category=NumbaPerformanceWarning)
from scipy.stats import gamma, multivariate_normal, uniform
import multiprocessing as mp
def kalman_filter(protein_at_observations,model_parameters,measurement_variance,derivative=True):
"""
Perform Kalman-Bucy filter based on observation of protein
copy numbers. This implements the filter described by Calderazzo et al., Bioinformatics (2018).
Parameters
----------
protein_at_observations : numpy array.
Observed protein. The dimension is n x 2, where n is the number of observation time points.
The first column is the time, and the second column is the observed protein copy number at
that time. The filter assumes that observations are generated with a fixed, regular time interval.
model_parameters : numpy array.
An array containing the model parameters in the following order:
repression_threshold, hill_coefficient, mRNA_degradation_rate,
protein_degradation_rate, basal_transcription_rate, translation_rate,
transcription_delay.
measurement_variance : float.
The variance in our measurement. This is given by Sigma_e in Calderazzo et. al. (2018).
derivative : bool.
True if you want derivative calculations, False if not.
Returns
-------
state_space_mean : numpy array.
An array of dimension n x 3, where n is the number of inferred time points.
The first column is time, the second column is the mean mRNA, and the third
column is the mean protein. Time points are generated every minute
state_space_variance : numpy array.
An array of dimension 2n x 2n.
[ cov( mRNA(t0:tn),mRNA(t0:tn) ), cov( protein(t0:tn),mRNA(t0:tn) ),
cov( mRNA(t0:tn),protein(t0:tn) ), cov( protein(t0:tn),protein(t0:tn) ]
state_space_mean_derivative : numpy array.
An array of dimension n x m x 2, where n is the number of inferred time points,
and m is the number of parameters. The m columns in the second dimension are the
derivative of the state mean with respect to each parameter. The two elements in
the third dimension represent the derivative of mRNA and protein respectively
state_space_variance_derivative : numpy array.
An array of dimension 7 x 2n x 2n.
[ d[cov( mRNA(t0:tn),mRNA(t0:tn) )]/d_theta, d[cov( protein(t0:tn),mRNA(t0:tn) )]/d_theta,
d[cov( mRNA(t0:tn),protein(t0:tn) )/]d_theta, d[cov( protein(t0:tn),protein(t0:tn) )]/d_theta ]
predicted_observation_distributions : numpy array.
An array of dimension n x 3 where n is the number of observation time points.
The first column is time, the second and third columns are the mean and variance
of the distribution of the expected observations at each time point, respectively.
predicted_observation_mean_derivatives : numpy array.
An array of dimension n x m x 2, where n is the number of observation time points,
and m is the number of parameters. This gives the (non-updated) predicted derivative of the state
space mean at each observation time point, wrt each parameter
predicted_observation_variance_derivatives : numpy array.
An array of dimension n x m x 2 x 2, where n is the number of observation time points,
and m is the number of parameters. This gives the (non-updated) predicted derivative of the state
space variance at each observation time point, wrt each parameter
"""
time_delay = model_parameters[6]
if protein_at_observations.reshape(-1,2).shape[0] == 1:
        number_of_observations = 1
observation_time_step = 10.0
else:
number_of_observations = protein_at_observations.shape[0]
observation_time_step = protein_at_observations[1,0]-protein_at_observations[0,0]
# This is the time step dt in the forward euler scheme
discretisation_time_step = 1.0
# This is the delay as an integer multiple of the discretization timestep so that we can index with it
discrete_delay = int(np.around(time_delay/discretisation_time_step))
number_of_hidden_states = int(np.around(observation_time_step/discretisation_time_step))
initial_number_of_states = discrete_delay + 1
total_number_of_states = initial_number_of_states + (number_of_observations - 1)*number_of_hidden_states
# scaling factors for mRNA and protein respectively. For example, observation might be fluorescence,
# so the scaling would correspond to how light intensity relates to molecule number.
observation_transform = np.array([0.0,1.0])
state_space_mean, state_space_variance, state_space_mean_derivative, state_space_variance_derivative, predicted_observation_distributions, predicted_observation_mean_derivatives, predicted_observation_variance_derivatives = kalman_filter_state_space_initialisation(protein_at_observations,
model_parameters,
measurement_variance,
derivative)
# loop through observations and at each observation apply the Kalman prediction step and then the update step
# for observation_index, current_observation in enumerate(protein_at_observations[1:]):
for observation_index in range(len(protein_at_observations)-1):
if number_of_observations != 1:
current_observation = protein_at_observations[1+observation_index,:]
state_space_mean, state_space_variance, state_space_mean_derivative, state_space_variance_derivative = kalman_prediction_step(state_space_mean,
state_space_variance,
state_space_mean_derivative,
state_space_variance_derivative,
current_observation,
model_parameters,
observation_time_step,
derivative)
current_number_of_states = int(np.around(current_observation[0]/observation_time_step))*number_of_hidden_states + initial_number_of_states
# between the prediction and update steps we record the mean and sd for our likelihood, and the derivatives of the mean and variance for the
# derivative of the likelihood wrt the parameters
predicted_observation_distributions[observation_index + 1] = kalman_observation_distribution_parameters(predicted_observation_distributions,
current_observation,
state_space_mean,
state_space_variance,
current_number_of_states,
total_number_of_states,
measurement_variance,
observation_index)
if derivative:
predicted_observation_mean_derivatives[observation_index + 1], predicted_observation_variance_derivatives[observation_index + 1] = kalman_observation_derivatives(predicted_observation_mean_derivatives,
predicted_observation_variance_derivatives,
current_observation,
state_space_mean_derivative,
state_space_variance_derivative,
current_number_of_states,
total_number_of_states,
observation_index)
state_space_mean, state_space_variance, state_space_mean_derivative, state_space_variance_derivative = kalman_update_step(state_space_mean,
state_space_variance,
state_space_mean_derivative,
state_space_variance_derivative,
current_observation,
time_delay,
observation_time_step,
measurement_variance,
derivative)
return state_space_mean, state_space_variance, state_space_mean_derivative, state_space_variance_derivative, predicted_observation_distributions, predicted_observation_mean_derivatives, predicted_observation_variance_derivatives
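# Illustrative sketch (not a test from this module): calling kalman_filter on
# synthetic, regularly spaced protein observations. The observation values and
# the parameter vector below are made-up placeholders, in the order documented
# above: [repression_threshold, hill_coefficient, mRNA_degradation_rate,
#         protein_degradation_rate, basal_transcription_rate, translation_rate, delay].
run_kalman_example = False
if run_kalman_example:
    example_observations = np.array([[10.0 * t, 60000.0 + 500.0 * np.sin(t)] for t in range(20)])
    example_parameters = np.array([10000.0, 5.0, np.log(2) / 30, np.log(2) / 90, 1.0, 1.0, 29.0])
    outputs = kalman_filter(example_observations, example_parameters,
                            measurement_variance=10000.0, derivative=False)
    state_space_mean = outputs[0]
    print("inferred mean shape:", state_space_mean.shape)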
def kalman_filter_state_space_initialisation(protein_at_observations,model_parameters,measurement_variance,derivative=True):
"""
A function for initialisation of the state space mean and variance, and update for the "negative" times that
are a result of the time delay. Initialises the negative times using the steady state of the deterministic system,
and then updates them with kalman_update_step.
Parameters
----------
protein_at_observations : numpy array.
Observed protein. The dimension is n x 2, where n is the number of observation time points.
The first column is the time, and the second column is the observed protein copy number at
that time. The filter assumes that observations are generated with a fixed, regular time interval.
model_parameters : numpy array.
An array containing the model parameters in the following order:
repression_threshold, hill_coefficient, mRNA_degradation_rate,
protein_degradation_rate, basal_transcription_rate, translation_rate,
transcription_delay.
measurement_variance : float.
The variance in our measurement. This is given by Sigma_e in Calderazzo et. al. (2018).
Returns
-------
state_space_mean : numpy array.
An array of dimension n x 3, where n is the number of inferred time points.
The first column is time, the second column is the mean mRNA, and the third
column is the mean protein. Time points are generated every minute
state_space_variance : numpy array.
An array of dimension 2n x 2n.
[ cov( mRNA(t0:tn),mRNA(t0:tn) ), cov( protein(t0:tn),mRNA(t0:tn) ),
cov( mRNA(t0:tn),protein(t0:tn) ), cov( protein(t0:tn),protein(t0:tn) ]
state_space_mean_derivative : numpy array.
An array of dimension n x m x 2, where n is the number of inferred time points,
and m is the number of parameters. The m columns in the second dimension are the
derivative of the state mean with respect to each parameter. The two elements in
the third dimension represent the derivative of mRNA and protein respectively
state_space_variance_derivative : numpy array.
An array of dimension 7 x 2n x 2n.
[ d[cov( mRNA(t0:tn),mRNA(t0:tn) )]/d_theta, d[cov( protein(t0:tn),mRNA(t0:tn) )]/d_theta,
d[cov( mRNA(t0:tn),protein(t0:tn) )/]d_theta, d[cov( protein(t0:tn),protein(t0:tn) )]/d_theta ]
predicted_observation_distributions : numpy array.
An array of dimension n x 3 where n is the number of observation time points.
The first column is time, the second and third columns are the mean and variance
of the distribution of the expected observations at each time point, respectively
predicted_observation_mean_derivatives : numpy array.
An array of dimension n x m x 2, where n is the number of observation time points,
and m is the number of parameters. This gives the (non-updated) predicted derivative of the state
space mean at each observation time point, wrt each parameter
predicted_observation_variance_derivatives : numpy array.
An array of dimension n x m x 2 x 2, where n is the number of observation time points,
and m is the number of parameters. This gives the (non-updated) predicted derivative of the state
space variance at each observation time point, wrt each parameter
"""
time_delay = model_parameters[6]
# This is the time step dt in the forward euler scheme
discretisation_time_step = 1.0
# This is the delay as an integer multiple of the discretization timestep so that we can index with it
discrete_delay = int(np.around(time_delay/discretisation_time_step))
if protein_at_observations.reshape(-1,2).shape[0] == 1:
observation_time_step = 10.0
number_of_observations = 1
else:
observation_time_step = protein_at_observations[1,0]-protein_at_observations[0,0]
number_of_observations = protein_at_observations.shape[0]
# 'synthetic' observations, which allow us to update backwards in time
number_of_hidden_states = int(np.around(observation_time_step/discretisation_time_step))
## initialise "negative time" with the mean and standard deviations of the LNA
initial_number_of_states = discrete_delay + 1
total_number_of_states = initial_number_of_states + (number_of_observations - 1)*number_of_hidden_states
state_space_mean = np.zeros((total_number_of_states,3))
state_space_mean[:initial_number_of_states,(1,2)] = hes5.calculate_steady_state_of_ode(repression_threshold=model_parameters[0],
hill_coefficient=model_parameters[1],
mRNA_degradation_rate=model_parameters[2],
protein_degradation_rate=model_parameters[3],
basal_transcription_rate=model_parameters[4],
translation_rate=model_parameters[5])
if protein_at_observations.reshape(-1,2).shape[0] == 1:
final_observation_time = 0
else:
final_observation_time = protein_at_observations[-1,0]
# assign time entries
state_space_mean[:,0] = np.linspace(protein_at_observations[0,0]-discrete_delay,final_observation_time,total_number_of_states)
# initialise initial covariance matrix
state_space_variance = np.zeros((2*(total_number_of_states),2*(total_number_of_states)))
# the top left block of the matrix corresponds to the mRNA covariance, see docstring above
initial_mRNA_scaling = 20.0
initial_mRNA_variance = state_space_mean[0,1]*initial_mRNA_scaling
np.fill_diagonal( state_space_variance[:initial_number_of_states,:initial_number_of_states] , initial_mRNA_variance)
    # the bottom right block of the matrix corresponds to the protein covariance, see docstring above
initial_protein_scaling = 100.0
initial_protein_variance = state_space_mean[0,2]*initial_protein_scaling
np.fill_diagonal( state_space_variance[total_number_of_states:total_number_of_states + initial_number_of_states,
total_number_of_states:total_number_of_states + initial_number_of_states] , initial_protein_variance )
observation_transform = np.array([0.0,1.0])
predicted_observation_distributions = np.zeros((number_of_observations,3))
predicted_observation_distributions[0,0] = 0
predicted_observation_distributions[0,1] = observation_transform.dot(state_space_mean[initial_number_of_states-1,1:3])
# making it numba-ready
last_predicted_covariance_matrix = np.zeros((2,2))
for short_row_index, long_row_index in enumerate([initial_number_of_states-1,
total_number_of_states+initial_number_of_states-1]):
for short_column_index, long_column_index in enumerate([initial_number_of_states -1,
total_number_of_states+initial_number_of_states-1]):
last_predicted_covariance_matrix[short_row_index,short_column_index] = state_space_variance[long_row_index,
long_column_index]
predicted_observation_distributions[0,2] = (observation_transform.dot(
last_predicted_covariance_matrix).dot(observation_transform.transpose())
+
measurement_variance)
####################################################################
####################################################################
##
## initialise derivative arrays
##
####################################################################
####################################################################
#
state_space_mean_derivative = np.zeros((total_number_of_states,7,2))
state_space_variance_derivative = np.zeros((7,2*total_number_of_states,2*total_number_of_states))
predicted_observation_mean_derivatives = np.zeros((number_of_observations,7,2))
predicted_observation_mean_derivatives[0] = state_space_mean_derivative[initial_number_of_states-1]
predicted_observation_variance_derivatives = np.zeros((number_of_observations,7,2,2))
if derivative:
state_space_mean_derivative = np.zeros((total_number_of_states,7,2))
repression_threshold = model_parameters[0]
hill_coefficient = model_parameters[1]
mRNA_degradation_rate = model_parameters[2]
protein_degradation_rate = model_parameters[3]
basal_transcription_rate = model_parameters[4]
translation_rate = model_parameters[5]
transcription_delay = model_parameters[6]
steady_state_protein = state_space_mean[0,2]
hill_function_value = 1.0/(1.0+np.power(steady_state_protein/repression_threshold,hill_coefficient))
hill_function_derivative_value_wrt_protein = - hill_coefficient*np.power(steady_state_protein/repression_threshold,
hill_coefficient - 1)/( repression_threshold*
np.power(1.0+np.power( steady_state_protein/repression_threshold,
hill_coefficient),2))
protein_derivative_denominator_scalar = (basal_transcription_rate*translation_rate)/(mRNA_degradation_rate*protein_degradation_rate)
initial_protein_derivative_denominator = (protein_derivative_denominator_scalar*hill_function_derivative_value_wrt_protein) - 1
# assign protein derivative first, since mRNA derivative is given as a function of protein derivative
hill_function_derivative_value_wrt_repression = hill_coefficient*np.power(steady_state_protein/repression_threshold,
hill_coefficient)/( repression_threshold*
np.power(1.0+np.power( steady_state_protein/repression_threshold,
hill_coefficient),2))
hill_function_derivative_value_wrt_hill_coefficient = - np.log(steady_state_protein/repression_threshold)*np.power(steady_state_protein/repression_threshold,
hill_coefficient)/( np.power(1.0+np.power( steady_state_protein/repression_threshold,
hill_coefficient),2))
# repression threshold
state_space_mean_derivative[:initial_number_of_states,0,1] = - (protein_derivative_denominator_scalar*hill_function_derivative_value_wrt_repression)/(
initial_protein_derivative_denominator)
state_space_mean_derivative[:initial_number_of_states,0,0] = (protein_degradation_rate/translation_rate)*state_space_mean_derivative[0,0,1]
# hill coefficient
state_space_mean_derivative[:initial_number_of_states,1,1] = - (protein_derivative_denominator_scalar*hill_function_derivative_value_wrt_hill_coefficient)/(
initial_protein_derivative_denominator)
state_space_mean_derivative[:initial_number_of_states,1,0] = (protein_degradation_rate/translation_rate)*state_space_mean_derivative[0,1,1]
# mRNA degradation
state_space_mean_derivative[:initial_number_of_states,2,1] = (protein_derivative_denominator_scalar*hill_function_value)/(
mRNA_degradation_rate*initial_protein_derivative_denominator)
state_space_mean_derivative[:initial_number_of_states,2,0] = (protein_degradation_rate/translation_rate)*state_space_mean_derivative[0,2,1]
# protein degradation
state_space_mean_derivative[:initial_number_of_states,3,1] = (protein_derivative_denominator_scalar*hill_function_value)/(
protein_degradation_rate*initial_protein_derivative_denominator)
state_space_mean_derivative[:initial_number_of_states,3,0] = (steady_state_protein + protein_degradation_rate*state_space_mean_derivative[0,3,1])/translation_rate
# basal transcription
state_space_mean_derivative[:initial_number_of_states,4,1] = -(protein_derivative_denominator_scalar*hill_function_value)/(
basal_transcription_rate*initial_protein_derivative_denominator)
state_space_mean_derivative[:initial_number_of_states,4,0] = (protein_degradation_rate/translation_rate)*state_space_mean_derivative[0,4,1]
# translation
state_space_mean_derivative[:initial_number_of_states,5,1] = -(protein_derivative_denominator_scalar*hill_function_value)/(
translation_rate*initial_protein_derivative_denominator)
state_space_mean_derivative[:initial_number_of_states,5,0] = -(protein_degradation_rate/translation_rate)*((steady_state_protein/translation_rate) -
state_space_mean_derivative[0,5,1])
# transcriptional delay
state_space_mean_derivative[:initial_number_of_states,6,1] = 0
state_space_mean_derivative[:initial_number_of_states,6,0] = 0
state_space_variance_derivative = np.zeros((7,2*total_number_of_states,2*total_number_of_states))
for parameter_index in range(7):
np.fill_diagonal(state_space_variance_derivative[parameter_index,:initial_number_of_states,:initial_number_of_states],
initial_mRNA_scaling*state_space_mean_derivative[0,parameter_index,0])
np.fill_diagonal(state_space_variance_derivative[parameter_index,
total_number_of_states:total_number_of_states + initial_number_of_states,
total_number_of_states:total_number_of_states + initial_number_of_states],
initial_protein_scaling*state_space_mean_derivative[0,parameter_index,1])
predicted_observation_mean_derivatives = np.zeros((number_of_observations,7,2))
predicted_observation_mean_derivatives[0] = state_space_mean_derivative[initial_number_of_states-1]
predicted_observation_variance_derivatives = np.zeros((number_of_observations,7,2,2))
for parameter_index in range(7):
for short_row_index, long_row_index in enumerate([initial_number_of_states-1,
total_number_of_states+initial_number_of_states-1]):
for short_column_index, long_column_index in enumerate([initial_number_of_states -1,
total_number_of_states+initial_number_of_states-1]):
predicted_observation_variance_derivatives[0,parameter_index,short_row_index,short_column_index] = state_space_variance_derivative[parameter_index,
long_row_index,
long_column_index]
# update the past ("negative time")
if protein_at_observations.reshape(-1,2).shape[0] == 1:
current_observation = protein_at_observations
else:
current_observation = protein_at_observations[0]
state_space_mean, state_space_variance, state_space_mean_derivative, state_space_variance_derivative = kalman_update_step(state_space_mean,
state_space_variance,
state_space_mean_derivative,
state_space_variance_derivative,
current_observation,
time_delay,
observation_time_step,
measurement_variance,
derivative)
return state_space_mean, state_space_variance, state_space_mean_derivative, state_space_variance_derivative, predicted_observation_distributions, predicted_observation_mean_derivatives, predicted_observation_variance_derivatives
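# Illustrative sketch (not part of the original filter): the initial-condition
# derivatives assigned above follow from implicit differentiation of the protein
# steady state p* = c*f(p*), with c = basal_transcription_rate*translation_rate/
# (mRNA_degradation_rate*protein_degradation_rate) and f the Hill function.
# The helper below is hypothetical, uses made-up parameter values, and only
# checks the repression-threshold derivative against a central finite difference.
def _example_check_steady_state_derivative():
    import numpy as np
    from scipy.optimize import brentq
    hill_coefficient = 4.1
    mRNA_degradation_rate, protein_degradation_rate = np.log(2)/30.0, np.log(2)/90.0
    basal_transcription_rate, translation_rate = 1.0, 1.0
    scalar = basal_transcription_rate*translation_rate/(mRNA_degradation_rate*protein_degradation_rate)
    def steady_state_protein(repression_threshold):
        # the steady state solves p = scalar/(1 + (p/repression_threshold)**hill_coefficient)
        return brentq(lambda p: p - scalar/(1.0 + (p/repression_threshold)**hill_coefficient), 1e-3, 1e7)
    repression_threshold = 10000.0
    p = steady_state_protein(repression_threshold)
    x = (p/repression_threshold)**hill_coefficient
    df_dP0 = hill_coefficient*x/(repression_threshold*(1.0 + x)**2)
    df_dp = -hill_coefficient*(p/repression_threshold)**(hill_coefficient - 1)/(repression_threshold*(1.0 + x)**2)
    analytic = -(scalar*df_dP0)/(scalar*df_dp - 1.0)     # same form as the assignment above
    numeric = (steady_state_protein(1.001*repression_threshold) -
               steady_state_protein(0.999*repression_threshold))/(0.002*repression_threshold)
    return analytic, numeric   # the two values should agree closely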
# @jit(nopython = True)
def kalman_observation_distribution_parameters(predicted_observation_distributions,
current_observation,
state_space_mean,
state_space_variance,
current_number_of_states,
total_number_of_states,
measurement_variance,
observation_index):
"""
A function which updates the mean and variance for the distributions which describe the likelihood of
our observations, given some model parameters.
Parameters
----------
predicted_observation_distributions : numpy array.
An array of dimension n x 3 where n is the number of observation time points.
The first column is time, the second and third columns are the mean and variance
of the distribution of the expected observations at each time point, respectively
current_observation : int.
Observed protein at the current time. The dimension is 1 x 2.
The first column is the time, and the second column is the observed protein copy number at
that time
state_space_mean : numpy array
An array of dimension n x 3, where n is the number of inferred time points.
The first column is time, the second column is the mean mRNA, and the third
column is the mean protein. Time points are generated every minute
state_space_variance : numpy array.
An array of dimension 2n x 2n.
[ cov( mRNA(t0:tn),mRNA(t0:tn) ), cov( protein(t0:tn),mRNA(t0:tn) ),
cov( mRNA(t0:tn),protein(t0:tn) ), cov( protein(t0:tn),protein(t0:tn) ) ]
current_number_of_states : float.
The current number of (hidden and observed) states up to the current observation time point.
This includes the initial states (with negative time).
total_number_of_states : float.
The total number of states that will be predicted by the kalman_filter function
measurement_variance : float.
The variance in our measurement. This is given by Sigma_e in Calderazzo et. al. (2018).
observation_index : int.
The index for the current observation time in the main kalman_filter loop
Returns
-------
predicted_observation_distributions[observation_index + 1] : numpy array.
An array of dimension 1 x 3.
The first column is time, the second and third columns are the mean and variance
of the distribution of the expected observations at the current time point, respectively.
"""
observation_transform = np.array([0.0,1.0])
predicted_observation_distributions[observation_index+1,0] = current_observation[0]
predicted_observation_distributions[observation_index+1,1] = observation_transform.dot(state_space_mean[current_number_of_states-1,1:3])
# not using np.ix_-like indexing to make it numba-ready
last_predicted_covariance_matrix = np.zeros((2,2))
for short_row_index, long_row_index in enumerate([current_number_of_states-1,
total_number_of_states+current_number_of_states-1]):
for short_column_index, long_column_index in enumerate([current_number_of_states -1,
total_number_of_states+current_number_of_states-1]):
last_predicted_covariance_matrix[short_row_index,short_column_index] = state_space_variance[long_row_index,
long_column_index]
predicted_observation_distributions[observation_index+1,2] = (observation_transform.dot(
last_predicted_covariance_matrix).dot(observation_transform.transpose())
+
measurement_variance)
return predicted_observation_distributions[observation_index + 1]
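# Illustrative sketch (hypothetical numbers, not from the original code): with the
# observation matrix F = [0, 1] used above (only protein is observed), the predicted
# observation mean and variance are F.dot(rho) and F.dot(P).dot(F) + measurement_variance.
def _example_observation_distribution():
    import numpy as np
    F = np.array([0.0, 1.0])
    rho = np.array([3.2, 150.0])                  # [mean mRNA, mean protein]
    P = np.array([[4.0, 1.0],
                  [1.0, 25.0]])                   # 2x2 covariance at the current state
    measurement_variance = 100.0
    mean = F.dot(rho)                             # 150.0
    variance = F.dot(P).dot(F) + measurement_variance   # 25.0 + 100.0 = 125.0
    return mean, variance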
# @jit(nopython = True)
def kalman_observation_derivatives(predicted_observation_mean_derivatives,
predicted_observation_variance_derivatives,
current_observation,
state_space_mean_derivative,
state_space_variance_derivative,
current_number_of_states,
total_number_of_states,
observation_index):
"""
Parameters
----------
predicted_observation_mean_derivatives : numpy array.
An array of dimension n x m x 2, where n is the number of observation time points,
and m is the number of parameters. This gives the (non-updated) predicted derivative of the state
space mean at each observation time point, wrt each parameter
predicted_observation_variance_derivatives : numpy array.
An array of dimension n x m x 2 x 2, where n is the number of observation time points,
and m is the number of parameters. This gives the (non-updated) predicted derivative of the state
space variance at each observation time point, wrt each parameter
current_observation : numpy array.
A 1 x 2 array which describes the observation of protein at the current time point. The first
column is time, and the second column is the protein level
state_space_mean_derivative : numpy array.
An array of dimension n x m x 2, where n is the number of inferred time points,
and m is the number of parameters. The m columns in the second dimension are the
derivative of the state mean with respect to each parameter. The two elements in
the third dimension represent the derivative of mRNA and protein respectively
state_space_variance_derivative : numpy array.
An array of dimension 7 x 2n x 2n.
[ d[cov( mRNA(t0:tn),mRNA(t0:tn) )]/d_theta, d[cov( protein(t0:tn),mRNA(t0:tn) )]/d_theta,
d[cov( mRNA(t0:tn),protein(t0:tn) )]/d_theta, d[cov( protein(t0:tn),protein(t0:tn) )]/d_theta ]
current_number_of_states : float.
The current number of (hidden and observed) states up to the current observation time point.
This includes the initial states (with negative time).
total_number_of_states : float.
The total number of (observed and hidden) states, used to index the variance matrix
observation_index : int.
The index for the current observation time in the main kalman_filter loop
Returns
-------
predicted_observation_mean_derivatives[observation_index + 1] : numpy array.
An array of dimension 7 x 2, which contains the derivative of the mean mRNA
and protein wrt each parameter at the current observation time point
predicted_observation_variance_derivatives[observation_index + 1] : numpy array.
An array of dimension 7 x 2 x 2, which describes the derivative of the state
space variance wrt each parameter for the current time point
"""
predicted_observation_mean_derivatives[observation_index+1] = state_space_mean_derivative[current_number_of_states-1]
for parameter_index in range(7):
for short_row_index, long_row_index in enumerate([current_number_of_states-1,
total_number_of_states+current_number_of_states-1]):
for short_column_index, long_column_index in enumerate([current_number_of_states-1,
total_number_of_states+current_number_of_states-1]):
predicted_observation_variance_derivatives[observation_index+1,parameter_index,short_row_index,short_column_index] = state_space_variance_derivative[parameter_index,
long_row_index,
long_column_index]
return predicted_observation_mean_derivatives[observation_index + 1], predicted_observation_variance_derivatives[observation_index + 1]
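# Illustrative sketch: the nested loops above exist only to stay numba-friendly;
# in plain numpy the same 2x2 block extraction can be written with np.ix_.
# Sizes below are made up.
def _example_block_extraction_with_ix():
    import numpy as np
    total_number_of_states, current_number_of_states = 5, 3
    state_space_variance = np.arange(100.0).reshape(10, 10)
    idx = [current_number_of_states - 1,
           total_number_of_states + current_number_of_states - 1]
    block = state_space_variance[np.ix_(idx, idx)]
    # equivalent to the explicit double loop used in the functions above
    loop_block = np.zeros((2, 2))
    for short_row, long_row in enumerate(idx):
        for short_col, long_col in enumerate(idx):
            loop_block[short_row, short_col] = state_space_variance[long_row, long_col]
    assert np.array_equal(block, loop_block)
    return block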
# @jit(nopython = True)
def kalman_prediction_step(state_space_mean,
state_space_variance,
state_space_mean_derivative,
state_space_variance_derivative,
current_observation,
model_parameters,
observation_time_step,
derivative):
"""
Perform the Kalman filter prediction about future observation, based on current knowledge i.e. current
state space mean and variance. This gives rho_{t+\delta t-tau:t+\delta t} and P_{t+\delta t-tau:t+\delta t},
using the differential equations in supplementary section 4 of Calderazzo et al., Bioinformatics (2018),
approximated using a forward Euler scheme.
TODO: update variable descriptions
Parameters
----------
state_space_mean : numpy array.
The dimension is n x 3, where n is the number of states until the current time.
The first column is time, the second column is mean mRNA, and the third column is mean protein. It
represents the information based on observations we have already made.
state_space_variance : numpy array.
The dimension is 2n x 2n, where n is the number of states until the current time. The definition
is identical to the one provided in the Kalman filter function, i.e.
[ cov( mRNA(t0:tn),mRNA(t0:tn) ), cov( protein(t0:tn),mRNA(t0:tn) ),
cov( mRNA(t0:tn),protein(t0:tn) ), cov( protein(t0:tn),protein(t0:tn) ) ]
state_space_mean_derivative : numpy array.
An array of dimension n x m x 2, where n is the number of inferred time points,
and m is the number of parameters. The m columns in the second dimension are the
derivative of the state mean with respect to each parameter. The two elements in
the third dimension represent the derivative of mRNA and protein respectively
state_space_variance_derivative : numpy array.
An array of dimension 7 x 2n x 2n.
[ d[cov( mRNA(t0:tn),mRNA(t0:tn) )]/d_theta, d[cov( protein(t0:tn),mRNA(t0:tn) )]/d_theta,
d[cov( mRNA(t0:tn),protein(t0:tn) )]/d_theta, d[cov( protein(t0:tn),protein(t0:tn) )]/d_theta ]
current_observation : numpy array.
The dimension is 1 x 2, where the first entry is time, and the second is the protein observation.
model_parameters : numpy array.
An array containing the model parameters. The order is identical to the one provided in the
Kalman filter function documentation, i.e.
repression_threshold, hill_coefficient, mRNA_degradation_rate,
protein_degradation_rate, basal_transcription_rate, translation_rate,
transcription_delay.
observation_time_step : float.
This gives the time between each experimental observation. This is required to know how far
the function should predict.
Returns
-------
predicted_state_space_mean : numpy array.
The dimension is n x 3, where n is the number of previous observations until the current time.
The first column is time, the second column is mean mRNA, and the third column is mean protein.
predicted_state_space_variance : numpy array.
The dimension is 2n x 2n, where n is the number of previous observations until the current time.
[ cov( mRNA(t0:tn),mRNA(t0:tn) ), cov( protein(t0:tn),mRNA(t0:tn) ),
cov( mRNA(t0:tn),protein(t0:tn) ), cov( protein(t0:tn),protein(t0:tn) ) ]
state_space_mean_derivative : numpy array.
An array of dimension n x m x 2, where n is the number of inferred time points,
and m is the number of parameters. The m columns in the second dimension are the
derivative of the state mean with respect to each parameter. The two elements in
the third dimension represent the derivative of mRNA and protein respectively
state_space_variance_derivative : numpy array.
An array of dimension 7 x 2n x 2n.
[ d[cov( mRNA(t0:tn),mRNA(t0:tn) )]/d_theta, d[cov( protein(t0:tn),mRNA(t0:tn) )]/d_theta,
d[cov( mRNA(t0:tn),protein(t0:tn) )]/d_theta, d[cov( protein(t0:tn),protein(t0:tn) )]/d_theta ]
"""
# This is the time step dt in the forward euler scheme
discretisation_time_step = 1.0
## name the model parameters
repression_threshold = model_parameters[0]
hill_coefficient = model_parameters[1]
mRNA_degradation_rate = model_parameters[2]
protein_degradation_rate = model_parameters[3]
basal_transcription_rate = model_parameters[4]
translation_rate = model_parameters[5]
transcription_delay = model_parameters[6]
discrete_delay = int(np.around(transcription_delay/discretisation_time_step))
number_of_hidden_states = int(np.around(observation_time_step/discretisation_time_step))
# this is the number of states at t, i.e. before predicting towards t+observation_time_step
current_number_of_states = (int(np.around(current_observation[0]/observation_time_step))-1)*number_of_hidden_states + discrete_delay+1
total_number_of_states = state_space_mean.shape[0]
## next_time_index corresponds to 't+Deltat' in the propagation equation on page 5 of the supplementary
## material in the calderazzo paper
# we initialise all our matrices outside of the main for loop for improved performance
# this is P(t,t)
current_covariance_matrix = np.zeros((2,2))
# this is P(t-\tau,t) in page 5 of the supplementary material of Calderazzo et. al.
covariance_matrix_past_to_now = np.zeros((2,2))
# this is P(t,t-\tau) in page 5 of the supplementary material of Calderazzo et. al.
covariance_matrix_now_to_past = np.zeros((2,2))
# This corresponds to P(s,t) in the Calderazzo paper
covariance_matrix_intermediate_to_current = np.zeros((2,2))
# This corresponds to P(s,t-tau)
covariance_matrix_intermediate_to_past = np.zeros((2,2))
# this is d_rho(t)/d_theta
next_mean_derivative = np.zeros((7,2))
# this is d_P(t,t)/d_theta
current_covariance_derivative_matrix = np.zeros((7,2,2))
# this is d_P(t-\tau,t)/d_theta
covariance_derivative_matrix_past_to_now = np.zeros((7,2,2))
# this is d_P(t,t-\tau)/d_theta
covariance_derivative_matrix_now_to_past = np.zeros((7,2,2))
# d_P(t+Deltat,t+Deltat)/d_theta
next_covariance_derivative_matrix = np.zeros((7,2,2))
# initialisation for the common part of the derivative of P(t,t) for each parameter
common_state_space_variance_derivative_element = np.zeros((7,2,2))
# This corresponds to d_P(s,t)/d_theta in the Calderazzo paper
covariance_matrix_derivative_intermediate_to_current = np.zeros((7,2,2))
# This corresponds to d_P(s,t-tau)/d_theta
covariance_matrix_derivative_intermediate_to_past = np.zeros((7,2,2))
# This corresponds to d_P(s,t+Deltat)/d_theta in the Calderazzo paper
covariance_matrix_derivative_intermediate_to_next = np.zeros((7,2,2))
# initialisation for the common part of the derivative of P(s,t) for each parameter
common_intermediate_state_space_variance_derivative_element = np.zeros((7,2,2))
# derivations for the following are found in Calderazzo et. al. (2018)
# g is [[-mRNA_degradation_rate,0], *[M(t),
# [translation_rate,-protein_degradation_rate]] [P(t)]
# and its derivative will be called instant_jacobian
# f is [[basal_transcription_rate*hill_function(past_protein)],0]
# and its derivative with respect to the past state will be called delayed_jacobian
# the matrix A in the paper will be called variance_of_noise
instant_jacobian = np.array([[-mRNA_degradation_rate,0.0],[translation_rate,-protein_degradation_rate]])
instant_jacobian_transpose = np.transpose(instant_jacobian)
for ii, next_time_index in enumerate(range(current_number_of_states, current_number_of_states + number_of_hidden_states)):
current_time_index = next_time_index - 1 # this corresponds to t
past_time_index = current_time_index - discrete_delay # this corresponds to t-tau
# indexing with 1:3 for numba
current_mean = state_space_mean[current_time_index,1:3]
past_protein = state_space_mean[past_time_index,2]
if ii == 0:
print(current_mean)
print(state_space_mean[past_time_index,1:3])
past_mRNA = state_space_mean[past_time_index,1]
hill_function_value = 1.0/(1.0+np.power(past_protein/repression_threshold,hill_coefficient))
# if ii == 0:
# print(hill_function_value)
hill_function_derivative_value = - hill_coefficient*np.power(past_protein/repression_threshold,
hill_coefficient - 1)/( repression_threshold*
np.power(1.0+np.power( past_protein/repression_threshold,
hill_coefficient),2))
# jacobian of f is derivative of f with respect to past state ([past_mRNA, past_protein])
delayed_jacobian = np.array([[0.0,basal_transcription_rate*hill_function_derivative_value],[0.0,0.0]])
delayed_jacobian_transpose = np.transpose(delayed_jacobian)
## derivative of mean is contributions from instant reactions + contributions from past reactions
derivative_of_mean = ( np.array([[-mRNA_degradation_rate,0.0],
[translation_rate,-protein_degradation_rate]]).dot(current_mean) +
np.array([basal_transcription_rate*hill_function_value,0]) )
next_mean = current_mean + discretisation_time_step*derivative_of_mean
# ensures the prediction is non negative
next_mean = np.maximum(next_mean,0)
# indexing with 1:3 for numba
state_space_mean[next_time_index,1:3] = next_mean
# in the next lines we use for loop instead of np.ix_-like indexing for numba
for short_row_index, long_row_index in enumerate([current_time_index,
total_number_of_states+current_time_index]):
for short_column_index, long_column_index in enumerate([current_time_index,
total_number_of_states+current_time_index]):
current_covariance_matrix[short_row_index,short_column_index] = state_space_variance[long_row_index,
long_column_index]
# this is P(t-\tau,t) in page 5 of the supplementary material of Calderazzo et. al
for short_row_index, long_row_index in enumerate([past_time_index,
total_number_of_states+past_time_index]):
for short_column_index, long_column_index in enumerate([current_time_index,
total_number_of_states+current_time_index]):
covariance_matrix_past_to_now[short_row_index,short_column_index] = state_space_variance[long_row_index,
long_column_index]
# this is P(t,t-\tau) in page 5 of the supplementary material of Calderazzo et. al.
for short_row_index, long_row_index in enumerate([current_time_index,
total_number_of_states+current_time_index]):
for short_column_index, long_column_index in enumerate([past_time_index,
total_number_of_states+past_time_index]):
covariance_matrix_now_to_past[short_row_index,short_column_index] = state_space_variance[long_row_index,
long_column_index]
variance_change_current_contribution = ( instant_jacobian.dot(current_covariance_matrix) +
current_covariance_matrix.dot(instant_jacobian_transpose) )
variance_change_past_contribution = ( delayed_jacobian.dot(covariance_matrix_past_to_now) +
covariance_matrix_now_to_past.dot(delayed_jacobian_transpose) )
variance_of_noise = np.array([[mRNA_degradation_rate*current_mean[0]+basal_transcription_rate*hill_function_value,0],
[0,translation_rate*current_mean[0]+protein_degradation_rate*current_mean[1]]])
derivative_of_variance = ( variance_change_current_contribution +
variance_change_past_contribution +
variance_of_noise )
# P(t+Deltat,t+Deltat)
next_covariance_matrix = current_covariance_matrix + discretisation_time_step*derivative_of_variance
# ensure that the diagonal entries are non negative
np.fill_diagonal(next_covariance_matrix,np.maximum(np.diag(next_covariance_matrix),0))
# in the next lines we use for loop instead of np.ix_-like indexing for numba
for short_row_index, long_row_index in enumerate([next_time_index,
total_number_of_states+next_time_index]):
for short_column_index, long_column_index in enumerate([next_time_index,
total_number_of_states+next_time_index]):
state_space_variance[long_row_index,long_column_index] = next_covariance_matrix[short_row_index,
short_column_index]
## now we need to update the cross correlations, P(s,t) in the Calderazzo paper
# the range needs to include t, since we want to propagate P(t,t) into P(t,t+Deltat)
for intermediate_time_index in range(past_time_index,current_time_index+1):
# This corresponds to P(s,t) in the Calderazzo paper
for short_row_index, long_row_index in enumerate([intermediate_time_index,
total_number_of_states+intermediate_time_index]):
for short_column_index, long_column_index in enumerate([current_time_index,
total_number_of_states+current_time_index]):
covariance_matrix_intermediate_to_current[short_row_index,short_column_index] = state_space_variance[long_row_index,
long_column_index]
# This corresponds to P(s,t-tau)
for short_row_index, long_row_index in enumerate([intermediate_time_index,
total_number_of_states+intermediate_time_index]):
for short_column_index, long_column_index in enumerate([past_time_index,
total_number_of_states+past_time_index]):
covariance_matrix_intermediate_to_past[short_row_index,short_column_index] = state_space_variance[long_row_index,
long_column_index]
covariance_derivative = ( covariance_matrix_intermediate_to_current.dot( instant_jacobian_transpose) +
covariance_matrix_intermediate_to_past.dot(delayed_jacobian_transpose))
# This corresponds to P(s,t+Deltat) in the Calderazzo paper
covariance_matrix_intermediate_to_next = covariance_matrix_intermediate_to_current + discretisation_time_step*covariance_derivative
# Fill in the big matrix
for short_row_index, long_row_index in enumerate([intermediate_time_index,
total_number_of_states+intermediate_time_index]):
for short_column_index, long_column_index in enumerate([next_time_index,
total_number_of_states+next_time_index]):
state_space_variance[long_row_index,long_column_index] = covariance_matrix_intermediate_to_next[short_row_index,
short_column_index]
# Fill in the big matrix with transpose arguments, i.e. P(t+Deltat, s) - works if initialised symmetrically
for short_row_index, long_row_index in enumerate([next_time_index,
total_number_of_states+next_time_index]):
for short_column_index, long_column_index in enumerate([intermediate_time_index,
total_number_of_states+intermediate_time_index]):
state_space_variance[long_row_index,long_column_index] = covariance_matrix_intermediate_to_next[short_column_index,
short_row_index]
#################################
####
#### prediction step for the derivatives of the state space mean and variance wrt each parameter
####
#################################
###
### state space mean derivatives
###
if derivative:
# indexing with 1:3 for numba
current_mean_derivative = state_space_mean_derivative[current_time_index,:,0:2]
past_mean_derivative = state_space_mean_derivative[past_time_index,:,0:2]
past_protein_derivative = state_space_mean_derivative[past_time_index,:,1]
# calculate predictions for derivative of mean wrt each parameter
# repression threshold
hill_function_derivative_value_wrt_repression = hill_coefficient*np.power(past_protein/repression_threshold,
hill_coefficient)/( repression_threshold*
np.power(1.0+np.power( past_protein/repression_threshold,
hill_coefficient),
2))
repression_derivative = ( instant_jacobian.dot(current_mean_derivative[0]).reshape((2,1)) +
delayed_jacobian.dot(past_mean_derivative[0]).reshape((2,1)) +
np.array([[basal_transcription_rate*hill_function_derivative_value_wrt_repression],[0.0]]) )
next_mean_derivative[0] = current_mean_derivative[0] + discretisation_time_step*(repression_derivative.reshape((1,2)))
# hill coefficient
hill_function_derivative_value_wrt_hill_coefficient = - np.log(past_protein/repression_threshold)*np.power(past_protein/repression_threshold,
hill_coefficient)/( np.power(1.0+np.power( past_protein/repression_threshold,
hill_coefficient),2))
hill_coefficient_derivative = ( instant_jacobian.dot(current_mean_derivative[1]).reshape((2,1)) +
delayed_jacobian.dot(past_mean_derivative[1]).reshape((2,1)) +
np.array(([[basal_transcription_rate*hill_function_derivative_value_wrt_hill_coefficient],[0.0]])) )
next_mean_derivative[1] = current_mean_derivative[1] + discretisation_time_step*(hill_coefficient_derivative.reshape((1,2)))
# mRNA degradation rate
mRNA_degradation_rate_derivative = ( instant_jacobian.dot(current_mean_derivative[2]).reshape((2,1)) +
delayed_jacobian.dot(past_mean_derivative[2]).reshape((2,1)) +
np.array(([[-current_mean[0]],[0.0]])) )
next_mean_derivative[2] = current_mean_derivative[2] + discretisation_time_step*(mRNA_degradation_rate_derivative.reshape((1,2)))
# protein degradation rate
protein_degradation_rate_derivative = ( instant_jacobian.dot(current_mean_derivative[3]).reshape((2,1)) +
delayed_jacobian.dot(past_mean_derivative[3]).reshape((2,1)) +
np.array(([[0.0],[-current_mean[1]]])) )
next_mean_derivative[3] = current_mean_derivative[3] + discretisation_time_step*(protein_degradation_rate_derivative.reshape((1,2)))
# basal transcription rate
basal_transcription_rate_derivative = ( instant_jacobian.dot(current_mean_derivative[4]).reshape((2,1)) +
delayed_jacobian.dot(past_mean_derivative[4]).reshape((2,1)) +
np.array(([[hill_function_value],[0.0]])) )
next_mean_derivative[4] = current_mean_derivative[4] + discretisation_time_step*(basal_transcription_rate_derivative.reshape((1,2)))
# translation rate
translation_rate_derivative = ( instant_jacobian.dot(current_mean_derivative[5]).reshape((2,1)) +
delayed_jacobian.dot(past_mean_derivative[5]).reshape((2,1)) +
np.array(([[0.0],[current_mean[0]]])) )
next_mean_derivative[5] = current_mean_derivative[5] + discretisation_time_step*(translation_rate_derivative.reshape((1,2)))
# transcriptional delay
transcription_delay_derivative = ( instant_jacobian.dot(current_mean_derivative[6]).reshape((2,1)) +
delayed_jacobian.dot(past_mean_derivative[6]).reshape((2,1)) +
np.array(([[-basal_transcription_rate*hill_function_derivative_value*(
translation_rate*past_mRNA - protein_degradation_rate*past_protein)],[0.0]])) )
next_mean_derivative[6] = current_mean_derivative[6] + discretisation_time_step*(transcription_delay_derivative.reshape((1,2)))
# assign the predicted derivatives to our state_space_mean_derivative array
state_space_mean_derivative[next_time_index] = next_mean_derivative
###
### state space variance derivatives
###
# in the next lines we use for loop instead of np.ix_-like indexing for numba
# this is d_P(t,t)/d_theta
for parameter_index in range(7):
for short_row_index, long_row_index in enumerate([current_time_index,
total_number_of_states+current_time_index]):
for short_column_index, long_column_index in enumerate([current_time_index,
total_number_of_states+current_time_index]):
current_covariance_derivative_matrix[parameter_index,short_row_index,short_column_index] = state_space_variance_derivative[parameter_index,
long_row_index,
long_column_index]
# this is d_P(t-\tau,t)/d_theta
for parameter_index in range(7):
for short_row_index, long_row_index in enumerate([past_time_index,
total_number_of_states+past_time_index]):
for short_column_index, long_column_index in enumerate([current_time_index,
total_number_of_states+current_time_index]):
covariance_derivative_matrix_past_to_now[parameter_index,short_row_index,short_column_index] = state_space_variance_derivative[parameter_index,
long_row_index,
long_column_index]
# this is d_P(t,t-\tau)/d_theta
for parameter_index in range(7):
for short_row_index, long_row_index in enumerate([current_time_index,
total_number_of_states+current_time_index]):
for short_column_index, long_column_index in enumerate([past_time_index,
total_number_of_states+past_time_index]):
covariance_derivative_matrix_now_to_past[parameter_index,short_row_index,short_column_index] = state_space_variance_derivative[parameter_index,
long_row_index,
long_column_index]
## d_P(t+Deltat,t+Deltat)/d_theta
# the derivative is quite long and slightly different for each parameter, meaning it's difficult to
code this part with a loop. For each parameter we divide it into its constituent parts. There is one
# main part in common for every derivative which is defined here as common_state_space_variance_derivative_element
for parameter_index in range(7):
common_state_space_variance_derivative_element[parameter_index] = ( np.dot(instant_jacobian,
current_covariance_derivative_matrix[parameter_index]) +
np.dot(current_covariance_derivative_matrix[parameter_index],
instant_jacobian_transpose) +
np.dot(delayed_jacobian,
covariance_derivative_matrix_past_to_now[parameter_index]) +
np.dot(covariance_derivative_matrix_now_to_past[parameter_index],
delayed_jacobian_transpose) )
hill_function_second_derivative_value = hill_coefficient*np.power(past_protein/repression_threshold,
hill_coefficient)*(
np.power(past_protein/repression_threshold,
hill_coefficient) +
hill_coefficient*(np.power(past_protein/repression_threshold,
hill_coefficient)-1)+1)/( np.power(past_protein,2)*
np.power(1.0+np.power( past_protein/repression_threshold,
hill_coefficient),3))
# repression threshold
# this refers to d(f'(p(t-\tau)))/dp_0
hill_function_second_derivative_value_wrt_repression = -np.power(hill_coefficient,2)*(np.power(past_protein/repression_threshold,
hill_coefficient)-1)*np.power(past_protein/repression_threshold,
hill_coefficient-1)/( np.power(repression_threshold,2)*
(np.power(1.0+np.power( past_protein/repression_threshold,
hill_coefficient),3)))
# instant_jacobian_derivative_wrt_repression = 0
delayed_jacobian_derivative_wrt_repression = (np.array([[0.0,basal_transcription_rate*hill_function_second_derivative_value*past_mean_derivative[0,1]],[0.0,0.0]]) +
np.array([[0.0,basal_transcription_rate*hill_function_second_derivative_value_wrt_repression],[0.0,0.0]]) )
delayed_jacobian_derivative_wrt_repression_transpose = np.transpose(delayed_jacobian_derivative_wrt_repression)
instant_noise_derivative_wrt_repression = (np.array([[mRNA_degradation_rate*current_mean_derivative[0,0],0.0],
[0.0,translation_rate*current_mean_derivative[0,0] + protein_degradation_rate*current_mean_derivative[0,1]]]))
delayed_noise_derivative_wrt_repression = (np.array([[basal_transcription_rate*(hill_function_derivative_value*past_mean_derivative[0,1] + hill_function_derivative_value_wrt_repression),0.0],
[0.0,0.0]]))
derivative_of_variance_wrt_repression_threshold = ( common_state_space_variance_derivative_element[0] +
np.dot(delayed_jacobian_derivative_wrt_repression,covariance_matrix_past_to_now) +
np.dot(covariance_matrix_now_to_past,delayed_jacobian_derivative_wrt_repression_transpose) +
instant_noise_derivative_wrt_repression + delayed_noise_derivative_wrt_repression )
next_covariance_derivative_matrix[0] = current_covariance_derivative_matrix[0] + discretisation_time_step*(derivative_of_variance_wrt_repression_threshold)
# hill coefficient
# this refers to d(f'(p(t-\tau)))/dh
hill_function_second_derivative_value_wrt_hill_coefficient = np.power(past_protein/repression_threshold,hill_coefficient)*(-np.power(past_protein/repression_threshold,hill_coefficient) +
hill_coefficient*(np.power(past_protein/repression_threshold,hill_coefficient)-1)*np.log(past_protein/repression_threshold)
from typing import List, Optional, Tuple
import os
import time
import torch
import numpy as np
import json
from transformers.models.rag.retrieval_rag import (
HFIndexBase,
RagRetriever,
LegacyIndex,
CustomHFIndex,
CanonicalHFIndex,
LEGACY_INDEX_PATH,
)
from transformers.models.rag.tokenization_rag import RagTokenizer
from transformers.file_utils import requires_backends
from transformers.tokenization_utils_base import BatchEncoding
from transformers.utils import logging
from dialdoc.models.rag.configuration_rag_dialdoc import DialDocRagConfig
logger = logging.get_logger(__name__)
class DialDocIndex(CustomHFIndex):
def load_pid_domain_mapping(self, mapping_file):
with open(mapping_file, "r") as f_in:
map = json.load(f_in)
new_map = {}
for k, v in map.items():
new_map[int(k)] = v
del map
self.mapping = new_map
def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
scores, ids = self.dataset.search_batch("embeddings", question_hidden_states, n_docs)
docs = [self.dataset[[i for i in indices if i >= 0]] for indices in ids]
vectors = [doc["embeddings"] for doc in docs]
for i in range(len(vectors)):
if len(vectors[i]) < n_docs:
vectors[i] = np.vstack([vectors[i], np.zeros((n_docs - len(vectors[i]), self.vector_size))])
return (
np.array(ids),
np.array(vectors),
np.array(scores),
) # shapes (batch_size, n_docs), (batch_size, n_docs, d) and (batch_size, n_docs)
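    # Illustrative sketch (hypothetical shapes): when the index returns fewer than
    # n_docs hits for a query, the retrieved vectors are padded with zero rows so
    # every query contributes an (n_docs, d) block to the batch.
    def _example_pad_retrieved_vectors(self):
        import numpy as np
        n_docs, vector_size = 5, 4
        vectors = np.ones((3, vector_size))     # only 3 documents came back
        if len(vectors) < n_docs:
            vectors = np.vstack([vectors, np.zeros((n_docs - len(vectors), vector_size))])
        return vectors.shape                    # (5, 4)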
def search_batch_domain(self, embeddings, domain, n_docs=5):
scores, ids = self.dataset.search_batch("embeddings", embeddings, 1200)
filtered_scores, filtered_ids = [], []
for i in range(len(ids)):
dom = domain[i]
f_s, f_id = [], []
for score, id in zip(scores[i], ids[i]):
if id != -1 and self.mapping[id] == dom:
f_s.append(score)
f_id.append(id)
if len(f_id) == n_docs:
filtered_scores.append(f_s)
filtered_ids.append(f_id)
break
if 0 < len(f_id) < n_docs: ## bandage for cases where the retriever finds less than n_docs
while len(f_id) < n_docs:
f_id.append(f_id[0])
f_s.append(f_s[0])
filtered_scores.append(f_s)
filtered_ids.append(f_id)
## TODO: what happens if none of the retrieved docs are in the GT domain
return filtered_scores, filtered_ids
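    # Illustrative sketch of the domain filtering above on toy data (made-up ids,
    # scores and domains): keep, per query, only hits whose passage id maps to the
    # query's ground-truth domain, and repeat the best hit if fewer than n_docs survive.
    def _example_domain_filtering(self):
        mapping = {0: "dmv", 1: "ssa", 2: "dmv", 3: "va"}
        scores, ids = [9.0, 8.0, 7.0, 6.0], [1, 0, 2, 3]   # a single query
        target_domain, n_docs = "dmv", 3
        f_s, f_id = [], []
        for score, pid in zip(scores, ids):
            if pid != -1 and mapping[pid] == target_domain:
                f_s.append(score)
                f_id.append(pid)
            if len(f_id) == n_docs:
                break
        while 0 < len(f_id) < n_docs:                      # pad by repeating the top hit
            f_id.append(f_id[0])
            f_s.append(f_s[0])
        return f_s, f_id                                   # ([8.0, 7.0, 8.0], [0, 2, 0])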
def get_top_docs_domain(
self, question_hidden_states: np.ndarray, domain, n_docs=5
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
scores, ids = self.search_batch_domain(question_hidden_states, domain, n_docs)
docs = [self.dataset[[i for i in indices if i >= 0]] for indices in ids]
vectors = [doc["embeddings"] for doc in docs]
for i in range(len(vectors)):
if len(vectors[i]) < n_docs:
vectors[i] = np.vstack([vectors[i], np.zeros((n_docs - len(vectors[i]), self.vector_size))])
return (
np.array(ids),
import sys
import numpy as np
import datetime as dt
import matplotlib
import matplotlib.pyplot as plt
# use ggplot style for more sophisticated visuals
plt.style.use('ggplot')
is_closed = False
panning_allowed = False
def enable_panning(event):
global panning_allowed
panning_allowed = True
def disable_panning(event):
global panning_allowed
panning_allowed = False
def has_been_closed():
return is_closed
def window_closed(event):
global is_closed
is_closed = True
# Converts a numpy.datetime64 to a datetime.datetime object by converting dt64 to UTC time (for later use)
def datetime64_to_datetime(dt64):
return dt.datetime.utcfromtimestamp((dt64 - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's'))
# Gets a list of x (time) and y (sensor reading) coordinates for the index-th column of data_df
# Also returns the labels for the x ticks (strings in HH:MM:SS) format
def get_coordinate_lists(data_df, index):
time_list = data_df['Time'].tolist()
value_list = data_df.iloc[:, index].tolist()
time_list = [datetime64_to_datetime(time) for time in time_list]
# Convert time_list to timedeltas, representing the time between each element of time_list and time_list[0]
time_list = list(map(lambda time: time - time_list[0], time_list))
# Convert the timedeltas to seconds
time_list_seconds = list(map(lambda timedelta: round(timedelta.total_seconds()), time_list))
# Convert the timedeltas to HH:MM:SS format
time_list_strings = list(map(lambda timedelta: "%.2d:%.2d:%.2d" % (
int(timedelta.seconds / 3600), (timedelta.seconds // 60) % 60, timedelta.seconds % 60), time_list))
return time_list_seconds, value_list, time_list_strings
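# Illustrative sketch of the time handling above with made-up timestamps:
# datetime64 values become seconds since the first sample plus HH:MM:SS labels.
def _example_time_axis_conversion():
    import numpy as np
    times = [np.datetime64('2021-01-01T00:00:00'),
             np.datetime64('2021-01-01T00:01:05'),
             np.datetime64('2021-01-01T01:00:10')]
    seconds = [int((t - times[0]) / np.timedelta64(1, 's')) for t in times]   # [0, 65, 3610]
    labels = ["%.2d:%.2d:%.2d" % (s // 3600, (s // 60) % 60, s % 60) for s in seconds]
    return seconds, labels   # labels: ['00:00:00', '00:01:05', '01:00:10']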
def live_plotter_init(data_df, lines, formats, labels, xlabel='X Label', ylabel='Y Label', title='Title'):
plt.ion()
fig = plt.figure(figsize=(13, 9))
ax = fig.add_subplot(111)
# Set window title
gcf = plt.gcf()
gcf.canvas.set_window_title(title)
# Event bindings
close_bind = fig.canvas.mpl_connect('close_event', window_closed)
enter_bind = fig.canvas.mpl_connect('axes_enter_event', enable_panning)
exit_bind = fig.canvas.mpl_connect('axes_leave_event', disable_panning)
# Setup mouse wheel zooming
def zoom_factory(ax, base_scale=2.):
def zoom_fun(event):
# get the current x and y limits
cur_xlim = ax.get_xlim()
cur_ylim = ax.get_ylim()
cur_xrange = (cur_xlim[1] - cur_xlim[0]) * .5
cur_yrange = (cur_ylim[1] - cur_ylim[0]) * .5
xdata = event.xdata # get event x location
ydata = event.ydata # get event y location
if event.button == 'up':
# deal with zoom in
scale_factor = 1 / base_scale
elif event.button == 'down':
# deal with zoom out
scale_factor = base_scale
else:
# deal with something that should never happen
scale_factor = 1
print(event.button)
# set new limits
ax.set_xlim([xdata - cur_xrange * scale_factor,
xdata + cur_xrange * scale_factor])
ax.set_ylim([ydata - cur_yrange * scale_factor,
ydata + cur_yrange * scale_factor])
plt.draw() # force re-draw
fi = ax.get_figure() # get the figure of interest
# attach the call back
fi.canvas.mpl_connect('scroll_event', zoom_fun)
# return the function
return zoom_fun
zoom = zoom_factory(ax)
# Plot initial data
for index in range(len(lines)):
x_vec, y_vec, skip = get_coordinate_lists(data_df, index)
lines[index] = ax.plot(x_vec, y_vec, formats[index], alpha=0.8, label=labels[index])
ax.legend(loc='upper right')
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.gcf().subplots_adjust(bottom=0.15)
plt.show()
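# Hypothetical usage sketch (not part of the original script): the sensor column
# must come before the 'Time' column so that data_df.iloc[:, 0] inside
# get_coordinate_lists picks up the values; names and data below are made up.
def _example_live_plot():
    import pandas as pd
    times = pd.to_datetime(['2021-01-01 00:00:00', '2021-01-01 00:00:01', '2021-01-01 00:00:02'])
    data_df = pd.DataFrame({'Temperature': [20.1, 20.4, 20.3], 'Time': times})
    lines = [None]
    live_plotter_init(data_df, lines, formats=['-o'], labels=['Temperature'],
                      xlabel='Time (s)', ylabel='Temperature', title='Live demo')
    while not has_been_closed():
        # append newly acquired rows to data_df here, then redraw
        live_plotter_update(data_df, lines)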
# Update the line graph
def live_plotter_update(data_df, lines, pause_time=0.01, max_points_to_show=10):
# All x_vec and y_vec lists, used to set the bounds of the graph
x_vecs = []
y_vecs = []
last_x_vec = None # Store the last x_vec, in full (not only the last max_points_to_show points), for time labeling
time_list_strings = None
for index in range(len(lines)):
x_vec, y_vec, list_strings = get_coordinate_lists(data_df, index)
lines[index][0].set_data(x_vec, y_vec)
# Add to x_vecs and y_vecs
x_vecs.append(x_vec[-max_points_to_show:])
y_vecs.append(y_vec[-max_points_to_show:])
# Override time_list_strings
time_list_strings = list_strings
# Override last_x_vec, so the time labels are properly applied to all points, not just those visible
last_x_vec = x_vec
if has_been_closed():
return # Exit program early if closed
# Do not adjust bounds if panning because it will send them back to the original view
if not panning_allowed:
# Adjust the bounds to fit all the lines on the screen and only show at most max_points_to_show at once
# Find the smallest and largest x values (in the last max_points_to_show of each x_vec in x_vecs)
smallest_x = np.min(x_vecs)
largest_x = np.max(x_vecs)
# Find the smallest and largest y values (in the last max_points_to_show of each y_vec in y_vecs)
smallest_y = np.min(y_vecs)
largest_y = np.max(y_vecs)
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scoring functions relating to loads."""
import json
import os
import makani
from makani.analysis.aero import apparent_wind_util
from makani.analysis.control import geometry
from makani.control import control_types
from makani.lib.python import c_helpers
from makani.lib.python.batch_sim import scoring_functions
import numpy as np
from scipy import interpolate
import scoring_functions_util as scoring_util
_FLIGHT_MODE_HELPER = c_helpers.EnumHelper('FlightMode', control_types)
class YbAccelerationScoringFunction(
scoring_functions.DoubleSidedLimitScoringFunction):
"""Tests if the body-y acceleration falls outside of acceptable limits."""
def __init__(self, bad_lower_limit, good_lower_limit, good_upper_limit,
bad_upper_limit, severity):
super(YbAccelerationScoringFunction, self).__init__(
'Acceleration', 'm/s^2', bad_lower_limit, good_lower_limit,
good_upper_limit, bad_upper_limit, severity)
def GetSystemLabels(self):
return ['loads']
def GetValue(self, output):
return np.array([output['yb_accel_min'],
output['yb_accel_max']])
def GetOutput(self, timeseries):
return {
'yb_accel_min': np.min(timeseries['yb_accel']),
'yb_accel_max': np.max(timeseries['yb_accel'])
}
def GetTimeSeries(self, params, sim, control):
yb_accel = self._SelectTelemetry(sim, control, 'wing_acc')['y']
return {'yb_accel': yb_accel}
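# Illustrative sketch (hypothetical numbers): each scoring function reduces a
# telemetry timeseries to min/max values that the base limit class then compares
# against the good/bad thresholds.
def _example_score_reduction():
    import numpy as np
    timeseries = {'yb_accel': np.array([-1.2, 0.3, 2.5, -0.7])}
    output = {'yb_accel_min': np.min(timeseries['yb_accel']),
              'yb_accel_max': np.max(timeseries['yb_accel'])}
    return np.array([output['yb_accel_min'], output['yb_accel_max']])   # [-1.2, 2.5]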
class ZgAccelerationScoringFunction(
scoring_functions.SingleSidedLimitScoringFunction):
"""Tests the kite vertical acceleration in ground frame."""
def __init__(self, good_limit, bad_limit, severity):
super(ZgAccelerationScoringFunction, self).__init__(
'Wing Accel Zg', 'm/s^2', good_limit, bad_limit, severity)
def GetSystemLabels(self):
return ['controls']
def GetValue(self, output):
return np.array([output['zg_accel_min'],
output['zg_accel_max']])
def GetOutput(self, timeseries):
return {
'zg_accel_min': np.min(timeseries['zg_accel']),
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2018-10-19 00:59:49
# @Last Modified by: <NAME>
# @Last Modified time: 2021-12-04 18:04:19
import os, math, matplotlib
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
from matplotlib.patches import Ellipse
import seaborn as sns
import numpy as np
import numpy.ma as ma
from skimage import exposure
import helper as my_help
import intensity_calculation as my_int
import settings as settings
""" ========================= Supporting Functions =========================== """
### return coordinates of a circle given it's center and radius.
def draw_circle(center, radius):
"""
Function: return coordinates of a circle given it's center and radius
Inputs:
- center: numpy array (or list). x and y coordinates of the center of the circle.
- radius: value of the radius of the circle.
Output: numpy array. x and y coordinates of the circle
"""
theta = np.linspace(0, 2*np.pi, 100)
# compute x1 and x2
x = radius*np.cos(theta) + center[0]
y = radius*np.sin(theta) + center[1]
circle = np.column_stack((x,y))
return circle
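### hypothetical usage sketch for draw_circle (made-up center and radius):
### the returned (100, 2) array can be passed straight to plt.plot.
def _example_draw_circle():
    circle = draw_circle(np.array([1.0, 2.0]), 3.0)
    # plt.plot(circle[:, 0], circle[:, 1]) would trace the circle outline
    return circle.shape    # (100, 2)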
### convert from cartesian coordinate to polar coordinate
def cartesian_to_polar(x,y):
"""
Function: convert from cartesian coordinate to polar coordinate
Input: x,y - float. x and y coordinate values.
Output: r, theta - float. radial and angle coordinate values.
"""
r = np.sqrt(x**2 + y**2)
theta = np.arctan(y/x)
return r,theta
### convert from polar coordinate to cartesian coordinate
def polar_to_cartesian(r,theta):
"""
Function: convert from polar coordinate to cartesian coordinate
Input: r, theta - float. radial and angle coordinate values.
Output: x,y - float. x and y coordinate values.
"""
x = r*np.cos(theta)
y = r*np.sin(theta)
return x,y
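### hypothetical round-trip sketch: cartesian_to_polar and polar_to_cartesian
### are inverses for points with x > 0 (the arctan(y/x) form loses the quadrant otherwise).
def _example_polar_round_trip():
    x, y = 1.0, 1.0
    r, theta = cartesian_to_polar(x, y)       # r = sqrt(2), theta = pi/4
    x2, y2 = polar_to_cartesian(r, theta)
    assert np.allclose([x, y], [x2, y2])
    return r, theta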
### reduce x and y axis labels.
def get_tick_list(tick_type, ori_tick, arg2):
"""
Function: reduce axis labels.
Inputs:
- tick_type: int.
- tick_type == 1: specify number of labels remained.
- tick_type == 2: specify which label should be kept.
- ori_tick: numpy array. original tick values.
- arg2:
- if tick_type == 1: int. number of labels
- if tick_type == 2: list. labels to keep.
Output: ticks_list: list. tick values.
"""
### specify number of labels remained.
if(tick_type == 1):
num_of_labels = arg2
k = np.ceil((len(ori_tick) - 1)/(num_of_labels)).astype(int)
nk = np.floor(len(ori_tick)/k).astype(int)
true_label_list = []
i = 0
ti = 0 + i*k
while ti < len(ori_tick):
true_label_list.append(0 + i*k)
i += 1
ti = 0 + i*k
### specify which label should be kept.
elif(tick_type == 2):
must_lists = arg2
true_label_list = np.where(np.isin(ori_tick, must_lists))[0]
### from index of ticks to tick values
tick_list = []
for i in range(len(ori_tick)):
if i in true_label_list:
tick_list.append(ori_tick[i].astype(str))
else:
tick_list.append('')
return tick_list
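### hypothetical usage sketch for get_tick_list (made-up tick values):
### keep roughly 3 labels (tick_type == 1) or only the named ones (tick_type == 2).
def _example_tick_lists():
    ori_tick = np.array([0, 5, 10, 15, 20, 25])
    every_kth = get_tick_list(1, ori_tick, 3)          # ['0', '', '10', '', '20', '']
    only_named = get_tick_list(2, ori_tick, [0, 25])   # ['0', '', '', '', '', '25']
    return every_kth, only_named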
### get slicing information for polar plot.
def get_slice_params_for_polar_plot(analysis_params, slicing_params):
"""
Function: get slicing information for polar plot.
Inputs:
- analysis_params: list. Contains: num_angle_section, num_outside_angle, num_x_section, z_offset, radius_expanse_ratio.
- slicing_params: list. Contains: slice_zero_point, slice_one_point, cut_off_point, center_point.
Outputs:
- rs: numpy array. coordinates of angular axis.
- phis: numpy array. coordinates of radial axis.
"""
num_angle_section, num_outside_angle, num_x_section, z_offset, radius_expanse_ratio = analysis_params
slice_zero_point, slice_one_point, cut_off_point, center_point = slicing_params
radius = np.linalg.norm( center_point - cut_off_point )
angle_start_to_r = np.arctan2( slice_zero_point[1] - center_point[1], slice_zero_point[0] - center_point[0] )
angle_end_to_r = np.arctan2( slice_one_point[1] - center_point[1], slice_one_point[0] - center_point[0])
phi_range = my_int.inner_angle(slice_one_point - center_point, slice_zero_point - center_point, True)
phi_unit = phi_range/num_angle_section
if(((-np.pi <= angle_start_to_r <= -0.5*np.pi) | (-np.pi <= angle_end_to_r <= -0.5*np.pi)) & (angle_start_to_r*angle_end_to_r < 1) ):
if((-np.pi <= angle_start_to_r <= -0.5*np.pi) & (-np.pi <= angle_end_to_r <= -0.5*np.pi)):
phi_start = min(angle_start_to_r, angle_end_to_r) - num_outside_angle * phi_unit
phi_end = max(angle_start_to_r, angle_end_to_r) + num_outside_angle * phi_unit
else:
phi_start = max(angle_start_to_r, angle_end_to_r) - num_outside_angle * phi_unit
phi_end = min(angle_start_to_r, angle_end_to_r) + num_outside_angle * phi_unit
else:
phi_start = min(angle_start_to_r, angle_end_to_r) - num_outside_angle * phi_unit
phi_end = max(angle_start_to_r, angle_end_to_r) + num_outside_angle * phi_unit
phi_start = my_int.angle_normalization(phi_start)
phi_end = my_int.angle_normalization(phi_end)
phis = my_int.get_phis(phi_start, phi_end, num_angle_section + num_outside_angle*2 + 2)
if(my_int.smallest_angle(angle_start_to_r, phis[-1]) < my_int.smallest_angle(angle_start_to_r, phis[0])):
phis = np.flip(phis, axis = 0)
rs = np.linspace(0, radius_expanse_ratio, num_x_section + 2)
return rs, phis
### get polar or cartasian coordinates of targets
def get_target_grid(return_type, **kwargs):
"""
Function: get polar or cartasian coordinates of targets
Inputs:
- return_type: str. "cart" for cartasian coordinates; "polar" for polar coordinates.
- kwargs: additional params.
- rel_points: dictionary. relative length for target positions and heel positions
Outputs:
- if return cartasian coordinates: numpy array. x and y coordinates of targets in cartasian coordinates.
- if return polar coordinates: dictionary {type('c', 'l', 'h'):numpy array}. polar coordinates of target centers ('c')/lower bounds ('l')/upper bounds ('h')
"""
### unravel params.
if('rel_points' in kwargs.keys()):
rel_points = kwargs['rel_points']
### calculate ideal grid
#### before standardization
##### distance: normal
dT0T2 = dT0T5 = dT2T4 = dT4T5 = 1
dT0T4 = dT2T3 = (dT0T5 ** 2 + dT4T5 ** 2 -2*dT4T5*dT0T5*math.cos(math.radians(100)))**0.5
dT2T5 = dT3T7 = (dT0T5 ** 2 + dT4T5 ** 2 -2*dT0T2*dT0T5*math.cos(math.radians(80)))**0.5
dT0T3 = dT0T7 = ((dT2T5/2) ** 2 + (dT2T3*1.5) ** 2) ** 0.5
##### angles: normal
aT0T2 = math.radians(80)/2
aT0T5 = - math.radians(80)/2
aT0T3 = math.acos((dT0T3 ** 2 + dT0T7 ** 2 - dT3T7 ** 2)/(2*dT0T3*dT0T7))/2
aT0T7 = - aT0T3
aT0T4 = 0
##### target coordinates
T0 = np.array((0,0))
T2 = np.array((aT0T2, dT0T2))
T3 = np.array((aT0T3, dT0T3))
T4 = np.array((aT0T4, dT0T4))
T5 = np.array((aT0T5, dT0T2))
T7 = np.array((aT0T7, dT0T7))
target_grid_polar = np.stack((T0, T2, T3, T4, T5, T7), axis = 0)
target_grid_cart = np.zeros((6,2))
for i in range(6):
target_grid_cart[i,:] = polar_to_cartesian(target_grid_polar[i,1], target_grid_polar[i,0])
##### heel coordinates
alpha = 0.2354
a = 0.2957
b = 0.5
r_heels_cart = np.zeros((6,2))
r_heels_polar = np.zeros((6,2))
for n in range(1,7):
phi_n = -(alpha + (n-1)*(np.pi - 2*alpha)/5)
x = a*np.cos(phi_n)
y = b*np.sin(phi_n)
r, theta = cartesian_to_polar(-y, x)
r_heels_cart[n-1, :] = [-y,x]
r_heels_polar[n-1, :] = [theta, r]
##### intersect
c = my_help.line_intersection((r_heels_cart[2,:], target_grid_cart[2,:]),(r_heels_cart[3,:], target_grid_cart[5,:]))
#### after standardization
dTiC = np.zeros((6,1))
for i in range(1,6):
dTiC[i] = np.linalg.norm(target_grid_cart[i,:] - c)
dTiC = dTiC/dTiC[3]
aTiCT4 = np.zeros((6,1))
for i in range(1,6):
aTiCT4[i] = my_int.inner_angle(target_grid_cart[i,:] - c, target_grid_cart[3,:] - c, True)
if(i in [4,5]):
aTiCT4[i] = - aTiCT4[i]
### calculate output values
if(return_type == 'cart'):
grid_cart = np.zeros((6,2))
for i in range(1,6):
grid_cart[i,0],grid_cart[i,1] = polar_to_cartesian(dTiC[i][0], aTiCT4[i][0])
return grid_cart
elif(return_type == 'polar'):
target_grid_polar = {}
for t in ['c', 'l', 'h']:
T0 = np.array((aTiCT4[0], -rel_points[f'T0{t}']))
"""
Reference:
[1]: Branlard, Flexible multibody dynamics using joint coordinates and the Rayleigh-Ritz approximation: the general framework behind and beyond Flex, Wind Energy, 2019
"""
import numpy as np
from .utils import *
from .bodies import Body as GenericBody
from .bodies import RigidBody as GenericRigidBody
from .bodies import FlexibleBody as GenericFlexibleBody
from .bodies import BeamBody as GenericBeamBody
from .bodies import FASTBeamBody as GenericFASTBeamBody
from .bodies import InertialBody as GenericInertialBody
# --- To ease comparison with sympy version
from numpy import eye, cross, cos ,sin
def Matrix(m):
return np.asarray(m)
def colvec(v):
v=np.asarray(v).ravel()
return np.array([[v[0]],[v[1]],[v[2]]])
# --------------------------------------------------------------------------------}
# --- Connections
# --------------------------------------------------------------------------------{
class Connection():
def __init__(self,Type,RelPoint=None,RelOrientation=None,JointRotations=None, OrientAfter=True):
if RelOrientation is None:
RelOrientation=eye(3)
if RelPoint is None:
RelPoint=colvec([0,0,0])
self.Type=Type
self.s_C_0_inB = RelPoint
self.s_C_inB = self.s_C_0_inB
self.R_ci_0 = RelOrientation
self.R_ci = self.R_ci_0
self.OrientAfter= OrientAfter
if self.Type=='Rigid':
self.nj=0
elif self.Type=='SphericalJoint':
self.JointRotations=JointRotations;
self.nj=len(self.JointRotations);
else:
raise NotImplementedError()
def updateKinematics(j,q):
j.B_ci=Matrix(np.zeros((6,j.nj)))
if j.Type=='Rigid':
j.R_ci=j.R_ci_0
elif j.Type=='SphericalJoint':
R=eye(3)
myq = q [j.I_DOF,0];
#myqdot = qdot[j.I_DOF];
for ir,rot in enumerate(j.JointRotations):
if rot=='x':
I=np.array([1,0,0])
Rj=R_x( myq[ir] )
elif rot=='y':
I=np.array([0,1,0])
Rj=R_y( myq[ir] )
elif rot=='z':
I=np.array([0,0,1])
Rj=R_z( myq[ir] )
else:
raise Exception()
# Setting Bhat column by column
j.B_ci[3:,ir] = np.dot(R,I) # NOTE: needs to be done before R updates
# Updating rotation matrix
R = np.dot(R , Rj )
if j.OrientAfter:
j.R_ci = np.dot(R, j.R_ci_0 )
else:
j.R_ci = np.dot(j.R_ci_0, R )
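# Illustrative sketch (not part of the original class): for a spherical joint with
# JointRotations=['x','y','z'], updateKinematics composes the elementary rotations
# in sequence, R = R_x(qx).R_y(qy).R_z(qz). Written with plain numpy here instead
# of the R_x/R_y/R_z helpers imported from .utils; the angles are made up.
def _example_spherical_joint_rotation(qx=0.1, qy=0.2, qz=0.3):
    cx, sx = np.cos(qx), np.sin(qx)
    cy, sy = np.cos(qy), np.sin(qy)
    cz, sz = np.cos(qz), np.sin(qz)
    Rx = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
    Ry = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    Rz = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
    return Rx.dot(Ry).dot(Rz)   # same ordering as the loop in updateKinematics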
# --------------------------------------------------------------------------------}
# --- Bodies
# --------------------------------------------------------------------------------{
class Body(GenericBody):
def __init__(B,name=''):
GenericBody.__init__(B, name=name)
B.Children = []
B.Connections = []
B.MM = None
B.B = [] # Velocity transformation matrix
B.updatePosOrientation(colvec([0,0,0]), eye(3))
def updatePosOrientation(o,x_0,R_0b):
o.r_O = x_0 # position of body origin in global coordinates
o.R_0b=R_0b # transformation matrix from body to global
def connectTo(self, Child, Point=None, Type=None, RelOrientation=None, JointRotations=None, OrientAfter=True):
if Type =='Rigid':
c=Connection(Type, RelPoint=Point, RelOrientation = RelOrientation)
else: # TODO first node, last node
c=Connection(Type, RelPoint=Point, RelOrientation=RelOrientation, JointRotations=JointRotations, OrientAfter=OrientAfter)
self.Children.append(Child)
self.Connections.append(c)
def setupDOFIndex(o,n):
nForMe=o.nf
# Setting my dof index
o.I_DOF=n+ np.arange(nForMe)
# Update
n=n+nForMe
for child,conn in zip(o.Children,o.Connections):
# Connection first
nForConn=conn.nj;
conn.I_DOF=n+np.arange(nForConn)
# Update
n=n+nForConn;
# Then Children
n=child.setupDOFIndex(n)
return n
#def __repr__(B):
# return GenericBody.__repr__(B)
@property
def R_bc(self):
return eye(3);
@property
def Bhat_x_bc(self):
return Matrix(np.zeros((3,0)))
@property
def Bhat_t_bc(self):
return Matrix(np.zeros((3,0)))
def updateChildrenKinematicsNonRecursive(p,q):
# At this stage all the kinematics of the body p are known
# Useful variables
R_0p = p.R_0b
B_p = p.B
r_0p = p.r_O
nf_all_children=sum([child.nf for child in p.Children])
for ic,(body_i,conn_pi) in enumerate(zip(p.Children,p.Connections)):
# Flexible influence to connection point
R_pc = p.R_bc
#print('R_pc')
#print(R_pc)
Bx_pc = p.Bhat_x_bc
Bt_pc = p.Bhat_t_bc
# Joint influence to next body (R_ci, B_ci)
conn_pi.updateKinematics(q) # TODO
#print('R_ci',p.name)
#print(conn_pi.R_ci)
# Full connection p and j
R_pi = np.dot(R_pc, conn_pi.R_ci )
if conn_pi.B_ci.shape[1]>0:
Bx_pi = np.column_stack((Bx_pc, np.dot(R_pc,conn_pi.B_ci[:3,:])))
Bt_pi = np.column_stack((Bt_pc, np.dot(R_pc,conn_pi.B_ci[3:,:])))
else:
Bx_pi = Bx_pc
Bt_pi = Bt_pc
# Rotation of body i is rotation due to p and j
R_0i = np.dot( R_0p , R_pi )
#print('R_pi',p.name)
#print(R_pi)
#print('R_0p',p.name)
#print(R_0p)
#print('R_0i',p.name)
#print(R_0i)
# Position of connection point in P and 0 system
r_pi_inP= conn_pi.s_C_inB
r_pi = np.dot (R_0p , r_pi_inP )
#print('r_pi')
#print(r_pi_inP)
#print('r_pi')
#print(r_pi)
#print('Bx_pi')
#print(Bx_pi)
#print('Bt_pi')
#print(Bt_pi)
B_i = fBMatRecursion(B_p, Bx_pi, Bt_pi, R_0p, r_pi)
B_i_inI = fB_inB(R_0i, B_i)
BB_i_inI = fB_aug(B_i_inI, body_i.nf)
body_i.B = B_i
body_i.B_inB = B_i_inI
body_i.BB_inB = BB_i_inI
# --- Updating Position and orientation of child body
r_0i = r_0p + r_pi # in 0 system
body_i.R_pb = R_pi
body_i.updatePosOrientation(r_0i,R_0i)
# TODO flexible dofs and velocities/acceleration
body_i.gzf = q[body_i.I_DOF,0] # TODO use updateKinematics
def getFullM(o,M):
if not isinstance(o,GroundBody):
MqB = fBMB(o.BB_inB,o.MM)
n = MqB.shape[0]
M[:n,:n] = M[:n,:n]+MqB
for c in o.Children:
M=c.getFullM(M)
return M
def getFullK(o,K):
if not isinstance(o,GroundBody):
KqB = fBMB(o.BB_inB,o.KK)
n = KqB.shape[0]
K[:n,:n] = K[:n,:n]+KqB
for c in o.Children:
K=c.getFullK(K)
return K
def getFullD(o,D):
if not isinstance(o,GroundBody):
DqB = fBMB(o.BB_inB,o.DD)
n = DqB.shape[0]
D[:n,:n] = D[:n,:n]+DqB
for c in o.Children:
D=c.getFullD(D)
return D
@property
def nf(B):
if hasattr(B,'PhiU'):
return len(B.PhiU)
else:
return 0
@property
def Mass(B):
if B.MM is None:
return 0
return B.MM[0,0]
def updateKinematics(o,x_0,R_0b,gz,v_0,a_v_0):
# Updating position of body origin in global coordinates
o.r_O = x_0[0:3]
o.gzf = gz
# Updating Transformation matrix
o.R_0b=R_0b
# Updating rigid body velocity and acceleration
o.v_O_inB = np.dot(R_0b, v_0[0:3])
o.om_O_inB = np.dot(R_0b, v_0[3:6])
o.a_O_v_inB = np.dot(R_0b, a_v_0[0:3])
o.omp_O_v_inB = np.dot(R_0b, a_v_0[3:6])
# --------------------------------------------------------------------------------}
# --- Ground Body
# --------------------------------------------------------------------------------{
class GroundBody(Body, GenericInertialBody):
def __init__(B):
Body.__init__(B, 'Grd')
GenericInertialBody.__init__(B)
# --------------------------------------------------------------------------------}
# --- Rigid Body
# --------------------------------------------------------------------------------{
class RigidBody(Body,GenericRigidBody):
def __init__(B, name, Mass, J_G, rho_G):
"""
Creates a rigid body
"""
Body.__init__(B,name)
GenericRigidBody.__init__(B, name, Mass, J_G, rho_G)
B.s_G_inB = B.masscenter
B.J_G_inB = B.masscenter_inertia
B.J_O_inB = translateInertiaMatrixFromCOG(B.J_G_inB, Mass, -B.s_G_inB)
B.MM = rigidBodyMassMatrix(Mass, B.J_O_inB, B.s_G_inB) # TODO change interface
B.DD = np.zeros((6,6))
B.KK = np.zeros((6,6))
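# Hypothetical usage sketch (made-up inertia values): a rigid body is defined by
# its mass, its inertia tensor at the COG and the COG position in body coordinates;
# the 6x6 mass matrix at the body origin is assembled in the constructor above.
def _example_rigid_body():
    Mass = 5000.0
    J_G = np.diag([1.0e4, 1.0e4, 2.0e4])     # inertia at the COG, body frame
    rho_G = colvec([0.0, 0.0, 1.5])          # COG position from the body origin
    nac = RigidBody('nacelle', Mass, J_G, rho_G)
    return nac.MM                            # 6x6 rigid-body mass matrix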
# --------------------------------------------------------------------------------}
# --- Beam Body
# --------------------------------------------------------------------------------{
class BeamBody(GenericBeamBody, Body):
def __init__(B, s_span, s_P0, m, PhiU, PhiV, PhiK, EI, jxxG=None, s_G0=None,
s_min=None, s_max=None,
bAxialCorr=False, bOrth=False, Mtop=0, bStiffening=True, gravity=None,main_axis='z',
massExpected=None
):
"""
Points P0 - Undeformed mean line of the body
"""
        # --- Inherit from GenericBeamBody and Body
Body.__init__(B)
GenericBeamBody.__init__(B,'dummy', s_span, s_P0, m, EI, PhiU, PhiV, PhiK, jxxG=jxxG, s_G0=s_G0, s_min=s_min, s_max=s_max,
bAxialCorr=bAxialCorr, bOrth=bOrth, Mtop=Mtop, bStiffening=bStiffening, gravity=gravity, main_axis=main_axis,
massExpected=massExpected
)
B.gzf = np.zeros((B.nf,1))
B.gzpf = np.zeros((B.nf,1))
B.gzppf = np.zeros((B.nf,1))
# TODO
B.V0 = np.zeros((3,B.nSpan))
B.K0 = np.zeros((3,B.nSpan))
B.rho_G0_inS = np.zeros((3,B.nSpan)) # location of COG in each cross section
#[o.PhiV,o.PhiK] = fBeamSlopeCurvature(o.s_span,o.PhiU,o.PhiV,o.PhiK,1e-2);
#[o.V0,o.K0] = fBeamSlopeCurvature(o.s_span,o.s_P0,o.V0,o.K0,1e-2) ;
#if isempty(o.s_G0); o.s_G0=o.s_P0; end;
#if isempty(o.rho_G0_inS); o.rho_G0_inS=np.zeros(3,o.nSpan); end;
#if isempty(o.rho_G0 );
# o.rho_G0 =np.zeros(3,o.nSpan);
# for i=1:o.nSpan
# o.rho_G0(1:3,i) =R_x(o.V0(1,i))*o.rho_G0_inS(:,i);
@property
def alpha_couplings(self):
return np.dot(self.Bhat_t_bc , self.gzf).ravel()
@property
def R_bc(self):
alpha = self.alpha_couplings
if self.main_axis=='x':
return np.dot(R_y(alpha[1]),R_z(alpha[2]))
elif self.main_axis=='z':
return np.dot(R_x(alpha[0]),R_y(alpha[1]))
else:
raise NotImplementedError()
def updateKinematics(o,x_0,R_0b,gz,v_0,a_v_0):
super(BeamBody,o).updateKinematics(x_0,R_0b,gz,v_0,a_v_0)
# --- Calculation of deformations wrt straight beam axis, curvature (K) and velocities (UP)
if o.nf>0:
o.gzpf = v_0[6:]
o.gzppf = a_v_0[6:]
# Deflections shape
o.U = np.zeros((3,o.nSpan));
o.V = np.zeros((3,o.nSpan));
o.K = np.zeros((3,o.nSpan));
#o.U(1,:) = o.s_span;
o.UP = np.zeros((3,o.nSpan));
for j in range(o.nf):
o.U [0:3,:] = o.U [0:3,:] + o.gzf[j] * o.PhiU[j][0:3,:]
o.UP[0:3,:] = o.UP[0:3,:] + o.gzpf[j] * o.PhiU[j][0:3,:]
o.V [0:3,:] = o.V [0:3,:] + o.gzf[j] * o.PhiV[j][0:3,:]
o.K [0:3,:] = o.K [0:3,:] + o.gzf[j] * o.PhiK[j][0:3,:]
o.V_tot=o.V+o.V0;
o.K_tot=o.K+o.K0;
# Position of mean line
o.s_P=o.s_P0+o.U;
# Position of deflected COG
# TODO TODO TODO mean_axis not x
o.rho_G = np.zeros((3,o.nSpan))
if o.main_axis=='x':
o.rho_G[1,:] = o.rho_G0_inS[1,:]*np.cos(o.V_tot[0,:])-o.rho_G0_inS[2,:]*np.sin(o.V_tot[0,:]);
o.rho_G[2,:] = o.rho_G0_inS[1,:]*np.sin(o.V_tot[0,:])+o.rho_G0_inS[2,:]*np.cos(o.V_tot[0,:]);
else:
                raise NotImplementedError()
                # NOTE: the two assignments below duplicate the 'x' branch above and are
                # unreachable after the raise; kept commented out for reference.
                #o.rho_G[1,:] = o.rho_G0_inS[1,:]*np.cos(o.V_tot[0,:])-o.rho_G0_inS[2,:]*np.sin(o.V_tot[0,:]);
                #o.rho_G[2,:] = o.rho_G0_inS[1,:]*np.sin(o.V_tot[0,:])+o.rho_G0_inS[2,:]*np.cos(o.V_tot[0,:]);
o.s_G = o.s_P+o.rho_G;
# Alternative:
#rho_G2 = zeros(3,o.nSpan);
#rho_G2(2,:) = o.rho_G0(2,:).*cos(o.V(1,:))-o.rho_G0(3,:).*sin(o.V(1,:));
#rho_G2(3,:) = o.rho_G0(2,:).*sin(o.V(1,:))+o.rho_G0(3,:).*cos(o.V(1,:));
#compare(o.rho_G,rho_G2,'rho_G');
# Position of connection point
print('TODO connection points')
#for ic=1:length(o.Connections)
# iNode=o.Connections{ic}.ParentNode;
# %o.Connections{ic}.s_C_inB = o.U(1:3,iNode);
# o.Connections{ic}.s_C_inB = o.s_P(1:3,iNode);
@property
def nSpan(B):
return len(B.s_span)
# --------------------------------------------------------------------------------}
# --- Uniform Beam Body
# --------------------------------------------------------------------------------{
class UniformBeamBody(BeamBody):
def __init__(B, name, nShapes, nSpan, L, EI0, m, Mtop=0, jxxG=None, GKt=None, bAxialCorr=True, bCompatibility=False, bStiffnessFromGM=False, bStiffening=True, gravity=None, main_axis='x'):
import welib.beams.theory as bt
if jxxG is None:
jxxG=0
if GKt is None:
GKt=0
A=1; rho=A*m;
x=np.linspace(0,L,nSpan);
# Mode shapes
freq,s_span,U,V,K = bt.UniformBeamBendingModes('unloaded-topmass-clamped-free',EI0,rho,A,L,x=x,Mtop=Mtop)
PhiU = np.zeros((nShapes,3,nSpan)) # Shape
PhiV = np.zeros((nShapes,3,nSpan)) # Slope
PhiK = np.zeros((nShapes,3,nSpan)) # Curvature
if main_axis=='x':
iModeAxis=2 # Setting modes along z
elif main_axis=='z':
iModeAxis=0 # Setting modes along x
for j in np.arange(nShapes):
PhiU[j][iModeAxis,:] = U[j,:]
PhiV[j][iModeAxis,:] = V[j,:]
PhiK[j][iModeAxis,:] = K[j,:]
m = m * np.ones(nSpan)
jxxG = jxxG * np.ones(nSpan)
EI = np.zeros((3,nSpan))
if main_axis=='x':
EI[1,:] = EI0
EI[2,:] = EI0
elif main_axis=='z':
EI[0,:] = EI0
EI[1,:] = EI0
GKt = GKt * np.ones(nSpan)
# --- Straight undeflected shape (and COG)
s_P0 = np.zeros((3,nSpan))
if main_axis=='x':
s_P0[0,:] = x
elif main_axis=='z':
s_P0[2,:] = x
# Create a beam body
super(UniformBeamBody,B).__init__(s_span, s_P0, m, PhiU, PhiV, PhiK, EI, jxxG=jxxG, bAxialCorr=bAxialCorr, Mtop=Mtop, bStiffening=bStiffening, gravity=gravity, main_axis=main_axis)
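# Example usage (illustrative sketch; the numerical values below are assumptions, not
# values taken from this file):
#   twr = UniformBeamBody('Tower', nShapes=2, nSpan=50, L=100.0, EI0=1.8e12, m=9000.0,
#                         Mtop=3.5e5, main_axis='z', gravity=9.81)
# builds a uniform cantilever beam with two analytical bending modes set along x.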
# --------------------------------------------------------------------------------}
# --- FAST Beam body
# --------------------------------------------------------------------------------{
class FASTBeamBody(BeamBody, GenericFASTBeamBody):
def __init__(B, body_type, ED, inp, Mtop=0, shapes=None, nShapes=None, main_axis='x',nSpan=None,bAxialCorr=False,bStiffening=True,
spanFrom0=False, massExpected=None
):
"""
"""
if shapes is None:
if nShapes==2:
shapes=[0,1]
elif nShapes==0:
shapes=[]
elif nShapes==1:
shapes=[0]
else:
raise NotImplementedError('>> TODO')
GenericFASTBeamBody.__init__(B, ED, inp, Mtop=Mtop, shapes=shapes, main_axis=main_axis, nSpan=nSpan, bAxialCorr=bAxialCorr, bStiffening=bStiffening,
spanFrom0=spanFrom0,
massExpected=massExpected
)
# We need to inherit from "YAMS" Beam not just generic Beam
BeamBody.__init__(B, B.s_span, B.s_P0, B.m, B.PhiU, B.PhiV, B.PhiK, B.EI, jxxG=B.jxxG, s_G0=B.s_G0,
# NOTE: r_O, r_b2g is lost here
s_min=B.s_min, s_max=B.s_max,
bAxialCorr=bAxialCorr, bOrth=B.bOrth, Mtop=Mtop, bStiffening=bStiffening, gravity=B.gravity,main_axis=main_axis,
massExpected=massExpected
)
# --------------------------------------------------------------------------------}
# --- B Matrices
# --------------------------------------------------------------------------------{
def fB_inB(R_EI, B_I):
""" Transfer a global B_I matrix (body I at point I) into a matrix in it's own coordinate.
Simply multiply the top part and bottom part of the B matrix by the 3x3 rotation matrix R_EI
e.g.
B_N_inN = [R_EN' * B_N(1:3,:); R_EN' * B_N(4:6,:)];
"""
if len(B_I)==0:
        B_I_inI = Matrix(np.array([]))
# --------------------------------------------------------
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import sys
import os
import ctypes
from pprint import pprint
from PIL import Image
import glutils.glcontext as glcontext
import OpenGL.GL as GL
import cv2
import numpy as np
from pyassimp import *
from glutils.meshutil import *
from glutils.voxel import *
from transforms3d.quaternions import axangle2quat, mat2quat, qmult, qinverse
from transforms3d.euler import quat2euler, mat2euler, euler2quat, euler2mat
import CppYCBRenderer
from numpy.linalg import inv, norm
import numpy.random as npr
import IPython
import subprocess
import multiprocessing
import threading
import platform
PYTHON2 = True
if platform.python_version().startswith("3"):
PYTHON2 = False
try:
from .get_available_devices import *
except:
from get_available_devices import *
import scipy.io as sio
MAX_NUM_OBJECTS = 3
from glutils.utils import *
from glutils.trackball import Trackball
import time
import random
def load_mesh_single(param):
"""
Load a single mesh. Used in multiprocessing.
"""
path, scale, offset_map = param
scene = load(path)
mesh_file = path.strip().split("/")[-1] # for offset the robot mesh
offset = np.zeros(3)
if offset_map and mesh_file in offset_map:
offset = offset_map[mesh_file]
return recursive_load(scene.rootnode, [], [], [], [], offset, scale, [[], [], []])
def load_texture_single(param):
obj_path, texture_paths = param
textures, is_colors, is_textures = [], [], []
for texture_path in texture_paths:
is_texture = False
is_color = False
if texture_path == "":
texture = texture_path
elif texture_path == "color":
is_color = True
texture = texture_path
else:
texture_path = os.path.join(
"/".join(obj_path.split("/")[:-1]), texture_path
)
texture = loadTexture2(texture_path)
is_texture = True
textures.append(texture)
is_colors.append(is_color)
is_textures.append(is_texture)
return [textures, is_colors, is_textures]
def recursive_load(
node,
vertices,
faces,
materials,
texture_paths,
offset,
scale=1,
repeated=[[], [], []],
):
"""
Applying transform to recursively load vertices, normals, faces
"""
if node.meshes:
transform = node.transformation
for idx, mesh in enumerate(node.meshes):
if mesh.faces.shape[-1] != 3: # ignore Line Set
continue
mat = mesh.material
texture_path = False
if hasattr(mat, "properties"):
file = ("file", long(1)) if PYTHON2 else ("file", 1)
if file in mat.properties:
texture_paths.append(mat.properties[file]) # .encode("utf-8")
texture_path = True
else:
texture_paths.append("")
mat_diffuse = np.array(mat.properties["diffuse"])[:3]
mat_specular = np.array(mat.properties["specular"])[:3]
mat_ambient = np.array(mat.properties["ambient"])[:3] # phong shader
if "shininess" in mat.properties:
mat_shininess = max(
mat.properties["shininess"], 1
) # avoid the 0 shininess
else:
mat_shininess = 1
mesh_vertex = (
homotrans(transform, mesh.vertices) - offset
) # subtract the offset
if mesh.normals.shape[0] > 0:
mesh_normals = (
transform[:3, :3].dot(mesh.normals.transpose()).transpose()
) # normal stays the same
else:
mesh_normals = np.zeros_like(mesh_vertex)
mesh_normals[:, -1] = 1
if texture_path:
vertices.append(
np.concatenate(
[
mesh_vertex * scale,
mesh_normals,
mesh.texturecoords[0, :, :2],
],
axis=-1,
)
)
elif mesh.colors is not None and len(mesh.colors.shape) > 2:
vertices.append(
np.concatenate(
[mesh_vertex * scale, mesh_normals, mesh.colors[0, :, :3]],
axis=-1,
)
) #
texture_paths[-1] = "color"
else:
vertices.append(
np.concatenate([mesh_vertex * scale, mesh_normals], axis=-1)
)
faces.append(mesh.faces)
materials.append(
np.hstack([mat_diffuse, mat_specular, mat_ambient, mat_shininess])
)
for child in node.children:
recursive_load(
child, vertices, faces, materials, texture_paths, offset, scale, repeated
)
return vertices, faces, materials, texture_paths
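# Note (added for clarity): each array appended to `vertices` has 8 columns when the mesh
# carries texture coordinates (xyz + normal + uv), 9 columns when it carries per-vertex
# colors (xyz + normal + rgb), and 6 columns otherwise (xyz + normal); these layouts match
# the byte strides of 32, 36 and 24 used when the VBOs are bound in YCBRenderer.load_object.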
def loadTexture2(path):
"""
Load texture file
"""
img = Image.open(path).transpose(Image.FLIP_TOP_BOTTOM)
    img_data = np.frombuffer(img.tobytes(), np.uint8)  # np.fromstring is deprecated for binary data
width, height = img.size
return img, img_data
def loadTexture(path):
"""
Load texture file
"""
img = Image.open(path).transpose(Image.FLIP_TOP_BOTTOM)
    img_data = np.frombuffer(img.tobytes(), np.uint8)  # np.fromstring is deprecated for binary data
width, height = img.size
texture = GL.glGenTextures(1)
GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
GL.glTexParameterf(
GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR_MIPMAP_LINEAR
)
GL.glTexParameterf(
GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_REPEAT
) # .GL_CLAMP_TO_EDGE GL_REPEAT
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_REPEAT)
if img.mode == "RGBA":
GL.glTexImage2D(
GL.GL_TEXTURE_2D,
0,
GL.GL_RGBA,
width,
height,
0,
GL.GL_RGBA,
GL.GL_UNSIGNED_BYTE,
img_data,
)
else:
GL.glTexImage2D(
GL.GL_TEXTURE_2D,
0,
GL.GL_RGB,
width,
height,
0,
GL.GL_RGB,
GL.GL_UNSIGNED_BYTE,
img_data,
)
GL.glGenerateMipmap(GL.GL_TEXTURE_2D)
return texture
def bindTexture(imgs):
"""
Load texture file
"""
all_textures = []
for img in imgs:
textures = []
for item in img:
if len(item) < 2:
textures.append([])
continue
(img, img_data) = item
width, height = img.size
texture = GL.glGenTextures(1)
GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
GL.glTexParameterf(
GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR_MIPMAP_LINEAR
)
GL.glTexParameterf(
GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_REPEAT
) # .GL_CLAMP_TO_EDGE GL_REPEAT
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_REPEAT)
if img.mode == "RGBA":
GL.glTexImage2D(
GL.GL_TEXTURE_2D,
0,
GL.GL_RGBA,
width,
height,
0,
GL.GL_RGBA,
GL.GL_UNSIGNED_BYTE,
img_data,
)
else:
GL.glTexImage2D(
GL.GL_TEXTURE_2D,
0,
GL.GL_RGB,
width,
height,
0,
GL.GL_RGB,
GL.GL_UNSIGNED_BYTE,
img_data,
)
GL.glGenerateMipmap(GL.GL_TEXTURE_2D)
textures.append(texture)
all_textures.append(textures)
return all_textures
class YCBRenderer:
def __init__(
self,
width=512,
height=512,
gpu_id=0,
render_marker=False,
robot="panda_arm",
offset=True,
reinit=False,
parallel_load_mesh=False,
parallel_textures=False
):
self.render_marker = render_marker
self.VAOs = []
self.VBOs = []
self.materials = []
self.textures = []
self.is_textured = []
self.is_materialed = []
self.is_colored = []
self.objects = []
self.texUnitUniform = None
self.width = width
self.height = height
self.faces = []
self.poses_trans = []
self.poses_rot = []
self.instances = []
self.parallel_textures = parallel_textures
self.parallel_load_mesh = parallel_load_mesh
self.robot = robot
self._offset_map = None
self.click_obj_idx = -1
self.world_place_point_pos = np.zeros([3, 1])
self.click_obj_name = None
self.click_pix_loc = None
self.place_click_pix_loc = None
self.click_pose = None
if (robot == "panda_arm" or robot == "baxter") and offset:
self._offset_map = self.load_offset()
if gpu_id == -1:
from gibson2.core.render.mesh_renderer import CppMeshRenderer
self.r = CppMeshRenderer.CppMeshRenderer(width, height, 0)
else:
self.r = CppYCBRenderer.CppYCBRenderer(
width, height, get_available_devices()[gpu_id]
)
self.r.init()
self.glstring = GL.glGetString(GL.GL_VERSION)
from OpenGL.GL import shaders
self.shaders = shaders
self.colors = []
self.lightcolor = [1, 1, 1]
self.worldlight = [[0.2, 0, 0.2], [0.2, 0.2, 0], [0, 0.5, 1], [0.5, 0, 1]]
cur_dir = os.path.dirname(os.path.abspath(__file__))
vertexShader = self.shaders.compileShader(
open(os.path.join(cur_dir, "shaders/vert.shader")).readlines(),
GL.GL_VERTEX_SHADER,
)
fragmentShader = self.shaders.compileShader(
open(os.path.join(cur_dir, "shaders/frag.shader")).readlines(),
GL.GL_FRAGMENT_SHADER,
)
vertexShader_textureMat = self.shaders.compileShader(
open(os.path.join(cur_dir, "shaders/vert_blinnphong.shader")).readlines(),
GL.GL_VERTEX_SHADER,
)
fragmentShader_textureMat = self.shaders.compileShader(
open(os.path.join(cur_dir, "shaders/frag_blinnphong.shader")).readlines(),
GL.GL_FRAGMENT_SHADER,
)
vertexShader_textureless = self.shaders.compileShader(
open(os.path.join(cur_dir, "shaders/vert_textureless.shader")).readlines(),
GL.GL_VERTEX_SHADER,
)
fragmentShader_textureless = self.shaders.compileShader(
open(os.path.join(cur_dir, "shaders/frag_textureless.shader")).readlines(),
GL.GL_FRAGMENT_SHADER,
)
vertexShader_material = self.shaders.compileShader(
open(os.path.join(cur_dir, "shaders/vert_mat.shader")).readlines(),
GL.GL_VERTEX_SHADER,
)
fragmentShader_material = self.shaders.compileShader(
open(os.path.join(cur_dir, "shaders/frag_mat.shader")).readlines(),
GL.GL_FRAGMENT_SHADER,
)
vertexShader_simple = self.shaders.compileShader(
open(os.path.join(cur_dir, "shaders/vert_simple.shader")).readlines(),
GL.GL_VERTEX_SHADER,
)
fragmentShader_simple = self.shaders.compileShader(
open(os.path.join(cur_dir, "shaders/frag_simple.shader")).readlines(),
GL.GL_FRAGMENT_SHADER,
)
vertexShader_simple_color = self.shaders.compileShader(
open(os.path.join(cur_dir, "shaders/vert_simple_color.shader")).readlines(),
GL.GL_VERTEX_SHADER,
)
fragmentShader_simple_color = self.shaders.compileShader(
open(os.path.join(cur_dir, "shaders/frag_simple_color.shader")).readlines(),
GL.GL_FRAGMENT_SHADER,
)
self.shaderProgram = self.shaders.compileProgram(vertexShader, fragmentShader)
self.shaderProgram_textureless = self.shaders.compileProgram(
vertexShader_textureless, fragmentShader_textureless
)
self.shaderProgram_simple = self.shaders.compileProgram(
vertexShader_simple, fragmentShader_simple
)
self.shaderProgram_simple_color = self.shaders.compileProgram(
vertexShader_simple_color, fragmentShader_simple_color
)
self.shaderProgram_material = self.shaders.compileProgram(
vertexShader_material, fragmentShader_material
)
self.shaderProgram_textureMat = self.shaders.compileProgram(
vertexShader_textureMat, fragmentShader_textureMat
)
vertexShader_textureMatNormal = self.shaders.compileShader(
open(
os.path.join(cur_dir, "shaders/vert_blinnphong_normal.shader")
).readlines(),
GL.GL_VERTEX_SHADER,
)
geometryShader_textureMatNormal = self.shaders.compileShader(
open(
os.path.join(cur_dir, "shaders/geo_blinnphong_normal.shader")
).readlines(),
GL.GL_GEOMETRY_SHADER,
)
fragmentShader_textureMatNormal = self.shaders.compileShader(
open(
os.path.join(cur_dir, "shaders/frag_blinnphong_normal.shader")
).readlines(),
GL.GL_FRAGMENT_SHADER,
)
self.shaderProgram_textureMatNormal = self.shaders.compileProgram(
vertexShader_textureMatNormal,
geometryShader_textureMatNormal,
fragmentShader_textureMatNormal,
)
self.texUnitUniform_textureMat = GL.glGetUniformLocation(
self.shaderProgram_textureMat, "texUnit"
)
self.bind_texture_buffer()
self.lightpos = [0, 0, 0]
self.fov = 20
self.camera = [1, 0, 0]
self.target = [0, 0, 0]
self.up = [0, 0, 1]
self.lineVAOs = []
self.pointVAOs = []
self.coordVAOs = []
P = perspective(self.fov, float(self.width) / float(self.height), 0.01, 100)
V = lookat(self.camera, self.target, up=self.up)
self.V = np.ascontiguousarray(V, np.float32)
self.P = np.ascontiguousarray(P, np.float32)
self.grid = self.generate_grid()
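    # Minimal usage sketch (illustrative; the object path, intrinsics and pose below are
    # assumptions, not values from this file):
    #   renderer = YCBRenderer(width=640, height=480, gpu_id=0)
    #   renderer.load_objects(['models/003_cracker_box/textured_simple.obj'])
    #   renderer.set_projection_matrix(640, 480, 525.0, 525.0, 320.0, 240.0, 0.01, 10.0)
    #   renderer.set_poses([[0.0, 0.0, 0.5, 1.0, 0.0, 0.0, 0.0]])
    #   rgb, seg, normal, pc = renderer.render([0], None, None, cpu=True)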
def generate_grid(self):
"""
Generate a grid as the plane background
"""
VAO = GL.glGenVertexArrays(1)
GL.glBindVertexArray(VAO)
vertexData = []
for i in np.arange(-1, 1, 0.05):
vertexData.append([i, 0, -1, 0, 0, 0, 0, 0])
vertexData.append([i, 0, 1, 0, 0, 0, 0, 0])
vertexData.append([1, 0, i, 0, 0, 0, 0, 0])
vertexData.append([-1, 0, i, 0, 0, 0, 0, 0])
vertexData = np.array(vertexData).astype(np.float32) * 3
# Need VBO for triangle vertices and texture UV coordinates
VBO = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, VBO)
GL.glBufferData(
GL.GL_ARRAY_BUFFER, vertexData.nbytes, vertexData, GL.GL_STATIC_DRAW
)
# enable array and set up data
positionAttrib = GL.glGetAttribLocation(self.shaderProgram_simple, "position")
GL.glEnableVertexAttribArray(0)
GL.glVertexAttribPointer(positionAttrib, 3, GL.GL_FLOAT, GL.GL_FALSE, 32, None)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
GL.glBindVertexArray(0)
return VAO
def generate_coordinate_axis(self, thickness=5, length=0.2, coordVAOs=None):
lines = []
basis_line = [np.zeros([3, 3]), np.eye(3) * length]
line_colors = [
np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]])
] * self.get_num_objects()
thicknesses = [thickness] * self.get_num_objects()
# lines [3 x n, 3 x n]
for i in range(len(self.poses_rot)):
linea = (
self.poses_rot[i][:3, :3].dot(basis_line[0])
+ self.poses_trans[i][[3], :3].T
)
lineb = (
self.poses_rot[i][:3, :3].dot(basis_line[1])
+ self.poses_trans[i][[3], :3].T
)
lines.append([linea.copy(), lineb.copy()])
return self.generate_lines((lines, line_colors, thicknesses, True))
def generate_lines(self, line_info, lineVAOs=None):
"""
Render lines with GL
"""
if line_info is not None:
lines, line_colors, thicknesses, create_new = line_info
for idx, (line, line_color, thickness) in enumerate(
zip(lines, line_colors, thicknesses)
):
GL.glLineWidth(thickness)
if create_new:
VAO = GL.glGenVertexArrays(1)
GL.glBindVertexArray(VAO)
vertexData = []
linea, lineb = line[0].T, line[1].T
line_num = len(lineb)
if type(line_color) is not np.ndarray:
line_color = (
np.tile(line_color, line_num).reshape(-1, 3) / 255.0
) # [np.array(line_color[0]) / 255.] * line_num
else:
line_color = line_color / 255.0
for i in np.arange(line_num):
vertexData.append(
[
linea[i][0],
linea[i][1],
linea[i][2],
0,
0,
0,
line_color[i][0],
line_color[i][1],
line_color[i][2],
]
)
vertexData.append(
[
lineb[i][0],
lineb[i][1],
lineb[i][2],
0,
0,
0,
line_color[i][0],
line_color[i][1],
line_color[i][2],
]
)
vertexData = np.array(vertexData).astype(np.float32)
# Need VBO for triangle vertices and texture UV coordinates
VBO = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, VBO)
GL.glBufferData(
GL.GL_ARRAY_BUFFER,
vertexData.nbytes,
vertexData,
GL.GL_STATIC_DRAW,
)
# enable array and set up data
positionAttrib = GL.glGetAttribLocation(
self.shaderProgram_simple_color, "position"
)
colorAttrib = GL.glGetAttribLocation(
self.shaderProgram_simple_color, "texCoords"
)
GL.glEnableVertexAttribArray(0)
GL.glVertexAttribPointer(
positionAttrib, 3, GL.GL_FLOAT, GL.GL_FALSE, 36, None
)
GL.glEnableVertexAttribArray(2)
GL.glVertexAttribPointer(
colorAttrib,
3,
GL.GL_FLOAT,
GL.GL_FALSE,
36,
ctypes.c_void_p(24),
)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
GL.glBindVertexArray(0)
line_num = line[0].shape[1] * 2
if lineVAOs is not None:
lineVAOs.append((VAO, line_num))
GL.glUseProgram(self.shaderProgram_simple_color)
GL.glBindVertexArray(VAO)
GL.glUniformMatrix4fv(
GL.glGetUniformLocation(self.shaderProgram_simple_color, "V"),
1,
GL.GL_TRUE,
self.V,
)
GL.glUniformMatrix4fv(
GL.glGetUniformLocation(self.shaderProgram_simple_color, "P"),
1,
GL.GL_FALSE,
self.P,
)
GL.glDrawElements(
GL.GL_LINES,
line_num,
GL.GL_UNSIGNED_INT,
                    np.arange(line_num, dtype=np.uint32),
)
GL.glBindVertexArray(0)
GL.glUseProgram(0)
if lineVAOs is not None:
for idx, (VAO, line_num) in enumerate(lineVAOs):
GL.glUseProgram(self.shaderProgram_simple_color)
GL.glBindVertexArray(VAO)
GL.glUniformMatrix4fv(
GL.glGetUniformLocation(self.shaderProgram_simple_color, "V"),
1,
GL.GL_TRUE,
self.V,
)
GL.glUniformMatrix4fv(
GL.glGetUniformLocation(self.shaderProgram_simple_color, "P"),
1,
GL.GL_FALSE,
self.P,
)
GL.glDrawElements(
GL.GL_LINES,
line_num,
GL.GL_UNSIGNED_INT,
                    np.arange(line_num, dtype=np.uint32),
)
GL.glBindVertexArray(0)
GL.glUseProgram(0)
GL.glLineWidth(1)
return lineVAOs
def generate_points(self, point_info, pointVAOs=None):
"""
Render points with GL
"""
if point_info is not None:
points, points_colors, thicknesses, create_new = point_info
for idx, (point, point_color, thickness) in enumerate(
zip(points, points_colors, thicknesses)
):
GL.glPointSize(thickness)
if create_new:
VAO = GL.glGenVertexArrays(1)
GL.glBindVertexArray(VAO)
vertexData = []
point = point.T
point_num = len(point)
if type(point_color) is not np.ndarray:
point_color = (
np.tile(point_color, point_num).reshape(-1, 3) / 255.0
) # [np.array(line_color[0]) / 255.] * line_num
else:
point_color = point_color / 255.0
for i in np.arange(point_num):
vertexData.append(
[
point[i][0],
point[i][1],
point[i][2],
0,
0,
0,
point_color[i][0],
point_color[i][1],
point_color[i][2],
]
)
vertexData = np.array(vertexData).astype(np.float32)
VBO = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, VBO)
GL.glBufferData(
GL.GL_ARRAY_BUFFER,
vertexData.nbytes,
vertexData,
GL.GL_STATIC_DRAW,
)
# enable array and set up data
positionAttrib = GL.glGetAttribLocation(
self.shaderProgram_simple_color, "position"
)
colorAttrib = GL.glGetAttribLocation(
self.shaderProgram_simple_color, "texCoords"
)
GL.glEnableVertexAttribArray(0)
GL.glVertexAttribPointer(
positionAttrib, 3, GL.GL_FLOAT, GL.GL_FALSE, 36, None
)
GL.glEnableVertexAttribArray(2)
GL.glVertexAttribPointer(
colorAttrib,
3,
GL.GL_FLOAT,
GL.GL_FALSE,
36,
ctypes.c_void_p(24),
)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
GL.glBindVertexArray(0)
point_num = point.shape[0]
if pointVAOs is not None:
pointVAOs.append((VAO, point_num, thickness))
GL.glUseProgram(self.shaderProgram_simple_color)
GL.glBindVertexArray(VAO)
GL.glUniformMatrix4fv(
GL.glGetUniformLocation(self.shaderProgram_simple_color, "V"),
1,
GL.GL_TRUE,
self.V,
)
GL.glUniformMatrix4fv(
GL.glGetUniformLocation(self.shaderProgram_simple_color, "P"),
1,
GL.GL_FALSE,
self.P,
)
GL.glDrawElements(
GL.GL_POINTS,
point_num,
GL.GL_UNSIGNED_INT,
                    np.arange(point_num, dtype=np.uint32),
)
GL.glBindVertexArray(0)
GL.glUseProgram(0)
GL.glPointSize(1)
if pointVAOs is not None:
for idx, (VAO, point_num, point_thickness) in enumerate(pointVAOs):
GL.glPointSize(point_thickness)
GL.glUseProgram(self.shaderProgram_simple_color)
GL.glBindVertexArray(VAO)
GL.glUniformMatrix4fv(
GL.glGetUniformLocation(self.shaderProgram_simple_color, "V"),
1,
GL.GL_TRUE,
self.V,
)
GL.glUniformMatrix4fv(
GL.glGetUniformLocation(self.shaderProgram_simple_color, "P"),
1,
GL.GL_FALSE,
self.P,
)
GL.glDrawElements(
GL.GL_POINTS,
point_num,
GL.GL_UNSIGNED_INT,
                    np.arange(point_num, dtype=np.uint32),
)
GL.glBindVertexArray(0)
GL.glUseProgram(0)
GL.glPointSize(1)
GL.glPointSize(1)
return pointVAOs
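    # Illustrative usage (added): draw a small point cloud, where `pts` is an assumed
    # (3, N) array in world coordinates and the color is a per-point RGB array in [0, 255]:
    #   point_info = ([pts], [np.tile([255, 0, 0], (pts.shape[1], 1))], [3], True)
    #   self.pointVAOs = self.generate_points(point_info, pointVAOs=self.pointVAOs)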
def bind_texture_buffer(self):
"""
bind texture buffer with GL
"""
self.fbo = GL.glGenFramebuffers(1)
self.color_tex = GL.glGenTextures(1)
self.color_tex_2 = GL.glGenTextures(1)
self.color_tex_3 = GL.glGenTextures(1)
self.color_tex_4 = GL.glGenTextures(1)
self.color_tex_5 = GL.glGenTextures(1)
self.depth_tex = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.color_tex)
GL.glTexImage2D(
GL.GL_TEXTURE_2D,
0,
GL.GL_RGBA32F,
self.width,
self.height,
0,
GL.GL_RGBA,
GL.GL_UNSIGNED_BYTE,
None,
)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.color_tex_2)
GL.glTexImage2D(
GL.GL_TEXTURE_2D,
0,
GL.GL_RGBA32F,
self.width,
self.height,
0,
GL.GL_RGBA,
GL.GL_UNSIGNED_BYTE,
None,
)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.color_tex_3)
GL.glTexImage2D(
GL.GL_TEXTURE_2D,
0,
GL.GL_RGBA32F,
self.width,
self.height,
0,
GL.GL_RGBA,
GL.GL_UNSIGNED_BYTE,
None,
)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.color_tex_4)
GL.glTexImage2D(
GL.GL_TEXTURE_2D,
0,
GL.GL_RGBA32F,
self.width,
self.height,
0,
GL.GL_RGBA,
GL.GL_FLOAT,
None,
)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.color_tex_5)
GL.glTexImage2D(
GL.GL_TEXTURE_2D,
0,
GL.GL_RGBA32F,
self.width,
self.height,
0,
GL.GL_RGBA,
GL.GL_FLOAT,
None,
)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.depth_tex)
GL.glTexImage2D.wrappedOperation(
GL.GL_TEXTURE_2D,
0,
GL.GL_DEPTH24_STENCIL8,
self.width,
self.height,
0,
GL.GL_DEPTH_STENCIL,
GL.GL_UNSIGNED_INT_24_8,
None,
)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
GL.glFramebufferTexture2D(
GL.GL_FRAMEBUFFER,
GL.GL_COLOR_ATTACHMENT0,
GL.GL_TEXTURE_2D,
self.color_tex,
0,
)
GL.glFramebufferTexture2D(
GL.GL_FRAMEBUFFER,
GL.GL_COLOR_ATTACHMENT1,
GL.GL_TEXTURE_2D,
self.color_tex_2,
0,
)
GL.glFramebufferTexture2D(
GL.GL_FRAMEBUFFER,
GL.GL_COLOR_ATTACHMENT2,
GL.GL_TEXTURE_2D,
self.color_tex_3,
0,
)
GL.glFramebufferTexture2D(
GL.GL_FRAMEBUFFER,
GL.GL_COLOR_ATTACHMENT3,
GL.GL_TEXTURE_2D,
self.color_tex_4,
0,
)
GL.glFramebufferTexture2D(
GL.GL_FRAMEBUFFER,
GL.GL_COLOR_ATTACHMENT4,
GL.GL_TEXTURE_2D,
self.color_tex_5,
0,
)
GL.glFramebufferTexture2D(
GL.GL_FRAMEBUFFER,
GL.GL_DEPTH_STENCIL_ATTACHMENT,
GL.GL_TEXTURE_2D,
self.depth_tex,
0,
)
GL.glViewport(0, 0, self.width, self.height)
GL.glDrawBuffers(
5,
[
GL.GL_COLOR_ATTACHMENT0,
GL.GL_COLOR_ATTACHMENT1,
GL.GL_COLOR_ATTACHMENT2,
GL.GL_COLOR_ATTACHMENT3,
GL.GL_COLOR_ATTACHMENT4,
],
)
assert (
GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER) == GL.GL_FRAMEBUFFER_COMPLETE
)
def load_object(
self, obj_path, texture_path, scene, textures_i, scale=1, data=None
):
"""
load a single object and bind to buffer
"""
is_texture = []
is_materialed = True
textures = []
start_time = time.time()
(
vertices,
faces,
materials,
texture_paths,
) = scene # self.load_mesh(obj_path, scale, scene)
self.materials.append(materials)
is_textured = []
is_colored = []
if not self.parallel_textures:
for texture_path in texture_paths:
is_texture = False
is_color = False
if texture_path == "":
textures.append(texture_path)
elif texture_path == "color":
is_color = True
textures.append(texture_path)
else:
texture_path = os.path.join(
"/".join(obj_path.split("/")[:-1]), texture_path
)
texture = loadTexture(texture_path)
textures.append(texture)
is_texture = True
is_textured.append(is_texture)
is_colored.append(is_color)
else:
textures, is_colored, is_textured = textures_i
self.textures.append(textures) # textures
self.is_textured.append(is_textured) # is_textured
self.is_materialed.append(is_materialed)
if is_materialed:
for idx in range(len(vertices)):
vertexData = vertices[idx].astype(np.float32)
face = faces[idx]
VAO = GL.glGenVertexArrays(1)
GL.glBindVertexArray(VAO)
# Need VBO for triangle vertices and texture UV coordinates
VBO = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, VBO)
GL.glBufferData(
GL.GL_ARRAY_BUFFER, vertexData.nbytes, vertexData, GL.GL_STATIC_DRAW
)
if is_textured[idx]:
positionAttrib = GL.glGetAttribLocation(
self.shaderProgram_textureMat, "position"
)
normalAttrib = GL.glGetAttribLocation(
self.shaderProgram_textureMat, "normal"
)
coordsAttrib = GL.glGetAttribLocation(
self.shaderProgram_textureMat, "texCoords"
)
elif is_colored[idx]:
positionAttrib = GL.glGetAttribLocation(
self.shaderProgram_textureless, "position"
)
normalAttrib = GL.glGetAttribLocation(
self.shaderProgram_textureless, "normal"
)
colorAttrib = GL.glGetAttribLocation(
self.shaderProgram_textureless, "color"
)
else:
positionAttrib = GL.glGetAttribLocation(
self.shaderProgram_material, "position"
)
normalAttrib = GL.glGetAttribLocation(
self.shaderProgram_material, "normal"
)
GL.glEnableVertexAttribArray(0)
GL.glEnableVertexAttribArray(1)
# the last parameter is a pointer
if is_textured[idx]:
GL.glEnableVertexAttribArray(2)
GL.glVertexAttribPointer(
positionAttrib, 3, GL.GL_FLOAT, GL.GL_FALSE, 32, None
)
GL.glVertexAttribPointer(
normalAttrib,
3,
GL.GL_FLOAT,
GL.GL_FALSE,
32,
ctypes.c_void_p(12),
)
GL.glVertexAttribPointer(
coordsAttrib,
2,
GL.GL_FLOAT,
GL.GL_TRUE,
32,
ctypes.c_void_p(24),
)
elif is_colored[idx]:
GL.glEnableVertexAttribArray(2)
GL.glVertexAttribPointer(
positionAttrib, 3, GL.GL_FLOAT, GL.GL_FALSE, 36, None
)
GL.glVertexAttribPointer(
normalAttrib,
3,
GL.GL_FLOAT,
GL.GL_FALSE,
36,
ctypes.c_void_p(12),
)
GL.glVertexAttribPointer(
colorAttrib,
3,
GL.GL_FLOAT,
GL.GL_FALSE,
36,
ctypes.c_void_p(24),
)
else:
GL.glVertexAttribPointer(
positionAttrib, 3, GL.GL_FLOAT, GL.GL_FALSE, 24, None
)
GL.glVertexAttribPointer(
normalAttrib,
3,
GL.GL_FLOAT,
GL.GL_FALSE,
24,
ctypes.c_void_p(12),
)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
GL.glBindVertexArray(0)
self.VAOs.append(VAO)
self.VBOs.append(VBO)
self.faces.append(face)
self.objects.append(obj_path)
self.poses_rot.append(np.eye(4))
self.poses_trans.append(np.eye(4))
def load_offset(self):
"""
Load offsets, mainly for robots
"""
try:
cur_path = os.path.abspath(os.path.dirname(__file__))
relative_path = "../"
offset_file = os.path.join(cur_path, relative_path + "data/robots/", "center_offset.txt")
if not os.path.exists(offset_file):
relative_path = relative_path + "../"
offset_file = os.path.join(cur_path, relative_path + "data/robots/", "center_offset.txt")
model_file = os.path.join(cur_path, relative_path + "data/robots/", "models.txt")
with open(model_file, "r+") as file:
content = file.readlines()
model_paths = [path.strip().split("/")[-1] for path in content]
offset = np.loadtxt(offset_file).astype(np.float32)
offset_map = {}
for i in range(offset.shape[0]):
offset_map[model_paths[i]] = offset[i, :]
return offset_map
except:
print("renderer offsets are not used")
return {}
def parallel_assimp_load(self, mesh_files, scales):
"""
use multiprocessing to load objects
"""
if len(mesh_files) == 0:
return None, None
if not self.parallel_load_mesh:
scenes = [load_mesh_single([mesh_files[i], scales[i], self._offset_map])
for i in range(len(mesh_files))]
else:
p = multiprocessing.Pool(processes=4)
scenes = p.map_async(
load_mesh_single,
[
[mesh_files[i], scales[i], self._offset_map]
for i in range(len(mesh_files))
],
).get()
p.terminate()
textures = [0 for _ in scenes]
if self.parallel_textures:
p = multiprocessing.pool.ThreadPool(
processes=4
)
textures = p.map_async(
load_texture_single,
[[mesh_files[i], scenes[i][-1]] for i in range(len(scenes))],
).get()
p.terminate()
texture_img = [t[0] for t in textures]
texture_id = bindTexture(texture_img)
for i, id in enumerate(texture_id):
textures[i][0] = id
return scenes, textures
def load_objects(
self,
obj_paths,
texture_paths=None,
colors=None,
scale=None,
data=None,
add=False,
):
if scale is None:
scale = [1] * len(obj_paths)
if texture_paths is None:
texture_paths = [""] * len(obj_paths)
if colors is None:
colors = [(i,0,0) for i in range(len(obj_paths))] # get_mask_colors(len(obj_paths))
self.colors.extend(colors)
start_time = time.time()
scenes, textures = self.parallel_assimp_load(obj_paths, scale)
# print("assimp time:", time.time() - start_time)
for i in range(len(obj_paths)):
if len(self.instances) == 0:
self.instances.append(0)
else:
self.instances.append(self.instances[-1] + len(self.materials[-1]))
self.load_object(
obj_paths[i], texture_paths[i], scenes[i], textures[i], scale[i], data
)
def set_camera(self, camera, target, up):
self.camera = camera
self.target = target
self.up = up
V = lookat(self.camera, self.target, up=self.up)
self.V = np.ascontiguousarray(V, np.float32)
def set_camera_default(self):
self.V = np.eye(4)
def set_fov(self, fov):
self.fov = fov
# this is vertical fov
P = perspective(self.fov, float(self.width) / float(self.height), 0.01, 100)
self.P = np.ascontiguousarray(P, np.float32)
def set_projection_matrix(self, w, h, fu, fv, u0, v0, znear, zfar):
L = -(u0) * znear / fu
R = +(w - u0) * znear / fu
T = -(v0) * znear / fv
B = +(h - v0) * znear / fv
P = np.zeros((4, 4), dtype=np.float32)
P[0, 0] = 2 * znear / (R - L)
P[1, 1] = 2 * znear / (T - B)
P[2, 0] = (R + L) / (L - R)
P[2, 1] = (T + B) / (B - T)
P[2, 2] = (zfar + znear) / (zfar - znear)
P[2, 3] = 1.0
P[3, 2] = (2 * zfar * znear) / (znear - zfar)
self.P = P
# set intrinsics
self.intrinsic_matrix = np.eye(3)
self.intrinsic_matrix[0, 0] = fu
self.intrinsic_matrix[1, 1] = fv
self.intrinsic_matrix[0, 2] = u0
self.intrinsic_matrix[1, 2] = v0
def set_light_color(self, color):
self.lightcolor = color
def set_light_pos(self, light):
self.lightpos = light
def render_grid(self):
GL.glUseProgram(self.shaderProgram_simple)
GL.glBindVertexArray(self.grid)
GL.glUniformMatrix4fv(
GL.glGetUniformLocation(self.shaderProgram_simple, "V"),
1,
GL.GL_TRUE,
self.V,
)
GL.glUniformMatrix4fv(
GL.glGetUniformLocation(self.shaderProgram_simple, "P"),
1,
GL.GL_FALSE,
self.P,
)
GL.glDrawElements(
            GL.GL_LINES, 160, GL.GL_UNSIGNED_INT, np.arange(160, dtype=np.uint32)
)
GL.glBindVertexArray(0)
GL.glUseProgram(0)
def render(
self,
cls_indexes,
image_tensor,
seg_tensor,
point_info=None,
color_idx=None,
color_list=None,
normal_tensor=None,
line_info=None,
pc1_tensor=None,
pc2_tensor=None,
cpu=False,
only_rgb=False,
white_bg=True,
draw_grid=False,
polygon_mode=0,
coordinate_axis=0,
draw_normal=False,
point_capture_toggle=False,
):
frame = 0
if white_bg:
GL.glClearColor(1.0, 1.0, 1.0, 1.0)
else:
GL.glClearColor(0, 0, 0, 1)
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
GL.glEnable(GL.GL_DEPTH_TEST)
# print(len(cls_indexes), len(self.pointVAOs), len(point_info))
if draw_grid:
if not hasattr(self, "grid"):
self.grid = self.generate_grid()
self.render_grid()
if line_info is not None or len(self.lineVAOs) > 0:
self.lineVAOs = self.generate_lines(line_info, lineVAOs=self.lineVAOs)
if point_info is not None or len(self.pointVAOs) > 0:
self.pointVAOs = self.generate_points(point_info, pointVAOs=self.pointVAOs)
if coordinate_axis > 0:
self.coordVAOs = self.generate_coordinate_axis(coordVAOs=self.coordVAOs)
if polygon_mode == 0:
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
if polygon_mode == 1:
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
if polygon_mode == 2:
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_POINT)
size = 0
color_cnt = 0
for render_cnt in range(2):
if point_capture_toggle and point_info is not None:
break
for i in range(len(cls_indexes)):
index = cls_indexes[i]
is_materialed = self.is_materialed[index]
if is_materialed:
num = len(self.materials[index])
for idx in range(num):
is_texture = self.is_textured[index][idx] # index
if is_texture:
shader = (
self.shaderProgram_textureMat
) # self.shaderProgram_textureMat
elif self.textures[index][idx] == "color":
shader = self.shaderProgram_textureless
else:
shader = self.shaderProgram_material
GL.glUseProgram(shader)
if draw_normal and render_cnt == 1:
shader = self.shaderProgram_textureMatNormal
GL.glUseProgram(shader)
GL.glUniform3f(
GL.glGetUniformLocation(
self.shaderProgram_textureMatNormal, "normal_color"
),
*[0.0, 1.0, 0.0]
)
GL.glUniform1f(
GL.glGetUniformLocation(
self.shaderProgram_textureMatNormal,
"normal_magnitude",
),
0.03,
)
GL.glUniform1f(
GL.glGetUniformLocation(
self.shaderProgram_textureMatNormal, "face_normal"
),
1.0,
)
GL.glUniformMatrix4fv(
GL.glGetUniformLocation(shader, "V"), 1, GL.GL_TRUE, self.V
)
GL.glUniformMatrix4fv(
GL.glGetUniformLocation(shader, "P"), 1, GL.GL_FALSE, self.P
)
GL.glUniformMatrix4fv(
GL.glGetUniformLocation(shader, "pose_trans"),
1,
GL.GL_FALSE,
self.poses_trans[i],
)
GL.glUniformMatrix4fv(
GL.glGetUniformLocation(shader, "pose_rot"),
1,
GL.GL_TRUE,
self.poses_rot[i],
)
GL.glUniform3f(
GL.glGetUniformLocation(shader, "light_position"),
*self.lightpos
)
GL.glUniform3f(
GL.glGetUniformLocation(shader, "instance_color"),
*self.colors[index]
)
GL.glUniform3f(
GL.glGetUniformLocation(shader, "light_color"),
*self.lightcolor
)
if color_idx is None or i not in color_idx:
GL.glUniform3f(
GL.glGetUniformLocation(shader, "mat_diffuse"),
*self.materials[index][idx][:3]
)
GL.glUniform3f(
GL.glGetUniformLocation(shader, "mat_specular"),
*self.materials[index][idx][3:6]
)
GL.glUniform3f(
GL.glGetUniformLocation(shader, "mat_ambient"),
*self.materials[index][idx][6:9]
)
GL.glUniform1f(
GL.glGetUniformLocation(shader, "mat_shininess"),
self.materials[index][idx][-1],
)
else:
GL.glUniform3f(
GL.glGetUniformLocation(shader, "mat_diffuse"),
*color_list[color_cnt]
)
GL.glUniform3f(
GL.glGetUniformLocation(shader, "mat_specular"),
*color_list[color_cnt]
)
GL.glUniform3f(
GL.glGetUniformLocation(shader, "mat_ambient"),
*color_list[color_cnt]
)
GL.glUniform1f(
GL.glGetUniformLocation(shader, "mat_shininess"), 100
)
color_cnt += 1
try:
if is_texture:
GL.glActiveTexture(GL.GL_TEXTURE0)
GL.glBindTexture(
GL.GL_TEXTURE_2D, self.textures[index][idx]
) # self.instances[index]
# GL.glUniform1i(self.texUnitUniform_textureMat, 0)
GL.glBindVertexArray(
self.VAOs[self.instances[index] + idx]
) #
GL.glDrawElements(
GL.GL_TRIANGLES,
self.faces[self.instances[index] + idx].size,
GL.GL_UNSIGNED_INT,
self.faces[self.instances[index] + idx],
)
finally:
GL.glBindVertexArray(0)
GL.glUseProgram(0)
if not draw_normal:
break
GL.glDisable(GL.GL_DEPTH_TEST)
# mapping
if not cpu:
self.r.map_tensor(
int(self.color_tex),
int(self.width),
int(self.height),
image_tensor.data_ptr(),
)
self.r.map_tensor(
int(self.color_tex_3),
int(self.width),
int(self.height),
seg_tensor.data_ptr(),
)
if normal_tensor is not None:
self.r.map_tensor(
int(self.color_tex_2),
int(self.width),
int(self.height),
normal_tensor.data_ptr(),
)
if pc1_tensor is not None:
self.r.map_tensor(
int(self.color_tex_4),
int(self.width),
int(self.height),
pc1_tensor.data_ptr(),
)
if pc2_tensor is not None:
self.r.map_tensor(
int(self.color_tex_5),
int(self.width),
int(self.height),
pc2_tensor.data_ptr(),
)
else:
GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
frame = GL.glReadPixels(
0, 0, self.width, self.height, GL.GL_BGRA, GL.GL_FLOAT
)
frame = frame.reshape(self.height, self.width, 4)[::-1, :]
if only_rgb:
return [frame]
GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT1)
normal = GL.glReadPixels(
0, 0, self.width, self.height, GL.GL_BGRA, GL.GL_FLOAT
)
normal = normal.reshape(self.height, self.width, 4)[::-1, :]
GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT2)
seg = GL.glReadPixels(
0, 0, self.width, self.height, GL.GL_BGRA, GL.GL_FLOAT
)
seg = seg.reshape(self.height, self.width, 4)[::-1, :]
# points in camera coordinate
GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT4)
pc3 = GL.glReadPixels(
0, 0, self.width, self.height, GL.GL_RGBA, GL.GL_FLOAT
)
pc3 = pc3.reshape(self.height, self.width, 4)[::-1, :]
pc3 = pc3[:, :, :4] # 3
return [frame, seg, normal, pc3]
def get_num_objects(self):
return len(self.objects)
def set_poses(self, poses):
self.poses_rot = [np.ascontiguousarray(quat2rotmat(item[3:])) for item in poses]
self.poses_trans = [np.ascontiguousarray(xyz2mat(item[:3])) for item in poses]
def set_allocentric_poses(self, poses):
self.poses_rot = []
self.poses_trans = []
for pose in poses:
x, y, z = pose[:3]
quat_input = pose[3:]
dx = np.arctan2(x, -z)
dy = np.arctan2(y, -z)
quat = euler2quat(-dy, -dx, 0, axes="sxyz")
quat = qmult(quat, quat_input)
self.poses_rot.append(np.ascontiguousarray(quat2rotmat(quat)))
self.poses_trans.append(np.ascontiguousarray(xyz2mat(pose[:3])))
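    # Note (added for clarity): an allocentric pose stores the orientation relative to the
    # viewing ray through the object center; the two arctan2 terms above recover that ray's
    # azimuth and elevation so the quaternion can be converted back to an egocentric
    # (camera-frame) rotation before rendering.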
def transform_vector(self, vec):
vec = np.array(vec)
zeros = np.zeros_like(vec)
vec_t = self.transform_point(vec)
zero_t = self.transform_point(zeros)
v = vec_t - zero_t
return v
def transform_point(self, vec):
vec = np.array(vec)
if vec.shape[0] == 3:
v = self.V.dot(np.concatenate([vec, np.array([1])]))
return v[:3] / v[-1]
elif vec.shape[0] == 4:
v = self.V.dot(vec)
return v / v[-1]
else:
return None
def transform_pose(self, pose):
pose_rot = quat2rotmat(pose[3:])
pose_trans = xyz2mat(pose[:3])
pose_cam = self.V.dot(pose_trans.T).dot(pose_rot).T
return np.concatenate([mat2xyz(pose_cam), safemat2quat(pose_cam[:3, :3].T)])
def get_poses(self):
mat = [
self.V.dot(self.poses_trans[i].T).dot(self.poses_rot[i]).T
for i in range(len(self.poses_rot))
]
poses = [
np.concatenate([mat2xyz(item), safemat2quat(item[:3, :3].T)])
for item in mat
]
return poses
def get_world_poses(self):
mat = [
self.poses_trans[i].T.dot(self.poses_rot[i].T)
for i in range(len(self.poses_rot))
]
return mat
def get_egocentric_poses(self):
return self.get_poses()
def get_allocentric_poses(self):
poses = self.get_poses()
poses_allocentric = []
for pose in poses:
dx = np.arctan2(pose[0], -pose[2])
dy = np.arctan2(pose[1], -pose[2])
quat = euler2quat(-dy, -dx, 0, axes="sxyz")
quat = qmult(qinverse(quat), pose[3:])
poses_allocentric.append(np.concatenate([pose[:3], quat]))
return poses_allocentric
def release(self):
print(self.glstring)
self.clean()
self.r.release()
def clean(self):
        GL.glDeleteTextures(
            [
                self.color_tex,
                self.color_tex_2,
                self.color_tex_3,
                self.color_tex_4,
                self.color_tex_5,  # was missing from the original delete list
                self.depth_tex,
            ]
        )
        self.color_tex = None
        self.color_tex_2 = None
        self.color_tex_3 = None
        self.color_tex_4 = None
        self.color_tex_5 = None
        self.depth_tex = None
GL.glDeleteFramebuffers(1, [self.fbo])
self.fbo = None
        GL.glDeleteVertexArrays(len(self.VAOs), self.VAOs)  # VAOs are deleted with glDeleteVertexArrays
        self.VAOs = []
        GL.glDeleteBuffers(len(self.VBOs), self.VBOs)
        self.VBOs = []
self.clean_line_point()
def flatten(container):
for i in container:
if isinstance(i, (list,tuple)):
for j in flatten(i):
yield j
else:
yield i
textures_ = list(flatten(self.textures))
if len(textures_) > 0:
GL.glDeleteTextures(textures_)
self.textures = []
self.objects = []
self.faces = []
self.poses_trans = []
self.poses_rot = []
self.colors = []
    def clean_line_point(self):
        if self.lineVAOs is not None and len(self.lineVAOs) > 0:
            # each entry is a (VAO, line_num) tuple; delete only the VAO handles
            line_vaos = [vao for vao, _ in self.lineVAOs]
            GL.glDeleteVertexArrays(len(line_vaos), line_vaos)
            self.lineVAOs = []
        if self.pointVAOs is not None and len(self.pointVAOs) > 0:
            # print(len(self.pointVAOs), self.pointVAOs)
            # each entry is a (VAO, point_num, thickness) tuple; delete only the VAO handles
            point_vaos = [vao for vao, _, _ in self.pointVAOs]
            GL.glDeleteVertexArrays(len(point_vaos), point_vaos)
            self.pointVAOs = []
def get_num_instances(self):
return len(self.instances)
def capture_point(self, frames):
point_mask = frames[1]
bg_mask = (point_mask[..., :3].sum(-1) != 3) * (
point_mask[..., :3].sum(-1) != 0
)
point_pos = frames[3][..., :3].reshape(-1, 3)[bg_mask.reshape(-1)].T
point_pos = self.V[:3, :3].T.dot(point_pos - self.V[:3, [3]])
point_color = frames[0][..., :3].reshape(-1, 3)[bg_mask.reshape(-1)] * 255
point_size = [1]
point_info = [[point_pos], [point_color[:, [2, 1, 0]]], point_size, True]
return point_info
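    # Illustrative usage (added; assumes a cpu-mode render so `frames` holds numpy arrays
    # [rgb, seg, normal, points]):
    #   frames = renderer.render(cls_indexes, None, None, cpu=True)
    #   point_info = renderer.capture_point(frames)
    #   renderer.pointVAOs = renderer.generate_points(point_info, pointVAOs=renderer.pointVAOs)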
def vis(
self,
poses,
cls_indexes,
color_idx=None,
color_list=None,
cam_pos=[0, 0, 2.0],
V=None,
distance=2.0,
shifted_pose=None,
interact=0,
visualize_context={},
window_name="test",
):
"""
a complicated visualization function
"""
theta = 0
q = 0
cam_x, cam_y, cam_z = cam_pos
sample = []
new_poses = []
# center view
if len(poses) > 0:
origin = np.linalg.inv(unpack_pose(poses[0]))
if shifted_pose is not None:
origin = np.linalg.inv(shifted_pose)
for pose in poses:
pose = unpack_pose(pose)
pose = origin.dot(pose)
new_poses.append(pack_pose(pose))
poses = new_poses
self.set_poses(poses)
cam_pos = np.array([cam_x, cam_y, cam_z])
self.set_camera(cam_pos, cam_pos * 2, [0, 1, 0])
if V is not None:
V = np.array(V)
self.V = V[...]
cam_pos = V[:3, 3]
self.set_light_pos(cam_pos)
mouse_events = {
"view_dir": -self.V[:3, 3],
"view_origin": np.array([0, 0, 0.0]), # anchor
"_mouse_ix": -1,
"_mouse_iy": -1,
"down": False,
"shift": False,
"trackball": Trackball(self.width, self.height, cam_pos=cam_pos),
}
def update_dir():
view_dir = mouse_events["view_origin"] - self.V[:3, 3]
self.set_camera(
self.V[:3, 3], self.V[:3, 3] - view_dir, [0, 1, 0]
) # would shift along the sphere
self.V[...] = self.V[...].dot(mouse_events["trackball"].property["model"].T)
if V is not None:
V[...] = self.V
def change_dir(
event, x, y, flags, param
): # fix later to be a finalized version
if event == cv2.EVENT_LBUTTONDOWN:
mouse_events["_mouse_ix"], mouse_events["_mouse_iy"] = x, y
mouse_events["down"] = True
if event == cv2.EVENT_MBUTTONDOWN:
mouse_events["_mouse_ix"], mouse_events["_mouse_iy"] = x, y
mouse_events["shift"] = True
if event == cv2.EVENT_LBUTTONDOWN and q == 57:
self.click_pix_loc = x, y
if event == cv2.EVENT_LBUTTONDOWN and q == 48:
self.place_click_pix_loc = x, y
if event == cv2.EVENT_MOUSEMOVE and (flags >= 8):
if mouse_events["down"] and flags < 15:
dx = (x - mouse_events["_mouse_ix"]) / -50.0
dy = (y - mouse_events["_mouse_iy"]) / -50.0
mouse_events["trackball"].on_mouse_drag(x, y, dx, dy)
update_dir()
if mouse_events["down"] and flags > 15:
dx = (x - mouse_events["_mouse_ix"]) / (-1000.0 / self.V[2, 3])
dy = (y - mouse_events["_mouse_iy"]) / (-1000.0 / self.V[2, 3])
self.V[:3, 3] += 0.5 * np.array([0, 0, dx + dy])
mouse_events["view_origin"] += 0.5 * np.array(
[0, 0, dx + dy]
) # change
update_dir()
if mouse_events["shift"]:
dx = (x - mouse_events["_mouse_ix"]) / (-8000.0 / self.V[2, 3])
dy = (y - mouse_events["_mouse_iy"]) / (-8000.0 / self.V[2, 3])
self.V[:3, 3] += 0.5 * np.array([dx, dy, 0])
mouse_events["view_origin"] += 0.5 * np.array(
[-dx, dy, 0]
) # change
update_dir()
if event == cv2.EVENT_LBUTTONUP:
mouse_events["down"] = False
if event == cv2.EVENT_MBUTTONUP:
mouse_events["shift"] = False
if interact > 0:
cv2.namedWindow(window_name)
cv2.setMouseCallback(window_name, change_dir)
# update_dir()
img = np.zeros([self.height, self.width, 3])
img_toggle = 0
polygon_toggle = 0
coordinate_axis_toggle = 0
normal_toggle = False
mouse_toggle = False
white_bg_toggle = "white_bg" in visualize_context
grid_toggle = "grid" in visualize_context
write_video_toggle = "write_video" in visualize_context
point_capture_toggle = False
video_writer = None
line_info, point_info = None, None
if "reset_line_point" in visualize_context:
self.clean_line_point()
if "line" in visualize_context and visualize_context["line"] is not None:
if "line_color" not in visualize_context:
visualize_context["line_color"] = [
[255, 0, 0] for _ in range(len(visualize_context["line"]))
]
if "thickness" not in visualize_context:
visualize_context["thickness"] = [
2 for _ in range(len(visualize_context["line"]))
]
line_info = [
visualize_context["line"],
visualize_context["line_color"],
visualize_context["thickness"],
True,
]
self.lineVAOs = self.generate_lines(line_info, lineVAOs=self.lineVAOs)
line_info[-1] = False
if "project_point" in visualize_context:
if "project_color" not in visualize_context:
visualize_context["project_color"] = [
[255, 0, 0] for _ in range(len(visualize_context["project_point"]))
]
if "point_size" not in visualize_context:
visualize_context["point_size"] = [
2 for _ in range(len(visualize_context["project_point"]))
]
point_info = [
visualize_context["project_point"],
visualize_context["project_color"],
visualize_context["point_size"],
True,
]
self.pointVAOs = self.generate_points(point_info, pointVAOs=self.pointVAOs)
point_info[-1] = False
while True:
new_cam_pos = -self.V[:3, :3].T.dot(self.V[:3, 3])
q = cv2.waitKey(1)
if interact > 0:
if q == 9:
img_toggle = (img_toggle + 1) % 4
elif q == 96:
polygon_toggle = (polygon_toggle + 1) % 3
elif q == 32:
coordinate_axis_toggle = (coordinate_axis_toggle + 1) % 2
elif q == ord("s"):
interact = 2
elif q == ord("u"):
interact = 1
elif q == ord("q"):
mouse_events["trackball"].theta_delta(5)
update_dir()
elif q == ord("w"):
white_bg_toggle = not white_bg_toggle
elif q == ord("v"):
write_video_toggle = not write_video_toggle
elif q == ord("g"):
grid_toggle = not grid_toggle
elif q == ord("1"):
normal_toggle = not normal_toggle
elif q == ord("2"):
point_capture_toggle = not point_capture_toggle
self.pointVAOs = []
point_info = None
elif q == ord("3"):
point_capture_toggle = not point_capture_toggle
point_info = None
elif q == ord("e"):
mouse_events["trackball"].theta_delta(-5)
update_dir()
elif q == ord("a"):
mouse_events["trackball"].phi_delta(5)
update_dir()
elif q == ord("d"):
mouse_events["trackball"].phi_delta(-5)
update_dir()
elif q == ord("z"):
self.V[:3, 3] += 0.02 * (
self.V[:3, 3] - mouse_events["view_origin"]
)
update_dir()
elif q == ord("c"): # move closer
self.V[:3, 3] -= 0.02 * (
self.V[:3, 3] - mouse_events["view_origin"]
)
update_dir()
elif q == ord("x"): # reset
self.set_camera(cam_pos, cam_pos * 2, [0, 1, 0])
mouse_events = {
"view_dir": -self.V[:3, 3],
"view_origin": | np.array([0, 0, 0.0]) | numpy.array |
"""
Copyright 2020, the e-prop team
Full paper: A solution to the learning dilemma for recurrent networks of spiking neurons
Authors: <NAME>*, <NAME>*, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
Training LSNN model to solve framewise phone classification of TIMIT dataset
CUDA_VISIBLE_DEVICES=0 python3 -u solve_timit_with_framewise_lsnn.py
"""
import tensorflow as tf
import numpy as np
import numpy.random as rd
from alif_eligibility_propagation import CustomALIF, exp_convolve
from toolbox.matplotlib_extension import raster_plot, strip_right_top_axis
from toolbox.file_saver_dumper_no_h5py import NumpyAwareEncoder
from tools import TimitDataset, einsum_bij_jk_to_bik, pad_vector
import time
import os
import errno
import json
import datetime
def flag_to_dict(FLAG):
if float(tf.__version__[2:]) >= 5:
flag_dict = FLAG.flag_values_dict()
else:
flag_dict = FLAG.__flags
return flag_dict
script_name = os.path.basename(__file__)[:-3]
time_stamp = datetime.datetime.now().strftime("%Y_%m_%d__%H_%M__%S_%f")
try:
os.makedirs('results')
except OSError as e:
if e.errno != errno.EEXIST:
raise
FLAGS = tf.app.flags.FLAGS
# Accessible parameter from the shell
tf.app.flags.DEFINE_string('comment', '', 'comment attached to output filenames')
tf.app.flags.DEFINE_string('run_id', '', 'comment attached to output filenames')
tf.app.flags.DEFINE_string('checkpoint', '', 'optionally load the pre-trained weights from checkpoint')
tf.app.flags.DEFINE_string('preproc', 'htk', 'Input preprocessing: fbank, mfccs, cochspec, cochspike, htk')
tf.app.flags.DEFINE_string('eprop', None, 'options: [None, symmetric, adaptive, random], None means use BPTT')
tf.app.flags.DEFINE_bool('adam', True, 'use Adam optimizer')
tf.app.flags.DEFINE_bool('plot', False, 'Interactive plot during training (useful for debugging)')
tf.app.flags.DEFINE_bool('reduced_phns', False, 'Use reduced phone set')
tf.app.flags.DEFINE_bool('psp_out', True, 'Use accumulated PSP instead of raw spikes of model as output')
tf.app.flags.DEFINE_bool('verbose', True, '')
tf.app.flags.DEFINE_bool('ramping_learning_rate', True, 'Ramp up the learning rate from 0 to lr_init in first epoch')
tf.app.flags.DEFINE_bool('BAglobal', False, 'Enable broadcast alignment with uniform weights to all neurons')
tf.app.flags.DEFINE_bool('cell_train', True, 'Train the RNN cell')
tf.app.flags.DEFINE_bool('readout_bias', True, 'Use bias variable in readout')
tf.app.flags.DEFINE_bool('rec', True, 'Use recurrent weights. Used to provide a baseline.')
tf.app.flags.DEFINE_string('dataset', '../datasets/timit_processed', 'Path to dataset to use')
tf.app.flags.DEFINE_float('readout_decay', 1e-2, 'Decay readout [and broadcast] weights')
tf.app.flags.DEFINE_bool('loss_from_all_layers', True, 'For multi-layer setup, make readout from all layers.')
#
tf.app.flags.DEFINE_integer('seed', -1, 'seed number')
tf.app.flags.DEFINE_integer('n_epochs', 80, 'number of iteration ')
tf.app.flags.DEFINE_integer('n_layer', 1, 'number of layers')
tf.app.flags.DEFINE_integer('n_regular', 300, 'number of regular spiking units in the recurrent layer.')
tf.app.flags.DEFINE_integer('n_adaptive', 100, 'number of adaptive spiking units in the recurrent layer')
tf.app.flags.DEFINE_integer('print_every', 100, 'print every and store accuracy')
tf.app.flags.DEFINE_integer('lr_decay_every', -1, 'Decay every')
tf.app.flags.DEFINE_integer('batch', 32, 'mini_batch size')
tf.app.flags.DEFINE_integer('test_batch', 32, 'mini_batch size')
tf.app.flags.DEFINE_integer('n_ref', 2, 'Number of refractory steps')
tf.app.flags.DEFINE_integer('n_repeat', 5, 'repeat each input time step for this many simulation steps (ms)')
tf.app.flags.DEFINE_integer('reg_rate', 10, 'target rate for regularization')
tf.app.flags.DEFINE_integer('truncT', -1, 'truncate time to this many input steps (truncT * n_repeat ms)')
#
tf.app.flags.DEFINE_float('dt', 1., 'Membrane time constant of output readouts')
tf.app.flags.DEFINE_float('tau_a', 200, 'Adaptation time constant')
tf.app.flags.DEFINE_bool('tau_a_spread', False, 'Spread time constants uniformly from 0 to tau_a')
tf.app.flags.DEFINE_float('tau_v', 20, 'Membrane time constant of recurrent neurons')
tf.app.flags.DEFINE_bool('tau_v_spread', False, 'Spread time constants uniformly from 0 to tau_v')
tf.app.flags.DEFINE_float('beta', 1.8, 'Scaling constant of the adaptive threshold')
tf.app.flags.DEFINE_float('clip', 0., 'Proportion of connected synpases at initialization')
tf.app.flags.DEFINE_float('l2', 1e-5, '')
tf.app.flags.DEFINE_float('lr_decay', .3, '')
tf.app.flags.DEFINE_float('lr_init', 0.01, '')
tf.app.flags.DEFINE_float('adam_epsilon', 1e-5, '')
tf.app.flags.DEFINE_float('momentum', 0.9, '')
tf.app.flags.DEFINE_float('gd_noise', 0.06 ** 2 * 10, 'Used only when noise_step_start > 0')
tf.app.flags.DEFINE_float('noise_step_start', -1, 'was 1000')
tf.app.flags.DEFINE_float('thr', 0.01, 'Baseline threshold voltage')
tf.app.flags.DEFINE_float('proportion_excitatory', 0.75, 'proportion of excitatory neurons')
tf.app.flags.DEFINE_float('l1', 1e-2, 'l1 regularization that goes with rewiring (irrelevant without rewiring)')
tf.app.flags.DEFINE_float('rewiring_temperature', 0., 'regularization coefficient')
tf.app.flags.DEFINE_float('dampening_factor', 0.3, 'Parameter necessary to approximate the spike derivative')
tf.app.flags.DEFINE_float('tau_out', 3, 'Mikolov: tau for PSP decay at output')
tf.app.flags.DEFINE_float('reg', 50, 'regularization coefficient')
tf.app.flags.DEFINE_float('drop_out_probability', -1., '')
tf.app.flags.DEFINE_integer('cuda_device', -1, '')
if FLAGS.plot:
import matplotlib.pyplot as plt
#
# Accessing an arbitrary flag forces tf.app.flags to parse the command-line arguments here
key0 = list(dir(FLAGS))[0]
getattr(FLAGS, key0)
if FLAGS.cuda_device >= 0:
os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.cuda_device)
filename = time_stamp + '_' + FLAGS.comment + '_' + FLAGS.run_id
storage_path = os.path.join('results', script_name, filename)
print("STORING EVERYTHING TO: ", storage_path)
try:
os.makedirs(storage_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if FLAGS.n_repeat < 1:
FLAGS.n_repeat = 1
flagdict = flag_to_dict(FLAGS)
assert isinstance(flagdict, dict)
# After processing the data, this object loads it and prepare it.
dataset = TimitDataset(FLAGS.batch, data_path=FLAGS.dataset, preproc=FLAGS.preproc,
use_reduced_phonem_set=FLAGS.reduced_phns)
n_in = dataset.n_features
# Placeholders loaded from data
features = tf.placeholder(shape=(None, None, dataset.n_features), dtype=tf.float32, name='Features')
audio = tf.placeholder(shape=(None, None), dtype=tf.float32, name='Audio')
phns = tf.placeholder(shape=(None, None), dtype=tf.int64, name='Labels')
seq_len = tf.placeholder(dtype=tf.int32, shape=[None], name="SeqLen")
keep_prob = tf.placeholder(dtype=tf.float32, shape=(), name="KeepProb")
weighted_relevant_mask = tf.placeholder(shape=(None, None), dtype=tf.float32, name="RelevanceMask")
batch_size = tf.Variable(0, dtype=tf.int32, trainable=False, name="BatchSize")
# Non-trainable variables that are used to implement a decaying learning rate and count the iterations
lr = tf.Variable(FLAGS.lr_init, dtype=tf.float32, trainable=False, name="LearningRate")
global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="GlobalStep")
lr_update = tf.assign(lr, lr * FLAGS.lr_decay)
gd_noise = tf.Variable(0, dtype=tf.float32, trainable=False, name="GDNoise")
# Op to ramping learning rate
n_iteration_per_epoch = 100
ramping_learning_rate_values = tf.linspace(0., 1., num=n_iteration_per_epoch)
clipped_global_step = tf.minimum(global_step, n_iteration_per_epoch - 1)
ramping_learning_rate_op = tf.assign(lr, FLAGS.lr_init * ramping_learning_rate_values[clipped_global_step])
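# Illustrative sketch (added): the ramping op would typically be evaluated once per
# iteration during the first epoch, e.g.
#   if FLAGS.ramping_learning_rate and epoch == 0:
#       session.run(ramping_learning_rate_op)
# where `session` and `epoch` are assumed to be defined in the training loop further below.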
# Frequencies
regularization_f0 = FLAGS.reg_rate / 1000
def batch_to_feed_dict(batch, is_train):
'''
Create the dictionary that is fed into the Session.run(..) calls.
:param batch: a (features, phns, seq_len, wav) tuple as produced by the dataset iterator
:param is_train: True when the batch is used for training (enables truncation and dropout)
:return: feed_dict mapping the placeholders to numpy arrays
'''
features_np, phns_np, seq_len_np, wav_np = batch
n_time = max([len(i) for i in wav_np])
wav_np = np.stack([pad_vector(w, n_time) for w in wav_np], axis=0)
# print("input max ", np.max(features_np))
n_batch, n_time, n_features = features_np.shape
relevance_mask_np = [(np.arange(n_time) < seq_len_np[i]) / seq_len_np[i] for i in range(n_batch)]
relevance_mask_np = np.array(relevance_mask_np)
if FLAGS.n_repeat > 1:
# Extend sequences with the repeat in time
features_np = np.repeat(features_np, FLAGS.n_repeat, axis=1)
seq_len_np *= FLAGS.n_repeat
if FLAGS.truncT > 0 and is_train:
in_steps_len = phns_np.shape[1]
if in_steps_len <= FLAGS.truncT:
print("truncT (", FLAGS.truncT, ") too long! setting to smaller size found = ", in_steps_len - 1)
FLAGS.truncT = in_steps_len - 1
max_step_offset = in_steps_len - FLAGS.truncT
rnd_step_offset = rd.randint(low=0, high=max_step_offset)
features_np = features_np[:, rnd_step_offset * FLAGS.n_repeat:(rnd_step_offset + FLAGS.truncT) * FLAGS.n_repeat,
:]
phns_np = phns_np[:, rnd_step_offset:rnd_step_offset + FLAGS.truncT]
seq_len_np = np.array(seq_len_np)
seq_len_np[seq_len_np > FLAGS.truncT] = FLAGS.truncT
relevance_mask_np = relevance_mask_np[:, rnd_step_offset:rnd_step_offset + FLAGS.truncT]
n_batch, n_time, n_features = features_np.shape
phns_labels = phns_np
return {features: features_np, phns: phns_labels, seq_len: seq_len_np, weighted_relevant_mask: relevance_mask_np,
batch_size: n_batch, keep_prob: FLAGS.drop_out_probability if is_train else 1., audio: wav_np}
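# Worked example of the truncT window used above (illustrative numbers, not taken from the dataset):
# with in_steps_len = 50 phoneme steps, FLAGS.truncT = 20 and FLAGS.n_repeat = 2, the offset is drawn
# from [0, 30); for rnd_step_offset = 7 the features are sliced to time steps 14:54 (7*2 to (7+20)*2),
# phns and the relevance mask are sliced to steps 7:27, and any seq_len larger than 20 is clamped to 20.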
if FLAGS.tau_a_spread:
taua = rd.choice([1, 0.5], size=FLAGS.n_regular + FLAGS.n_adaptive) * FLAGS.tau_a
else:
taua = FLAGS.tau_a
if FLAGS.tau_v_spread:
tauv = rd.choice([1, 0.5], size=FLAGS.n_regular + FLAGS.n_adaptive) * FLAGS.tau_v
else:
tauv = FLAGS.tau_v
flagdict['tauas'] = taua.tolist() if type(taua) is not float else taua
flagdict['tauvs'] = tauv.tolist() if type(tauv) is not float else tauv
with open(os.path.join(storage_path, 'flags.json'), 'w') as f:
json.dump(flagdict, f, indent=2)
def get_cell(tag, n_input=n_in):
# converting thr and beta parameters
thr_new = FLAGS.thr / (1 - np.exp(-FLAGS.dt / tauv)) if np.isscalar(tauv) else \
[FLAGS.thr / (1 - np.exp(-FLAGS.dt / tv)) for tv in tauv]
if np.isscalar(tauv):
import librosa
import numpy as np
from utils import feature_extractor as utils
class EMG:
def __init__(self, audio, config):
self.audio = audio
self.dependencies = config["emg"]["dependencies"]
self.frame_size = int(config["frame_size"])
self.sampling_rate = int(config["sampling_rate"])
self.number_of_bins = int(config["emg"]["number_of_bins"])
self.is_raw_data = config["is_raw_data"]
self.time_lag = int(config["emg"]["time_lag"])
self.embedded_dimension = int(config["emg"]["embedded_dimension"])
self.boundary_frequencies = list(config["emg"]["boundary_frequencies"])
self.hfd_parameter = int(config["emg"]["hfd_parameter"])
self.r = int(config["emg"]["r"])
self.frames = int(np.ceil(len(self.audio.data) / self.frame_size))
def __enter__(self):
print ("Initializing emg calculation...")
def __exit__(self, exc_type, exc_val, exc_tb):
print ("Done with calculations...")
def get_current_frame(self, index):
return utils._get_frame_array(self.audio, index, self.frame_size)
def compute_hurst(self):
self.hurst = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
N = current_frame.size
T = np.arange(1, N + 1)
Y = np.cumsum(current_frame)
Ave_T = Y / T
S_T = np.zeros(N)
R_T = np.zeros(N)
for i in range(N):
S_T[i] = np.std(current_frame[:i + 1])
X_T = Y - T * Ave_T[i]
R_T[i] = np.ptp(X_T[:i + 1])
R_S = R_T / S_T
R_S = np.log(R_S)[1:]
n = np.log(T)[1:]
A = np.column_stack((n, np.ones(n.size)))
[m, c] = np.linalg.lstsq(A, R_S)[0]
self.hurst.append(m)
self.hurst = np.asarray(self.hurst)
def get_hurst(self):
return self.hurst
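# Interpretation of the rescaled-range (R/S) fit above: the fitted slope m is the Hurst exponent, so values
# near 0.5 indicate an uncorrelated signal, values above 0.5 a persistent (trending) signal and values
# below 0.5 an anti-persistent one; a plain random-walk increment series should therefore give m close to 0.5.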
def compute_embed_seq(self):
self.embed_seq = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
shape = (current_frame.size - self.time_lag * (self.embedded_dimension - 1), self.embedded_dimension)
strides = (current_frame.itemsize, self.time_lag * current_frame.itemsize)
m = np.lib.stride_tricks.as_strided(current_frame, shape=shape, strides=strides)
self.embed_seq.append(m)
self.embed_seq = np.asarray(self.embed_seq)
def get_embed_seq(self):
return self.embed_seq
def compute_bin_power(self):
self.Power_Ratio = []
self.Power = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
C = np.fft.fft(current_frame)
C = abs(C)
Power = np.zeros(len(self.boundary_frequencies) - 1)
for Freq_Index in range(0, len(self.boundary_frequencies) - 1):
Freq = float(self.boundary_frequencies[Freq_Index])
Next_Freq = float(self.boundary_frequencies[Freq_Index + 1])
Power[Freq_Index] = sum(
C[int(np.floor(Freq / self.sampling_rate * len(current_frame))):
int(np.floor(Next_Freq / self.sampling_rate * len(current_frame)))])
self.Power.append(Power)
self.Power_Ratio.append(Power / sum(Power))
self.Power = np.asarray(self.Power)
self.Power_Ratio = np.asarray(self.Power_Ratio)
def get_bin_power(self):
return self.Power
def get_bin_power_ratio(self):
return self.Power_Ratio
def compute_pfd(self, D=None):
self.pfd = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
if D is None:
D = np.diff(current_frame)
D = D.tolist()
N_delta = 0 # number of sign changes in derivative of the signal
for i in range(1, len(D)):
if D[i] * D[i - 1] < 0:
N_delta += 1
n = len(current_frame)
m = np.log10(n) / (np.log10(n) + np.log10(n / (n + 0.4 * N_delta)))  # Petrosian fractal dimension
self.pfd.append(m)
if self.is_raw_data:
self.pfd = np.asarray(self.pfd)
else:
self.pfd = np.asarray(self.pfd)[0]
def get_pfd(self):
return self.pfd
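# The Petrosian fractal dimension computed above is PFD = log10(n) / (log10(n) + log10(n / (n + 0.4 * N_delta))),
# with n the frame length and N_delta the number of sign changes of the first difference. As a rough worked
# example, n = 1000 with N_delta = 250 gives PFD ~ 1.014, while a much smoother frame with N_delta = 10
# gives a value very close to 1.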
def compute_hfd(self):
self.hfd = []
for v in range(0, self.frames):
current_frame = self.get_current_frame(v)
L = []
x = []
N = len(current_frame)
for k in range(1, self.hfd_parameter):
Lk = []
for m in range(0, k):
Lmk = 0
for i in range(1, int(np.floor((N - m) / k))):
Lmk += abs(current_frame[m + i * k] - current_frame[m + i * k - k])
Lmk = Lmk * (N - 1) / np.floor((N - m) / float(k)) / k
Lk.append(Lmk)
L.append(np.log(np.mean(Lk)))
x.append([np.log(float(1) / k), 1])
(p, r1, r2, s) = np.linalg.lstsq(x, L)
self.hfd.append(p[0])
if self.is_raw_data:
self.hfd = np.asarray(self.hfd)
else:
self.hfd = np.asarray(self.hfd)[0]
def get_hfd(self):
return self.hfd
def compute_hjorth(self, D=None):
self.hjorth = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
if D is None:
D = np.diff(current_frame)
D = np.concatenate(([current_frame[0]], D))  # prepend the first sample so D matches the frame length
D = np.array(D)
n = len(current_frame)
M2 = float(sum(D ** 2)) / n
TP = sum(np.array(current_frame) ** 2)
M4 = 0
for i in range(1, len(D)):
M4 += (D[i] - D[i - 1]) ** 2
M4 = M4 / n
m = np.sqrt(M2 / TP), np.sqrt(float(M4) * TP / M2 / M2)
self.hjorth.append(m)
if self.is_raw_data:
self.hjorth = np.asarray(self.hjorth)
else:
self.hjorth = np.asarray(self.hjorth)[0]
def get_hjorth(self):
return self.hjorth
def compute_spectral_entropy(self):
self.spectral_entropy = []
for k in range(0, self.frames):
Power, Power_Ratio = self.get_bin_power()[k], self.get_bin_power_ratio()[k]
Spectral_Entropy = 0
for i in range(0, len(Power_Ratio) - 1):
Spectral_Entropy += Power_Ratio[i] * np.log(Power_Ratio[i])
Spectral_Entropy /= np.log(len(Power_Ratio))
m = -1 * Spectral_Entropy
self.spectral_entropy.append(m)
self.spectral_entropy = np.asarray(self.spectral_entropy)
def get_spectral_entropy(self):
return self.spectral_entropy
def compute_svd_entropy(self, W=None):
self.svd_entropy = []
for k in range(0, self.frames):
if W is None:
Y = self.get_embed_seq()[k]
W = np.linalg.svd(Y, compute_uv=0)
W /= sum(W) # normalize singular values
m = -1 * sum(W * np.log(W))
self.svd_entropy.append(m)
if self.is_raw_data:
self.svd_entropy = np.asarray(self.svd_entropy)
else:
self.svd_entropy = np.asarray(self.svd_entropy)[0]
def get_svd_entropy(self):
return self.svd_entropy
def compute_fisher_info(self, W=None):
self.fisher_info = []
for k in range(0, self.frames):
if W is None:
Y = self.get_embed_seq()[k]
W = np.linalg.svd(Y, compute_uv=0)
W /= sum(W) # normalize singular values
m = -1 * sum(W * np.log(W))
self.fisher_info.append(m)
if self.is_raw_data:
self.fisher_info = np.asarray(self.fisher_info)
else:
self.fisher_info = np.asarray(self.fisher_info)[0]
def get_fisher_info(self):
return self.fisher_info
def compute_ap_entropy(self):
self.ap_entropy = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
N = len(current_frame)
Em = self.get_embed_seq()[k]
A = np.tile(Em, (len(Em), 1, 1))
B = np.transpose(A, [1, 0, 2])
D = np.abs(A - B) # D[i,j,k] = |Em[i][k] - Em[j][k]|
InRange = np.max(D, axis=2) <= self.r
Cm = InRange.mean(axis=0)
Dp = np.abs(np.tile(current_frame[self.embedded_dimension:],
(N - self.embedded_dimension, 1)) - np.tile(current_frame[self.embedded_dimension:],
(N - self.embedded_dimension, 1)).T)
Cmp = np.logical_and(Dp <= self.r, InRange[:-1, :-1]).mean(axis=0)
Phi_m, Phi_mp = np.sum(np.log(Cm)), np.sum(np.log(Cmp))
m = (Phi_m - Phi_mp) / (N - self.embedded_dimension)
self.ap_entropy.append(m)
self.ap_entropy = np.asarray(self.ap_entropy)
def get_ap_entropy(self):
return self.ap_entropy
def compute_samp_entropy(self):
self.samp_entropy = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
N = len(current_frame)
Em = self.get_embed_seq()[k]
A = np.tile(Em, (len(Em), 1, 1))
B = np.transpose(A, [1, 0, 2])
D = np.abs(A - B) # D[i,j,k] = |Em[i][k] - Em[j][k]|
InRange = np.max(D, axis=2) <= self.r
np.fill_diagonal(InRange, 0) # Don't count self-matches
Cm = InRange.sum(axis=0) # Probability that random M-sequences are in range
Dp = np.abs(np.tile(current_frame[self.embedded_dimension:], (N - self.embedded_dimension, 1))
- np.tile(current_frame[self.embedded_dimension:], (N - self.embedded_dimension, 1)).T)
Cmp = np.logical_and(Dp <= self.r, InRange[:-1, :-1]).sum(axis=0)
# Uncomment below for old (miscounted) version
# InRange[np.triu_indices(len(InRange))] = 0
# InRange = InRange[:-1,:-2]
# Cm = InRange.sum(axis=0) # Probability that random M-sequences are in range
# Dp = np.abs(np.tile(X[M:], (N - M, 1)) - np.tile(X[M:], (N - M, 1)).T)
# Dp = Dp[:,:-1]
# Cmp = np.logical_and(Dp <= R, InRange).sum(axis=0)
# Avoid taking log(0)
Samp_En = np.log(np.sum(Cm + 1e-100) / np.sum(Cmp + 1e-100))
self.samp_entropy.append(Samp_En)
self.samp_entropy = np.asarray(self.samp_entropy)
def get_samp_entropy(self):
return self.samp_entropy
def compute_dfa(self, Ave=None, L=None):
self.dfa = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
if Ave is None:
Ave = np.mean(current_frame)
Y = np.cumsum(current_frame)
Y -= Ave
if L is None:
L = np.floor(len(current_frame) * 1 /
(2 ** np.array(list(range(4, int(np.log2(len(current_frame))) - 4)))))
F = np.zeros(len(L))
for i in range(0, len(L)):
n = int(L[i]) # for each box length L[i]
if n == 0:
print("time series is too short while the box length is too big")
print("abort")
exit()
for j in range(0, len(current_frame), n): # for each box
if j + n < len(current_frame):
c = list(range(j, j + n))
# coordinates of time in the box
c = np.vstack([c, np.ones(n)]).T
# the value of data in the box
y = Y[j:j + n]
# add residue in this box
F[i] += np.linalg.lstsq(c, y)[1]
F[i] /= ((len(current_frame) / n) * n)
F = np.sqrt(F)
Alpha = np.linalg.lstsq(np.vstack([np.log(L), np.ones(len(L))]).T, np.log(F))[0][0]
# coding=utf-8
# Copyright 2019 The Authors of RL Reliability Metrics.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for online metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from rl_reliability_metrics.metrics import metrics_online
import unittest
class MetricsOnlineTest(parameterized.TestCase, unittest.TestCase):
@parameterized.parameters(
([0, 1], None, None, [1.41421356237, 2.12132034356]),
([0, 1], None, 1, [1.41421356237, 2.12132034356]),
([0, 1], 0.5, 0.5, [2.9954294688643497, 4.564952035367936]),
([0, 1], None, 'curve_range', [1.414213562 / 1.425, 2.121320343 / 1.425]),
)
def testCorrectStddevAcrossRuns(self, timepoints, lowpass_thresh, baseline,
expected):
curves = [
np.array([[-1, 0, 1], [1., 1., 1.]]),
np.array([[-1, 0, 1, 2], [2., 3., 4., 5.]])
]
metric = metrics_online.StddevAcrossRuns(
lowpass_thresh=lowpass_thresh,
eval_points=timepoints,
baseline=baseline)
result = metric(curves)
np.testing.assert_allclose(result, expected)
@parameterized.parameters(
([0, 1], None, None, [1, 1.5]),
([0, 1], None, 2, [0.5, 0.75]),
)
def testCorrectIqrAcrossRuns(self, timepoints, lowpass_thresh, baseline,
expected):
curves = [
np.array([[-1, 0, 1], [1., 1., 1.]]),
np.array([[-1, 0, 1, 2], [2., 3., 4., 5.]])
]
metric = metrics_online.IqrAcrossRuns(
lowpass_thresh=lowpass_thresh,
eval_points=timepoints,
baseline=baseline)
result = metric(curves)
np.testing.assert_allclose(result, expected)
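# Sanity check of the first test case above: at eval point 0 the two curves take the values 1. and 3.,
# and the expected 1.41421356237 matches their sample standard deviation np.std([1., 3.], ddof=1) = sqrt(2);
# at eval point 1 the values are 1. and 4., giving sqrt(4.5) ~ 2.12132034356. The 'curve_range' baseline
# case simply divides these two numbers by 1.425.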
import os
import numpy as np
import pandas as pd
"""
This function is used to import the data. Put the data in a folder named all_data in the directory of the code
"""
def import_data(dt_name):
"""
:param dt_name: Name of the Dataset
:return: Three pandas frames which correspond to training, testing and validation data
"""
# First we get the directory of our project and then take all three files and import them into the respective
# names.
d = os.getcwd()
test_data = pd.read_csv(os.path.join(os.path.join(d, "all_data"), "test_{0}.csv".format(dt_name)), header=None)
train_data = pd.read_csv(os.path.join(os.path.join(d, "all_data"), "train_{0}.csv".format(dt_name)), header=None)
validation_data = pd.read_csv(os.path.join(os.path.join(d, "all_data"), "valid_{0}.csv".format(dt_name)),
header=None)
# Now we will return the data frames
return [test_data, train_data, validation_data]
"""
This function is defined to get the labels/classes and attribute values in different variables
"""
def get_attributes_and_labels(data):
"""
:param data: The dataset to be divided
:return: Two panda frames which are in order of classes and attributes
"""
# Here we divide our attributes and classes features for a given dataset
return [data.iloc[:, -1], data.iloc[:, :-1]]
"""
This function is used to find the entropy which is our impurity heuristic for this algorithm
"""
def get_entropy(data):
"""
:param data: These are the values for which we want to find the entropy. We pass a whole vector of values which
correspond to the attribute of importance and find the entropy for that vector.
:return: Entropy for the given vector
"""
entropy_value = 0
temp, unique_count = np.unique(data, return_counts=True)
# We will use the formula mentioned in the slides to calculate the value of entropy for both the options (i.e,
# 1 and 0)
sum_of_counts = np.sum(unique_count)
for count in unique_count:
entropy_value = entropy_value - ((count / sum_of_counts) * np.log2(count / sum_of_counts))
return entropy_value
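# Worked example for get_entropy: a label vector [1, 1, 0, 0] has p = 0.5 for each class, so the entropy is
# -(0.5 * log2(0.5) + 0.5 * log2(0.5)) = 1.0 bit, while a pure vector such as [1, 1, 1, 1] gives
# -(1.0 * log2(1.0)) = 0.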
"""
This function is used to find the information gain for the given sub-tree/tree. The information gain is used to find the
attribute we will use to do further branching
"""
def Information_Gain_Heuristic(examples, attributes, target_attribute):
"""
:param examples: The data for which we want to find the information gain
:param attributes: the values of the attributes available (the column number)
:param target_attribute: the target attribute we are trying to find
:return: Information Gain of the given sub-tree.
"""
# Here we find the entropy for the root node
previous_entropy = get_entropy(target_attribute)
Information_Gain = []
for each_attribute in attributes:
unique_value_of_attribute, counts_of_attribute = np.unique(examples[each_attribute], return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
i = 0
# Since I have hardcoded the array_after_division arrays, we will try to keep the value 0 at the first index.
if unique_value_of_attribute[0] == 1:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
array_after_division_1 = []
array_after_division_0 = []
# This loop is for 0 and 1
# I need to find the number of 1's and 0's in target value when the given attribute value is something
# particular
total_data = pd.concat([examples, target_attribute], axis=1, sort=False)
# Here I concatenated the data frames so that only one df is used to both lookup the value and find the value
# to append
row_names = total_data.index.values
list_of_row_names = list(row_names)
for each in list_of_row_names:
value_to_append = int(total_data.iloc[:, -1][each])
if examples[each_attribute][each] == 1:
array_after_division_1.append(value_to_append)
else:
array_after_division_0.append(value_to_append)
# Here I will use try/except since if the target_attribute has only one unique value then it will give an
# error if we try to use the second index (i.e. 2). And if we have only one unique value then our impurity
# is 0 and thus entropy is 0
try:
value_of_new_inpurity = (counts_of_attribute[0] / np.size(examples[each_attribute])) * get_entropy(
array_after_division_0) + (counts_of_attribute[1] / np.size(examples[each_attribute])) * get_entropy(
array_after_division_1)
except IndexError:
value_of_new_inpurity = 0
temp = previous_entropy - value_of_new_inpurity
Information_Gain.append(temp)
return Information_Gain
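# Worked example for the information gain above: if the parent node has labels [1, 1, 0, 0] (entropy 1.0)
# and an attribute splits it perfectly into [1, 1] and [0, 0] (entropy 0 in both children), the weighted
# child impurity is 0 and the gain is 1.0; an attribute that splits it into [1, 0] and [1, 0] leaves the
# impurity at 1.0 and the gain at 0, so the first attribute would be chosen for branching.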
"""
This function is the main function for our algorithm. The decision_tree function is used recursively to create new nodes
and make the tree while doing the training.
"""
def decision_tree_construction(examples, target_attribute, attributes, depth):
"""
:param examples: The data we will use to train the tree(x)
:param target_attribute: The label we want to classify(y)
:param attributes: The number(index) of the labels/attributes of the data-set
:return: The tree corresponding to the given data
"""
# This is the first base condition of the algorithm. It is used if the attributes variable is empty, then we return
# the single-node tree Root, with label = most common value of target_attribute in examples
# The base condition for the recursion when we check if all the variables are same or not in the node and if they
# are same then we return that value as the node
if len(attributes) == 0 or len(np.unique(target_attribute)) == 1:
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
i = 0
if unique_value_of_attribute[0] == 1:
# More positive values
return 1, depth
elif unique_value_of_attribute[0] == 0:
# More negative values
return 0, depth
# This is the recursion part of the algorithm in which we try to find the sub-tree's by using recursion and
# information gain
else:
Information_Gain = Information_Gain_Heuristic(examples, attributes, target_attribute)
best_attribute_number = attributes[np.argmax(Information_Gain)]
# Since we now have the best_attribute(A in algorithm) we will create the root node of the tree/sub-tree with
# that and name the root as the best attribute among all Here we make the tree as a dictionary for testing
# purposes
tree = dict([(best_attribute_number, dict())])
if isinstance(tree, int):
# If the given value is an int value then it's definitely a leaf node and if it's a dictionary then it's a
# node
tree[best_attribute_number]["type_of_node"] = "leaf"
tree[best_attribute_number]["depth"] = depth
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
# Here we can have an index error since in some case it may happen that the array has only one type
# of value and thus accessing the index [1] is not possible
i = 0
tree[best_attribute_number]["majority_target_attribute"] = unique_value_of_attribute[0]
tree[best_attribute_number]["best_attribute_number"] = best_attribute_number
else:
tree[best_attribute_number]["type_of_node"] = "node"
tree[best_attribute_number]["depth"] = depth
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
# Here we can have an index error since in some case it may happen that the array has only one type
# of value and thus accessing the index [1] is not possible
i = 0
tree[best_attribute_number]["majority_target_attribute"] = unique_value_of_attribute[0]
tree[best_attribute_number]["best_attribute_number"] = best_attribute_number
attributes.remove(best_attribute_number)
# Now we do the recursive algorithm which will be used to create the tree after the root node.
depth_of_node = []
for each_unique_value in np.unique(examples[best_attribute_number]):
# We use those values for which the examples[best_attribute_number] == each_unique_value
class1 = each_unique_value
new_target_attribute = pd.DataFrame(target_attribute)
total_data = pd.concat([examples, new_target_attribute], axis=1, sort=False)
# We do this step so that we can pick the values which belong to the best_attribute = [0,1], i.e. we now
# want to divide our data so that the values for the best_attribute are divided among the branches. And
# thus we will have 4 arrays now, two for the data and two for the target attribute.
new_data_after_partition = total_data.loc[total_data[best_attribute_number] == class1]
new_target_attribute, new_examples_after_partition = get_attributes_and_labels(new_data_after_partition)
# This is also a condition for our algorithm in which we check whether the number of examples after the
# partition is positive or not. If it is less than 1 then we return the most frequent value in
# the node
if len(new_examples_after_partition) == 0:
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
i = 0
if unique_value_of_attribute[0] == 1:
# More positive values
return 1, depth
elif unique_value_of_attribute[0] == 0:
# More negative values
return 0, depth
# This is the recursion step, in which we make new decision trees till the case when any of the base
# cases are true
new_sub_tree_after_partition, deptha = decision_tree_construction(new_examples_after_partition,
new_target_attribute, attributes,
depth + 1)
depth_of_node.append(deptha)
# Here we are adding the depth of the node so that we can do the depth based pruning
tree[best_attribute_number][each_unique_value] = new_sub_tree_after_partition
if isinstance(new_sub_tree_after_partition, int):
tree[best_attribute_number]["type_of_node"] = "leaf"
tree[best_attribute_number]["depth"] = depth
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
i = 0
tree[best_attribute_number]["majority_target_attribute"] = unique_value_of_attribute[0]
tree[best_attribute_number]["best_attribute_number"] = best_attribute_number
else:
tree[best_attribute_number]["type_of_node"] = "node"
tree[best_attribute_number]["depth"] = depth
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
i = 0
tree[best_attribute_number]["majority_target_attribute"] = unique_value_of_attribute[0]
tree[best_attribute_number]["best_attribute_number"] = best_attribute_number
return tree, max(depth_of_node)
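# Illustrative shape of the returned tree (hypothetical root attribute index 3; the concrete keys come
# from the assignments above):
# tree = {3: {0: <subtree or class label>, 1: <subtree or class label>,
#             'type_of_node': 'node', 'depth': 0,
#             'majority_target_attribute': 0, 'best_attribute_number': 3}}
# decision_tree_predict later walks these nested dictionaries by attribute value until it reaches an int leaf.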
"""
This function is used to do the pruning of the given tree based on the given max_depth
"""
def depth_pruning_for_one_value(given_tree, maximum_allowed_depth, current_depth=0):
"""
:param given_tree: This is the tree we want to prune based on the depth
:param maximum_allowed_depth: This is the maximum allowed depth for the main tree
:param current_depth: This is the current depth we are on
:return: The depth pruned tree
"""
for each_key in list(given_tree):
# In this function we are just checking if the depth is greater or not and if greater we are
# pruning the tree
if isinstance(given_tree[each_key], dict):
try:
current_depth = given_tree[each_key]["depth"]
except KeyError:
# Here we are not anything to the depth since in this case the node will be a leaf.
current_depth = current_depth
if current_depth == maximum_allowed_depth:
try:
given_tree[each_key] = given_tree[each_key]["majority_target_attribute"]
except KeyError:
given_tree[each_key] = 0
else:
depth_pruning_for_one_value(given_tree[each_key], maximum_allowed_depth, current_depth)
"""
This function is used for the depth based pruning for validation of the values
"""
def depth_pruning_by_validation_set(given_tree, valid_x, valid_y, max_value_in_target_attribute):
"""
:param given_tree: This is the tree we want to prune based on the depth
:param valid_x: This is the validation data
:param valid_y: This is the validation class
:param max_value_in_target_attribute: This is the max value in target attribute (for testing purposes)
:return:
"""
depth_values = [5, 10, 15, 20, 50, 100]
best_accuracy = 0
best_number = 0
for each in depth_values:
# Here we just iterate over the values and try to find the best hyper parameter of depth
pruned_tree = given_tree.copy()
depth_pruning_for_one_value(pruned_tree, each, 0)
predicted_y = decision_tree_test(valid_x, pruned_tree, max_value_in_target_attribute)
accuracy_for_pruned_tree = decision_tree_accuracy(valid_y, predicted_y)
if accuracy_for_pruned_tree > best_accuracy:
best_accuracy = accuracy_for_pruned_tree
best_number = each
return best_accuracy, best_number
"""
This function is used to predict the new and unseen test data point by using the created tree and the given instance
"""
def decision_tree_predict(tree, testing_example, max_value_in_target_attribute):
"""
:param max_value_in_target_attribute: If we are not able to classify due to less data, we return this value when testing
:param tree: This is the trained tree which we will use for finding the class of the given instance
:param testing_example: These are the instance on which we want to find the class
:return:
"""
# We take each attribute for the datapoint and check if that attribute is the root node for the tree we are on
try:
max_value_in_target_attribute = tree[list(tree)[0]]["majority_target_attribute"]
except (KeyError, IndexError):
max_value_in_target_attribute = 0
for each_attribute in list(testing_example.index):
if each_attribute in tree:
try:
value_of_the_attribute = testing_example[each_attribute]
# I have used a try catch here since we trained the algo on a part of the data and it's not
# necessary that we will have a branch which can classify the data point at all.
new_tree = tree[each_attribute][value_of_the_attribute]
except KeyError:
# There are two things we can do here: the first is to show an error and return no class (e.g.
# error = "The algorithm cannot classify the given instance right now"; return error), and the second
# is to return the max value in our training target array, which just means we need more training data.
return max_value_in_target_attribute
except IndexError:
# This is the case when we do pruning and the node becomes a value.
return tree[each_attribute]
if type(new_tree) == dict:
# In this case we see if the value predicted is a tree then we again call the recursion if not we
# return the value we got
return decision_tree_predict(new_tree, testing_example, max_value_in_target_attribute)
else:
return new_tree
"""
This function is used to find the output for the given testing dataset by using the tree created during the training process
"""
def decision_tree_test(test_x, tree, max_value_in_target_attribute):
"""
:param test_x: This is input attributes from the testing data
:param tree: This is the tree created after the training process
:param max_value_in_target_attribute: This is the most occurring target_attribute in our training data
:return: The output for the given testing data
"""
output = []
# In this function we just use the decision_tree_predict and predict the value for all the instances
for index, row in test_x.iterrows():
output.append(int(decision_tree_predict(tree, row, max_value_in_target_attribute)))
return output
"""
In this function we try to find the accuracy for our predictions and return the accuracy
"""
def decision_tree_accuracy(test_y, predicted_y):
"""
:param test_y: The classes given with the data
:param predicted_y: The predicted classes
:return: The accuracy for the given tree
"""
test_y = test_y.tolist()
right_predict = 0
# Here we predict the accuracy by the formula accuracy = right_predictions/total_data_points_in_the_set
for each in range(np.size(test_y)):
if test_y[each] == predicted_y[each]:
right_predict = right_predict + 1
return right_predict / len(test_y)
"""
In this function we will try to iterate over the whole naive decision tree with entropy as the impurity heuristic
"""
def main_dtree_with_entropy(train_x, train_y, test_x, test_y, valid_x, valid_y):
"""
:param train_x: This is the training data
:param train_y: This is the training class
:param test_x: This is the testing data
:param test_y: This is the testing class
:return: The accuracy for created tree
"""
train_y = pd.DataFrame(train_y)
max_value_in_target_attribute, temp = np.unique(train_y, return_counts=True)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 10 11:46:58 2017
@author: yannwork
THIS CODE HAS THE DESCRIPTION OF THE IZHIKEVICH MODEL WITH 5 PARAMETERS or 9 PARAMETERS
"""
#Imports the print function from Python3.
from __future__ import print_function
#Imports the modf function.
from math import modf
#Imports numpy to work with matrix.
import numpy as np
#Imports re to work with string values.
import re
#Imports all ploting utilities.
from pylab import *
import sys
#5-parameter version of the Izhikevich model
class Izhikevich_5P():
def __init__(self, a, b, c, d, vmax, dt, x, y):
self.createNeuron(a, b, c, d, vmax, dt, x, y)
# a, b, c, d, are the parameters for the membrane potential dynamics
# vmax is the peak membrane potential of single action potentials
# x, y are the spatial coordinates of each cell
def createNeuron(self, a, b, c, d, vmax, dt, x, y):
#Set Neuron constants.
self.a = a
self.b = b
self.c = c
self.d = d
self.vmax = vmax
self.dt = dt
self.x = x
self.y = y
def getNextVal(self, v, u, Stim):
l = Stim
if v < self.vmax:
# ODE eqs
dv = (0.04*v**2)+5*v+140-u
vNew = v+(dv+l)*self.dt
du = self.a*(self.b*v-u)
uNew = u + self.dt*du
vOld = v
else:
# Spike
vOld = self.vmax
vNew = self.c
uNew = u + self.d
nVals = np.array([vNew, uNew, vOld])
return nVals
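# Minimal usage sketch (hypothetical values: a, b, c, d are the classic regular-spiking parameters from
# Izhikevich 2003, and the constant stimulus of 10 is arbitrary):
# neuron = Izhikevich_5P(a=0.02, b=0.2, c=-65.0, d=8.0, vmax=30.0, dt=0.1, x=0.0, y=0.0)
# v, u = -65.0, 0.2 * (-65.0)
# for _ in range(1000):
#     v, u, v_plot = neuron.getNextVal(v, u, Stim=10.0)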
#9-parameter version of the Izhikevich model
class Izhikevich_9P():
def __init__(self, a, b, c, d, vmax, vr, vt, k, Cm, dt, x, y):
self.createNeuron(a, b, c, d, vmax, vr, vt, k, Cm, dt, x, y)
# a, b, c, d, are the parameters for the membrane potential dynamics
# vmax is the peak membrane potential of single action potentials
# vr, vt are the resting and threshold membrane potential
# k is a coefficient of the quadratic polynomial
# C is the membrane capacitance
# x, y are the spatial coordinates of each cell
def createNeuron(self, a, b, c, d, vmax, vr, vt, k, Cm, dt, x, y):
#Set Neuron constants.
self.a = a
self.b = b
self.c = c
self.d = d
self.vmax = vmax
self.vr = vr
self.vt = vt
self.k = k
self.Cm = Cm
self.dt = dt
self.x = x
self.y = y
def getNextVal(self, v, u, Stim):
l = Stim
if v < self.vmax:
# ODE eqs
dv = self.k*(v-self.vr)*(v-self.vt)-u
vNew = v+(dv+l)*self.dt/self.Cm
du = self.a*(self.b*(v-self.vr)-u)
uNew = u + self.dt*du
vOld = v
else:
# Spike
vOld = self.vmax
vNew = self.c
uNew = u + self.d
nVals = np.array([vNew, uNew, vOld])
return nVals
def return_parameters(self):
return self.a, self.b, self.c, self.d, self.vmax, self.vr, self.vt, self.k, self.Cm
#Leaky integrator object
class Leaky_Integrator():
def __init__(self, R, C, dt, x, y):
self.createNeuron(R, C, dt, x, y)
def createNeuron(self, R, C, dt, x, y):
#Set Neuron constants.
self.R = R
self.C = C
self.dt = dt
self.x = x
self.y = y
def getNextVal(self, v, Stim):
l = Stim
# ODE eqs
dv = (-1/(self.R*self.C))*v+l/self.C
vNew = v+(dv)*self.dt
vOld = v
nVals = np.array([vNew, vOld])
return nVals
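# Note on the leaky integrator above: the update implements dv/dt = -v/(R*C) + Stim/C with an explicit
# Euler step, so the membrane time constant is tau = R*C and a constant stimulus drives v towards the
# steady state v_inf = R*Stim (for example R = 10, C = 1, Stim = 2 gives tau = 10 time units and v_inf = 20).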
import tensorflow as tf
import numpy as np
import numpy.linalg as nl
import utils.general
import skimage.feature
import json
import os
PAF_type = 0
allPAFConnection = [[np.array([[1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13], [1, 2], [2, 3], [3, 4], [2, 16], [1, 5], [5, 6], [6, 7], [5, 17], [1, 0], [0, 14], [0, 15], [14, 16], [15, 17], [1, 18], [1, 19], [19, 8], [19, 11]]),
np.array([[0, 4], [4, 3], [3, 2], [2, 1], [0, 8], [8, 7], [7, 6], [6, 5], [0, 12], [12, 11], [11, 10], [10, 9], [0, 16], [16, 15], [15, 14], [14, 13], [0, 20], [20, 19], [19, 18], [18, 17]])
], # PAF type 0 (Original Openpose)
[np.array([[1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13], [1, 2], [2, 3], [3, 4], [2, 16], [1, 5], [5, 6], [6, 7], [5, 17],
[1, 0], [0, 14], [0, 15], [14, 16], [15, 17], [1, 18], [2, 4], [5, 7], [8, 4], [11, 7], [8, 10], [11, 13]]), # augmented PAF
np.array([[0, 4], [4, 3], [3, 2], [2, 1], [0, 8], [8, 7], [7, 6], [6, 5], [0, 12], [12, 11], [11, 10], [10, 9], [0, 16], [16, 15], [15, 14], [14, 13], [0, 20], [20, 19], [19, 18], [18, 17]])
]] # PAF type 1 (My augmented PAF)
PAFConnection = allPAFConnection[PAF_type]
dist_thresh = 8
if os.path.exists('utils/default_PAF_lengths.json'):
with open('utils/default_PAF_lengths.json', 'r') as f:
default_PAF_length = json.load(f)
def getValidPAF(valid, objtype, PAFdim):
# input "valid": a tensor containing bool valid/invalid for each channel
# input "objtype": 0 for body, 1 for hand (to select PAFConnection)
with tf.variable_scope('getValidPAF'):
assert objtype in (0, 1)
connection = tf.constant(np.repeat(PAFConnection[objtype], PAFdim, axis=0), dtype=tf.int64)
batch_size = valid.get_shape().as_list()[0]
PAF_valid = []
for ib in range(batch_size):
b_valid = valid[ib, :]
assert len(b_valid.get_shape().as_list()) == 1
indexed_valid = tf.gather(b_valid, connection, axis=0)
PAF_valid.append(tf.logical_and(indexed_valid[:, 0], indexed_valid[:, 1]))
PAF_valid = tf.stack(PAF_valid, axis=0)
return PAF_valid
def getValidPAFNumpy(valid, objtype):
# used in testing time
# input "valid": a numpy array containing bool valid/invalid for each channel
# input "objtype": 0 for body, 1 for hand (to select PAFConnection)
assert objtype in (0, 1)
connection = PAFConnection[objtype]
PAF_valid = []
for conn in connection:
connection_valid = valid[conn[0]] and valid[conn[1]]
PAF_valid.append(connection_valid)
PAF_valid = np.array(PAF_valid, dtype=bool)
return PAF_valid
def createPAF(keypoint2d, keypoint3d, objtype, output_size, normalize_3d=True, valid_vec=None):
# objtype: 0: body, 1: hand
# output_size: (h, w)
# keypoint2d: (x, y)
# normalize_3d: if True: set x^2 + y^2 + z^2 = 1; else set x^2 + y^2 = 1
with tf.variable_scope('createPAF'):
assert keypoint2d.get_shape().as_list()[0] == keypoint3d.get_shape().as_list()[0]
assert keypoint2d.get_shape().as_list()[1] == 2
assert keypoint3d.get_shape().as_list()[1] == 3
if valid_vec is None:
valid_vec = tf.ones([keypoint2d.get_shape()[0]], dtype=tf.bool)
h_range = tf.expand_dims(tf.range(output_size[0]), 1)
w_range = tf.expand_dims(tf.range(output_size[1]), 0)
H = tf.cast(tf.tile(h_range, [1, output_size[1]]), tf.float32)
W = tf.cast(tf.tile(w_range, [output_size[0], 1]), tf.float32)
PAFs = []
for ic, conn in enumerate(PAFConnection[objtype]):
AB = keypoint2d[conn[1]] - keypoint2d[conn[0]] # joint 0 - > joint 1
l_AB = tf.sqrt(tf.reduce_sum(tf.square(AB)))
AB = AB / l_AB
dx = W - keypoint2d[conn[0], 0]
dy = H - keypoint2d[conn[0], 1]
dist = tf.abs(dy * AB[0] - dx * AB[1]) # cross product
Xmin = tf.minimum(keypoint2d[conn[0], 0], keypoint2d[conn[1], 0]) - dist_thresh
Xmax = tf.maximum(keypoint2d[conn[0], 0], keypoint2d[conn[1], 0]) + dist_thresh
Ymin = tf.minimum(keypoint2d[conn[0], 1], keypoint2d[conn[1], 1]) - dist_thresh
Ymax = tf.maximum(keypoint2d[conn[0], 1], keypoint2d[conn[1], 1]) + dist_thresh
within_range = tf.cast(W >= Xmin, tf.float32) * tf.cast(W <= Xmax, tf.float32) * tf.cast(H >= Ymin, tf.float32) * tf.cast(H <= Ymax, tf.float32)
within_dist = tf.cast(dist < dist_thresh, tf.float32)
mask = within_range * within_dist
AB3d = (keypoint3d[conn[1]] - keypoint3d[conn[0]])
if normalize_3d:
scale = tf.sqrt(tf.reduce_sum(tf.square(AB3d)))
else:
scale = tf.sqrt(tf.reduce_sum(tf.square(AB3d[:2])))
AB3d /= scale
AB3d = tf.where(tf.is_nan(AB3d), tf.zeros([3], dtype=tf.float32), AB3d)
cond_valid = tf.logical_and(valid_vec[conn[0]], valid_vec[conn[1]])
connPAF = tf.cond(cond_valid, lambda: tf.tile(tf.expand_dims(mask, 2), [1, 1, 3]) * AB3d, lambda: tf.zeros((output_size[0], output_size[1], 3), dtype=tf.float32))
# create the PAF only when both joints are valid
PAFs.append(connPAF)
concat_PAFs = tf.concat(PAFs, axis=2)
return concat_PAFs
def getColorAffinity(v):
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
summed = RY + YG + GC + CB + BM + MR
v = min(max(v, 0.0), 1.0) * summed
if v < RY:
c = (255., 255. * (v / (RY)), 0.)
elif v < RY + YG:
c = (255. * (1 - ((v - RY) / (YG))), 255., 0.)
elif v < RY + YG + GC:
c = (0. * (1 - ((v - RY) / (YG))), 255., 255. * ((v - RY - YG) / (GC)))
elif v < RY + YG + GC + CB:
c = (0., 255. * (1 - ((v - RY - YG - GC) / (CB))), 255.)
elif v < summed - MR:
c = (255. * ((v - RY - YG - GC - CB) / (BM)), 0., 255.)
elif v < summed:
c = (255., 0., 255. * (1 - ((v - RY - YG - GC - CB - BM) / (MR))))
else:
c = (255., 0., 0.)
return np.array(c)
def plot_PAF(PAF_array):
# return a 3-channel uint8 np array
assert len(PAF_array.shape) == 3
assert PAF_array.shape[2] == 2 or PAF_array.shape[2] == 3
out = np.zeros((PAF_array.shape[0], PAF_array.shape[1], 3), dtype=np.uint8)
# 2D PAF: use Openpose Visualization
x = PAF_array[:, :, 0]
y = PAF_array[:, :, 1]
rad = np.sqrt(np.square(x) + np.square(y))
rad = np.minimum(rad, 1.0)
a = np.arctan2(-y, -x) / np.pi
fk = (a + 1.) / 2.
for i in range(PAF_array.shape[0]):
for j in range(PAF_array.shape[1]):
color = getColorAffinity(fk[i, j]) * rad[i, j]
out[i, j, :] = color
if PAF_array.shape[2] == 3:
# also return the average z value (for judge pointing out / in)
# total_rad = np.sqrt(np.sum(np.square(PAF_array), axis=2))
# rz = PAF_array[:, :, 2] / total_rad
# rz[np.isnan(rz)] = 0.0
# rz[total_rad < 0.5] = 0.0
# z_map = np.zeros((PAF_array.shape[0], PAF_array.shape[1], 3))
# z_map[:, :, 0] = 255 * rz * (rz > 0)
# z_map[:, :, 1] = 255 * (-rz) * (rz < 0)
rz = PAF_array[:, :, 2]
z_map = np.zeros((PAF_array.shape[0], PAF_array.shape[1], 3))
z_map[:, :, 0] = 255 * rz * (rz > 0)
z_map[:, :, 1] = 255 * (-rz) * (rz < 0)
z_map = np.maximum(np.minimum(z_map, 255), 0)
return out, z_map.astype(np.uint8)
return out
def plot_all_PAF(PAF_array, PAFdim):
assert PAFdim in (2, 3)
if PAFdim == 2:
assert PAF_array.shape[2] % 2 == 0
total_PAF_x = np.sum(PAF_array[:, :, ::2], axis=2)
total_PAF_y = np.sum(PAF_array[:, :, 1::2], axis=2)
total_PAF = np.stack([total_PAF_x, total_PAF_y], axis=2)
return plot_PAF(total_PAF)
else:
assert PAFdim == 3 and PAF_array.shape[2] % 3 == 0
total_PAF_x = np.sum(PAF_array[:, :, ::3], axis=2)
total_PAF_y = np.sum(PAF_array[:, :, 1::3], axis=2)
total_PAF_z = np.sum(PAF_array[:, :, 2::3], axis=2)
total_PAF = np.stack([total_PAF_x, total_PAF_y, total_PAF_z], axis=2)
return plot_PAF(total_PAF)
def PAF_to_3D(coord2d, PAF, objtype=0):
if objtype == 0:
depth_root_idx = 1 # put neck at 0-depth
else:
assert objtype == 1
depth_root_idx = 0
assert len(coord2d.shape) == 2 and coord2d.shape[1] == 2
coord3d = np.zeros((coord2d.shape[0], 3), dtype=coord2d.dtype)
coord3d[:, :2] = coord2d
coord3d[depth_root_idx, 2] = 0.0
vec3d_array = []
for ic, conn in enumerate(PAFConnection[objtype]):
if objtype == 0:
if PAF_type == 0:
if ic in (9, 13):
continue
elif PAF_type == 1:
if ic in (9, 13) or ic >= 20:
continue
A = coord2d[conn[0]]
B = coord2d[conn[1]]
u = np.linspace(0.0, 1.0, num=11)
v = 1.0 - u
points = (np.outer(A, v) + np.outer(B, u)).astype(int) # 2 * N
vec3ds = PAF[points[1], points[0], 3 * ic:3 * ic + 3] # note order of y, x in index
vec3d = np.mean(vec3ds, axis=0)
vec3d[np.isnan(vec3d)] = 0.0 # numerical stability
if (A == B).all():  # A and B actually coincide with each other, put the default bone length.
coord3d[conn[1], 0] = A[0]
coord3d[conn[1], 1] = A[1]
if vec3d[2] >= 0:
coord3d[conn[1], 2] = coord3d[conn[0], 2] + default_PAF_length[objtype][ic]
else:
coord3d[conn[1], 2] = coord3d[conn[0], 2] - default_PAF_length[objtype][ic]
else:
# find the least square solution of Ax = b
A = np.zeros([3, 2])
A[2, 0] = -1.
A[:, 1] = vec3d
b = coord3d[conn[1]] - coord3d[conn[0]] # by this time the z-value of target joint should be 0
x, _, _, _ = nl.lstsq(A, b, rcond=-1)
if x[1] < 0: # the direction is reversed
if vec3d[2] >= 0:
coord3d[conn[1], 2] = coord3d[conn[0], 2] + default_PAF_length[objtype][ic] # assume that this connection is vertical to the screen
else:
coord3d[conn[1], 2] = coord3d[conn[0], 2] - default_PAF_length[objtype][ic]
else:
coord3d[conn[1], 2] = x[0]
if nl.norm(vec3d) < 0.1 or x[1] < 0: # If there is almost no response, or the direction is reversed, put it zero so that Adam does not fit.
vec3d[:] = 0
vec3d_array.append(vec3d)
return coord3d, np.array(vec3d_array)
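# Note (reading of the least-squares step above): with A = [[0, vx], [0, vy], [-1, vz]] and
# b = (Bx - Ax, By - Ay, -z_A), solving A @ x = b treats x as (z_B, s), so the scaled unit PAF
# direction s * vec3d reproduces the observed 2D displacement while fixing the depth z_B of the child
# joint; a negative scale s means the PAF direction contradicts the 2D layout, which is why the code
# falls back to the default bone length in that case.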
def collect_PAF_vec(coord2d, PAF, objtype=0):
assert len(coord2d.shape) == 2 and coord2d.shape[1] == 2
assert len(PAF.shape) == 3 # H, W, C
vec3d_array = []
for ic, conn in enumerate(PAFConnection[objtype]):
if objtype == 0:
if PAF_type == 0 and ic in (9, 13):
continue
elif PAF_type == 1 and ic in (9, 13): # need the extra PAFs here
continue
A = coord2d[conn[0]]
B = coord2d[conn[1]]
u = np.linspace(0.0, 1.0, num=11)
v = 1.0 - u
points = (np.outer(A, v) + np.outer(B, u)).astype(int)  # 2 * N
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 31 13:22:36 2016
@author: <NAME>
Just a CNN
"""
import sys
sys.path.append('../')
from Lib.TensorBase.tensorbase.base import Model, Data, Layers
from Lib.test_aux import test_net, vis_detections
from Networks.convnet import convnet
from Networks.faster_rcnn_networks_mnist import rpn, roi_proposal, fast_rcnn
from tqdm import tqdm
import numpy as np
import tensorflow as tf
import argparse
# Global Dictionary of Flags
flags = {
'data_directory': '../Data/data_clutter/', # Location of training/testing files
'save_directory': '../Logs/', # Where to create model_directory folder
'model_directory': 'conv5_actually/', # Where to create 'Model[n]' folder
'batch_size': 64,
'display_step': 200, # How often to display loss
'num_classes': 11, # 10 digits, +1 for background
'classes': ('__background__', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0'),
'anchor_scales': [1, 2, 3]
}
class Conv5(Model):
def __init__(self, flags_input):
super().__init__(flags_input, flags_input['run_num'], vram=0.2, restore=flags_input['restore_num'])
self.print_log("Seed: %d" % flags_input['seed'])
self.threads, self.coord = Data.init_threads(self.sess)
def _data(self):
# Initialize placeholder dicts
self.x = {}
self.gt_boxes = {}
self.im_dims = {}
# Train data
file_train = flags['data_directory'] + 'clutter_mnist_train.tfrecords'
self.x['TRAIN'], self.gt_boxes['TRAIN'], self.im_dims['TRAIN'] = Data.batch_inputs(self.read_and_decode,
file_train, batch_size=
self.flags['batch_size'])
# Validation data. No GT Boxes necessary.
file_valid = flags['data_directory'] + 'clutter_mnist_valid.tfrecords'
self.x['VALID'], _, self.im_dims['VALID'] = Data.batch_inputs(self.read_and_decode,
file_valid, mode="eval",
batch_size=
self.flags['batch_size'],
num_threads=1, num_readers=1)
# Test data. No GT Boxes.
self.x['TEST'] = tf.placeholder(tf.float32, [None, 128, 128, 1])
self.im_dims['TEST'] = tf.placeholder(tf.int32, [None, 2])
self.num_images = {'TRAIN': 55000, 'VALID': 5000, 'TEST': 10000}
def _summaries(self):
""" Define summaries for TensorBoard """
tf.summary.scalar("Total_Loss", self.cost)
tf.summary.image("x_train", self.x['TRAIN'])
def _network(self):
""" Define the network outputs """
# Initialize network dicts
self.cnn = {}
self.logits = {}
# Train network
with tf.variable_scope('model'):
self._cnn(self.x['TRAIN'], self.gt_boxes['TRAIN'], self.im_dims['TRAIN'], 'TRAIN')
# Valid network => Uses same weights as train network
with tf.variable_scope('model', reuse=True):
assert tf.get_variable_scope().reuse is True
self._cnn(self.x['VALID'], None, self.im_dims['VALID'], 'VALID')
# Test network => Uses same weights as train network
with tf.variable_scope('model', reuse=True):
assert tf.get_variable_scope().reuse is True
self._cnn(self.x['TEST'], None, self.im_dims['TEST'], 'TEST')
def _cnn(self, x, gt_boxes, im_dims, key):
# self.cnn[key] = convnet(x, [5, 3, 3, 3, 3], [32, 64, 64, 128, 128], strides=[2, 2, 1, 2, 1])
self.cnn[key] = Layers(x)
self.cnn[key].conv2d(5, 32)
self.cnn[key].maxpool()
self.cnn[key].conv2d(3, 64)
self.cnn[key].maxpool()
self.cnn[key].conv2d(3, 64)
self.cnn[key].conv2d(3, 128)
self.cnn[key].maxpool()
self.cnn[key].conv2d(3, 128)
self.cnn[key].flatten()
self.cnn[key].fc(512)
self.cnn[key].fc(11, activation_fn=None)
self.logits[key] = self.cnn[key].get_output()
def _optimizer(self):
""" Define losses and initialize optimizer """
# Losses (come from TRAIN networks)
self.label = self.gt_boxes['TRAIN'][:,4]
self.cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits['TRAIN'], labels=self.label))
# Optimization operation
self.optimizer = tf.train.AdamOptimizer().minimize(self.cost)
def _run_train_iter(self):
""" Run training iteration"""
summary, _ = self.sess.run([self.merged, self.optimizer])
return summary
def _record_train_metrics(self):
""" Record training metrics """
loss,logits = self.sess.run([self.cost,self.logits['TRAIN']])
self.print_log('Step %d: loss = %.6f' % (self.step, loss))
print('Class predictions:')
print(np.argmax(logits, 1))
"""
ColECM: Collagen ExtraCellular Matrix Simulation
SIMULATION 2D ROUTINE
Created by: <NAME>
Created on: 09/03/2018
Last Modified: 19/04/2018
"""
import numpy as np
import sys, os, pickle
import utilities as ut
def cos_sin_theta_2D(vector, r_vector):
"""
cos_sin_theta_2D(vector, r_vector)
Returns cosine and sine of angles of intersecting vectors between even and odd indices
Parameters
----------
vector: array_like, (float); shape=(n_vector, n_dim)
Array of displacement vectors between connecting beads
r_vector: array_like, (float); shape=(n_vector)
Array of radial distances between connecting beads
Returns
-------
cos_the: array_like (float); shape=(n_vector/2)
Cosine of the angle between each pair of displacement vectors
sin_the: array_like (float); shape=(n_vector/2)
Sine of the angle between each pair of displacement vectors
r_prod: array_like (float); shape=(n_vector/2)
Product of radial distance between each pair of displacement vectors
"""
n_vector = int(vector.shape[0])
n_dim = vector.shape[1]
temp_vector = np.reshape(vector, (int(n_vector/2), 2, n_dim))
"Calculate |rij||rjk| product for each pair of vectors"
r_prod = np.prod(np.reshape(r_vector, (int(n_vector/2), 2)), axis = 1)
"Form dot product of each vector pair rij*rjk in vector array corresponding to an angle"
dot_prod = np.sum(np.prod(temp_vector, axis=1), axis=1)
"Form pseudo-cross product of each vector pair rij*rjk in vector array corresponding to an angle"
cross_prod = np.linalg.det(temp_vector)
"Calculate cos(theta) for each angle"
cos_the = dot_prod / r_prod
"Calculate sin(theta) for each angle"
sin_the = cross_prod / r_prod
return cos_the, sin_the, r_prod
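# Worked example for cos_sin_theta_2D (hypothetical input): for the bond pair rij = (1, 0), rjk = (0, 1)
# with |rij| = |rjk| = 1, the dot product is 0 and the 2x2 determinant is 1, so the function returns
# cos_the = [0.], sin_the = [1.] and r_prod = [1.], i.e. a 90 degree angle between the two connected bonds.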
def calc_energy_forces_2D(pos, cell_dim, bond_indices, angle_indices, angle_bond_indices, param):
"""
calc_energy_forces_2D(pos, cell_dim, bond_indices, angle_indices, angle_bond_indices, param)
Return tot potential energy and forces on each bead in simulation
Parameters
----------
dxy: array_like (float); shape=(2, n_bead, n_bead)
Displacement along x and y axis between each bead
r2: array_like (float); shape=(n_bead, n_bead)
Square of radial distance between each bead
bond_matrix: array_like (int); shape=(n_bead, n_bead)
Matrix determining whether a bond is present between two beads
verlet_list: array_like (int); shape=(n_bead, n_bead)
Matrix determining whether two beads are within rc radial distance
vdw_param: array_like (float); shape=(2)
Sigma and epsilon parameters for Van der Waals forces
bond_param: array_like (float); shape=(2)
Equilibrium length and energy parameters for bonded forces
angle_param: array_like (float); shape=(2)
Equilibrium angle and energy parameters for angular forces
rc: float
Interaction cutoff radius for non-bonded forces
bond_beads: array_like, (int); shape=(n_angle, 3)
Array containing indices in pos array of all 3-bead angular interactions
dxy_index: array_like, (int); shape=(n_bond, 2)
Array containing indices in dx and dy arrays of all bonded interactions
r_index: array_like, (int); shape=(n_bond, 2)
Array containing indices in r array of all bonded interactions
Returns
-------
pot_energy: float
Total potential energy of simulation cell
frc_beads: array_like (float); shape=(n_beads, n_dim)
Forces acting upon each bead due to positional array
virial_tensor: array_like (float); shape=(n_dim, n_dim)
Virial term of pressure tensor components
"""
f_beads = np.zeros((2, pos.shape[0]))
pot_energy = 0
cut_frc = ut.force_vdw(param['rc']**2, param['vdw_sigma'], param['vdw_epsilon'])
cut_pot = ut.pot_vdw(param['rc']**2, param['vdw_sigma'], param['vdw_epsilon'])
virial_tensor = np.zeros((2, 2))
n_bond = bond_indices[0].shape[0]
pair_dist = ut.get_distances(pos, cell_dim)
pair_r2 = np.sum(pair_dist**2, axis=0)
if n_bond > 0:
"Bond Lengths"
bond_r = np.sqrt(pair_r2[bond_indices])
#verlet_list_r0 = ut.check_cutoff(r_half, param['bond_r0'])
#verlet_list_r1 = ut.check_cutoff(r_half, param['bond_r1'])
bond_pot = ut.pot_harmonic(bond_r, param['bond_r0'], param['bond_matrix'][bond_indices])# * verlet_list_r0
#bond_pot_1 = ut.pot_harmonic(r_half, param['bond_r1'], param['bond_k1']) * verlet_list_r1
pot_energy += 0.5 * np.sum(bond_pot)# + np.sum(bond_pot_1)
bond_frc = ut.force_harmonic(bond_r, param['bond_r0'], param['bond_matrix'][bond_indices])# * verlet_list_r0
#bond_frc_1 = ut.force_harmonic(r_half, param['bond_r1'], param['bond_k1']) * verlet_list_r1
temp_frc = np.zeros((2, pos.shape[0], pos.shape[0]))
for i in range(2):
temp_frc[i][bond_indices] += bond_frc * pair_dist[i][bond_indices] / bond_r
f_beads[i] += np.sum(temp_frc[i], axis=1)
#for i in range(2):
# for j in range(2): virial_tensor[i][j] += np.sum(bond_frc / r_half * distances[i][indices_half] * distances[j][indices_half])
"Bond Angles"
try:
angle_dist = pair_dist.T[angle_bond_indices].T
"Make array of vectors rij, rjk for all connected bonds"
vector = np.stack((angle_dist[0], angle_dist[1]), axis=1)
n_vector = int(vector.shape[0])
"Find |rij| values for each vector"
r_vector = np.sqrt(pair_r2[angle_bond_indices])
cos_the, sin_the, r_prod = cos_sin_theta_2D(vector, r_vector)
pot_energy += np.sum(param['angle_array'] * (cos_the + 1))
"Form arrays of |rij| values, cos(theta) and |rij||rjk| terms same shape as vector array"
r_array = np.reshape(np.repeat(r_vector, 2), vector.shape)
sin_the_array = np.reshape(np.repeat(sin_the, 4), vector.shape)
r_prod_array = np.reshape(np.repeat(r_prod, 4), vector.shape)
"Form left and right hand side terms of (cos(theta) rij / |rij|^2 - rjk / |rij||rjk|)"
r_left = vector / r_prod_array
r_right = sin_the_array * vector / r_array**2
ij_indices = np.arange(0, n_vector, 2)
jk_indices = np.arange(1, n_vector, 2)
"Perform right hand - left hand term for every rij rkj pair"
r_left[ij_indices] -= r_right[jk_indices]
r_left[jk_indices] -= r_right[ij_indices]
"Calculate forces upon beads i, j and k"
frc_angle_ij = param['angle_k0'] * r_left
frc_angle_k = -np.sum(np.reshape(frc_angle_ij, (int(n_vector/2), 2, 2)), axis=1)
"Add angular forces to force array"
for i in range(2):
f_beads[i][angle_indices.T[0]] -= frc_angle_ij[ij_indices].T[i]
f_beads[i][angle_indices.T[1]] -= frc_angle_k.T[i]
f_beads[i][angle_indices.T[2]] -= frc_angle_ij[jk_indices].T[i]
except IndexError: pass
verlet_list = ut.check_cutoff(pair_r2, param['rc']**2)
non_zero = np.nonzero(pair_r2 * verlet_list)
nonbond_pot = ut.pot_vdw((pair_r2 * verlet_list)[non_zero], param['vdw_sigma'], param['vdw_matrix'][non_zero]) - cut_pot
pot_energy += np.nansum(nonbond_pot) / 2
nonbond_frc = ut.force_vdw((pair_r2 * verlet_list)[non_zero], param['vdw_sigma'], param['vdw_matrix'][non_zero]) - cut_frc
temp_xy = np.zeros(pair_dist.shape)
for i in range(2):
temp_xy[i][non_zero] += nonbond_frc * (pair_dist[i][non_zero] / pair_r2[non_zero])
for j in range(2):
virial_tensor[i][j] += np.sum(np.triu(temp_xy[i] * pair_dist[i] * pair_dist[j]))
f_beads[i] += np.sum(temp_xy[i], axis=0)
frc = f_beads.T
return frc, pot_energy, virial_tensor
def calc_energy_forces_2D_mpi(pos, cell_dim, pos_indices, bond_indices, glob_indices, angle_indices, angle_bond_indices,
angle_coeff, vdw_coeff, virial_indicies, param):
"""
calc_energy_forces_2D_mpi(pos, cell_dim, pos_indices, bond_indices, glob_indices, angle_indices, angle_bond_indices, angle_coeff, vdw_coeff, virial_indicies, param)
Return tot potential energy and forces on each bead in simulation
Parameters
----------
dxy: array_like (float); shape=(2, n_bead, n_bead)
Displacement along x and y axis between each bead
r2: array_like (float); shape=(n_bead, n_bead)
Square of radial distance between each bead
bond_matrix: array_like (int); shape=(n_bead, n_bead)
Matrix determining whether a bond is present between two beads
verlet_list: array_like (int); shape=(n_bead, n_bead)
Matrix determining whether two beads are within rc radial distance
vdw_param: array_like (float); shape=(2)
Sigma and epsilon parameters for Van der Waals forces
bond_param: array_like (float); shape=(2)
Equilibrium length and energy parameters for bonded forces
angle_param: array_like (float); shape=(2)
Equilibrium angle and energy parameters for angular forces
rc: float
Interaction cutoff radius for non-bonded forces
bond_beads: array_like, (int); shape=(n_angle, 3)
Array containing indices in pos array of all 3-bead angular interactions
dxy_index: array_like, (int); shape=(n_bond, 2)
Array containing indices in dx and dy arrays of all bonded interactions
r_index: array_like, (int); shape=(n_bond, 2)
Array containing indices in r array of all bonded interactions
Returns
-------
pot_energy: float
Total potential energy of simulation cell
frc_beads: array_like (float); shape=(n_beads, n_dim)
Forces acting upon each bead due to positional array
virial_tensor: array_like (float); shape=(n_dim, n_dim)
Virial term of pressure tensor components
"""
f_beads = np.zeros((2, pos.shape[0]))
pot_energy = 0
cut_frc = ut.force_vdw(param['rc']**2, param['vdw_sigma'], param['vdw_epsilon'])
cut_pot = ut.pot_vdw(param['rc']**2, param['vdw_sigma'], param['vdw_epsilon'])
virial_tensor = np.zeros((2, 2))
n_bond = bond_indices[0].shape[0]
pair_dist = ut.get_distances_mpi(pos, pos_indices, cell_dim)
pair_r2 = np.sum(pair_dist**2, axis=0)
if n_bond > 0:
"Bond Lengths"
bond_r = np.sqrt(pair_r2[bond_indices])
#verlet_list_r0 = ut.check_cutoff(r_half, param['bond_r0'])
#verlet_list_r1 = ut.check_cutoff(r_half, param['bond_r1'])
bond_pot = ut.pot_harmonic(bond_r, param['bond_r0'], param['bond_matrix'][glob_indices])# * verlet_list_r0
#bond_pot_1 = ut.pot_harmonic(r_half, param['bond_r1'], param['bond_k1']) * verlet_list_r1
pot_energy += 0.5 * np.sum(bond_pot)# + np.sum(bond_pot_1)
bond_frc = ut.force_harmonic(bond_r, param['bond_r0'], param['bond_matrix'][glob_indices])# * verlet_list_r0
#bond_frc_1 = ut.force_harmonic(r_half, param['bond_r1'], param['bond_k1']) * verlet_list_r1
temp_frc = np.zeros((2, pos.shape[0], pos.shape[0]))
for i in range(2):
temp_frc[i][glob_indices] += bond_frc * pair_dist[i][bond_indices] / bond_r
f_beads[i] += np.sum(temp_frc[i], axis=1)
#for i in range(2):
# for j in range(2): virial_tensor[i][j] += np.sum(bond_frc / r_half * distances[i][indices_half] * distances[j][indices_half])
"Bond Angles"
try:
angle_dist = (pos[angle_bond_indices[1]] - pos[angle_bond_indices[0]]).T
for i in range(param['n_dim']): angle_dist[i] -= cell_dim[i] * np.array(2 * angle_dist[i] / cell_dim[i], dtype=int)
angle_r2 = np.sum(angle_dist**2, axis=0)
r_vector = np.sqrt(angle_r2)
"Make array of vectors rij, rjk for all connected bonds"
vector = np.stack((angle_dist[0], angle_dist[1]), axis=1)
n_vector = int(vector.shape[0])
"Find |rij| values for each vector"
cos_the, sin_the, r_prod = cos_sin_theta_2D(vector, r_vector)
pot_energy += np.sum(angle_coeff * (cos_the + 1))
"Form arrays of |rij| vales, cos(theta) and |rij||rjk| terms same shape as vector array"
r_array = np.reshape(np.repeat(r_vector, 2), vector.shape)
sin_the_array = np.reshape(np.repeat(sin_the, 4), vector.shape)
r_prod_array = np.reshape(np.repeat(r_prod, 4), vector.shape)
"Form left and right hand side terms of (cos(theta) rij / |rij|^2 - rjk / |rij||rjk|)"
r_left = vector / r_prod_array
r_right = sin_the_array * vector / r_array**2
ij_indices = np.arange(0, n_vector, 2)
jk_indices = np.arange(1, n_vector, 2)
"Perfrom right hand - left hand term for every rij rkj pair"
r_left[ij_indices] -= r_right[jk_indices]
r_left[jk_indices] -= r_right[ij_indices]
"Calculate forces upon beads i, j and k"
frc_angle_ij = param['angle_k0'] * r_left
frc_angle_k = -np.sum(np.reshape(frc_angle_ij, (int(n_vector/2), 2, 2)), axis=1)
"Add angular forces to force array"
for i in range(2):
f_beads[i][angle_indices.T[0]] -= frc_angle_ij[ij_indices].T[i]
f_beads[i][angle_indices.T[1]] -= frc_angle_k.T[i]
f_beads[i][angle_indices.T[2]] -= frc_angle_ij[jk_indices].T[i]
except IndexError: pass
verlet_list = ut.check_cutoff(pair_r2, param['rc']**2)
non_zero = np.nonzero(pair_r2 * verlet_list)
nonbond_pot = ut.pot_vdw((pair_r2 * verlet_list)[non_zero], param['vdw_sigma'], vdw_coeff[non_zero]) - cut_pot
pot_energy += np.nansum(nonbond_pot) / 2
nonbond_frc = ut.force_vdw((pair_r2 * verlet_list)[non_zero], param['vdw_sigma'], vdw_coeff[non_zero]) - cut_frc
temp_xy = np.zeros(pair_dist.shape)
for i in range(2):
temp_xy[i][non_zero] += nonbond_frc * (pair_dist[i][non_zero] / pair_r2[non_zero])
for j in range(2):
virial_tensor[i][j] += np.sum(np.triu(temp_xy[i] * pair_dist[i] * pair_dist[j])[virial_indicies])
f_beads[i] += np.sum(temp_xy[i], axis=0)
frc = f_beads.T
return frc, pot_energy, virial_tensor
def cos_sin_theta_3D(vector, r_vector):
"""
cos_sin_theta_3D(vector, r_vector)
Returns cosine and sine of angles of intersecting vectors between even and odd indices
Parameters
----------
vector: array_like, (float); shape=(n_vector, n_dim)
Array of displacement vectors between connecting beads
r_vector: array_like, (float); shape=(n_vector)
Array of radial distances between connecting beads
Returns
-------
cos_the: array_like (float); shape=(n_vector/2)
Cosine of the angle between each pair of displacement vectors
sin_the: array_like (float); shape=(n_vector/2)
Sine of the angle between each pair of displacement vectors
r_prod: array_like (float); shape=(n_vector/2)
Product of radial distance between each pair of displacement vectors
"""
n_vector = int(vector.shape[0])
n_dim = vector.shape[1]
temp_vector = np.reshape(vector, (int(n_vector/2), 2, n_dim))
"Calculate |rij||rjk| product for each pair of vectors"
r_prod = np.prod(np.reshape(r_vector, (int(n_vector/2), 2)), axis = 1)
"Form dot product of each vector pair rij*rjk in vector array corresponding to an angle"
dot_prod = np.sum(np.prod(temp_vector, axis=1), axis=1)
"Form pseudo-cross product of each vector pair rij*rjk in vector array corresponding to an angle"
temp_vector = np.moveaxis(temp_vector, (1, 0, 2), (0, 1, 2))
cross_prod = np.cross(temp_vector[0], temp_vector[1])
"Calculate cos(theta) for each angle"
cos_the = dot_prod / r_prod
"Calculate sin(theta) for each angle"
sin_the = cross_prod / np.reshape(np.repeat(r_prod, n_dim), cross_prod.shape)
return cos_the, sin_the, r_prod
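# Example usage (illustrative sketch): a single 90-degree angle formed by the
# displacement vectors r_ij and r_jk, stacked as consecutive rows; for this
# geometry the helper returns cos(theta) close to 0.
#
#   vector = np.array([[1.0, 0.0, 0.0],   # r_ij
#                      [0.0, 1.0, 0.0]])  # r_jk
#   r_vector = np.linalg.norm(vector, axis=1)
#   cos_the, sin_the, r_prod = cos_sin_theta_3D(vector, r_vector)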
def calc_energy_forces_3D(pos, cell_dim, bond_indices, angle_indices, angle_bond_indices, param):
"""
calc_energy_forces(pos, cell_dim, bond_indices, angle_indices, angle_bond_indices, vdw_coeff, param)
Return tot potential energy and forces on each bead in simulation
Parameters
----------
dxy: array_like (float); shape=(2, n_bead, n_bead)
Displacement along x and y axis between each bead
r2: array_like (float); shape=(n_bead, n_bead)
Square of Radial disance between each bead
bond_matrix: array_like (int); shape=(n_bead, n_bead)
Matrix determining whether a bond is present between two beads
verlet_list: array_like (int); shape=(n_bead, n_bead)
Matrix determining whether two beads are within rc radial distance
vdw_param: array_like (float); shape=(2)
Sigma and epsilon paameters for Van de Waals forces
bond_param: array_like (float); shape=(2)
Equilibrium length and energy paameters for bonded forces
angle_param: array_like (float); shape=(2)
Equilibrium angle and energy paameters for angular forces
rc: float
Interaction cutoff radius for non-bonded forces
bond_beads: array_like, (int); shape=(n_angle, 3)
Array containing indicies in pos array all 3-bead angular interactions
dxy_index: array_like, (int); shape=(n_bond, 2)
Array containing indicies in dx and dy arrays of all bonded interactions
r_index: array_like, (int); shape=(n_bond, 2)
Array containing indicies in r array of all bonded interactions
Returns
-------
pot_energy: float
Total potential energy of simulation cell
frc_beads: array_like (float); shape=(n_beads, n_dim)
Forces acting upon each bead due to positional array
virial_tensor: array_like (float); shape=(n_dim, n_dim)
Virial term of pressure tensor components
"""
f_beads = np.zeros((3, pos.shape[0]))
pot_energy = 0
cut_frc = ut.force_vdw(param['rc']**2, param['vdw_sigma'], param['vdw_epsilon'])
cut_pot = ut.pot_vdw(param['rc']**2, param['vdw_sigma'], param['vdw_epsilon'])
virial_tensor = np.zeros((3, 3))
n_bond = bond_indices[0].shape[0]
pair_dist = ut.get_distances(pos, cell_dim)
pair_r2 = np.sum(pair_dist**2, axis=0)
if n_bond > 0:
"Bond Lengths"
bond_r = np.sqrt(pair_r2[bond_indices])
#verlet_list_r0 = ut.check_cutoff(r_half, param['bond_r0'])
#verlet_list_r1 = ut.check_cutoff(r_half, param['bond_r1'])
bond_pot = ut.pot_harmonic(bond_r, param['bond_r0'], param['bond_matrix'][bond_indices])# * verlet_list_r0
#bond_pot_1 = ut.pot_harmonic(r_half, param['bond_r1'], param['bond_k1']) * verlet_list_r1
pot_energy += 0.5 * np.sum(bond_pot)# + np.sum(bond_pot_1)
bond_frc = ut.force_harmonic(bond_r, param['bond_r0'], param['bond_matrix'][bond_indices])# * verlet_list_r0
#bond_frc_1 = ut.force_harmonic(r_half, param['bond_r1'], param['bond_k1']) * verlet_list_r1
temp_frc = np.zeros((3, pos.shape[0], pos.shape[0]))
for i in range(3):
temp_frc[i][bond_indices] += bond_frc * pair_dist[i][bond_indices] / bond_r
f_beads[i] += np.sum(temp_frc[i], axis=1)
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
#
# sugarcane.py
# projects
#
# Created by <NAME> on 12/02/19
# Copyright © 2019 <NAME>. All rights reserved.
#
import logging
import os.path as op
import sys
from random import random, sample
from itertools import groupby
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
from jcvi.apps.base import OptionParser, ActionDispatcher, mkdir
from jcvi.graphics.base import adjust_spines, markup, normalize_axes, savefig
from jcvi.utils.validator import validate_in_choices
SoColor = "#7436a4" # Purple
SsColor = "#5a8340" # Green
# Computed using prepare(), corrected with real sizes
ChrSizes = {
"SO-chr01": 148750011,
"SO-chr02": 119865146,
"SO-chr03": 103845728,
"SO-chr04": 104559946,
"SO-chr05": 93134056,
"SO-chr06": 74422021,
"SO-chr07": 81308893,
"SO-chr08": 71010813,
"SO-chr09": 86380266,
"SO-chr10": 73923121,
"SS-chr01": 114519418,
"SS-chr02": 119157314,
"SS-chr03": 85009228,
"SS-chr04": 79762909,
"SS-chr05": 90584537,
"SS-chr06": 95848354,
"SS-chr07": 83589369,
"SS-chr08": 64028871,
}
# Simulate genome composition
class Genome:
def __init__(self, name, prefix, ploidy, haploid_chromosome_count):
"""
Simulate a genome with given ploidy and haploid_chromosome_count. Example:
>>> print(Genome("t", "pf", 2, 3))
t: pf-chr01_a,pf-chr01_b,pf-chr02_a,pf-chr02_b,pf-chr03_a,pf-chr03_b
"""
self.name = name
chromosomes = []
for i in range(haploid_chromosome_count):
chromosomes += [
f"{prefix}-chr{i + 1:02d}_{chr(ord('a') + j)}" for j in range(ploidy)
]
self.chromosomes = chromosomes
def __len__(self):
return len(self.chromosomes)
@classmethod
def make(cls, name, chromosomes):
genome = Genome(name, "", 0, 0)
genome.chromosomes = chromosomes
return genome
@property
def gamete(self):
"""Randomly generate a gamete from current genome that"""
self.chromosomes.sort()
gamete_chromosomes = []
# Check for any chromosome that has 2 identical copies; if so, we will assume disomic
# inheritance for that chromosome and always keep one and only one copy
duplicate_chromosomes = []
singleton_chromosomes = []
for chromosome, chromosomes in groupby(self.chromosomes):
chromosomes = list(chromosomes)
ncopies = len(chromosomes)
duplicate_chromosomes += [chromosome] * (ncopies // 2)
if ncopies % 2 == 1:
singleton_chromosomes.append(chromosome)
# Get one copy of each duplicate chromosome first
gamete_chromosomes += duplicate_chromosomes
def prefix(x):
return x.split("_", 1)[0]
# Randomly assign the rest, singleton chromosomes
for group, chromosomes in groupby(singleton_chromosomes, key=prefix):
chromosomes = list(chromosomes)
halfn = len(chromosomes) // 2
# Odd number, e.g. 5, equal chance to be 2 or 3
if len(chromosomes) % 2 != 0 and random() < 0.5:
halfn += 1
gamete_chromosomes += sorted(sample(chromosomes, halfn))
return Genome.make(self.name + " gamete", gamete_chromosomes)
def mate_nplusn(self, name, other_genome, verbose=True):
if verbose:
print(
f"Crossing '{self.name}' x '{other_genome.name}' (n+n)", file=sys.stderr
)
f1_chromosomes = sorted(
self.gamete.chromosomes + other_genome.gamete.chromosomes
)
return Genome.make(name, f1_chromosomes)
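# Example usage (illustrative sketch; the ploidy and chromosome counts below are
# made up for demonstration):
#   SO = Genome("SO", "SO", 8, 10)
#   SS = Genome("SS", "SS", 8, 8)
#   F1 = SO.mate_nplusn("F1", SS)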
def mate_nx2plusn(self, name, other_genome, verbose=True):
if verbose:
print(
f"Crossing '{self.name}' x '{other_genome.name}' (2xn+n)",
file=sys.stderr,
)
f1_chromosomes = sorted(
2 * self.gamete.chromosomes + other_genome.gamete.chromosomes
)
return Genome.make(name, f1_chromosomes)
def mate_2nplusn(self, name, other_genome, verbose=True):
if verbose:
print(
f"Crossing '{self.name}' x '{other_genome.name}' (2n+n)",
file=sys.stderr,
)
f1_chromosomes = sorted(self.chromosomes + other_genome.gamete.chromosomes)
return Genome.make(name, f1_chromosomes)
def __str__(self):
return self.name + ": " + ",".join(self.chromosomes)
@property
def summary(self):
def prefix(x, sep="-"):
return x.split(sep, 1)[0]
def size(chromosomes):
return sum(ChrSizes[prefix(x, sep="_")] for x in chromosomes)
# Chromosome count
total_count = 0
total_unique = 0
total_size = 0
total_so_size = 0
ans = []
for group, chromosomes in groupby(self.chromosomes, prefix):
chromosomes = list(chromosomes)
uniq_chromosomes = set(chromosomes)
group_count = len(chromosomes)
group_unique = len(uniq_chromosomes)
group_so_size = size({x for x in uniq_chromosomes if x[:2] == "SO"})
group_size = size(uniq_chromosomes)
total_count += group_count
total_unique += group_unique
total_so_size += group_so_size
total_size += group_size
ans.append((group, group_count, group_unique, group_so_size, group_size))
ans.append(("Total", total_count, total_unique, total_so_size, total_size))
return ans
def print_summary(self):
print("[SUMMARY]")
for group, group_count, group_unique, _, _ in self.summary:
print(f"{group}: count={group_count}, unique={group_unique}")
class GenomeSummary:
def __init__(self, SO_data, SS_data, percent_SO_data):
self.SO_data = SO_data
self.SS_data = SS_data
self.percent_SO_data = percent_SO_data
self.percent_SS_data = [100 - x for x in percent_SO_data]
def _summary(self, a, tag, precision=0):
mean, min, max = (
round(np.mean(a), precision),
round(np.min(a), precision),
round(np.max(a), precision),
)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
def decode_image(im_file, im_info):
"""read rgb image
Args:
im_file (str|np.ndarray): input can be image path or np.ndarray
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
if isinstance(im_file, str):
with open(im_file, 'rb') as f:
im_read = f.read()
data = np.frombuffer(im_read, dtype='uint8')
im = cv2.imdecode(data, 1) # BGR mode, but need RGB mode
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
else:
im = im_file
im_info['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
im_info['scale_factor'] = np.array([1., 1.], dtype=np.float32)
return im, im_info
class Resize(object):
"""resize image by target_size and max_size
Args:
target_size (int): the target size of image
keep_ratio (bool): whether keep_ratio or not, default true
interp (int): method of resize
"""
def __init__(self, target_size, keep_ratio=True, interp=cv2.INTER_LINEAR):
if isinstance(target_size, int):
target_size = [target_size, target_size]
self.target_size = target_size
self.keep_ratio = keep_ratio
self.interp = interp
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
assert len(self.target_size) == 2
assert self.target_size[0] > 0 and self.target_size[1] > 0
im_channel = im.shape[2]
im_scale_y, im_scale_x = self.generate_scale(im)
im = cv2.resize(
im,
None,
None,
fx=im_scale_x,
fy=im_scale_y,
interpolation=self.interp)
im_info['im_shape'] = np.array(im.shape[:2]).astype('float32')
im_info['scale_factor'] = np.array(
[im_scale_y, im_scale_x]).astype('float32')
return im, im_info
def generate_scale(self, im):
"""
Args:
im (np.ndarray): image (np.ndarray)
Returns:
im_scale_x: the resize ratio of X
im_scale_y: the resize ratio of Y
"""
origin_shape = im.shape[:2]
im_c = im.shape[2]
if self.keep_ratio:
im_size_min = np.min(origin_shape)
im_size_max = np.max(origin_shape)
target_size_min = np.min(self.target_size)
target_size_max = np.max(self.target_size)
im_scale = float(target_size_min) / float(im_size_min)
if np.round(im_scale * im_size_max) > target_size_max:
im_scale = float(target_size_max) / float(im_size_max)
im_scale_x = im_scale
im_scale_y = im_scale
else:
resize_h, resize_w = self.target_size
im_scale_y = resize_h / float(origin_shape[0])
im_scale_x = resize_w / float(origin_shape[1])
return im_scale_y, im_scale_x
class NormalizeImage(object):
"""normalize image
Args:
mean (list): im - mean
std (list): im / std
is_scale (bool): whether need im / 255
is_channel_first (bool): if True: image shape is CHW, else: HWC
"""
def __init__(self, mean, std, is_scale=True):
self.mean = mean
self.std = std
self.is_scale = is_scale
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
im = im.astype(np.float32, copy=False)
mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
std = np.array(self.std)[np.newaxis, np.newaxis, :]
if self.is_scale:
im = im / 255.0
im -= mean
im /= std
return im, im_info
class Permute(object):
"""permute image
Args:
to_bgr (bool): whether convert RGB to BGR
channel_first (bool): whether convert HWC to CHW
"""
def __init__(self, ):
super(Permute, self).__init__()
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
im = im.transpose((2, 0, 1)).copy()
return im, im_info
class PadStride(object):
""" padding image for model with FPN, instead PadBatch(pad_to_stride) in original config
Args:
stride (bool): model with FPN need image shape % stride == 0
"""
def __init__(self, stride=0):
self.coarsest_stride = stride
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
coarsest_stride = self.coarsest_stride
if coarsest_stride <= 0:
return im, im_info
im_c, im_h, im_w = im.shape
pad_h = int(np.ceil(float(im_h) / coarsest_stride) * coarsest_stride)
pad_w = int(np.ceil(float(im_w) / coarsest_stride) * coarsest_stride)
padding_im = np.zeros((im_c, pad_h, pad_w), dtype=np.float32)
padding_im[:, :im_h, :im_w] = im
return padding_im, im_info
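# Example usage (illustrative sketch; the file name, target size, stride and the
# ImageNet-style mean/std values below are placeholders, not mandated by this module):
#   im, im_info = decode_image('demo.jpg', {})
#   ops = [Resize((608, 608)),
#          NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
#          Permute(),
#          PadStride(32)]
#   for op in ops:
#       im, im_info = op(im, im_info)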
class WarpAffine(object):
"""Warp affine the image
"""
def __init__(self,
keep_res=False,
pad=31,
input_h=512,
input_w=512,
scale=0.4,
shift=0.1):
self.keep_res = keep_res
self.pad = pad
self.input_h = input_h
self.input_w = input_w
self.scale = scale
self.shift = shift
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
img = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
h, w = img.shape[:2]
if self.keep_res:
input_h = (h | self.pad) + 1
input_w = (w | self.pad) + 1
s = np.array([input_w, input_h], dtype=np.float32)
c = np.array([w // 2, h // 2], dtype=np.float32)
else:
s = max(h, w) * 1.0
input_h, input_w = self.input_h, self.input_w
c = np.array([w / 2., h / 2.], dtype=np.float32)
trans_input = get_affine_transform(c, s, 0, [input_w, input_h])
img = cv2.resize(img, (w, h))
inp = cv2.warpAffine(
img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)
return inp, im_info
class EvalAffine(object):
def __init__(self, size, stride=64):
super(EvalAffine, self).__init__()
self.size = size
self.stride = stride
def __call__(self, image, im_info):
s = self.size
h, w, _ = image.shape
trans, size_resized = get_affine_mat_kernel(h, w, s, inv=False)
image_resized = cv2.warpAffine(image, trans, size_resized)
return image_resized, im_info
def get_affine_mat_kernel(h, w, s, inv=False):
if w < h:
w_ = s
h_ = int(np.ceil((s / w * h) / 64.) * 64)
scale_w = w
scale_h = h_ / w_ * w
else:
h_ = s
w_ = int(np.ceil((s / h * w) / 64.) * 64)
scale_h = h
scale_w = w_ / h_ * h
center = np.array([np.round(w / 2.), np.round(h / 2.)])
size_resized = (w_, h_)
trans = get_affine_transform(
center, np.array([scale_w, scale_h]), 0, size_resized, inv=inv)
return trans, size_resized
def get_affine_transform(center,
input_size,
rot,
output_size,
shift=(0., 0.),
inv=False):
"""Get the affine transform matrix, given the center/scale/rot/output_size.
Args:
center (np.ndarray[2, ]): Center of the bounding box (x, y).
scale (np.ndarray[2, ]): Scale of the bounding box
wrt [width, height].
rot (float): Rotation angle (degree).
output_size (np.ndarray[2, ]): Size of the destination heatmaps.
shift (0-100%): Shift translation ratio wrt the width/height.
Default (0., 0.).
inv (bool): Option to inverse the affine transform direction.
(inv=False: src->dst or inv=True: dst->src)
Returns:
np.ndarray: The transform matrix.
"""
assert len(center) == 2
assert len(output_size) == 2
assert len(shift) == 2
if not isinstance(input_size, (np.ndarray, list)):
input_size = np.array([input_size, input_size], dtype=np.float32)
scale_tmp = input_size
shift = np.array(shift)
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = rotate_point([0., src_w * -0.5], rot_rad)
dst_dir = np.array([0., dst_w * -0.5])
src = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
src[2, :] = _get_3rd_point(src[0, :], src[1, :])
dst = np.zeros((3, 2), dtype=np.float32)
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
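# Example usage (illustrative sketch; 'img' is a placeholder image): map a
# 100x100 box centred at (50, 50) onto a 256x256 output.
#   trans = get_affine_transform(np.array([50., 50.]), np.array([100., 100.]), 0, [256, 256])
#   warped = cv2.warpAffine(img, trans, (256, 256))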
def get_warp_matrix(theta, size_input, size_dst, size_target):
"""This code is based on
https://github.com/open-mmlab/mmpose/blob/master/mmpose/core/post_processing/post_transforms.py
Calculate the transformation matrix under the constraint of unbiased.
Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased
Data Processing for Human Pose Estimation (CVPR 2020).
Args:
theta (float): Rotation angle in degrees.
size_input (np.ndarray): Size of input image [w, h].
size_dst (np.ndarray): Size of output image [w, h].
size_target (np.ndarray): Size of ROI in input plane [w, h].
Returns:
matrix (np.ndarray): A matrix for transformation.
"""
theta = np.deg2rad(theta)
matrix = np.zeros((2, 3), dtype=np.float32)
scale_x = size_dst[0] / size_target[0]
scale_y = size_dst[1] / size_target[1]
matrix[0, 0] = np.cos(theta) * scale_x
matrix[0, 1] = -np.sin(theta) * scale_x
matrix[0, 2] = scale_x * (
-0.5 * size_input[0] * np.cos(theta) + 0.5 * size_input[1] *
np.sin(theta) + 0.5 * size_target[0])
matrix[1, 0] = np.sin(theta) * scale_y
matrix[1, 1] = np.cos(theta) * scale_y
matrix[1, 2] = scale_y * (
-0.5 * size_input[0] * np.sin(theta) - 0.5 * size_input[1] *
np.cos(theta) + 0.5 * size_target[1])
return matrix
def rotate_point(pt, angle_rad):
"""Rotate a point by an angle.
Args:
pt (list[float]): 2 dimensional point to be rotated
angle_rad (float): rotation angle by radian
Returns:
list[float]: Rotated point.
"""
assert len(pt) == 2
sn, cs = np.sin(angle_rad), np.cos(angle_rad)
new_x = pt[0] * cs - pt[1] * sn
new_y = pt[0] * sn + pt[1] * cs
rotated_pt = [new_x, new_y]
return rotated_pt
def _get_3rd_point(a, b):
"""To calculate the affine matrix, three pairs of points are required. This
function is used to get the 3rd point, given 2D points a & b.
The 3rd point is defined by rotating vector `a - b` by 90 degrees
anticlockwise, using b as the rotation center.
Args:
a (np.ndarray): point(x,y)
b (np.ndarray): point(x,y)
Returns:
np.ndarray: The 3rd point.
"""
assert len(a) == 2
assert len(b) == 2
direction = a - b
third_pt = b + np.array([-direction[1], direction[0]], dtype=np.float32)
return third_pt
class TopDownEvalAffine(object):
"""apply affine transform to image and coords
Args:
trainsize (list): [w, h], the standard size used to train
use_udp (bool): whether to use Unbiased Data Processing.
records(dict): the dict contained the image and coords
Returns:
records (dict): contain the image and coords after tranformed
"""
def __init__(self, trainsize, use_udp=False):
self.trainsize = trainsize
self.use_udp = use_udp
def __call__(self, image, im_info):
rot = 0
imshape = im_info['im_shape'][::-1]
center = im_info['center'] if 'center' in im_info else imshape / 2.
scale = im_info['scale'] if 'scale' in im_info else imshape
if self.use_udp:
trans = get_warp_matrix(
rot, center * 2.0,
[self.trainsize[0] - 1.0, self.trainsize[1] - 1.0], scale)
image = cv2.warpAffine(
image,
trans, (int(self.trainsize[0]), int(self.trainsize[1])),
flags=cv2.INTER_LINEAR)
else:
trans = get_affine_transform(center, scale, rot, self.trainsize)
image = cv2.warpAffine(
image,
trans, (int(self.trainsize[0]), int(self.trainsize[1])),
flags=cv2.INTER_LINEAR)
return image, im_info
def expand_crop(images, rect, expand_ratio=0.3):
imgh, imgw, c = images.shape
label, conf, xmin, ymin, xmax, ymax = [int(x) for x in rect.tolist()]
if label != 0:
return None, None, None
org_rect = [xmin, ymin, xmax, ymax]
h_half = (ymax - ymin) * (1 + expand_ratio) / 2.
w_half = (xmax - xmin) * (1 + expand_ratio) / 2.
if h_half > w_half * 4 / 3:
w_half = h_half * 0.75
center = [(ymin + ymax) / 2., (xmin + xmax) / 2.]
ymin = max(0, int(center[0] - h_half))
ymax = min(imgh - 1, int(center[0] + h_half))
xmin = max(0, int(center[1] - w_half))
xmax = min(imgw - 1, int(center[1] + w_half))
return images[ymin:ymax, xmin:xmax, :], [xmin, ymin, xmax, ymax], org_rect
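# Example usage (illustrative sketch; 'frame' and the rect values are placeholders):
# only detections with label 0 are expanded and cropped.
#   crop, new_rect, org_rect = expand_crop(frame, np.array([0, 0.9, 10, 20, 110, 220]))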
import os
import numpy as np
import seaborn as sns
import json
from scipy import stats
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
batches = [1,2,4,8,16,32,64]
model_names = [
"t5-small-lm-adapt",
"t5-base-lm-adapt",
"t5-large-lm-adapt",
"t5-xl-lm-adapt",
]
# batches = [1,2,4,8,16]
model_name = "t5-xl-lm-adapt"
model_keys = [
"S",
"M",
"L",
"XL"
]
id = "XL"
true_latency = []
pred_latency = []
def reject_outliers(data, m = 2.):
data = np.array(data)
d = np.abs(data - np.median(data))
import sys
import pyzed.sl as sl
import numpy as np
import tifffile
import scipy.ndimage
import matplotlib.pyplot as plt
import os.path
import os
from tqdm import tqdm
import skimage.measure
from PIL import Image
from PIL import ImageTk
import yaml
import threading
if sys.version_info[0] == 2: # the tkinter library changed its name from Python 2 to 3.
import Tkinter as tk
else:
import tkinter as tk
############################################################################################################################################
############################################### Function used for 3D-2D matrix estimation ##################################################
############################################################################################################################################
## Parameter for the rotationmatrix function
rotationAngleDegThreshold = 0.00001
def rotationMatrix(r):
"""
Simple 3D Matrix rotation function, obtained from following sources:
https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula
Args:
-r: a rotation vector, with rotation value in x, y and z direction.
"""
# its length is the rotation angle
rotationAngleDeg = np.linalg.norm(r)
if rotationAngleDeg > rotationAngleDegThreshold:
# its direction is the rotation axis.
rotationAxis = r / rotationAngleDeg
# positive angle is clockwise
K = np.array([[ 0, -rotationAxis[2], rotationAxis[1]],
[ rotationAxis[2], 0, -rotationAxis[0]],
[-rotationAxis[1], rotationAxis[0], 0 ]])
# Note the np.dot is very important.
R = np.eye(3) + (np.sin(np.deg2rad(rotationAngleDeg)) * K) + \
((1.0 - np.cos(np.deg2rad(rotationAngleDeg))) * np.dot(K, K))
tmp = np.eye(4)
tmp[0:3, 0:3] = R
else:
R = np.eye(3)
return R
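# Example usage (illustrative sketch): a 90 degree rotation about the z-axis maps
# the x unit vector onto y.
#   R = rotationMatrix(np.array([0., 0., 90.]))
#   R @ np.array([1., 0., 0.])   # -> approximately [0., 1., 0.]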
def optimise_me(x,calib_points_XYZ,proj_xy):
"""
This is the function we want to optimize. It corresponds to the following matrix equation:
s*[x,y,1] = K.Rt(r0,r1,r2,dX,dY,dZ).[X,Y,Z,1]
with:
[ f*m_x gamma u_0 0 ]
K = [ 0 f*m_y v_0 0 ]
[ 0 0 1 0 ]
Args:
- x: The initial guess of the parameters
- calib_points_XYZ: The 3D coordinates of the points measured during calibration, in a numpy array (n,3), with n the number of calibration points.
- proj_xy: The 2D coordinates, obtained during the calibration grid generation, in a numpy array (n,2)
- NUMBER_OF_CALIBRATION_PTS: the number of Calibration
"""
## for printing purposes during optimisation process
global j
j += 1
NUMBER_OF_CALIBRATION_PTS = calib_points_XYZ.shape[0]
## Initialisation
s, f, u0, v0, dX, dY, dZ, m_x, m_y, gamma, r0, r1, r2 = x
# Rotation matrix
R = rotationMatrix(np.array([r0, r1, r2]))
# Rotation and translation
Rt = np.zeros((4, 4))
Rt[0:3, 0:3] = R
Rt[:, -1] = np.array([dX, dY, dZ, 1])
# K matrix
K = np.array([[f*m_x, gamma, u0, 0], [0, f*m_y, v0, 0], [0, 0, 1, 0]])
totalError = 0
for i in range(NUMBER_OF_CALIBRATION_PTS):
# Right Hand Side, See equation above
XYZ1 = np.array([calib_points_XYZ[i,0], calib_points_XYZ[i,1], calib_points_XYZ[i,2], 1]).T
RHS = np.dot(np.dot(K, Rt), XYZ1)/s
totalError += np.square(RHS[0:2] - proj_xy[i]).sum()
if j%1000 == 0: print(f"Error: {np.sqrt(totalError)}")
return np.sqrt(totalError)
def calculate_3D_2D_matrix(PROJECTOR_PIXEL_PTS_PATH,CALIB_PTS_XYZ):
"""
This function performs the optimization of the optimise_me function.
It returns the parameters necessary for the 3D-2D transformation, used to display the 3D point cloud with the projector.
Args:
- PROJECTOR_PIXEL_PTS: Path to the 2D pixel coordinates obtained in the calibration grid generation.
- CALIB_PTS_XYZ: Path to the 3D coordinates measured during calibration.
"""
### Load projector positions in px
proj_xy = np.load(PROJECTOR_PIXEL_PTS_PATH)
calib_points_XYZ = np.load(CALIB_PTS_XYZ)
# Initialisation
NUMBER_OF_CALIBRATION_PTS = calib_points_XYZ.shape[0]
s = 0.04
f = 3.2
u0 = -0.04
v0 = -0.02
dX = 2.2
dY = 3.0
dZ = 1.8
m_x = 2.2
m_y = 1.5
gamma = 2.5
r0 = 0.0
r1 = 0.0
r2 = 0.0
x0 = [s, f, u0, v0, dX, dY, dZ, m_x, m_y, gamma, r0, r1, r2]
# Optimisation
global j
j=0
output = scipy.optimize.minimize(optimise_me,
x0,
args=(calib_points_XYZ,proj_xy),
method='Powell',
options={'disp': True,
'gtol': 0.000000000000001,
'ftol': 0.000000000000001,
'maxiter': 1000000,
'maxcor':10000,
'eps':0.00000000005,
'maxfun':10000000,
'maxls':50000})
# Results
s, f, u0, v0, dX, dY, dZ, m_x, m_y, gamma, r0, r1, r2 = output["x"]
print(f"s : {s }")
print(f"f : {f }")
print(f"u0 : {u0 }")
print(f"v0 : {v0 }")
print(f"dX : {dX }")
print(f"dY : {dY }")
print(f"dZ : {dZ }")
print(f"m_x : {m_x }")
print(f"m_y : {m_y }")
print(f"gamma: {gamma}")
print(f"r0 : {r0 }")
print(f"r1 : {r1 }")
print(f"r2 : {r2 }")
pause()
### Show residuals in mm of computed optimum
print("\n\nFinal Quality check!!\n\n")
Rt = np.zeros((4, 4))
R = rotationMatrix(np.array([r0, r1, r2]))
Rt[0:3, 0:3] = R
Rt[:, -1] = np.array([dX, dY, dZ, 1])
K = np.array([[f*m_x, gamma, u0, 0], [0, f*m_y, v0, 0], [0, 0, 1, 0]])
for i in range(NUMBER_OF_CALIBRATION_PTS):
RHS = np.dot(np.dot(K, Rt), np.array([calib_points_XYZ[i,0], calib_points_XYZ[i,1], calib_points_XYZ[i,2], 1]).T)/s
print(f"Input pixels: {proj_xy[i]}, output match: {RHS[0:2]}")
# yaml file saving as a dictionary
K_dict = {"s": float(s) , "f": float(f) , "u0":float(u0) , "v0":float(v0) , "dX":float(dX) , "dY":float(dY) , "dZ":float(dZ) , "m_x":float(m_x) , "m_y":float(m_y) , "gamma":float(gamma) , "r0":float(r0) , "r1":float(r1) , "r2":float(r2) }
return K_dict
def get_3D_2D_matrix(YAML_PATH):
"""
This function opens the Calibration Yaml File, reads the information about the 3D_2D_matrix and return it as a numpy array
Args:
- YAML_PATH: Path of the yaml calibration file, containing the 3D_2D_Matrix information.
"""
# Opening YAML file
with open(YAML_PATH) as yaml_file:
data = yaml.load(yaml_file,Loader=yaml.FullLoader)
Matrix = data["3D_2D_Matrix"]
s, f, u0, v0, dX, dY, dZ, m_x, m_y, gamma, r0, r1, r2 = Matrix["s"],Matrix["f"],Matrix["u0"],Matrix["v0"],Matrix["dX"],Matrix["dY"],Matrix["dZ"],Matrix["m_x"],Matrix["m_y"],Matrix["gamma"],Matrix["r0"], Matrix["r1"],Matrix["r2"]
Rt = np.zeros((4, 4))
R = rotationMatrix(np.array([r0, r1, r2]))
Rt[0:3, 0:3] = R
Rt[:, -1] = np.array([dX, dY, dZ, 1])
K = np.array([[f*m_x, gamma, u0, 0], [0, f*m_y, v0, 0], [0, 0, 1, 0]])
From_3D_2D_matrix = np.dot(K,Rt)/s
return From_3D_2D_matrix
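# Example usage (illustrative sketch; the YAML path and X, Y, Z are placeholders):
# project a 3D point (in the calibration units) to projector pixel coordinates.
#   M = get_3D_2D_matrix("calibration.yaml")
#   u, v = np.dot(M, np.array([X, Y, Z, 1.0]))[:2]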
############################################################################################################################################
############################################### Function for the Gen and Display of the calibration grid ###################################
############################################################################################################################################
def display_calibration(CALIB_IMG_PATH):
"""
This function is displaying an image in full size on your monitor, using Tkinter.
To escape the full screen just press the escape key of the keyboard.
Args:
- CALIB_IMG_PATH: the path of the image displayed, here it is used for the calibration image.
"""
class App(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.alive = True
self.start()
def callback(self):
self.tk.quit()
def toggle_fullscreen(self, event=None):
self.state = not self.state # Just toggling the boolean
self.tk.attributes("-fullscreen", self.state)
return "break"
def end_fullscreen(self, event=None):
self.state = False
self.tk.attributes("-fullscreen", False)
return "break"
def close(self,event=None):
self.alive = not self.alive
if not self.alive :
self.lmain.configure(compound="center",text="Close Window ?\n Press <Enter> to Confirm.\n Press <q> to exit.",font=("Courier", 44),fg="white",bg="black")
else:
self.lmain.configure(text="")
return "break"
def down(self,event=None):
if not self.alive:
root = self.tk
root.quit()
return "break"
def error_down(self):
root = self.tk
root.quit()
def run(self):
self.tk = tk.Tk()
self.tk.attributes('-zoomed', True) # This just maximizes it so we can see the window. It's nothing to do with fullscreen.
image = Image.open(CALIB_IMG_PATH)
self.image = ImageTk.PhotoImage(image=image)
lmain = tk.Label(self.tk,image=self.image)
lmain.pack()
self.lmain = lmain
self.state = False
self.tk.bind("<F11>", self.toggle_fullscreen)
self.tk.bind("<Escape>", self.end_fullscreen)
self.tk.bind("<q>", self.close)
self.tk.bind("<Return>",self.down)
self.tk.protocol("WM_DELETE_WINDOW", self.callback)
self.tk.mainloop()
app = App()
return app
def draw_grid(save_path_img,save_path_2D_pts,nb_lines_X=3,nb_lines_Y=3,line_width=4):
"""
This function is generating the grid image and the pixel coordinates of the points.
Args:
save_path_img: path where the grid image is saved.
save_path_2D_pts: path where the 2D pixel coordinates of the grid points are saved.
nb_lines_X: number of lines drawn in X direction, (corresponding to the number of points in X direction)
nb_lines_Y: number of lines drawn in Y direction, (corresponding to the number of points in Y direction)
line_width: the width in pixel of the lines which are drawn.
Returns:
An RGB image of the grid used for calibration.
A numpy file containing the coordinates of the (nb_lines_X * nb_lines_Y) points in pixel.
"""
X =[]
Y =[]
# Initialize black image
shape=(1080,1920,3)
Img = np.zeros(shape,dtype=np.uint8)
# Calculate space between lines
X_space = (shape[1] - nb_lines_X*line_width)//(nb_lines_X+1)
Y_space = (shape[0] - nb_lines_Y*line_width)//(nb_lines_Y+1)
#Pts coordinate saving
Pts=np.zeros((nb_lines_Y*nb_lines_X,2))
# Draw the lines
for i in range(1,nb_lines_Y+1):
Img[i*Y_space-line_width//2:i*Y_space+line_width//2,:,1]=255
for j in range (1,nb_lines_X+1):
Pts[(i-1)*(nb_lines_X)+(j-1),0]=j*X_space+line_width//2
Pts[(i-1)*(nb_lines_X)+(j-1),1]=i*Y_space+line_width//2
for i in range(1,nb_lines_X+1):
Img[:,i*X_space-line_width//2:i*X_space+line_width//2,1]=255
np.save(save_path_2D_pts,Pts)
plt.imsave(save_path_img,Img)
print(f"A Calibration image of size: {nb_lines_X}x{nb_lines_Y} was generated.\nIt is saved in: {save_path_img}")
def get_image(zed, point_cloud, medianFrames=1, components=[2]):
"""
This function gives a median value of the components X, Y or Z,
obtained over a certain number of sequentially acquired frames.
This helps to stabilize the coordinates acquired, in case of flickering for instance.
Args:
zed: initialized and opened zed camera
point_cloud: initialized point cloud of the zed Camera
medianFrames: Number of sequentially acquired frames used for the median value generation
components: List of values 0,1 or 2 for respectively X,Y and Z coordinates.
Returns:
The median value of the coordinates acquired.
"""
stack_of_images = []
for n in tqdm(range(medianFrames)):
if zed.grab() == sl.ERROR_CODE.SUCCESS:
zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA,sl.MEM.CPU, zed.get_camera_information().camera_resolution)
point_cloud_np = point_cloud.get_data()
stack_of_images.append(point_cloud_np)
else:
print(":(")
return None
print("\nThe Scene can now be enterd.\nProcessing images ...")
stack_of_images = np.array(stack_of_images)
stack_of_images[~np.isfinite(stack_of_images)] = np.nan
median = np.nanmedian(stack_of_images, axis=0)
return median[:,:,components]
def pause():
programPause = input("Press the <ENTER> key to continue...")
def get_Disk_Position(imageZoffset, newImageXYZ,ROI,CALIB_Z_THRESHOLD_M,RADIUS_TOLERANCE,RADIUS_PERI_THRESHOLD_PX):
"""
This function is giving us the coordinates of the center of a circular object, a CD for instance.
By acquiring the coordinates of a certain number of points located on a plane,
we will be able to calibrate the system.
Args:
imageZoffset: The offset of the Z coordinates (Zcoordinates - Background)
newImageXYZ: The X,Y and Z coordinates of the image
ROI: The region of Interest
CALIB_Z_THRESHOLD_M: The Z threshold corresponding to the Z offset of the object we try to detect
RADIUS_TOLERANCE: The relative tolerance used in the circularity test
RADIUS_PERI_THRESHOLD_PX: The radius threshold (in pixels) used to detect a round object of the expected size.
Returns:
The X,Y,Z coordinates of the center of the CD.
And the Pixel value of the center.
"""
# Segmentation of objects wich appeared into the scene with a Z-difference of at least : CALIB_Z_THERSHOLD_M
binaryCalib = imageZoffset[ROI] > CALIB_Z_THRESHOLD_M
objects = scipy.ndimage.label(binaryCalib)[0]
# Acquisition of properties
properties = skimage.measure.regionprops(objects)
# Circularity Test
circlesBool = []
print("Starting Circularity Test ...")
for label in range(objects.max()):
# Perimeter and Area acquisition
peri = properties[label].perimeter
area = properties[label].area
# Calculation of the radius
rPeri = peri/2/np.pi
rArea = (area/np.pi)**0.5
# Circularity test
isCircle = np.isclose(rPeri, rArea, atol=rArea*RADIUS_TOLERANCE)
import numpy as np
mu = np.array([[2, 3], [2, 3], [2, 3], [2, 3], [2, 3]])
print(np.concatenate([np.array([2, 2, 2])]))
""" Estimating stability of macro model by root finding.
Here, we want to find a root of jw + Fe(w)*(I-alpha*C)/tauC
This becomes a multiplication of jw + Fe(w)*lambda_i(w)/tauC, multiplication over i where lambda_i are eigenvalues
Thus, we need to find roots of jw + lambda_i(w)/(tauC*(tau_e*jw+1)**2)
Multiplying through gives tauC*jw*(-w**2*tau_e**2 + 2*tau_e*jw + 1) + lambda_i(w). We need this to be zero.
Separating out real and imaginary parts of this expression gives:
Re: -2*tau_e*tauC*w**2 + re(lambda_i(w)) = 0
Im: -tau_e**2*tauC*w**3 + tauC*w + im(lambda_i(w)) = 0
Since w should be real, this also implies that we will only consider lambda_i(w)
with positive real parts (based on the equation for real part of the expression)
"""
import numpy as np
def network_transfer(parameters, brain, orgparameters):
"""Estimating stability of macro model when alpha > 1 using root finding
Args:
parameters (dict): w, tauC
brain (Brain): specific brain to calculate NTF
orgparameters (dict): original parameters of NTF that won't be changing
Returns:
Condensed version of characteristic equations for root finding
"""
C = brain.reducedConnectome
D = brain.distance_matrix
w = parameters[0]
tauC = parameters[1]
tau_e = orgparameters["tau_e"]
speed = orgparameters["speed"]
alpha = orgparameters["alpha"]
# define sum of degrees for rows and columns for laplacian normalization
rowdegree = np.transpose(np.sum(C, axis=1))
coldegree = np.sum(C, axis=0)
qind = rowdegree + coldegree < 0.2 * np.mean(rowdegree + coldegree)
rowdegree[qind] = np.inf
coldegree[qind] = np.inf
nroi = C.shape[0]
Tau = 0.001 * D / speed
Cc = C * np.exp(-1j * Tau * w)
# Eigen Decomposition of Complex Laplacian Here
L1 = np.identity(nroi)
L2 = np.divide(1, np.sqrt(np.multiply(rowdegree, coldegree)) + np.spacing(1))
import warnings
import numpy as np
import astropy.units as u
from astropy.units import UnitsWarning
from astropy.io import fits
from astropy.utils.exceptions import AstropyUserWarning
from specutils import Spectrum1D
__all__ = [
'get_spectrum'
]
phoenix_base_url = (
'ftp://phoenix.astro.physik.uni-goettingen.de/'
'v2.0/HiResFITS/PHOENIX-ACES-AGSS-COND-2011/'
)
phoenix_wavelength_url = (
'ftp://phoenix.astro.physik.uni-goettingen.de/'
'v2.0/HiResFITS/WAVE_PHOENIX-ACES-AGSS-COND-2011.fits'
)
phoenix_model_temps = np.concatenate([
np.arange(2300, 7100, 100), np.arange(7000, 12200, 200)
])
phoenix_model_logg = np.arange(0, 6.5, 0.5)
phoenix_model_alpha = np.arange(-0.2, 1.4, 0.2)
phoenix_model_z = np.concatenate([
np.arange(-4, -1, 1), np.arange(-2, 1.5, 0.5)
])
class OutsideGridWarning(AstropyUserWarning):
"""
Warning for when there is no good match in the PHOENIX grid
"""
pass
def validate_grid_point(T_eff, log_g, Z, alpha, closest_params):
temp_out_of_range = (
T_eff > phoenix_model_temps.max() or T_eff < phoenix_model_temps.min()
)
logg_out_of_range = (
log_g > phoenix_model_logg.max() or log_g < phoenix_model_logg.min()
)
z_out_of_range = (
Z > phoenix_model_z.max() or Z < phoenix_model_z.min()
)
alpha_out_of_range = (
alpha > phoenix_model_alpha.max() or alpha < phoenix_model_alpha.min()
)
out_of_range = [
temp_out_of_range, logg_out_of_range, z_out_of_range, alpha_out_of_range
]
if np.any(out_of_range):
warn_message = (
f"{np.count_nonzero(out_of_range):d} supplied parameters out of the"
f" boundaries of the PHOENIX model grid. Closest grid point has "
f"parameters: {closest_params}"
)
warnings.warn(warn_message, OutsideGridWarning)
def get_url(T_eff, log_g, Z=0, alpha=0):
"""
Construct an FTP address from a temperature, log g, metallicity, alpha.
"""
closest_temp_index = np.argmin(np.abs(phoenix_model_temps - T_eff))
closest_grid_temperature = phoenix_model_temps[closest_temp_index]
closest_logg_index = np.argmin(np.abs(phoenix_model_logg - log_g))
#!/usr/bin/env python3
import numpy as np
import re
from pkg_resources import resource_filename
from ..num.num_input import Num_input
from directdm.run import rge
#-----------------------#
# Conventions and Basis #
#-----------------------#
# The basis of operators in the DM-SM sector below the weak scale (5-flavor EFT) is given by
# dim.5 (2 operators)
#
# 'C51', 'C52',
# dim.6 (32 operators)
#
# 'C61u', 'C61d', 'C61s', 'C61c', 'C61b', 'C61e', 'C61mu', 'C61tau',
# 'C62u', 'C62d', 'C62s', 'C62c', 'C62b', 'C62e', 'C62mu', 'C62tau',
# 'C63u', 'C63d', 'C63s', 'C63c', 'C63b', 'C63e', 'C63mu', 'C63tau',
# 'C64u', 'C64d', 'C64s', 'C64c', 'C64b', 'C64e', 'C64mu', 'C64tau',
# dim.7 (129 operators)
#
# 'C71', 'C72', 'C73', 'C74',
# 'C75u', 'C75d', 'C75s', 'C75c', 'C75b', 'C75e', 'C75mu', 'C75tau',
# 'C76u', 'C76d', 'C76s', 'C76c', 'C76b', 'C76e', 'C76mu', 'C76tau',
# 'C77u', 'C77d', 'C77s', 'C77c', 'C77b', 'C77e', 'C77mu', 'C77tau',
# 'C78u', 'C78d', 'C78s', 'C78c', 'C78b', 'C78e', 'C78mu', 'C78tau',
# 'C79u', 'C79d', 'C79s', 'C79c', 'C79b', 'C79e', 'C79mu', 'C79tau',
# 'C710u', 'C710d', 'C710s', 'C710c', 'C710b', 'C710e', 'C710mu', 'C710tau',
# 'C711', 'C712', 'C713', 'C714',
# 'C715u', 'C715d', 'C715s', 'C715c', 'C715b', 'C715e', 'C715mu', 'C715tau',
# 'C716u', 'C716d', 'C716s', 'C716c', 'C716b', 'C716e', 'C716mu', 'C716tau',
# 'C717u', 'C717d', 'C717s', 'C717c', 'C717b', 'C717e', 'C717mu', 'C717tau',
# 'C718u', 'C718d', 'C718s', 'C718c', 'C718b', 'C718e', 'C718mu', 'C718tau',
# 'C719u', 'C719d', 'C719s', 'C719c', 'C719b', 'C719e', 'C719mu', 'C719tau',
# 'C720u', 'C720d', 'C720s', 'C720c', 'C720b', 'C720e', 'C720mu', 'C720tau',
# 'C721u', 'C721d', 'C721s', 'C721c', 'C721b', 'C721e', 'C721mu', 'C721tau',
# 'C722u', 'C722d', 'C722s', 'C722c', 'C722b', 'C722e', 'C722mu', 'C722tau',
# 'C723u', 'C723d', 'C723s', 'C723c', 'C723b', 'C723e', 'C723mu', 'C723tau',
# 'C725',
# dim.8 (12 operators)
#
# 'C81u', 'C81d', 'C81s', 'C82u', 'C82d', 'C82s'
# 'C83u', 'C83d', 'C83s', 'C84u', 'C84d', 'C84s'
# In total, we have 2+32+129+12=175 operators.
# In total, we have 2+32+129=163 operators w/o dim.8.
#-----------------------------#
# The QED anomalous dimension #
#-----------------------------#
def ADM_QED(nf):
""" Return the QED anomalous dimension in the DM-SM sector for nf flavor EFT """
Qu = 2/3
Qd = -1/3
Qe = -1
nc = 3
gamma_QED = np.array([[8/3*Qu*Qu*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qu*nc,\
8/3*Qu*Qd*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc],
[8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc,\
8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],
[8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc,\
8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],
[8/3*Qu*Qu*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qu*nc,\
8/3*Qu*Qd*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc],
[8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc,\
8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],
[8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu,\
8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe],
[8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu,\
8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe],
[8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu,\
8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe]])
gamma_QED_1 = np.zeros((2,163))
gamma_QED_2 = np.hstack((np.zeros((8,2)),gamma_QED,np.zeros((8,153))))
gamma_QED_3 = np.hstack((np.zeros((8,10)),gamma_QED,np.zeros((8,145))))
gamma_QED_4 = np.zeros((145,163))
gamma_QED = np.vstack((gamma_QED_1, gamma_QED_2, gamma_QED_3, gamma_QED_4))
if nf == 5:
return gamma_QED
elif nf == 4:
return np.delete(np.delete(gamma_QED, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 0)\
, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 1)
elif nf == 3:
return np.delete(np.delete(gamma_QED, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 0)\
, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 1)
else:
raise Exception("nf has to be 3, 4 or 5")
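# Example (illustrative sketch): in the 5-flavor EFT the QED ADM acts on the full
# 163-dimensional vector of Wilson coefficients listed at the top of this file.
#   ADM_QED(5).shape   # -> (163, 163)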
def ADM_QED2(nf):
""" Return the QED anomalous dimension in the DM-SM sector for nf flavor EFT at alpha^2 """
# Mixing of Q_{11}^(7) into Q_{5,f}^(7) and Q_{12}^(7) into Q_{6,f}^(7), adapted from Hill et al. [1409.8290].
gamma_gf = -8
gamma_QED2_gf = np.array([5*[gamma_gf]])
gamma_QED2_1 = np.zeros((86,163))
gamma_QED2_2 = np.hstack((np.zeros((1,38)),gamma_QED2_gf,np.zeros((1,120))))
gamma_QED2_3 = np.hstack((np.zeros((1,46)),gamma_QED2_gf,np.zeros((1,112))))
gamma_QED2_4 = np.zeros((75,163))
gamma_QED2 = np.vstack((gamma_QED2_1, gamma_QED2_2, gamma_QED2_3, gamma_QED2_4))
if nf == 5:
return gamma_QED2
elif nf == 4:
return np.delete(np.delete(gamma_QED2, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 0)\
, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 1)
elif nf == 3:
return np.delete(np.delete(gamma_QED2, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 0)\
, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 1)
else:
raise Exception("nf has to be 3, 4 or 5")
#------------------------------#
# The QCD anomalous dimensions #
#------------------------------#
def ADM_QCD(nf):
""" Return the QCD anomalous dimension in the DM-SM sector for nf flavor EFT, when ADM starts at O(alphas) """
gamma_QCD_T = 32/3 * np.eye(5)
gt2qq = 64/9
gt2qg = -4/3
gt2gq = -64/9
gt2gg = 4/3*nf
gamma_twist2 = np.array([[gt2qq, 0, 0, 0, 0, 0, 0, 0, gt2qg],
[0, gt2qq, 0, 0, 0, 0, 0, 0, gt2qg],
[0, 0, gt2qq, 0, 0, 0, 0, 0, gt2qg],
[0, 0, 0, gt2qq, 0, 0, 0, 0, gt2qg],
[0, 0, 0, 0, gt2qq, 0, 0, 0, gt2qg],
[0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[gt2gq, gt2gq, gt2gq, gt2gq, gt2gq, 0, 0, 0, gt2gg]])
gamma_QCD_1 = np.zeros((70,163))
gamma_QCD_2 = np.hstack((np.zeros((5,70)), gamma_QCD_T, np.zeros((5,88))))
gamma_QCD_3 = np.zeros((3,163))
gamma_QCD_4 = np.hstack((np.zeros((5,78)), gamma_QCD_T, np.zeros((5,80))))
gamma_QCD_5 = np.zeros((71,163))
gamma_QCD_6 = np.hstack((np.zeros((9,154)), gamma_twist2))
gamma_QCD = [np.vstack((gamma_QCD_1, gamma_QCD_2, gamma_QCD_3,\
gamma_QCD_4, gamma_QCD_5, gamma_QCD_6))]
if nf == 5:
return gamma_QCD
elif nf == 4:
return np.delete(np.delete(gamma_QCD, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 1)\
, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 2)
elif nf == 3:
return np.delete(np.delete(gamma_QCD, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 1)\
, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 2)
else:
raise Exception("nf has to be 3, 4 or 5")
def ADM_QCD2(nf):
# CHECK ADM #
""" Return the QCD anomalous dimension in the DM-SM sector for nf flavor EFT, when ADM starts at O(alphas^2) """
# Mixing of Q_1^(7) into Q_{5,q}^(7) and Q_2^(7) into Q_{6,q}^(7), from Hill et al. [1409.8290].
# Note that we have different prefactors and signs.
cf = 4/3
gamma_gq = 8*cf # changed 2019-08-29, double check with RG solution
# Mixing of Q_3^(7) into Q_{7,q}^(7) and Q_4^(7) into Q_{8,q}^(7), from Hill et al. [1409.8290].
# Note that we have different prefactors and signs.
gamma_5gq = -8 # changed 2019-08-29, double check with RG solution
gamma_QCD2_gq = np.array([5*[gamma_gq]])
gamma_QCD2_5gq = np.array([5*[gamma_5gq]])
gamma_QCD2_1 = np.zeros((34,163))
gamma_QCD2_2 = np.hstack((np.zeros((1,38)),gamma_QCD2_gq,np.zeros((1,120))))
gamma_QCD2_3 = np.hstack((np.zeros((1,46)),gamma_QCD2_gq,np.zeros((1,112))))
gamma_QCD2_4 = np.hstack((np.zeros((1,54)),gamma_QCD2_5gq,np.zeros((1,104))))
gamma_QCD2_5 = np.hstack((np.zeros((1,62)),gamma_QCD2_5gq,np.zeros((1,96))))
gamma_QCD2_6 = np.zeros((125,163))
gamma_QCD2 = [np.vstack((gamma_QCD2_1, gamma_QCD2_2, gamma_QCD2_3,\
gamma_QCD2_4, gamma_QCD2_5, gamma_QCD2_6))]
if nf == 5:
return gamma_QCD2
elif nf == 4:
return np.delete(np.delete(gamma_QCD2, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 1)\
, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 2)
elif nf == 3:
return np.delete(np.delete(gamma_QCD2, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 1)\
, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 2)
else:
raise Exception("nf has to be 3, 4 or 5")
def ADM5(Ychi, dchi):
""" The dimension-five anomalous dimension
Return a numpy array with the anomalous dimension matrices for g1, g2, g3, yc, ytau, yb, yt, and lambda
The running due to the Higgs self coupling lambda is currently ignored.
Variables
---------
Ychi: The DM hypercharge, defined via the Gell-Mann - Nishijima relation Q = I_W^3 + Ychi/2.
dchi: The dimension of the electroweak SU(2) representation furnished by the DM multiplet.
"""
jj1 = (dchi**2-1)/4
# The beta functions for one multiplet
b1 = - 41/6 - Ychi**2 * dchi/3
b2 = 19/6 - 4*jj1*dchi/9
adm5_g1 = np.array([[5/2*Ychi**2-2*b1, 0, -6*Ychi, 0, 0, 0, 0, 0],
[-4*Ychi*jj1, Ychi**2/2, 0, 12*Ychi, 0, 0, 0, 0],
[0, 0, -3/2*(1+Ychi**2), 0, 0, 0, 0, 0],
[0, 0, 0, -3/2*(1+Ychi**2), 0, 0, 0, 0],
[0, 0, 0, 0, 5/2*Ychi**2-2*b1, 0, -6*Ychi, 0],
[0, 0, 0, 0, -4*Ychi*jj1, Ychi**2/2, 0, 12*Ychi],
[0, 0, 0, 0, 0, 0, -3/2*(1+Ychi**2), 0],
[0, 0, 0, 0, 0, 0, 0, -3/2*(1+Ychi**2)]])
adm5_g2 = np.array([[2*jj1, -4*Ychi, 0, -24, 0, 0, 0, 0],
[0, (10*jj1-8)-2*b2, 12*jj1, 0, 0, 0, 0, 0],
[0, 0, (-9/2-6*jj1), 0, 0, 0, 0, 0],
[0, 0, 0, (3/2-6*jj1), 0, 0, 0, 0],
[0, 0, 0, 0, 2*jj1, -4*Ychi, 0, -24],
[0, 0, 0, 0, 0, (10*jj1-8)-2*b2, 12*jj1, 0],
[0, 0, 0, 0, 0, 0, (-9/2-6*jj1), 0],
[0, 0, 0, 0, 0, 0, 0, (3/2-6*jj1)]])
adm5_g3 = np.zeros((8,8))
adm5_yc = np.diag([0,0,6,6,0,0,6,6])
adm5_ytau = np.diag([0,0,2,2,0,0,2,2])
adm5_yb = np.diag([0,0,6,6,0,0,6,6])
adm5_yt = np.diag([0,0,6,6,0,0,6,6])
adm5_lam = np.diag([0,0,3,1,0,0,3,1])
full_adm = np.array([adm5_g1, adm5_g2, adm5_g3, adm5_yc, adm5_ytau, adm5_yb, adm5_yt, adm5_lam])
if dchi == 1:
return np.delete(np.delete(full_adm, [1,3,5,7], 1), [1,3,5,7], 2)
else:
return full_adm
def ADM6(Ychi, dchi):
""" The dimension-five anomalous dimension
Return a numpy array with the anomalous dimension matrices for g1, g2, g3, ytau, yb, and yt
The running due to the Higgs self coupling lambda is currently ignored.
The operator basis is Q1-Q14 1st, 2nd, 3rd gen.; S1-S17 (mixing of gen: 1-1, 2-2, 3-3, 1-2, 1-3, 2-3),
S18-S24 1st, 2nd, 3rd gen., S25; D1-D4.
The explicit ordering of the operators, including flavor indices, is contained in the file
"directdm/run/operator_ordering.txt"
Variables
---------
Ychi: The DM hypercharge, defined via the Gell-Mann - Nishijima relation Q = I_W^3 + Ychi/2.
dchi: The dimension of the electroweak SU(2) representation furnished by the DM multiplet.
"""
scope = locals()
def load_adm(admfile):
with open(admfile, "r") as f:
adm = []
for line in f:
line = re.sub("\n", "", line)
line = line.split(",")
adm.append(list(map(lambda x: eval(x, scope), line)))
return adm
admg1 = load_adm(resource_filename("directdm", "run/full_adm_g1.py"))
admg2 = load_adm(resource_filename("directdm", "run/full_adm_g2.py"))
admg3 = np.zeros((207,207))
admyc = load_adm(resource_filename("directdm", "run/full_adm_yc.py"))
admytau = load_adm(resource_filename("directdm", "run/full_adm_ytau.py"))
admyb = load_adm(resource_filename("directdm", "run/full_adm_yb.py"))
admyt = load_adm(resource_filename("directdm", "run/full_adm_yt.py"))
admlam = np.zeros((207,207))
full_adm = np.array([np.array(admg1), np.array(admg2), admg3,\
np.array(admyc), np.array(admytau), np.array(admyb),\
np.array(admyt), np.array(admlam)])
if dchi == 1:
return np.delete(np.delete(full_adm, [0, 4, 8, 11, 14, 18, 22, 25, 28, 32, 36, 39,\
42, 44, 205, 206], 1),\
[0, 4, 8, 11, 14, 18, 22, 25, 28, 32, 36, 39,\
42, 44, 205, 206], 2)
else:
return full_adm
def ADM_QCD_dim8(nf):
""" Return the QCD anomalous dimension in the DM-SM sector at dim.8, for nf flavor EFT """
beta0 = rge.QCD_beta(nf, 1).trad()
gammam0 = rge.QCD_gamma(nf, 1).trad()
ADM8 = 2*(gammam0 - beta0) * np.eye(12)
return ADM8
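# Example (illustrative sketch): the dim.8 ADM is proportional to the 12x12 identity,
# acting on the 12 dim.8 Wilson coefficients listed at the top of this file.
#   ADM_QCD_dim8(5).shape   # -> (12, 12)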
def ADM_SM_QCD(nf):
""" Return the QCD anomalous dimension in the SM-SM sector for nf flavor EFT, for a subset of SM dim.6 operators
The basis is spanned by a subset of 10*8 + 5*4 = 100 SM operators, with Wilson coefficients
['P61ud', 'P62ud', 'P63ud', 'P63du', 'P64ud', 'P65ud', 'P66ud', 'P66du',
'P61us', 'P62us', 'P63us', 'P63su', 'P64us', 'P65us', 'P66us', 'P66su',
'P61uc', 'P62uc', 'P63uc', 'P63cu', 'P64uc', 'P65uc', 'P66uc', 'P66cu',
'P61ub', 'P62ub', 'P63ub', 'P63bu', 'P64ub', 'P65ub', 'P66ub', 'P66bu',
'P61ds', 'P62ds', 'P63ds', 'P63sd', 'P64ds', 'P65ds', 'P66ds', 'P66sd',
'P61dc', 'P62dc', 'P63dc', 'P63cd', 'P64dc', 'P65dc', 'P66dc', 'P66cd',
'P61db', 'P62db', 'P63db', 'P63bd', 'P64db', 'P65db', 'P66db', 'P66bd',
'P61sc', 'P62sc', 'P63sc', 'P63cs', 'P64sc', 'P65sc', 'P66sc', 'P66cs',
'P61sb', 'P62sb', 'P63sb', 'P63bs', 'P64sb', 'P65sb', 'P66sb', 'P66bs',
'P61cb', 'P62cb', 'P63cb', 'P63bc', 'P64cb', 'P65cb', 'P66cb', 'P66bc',
'P61u', 'P62u', 'P63u', 'P64u',
'P61d', 'P62d', 'P63d', 'P64d',
'P61s', 'P62s', 'P63s', 'P64s',
'P61c', 'P62c', 'P63c', 'P64c',
'P61b', 'P62b', 'P63b', 'P64b']
"""
adm_qqp_qqp = np.array([[0, 0, 0, 0, 0, 12, 0, 0],
[0, 0, 0, 0, 12, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 12],
[0, 0, 0, 0, 0, 0, 12, 0],
[0, 8/3, 0, 0, - 19/3, 5, 0, 0],
[8/3, 0, 0, 0, 5, - 9, 0, 0],
[0, 0, 0, 8/3, 0, 0, - 23/3, 5],
[0, 0, 8/3, 0, 0, 0, 5, - 23/3]])
adm_qqp_qqpp = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 4/3, 0],
[0, 0, 0, 0, 0, 0, 0, 0]])
adm_qpq_qppq = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 4/3]])
adm_qqp_qppq = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 4/3],
[0, 0, 0, 0, 0, 0, 0, 0]])
adm_qpq_qqpp = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 4/3, 0]])
adm_q_q = np.array([[4, 4, 0, - 28/3],
[0, 0, 0, 44/3],
[0, 0, 44/9, 0],
[5/3, 13/3, 0, - 106/9]])
adm_qqp_q = np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 4/3],
[0, 0, 0, 0],
[0, 0, 4/9, 0],
[0, 0, 0, 0]])
adm_qpq_q = np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 4/3],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 4/9, 0]])
adm_q_qqp = np.array([[0, 0, 0, 0, 8/3, 0, 0, 0],
[0, 0, 0, 0, 8/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 8/3, 0],
[0, 0, 0, 0, 20/9, 0, 0, 0]])
adm_q_qpq = np.array([[0, 0, 0, 0, 8/3, 0, 0, 0],
[0, 0, 0, 0, 8/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 8/3],
[0, 0, 0, 0, 20/9, 0, 0, 0]])
adm_ud = np.hstack((adm_qqp_qqp, adm_qqp_qqpp, adm_qqp_qqpp,\
adm_qqp_qqpp, adm_qpq_qqpp, adm_qpq_qqpp,\
adm_qpq_qqpp, np.zeros((8, 24)), adm_qqp_q,\
adm_qpq_q, np.zeros((8,12))))
adm_us = np.hstack((adm_qqp_qqpp, adm_qqp_qqp, adm_qqp_qqpp,\
adm_qqp_qqpp, adm_qpq_qppq, np.zeros((8,16)),\
adm_qpq_qqpp, adm_qpq_qqpp, np.zeros((8, 8)),\
adm_qqp_q, np.zeros((8,4)), adm_qpq_q, np.zeros((8,8))))
adm_uc = np.hstack((adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqp,\
adm_qqp_qqpp, np.zeros((8,8)), adm_qpq_qppq,\
np.zeros((8,8)), adm_qpq_qppq, np.zeros((8, 8)),\
adm_qpq_qqpp, adm_qqp_q, np.zeros((8,8)),\
adm_qpq_q, np.zeros((8,4))))
adm_ub = np.hstack((adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqpp,\
adm_qqp_qqp, np.zeros((8,16)), adm_qpq_qppq,\
np.zeros((8,8)), adm_qpq_qppq, adm_qpq_qppq,\
adm_qqp_q, np.zeros((8,12)), adm_qpq_q))
adm_ds = np.hstack((adm_qqp_qppq, adm_qpq_qppq, np.zeros((8,16)),\
adm_qqp_qqp, adm_qqp_qqpp, adm_qqp_qqpp,\
adm_qpq_qqpp, adm_qpq_qqpp, np.zeros((8,8)),\
np.zeros((8,4)), adm_qqp_q, adm_qpq_q, np.zeros((8,8))))
adm_dc = np.hstack((adm_qqp_qppq, | np.zeros((8,8)) | numpy.zeros |
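# --- Editor's sketch (illustrative only; the original ADM_SM_QCD above is cut off
# mid-assembly in this dataset row) ---
# The pattern being built: np.hstack the 8x8 and 8x4 blocks of one operator sector
# into a row, then np.vstack all sector rows into the full 100x100 ADM
# (10 four-quark sectors of 8 operators plus 5 single-quark sectors of 4).
def _block_assembly_sketch():
    import numpy as np
    row_quark_pair = np.hstack([np.ones((8, 8)), np.zeros((8, 88)), np.zeros((8, 4))])
    row_single_quark = np.hstack([np.zeros((4, 96)), np.eye(4)])
    full = np.vstack([row_quark_pair] * 10 + [row_single_quark] * 5)
    assert full.shape == (100, 100)   # 10*8 + 5*4 = 100 operators
    return full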
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from netCDF4 import Dataset
import numpy as np
import numpy.ma as ma
def combine_landsat():
fh_out = Dataset('../../processed_data/landsat/20180719.nc', 'w')
flag = False
for i in range(1, 8):
fh_in = Dataset('../../raw_data/landsat/nebraska/SRB{}_20180719.nc'.format(i), 'r')
if not flag:
lats, lons = fh_in.variables['lat'][:], fh_in.variables['lon'][:]
fh_out.createDimension("lat", len(lats))
fh_out.createDimension("lon", len(lons))
for v_name, varin in fh_in.variables.items():
if v_name in ["lat", "lon"]:
outVar = fh_out.createVariable(v_name, varin.datatype, (v_name,))
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
fh_out.variables["lat"][:] = lats[:]
fh_out.variables["lon"][:] = lons[:]
flag = True
for v_name, varin in fh_in.variables.items():
if v_name == 'Band1':
outVar = fh_out.createVariable('band{}'.format(i), varin.datatype, ('lat', 'lon'))
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
outVar[:] = ma.masked_less(varin[:], 0)
fh_in.close()
fh_out.close()
# 20190604
def subset_landsat(lat1, lat2, lon1, lon2):
fh_out = Dataset('../../processed_data/landsat/2019155.nc', 'w')
flag = False
lat_indices, lon_indices = None, None
for i in range(1, 8):
fh_in = Dataset('../../raw_data/landsat/SRB{}_doy2019155.nc'.format(i), 'r')
if not flag:
lats, lons = fh_in.variables['lat'][:], fh_in.variables['lon'][:]
lat_indices = np.searchsorted(lats, [lat2, lat1])
lon_indices = | np.searchsorted(lons, [lon1, lon2]) | numpy.searchsorted |
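# --- Editor's sketch (hypothetical continuation; this dataset row is cut off at the
# np.searchsorted call above, and the names below are illustrative) ---
# Once searchsorted has returned the index bounds, the subset is a plain slice of the
# coordinate vectors and of every band array (assuming ascending lat/lon axes and
# band data ordered as (lat, lon)).
def _subset_sketch(lats, lons, band, lat1, lat2, lon1, lon2):
    lat_lo, lat_hi = np.searchsorted(lats, [lat2, lat1])   # lat2 < lat1 for ascending lats
    lon_lo, lon_hi = np.searchsorted(lons, [lon1, lon2])
    return lats[lat_lo:lat_hi], lons[lon_lo:lon_hi], band[lat_lo:lat_hi, lon_lo:lon_hi]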
from ..features import GeologicalFeatureInterpolator
from . import FoldRotationAngle
import numpy as np
from LoopStructural.utils import getLogger, InterpolatorError
logger = getLogger(__name__)
def _calculate_average_intersection(feature_builder, fold_frame, fold,
**kwargs):
"""
Parameters
----------
    feature_builder
fold_frame
fold
Returns
-------
"""
class FoldedFeatureBuilder(GeologicalFeatureInterpolator):
def __init__(self,interpolator,fold,fold_weights={},name='Feature',region=None,**kwargs):
GeologicalFeatureInterpolator.__init__(self,interpolator,name=name,region=region,**kwargs)
self.fold = fold
self.fold_weights = fold_weights
self.kwargs = kwargs
self.svario = True
def set_fold_axis(self):
"""calculates the fold axis/ fold axis rotation and adds this to the fold
"""
kwargs = self.kwargs
fold_axis = kwargs.get('fold_axis',None)
if fold_axis is not None:
fold_axis = np.array(fold_axis)
if len(fold_axis.shape) == 1:
self.fold.fold_axis = fold_axis
if "av_fold_axis" in kwargs:
l2 = self.fold.foldframe.calculate_intersection_lineation(self)
self.fold.fold_axis = | np.mean(l2, axis=0) | numpy.mean |
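# --- Editor's sketch (illustrative only; the helper name below is an assumption and
# not part of LoopStructural's API) ---
# The av_fold_axis branch above averages the intersection lineations with
# np.mean(l2, axis=0); renormalising the mean gives a single unit fold-axis vector.
def _average_axis_sketch(l2):
    axis = np.mean(l2, axis=0)           # l2: (n, 3) array of intersection lineations
    return axis / np.linalg.norm(axis)   # unit-length representative axis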
import itertools
import os
import enum
import pickle
import logging
import numpy as np
import pandas as pd
import sklearn.metrics as metrics
from util import read_geotiff, Image
from feature import Feature
from groundtruth import create_mask, create_groundtruth, reshape_image
from matplotlib import pyplot as plt
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
from imblearn.over_sampling import RandomOverSampler, SMOTE, ADASYN
from imblearn.under_sampling import RandomUnderSampler
from scipy.ndimage import zoom
from scipy.ndimage.interpolation import shift
LOG = logging.getLogger(__file__)
logging.basicConfig(level=logging.INFO)
shapefile = 'data/slums_approved.shp'
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Little more room for the X label
plt.gcf().subplots_adjust(bottom=0.2)
def get_metrics(prediction, ytest):
f1_score = metrics.f1_score(ytest, prediction)
matthews = metrics.matthews_corrcoef(ytest, prediction)
return f1_score, matthews
class Dataset:
def __init__(self, train_images, test_image, shapefile,
feature_names, scales=[50, 100, 150], block_size=20,
bands=[1, 2, 3]):
self.train_images = train_images
self.test_image = test_image
self.shapefile = shapefile
self.feature_names = feature_names
self.bands = bands
self.scales = scales
self.block_size = block_size
self.dataset = None
self.feature_shape = None
self._create_train_test()
def _load_features(self, image):
if os.path.exists(image.path):
features = Feature(image, self.block_size, self.scales,
self.bands, self.feature_names).get()
features[features == np.inf] = 0
return features
print("Feature does not exist")
exit()
def _create_dataset(self, image, feature):
        mask = create_mask(self.shapefile, image.path)
groundtruth = create_groundtruth(mask, block_size=self.block_size,
threshold=0.6)
groundtruth = reshape_image(groundtruth, image.shape, self.block_size,
max(self.scales))
X = []
for i in range(feature.shape[1]):
for j in range(feature.shape[2]):
X.append(feature[:, i, j])
y = []
for i in range(groundtruth.shape[0]):
for j in range(groundtruth.shape[1]):
y.append(groundtruth[i, j])
X = np.array(X)
y = np.array(y)
return X, y
def _create_train_test(self):
test_features = self._load_features(self.test_image)
Xtest, ytest = self._create_dataset(self.test_image, test_features)
Xtrain = np.empty((0, Xtest.shape[1]))
ytrain = np.ravel(np.empty((0, 1)))
for image in self.train_images:
train_features = self._load_features(image)
X, y = self._create_dataset(image, train_features)
# print(X.shape)
# print(y.shape)
Xtrain = np.concatenate((Xtrain, X), axis=0)
ytrain = np.concatenate((ytrain, y), axis=0)
# print(Xtrain.shape)
# print(ytrain.shape)
scaler = StandardScaler().fit(Xtrain)
Xtrain = scaler.transform(Xtrain)
Xtest = scaler.transform(Xtest)
Xtrain, ytrain = shuffle(Xtrain, ytrain)
Xtrain, ytrain = SMOTE().fit_sample(Xtrain, ytrain)
self.dataset = (Xtrain, ytrain, Xtest, ytest)
self.feature_shape = test_features[0].shape
def get_dataset(self):
return self.dataset
def get_feature_shape(self):
return self.feature_shape
def get_train_images(self):
return self.train_images
def get_test_image(self):
return self.test_image
def get_feature_names(self):
return self.feature_names
def get_scales(self):
return self.scales
def get_block_size(self):
return self.block_size
class Classify:
classifiers = {
0: DecisionTreeClassifier(),
1: RandomForestClassifier(),
2: MLPClassifier(),
3: AdaBoostClassifier(),
4: GradientBoostingClassifier()
}
def __init__(self, dataset, classifier_indices=None, experiments=5):
self.dataset = dataset
self.classifier_indices = classifier_indices
if not self.classifier_indices:
self.classifier_indices = list(self.classifiers.keys())
self.experiments = experiments
self.train_images = dataset.get_train_images()
self.test_image = dataset.get_test_image()
self.feature_names = dataset.get_feature_names()
self.scales = dataset.get_scales()
self.block_size = dataset.get_block_size()
def _get_classifier_name(self, index):
return str(self.classifiers[index]).split("(")[0]
def _create_confusion(self, prediction, ytest, folder, classifier_index):
basename = self._get_basename()
classifier_name = self._get_classifier_name(classifier_index)
name = "confusion_" + basename + "_" + classifier_name + ".png"
path = os.path.join(folder, name)
matrix = metrics.confusion_matrix(ytest, prediction)
plot_confusion_matrix(matrix,
classes=['Formal', 'Informal'],
title='Confusion Matrix')
LOG.info("Saving confusion as: {}".format(path))
plt.savefig(path, format='png', dpi=200)
plt.clf()
def _create_overlay(self, prediction, ytest, folder, classifier_index):
basename = self._get_basename()
classifier_name = self._get_classifier_name(classifier_index)
name = "overlay_" + basename + "_" + classifier_name + ".png"
path = os.path.join(folder, name)
prediction = np.reshape(prediction, self.dataset.get_feature_shape())
prediction = shift(prediction, 3, cval=0)
prediction = zoom(prediction, self.block_size, order=0)
# Compensate for the padding
plt.axis('off')
image = self.test_image.RGB
#image = np.dstack((image[0], image[1], image[2]))
plt.imshow(image)
plt.imshow(prediction, alpha=0.5)
LOG.info("Saving overlay as: {}".format(path))
plt.savefig(path, format='png', dpi=400)
plt.clf()
def _create_metrics(self, folder):
columns = ["F1 score", "Matthews"]
indices = [self._get_classifier_name(index)
for index in self.classifier_indices]
metrics = pd.DataFrame(columns=columns)
LOG.info("Start creating metrics")
for index in self.classifier_indices:
tmp = np.empty((0, 2))
classifier = self.classifiers[index]
Xtrain, ytrain, Xtest, ytest = self.dataset.get_dataset()
for i in range(self.experiments):
LOG.info("Experiment {}/{}".format(index * i + i, self.experiments * len(self.classifier_indices)))
classifier.fit(Xtrain, ytrain)
prediction = classifier.predict(Xtest)
f1_score, matthews = get_metrics(prediction, ytest)
entry = np.array([[f1_score, matthews]])
tmp = np.concatenate((tmp, entry))
mean = | np.mean(tmp, axis=0) | numpy.mean |
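# --- Editor's sketch (hypothetical continuation of _create_metrics above; this
# dataset row is cut off at the np.mean call) ---
# A typical way to finish the loop: average the per-experiment [f1, matthews] rows
# and store one averaged row per classifier in the metrics DataFrame.
def _aggregate_metrics_sketch(tmp, metrics, classifier_name):
    mean = np.mean(tmp, axis=0)           # tmp has shape (n_experiments, 2)
    metrics.loc[classifier_name] = mean   # one row of averaged scores
    return metrics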
""" CS4277/CS5477 Lab 2: Camera Calibration.
See accompanying Jupyter notebook (lab2.ipynb) for instructions.
Name: <NAME>
Email: <EMAIL>
Student ID: A0215003A
"""
import cv2
import numpy as np
from scipy.optimize import least_squares
"""Helper functions: You should not have to touch the following functions.
"""
def convt2rotation(Q):
"""Convert a 3x3 matrix into a rotation matrix
Args:
Q (np.ndarray): Input matrix
Returns:
R (np.ndarray): A matrix that satisfies the property of a rotation matrix
"""
u,s,vt = np.linalg.svd(Q)
R = np.dot(u, vt)
return R
def vector2matrix(S):
"""Convert the vector representation to rotation matrix,
    You will use it in the error function because the input parameters are in vector format
Args:
S (np.ndarray): vector representation of rotation (3,)
Returns:
R (np.ndarray): Rotation matrix (3, 3)
"""
S = np.expand_dims(S, axis=1)
den = 1 + np.dot(S.T, S)
num = (1 - np.dot(S.T, S))*(np.eye(3)) + 2 * skew(S) + 2 * np.dot(S, S.T)
R = num/den
homo = np.zeros([3,1], dtype=np.float32)
R = np.hstack([R, homo])
return R
def skew(a):
s = np.array([[0, -a[2, 0], a[1, 0]], [a[2, 0], 0, -a[0, 0]], [-a[1, 0], a[0, 0], 0]])
return s
def matrix2quaternion(T):
R = T[:3, :3]
rotdiff = R - R.T
r = np.zeros(3)
r[0] = -rotdiff[1, 2]
r[1] = rotdiff[0, 2]
r[2] = -rotdiff[0, 1]
sintheta = np.linalg.norm(r) / 2
r0 = np.divide(r, np.linalg.norm(r) + np.finfo(np.float32).eps)
costheta = (np.trace(R) - 1) / 2
theta = np.arctan2(sintheta, costheta)
q = np.zeros(4)
q[0] = np.cos(theta / 2)
q[1:] = r0 * np.sin(theta / 2)
return q
def matrix2vector(R):
"""Convert a rotation matrix into vector representation.
You will use it to convert a rotation matrix into a vector representation before you pass the parameters into the error function.
Args:
R (np.ndarray): Rotation matrix (3, 3)
Returns:
Q (np.ndarray): vector representation of rotation (3,)
"""
Q = matrix2quaternion(R)
S = Q[1:]/Q[0]
return S
"""Functions to be implemented
"""
def init_param(pts_model, pts_2d):
""" Estimate the intrisics and extrinsics of cameras
Args:
pts_model (np.ndarray): Coordinates of points in 3D (2, N)
pts_2d (list): Coordinates of points in 2D, the list includes 2D coordinates in three views 3 * (2, N)
Returns:
        R_all (list): a list containing the three rotation matrices
        T_all (list): a list containing the three translation vectors
        K (np.ndarray): an array containing the five intrinsic parameters (5,)
Prohibited functions:
cv2.calibrateCamera()
"""
""" YOUR CODE STARTS HERE """
R_all = []
T_all = []
V = np.zeros((2 * len(pts_2d), 6), np.float64)
def vectorSquare(a, b, h):
v = np.array([
h[0, a] * h[0, b], h[0, a] * h[1, b] + h[1, a] * h[0, b], h[1, a] * h[1, b],
h[2, a] * h[0, b] + h[0, a] * h[2, b], h[2, a] * h[1, b] + h[1, a] * h[2, b], h[2, a] * h[2, b]])
return v
for i in range(len(pts_2d)):
pts_src = pts_model.T
pts_dst = pts_2d[i].T
h, _ = cv2.findHomography(pts_src, pts_dst)
V[2 * i] = vectorSquare(0, 1, h)
V[2 * i + 1] = np.subtract(vectorSquare(0, 0, h), vectorSquare(1, 1, h))
u, s, vh = np.linalg.svd(V)
b = vh[-1]
cy = (b[1] * b[3] - b[0] * b[4]) / (b[0] * b[2] - b[1] ** 2)
l = b[5] - (b[3] ** 2 + cy * (b[1] * b[2] - b[0] * b[4])) / b[0]
fx = np.sqrt((l / b[0]))
fy = np.sqrt(((l * b[0]) / (b[0] * b[2] - b[1] ** 2)))
ga = -1 * ((b[1]) * (fx ** 2) * (fy / l))
cx = (ga * cy / fy) - (b[3] * (fx ** 2) / l)
k = np.array([
[fx, ga, cx],
[0, fy, cy],
[0, 0, 1.0],
])
invk = np.linalg.inv(k)
for i in range(len(pts_2d)):
pts_src = pts_model.T
pts_dst = pts_2d[i].T
h, _ = cv2.findHomography(pts_src, pts_dst)
s = 1 / | np.linalg.norm(invk @ h[:, 0]) | numpy.linalg.norm |
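# --- Editor's sketch (one common way to finish the extrinsics loop above, which this
# dataset row cuts off at the normalisation factor; not necessarily the author's code) ---
# Zhang's method: with s = 1/||K^-1 h1||, the rotation columns and the translation
# follow from the homography columns, and convt2rotation() (defined earlier in this
# file) projects the result onto the nearest proper rotation matrix.
def _extrinsics_sketch(invk, h):
    s = 1.0 / np.linalg.norm(invk @ h[:, 0])
    r1 = s * (invk @ h[:, 0])
    r2 = s * (invk @ h[:, 1])
    r3 = np.cross(r1, r2)
    t = s * (invk @ h[:, 2])
    R = convt2rotation(np.column_stack([r1, r2, r3]))
    return R, t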
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
COMP0088 lab exercises for week 2.
Add your code as specified below.
A simple test driver is included in this script. Call it at the command line like this:
$ python week_2.py
A 4-panel figure, `week_2.pdf`, will be generated so you can check it's doing what you
want. You should not need to edit the driver code, though you can if you wish.
NB: the code imports two functions from last week's exercises. If you were not
able to complete those functions, please talk to the TAs to get a working version.
"""
import sys, os, os.path
import argparse
import numpy as np
import numpy.random
import matplotlib.pyplot as plt
import utils
from week_1 import generate_noisy_linear, generate_linearly_separable
#### ADD YOUR CODE BELOW
# -- Question 1 --
def ridge_closed ( X, y, l2=0 ):
"""
Implement L2-penalised least-squares (ridge) regression
using its closed form expression.
# Arguments
X: an array of sample data, where rows are samples
and columns are features (assume there are at least
as many samples as features). caller is responsible
for prepending x0=1 terms if required.
y: vector of measured (or simulated) labels for the samples,
must be same length as number of rows in X
l2: optional L2 regularisation weight. if zero (the default)
then this reduces to unregularised least squares
# Returns
w: the fitted vector of weights
"""
assert(len(X.shape)==2)
assert(X.shape[0]==len(y))
# TODO: implement this
return None
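# --- Editor's sketch (one possible closed-form implementation, shown for reference;
# the graded solution still belongs in ridge_closed above) ---
# The penalised normal equations give w = (X^T X + l2 * I)^(-1) X^T y; solving the
# linear system is numerically preferable to forming the explicit inverse.
def _ridge_closed_sketch(X, y, l2=0):
    d = X.shape[1]
    return np.linalg.solve(X.T @ X + l2 * np.eye(d), X.T @ y)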
# -- Question 2 --
def monomial_projection_1d ( X, degree ):
"""
Map 1d data to an expanded basis of monomials
up to the given degree.
# Arguments
X: an array of sample data, where rows are samples
and the single column is the input feature.
degree: maximum degree of the monomial terms
# Returns
Xm: an array of the transformed data, with the
same number of rows (samples) as X, and
with degree+1 columns (features):
1, x, x**2, x**3, ..., x**degree
"""
assert(len(X.shape)==2)
assert(X.shape[1]==1)
# TODO: implement this
return None
def generate_noisy_poly_1d ( num_samples, weights, sigma, limits, rng ):
"""
Draw samples from a 1D polynomial model with additive
Gaussian noise.
# Arguments
num_samples: number of samples to generate
(ie, the number of rows in the returned X
and the length of the returned y)
weights: vector of the polynomial coefficients
(including a bias term at index 0)
sigma: standard deviation of the additive noise
limits: a tuple (low, high) specifying the value
range for the single input dimension x1
rng: an instance of numpy.random.Generator
from which to draw random numbers
# Returns
X: a matrix of sample inputs, where
the samples are the rows and the
single column is the 1D feature x1
ie, its size should be:
num_samples x 1
y: a vector of num_samples output values
"""
# TODO: implement this
return None, None
def fit_poly_1d ( X, y, degree, l2=0 ):
"""
Fit a polynomial of the given degree to 1D sample data.
# Arguments
X: an array of sample data, where rows are samples
and the single column is the input feature.
y: vector of output values corresponding to the inputs,
must be same length as number of rows in X
degree: degree of the polynomial
l2: optional L2 regularisation weight
# Returns
w: the fitted polynomial coefficients
"""
assert(len(X.shape)==2)
assert(X.shape[1]==1)
assert(X.shape[0]==len(y))
# TODO: implement this
return None
# -- Question 3 --
def gradient_descent ( z, loss_func, grad_func, lr=0.01,
loss_stop=1e-4, z_stop=1e-4, max_iter=100 ):
"""
Generic batch gradient descent optimisation.
Iteratively updates z by subtracting lr * grad
until one or more stopping criteria are met.
# Arguments
z: initial value(s) of the optimisation var(s).
can be a scalar if optimising a univariate
function, otherwise a single numpy array
loss_func: function of z that we seek to minimise,
should return a scalar value
grad_func: function calculating the gradient of
loss_func at z. for vector z, this should return
a vector of the same length containing the
partial derivatives
lr: learning rate, ie fraction of the gradient by
which to update z each iteration
loss_stop: stop iterating if the loss changes
by less than this (absolute)
z_stop: stop iterating if z changes by less than
this (L2 norm)
max_iter: stop iterating after iterating this
many times
# Returns
zs: a list of the z values at each iteration
losses: a list of the losses at each iteration
"""
# TODO: implement this
return None, None
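# --- Editor's sketch (one possible batch gradient-descent loop, shown for reference;
# the graded solution still belongs in gradient_descent above) ---
# Record z and the loss at every step and stop on a small loss change, a small step,
# or the iteration cap, as the docstring describes.
def _gradient_descent_sketch(z, loss_func, grad_func, lr=0.01,
                             loss_stop=1e-4, z_stop=1e-4, max_iter=100):
    zs, losses = [z], [loss_func(z)]
    for _ in range(max_iter):
        z_new = z - lr * grad_func(z)
        zs.append(z_new)
        losses.append(loss_func(z_new))
        if abs(losses[-2] - losses[-1]) < loss_stop:
            break
        if np.linalg.norm(np.atleast_1d(z_new - z)) < z_stop:
            break
        z = z_new
    return zs, losses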
# -- Question 4 --
def logistic_regression ( X, y, w0=None, lr=0.05,
loss_stop=1e-4, weight_stop=1e-4, max_iter=100 ):
"""
Fit a logistic regression classifier to data.
# Arguments
X: an array of sample data, where rows are samples
and columns are features. caller is responsible
for prepending x0=1 terms if required.
y: vector of binary class labels for the samples,
must be same length as number of rows in X
w0: starting value of the weights, if omitted
then all zeros are used
lr: learning rate, ie fraction of gradients by
which to update weights at each iteration
loss_stop: stop iterating if the loss changes
by less than this (absolute)
weight_stop: stop iterating if weights change by less
than this (L2 norm)
max_iter: stop iterating after iterating this
many times
# Returns
ws: a list of fitted weights at each iteration
losses: a list of the loss values at each iteration
"""
assert(len(X.shape)==2)
assert(X.shape[0]==len(y))
# TODO: implement this
return None, None
#### plotting utilities
def plot_ridge_regression_1d ( axes, X, y, weights, limits, l2s=[0] ):
"""
Perform least-squares fits to the provided (X, y) data
using the specified levels of L2 regularisation, and plot
the results.
# Arguments
axes: a Matplotlib Axes object into which to plot
X: an array of sample data, where rows are samples
and the single column is the input feature.
y: vector of output values corresponding to the
rows of X
weights: a weight vector of length 2, specifying
the true generating model, with a bias term
at index 0.
limits: a tuple (low, high) specifying the value
range of the feature dimension x1
l2s: a list (or vector/array) of numeric values
specifying amounts of L2 regularisation to use.
"""
assert(len(X.shape)==2)
assert(X.shape[1]==1)
assert(X.shape[0]==len(y))
# plot the data
axes.scatter(X[:,0], y, marker='x', color='grey')
# plot the true relationship
y0 = weights[0] + limits[0] * weights[1]
y1 = weights[0] + limits[1] * weights[1]
axes.plot(limits, (y0, y1), linestyle='dashed', color='red', label='Ground Truth')
# fit for specified regs and plot the results
X1 = utils.add_x0(X)
cmap = plt.cm.get_cmap('jet')
for l2 in l2s:
w = ridge_closed(X1, y, l2)
y0 = w[0] + limits[0] * w[1]
y1 = w[0] + limits[1] * w[1]
        axes.plot(limits, (y0, y1), linestyle='solid', color=cmap(l2/np.max(l2s)), label=r'$\lambda=%.f$' % l2)
axes.set_xlim(limits[0], limits[1])
axes.set_ylim(limits[0], limits[1])
axes.set_xlabel('$x_1$')
axes.set_ylabel('$y$')
axes.legend(loc='upper left')
axes.set_title('Ridge Regression')
def plot_poly_fit_1d ( axes, X, y, weights, limits, degrees, l2=0 ):
"""
Fit polynomials of different degrees to the supplied
data, and plot the results.
# Arguments
axes: a Matplotlib Axes object into which to plot
X: an array of sample data, where rows are samples
and the single column is the input feature.
y: vector of output values corresponding to the inputs,
must be same length as number of rows in X
weights: the true polynomial coefficients from which
the data was generated
limits: a tuple (low, high) specifying the value
range of the feature dimension x1
degrees: a list of integer values specifying degrees
of polynomial to fit
l2: the amount of l2 regularisation to apply
# Returns
None
"""
assert(len(X.shape)==2)
assert(X.shape[1]==1)
assert(X.shape[0]==len(y))
axes.scatter(X, y, color='grey', marker='x')
print(f'true weights: {weights}')
ground_x, ground_y = utils.grid_sample(lambda x: utils.affine(monomial_projection_1d(x, len(weights)-1), weights),
1,
num_divisions=50, limits=limits)
axes.plot(ground_x, ground_y, color='red', linestyle='dashed', label='Ground Truth')
cmap = plt.cm.get_cmap('jet')
n = 0
for deg in degrees:
w = fit_poly_1d(X, y, deg, l2)
if w is None:
print('Polynomial fitting not implemented')
break
print(f'fit {deg} weights: {w}')
fit_x, fit_y = utils.grid_sample(lambda x: utils.affine(monomial_projection_1d(x, len(w)-1), w),
1,
num_divisions=50, limits=limits)
axes.plot(fit_x, fit_y, linestyle='solid', color=cmap(n/len(degrees)), label=f'Degree {deg} Fit')
n += 1
axes.set_xlim(limits[0], limits[1])
axes.set_xlabel('$x_1$')
axes.set_ylabel('$y$')
axes.legend(loc='upper right')
axes.set_title('Polynomial Fitting')
def plot_logistic_regression_2d ( axs, X, y, weights, limits ):
"""
Fit a 2D logistic regression classifier and plot the results.
Note that there are two separate plots produced here.
The first (in axs[0]) is an optimisation history, showing how the
loss decreases via gradient descent. The second (in axs[1]) is
the regression itself, showing data points and fit results.
# Arguments
axs: an array of 2 Matplotlib Axes objects into which
to plot.
X: an array of sample data, where rows are samples
and columns are features, including x0=1 terms.
y: vector of binary class labels for the samples,
must be same length as number of rows in X
weights: weights defining the true decision boundary
with which the data was generated
limits: a tuple (low, high) specifying the value
range of both feature dimensions
# Returns
None
"""
assert(len(X.shape)==2)
assert(X.shape[1]==3)
assert(X.shape[0]==len(y))
assert(len(weights)==3)
ww, ll = logistic_regression(X, y)
if ww is None:
utils.plot_unimplemented(axs[0], title='Logistic Regression Gradient Descent')
utils.plot_unimplemented(axs[1], title='Logistic Regression Results')
return
print('Number of iterations: %i' % len(ll))
axs[0].plot(ll)
axs[0].set_title('Logistic Regression Gradient Descent')
axs[0].set_xlabel('Iteration')
axs[0].set_ylabel('Logistic Loss')
Xm, ym = utils.grid_sample(lambda x: 1/(1 + np.exp(-utils.affine(x, ww[-1]))), 2, 100, limits)
axs[1].imshow(ym.T, cmap='coolwarm', origin='lower', extent=(limits[0], limits[1], limits[0], limits[1]), alpha=0.5)
axs[1].contour(ym.T, levels=[.5], origin='lower', extent=(limits[0], limits[1], limits[0], limits[1]))
y0 = -(weights[0] + limits[0] * weights[1]) / weights[2]
y1 = -(weights[0] + limits[1] * weights[1]) / weights[2]
axs[1].plot(limits, (y0, y1), linestyle='dashed', color='red', marker='')
axs[1].plot(X[y==0,1], X[y==0,2], linestyle='', color='orange', marker='v', label='Class 0')
axs[1].plot(X[y==1,1], X[y==1,2], linestyle='', color='darkorchid', marker='o', label='Class 1')
axs[1].set_xlabel('$x_1$')
axs[1].set_ylabel('$x_2$')
axs[1].legend(loc='upper left', framealpha=1)
axs[1].set_title('Logistic Regression Results')
#### TEST DRIVER
def process_args():
ap = argparse.ArgumentParser(description='week 2 coursework script for COMP0088')
ap.add_argument('-s', '--seed', help='seed random number generator', type=int, default=None)
ap.add_argument('-n', '--num_samples', help='number of samples to generate and fit', type=int, default=50)
ap.add_argument('file', help='name of output file to produce', nargs='?', default='week_2.pdf')
return ap.parse_args()
if __name__ == '__main__':
args = process_args()
rng = numpy.random.default_rng(args.seed)
LIMITS = (-5, 5)
WEIGHTS = np.array([0.5, -0.4, 0.6])
fig = plt.figure(figsize=(10, 10))
axs = fig.subplots(nrows=2, ncols=2)
print('Q1: testing unregularised least squares')
X, y = generate_noisy_linear(args.num_samples, WEIGHTS, 0.5, LIMITS, rng)
if X is None:
print('(week 1) linear generation not implemented')
utils.plot_unimplemented(axs[0,0], title='Ridge Regression')
else:
w = ridge_closed(utils.add_x0(X), y)
print('true weights: %.2f, %.2f, %.2f' % (WEIGHTS[0], WEIGHTS[1], WEIGHTS[2]))
if w is None:
print('regression not implemented')
utils.plot_unimplemented(axs[0,0], title='Ridge Regression')
else:
print('regressed weights: %.2f, %.2f, %.2f' % (w[0], w[1], w[2]))
print('squared error: %.2g' % np.dot(WEIGHTS-w, WEIGHTS-w))
print('plotting regularised least squares')
X, y = generate_noisy_linear(args.num_samples, WEIGHTS[1:], 3, LIMITS, rng)
plot_ridge_regression_1d ( axs[0, 0], X, y, WEIGHTS[1:], LIMITS, | np.arange(5) | numpy.arange |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 18 01:53:44 2018
@author: santanu
"""
import tensorflow as tf
import numpy as np
import os
import keras
import pickle
import fire
from elapsedtimer import ElapsedTimer
'''
Train Model for Movie Review
'''
class Review_sentiment:
def __init__(self,path,epochs):
self.batch_size = 250
self.train_to_val_ratio = 5.0
self.batch_size_val = int(self.batch_size/self.train_to_val_ratio)
self.epochs = epochs
self.hidden_states = 100
self.embedding_dim = 100
self.learning_rate =1e-4
self.n_words = 50000 + 1
self.checkpoint_step = 1
self.sentence_length = 1000
self.cell_layer_size = 1
self.lambda1 = 0.01
#self.path = '/home/santanu/Downloads/Mobile_App/'
self.path = path
self.X_train = np.load(self.path + "aclImdb/X_train.npy")
self.y_train = np.load(self.path + "aclImdb/y_train.npy")
self.y_train = np.reshape(self.y_train,(-1,1))
self.X_val = np.load(self.path + "aclImdb/X_val.npy")
self.y_val = np.load(self.path + "aclImdb/y_val.npy")
self.y_val = np.reshape(self.y_val,(-1,1))
self.X_test = np.load(self.path + "aclImdb/X_test.npy")
self.y_test = np.load(self.path + "aclImdb/y_test.npy")
self.y_test = np.reshape(self.y_test,(-1,1))
print (np.shape(self.X_train),np.shape(self.y_train))
print (np.shape(self.X_val),np.shape(self.y_val))
print ( | np.shape(self.X_test) | numpy.shape |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 27 10:22:28 2018
This module contains a list of functions that can be used to convert the IQ
measured on an instrument into the seeing at a different wavelength, airmass...
It is based on the definition of the IQ as defined on the ESO webpage
https://www.eso.org/observing/etc/doc/helpkmos.html
by
FWHM_IQ = sqrt (FWHM_ATM^2 + FWHM_TEL^2 + FWHM_INS^2) (1)
with
FWHM_ATM = FWHM_ORANG * (λc/500)^(-1/5) * airmass^3/5 * Fcorr
where :
• FWHM_ORANG is the current seeing measured by the MASS-DIMM at 500nm
and entered by the astronomer as an input in ORANG.
• airmass is the current airmass of the target
• λc is the observing wavelength in nm; this is bluest filter in the
OB science templates.
• Fcorr is a correction factor defined as
(1 + FKolb * 2.182 * (r0/L0)^0.356)^0.5 where
o r0 is the Fried parameter:
r0 = 0.976 * 500E-09 / FWHM_ORANG * ((180/PI)*3600) * (λc/500)^1.2 * [airmass ^ (-3/5)]
o Kolb factor: FKolb = 1/(1+300*D/L0)-1 such that FKolb(VLT) = -0.982
o L0 is the turbulence outer scale defined as L0 = 46 m
FWHM_TEL is the telescope diffraction limit FWHM at the observing
wavelength λc, for the VLT case: FWHM_TEL = 0.000212 * λc/8.2 [arcsec]
FWHM_INS (λc) is instrument transfer function FWHM at observing
wavelength λc in arcsec taken from instrument.cf file of IP
@author: jmilli
"""
import numpy as np
from sympy.core.power import Pow
from sympy.solvers import nsolve
from sympy import Symbol
def convert_strehl(sr1,lambda1,lambda2):
"""
Convert the strehl given at wavelength 1 to wavelenth 2
Input:
- sr1: Strehl ratio (between 0 and 1) at wavelength 1
- lambda1: wavelength 1 (in the same unit as wavelength2)
- lambda2: wavelength 2 (in the same unit as wavelength1)
"""
return np.power(sr1,(lambda1/lambda2)**2)
def IQ2seeing(IQ1,wavelength1=1.2e-6,airmass1=1.,\
wavelength2=500e-9,L0=20,D=8.,FWHMins=0.):
"""
Converts the IQ measured at a given wavelength for a given airmass into
a seeing, taking the telescope and instrument transfer function into account
as well as the atmosphere outer scale.
Input:
- IQ1: the image quality in arcsec
- wavelength1: the wavelength in m at which the IQ is provided (by
default 1.2 micron)
- wavelength2: the wavelength in m at which the seeing is desired
(by default 500nm)
- L0: the turbulence outer scale (by default 20m)
- airmass1: the airmass at which the IQ is measured
- D: diameter of the telescope (8m by default)
- FWHMins: the instrument transfer function in arcsec (by default 0)
Output:
- the seeing at zenith, at wavelength 2
"""
FWHMtel = np.rad2deg(wavelength1/D)*3600
    FWHMatm = np.sqrt(IQ1**2 - FWHMtel**2 - FWHMins**2)
seeing = FWHMatm2seeing(FWHMatm,wavelength1=wavelength1,airmass1=airmass1\
,wavelength2=wavelength2,L0=L0,D=D)
return seeing
def FWHMatm2seeing(IQ1,wavelength1=1.2e-6,airmass1=1.,wavelength2=500e-9,\
L0=20,D=8.):
"""
Converts the atmospheric FWHM measured at a given wavelength for a given
airmass into a seeing value at zenith, taking the turbulence outer
scale into account
Input:
- IQ1: the image quality in arcsec
- wavelength1: the wavelength in m at which the IQ is provided (by
default 1.2 micron)
- airmass1: the airmass at which the IQ is measured
- D: diameter of the telescope (8m by default)
- wavelength2: the wavelength in m at which the seeing is desired
(by default 500nm)
- L0: the turbulence outer scale (by default 20m)
Output:
- the seeing at zenith, at wavelength 2
"""
FKolb = 1./(1.+300*D/L0)-1
IQ_squared_rad = np.deg2rad(IQ1/3600.)**2
coeffA = (0.9759 * wavelength2 * np.power(wavelength2/wavelength1,-1./5.)*\
np.power(airmass1,3./5.))**2
coeffB = coeffA*FKolb*2.182/np.power(L0,0.356)
r0 = Symbol('r0',real=True)
    bounds = (1.e-3, 2.)
r0_sol = nsolve(IQ_squared_rad*r0**2-coeffB*Pow(r0,0.356)-coeffA, bounds)
if np.abs(np.imag(r0_sol))>1e-3:
raise ValueError('Problem: the solver found a complex solution:',r0_sol)
else:
        r0_sol_complex = complex(r0_sol)
seeing2_arcsec = np.rad2deg(0.9759*wavelength2/r0_sol_complex.real)*3600.
print('Input:')
print('IQ: {0:.2f}", lambda: {1:4.0f}nm, airmass: {2:.2f}, outer scale: {3:.2f}m, D: {4:.1f}m'.format(\
IQ1,wavelength1*1e9,airmass1,L0,D))
print('Output:')
print('lambda:{0:4.0f}nm, r0: {1:.2f}m, seeing: {2:.2f}"'.format(\
wavelength2*1e9,r0_sol_complex.real,seeing2_arcsec))
return seeing2_arcsec
def seeing2FWHMatm(seeing1,wavelength1=500.e-9,wavelength2=1200.e-9,L0=20,\
airmass2=1.,D=8.):
"""
Converts the seeing at zenith at wavelength 1 into an image quality, taken
into account the airmass, turbulence outer scale
Input:
- wavelength1: the wavelength in m at which the seeing is provided (by
default 500nm)
- airmass2: the airmass at which the IQ is desired
- diameter D: diameter of the telescope (8m by default)
- wavelength2: the wavelength in m at which the IQ is desired
(by default 1200nm)
- L0: the turbulence outer scale (by default 20m)
Output:
- the IQ in arcsec
"""
FKolb = 1./(1.+300*D/L0)-1
r0 = 0.9759 * wavelength1 / np.deg2rad(seeing1/3600.) * \
np.power(wavelength1/500.e-9,1.2) # r0 at 500nm
Fcorr = np.sqrt(1 + FKolb * 2.182 * np.power(r0/L0,0.356))
FWHMatm = seeing1 * | np.power(wavelength2/wavelength1,-1./5.) | numpy.power |
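# --- Editor's sketch (hypothetical continuation; this dataset row is cut off at the
# wavelength scaling above) ---
# The ESO recipe quoted in the module docstring finishes by applying the airmass
# scaling and the outer-scale correction factor Fcorr:
def _seeing2FWHMatm_sketch(seeing1, wavelength1=500.e-9, wavelength2=1200.e-9,
                           L0=20, airmass2=1., D=8.):
    FKolb = 1. / (1. + 300 * D / L0) - 1
    r0 = 0.9759 * wavelength1 / np.deg2rad(seeing1 / 3600.) * \
        np.power(wavelength1 / 500.e-9, 1.2)
    Fcorr = np.sqrt(1 + FKolb * 2.182 * np.power(r0 / L0, 0.356))
    return (seeing1 * np.power(wavelength2 / wavelength1, -1. / 5.)
            * np.power(airmass2, 3. / 5.) * Fcorr)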
"""
<NAME>
Date: June 16, 2021
functions for calculating Solar velocity corrections
and components for derivation of SDO/HMI RVs
"""
import datetime
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
import sunpy.map
from sunpy.net import Fido
from sunpy.net import attrs as a
from sunpy.coordinates import frames
from skimage.measure import label, regionprops
from SolAster.tools.settings import Parameters
def map_sequence(dates_list, time_range=datetime.timedelta(seconds=6), instrument=a.Instrument.aia,
wavelength=a.Wavelength(171 * u.angstrom)):
"""
function to query sunpy for images within certain time range of dates in dates list
user specified instrument and wavelength, otherwise default values: AIA 171A
Parameters
----------
dates_list: datetime, list
list of dates, either datetime or strings
time_range: datetime timedelta
plus/minus time range to search for images in comparison to desired timestamp
instrument: astropy inst
Sunpy instrument of choice (AIA, HMI, LASCO, EIT)
wavelength: astropy wvlth
desired wavelength of choice instrument
Returns
-------
maps: map
Sunpy map sequence object
"""
if isinstance(dates_list[0][0], str):
datetimes = [datetime.datetime.strptime(date[0], '%Y-%m-%dT%H:%M:%S') for date in dates_list]
else:
datetimes = dates_list
downloaded_files = []
for ind, datetime_object in enumerate(datetimes):
# pull image within specified time range
result = Fido.search(a.Time(str(datetime_object - time_range), str(datetime_object + time_range)),
instrument, wavelength)
# add file to list
downloaded_files.append(Fido.fetch(result))
# build map sequence from files
maps = sunpy.map.Map(downloaded_files, sequence=True)
return maps
def rel_positions(wij, nij, rij, smap):
"""
function to calculate pixel-wise relative positions in new coordinate frame
Parameters
----------
wij: float, array
array of westward values for image
nij: float, array
array of northward values for image
rij: float, array
array of radius values for image
smap: map
Sunpy map object
Returns
-------
deltaw: float, array
relative westward position of pixel
deltan: float, array
relative northward position of pixel
deltar: float, array
relative radial position of pixel
dij: float
distance between pixel ij and spacecraft
"""
# calculate relative positions of each pixel
rsc = smap.meta['dsun_obs'] / smap.meta['rsun_ref']
deltaw = wij
deltan = nij
deltar = rij - rsc
dij = np.sqrt(deltaw ** 2 + deltan ** 2 + deltar ** 2)
return deltaw, deltan, deltar, dij
def spacecraft_vel(deltaw, deltan, deltar, dij, vmap):
"""
function to calculate pixel-wise spacecraft velocities for Sunpy map
Based on Haywood et al. (2016) and described in Ervin et al. (2021) - In Prep.
Parameters
----------
deltaw: float, array
relative westward position of pixel
deltan: float, array
relative northward position of pixel
deltar: float, array
relative radial position of pixel
dij: float
distance between pixel ij and spacecraft
vmap: map
Sunpy map object (Dopplergram)
Returns
-------
vsc: float, array
array of spacecraft velocities
"""
# velocity of spacecraft relative to sun
vscw = vmap.meta['obs_vw']
vscn = vmap.meta['obs_vn']
vscr = vmap.meta['obs_vr']
# pixel-wise magnitude of spacecraft velocity
vsc = - (deltaw * vscw + deltan * vscn + deltar * vscr) / dij
return vsc
def solar_rot_vel(wij, nij, rij, deltaw, deltan, deltar, dij, vmap, a_parameters=[Parameters.a1, Parameters.a2, Parameters.a3]):
"""
function to calculate pixel-wise velocities due to solar rotation
Based on Haywood et al. (2016) and described in Ervin et al. (2021) - In Prep.
Parameters
----------
wij: float, array
array of westward values for image
nij: float, array
array of northward values for image
rij: float, array
array of radius values for image
deltaw: float, array
relative westward position of pixel
deltan: float, array
relative northward position of pixel
deltar: float, array
relative radial position of pixel
dij: float
distance between pixel ij and spacecraft
vmap: map
Sunpy map object (Dopplergram)
a_parameters: float, array
array of solar differential rotation parameters from Snodgrass & Ulrich (1990).
Returns
-------
vrot: float, array
        array of solar rotation velocities
"""
# apply to cartesian coordinates
x1 = wij
y1 = nij * np.cos(np.deg2rad(vmap.meta['crlt_obs'])) + rij * np.sin(np.deg2rad(vmap.meta['crlt_obs']))
z1 = - nij * np.sin(np.deg2rad(vmap.meta['crlt_obs'])) + rij * np.cos(np.deg2rad(vmap.meta['crlt_obs']))
hx = x1 * np.cos(np.deg2rad(vmap.meta['crln_obs'])) + z1 * np.sin(np.deg2rad(vmap.meta['crln_obs']))
hy = y1
hz = -x1 * np.sin(np.deg2rad(vmap.meta['crln_obs'])) + z1 * np.cos(np.deg2rad(vmap.meta['crln_obs']))
# apply parameters to determine vrot for given image pixel
w = (a_parameters[0] + a_parameters[1] * ((np.sin(hy)) ** 2) + a_parameters[2] * (
(np.sin(hy)) ** 4)) * 1. / 86400. * np.pi / 180.
# get projection of solar rotation
vx_rot = w * hz * vmap.meta['rsun_ref']
vy_rot = 0.
vz_rot = -w * hx * vmap.meta['rsun_ref']
v1 = np.cos(np.deg2rad(vmap.meta['crln_obs'])) * vx_rot - np.sin(np.deg2rad(vmap.meta['crln_obs'])) * vz_rot
v2 = vy_rot
v3 = np.sin(np.deg2rad(vmap.meta['crln_obs'])) * vx_rot + np.cos(np.deg2rad(vmap.meta['crln_obs'])) * vz_rot
# project into correct direction
vrotw = v1
vrotn = v2 * np.cos(np.deg2rad(vmap.meta['crlt_obs'])) - v3 * np.sin(np.deg2rad(vmap.meta['crlt_obs']))
vrotr = v2 * np.sin(np.deg2rad(vmap.meta['crlt_obs'])) + v3 * np.cos(np.deg2rad(vmap.meta['crlt_obs']))
# get full rotational velocity
vrot = (deltaw * vrotw + deltan * vrotn + deltar * vrotr) / dij
return vrot
def corrected_map(corrected_data, smap, map_type, frame=frames.HeliographicCarrington):
"""
function to make Sunpy map object from corrected data
Parameters
----------
corrected_data: float, array
corrected velocity data
smap: map
original Sunpy map object
map_type: map type
map type for 'content' section of fits header (string)
frame: sunpy coordinate frame
new rotation frame
Returns
-------
corr_map: map
Sunpy map object with new frame information and corrected data
"""
# build SkyCoord instance in new frame
coord = SkyCoord(0 * u.arcsec, 0 * u.arcsec, obstime=smap.date, observer=smap.observer_coordinate,
frame=frame)
# create fits header file with data and coordinate system information
header = sunpy.map.make_fitswcs_header(corrected_data, coord)
# update fits header with instrument and content information
header['content'] = map_type
header['telescop'] = smap.meta['telescop']
header['wavelnth'] = smap.meta['wavelnth']
# create new Sunpy map instance with corrected data
corr_map = sunpy.map.Map(corrected_data, header)
return corr_map
def mag_field(mu, mmap, B_noise=Parameters.B_noise, mu_cutoff=Parameters.mu_cutoff):
"""
function to correct for unsigned magnetic field strength and magnetic noise
Based on Haywood et al. (2016) and described in Ervin et al. (2021) - In Prep.
Parameters
----------
mu: float, array
array of mu (cosine theta) values
mmap: map
Sunpy map object (Magnetogram)
B_noise: int
magnetic noise level in Gauss
mu_cutoff: float
minimum mu cutoff value
Returns
-------
Bobs: float, array
array of corrected observed magnetic field strength
Br: float, array
array of corrected unsigned magnetic field strength
"""
# get valid indices
    use_indices = np.logical_and(mu > mu_cutoff, ~np.isnan(mu))
mag_indices = np.logical_and(use_indices, np.abs(mmap.data) < B_noise)
# calculate full magnetic field strength
Bobs = mmap.data
Br = np.full(shape=mmap.data.shape, fill_value=np.nan)
Br[use_indices] = Bobs[use_indices] / mu[use_indices]
Bobs[mag_indices] = 0
Br[mag_indices] = 0
return Bobs, Br
def mag_thresh(mu, mmap, Br_cutoff=Parameters.Br_cutoff, mu_cutoff=Parameters.mu_cutoff):
"""
function to calculate magnetic threshold and differentiate between magnetically active regions and quiet Sun
Based on Yeo et al. (2013) and described in Ervin et al. (2021) - In Prep.
Parameters
----------
mu: float, array
array of mu (cosine theta) values
mmap: map
corrected (unsigned magnetic field) Sunpy map object (Magnetogram)
Br_cutoff: int
minimum cutoff value (in Gauss) for thresholding active regions
mu_cutoff: float
minimum mu cutoff value for data to ignore
Returns
-------
active: int, array
weights array where active pixels are 1
quiet: int, array
weights array where active pixels are 0
"""
# get active region indices
active_inds = np.where(np.abs(mmap.data) * mu > Br_cutoff)
bad_mu = np.where(mu <= mu_cutoff)
# make active region array
active = np.zeros(mu.shape)
active[active_inds] = 1.
active[bad_mu] = 0.
# find isolated pixels
# get area
y_labeled = label(active, connectivity=2, background=0)
y_area = [props.area for props in regionprops(y_labeled)]
# area constraint
good_area = np.where(np.array(y_area) > 5)
good_area = good_area[0] + 1
active_indices = np.isin(y_labeled, good_area)
# create weights array
active[~active_indices] = 0
# get quiet indices
quiet = 1 - active
return active, quiet
def int_thresh(map_int_cor, active, quiet):
"""
function to do intensity thresholding and differentiate between faculae (bright) and sunspots (dark)
Based on Yeo et al. (2013) and described in Ervin et al. (2021) - In Prep.
Parameters
----------
map_int_cor: map
corrected (limb-darkening) Sunpy map object (Intensitygram)
active: int, array
weights array where active pixels are 1
quiet: int, array
weights array where active pixels are 0
Returns
-------
fac_inds: int, array
array of indices where faculae are detected
spot_inds: int, array
array of indices where sunspots are detected
"""
# flattened intensity data
Iflat = map_int_cor.data
# calculate quiet sun intensity
int_quiet = np.nansum(Iflat * quiet) / np.nansum(quiet)
# intensity threshold
int_cutoff = 0.89 * int_quiet
# get faculae
fac_inds = np.logical_and((Iflat > int_cutoff), (active > 0.5))
# get sunspots
spot_inds = np.logical_and((Iflat <= int_cutoff), (active > 0.5))
return fac_inds, spot_inds
def thresh_map(fac_inds, spot_inds):
"""
function that creates thresholded map of sunspots (-1) and faculae (1)
Parameters
----------
fac_inds: int, array
array of indices where faculae are detected
spot_inds: int, array
array of indices where sunspots are detected
Returns
-------
thresh_arr: int, array
array of values denoting faculae (1) and sunspots (-1)
"""
thresh_arr = np.full(shape=fac_inds.shape, fill_value=np.nan)
thresh_arr[fac_inds] = 1
thresh_arr[spot_inds] = -1
return thresh_arr
def v_quiet(map_vel_cor, imap, quiet):
"""
function to calculate velocity due to convective motion of quiet-Sun
Based on Haywood et al. (2016) and described in Ervin et al. (2021) - In Prep.
Parameters
----------
map_vel_cor: map
corrected (velocities) Sunpy map object (Dopplergram)
imap: map
UNCORRECTED Sunpy map object (Intensitygram)
quiet: int, array
weights array where active pixels have weight = 0
Returns
-------
v_quiet: float
quiet-Sun velocity
"""
v_quiet = np.nansum(map_vel_cor.data * imap.data * quiet) / np.nansum(
imap.data * quiet)
return v_quiet
def v_phot(quiet, active, Lij, vrot, imap, mu, fac_inds, spot_inds, mu_cutoff=Parameters.mu_cutoff):
"""
function to calculate photometric velocity due to rotational Doppler variation
Based on Haywood et al. (2016) and described in Ervin et al. (2021) - In Prep.
Parameters
----------
quiet: int, array
weights array where active pixels have weight = 0
active: int, array
weights array where active pixels have weight = 1
Lij: float, array
limb-darkening polynomial function
vrot: float, array
solar rotational velocity
imap: map
UNCORRECTED Sunpy map object (Intensitygram)
mu: float, array
array of mu values
fac_inds: int, array
array of indices where faculae are detected
spot_inds: int, array
array of indices where sunspots are detected
mu_cutoff: float
minimum mu cutoff value
Returns
-------
v_phot: float
photospheric velocity perturbation
"""
# get good mu values
good_mu = np.where(mu > mu_cutoff)
# calculate K scaling factor
K = np.nansum(imap.data * Lij * quiet) / np.sum((Lij[good_mu] ** 2) * quiet[good_mu])
# calculate photospheric velocity
v_phot = np.nansum(np.real(vrot) * (imap.data - K * Lij) * active) / np.nansum(imap.data)
# faculae driven photospheric velocity
vphot_bright = np.nansum(np.real(vrot) * (imap.data - K * Lij) * fac_inds) / np.nansum(imap.data)
# sunspots driven photospheric velocity
vphot_spot = np.nansum(np.real(vrot) * (imap.data - K * Lij) * spot_inds) / np.nansum(imap.data)
return v_phot, vphot_bright, vphot_spot
def v_disc(map_vel_cor, imap):
"""
function to calculate disc-averaged velocity of Sun
Based on Haywood et al. (2016) and described in Ervin et al. (2021) - In Prep.
Parameters
----------
map_vel_cor: map
corrected (velocities) Sunpy map object (Dopplergram)
imap: map
UNCORRECTED Sunpy map object (Intensitygram)
Returns
-------
v_disc: float
disc averaged velocity of Sun
"""
v_disc = np.nansum(map_vel_cor.data * imap.data) / np.nansum(imap.data)
return v_disc
def filling_factor(mu, mmap, active, fac_inds, spot_inds, mu_cutoff=Parameters.mu_cutoff):
"""
function to calculate filling factors for faculae, sunspots, and
total magnetically active regions
- percentage of magnetically active pixels on the solar surface at any one time
Based on Haywood et al. (2016) and described in Ervin et al. (2021) - In Prep.
Parameters
----------
mu: float, array
array of mu (cosine theta) values
mmap: map
UNCORRECTED Sunpy map object (Magnetogram)
active: int, array
weights array where active pixels have weight = 1
fac_inds: int, array
array of indices where faculae are detected
spot_inds: int, array
array of indices where sunspots are detected
mu_cutoff: float
minimum mu cutoff value
Returns
-------
f_bright: float
filling factor (%) for bright areas (faculae)
f_spot: float
filling factor (%) for dark areas (sunspots)
f_total: float
filling factor (%) for timestamp
"""
# get good mu values
good_mu = np.where(mu > mu_cutoff)
# get number of pixels
npix = len(mmap.data[good_mu])
# faculae
faculae = np.zeros(mmap.data.shape)
faculae[fac_inds] = 1.
f_bright = np.sum(faculae) / npix * 100
# sunspots
spots = np.zeros(mmap.data.shape)
spots[spot_inds] = 1.
f_spot = np.sum(spots) / npix * 100
# get filling factor
f_total = np.sum(active) / npix * 100
return f_bright, f_spot, f_total
def unsigned_flux(map_mag_obs, imap):
"""
calculate unsigned magnetic flux
Based on Haywood et al. (2016) and described in Ervin et al. (2021) - In Prep.
Parameters
----------
map_mag_obs: map
corrected observed magnetic field strength Sunpy map object (Magnetogram)
imap: map
UNCORRECTED Sunpy map object (Intensitygram)
Returns
-------
unsign_flux: float
unsigned magnetic flux
"""
# get data arrays
i_data = imap.data
m_data = map_mag_obs.data
mabs_data = np.abs(m_data)
# unsigned flux
unsign_flux = np.nansum(mabs_data * i_data) / np.nansum(i_data)
return np.abs(unsign_flux)
def area_calc(active, pixA_hem):
"""
calculate area of active pixels for a thresholded map
Based on Milbourne et al. (2019) and described in Ervin et al. (2021) - In Prep.
Parameters
----------
active: int, array
weights array where active pixels have weight = 1
pixA_hem: float, array
pixel areas in uHem
Returns
-------
area: float, array
area of each active region weighted by its intensity
"""
# get labeling of image
labeled = label(active)
# get area of active regions
area = np.zeros(active.shape)
props = regionprops(labeled)
info = regionprops(labeled, pixA_hem)
# add area to array
for k in range(1, len(info)):
area[props[k].coords[:, 0], props[k].coords[:, 1]] = info[k].area * info[k].mean_intensity
return area
def area_filling_factor(active, area, mu, mmap, fac_inds, athresh=Parameters.athresh, mu_cutoff=Parameters.mu_cutoff):
"""
calculate filling factor for regions thresholded by area
- differentiate between large and small regions
- differentiate between plage (large) and network (small) bright regions
Parameters
----------
active: int, array
weights array where active pixels have weight = 1
area: float, array
area of each active region weighted by its intensity
mu: float, array
array of mu (cosine theta) values
mmap: map
UNCORRECTED Sunpy map object (Magnetogram)
fac_inds: int, array
array of indices where faculae are detected
athresh: int
area threshold value between large and small regions (in uHem)
mu_cutoff: float
minimum mu cutoff value for usable data
Returns
-------
f_small: float
filling factor (%) for small magnetically active regions
f_large: float
filling factor (%) for large magnetically active regions
f_network: float
filling factor (%) for network (small, bright magnetically active) regions
f_plage: float
filling factor (%) for plage (large, bright magnetically active) regions
    f_nonconv: float
        (not currently returned; the corresponding computation is commented out in the body)
"""
# get good mu values
good_mu = np.where(mu > mu_cutoff)
# get number of pixels
npix = len(mmap.data[good_mu])
# get quiet pixels
quiet = 1 - active
# get filling factor for 'small' magnetic features
small = np.zeros(mmap.data.shape)
small_inds = np.logical_and(active > 0.5, area < athresh)
small[small_inds] = 1.
f_small = np.nansum(small) / npix * 100
# get filling factor for 'large' magnetic features
large = np.zeros(mmap.data.shape)
large_inds = np.logical_and(active > 0.5, area > athresh)
large[large_inds] = 1.
f_large = np.nansum(large) / npix * 100
# get filling factor for network (small, faculae regions)
network = np.zeros(mmap.data.shape)
network_inds = np.logical_and(small > 0.5, fac_inds > 0.5)
network[network_inds] = 1.
f_network = np.nansum(network) / npix * 100
# get filling factor for plage (large, faculae regions)
plage = np.zeros(mmap.data.shape)
plage_inds = np.logical_and(large > 0.5, fac_inds > 0.5)
plage[plage_inds] = 1.
f_plage = np.nansum(plage) / npix * 100
# # get filling factor for small, non-convective regions
# nonconv = np.zeros(mmap.data.shape)
# nonconv_inds = np.logical_and(quiet > 0.5, small > 0.5)
# nonconv[nonconv_inds] = 1.
# f_nonconv = np.nansum(nonconv) / npix * 100
return f_small, f_large, f_network, f_plage
def area_unsigned_flux(map_mag_obs, imap, area, active, athresh=Parameters.athresh):
"""
calculate the magnetic flux for different regions based on area cut
and magnetic activitiy
Parameters
----------
map_mag_obs: map
corrected observed magnetic field strength Sunpy map object (Magnetogram)
imap: map
UNCORRECTED Sunpy map object (Intensitygram)
area: float, array
area of each active region weighted by its intensity
active: int, array
weights array where active pixels have weight = 1
athresh: int
area threshold value between large and small regions (in uHem)
Returns
-------
quiet_flux: float
magnetic flux of quiet-Sun regions
ar_flux: float
magnetic flux of active regions
conv_flux: float
magnetic flux of regions that suppress convective blueshift
pol_flux: float
magnetic flux of polarized regions
pol_conv_flux: float
magnetic flux of polarized regions that suppress the convective blueshift
"""
# get data arrays
i_data = imap.data
m_data = map_mag_obs.data
mabs_data = np.abs(m_data)
quiet = 1 - active
# get large regions array
large = np.zeros(m_data.shape)
large_inds = np.logical_and(active > 0.5, area > athresh)
large[large_inds] = 1.
# calculate relevant fluxes
quiet_flux = np.nansum(mabs_data * i_data * quiet) / np.nansum(i_data * quiet)
ar_flux = np.nansum(mabs_data * i_data * active) / np.nansum(i_data * active)
conv_flux = np.nansum(mabs_data * i_data * large) / np.nansum(i_data * large)
pol_flux = np.nansum(m_data * i_data) / np.nansum(i_data)
pol_conv_flux = np.nansum(m_data * i_data * large) / np.nansum(i_data * large)
return quiet_flux, ar_flux, conv_flux, pol_flux, pol_conv_flux
def area_vconv(map_vel_cor, imap, active, area, athresh=Parameters.athresh):
"""
calculate convective velocities for different area thresholds
Parameters
----------
map_vel_cor: map
corrected (velocities) Sunpy map object (Dopplergram)
imap: map
UNCORRECTED Sunpy map object (Intensitygram)
active: int, array
weights array where active pixels have weight = 1
area: float, array
area of each active region weighted by its intensity
athresh: int
area threshold value between large and small regions (in uHem)
Returns
-------
vconv_quiet: float
convective velocity due to quiet-Sun regions
vconv_large: float
convective velocity due to large active regions
vconv_small: float
convective velocity due to small active regions
"""
# get data arrays
v_data = map_vel_cor.data
i_data = imap.data
# get large regions array
large = np.zeros(v_data.shape)
large_inds = np.logical_and(active > 0.5, area > athresh)
large[large_inds] = 1.
# get small regions array
small = np.zeros(v_data.shape)
small_inds = np.logical_and(active > 0.5, area < athresh)
small[small_inds] = 1.
# label the regions
labeled = label(large)
v_props = regionprops(labeled, v_data)
i_props = regionprops(labeled, i_data)
# labeled for small regions
labeled = label(small)
v_small = regionprops(labeled, v_data)
i_small = regionprops(labeled, i_data)
# get quiet regions array
quiet = 1 - active
# array of non-convective regions
nonconv = np.zeros(v_data.shape)
nonconv_inds = np.logical_or(quiet > 0.5, small > 0.5)
nonconv[nonconv_inds] = 1.
# velocities of non convective regions
vel_quiet = np.nansum(v_data * i_data * quiet) / np.sum(i_data * quiet)
vel_nonconv = np.nansum(v_data * i_data * nonconv) / np.sum(i_data * nonconv)
# velocities of convective regions
vconv_large = np.zeros(len(v_props))
vconv_small = np.zeros(len(v_props))
vconv_quiet = np.zeros(len(v_props))
for k in range(len(v_props)):
vconv_large[k] = v_props[k].area * (v_props[k].mean_intensity - vel_quiet) * i_props[k].mean_intensity
vconv_small[k] = v_small[k].area * (v_small[k].mean_intensity - vel_quiet) * i_small[k].mean_intensity
vconv_quiet[k] = v_props[k].area * (v_props[k].mean_intensity - vel_nonconv) * i_props[k].mean_intensity
# convective velocity of quiet regions
vconv_quiet = np.nansum(vconv_quiet) / np.sum(i_data)
# convective velocity of large regions
vconv_large = np.nansum(vconv_large) / np.sum(i_data)
# coding: utf-8
import logging
import matplotlib.pyplot as plt
import numpy
from astropy import constants
from astropy import units as u
from astropy.coordinates import SkyCoord, EarthLocation
from wrappers.arlexecute.simulation.configurations import create_named_configuration
from processing_library.util.coordinate_support import xyz_to_uvw, uvw_to_xyz, skycoord_to_lmn, simulate_point, pa_z
def create_propagators(config, interferer: EarthLocation, frequency, **kwargs):
""" Create a set of propagators
:return: Array
"""
nchannels = len(frequency)
nants = len(config.data['names'])
interferer_xyz = [interferer.geocentric[0].value, interferer.geocentric[1].value, interferer.geocentric[2].value]
propagators = numpy.zeros([nants, nchannels], dtype='complex')
for iant, ant_xyz in enumerate(config.xyz):
vec = ant_xyz - interferer_xyz
r = numpy.sqrt(vec[0] ** 2 + vec[1] ** 2 + vec[2] ** 2)
k = 2.0 * numpy.pi * frequency / constants.c.value
propagators[iant, :] = numpy.exp(- 1.0j * k * r) / r
return propagators
def calculate_interferer_fringe(propagators):
nants, nchannels = propagators.shape
# Now calculate the interferer propagator fringe. This is static in time.
interferer_fringe = numpy.zeros([nants, nants, nchannels], dtype='complex')
for chan in range(nchannels):
interferer_fringe[..., chan] = numpy.outer(propagators[..., chan],
numpy.conjugate(propagators[..., chan]))
return interferer_fringe
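# Quick illustrative check with assumed toy values (not part of the pipeline):
# for every channel the interferer fringe is the outer product of the propagator
# vector with its own conjugate, i.e.
# interferer_fringe[i, j, chan] = propagators[i, chan] * conj(propagators[j, chan]).
def _example_interferer_fringe():
    rng = numpy.random.default_rng(1)
    toy = numpy.exp(1j * rng.uniform(0.0, 2.0 * numpy.pi, size=(3, 2)))  # 3 antennas, 2 channels
    fringe = calculate_interferer_fringe(toy)
    assert fringe.shape == (3, 3, 2)
    assert numpy.allclose(fringe[1, 2, 0], toy[1, 0] * numpy.conjugate(toy[2, 0]))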
def calculate_station_fringe_rotation(ants_xyz, times, frequency, phasecentre, pole):
# Time corresponds to hour angle
uvw = xyz_to_uvw(ants_xyz, times, phasecentre.dec.rad)
nants, nuvw = uvw.shape
ntimes = len(times)
uvw = uvw.reshape([nants, ntimes, 3])
lmn = skycoord_to_lmn(phasecentre, pole)
delay = numpy.dot(uvw, lmn)
nchan = len(frequency)
phase = numpy.zeros([nants, ntimes, nchan])
from itertools import combinations_with_replacement
import numpy as np
from scipy import ndimage as ndi
from scipy import stats
from scipy import spatial
from ..util import img_as_float
from .peak import peak_local_max
from .util import _prepare_grayscale_input_2D, _prepare_grayscale_input_nD
from .corner_cy import _corner_fast
from ._hessian_det_appx import _hessian_matrix_det
from ..transform import integral_image
from .._shared.utils import safe_as_int
from .corner_cy import _corner_moravec, _corner_orientations
from warnings import warn
def _compute_derivatives(image, mode='constant', cval=0):
"""Compute derivatives in axis directions using the Sobel operator.
Parameters
----------
image : ndarray
Input image.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
derivatives : list of ndarray
Derivatives in each axis direction.
"""
derivatives = [ndi.sobel(image, axis=i, mode=mode, cval=cval)
for i in range(image.ndim)]
return derivatives
def structure_tensor(image, sigma=1, mode='constant', cval=0, order=None):
"""Compute structure tensor using sum of squared differences.
The (2-dimensional) structure tensor A is defined as::
A = [Arr Arc]
[Arc Acc]
which is approximated by the weighted sum of squared differences in a local
window around each pixel in the image. This formula can be extended to a
larger number of dimensions (see [1]_).
Parameters
----------
image : ndarray
Input image.
sigma : float, optional
Standard deviation used for the Gaussian kernel, which is used as a
weighting function for the local summation of squared differences.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
order : {'rc', 'xy'}, optional
NOTE: Only applies in 2D. Higher dimensions must always use 'rc' order.
This parameter allows for the use of reverse or forward order of
the image axes in gradient computation. 'rc' indicates the use of
the first axis initially (Arr, Arc, Acc), whilst 'xy' indicates the
usage of the last axis initially (Axx, Axy, Ayy).
Returns
-------
A_elems : list of ndarray
Upper-diagonal elements of the structure tensor for each pixel in the
input image.
Examples
--------
>>> from skimage.feature import structure_tensor
>>> square = np.zeros((5, 5))
>>> square[2, 2] = 1
>>> Arr, Arc, Acc = structure_tensor(square, sigma=0.1, order='rc')
>>> Acc
array([[0., 0., 0., 0., 0.],
[0., 1., 0., 1., 0.],
[0., 4., 0., 4., 0.],
[0., 1., 0., 1., 0.],
[0., 0., 0., 0., 0.]])
See also
--------
structure_tensor_eigenvalues
References
----------
.. [1] https://en.wikipedia.org/wiki/Structure_tensor
"""
if order == 'xy' and image.ndim > 2:
raise ValueError('Only "rc" order is supported for dim > 2.')
if order is None:
if image.ndim == 2:
# The legacy 2D code followed (x, y) convention, so we swap the
# axis order to maintain compatibility with old code
warn('deprecation warning: the default order of the structure '
'tensor values will be "row-column" instead of "xy" starting '
'in skimage version 0.20. Use order="rc" or order="xy" to '
'set this explicitly. (Specify order="xy" to maintain the '
'old behavior.)', category=FutureWarning, stacklevel=2)
order = 'xy'
else:
order = 'rc'
image = _prepare_grayscale_input_nD(image)
derivatives = _compute_derivatives(image, mode=mode, cval=cval)
if order == 'xy':
derivatives = reversed(derivatives)
# structure tensor
A_elems = [ndi.gaussian_filter(der0 * der1, sigma, mode=mode, cval=cval)
for der0, der1 in combinations_with_replacement(derivatives, 2)]
return A_elems
def hessian_matrix(image, sigma=1, mode='constant', cval=0, order='rc'):
"""Compute Hessian matrix.
The Hessian matrix is defined as::
H = [Hrr Hrc]
[Hrc Hcc]
which is computed by convolving the image with the second derivatives
of the Gaussian kernel in the respective r- and c-directions.
Parameters
----------
image : ndarray
Input image.
sigma : float
Standard deviation used for the Gaussian kernel, which is used as
weighting function for the auto-correlation matrix.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
order : {'rc', 'xy'}, optional
This parameter allows for the use of reverse or forward order of
the image axes in gradient computation. 'rc' indicates the use of
the first axis initially (Hrr, Hrc, Hcc), whilst 'xy' indicates the
usage of the last axis initially (Hxx, Hxy, Hyy)
Returns
-------
Hrr : ndarray
Element of the Hessian matrix for each pixel in the input image.
Hrc : ndarray
Element of the Hessian matrix for each pixel in the input image.
Hcc : ndarray
Element of the Hessian matrix for each pixel in the input image.
Examples
--------
>>> from skimage.feature import hessian_matrix
>>> square = np.zeros((5, 5))
>>> square[2, 2] = 4
>>> Hrr, Hrc, Hcc = hessian_matrix(square, sigma=0.1, order='rc')
>>> Hrc
array([[ 0., 0., 0., 0., 0.],
[ 0., 1., 0., -1., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., -1., 0., 1., 0.],
[ 0., 0., 0., 0., 0.]])
"""
image = img_as_float(image)
gaussian_filtered = ndi.gaussian_filter(image, sigma=sigma,
mode=mode, cval=cval)
gradients = np.gradient(gaussian_filtered)
axes = range(image.ndim)
if order == 'rc':
axes = reversed(axes)
H_elems = [np.gradient(gradients[ax0], axis=ax1)
for ax0, ax1 in combinations_with_replacement(axes, 2)]
return H_elems
def hessian_matrix_det(image, sigma=1, approximate=True):
"""Compute the approximate Hessian Determinant over an image.
The 2D approximate method uses box filters over integral images to
compute the approximate Hessian Determinant, as described in [1]_.
Parameters
----------
image : array
The image over which to compute Hessian Determinant.
sigma : float, optional
Standard deviation used for the Gaussian kernel, used for the Hessian
matrix.
approximate : bool, optional
If ``True`` and the image is 2D, use a much faster approximate
computation. This argument has no effect on 3D and higher images.
Returns
-------
out : array
The array of the Determinant of Hessians.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>,
"SURF: Speeded Up Robust Features"
ftp://ftp.vision.ee.ethz.ch/publications/articles/eth_biwi_00517.pdf
Notes
-----
For 2D images when ``approximate=True``, the running time of this method
only depends on size of the image. It is independent of `sigma` as one
would expect. The downside is that the result for `sigma` less than `3`
is not accurate, i.e., not similar to the result obtained if someone
computed the Hessian and took its determinant.
"""
image = img_as_float(image)
if image.ndim == 2 and approximate:
integral = integral_image(image)
return np.array(_hessian_matrix_det(integral, sigma))
else: # slower brute-force implementation for nD images
hessian_mat_array = _symmetric_image(hessian_matrix(image, sigma))
return np.linalg.det(hessian_mat_array)
def _image_orthogonal_matrix22_eigvals(M00, M01, M11):
l1 = (M00 + M11) / 2 + np.sqrt(4 * M01 ** 2 + (M00 - M11) ** 2) / 2
l2 = (M00 + M11) / 2 - np.sqrt(4 * M01 ** 2 + (M00 - M11) ** 2) / 2
return l1, l2
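# Sanity sketch (illustrative only, not part of the library API): the closed form
# above is the usual eigenvalue formula for a symmetric 2x2 matrix
# [[M00, M01], [M01, M11]],
#     l_{1,2} = (M00 + M11)/2 +/- sqrt(4*M01**2 + (M00 - M11)**2)/2,
# and can be cross-checked against np.linalg.eigvalsh on scalar inputs.
def _example_matrix22_eigvals():
    M00, M01, M11 = 2.0, 0.5, -1.0
    l1, l2 = _image_orthogonal_matrix22_eigvals(M00, M01, M11)
    ref = np.linalg.eigvalsh(np.array([[M00, M01], [M01, M11]]))  # ascending order
    assert np.allclose(sorted([l1, l2]), ref)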
def _symmetric_compute_eigenvalues(S_elems):
"""Compute eigenvalues from the upper-diagonal entries of a symmetric matrix
Parameters
----------
S_elems : list of ndarray
The upper-diagonal elements of the matrix, as returned by
`hessian_matrix` or `structure_tensor`.
Returns
-------
eigs : ndarray
The eigenvalues of the matrix, in decreasing order. The eigenvalues are
the leading dimension. That is, ``eigs[i, j, k]`` contains the
ith-largest eigenvalue at position (j, k).
"""
if len(S_elems) == 3: # Use fast Cython code for 2D
eigs = np.stack(_image_orthogonal_matrix22_eigvals(*S_elems))
else:
matrices = _symmetric_image(S_elems)
# eigvalsh returns eigenvalues in increasing order. We want decreasing
eigs = np.linalg.eigvalsh(matrices)[..., ::-1]
leading_axes = tuple(range(eigs.ndim - 1))
eigs = np.transpose(eigs, (eigs.ndim - 1,) + leading_axes)
return eigs
def _symmetric_image(S_elems):
"""Convert the upper-diagonal elements of a matrix to the full
symmetric matrix.
Parameters
----------
S_elems : list of array
The upper-diagonal elements of the matrix, as returned by
`hessian_matrix` or `structure_tensor`.
Returns
-------
image : array
An array of shape ``(M, N[, ...], image.ndim, image.ndim)``,
containing the matrix corresponding to each coordinate.
"""
image = S_elems[0]
symmetric_image = np.zeros(image.shape + (image.ndim, image.ndim))
for idx, (row, col) in \
enumerate(combinations_with_replacement(range(image.ndim), 2)):
symmetric_image[..., row, col] = S_elems[idx]
symmetric_image[..., col, row] = S_elems[idx]
return symmetric_image
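# Small illustrative example (assumed toy input, not part of the library API):
# for a 2D image the three upper-diagonal planes (S_rr, S_rc, S_cc) are packed
# into an (M, N, 2, 2) array with S_rc mirrored across the diagonal.
def _example_symmetric_image():
    Srr = np.full((2, 2), 1.0)
    Src = np.full((2, 2), 2.0)
    Scc = np.full((2, 2), 3.0)
    sym = _symmetric_image([Srr, Src, Scc])
    assert sym.shape == (2, 2, 2, 2)
    assert np.allclose(sym[0, 0], [[1.0, 2.0], [2.0, 3.0]])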
def structure_tensor_eigenvalues(A_elems):
"""Compute eigenvalues of structure tensor.
Parameters
----------
A_elems : list of ndarray
The upper-diagonal elements of the structure tensor, as returned
by `structure_tensor`.
Returns
-------
ndarray
The eigenvalues of the structure tensor, in decreasing order. The
eigenvalues are the leading dimension. That is, the coordinate
[i, j, k] corresponds to the ith-largest eigenvalue at position (j, k).
Examples
--------
>>> from skimage.feature import structure_tensor
>>> from skimage.feature import structure_tensor_eigenvalues
>>> square = np.zeros((5, 5))
>>> square[2, 2] = 1
>>> A_elems = structure_tensor(square, sigma=0.1, order='rc')
>>> structure_tensor_eigenvalues(A_elems)[0]
array([[0., 0., 0., 0., 0.],
[0., 2., 4., 2., 0.],
[0., 4., 0., 4., 0.],
[0., 2., 4., 2., 0.],
[0., 0., 0., 0., 0.]])
See also
--------
structure_tensor
"""
return _symmetric_compute_eigenvalues(A_elems)
def structure_tensor_eigvals(Axx, Axy, Ayy):
"""Compute eigenvalues of structure tensor.
Parameters
----------
Axx : ndarray
Element of the structure tensor for each pixel in the input image.
Axy : ndarray
Element of the structure tensor for each pixel in the input image.
Ayy : ndarray
Element of the structure tensor for each pixel in the input image.
Returns
-------
l1 : ndarray
Larger eigen value for each input matrix.
l2 : ndarray
Smaller eigen value for each input matrix.
Examples
--------
>>> from skimage.feature import structure_tensor, structure_tensor_eigvals
>>> square = np.zeros((5, 5))
>>> square[2, 2] = 1
>>> Arr, Arc, Acc = structure_tensor(square, sigma=0.1, order='rc')
>>> structure_tensor_eigvals(Acc, Arc, Arr)[0]
array([[0., 0., 0., 0., 0.],
[0., 2., 4., 2., 0.],
[0., 4., 0., 4., 0.],
[0., 2., 4., 2., 0.],
[0., 0., 0., 0., 0.]])
"""
warn('deprecation warning: the function structure_tensor_eigvals is '
'deprecated and will be removed in version 0.20. Please use '
'structure_tensor_eigenvalues instead.',
category=FutureWarning, stacklevel=2)
return _image_orthogonal_matrix22_eigvals(Axx, Axy, Ayy)
def hessian_matrix_eigvals(H_elems):
"""Compute eigenvalues of Hessian matrix.
Parameters
----------
H_elems : list of ndarray
The upper-diagonal elements of the Hessian matrix, as returned
by `hessian_matrix`.
Returns
-------
eigs : ndarray
The eigenvalues of the Hessian matrix, in decreasing order. The
eigenvalues are the leading dimension. That is, ``eigs[i, j, k]``
contains the ith-largest eigenvalue at position (j, k).
Examples
--------
>>> from skimage.feature import hessian_matrix, hessian_matrix_eigvals
>>> square = np.zeros((5, 5))
>>> square[2, 2] = 4
>>> H_elems = hessian_matrix(square, sigma=0.1, order='rc')
>>> hessian_matrix_eigvals(H_elems)[0]
array([[ 0., 0., 2., 0., 0.],
[ 0., 1., 0., 1., 0.],
[ 2., 0., -2., 0., 2.],
[ 0., 1., 0., 1., 0.],
[ 0., 0., 2., 0., 0.]])
"""
return _symmetric_compute_eigenvalues(H_elems)
def shape_index(image, sigma=1, mode='constant', cval=0):
"""Compute the shape index.
The shape index, as defined by Koenderink & <NAME> [1]_, is a
single valued measure of local curvature, assuming the image as a 3D plane
with intensities representing heights.
It is derived from the eigen values of the Hessian, and its
value ranges from -1 to 1 (and is undefined (=NaN) in *flat* regions),
with following ranges representing following shapes:
.. table:: Ranges of the shape index and corresponding shapes.
=================== =============
Interval (s in ...) Shape
=================== =============
[ -1, -7/8) Spherical cup
[-7/8, -5/8) Trough
[-5/8, -3/8) Rut
[-3/8, -1/8) Saddle rut
[-1/8, +1/8) Saddle
[+1/8, +3/8) Saddle ridge
[+3/8, +5/8) Ridge
[+5/8, +7/8) Dome
[+7/8, +1] Spherical cap
=================== =============
Parameters
----------
image : ndarray
Input image.
sigma : float, optional
Standard deviation used for the Gaussian kernel, which is used for
smoothing the input data before Hessian eigen value calculation.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
s : ndarray
Shape index
References
----------
.. [1] <NAME>. & <NAME>.,
"Surface shape and curvature scales",
Image and Vision Computing, 1992, 10, 557-564.
:DOI:`10.1016/0262-8856(92)90076-F`
Examples
--------
>>> from skimage.feature import shape_index
>>> square = np.zeros((5, 5))
>>> square[2, 2] = 4
>>> s = shape_index(square, sigma=0.1)
>>> s
array([[ nan, nan, -0.5, nan, nan],
[ nan, -0. , nan, -0. , nan],
[-0.5, nan, -1. , nan, -0.5],
[ nan, -0. , nan, -0. , nan],
[ nan, nan, -0.5, nan, nan]])
"""
H = hessian_matrix(image, sigma=sigma, mode=mode, cval=cval, order='rc')
l1, l2 = hessian_matrix_eigvals(H)
return (2.0 / np.pi) * np.arctan((l2 + l1) / (l2 - l1))
def corner_kitchen_rosenfeld(image, mode='constant', cval=0):
"""Compute Kitchen and Rosenfeld corner measure response image.
The corner measure is calculated as follows::
(imxx * imy**2 + imyy * imx**2 - 2 * imxy * imx * imy)
/ (imx**2 + imy**2)
Where imx and imy are the first and imxx, imxy, imyy the second
derivatives.
Parameters
----------
image : ndarray
Input image.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
response : ndarray
Kitchen and Rosenfeld response image.
References
----------
.. [1] <NAME>., & <NAME>. (1982). Gray-level corner detection.
Pattern recognition letters, 1(2), 95-102.
:DOI:`10.1016/0167-8655(82)90020-4`
"""
imy, imx = _compute_derivatives(image, mode=mode, cval=cval)
imxy, imxx = _compute_derivatives(imx, mode=mode, cval=cval)
imyy, imyx = _compute_derivatives(imy, mode=mode, cval=cval)
numerator = (imxx * imy ** 2 + imyy * imx ** 2 - 2 * imxy * imx * imy)
denominator = (imx ** 2 + imy ** 2)
response = np.zeros_like(image, dtype=np.double)
import pickle
import copy
import utiltools.thirdparty.o3dhelper as o3dh
import utiltools.robotmath as rm
import utiltools.thirdparty.p3dhelper as p3dh
import environment.collisionmodel as cm
import numpy as np
import copy
from panda3d.core import NodePath
class Pattern(object):
def __init__(self, elearray=np.zeros((5,10))):
import subprocess, logging, winreg, requests, json, pandas, os, sys
import numpy as np
import os.path as osp
from threading import Thread
from flask import Flask, jsonify, request
from time import sleep, time
app = Flask(__name__)
log = logging.getLogger('werkzeug')
log.disabled = True
def split_data(points, times, cycle_min_duration=10):
cycles = []
cycle_times = []
slew_twists = np.hstack([0, np.where((points[1:, 0] > 0) & (points[:-1, 0] < 0))[0]])
for i in range(len(slew_twists) - 1):
if times[slew_twists[i + 1]] - times[slew_twists[i]] > cycle_min_duration:
cycle_time = times[slew_twists[i] - 1: slew_twists[i + 1] + 1]
cycle = points[slew_twists[i] - 1: slew_twists[i + 1] + 1, :]
cycles.append(cycle)
cycle_times.append(cycle_time)
return cycles, cycle_times
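# Illustrative sketch with synthetic data (assumed values, not simulator output):
# split_data cuts the recording at negative-to-positive zero crossings of the
# slew angle (column 0) and keeps only segments longer than cycle_min_duration.
def _example_split_data():
    times = np.arange(0.0, 60.0, 0.5)
    slew = 90.0 * np.sin(2.0 * np.pi * (times - 2.1) / 25.0)  # crossings near t = 2.1, 27.1, 52.1 s
    points = np.column_stack([slew, np.zeros_like(slew)])
    cycles, cycle_times = split_data(points, times, cycle_min_duration=10)
    assert len(cycles) == 2                                   # the short first segment is dropped
    assert all(ct[-1] - ct[0] > 20 for ct in cycle_times)     # each kept cycle spans ~one slew period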
def retrieve_original_dataset(data_dir='data/raw', tkey='Time', xkeys=['ForceR_Slew r', 'Cylinder_BoomLift_L x', 'Cylinder_DipperArm x', 'Cylinder_Bucket x'], nsteps=32):
# find data files
files = []
for f in os.listdir(data_dir):
fpath = osp.join(data_dir, f)
if osp.isfile(fpath):
files.append(fpath)
# retrieve data
data = []
for file in files:
data.append([])
p = pandas.read_csv(file, delimiter='\t', header=1)
n = p.shape[0]
points = np.zeros((n, len(xkeys)))
for i,key in enumerate(xkeys):
points[:, i] = p[key].values
times = p[tkey].values
pieces, piece_times = split_data(points, times)
for piece,piece_time in zip(pieces,piece_times):
x = np.zeros((nsteps, len(xkeys)))
tmin = piece_time[0]
tmax = piece_time[-1]
t = np.arange(nsteps) * (tmax - tmin) / nsteps + tmin
for j in range(piece.shape[1]):
x[:, j] = np.interp(t, piece_time, piece[:, j])
data[-1].append(x)
return data
def augment_data(sample, d_min=80, d_max=110):
sample_aug = []
dig_angle_orig = np.max([np.max(d[:, 0]) for d in sample])
dig_angle_new = d_min + np.random.rand() * (d_max - d_min)
dig_alpha = dig_angle_new / dig_angle_orig
for j in range(len(sample)):
a = sample[j][:, 0:1]
x = sample[j][:, 1:]
a_new = a
a_new[a_new[:, 0] > 0] *= dig_alpha
sample_aug.append(np.hstack([a_new, x]))
return sample_aug
def resample(x, m):
m_old = x.shape[0]
n = x.shape[1]
x_resampled = np.zeros((m, n))
for i in range(n):
x_resampled[:, i] = np.interp((np.arange(m) + 1) / m, (np.arange(m_old) + 1) / m_old, x[:, i])
return x_resampled
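# Tiny illustrative example (assumed values): resample() linearly interpolates
# each column of an (m_old, n) trajectory onto m evenly spaced points.
def _example_resample():
    ramp = np.column_stack([np.linspace(0.0, 1.0, 5), np.linspace(10.0, 20.0, 5)])
    out = resample(ramp, 10)
    assert out.shape == (10, 2)
    assert np.all(np.diff(out[:, 0]) >= 0)                               # monotone stays monotone
    assert np.isclose(out[-1, 0], 1.0) and np.isclose(out[-1, 1], 20.0)  # endpoints preserved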
def start_simulator(solver_args, http_url='http://127.0.0.1:5000', n_attempts=30, uri='ready'):
url = '{0}/{1}'.format(http_url, uri)
print('Trying to start solver...')
ready = False
while not ready:
attempt = 0
registered = False
subprocess.Popen(solver_args, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
while not registered:
try:
j = requests.get(url).json()
registered = j['ready']
except Exception as e:
print(e)
attempt += 1
if attempt >= n_attempts:
break
sleep(1.0)
if registered:
ready = True
print('Solver has successfully started!')
else:
print('Could not start solver :( Trying again...')
@app.route('/register')
def register(eid=0):
global backend
if not backend['ready']:
backend['ready'] = True
return jsonify({'id': eid})
@app.route('/ready', methods=['GET', 'POST'])
def assign_reset():
global backend
if request.method == 'POST':
backend['ready'] = False
return jsonify({'ready': backend['ready']})
@app.route('/mode', methods=['GET', 'POST'])
def mode():
global backend
data = request.data.decode('utf-8')
jdata = json.loads(data)
if request.method == 'GET':
backend['running'] = True
elif request.method == 'POST':
mode = jdata['mode']
backend['mode'] = mode
if mode == 'RESTART':
backend['running'] = False
return jsonify({'mode': backend['mode']})
@app.route('/p_target', methods=['GET', 'POST'])
def target():
global backend
data = request.data.decode('utf-8')
jdata = json.loads(data)
data_keys = ['<KEY>']
if request.method == 'GET':
for key in data_keys:
backend[key] = jdata[key]
if backend['y'] is not None:
y = backend['y'].copy()
backend['y'] = None # every time an excavator requests the target, we nullify it; this guarantees that the excavator operates with a fresh target
else:
y = None
mode = backend['mode']
return jsonify({'y': y, 'mode': mode})
elif request.method == 'POST':
backend['y'] = jdata['y']
data = {}
for key in data_keys:
data[key] = backend[key]
data['running'] = backend['running']
return jsonify(data)
def generate_demonstration_dataset(fname, mvs,
n_series=10,
http_url='http://127.0.0.1:5000',
mode_uri='mode',
dig_file = 'data/dig.txt',
emp_file='data/emp.txt',
n_steps = 8,
delay=1.0, x_thr=3.0, t_thr=3.0, m_thr=10.0, m_max=1000.0, t_max=60.0, a_thr=3.0
):
regkey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'Software\WOW6432Node\Mevea\Mevea Simulation Software')
(solverpath, _) = winreg.QueryValueEx(regkey, 'InstallPath')
solverpath += r'\Bin\MeveaSolver.exe'
winreg.CloseKey(regkey)
solver_args = [solverpath, r'/mvs', mvs]
best_mass = -np.inf
best_lost = np.inf
# main loop
for si in range(n_series):
start_simulator(solver_args)
ready = False
while not ready:
jdata = post_target()
if jdata is not None:
if jdata['running'] and jdata['x'] is not None and jdata['l'] is not None and jdata['m'] is not None:
ready = True
else:
sleep(delay)
requests.post('{0}/{1}'.format(http_url, mode_uri), json={'mode': 'AI_TRAIN'}).json()
idx = np.arange(len(data_orig))
sample_orig = data_orig[np.random.choice(idx)]
dsa = augment_data(sample_orig)
dumped_last = 0
T = []
Xd = []
Xe = []
D = []
C = []
Digs = []
Emps = []
M = []
for ci,cycle in enumerate(dsa):
cycle_time_start = time()
mass = np.zeros(cycle.shape[0])
dig_target = None
emp_target = None
for i in range(cycle.shape[0]):
target = cycle[i, :]
post_target(target)
in_target = np.zeros(4)
if ci > 0 and i == cycle.shape[0] // 2:
D.append((backend['d'] - dumped_last) / m_max)
dumped_last = backend['d']
t_start = time()
while not np.all(in_target):
current = backend['x']
dist_to_x = np.abs(np.array(current) - target)
for j in range(4):
if dist_to_x[j] < x_thr:
in_target[j] = 1
if (time() - t_start) > t_thr:
break
cycle[i, :] = backend['x']
mass[i] = backend['m']
if mass[i] > m_thr and dig_target is None:
dig_target = backend['x']
elif mass[i] < m_thr and dig_target is not None and emp_target is None and np.abs(dig_target[0] - backend['x'][0]) > a_thr:
emp_target = backend['x']
# check the targets
if dig_target is not None:
dig_target_angle = dig_target[0]
didx = np.where((cycle[:, 0] > dig_target_angle - a_thr) & (cycle[:, 0] < dig_target_angle + a_thr))[0]
if emp_target is not None:
emp_target_angle = emp_target[0]
eidx = np.where((cycle[:, 0] > emp_target_angle - a_thr) & (cycle[:, 0] < emp_target_angle + a_thr))[0]
# save the stats
c = (cycle - np.ones((cycle.shape[0], 1)) * x_min) / (np.ones((cycle.shape[0], 1)) * (x_max - x_min + 1e-10))
T.append((time() - cycle_time_start) / t_max)
if dig_target is not None:
Digs.append(c[didx, :])
Xd.append((dig_target - x_min) / (x_max - x_min + 1e-10))
if emp_target is not None:
Emps.append(c[eidx, :])
Xe.append((emp_target - x_min) / (x_max - x_min + 1e-10))
C.append(c.reshape(4 * cycle.shape[0]))
M.append(np.max(mass) / m_max)
# for the last cycle we wait a few seconds to let the simulator calculate the soil mass in the dumper
sleep(3.0)
D.append((backend['d'] - dumped_last) / m_max)
# save data to the files
if len(Xd) == n_cycles and len(Xe) == n_cycles:
for ci in range(n_cycles):
t = T[ci]
xd = Xd[ci]
xe = Xe[ci]
d = D[ci]
c = C[ci]
m = M[ci]
v = np.hstack([ci, xd, xe, t, m, d, c])
line = ','.join([str(item) for item in v])
with open(fname, 'a') as f:
f.write(line + '\n')
print(ci, t, xd, xe, m, d)
mass_array = np.hstack(M)
idx = np.argmax(mass_array)
if mass_array[idx] > best_mass:
best_mass = mass_array[idx]
dig = resample(Digs[idx], n_steps)
with open(dig_file, 'w') as f:
for x in dig:
line = ','.join([str(item) for item in x]) + '\n'
f.write(line)
lost_array = np.hstack([x - y for x,y in zip(M, D)])
idx = np.argmin(lost_array)
if lost_array[idx] < best_lost:
emp = resample(Emps[idx], n_steps)
with open(emp_file, 'w') as f:
for x in emp:
line = ','.join([str(item) for item in x]) + '\n'
f.write(line)
else:
print(Xd, Xe)
# stop the software
requests.post('{0}/{1}'.format(http_url, mode_uri), json={'mode': 'RESTART'}).json()
def post_target(target=None, http_url='http://127.0.0.1:5000', uri='p_target'):
url = '{0}/{1}'.format(http_url, uri)
if target is not None:
target = target.tolist()
try:
jdata = requests.post(url, json={'y': target}).json()
except Exception as e:
print(e)
jdata = None
return jdata
if __name__ == '__main__':
# process args
if len(sys.argv) == 2:
mvs = sys.argv[1]
else:
print('Please specify path to the excavator model!')
sys.exit(1)
# file name to save dataset
fname = 'data/cycles.txt'
if not osp.exists(fname):
open(fname, 'a').close()
# original data
n_cycles = 4
data_orig_all = retrieve_original_dataset()
data_orig = [series for series in data_orig_all if len(series) == n_cycles]
x_min = np.array([-180.0, 3.9024162648733514, 13.252630737652677, 16.775050853637147])
x_max = np.array([180.0, 812.0058600513476, 1011.7128949856826, 787.6024456729566])
import numpy as np
from mpi4py import MPI
from ppo_and_friends.utils.mpi_utils import rank_print
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
num_procs = comm.Get_size()
class RunningMeanStd(object):
"""
A running mean and std tracker.
NOTE: This is almost identical to the Stable Baselines'
implementation.
"""
def __init__(self,
shape = (),
epsilon = 1e-4):
"""
Arguments:
shape The shape of data to track.
epsilon A very small number to help avoid 0 errors.
"""
self.mean = np.zeros(shape, dtype=np.float32)
self.variance = np.ones(shape, dtype=np.float32)
self.count = epsilon
def update(self,
data,
gather_stats = True):
"""
Update the running stats.
Arguments:
data A new batch of data.
gather_stats Should we gather the data across processors
before computing stats?
"""
#
# If we're running with multiple processors, I've found that
# it's very important to normalize environments across all
# processors. This does add a bit of overhead, but not too much.
# NOTE: allgathers can be dangerous with large datasets.
# If this becomes problematic, we can perform all work on rank
# 0 and broadcast. That approach is just a bit slower.
#
if num_procs > 1 and gather_stats:
data = comm.allgather(data)
data = np.concatenate(data)
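# Illustrative sketch only (assumed, not necessarily this library's code): running
# trackers like the one above typically fold a batch into (mean, variance, count)
# with the standard parallel-moments merge (Chan et al.). A minimal standalone
# version of that merge, for reference:
def _merge_moments_example(mean, var, count, batch):
    batch_mean = np.mean(batch, axis=0)
    batch_var = np.var(batch, axis=0)
    batch_count = batch.shape[0]
    delta = batch_mean - mean
    total = count + batch_count
    new_mean = mean + delta * batch_count / total
    m2 = var * count + batch_var * batch_count + delta ** 2 * count * batch_count / total
    return new_mean, m2 / total, total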
from os import path
import numpy as np
from numpy.testing import *
import datetime
class TestDateTime(TestCase):
def test_creation(self):
for unit in ['Y', 'M', 'W', 'B', 'D',
'h', 'm', 's', 'ms', 'us',
'ns', 'ps', 'fs', 'as']:
dt1 = np.dtype('M8[750%s]' % unit)
assert dt1 == np.dtype('datetime64[750%s]' % unit)
dt2 = np.dtype('m8[%s]' % unit)
assert dt2 == np.dtype('timedelta64[%s]' % unit)
def test_divisor_conversion_year(self):
assert np.dtype('M8[Y/4]') == np.dtype('M8[3M]')
assert np.dtype('M8[Y/13]') == np.dtype('M8[4W]')
assert np.dtype('M8[3Y/73]') == np.dtype('M8[15D]')
def test_divisor_conversion_month(self):
assert np.dtype('M8[M/2]') == np.dtype('M8[2W]')
assert np.dtype('M8[M/15]') == np.dtype('M8[2D]')
assert np.dtype('M8[3M/40]') == np.dtype('M8[54h]')
def test_divisor_conversion_week(self):
assert np.dtype('m8[W/5]') == np.dtype('m8[B]')
assert np.dtype('m8[W/7]') == np.dtype('m8[D]')
assert np.dtype('m8[3W/14]') == np.dtype('m8[36h]')
assert np.dtype('m8[5W/140]') == np.dtype('m8[360m]')
def test_divisor_conversion_bday(self):
assert np.dtype('M8[B/12]') == np.dtype('M8[2h]')
assert np.dtype('M8[B/120]') == np.dtype('M8[12m]')
assert np.dtype('M8[3B/960]') == np.dtype('M8[270s]')
def test_divisor_conversion_day(self):
assert np.dtype('M8[D/12]') == np.dtype('M8[2h]')
assert np.dtype('M8[D/120]') == np.dtype('M8[12m]')
assert np.dtype('M8[3D/960]') == np.dtype('M8[270s]')
def test_divisor_conversion_hour(self):
assert np.dtype('m8[h/30]') == np.dtype('m8[2m]')
assert np.dtype('m8[3h/300]') == np.dtype('m8[36s]')
def test_divisor_conversion_minute(self):
assert np.dtype('m8[m/30]') == np.dtype('m8[2s]')
assert np.dtype('m8[3m/300]') == np.dtype('m8[600ms]')
def test_divisor_conversion_second(self):
assert np.dtype('m8[s/100]') == np.dtype('m8[10ms]')
assert np.dtype('m8[3s/10000]') == np.dtype('m8[300us]')
def test_divisor_conversion_fs(self):
assert np.dtype('M8[fs/100]') == np.dtype('M8[10as]')
self.assertRaises(ValueError, lambda : np.dtype('M8[3fs/10000]'))
def test_divisor_conversion_as(self):
self.assertRaises(ValueError, lambda : np.dtype('M8[as/10]'))
def test_creation_overflow(self):
date = '1980-03-23 20:00:00'
timesteps = np.array([date], dtype='datetime64[s]')[0].astype(np.int64)
for unit in ['ms', 'us', 'ns']:
timesteps *= 1000
x = np.array([date], dtype='datetime64[%s]' % unit)
assert_equal(timesteps, x[0].astype(np.int64),
err_msg='Datetime conversion error for unit %s' % unit)
assert_equal(x[0].astype(np.int64), 322689600000000000)
class TestDateTimeModulo(TestCase):
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_years(self):
timesteps = np.array([0,1,2], dtype='datetime64[Y]//10')
assert timesteps[0] == np.datetime64('1970')
assert timesteps[1] == np.datetime64('1980')
assert timesteps[2] == np.datetime64('1990')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_months(self):
timesteps = np.array([0,1,2], dtype='datetime64[M]//10')
assert timesteps[0] == np.datetime64('1970-01')
assert timesteps[1] == np.datetime64('1970-11')
assert timesteps[2] == np.datetime64('1971-09')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_weeks(self):
timesteps = np.array([0,1,2], dtype='datetime64[W]//3')
assert timesteps[0] == np.datetime64('1970-01-01')
assert timesteps[1] == np.datetime64('1970-01-22')
assert timesteps[2] == np.datetime64('1971-02-12')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_business_days(self):
timesteps = np.array([0,1,2], dtype='datetime64[B]//4')
assert timesteps[0] == np.datetime64('1970-01-01')
assert timesteps[1] == np.datetime64('1970-01-07')
assert timesteps[2] == np.datetime64('1971-01-13')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_days(self):
timesteps = np.array([0,1,2], dtype='datetime64[D]//17')
assert timesteps[0] == np.datetime64('1970-01-01')
assert timesteps[1] == np.datetime64('1970-01-18')
assert timesteps[2] == np.datetime64('1971-02-04')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_hours(self):
timesteps = np.array([0,1,2], dtype='datetime64[h]//17')
assert timesteps[0] == np.datetime64('1970-01-01 00')
assert timesteps[1] == np.datetime64('1970-01-01 17')
assert timesteps[2] == np.datetime64('1970-01-02 10')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_minutes(self):
timesteps = np.array([0,1,2], dtype='datetime64[m]//42')
assert timesteps[0] == np.datetime64('1970-01-01 00:00')
assert timesteps[1] == np.datetime64('1970-01-01 00:42')
assert timesteps[2] == np.datetime64('1970-01-01 01:24')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_seconds(self):
timesteps = np.array([0,1,2], dtype='datetime64[s]//42')
assert timesteps[0] == np.datetime64('1970-01-01 00:00:00')
assert timesteps[1] == np.datetime64('1970-01-01 00:00:42')
assert timesteps[2] == np.datetime64('1970-01-01 00:01:24')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_milliseconds(self):
timesteps = np.array([0,1,2], dtype='datetime64[ms]//42')
assert timesteps[0] == np.datetime64('1970-01-01 00:00:00.000')
assert timesteps[1] == np.datetime64('1970-01-01 00:00:00.042')
assert timesteps[2] == np.datetime64('1970-01-01 00:00:00.084')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_microseconds(self):
timesteps = np.array([0,1,2], dtype='datetime64[us]//42')
assert timesteps[0] == np.datetime64('1970-01-01 00:00:00.000000')
assert timesteps[1] == np.datetime64('1970-01-01 00:00:00.000042')
assert timesteps[2] == np.datetime64('1970-01-01 00:00:00.000084')
@dec.knownfailureif(True, "datetime modulo fails.")
def test_modulo_nanoseconds(self):
timesteps = np.array([0,1,2], dtype='datetime64[ns]//42')
assert timesteps[0] == np.datetime64('1970-01-01 00:00:00.000000000')
# -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as st
from abc import ABCMeta, abstractmethod
from .mvar.comp import ldl
from .mvarmodel import Mvar
from .aec.utils import filter_band, calc_ampenv, FQ_BANDS
import six
from six.moves import map
from six.moves import range
from six.moves import zip
########################################################################
# Spectrum functions:
########################################################################
def spectrum(acoef, vcoef, fs=1, resolution=100):
"""
Compute the z-transformed coefficient matrix A(f), transfer matrix H(f) and spectral matrix S(f) from MVAR coefficients.
Args:
*acoef* : numpy.array
array of shape (p, k, k) where *k* is number of channels and
*p* is a model order.
*vcoef* : numpy.array
prediction error matrix (k, k)
*fs* = 1 : int
sampling rate
*resolution* = 100 : int
number of spectrum data points
Returns:
*A_z* : numpy.array
z-transformed A(f) complex matrix in shape (*resolution*, k, k)
*H_z* : numpy.array
inversion of *A_z*
*S_z* : numpy.array
spectrum matrix (*resolution*, k, k)
References:
.. [1] <NAME>, <NAME>, <NAME> (2004) “Granger causality
and information flow in multivariate processes”
Physical Review E 70, 050902.
"""
p, k, k = acoef.shape
freqs = np.linspace(0, fs*0.5, resolution)
A_z = np.zeros((len(freqs), k, k), complex)
H_z = np.zeros((len(freqs), k, k), complex)
S_z = np.zeros((len(freqs), k, k), complex)
I = np.eye(k, dtype=complex)
for e, f in enumerate(freqs):
epot = np.zeros((p, 1), complex)
ce = np.exp(-2.j*np.pi*f*(1./fs))
epot[0] = ce
for k in range(1, p):
epot[k] = epot[k-1]*ce
A_z[e] = I - np.sum([epot[x]*acoef[x] for x in range(p)], axis=0)
H_z[e] = np.linalg.inv(A_z[e])
S_z[e] = np.dot(np.dot(H_z[e], vcoef), H_z[e].T.conj())
return A_z, H_z, S_z
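# Minimal usage sketch (assumed toy model, for illustration only): a stable
# two-channel AR(1) process. The transfer matrix H(f) = A(f)^-1 and the spectrum
# S(f) = H(f) V H(f)^* come out with shape (resolution, k, k).
def _example_spectrum():
    acoef = np.array([[[0.5, 0.3],
                       [0.0, 0.4]]])   # shape (p=1, k=2, k=2)
    vcoef = np.eye(2)
    A_z, H_z, S_z = spectrum(acoef, vcoef, fs=128, resolution=64)
    assert A_z.shape == H_z.shape == S_z.shape == (64, 2, 2)
    assert np.allclose(S_z, S_z.conj().transpose(0, 2, 1))   # each S(f) is Hermitian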
def spectrum_inst(acoef, vcoef, fs=1, resolution=100):
"""
Generating data point from matrix *A* with MVAR coefficients taking
into account zero-lag effects.
Args:
*acoef* : numpy.array
array of shape (k, k, p+1) where *k* is number of channels and
*p* is a model order. acoef[0] - is (k, k) matrix for zero lag,
acoef[1] for one data point lag and so on.
*vcoef* : numpy.array
prediction error matrix (k, k)
*fs* = 1 : int
sampling rate
*resolution* = 100 : int
number of spectrum data points
Returns:
*A_z* : numpy.array
z-transformed A(f) complex matrix in shape (*resolution*, k, k)
*H_z* : numpy.array
inversion of *A_z*
*S_z* : numpy.array
spectrum matrix (*resolution*, k, k)
References:
.. [1] <NAME>, Multivariate Autoregressive Model with
Instantaneous Effects to Improve Brain Connectivity Estimation,
Int. J. Bioelectromagn. 11, 74–79 (2009).
"""
p, k, k = acoef.shape
freqs = np.linspace(0, fs/2, resolution)
B_z = np.zeros((len(freqs), k, k), complex)
L, U, Lt = ldl(vcoef)
Linv = np.linalg.inv(L)
I = np.eye(k, dtype=complex)
bcoef = np.array([np.dot(Linv, acoef[x]) for x in range(p)])
b0 = np.eye(k) - Linv
for e, f in enumerate(freqs):
epot = np.zeros((p, 1), complex)
ce = np.exp(-2.j*np.pi*f*(1./fs))
epot[0] = ce
for k in range(1, p):
epot[k] = epot[k-1]*ce
B_z[e] = I - b0 - np.sum([epot[x]*bcoef[x] for x in range(p)], axis=0)
return B_z
########################################################################
# Connectivity classes:
########################################################################
class Connect(six.with_metaclass(ABCMeta, object)):
"""
Abstract class governing calculation of various connectivity estimators
with concrete methods: *short_time*, *significance* and
abstract *calculate*.
"""
def __init__(self):
self.values_range = [None, None] # normalization bands
self.two_sided = False # only positive, or also negative values
@abstractmethod
def calculate(self):
"""Abstract method to calculate values of estimators from specific
parameters"""
pass
def short_time(self, data, nfft=None, no=None, **params):
"""
Short-time version of the estimator, where data is windowed into parts
of length *nfft* and overlap *no*. *params* catch additional
parameters specific for estimator.
Args:
*data* : numpy.array
data matrix (kXN) or (kXNxR) where k - channels,
N - data points, R - nr of trials
*nfft* = None : int
window length (if None it's N/5)
*no* = None : int
overlap length (if None it's N/10)
*params* :
additional parameters specific for chosen estimator
Returns:
*stvalues* : numpy.array
short time values (time points, frequency, k, k), where k
is number of channels
"""
assert nfft > no, "overlap must be smaller than window"
if data.ndim > 2:
k, N, trls = data.shape
else:
k, N = data.shape
trls = 0
if not nfft:
nfft = int(N/5)
if not no:
no = int(N/10)
slices = range(0, N, int(nfft-no))
for e, i in enumerate(slices):
if i+nfft >= N:
if trls:
datcut = np.concatenate((data[:, i:i+nfft], np.zeros((k, i+nfft-N, trls))), axis=1)
else:
datcut = np.concatenate((data[:, i:i+nfft], np.zeros((k, i+nfft-N))), axis=1)
else:
datcut = data[:, i:i+nfft]
if e == 0:
rescalc = self.calculate(datcut, **params)
stvalues = np.zeros((len(slices), rescalc.shape[0], k, k))
stvalues[e] = rescalc
continue
stvalues[e] = self.calculate(datcut, **params)
return stvalues
def short_time_significance(self, data, Nrep=10, alpha=0.05,
nfft=None, no=None, verbose=True, **params):
"""
Significance of short-tme versions of estimators. It base on
bootstrap :func:`Connect.bootstrap` for multitrial case and
surrogate data :func:`Connect.surrogate` for one trial.
Args:
*data* : numpy.array
data matrix (kXN) or (kXNxR) where k - channels,
N - data points, R - nr of trials
*Nrep* = 100 : int
number of resamples
*alpha* = 0.05 : float
type I error rate (significance level)
*nfft* = None : int
window length (if None it's N/5)
*no* = None : int
overlap length (if None it's N/10)
*verbose* = True : bool
if True it prints dot on every realization, if False it's
quiet.
*params* :
additional parameters specific for chosen estimator
Returns:
*signi_st* : numpy.array
short time significance values in shape of
- (tp, k, k) for one sided estimator
- (tp, 2, k, k) for two sided
where k is number of channels and tp number of time points
"""
assert nfft > no, "overlap must be smaller than window"
if data.ndim > 2:
k, N, trls = data.shape
else:
k, N = data.shape
trls = 0
if not nfft:
nfft = int(N/5)
if not no:
no = int(N/10)
slices = range(0, N, int(nfft-no))
if self.two_sided:
signi_st = np.zeros((len(slices), 2, k, k))
else:
signi_st = np.zeros((len(slices), k, k))
for e, i in enumerate(slices):
if i+nfft >= N:
if trls:
datcut = np.concatenate((data[:, i:i+nfft], np.zeros((k, i+nfft-N, trls))), axis=1)
else:
datcut = np.concatenate((data[:, i:i+nfft], np.zeros((k, i+nfft-N))), axis=1)
else:
datcut = data[:, i:i+nfft]
signi_st[e] = self.significance(datcut, Nrep=Nrep,
alpha=alpha, verbose=verbose, **params)
return signi_st
def significance(self, data, Nrep=10, alpha=0.05, verbose=True, **params):
"""
Significance of connectivity estimators. It is based on
bootstrap :func:`Connect.bootstrap` for multitrial case and
surrogate data :func:`Connect.surrogate` for one trial.
Args:
*data* : numpy.array
data matrix (kXN) or (kXNxR) where k - channels,
N - data points, R - nr of trials
*Nrep* = 100 : int
number of resamples
*alpha* = 0.05 : float
type I error rate (significance level)
*verbose* = True : bool
if True it prints dot on every realization, if False it's
quiet.
*params* :
additional parameters specific for chosen estimator
Returns:
*signific* : numpy.array
significance values, check :func:`Connect.levels`
"""
if data.ndim > 2:
signific = self.bootstrap(data, Nrep=Nrep, alpha=alpha, verbose=verbose, **params)
else:
signific = self.surrogate(data, Nrep=Nrep, alpha=alpha, verbose=verbose, **params)
return signific
def levels(self, signi, alpha, k):
"""
Levels of significance
Args:
*signi* : numpy.array
bootstraped values of each channel
*alpha* : float
type I error rate (significance level) - from 0 to 1
- (1-*alpha*) for onesided estimators (e.g. class:`DTF`)
- *alpha* and (1-*alpha*) for twosided (e.g. class:`PSI`)
*k* : int
number of channels
Returns:
*ficance* : numpy.array
maximal value throughout frequency of score at percentile
at level 1-*alpha*
- (k, k) for one sided estimator
- (2, k, k) for two sided
"""
if self.two_sided:
ficance = np.zeros((2, k, k))
else:
ficance = np.zeros((k, k))
for i in range(k):
for j in range(k):
if self.two_sided:
ficance[0][i][j] = np.min(st.scoreatpercentile(signi[:, :, i, j], alpha*100, axis=1))
ficance[1][i][j] = np.max(st.scoreatpercentile(signi[:, :, i, j], (1-alpha)*100, axis=1))
else:
ficance[i][j] = np.min(st.scoreatpercentile(signi[:, :, i, j], (1-alpha)*100, axis=1))
return ficance
def __calc_multitrial(self, data, **params):
"Calc multitrial averaged estimator for :func:`Connect.bootstrap`"
trials = data.shape[2]
chosen = np.random.randint(trials, size=trials)
bc = np.bincount(chosen)
idxbc = np.nonzero(bc)[0]
flag = True
for num, occurence in zip(idxbc, bc[idxbc]):
if occurence > 0:
trdata = data[:, :, num]
if flag:
rescalc = self.calculate(trdata, **params)*occurence
flag = False
continue
rescalc += self.calculate(trdata, **params)*occurence
return rescalc/trials
def bootstrap(self, data, Nrep=100, alpha=0.05, verbose=True, **params):
"""
Bootstrap - random sampling with replacement of trials.
Args:
*data* : numpy.array
multichannel data matrix
*Nrep* = 100 : int
number of resamples
*alpha* = 0.05 : float
type I error rate (significance level)
*verbose* = True : bool
if True it prints dot on every realization, if False it's
quiet.
*params* :
additional parameters specific for chosen estimator
Returns:
*levelsigni* : numpy.array
significance values, check :func:`Connect.levels`
"""
for i in range(Nrep):
if verbose:
print('.', end=' ')
if i == 0:
tmpsig = self.__calc_multitrial(data, **params)
fres, k, k = tmpsig.shape
signi = np.zeros((Nrep, fres, k, k))
signi[i] = tmpsig
else:
signi[i] = self.__calc_multitrial(data, **params)
if verbose:
print('|')
return self.levels(signi, alpha, k)
def surrogate(self, data, Nrep=100, alpha=0.05, verbose=True, **params):
"""
Surrogate data testing. Mixing data points in each channel.
Significance level is calculated over all *Nrep* surrogate sets.
Args:
*data* : numpy.array
multichannel data matrix
*Nrep* = 100 : int
number of resamples
*alpha* = 0.05 : float
type I error rate (significance level)
*verbose* = True : bool
if True it prints dot on every realization, if False it's
quiet.
*params* :
additional parameters specific for chosen estimator
Returns:
*levelsigni* : numpy.array
significance values, check :func:`Connect.levels`
"""
k, N = data.shape
shdata = data.copy()
for i in range(Nrep):
if verbose:
print('.', end=' ')
for ch in range(k):
np.random.shuffle(shdata[ch,:])
if i == 0:
rtmp = self.calculate(shdata, **params)
reskeeper = np.zeros((Nrep, rtmp.shape[0], k, k))
reskeeper[i] = rtmp
continue
reskeeper[i] = self.calculate(shdata, **params)
if verbose:
print('|')
return self.levels(reskeeper, alpha, k)
class ConnectAR(six.with_metaclass(ABCMeta, Connect)):
"""
Inherits from *Connect* class and governs calculation of various
connectivity estimators based on MVAR model methods. It overloads
*short_time*, *significance* methods but *calculate* remains abstract.
"""
def __init__(self):
super(ConnectAR, self).__init__()
self.values_range = [0, 1]
def short_time(self, data, nfft=None, no=None, mvarmethod='yw',
order=None, resol=None, fs=1):
"""
It overloads :class:`ConnectAR` method :func:`Connect.short_time`.
Short-time version of the estimator, where data is windowed into parts
of length *nfft* and overlap *no*. *params* catch additional
parameters specific for estimator.
Args:
*data* : numpy.array
data matrix (kXN) or (kXNxR) where k - channels,
N - data points, R - nr of trials
*nfft* = None : int
window length (if None it's N/5)
*no* = None : int
overlap length (if None it's N/10)
*mvarmethod* = 'yw' :
MVAR parameters estimation method
all available methods you can find in *fitting_algorithms*
*order* = None:
MVAR model order; it None, it is set automatically basing
on default criterion.
*resol* = None:
frequency resolution; if None, it is 100.
*fs* = 1 :
sampling frequency
Returns:
*stvalues* : numpy.array
short time values (time points, frequency, k, k), where k
is number of channels
"""
assert nfft > no, "overlap must be smaller than window"
if data.ndim > 2:
k, N, trls = data.shape
else:
k, N = data.shape
trls = 0
if not nfft:
nfft = int(N/5)
if not no:
no = int(N/10)
slices = range(0, N, int(nfft-no))
for e, i in enumerate(slices):
if i+nfft >= N:
if trls:
datcut = np.concatenate((data[:, i:i+nfft], np.zeros((k, i+nfft-N, trls))), axis=1)
else:
datcut = np.concatenate((data[:, i:i+nfft], np.zeros((k, i+nfft-N))), axis=1)
else:
datcut = data[:, i:i+nfft]
ar, vr = Mvar().fit(datcut, order, mvarmethod)
if e == 0:
rescalc = self.calculate(ar, vr, fs, resol)
stvalues = np.zeros((len(slices), rescalc.shape[0], k, k))
stvalues[e] = rescalc
continue
stvalues[e] = self.calculate(ar, vr, fs, resol)
return stvalues
def short_time_significance(self, data, Nrep=100, alpha=0.05, method='yw',
order=None, fs=1, resolution=None,
nfft=None, no=None, verbose=True, **params):
"""
Significance of short-tme versions of estimators. It base on
bootstrap :func:`ConnectAR.bootstrap` for multitrial case and
surrogate data :func:`ConnectAR.surrogate` for one trial.
Args:
*data* : numpy.array
data matrix (kXN) or (kXNxR) where k - channels,
N - data points, R - nr of trials
*Nrep* = 100 : int
number of resamples
*alpha* = 0.05 : float
type I error rate (significance level)
*method* = 'yw': str
method of MVAR parameters estimation
all available methods you can find in *fitting_algorithms*
*order* = None : int
MVAR model order, if None, it's chosen using default criterion
*fs* = 1 : int
sampling frequency
*resolution* = None : int
resolution (if None, it's 100 points)
*nfft* = None : int
window length (if None it's N/5)
*no* = None : int
overlap length (if None it's N/10)
*verbose* = True : bool
if True it prints dot on every realization, if False it's
quiet.
*params* :
additional parameters specific for chosen estimator
Returns:
*signi_st* : numpy.array
short time significance values in shape of
- (tp, k, k) for one sided estimator
- (tp, 2, k, k) for two sided
where k is number of channels and tp number of time points
"""
assert nfft > no, "overlap must be smaller than window"
if data.ndim > 2:
k, N, trls = data.shape
else:
k, N = data.shape
trls = 0
if not nfft:
nfft = int(N/5)
if not no:
no = int(N/10)
slices = range(0, N, int(nfft-no))
signi_st = np.zeros((len(slices), k, k))
for e, i in enumerate(slices):
if i+nfft >= N:
if trls:
datcut = np.concatenate((data[:, i:i+nfft], np.zeros((k, i+nfft-N, trls))), axis=1)
else:
datcut = np.concatenate((data[:, i:i+nfft], np.zeros((k, i+nfft-N))), axis=1)
else:
datcut = data[:, i:i+nfft]
signi_st[e] = self.significance(datcut, method, order=order, resolution=resolution,
Nrep=Nrep, alpha=alpha, verbose=verbose, **params)
return signi_st
def __calc_multitrial(self, data, method='yw', order=None, fs=1, resolution=None, **params):
"Calc multitrial averaged estimator for :func:`ConnectAR.bootstrap`"
trials = data.shape[2]
chosen = np.random.randint(trials, size=trials)
ar, vr = Mvar().fit(data[:, :, chosen], order, method)
rescalc = self.calculate(ar, vr, fs, resolution)
return rescalc
def significance(self, data, method, order=None, resolution=None, Nrep=10, alpha=0.05, verbose=True, **params):
"""
Significance of connectivity estimators. It is based on
bootstrap :func:`ConnectAR.bootstrap` for multitrial case and
surrogate data :func:`ConnectAR.surrogate` for one trial.
Args:
*data* : numpy.array
data matrix
*method* = 'yw': str
method of MVAR parameters estimation
all available methods you can find in *fitting_algorithms*
*order* = None : int
MVAR model order, if None, it's chosen using default criterion
*Nrep* = 100 : int
number of resamples
*alpha* = 0.05 : float
type I error rate (significance level)
*resolution* = None : int
resolution (if None, it's 100 points)
*verbose* = True : bool
if True it prints dot on every realization, if False it's
quiet.
*params* :
additional parameters specific for chosen estimator
Returns:
*signi_st* : numpy.array
significance values, check :func:`Connect.levels`
"""
if data.ndim > 2:
signific = self.bootstrap(data, method, order=order, resolution=resolution,
Nrep=Nrep, alpha=alpha, verbose=verbose, **params)
else:
signific = self.surrogate(data, method, order=order, resolution=resolution,
Nrep=Nrep, alpha=alpha, verbose=verbose, **params)
return signific
def bootstrap(self, data, method, order=None, Nrep=10, alpha=0.05, fs=1, verbose=True, **params):
"""
Bootstrap - random sampling with replacement of trials for *ConnectAR*.
Args:
*data* : numpy.array
multichannel data matrix
*method* : str
method of MVAR parameters estimation
all available methods you can find in *fitting_algorithms*
*Nrep* = 100 : int
number of resamples
*alpha* = 0.05 : float
type I error rate (significance level)
*order* = None : int
MVAR model order, if None, it's chosen using default criterion
*verbose* = True : bool
if True it prints dot on every realization, if False it's
quiet.
*params* :
additional parameters specific for chosen estimator
Returns:
*levelsigni* : numpy.array
significance values, check :func:`Connect.levels`
"""
resolution = 100
if 'resolution' in params and params['resolution']:
resolution = params['resolution']
for i in range(Nrep):
if verbose:
print('.', end=' ')
if i == 0:
tmpsig = self.__calc_multitrial(data, method, order, fs, resolution)
fres, k, k = tmpsig.shape
signi = np.zeros((Nrep, fres, k, k))
signi[i] = tmpsig
else:
signi[i] = self.__calc_multitrial(data, method, order, fs, resolution)
if verbose:
print('|')
return self.levels(signi, alpha, k)
def surrogate(self, data, method, Nrep=10, alpha=0.05, order=None, fs=1, verbose=True, **params):
"""
Surrogate data testing for *ConnectAR*. Mixing data points in each channel.
Significance level is calculated over all *Nrep* surrogate sets.
Args:
*data* : numpy.array
multichannel data matrix
*method* : str
method of MVAR parameters estimation
all available methods you can find in *fitting_algorithms*
*Nrep* = 100 : int
number of resamples
*alpha* = 0.05 : float
type I error rate (significance level)
*order* = None : int
MVAR model order, if None, it's chosen using default criterion
*verbose* = True : bool
if True it prints dot on every realization, if False it's
quiet.
*params* :
additional parameters specific for chosen estimator
Returns:
*levelsigni* : numpy.array
significance values, check :func:`Connect.levels`
"""
shdata = data.copy()
k, N = data.shape
resolution = 100
if 'resolution' in params and params['resolution']:
resolution = params['resolution']
for i in range(Nrep):
if verbose:
print('.', end=' ')
list(map(np.random.shuffle, shdata))
ar, vr = Mvar().fit(shdata, order, method)
if i == 0:
rtmp = self.calculate(ar, vr, fs, resolution)
reskeeper = np.zeros((Nrep, rtmp.shape[0], k, k))
reskeeper[i] = rtmp
continue
reskeeper[i] = self.calculate(ar, vr, fs, resolution)
if verbose:
print('|')
return self.levels(reskeeper, alpha, k)
############################
# MVAR based methods:
def dtf_fun(Acoef, Vcoef, fs, resolution, generalized=False):
"""
Directed Transfer Function estimation from MVAR parameters.
Args:
*Acoef* : numpy.array
array of shape (p, k, k) where *k* is number of channels and
*p* is a model order.
*Vcoef* : numpy.array
prediction error matrix (k, k)
*fs* = 1 : int
sampling rate
*resolution* = 100 : int
number of spectrum data points
*generalized* = False : bool
generalized version or not
Returns:
*DTF* : numpy.array
matrix with estimation results (*resolution*, k, k)
References:
.. [1] <NAME>, <NAME>. A new method of the description
of the information flow. Biol.Cybern. 65:203-210, (1991).
"""
A_z, H_z, S_z = spectrum(Acoef, Vcoef, fs, resolution=resolution)
res, k, k = A_z.shape
DTF = np.zeros((res, k, k))
if generalized:
sigma = np.diag(Vcoef)
else:
sigma = np.ones(k)
for i in range(res):
mH = sigma*np.dot(H_z[i], H_z[i].T.conj()).real
DTF[i] = (np.sqrt(sigma)*np.abs(H_z[i]))/np.sqrt(np.diag(mH)).reshape((k, 1))
return DTF
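# Quick illustrative property check (assumed toy model): with generalized=False
# every row of DTF(f) is normalized, i.e. sum_j DTF_ij(f)**2 == 1, which follows
# directly from the division by sqrt(diag(H H^*)) above.
def _example_dtf_normalization():
    acoef = np.array([[[0.5, 0.3],
                       [0.0, 0.4]]])   # (p=1, k=2, k=2), a stable toy AR model
    vcoef = np.eye(2)
    dtf = dtf_fun(acoef, vcoef, fs=128, resolution=32)
    assert np.allclose((dtf ** 2).sum(axis=2), 1.0)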
def pdc_fun(Acoef, Vcoef, fs, resolution, generalized=False):
"""
Partial Directed Coherence estimation from MVAR parameters.
Args:
*Acoef* : numpy.array
array of shape (p, k, k) where *k* is number of channels and
*p* is a model order.
*Vcoef* : numpy.array
prediction error matrix (k, k)
*fs* = 1 : int
sampling rate
*resolution* = 100 : int
number of spectrum data points
*generalized* = False : bool
generalized version or not
Returns:
*PDC* : numpy.array
matrix with estimation results (*resolution*, k, k)
References:
.. [1] <NAME>., <NAME>., Partial directed
coherence: a new concept in neural structure determination.,
2001, Biol. Cybern. 84, 463–474.
"""
A_z, H_z, S_z = spectrum(Acoef, Vcoef, fs, resolution=resolution)
res, k, k = A_z.shape
PDC = np.zeros((res, k, k))
sigma = np.diag(Vcoef)
for i in range(res):
mA = (1./sigma[:, None])*np.dot(A_z[i].T.conj(), A_z[i]).real
PDC[i] = np.abs(A_z[i]/np.sqrt(sigma))/np.sqrt(np.diag(mA))
return PDC
class PartialCoh(ConnectAR):
"""
PartialCoh - class inherits from :class:`ConnectAR` and overloads
:func:`Connect.calculate` method.
"""
def calculate(self, Acoef=None, Vcoef=None, fs=None, resolution=None):
"""
Partial Coherence estimation from MVAR parameters.
Args:
*Acoef* : numpy.array
array of shape (p, k, k) where *k* is number of channels and
*p* is a model order.
*Vcoef* : numpy.array
prediction error matrix (k, k)
*fs* = 1 : int
sampling rate
*resolution* = 100 : int
number of spectrum data points
*generalized* = False : bool
generalized version or not
Returns:
*PC* : numpy.array
matrix with estimation results (*resolution*, k, k)
References:
.. [1] <NAME>, <NAME>. Spectral Analysis and its
Applications. Holden-Day, USA, 1969
"""
A_z, H_z, S_z = spectrum(Acoef, Vcoef, fs, resolution=resolution)
res, k, k = A_z.shape
PC = np.zeros((res, k, k))
before = np.ones((k, k))
before[0::2, :] *= -1
before[:, 0::2] *= -1
for i in range(res):
D_z = np.linalg.inv(S_z[i])
dd = np.tile(np.diag(D_z), (k, 1))
mD = (dd*dd.T).real
PC[i] = -1*before*(np.abs(D_z)/np.sqrt(mD))
return np.abs(PC)
class PDC(ConnectAR):
"""
PDC - class inherits from :class:`ConnectAR` and overloads
:func:`Connect.calculate` method.
"""
def calculate(self, Acoef=None, Vcoef=None, fs=None, resolution=100):
"More in :func:`pdc_fun`."
return pdc_fun(Acoef, Vcoef, fs, resolution)
class gPDC(ConnectAR):
"""
gPDC - class inherits from :class:`ConnectAR` and overloads
:func:`Connect.calculate` method.
"""
def calculate(self, Acoef=None, Vcoef=None, fs=None, resolution=100):
"More in :func:`pdc_fun`"
return pdc_fun(Acoef, Vcoef, fs, resolution, generalized=True)
class DTF(ConnectAR):
"""
DTF - class inherits from :class:`ConnectAR` and overloads
:func:`Connect.calculate` method.
"""
def calculate(self, Acoef=None, Vcoef=None, fs=None, resolution=100):
"More in :func:`dtf_fun`."
return dtf_fun(Acoef, Vcoef, fs, resolution)
class gDTF(ConnectAR):
"""
gDTF - class inherits from :class:`ConnectAR` and overloads
:func:`Connect.calculate` method.
"""
def calculate(self, Acoef=None, Vcoef=None, fs=None, resolution=100):
"More in :func:`dtf_fun`."
return dtf_fun(Acoef, Vcoef, fs, resolution, generalized=True)
class ffDTF(ConnectAR):
"""
ffDTF - class inherits from :class:`ConnectAR` and overloads
:func:`Connect.calculate` method.
"""
def calculate(self, Acoef=None, Vcoef=None, fs=None, resolution=100):
"""
full-frequency Directed Transfer Function estimation from MVAR
parameters.
Args:
*Acoef* : numpy.array
array of shape (k, k, p) where *k* is number of channels and
*p* is a model order.
*Vcoef* : numpy.array
prediction error matrix (k, k)
*fs* = 1 : int
sampling rate
*resolution* = 100 : int
number of spectrum data points
*generalized* = False : bool
generalized version or not
Returns:
*ffDTF* : numpy.array
matrix with estimation results (*resolution*, k, k)
References:
            .. [1] Korzeniewska, A. et al. Determination of information flow direction
among brain structures by a modified directed transfer function (dDTF)
method. J. Neurosci. Methods 125, 195–207 (2003).
"""
A_z, H_z, S_z = spectrum(Acoef, Vcoef, fs, resolution=resolution)
res, k, k = A_z.shape
mH = np.zeros((res, k, k))
for i in range(res):
mH[i] = np.abs(np.dot(H_z[i], H_z[i].T.conj()))
mHsum = np.sum(mH, axis=0)
ffDTF = np.zeros((res, k, k))
for i in range(res):
ffDTF[i] = (np.abs(H_z[i]).T/np.sqrt(np.diag(mHsum))).T
return ffDTF
class dDTF(ConnectAR):
"""
dDTF - class inherits from :class:`ConnectAR` and overloads
:func:`Connect.calculate` method.
"""
def calculate(self, Acoef=None, Vcoef=None, fs=None, resolution=100):
"""
direct Directed Transfer Function estimation from MVAR
        parameters. dDTF is the DTF multiplied at each frequency by
        the Partial Coherence.
Args:
*Acoef* : numpy.array
array of shape (k, k, p) where *k* is number of channels and
*p* is a model order.
*Vcoef* : numpy.array
prediction error matrix (k, k)
*fs* = 1 : int
sampling rate
*resolution* = 100 : int
number of spectrum data points
*generalized* = False : bool
generalized version or not
Returns:
*dDTF* : numpy.array
matrix with estimation results (*resolution*, k, k)
References:
            .. [1] <NAME>. et al. Determination of information flow direction
among brain structures by a modified directed transfer function (dDTF)
method. J. Neurosci. Methods 125, 195–207 (2003).
"""
A_z, H_z, S_z = spectrum(Acoef, Vcoef, fs, resolution=resolution)
res, k, k = A_z.shape
mH = np.zeros((res, k, k))
for i in range(res):
mH[i] = np.abs(np.dot(H_z[i], H_z[i].T.conj()))
mHsum = np.sum(mH, axis=0)
dDTF = np.zeros((res, k, k))
before = np.ones((k, k))
before[0::2, :] *= -1
before[:, 0::2] *= -1
for i in range(res):
D_z = np.linalg.inv(S_z[i])
dd = np.tile(np.diag(D_z), (k, 1))
mD = (dd*dd.T).real
PC = np.abs(-1*before*(np.abs(D_z)/np.sqrt(mD)))
dDTF[i] = PC*(np.abs(H_z[i]).T/np.sqrt(np.diag(mHsum))).T
return dDTF
class iPDC(ConnectAR):
"""
iPDC - class inherits from :class:`ConnectAR` and overloads
:func:`Connect.calculate` method.
"""
def calculate(self, Acoef=None, Vcoef=None, fs=None, resolution=100):
"""
instantaneous Partial Directed Coherence from MVAR
parameters.
Args:
*Acoef* : numpy.array
array of shape (k, k, p+1) where *k* is number of channels and
            *p* is a model order (the zero-lag term is included).
*Vcoef* : numpy.array
prediction error matrix (k, k)
*fs* = 1 : int
sampling rate
*resolution* = 100 : int
number of spectrum data points
*generalized* = False : bool
generalized version or not
Returns:
*iPDC* : numpy.array
matrix with estimation results (*resolution*, k, k)
References:
            .. [1] <NAME>. et al., Multivariate Autoregressive Model with Instantaneous
Effects to Improve Brain Connectivity Estimation.
Int. J. Bioelectromagn. 11, 74–79 (2009).
"""
B_z = spectrum_inst(Acoef, Vcoef, fs, resolution=resolution)
res, k, k = B_z.shape
PDC = np.zeros((res, k, k))
for i in range(res):
mB = np.dot(B_z[i].T.conj(), B_z[i]).real
PDC[i] = np.abs(B_z[i])/np.sqrt(np.diag(mB))
return PDC
class iDTF(ConnectAR):
"""
iDTF - class inherits from :class:`ConnectAR` and overloads
:func:`Connect.calculate` method.
"""
def calculate(self, Acoef=None, Vcoef=None, fs=None, resolution=100):
"""
        instantaneous Directed Transfer Function from MVAR
        parameters.
Args:
*Acoef* : numpy.array
array of shape (k, k, p+1) where *k* is number of channels and
            *p* is a model order (the zero-lag term is included).
*Vcoef* : numpy.array
prediction error matrix (k, k)
*fs* = 1 : int
sampling rate
*resolution* = 100 : int
number of spectrum data points
*generalized* = False : bool
generalized version or not
Returns:
          *iDTF* : numpy.array
matrix with estimation results (*resolution*, k, k)
References:
.. [1] <NAME>, Multivariate Autoregressive Model with Instantaneous
Effects to Improve Brain Connectivity Estimation.
Int. J. Bioelectromagn. 11, 74–79 (2009).
"""
B_z = spectrum_inst(Acoef, Vcoef, fs, resolution=resolution)
res, k, k = B_z.shape
DTF = np.zeros((res, k, k))
for i in range(res):
Hb_z = np.linalg.inv(B_z[i])
mH = np.dot(Hb_z, Hb_z.T.conj()).real
DTF[i] = np.abs(Hb_z)/np.sqrt(np.diag(mH)).reshape((k, 1))
return DTF
############################
# Fourier Transform based methods:
class Coherency(Connect):
"""
Coherency - class inherits from :class:`Connect` and overloads
:func:`Connect.calculate` method and *values_range* attribute.
"""
def __init__(self):
self.values_range = [0, 1]
def calculate(self, data, cnfft=None, cno=None, window=np.hanning, im=False):
"""
        Coherency calculation using FFT method.
Args:
*data* : numpy.array
array of shape (k, N) where *k* is number of channels and
*N* is number of data points.
*cnfft* = None : int
number of data points in window; if None, it is N/5
          *cno* = None : int
              overlap; if None, it is N/10
*window* = np.hanning : <function> generating window with 1 arg *n*
window function
*im* = False : bool
              if False it returns the absolute value, otherwise the complex value
Returns:
*COH* : numpy.array
matrix with estimation results (*resolution*, k, k)
References:
.. [1] <NAME> Spectral Analysis and Time Series.
Academic Press Inc. (London) LTD., 1981
"""
        k, N = data.shape
        if not cnfft:
            cnfft = int(N/5)
        if cno is None:
            cno = int(N/10)
        assert cnfft > cno, "overlap must be smaller than window"
winarr = window(cnfft)
slices = range(0, N, int(cnfft-cno))
ftsliced = np.zeros((len(slices), k, int(cnfft/2)+1), complex)
for e, i in enumerate(slices):
if i+cnfft >= N:
datzer = np.concatenate((data[:, i:i+cnfft],
np.zeros((k, i+cnfft-N))), axis=1)
ftsliced[e] = np.fft.rfft(datzer*winarr, axis=1)
else:
ftsliced[e] = np.fft.rfft(data[:, i:i+cnfft]*winarr, axis=1)
ctop = np.zeros((len(slices), k, k, int(cnfft/2)+1), complex)
cdown = np.zeros((len(slices), k, int(cnfft/2)+1))
for i in range(len(slices)):
c1 = ftsliced[i, :, :].reshape((k, 1, int(cnfft/2)+1))
c2 = ftsliced[i, :, :].conj().reshape((1, k, int(cnfft/2)+1))
ctop[i] = c1*c2
cdown[i] = np.abs(ftsliced[i, :, :])**2
cd1 = np.mean(cdown, axis=0).reshape((k, 1, int(cnfft/2)+1))
cd2 = np.mean(cdown, axis=0).reshape((1, k, int(cnfft/2)+1))
cdwn = cd1*cd2
coh = np.mean(ctop, axis=0)/np.sqrt(cdwn)
if not im:
coh = np.abs(coh)
return coh.T
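# Usage sketch (illustrative only, hypothetical data): two channels sharing a
# common 10 Hz component should show strong coherence; the output holds one
# (k, k) matrix per frequency bin, i.e. cnfft/2 + 1 of them.
def _example_coherency_usage():
    fs, n = 128.0, 1024
    t = np.arange(n) / fs
    common = np.sin(2 * np.pi * 10 * t)
    data = np.vstack([common + 0.1 * np.random.randn(n),
                      common + 0.1 * np.random.randn(n)])
    coh = Coherency().calculate(data, cnfft=256, cno=128)
    assert coh.shape == (129, 2, 2)
    return coh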
class PSI(Connect):
"""
PSI - class inherits from :class:`Connect` and overloads
:func:`Connect.calculate` method.
"""
def __init__(self):
super(PSI, self).__init__()
self.two_sided = True
def calculate(self, data, band_width=4, psinfft=None, psino=0, window=np.hanning):
"""
        Phase Slope Index calculation using FFT method.
Args:
*data* : numpy.array
array of shape (k, N) where *k* is number of channels and
*N* is number of data points.
*band_width* = 4 : int
width of frequency band where PSI values are summed
          *psinfft* = None : int
              number of data points in window; if None, it is N/4
          *psino* = 0 : int
              overlap between successive FFT windows
*window* = np.hanning : <function> generating window with 1 arg *n*
window function
Returns:
          *PSI* : numpy.array
              matrix with estimation results (number of frequency bands, k, k)
References:
            .. [1] <NAME>. et al., Comparison of Granger Causality and
Phase Slope Index. 267–276 (2009).
"""
k, N = data.shape
if not psinfft:
psinfft = int(N/4)
assert psinfft > psino, "overlap must be smaller than window"
coh = Coherency()
cohval = coh.calculate(data, cnfft=psinfft, cno=psino, window=window, im=True)
fq_bands = np.arange(0, int(psinfft/2)+1, band_width)
psi = np.zeros((len(fq_bands)-1, k, k))
for f in range(len(fq_bands)-1):
ctmp = cohval[fq_bands[f]:fq_bands[f+1], :, :]
psi[f] = np.imag(np.sum(ctmp[:-1, :, :].conj()*ctmp[1:, :, :], axis=0))
return psi
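# Smoke-test sketch (illustrative only, random data): PSI returns one (k, k)
# matrix per summed frequency band rather than per spectral bin.
def _example_psi_usage():
    data = np.random.randn(2, 512)
    psi = PSI().calculate(data, band_width=4)
    assert psi.shape[1:] == (2, 2)
    return psi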
class GCI(Connect):
"""
GCI - class inherits from :class:`Connect` and overloads
:func:`Connect.calculate` method.
"""
def __init__(self):
super(GCI, self).__init__()
self.two_sided = False
def calculate(self, data, gcimethod='yw', gciorder=None):
"""
Granger Causality Index calculation from MVAR model.
Args:
*data* : numpy.array
array of shape (k, N) where *k* is number of channels and
*N* is number of data points.
          *gcimethod* = 'yw' : str
              MVAR parameters estimation method
          *gciorder* = None : int
              model order; if None, an appropriate value is chosen based
              on the default criterion
        Returns:
          *gci* : numpy.array
              matrix with estimation results (k, k)
References:
            .. [1] <NAME>. et al., Comparison of Granger Causality and
Phase Slope Index. 267–276 (2009).
"""
k, N = data.shape
arfull, vrfull = Mvar().fit(data, gciorder, gcimethod)
gcval = np.zeros((k, k))
for i in range(k):
arix = [j for j in range(k) if i != j]
ar_i, vr_i = Mvar().fit(data[arix, :], gciorder, gcimethod)
for e, c in enumerate(arix):
                gcval[c, i] = np.log(vrfull[i, i]/vr_i[e, e])
import numpy as np
import pytest
from simulators.fake_simulator import fake_bhm_simulation, fake_iam_simulation
from simulators.fake_simulator import main as fake_main
from simulators.fake_simulator import parse_args
def test_fake_simulator_parser():
args = ["HD30501", "01", "-p", "2300, 4.5, -3.0", "--params2", "2100, 3.5, 0.0", "-g", "10"]
parsed = parse_args(args)
assert parsed.star == "HD30501"
assert parsed.sim_num == "01"
assert parsed.params1 == "2300, 4.5, -3.0"
assert parsed.params2 == "2100, 3.5, 0.0"
assert parsed.gamma == 10
assert parsed.rv is None
assert isinstance(parsed.gamma, float)
assert parsed.replace is False
assert parsed.noplots is False
assert parsed.test is False
assert parsed.mode == "iam"
assert parsed.noise is None
def test_fake_simulator_parser_toggle():
args = ["HDTEST", "02", "-t", "-r", '-v', "10", "-m", "bhm", "-s", "100", "-n"]
parsed = parse_args(args)
assert parsed.star == "HDTEST"
assert parsed.sim_num == "02"
assert parsed.params1 is None
assert parsed.params2 is None
assert parsed.rv == 10
assert parsed.gamma is None
assert parsed.replace is True
assert parsed.noplots is True
assert parsed.test is True
assert parsed.mode == "bhm"
assert parsed.noise == 100.0
assert isinstance(parsed.noise, float)
def test_fake_sim_main_with_no_params1_returns_error():
with pytest.raises(ValueError):
fake_main("hdtest", 1, params1=None, params2=[5800, 4.0, -0.5], rv=7, gamma=5, mode="iam")
with pytest.raises(ValueError):
fake_main("hdtest2", 2, params1=None, gamma=7, mode="bmh")
@pytest.mark.parametrize("starname, obsnum",
[("teststar", 1),
("SECONDTEST", 9)])
@pytest.mark.parametrize("mode", ["iam", "bhm"])
def test_fake_simulator_main_runs_and_creates_files(sim_config, tmpdir, starname, obsnum, mode):
"""NO gamma or rv provided"""
simulators = sim_config
simulators.paths["output_dir"] = str(tmpdir)
simulators.paths["parameters"] = str(tmpdir)
simulators.paths["spectra"] = str(tmpdir)
starname_up = starname.upper()
def sim_filename(chip):
return tmpdir.join("{0}-{1}-mixavg-tellcorr_{2}_bervcorr_masked.fits".format(starname_up, obsnum, chip))
# Simulations for each chip don't exist yet?
for chip in range(1, 5):
expected_sim_file = sim_filename(chip)
assert expected_sim_file.check(file=0)
result = fake_main(starname, obsnum, "4500, 5.0, 0.5", "2300, 4.5, 0.0", mode=mode)
# Simulations for each chip were created?
for chip in range(1, 5):
expected_sim_file = sim_filename(chip)
assert expected_sim_file.check(file=1)
assert result is None
@pytest.mark.parametrize("params", [(2500, 4.5, 0.0), (2800, 4.5, 0.5)])
@pytest.mark.parametrize("wav", [np.linspace(2147, 2160, 200)])
@pytest.mark.parametrize("rv", [5, 2, -6])
@pytest.mark.parametrize("gamma", [-5, 1, 7])
@pytest.mark.parametrize("limits", [[2070, 2180]])
def test_fake_iam_simulation_with_passing_wav(params, wav, rv, gamma, limits):
fake_wav, fake_flux = fake_iam_simulation(wav, [5000, 4.5, 0.5], params2=params, gamma=gamma, rv=-rv, limits=limits)
assert np.all(fake_wav < limits[1]) and np.all(fake_wav > limits[0])
    assert np.all(fake_wav == wav)
"""
Created on Thu Dec 06 10:24:14 2019
@author: <NAME>
"""
import cv2
import numpy as np
import os
from sklearn import linear_model
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
def display_matches(img1, img2, kp1, kp2,name, num=20, save=False):
"""Helper to display matches of keypoint in botch images, by connecting a line from one image to another
Typical use:
display_matches(target, source, lmk1, lmk2, name="matches", save = True)
img1, img2: target and source images as np.ndarray
kp1, kp2: landmarks of target and source images respectively as np.ndarray
name: name of the figure display and the image saved if save = True
save: boolean indicates to save the image of the matches
"""
if img1.shape[0] != img2.shape[0]:
        minn = min(img1.shape[0], img2.shape[0])
        if minn == img1.shape[0]:
            img1 = np.concatenate((img1, np.zeros((img2.shape[0] - minn, img1.shape[1], 3), dtype=img1.dtype)), axis=0)
        else:
            img2 = np.concatenate((img2, np.zeros((img1.shape[0] - minn, img2.shape[1], 3), dtype=img2.dtype)), axis=0)
img = np.concatenate((img1, img2), axis=1)
for i in np.random.choice(len(kp1), min(num, len(kp1))):
x1, y1 = int(kp1[i][0]), int(kp1[i][1])
x2, y2 = int(kp2[i][0]) + img1.shape[1], int(kp2[i][1])
cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 1)
cv2.namedWindow(name, cv2.WINDOW_NORMAL)
cv2.resizeWindow(name, img.shape[1], img.shape[0])
cv2.imshow(name, img)
cv2.waitKey()
cv2.destroyAllWindows()
if save:
cv2.imwrite(os.path.join("result", name+".jpg"), img)
def match(lmk1, lmk2, desc1, desc2, sift_error=0.7):
"""Helper to find the pair of matches between two keypoints lists
it return two np.ndarray of landmarks in an order respecting the matching
Typical use:
lmk1, lmk2 = match(lmk1, lmk2, desc1, desc2)
lmk1, lmk2: landmarks of target and source images respectively as np.ndarray
desc1, desc2: descriptors of target and source images respectively as np.ndarray
sift_error: if the ratio between the distance to the closest match and the second closest is less than sift_error
reject this landmark.
"""
match1, match2 = [], []
for i in range(len(desc1)):
distance = np.sqrt(np.sum((desc1[i] - desc2) ** 2, axis=1))
indices = np.argsort(distance)
if distance[indices[0]] / distance[indices[1]] < sift_error:
match1.append(lmk1[i])
match2.append(lmk2[indices[0]])
return np.array(match1), np.array(match2)
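# Usage sketch (illustrative only): synthetic 128-D descriptors stand in for
# real SIFT descriptors; the second set contains perturbed copies of the first
# plus a few unrelated distractors.
def _example_match_usage():
    rng = np.random.RandomState(0)
    lmk1, desc1 = rng.rand(40, 2) * 100, rng.rand(40, 128)
    lmk2 = np.vstack([lmk1 + 1.0, rng.rand(10, 2) * 100])
    desc2 = np.vstack([desc1 + 0.01 * rng.randn(40, 128), rng.rand(10, 128)])
    m1, m2 = match(lmk1, lmk2, desc1, desc2)
    assert len(m1) == len(m2)  # matched landmarks always come in pairs
    return m1, m2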
def cross_corr(img1, img2):
"""Helper to calculate cross_correlation metric between two images. Well adapted, if we assume there is a linear
transformation between pixels intensities in both images.
it returns the cross-correlation value.
Typical use:
cc = cross_corr(warped, target_w)
"""
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
mean1, mean2 = np.mean(img1), np.mean(img2)
img1, img2 = img1-mean1, img2-mean2
numerator = np.sum(np.multiply(img1, img2))
denominator = np.sqrt(np.sum(np.multiply(img1, img1))*np.sum(np.multiply(img2, img2)))
corr = numerator/denominator
print("Cross-correlation: ", corr)
return corr
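# Usage sketch (illustrative only): comparing a random BGR image with a copy of
# itself should give a cross-correlation of ~1.0.
def _example_cross_corr_usage():
    img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
    corr = cross_corr(img, img.copy())
    assert abs(corr - 1.0) < 1e-6
    return corr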
def mutual_inf(img1, img2, verbose=False):
"""Helper to calculate mutual-information metric between two images. it gives a probabilistic measure on how
uncertain we are about the target image in the absence/presence of the warped source image
it returns the mutual information value.
Typical use:
mi = mutual_inf(warped, target_w)
verbose: if verbose=True, display and save the joint-histogram between the two images.
"""
epsilon = 1.e-6
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
img1 = np.round(img1).astype("uint8")
img2 = np.round(img2).astype("uint8")
joint_hist = np.zeros((256, 256))
for i in range(min(img1.shape[0], img2.shape[0])):
for j in range(min(img1.shape[1], img2.shape[1])):
joint_hist[img1[i, j], img2[i, j]] += 1
if verbose:
display_jh = np.log(joint_hist + epsilon)
display_jh = 255*(display_jh - display_jh.min())/(display_jh.max() - display_jh.min())
cv2.imshow("joint_histogram", display_jh)
cv2.waitKey()
cv2.destroyAllWindows()
cv2.imwrite("result/joint_histogram.jpg", display_jh)
joint_hist /= np.sum(joint_hist)
p1 = np.sum(joint_hist, axis=0)
p2 = np.sum(joint_hist, axis=1)
joint_hist_d = joint_hist/(p1+epsilon)
joint_hist_d /= (p2+epsilon)
mi = np.sum(np.multiply(joint_hist, np.log(joint_hist_d+epsilon)))
print("Mutual Information: ", mi)
return mi
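# Usage sketch (illustrative only): the joint histogram of an image with a copy
# of itself concentrates on the diagonal, so the returned score is positive.
def _example_mutual_inf_usage():
    img = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
    mi = mutual_inf(img, img.copy(), verbose=False)
    assert mi > 0.0
    return mi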
def ransac(kp1, kp2):
"""Helper to apply ransac (RANdom SAmple Consensus) algorithm on two arrays of landmarks
it returns the inliers and outliers in both arrays
Typical use:
lmk1, lmk2, outliers1, outliers2 = ransac(lmk1, lmk2)
kp1, kp2: landmarks of target and source images respectively as np.ndarray
"""
ransac_model = linear_model.RANSACRegressor()
ransac_model.fit(kp1, kp2)
inlier_mask = ransac_model.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
return kp1[inlier_mask], kp2[inlier_mask], kp1[outlier_mask], kp2[outlier_mask]
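# Usage sketch (illustrative only): synthetic correspondences generated from a
# known linear map, with the first ten pairs corrupted into gross outliers.
def _example_ransac_usage():
    rng = np.random.RandomState(0)
    kp1 = rng.rand(100, 2) * 200
    kp2 = kp1 @ np.array([[1.1, 0.0], [0.05, 0.9]]) + np.array([5.0, -3.0])
    kp2[:10] += 80.0
    in1, in2, out1, out2 = ransac(kp1, kp2)
    assert len(in1) + len(out1) == len(kp1)  # every pair is inlier or outlier
    return in1, in2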
def calculate_transform(kp1, kp2):
"""Helper to apply find the best affine transform using two arrays of landmarks.
it returns the affine transform, a matrix T of size (2, 3)
Typical use:
T = calculate_transform(lmk2, lmk1)
kp1, kp2: landmarks of target and source images respectively as np.ndarray
"""
upper = np.concatenate((kp1, np.ones((kp1.shape[0], 1)), np.zeros((kp1.shape[0], 3))), axis=1)
    lower = np.concatenate((np.zeros((kp1.shape[0], 3)), kp1, np.ones((kp1.shape[0], 1))), axis=1)
import os
import glob
import random
import torch
import imageio
import errno
import numpy as np
import tifffile as tiff
import torch.nn as nn
import matplotlib.pyplot as plt
from torch.utils import data
from sklearn.metrics import confusion_matrix
# =============================================
class CustomDataset(torch.utils.data.Dataset):
def __init__(self, imgs_folder, labels_folder, augmentation):
# 1. Initialize file paths or a list of file names.
self.imgs_folder = imgs_folder
self.labels_folder = labels_folder
self.data_augmentation = augmentation
# self.transform = transforms
def __getitem__(self, index):
        # 1. Read one data from file (e.g. using numpy.fromfile, PIL.Image.open).
# 2. Preprocess the data (e.g. torchvision.Transform).
# 3. Return a data pair (e.g. image and label).
all_images = glob.glob(os.path.join(self.imgs_folder, '*.npy'))
all_labels = glob.glob(os.path.join(self.labels_folder, '*.npy'))
# sort all in the same order
all_labels.sort()
all_images.sort()
#
# label = Image.open(all_labels[index])
# label = tiff.imread(all_labels[index])
label = np.load(all_labels[index])
label = np.array(label, dtype='float32')
# image = tiff.imread(all_images[index])
image = np.load(all_images[index])
image = np.array(image, dtype='float32')
#
labelname = all_labels[index]
path_label, labelname = os.path.split(labelname)
labelname, labelext = os.path.splitext(labelname)
#
c_amount = len(np.shape(label))
#
#
        # Reshaping everything to make sure the order: channel x height x width
if c_amount == 3:
d1, d2, d3 = np.shape(label)
if d1 != min(d1, d2, d3):
label = np.reshape(label, (d3, d1, d2))
#
elif c_amount == 2:
h, w = np.shape(label)
label = np.reshape(label, (1, h, w))
#
d1, d2, d3 = np.shape(image)
#
if d1 != min(d1, d2, d3):
#
image = np.reshape(image, (d3, d1, d2))
#
if self.data_augmentation == 'full':
# augmentation:
augmentation = random.uniform(0, 1)
#
if augmentation < 0.25:
#
c, h, w = np.shape(image)
#
for channel in range(c):
#
image[channel, :, :] = np.flip(image[channel, :, :], axis=0).copy()
image[channel, :, :] = np.flip(image[channel, :, :], axis=1).copy()
#
label = np.flip(label, axis=1).copy()
label = np.flip(label, axis=2).copy()
elif augmentation < 0.5:
#
mean = 0.0
sigma = 0.15
noise = np.random.normal(mean, sigma, image.shape)
mask_overflow_upper = image + noise >= 1.0
mask_overflow_lower = image + noise < 0.0
noise[mask_overflow_upper] = 1.0
noise[mask_overflow_lower] = 0.0
image += noise
elif augmentation < 0.75:
#
c, h, w = np.shape(image)
#
for channel in range(c):
#
channel_ratio = random.uniform(0, 1)
#
image[channel, :, :] = image[channel, :, :] * channel_ratio
elif self.data_augmentation == 'flip':
# augmentation:
augmentation = random.uniform(0, 1)
#
if augmentation > 0.5 or augmentation == 0.5:
#
c, h, w = np.shape(image)
#
for channel in range(c):
#
image[channel, :, :] = np.flip(image[channel, :, :], axis=0).copy()
#
label = np.flip(label, axis=1).copy()
elif self.data_augmentation == 'all_flip':
# augmentation:
augmentation = random.uniform(0, 1)
#
if augmentation > 0.5 or augmentation == 0.5:
#
c, h, w = np.shape(image)
#
for channel in range(c):
#
image[channel, :, :] = np.flip(image[channel, :, :], axis=0).copy()
image[channel, :, :] = np.flip(image[channel, :, :], axis=1).copy()
#
label = np.flip(label, axis=1).copy()
label = np.flip(label, axis=2).copy()
return image, label, labelname
def __len__(self):
# You should change 0 to the total size of your dataset.
return len(glob.glob(os.path.join(self.imgs_folder, '*.npy')))
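# Usage sketch (illustrative only): build a throwaway imgs/labels folder pair
# holding matching *.npy files in the layout this class expects, then read one
# sample back. The folder and file names here are hypothetical.
def _example_custom_dataset_usage():
    import tempfile
    root = tempfile.mkdtemp()
    img_dir, lbl_dir = os.path.join(root, 'imgs'), os.path.join(root, 'labels')
    os.makedirs(img_dir)
    os.makedirs(lbl_dir)
    np.save(os.path.join(img_dir, 'case0.npy'), np.random.rand(1, 32, 32).astype('float32'))
    np.save(os.path.join(lbl_dir, 'case0.npy'), np.zeros((32, 32), dtype='float32'))
    dataset = CustomDataset(img_dir, lbl_dir, augmentation='none')
    image, label, name = dataset[0]
    assert image.shape == (1, 32, 32) and label.shape == (1, 32, 32)
    return name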
# ============================================================================================
def evaluate_noisy_label(data, model1, model2, class_no):
"""
Args:
data:
model1:
model2:
class_no:
Returns:
"""
model1.eval()
model2.eval()
#
test_dice = 0
test_dice_all = []
#
for i, (v_images, v_labels_over, v_labels_under, v_labels_wrong, v_labels_true, v_imagename) in enumerate(data):
#
v_images = v_images.to(device='cuda', dtype=torch.float32)
v_outputs_logits = model1(v_images)
v_outputs_logits_noisy = model2(v_images)
#
_, v_output = torch.max(v_outputs_logits, dim=1)
v_outputs_noisy = []
#
for v_noisy_logit in v_outputs_logits_noisy:
#
_, v_noisy_output = torch.max(v_noisy_logit, dim=1)
v_outputs_noisy.append(v_noisy_output.cpu().detach().numpy())
#
v_dice_ = segmentation_scores(v_labels_true, v_output.cpu().detach().numpy(), class_no)
#
epoch_noisy_labels = [v_labels_over.cpu().detach().numpy(), v_labels_under.cpu().detach().numpy(), v_labels_wrong.cpu().detach().numpy(), v_labels_true.cpu().detach().numpy()]
v_ged = generalized_energy_distance(epoch_noisy_labels, v_outputs_noisy, class_no)
test_dice += v_dice_
test_dice_all.append(test_dice)
#
# print(i)
# print(test_dice)
# print(test_dice / (i + 1))
return test_dice / (i + 1), v_ged
def evaluate_noisy_label_2(data, model1, model2, class_no):
"""
Args:
data:
model1:
model2:
class_no:
Returns:
"""
model1.eval()
model2.eval()
test_dice = 0
test_dice_all = []
for i, (v_images, v_labels_over, v_labels_under, v_labels_wrong, v_labels_true, v_imagename) in enumerate(data):
#
v_images = v_images.to(device='cuda', dtype=torch.float32)
v_outputs_logits = model1(v_images)
b, c, h, w = v_outputs_logits.size()
v_outputs_logits = nn.Softmax(dim=1)(v_outputs_logits)
cms = model2(v_images)
#
_, v_output = torch.max(v_outputs_logits, dim=1)
v_outputs_noisy = []
#
for cm in cms:
#
cm = cm.reshape(b * h * w, c, c)
cm = cm / cm.sum(1, keepdim=True)
v_noisy_logit = torch.bmm(cm, v_outputs_logits.reshape(b * h * w, c, 1)).reshape(b, c, h, w)
_, v_noisy_output = torch.max(v_noisy_logit, dim=1)
v_outputs_noisy.append(v_noisy_output.cpu().detach().numpy())
#
v_dice_ = segmentation_scores(v_labels_true, v_output.cpu().detach().numpy(), class_no)
#
epoch_noisy_labels = [v_labels_over.cpu().detach().numpy(), v_labels_under.cpu().detach().numpy(), v_labels_wrong.cpu().detach().numpy(), v_labels_true.cpu().detach().numpy()]
v_ged = generalized_energy_distance(epoch_noisy_labels, v_outputs_noisy, class_no)
test_dice += v_dice_
test_dice_all.append(test_dice)
#
# print(i)
# print(test_dice)
# print(test_dice / (i + 1))
return test_dice / (i + 1), v_ged
def evaluate_noisy_label_3(data, model1, class_no):
"""
Args:
data:
model1:
class_no:
Returns:
"""
model1.eval()
# model2.eval()
#
test_dice = 0
test_dice_all = []
#
for i, (v_images, v_labels_over, v_labels_under, v_labels_wrong, v_labels_good, v_imagename) in enumerate(data):
#
# print(i)
#
v_images = v_images.to(device='cuda', dtype=torch.float32)
v_outputs_logits, cms = model1(v_images)
b, c, h, w = v_outputs_logits.size()
v_outputs_logits = nn.Softmax(dim=1)(v_outputs_logits)
# cms = model2(v_images)
#
_, v_output = torch.max(v_outputs_logits, dim=1)
v_outputs_noisy = []
#
# v_outputs_logits = v_outputs_logits.permute(0, 2, 3, 1).contiguous()
# v_outputs_logits = v_outputs_logits.reshape(b * h * w, c, 1)
#
for cm in cms:
#
cm = cm.reshape(b * h * w, c, c)
cm = cm / cm.sum(1, keepdim=True)
v_noisy_output = torch.bmm(cm, v_outputs_logits.reshape(b * h * w, c, 1)).reshape(b, c, h, w)
# cm = cm.permute(0, 2, 3, 1).contiguous().view(b * h * w, c, c)
# cm = cm / cm.sum(1, keepdim=True)
# v_noisy_output = torch.bmm(cm, v_outputs_logits)
# v_noisy_output = v_noisy_output.view(b, h, w, c).permute(0, 3, 1, 2).contiguous()
_, v_noisy_output = torch.max(v_noisy_output, dim=1)
v_outputs_noisy.append(v_noisy_output.cpu().detach().numpy())
#
v_dice_ = segmentation_scores(v_labels_good, v_output.cpu().detach().numpy(), class_no)
#
epoch_noisy_labels = [v_labels_over.cpu().detach().numpy(), v_labels_under.cpu().detach().numpy(), v_labels_wrong.cpu().detach().numpy(), v_labels_good.cpu().detach().numpy()]
v_ged = generalized_energy_distance(epoch_noisy_labels, v_outputs_noisy, class_no)
test_dice += v_dice_
test_dice_all.append(test_dice)
#
# print(i)
# print(test_dice)
# print(test_dice / (i + 1))
#
return test_dice / (i + 1), v_ged
def evaluate_noisy_label_4(data, model1, class_no):
"""
Args:
data:
model1:
class_no:
Returns:
"""
model1.eval()
# model2.eval()
#
test_dice = 0
test_dice_all = []
#
for i, (v_images, v_labels_over, v_labels_under, v_labels_wrong, v_labels_good, v_imagename) in enumerate(data):
#
# print(i)
#
v_images = v_images.to(device='cuda', dtype=torch.float32)
v_outputs_logits, cms = model1(v_images)
b, c, h, w = v_outputs_logits.size()
v_outputs_logits = nn.Softmax(dim=1)(v_outputs_logits)
# cms = model2(v_images)
#
_, v_output = torch.max(v_outputs_logits, dim=1)
v_outputs_noisy = []
#
v_outputs_logits = v_outputs_logits.view(b, c, h*w)
v_outputs_logits = v_outputs_logits.permute(0, 2, 1).contiguous().view(b*h*w, c)
v_outputs_logits = v_outputs_logits.view(b * h * w, c, 1)
#
for cm in cms:
#
cm = cm.reshape(b, c**2, h*w).permute(0, 2, 1).contiguous().view(b*h*w, c*c).view(b*h*w, c, c)
cm = cm / cm.sum(1, keepdim=True)
v_noisy_output = torch.bmm(cm, v_outputs_logits).view(b*h*w, c)
v_noisy_output = v_noisy_output.view(b, h*w, c).permute(0, 2, 1).contiguous().view(b, c, h, w)
_, v_noisy_output = torch.max(v_noisy_output, dim=1)
v_outputs_noisy.append(v_noisy_output.cpu().detach().numpy())
#
v_dice_ = segmentation_scores(v_labels_good, v_output.cpu().detach().numpy(), class_no)
#
epoch_noisy_labels = [v_labels_over.cpu().detach().numpy(), v_labels_under.cpu().detach().numpy(), v_labels_wrong.cpu().detach().numpy(), v_labels_good.cpu().detach().numpy()]
v_ged = generalized_energy_distance(epoch_noisy_labels, v_outputs_noisy, class_no)
test_dice += v_dice_
test_dice_all.append(test_dice)
#
# print(i)
# print(test_dice)
# print(test_dice / (i + 1))
#
return test_dice / (i + 1), v_ged
def evaluate_noisy_label_6(data, model1, class_no):
"""
Args:
data:
model1:
class_no:
Returns:
"""
model1.eval()
# model2.eval()
#
test_dice = 0
test_dice_all = []
#
for i, (v_images, v_labels_over, v_labels_under, v_labels_wrong, v_labels_good, v_imagename) in enumerate(data):
#
# print(i)
#
v_images = v_images.to(device='cuda', dtype=torch.float32)
v_outputs_logits, cms = model1(v_images)
b, c, h, w = v_outputs_logits.size()
v_outputs_logits = nn.Softmax(dim=1)(v_outputs_logits)
# cms = model2(v_images)
#
_, v_output = torch.max(v_outputs_logits, dim=1)
v_outputs_noisy = []
#
v_outputs_logits = v_outputs_logits.view(b, c, h*w)
v_outputs_logits = v_outputs_logits.permute(0, 2, 1).contiguous().view(b*h*w, c)
v_outputs_logits = v_outputs_logits.view(b * h * w, c, 1)
#
for cm in cms:
#
b, c_r_d, h, w = cm.size()
r = c_r_d // c // 2
cm1 = cm[:, 0:r * c, :, :]
if r == 1:
cm2 = cm[:, r * c:c_r_d-1, :, :]
else:
cm2 = cm[:, r * c:c_r_d-1, :, :]
cm1_reshape = cm1.view(b, c_r_d // 2, h * w).permute(0, 2, 1).contiguous().view(b * h * w, r * c).view(b * h * w, r, c)
cm2_reshape = cm2.view(b, c_r_d // 2, h * w).permute(0, 2, 1).contiguous().view(b * h * w, r * c).view(b * h * w, c, r)
#
cm1_reshape = cm1_reshape / cm1_reshape.sum(1, keepdim=True)
cm2_reshape = cm2_reshape / cm2_reshape.sum(1, keepdim=True)
#
v_noisy_output = torch.bmm(cm1_reshape, v_outputs_logits)
v_noisy_output = torch.bmm(cm2_reshape, v_noisy_output).view(b * h * w, c)
v_noisy_output = v_noisy_output.view(b, h * w, c).permute(0, 2, 1).contiguous().view(b, c, h, w)
#
# v_noisy_output = torch.bmm(cm, v_outputs_logits).view(b*h*w, c)
# v_noisy_output = v_noisy_output.view(b, h*w, c).permute(0, 2, 1).contiguous().view(b, c, h, w)
_, v_noisy_output = torch.max(v_noisy_output, dim=1)
v_outputs_noisy.append(v_noisy_output.cpu().detach().numpy())
#
v_dice_ = segmentation_scores(v_labels_good, v_output.cpu().detach().numpy(), class_no)
#
epoch_noisy_labels = [v_labels_over.cpu().detach().numpy(), v_labels_under.cpu().detach().numpy(), v_labels_wrong.cpu().detach().numpy(), v_labels_good.cpu().detach().numpy()]
v_ged = generalized_energy_distance(epoch_noisy_labels, v_outputs_noisy, class_no)
test_dice += v_dice_
test_dice_all.append(test_dice)
#
# print(i)
# print(test_dice)
# print(test_dice / (i + 1))
#
return test_dice / (i + 1), v_ged
def evaluate_noisy_label_7(data, model1, model2, class_no, low_rank):
"""
Args:
data:
model1:
model2:
class_no:
low_rank:
Returns:
"""
model1.eval()
model2.eval()
#
test_dice = 0
test_dice_all = []
#
for i, (v_images, v_labels_over, v_labels_under, v_labels_wrong, v_labels_good, v_imagename) in enumerate(data):
#
# print(i)
#
v_images = v_images.to(device='cuda', dtype=torch.float32)
v_outputs_logits = model1(v_images)
b, c, h, w = v_outputs_logits.size()
v_outputs_logits = nn.Softmax(dim=1)(v_outputs_logits)
cms = model2(v_images)
#
_, v_output = torch.max(v_outputs_logits, dim=1)
v_outputs_noisy = []
#
v_outputs_logits = v_outputs_logits.view(b, c, h*w)
v_outputs_logits = v_outputs_logits.permute(0, 2, 1).contiguous().view(b*h*w, c)
v_outputs_logits = v_outputs_logits.view(b * h * w, c, 1)
#
for cm in cms:
#
if low_rank is False:
#
cm = cm.reshape(b, c**2, h*w).permute(0, 2, 1).contiguous().view(b*h*w, c*c).view(b*h*w, c, c)
cm = cm / cm.sum(1, keepdim=True)
v_noisy_output = torch.bmm(cm, v_outputs_logits).view(b*h*w, c)
v_noisy_output = v_noisy_output.view(b, h*w, c).permute(0, 2, 1).contiguous().view(b, c, h, w)
#
else:
#
b, c_r_d, h, w = cm.size()
r = c_r_d // c // 2
cm1 = cm[:, 0:r * c, :, :]
cm2 = cm[:, r * c:c_r_d, :, :]
cm1_reshape = cm1.view(b, c_r_d // 2, h * w).permute(0, 2, 1).contiguous().view(b * h * w, r * c).view(b * h * w, r, c)
cm2_reshape = cm2.view(b, c_r_d // 2, h * w).permute(0, 2, 1).contiguous().view(b * h * w, r * c).view(b * h * w, c, r)
#
cm1_reshape = cm1_reshape / cm1_reshape.sum(1, keepdim=True)
cm2_reshape = cm2_reshape / cm2_reshape.sum(1, keepdim=True)
#
v_noisy_output = torch.bmm(cm1_reshape, v_outputs_logits)
v_noisy_output = torch.bmm(cm2_reshape, v_noisy_output).view(b * h * w, c)
v_noisy_output = v_noisy_output.view(b, h * w, c).permute(0, 2, 1).contiguous().view(b, c, h, w)
#
_, v_noisy_output = torch.max(v_noisy_output, dim=1)
v_outputs_noisy.append(v_noisy_output.cpu().detach().numpy())
#
v_dice_ = segmentation_scores(v_labels_good, v_output.cpu().detach().numpy(), class_no)
#
epoch_noisy_labels = [v_labels_over.cpu().detach().numpy(), v_labels_under.cpu().detach().numpy(), v_labels_wrong.cpu().detach().numpy(), v_labels_good.cpu().detach().numpy()]
v_ged = generalized_energy_distance(epoch_noisy_labels, v_outputs_noisy, class_no)
test_dice += v_dice_
test_dice_all.append(test_dice)
#
# print(i)
# print(test_dice)
# print(test_dice / (i + 1))
#
return test_dice / (i + 1), v_ged
def evaluate_noisy_label_5(data, model1, class_no):
"""
Args:
data:
model1:
class_no:
Returns:
"""
model1.eval()
# model2.eval()
#
test_dice = 0
test_dice_all = []
#
for i, (v_images, v_labels_over, v_labels_under, v_labels_wrong, v_labels_good, v_labels_true, v_imagename) in enumerate(data):
#
# print(i)
#
v_images = v_images.to(device='cuda', dtype=torch.float32)
v_outputs_logits, cms = model1(v_images)
b, c, h, w = v_outputs_logits.size()
v_outputs_logits = nn.Softmax(dim=1)(v_outputs_logits)
# cms = model2(v_images)
#
_, v_output = torch.max(v_outputs_logits, dim=1)
v_outputs_noisy = []
#
v_outputs_logits = v_outputs_logits.view(b, c, h*w)
v_outputs_logits = v_outputs_logits.permute(0, 2, 1).contiguous().view(b*h*w, c)
v_outputs_logits = v_outputs_logits.view(b * h * w, c, 1)
#
for cm in cms:
#
# cm = cm.reshape(b * h * w, c, c)
# cm = cm / cm.sum(1, keepdim=True)
# v_noisy_output = torch.bmm(cm, v_outputs_logits.reshape(b * h * w, c, 1)).reshape(b, c, h, w)
# cm = cm.permute(0, 2, 3, 1).contiguous().view(b * h * w, c, c)
cm = cm.reshape(b, c**2, h*w).permute(0, 2, 1).contiguous().view(b*h*w, c*c).view(b*h*w, c, c)
cm = cm / cm.sum(1, keepdim=True)
v_noisy_output = torch.bmm(cm, v_outputs_logits).view(b*h*w, c)
v_noisy_output = v_noisy_output.view(b, h*w, c).permute(0, 2, 1).contiguous().view(b, c, h, w)
_, v_noisy_output = torch.max(v_noisy_output, dim=1)
v_outputs_noisy.append(v_noisy_output.cpu().detach().numpy())
#
v_dice_ = segmentation_scores(v_labels_true, v_output.cpu().detach().numpy(), class_no)
#
epoch_noisy_labels = [v_labels_over.cpu().detach().numpy(), v_labels_under.cpu().detach().numpy(), v_labels_wrong.cpu().detach().numpy(), v_labels_true.cpu().detach().numpy(), v_labels_good.cpu().detach().numpy()]
v_ged = generalized_energy_distance(epoch_noisy_labels, v_outputs_noisy, class_no)
test_dice += v_dice_
test_dice_all.append(test_dice)
#
# print(i)
# print(test_dice)
# print(test_dice / (i + 1))
#
return test_dice / (i + 1), v_ged
def evaluate(evaluatedata, model, device, class_no):
"""
Args:
evaluatedata:
model:
device:
class_no:
Returns:
"""
model.eval()
#
with torch.no_grad():
#
test_iou = 0
#
for j, (testimg, testlabel, testname) in enumerate(evaluatedata):
#
testimg = testimg.to(device=device, dtype=torch.float32)
testlabel = testlabel.to(device=device, dtype=torch.float32)
#
testoutput = model(testimg)
if class_no == 2:
testoutput = torch.sigmoid(testoutput)
testoutput = (testoutput > 0.5).float()
else:
_, testoutput = torch.max(testoutput, dim=1)
#
mean_iu_ = segmentation_scores(testlabel.cpu().detach().numpy(), testoutput.cpu().detach().numpy(), class_no)
test_iou += mean_iu_
#
return test_iou / (j+1)
def test(testdata,
model,
device,
class_no,
save_path):
"""
Args:
testdata:
model:
device:
class_no:
save_path:
Returns:
"""
model.eval()
with torch.no_grad():
#
test_iou = 0
#
for j, (testimg, testlabel, testname) in enumerate(testdata):
#
testimg = testimg.to(device=device, dtype=torch.float32)
testlabel = testlabel.to(device=device, dtype=torch.float32)
#
testoutput = model(testimg)
if class_no == 2:
testoutput = torch.sigmoid(testoutput)
testoutput = (testoutput > 0.5).float()
else:
_, testoutput = torch.max(testoutput, dim=1)
#
mean_iu_ = segmentation_scores(testlabel.cpu().detach().numpy(), testoutput.cpu().detach().numpy(), class_no)
test_iou += mean_iu_
#
# ========================================================
# # Plotting segmentation:
# ========================================================
prediction_map_path = save_path + '/' + 'Visual_results'
#
try:
os.mkdir(prediction_map_path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
b, c, h, w = np.shape(testlabel)
testoutput_original = np.asarray(testoutput.cpu().detach().numpy(), dtype=np.uint8)
testoutput_original = np.squeeze(testoutput_original, axis=0)
testoutput_original = np.repeat(testoutput_original[:, :, np.newaxis], 3, axis=2)
#
if class_no == 2:
segmentation_map = np.zeros((h, w, 3), dtype=np.uint8)
#
segmentation_map[:, :, 0][np.logical_and(testoutput_original[:, :, 0] == 1, testoutput_original[:, :, 1] == 1, testoutput_original[:, :, 2] == 1)] = 255
segmentation_map[:, :, 1][np.logical_and(testoutput_original[:, :, 0] == 1, testoutput_original[:, :, 1] == 1, testoutput_original[:, :, 2] == 1)] = 0
segmentation_map[:, :, 2][np.logical_and(testoutput_original[:, :, 0] == 1, testoutput_original[:, :, 1] == 1, testoutput_original[:, :, 2] == 1)] = 0
#
else:
segmentation_map = np.zeros((h, w, 3), dtype=np.uint8)
if class_no == 4:
# multi class for brats 2018
segmentation_map[:, :, 0][np.logical_and(testoutput_original[:, :, 0] == 1, testoutput_original[:, :, 1] == 1, testoutput_original[:, :, 2] == 1)] = 255
segmentation_map[:, :, 1][np.logical_and(testoutput_original[:, :, 0] == 1, testoutput_original[:, :, 1] == 1, testoutput_original[:, :, 2] == 1)] = 0
segmentation_map[:, :, 2][np.logical_and(testoutput_original[:, :, 0] == 1, testoutput_original[:, :, 1] == 1, testoutput_original[:, :, 2] == 1)] = 0
#
segmentation_map[:, :, 0][np.logical_and(testoutput_original[:, :, 0] == 2, testoutput_original[:, :, 1] == 2, testoutput_original[:, :, 2] == 2)] = 0
segmentation_map[:, :, 1][np.logical_and(testoutput_original[:, :, 0] == 2, testoutput_original[:, :, 1] == 2, testoutput_original[:, :, 2] == 2)] = 255
segmentation_map[:, :, 2][np.logical_and(testoutput_original[:, :, 0] == 2, testoutput_original[:, :, 1] == 2, testoutput_original[:, :, 2] == 2)] = 0
#
segmentation_map[:, :, 0][np.logical_and(testoutput_original[:, :, 0] == 3, testoutput_original[:, :, 1] == 3, testoutput_original[:, :, 2] == 3)] = 0
segmentation_map[:, :, 1][np.logical_and(testoutput_original[:, :, 0] == 3, testoutput_original[:, :, 1] == 3, testoutput_original[:, :, 2] == 3)] = 0
segmentation_map[:, :, 2][np.logical_and(testoutput_original[:, :, 0] == 3, testoutput_original[:, :, 1] == 3, testoutput_original[:, :, 2] == 3)] = 255
#
elif class_no == 8:
# multi class for cityscapes
segmentation_map[:, :, 0][np.logical_and(testoutput_original[:, :, 0] == 0, testoutput_original[:, :, 1] == 0, testoutput_original[:, :, 2] == 0)] = 255
segmentation_map[:, :, 1][np.logical_and(testoutput_original[:, :, 0] == 0, testoutput_original[:, :, 1] == 0, testoutput_original[:, :, 2] == 0)] = 0
segmentation_map[:, :, 2][np.logical_and(testoutput_original[:, :, 0] == 0, testoutput_original[:, :, 1] == 0, testoutput_original[:, :, 2] == 0)] = 0
#
segmentation_map[:, :, 0][np.logical_and(testoutput_original[:, :, 0] == 1, testoutput_original[:, :, 1] == 1, testoutput_original[:, :, 2] == 1)] = 0
segmentation_map[:, :, 1][np.logical_and(testoutput_original[:, :, 0] == 1, testoutput_original[:, :, 1] == 1, testoutput_original[:, :, 2] == 1)] = 255
segmentation_map[:, :, 2][np.logical_and(testoutput_original[:, :, 0] == 1, testoutput_original[:, :, 1] == 1, testoutput_original[:, :, 2] == 1)] = 0
#
segmentation_map[:, :, 0][np.logical_and(testoutput_original[:, :, 0] == 2, testoutput_original[:, :, 1] == 2, testoutput_original[:, :, 2] == 2)] = 0
segmentation_map[:, :, 1][np.logical_and(testoutput_original[:, :, 0] == 2, testoutput_original[:, :, 1] == 2, testoutput_original[:, :, 2] == 2)] = 0
segmentation_map[:, :, 2][np.logical_and(testoutput_original[:, :, 0] == 2, testoutput_original[:, :, 1] == 2, testoutput_original[:, :, 2] == 2)] = 255
#
segmentation_map[:, :, 0][np.logical_and(testoutput_original[:, :, 0] == 3, testoutput_original[:, :, 1] == 3, testoutput_original[:, :, 2] == 3)] = 255
segmentation_map[:, :, 1][np.logical_and(testoutput_original[:, :, 0] == 3, testoutput_original[:, :, 1] == 3, testoutput_original[:, :, 2] == 3)] = 255
segmentation_map[:, :, 2][np.logical_and(testoutput_original[:, :, 0] == 3, testoutput_original[:, :, 1] == 3, testoutput_original[:, :, 2] == 3)] = 0
#
segmentation_map[:, :, 0][np.logical_and(testoutput_original[:, :, 0] == 4, testoutput_original[:, :, 1] == 4, testoutput_original[:, :, 2] == 4)] = 153
segmentation_map[:, :, 1][np.logical_and(testoutput_original[:, :, 0] == 4, testoutput_original[:, :, 1] == 4, testoutput_original[:, :, 2] == 4)] = 51
segmentation_map[:, :, 2][np.logical_and(testoutput_original[:, :, 0] == 4, testoutput_original[:, :, 1] == 4, testoutput_original[:, :, 2] == 4)] = 255
#
segmentation_map[:, :, 0][np.logical_and(testoutput_original[:, :, 0] == 5, testoutput_original[:, :, 1] == 5, testoutput_original[:, :, 2] == 5)] = 255
segmentation_map[:, :, 1][np.logical_and(testoutput_original[:, :, 0] == 5, testoutput_original[:, :, 1] == 5, testoutput_original[:, :, 2] == 5)] = 102
segmentation_map[:, :, 2][np.logical_and(testoutput_original[:, :, 0] == 5, testoutput_original[:, :, 1] == 5, testoutput_original[:, :, 2] == 5)] = 178
#
segmentation_map[:, :, 0][np.logical_and(testoutput_original[:, :, 0] == 6, testoutput_original[:, :, 1] == 6, testoutput_original[:, :, 2] == 6)] = 102
segmentation_map[:, :, 1][np.logical_and(testoutput_original[:, :, 0] == 6, testoutput_original[:, :, 1] == 6, testoutput_original[:, :, 2] == 6)] = 255
segmentation_map[:, :, 2][np.logical_and(testoutput_original[:, :, 0] == 6, testoutput_original[:, :, 1] == 6, testoutput_original[:, :, 2] == 6)] = 102
#
prediction_name = 'seg_' + testname[0] + '.png'
full_error_map_name = os.path.join(prediction_map_path, prediction_name)
imageio.imsave(full_error_map_name, segmentation_map)
#
prediction_result_path = save_path + '/Quantitative_Results'
#
try:
#
os.mkdir(prediction_result_path)
#
except OSError as exc:
#
if exc.errno != errno.EEXIST:
#
raise
#
pass
#
result_dictionary = {'Test dice': str(test_iou / len(testdata))}
#
ff_path = prediction_result_path + '/test_result_data.txt'
ff = open(ff_path, 'w')
ff.write(str(result_dictionary))
ff.close()
print('Test iou: {:.4f}, '.format(test_iou / len(testdata)))
class CustomDataset_punet(torch.utils.data.Dataset):
def __init__(self, dataset_location, dataset_tag, noisylabel, augmentation=False):
#
self.label_mode = noisylabel
self.dataset_tag = dataset_tag
#
if noisylabel == 'multi':
#
if dataset_tag == 'mnist':
self.label_over_folder = dataset_location + '/Over'
self.label_under_folder = dataset_location + '/Under'
self.label_wrong_folder = dataset_location + '/Wrong'
self.label_good_folder = dataset_location + '/GT'
self.image_folder = dataset_location + '/Gaussian'
elif dataset_tag == 'brats':
self.label_over_folder = dataset_location + '/Over'
self.label_under_folder = dataset_location + '/Under'
self.label_wrong_folder = dataset_location + '/Wrong'
self.label_good_folder = dataset_location + '/Good'
self.image_folder = dataset_location + '/Image'
elif dataset_tag == 'lidc':
self.label_over_folder = dataset_location + '/Annotator_1'
self.label_under_folder = dataset_location + '/Annotator_2'
self.label_wrong_folder = dataset_location + '/Annotator_3'
self.label_good_folder = dataset_location + '/Annotator_4'
self.label_true_folder = dataset_location + '/Annotator_5'
self.image_folder = dataset_location + '/Image'
#
elif noisylabel == 'binary':
if dataset_tag == 'mnist':
self.label_folder = dataset_location + '/Mean'
self.image_folder = dataset_location + '/Gaussian'
self.true_label_folder = dataset_location + '/GT'
elif noisylabel == 'normal':
if dataset_tag == 'mnist':
self.label_folder = dataset_location + '/GT'
self.image_folder = dataset_location + '/Gaussian'
elif noisylabel == 'p_unet':
if dataset_tag == 'mnist':
self.label_folder = dataset_location + '/All'
self.image_folder = dataset_location + '/Gaussian'
self.data_aug = augmentation
def __getitem__(self, index):
if self.label_mode == 'multi':
#
if self.dataset_tag == 'mnist' or self.dataset_tag == 'brats':
#
all_labels_over = glob.glob(os.path.join(self.label_over_folder, '*.tif'))
all_labels_over.sort()
#
all_labels_under = glob.glob(os.path.join(self.label_under_folder, '*.tif'))
all_labels_under.sort()
#
all_labels_wrong = glob.glob(os.path.join(self.label_wrong_folder, '*.tif'))
all_labels_wrong.sort()
#
all_labels_good = glob.glob(os.path.join(self.label_good_folder, '*.tif'))
all_labels_good.sort()
#
all_images = glob.glob(os.path.join(self.image_folder, '*.tif'))
all_images.sort()
#
label_over = tiff.imread(all_labels_over[index])
label_over = np.array(label_over, dtype='float32')
#
label_under = tiff.imread(all_labels_under[index])
label_under = np.array(label_under, dtype='float32')
#
label_wrong = tiff.imread(all_labels_wrong[index])
label_wrong = np.array(label_wrong, dtype='float32')
#
label_good = tiff.imread(all_labels_good[index])
label_good = np.array(label_good, dtype='float32')
#
image = tiff.imread(all_images[index])
image = np.array(image, dtype='float32')
#
# dim_length = len(np.shape(label_over))
label_over[label_over == 4.0] = 3.0
label_wrong[label_wrong == 4.0] = 3.0
label_good[label_good == 4.0] = 3.0
label_under[label_under == 4.0] = 3.0
if self.dataset_tag == 'mnist':
label_over = np.where(label_over > 0.5, 1.0, 0.0)
label_under = np.where(label_under > 0.5, 1.0, 0.0)
label_wrong = np.where(label_wrong > 0.5, 1.0, 0.0)
if np.amax(label_good) != 1.0:
# sometimes, some preprocessing might give it as 0 - 255 range
label_good = np.where(label_good > 10.0, 1.0, 0.0)
else:
assert np.amax(label_good) == 1.0
label_good = np.where(label_good > 0.5, 1.0, 0.0)
# print(np.unique(label_over))
# label_over: h x w
# image: h x w x c
c_amount = len(np.shape(label_over))
            # Reshaping everything to make sure the order: channel x height x width
if c_amount == 3:
#
d1, d2, d3 = np.shape(label_over)
#
if d1 != min(d1, d2, d3):
#
assert d3 == min(d1, d2, d3)
#
label_over = np.transpose(label_over, (2, 0, 1))
label_under = np.transpose(label_under, (2, 0, 1))
label_wrong = np.transpose(label_wrong, (2, 0, 1))
label_good = np.transpose(label_good, (2, 0, 1))
#
elif c_amount == 2:
#
label_over = np.expand_dims(label_over, axis=0)
label_under = np.expand_dims(label_under, axis=0)
label_wrong = np.expand_dims(label_wrong, axis=0)
label_good = np.expand_dims(label_good, axis=0)
#
c_amount = len(np.shape(image))
#
if c_amount == 3:
#
d1, d2, d3 = np.shape(image)
#
if d1 != min(d1, d2, d3):
#
image = np.transpose(image, (2, 0, 1))
#
elif c_amount == 2:
#
image = np.expand_dims(image, axis=0)
#
imagename = all_images[index]
path_image, imagename = os.path.split(imagename)
imagename, imageext = os.path.splitext(imagename)
#
if self.data_aug is True:
#
augmentation = random.uniform(0, 1)
#
if augmentation > 0.5:
#
c, h, w = np.shape(image)
#
for channel in range(c):
#
image[channel, :, :] = np.flip(image[channel, :, :], axis=0).copy()
image[channel, :, :] = np.flip(image[channel, :, :], axis=1).copy()
#
label_over = np.flip(label_over, axis=1).copy()
label_over = np.flip(label_over, axis=2).copy()
label_under = np.flip(label_under, axis=1).copy()
label_under = np.flip(label_under, axis=2).copy()
label_wrong = np.flip(label_wrong, axis=1).copy()
label_wrong = np.flip(label_wrong, axis=2).copy()
label_good = np.flip(label_good, axis=1).copy()
label_good = np.flip(label_good, axis=2).copy()
#
return image, label_over, label_under, label_wrong, label_good, imagename
elif self.dataset_tag == 'lidc':
#
all_labels_over = glob.glob(os.path.join(self.label_over_folder, '*.tif'))
all_labels_over.sort()
#
all_labels_under = glob.glob(os.path.join(self.label_under_folder, '*.tif'))
all_labels_under.sort()
#
all_labels_wrong = glob.glob(os.path.join(self.label_wrong_folder, '*.tif'))
all_labels_wrong.sort()
#
all_labels_good = glob.glob(os.path.join(self.label_good_folder, '*.tif'))
all_labels_good.sort()
#
all_labels_true = glob.glob(os.path.join(self.label_true_folder, '*.tif'))
all_labels_true.sort()
#
all_images = glob.glob(os.path.join(self.image_folder, '*.tif'))
all_images.sort()
#
label_over = tiff.imread(all_labels_over[index])
label_over = np.array(label_over, dtype='float32')
#
label_under = tiff.imread(all_labels_under[index])
label_under = np.array(label_under, dtype='float32')
#
label_wrong = tiff.imread(all_labels_wrong[index])
label_wrong = np.array(label_wrong, dtype='float32')
#
label_good = tiff.imread(all_labels_good[index])
label_good = np.array(label_good, dtype='float32')
#
label_true = tiff.imread(all_labels_true[index])
label_true = np.array(label_true, dtype='float32')
#
image = tiff.imread(all_images[index])
image = np.array(image, dtype='float32')
#
# dim_length = len(np.shape(label_over))
# label_over[label_over == 4.0] = 3.0
# label_wrong[label_wrong == 4.0] = 3.0
# label_good[label_good == 4.0] = 3.0
# label_under[label_under == 4.0] = 3.0
# label_true[label_true == 4.0] = 3.0
# print(np.unique(label_over))
# label_over: h x w
# image: h x w x c
c_amount = len(np.shape(label_over))
            # Reshaping everything to make sure the order: channel x height x width
if c_amount == 3:
#
d1, d2, d3 = np.shape(label_over)
#
if d1 != min(d1, d2, d3):
#
assert d3 == min(d1, d2, d3)
#
label_over = np.transpose(label_over, (2, 0, 1))
label_under = np.transpose(label_under, (2, 0, 1))
label_wrong = np.transpose(label_wrong, (2, 0, 1))
label_good = np.transpose(label_good, (2, 0, 1))
label_true = np.transpose(label_true, (2, 0, 1))
#
elif c_amount == 2:
#
label_over = np.expand_dims(label_over, axis=0)
label_under = np.expand_dims(label_under, axis=0)
label_wrong = np.expand_dims(label_wrong, axis=0)
label_good = np.expand_dims(label_good, axis=0)
label_true = np.expand_dims(label_true, axis=0)
#
c_amount = len(np.shape(image))
#
if c_amount == 3:
#
d1, d2, d3 = np.shape(image)
#
if d1 != min(d1, d2, d3):
#
image = np.transpose(image, (2, 0, 1))
#
elif c_amount == 2:
#
image = np.expand_dims(image, axis=0)
#
imagename = all_images[index]
path_image, imagename = os.path.split(imagename)
imagename, imageext = os.path.splitext(imagename)
#
if self.data_aug is True:
#
augmentation = random.uniform(0, 1)
#
if augmentation > 0.5:
#
c, h, w = np.shape(image)
#
for channel in range(c):
#
image[channel, :, :] = np.flip(image[channel, :, :], axis=0).copy()
image[channel, :, :] = np.flip(image[channel, :, :], axis=1).copy()
#
label_over = np.flip(label_over, axis=1).copy()
label_over = np.flip(label_over, axis=2).copy()
label_under = np.flip(label_under, axis=1).copy()
label_under = np.flip(label_under, axis=2).copy()
label_wrong = np.flip(label_wrong, axis=1).copy()
label_wrong = np.flip(label_wrong, axis=2).copy()
label_good = np.flip(label_good, axis=1).copy()
label_good = np.flip(label_good, axis=2).copy()
label_true = np.flip(label_true, axis=1).copy()
label_true = np.flip(label_true, axis=2).copy()
#
return image, label_over, label_under, label_wrong, label_good, label_true, imagename
#
elif self.label_mode == 'binary':
all_true_labels = glob.glob(os.path.join(self.true_label_folder, '*.tif'))
all_true_labels.sort()
all_labels = glob.glob(os.path.join(self.label_folder, '*.tif'))
all_labels.sort()
all_images = glob.glob(os.path.join(self.image_folder, '*.tif'))
all_images.sort()
#
image = tiff.imread(all_images[index])
image = np.array(image, dtype='float32')
#
label = tiff.imread(all_labels[index])
label = np.array(label, dtype='float32')
#
true_label = tiff.imread(all_true_labels[index])
true_label = np.array(true_label, dtype='float32')
#
d1, d2, d3 = np.shape(label)
image = np.reshape(image, (d3, d1, d2))
label = np.reshape(label, (d3, d1, d2))
true_label = np.reshape(true_label, (d3, d1, d2))
#
imagename = all_images[index]
path_image, imagename = os.path.split(imagename)
imagename, imageext = os.path.splitext(imagename)
#
if self.data_aug is True:
#
augmentation = random.uniform(0, 1)
#
if augmentation < 0.25:
#
c, h, w = np.shape(image)
#
for channel in range(c):
#
image[channel, :, :] = np.flip(image[channel, :, :], axis=0).copy()
image[channel, :, :] = np.flip(image[channel, :, :], axis=1).copy()
#
label = np.flip(label, axis=1).copy()
label = np.flip(label, axis=2).copy()
#
true_label = np.flip(true_label, axis=1).copy()
true_label = np.flip(true_label, axis=2).copy()
#
elif augmentation < 0.5:
#
mean = 0.0
sigma = 0.15
noise = np.random.normal(mean, sigma, image.shape)
mask_overflow_upper = image + noise >= 1.0
mask_overflow_lower = image + noise < 0.0
noise[mask_overflow_upper] = 1.0
noise[mask_overflow_lower] = 0.0
image += noise
elif augmentation < 0.75:
#
c, h, w = np.shape(image)
#
for channel in range(c):
#
channel_ratio = random.uniform(0, 1)
#
image[channel, :, :] = image[channel, :, :] * channel_ratio
return image, label, true_label, imagename
elif self.label_mode == 'p_unet':
all_labels = glob.glob(os.path.join(self.label_folder, '*.tif'))
all_labels.sort()
all_images = glob.glob(os.path.join(self.image_folder, '*.tif'))
all_images.sort()
#
image = tiff.imread(all_images[index])
image = np.array(image, dtype='float32')
#
label = tiff.imread(all_labels[index])
label = np.array(label, dtype='float32')
#
d1, d2, d3 = np.shape(image)
image = np.reshape(image, (d3, d1, d2))
label = np.reshape(label, (1, d1, d2))
#
imagename = all_images[index]
path_image, imagename = os.path.split(imagename)
imagename, imageext = os.path.splitext(imagename)
#
if self.data_aug is True:
#
augmentation = random.uniform(0, 1)
#
if augmentation > 0.5:
#
c, h, w = np.shape(image)
#
for channel in range(c):
#
image[channel, :, :] = np.flip(image[channel, :, :], axis=0).copy()
image[channel, :, :] = np.flip(image[channel, :, :], axis=1).copy()
#
label = np.flip(label, axis=1).copy()
label = np.flip(label, axis=2).copy()
#
# elif augmentation < 0.5:
# #
# mean = 0.0
# sigma = 0.15
# noise = np.random.normal(mean, sigma, image.shape)
# mask_overflow_upper = image + noise >= 1.0
# mask_overflow_lower = image + noise < 0.0
# noise[mask_overflow_upper] = 1.0
# noise[mask_overflow_lower] = 0.0
# image += noise
#
# elif augmentation < 0.75:
# #
# c, h, w = np.shape(image)
# #
# for channel in range(c):
# #
# channel_ratio = random.uniform(0, 1)
# #
# image[channel, :, :] = image[channel, :, :] * channel_ratio
return image, label, imagename
def __len__(self):
# You should change 0 to the total size of your dataset.
return len(glob.glob(os.path.join(self.image_folder, '*.tif')))
def truncated_normal_(tensor, mean=0, std=1):
size = tensor.shape
tmp = tensor.new_empty(size + (4,)).normal_()
valid = (tmp < 2) & (tmp > -2)
ind = valid.max(-1, keepdim=True)[1]
tensor.data.copy_(tmp.gather(-1, ind).squeeze(-1))
tensor.data.mul_(std).add_(mean)
def init_weights(m):
if type(m) == nn.Conv2d or type(m) == nn.ConvTranspose2d:
nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
# nn.init.normal_(m.weight, std=0.001)
# nn.init.normal_(m.bias, std=0.001)
# truncated_normal_(m.bias, mean=0, std=0.001)
def init_weights_orthogonal_normal(m):
if type(m) == nn.Conv2d or type(m) == nn.ConvTranspose2d:
nn.init.orthogonal_(m.weight)
# truncated_normal_(m.bias, mean=0, std=0.001)
#nn.init.normal_(m.bias, std=0.001)
def l2_regularisation(m):
l2_reg = None
for W in m.parameters():
if l2_reg is None:
l2_reg = W.norm(2)
else:
l2_reg = l2_reg + W.norm(2)
return l2_reg
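# Usage sketch (illustrative only): apply the Kaiming initialiser above to a
# tiny hypothetical network, then compute the L2 penalty that can be added to a
# task loss (the 1e-5 weighting is an assumed value).
def _example_init_and_l2_usage():
    net = nn.Sequential(nn.Conv2d(1, 8, 3, padding=1), nn.ReLU(), nn.Conv2d(8, 2, 1))
    net.apply(init_weights)  # walks every submodule recursively
    penalty = 1e-5 * l2_regularisation(net)
    return penalty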
def save_mask_prediction_example(mask, pred, iter):
plt.imshow(pred[0,:,:],cmap='Greys')
plt.savefig('images/'+str(iter)+"_prediction.png")
plt.imshow(mask[0,:,:],cmap='Greys')
plt.savefig('images/'+str(iter)+"_mask.png")
def test_punet(net, testdata, save_path, sampling_times):
#
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net.eval()
test_iou = 0
test_generalized_energy_distance = 0
epoch_noisy_labels = []
epoch_noisy_segs = []
# sampling_times = 10
# save_path = '../../projects_data/Exp_Results'
#
try:
os.mkdir(save_path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
#
save_path = save_path + '/Visual_segmentation'
#
try:
os.mkdir(save_path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
#
for no_eval, (patch_eval, mask_eval_over, mask_eval_under, mask_eval_wrong, mask_eval_good, mask_name_eval) in enumerate(testdata):
#
if no_eval < 30:
#
patch_eval = patch_eval.to(device)
mask_eval_over = mask_eval_over.to(device)
mask_eval_under = mask_eval_under.to(device)
mask_eval_wrong = mask_eval_wrong.to(device)
mask_eval_good = mask_eval_good.to(device)
#
for j in range(sampling_times):
#
net.eval()
# segm input doesn't matter
net.forward(patch_eval, mask_eval_good, training=False)
seg_sample = net.sample(testing=True)
seg_sample = (torch.sigmoid(seg_sample) > 0.5).float()
(b, c, h, w) = seg_sample.shape
#
if j == 0:
seg_evaluate = seg_sample
else:
seg_evaluate += seg_sample
#
epoch_noisy_segs.append(seg_sample.cpu().detach().numpy())
#
if no_eval < 10:
#
save_name = save_path + '/test_' + str(no_eval) + '_sample_' + str(j) + '_seg.png'
#
plt.imsave(save_name, seg_sample.reshape(h, w).cpu().detach().numpy(), cmap='gray')
#
seg_evaluate = seg_evaluate / sampling_times
#
if no_eval < 10:
#
gt_save_name = save_path + '/gt_' + str(no_eval) + '.png'
#
plt.imsave(gt_save_name, mask_eval_good.reshape(h, w).cpu().detach().numpy(), cmap='gray')
#
val_iou = segmentation_scores(mask_eval_good.cpu().detach().numpy(), seg_evaluate.cpu().detach().numpy(), 2)
test_iou += val_iou
epoch_noisy_labels = [mask_eval_good.cpu().detach().numpy(), mask_eval_over.cpu().detach().numpy(), mask_eval_under.cpu().detach().numpy(), mask_eval_wrong.cpu().detach().numpy()]
ged = generalized_energy_distance(epoch_noisy_labels, epoch_noisy_segs, 2)
test_generalized_energy_distance += ged
#
test_iou = test_iou / no_eval
test_generalized_energy_distance = test_generalized_energy_distance / no_eval
#
result_dictionary = {'Test IoU': str(test_iou), 'Test GED': str(test_generalized_energy_distance)}
ff_path = save_path + '/test_result_data.txt'
ff = open(ff_path, 'w')
ff.write(str(result_dictionary))
ff.close()
#
print('Test iou: ' + str(test_iou))
print('Test generalised energy distance: ' + str(test_generalized_energy_distance))
def evaluate_punet(net, val_data, class_no, sampling_no):
#
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
validate_iou = 0
generalized_energy_distance_epoch = 0
#
for no_eval, (patch_eval, mask_eval_over, mask_eval_under, mask_eval_wrong, mask_eval_true, mask_name_eval) in enumerate(val_data):
#
patch_eval = patch_eval.to(device)
mask_eval_over = mask_eval_over.to(device)
mask_eval_under = mask_eval_under.to(device)
mask_eval_wrong = mask_eval_wrong.to(device)
mask_eval_true = mask_eval_true.to(device)
epoch_noisy_segs = []
#
for j in range(sampling_no):
net.eval()
# segm input doesn't matter
net.forward(patch_eval, mask_eval_wrong, training=False)
seg_sample = net.sample(testing=True)
seg_sample = (torch.sigmoid(seg_sample) > 0.5).float()
#
if j == 0:
#
seg_evaluate = seg_sample
#
else:
#
seg_evaluate += seg_sample
#
epoch_noisy_segs.append(seg_sample.cpu().detach().numpy())
#
seg_evaluate = seg_evaluate / sampling_no
#
val_iou = segmentation_scores(mask_eval_true.cpu().detach().numpy(), seg_evaluate.cpu().detach().numpy(), class_no)
epoch_noisy_labels = [mask_eval_true.cpu().detach().numpy(), mask_eval_over.cpu().detach().numpy(), mask_eval_under.cpu().detach().numpy(), mask_eval_wrong.cpu().detach().numpy()]
# epoch_noisy_segs = [seg_good.cpu().detach().numpy(), seg_over.cpu().detach().numpy(), seg_under.cpu().detach().numpy(), seg_wrong.cpu().detach().numpy()]
ged = generalized_energy_distance(epoch_noisy_labels, epoch_noisy_segs, class_no)
validate_iou += val_iou
generalized_energy_distance_epoch += ged
#
return validate_iou / (no_eval), generalized_energy_distance_epoch / (no_eval)
def segmentation_scores(label_trues, label_preds, n_class):
'''
:param label_trues:
:param label_preds:
:param n_class:
:return:
'''
assert len(label_trues) == len(label_preds)
if n_class == 2:
#
output_zeros = np.zeros_like(label_preds)
output_ones = np.ones_like(label_preds)
label_preds = np.where((label_preds > 0.5), output_ones, output_zeros)
label_trues += 1
label_preds += 1
label_preds = np.asarray(label_preds, dtype='int8').copy()
label_trues = np.asarray(label_trues, dtype='int8')
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Unit tests for complementary unittest-ing tools"""
import numpy as np
from os.path import exists
from mvpa2.base.externals import versions
from mvpa2.testing.tools import *
from mvpa2.testing.sweep import *
import mvpa2.tests as mvtests
def test_assert_objectarray_equal():
if versions['numpy'] < '1.4':
raise SkipTest("Skipping because of known segfaults with numpy < 1.4")
# explicit dtype so we could test with numpy < 1.6
a = np.array([np.array([0, 1]), np.array(1)], dtype=object)
b = np.array([np.array([0, 1]), np.array(1)], dtype=object)
# they should be ok for both types of comparison
for strict in True, False:
# good with self
assert_objectarray_equal(a, a, strict=strict)
# good with a copy
assert_objectarray_equal(a, a.copy(), strict=strict)
# good while operating with an identical one
# see http://projects.scipy.org/numpy/ticket/2117
assert_objectarray_equal(a, b, strict=strict)
# now check if we still fail for a good reason
for value_equal, b in (
(False, np.array(1)),
(False, np.array([1])),
(False, np.array([np.array([0, 1]), np.array((1, 2))], dtype=object)),
(False, np.array([np.array([0, 1])
from __future__ import (absolute_import, division,print_function, unicode_literals)
from builtins import *
import numpy as np
import cv2
import SimpleITK as sitk
from builtins import *
from scipy.spatial import distance
from scipy import stats
import sys
import time
############### FUNCTIONS ##########################
def imcomplement(im):
if np.max(im)>1:
imout=255-im
else:
imout=1-im
return imout
def mat2gray(img):
max_img=np.max(img)
min_img=np.min(img)
imgout=(img-min_img)/(max_img-min_img)
return imgout
def im2double(img):
imgout=img.astype('float32')
imgout= mat2gray(imgout)
return imgout
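# Quick numeric check (added for illustration): im2double rescales any input range
# to [0, 1]; for example a uint8 ramp 0/128/255 maps to roughly 0.0/0.502/1.0.
if __name__ == "__main__":
    print(im2double(np.array([[0, 128, 255]], dtype='uint8')))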
def imreconstruct(marker,mask):
markeritk=sitk.GetImageFromArray(marker)
maskitk=sitk.GetImageFromArray(mask)
recfilt=sitk.ReconstructionByDilationImageFilter()
rectoutitk=recfilt.Execute(markeritk,maskitk)
rectout=sitk.GetArrayFromImage(rectoutitk)
return rectout
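# Tiny sketch (added for illustration) of reconstruction by dilation: the marker is
# grown inside the mask, so only the mask component touched by the marker survives.
# Uses the numpy and SimpleITK imports at the top of this file.
if __name__ == "__main__":
    _mask = np.array([[0, 1, 1, 0, 1],
                      [0, 1, 1, 0, 1]], dtype='float32')
    _marker = np.zeros_like(_mask)
    _marker[0, 1] = 1.0
    print(imreconstruct(_marker, _mask))  # left component -> 1, isolated right column -> 0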
def eigen_cov(x,y):
mx=np.mean(x)
my=np.mean(y)
x=x-mx
y=y-my
cxx=np.var(x)
cxy=0
cyy=np.var(y);
nx=len(x)
for ct in range(nx):
cxy=cxy+x[ct]*y[ct];
cxy=cxy/nx;
C=np.zeros((2,2))
C[0,0]=cxx
C[0,1]=cxy
C[1,0]=cxy
C[1,1]=cyy
D,V=np.linalg.eig(C)
return V,D
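# Consistency check (added for illustration): eigen_cov assembles the population
# (biased) covariance matrix by hand, so its eigenvalues should agree with those of
# numpy.cov(x, y, bias=True), up to ordering and eigenvector sign.
if __name__ == "__main__":
    _x = np.array([1.0, 2.0, 3.0, 4.0])
    _y = np.array([2.0, 1.0, 4.0, 3.0])
    _V, _D = eigen_cov(_x, _y)
    _Dref, _Vref = np.linalg.eig(np.cov(_x, _y, bias=True))
    print(np.sort(_D), np.sort(_Dref))  # both should be [0.5, 2.0]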
def is_inside(img,x,y):
x=int(x)
y=int(y)
if(x>0 and y>0 and x<img.shape[1] and y<img.shape[0]):
return True
else:
return False
def improfile(img,x,y,n):
xm=x[0]
x0=x[1]
ym=y[0]
y0=y[1]
a = np.arctan((y0 - ym) / (x0 - xm))
i=range(0,100,int(100/n))
cx=np.squeeze(np.zeros((1,len(i))))
cy=np.squeeze(np.zeros((1,len(i))))
c=np.squeeze(np.zeros((1,len(i))))
ct=0
for t in range(0,100,int(100/n)):
tf=t/100.0
cx[ct] = int(xm + (x0 - xm)*tf)
cy[ct] = int(ym + (y0 - ym)*tf)
if(is_inside(img,cx[ct],cy[ct])):
c[ct]=img[int(cy[ct]), int(cx[ct])]
else:
c[ct]=1;
ct=ct+1
return c,cx,cy
def filter_result3(img,bw_result,ths,thm):
bw_result_orig=np.copy(bw_result)
"""
Test the equivalence of truncated Taylor series of equivalent expressions.
These tests could be optional and should provide no additional coverage.
This module is originally intended to test only scalar functions.
"""
import math
from numpy.testing import assert_allclose, assert_array_almost_equal
from numpy.testing import run_module_suite, TestCase
from numpy.testing.decorators import skipif
import numpy
import scipy.special
import algopy.nthderiv
from algopy.utpm import *
try:
import mpmath
except ImportError:
mpmath = None
def sample_unit_radius(utpm_shape=(5, 3, 4, 5), eps=1e-1):
"""
Sample an ndarray between -1 and 1.
@param utpm_shape: an array shape
@param eps: push the random numbers this far away from 0 and 1
@return: a random UTPM object
"""
if len(utpm_shape) < 2:
raise ValueError
tmp = numpy.random.rand(*utpm_shape)
return UTPM((tmp - 0.5)*(1-2*eps)*2)
def sample_unit(utpm_shape=(5, 3, 4, 5), eps=1e-1):
"""
Sample an ndarray in the unit interval.
@param utpm_shape: an array shape
@param eps: push the random numbers this far away from 0 and 1
@return: a random UTPM object
"""
if len(utpm_shape) < 2:
raise ValueError
tmp = numpy.random.rand(*utpm_shape)
return UTPM(tmp * (1 - 2*eps) + eps)
def sample_randn(utpm_shape=(5, 3, 4, 5)):
"""
Sample an ndarray of random normal variables.
@param utpm_shape: an array shape
@return: a random UTPM object
"""
if len(utpm_shape) < 2:
raise ValueError
return UTPM(numpy.random.randn(*utpm_shape))
def sample_nonzero(utpm_shape=(5, 3, 4, 5), eps=1e-1):
"""
Sample an ndarray of random normal variables then push them away from zero.
@param utpm_shape: an array shape
@param eps: push the random numbers this far away from zero
@return: a random UTPM object
"""
if len(utpm_shape) < 2:
raise ValueError
tmp = numpy.random.randn(*utpm_shape)
return UTPM(tmp + eps*numpy.sign(tmp))
def sample_positive(utpm_shape=(5, 3, 4, 5), eps=1e-1):
"""
Sample an ndarray of random normal variables then make them positive.
@param utpm_shape: an array shape
@param eps: push the random numbers this far away from zero
@return: a random UTPM object
"""
if len(utpm_shape) < 2:
raise ValueError
tmp = numpy.random.randn(*utpm_shape)
return UTPM(numpy.abs(tmp) + eps)
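# Shape-convention note (added for illustration): in algopy the first two axes of a
# UTPM object's .data array are the truncation degree D and the number of directions
# P, so the default utpm_shape=(5, 3, 4, 5) above carries degree-5 Taylor data in 3
# directions for a (4, 5)-shaped variable.
if __name__ == "__main__":
    _x = sample_randn(utpm_shape=(5, 3, 2, 2))
    print(_x.data.shape)  # (5, 3, 2, 2)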
class Test_PlainIdentities(TestCase):
"""
Test scalar math identities involving only elementary functions in numpy.
"""
def test_exp_log_v1(self):
x = sample_randn()
y = UTPM.exp(x)
x2 = UTPM.log(y)
y2 = UTPM.exp(x2)
assert_allclose(x.data, x2.data)
assert_allclose(y.data, y2.data)
def test_exp_log_v2(self):
x = sample_randn()
x.data[1] = 1.
y = UTPM.exp(x)
x2 = UTPM.log(y)
y2 = UTPM.exp(x2)
assert_allclose(x.data, x2.data)
assert_allclose(y.data, y2.data)
def test_expm1_log1p(self):
x = sample_randn()
y = UTPM.expm1(x)
x2 = UTPM.log1p(y)
y2 = UTPM.expm1(x2)
assert_allclose(x.data, x2.data)
assert_allclose(y.data, y2.data)
def test_expm1_exp(self):
x = sample_randn()
x.data[0] = 1.
y1 = UTPM.expm1(x)
y2 = UTPM.exp(x) - 1.
assert_allclose(y1.data, y2.data)
def test_log1p_log(self):
x = sample_positive() - 0.5
y1 = UTPM.log1p(x)
y2 = UTPM.log(1. + x)
assert_allclose(y1.data, y2.data)
def test_pow_mul(self):
x = sample_randn()
y1 = x**3
y2 = x*x*x
assert_allclose(y1.data, y2.data)
def test_reciprocal_div(self):
x = sample_nonzero()
y1 = UTPM.reciprocal(x)
y2 = 1 / x
assert_allclose(y1.data, y2.data)
def test_sqrt_square(self):
x = sample_positive()
y = UTPM.sqrt(x)
x2 = UTPM.square(y)
y2 = UTPM.sqrt(x2)
assert_allclose(x.data, x2.data)
assert_allclose(y.data, y2.data)
def test_sqrt_mul(self):
x = sample_positive()
y = UTPM.sqrt(x)
x2 = y * y
y2 = UTPM.sqrt(x2)
assert_allclose(x.data, x2.data)
assert_allclose(y.data, y2.data)
def test_square_mul_v1(self):
x = sample_randn(utpm_shape=(5, 3, 4, 5))
y1 = UTPM.square(x)
y2 = x*x
assert_allclose(y1.data, y2.data)
def test_square_mul_v2(self):
x = sample_randn(utpm_shape=(4, 3, 4, 5))
y1 = UTPM.square(x)
y2 = x*x
assert_allclose(y1.data, y2.data)
def test_sign_tanh(self):
x = sample_nonzero()
k = 200.
y = UTPM.tanh(k*x)
z = UTPM.sign(x)
assert_allclose(y.data, z.data)
def test_abs_tanh(self):
x = sample_nonzero()
k = 200.
y = x*UTPM.tanh(k*x)
z = abs(x)
assert_allclose(y.data, z.data)
def test_abs_sign(self):
x = sample_randn()
y = x * UTPM.sign(x)
z = abs(x)
assert_allclose(y.data, z.data)
def test_cos_squared_plus_sin_squared(self):
x = sample_randn()
y = UTPM.cos(x)**2 + UTPM.sin(x)**2 - 1
assert_array_almost_equal(y.data, numpy.zeros_like(y.data))
def test_cosh_squared_minus_sinh_squared(self):
x = sample_randn()
y = UTPM.cosh(x)**2 - UTPM.sinh(x)**2 - 1
assert_array_almost_equal(y.data, numpy.zeros_like(y.data))
def test_tan_sin_cos(self):
x = sample_randn()
y1 = UTPM.tan(x)
y2 = UTPM.sin(x) / UTPM.cos(x)
assert_allclose(y1.data, y2.data)
def test_tanh_sinh_cosh(self):
x = sample_randn()
y1 = UTPM.tanh(x)
y2 = UTPM.sinh(x) / UTPM.cosh(x)
assert_allclose(y1.data, y2.data)
def test_arcsin(self):
x = sample_unit_radius()
y = UTPM.arcsin(x)
x2 = UTPM.sin(y)
y2 = UTPM.arcsin(x2)
assert_allclose(x.data, x2.data)
assert_allclose(y.data, y2.data)
def test_arccos(self):
x = sample_unit_radius()
y = UTPM.arccos(x)
x2 = UTPM.cos(y)
y2 = UTPM.arccos(x2)
assert_allclose(x.data, x2.data)
assert_allclose(y.data, y2.data)
def test_arctan(self):
x = sample_unit_radius() * math.pi / 2.
y = UTPM.tan(x)
x2 = UTPM.arctan(y)
y2 = UTPM.tan(x2)
assert_allclose(x.data, x2.data)
assert_allclose(y.data, y2.data)
def test_negative_sin_cos(self):
x = sample_randn()
y1 = UTPM.negative(UTPM.sin(x))
y2 = UTPM.cos(x + math.pi / 2.)
assert_allclose(y1.data, y2.data)
def test_absolute_abs_cos(self):
x = sample_randn()
y1 = abs(x)
y2 = UTPM.absolute(x)
assert_allclose(y1.data, y2.data)
def test_minimum_cos(self):
x = sample_randn()
c1 = UTPM.cos(x)
c2 = UTPM.cos(x - math.pi)
y1 = UTPM.minimum(c1, c2)
y2 = UTPM.negative(UTPM.absolute(c1))
y3 = -abs(c1)
assert_allclose(y1.data, y2.data)
assert_allclose(y1.data, y3.data)
def test_maximum_cos(self):
x = sample_randn()
c1 = UTPM.cos(x)
c2 = UTPM.cos(x - math.pi)
y1 = UTPM.maximum(c1, c2)
y2 = UTPM.absolute(c1)
y3 = abs(c1)
assert_allclose(y1.data, y2.data)
assert_allclose(y1.data, y3.data)
class Test_SpecialIdentities(TestCase):
"""
Test scalar math identities involving special functions in scipy.
"""
def test_hyp1f1_exp_v1(self):
x = sample_randn()
y1 = UTPM.hyp1f1(1., 1., x)
y2 = UTPM.exp(x)
assert_allclose(y1.data, y2.data)
def test_hyp1f1_exp_v2(self):
x = sample_randn()
y1 = UTPM.hyp1f1(0.5, -0.5, x)
y2 = UTPM.exp(x) * (1. - 2*x)
assert_allclose(y1.data, y2.data)
def test_hyp1f1_expm1_exp(self):
x = sample_nonzero()
y1 = UTPM.hyp1f1(1., 2., x)
y2 = UTPM.expm1(x) / x
y3 = (UTPM.exp(x) - 1.) / x
assert_allclose(y1.data, y2.data)
assert_allclose(y1.data, y3.data)
@skipif(mpmath is None)
def test_dpm_hyp1f1_exp_v1(self):
x = sample_randn()
y1 = UTPM.dpm_hyp1f1(1., 1., x)
y2 = UTPM.exp(x)
assert_allclose(y1.data, y2.data)
@skipif(mpmath is None)
def test_dpm_hyp1f1_exp_v2(self):
x = sample_randn()
y1 = UTPM.dpm_hyp1f1(0.5, -0.5, x)
y2 = UTPM.exp(x) * (1. - 2*x)
assert_allclose(y1.data, y2.data)
@skipif(mpmath is None)
def test_dpm_hyp1f1_expm1_exp(self):
x = sample_nonzero()
y1 = UTPM.dpm_hyp1f1(1., 2., x)
y2 = UTPM.expm1(x) / x
y3 = (UTPM.exp(x) - 1.) / x
assert_allclose(y1.data, y2.data)
assert_allclose(y1.data, y3.data)
def test_psi_psi_v1(self):
x = sample_positive()
y1 = UTPM.psi(x + 1)
y2 = UTPM.psi(x) + 1 / x
assert_allclose(y1.data, y2.data)
def test_psi_psi_v2(self):
x = sample_positive()
y1 = UTPM.psi(2*x)
y2 = 0.5 * UTPM.psi(x) + 0.5 * UTPM.psi(x + 0.5) + numpy.log(2)
assert_allclose(y1.data, y2.data)
'''
episodestats.py
implements statistic that are used in producing employment statistics for the
lifecycle model
'''
import h5py
import numpy as np
import numpy_financial as npf
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
from scipy.stats import norm
#import locale
from tabulate import tabulate
import pandas as pd
import scipy.optimize
from tqdm import tqdm_notebook as tqdm
from . empstats import Empstats
from scipy.stats import gaussian_kde
#locale.setlocale(locale.LC_ALL, 'fi_FI')
def modify_offsettext(ax,text):
'''
For y axis
'''
x_pos = 0.0
y_pos = 1.0
horizontalalignment='left'
verticalalignment='bottom'
offset = ax.yaxis.get_offset_text()
#value=offset.get_text()
# value=float(value)
# if value>=1e12:
# text='biljoonaa'
# elif value>1e9:
# text=str(value/1e9)+' miljardia'
# elif value==1e9:
# text=' miljardia'
# elif value>1e6:
# text=str(value/1e6)+' miljoonaa'
# elif value==1e6:
# text='miljoonaa'
# elif value>1e3:
# text=str(value/1e3)+' tuhatta'
# elif value==1e3:
# text='tuhatta'
offset.set_visible(False)
ax.text(x_pos, y_pos, text, transform=ax.transAxes,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment)
class Labels():
def get_labels(self,language='English'):
labels={}
if language=='English':
labels['osuus tilassa x']='Proportion in state {} [%]'
labels['age']='Age [y]'
labels['ratio']='Proportion [%]'
labels['unemp duration']='Length of unemployment [y]'
labels['scaled freq']='Scaled frequency'
labels['probability']='probability'
labels['telp']='Employee pension premium'
labels['sairausvakuutus']='Health insurance'
labels['työttömyysvakuutusmaksu']='Unemployment insurance'
labels['puolison verot']='Partners taxes'
labels['taxes']='Taxes'
labels['asumistuki']='Housing benefit'
labels['toimeentulotuki']='Supplementary benefit'
labels['tyottomyysturva']='Unemployment benefit'
labels['paivahoito']='Daycare'
labels['elake']='Pension'
labels['tyollisyysaste']='Employment rate'
labels['tyottomien osuus']='Proportion of unemployed'
labels['havainto']='Observation'
labels['tyottomyysaste']='Unemployment rate [%]'
labels['tyottomien osuus']='Proportion of unemployed [%]'
labels['tyollisyysaste %']='Employment rate [%]'
labels['ero osuuksissa']='Difference in proportions [%]'
labels['osuus']='proportion'
labels['havainto, naiset']='data, women'
labels['havainto, miehet']='data, men'
labels['palkkasumma']='Wage sum [euro]'
labels['Verokiila %']='Tax wedge [%]'
labels['Työnteko [hlö/htv]']='Employment [persons/FTE]'
labels['Työnteko [htv]']='Employment [FTE]'
labels['Työnteko [hlö]']='Employment [persons]'
labels['Työnteko [miljoonaa hlö/htv]']='Employment [million persons/FTE]'
labels['Työnteko [miljoonaa htv]']='Employment [million FTE]'
labels['Työnteko [miljoonaa hlö]']='Employment [million persons]'
labels['Osatyönteko [%-yks]']='Part-time work [pp]'
labels['Muut tulot [euroa]']='Other income [euro]'
labels['Henkilöitä']='Persons'
labels['Verot [euroa]']='Taxes [euro]'
labels['Verot [[miljardia euroa]']='Taxes [billion euro]'
labels['Verokertymä [euroa]']='Tax revenue [euro]'
labels['Verokertymä [miljardia euroa]']='Tax revenue [billion euro]'
labels['Muut tarvittavat tulot [euroa]']='Other required income [euro]'
labels['Muut tarvittavat tulot [miljardia euroa]']='Other required income [billion euro]'
labels['malli']='Life cycle model'
else:
labels['osuus tilassa x']='Osuus tilassa {} [%]'
labels['age']='Ikä [v]'
labels['ratio']='Osuus tilassa [%]'
labels['unemp duration']='työttömyysjakson pituus [v]'
labels['scaled freq']='skaalattu taajuus'
labels['probability']='todennäköisyys'
labels['telp']='TEL-P'
labels['sairausvakuutus']='Sairausvakuutus'
labels['työttömyysvakuutusmaksu']='Työttömyysvakuutusmaksu'
labels['puolison verot']='puolison verot'
labels['taxes']='Verot'
labels['asumistuki']='Asumistuki'
labels['toimeentulotuki']='Toimeentulotuki'
labels['tyottomyysturva']='Työttömyysturva'
labels['paivahoito']='Päivähoito'
labels['elake']='Eläke'
labels['tyollisyysaste']='työllisyysaste'
labels['tyottomien osuus']='työttömien osuus'
labels['havainto']='havainto'
labels['tyottomyysaste']='Työttömyysaste [%]'
labels['tyottomien osuus']='Työttömien osuus väestöstö [%]'
labels['tyollisyysaste %']='Työllisyysaste [%]'
labels['ero osuuksissa']='Ero osuuksissa [%]'
labels['osuus']='Osuus'
labels['havainto, naiset']='havainto, naiset'
labels['havainto, miehet']='havainto, miehet'
labels['palkkasumma']='Palkkasumma [euroa]'
labels['Verokiila %']='Verokiila [%]'
labels['Työnteko [hlö/htv]']='Työnteko [hlö/htv]'
labels['Työnteko [htv]']='Työnteko [htv]'
labels['Työnteko [hlö]']='Työnteko [hlö]'
labels['Työnteko [miljoonaa hlö/htv]']='Työnteko [miljoonaa hlö/htv]'
labels['Työnteko [miljoonaa htv]']='Työnteko [miljoonaa htv]'
labels['Työnteko [miljoonaa hlö]']='Työnteko [miljoonaa hlö]'
labels['Osatyönteko [%-yks]']='Osa-aikatyössä [%-yks]'
labels['Muut tulot [euroa]']='Muut tulot [euroa]'
labels['Henkilöitä']='Henkilöitä'
labels['Verot [euroa]']='Verot [euroa]'
labels['Verot [[miljardia euroa]']='Verot [[miljardia euroa]'
labels['Verokertymä [euroa]']='Verokertymä [euroa]'
labels['Verokertymä [miljardia euroa]']='Verokertymä [miljardia euroa]'
labels['Muut tarvittavat tulot [euroa]']='Muut tarvittavat tulot [euroa]'
labels['Muut tarvittavat tulot [miljardia euroa]']='Muut tarvittavat tulot [miljardia euroa]'
labels['malli']='elinkaarimalli'
return labels
class EpisodeStats():
def __init__(self,timestep,n_time,n_emps,n_pop,env,minimal,min_age,max_age,min_retirementage,year=2018,version=3,params=None,gamma=0.92,lang='English'):
self.version=version
self.gamma=gamma
self.params=params
self.lab=Labels()
self.reset(timestep,n_time,n_emps,n_pop,env,minimal,min_age,max_age,min_retirementage,year,params=params,lang=lang)
print('version',version)
def reset(self,timestep,n_time,n_emps,n_pop,env,minimal,min_age,max_age,min_retirementage,year,version=None,params=None,lang=None,dynprog=False):
self.min_age=min_age
self.max_age=max_age
self.min_retirementage=min_retirementage
self.minimal=minimal
if params is not None:
self.params=params
if lang is None:
self.language='English'
else:
self.language=lang
if version is not None:
self.version=version
self.setup_labels()
self.n_employment=n_emps
self.n_time=n_time
self.timestep=timestep # 0.25 = a 3-month step
self.inv_timestep=int(np.round(1/self.timestep)) # must be an integer
self.n_pop=n_pop
self.year=year
self.env=env
self.reaalinen_palkkojenkasvu=0.016
self.palkkakerroin=(0.8*1+0.2*1.0/(1+self.reaalinen_palkkojenkasvu))**self.timestep
self.elakeindeksi=(0.2*1+0.8*1.0/(1+self.reaalinen_palkkojenkasvu))**self.timestep
self.dynprog=dynprog
if self.minimal:
self.version=0
if self.version in set([0,101]):
self.n_groups=1
else:
self.n_groups=6
self.empstats=Empstats(year=self.year,max_age=self.max_age,n_groups=self.n_groups,timestep=self.timestep,n_time=self.n_time,
min_age=self.min_age)
self.init_variables()
def init_variables(self):
n_emps=self.n_employment
self.empstate=np.zeros((self.n_time,n_emps))
self.gempstate=np.zeros((self.n_time,n_emps,self.n_groups))
self.deceiced=np.zeros((self.n_time,1))
self.alive=np.zeros((self.n_time,1))
self.galive=np.zeros((self.n_time,self.n_groups))
self.rewstate=np.zeros((self.n_time,n_emps))
self.poprewstate=np.zeros((self.n_time,self.n_pop))
self.salaries_emp=np.zeros((self.n_time,n_emps))
#self.salaries=np.zeros((self.n_time,self.n_pop))
self.actions=np.zeros((self.n_time,self.n_pop))
self.popempstate=np.zeros((self.n_time,self.n_pop))
self.popunemprightleft=np.zeros((self.n_time,self.n_pop))
self.popunemprightused=np.zeros((self.n_time,self.n_pop))
self.tyoll_distrib_bu=np.zeros((self.n_time,self.n_pop))
self.unemp_distrib_bu=np.zeros((self.n_time,self.n_pop))
self.siirtyneet=np.zeros((self.n_time,n_emps))
self.siirtyneet_det=np.zeros((self.n_time,n_emps,n_emps))
self.pysyneet=np.zeros((self.n_time,n_emps))
self.aveV=np.zeros((self.n_time,self.n_pop))
self.time_in_state=np.zeros((self.n_time,n_emps))
self.stat_tyoura=np.zeros((self.n_time,n_emps))
self.stat_toe=np.zeros((self.n_time,n_emps))
self.stat_pension=np.zeros((self.n_time,n_emps))
self.stat_paidpension=np.zeros((self.n_time,n_emps))
self.out_of_work=np.zeros((self.n_time,n_emps))
self.stat_unemp_len=np.zeros((self.n_time,self.n_pop))
self.stat_wage_reduction=np.zeros((self.n_time,n_emps))
self.stat_wage_reduction_g=np.zeros((self.n_time,n_emps,self.n_groups))
self.infostats_group=np.zeros((self.n_pop,1))
self.infostats_taxes=np.zeros((self.n_time,1))
self.infostats_wagetaxes=np.zeros((self.n_time,1))
self.infostats_taxes_distrib=np.zeros((self.n_time,n_emps))
self.infostats_etuustulo=np.zeros((self.n_time,1))
self.infostats_etuustulo_group=np.zeros((self.n_time,self.n_groups))
self.infostats_perustulo=np.zeros((self.n_time,1))
self.infostats_palkkatulo=np.zeros((self.n_time,1))
self.infostats_palkkatulo_eielakkeella=np.zeros((self.n_time,1))
self.infostats_palkkatulo_group=np.zeros((self.n_time,self.n_groups))
self.infostats_palkkatulo_eielakkeella_group=np.zeros((self.n_time,1))
self.infostats_ansiopvraha=np.zeros((self.n_time,1))
self.infostats_ansiopvraha_group=np.zeros((self.n_time,self.n_groups))
self.infostats_asumistuki=np.zeros((self.n_time,1))
self.infostats_asumistuki_group=np.zeros((self.n_time,self.n_groups))
self.infostats_valtionvero=np.zeros((self.n_time,1))
self.infostats_valtionvero_group=np.zeros((self.n_time,self.n_groups))
self.infostats_kunnallisvero=np.zeros((self.n_time,1))
self.infostats_kunnallisvero_group=np.zeros((self.n_time,self.n_groups))
self.infostats_valtionvero_distrib=np.zeros((self.n_time,n_emps))
self.infostats_kunnallisvero_distrib=np.zeros((self.n_time,n_emps))
self.infostats_ptel=np.zeros((self.n_time,1))
self.infostats_tyotvakmaksu=np.zeros((self.n_time,1))
self.infostats_tyoelake=np.zeros((self.n_time,1))
self.infostats_kokoelake=np.zeros((self.n_time,1))
self.infostats_opintotuki=np.zeros((self.n_time,1))
self.infostats_isyyspaivaraha=np.zeros((self.n_time,1))
self.infostats_aitiyspaivaraha=np.zeros((self.n_time,1))
self.infostats_kotihoidontuki=np.zeros((self.n_time,1))
self.infostats_sairauspaivaraha=np.zeros((self.n_time,1))
self.infostats_toimeentulotuki=np.zeros((self.n_time,1))
self.infostats_tulot_netto=np.zeros((self.n_time,1))
self.infostats_pinkslip=np.zeros((self.n_time,n_emps))
self.infostats_pop_pinkslip=np.zeros((self.n_time,self.n_pop))
self.infostats_chilren18_emp=np.zeros((self.n_time,n_emps))
self.infostats_chilren7_emp=np.zeros((self.n_time,n_emps))
self.infostats_chilren18=np.zeros((self.n_time,1))
self.infostats_chilren7=np.zeros((self.n_time,1))
self.infostats_tyelpremium=np.zeros((self.n_time,self.n_pop))
self.infostats_paid_tyel_pension=np.zeros((self.n_time,self.n_pop))
self.infostats_sairausvakuutus=np.zeros((self.n_time))
self.infostats_pvhoitomaksu=np.zeros((self.n_time,self.n_pop))
self.infostats_ylevero=np.zeros((self.n_time,1))
self.infostats_ylevero_distrib=np.zeros((self.n_time,n_emps))
self.infostats_irr=np.zeros((self.n_pop,1))
self.infostats_npv0=np.zeros((self.n_pop,1))
self.infostats_mother_in_workforce=np.zeros((self.n_time,1))
self.infostats_children_under3=np.zeros((self.n_time,self.n_pop))
self.infostats_children_under7=np.zeros((self.n_time,self.n_pop))
self.infostats_unempwagebasis=np.zeros((self.n_time,self.n_pop))
self.infostats_unempwagebasis_acc=np.zeros((self.n_time,self.n_pop))
self.infostats_toe=np.zeros((self.n_time,self.n_pop))
self.infostats_ove=np.zeros((self.n_time,n_emps))
self.infostats_kassanjasen=np.zeros((self.n_time))
self.infostats_poptulot_netto=np.zeros((self.n_time,self.n_pop))
self.infostats_pop_wage=np.zeros((self.n_time,self.n_pop))
self.infostats_pop_pension=np.zeros((self.n_time,self.n_pop))
self.infostats_equivalent_income=np.zeros(self.n_time)
self.infostats_alv=np.zeros(self.n_time)
self.infostats_puoliso=np.zeros(self.n_time)
self.pop_predrew=np.zeros((self.n_time,self.n_pop))
if self.version==101:
self.infostats_savings=np.zeros((self.n_time,self.n_pop))
self.sav_actions=np.zeros((self.n_time,self.n_pop))
def add(self,n,act,r,state,newstate,q=None,debug=False,plot=False,aveV=None,pred_r=None):
if self.version==0:
emp,_,_,a,_,_=self.env.state_decode(state) # current employment state
newemp,newpen,newsal,a2,tis,next_wage=self.env.state_decode(newstate)
g=0
bu=0
ove=0
jasen=0
puoliso=0
elif self.version==1:
# v1
emp,_,_,_,a,_,_,_,_,_,_,_,_,_=self.env.state_decode(state) # current employment state
newemp,g,newpen,newsal,a2,tis,paidpens,pink,toe,ura,oof,bu,wr,p=self.env.state_decode(newstate)
ove=0
jasen=0
puoliso=0
elif self.version==2:
# v2
emp,_,_,_,a,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_=self.env.state_decode(state) # current employment state
newemp,g,newpen,newsal,a2,tis,paidpens,pink,toe,ura,bu,wr,upr,uw,uwr,pr,c3,c7,c18,unemp_left,aa,toe58=self.env.state_decode(newstate)
ove=0
jasen=0
puoliso=0
elif self.version==3:
# v3
emp,_,_,_,a,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_=self.env.state_decode(state) # current employment state
newemp,g,newpen,newsal,a2,tis,paidpens,pink,toe,toek,ura,bu,wr,upr,uw,uwr,pr,c3,c7,c18,unemp_left,aa,toe58,ove,jasen=self.env.state_decode(newstate)
puoliso=0
elif self.version==4:
# v3
emp,_,_,_,a,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_=self.env.state_decode(state) # current employment state
newemp,g,newpen,newsal,a2,tis,paidpens,pink,toe,toek,ura,bu,wr,upr,uw,uwr,pr,\
c3,c7,c18,unemp_left,aa,toe58,ove,jasen,puoliso,puoliso_tyossa,puoliso_palkka=self.env.state_decode(newstate)
elif self.version==101:
emp,_,_,a,_,_,_=self.env.state_decode(state) # current employment state
newemp,newpen,newsal,a2,tis,next_wage,savings=self.env.state_decode(newstate)
g=0
bu=0
ove=0
jasen=0
t=int(np.round((a2-self.min_age)*self.inv_timestep))#-1
if a2>a and newemp>=0: # new state is not reset (age2>age)
if a2>self.min_retirementage and newemp==3 and self.version in set([1,2,3,4]):
newemp=2
if self.version in set([1,2,3,4]):
self.empstate[t,newemp]+=1
self.alive[t]+=1
self.rewstate[t,newemp]+=r
self.poprewstate[t,n]=r
self.actions[t,n]=act
self.popempstate[t,n]=newemp
#self.salaries[t,n]=newsal
self.salaries_emp[t,newemp]+=newsal
self.time_in_state[t,newemp]+=tis
if tis<=0.25 and newemp==5:
self.infostats_mother_in_workforce[t]+=1
self.infostats_pinkslip[t,newemp]+=pink
self.infostats_pop_pinkslip[t,n]=pink
self.gempstate[t,newemp,g]+=1
self.stat_wage_reduction[t,newemp]+=wr
self.stat_wage_reduction_g[t,newemp,g]+=wr
self.galive[t,g]+=1
self.stat_tyoura[t,newemp]+=ura
self.stat_toe[t,newemp]+=toe
self.stat_pension[t,newemp]+=newpen
self.stat_paidpension[t,newemp]+=paidpens
self.stat_unemp_len[t,n]=tis
self.popunemprightleft[t,n]=-self.env.unempright_left(newemp,tis,bu,a2,ura)
self.popunemprightused[t,n]=bu
self.infostats_group[n]=int(g)
self.infostats_unempwagebasis[t,n]=uw
self.infostats_unempwagebasis_acc[t,n]=uwr
self.infostats_toe[t,n]=toe
self.infostats_ove[t,newemp]+=ove
self.infostats_kassanjasen[t]+=jasen
self.infostats_pop_wage[t,n]=newsal
self.infostats_pop_pension[t,n]=newpen
self.infostats_puoliso[t]+=puoliso
if q is not None:
#print(newsal,q['palkkatulot'])
self.infostats_taxes[t]+=q['verot']*self.timestep*12
self.infostats_wagetaxes[t]+=q['verot_ilman_etuuksia']*self.timestep*12
self.infostats_taxes_distrib[t,newemp]+=q['verot']*self.timestep*12
self.infostats_etuustulo[t]+=q['etuustulo_brutto']*self.timestep*12
self.infostats_etuustulo_group[t,g]+=q['etuustulo_brutto']*self.timestep*12
self.infostats_perustulo[t]+=q['perustulo']*self.timestep*12
self.infostats_palkkatulo[t]+=q['palkkatulot']*self.timestep*12
self.infostats_palkkatulo_eielakkeella[t]+=q['palkkatulot_eielakkeella']*self.timestep*12
self.infostats_ansiopvraha[t]+=q['ansiopvraha']*self.timestep*12
self.infostats_asumistuki[t]+=q['asumistuki']*self.timestep*12
self.infostats_valtionvero[t]+=q['valtionvero']*self.timestep*12
self.infostats_valtionvero_distrib[t,newemp]+=q['valtionvero']*self.timestep*12
self.infostats_kunnallisvero[t]+=q['kunnallisvero']*self.timestep*12
self.infostats_kunnallisvero_distrib[t,newemp]+=q['kunnallisvero']*self.timestep*12
self.infostats_ptel[t]+=q['ptel']*self.timestep*12
self.infostats_tyotvakmaksu[t]+=q['tyotvakmaksu']*self.timestep*12
self.infostats_tyoelake[t]+=q['elake_maksussa']*self.timestep*12
self.infostats_kokoelake[t]+=q['kokoelake']*self.timestep*12
self.infostats_opintotuki[t]+=q['opintotuki']*self.timestep*12
self.infostats_isyyspaivaraha[t]+=q['isyyspaivaraha']*self.timestep*12
self.infostats_aitiyspaivaraha[t]+=q['aitiyspaivaraha']*self.timestep*12
self.infostats_kotihoidontuki[t]+=q['kotihoidontuki']*self.timestep*12
self.infostats_sairauspaivaraha[t]+=q['sairauspaivaraha']*self.timestep*12
self.infostats_toimeentulotuki[t]+=q['toimtuki']*self.timestep*12
self.infostats_tulot_netto[t]+=q['kateen']*self.timestep*12
self.infostats_tyelpremium[t,n]=q['tyel_kokomaksu']*self.timestep*12
self.infostats_paid_tyel_pension[t,n]=q['puhdas_tyoelake']*self.timestep*12
self.infostats_sairausvakuutus[t]+=q['sairausvakuutus']*self.timestep*12
self.infostats_pvhoitomaksu[t,n]=q['pvhoito']*self.timestep*12
self.infostats_ylevero[t]+=q['ylevero']*self.timestep*12
self.infostats_ylevero_distrib[t,newemp]=q['ylevero']*self.timestep*12
self.infostats_poptulot_netto[t,n]=q['kateen']*self.timestep*12
self.infostats_children_under3[t,n]=c3
self.infostats_children_under7[t,n]=c7
self.infostats_npv0[n]=q['multiplier']
self.infostats_equivalent_income[t]+=q['eq']
if 'alv' in q:
self.infostats_alv[t]+=q['alv']
#self.infostats_kassanjasen[t]+=1
elif self.version in set([0,101]):
self.empstate[t,newemp]+=1
self.alive[t]+=1
self.rewstate[t,newemp]+=r
self.infostats_tulot_netto[t]+=q['netto'] # already at annual level
self.infostats_poptulot_netto[t,n]=q['netto']
self.poprewstate[t,n]=r
self.popempstate[t,n]=newemp
#self.salaries[t,n]=newsal
self.salaries_emp[t,newemp]+=newsal
self.time_in_state[t,newemp]+=tis
self.infostats_equivalent_income[t]+=q['eq']
self.infostats_pop_wage[t,n]=newsal
self.infostats_pop_pension[t,n]=newpen
if self.dynprog and pred_r is not None:
self.pop_predrew[t,n]=pred_r
if self.version==101:
self.infostats_savings[t,n]=savings
self.actions[t,n]=act[0]
self.sav_actions[t,n]=act[1]
else:
self.actions[t,n]=act
# if self.version in set([1,2,3]):
# self.gempstate[t,newemp,g]+=1
# self.stat_wage_reduction[t,newemp]+=wr
# self.galive[t,g]+=1
# self.stat_tyoura[t,newemp]+=ura
# self.stat_toe[t,newemp]+=toe
# self.stat_pension[t,newemp]+=newpen
# self.stat_paidpension[t,newemp]+=paidpens
# self.stat_unemp_len[t,n]=tis
# self.popunemprightleft[t,n]=0
# self.popunemprightused[t,n]=0
if aveV is not None:
self.aveV[t,n]=aveV
if not emp==newemp:
self.siirtyneet[t,emp]+=1
self.siirtyneet_det[t,emp,newemp]+=1
else:
self.pysyneet[t,emp]+=1
elif newemp<0:
self.deceiced[t]+=1
def scale_error(self,x,target=None,averaged=False):
return (target-self.comp_scaled_consumption(x,averaged=averaged))
def comp_employed_ratio_by_age(self,emp=None,grouped=False,g=0):
if emp is None:
if grouped:
emp=np.squeeze(self.gempstate[:,:,g])
else:
emp=self.empstate
nn=np.sum(emp,1)
if self.minimal:
tyoll_osuus=(emp[:,1]+emp[:,3])/nn
htv_osuus=(emp[:,1]+0.5*emp[:,3])/nn
tyoll_osuus=np.reshape(tyoll_osuus,(tyoll_osuus.shape[0],1))
htv_osuus=np.reshape(htv_osuus,(htv_osuus.shape[0],1))
else:
# the employed include those in full-time work, part-timers, retirement+work and retirement+part-time work
# those on paternity leave are excluded, even though the leave lasts under 3 months
tyoll_osuus=(emp[:,1]+emp[:,8]+emp[:,9]+emp[:,10])
htv_osuus=(emp[:,1]+0.5*emp[:,8]+emp[:,9]+0.5*emp[:,10])
tyoll_osuus=np.reshape(tyoll_osuus,(tyoll_osuus.shape[0],1))
htv_osuus=np.reshape(htv_osuus,(htv_osuus.shape[0],1))
return tyoll_osuus,htv_osuus
def comp_employed_aggregate(self,emp=None,start=20,end=63.5,grouped=False,g=0):
if emp is None:
if grouped:
emp=self.gempstate[:,:,g]
else:
emp=self.empstate
nn=np.sum(emp,1)
if self.minimal:
tyoll_osuus=(emp[:,1]+emp[:,3])/nn
htv_osuus=(emp[:,1]+0.5*emp[:,3])/nn
else:
# the employed include those in full-time work, part-timers, retirement+work and retirement+part-time work
# those on paternity leave are excluded, even though the leave lasts under 3 months
tyoll_osuus=(emp[:,1]+emp[:,8]+emp[:,9]+emp[:,10])/nn
htv_osuus=(emp[:,1]+0.5*emp[:,8]+emp[:,9]+0.5*emp[:,10])/nn
htv_osuus=self.comp_state_stats(htv_osuus,start=start,end=end,ratio=True)
tyoll_osuus=self.comp_state_stats(tyoll_osuus,start=start,end=end,ratio=True)
return tyoll_osuus,htv_osuus
def comp_group_ps(self):
return self.comp_palkkasumma(grouped=True)
def comp_palkkasumma(self,start=19,end=68,grouped=False,scale_time=True):
demog2=self.empstats.get_demog()
if scale_time:
scale=self.timestep
else:
scale=1.0
min_cage=self.map_age(start)
max_cage=self.map_age(end)+1
if grouped:
scalex=demog2/self.n_pop*self.timestep
ps=np.zeros((self.n_time,6))
ps_norw=np.zeros((self.n_time,6))
a_ps=np.zeros(6)
a_ps_norw=np.zeros(6)
for k in range(self.n_pop):
g=int(self.infostats_group[k,0])
for t in range(min_cage,max_cage):
e=int(self.popempstate[t,k])
if e in set([1,10]):
ps[t,g]+=self.infostats_pop_wage[t,k]
ps_norw[t,g]+=self.infostats_pop_wage[t,k]
elif e in set([8,9]):
ps[t,g]+=self.infostats_pop_wage[t,k]*self.timestep
for g in range(6):
a_ps[g]=np.sum(scalex[min_cage:max_cage]*ps[min_cage:max_cage,g])
a_ps_norw[g]=np.sum(scalex[min_cage:max_cage]*ps_norw[min_cage:max_cage,g])
else:
scalex=demog2/self.n_pop*self.timestep
ps=np.zeros((self.n_time,1))
ps_norw=np.zeros((self.n_time,1))
for k in range(self.n_pop):
for t in range(min_cage,max_cage):
e=int(self.popempstate[t,k])
if e in set([1,10]):
ps[t,0]+=self.infostats_pop_wage[t,k]
ps_norw[t,0]+=self.infostats_pop_wage[t,k]
elif e in set([8,9]):
ps[t,0]+=self.infostats_pop_wage[t,k]
a_ps=np.sum(scalex[min_cage:max_cage]*ps[min_cage:max_cage])
a_ps_norw=np.sum(scalex[min_cage:max_cage]*ps_norw[min_cage:max_cage])
return a_ps,a_ps_norw
def comp_stats_agegroup(self,border=[19,35,50]):
n_groups=len(border)
low=border.copy()
high=border.copy()
high[0:n_groups-1]=border[1:n_groups]
high[-1]=65
employed=np.zeros(n_groups)
unemployed=np.zeros(n_groups)
ahtv=np.zeros(n_groups)
parttimeratio=np.zeros(n_groups)
unempratio=np.zeros(n_groups)
empratio=np.zeros(n_groups)
i_ps=np.zeros(n_groups)
i_ps_norw=np.zeros(n_groups)
for n in range(n_groups):
l=low[n]
h=high[n]
htv,tyollvaikutus,tyollaste,tyotosuus,tyottomat,osatyollaste=\
self.comp_tyollisyys_stats(self.empstate,scale_time=True,start=l,end=h,agegroups=True)
ps,ps_norw=self.comp_palkkasumma(start=l,end=h)
print(f'l {l} h {h}\nhtv {htv}\ntyollaste {tyollaste}\ntyotosuus {tyotosuus}\ntyottomat {tyottomat}\nosatyollaste {osatyollaste}\nps {ps}')
employed[n]=tyollvaikutus
ahtv[n]=htv
unemployed[n]=tyottomat
unempratio[n]=tyotosuus
empratio[n]=tyollaste
parttimeratio[n]=osatyollaste
i_ps[n]=ps
i_ps_norw[n]=ps_norw
return employed,ahtv,unemployed,parttimeratio,i_ps,i_ps_norw,unempratio,empratio
def comp_unemployed_ratio_by_age(self,emp=None,grouped=False,g=0):
if emp is None:
if grouped:
emp=self.gempstate[:,:,g]
else:
emp=self.empstate
nn=np.sum(emp,1)
if self.minimal:
tyot_osuus=emp[:,0]/nn
tyot_osuus=np.reshape(tyot_osuus,(tyot_osuus.shape[0],1))
else:
# the employed include those in full-time work, part-timers, retirement+work and retirement+part-time work
# those on paternity leave are excluded, even though the leave lasts under 3 months
tyot_osuus=(emp[:,0]+emp[:,4]+emp[:,13])[:,None]
#tyot_osuus=np.reshape(tyot_osuus,(tyot_osuus.shape[0],1))
return tyot_osuus
def comp_unemployed_aggregate(self,emp=None,start=20,end=63.5,scale_time=True,grouped=False,g=0):
if emp is None:
if grouped:
emp=self.gempstate[:,:,g]
else:
emp=self.empstate
nn=np.sum(emp,1)
if self.minimal:
tyot_osuus=emp[:,0]/nn
else:
tyot_osuus=(emp[:,0]+emp[:,4]+emp[:,13])/nn
#print(f'tyot_osuus {tyot_osuus}')
unemp=self.comp_state_stats(tyot_osuus,start=start,end=end,ratio=True)
return unemp
def comp_parttime_aggregate(self,emp=None,start=20,end=63.5,scale_time=True,grouped=False,g=0):
'''
Headcount figures (NOT FTE!)
'''
if emp is None:
if grouped:
emp=self.gempstate[:,:,g]
else:
emp=self.empstate
nn=np.sum(emp,1)
if not self.minimal:
tyossa=(emp[:,1]+emp[:,10]+emp[:,8]+emp[:,9])/nn
osatyossa=(emp[:,10]+emp[:,8])/nn
else:
tyossa=emp[:,1]/nn
osatyossa=0*tyossa
osatyo_osuus=osatyossa/tyossa
osatyo_osuus=self.comp_state_stats(osatyo_osuus,start=start,end=end,ratio=True)
kokotyo_osuus=1-osatyo_osuus
return kokotyo_osuus,osatyo_osuus
def comp_parttime_ratio_by_age(self,emp=None,grouped=False,g=0):
if emp is None:
if grouped:
emp=self.gempstate[:,:,g]
else:
emp=self.empstate
nn=np.sum(emp,1)
if self.minimal:
kokotyo_osuus=(emp[:,1])/nn
osatyo_osuus=(emp[:,3])/nn
else:
if grouped:
for g in range(6):
kokotyo_osuus=(emp[:,1,g]+emp[:,9,g])/nn
osatyo_osuus=(emp[:,8,g]+emp[:,10,g])/nn
else:
kokotyo_osuus=(emp[:,1]+emp[:,9])/nn
osatyo_osuus=(emp[:,8]+emp[:,10])/nn
osatyo_osuus=np.reshape(osatyo_osuus,(osatyo_osuus.shape[0],1))
kokotyo_osuus=np.reshape(kokotyo_osuus,(osatyo_osuus.shape[0],1))
return kokotyo_osuus,osatyo_osuus
def comp_employed_ratio(self,emp):
tyoll_osuus,htv_osuus=self.comp_employed_ratio_by_age(emp)
tyot_osuus=self.comp_unemployed_ratio_by_age(emp)
kokotyo_osuus,osatyo_osuus=self.comp_parttime_ratio_by_age(emp)
return tyoll_osuus,htv_osuus,tyot_osuus,kokotyo_osuus,osatyo_osuus
def comp_unemployed_detailed(self,emp):
if self.minimal:
ansiosid_osuus=emp[:,0]/np.sum(emp,1)
tm_osuus=ansiosid_osuus*0
else:
# the employed include those in full-time work, part-timers, retirement+work and retirement+part-time work
# those on paternity leave are excluded, even though the leave lasts under 3 months
ansiosid_osuus=(emp[:,0]+emp[:,4])/np.sum(emp,1)
tm_osuus=(emp[:,13])/np.sum(emp,1)
return ansiosid_osuus,tm_osuus
def comp_tyollisyys_stats(self,emp,scale_time=True,start=19,end=68,full=False,tyot_stats=False,agg=False,shapes=False,only_groups=False,g=0,agegroups=False):
demog2=self.empstats.get_demog()
if scale_time:
scale=self.timestep
else:
scale=1.0
min_cage=self.map_age(start)
max_cage=self.map_age(end)+1
scalex=demog2[min_cage:max_cage]/self.n_pop*scale
if only_groups:
tyollosuus,htvosuus,tyot_osuus,kokotyo_osuus,osatyo_osuus=self.comp_employed_ratio(emp)
else:
tyollosuus,htvosuus,tyot_osuus,kokotyo_osuus,osatyo_osuus=self.comp_employed_ratio(emp)
htv=np.sum(scalex*htvosuus[min_cage:max_cage])
tyollvaikutus=np.sum(scalex*tyollosuus[min_cage:max_cage])
tyottomat=np.sum(scalex*tyot_osuus[min_cage:max_cage])
osatyollvaikutus=np.sum(scalex*osatyo_osuus[min_cage:max_cage])
kokotyollvaikutus=np.sum(scalex*kokotyo_osuus[min_cage:max_cage])
haj=np.mean(np.std(tyollosuus[min_cage:max_cage]))
tyollaste=tyollvaikutus/(np.sum(scalex)*self.n_pop)
osatyollaste=osatyollvaikutus/(kokotyollvaikutus+osatyollvaikutus)
kokotyollaste=kokotyollvaikutus/(kokotyollvaikutus+osatyollvaikutus)
if tyot_stats:
if agg:
#d2=np.squeeze(demog2)
tyolliset_osuus=np.squeeze(tyollosuus)
tyottomat_osuus=np.squeeze(tyot_osuus)
return tyolliset_ika,tyottomat_ika,htv_ika,tyolliset_osuus,tyottomat_osuus
else:
d2=np.squeeze(demog2)
tyolliset_ika=np.squeeze(scale*d2*np.squeeze(htvosuus))
tyottomat_ika=np.squeeze(scale*d2*np.squeeze(tyot_osuus))
htv_ika=np.squeeze(scale*d2*np.squeeze(htvosuus))
tyolliset_osuus=np.squeeze(tyollosuus)
tyottomat_osuus=np.squeeze(tyot_osuus)
return tyolliset_ika,tyottomat_ika,htv_ika,tyolliset_osuus,tyottomat_osuus
elif full:
return htv,tyollvaikutus,haj,tyollaste,tyollosuus,osatyollvaikutus,kokotyollvaikutus,osatyollaste,kokotyollaste
elif agegroups:
tyot_osuus=self.comp_unemployed_aggregate(start=start,end=end)
return htv,tyollvaikutus,tyollaste,tyot_osuus,tyottomat,osatyollaste
else:
return htv,tyollvaikutus,haj,tyollaste,tyollosuus
def comp_employment_stats(self,scale_time=True,returns=False):
demog2=self.empstats.get_demog()
if scale_time:
scale=self.timestep
else:
scale=1.0
min_cage=self.map_age(self.min_age)
max_cage=self.map_age(self.max_age)+1
scalex=np.squeeze(demog2/self.n_pop*self.timestep)
d=np.squeeze(demog2[min_cage:max_cage])
self.ratiostates=self.empstate/self.alive
self.demogstates=(self.empstate.T*scalex).T
if self.minimal>0:
self.stats_employed=self.demogstates[:,0]+self.demogstates[:,3]
self.stats_parttime=self.demogstates[:,3]
self.stats_unemployed=self.demogstates[:,0]
self.stats_all=np.sum(self.demogstates,1)
else:
self.stats_employed=self.demogstates[:,0]+self.demogstates[:,10]+self.demogstates[:,8]+self.demogstates[:,9]
self.stats_parttime=self.demogstates[:,10]+self.demogstates[:,8]
self.stats_unemployed=self.demogstates[:,0]+self.demogstates[:,4]+self.demogstates[:,13]
self.stats_all=np.sum(self.demogstates,1)
if returns:
return self.stats_employed,self.stats_parttime,self.stats_unemployed
# def test_emp(self):
# g_emp=0
# g_htv=0
# g_10=0
# g_1=0
# g_8=0
# g_9=0
# g_x=0
# scalex=1
#
# demog2=self.empstats.get_demog()
# scalex=np.squeeze(demog2/self.n_pop*self.timestep)
#
#
# for g in range(6):
# q=self.comp_participants(grouped=True,g=g)
# #g_1+=np.sum(self.gempstate[:,1,g])
# #g_10+=np.sum(self.gempstate[:,10,g])
# #g_8+=np.sum(self.gempstate[:,8,g])
# #g_9+=np.sum(self.gempstate[:,9,g])
# g_emp+=q['palkansaajia']
# g_htv+=q['htv']
# g_x+=np.sum((self.gempstate[:,1,g]+self.gempstate[:,10,g])*scalex)
#
# q=self.comp_participants()
# s_1=np.sum(self.empstate[:,1])
# s_10=np.sum(self.empstate[:,10])
# s_8=np.sum(self.empstate[:,8])
# s_9=np.sum(self.empstate[:,9])
# s_x=np.sum((self.empstate[:,1]+self.empstate[:,10])*scalex)
# emp=q['palkansaajia']
# htv=q['htv']
#
# print(f'htv {htv} vs g_htv {g_htv}')
# print(f'emp {emp} vs g_emp {g_emp}')
# print(f's_x {s_x} vs g_x {g_x}')
# #print(f's_1 {s_1} vs g_1 {g_1}')
# #print(f's_10 {s_10} vs g_10 {g_10}')
# #print(f's_8 {s_8} vs g_8 {g_8}')
# #print(f's_9 {s_9} vs g_9 {g_9}')
def comp_participants(self,scale=True,include_retwork=True,grouped=False,g=0):
'''
<NAME>, headcount
scalex assumes equal numbers of women and men. This could be refined.
'''
demog2=self.empstats.get_demog()
scalex=np.squeeze(demog2/self.n_pop*self.timestep)
#print('version',self.version)
q={}
if self.version in set([1,2,3,4]):
if grouped:
#print('group=',g)
emp=np.squeeze(self.gempstate[:,:,g])
q['yhteensä']=np.sum(np.sum(emp,axis=1)*scalex)
if include_retwork:
q['palkansaajia']=np.sum((emp[:,1]+emp[:,10]+emp[:,8]+emp[:,9])*scalex)
q['htv']=np.sum((emp[:,1]+0.5*emp[:,10]+0.5*emp[:,8]+emp[:,9])*scalex)
else:
q['palkansaajia']=np.sum((emp[:,1]+emp[:,10])*scalex)
q['htv']=np.sum((emp[:,1]+0.5*emp[:,10])*scalex)
q['ansiosidonnaisella']=np.sum((emp[:,0]+emp[:,4])*scalex)
q['tmtuella']=np.sum(emp[:,13]*scalex)
q['isyysvapaalla']=np.sum(emp[:,6]*scalex)
q['kotihoidontuella']=np.sum(emp[:,7]*scalex)
q['vanhempainvapaalla']=np.sum(emp[:,5]*scalex)
else:
q['yhteensä']=np.sum(np.sum(self.empstate[:,:],axis=1)*scalex)
if include_retwork:
q['palkansaajia']=np.sum((self.empstate[:,1]+self.empstate[:,10]+self.empstate[:,8]+self.empstate[:,9])*scalex)
q['htv']=np.sum((self.empstate[:,1]+0.5*self.empstate[:,10]+0.5*self.empstate[:,8]+self.empstate[:,9])*scalex)
else:
q['palkansaajia']=np.sum((self.empstate[:,1]+self.empstate[:,10])*scalex)
q['htv']=np.sum((self.empstate[:,1]+0.5*self.empstate[:,10])*scalex)
q['ansiosidonnaisella']=np.sum((self.empstate[:,0]+self.empstate[:,4])*scalex)
q['tmtuella']=np.sum(self.empstate[:,13]*scalex)
q['isyysvapaalla']=np.sum(self.empstate[:,6]*scalex)
q['kotihoidontuella']=np.sum(self.empstate[:,7]*scalex)
q['vanhempainvapaalla']=np.sum(self.empstate[:,5]*scalex)
else:
q['yhteensä']=np.sum(np.sum(self.empstate[:,:],1)*scalex)
q['palkansaajia']=np.sum((self.empstate[:,1])*scalex)
q['htv']=np.sum((self.empstate[:,1])*scalex)
q['ansiosidonnaisella']=np.sum((self.empstate[:,0])*scalex)
q['tmtuella']=np.sum(self.empstate[:,1]*0)
q['isyysvapaalla']=np.sum(self.empstate[:,1]*0)
q['kotihoidontuella']=np.sum(self.empstate[:,1]*0)
q['vanhempainvapaalla']=np.sum(self.empstate[:,1]*0)
return q
def comp_employment_groupstats(self,scale_time=True,g=0,include_retwork=True,grouped=True):
demog2=self.empstats.get_demog()
if scale_time:
scale=self.timestep
else:
scale=1.0
#min_cage=self.map_age(self.min_age)
#max_cage=self.map_age(self.max_age)+1
scalex=np.squeeze(demog2/self.n_pop*scale)
#d=np.squeeze(demog2[min_cage:max_cage])
if grouped:
ratiostates=np.squeeze(self.gempstate[:,:,g])/self.alive
demogstates=np.squeeze(self.gempstate[:,:,g])
else:
ratiostates=self.empstate[:,:]/self.alive
demogstates=self.empstate[:,:]
if self.version in set([1,2,3,4]):
if include_retwork:
stats_employed=np.sum((demogstates[:,1]+demogstates[:,9])*scalex)
stats_parttime=np.sum((demogstates[:,10]+demogstates[:,8])*scalex)
else:
stats_employed=np.sum((demogstates[:,1])*scalex)
stats_parttime=np.sum((demogstates[:,10])*scalex)
stats_unemployed=np.sum((demogstates[:,0]+demogstates[:,4]+demogstates[:,13])*scalex)
else:
stats_employed=np.sum((demogstates[:,0]+demogstates[:,3])*scalex)
stats_parttime=np.sum((demogstates[:,3])*scalex)
stats_unemployed=np.sum((demogstates[:,0])*scalex)
#stats_all=np.sum(demogstates,1)
return stats_employed,stats_parttime,stats_unemployed
def comp_state_stats(self,state,scale_time=True,start=20,end=63.5,ratio=False):
demog2=np.squeeze(self.empstats.get_demog())
#if scale_time:
# scale=self.timestep
#else:
# scale=1.0
min_cage=self.map_age(start)
max_cage=self.map_age(end)+1
#vaikutus=np.round(scale*np.sum(demog2[min_cage:max_cage]*state[min_cage:max_cage]))/np.sum(demog2[min_cage:max_cage])
vaikutus=np.sum(demog2[min_cage:max_cage]*state[min_cage:max_cage])/np.sum(demog2[min_cage:max_cage])
x=np.sum(demog2[min_cage:max_cage]*state[min_cage:max_cage])
y=np.sum(demog2[min_cage:max_cage])
#print(f'vaikutus {vaikutus} x {x} y {y}\n s {state[min_cage:max_cage]} mean {np.mean(state[min_cage:max_cage])}\n d {demog2[min_cage:max_cage]}')
return vaikutus
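# Worked micro-example (added note): with demog2 = [100, 120, 80] and
# state = [0.70, 0.75, 0.60] over the selected age window, the returned value is
# (100*0.70 + 120*0.75 + 80*0.60) / (100 + 120 + 80) = 208/300, about 0.693,
# i.e. a demography-weighted average of the age-indexed ratio.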
def get_vanhempainvapaat(self):
'''
Computes the number of people on parental leave for the outsider model (Excel), state 6
'''
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,0:3],1)
ulkopuolella_m=np.sum(self.gempstate[:,7,0:3],axis=1)[:,None]/alive
alive[:,0]=np.sum(self.galive[:,3:6],1)
nn=np.sum(self.gempstate[:,5,3:6]+self.gempstate[:,7,3:6],axis=1)[:,None]-self.infostats_mother_in_workforce
ulkopuolella_n=nn/alive
return ulkopuolella_m[::4],ulkopuolella_n[::4]
def get_vanhempainvapaat_md(self):
'''
Computes the number of people on parental leave for the outsider model (Excel), state 7
'''
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,0:3],1)
ulkopuolella_m=np.sum(self.gempstate[:,6,0:3],axis=1)[:,None]/alive
alive[:,0]=np.sum(self.galive[:,3:6],1)
nn=self.infostats_mother_in_workforce
ulkopuolella_n=nn/alive
return ulkopuolella_m[::4],ulkopuolella_n[::4]
def comp_L2error(self):
tyollisyysaste_m,osatyoaste_m,tyottomyysaste_m,ka_tyottomyysaste=self.comp_gempratios(gender='men',unempratio=False)
tyollisyysaste_w,osatyoaste_w,tyottomyysaste_w,ka_tyottomyysaste=self.comp_gempratios(gender='women',unempratio=False)
emp_statsratio_m=self.empstats.emp_stats(g=1)[:-1]*100
emp_statsratio_w=self.empstats.emp_stats(g=2)[:-1]*100
unemp_statsratio_m=self.empstats.unemp_stats(g=1)[:-1]*100
unemp_statsratio_w=self.empstats.unemp_stats(g=2)[:-1]*100
w1=1.0
w2=3.0
L2= w1*np.sum(np.abs(emp_statsratio_m-tyollisyysaste_m[:-1])**2)+\
w1*np.sum(np.abs(emp_statsratio_w-tyollisyysaste_w[:-1])**2)+\
w2*np.sum(np.abs(unemp_statsratio_m-tyottomyysaste_m[:-1])**2)+\
w2*np.sum(np.abs(unemp_statsratio_w-tyottomyysaste_w[:-1])**2)
L2=L2/self.n_pop
#print(L1,emp_statsratio_m,tyollisyysaste_m,tyollisyysaste_w,unemp_statsratio_m,tyottomyysaste_m,tyottomyysaste_w)
print('L2 error {}'.format(L2))
return L2
def comp_budgetL2error(self,ref_muut,scale=1):
q=self.comp_budget()
muut=q['muut tulot']
L2=-((ref_muut-muut)/scale)**2
print(f'L2 error {L2} (muut {muut} muut_ref {ref_muut})')
return L2
def optimize_scale(self,target,averaged=scale_error):
opt=scipy.optimize.least_squares(self.scale_error,0.20,bounds=(-1,1),kwargs={'target':target,'averaged':averaged})
#print(opt)
return opt['x']
def optimize_logutil(self,target,source):
'''
analytical compensated consumption
does not implement final reward, hence duration 110 y
'''
n_time=110
gy=np.empty(n_time)
g=1
gx=np.empty(n_time)
for t in range(0,n_time):
gx[t]=g
g*=self.gamma
for t in range(1,n_time):
gy[t]=np.sum(gx[0:t])
gf=np.mean(gy[1:])/10
lx=(target-source)
opt=np.exp(lx/gf)-1.0
print(opt)
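# Reading note (added): under the log-utility assumption stated in the docstring, a
# uniform relative consumption change c adds log(1 + c) to every period's utility, so
# the discounted welfare gap satisfies target - source = G * log(1 + c), and hence
# c = exp((target - source) / G) - 1, which is the role the scaling factor gf plays above.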
def min_max(self):
min_wage=np.min(self.infostats_pop_wage)
max_wage=np.max(self.infostats_pop_wage)
max_pension=np.max(self.infostats_pop_pension)
min_pension=np.min(self.infostats_pop_pension)
print(f'min wage {min_wage} max wage {max_wage}')
print(f'min pension {min_pension} max pension {max_pension}')
def setup_labels(self):
self.labels=self.lab.get_labels(self.language)
def map_age(self,age,start_zero=False):
if start_zero:
return int((age)*self.inv_timestep)
else:
return int((age-self.min_age)*self.inv_timestep)
def map_t_to_age(self,t):
return self.min_age+t/self.inv_timestep
def episodestats_exit(self):
plt.close(self.episode_fig)
def comp_gini(self):
'''
<NAME> coefficient for the population
'''
income=np.sort(self.infostats_tulot_netto,axis=None)
n=len(income)
L=np.arange(n,0,-1)
A=np.sum(L*income)/np.sum(income)
G=(n+1-2*A)/2
return G
def comp_annual_irr(self,npv,premium,pension,empstate,doprint=False):
k=0
max_npv=int(np.ceil(npv))
cashflow=-premium+pension
x=np.zeros(cashflow.shape[0]+max_npv)
eind=np.zeros(max_npv+1)
el=1
for k in range(max_npv+1):
eind[k]=el
el=el*self.elakeindeksi
x[:cashflow.shape[0]]=cashflow
if npv>0:
x[cashflow.shape[0]-1:]=cashflow[-2]*eind[:max_npv+1]
y=np.zeros(int(np.ceil(x.shape[0]/4)))
for k in range(y.shape[0]):
y[k]=np.sum(x[4*k:4*k+4])
irri=npf.irr(y)*100
#if np.isnan(irri):
# if np.sum(pension)<0.1 and np.sum(empstate[0:self.map_age(63)]==15)>0: # only contributions with no returns, so a 100% loss
# irri=-100
if irri<0.01 and doprint:
print('---------\nirri {}\nnpv {}\nx {}\ny {}\nprem {}\npens {}\nemps {}\n---------\n'.format(irri,npv,x,y,premium,pension,empstate))
if irri>100 and doprint:
print('---------\nirri {}\nnpv {}\nx {}\ny {}\nprem {}\npens {}\nemps {}\n---------\n'.format(irri,npv,x,y,premium,pension,empstate))
if np.isnan(irri) and doprint:
print('---------\nirri {}\nnpv {}\nx {}\ny {}\nprem {}\npens {}\nemps {}\n---------\n'.format(irri,npv,x,y,premium,np.sum(pension),empstate))
#print('---------\nirri {}\nnpv {}\n\ny {}\nprem {}\npens {}\nemps {}\n---------\n'.format(irri,npv,x,y,premium,np.sum(pension),np.sum(empstate==15)))
if irri<-50 and doprint:
print('---------\nirri {}\nnpv {}\nx {}\ny {}\nprem {}\npens {}\nemps {}\n---------\n'.format(irri,npv,x,y,premium,pension,empstate))
return irri
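# Illustration (added note): the routine folds the quarterly net cashflow
# (-premiums + pensions), extended over the remaining lifetime npv with
# pension-indexed payments, into annual sums of four quarters each before calling
# numpy_financial.irr; e.g. eight quarterly payments of -100 followed by eight
# receipts of +120 become the annual series [-400, -400, 480, 480].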
def comp_irr(self):
'''
Computes the internal rate of return (IRR)
Indexation is missing for the npv part
Inflation + real wage growth (= nominal wage growth) must be added to the results
'''
for k in range(self.n_pop):
self.infostats_irr[k]=self.reaalinen_palkkojenkasvu*100+self.comp_annual_irr(self.infostats_npv0[k,0],self.infostats_tyelpremium[:,k],self.infostats_paid_tyel_pension[:,k],self.popempstate[:,k])
def comp_aggirr(self):
'''
Computes the aggregate internal rate of return (IRR)
Indexation is missing for the npv part
Inflation + real wage growth (= nominal wage growth) must be added to the results
'''
maxnpv=np.max(self.infostats_npv0)
agg_premium=np.sum(self.infostats_tyelpremium,axis=1)
agg_pensions=np.sum(self.infostats_paid_tyel_pension,axis=1)
agg_irr=self.reaalinen_palkkojenkasvu*100+self.comp_annual_irr(maxnpv,agg_premium,agg_pensions,self.popempstate[:,0])
x=np.zeros(self.infostats_paid_tyel_pension.shape[0]+int(np.ceil(maxnpv)))
max_npv=int(max(np.ceil(self.infostats_npv0[:,0])))
eind=np.zeros(max_npv)
el=1
for k in range(max_npv):
eind[k]=el
el=el*self.elakeindeksi
cfn=self.infostats_tyelpremium.shape[0]
for k in range(self.n_pop):
if np.sum(self.popempstate[0:self.map_age(63),k]==15)<1: # excluding the deceased
n=int(np.ceil(self.infostats_npv0[k,0]))
cashflow=-self.infostats_tyelpremium[:,k]+self.infostats_paid_tyel_pension[:,k]
# indexation is missing
x[:cfn]+=cashflow
if n>0:
x[cfn-1:cfn+n-1]+=cashflow[-2]*eind[:n] # not indexed; the guarantee pension should also be taken into account
y=np.zeros(int(np.ceil(x.shape[0]/4)))
for k in range(y.shape[0]):
y[k]=np.sum(x[4*k:4*k+4])
irri=npf.irr(y)*100
print('aggregate irr {}'.format(agg_irr))
def comp_unemp_durations(self,popempstate=None,popunemprightused=None,putki=True,\
tmtuki=False,laaja=False,outsider=False,ansiosid=True,tyott=False,kaikki=False,\
return_q=True,max_age=100):
'''
Unemployment durations at the cross-sectional point in time
'''
unempset=[]
if tmtuki:
unempset.append(13)
if outsider:
unempset.append(11)
if putki:
unempset.append(4)
if ansiosid:
unempset.append(0)
if tyott:
unempset=[0,4,13]
if laaja:
unempset=[0,4,11,13]
if kaikki:
unempset=[0,2,3,4,5,6,7,8,9,11,12,13,14]
unempset=set(unempset)
if popempstate is None:
popempstate=self.popempstate
if popunemprightused is None:
popunemprightused=self.popunemprightused
keskikesto=np.zeros((5,5)) # age groups 20-29, 30-39, 40-49, 50-59, 60-69; corresponds to the TYJ statistics
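# rows: the five age groups above; columns: benefit-duration buckets
# (<=0.5 y, 0.5-1 y, 1-1.5 y, 1.5-2 y, over 2 y), filled from popunemprightused below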
n=np.zeros(5)
for k in range(self.n_pop):
for t in range(1,self.n_time):
age=self.min_age+t*self.timestep
if age<=max_age:
if popempstate[t,k] in unempset:
if age<29:
l=0
elif age<39:
l=1
elif age<49:
l=2
elif age<59:
l=3
else:
l=4
n[l]+=1
if self.popunemprightused[t,k]<=0.51:
keskikesto[l,0]+=1
elif self.popunemprightused[t,k]<=1.01:
keskikesto[l,1]+=1
elif self.popunemprightused[t,k]<=1.51:
keskikesto[l,2]+=1
elif self.popunemprightused[t,k]<=2.01:
keskikesto[l,3]+=1
else:
keskikesto[l,4]+=1
for k in range(5):
keskikesto[k,:] /= n[k]
if return_q:
return self.empdur_to_dict(keskikesto)
else:
return keskikesto
def empdur_to_dict(self,empdur):
q={}
q['20-29']=empdur[0,:]
q['30-39']=empdur[1,:]
q['40-49']=empdur[2,:]
q['50-59']=empdur[3,:]
q['60-65']=empdur[4,:]
return q
def comp_unemp_durations_v2(self,popempstate=None,putki=True,tmtuki=False,laaja=False,\
outsider=False,ansiosid=True,tyott=False,kaikki=False,\
return_q=True,max_age=100):
'''
Unemployment durations at the cross-sectional point in time
Here the result is computed from the state data, so it reflects the duration of the most recent spell
'''
unempset=[]
if tmtuki:
unempset.append(13)
if outsider:
unempset.append(11)
if putki:
unempset.append(4)
if ansiosid:
unempset.append(0)
if tyott:
unempset=[0,4,13]
if laaja:
unempset=[0,4,11,13]
if kaikki:
unempset=[0,2,3,4,5,6,7,8,9,11,12,13,14]
unempset=set(unempset)
if popempstate is None:
popempstate=self.popempstate
keskikesto=np.zeros((5,5)) # age groups 20-29, 30-39, 40-49, 50-59, 60-69; corresponds to the TYJ statistics
n=np.zeros(5)
for k in range(self.n_pop):
prev_state=popempstate[0,k]
prev_trans=0
for t in range(1,self.n_time):
age=self.min_age+t*self.timestep
if age<=max_age:
if popempstate[t,k]!=prev_state:
if prev_state in unempset and popempstate[t,k] not in unempset:
prev_state=popempstate[t,k]
duration=(t-prev_trans)*self.timestep
prev_trans=t
if age<29:
l=0
elif age<39:
l=1
elif age<49:
l=2
elif age<59:
l=3
else:
l=4
n[l]+=1
if duration<=0.51:
keskikesto[l,0]+=1
elif duration<=1.01:
keskikesto[l,1]+=1
elif duration<=1.51:
keskikesto[l,2]+=1
elif duration<=2.01:
keskikesto[l,3]+=1
else:
keskikesto[l,4]+=1
elif prev_state not in unempset and popempstate[t,k] in unempset:
prev_trans=t
prev_state=popempstate[t,k]
else: # some other state
prev_state=popempstate[t,k]
prev_trans=t
for k in range(5):
keskikesto[k,:] /= n[k]
if return_q:
return self.empdur_to_dict(keskikesto)
else:
return keskikesto
def comp_virrat(self,popempstate=None,putki=True,tmtuki=True,laaja=False,outsider=False,ansiosid=True,tyott=False,kaikki=False,max_age=100):
tyoll_virta=np.zeros((self.n_time,1))
tyot_virta=np.zeros((self.n_time,1))
unempset=[]
empset=[]
if tmtuki:
unempset.append(13)
if outsider:
unempset.append(11)
if putki:
unempset.append(4)
if ansiosid:
unempset.append(0)
if tyott:
unempset=[0,4,13]
if laaja:
unempset=[0,4,11,13]
if kaikki:
unempset=[0,2,3,4,5,6,7,8,9,11,12,13,14]
empset=set([1,10])
unempset=set(unempset)
if popempstate is None:
popempstate=self.popempstate
for k in range(self.n_pop):
prev_state=popempstate[0,k]
prev_trans=0
for t in range(1,self.n_time):
age=self.min_age+t*self.timestep
if age<=max_age:
if popempstate[t,k]!=prev_state:
if prev_state in unempset and popempstate[t,k] in empset:
tyoll_virta[t]+=1
prev_state=popempstate[t,k]
elif prev_state in empset and popempstate[t,k] in unempset:
tyot_virta[t]+=1
prev_state=popempstate[t,k]
else: # some other state
prev_state=popempstate[t,k]
return tyoll_virta,tyot_virta
def comp_tyollistymisdistribs(self,popempstate=None,popunemprightleft=None,putki=True,tmtuki=True,laaja=False,outsider=False,ansiosid=True,tyott=False,max_age=100):
tyoll_distrib=[]
tyoll_distrib_bu=[]
unempset=[]
if tmtuki:
unempset.append(13)
if outsider:
unempset.append(11)
if putki:
unempset.append(4)
if ansiosid:
unempset.append(0)
if tyott:
unempset=[0,4,13]
if laaja:
unempset=[0,4,11,13]
empset=set([1,10])
unempset=set(unempset)
if popempstate is None or popunemprightleft is None:
popempstate=self.popempstate
popunemprightleft=self.popunemprightleft
for k in range(self.n_pop):
prev_state=popempstate[0,k]
prev_trans=0
for t in range(1,self.n_time):
age=self.min_age+t*self.timestep
if age<=max_age:
if popempstate[t,k]!=prev_state:
if prev_state in unempset and popempstate[t,k] in empset:
tyoll_distrib.append((t-prev_trans)*self.timestep)
tyoll_distrib_bu.append(popunemprightleft[t,k])
prev_state=popempstate[t,k]
prev_trans=t
else: # some other state
prev_state=popempstate[t,k]
prev_trans=t
return tyoll_distrib,tyoll_distrib_bu
def comp_empdistribs(self,popempstate=None,popunemprightleft=None,putki=True,tmtuki=True,laaja=False,outsider=False,ansiosid=True,tyott=False,max_age=100):
unemp_distrib=[]
unemp_distrib_bu=[]
emp_distrib=[]
unempset=[]
if tmtuki:
unempset.append(13)
if outsider:
unempset.append(11)
if putki:
unempset.append(4)
if ansiosid:
unempset.append(0)
if tyott:
unempset=[0,4,13]
if laaja:
unempset=[0,4,11,13]
if popempstate is None or popunemprightleft is None:
popempstate=self.popempstate
popunemprightleft=self.popunemprightleft
empset=set([1,10])
unempset=set(unempset)
for k in range(self.n_pop):
prev_state=popempstate[0,k]
prev_trans=0
for t in range(1,self.n_time):
age=self.min_age+t*self.timestep
if age<=max_age:
                    if popempstate[t,k]!=prev_state:
if prev_state in unempset and popempstate[t,k] not in unempset:
unemp_distrib.append((t-prev_trans)*self.timestep)
unemp_distrib_bu.append(popunemprightleft[t,k])
prev_state=popempstate[t,k]
prev_trans=t
elif prev_state in empset and popempstate[t,k] not in unempset:
emp_distrib.append((t-prev_trans)*self.timestep)
prev_state=popempstate[t,k]
prev_trans=t
else: # some other state
prev_state=popempstate[t,k]
prev_trans=t
return unemp_distrib,emp_distrib,unemp_distrib_bu
def empdist_stat(self):
ratio=np.array([1,0.287024901703801,0.115508955875928,0.0681083442551332,0.0339886413280909,0.0339886413280909,0.0114460463084316,0.0114460463084316,0.0114460463084316,0.00419397116644823,0.00419397116644823,0.00419397116644823,0.00419397116644823,0.00419397116644823,0.00419397116644823,0.00419397116644823,0.00419397116644823,0.00166011358671909,0.00166011358671909,0.00166011358671909,0.00166011358671909,0.00166011358671909,0.00166011358671909,0.00166011358671909,0.00166011358671909,0.00104849279161206,0.00104849279161206,0.00104849279161206,0.00104849279161206,0.00104849279161206,0.00104849279161206,0.00104849279161206,0.00104849279161206])
return ratio
def comp_gempratios(self,unempratio=True,gender='men'):
if gender=='men': # men
gempstate=np.sum(self.gempstate[:,:,0:3],axis=2)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,0:3],1)
mother_in_workforce=0
else: # women
gempstate=np.sum(self.gempstate[:,:,3:6],axis=2)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,3:6],1)
mother_in_workforce=self.infostats_mother_in_workforce
tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste=self.comp_empratios(gempstate,alive,unempratio=unempratio,mother_in_workforce=mother_in_workforce)
return tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste
def comp_empratios(self,emp,alive,unempratio=True,mother_in_workforce=0):
employed=emp[:,1]
retired=emp[:,2]
unemployed=emp[:,0]
if self.version in set([1,2,3,4]):
disabled=emp[:,3]
piped=emp[:,4]
mother=emp[:,5]
dad=emp[:,6]
kotihoidontuki=emp[:,7]
vetyo=emp[:,9]
veosatyo=emp[:,8]
osatyo=emp[:,10]
outsider=emp[:,11]
student=emp[:,12]
tyomarkkinatuki=emp[:,13]
tyollisyysaste=100*(employed+osatyo+veosatyo+vetyo+dad+mother_in_workforce)/alive[:,0]
osatyoaste=100*(osatyo+veosatyo)/(employed+osatyo+veosatyo+vetyo)
if unempratio:
tyottomyysaste=100*(unemployed+piped+tyomarkkinatuki)/(tyomarkkinatuki+unemployed+employed+piped+osatyo+veosatyo+vetyo)
ka_tyottomyysaste=100*np.sum(unemployed+tyomarkkinatuki+piped)/np.sum(tyomarkkinatuki+unemployed+employed+piped+osatyo+veosatyo+vetyo)
else:
tyottomyysaste=100*(unemployed+piped+tyomarkkinatuki)/alive[:,0]
ka_tyottomyysaste=100*np.sum(unemployed+tyomarkkinatuki+piped)/np.sum(alive[:,0])
elif self.version in set([0,101]):
if False:
osatyo=emp[:,3]
else:
osatyo=0
tyollisyysaste=100*(employed+osatyo)/alive[:,0]
#osatyoaste=np.zeros(employed.shape)
osatyoaste=100*(osatyo)/(employed+osatyo)
if unempratio:
tyottomyysaste=100*(unemployed)/(unemployed+employed+osatyo)
ka_tyottomyysaste=100*np.sum(unemployed)/np.sum(unemployed+employed+osatyo)
else:
tyottomyysaste=100*(unemployed)/alive[:,0]
ka_tyottomyysaste=100*np.sum(unemployed)/np.sum(alive[:,0])
return tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste
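    # Worked toy example of the ratios above (made-up numbers, not model output): with 70
    # full-time employed, 10 part-time employed and 10 unemployed out of 100 alive at a given
    # age, tyollisyysaste = 100*(70+10)/100 = 80 %. With unempratio=True the unemployment rate
    # is measured against the labour force, tyottomyysaste = 100*10/(10+70+10) ~ 11.1 %,
    # whereas with unempratio=False it is measured against the population, 100*10/100 = 10 %.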
def plot_ratiostats(self,t):
'''
        Plot figures of the results
'''
x=np.linspace(self.min_age,self.max_age,self.n_time)
fig,ax=plt.subplots()
ax.set_xlabel('palkat')
ax.set_ylabel('freq')
ax.hist(self.infostats_pop_wage[t,:])
plt.show()
fig,ax=plt.subplots()
ax.set_xlabel('aika')
ax.set_ylabel('palkat')
meansal=np.mean(self.infostats_pop_wage,axis=1)
stdsal=np.std(self.infostats_pop_wage,axis=1)
ax.plot(x,meansal)
ax.plot(x,meansal+stdsal)
ax.plot(x,meansal-stdsal)
plt.show()
def plot_empdistribs(self,emp_distrib):
fig,ax=plt.subplots()
ax.set_xlabel('työsuhteen pituus [v]')
ax.set_ylabel('freq')
ax.set_yscale('log')
max_time=50
nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(0,max_time,nn_time)
scaled,x2=np.histogram(emp_distrib,x)
scaled=scaled/np.sum(emp_distrib)
#ax.hist(emp_distrib)
ax.bar(x2[1:-1],scaled[1:],align='center')
plt.show()
def plot_compare_empdistribs(self,emp_distrib,emp_distrib2,label2='vaihtoehto',label1=''):
fig,ax=plt.subplots()
ax.set_xlabel('työsuhteen pituus [v]')
ax.set_ylabel(self.labels['probability'])
ax.set_yscale('log')
max_time=50
nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(0,max_time,nn_time)
scaled,x2=np.histogram(emp_distrib,x)
scaled=scaled/np.sum(emp_distrib)
x=np.linspace(0,max_time,nn_time)
scaled3,x3=np.histogram(emp_distrib2,x)
scaled3=scaled3/np.sum(emp_distrib2)
ax.plot(x3[:-1],scaled3,label=label1)
ax.plot(x2[:-1],scaled,label=label2)
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
def plot_vlines_unemp(self,point=0):
axvcolor='gray'
lstyle='--'
plt.axvline(x=300/(12*21.5),ls=lstyle,color=axvcolor)
plt.text(310/(12*21.5),point,'300',rotation=90)
plt.axvline(x=400/(12*21.5),ls=lstyle,color=axvcolor)
plt.text(410/(12*21.5),point,'400',rotation=90)
plt.axvline(x=500/(12*21.5),ls=lstyle,color=axvcolor)
plt.text(510/(12*21.5),point,'500',rotation=90)
def plot_tyolldistribs(self,emp_distrib,tyoll_distrib,tyollistyneet=True,max=10,figname=None):
max_time=55
        nn_time = int(np.round((max_time)*self.inv_timestep))+1
# This Python file uses the following encoding: utf-8
"""
MIT License
Copyright (c) 2020 <NAME> & <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
This class file implements the minimisation of energy with the Monte-Carlo method, using the Metropolis algorithm.
There are two classes:
    - one which uses this algorithm on multiple threads to reduce computing time (currently not working)
    - the other one using only one thread to compute in the background
"""
from copy import deepcopy
import numpy as np
from scipy.constants import k as kb
from PySide2 import QtWidgets
from PySide2 import QtCore, QtGui
from .DipSimUtilities import *
from .DipSim import *
from .DipSimComputor import *
#########################################################
### Method of Monte-Carlo running on multiple threads ###
############## /!\ Actually not working /!\ #############
#########################################################
"""
dipole: list of dipoles (DipModel)
nbIteration: number of iterations (int)
temperature: temperature of the system (float)
lock2D: compute on 3D or 2D (bool)
unitCoef: power of ten of the distance unit, 0 for metres, -9 for nanometres (float)
"""
class MonteCarloThreadWorker(QThread):
resultDips = Signal(list)
error = Signal()
def __init__(self, parent=None):
super(MonteCarloThreadWorker, self).__init__(parent=parent)
self.dipoles = None
self.nbIteration = 1
self.temperature = 1
self.lock2D = False
self.unitCoef=10**-11
self.nbIterMutex = None
self._currentNbIterations = None
self.id = -1
def compute(self, nbIterMutex, dipoles, nbIteration, currentNbIterations, temperature, distCoef=0.0, lock2D=False, id = -1):
self.id = id
self.nbIterMutex = nbIterMutex
self._currentNbIterations = currentNbIterations
self.dipoles = dipoles
self.nbIteration = nbIteration
self.unitCoef=10**distCoef
self.temperature = temperature
self.lock2D = lock2D
self.start()
def run(self):
try:
resDips = self.monteCarloThread(self.dipoles, self.nbIteration, self.temperature, self.lock2D)
self.resultDips.emit(resDips)
except:
self.error.emit()
"""
Minimisation with Monte-Carlo working on multiple threads
Return the list of dipoles with new computed directions
dipoles: list of dipoles
    N: number of iterations (int)
    T: temperature (float)
    lock2D: compute in 2D or 3D (boolean)
"""
def monteCarloThread(self, dipoles, N, T, lock2D):
dipCopy = deepcopy(dipoles)
current_energy = self.computeEnergy(dipCopy)
while True:
moment_to_change_indice = np.random.randint(len(dipCopy))
moment_to_change = dipCopy[moment_to_change_indice].quaternion #moment i-1
dipCopy[moment_to_change_indice].quaternion = Dipole.rndQuaternionGenerator(is2D=lock2D) # we replace the moment i-1 by a new random one
new_energy = self.computeEnergy(dipCopy)
            r = np.random.random()
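            # The original loop body is truncated in this dump. A minimal sketch of the standard
            # Metropolis acceptance step is given below (an assumption, not the authors' exact code):
            # accept the new configuration if it lowers the energy, otherwise accept it with
            # probability exp(-dE/(kb*T)); revert the change when it is rejected.
            delta_energy = new_energy - current_energy
            if delta_energy < 0 or r < np.exp(-delta_energy / (kb * T)):
                current_energy = new_energy
            else:
                dipCopy[moment_to_change_indice].quaternion = moment_to_change
            # a full implementation would also stop after N iterations and then return dipCopy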
#!/usr/bin/env python
# coding: utf-8
from ffmpy import FFmpeg
import matplotlib.pyplot as plt
import librosa
from librosa import display as ld
import numpy as np
def framing(sig, fs=48000, win_len=0.37, win_hop=0.02):
"""
transform a signal into a series of overlapping frames.
Args:
sig (array) : a mono audio signal (Nx1) from which to compute features.
fs (int) : the sampling frequency of the signal we are working with.
win_len (float) : window length in sec.
Default is 0.37.
win_hop (float) : step between successive windows in sec.
Default is 0.02.
Returns:
array of frames.
frame length.
"""
# compute frame length and frame step (convert from seconds to samples)
frame_length = win_len * fs
frame_step = win_hop * fs
signal_length = len(sig)
frames_overlap = frame_length - frame_step
    # Make sure that we have at least 1 frame
num_frames = np.abs(signal_length - frames_overlap) // np.abs(frame_length - frames_overlap)
rest_samples = np.abs(signal_length - frames_overlap) % np.abs(frame_length - frames_overlap)
# Pad Signal to make sure that all frames have equal number of samples
# without truncating any samples from the original signal
if rest_samples != 0:
pad_signal_length = int(frame_step - rest_samples)
z = np.zeros((pad_signal_length))
        pad_signal = np.append(sig, z)
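    else:                      # (assumed) no padding needed when the frames fit exactly
        pad_signal = sig
    # The rest of the original function is truncated in this dump; the following is a sketch
    # of the standard overlapping-frame construction, not necessarily the original code.
    frame_length = int(frame_length)
    frame_step = int(frame_step)
    num_frames = int(num_frames)
    indices = (np.tile(np.arange(0, frame_length), (num_frames, 1)) +
               np.tile(np.arange(0, num_frames * frame_step, frame_step), (frame_length, 1)).T)
    frames = pad_signal[indices.astype(np.int32, copy=False)]
    return frames, frame_length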
"""
Created on Wed Oct 14
@author: M.C.Pali & <NAME>
"""
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.ioff()
import numpy as np
from termcolor import colored
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn import preprocessing
from ksvd import ApproximateKSVD
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# ITKrM algorithm (fast with matrices)
def itkrm(data,K,S,maxit,dinit):
""" Iterative Thresholding and K-residual Means (ITKrM) algorithm
:param data: training signals
:param K: number of dictionary atoms
:param S: sparsity level
:param maxit: maximal number of dictionary learning iterations
:param dinit: initial dictionary
:returns: learned dictionary
"""
""" preprocessing """
dold = np.asmatrix(dinit)
data = np.asmatrix(data)
d,N = np.shape(data)
""" algorithm """
for it in range(maxit):
''' thresholding '''
ip = np.dot(dold.transpose(),data)
absip = np.abs(ip)
signip = np.sign(ip)
I = np.argsort(absip, axis=0)[::-1]
gram = np.dot(dold.transpose(),dold)
dnew = np.asmatrix(np.zeros((d,K)))
X = np.zeros((K,N))
''' update coefficient matrix '''
It = I[0:S,:] # indices of S largest inner products
for n in range(N):
In = It[:,n]
try:
coeff = np.linalg.solve(gram[In,np.transpose(np.asmatrix(In))],np.asmatrix(ip[In,n]))
except:
pinv_gram_In = np.linalg.pinv(gram[In,np.transpose(np.asmatrix(In))])
coeff = np.dot(pinv_gram_In,np.asmatrix(ip[In,n]))
X[In,n] = coeff
app = np.dot(dold,X)
avenapp = np.linalg.norm(app,'fro')
res = data - app
''' dictionary update '''
for j in range(K):
# signals that use atom j
J = np.ravel(X[j,:].ravel().nonzero())
dnew[:,j] = np.dot(res[:,J],signip[j,J].transpose())
dnew[:,j] = dnew[:,j] + np.dot(dold[:,j],np.sum(absip[j,J]))
# do not update unused atoms
avenapp = avenapp/N
scale = np.sum(np.multiply(dnew,dnew),axis=0)
nonzero = np.argwhere(scale > (0.001*avenapp/d))[:,1]
dnew[:,nonzero] = np.dot(dnew[:,nonzero],np.diagflat(np.reciprocal(np.sqrt(scale[:,nonzero]))))
dold[:,nonzero] = dnew[:,nonzero]
dico = dold
return dico
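# Minimal usage sketch for itkrm (illustration only; the sizes and names below are made up
# and not part of the original script):
#
#   d, N, K, S = 16, 2000, 32, 3
#   Y = np.random.randn(d, N)                                           # training signals
#   dinit = preprocessing.normalize(np.random.randn(d, K), norm='l2', axis=0)
#   dico = itkrm(Y, K, S, maxit=20, dinit=dinit)                        # learned dictionary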
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# adaptive OMP
def a_omp_plus1(dico,data):
""" Adaptive Orthogonal Matching Pursuit (aOMP) algorithm for sparse coding
:param dico: dictionary used for sparse coding
:param data: signals to be sparsely approximated, stored in a matrix
:returns: sparse coefficient matrix
"""
""" preprocessing """
dico = np.asmatrix(dico)
data = np.asmatrix(data)
d,N = np.shape(data)
d,K = np.shape(dico)
Smax = np.int(np.floor(d/np.log(K))) # maximal number of coefficients for each signal
threshold1 = np.sqrt(2*(np.log(2*K) - np.log(0.25))/d) # threshold for 1st iteration (tau_1)
threshold = np.sqrt(2*(np.log(2*K) - np.log(0.5))/d) # threshold for aOMP in subsequent iterations (tau_2)
# initialisation
X = np.zeros((K,N))
iterations_ps = np.zeros((N,1)) # iterations for each signal
""" algorithm """
for n in range(N):
yn = data[:,n] # signal y_n
norm_yn = np.linalg.norm(yn)
res = yn # initial residual
ind_yn = np.zeros((Smax,1))
it = 0
''' part of aOMP using threshold tau_1 '''
resip = np.dot(dico.transpose(),res) # residual inner product
abs_resip = np.abs(resip) # absolute value of residual inner product
# find first part of support
supp_new = np.argwhere(abs_resip > threshold1*np.linalg.norm(res))
supp_new = np.array(supp_new[:Smax,0])
l = np.int(supp_new.size)
it = it + l
ind_yn[0:l,:] = np.asarray([supp_new]).transpose()
ind_it = np.ravel(np.asmatrix(ind_yn[0:l,:],dtype=int))
# compute coefficient vector and new residual
if len(supp_new)!=0:
try:
x = np.linalg.solve(dico[:,supp_new],yn)
except:
x = np.dot(np.linalg.pinv(dico[:,supp_new]),yn)
res = yn - np.dot(dico[:,supp_new],x)
else:
res = yn
''' part of aOMP using threshold tau_2 '''
abs_resip = np.abs(np.dot(dico.transpose(),res)) # absolute value of residual
max_val = np.max(abs_resip) # maximal absolute value of inner product between dictionary and residual
max_pos = np.int(np.argmax(abs_resip)) # position of max_val
while max_val >= threshold*np.linalg.norm(res) and np.linalg.norm(res) > (10**-3)*norm_yn and it < Smax:
ind_yn[it,:] = max_pos
ind_it = np.ravel(np.asmatrix(ind_yn[0:(it+1),:],dtype=int)) # current support
# compute coefficient vector and new residual
if len(ind_it)!=0:
try:
x = np.linalg.solve(dico[:,ind_it],yn)
except:
x = np.dot(np.linalg.pinv(dico[:,ind_it]),yn)
res = yn - np.dot(dico[:,ind_it],x)
else:
res = yn
it = it+1
# new residual inner product
resip = np.dot(dico.transpose(),res)
abs_resip = np.abs(resip)
max_val = np.max(abs_resip)
max_pos = np.int(np.argmax(abs_resip))
if it > 0:
if len(ind_it)!=0:
# store sparse coefficient vector in sparse coefficient matrix X
X[np.array(ind_it),n] = np.ravel(x)
iterations_ps[n,:] = it
return X
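# Usage sketch (illustration only, reusing the made-up names from the itkrm example above):
# sparsely encode the signals with the learned dictionary and check the approximation error.
#
#   X = a_omp_plus1(dico, Y)                                  # sparse coefficients, (K, N)
#   rel_err = np.linalg.norm(Y - np.asarray(dico) @ X, 'fro') / np.linalg.norm(Y, 'fro')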
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
"""
-- dictionary learning via ITKrM and adaptively chosen sparsity level S and dictionary size K
"""
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# sub-functions for adaptive ITKrM:
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# one iteration of adaptive ITKrM (fast with matrices)
def batch_a_itkrm1(dico,data,S,repinit,minobs):
"""
Subfunction of aITKrM:
-- one iteration of adaptive Iterative Thresholding and K-residual Means (aITKrM) algorithm --
:param dico: current dictionary estimate
:param data: training signals
:param S: sparsity level used for thresholding
:param repinit: initialisation for replacement candidates
:param minobs: minimal number of required observations
:returns:
dico: learned dictionary
freq: score for each atom in the dictionary
repcand: candidate atoms
repfreq: score of candidate atoms
Sguess: estimated sparsity level
avenres: average residual energy
"""
""" preprocessing """
dico = np.asmatrix(dico)
data = np.asmatrix(data)
d,N = np.shape(data)
d,K = np.shape(dico)
S = np.int(S)
# initialisation
L = np.int(np.round(np.log(d))) # number of learned replacement candidates
m = np.int(np.round(np.log(d))) # number of iterations for learning candidates
NL = np.int(np.floor(N/m)) # number of training signals for candidates
candtau = 2*np.log(2*NL/d)/d # threshold for candidates
tau = 2*np.log(2*N/minobs)/d # threshold dictionary atoms
""" algorithm """
# initialisation
dnew = np.asmatrix(np.zeros((d,K))) # new dictionary
freq = np.zeros((1,K)) # score for each atom in the dictionary
X = np.zeros((K,N)) # coefficient matrix
Z = np.zeros((K,N)) # residual inner products + coefficients
repcand = repinit
repcandnew = np.asmatrix(np.zeros((d,L))) # new replacement candidate
repfreq = np.zeros((1,L))
avenres = 0 # average energy of residual
avenapp = 0 # average energy of approximation
Sguess = 0 # guess for sparsity level
''' thresholding '''
ip = np.dot(dico.transpose(),data)
absip = np.abs(ip)
signip = np.sign(ip)
I = np.argsort(absip, axis=0)[::-1]
It = I[0:S,:] # indices of S largest inner products in absolute value
gram = np.dot(dico.transpose(),dico)
''' update coefficient matrix '''
for n in range(N):
In = It[:,n]
try:
coeff = np.linalg.solve(gram[In,np.transpose(np.asmatrix(In))],np.asmatrix(ip[In,n]))
except:
pinv_gram_In = np.linalg.pinv(gram[In,np.transpose(np.asmatrix(In))])
coeff = np.dot(pinv_gram_In,np.asmatrix(ip[In,n]))
X[In,n] = coeff
app = np.dot(dico,X)
res = data - app
''' dictionary update '''
for j in range(K):
# signals that use atom j
J = np.ravel(X[j,:].ravel().nonzero())
dnew[:,j] = np.dot(res[:,J],signip[j,J].transpose())
dnew[:,j] = dnew[:,j] + np.dot(dico[:,j],np.sum(absip[j,J]))
""" part for adaptivity """
# counter for adaptivity
res_energy = (np.linalg.norm(res))**2 # energy of residual
app_energy = (np.linalg.norm(app))**2 # energy of signal approximation
enres = np.sum(np.square(res),axis=0)
enapp = np.sum(np.square(app),axis=0)
avenres = np.linalg.norm(res,'fro')/N
avenapp = np.linalg.norm(app,'fro')/N
# steps for estimating the sparsity level
sig_threshold = np.sqrt(app_energy/d+res_energy*2*np.log(4*K)/d)
Z = X + np.dot(dico.transpose(),res) # inner products with residual + coefficients
Sguess = np.sum(np.ceil(np.sum(np.abs(Z),axis=1) - sig_threshold*np.ones((K,1))))/N
# counting above threshold
threshold = enres*tau + enapp/d
freq = np.sum(np.square(np.abs(X)) > threshold,axis=1) # atom scores
''' candidate update '''
for it in range(m):
res_cand = res[:,it*NL:(it+1)*NL]
enres = np.sum(np.square(res_cand),axis=0)
repip = np.dot(np.transpose(repcand),res_cand)
signip = np.sign(repip)
max_repip_val = np.max(np.abs(repip),axis=0)
max_repip_pos = np.argmax(np.abs(repip),axis=0)
for j in range(L):
# signals that use atom j
J = np.argwhere(max_repip_pos == j)[:,1]
repcandnew[:,j] = res_cand[:,J]*np.transpose(np.sign(repip[j,J]))
if it == m-1:
# candidate counter
repfreq[:,j] = np.sum(np.square(max_repip_val[:,J]) >= enres[:,J]*candtau)
repcand = preprocessing.normalize(repcandnew,norm='l2',axis=0) # normalise candidate atoms
''' dictionary update - remove unused atoms '''
scale = np.sum(np.square(dnew),axis=0)
nonzero = np.argwhere(scale > (0.001*avenapp/d))[:,1]
iszero = np.argwhere(scale <= (0.001*avenapp/d))[:,1]
# remove unused atoms
    freq[np.ravel(iszero)] = 0  # (assumed) unused atoms get a zero score
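    # The original function body is truncated here. Based on the non-adaptive itkrm above and
    # on the docstring, the remaining steps would roughly normalise the updated atoms and
    # return the documented outputs; the lines below are a sketch, not the original code.
    dnew[:,nonzero] = np.dot(dnew[:,nonzero],np.diagflat(np.reciprocal(np.sqrt(scale[:,nonzero]))))
    dico[:,nonzero] = dnew[:,nonzero]
    return dico, freq, repcand, repfreq, Sguess, avenres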
from keras.models import Sequential, load_model
from keras.layers import LSTM, Dense, Dropout, GRU
from keras.optimizers import Adam, SGD, RMSprop
import numpy as np
import random
import cv2
import display as dp
from datetime import datetime
def arrange_data(x_train, y_train):
y_train = np.nan_to_num(y_train)
    print("y_train:", np.shape(y_train))
import unittest
import numpy as np
from desc.backend import factorial, put, sign
class TestBackend(unittest.TestCase):
"""tests for backend functions"""
def test_factorial(self):
self.assertAlmostEqual(factorial(10), 3628800)
self.assertAlmostEqual(factorial(0), 1)
def test_put(self):
a = np.array([0, 0, 0])
        b = np.array([1, 2, 3])
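        # The original test is truncated here; a plausible continuation (an assumption, not the
        # actual DESC test) checks that put() writes the values of b into a at the given indices:
        c = put(a, np.array([0, 1, 2]), b)
        np.testing.assert_array_equal(c, b)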