| prompt | completion | api |
|---|---|---|
| string (lengths 19 to 879k) | string (lengths 3 to 53.8k) | string (lengths 8 to 59) |
import os
import numpy as np
import pandas as pd
"""
This function is used to import the data. Put the data in a folder named all_data in the directory of the code
"""
def import_data(dt_name):
"""
:param dt_name: Name of the Dataset
:return: Three pandas frames which correspond to training, testing and validation data
"""
# First we get the project directory and then read the three CSV files into their respective
# data frames.
d = os.getcwd()
test_data = pd.read_csv(os.path.join(os.path.join(d, "all_data"), "test_{0}.csv".format(dt_name)), header=None)
train_data = pd.read_csv(os.path.join(os.path.join(d, "all_data"), "train_{0}.csv".format(dt_name)), header=None)
validation_data = pd.read_csv(os.path.join(os.path.join(d, "all_data"), "valid_{0}.csv".format(dt_name)),
header=None)
# Now we will return the data frames
return [test_data, train_data, validation_data]
"""
This function is defined to get the labels/classes and attribute values in different variables
"""
def get_attributes_and_labels(data):
"""
:param data: The dataset to be divided
:return: Two panda frames which are in order of classes and attributes
"""
# Here we divide our attributes and classes features for a given dataset
return [data.iloc[:, -1], data.iloc[:, :-1]]
"""
This function is used to find the entropy which is our impurity heuristic for this algorithm
"""
def get_entropy(data):
"""
:param data: These are the values for which we want to find the entropy. We pass a whole vector of values which
correspond to the attribute of interest and find the entropy for that vector.
:return: Entropy for the given vector
"""
entropy_value = 0
temp, unique_count = np.unique(data, return_counts=True)
# We will use the formula mentioned in the slides to calculate the value of entropy for both of the options (i.e.,
# 1 and 0)
sum_of_counts = np.sum(unique_count)
for count in unique_count:
entropy_value = entropy_value - ((count / sum_of_counts) * np.log2(count / sum_of_counts))
return entropy_value
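# A quick illustrative check of get_entropy (hypothetical values, not part of the original script):
# get_entropy([0, 0, 1, 1]) returns 1.0 (maximally mixed labels),
# get_entropy([1, 1, 1, 1]) returns 0.0 (a pure node), and
# get_entropy([0, 1, 1, 1]) returns roughly 0.811.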
"""
This function is used to find the information gain for the given sub-tree/tree. The information gain is used to find the
attribute we will use to do further branching
"""
def Information_Gain_Heuristic(examples, attributes, target_attribute):
"""
:param examples: The data for which we want to find the information gain
:param attributes: the values of the attributes available (the column number)
:param target_attribute: the target attribute we are trying to find
:return: Information Gain of the given sub-tree.
"""
# Here we find the entropy for the root node
previous_entropy = get_entropy(target_attribute)
Information_Gain = []
for each_attribute in attributes:
unique_value_of_attribute, counts_of_attribute = np.unique(examples[each_attribute], return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
# Only one unique value is present, so there is nothing to reorder.
pass
# Since the array_after_division arrays are hardcoded below, we keep the counts for value 0 first.
if unique_value_of_attribute[0] == 1:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
array_after_division_1 = []
array_after_division_0 = []
# This loop is for 0 and 1
# I need to find the number of 1's and 0's in target value when the given attribute value is something
# particular
total_data = pd.concat([examples, target_attribute], axis=1, sort=False)
# Here I concatenated the data frames so that only one df is used to both lookup the value and find the value
# to append
row_names = total_data.index.values
list_of_row_names = list(row_names)
for each in list_of_row_names:
value_to_append = int(total_data.iloc[:, -1][each])
if examples[each_attribute][each] == 1:
array_after_division_1.append(value_to_append)
else:
array_after_division_0.append(value_to_append)
# Here I use try/except since if the target_attribute has only one unique value, accessing the second
# index (i.e. index 1) raises an IndexError; and if we have only one unique value then our impurity
# is 0 and thus the entropy is 0
try:
value_of_new_impurity = (counts_of_attribute[0] / np.size(examples[each_attribute])) * get_entropy(
array_after_division_0) + (counts_of_attribute[1] / np.size(examples[each_attribute])) * get_entropy(
array_after_division_1)
except IndexError:
value_of_new_impurity = 0
temp = previous_entropy - value_of_new_impurity
Information_Gain.append(temp)
return Information_Gain
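# A minimal usage sketch (hypothetical toy data, assuming binary 0/1 attributes and labels):
# example_x = pd.DataFrame({0: [0, 0, 1, 1], 1: [0, 1, 0, 1]})
# example_y = pd.Series([0, 0, 1, 1])
# Information_Gain_Heuristic(example_x, [0, 1], example_y) gives gains of about [1.0, 0.0]:
# attribute 0 perfectly separates the labels, while attribute 1 carries no information.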
"""
This function is the main function for our algorithm. The decision_tree function is used recursively to create new nodes
and make the tree while doing the training.
"""
def decision_tree_construction(examples, target_attribute, attributes, depth):
"""
:param examples: The data we will use to train the tree(x)
:param target_attribute: The label we want to classify(y)
:param attributes: The number(index) of the labels/attributes of the data-set
:return: The tree corresponding to the given data
"""
# This is the first base condition of the algorithm. It is used if the attributes variable is empty, then we return
# the single-node tree Root, with label = most common value of target_attribute in examples
# The base condition for the recursion when we check if all the variables are same or not in the node and if they
# are same then we return that value as the node
if len(attributes) == 0 or len(np.unique(target_attribute)) == 1:
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
# Only one unique value is present, so there is nothing to reorder.
pass
if unique_value_of_attribute[0] == 1:
# More positive values
return 1, depth
elif unique_value_of_attribute[0] == 0:
# More negative values
return 0, depth
# This is the recursion part of the algorithm in which we try to find the sub-tree's by using recursion and
# information gain
else:
Information_Gain = Information_Gain_Heuristic(examples, attributes, target_attribute)
best_attribute_number = attributes[np.argmax(Information_Gain)]
# Since we now have the best_attribute (A in the algorithm) we will create the root node of the tree/sub-tree
# with that and name the root after the best attribute among all. Here we make the tree a dictionary for testing
# purposes
tree = dict([(best_attribute_number, dict())])
if isinstance(tree, int):
# If the given value is an int then it is definitely a leaf node, and if it is a dictionary then it is an
# internal node
tree[best_attribute_number]["type_of_node"] = "leaf"
tree[best_attribute_number]["depth"] = depth
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
# Here we can have an index error since in some case it may happen that the array has only one type
# of value and thus accessing the index [1] is not possible
pass
tree[best_attribute_number]["majority_target_attribute"] = unique_value_of_attribute[0]
tree[best_attribute_number]["best_attribute_number"] = best_attribute_number
else:
tree[best_attribute_number]["type_of_node"] = "node"
tree[best_attribute_number]["depth"] = depth
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
# Here we can have an index error since in some case it may happen that the array has only one type
# of value and thus accessing the index [1] is not possible
pass
tree[best_attribute_number]["majority_target_attribute"] = unique_value_of_attribute[0]
tree[best_attribute_number]["best_attribute_number"] = best_attribute_number
attributes.remove(best_attribute_number)
# Now we do the recursive algorithm which will be used to create the tree after the root node.
depth_of_node = []
for each_unique_value in np.unique(examples[best_attribute_number]):
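# The remainder of this loop body is not part of this snippet. A typical ID3-style continuation
# (purely illustrative, not the author's original code) would, for each unique value v of the chosen
# attribute, select the rows where that attribute equals v, recurse via
# decision_tree_construction(sub_examples, sub_target, attributes, depth + 1), store the returned
# subtree under tree[best_attribute_number][v], and append the returned depth to depth_of_node.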
#!/usr/bin/env python
# coding: utf-8
# # Boston housing price prediction task
#
# ## Linear regression model
#
# Assume that the relationship between the house price and its influencing factors can be described by a linear model:
#
# $$y = {\sum_{j=1}^Mx_j w_j} + b$$
#
# Solving the model means fitting each $w_j$ and $b$ from the data, where $w_j$ and $b$ are the weights and the bias of the linear model. In the one-dimensional case, $w_j$ and $b$ are the slope and the intercept of a straight line.
#
# The linear regression model uses the mean squared error as its loss function (Loss), which measures the difference between the predicted and the true house prices:
#
# $$MSE = \frac{1}{n} \sum_{i=1}^n(\hat{Y_i} - {Y_i})^{2}$$
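# A quick numerical check of the two formulas above (illustrative values only, not part of the original notebook):
#
# x = np.array([1.0, 2.0]) # one sample with M = 2 features
# w = np.array([0.5, -1.0]) # weights w_j
# b = 0.3 # bias
# y_hat = np.dot(x, w) + b # y = sum_j x_j * w_j + b = -1.2
# mse = np.mean((y_hat - (-1.0)) ** 2) # MSE against a true value of -1.0, giving 0.04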
# # python+numpy
# ### Data processing
#
# Data processing consists of five parts: loading the data, reshaping it, splitting the dataset, normalizing it, and wrapping everything in a `load data` function. Only after preprocessing can the data be fed to the model.
# #### Reading the data
# The code below reads the data so we can inspect the structure of the Boston housing dataset; the data is stored locally in the file housing.data.
# #### Reshaping the data
# The raw data is read in as a 1-D array with all values concatenated, so we need to reshape it into a 2-D matrix in which each row is one sample (14 values): 13 features $X$ (factors influencing the price) and one $Y$ (the median price for that type of house).
# #### Splitting the dataset
# The dataset is split into a training set, used to determine the model parameters, and a test set, used to evaluate the model.
# In this case we use 80% of the data for training and 20% for testing, implemented in the code below. Printing the shape of the training set shows 404 samples, each with 13 features and 1 target value.
# #### Normalization
# Each feature is normalized so that its values are scaled to the range 0~1. This has two benefits: training is more efficient, and the magnitude of each weight indicates how much that variable contributes to the prediction (because all feature values share the same range).
# In[80]:
import numpy as np
def load_data():
# Load the data from the file
datafile = './work/housing.data'
data = np.fromfile(datafile, sep=' ')
# Each record has 14 entries: the first 13 are the influencing factors and the 14th is the corresponding median house price
feature_names = [ 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV' ]
feature_num = len(feature_names)
# Reshape the data
# Reshape the raw 1-D data into an [N, 14] array
data = data.reshape([data.shape[0] // feature_num, feature_num])
# Split the original dataset into a training set and a test set
# Here 80% of the data is used for training and 20% for testing
# The test set and the training set must not overlap
ratio = 0.8
offset = int(data.shape[0] * ratio)
training_data = data[:offset]
# Compute the maximum, minimum and mean of the training set
maximums, minimums, avgs = training_data.max(axis=0), training_data.min(axis=0), training_data.sum(axis=0) / training_data.shape[0]
# Normalize the data
for i in range(feature_num):
#print(maximums[i], minimums[i], avgs[i])
data[:, i] = (data[:, i] - minimums[i]) / (maximums[i] - minimums[i])
# Ratio used to split the training and test sets
training_data = data[:offset]
test_data = data[offset:]
return training_data, test_data
# In[81]:
# Get the data
training_data, test_data = load_data()
x = training_data[:, :-1]
y = training_data[:, -1:]
# ## Model design
#
# Model design is one of the key elements of a deep learning model. It is also known as network architecture design; it corresponds to the hypothesis space of the model, i.e. the "forward computation" from inputs to outputs.
#
# If the input features and the output prediction are both represented as vectors, with the input feature $x$ having 13 components and $y$ having 1 component, then the shape of the weight parameters is $13\times1$.
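# Illustrative shapes for this single-layer view (not part of the original notebook): with
# x of shape (404, 13), w of shape (13, 1) and a scalar b, z = np.dot(x, w) + b has shape
# (404, 1), i.e. one predicted price per training sample.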
# ## Training process
#
# The computation above describes how to build the network and use it to compute the prediction and the loss. Next we discuss how to solve for the parameter values $w$ and $b$, a process also known as model training. Training is another key element of a deep learning model; its goal is to make the loss function $Loss$ as small as possible, i.e. to find parameters $w$ and $b$ at which the loss reaches a minimum.
#
# ### Gradient descent
#
# In practice many functions are easy to evaluate in the forward direction but hard to invert; these are called one-way functions and are widely used in cryptography. A combination lock is a good example: it is quick to check whether a key is correct (given $x$, computing $y$ is easy), but even with access to the lock you cannot recover the correct key (given $y$, finding $x$ is hard).
#
# This situation is much like a blind person trying to walk from a mountain top down into a valley. They cannot see where the valley is (we cannot analytically solve for the parameters where the derivative of $Loss$ is zero), but they can feel the slope of the ground around them (the derivative at the current point, also called the gradient). So the minimum of the loss can be found like this: starting from the current parameter values, take step after step in the downhill direction until reaching the lowest point. This is "gradient descent".
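# A minimal sketch of one gradient-descent step for the linear model above. This cell is
# illustrative only and not part of the original notebook; all variable names here are hypothetical.
# In[ ]:
import numpy as np
x_demo = np.random.rand(404, 13) # normalized features
y_demo = np.random.rand(404, 1) # target prices
w_demo = np.random.randn(13, 1)
b_demo = 0.0
eta = 0.01 # learning rate (size of each downhill step)
z_demo = np.dot(x_demo, w_demo) + b_demo # forward pass
grad_w = np.dot(x_demo.T, z_demo - y_demo) / x_demo.shape[0] # dLoss/dw for the MSE loss (up to a factor of 2)
grad_b = np.mean(z_demo - y_demo) # dLoss/db (up to a factor of 2)
w_demo = w_demo - eta * grad_w # step in the negative gradient (downhill) direction
b_demo = b_demo - eta * grad_b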
# In[161]:
import numpy as np
def sigmoid(x):
# Sigmoid activation function
return 1/(1+np.exp(-x))
def dsigmoid(x):
# Derivative of the sigmoid activation function (expects x to already be a sigmoid output)
return x*(1-x)
class Network(object):
def __init__(self, num_of_weights,hidden_sum):
# Randomly initialize the weights w
# Fix the random seed so that every run of the program produces the same result
np.random.seed(0)
self.w_1 = np.random.randn(num_of_weights, hidden_sum) # parameters of the first fully connected layer
self.b_1 = np.zeros(hidden_sum)
self.w_2 = np.random.randn(hidden_sum,1) # parameters of the second fully connected layer
self.b_2 = 0.
def forward(self, x):
z = np.dot(x, self.w_1)
import numpy as np
from numpy.linalg import norm
from .Line import Line
class Plane:
"""Class to represent planes in a three dimensional space.
Documentation obtained from: http://commons.apache.org/proper/commons-math
/apidocs/org/apache/commons/math4/geometry/euclidean/threed/Plane.html
Attributes
----------
u : array-like
First vector of the plane frame (in plane).
v : array-like
Second vector of the plane frame (in plane).
w : array-like
Third vector of the plane frame (plane normal).
origin : array-like
Origin of the plane frame.
origin_offset : float
Offset of the origin with respect to the plane.
tolerance : float
Tolerance below which points are considered identical.
"""
def __init__(self, normal=None, tolerance=None, p=None, plane=None,
p1=None, p2=None, p3=None):
"""Function to build a plane normal to a given direction and containing
the origin.
If p is specified, the plane contains the point. If plane is
specified, makes a copy of the plane.
Parameters
----------
normal : array-like
Normal direction to the plane.
tolerance : float
Tolerance below which points are considered identical.
p : array-like
Point belonging to the plane.
plane : Plane
Plane to copy.
p1 : array-like
Point belonging to the plane.
p2 : array-like
Point belonging to the plane.
p3 : array-like
Point belonging to the plane.
Raises
------
Exception
If norm is zero.
"""
if plane is None and normal is not None and tolerance is not None:
n = norm(normal)
if n < 1e-10:
raise Exception("Norm is zero!")
# Third vector of the plane frame (plane normal).
self.w = normal / n
# Tolerance below which points are considered identical.
self.tolerance = tolerance
# Offset of the origin with respect to the plane.
self.origin_offset = -np.dot(p, self.w) if p is not None else 0
# Origin of the plane frame.
self.origin = -self.origin_offset * self.w
# First vector of the plane frame (in plane).
self.u = self.orthogonal(self.w)
# Second vector of the plane frame (in plane).
self.v = np.cross(self.w, self.u)
elif plane is not None:
# Offset of the origin with respect to the plane.
self.origin_offset = plane.origin_offset
# Origin of the plane frame.
self.origin = plane.origin
# First vector of the plane frame (in plane).
self.u = plane.u
# Second vector of the plane frame (in plane).
self.v = plane.v
# Third vector of the plane frame (plane normal).
self.w = plane.w
# Tolerance below which points are considered identical.
self.tolerance = plane.tolerance
elif p1 is not None and p2 is not None and p3 is not None and \
tolerance is not None:
v1 = np.array(p1, dtype=float)
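# A hypothetical usage sketch (not part of the original module; it assumes the rest of the class,
# including the orthogonal() helper referenced above, is available):
# plane = Plane(normal=np.array([0.0, 0.0, 1.0]), tolerance=1.0e-10, p=np.array([0.0, 0.0, 2.0]))
# plane.w is then the unit normal [0, 0, 1] and plane.origin is the point [0, 0, 2].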
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Project
# Licensed under the MIT License
import warnings
import pytest
import numpy as np
from copy import deepcopy
import os
import sys
import shutil
from scipy import constants, interpolate
from pyuvdata import UVCal, UVData
from hera_sim.interpolators import Beam
from hera_sim import DATA_PATH as HS_DATA_PATH
from hera_sim import noise
from uvtools import dspec
from hera_cal import io, datacontainer
from hera_cal import vis_clean
from hera_cal.vis_clean import VisClean
from hera_cal.data import DATA_PATH
from hera_cal import frf
import glob
import copy
# test flagging utility functions
def test_truncate_flagged_edges():
Nfreqs = 64
Ntimes = 60
data_in = np.outer(np.arange(1, Ntimes + 1), np.arange(1, Nfreqs + 1))
weights_in = np.abs(data_in).astype(float)
data_in = data_in + .3j * data_in
# flag channel 30
weights_in[:, 30] = 0.
# flag last channel
weights_in[:, -1] = 0.
# flag last two integrations
weights_in[-2:, :] = 0.
times = np.arange(60) * 10.
freqs = np.arange(64) * 100e3
# test freq truncation
xout, dout, wout, edges, _ = vis_clean.truncate_flagged_edges(data_in, weights_in, freqs, ax='freq')
assert np.all(np.isclose(xout, freqs[:-1]))
assert np.all(np.isclose(dout, data_in[:, :-1]))
assert np.all(np.isclose(wout, weights_in[:, :-1]))
assert edges == [(0, 1)]
# test time truncation
xout, dout, wout, edges, _ = vis_clean.truncate_flagged_edges(data_in, weights_in, times, ax='time')
assert np.all(np.isclose(xout, times[:-2]))
assert np.all(np.isclose(dout, data_in[:-2, :]))
assert np.all(np.isclose(wout, weights_in[:-2, :]))
assert edges == [(0, 2)]
# test truncating both.
xout, dout, wout, edges, _ = vis_clean.truncate_flagged_edges(data_in, weights_in, (times, freqs), ax='both')
assert np.all(np.isclose(xout[0], times[:-2]))
assert np.all(np.isclose(xout[1], freqs[:-1]))
assert np.all(np.isclose(dout, data_in[:-2, :-1]))
assert np.all(np.isclose(wout, weights_in[:-2, :-1]))
assert edges == [[(0, 2)], [(0, 1)]]
def test_restore_flagged_edges():
Nfreqs = 64
Ntimes = 60
data_in = np.outer(np.arange(1, Ntimes + 1), np.arange(1, Nfreqs + 1))
weights_in = np.abs(data_in).astype(float)
data_in = data_in + .3j * data_in
# flag channel 30
weights_in[:, 30] = 0.
# flag last channel
weights_in[:, -1] = 0.
# flag last two integrations
weights_in[-2:, :] = 0.
times = np.arange(60) * 10.
freqs = np.arange(64) * 100e3
# test freq truncation
xout, dout, wout, edges, _ = vis_clean.truncate_flagged_edges(data_in, weights_in, freqs, ax='freq')
wrest = vis_clean.restore_flagged_edges(xout, wout, edges)
assert np.allclose(weights_in[:, :-1], wrest[:, :-1])
assert np.allclose(wrest[:, -1], 0.0)
xout, dout, wout, edges, _ = vis_clean.truncate_flagged_edges(data_in, weights_in, times, ax='time')
wrest = vis_clean.restore_flagged_edges(xout, wout, edges, ax='time')
assert np.allclose(wout, wrest[:-2, :])
assert np.allclose(wrest[-2:, :], 0.0)
xout, dout, wout, edges, _ = vis_clean.truncate_flagged_edges(data_in, weights_in, (times, freqs), ax='both')
wrest = vis_clean.restore_flagged_edges(xout, wout, edges, ax='both')
assert np.allclose(wrest[-2:, :], 0.0)
assert np.allclose(wrest[:, -1], 0.0)
assert np.allclose(wout, wrest[:-2, :-1])
def test_find_discontinuity_edges():
assert vis_clean.find_discontinuity_edges([0, 1, 4, 9]) == [(0, 2), (2, 3), (3, 4)]
assert vis_clean.find_discontinuity_edges([0, 1, 2, 4, 5, 6, 7, 9, 11, 12]) == [(0, 3), (3, 7), (7, 8), (8, 10)]
def test_flag_rows_with_flags_within_edge_distance():
Nfreqs = 64
Ntimes = 60
weights_in = np.outer(np.arange(1, Ntimes + 1), np.arange(1, Nfreqs + 1))
weights_in[32, 2] = 0.
weights_in[33, 12] = 0.
weights_in[2, 30] = 0.
weights_in[-10, 20] = 0.
freqs = np.arange(Nfreqs) * 100e3
# under the above flagging pattern
# freq flagging with min_flag_edge_distance=2 yields 32nd integration flagged only.
wout = vis_clean.flag_rows_with_flags_within_edge_distance(freqs, weights_in, min_flag_edge_distance=3, ax='freq')
for i in range(wout.shape[0]):
if i == 32:
assert np.all(np.isclose(wout[i], 0.0))
else:
assert np.all(np.isclose(wout[i], weights_in[i]))
# extending edge_distance to 12 should yield 33rd integration being flagged as well.
wout = vis_clean.flag_rows_with_flags_within_edge_distance(freqs, weights_in, min_flag_edge_distance=13, ax='freq')
for i in range(wout.shape[0]):
if i == 32 or i == 33:
assert np.all(np.isclose(wout[i], 0.0))
else:
assert np.all(np.isclose(wout[i], weights_in[i]))
# now do time axis. 30th channel should be flagged for this case.
wout = vis_clean.flag_rows_with_flags_within_edge_distance(freqs, weights_in, min_flag_edge_distance=3, ax='time')
for i in range(wout.shape[1]):
if i == 30:
assert np.all(np.isclose(wout[:, i], 0.0))
else:
assert np.all(np.isclose(wout[:, i], weights_in[:, i]))
# 30th and 20th channels should end up flagged for this case.
times = np.arange(Ntimes) * 10.
wout = vis_clean.flag_rows_with_flags_within_edge_distance(times, weights_in, min_flag_edge_distance=11, ax='time')
for i in range(wout.shape[1]):
if i == 30 or i == 20:
assert np.all(np.isclose(wout[:, i], 0.0))
else:
assert np.all(np.isclose(wout[:, i], weights_in[:, i]))
# now do both
wout = vis_clean.flag_rows_with_flags_within_edge_distance([times, freqs], weights_in, min_flag_edge_distance=(3, 3), ax='both')
for i in range(wout.shape[1]):
if i == 30:
assert np.all(np.isclose(wout[:, i], 0.0))
for i in range(wout.shape[0]):
if i == 32:
assert np.all(np.isclose(wout[i], 0.0))
def test_flag_rows_with_flags_within_edge_distance_with_breaks():
Nfreqs = 64
Ntimes = 60
freqs = np.hstack([np.arange(23), 30 + np.arange(24), 58 + np.arange(17)]) * 100e3 + 150e6 # freq axis with discontinuities at 23 and 47 integrations.
times = np.hstack([np.arange(20) * 11., 41 * 11. + np.arange(27) * 11., 200 * 11. + np.arange(13) * 11.]) # time axis with discontinuities at 20 and 47 integrations
weights_in = np.outer(np.arange(1, Ntimes + 1), np.arange(1, Nfreqs + 1))
# frequency direction and time direction separately.
weights_in[2, 30] = 0. # time 2 should not get flagged
weights_in[21, 48] = 0. # time 21 should get flagged
weights_in[55, 46] = 0. # time 55 should get flagged
weights_in[25, -2] = 0. # time 25 should get flagged
wout = vis_clean.flag_rows_with_flags_within_edge_distance(freqs, weights_in, min_flag_edge_distance=3, ax='freq')
assert list(np.where(np.all(np.isclose(wout, 0.), axis=1))[0]) == [21, 25, 55]
weights_in[22, 30] = 0. # channel 30 should be flagged
# channel 48 will also be flagged.
wout = vis_clean.flag_rows_with_flags_within_edge_distance(times, weights_in, min_flag_edge_distance=3, ax='time')
assert list(np.where(np.all(np.isclose(wout, 0.), axis=0))[0]) == [30, 48]
weights_in = np.outer(np.arange(1, Ntimes + 1), np.arange(1, Nfreqs + 1))
# both directions
weights_in[22, 30] = 0. # time 2 should not get flagged
weights_in[55, 46] = 0. # time 55 should get flagged
weights_in[25, -2] = 0. # time 25 should get flagged
weights_in[22, 30] = 0. # channel 30 should be flagged
wout = vis_clean.flag_rows_with_flags_within_edge_distance([times, freqs], weights_in, min_flag_edge_distance=[2, 3], ax='both')
assert list(np.where(np.all(np.isclose(wout, 0.), axis=0))[0]) == [30]
assert list(np.where(np.all(np.isclose(wout, 0.), axis=1))[0]) == [25, 55]
def test_flag_rows_with_contiguous_flags():
Nfreqs = 64
Ntimes = 60
weights_in = np.outer(np.arange(1, Ntimes + 1), np.arange(1, Nfreqs + 1))
weights_in[32, 2:12] = 0.
weights_in[35, 12:14] = 0.
weights_in[2:12, 30] = 0.
weights_in[-10:-8, 20] = 0.
wout = vis_clean.flag_rows_with_contiguous_flags(weights_in, max_contiguous_flag=8, ax='freq')
for i in range(wout.shape[0]):
if i == 32:
assert np.all(np.isclose(wout[i], 0.0))
else:
assert np.all(np.isclose(wout[i], weights_in[i]))
# extending edge_distance to 12 should yield 33rd integration being flagged as well.
wout = vis_clean.flag_rows_with_contiguous_flags(weights_in, max_contiguous_flag=2, ax='freq')
for i in range(wout.shape[0]):
if i == 32 or i == 35:
assert np.all(np.isclose(wout[i], 0.0))
else:
assert np.all(np.isclose(wout[i], weights_in[i]))
# now do time axis. 30th channel should be flagged for this case.
wout = vis_clean.flag_rows_with_contiguous_flags(weights_in, max_contiguous_flag=8, ax='time')
for i in range(wout.shape[1]):
if i == 30:
assert np.all(np.isclose(wout[:, i], 0.0))
else:
assert np.all(np.isclose(wout[:, i], weights_in[:, i]))
# 30th and 20th channels should end up flagged for this case.
wout = vis_clean.flag_rows_with_contiguous_flags(weights_in, max_contiguous_flag=2, ax='time')
for i in range(wout.shape[1]):
if i == 30 or i == 20:
assert np.all(np.isclose(wout[:, i], 0.0))
else:
assert np.all(np.isclose(wout[:, i], weights_in[:, i]))
# now do both
wout = vis_clean.flag_rows_with_contiguous_flags(weights_in, max_contiguous_flag=(3, 3), ax='both')
for i in range(wout.shape[1]):
if i == 30:
assert np.all(np.isclose(wout[:, i], 0.0))
for i in range(wout.shape[0]):
if i == 32:
assert np.all(np.isclose(wout[i], 0.0))
def test_get_max_contiguous_flag_from_filter_periods():
Nfreqs = 64
Ntimes = 60
times = np.arange(60) * 10.
freqs = np.arange(64) * 100e3
filter_centers = [[0.], [0.]]
filter_half_widths = [[1 / (3. * 10)], [1 / (100e3 * 2)]]
mcf = vis_clean.get_max_contiguous_flag_from_filter_periods(freqs, filter_centers[1], filter_half_widths[1])
assert mcf == 2
mcf = vis_clean.get_max_contiguous_flag_from_filter_periods(times, filter_centers[0], filter_half_widths[0])
assert mcf == 3
mcf = vis_clean.get_max_contiguous_flag_from_filter_periods((times, freqs), filter_centers, filter_half_widths)
assert tuple(mcf) == (3, 2)
# test assertion errors
pytest.raises(ValueError, vis_clean.get_max_contiguous_flag_from_filter_periods, [1.], [0.], [.5])
pytest.raises(ValueError, vis_clean.get_max_contiguous_flag_from_filter_periods, [[1.], [0.]], [[0.], [0.]], [[.5], [.5]])
def test_flag_model_rms():
Nfreqs = 64
Ntimes = 60
times = np.arange(60) * 10.
freqs = np.arange(64) * 100e3
w = np.ones((Ntimes, Nfreqs), dtype=bool)
d = np.random.randn(Ntimes, Nfreqs) * 1e-3 + 1j * np.random.randn(Ntimes, Nfreqs) * 1e-3
d += np.ones_like(d) * 100
d[30, 12] = 3.12315132e6
w[30, 12] = 0.
mdl = np.ones_like(d) * 100
mdl[30, 24] = 1e6
skipped = np.zeros_like(mdl, dtype=bool)
skipped = vis_clean.flag_model_rms(skipped, d, w, mdl, ax='freq')
for i in range(Ntimes):
if i == 30:
assert np.all(skipped[i])
else:
assert np.all(~skipped[i])
skipped = np.zeros_like(mdl, dtype=bool)
skipped = vis_clean.flag_model_rms(skipped, d, w, mdl, ax='time')
for i in range(Ntimes):
if i == 24:
assert np.all(skipped[:, i])
else:
assert np.all(~skipped[:, i])
skipped = np.zeros_like(mdl, dtype=bool)
skipped = vis_clean.flag_model_rms(skipped, d, w, mdl, ax='both')
for i in range(Nfreqs):
if i == 24:
assert np.all(skipped[:, i])
else:
assert ~np.all(skipped[:, i])
for i in range(Ntimes):
if i == 30:
assert np.all(skipped[i])
else:
assert ~np.all(skipped[i])
@pytest.mark.filterwarnings("ignore:The default for the `center` keyword has changed")
@pytest.mark.filterwarnings("ignore:It seems that the latitude and longitude are in radians")
class Test_VisClean(object):
def test_init(self):
# test basic init
fname = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
V = VisClean(fname, filetype='uvh5')
assert not hasattr(V, 'data')
V.read(bls=[(24, 25, 'ee')])
assert hasattr(V, 'data')
assert hasattr(V, 'antpos')
assert isinstance(V.hd, io.HERAData)
assert isinstance(V.hd.data_array, np.ndarray)
# test basic init w/ uvh5
fname = os.path.join(DATA_PATH, 'zen.2458098.43124.subband.uvh5')
V = VisClean(fname, filetype='uvh5')
assert not hasattr(V, 'data')
V.read(bls=[(13, 14, 'ee')])
assert set(V.hd.ant_1_array) == set([13])
assert isinstance(V.hd, io.HERAData)
assert isinstance(V.hd.data_array, np.ndarray)
# test input cal
fname = os.path.join(DATA_PATH, 'zen.2458043.12552.xx.HH.uvORA')
uvc = io.HERACal(os.path.join(DATA_PATH, 'zen.2458043.12552.xx.HH.uvORA.abs.calfits'))
gains, _, _, _ = uvc.read()
V1 = VisClean(fname, filetype='miriad')
bl = (52, 53, 'ee')
V1.read(bls=[bl])
V2 = VisClean(fname, filetype='miriad', input_cal=uvc)
V2.read(bls=[bl])
g = gains[(bl[0], 'Jee')] * gains[(bl[1], 'Jee')].conj()
assert np.allclose((V1.data[bl] / g)[30, 30], V2.data[bl][30, 30])
V2.apply_calibration(V2.hc, unapply=True)
assert np.allclose(V1.data[bl][30, 30], V2.data[bl][30, 30], atol=1e-5)
# test soft copy
V1.hello = 'hi'
V1.hello_there = 'bye'
V1.foo = 'bar'
V3 = V1.soft_copy(references=["hello*"])
assert hex(id(V1.data[(52, 53, 'ee')])) == hex(id(V3.data[(52, 53, 'ee')]))
assert hasattr(V3, 'hello')
assert hasattr(V3, 'hello_there')
assert not hasattr(V3, 'foo')
assert V3.__class__ == VisClean
# test clear
V1.clear_containers()
assert np.all([len(getattr(V1, c)) == 0 for c in ['data', 'flags', 'nsamples']])
V2.clear_calibration()
assert not hasattr(V2, 'hc')
@pytest.mark.filterwarnings("ignore:Selected polarization values are not evenly spaced")
def test_read_write(self):
# test read data can be turned off for uvh5
fname = os.path.join(DATA_PATH, 'zen.2458098.43124.subband.uvh5')
V = VisClean(fname, filetype='uvh5')
V.read(read_data=False)
assert set(V.hd.ant_1_array) == set([1, 11, 12, 13, 14])
# test read-write-read
V.read()
V.write_data(V.data, "./ex.uvh5", overwrite=True, filetype='uvh5', extra_attrs=dict(vis_units='Jy'))
V2 = VisClean("./ex.uvh5", filetype='uvh5')
V2.read()
assert V2.hd.vis_units == 'Jy'
assert 'Thisfilewasproducedbythefunction' in V2.hd.history.replace('\n', '').replace(' ', '')
V.hd.history, V2.hd.history, V2.hd.vis_units = '', '', V.hd.vis_units
if hasattr(V.hd, "filename"):
# make sure filename attributes are what we're expecting
assert V.hd.filename == ["zen.2458098.43124.subband.uvh5"]
assert V2.hd.filename == ["ex.uvh5"]
V.hd.filename = V2.hd.filename
assert V.hd == V2.hd
os.remove("./ex.uvh5")
# exceptions
pytest.raises(ValueError, V.write_data, V.data, 'foo', filetype='what')
# test write on subset of data
V.read(read_data=True)
data = datacontainer.DataContainer(dict([(k, V.data[k]) for k in list(V.data.keys())[:2]]))
V.write_data(data, "ex.uvh5", overwrite=True, filetype='uvh5')
assert os.path.exists("ex.uvh5")
os.remove('ex.uvh5')
def test_fourier_filter(self):
fname = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
V = VisClean(fname, filetype='uvh5')
V.read()
# test arg errors
k = (24, 25, 'ee')
fc = [0.]
fw = [100e-9]
ff = [1e-9]
fwt = [1e-3]
assert pytest.raises(ValueError, V.fourier_filter, keys=[k], overwrite=True,
filter_centers=fc, filter_half_widths=fw, suppression_factors=ff,
ax='height', mode='dayenu', fitting_options=None)
V.fourier_filter(keys=[k], filter_centers=fc, filter_half_widths=fw, suppression_factors=ff,
ax='freq', mode='dayenu', output_prefix='clean', zeropad=10, overwrite=True, max_contiguous_edge_flags=20)
# this line is repeated to cover the overwrite skip
V.fourier_filter(keys=[k], filter_centers=fc, filter_half_widths=fw, suppression_factors=ff, max_contiguous_edge_flags=20,
ax='freq', mode='dayenu', zeropad=10, output_prefix='clean', overwrite=False)
assert np.all([V.clean_info[k][(0, V.Nfreqs)]['status']['axis_1'][i] == 'success' for i in V.clean_info[k][(0, V.Nfreqs)]['status']['axis_1']])
# now do a time filter
V.fourier_filter(keys=[k], filter_centers=fc, filter_half_widths=fwt, suppression_factors=ff, overwrite=True,
ax='time', mode='dayenu', zeropad=10, max_contiguous_edge_flags=20)
assert V.clean_info[k][(0, V.Nfreqs)]['status']['axis_0'][0] == 'skipped'
assert V.clean_info[k][(0, V.Nfreqs)]['status']['axis_0'][3] == 'success'
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], rtol=0., atol=atol))
# raise errors.
assert pytest.raises(ValueError, V.fourier_filter, filter_centers=[fc, fc], ax='both',
filter_half_widths=[fwt, fw], suppression_factors=[ff, ff],
mode='dayenu', zeropad=0, overwrite=True)
assert pytest.raises(ValueError, V.fourier_filter, filter_centers=[fc, fc], ax='both',
filter_half_widths=[fwt, fw], suppression_factors=[ff, ff], overwrite=True,
mode='dayenu', zeropad=['Mathematical Universe', 'Crazy Universe'])
# check 2d filter.
V.fourier_filter(filter_centers=[fc, fc],
filter_half_widths=[fwt, fw],
suppression_factors=[ff, ff],
mode='dayenu', overwrite=True,
zeropad=[20, 10], ax='both', max_contiguous_edge_flags=100)
assert V.clean_info[k][(0, V.Nfreqs)]['status']['axis_0'][0] == 'skipped'
assert V.clean_info[k][(0, V.Nfreqs)]['status']['axis_0'][3] == 'success'
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.allclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], rtol=0., atol=atol)
@pytest.mark.filterwarnings("ignore:.*dspec.vis_filter will soon be deprecated")
def test_vis_clean_dayenu(self):
fname = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
V = VisClean(fname, filetype='uvh5')
V.read()
# most coverage is in dspec. Check that args go through here.
# similar situation for test_vis_clean.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='freq', overwrite=True, mode='dayenu')
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
# had to set atol=1e-6 here so it won't fail on travis (it runs fine on my laptop). There are some funny
# numpy issues.
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=atol, rtol=0.))
assert np.all([V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1'][i] == 'success' for i in V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1']])
assert pytest.raises(AssertionError, V.vis_clean, keys=[(24, 25, 'ee')], ax='time', max_frate=None, mode='dayenu')
assert pytest.raises(ValueError, V.vis_clean, keys=[(24, 25, 'ee')], ax='time', max_frate='arglebargle', mode='dayenu')
# cover no overwrite = False skip lines.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='freq', overwrite=False, mode='dayenu')
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='time', overwrite=True, max_frate=1.0, mode='dayenu')
assert V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_0'][0] == 'skipped'
assert V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_0'][3] == 'success'
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.allclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=atol, rtol=0.)
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='both', overwrite=True, max_frate=1.0, mode='dayenu')
assert np.all(['success' == V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1'][i] for i in V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1']])
assert V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_0'][0] == 'skipped'
assert V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_0'][3] == 'success'
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.allclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=atol, rtol=0.)
# check whether dayenu filtering axis 1 and then axis 0 is the same as dayenu filtering axis 1 and then filtering the resid.
# note that filtering axis orders do not commute, we filter axis 1 (foregrounds) before filtering cross-talk.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='both', overwrite=True, max_frate=1.0, mode='dayenu')
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='freq', overwrite=True, max_frate=1.0, output_prefix='clean1', mode='dayenu')
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='time', overwrite=True, max_frate=1.0, data=V.clean1_resid, output_prefix='clean0', mode='dayenu')
assert np.all(np.isclose(V.clean_resid[(24, 25, 'ee')], V.clean0_resid[(24, 25, 'ee')]))
@pytest.mark.filterwarnings("ignore:.*dspec.vis_filter will soon be deprecated")
def test_vis_clean_dpss(self):
# Relax atol=1e-6 for clean_data and data equalities. there may be some numerical
# issues going on. Notebook tests show that distributing minus signs has
# consequences.
fname = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
V = VisClean(fname, filetype='uvh5')
V.read()
# most coverage is in dspec. Check that args go through here.
# similar situation for test_vis_clean.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='freq', overwrite=True, mode='dpss_leastsq')
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=atol, rtol=0.))
assert np.all([V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1'][i] == 'success' for i in V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1']])
assert pytest.raises(AssertionError, V.vis_clean, keys=[(24, 25, 'ee')], ax='time', mode='dpss_leastsq')
assert pytest.raises(ValueError, V.vis_clean, keys=[(24, 25, 'ee')], ax='time', max_frate='arglebargle', mode='dpss_leastsq')
# cover no overwrite = False skip lines.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='freq', overwrite=False, mode='dpss_leastsq')
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='time', overwrite=True, max_frate=1.0, mode='dpss_leastsq')
assert V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_0'][0] == 'skipped'
assert V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_0'][3] == 'success'
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=atol, rtol=0.))
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='both', overwrite=True, max_frate=1.0, mode='dpss_leastsq')
assert np.all(['success' == V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1'][i] for i in V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1']])
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=atol, rtol=0.))
# run with flag_model_rms_outliers
for ax in ['freq', 'time', 'both']:
for k in V.flags:
V.flags[k][:] = False
V.data[k][:] = np.random.randn(*V.data[k].shape) + 1j * np.random.randn(*V.data[k].shape)
# run with rms threshold < 1 which should lead to everything being flagged.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax=ax, overwrite=True,
max_frate=1.0, mode='dpss_leastsq', flag_model_rms_outliers=True, model_rms_threshold=0.1)
for k in [(24, 25, 'ee'), (24, 25, 'ee')]:
assert np.all(V.clean_flags[k])
# now use a threshold which should not lead to any flags.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax=ax, overwrite=True,
max_frate=1.0, mode='dpss_leastsq', flag_model_rms_outliers=True, model_rms_threshold=1e6)
for k in [(24, 25, 'ee'), (24, 25, 'ee')]:
assert not np.any(V.clean_flags[k])
def test_vis_clean_flag_options(self, tmpdir):
# tests for time and frequency partial flagging.
tmp_path = tmpdir.strpath
template = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
# first run flagging channels and frequencies
fname_edgeflags = os.path.join(tmp_path, "zen.2458043.40141.xx.HH.XRAA.edgeflags.uvh5")
fname_flagged = os.path.join(tmp_path, "zen.2458043.40141.xx.HH.XRAA.allflags.uvh5")
hdt = io.HERAData(template)
d, f, n = hdt.read()
for k in d:
f[k][:] = False
f[k][:, 0] = True
f[k][0, :] = True
hdt.update(flags=f)
hdt.write_uvh5(fname_edgeflags)
for k in d:
f[k][:] = True
hdt.update(flags=f)
hdt.write_uvh5(fname_flagged)
V = VisClean(fname_flagged, filetype='uvh5')
V.read()
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='freq', overwrite=True,
skip_flagged_edges=True)
# make sure if no unflagged channels exist, then the clean flags are all flagged.
for k in V.clean_flags:
assert np.all(V.clean_flags[k])
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='freq', overwrite=True,
skip_contiguous_flags=True)
for k in V.clean_flags:
assert np.all(V.clean_flags[k])
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='time', overwrite=True,
skip_contiguous_flags=True, max_frate=0.025)
for k in V.clean_flags:
assert np.all(V.clean_flags[k])
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='both', overwrite=True,
skip_contiguous_flags=True, max_frate=0.025)
for k in V.clean_flags:
assert np.all(V.clean_flags[k])
# now do file with some edge flags. Make sure the edge flags remain in clean_flags.
V = VisClean(fname_edgeflags, filetype='uvh5')
V.read()
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='freq', overwrite=True,
skip_flagged_edges=True)
for k in V.clean_flags:
if not np.all(V.flags[k]):
assert not np.all(V.clean_flags[k])
assert np.all(V.clean_flags[k][0])
assert np.all(V.clean_flags[k][:, 0])
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='time', overwrite=True,
skip_flagged_edges=True, max_frate=0.025)
for k in V.clean_flags:
if not np.all(V.flags[k]):
assert not np.all(V.clean_flags[k])
assert np.all(V.clean_flags[k][0])
assert np.all(V.clean_flags[k][:, 0])
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='both', overwrite=True,
skip_flagged_edges=True, max_frate=0.025)
for k in V.clean_flags:
if not np.all(V.flags[k]):
assert not np.all(V.clean_flags[k])
assert np.all(V.clean_flags[k][0])
assert np.all(V.clean_flags[k][:, 0])
# now try using skip_contiguous flag gaps.
standoff = 1e9 / (np.median(np.diff(V.freqs)))
max_frate = datacontainer.DataContainer({(24, 25, 'ee'): 2. / np.abs(np.median(np.diff(V.times)) * 3.6 * 24.),
(24, 24, 'ee'): 1. / np.abs(2 * np.median(np.diff(V.times)
import numpy
'''
compute angle (in degrees) for p0p1p2 corner
Inputs:
p0,p1,p2 - points in the form of [x,y]
'''
def calc_angle(p0, p1, p2):
v0 = numpy.array(p0) - numpy.array(p1)
v1 = numpy.array(p2) - numpy.array(p1)
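# The rest of this function is not included in this snippet. A typical continuation (illustrative
# only, not the author's original code) would be:
# cos_a = numpy.dot(v0, v1) / (numpy.linalg.norm(v0) * numpy.linalg.norm(v1))
# return numpy.degrees(numpy.arccos(cos_a))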
##############################
# Import necessary libraries #
##############################
import numpy as np
from scipy.optimize import fsolve
##################################
# Define various math functions. #
##################################
def norm(v):
return np.sqrt(np.dot(v,v))
def S(z):
return ( np.sqrt(z) - np.sin(np.sqrt(z)) ) / np.sqrt(z**3)
def C(z):
return ( 1 - np.cos(np.sqrt(z)) ) / z
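# S(z) and C(z) are the Stumpff functions used in the universal-variable formulation of
# Kepler's problem; as written they assume z > 0 (the elliptical case).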
######################################
# Define class for celestial bodies. #
######################################
# This works at the moment only for elliptical (generic) orbits. Fix this!
class celestial_body:
# This class assumes a reference coordinate system such that a large mass is situated at the origin. It might actually assume some more things.
####### Init #######
def __init__(self,mass,mu,semi_major_axis,eccentricity,inclination,longitude_ascending_node,argument_periapsis,true_anomaly_epoch):
# Initialization of class using classical orbital elements a, e, i, Omega, omega, nu_0
self.semi_major_axis = semi_major_axis # a
self.energy = - mu / ( 2.0 * self.semi_major_axis ) # E
self.eccentricity = eccentricity # e
if self.energy < 0:
if self.eccentricity == 0:
self.type = "circular"
else:
self.type = "elliptical"
elif self.energy == 0:
self.type = "parabolic"
else:
self.type = "hyperbolic"
self.inclination = inclination # i
if inclination == 0:
self.planar = True
else:
self.planar = False
if self.planar == False:
self.longitude_ascending_node = longitude_ascending_node # Omega
self.argument_periapsis = argument_periapsis # omega
else:
self.longitude_ascending_node = 0
self.argument_periapsis = 0
self.true_anomaly_epoch = true_anomaly_epoch # nu
self.mass = mass # m
self.parameter = semi_major_axis * (1 - eccentricity**2) # p
if ( 0 <= self.true_anomaly_epoch ) and ( self.true_anomaly_epoch <= np.pi):
self.eccentric_anomaly = np.arccos((self.eccentricity + np.cos(self.true_anomaly_epoch)) / (1 + self.eccentricity * np.cos(self.true_anomaly_epoch))) # E, at the moment the cases don't cover everything.
else:
self.eccentric_anomaly = 2 * np.pi - np.arccos((self.eccentricity + np.cos(self.true_anomaly_epoch)) / (1 + self.eccentricity * np.cos(self.true_anomaly_epoch))) # E
self.mean_anomaly = self.eccentric_anomaly - self.eccentricity * np.sin(self.eccentric_anomaly) # M
self.mean_motion = np.sqrt(mu / self.semi_major_axis**3 ) # n
self.period = 2 * np.pi / np.sqrt(mu) * np.sqrt(self.semi_major_axis**3) # T
self.mu = mu # mu
self.X = 0 # X for universal formulation of time of flight
@classmethod
def from_position_velocity(self,mass,mu,position,velocity):
# Initialization of class using position and momentum
# For this purpose we need to calculate various intermediate objects. Should we save them for later? Is it more clever to just use position and momentum all the time?
h = np.cross(position,velocity) # Calculate angular momentum h
if np.any(h != 0):
n = np.cross(np.array([0,0,1],float),h) # Calculate node vector
e = 1.0 / mu * ((np.dot(velocity,velocity) - mu / norm(position)) * position - np.dot(position,velocity) * velocity) # Calculate eccentricity vector pointing in direction of perihelion
p = np.dot(h,h) / mu
# Is it better to just save the cosine of the angles?
semi_major_axis = p / (1-np.dot(e,e))
eccentricity = norm(e)
inclination = np.arccos(h[2] / norm(h))
if position[1] >= 0:
longitude_ascending_node = np.arccos(n[0] / norm(n))
else:
longitude_ascending_node = 2 * np.pi - np.arccos(n[0] / norm(n))
if e[2] >= 0:
argument_periapsis = np.arccos(np.dot(n,e) / (norm(n) * norm(e)))
else:
argument_periapsis = 2 * np.pi - np.arccos(np.dot(n,e) / (norm(n) * norm(e)))
if np.dot(position,velocity) >= 0:
true_anomaly_epoch = np.arccos(np.dot(e,position) / (norm(e) * norm(position)))
else:
true_anomaly_epoch = 2 * np.pi - np.arccos(np.dot(e,position) / (norm(e) * norm(position)))
body = celestial_body(mass,mu,semi_major_axis,eccentricity,inclination,longitude_ascending_node,argument_periapsis,true_anomaly_epoch)
return body
else:
return celestial_body.initialize_collision_orbit(mass,mu,position,velocity)
@classmethod
def initialize_collision_orbit(self,mass,mu,position,velocity):
pass
####### Export #######
def export_position_velocity(self):
# Exports position and velocity of celestial body. How should time dependence be incorporated? Should it be a parameter for this function?
r = self.parameter / ( 1 + self.eccentricity * np.cos(self.true_anomaly_epoch))
# The perifocal coordinate system uses coordinate axes P, Q, W in this order, where P points in the direction of the periapsis and Q is perpendicular in positive direction in the plane of the orbit.
position_perifocal_system = np.array([r * np.cos(self.true_anomaly_epoch),r * np.sin(self.true_anomaly_epoch),0],float)
velocity_perifocal_system = np.sqrt(self.mu / self.parameter) * np.array([-np.sin(self.true_anomaly_epoch),self.eccentricity + np.cos(self.true_anomaly_epoch),0],float)
# Calculate the rotation matrix from perifocal to fixed frame. Bate says, one should avoid this technique.
rotation_matrix = np.array([[np.cos(self.longitude_ascending_node) * np.cos(self.argument_periapsis) - np.sin(self.longitude_ascending_node) * np.sin(self.argument_periapsis) * np.cos(self.inclination) , - np.cos(self.longitude_ascending_node) * np.sin(self.argument_periapsis) - np.sin(self.longitude_ascending_node) * np.cos(self.argument_periapsis) * np.cos(self.inclination) , np.sin(self.longitude_ascending_node) * np.sin(self.inclination)],\
[np.sin(self.longitude_ascending_node) * np.cos(self.argument_periapsis) + np.cos(self.longitude_ascending_node) * np.sin(self.argument_periapsis) * np.cos(self.inclination) , - np.sin(self.longitude_ascending_node) * np.sin(self.argument_periapsis) + np.cos(self.longitude_ascending_node) * np.cos(self.argument_periapsis) * np.cos(self.inclination) , - np.cos(self.longitude_ascending_node) * np.sin(self.inclination)],\
[ np.sin(self.argument_periapsis) * np.sin(self.inclination) , np.cos(self.argument_periapsis) * np.sin(self.inclination) , np.cos(self.inclination)]\
],float)
position = np.dot(rotation_matrix,position_perifocal_system)
velocity = np.dot(rotation_matrix,velocity_perifocal_system)
return position, velocity
def export_orbit(self,number_points):
# Returns a list of three dimensional coordinates for the orbit.
position = np.zeros( (number_points,3) )
interval = 2 * np.pi / number_points
for i in range(number_points):
position[i,:] = self.calculate_advance_in_true_anomaly(i * interval)[0]
return np.vstack( (position,position[0,:]) )
###### Advance along orbit #######
def advance_in_time(self,delta_t):
# This method advances the object on its course by delta t in time. This means that it needs to translate the time difference into changes in the true anomaly at epoch and then add this number to the existing value.
# delta_t should be small enough such that the body does not evolve more than one period. Is this necessary?
# Update mean anomaly. Ignore full rotations.
new_mean_anomaly = self.mean_motion * delta_t + self.mean_anomaly
# Solve E-e*sin(E)=M numerically
new_eccentric_anomaly = fsolve(lambda E : E - self.eccentricity * np.sin(E) -new_mean_anomaly,new_mean_anomaly)
# Calculate new true anomaly at epoch
if new_eccentric_anomaly <= np.pi:
new_true_anomaly_epoch = np.arccos( ( np.cos(new_eccentric_anomaly) - self.eccentricity ) / ( 1 - self.eccentricity * np.cos(new_eccentric_anomaly)))
else:
new_true_anomaly_epoch = 2 * np.pi - np.arccos( ( np.cos(new_eccentric_anomaly) - self.eccentricity ) / ( 1 - self.eccentricity * np.cos(new_eccentric_anomaly)))
# Update values of true anomaly at epoch and eccentric anomaly and mean anomaly
self.true_anomaly_epoch = new_true_anomaly_epoch
self.mean_anomaly = new_mean_anomaly
self.eccentric_anomaly = new_eccentric_anomaly
def t_in_dep_of_X(self, X):
r_0, v_0 = self.export_position_velocity()
return 1 / np.sqrt(self.mu) * ( np.dot(r_0,v_0) /np.sqrt(self.mu) * X**2 * C(X) + ( 1 - norm(r_0) / self.semi_major_axis ) * X**3 * S(X) + norm(r_0) * X )
def advance_in_time_universal(self,delta_t):
# This method advances the object on its course by delta t in time using the universal time of fligt formulation. This means it should be usable for all kinds of orbits.
# Solve for new X
new_X = fsolve(lambda X : self.t_in_dep_of_X(X) - delta_t,delta_t)
def advance_in_true_anomaly(self,delta_nu):
# This method increases the true anomaly by a given input. It can be used to find equi-distant-angle points on the orbit for visualization purposes. It also updates eccentric anomaly and mean anomaly.
self.true_anomaly_epoch = self.true_anomaly_epoch + delta_nu
if self.true_anomaly_epoch <= np.pi:
self.eccentric_anomaly = np.arccos( ( np.cos(self.true_anomaly_epoch) + self.eccentricity ) / ( 1 + self.eccentricity * np.cos(self.true_anomaly_epoch)))
else:
self.eccentric_anomaly = 2 * np.pi - np.arccos( ( np.cos(self.true_anomaly_epoch) + self.eccentricity ) / ( 1 + self.eccentricity * np.cos(self.true_anomaly_epoch)))
self.mean_anomaly = self.eccentric_anomaly - self.eccentricity * np.sin( self.eccentric_anomaly )
def calculate_advance_in_true_anomaly(self,delta_nu):
# This method advances the object on its course by delta nu in true anomaly and returns the new position. It is useful for calculating points on the orbit without actually advancing the object itself.
new_true_anomaly_epoch = self.true_anomaly_epoch + delta_nu
r = self.parameter / ( 1 + self.eccentricity * np.cos(new_true_anomaly_epoch))
# The perifocal coordinate system uses coordinate axes P, Q, W in this order, where P points in the direction of the periapsis and Q is perpendicular in positive direction in the plane of the orbit.
position_perifocal_system = np.array([r * np.cos(new_true_anomaly_epoch),r * np.sin(new_true_anomaly_epoch),0],float)
velocity_perifocal_system = np.sqrt(self.mu / self.parameter) * np.array([-np.sin(new_true_anomaly_epoch),self.eccentricity + np.cos(new_true_anomaly_epoch),0],float)
# Calculate the rotation matrix from perifocal to fixed frame. Bate says, one should avoid this technique.
rotation_matrix = np.array([[np.cos(self.longitude_ascending_node) * np.cos(self.argument_periapsis) - np.sin(self.longitude_ascending_node) * np.sin(self.argument_periapsis) * np.cos(self.inclination) , - np.cos(self.longitude_ascending_node) * np.sin(self.argument_periapsis) - np.sin(self.longitude_ascending_node) * np.cos(self.argument_periapsis) * np.cos(self.inclination) , np.sin(self.longitude_ascending_node) * np.sin(self.inclination)],\
[np.sin(self.longitude_ascending_node) * | np.cos(self.argument_periapsis) | numpy.cos |
"""
Tests for direct function
"""
import numpy as np
import pytest
from cayenne.simulation import Simulation
@pytest.mark.parametrize("algorithm", ["direct", "tau_leaping", "tau_adaptive"])
@pytest.mark.usefixtures("setup_basic", "setup_large")
class TestSanitizeAlg:
"""
Sanity checks on Simulation class where simulations are attempted.
"""
def test_null(self, algorithm, setup_basic):
species_names, rxn_names, V_r, V_p, X0, k = setup_basic
k = np.array([0.0, 0.0])
# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from builtins import range, zip
import random
import numpy as np
from copy import deepcopy
from scipy.ndimage import map_coordinates
from scipy.ndimage.filters import gaussian_filter, gaussian_gradient_magnitude
from scipy.ndimage.morphology import grey_dilation
from skimage.transform import resize
from scipy.ndimage.measurements import label as lb
def generate_elastic_transform_coordinates(shape, alpha, sigma):
n_dim = len(shape)
offsets = []
for _ in range(n_dim):
offsets.append(gaussian_filter((np.random.random(shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha)
tmp = tuple([np.arange(i) for i in shape])
coords = np.meshgrid(*tmp, indexing='ij')
indices = [np.reshape(i + j, (-1, 1)) for i, j in zip(offsets, coords)]
return indices
def create_zero_centered_coordinate_mesh(shape):
tmp = tuple([np.arange(i) for i in shape])
coords = np.array(np.meshgrid(*tmp, indexing='ij')).astype(float)
for d in range(len(shape)):
coords[d] -= ((np.array(shape).astype(float) - 1) / 2.)[d]
return coords
def convert_seg_image_to_one_hot_encoding(image, classes=None):
'''
Takes as input an nd array of a label map (any dimension). Outputs a one hot encoding of the label map.
    Example (3D): if input is of shape (x, y, z), the output will be of shape (n_classes, x, y, z)
'''
if classes is None:
classes = np.unique(image)
out_image = np.zeros([len(classes)]+list(image.shape), dtype=image.dtype)
for i, c in enumerate(classes):
out_image[i][image == c] = 1
return out_image
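# Hedged usage sketch (added for illustration, values are made up): one-hot encode a
# tiny 2 x 3 label map with classes {0, 1, 2}.
def _example_one_hot_encoding():
    seg = np.array([[0, 1, 2],
                    [2, 1, 0]])
    one_hot = convert_seg_image_to_one_hot_encoding(seg)
    # one_hot has shape (3, 2, 3); one_hot[c] is the binary mask of class c
    return one_hot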
def elastic_deform_coordinates(coordinates, alpha, sigma):
n_dim = len(coordinates)
offsets = []
for _ in range(n_dim):
offsets.append(
gaussian_filter((np.random.random(coordinates.shape[1:]) * 2 - 1), sigma, mode="constant", cval=0) * alpha)
offsets = np.array(offsets)
indices = offsets + coordinates
return indices
def rotate_coords_3d(coords, angle_x, angle_y, angle_z):
rot_matrix = np.identity(len(coords))
rot_matrix = create_matrix_rotation_x_3d(angle_x, rot_matrix)
rot_matrix = create_matrix_rotation_y_3d(angle_y, rot_matrix)
rot_matrix = create_matrix_rotation_z_3d(angle_z, rot_matrix)
coords = np.dot(coords.reshape(len(coords), -1).transpose(), rot_matrix).transpose().reshape(coords.shape)
return coords
def rotate_coords_2d(coords, angle):
rot_matrix = create_matrix_rotation_2d(angle)
coords = np.dot(coords.reshape(len(coords), -1).transpose(), rot_matrix).transpose().reshape(coords.shape)
return coords
def scale_coords(coords, scale):
return coords * scale
def uncenter_coords(coords):
shp = coords.shape[1:]
coords = deepcopy(coords)
for d in range(coords.shape[0]):
coords[d] += (shp[d] - 1) / 2.
return coords
def interpolate_img(img, coords, order=3, mode='nearest', cval=0.0, is_seg=False):
if is_seg and order != 0:
unique_labels = np.unique(img)
result = np.zeros(coords.shape[1:], img.dtype)
for i, c in enumerate(unique_labels):
res_new = map_coordinates((img == c).astype(float), coords, order=order, mode=mode, cval=cval)
result[res_new >= 0.5] = c
return result
else:
return map_coordinates(img.astype(float), coords, order=order, mode=mode, cval=cval).astype(img.dtype)
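# Hedged end-to-end sketch (added for illustration; alpha/sigma values are arbitrary):
# elastically deform a small 2D image with the helpers above.
def _example_elastic_deformation_2d():
    img = np.random.random((32, 32))
    coords = create_zero_centered_coordinate_mesh(img.shape)      # shape (2, 32, 32)
    coords = elastic_deform_coordinates(coords, alpha=100., sigma=10.)
    coords = uncenter_coords(coords)                              # back to array indices
    return interpolate_img(img, coords, order=3)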
def generate_noise(shape, alpha, sigma):
noise = np.random.random(shape) * 2 - 1
noise = gaussian_filter(noise, sigma, mode="constant", cval=0) * alpha
return noise
def find_entries_in_array(entries, myarray):
entries = np.array(entries)
values = np.arange(np.max(myarray) + 1)
lut = np.zeros(len(values), 'bool')
lut[entries.astype("int")] = True
return np.take(lut, myarray.astype(int))
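# Hedged usage sketch (illustrative values): mark all positions in myarray whose value
# is one of the requested entries.
def _example_find_entries_in_array():
    myarray = np.array([0, 1, 2, 3, 2, 1, 0])
    mask = find_entries_in_array([1, 3], myarray)
    # mask == array([False, True, False, True, False, True, False])
    return mask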
def center_crop_3D_image(img, crop_size):
center = np.array(img.shape) / 2.
if type(crop_size) not in (tuple, list):
center_crop = [int(crop_size)] * len(img.shape)
else:
center_crop = crop_size
assert len(center_crop) == len(
img.shape), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (3d)"
return img[int(center[0] - center_crop[0] / 2.):int(center[0] + center_crop[0] / 2.),
int(center[1] - center_crop[1] / 2.):int(center[1] + center_crop[1] / 2.),
int(center[2] - center_crop[2] / 2.):int(center[2] + center_crop[2] / 2.)]
def center_crop_3D_image_batched(img, crop_size):
# dim 0 is batch, dim 1 is channel, dim 2, 3 and 4 are x y z
center = np.array(img.shape[2:]) / 2.
if type(crop_size) not in (tuple, list):
center_crop = [int(crop_size)] * (len(img.shape) - 2)
else:
center_crop = crop_size
assert len(center_crop) == (len(
img.shape) - 2), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (3d)"
return img[:, :, int(center[0] - center_crop[0] / 2.):int(center[0] + center_crop[0] / 2.),
int(center[1] - center_crop[1] / 2.):int(center[1] + center_crop[1] / 2.),
int(center[2] - center_crop[2] / 2.):int(center[2] + center_crop[2] / 2.)]
def center_crop_2D_image(img, crop_size):
center = np.array(img.shape) / 2.
if type(crop_size) not in (tuple, list):
center_crop = [int(crop_size)] * len(img.shape)
else:
center_crop = crop_size
assert len(center_crop) == len(
img.shape), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (2d)"
return img[int(center[0] - center_crop[0] / 2.):int(center[0] + center_crop[0] / 2.),
int(center[1] - center_crop[1] / 2.):int(center[1] + center_crop[1] / 2.)]
def center_crop_2D_image_batched(img, crop_size):
# dim 0 is batch, dim 1 is channel, dim 2 and 3 are x y
center = np.array(img.shape[2:]) / 2.
if type(crop_size) not in (tuple, list):
center_crop = [int(crop_size)] * (len(img.shape) - 2)
else:
center_crop = crop_size
assert len(center_crop) == (len(
img.shape) - 2), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (2d)"
return img[:, :, int(center[0] - center_crop[0] / 2.):int(center[0] + center_crop[0] / 2.),
int(center[1] - center_crop[1] / 2.):int(center[1] + center_crop[1] / 2.)]
def random_crop_3D_image(img, crop_size):
if type(crop_size) not in (tuple, list):
crop_size = [crop_size] * len(img.shape)
else:
assert len(crop_size) == len(
img.shape), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (3d)"
if crop_size[0] < img.shape[0]:
lb_x = np.random.randint(0, img.shape[0] - crop_size[0])
elif crop_size[0] == img.shape[0]:
lb_x = 0
else:
raise ValueError("crop_size[0] must be smaller or equal to the images x dimension")
if crop_size[1] < img.shape[1]:
lb_y = np.random.randint(0, img.shape[1] - crop_size[1])
elif crop_size[1] == img.shape[1]:
lb_y = 0
else:
raise ValueError("crop_size[1] must be smaller or equal to the images y dimension")
if crop_size[2] < img.shape[2]:
lb_z = np.random.randint(0, img.shape[2] - crop_size[2])
elif crop_size[2] == img.shape[2]:
lb_z = 0
else:
raise ValueError("crop_size[2] must be smaller or equal to the images z dimension")
return img[lb_x:lb_x + crop_size[0], lb_y:lb_y + crop_size[1], lb_z:lb_z + crop_size[2]]
def random_crop_3D_image_batched(img, crop_size):
if type(crop_size) not in (tuple, list):
crop_size = [crop_size] * (len(img.shape) - 2)
else:
assert len(crop_size) == (len(
img.shape) - 2), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (3d)"
if crop_size[0] < img.shape[2]:
lb_x = np.random.randint(0, img.shape[2] - crop_size[0])
elif crop_size[0] == img.shape[2]:
lb_x = 0
else:
raise ValueError("crop_size[0] must be smaller or equal to the images x dimension")
if crop_size[1] < img.shape[3]:
lb_y = np.random.randint(0, img.shape[3] - crop_size[1])
elif crop_size[1] == img.shape[3]:
lb_y = 0
else:
raise ValueError("crop_size[1] must be smaller or equal to the images y dimension")
if crop_size[2] < img.shape[4]:
lb_z = np.random.randint(0, img.shape[4] - crop_size[2])
elif crop_size[2] == img.shape[4]:
lb_z = 0
else:
raise ValueError("crop_size[2] must be smaller or equal to the images z dimension")
return img[:, :, lb_x:lb_x + crop_size[0], lb_y:lb_y + crop_size[1], lb_z:lb_z + crop_size[2]]
def random_crop_2D_image(img, crop_size):
if type(crop_size) not in (tuple, list):
crop_size = [crop_size] * len(img.shape)
else:
assert len(crop_size) == len(
img.shape), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (2d)"
if crop_size[0] < img.shape[0]:
lb_x = np.random.randint(0, img.shape[0] - crop_size[0])
elif crop_size[0] == img.shape[0]:
lb_x = 0
else:
raise ValueError("crop_size[0] must be smaller or equal to the images x dimension")
if crop_size[1] < img.shape[1]:
lb_y = np.random.randint(0, img.shape[1] - crop_size[1])
elif crop_size[1] == img.shape[1]:
lb_y = 0
else:
raise ValueError("crop_size[1] must be smaller or equal to the images y dimension")
return img[lb_x:lb_x + crop_size[0], lb_y:lb_y + crop_size[1]]
def random_crop_2D_image_batched(img, crop_size):
if type(crop_size) not in (tuple, list):
crop_size = [crop_size] * (len(img.shape) - 2)
else:
assert len(crop_size) == (len(
img.shape) - 2), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (2d)"
if crop_size[0] < img.shape[2]:
lb_x = np.random.randint(0, img.shape[2] - crop_size[0])
elif crop_size[0] == img.shape[2]:
lb_x = 0
else:
raise ValueError("crop_size[0] must be smaller or equal to the images x dimension")
if crop_size[1] < img.shape[3]:
lb_y = np.random.randint(0, img.shape[3] - crop_size[1])
elif crop_size[1] == img.shape[3]:
lb_y = 0
else:
raise ValueError("crop_size[1] must be smaller or equal to the images y dimension")
return img[:, :, lb_x:lb_x + crop_size[0], lb_y:lb_y + crop_size[1]]
def resize_image_by_padding(image, new_shape, pad_value=None):
shape = tuple(list(image.shape))
    new_shape = tuple(np.max(np.concatenate((shape, new_shape)).reshape((2, len(shape))), axis=0))
import math
import cmath
import numpy as np
# not necessary
import glob, os # for debug
def drange(start, stop, step):
# equivalent of function range except that it allows float steps
r = start
while r < stop:
yield r
r += step
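# Hedged usage sketch (added for illustration): list(drange(0, 1, 0.25)) == [0, 0.25, 0.5, 0.75];
# the stop value itself is excluded, like the built-in range.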
def DFT(dataFrame, m):
lFrame_ = len(dataFrame) # frame length lFram = 2.Ls in publication
t = 0
for n_ in range(lFrame_):
t += dataFrame[n_] * cmath.exp(-2 * math.pi * 1j * m * n_ / (lFrame_ - 1))
return t
def entropyDFT(dftData, m):
p_ = float(dftData[m]) / np.sum(dftData[1:])
return p_
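# Hedged usage sketch (added for illustration, frame values are made up): magnitude
# spectrum of a short frame via the DFT helper above, plus the normalized spectral
# "probability" used by entropyDFT (the DC bin is excluded, as in the feature code below).
def _example_dft_entropy():
    frame = [0.0, 1.0, 0.0, -1.0, 0.0, 1.0, 0.0, -1.0]
    mags = [abs(DFT(frame, m)) for m in range(len(frame))]
    probs = [entropyDFT(mags, m) for m in range(1, len(frame))]
    return mags, probs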
##----------------------------------------------------------------------------
def getFeatures_Detection(rowData):
vector = 0
# Compute features of a data sequence corresponding to one move
# Compute derived data (derived curves) based on row data
# Slice each data curve to get series of point from each ones
# Return a row vector of all features
# # Get data
# n = rowData.shape[0]
# ramp = np.linspace(1, 100, num=n)
# accX = rowData[:, 0] * ramp
# accY = rowData[:, 1] * ramp
# accZ = rowData[:, 2] * ramp
# gyrX = rowData[:, 3] * ramp
# gyrY = rowData[:, 4] * ramp
# gyrZ = rowData[:, 5] * ramp
# time = rowData[:, 6] - rowData[0, 6] # Time origin
# Get data
n = rowData.shape[0]
ramp = np.linspace(1, 100, num=n)
time = rowData[:, 0] * ramp
accX = rowData[:, 1] * ramp
accY = rowData[:, 2] * ramp
accZ = rowData[:, 3] * ramp
gyrX = rowData[:, 4] * ramp
gyrY = rowData[:, 5] * ramp
gyrZ = rowData[:, 6] * ramp
magX = rowData[:, 7] * ramp
magY = rowData[:, 8] * ramp
magZ = rowData[:, 9] * ramp
# time = rowData[:,6] - rowData[0,6] # Time origin
# print np.fft.fft(accX), len(np.fft.fft(accX))
absDFTData = np.empty((n, 9)) # matrix containing DFT data from input data
absDFTData[:] = np.NaN
for i in range(n):
for j in range(9):
absDFTData[i, j] = np.absolute(DFT(rowData[:, j], i))
#print(absDFTData)
#print(absDFTData.shape)
##----------------------------------------------------------------------------
# COMPUTE DERIVED CURVES
# integral, double integral, derivative, double derivative
# Compute time integral (only to get others integral)
timeIntegral = [time[0]]
for k in range(1, n):
timeIntegral.append(time[k] - time[k - 1])
# Compute data integral (Speed X, Y,Z & Angle X,Y,Z)
integralData = np.empty((n, 9))
integralData[:] = np.NAN
for k in range(0, n):
integralData[k, :] = rowData[k, :9] * timeIntegral[k]
if k > 0:
integralData[k, :] += integralData[k - 1, :]
# Compute data double integral (Position X,Y,Z)
doubleIntegralData = np.empty((n, 9))
doubleIntegralData[:] = np.NAN
for k in range(0, n):
doubleIntegralData[k, :] = integralData[k, :9] * timeIntegral[k]
if k > 0:
doubleIntegralData[k, :] += doubleIntegralData[k - 1, :]
# Compute data derivate
derivData = np.empty((n, 9))
derivData[:] = np.NAN
for k in range(0, n):
if k == 0:
derivData[k, :] = (rowData[k + 1, :9] - rowData[k, :9]) / (time[k + 1] - time[k])
elif k == n - 1:
derivData[k, :] = (rowData[k, :9] - rowData[k - 1, :9]) / (time[k] - time[k - 1])
else:
derivData[k, :] = (rowData[k + 1, :9] - rowData[k - 1, :9]) / (time[k + 1] - time[k - 1])
# Compute double data derivate
doubleDerivData = np.empty((n, 9))
doubleDerivData[:] = np.NAN
for k in range(0, n):
if k == 0:
doubleDerivData[k, :] = (derivData[k + 1, :9] - derivData[k, :9]) / (time[k + 1] - time[k])
elif k == n - 1:
doubleDerivData[k, :] = (derivData[k, :9] - derivData[k - 1, :9]) / (time[k] - time[k - 1])
else:
doubleDerivData[k, :] = (derivData[k + 1, :9] - derivData[k - 1, :9]) / (time[k + 1] - time[k - 1])
# ----------------------------------------------------------------------------
# GET FEATURES
# slice curves to get the same number of points on each curve
step = 4 # number of slice
ech = float(n) / float(step) # sampling
timeStep_ = drange(0, n + ech, ech) # generate time steps
indStep = []
for i in timeStep_:
i = round(i, 2)
indStep.append(math.floor(i)) # get index corresponding to time steps
x_ = [] # features vector
# Generate features for each frame (temporal and frequency domain)
for i in range(len(indStep) - 2):
# Get range of the frame
ind = indStep[i]
        ind1 = indStep[i + 2]  # 1 frame corresponds to 2 consecutive slices (half-overlapping frames)
if ind == ind1:
rg = ind
else:
rg = range(int(ind), int(ind1))
lengFrame = len(rg)
# Get Discrete Fourier Transform (DFT)
absDFTData_ = np.empty((lengFrame, 9)) # matrix containing DFT data from input data
absDFTData_[:] = np.NaN
for i in range(lengFrame):
for j in range(9):
absDFTData_[i, j] = np.absolute(DFT(rowData[rg, j], i))
# Add DC component as features (for each axis x,y,z)
x_ += absDFTData_[0, :].tolist()
# Add energy features (exclude DC component)
x_ += (np.sum(np.power(absDFTData_[1:, :], 2), axis=0) / (lengFrame - 1)).tolist()
# Add entropy features (exclude DC component)
entropyDFTData_ = np.empty((lengFrame, 9)) # matrix containing DFT entropy data
entropyDFTData_[:] = np.NaN
for i in range(lengFrame):
for j in range(9):
entropyDFTData_[i, j] = entropyDFT(absDFTData_[:, j], i)
x_ += np.sum(entropyDFTData_[1:, :] * np.log(1 / entropyDFTData_[1:, :]), axis=0).tolist() # normalize entropy
# Add deviation features (time domain)
datMean = np.mean(rowData[rg, :-1], axis=0)
x_ += np.sum(np.power(rowData[rg, :-1] - datMean, 2), axis=0).tolist()
        # Add correlation features (time domain)
y_ = []
for i in range(9):
for j in range(9):
if (j > i):
# vij = np.sum(np.abs(rowData[rg,i]*rowData[rg,j]))/float(lengFrame)
# vii = np.sum(np.abs(rowData[rg,i]*rowData[rg,i]))/float(lengFrame)
# vjj = np.sum(np.abs(rowData[rg,j]*rowData[rg,j]))/float(lengFrame)
# yij = (vij-datMean[i]*datMean[j]) / float(math.sqrt(vii-datMean[i]**2) * math.sqrt(vjj-datMean[j]**2))
                    yij = np.sum((rowData[rg, i] - datMean[i]) * (rowData[rg, j] - datMean[j]))
                    # Pearson-style normalization: divide by the product of the root sums of squared deviations.
                    denom = math.sqrt(np.sum((rowData[rg, i] - datMean[i]) ** 2)) * math.sqrt(np.sum((rowData[rg, j] - datMean[j]) ** 2))
                    if denom != 0:
                        yij /= float(denom)
                    else:
                        yij = 0
                    y_.append(yij)
x_ += y_
# print x_
# print len(x_)
    # Range (max - min) features for each raw channel
    x_.append(np.max(accX) - np.min(accX))
    x_.append(np.max(accY) - np.min(accY))
    x_.append(np.max(accZ) - np.min(accZ))
    x_.append(np.max(gyrX) - np.min(gyrX))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 14 21:31:56 2017
@author: Franz
"""
import scipy.signal
import numpy as np
import scipy.io as so
import os.path
import re
import matplotlib.pylab as plt
import h5py
import matplotlib.patches as patches
import numpy.random as rand
import seaborn as sns
import pandas as pd
from functools import reduce
import random
import pdb
class Mouse :
def __init__(self, idf, list=None, typ='') :
self.recordings = []
self.recordings.append(list)
self.typ = typ
self.idf = idf
def add(self, rec) :
self.recordings.append(rec)
def __len__(self) :
return len(self.recordings)
def __repr__(self) :
return ", ".join(self.recordings)
### PROCESSING OF RECORDING DATA ##############################################
def load_stateidx(ppath, name, ann_name=''):
""" load the sleep state file of recording (folder) $ppath/$name
@Return:
M,K sequence of sleep states, sequence of
    0's and 1's indicating non-annotated and annotated states
"""
ddir = os.path.join(ppath, name)
ppath, name = os.path.split(ddir)
if ann_name == '':
ann_name = name
sfile = os.path.join(ppath, name, 'remidx_' + ann_name + '.txt')
f = open(sfile, 'r')
lines = f.readlines()
f.close()
n = 0
for l in lines:
if re.match('\d', l):
n += 1
M = np.zeros(n, dtype='int')
K = np.zeros(n, dtype='int')
i = 0
for l in lines :
if re.search('^\s+$', l) :
continue
if re.search('\s*#', l) :
continue
if re.match('\d+\s+-?\d+', l) :
a = re.split('\s+', l)
M[i] = int(a[0])
K[i] = int(a[1])
i += 1
return M,K
def load_recordings(ppath, rec_file) :
"""
load_recordings(ppath, rec_file)
load recording listing with syntax:
[E|C] \s+ recording_name
#COMMENT
@RETURN:
    (list of controls, list of experiments)
"""
exp_list = []
ctr_list = []
rfile = os.path.join(ppath, rec_file)
f = open(rfile, newline=None)
lines = f.readlines()
f.close()
for l in lines :
if re.search('^\s+$', l) :
continue
if re.search('^\s*#', l) :
continue
a = re.split('\s+', l)
if re.search('E', a[0]) :
exp_list.append(a[1])
if re.search('C', a[0]) :
ctr_list.append(a[1])
return ctr_list, exp_list
def load_dose_recordings(ppath, rec_file):
"""
load recording list with following syntax:
    A line is either a control or an experiment; control recordings look like:
C \s recording_name
Experimental recordings also come with an additional dose parameter
(allowing for comparison of multiple doses with controls)
E \s recording_name \s dose_1
E \s recording_name \s dose_2
"""
rfile = os.path.join(ppath, rec_file)
f = open(rfile, newline=None)
lines = f.readlines()
f.close()
# first get all potential doses
doses = {}
ctr_list = []
for l in lines :
if re.search('^\s+$', l):
continue
if re.search('^\s*#', l):
continue
a = re.split('\s+', l)
if re.search('E', a[0]):
if a[2] in doses:
doses[a[2]].append(a[1])
else:
doses[a[2]] = [a[1]]
if re.search('C', a[0]):
ctr_list.append(a[1])
return ctr_list, doses
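# Hedged example (file name and recording names are illustrative only): a dose listing such as
# C M1_n1
# E M2_n1 0.25
# E M3_n1 0.5
# parsed with load_dose_recordings(ppath, 'dose_listing.txt') yields
# ctr_list == ['M1_n1'] and doses == {'0.25': ['M2_n1'], '0.5': ['M3_n1']}.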
def get_snr(ppath, name):
"""
read and return sampling rate (SR) from file $ppath/$name/info.txt
"""
fid = open(os.path.join(ppath, name, 'info.txt'), newline=None)
lines = fid.readlines()
fid.close()
values = []
for l in lines :
a = re.search("^" + 'SR' + ":" + "\s+(.*)", l)
if a :
values.append(a.group(1))
return float(values[0])
def get_infoparam(ifile, field):
"""
NOTE: field is a single string
and the function does not check for the type
of the values for field.
In fact, it just returns the string following field
"""
fid = open(ifile, newline=None)
lines = fid.readlines()
fid.close()
values = []
for l in lines :
a = re.search("^" + field + ":" + "\s+(.*)", l)
if a :
values.append(a.group(1))
return values
def add_infoparam(ifile, field, vals):
"""
:param ifile: info file
:param field: Parameters specifier, e.g. 'SR'
:param vals: list with parameters
"""
fid = open(ifile, 'a')
vals = [str(s) for s in vals]
param = " ".join(vals)
fid.write('%s:\t%s' % (field, param))
fid.write(os.linesep)
fid.close()
def laser_start_end(laser, SR=1525.88, intval=5):
"""laser_start_end(ppath, name)
print start and end index of laser stimulation trains: For example,
    if the animal was stimulated for 2 min every 20 min at 20 Hz, return the
    start and end index of each 2 min stimulation period (train)
returns the tuple (istart, iend), both indices are inclusive,
i.e. part of the sequence
@Param:
laser - laser, vector of 0s and 1s
intval - minimum time separation [s] between two laser trains
@Return:
(istart, iend) - tuple of two np.arrays with laser start and end indices
"""
idx = np.where(laser > 0.5)[0]
if len(idx) == 0 :
return ([], [])
idx2 = np.nonzero(np.diff(idx)*(1./SR) > intval)[0]
istart = np.hstack([idx[0], idx[idx2+1]])
iend = np.hstack([idx[idx2], idx[-1]])
return (istart, iend)
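# Hedged usage sketch (synthetic laser signal, illustrative sampling rate): two
# stimulation trains separated by more than intval seconds.
def _example_laser_start_end():
    sr = 100.0
    laser = np.zeros(3000)
    laser[100:300] = 1    # first train
    laser[1500:1700] = 1  # second train, starting > 5 s after the first one ends
    istart, iend = laser_start_end(laser, SR=sr, intval=5)
    # istart == array([ 100, 1500]), iend == array([ 299, 1699])
    return istart, iend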
def load_laser(ppath, name):
"""
load laser from recording ppath/name
@RETURN:
@laser, vector of 0's and 1's
"""
# laser might be .mat or h5py file
# perhaps we could find a better way of testing that
file = os.path.join(ppath, name, 'laser_'+name+'.mat')
try:
laser = np.array(h5py.File(file,'r').get('laser'))
except:
laser = so.loadmat(file)['laser']
return np.squeeze(laser)
def laser_protocol(ppath, name):
"""
What was the stimulation frequency and the inter-stimulation interval for recording
$ppath/$name?
@Return:
    inter-stimulation intervals, avg. inter-stimulation interval, frequency
"""
laser = load_laser(ppath, name)
SR = get_snr(ppath, name)
# first get inter-stimulation interval
(istart, iend) = laser_start_end(laser, SR)
intv = np.diff(np.array(istart/float(SR)))
d = intv/60.0
print("The laser was turned on in average every %.2f min," % (np.mean(d)))
print("with a min. interval of %.2f min and max. interval of %.2f min." % (np.min(d), np.max(d)))
print("Laser stimulation lasted for %f s." % (np.mean(np.array(iend/float(SR)-istart/float(SR)).mean())))
# print laser start times
print("Start time of each laser trial:")
j=1
for t in istart:
print("trial %d: %.2f" % (j, (t / float(SR)) / 60))
j += 1
# for each laser stimulation interval, check laser stimulation frequency
dt = 1/float(SR)
freq = []
laser_up = []
laser_down = []
for (i,j) in zip(istart, iend):
part = laser[i:j+1]
(a,b) = laser_start_end(part, SR, 0.005)
dur = (j-i+1)*dt
freq.append(len(a) / dur)
up_dur = (b-a+1)*dt*1000
down_dur = (a[1:]-b[0:-1]-1)*dt*1000
laser_up.append(np.mean(up_dur))
laser_down.append(np.mean(down_dur))
print(os.linesep + "Laser stimulation freq. was %.2f Hz," % np.mean(np.array(freq)))
print("with laser up and down duration of %.2f and %.2f ms." % (np.mean(np.array(laser_up)), np.mean(np.array(laser_down))))
return d, np.mean(d), np.mean(np.array(freq))
def swap_eeg(ppath, rec, ch='EEG'):
"""
swap EEG and EEG2 or EMG with EMG2 if $ch='EMG'
"""
if ch == 'EEG':
name = 'EEG'
else:
name = ch
EEG = so.loadmat(os.path.join(ppath, rec, name+'.mat'))[name]
EEG2 = so.loadmat(os.path.join(ppath, rec, name+'2.mat'))[name + '2']
tmp = EEG
EEG = EEG2
EEG2 = tmp
file_eeg1 = os.path.join(ppath, rec, '%s.mat' % name)
file_eeg2 = os.path.join(ppath, rec, '%s2.mat' % name)
so.savemat(file_eeg1, {name : EEG})
so.savemat(file_eeg2, {name+'2' : EEG2})
def eeg_conversion(ppath, rec, conv_factor=0.195):
"""
multiply all EEG and EMG channels with the given
conversion factor and write the conversion factor
as parameter (conversion:) into the info file.
    The conversion is only executed if no conversion
    factor is already specified in the info file.
    :param ppath: base folder
    :param rec: recording
    :param conv_factor: conversion factor
    :return: None
"""
ifile = os.path.join(ppath, rec, 'info.txt')
conv = get_infoparam(ifile, 'conversion')
if len(conv) > 0:
print("found conversion: parameter in info file")
print("returning: no conversion necessary!!!")
return
else:
files = os.listdir(os.path.join(ppath, rec))
files = [f for f in files if re.match('^EEG', f)]
for f in files:
name = re.split('\.', f)[0]
EEG = so.loadmat(os.path.join(ppath, rec, name+'.mat'), squeeze_me=True)[name]
if EEG[0].dtype == 'int16':
EEG = EEG * conv_factor
file_eeg = os.path.join(ppath, rec, '%s.mat' % name)
print(file_eeg)
so.savemat(file_eeg, {name: EEG})
else:
print('Wrong datatype! probably already converted; returning...')
return
files = os.listdir(os.path.join(ppath, rec))
files = [f for f in files if re.match('^EMG', f)]
for f in files:
name = re.split('\.', f)[0]
EMG = so.loadmat(os.path.join(ppath, rec, name+'.mat'), squeeze_me=True)[name]
if EMG[0].dtype == 'int16':
EMG = EMG * conv_factor
file_emg = os.path.join(ppath, rec, '%s.mat' % name)
print(file_emg)
so.savemat(file_emg, {name: EMG})
else:
print('Wrong datatype! probably already converted; returning...')
return
add_infoparam(ifile, 'conversion', [conv_factor])
calculate_spectrum(ppath, rec)
### DEPRECATED ############################################
def video_pulse_detection(ppath, rec, SR=1000, iv = 0.01):
"""
return index of each video frame onset
ppath/rec - recording
@Optional
SR - sampling rate of EEG(!) recording
    iv - minimum time interval (in seconds) between two frames
@Return
index of each video frame onset
"""
V = np.squeeze(so.loadmat(os.path.join(ppath, rec, 'videotime_' + rec + '.mat'))['video'])
TS = np.arange(0, len(V))
# indices where there's a jump in the signal
t = TS[np.where(V<0.5)];
if len(t) == 0:
idx = []
return idx
# time points where the interval between jumps is longer than iv
t2 = np.where(np.diff(t)*(1.0/SR)>=iv)[0]
idx = np.concatenate(([t[0]],t[t2+1]))
return idx
# SIGNAL PROCESSING ###########################################################
def my_lpfilter(x, w0, N=4):
"""
create a lowpass Butterworth filter with a cutoff of w0 * the Nyquist rate.
    The nice thing about this filter is that it has zero phase distortion (filtfilt applies the filter forward and backward).
A conventional lowpass filter would introduce a phase lag.
w0 - filter cutoff; value between 0 and 1, where 1 corresponds to nyquist frequency.
So if you want a filter with cutoff at x Hz, the corresponding w0 value is given by
w0 = 2 * x / sampling_rate
N - order of filter
@Return:
low-pass filtered signal
See also my hp_filter, or my_bpfilter
"""
from scipy import signal
b,a = signal.butter(N, w0)
y = signal.filtfilt(b,a, x)
return y
def my_hpfilter(x, w0, N=4):
"""
create an N-th order highpass Butterworth filter with cutoff frequency w0 * sampling_rate/2
"""
from scipy import signal
# use scipy.signal.firwin to generate filter
#taps = signal.firwin(numtaps, w0, pass_zero=False)
#y = signal.lfilter(taps, 1.0, x)
b,a = signal.butter(N, w0, 'high')
y = signal.filtfilt(b,a, x, padlen = x.shape[0]-1)
return y
def my_bpfilter(x, w0, w1, N=4,bf=True):
"""
create N-th order bandpass Butterworth filter with corner frequencies
w0*sampling_rate/2 and w1*sampling_rate/2
"""
#from scipy import signal
#taps = signal.firwin(numtaps, w0, pass_zero=False)
#y = signal.lfilter(taps, 1.0, x)
#return y
from scipy import signal
b,a = signal.butter(N, [w0, w1], 'bandpass')
if bf:
y = signal.filtfilt(b,a, x)
else:
y = signal.lfilter(b,a, x)
return y
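# Hedged usage sketch (synthetic signal, illustrative sampling rate): band-pass between
# 0.5 and 4 Hz (the delta band used elsewhere in this module). Corner frequencies are
# passed as fractions of the Nyquist rate, i.e. w = 2 * f_Hz / sampling_rate.
def _example_bandpass_delta():
    sr = 1000.0
    t = np.arange(0, 10, 1 / sr)
    x = np.sin(2 * np.pi * 2 * t) + np.sin(2 * np.pi * 60 * t)  # 2 Hz + 60 Hz components
    w0, w1 = 2 * 0.5 / sr, 2 * 4.0 / sr
    return my_bpfilter(x, w0, w1)  # the 60 Hz component is strongly attenuated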
def my_notchfilter(x, sr=1000, band=5, freq=60, ripple=10, order=3, filter_type='butter'):
from scipy.signal import iirfilter,lfilter
fs = sr
nyq = fs/2.0
low = freq - band/2.0
high = freq + band/2.0
low = low/nyq
high = high/nyq
b, a = iirfilter(order, [low, high], rp=ripple, btype='bandstop',
analog=False, ftype=filter_type)
filtered_data = lfilter(b, a, x)
return filtered_data
def downsample_vec(x, nbin):
"""
y = downsample_vec(x, nbin)
    downsample the vector x by replacing nbin consecutive
    bins by their mean
@RETURN: the downsampled vector
"""
n_down = int(np.floor(len(x) / nbin))
x = x[0:n_down*nbin]
x_down = np.zeros((n_down,))
# 0 1 2 | 3 4 5 | 6 7 8
for i in range(nbin) :
idx = list(range(i, int(n_down*nbin), int(nbin)))
x_down += x[idx]
return x_down / nbin
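# Hedged usage sketch (illustrative values): average every 10 consecutive samples of a ramp.
def _example_downsample_vec():
    x = np.arange(100.0)
    x_down = downsample_vec(x, 10)
    # x_down == array([ 4.5, 14.5, 24.5, ..., 94.5]), i.e. 10 bin averages
    return x_down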
def smooth_data(x, sig):
"""
y = smooth_data(x, sig)
smooth data vector @x with gaussian kernel
with standard deviation $sig
"""
sig = float(sig)
if sig == 0.0:
return x
# gaussian:
gauss = lambda x, sig : (1/(sig*np.sqrt(2.*np.pi)))*np.exp(-(x*x)/(2.*sig*sig))
bound = 1.0/10000
L = 10.
p = gauss(L, sig)
while (p > bound):
L = L+10
p = gauss(L, sig)
#F = map(lambda x: gauss((x, sig)), np.arange(-L, L+1.))
# py3:
F = [gauss(x, sig) for x in np.arange(-L, L+1.)]
F = F / np.sum(F)
return scipy.signal.fftconvolve(x, F, 'same')
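# Hedged usage sketch (illustrative values): smooth a noisy step with a Gaussian
# kernel of standard deviation 5 bins.
def _example_smooth_data():
    x = np.concatenate((np.zeros(100), np.ones(100))) + 0.1 * np.random.randn(200)
    return smooth_data(x, 5)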
def power_spectrum(data, length, dt):
"""
scipy's implementation of Welch's method using hanning window to estimate
the power spectrum
The function returns power density with units V**2/Hz
see also https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.welch.html
The label on the y-axis should say PSD [V**2/Hz]
@Parameters
data - time series; float vector!
length - length of hanning window, even integer!
@Return:
power density, frequencies
The function returns power density in units V^2 / Hz
Note that
np.var(data) ~ np.sum(power density) * (frequencies[1]-frequencies[0])
"""
f, pxx = scipy.signal.welch(data, fs=1.0/dt, window='hanning', nperseg=int(length), noverlap=int(length/2))
return pxx, f
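# Hedged usage sketch (synthetic signal): power density of a noisy 10 Hz sine sampled
# at 1 kHz. As noted in the docstring above, integrating the density over frequency
# approximately recovers the signal variance.
def _example_power_spectrum():
    dt = 1e-3
    t = np.arange(0, 5, dt)
    x = np.sin(2 * np.pi * 10 * t) + 0.1 * np.random.randn(len(t))
    pxx, f = power_spectrum(x, length=1000, dt=dt)
    var_estimate = np.sum(pxx) * (f[1] - f[0])  # ~ np.var(x)
    return pxx, f, var_estimate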
def spectral_density(data, length, nfft, dt):
"""
calculate the spectrogram for the time series given by data with time resolution dt
The powerspectrum for each window of length $length is computed using
Welch's method.
The windows for the powerspectrum calculation are half-overlapping. If length contains 5s of data,
    then the first window goes from 0 s to 5 s, the second window from 2.5 s to 7.5 s, ...
    The last window ends at ceil(len(data)/length)*5 s
    Another example: assume we have 13 s of data with 5 s windows; then the power density is calculated for the following
    time windows:
    0 -- 5, 2.5 -- 7.5, 5 -- 10, 7.5 -- 12.5, 10 -- 15
    In total there are thus 2*ceil(13/5)-1 = 5 windows
    The last window starts at (2*3-2) * (5/2) = 10 s
    Note: the returned time axis goes from 0 s to 10 s in 2.5 s steps
@Parameters:
data - time series
length - window length of data used to calculate powerspectrum.
Note that the time resolution of the spectrogram is length/2
nfft - size of the window used to calculate the powerspectrum.
determines the frequency resolution.
@Return:
Powspectrum, frequencies, time axis
"""
n = len(data)
k = int(np.ceil((1.0*n)/length))
data = np.concatenate((data, np.zeros((length*k-n,))))
fdt = length*dt/2 # time step for spectrogram
t = np.arange(0, fdt*(2*k-2)+fdt/2.0, fdt)
# frequency axis of spectrogram
f = np.linspace(0, 1, int(np.ceil(nfft/2.0))+1) * (0.5/dt)
# the power spectrum is calculated for 2*k-1 time points
Pow = np.zeros((len(f), k*2-1))
j = 0
for i in range(0, k-2+1):
w1=data[(length*i):(i+1)*length]
w2=data[length*i+int(length/2):(i+1)*length+int(length/2)]
Pow[:,j] = power_spectrum(w1, nfft, dt)[0]
Pow[:,j+1] = power_spectrum(w2, nfft, dt)[0]
j += 2
# last time point
Pow[:,j],f = power_spectrum(data[length*(k-1):k*length], nfft, dt)
return Pow, f, t
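# Hedged usage sketch matching the 13 s example in the docstring: 13 s of data at
# dt = 1 ms with 5 s windows (length = 5000 samples) gives 2*ceil(13/5)-1 = 5
# spectrogram columns and a 2.5 s time step.
def _example_spectral_density():
    dt = 1e-3
    data = np.random.randn(13000)
    Pow, f, t = spectral_density(data, length=5000, nfft=5000, dt=dt)
    # Pow.shape[1] == 5 and t == array([ 0. , 2.5, 5. , 7.5, 10. ])
    return Pow, f, t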
def calculate_spectrum(ppath, name, fres=0.5):
"""
calculate EEG and EMG spectrogram used for sleep stage detection.
Function assumes that data vectors EEG.mat and EMG.mat exist in recording
folder ppath/name; these are used to calculate the powerspectrum
fres - resolution of frequency axis
all data saved in "true" mat files
:return EEG Spectrogram, EMG Spectrogram, frequency axis, time axis
"""
SR = get_snr(ppath, name)
swin = round(SR)*5
fft_win = round(swin/5) # approximate number of data points per second
if (fres == 1.0) or (fres == 1):
fft_win = int(fft_win)
elif fres == 0.5:
fft_win = 2*int(fft_win)
else:
print("Resolution %f not allowed; please use either 1 or 0.5" % fres)
(peeg2, pemg2) = (False, False)
# Calculate EEG spectrogram
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG.mat'))['EEG'])
Pxx, f, t = spectral_density(EEG, int(swin), int(fft_win), 1/SR)
if os.path.isfile(os.path.join(ppath, name, 'EEG2.mat')):
peeg2 = True
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG2.mat'))['EEG2'])
Pxx2, f, t = spectral_density(EEG, int(swin), int(fft_win), 1/SR)
#save the stuff to a .mat file
spfile = os.path.join(ppath, name, 'sp_' + name + '.mat')
if peeg2 == True:
so.savemat(spfile, {'SP':Pxx, 'SP2':Pxx2, 'freq':f, 'dt':t[1]-t[0],'t':t})
else:
so.savemat(spfile, {'SP':Pxx, 'freq':f, 'dt':t[1]-t[0],'t':t})
# Calculate EMG spectrogram
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG.mat'))['EMG'])
Qxx, f, t = spectral_density(EMG, int(swin), int(fft_win), 1/SR)
if os.path.isfile(os.path.join(ppath, name, 'EMG2.mat')):
pemg2 = True
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG2.mat'))['EMG2'])
Qxx2, f, t = spectral_density(EMG, int(swin), int(fft_win), 1/SR)
# save the stuff to .mat file
spfile = os.path.join(ppath, name, 'msp_' + name + '.mat')
if pemg2 == True:
so.savemat(spfile, {'mSP':Qxx, 'mSP2':Qxx2, 'freq':f, 'dt':t[1]-t[0],'t':t})
else:
so.savemat(spfile, {'mSP':Qxx, 'freq':f, 'dt':t[1]-t[0],'t':t})
return Pxx, Qxx, f, t
def whiten_spectrogram(ppath, name, fmax=50):
"""
experimental
:param ppath:
:param name:
:param fmax:
:return:
"""
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'), squeeze_me=True)
SPE = P['SP']
freq = P['freq']
ifreq = np.where(freq <= fmax)[0]
SPE = SPE[ifreq,:]
nfilt = 5
filt = np.ones((nfilt, nfilt))
filt = np.divide(filt, filt.sum())
#SPE = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
m = np.mean(SPE,axis=1)
SPE -= np.tile(m, (SPE.shape[1], 1)).T
SPE = SPE.T
C = np.dot(SPE.T, SPE)
[evals, L] = np.linalg.eigh(C)
idx = np.argsort(evals)
D = np.diag(np.sqrt(evals[idx]))
L = L[:,idx]
W = np.dot(L, np.dot(np.linalg.inv(D),np.dot(L.T,SPE.T)))
nfilt = 2
filt = np.ones((nfilt,nfilt))
filt = np.divide(filt, filt.sum())
W = scipy.signal.convolve2d(W, filt, boundary='symm', mode='same')
return W, D, L
def normalize_spectrogram(ppath, name, fmax=0, band=[], vm=5, pplot=True, sptype='', filt_dim=[]):
"""
    Normalize EEG spectrogram by dividing each frequency band by its average value.
:param ppath, name: base folder, recording name
:param fmax: maximum frequency; frequency axis of spectrogram goes from 0 to fmax
if fmax=0, use complete frequency axis
:param band: list or tuple, define lower and upper range of a frequency band,
if pplot=True, plot band, along with spectrogram;
if band=[], disregard
:param vm: color range for plotting spectrogram
:pplot: if True, plot spectrogram along with power band
:sptype: if sptype='fine' plot 'special' spectrogram, save under sp_fine_$name.mat;
otherwise plot 'normal' spectrogram sp_$name.mat
:filt_dim: list or tuple; the two values define the dimensions of box filter
used to filter the normalized spectrogram; if filt_dim=[], then no filtering
:return SPE, t, freq: normalized spectrogram (np.array), time axis, frequency axis
"""
if (len(sptype) == 0) or (sptype=='std'):
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'), squeeze_me=True)
elif sptype == 'fine':
P = so.loadmat(os.path.join(ppath, name, 'sp_fine_' + name + '.mat'), squeeze_me=True)
SPE = P['SP']
freq = P['freq']
t = P['t']
if fmax > 0:
ifreq = np.where(freq <= fmax)[0]
else:
ifreq = np.arange(0, len(freq))
freq = freq[ifreq]
nfilt = 4
filt = np.ones((nfilt,nfilt))
filt = np.divide(filt, filt.sum())
SPE = SPE[ifreq,:]
# before
#SPE = SPE[ifreq]
#W = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
#sp_mean = W.mean(axis=1)
sp_mean = SPE.mean(axis=1)
SPE = np.divide(SPE, np.tile(sp_mean, (SPE.shape[1], 1)).T)
if len(filt_dim) > 0:
filt = np.ones(filt_dim)
filt = np.divide(filt, filt.sum())
SPE = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
# get high gamma peaks
if len(band) > 0:
iband = np.where((freq >= band[0]) & (freq <= band[-1]))[0]
pow_band = SPE[iband,:].mean(axis=0)
thr = pow_band.mean() + pow_band.std()
idx = np.where(pow_band > thr)[0]
# plot normalized spectrogram, along with band
if pplot:
plt.ion()
plt.figure()
if len(band) > 0:
med = np.median(SPE.mean(axis=0))
ax1 = plt.subplot(211)
plt.pcolormesh(t, freq, SPE, vmin=0, vmax=vm*med, cmap='jet')
plt.subplot(212, sharex=ax1)
plt.plot(t,SPE[iband,:].mean(axis=0))
plt.plot(t[idx], pow_band[idx], '.')
plt.draw()
return SPE, t, freq[ifreq]
def recursive_spectrogram(ppath, name, sf=0.3, alpha=0.3, pplot=True):
"""
calculate EEG/EMG spectrogram in a way that can be implemented by a closed-loop system.
The spectrogram is temporally filtered using a recursive implementation of a lowpass filter
@Parameters:
ppath/name - mouse EEG recording
sf - smoothing factor along frequency axis
alpha - temporal lowpass filter time constant
pplot - if pplot==True, plot figure
@Return:
SE, SM - EEG, EMG spectrogram
"""
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG.mat'))['EEG'])
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG.mat'))['EMG'])
len_eeg = len(EEG)
fdt = 2.5
SR = get_snr(ppath, name)
# we calculate the powerspectrum for 5s windows
swin = int(np.round(SR) * 5.0)
# but we sample new data each 2.5 s
swinh = int(swin/2.0)
fft_win = int(swin / 5.0)
# number of 2.5s long samples
spoints = int(np.floor(len_eeg / swinh))
SE = np.zeros((int(fft_win/2+1), spoints))
SM = np.zeros((int(fft_win/2+1), spoints))
print("Starting calculating spectrogram for %s..." % name)
for i in range(2, spoints):
# we take the last two swinh windows (the new 2.5 s long sample and the one from
# the last iteration)
x = EEG[(i-2)*swinh:i*swinh]
[p, f] = power_spectrum(x.astype('float'), fft_win, 1.0/SR)
p = smooth_data(p, sf)
# recursive low pass filtering of spectrogram:
        # the new estimate is a weighted blend of the current sample and the previous estimate
SE[:,i] = alpha*p + (1-alpha) * SE[:,i-1]
# and the same of EMG
x = EMG[(i-2)*swinh:i*swinh]
[p, f] = power_spectrum(x.astype('float'), fft_win, 1.0/SR)
p = smooth_data(p, sf)
SM[:,i] = alpha*p + (1-alpha) * SM[:,i-1]
if pplot:
# plot EEG spectrogram
t = np.arange(0, SM.shape[1])*fdt
plt.figure()
ax1 = plt.subplot(211)
im = np.where((f>=0) & (f<=30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im,:]), vmin=0, vmax=med*2)
plt.xticks(())
ix = list(range(0, 30, 10))
fi = f[im][::-1]
plt.yticks(ix, list(map(int, fi[ix])))
box_off(ax1)
plt.axis('tight')
plt.ylabel('Freq (Hz)')
# plot EMG amplitude
ax2 = plt.subplot(212)
im = np.where((f>=10) & (f<100))[0]
df = np.mean(np.diff(f))
# amplitude is the square root of the integral
ax2.plot(t, np.sqrt(SM[im,:].sum(axis=0)*df)/1000.0)
plt.xlim((0, t[-1]))
plt.ylabel('EMG Ampl (mV)')
plt.xlabel('Time (s)')
box_off(ax2)
plt.show(block=False)
return SE, SM, f
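# Hedged sketch (added for illustration): the recursive low-pass update used above,
# isolated from the spectrogram code. Each new observation is blended with the previous
# estimate, so slow changes are followed and fast fluctuations are damped.
def _example_recursive_lowpass(samples, alpha=0.3):
    est = np.zeros(len(samples))
    est[0] = samples[0]
    for i in range(1, len(samples)):
        est[i] = alpha * samples[i] + (1 - alpha) * est[i - 1]
    return est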
def recursive_sleepstate_rem(ppath, recordings, sf=0.3, alpha=0.3, past_mu=0.2, std_thdelta = 1.5, past_len=120, sdt=2.5, psave=False, xemg=False):
"""
predict a REM period only based on EEG/EMG history; the same algorithm is also used for
closed-loop REM sleep manipulation.
    For REM sleep detection the algorithm uses thresholds on delta power, EMG power, and theta/delta power.
    For theta/delta I use two thresholds: A hard (larger) threshold and a soft (lower) threshold. Initially,
    theta/delta has to cross the hard threshold to initiate a REM period. Then, as long as
    theta/delta stays above the soft threshold (and EMG power stays low), REM sleep continues.
@Parameters:
ppath base folder with recordings
recordings list of recordings
sf smoothing factor for each powerspectrum
alpha smoothing factor along time dimension
past_mu percentage (0 .. 1) of brain states that are allowed to have EMG power larger than threshold
during the last $past_len seconds
past_len window to calculate $past_mu
std_thdelta the hard theta/delta threshold is given by, mean(theta/delta) + $std_thdelta * std(theta/delta)
    sdt         time bin for brain state, typically 2.5s
psave if True, save threshold parameters to file.
"""
idf = re.split('_', recordings[0])[0]
# 02/05/2020 changed from int to float:
past_len = float(np.round(past_len/sdt))
# calculate spectrogram
(SE, SM) = ([],[])
for rec in recordings:
A,B, freq = recursive_spectrogram(ppath, rec, sf=sf, alpha=alpha)
SE.append(A)
SM.append(B)
# fuse lists SE and SM
SE = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SE))
if not xemg:
SM = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SM))
else:
SM = SE
# EEG, EMG bands
ntbins = SE.shape[1]
r_delta = [0.5, 4]
r_theta = [5,12]
# EMG band
r_mu = [300, 500]
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
pow_delta = np.sum(SE[i_delta,:], axis=0)
pow_theta = np.sum(SE[i_theta,:], axis=0)
pow_mu = np.sum(SM[i_mu,:], axis=0)
# theta/delta
th_delta = np.divide(pow_theta, pow_delta)
thr_th_delta1 = np.nanmean(th_delta) + std_thdelta*np.nanstd(th_delta)
thr_th_delta2 = np.nanmean(th_delta) + 0.0*np.nanstd(th_delta)
thr_delta = pow_delta.mean()
thr_mu = pow_mu.mean() + 0.5*np.nanstd(pow_mu)
### The actual algorithm for REM detection
rem_idx = np.zeros((ntbins,))
prem = 0 # whether or not we are in REM
for i in range(ntbins):
if prem == 0 and pow_delta[i] < thr_delta and pow_mu[i] < thr_mu:
### could be REM
if th_delta[i] > thr_th_delta1:
### we are potentially entering REM
if (i - past_len) >= 0:
sstart = int(i-past_len)
else:
sstart = 0
# count the percentage of brainstate bins with elevated EMG power
c_mu = np.sum( np.where(pow_mu[sstart:i]>thr_mu)[0] ) / (past_len*1.0)
if c_mu < past_mu:
### we are in REM
prem = 1 # turn laser on
rem_idx[i] = 1
# We are currently in REM; do we stay there?
if prem == 1:
### REM continues, if theta/delta is larger than soft threshold and if there's
### no EMG activation
if (th_delta[i] > thr_th_delta2) and (pow_mu[i] < thr_mu):
rem_idx[i] = 1
else:
prem = 0 #turn laser off
# for loop ends
# Determine which channel is EEG, EMG
ch_alloc = get_infoparam(os.path.join(ppath, recordings[0], 'info.txt'), 'ch_alloc')[0]
# plot the whole stuff:
# (1) spectrogram
# (2) EMG Power
# (3) Delta
# (4) TH_Delta
plt.figure()
t = np.arange(0, sdt*(ntbins-1)+sdt/2.0, sdt)
ax1 = plt.subplot(411)
im = np.where((freq>=0) & (freq<=30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im,:]), vmin=0, vmax=med*2)
plt.yticks(list(range(0, 31, 10)), list(range(30, -1, -10)))
plt.ylabel('Freq. (Hz)')
plt.axis('tight')
ax2 = plt.subplot(412)
ax2.plot(t, pow_mu, color='black')
ax2.plot(t, np.ones((len(t),))*thr_mu, color='red')
plt.ylabel('EMG Pow.')
plt.xlim((t[0], t[-1]))
ax3 = plt.subplot(413, sharex=ax2)
ax3.plot(t, pow_delta, color='black')
ax3.plot(t, np.ones((len(t),))*thr_delta, color='red')
plt.ylabel('Delta Pow.')
plt.xlim((t[0], t[-1]))
ax4 = plt.subplot(414, sharex=ax3)
ax4.plot(t, th_delta, color='black')
ax4.plot(t, np.ones((len(t),))*thr_th_delta1, color='red')
ax4.plot(t, np.ones((len(t),))*thr_th_delta2, color='pink')
ax4.plot(t, rem_idx*thr_th_delta1, color='blue')
plt.ylabel('Theta/Delta')
plt.xlabel('Time (s)')
plt.xlim((t[0], t[-1]))
plt.show(block=False)
# write config file
if psave:
cfile = os.path.join(ppath, idf + '_rem.txt')
fid = open(cfile, 'w')
fid.write(('IDF: %s'+os.linesep) % idf)
fid.write(('ch_alloc: %s'+os.linesep) % ch_alloc)
fid.write(('THR_DELTA: %.2f'+os.linesep) % thr_delta)
fid.write(('THR_MU: %.2f'+os.linesep) % thr_mu)
fid.write(('THR_TH_DELTA: %.2f %.2f'+os.linesep) % (thr_th_delta1, thr_th_delta2))
fid.write(('STD_THDELTA: %.2f'+os.linesep) % std_thdelta)
fid.write(('PAST_MU: %.2f'+os.linesep) % past_mu)
fid.write(('SF: %.2f'+os.linesep) % sf)
fid.write(('ALPHA: %.2f'+os.linesep) % alpha)
fid.write(('Bern: %.2f' + os.linesep) % 0.5)
if xemg:
fid.write(('XEMG: %d'+os.linesep) % 1)
else:
fid.write(('XEMG: %d' + os.linesep) % 0)
fid.close()
print('wrote file %s' % cfile)
def recursive_sleepstate_rem_control(ppath, recordings, past_len=120, sdt=2.5, delay=120):
"""
    algorithm running laser control for REM sleep dependent activation/inhibition.
$delay s after a detected REM sleep period, the laser is turned on for the same duration. If a new REM period starts,
the laser stops, but we keep track of the missing time. The next time is laser turns on again,
it stays on for the duration of the most recent REM period + the remaining time.
    The algorithm for REM detection is the same as used for closed-loop REM sleep manipulation.
The function reads in the required parameters from the configuration file (MOUSEID_rem.txt)
    For REM sleep detection the algorithm uses thresholds on delta power, EMG power, and theta/delta power.
    For theta/delta I use two thresholds: A hard (larger) threshold and a soft (lower) threshold. Initially,
    theta/delta has to cross the hard threshold to initiate a REM period. Then, as long as
    theta/delta stays above the soft threshold (and EMG power stays low), REM sleep continues.
@Parameters:
ppath base folder with recordings
recordings list of recordings
past_len window to calculate $past_mu
sdt time bin for brain sttate, typically 2.5s
delay delay to wait after a REM sleep periods ends, till the laser is turned on.
"""
idf = re.split('_', recordings[0])[0]
past_len = int(np.round(past_len/sdt))
# load parameters
cfile = os.path.join(ppath, idf + '_rem.txt')
params = load_sleep_params(ppath, cfile)
thr_th_delta1 = params['THR_TH_DELTA'][0]
thr_th_delta2 = params['THR_TH_DELTA'][1]
thr_delta = params['THR_DELTA'][0]
thr_mu = params['THR_MU'][0]
alpha = params['ALPHA'][0]
sf = params['SF'][0]
past_mu = params['PAST_MU'][0]
xemg = params['XEMG'][0]
# calculate spectrogram
(SE, SM) = ([], [])
for rec in recordings:
A, B, freq = recursive_spectrogram(ppath, rec, sf=sf, alpha=alpha)
SE.append(A)
SM.append(B)
# fuse lists SE and SM
SE = np.squeeze(reduce(lambda x, y: np.concatenate((x, y)), SE))
if not xemg:
SM = np.squeeze(reduce(lambda x, y: np.concatenate((x, y)), SM))
else:
SM = SE
# EEG, EMG bands
ntbins = SE.shape[1]
r_delta = [0.5, 4]
r_theta = [5, 12]
# EMG band
r_mu = [300, 500]
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
pow_delta = np.sum(SE[i_delta, :], axis=0)
pow_theta = np.sum(SE[i_theta, :], axis=0)
pow_mu = np.sum(SM[i_mu, :], axis=0)
th_delta = np.divide(pow_theta, pow_delta)
### The actual algorithm for REM detection
rem_idx = np.zeros((ntbins,))
prem = 0 # whether or not we are in REM
# NEW variables:
laser_idx = np.zeros((ntbins,))
delay = int(np.round(delay/sdt))
delay_count = 0
curr_rem_dur = 0
dur_count = 0
on_delay = False
laser_on = False
for i in range(ntbins):
if prem == 0 and pow_delta[i] < thr_delta and pow_mu[i] < thr_mu:
### could be REM
if th_delta[i] > thr_th_delta1:
### we are potentially entering REM
if (i - past_len) >= 0:
sstart = i - past_len
else:
sstart = 0
# count the percentage of brainstate bins with elevated EMG power
c_mu = np.sum(np.where(pow_mu[sstart:i] > thr_mu)[0]) / past_len
if c_mu < past_mu:
### we are in REM
prem = 1 # turn laser on
rem_idx[i] = 1
curr_rem_dur += 1 #NEW
# We are currently in REM; do we stay there?
if prem == 1:
### REM continues, if theta/delta is larger than soft threshold and if there's
### no EMG activation
if (th_delta[i] > thr_th_delta2) and (pow_mu[i] < thr_mu):
rem_idx[i] = 1
curr_rem_dur += 1
else:
prem = 0 # turn laser off
dur_count += curr_rem_dur #NEW
delay_count = delay #NEW
curr_rem_dur = 0 #NEW
on_delay = True #NEW
# NEW:
if on_delay:
if prem == 0:
delay_count -=1
if delay_count == 0:
laser_on = True
on_delay = False
if laser_on:
if prem == 0:
if dur_count >= 0:
dur_count -= 1
laser_idx[i] = 1
else:
laser_on = False
else:
laser_on = False
# plot the whole stuff:
# (1) spectrogram
# (2) EMG Power
# (3) Delta
# (4) TH_Delta
plt.figure()
t = np.arange(0, sdt*(ntbins-1)+sdt/2.0, sdt)
ax1 = plt.subplot(411)
im = np.where((freq>=0) & (freq<=30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im,:]), vmin=0, vmax=med*2)
plt.yticks(list(range(0, 31, 10)), list(range(30, -1, -10)))
plt.ylabel('Freq. (Hz)')
plt.axis('tight')
ax2 = plt.subplot(412)
ax2.plot(t, pow_mu, color='black')
ax2.plot(t, np.ones((len(t),))*thr_mu, color='red')
plt.ylabel('EMG Pow.')
plt.xlim((t[0], t[-1]))
ax3 = plt.subplot(413, sharex=ax2)
ax3.plot(t, pow_delta, color='black')
ax3.plot(t, np.ones((len(t),))*thr_delta, color='red')
plt.ylabel('Delta Pow.')
plt.xlim((t[0], t[-1]))
ax4 = plt.subplot(414, sharex=ax3)
ax4.plot(t, th_delta, color='black')
ax4.plot(t, np.ones((len(t),))*thr_th_delta1, color='red')
ax4.plot(t, np.ones((len(t),))*thr_th_delta2, color='pink')
ax4.plot(t, rem_idx*thr_th_delta1, color='green', label='REM')
ax4.plot(t, laser_idx * thr_th_delta1, color='blue', label='Laser')
plt.ylabel('Theta/Delta')
plt.xlabel('Time (s)')
plt.xlim((t[0], t[-1]))
plt.legend()
plt.show(block=False)
def load_sleep_params(path, param_file):
"""
load parameter file generated by &recursive_sleepstate_rem || &recursive_sleepstate_nrem
@Return:
Dictionary: Parameter --> Value
"""
fid = open(os.path.join(path, param_file), 'r')
lines = fid.readlines()
params = {}
for line in lines:
if re.match('^[\S_]+:', line):
a = re.split('\s+', line)
key = a[0][:-1]
params[key] = a[1:-1]
# transform number strings to floats
for k in params:
vals = params[k]
new_vals = []
for v in vals:
if re.match('^[\d\.]+$', v):
new_vals.append(float(v))
else:
new_vals.append(v)
params[k] = new_vals
return params
def recursive_sleepstate_nrem(ppath, recordings, sf=0.3, alpha=0.3, std_thdelta = 1.5, sdt=2.5, psave=False, xemg=False):
"""
    predict NREM periods only based on EEG/EMG history; the same algorithm is also used for
    closed-loop NREM sleep manipulation.
    For NREM sleep detection the algorithm uses thresholds on delta power, EMG power, and theta/delta power.
    For delta I use two thresholds: A hard (larger) threshold and a soft (lower) threshold. Initially,
    delta power has to cross the hard threshold to initiate a NREM period. Then, as long as
    delta power stays above the soft threshold (and EMG power stays low), NREM sleep continues.
    The values for the hard and soft threshold are fitted using a Gaussian mixture model.
:param ppath: base folder
:param recordings: list of recordings
:param sf: smoothing factor for each powerspectrum
    :param alpha: temporal smoothing factor (recursive lowpass constant)
:param std_thdelta: factor to set threshold for theta/delta
:param sdt: time step of brain state classification, typically 2.5 s
:param psave: save parameters to text file?
:param xemg: use EEG instead of EMG?
"""
    # to fit Gaussian mixture model to delta power distribution
from sklearn import mixture
idf = re.split('_', recordings[0])[0]
# calculate spectrogram
(SE, SM) = ([],[])
for rec in recordings:
A,B, freq = recursive_spectrogram(ppath, rec, sf=sf, alpha=alpha)
SE.append(A)
SM.append(B)
# fuse lists SE and SM
SE = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SE))
if not xemg:
SM = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SM))
else:
SM = SE
# EEG, EMG bands
ntbins = SE.shape[1]
r_delta = [0.5, 4]
r_theta = [5,12]
# EMG band
r_mu = [300, 500]
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
pow_delta = np.sum(SE[i_delta,:], axis=0)
pow_theta = np.sum(SE[i_theta,:], axis=0)
pow_mu = np.sum(SM[i_mu,:], axis=0)
# theta/delta
th_delta = np.divide(pow_theta, pow_delta)
thr_th_delta1 = np.nanmean(th_delta) + std_thdelta*np.nanstd(th_delta)
thr_th_delta2 = np.nanmean(th_delta) + 0.0*np.nanstd(th_delta)
thr_mu = pow_mu.mean() + 0.5*np.nanstd(pow_mu)
med_delta = np.median(pow_delta)
pow_delta_fit = pow_delta[np.where(pow_delta<=3*med_delta)]
# fit Gaussian mixture model to delta power
# see http://www.astroml.org/book_figures/chapter4/fig_GMM_1D.html
gm = mixture.GaussianMixture(n_components=2)
fit = gm.fit(pow_delta_fit.reshape(-1, 1))
means = np.squeeze(fit.means_)
x = np.arange(0, med_delta*3, 100)
plt.figure()
plt.hist(pow_delta_fit, 100, normed=True, histtype='stepfilled', alpha=0.4)
logprob = fit.score_samples(x.reshape(-1,1))
responsibilities = fit.predict_proba(x.reshape((-1,1)))
pdf = np.exp(logprob)
pdf_individual = responsibilities * pdf[:, np.newaxis]
plt.plot(x, pdf, '-k')
plt.plot(x, pdf_individual, '--k')
plt.xlim((0, med_delta*3))
plt.ylabel('p(x)')
plt.xlabel('x = Delta power')
# get point where curves cut each other
if means[0] < means[1]:
idx = np.where((x>= means[0]) & (x<= means[1]))[0]
else:
idx = np.where((x >= means[1]) & (x <= means[0]))[0]
imin = np.argmin(pdf[idx])
xcut = x[idx[0]+imin]
plt.plot(xcut, pdf[idx[0]+imin], 'ro')
ilow = np.argmin(np.abs(x-means[0]))
plt.plot(x[ilow], pdf[ilow], 'bo')
ihigh = np.argmin(np.abs(x-means[1]))
plt.plot(x[ihigh], pdf[ihigh], 'go')
plt.show(block=False)
# set parameters for hard and soft delta thresholds
tmp = np.array([x[ihigh], xcut, x[ilow]])
tmp.sort()
thr_delta1 = tmp[-1] # x[ihigh]; right peak of distribution
thr_delta2 = tmp[1] # trough of distribution
# NREM yes or no according to thresholds
# However, this variable does not directly control whether laser should
# be on or off; whether NREM sleep is really on or off is determined
# by nrem_idx; if pnrem_hidden == 1, then all threshold critera, but not
# sleep history criteria are fulfilled
pnrem_hidden = 0
# if nrem_idx[i] == 1, time point i is NREM
nrem_idx = np.zeros((ntbins,), dtype='int8')
# NREM stays on after thresholds are NOT fulfilled to avoid interruptions by microarousals
grace_period = int(20 / sdt)
# nrem_delay: NREM only starts with some delay
nrem_delay = int(10 / sdt)
grace_count = grace_period
delay_count = nrem_delay
for i in range(ntbins):
if pnrem_hidden == 0:
### Entering NREM:
            # Delta power larger than the high threshold
if pow_delta[i] > thr_delta1 and pow_mu[i] < thr_mu and th_delta[i] < thr_th_delta1:
### NOT-NREM -> NREM
pnrem_hidden = 1
nrem_idx[i] = 0
delay_count -= 1
# we are fully in NREM, that's why grace_count is reset:
grace_count = grace_period
else:
### NOT-NREM -> NOT-NREM
if grace_count > 0:
grace_count -= 1
nrem_idx[i] = 1
else:
nrem_idx[i] = 0
else:
### pnrem_hidden == 1
if pow_delta[i] > thr_delta2 and pow_mu[i] < thr_mu and th_delta[i] < thr_th_delta1:
if delay_count > 0:
delay_count -= 1
nrem_idx[i] = 0
else :
nrem_idx[i] = 1
else:
### Exit NREM -> NOT-NREM
                # we are fully out of NREM, so delay_count can be reset:
delay_count = nrem_delay
pnrem_hidden = 0
if grace_count > 0:
grace_count -= 1
nrem_idx[i] = 1
#### figure ##############################################
plt.figure()
t = np.arange(0, sdt * (ntbins - 1) + sdt / 2.0, sdt)
ax1 = plt.subplot(411)
im = np.where((freq >= 0) & (freq <= 30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im, :]), vmin=0, vmax=med * 2, cmap='jet')
ax1.pcolorfast(t, freq[im], np.flipud(SE[im, :]), vmin=0, vmax=med * 2, cmap='jet')
plt.yticks(list(range(0, 31, 10)), list(range(30, -1, -10)))
plt.ylabel('Freq. (Hz)')
plt.axis('tight')
ax2 = plt.subplot(412, sharex=ax1)
ax2.plot(t, pow_mu, color='black')
ax2.plot(t, np.ones((len(t),)) * thr_mu, color='red')
plt.ylabel('EMG Pow.')
plt.xlim((t[0], t[-1]))
ax3 = plt.subplot(413, sharex=ax2)
ax3.plot(t, pow_delta, color='black')
ax3.plot(t, np.ones((len(t),)) * thr_delta1, color='red')
ax3.plot(t, np.ones((len(t),)) * thr_delta2, color=[1, 0.6, 0.6])
ax3.plot(t, nrem_idx * thr_delta1, color=[0.6, 0.6, 0.6])
plt.ylabel('Delta Pow.')
plt.xlim((t[0], t[-1]))
ax4 = plt.subplot(414, sharex=ax3)
ax4.plot(t, th_delta, color='black')
ax4.plot(t, np.ones((len(t),)) * thr_th_delta1, color='red')
plt.ylabel('Theta/Delta')
plt.xlabel('Time (s)')
plt.xlim((t[0], t[-1]))
plt.show(block=False)
# Determine which channel is EEG, EMG
ch_alloc = get_infoparam(os.path.join(ppath, recordings[0], 'info.txt'), 'ch_alloc')[0]
# write config file
if psave:
cfile = os.path.join(ppath, idf + '_nrem.txt')
fid = open(cfile, 'w')
fid.write(('IDF: %s' + os.linesep) % idf)
fid.write(('ch_alloc: %s' + os.linesep) % ch_alloc)
fid.write(('THR_DELTA: %.2f %.2f' + os.linesep) % (thr_delta1, thr_delta2))
fid.write(('THR_MU: %.2f' + os.linesep) % thr_mu)
fid.write(('THR_TH_DELTA: %.2f %.2f' + os.linesep) % (thr_th_delta1, thr_th_delta2))
fid.write(('STD_THDELTA: %.2f' + os.linesep) % std_thdelta)
fid.write(('SF: %.2f' + os.linesep) % sf)
fid.write(('ALPHA: %.2f' + os.linesep) % alpha)
if xemg:
fid.write(('XEMG: %d' + os.linesep) % 1)
else:
fid.write(('XEMG: %d' + os.linesep) % 0)
fid.close()
print('wrote file %s' % cfile)
def rem_online_analysis(ppath, recordings, backup='', single_mode=False, fig_file='', overlap=0):
"""
analyze results from closed-loop experiments
:param ppath: base folder
    :param recordings: list of strings, recordings
:param backup: string, potential second backup folder with recordings
:param single_mode: boolean, if True, average across all REM periods (irrespective of mouse)
and plot each single REM period as dot
    :param overlap: float between 0 and 100; specifies the percentage by which the online detected REM period has to
           overlap with the real (annotated) REM period to be further considered for analysis;
if overlap == 0, then any overlap counts, i.e. this parameter has no influence
:return: df, pd.DataFrame, with control and experimental REM durations as data columns
"""
if type(recordings) != list:
recordings = [recordings]
overlap = overlap / 100.0
paths = dict()
for rec in recordings:
if os.path.isdir(os.path.join(ppath, rec)):
paths[rec] = ppath
else:
paths[rec] = backup
mice = dict()
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mice:
mice[idf] = 1
mice = list(mice.keys())
if len(mice) == 1:
single_mode=True
dur_exp = {m:[] for m in mice}
dur_ctr = {m:[] for m in mice}
for rec in recordings:
idf = re.split('_', rec)[0]
M,S = load_stateidx(paths[rec], rec)
sr = get_snr(paths[rec], rec)
nbin = int(np.round(sr)*2.5)
dt = (1.0/sr)*nbin
laser = load_laser(paths[rec], rec)
rem_trig = so.loadmat(os.path.join(paths[rec], rec, 'rem_trig_%s.mat'%rec), squeeze_me=True)['rem_trig']
laser = downsample_vec(laser, nbin)
laser[np.where(laser>0)] = 1
rem_trig = downsample_vec(rem_trig, nbin)
rem_trig[np.where(rem_trig>0)] = 1
laser_idx = np.where(laser==1)[0]
rem_idx = np.where(rem_trig==1)[0]
# REM sequences from offline analysis (assumed to be the
# "ground truth"
seq = get_sequences(np.where(M==1)[0])
for s in seq:
# check true REM sequences overlapping with online detected sequences
isect = np.intersect1d(s, rem_idx)
#print(len(isect)/ len(s))
# test if real REM period s overlaps with online detected REM periods and,
# if yes, make sure that the overlap is at least overlap *100 percent
if len(np.intersect1d(s, rem_idx)) > 0 and float(len(isect)) / len(s) >= overlap:
drn = (s[-1]-s[0]+1)*dt
# does the sequence overlap with laser?
if len(np.intersect1d(isect, laser_idx))>0:
dur_exp[idf].append(drn)
else:
dur_ctr[idf].append(drn)
data = {'exp':[], 'ctr':[]}
# if single_mode put all REM periods together,
# otherwise average across REM periods for each mouse
if len(mice) == 1 or single_mode==True:
for m in mice:
data['exp'] += dur_exp[m]
data['ctr'] += dur_ctr[m]
else:
for idf in dur_ctr:
dur_ctr[idf] = np.array(dur_ctr[idf]).mean()
dur_exp[idf] = np.array(dur_exp[idf]).mean()
data['exp'] = np.array(list(dur_exp.values()))
data['ctr'] = np.array(list(dur_ctr.values()))
df = pd.DataFrame({'ctr':pd.Series(data['ctr']), 'exp' : pd.Series(data['exp'])})
# plot everything
if not single_mode:
plt.ion()
plt.figure()
ax = plt.axes([0.2, 0.15, 0.3, 0.7])
df_mean = df.mean()
plt.bar([1], [df_mean['ctr']], color='grey', label='W/o Laser')
plt.bar([2], [df_mean['exp']], color='blue', label='With laser')
plt.xticks([1,2])
box_off(ax)
#ax.set_xticklabels(['ctr', 'exp'], rotation=30)
plt.ylabel('REM duration (s)')
for (a,b) in zip(df['ctr'], df['exp']):
plt.plot([1,2], [a,b], color='black')
plt.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=1, frameon=False)
else:
plt.figure()
ax = plt.axes([0.2, 0.15, 0.3, 0.7])
df_mean = df.mean()
plt.bar([1], [df_mean['ctr']], color='grey')
plt.bar([2], [df_mean['exp']], color='blue')
plt.xticks([1,2])
box_off(ax)
#ax.set_xticklabels(['ctr', 'exp'], rotation=30)
plt.ylabel('REM duration (s)')
a = df['ctr']
b = df['exp']
plt.plot(np.ones((len(a),)), a, '.', color='black', label='W/o Laser')
plt.plot(2*np.ones((len(b),)), b, '.', color='black', label='With laser')
plt.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=1, frameon=False)
plt.show()
if len(fig_file) > 0:
save_figure(fig_file)
return df
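# Hedged usage sketch (not part of the original module; the base path and recording
# names below are hypothetical). rem_online_analysis expects each recording folder to
# contain the offline annotation (remidx), the laser signal and 'rem_trig_<rec>.mat':
#   df = rem_online_analysis('/data/sleep', ['M1_n1', 'M2_n1'], overlap=50)
#   print(df.mean())   # mean REM duration without ('ctr') and with ('exp') laser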
def online_homeostasis(ppath, recordings, backup='', mode=0, single_mode=False, pplot=True, overlap=0, ma_thr=0):
"""
Further analysis of data obtained from closed loop stimulation
Assume the sleep structure looks like this
R R R R W W N N N N N W W N N N N R R R R R
REM_pre -- inter REM ---- REM_post
REM_pre is the duration of the first REM period, inter-REM is everything between REM_pre and the
next REM period REM_post.
The function calculates the inter REM duration after REM periods with laser and after REM periods w/o laser
:param ppath: base folder
:param recordings: list of recording, or file listing
:param backup: backup folder for $ppath
:param mode: mode == 0, calculate complete inter REM duration
mode == 2, only calculate duration of wake in inter REM periods
mode == 3, only calculate duration of NREM in inter REM periods
:param single_mode: consider each single recording, instead of mice
:param overlap: percentage (number between 0 and 100). Defines the percentage
how much a true (offline annotated) REM period should overlap with laser
to be considered as REM sleep with laser.
Of note, REM periods w/o laser have to have 0 overlap with laser.
All remaining REM periods are discarded.
:param pplot: if True, plot figure; errorbars show 95% confidence intervals,
calculated using bootstrapping
:param ma_thr: float, microarousal threshold in seconds; wake periods shorter than ma_thr are re-scored as NREM before analysis
:return: df, if single_mode == True $df is a pandas DataFrame:
REM iREM laser
mouse - mouse ID
REM - REM duration
iREM - inter-REM duration following that REM period
laser - 'y' or 'n'; depending on whether laser was on during REM sleep period (for "REM") or during the
preceding REM sleep period (for "iREM")
if single_mode == False, mouse is the data frame index
"""
if type(recordings) != list:
recordings = [recordings]
if overlap > 0:
overlap = overlap / 100
paths = dict()
for rec in recordings:
if os.path.isdir(os.path.join(ppath, rec)):
paths[rec] = ppath
else:
paths[rec] = backup
mice = dict()
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mice:
mice[idf] = 1
mice = list(mice.keys())
if len(mice) == 1:
single_mode=True
remdur_exp = {m:[] for m in mice}
remdur_ctr = {m:[] for m in mice}
itdur_exp = {m:[] for m in mice}
itdur_ctr = {m:[] for m in mice}
for rec in recordings:
idf = re.split('_', rec)[0]
M = load_stateidx(paths[rec], rec)[0]
sr = get_snr(paths[rec], rec)
nbin = int(np.round(sr)*2.5)
dt = (1.0/sr)*nbin
if ma_thr>0:
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt <= ma_thr:
M[s] = 3
laser = load_laser(paths[rec], rec)
rem_trig = so.loadmat(os.path.join(paths[rec], rec, 'rem_trig_%s.mat' % rec), squeeze_me=True)['rem_trig']
laser = downsample_vec(laser, nbin)
laser[np.where(laser>0)] = 1
rem_trig = downsample_vec(rem_trig, nbin)
rem_trig[np.where(rem_trig>0)] = 1
laser_idx = np.where(laser==1)[0]
rem_idx = np.where(rem_trig==1)[0]
# REM sequences from offline analysis (assumed to be the
# "ground truth"
seq = get_sequences(np.where(M==1)[0])
for (p,q) in zip(seq[0:-1], seq[1:]):
# check if true REM sequences do overlap with online detected sequences
# and only continue working with those:
if len(np.intersect1d(p, rem_idx)) > 0:
drn = (p[-1]-p[0]+1)*dt
it_M = M[p[-1]+1:q[0]]
if mode == 0:
it_drn = len(it_M)*dt
elif mode == 2:
it_drn = len(np.where(it_M==2)[0]) * dt
else:
it_drn = len(np.where(it_M == 3)[0]) * dt
# does the true REM sequence overlap with laser?
# by setting overlap to a value > 0, you can
# set a percentage how much the REM period should overlap with laser
# NEW 08/26/21
if len(np.intersect1d(p, laser_idx)) / len(p) > overlap:
remdur_exp[idf].append(drn)
itdur_exp[idf].append(it_drn)
elif len(np.intersect1d(p, laser_idx)) == 0:
remdur_ctr[idf].append(drn)
itdur_ctr[idf].append(it_drn)
else:
pass
# if single_mode put all REM periods together,
# otherwise average across REM periods for each mouse
if len(mice) == 1 or single_mode==True:
data = {'itexp':[], 'itctr':[], 'remexp':[], 'remctr':[]}
for m in mice:
data['itexp'] += itdur_exp[m]
data['itctr'] += itdur_ctr[m]
data['remexp'] += remdur_exp[m]
data['remctr'] += remdur_ctr[m]
df = pd.DataFrame({'REM': data['remexp']+data['remctr'], 'iREM':data['itexp']+data['itctr'], 'laser': ['y']*len(data['remexp']) + ['n']*len(data['remctr'])})
else:
for idf in mice:
itdur_ctr[idf] = np.array(itdur_ctr[idf]).mean()
itdur_exp[idf] = np.array(itdur_exp[idf]).mean()
remdur_ctr[idf] = np.array(remdur_ctr[idf]).mean()
remdur_exp[idf] = np.array(remdur_exp[idf]).mean()
data = {}
for s in ['itexp', 'itctr', 'remexp', 'remctr']:
data[s] = np.zeros((len(mice),))
i = 0
for m in mice:
data['itexp'][i] = itdur_exp[m]
data['itctr'][i] = itdur_ctr[m]
data['remexp'][i] = remdur_exp[m]
data['remctr'][i] = remdur_ctr[m]
i += 1
df = pd.DataFrame({'REM': np.concatenate((data['remexp'], data['remctr'])),
'iREM': np.concatenate((data['itexp'], data['itctr'])),
'laser': ['y']*len(mice) + ['n']*len(mice),
'mouse': mice+mice})
if pplot and not single_mode:
dfm = pd.melt(df, id_vars=['laser', 'mouse'], var_name='state')
sns.set_style('whitegrid')
plt.ion()
plt.figure()
sns.barplot(data=dfm, hue='laser', x='state', y='value', palette=['blue', 'gray'])
sns.swarmplot(data=dfm, hue='laser', x='state', y='value', dodge=True, color='black')
sns.despine()
plt.ylabel('Duration (s)')
if pplot and single_mode:
dfm = pd.melt(df, id_vars=['laser'], var_name='state')
plt.ion()
plt.figure()
sns.set(style="whitegrid")
#sns.swarmplot(data=df[['itctr', 'itexp']], color='black')
#sns.barplot(data=df[['itctr', 'itexp']], palette=['gray', 'blue'], errcolor='black')
sns.barplot(data=dfm, hue='laser', x='state', y='value', palette=['blue', 'gray'])
sns.swarmplot(data=dfm, hue='laser', x='state', y='value', dodge=True, color='black')
sns.despine()
plt.ylabel('Duration (s)')
return df
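# Hedged usage sketch (recording names are hypothetical): compute inter-REM durations
# after laser vs. non-laser REM periods, counting microarousals shorter than 20 s as NREM:
#   df = online_homeostasis('/data/sleep', ['M1_n1', 'M2_n1'], mode=0, ma_thr=20)
#   df.groupby('laser').mean()   # average REM and inter-REM duration per condition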
### FUNCTIONS USED BY SLEEP_STATE #####################################################
def get_sequences(idx, ibreak=1) :
"""
get_sequences(idx, ibreak=1)
idx - np.vector of indices
@RETURN:
seq - list of np.vectors
"""
diff = idx[1:] - idx[0:-1]
breaks = np.nonzero(diff>ibreak)[0]
breaks = np.append(breaks, len(idx)-1)
seq = []
iold = 0
for i in breaks:
r = list(range(iold, i+1))
seq.append(idx[r])
iold = i+1
return seq
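# Example sketch: get_sequences splits an index vector into runs of consecutive
# indices; a gap larger than ibreak starts a new run.
#   get_sequences(np.array([1, 2, 3, 7, 8, 12]))
#   # -> [array([1, 2, 3]), array([7, 8]), array([12])]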
def threshold_crossing(data, th, ilen, ibreak, m):
"""
seq = threshold_crossing(data, th, ilen, ibreak, m)
"""
if m>=0:
idx = np.where(data>=th)[0]
else:
idx = np.where(data<=th)[0]
# gather sequences
j = 0
seq = []
while (j <= len(idx)-1):
s = [idx[j]]
for k in range(j+1,len(idx)):
if (idx[k] - idx[k-1]-1) <= ibreak:
# add j to sequence
s.append(idx[k])
else:
break
if (s[-1] - s[0]+1) >= ilen and not(s[0] in [i[1] for i in seq]):
seq.append((s[0], s[-1]))
if j == len(idx)-1:
break
j=k
return seq
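# Example sketch: with m >= 0 the function collects runs of indices where data >= th,
# tolerating gaps of up to ibreak bins, and keeps runs spanning at least ilen bins;
# each run is returned as a (start_index, end_index) tuple.
#   threshold_crossing(np.array([0, 5, 6, 0, 7, 0, 0, 5]), th=4, ilen=2, ibreak=1, m=1)
#   # -> [(1, 4)]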
def closest_precessor(seq, i):
"""
find the preceding element in seq which is closest to i
helper function for sleep_state
"""
tmp = seq-i;
d = np.where(tmp<0)[0]
if len(d)>0:
id = seq[d[-1]];
else:
id = 0;
return id
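# Example sketch: the largest element of seq that is still smaller than i is returned,
# or 0 if no element precedes i.
#   closest_precessor(np.array([2, 5, 9]), 7)   # -> 5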
def write_remidx(M, K, ppath, name, mode=1) :
"""
write_remidx(M, K, ppath, name, mode=1)
write the sleep state sequence M together with the flags K to the remidx file of
recording name; mode == 0 writes remidx_$name.txt, otherwise remidx_$name_corr.txt
"""
if mode == 0 :
outfile = os.path.join(ppath, name, 'remidx_' + name + '.txt')
else :
outfile = os.path.join(ppath, name, 'remidx_' + name + '_corr.txt')
f = open(outfile, 'w')
s = ["%d\t%d\n" % (i,j) for (i,j) in zip(M[0,:],K)]
f.writelines(s)
f.close()
#######################################################################################
### MANIPULATING FIGURES ##############################################################
def set_fontsize(fs):
import matplotlib
matplotlib.rcParams.update({'font.size': fs})
def set_fontarial():
"""
set Arial as default font
"""
import matplotlib
matplotlib.rcParams['font.sans-serif'] = "Arial"
def save_figure(fig_file):
# alternative way of setting nice fonts:
#matplotlib.rcParams['pdf.fonttype'] = 42
#matplotlib.rcParams['ps.fonttype'] = 42
#matplotlib.pylab.savefig(fig_file, dpi=300)
#matplotlib.rcParams['text.usetex'] = False
#matplotlib.rcParams['text.usetex'] = True
plt.savefig(fig_file, bbox_inches="tight", dpi=200)
#matplotlib.rcParams['text.usetex'] = False
def box_off(ax):
"""
similar to Matlab's box off
"""
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
#######################################################################################
def sleep_state(ppath, name, th_delta_std=1, mu_std=0, sf=1, sf_delta=3, pwrite=0,
pplot=True, pemg=True, vmax=2.5, pspec_norm=False, use_idx=[]):
"""
automatic sleep state detection based on
delta, theta, sigma, gamma and EMG power.
New: the sigma band is also used; it is very helpful for classifying pre-REM periods
as NREM, which otherwise tend to be classified as wake.
Gamma power nicely picks up microarousals.
My strategy is the following:
I smooth delta band a lot to avoid strong fragmentation of sleep; but to
still pick up microarousals I use the gamma power.
spectrogram data has to be calculated before using calculate_spectrum
Each bin in the spectrogram gets assigned one of four states:
1-REM
2-Wake
3-NREM
0-undef
:param ppath: base folder
:param name: recording name
:param th_delta_std: threshold for theta/delta band is calculated as mean(theta/delta) + th_delta_std*std(theta/delta)
:param mu_std: threshold for EMG power is calculated as mean(EMG) + mu_std * std(EMG)
:param sf: smoothing factor for gamma and sigma power
:param sf_delta: smoothing factor for delta power
:param pwrite: if True, save sleep classification to file remidx_$name.txt
:param pplot: if True, plot figures
:param pemg: if True, use EMG as EMG, otherwise use EEG gamma power instead
:param vmax: float, set maximum of color range of EEG heatmap.
:param pspec_norm: boolean, if True, normalize the EEG spectrogram by dividing each frequency band by its mean; only affects
plotting, no effect on the sleep state calculation
:param use_idx: list, if not empty, use only given indices to calculate sleep state
:return:
"""
PRE_WAKE_REM = 30.0
# Minimum Duration and Break in
# high theta/delta, high emg, high delta, high sigma and gamma sequences
#
# duration[i,0] is the minimum duration of sequence of state i
# duration[i,1] is maximal break duration allowed in a sequence of state i
duration = np.zeros((5,2))
# high theta/delta
duration[0,:] = [5,15]
# high emg
duration[1,:] = [0, 5]
# high delta
duration[2,:] = [10, 10]
# high sigma
duration[3,:] = [10, 10]
# gamma
duration[4,:] = [0, 5]
# Frequency bands/ranges for delta, sigma, theta, EMG (mu) and gamma
r_delta = [0.5, 4]
r_sigma = [12, 20]
r_theta = [5,12]
# EMG band
r_mu = [50, 500]
if not pemg:
r_mu = [250, 500]
# high gamma power
r_gamma = [100, 150]
#load EEG and EMG spectrum, calculated by calculate_spectrum
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'))
if pemg:
Q = so.loadmat(os.path.join(ppath, name, 'msp_' + name + '.mat'))
else:
Q = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'))
SPEEG = np.squeeze(P['SP'])
if pemg == 1:
SPEMG = np.squeeze(Q['mSP'])
else:
SPEMG = np.squeeze(P['SP'])
if use_idx == []:
use_idx = range(0, SPEEG.shape[1])
freq = np.squeeze(P['freq'])
t = np.squeeze(P['t'])
dt = float(np.squeeze(P['dt']))
N = len(t)
duration = np.divide(duration,dt)
# get indices for frequency bands
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
i_sigma = np.where((freq >= r_sigma[0]) & (freq <= r_sigma[1]))[0]
i_gamma = np.where((freq >= r_gamma[0]) & (freq <= r_gamma[1]))[0]
p_delta = smooth_data( SPEEG[i_delta,:].mean(axis=0), sf_delta )
p_theta = smooth_data( SPEEG[i_theta,:].mean(axis=0), 0 )
# now filtering for EMG to pick up microarousals
p_mu = smooth_data( SPEMG[i_mu,:].mean(axis=0), sf )
p_sigma = smooth_data( SPEEG[i_sigma,:].mean(axis=0), sf )
p_gamma = smooth_data( SPEEG[i_gamma,:].mean(axis=0), 0 )
th_delta = np.divide(p_theta, p_delta)
#th_delta = smooth_data(th_delta, 2);
seq = {}
seq['high_theta'] = threshold_crossing(th_delta, np.nanmean(th_delta[use_idx])+th_delta_std*np.nanstd(th_delta[use_idx]),
duration[0,1], duration[0,1], 1)
seq['high_emg'] = threshold_crossing(p_mu, np.nanmean(p_mu[use_idx])+mu_std*np.nanstd(p_mu[use_idx]), duration[1,0], duration[1,1], 1)
"""
Some utility functions
"""
import numpy as np
import scipy.linalg
import numpy.polynomial.polynomial as poly
import scipy.integrate
def memory_kernel(ntimes, dt, coeffs, dim_x, noDirac=False):
"""
Return the value of the estimated memory kernel
Parameters
----------
ntimes, dt: number of timesteps and the timestep
coeffs : Coefficients for diffusion and friction
dim_x: Dimension of visible variables
noDirac: Remove the dirac at time zero
Returns
-------
timespan : array-like, shape (n_samples, )
Array of time to evaluate memory kernel
kernel_evaluated : array-like, shape (n_samples, dim_x,dim_x)
Array of values of the kernel at time provided
"""
Avv = coeffs["A"][:dim_x, :dim_x]
Ahv = coeffs["A"][dim_x:, :dim_x]
Avh = coeffs["A"][:dim_x, dim_x:]
Ahh = coeffs["A"][dim_x:, dim_x:]
Kernel = np.zeros((ntimes, dim_x, dim_x))
for n in np.arange(ntimes):
Kernel[n, :, :] = -np.matmul(Avh, np.matmul(scipy.linalg.expm(-1 * n * dt * Ahh), Ahv))
if not noDirac:
Kernel[0, :, :] = Kernel[0, :, :] + Avv
return dt * np.arange(ntimes), Kernel
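# Hedged usage sketch (the coefficient values are made up for illustration): coeffs must
# hold a square drift matrix "A" whose top-left dim_x x dim_x block couples the visible
# variables.
#   coeffs = {"A": np.array([[1.0, 0.5], [-0.5, 2.0]])}
#   times, kernel = memory_kernel(ntimes=100, dt=0.01, coeffs=coeffs, dim_x=1)
#   # kernel.shape == (100, 1, 1); kernel[0] additionally carries the Dirac term Avv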
def memory_kernel_logspace(dt, coeffs, dim_x, noDirac=False):
"""
Return the value of the estimated memory kernel
Parameters
----------
dt: Timestep
coeffs : Coefficients for diffusion and friction
dim_x: Dimension of visible variables
noDirac: Remove the dirac at time zero
Returns
-------
timespan : array-like, shape (n_samples, )
Array of time to evaluate memory kernel
kernel_evaluated : array-like, shape (n_samples, dim_x,dim_x)
Array of values of the kernel at time provided
"""
Avv = coeffs["A"][:dim_x, :dim_x]
Ahv = coeffs["A"][dim_x:, :dim_x]
Avh = coeffs["A"][:dim_x, dim_x:]
Ahh = coeffs["A"][dim_x:, dim_x:]
eigs = np.linalg.eigvals(Ahh)
Kernel = np.zeros((150, dim_x, dim_x))
final_time = 25 / np.min(np.abs(np.real(eigs)))
times = np.logspace(np.log10(dt), np.log10(final_time), num=150)
for n, t in enumerate(times):
Kernel[n, :, :] = -np.matmul(Avh, np.matmul(scipy.linalg.expm(-1 * t * Ahh), Ahv))
if not noDirac:
Kernel[0, :, :] = Kernel[0, :, :] + Avv
return times, Kernel
def memory_timescales(coeffs, dim_x):
"""
Compute the eigenvalues of A_hh to get the timescale of the memory
"""
return np.linalg.eigvals(coeffs["A"][dim_x:, dim_x:])
def friction_matrix(coeffs, dim_x):
"""
Compute integral of memory kernel to get friction matrix
"""
Avv = coeffs["A"][:dim_x, :dim_x]
Ahv = coeffs["A"][dim_x:, :dim_x]
Avh = coeffs["A"][:dim_x, dim_x:]
Ahh = coeffs["A"][dim_x:, dim_x:]
return Avv - np.matmul(Avh, np.matmul(np.linalg.inv(Ahh), Ahv))
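# Sketch of the identity used above: with K(t) = Avv*delta(t) - Avh @ expm(-t*Ahh) @ Ahv
# (see memory_kernel), integrating over t in [0, inf) gives
#   int_0^inf K(t) dt = Avv - Avh @ inv(Ahh) @ Ahv,
# i.e. the Schur complement of Ahh in A, assuming all eigenvalues of Ahh have positive
# real part so that the integral converges.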
def diagonalC(coeffs, dim_x):
"""
Return A and C after putting C in diagonal form
"""
C = coeffs["C"]
lamb, vect = np.linalg.eigh(C[dim_x:, dim_x:])
vect_ext = np.identity(C.shape[0])
vect_ext[dim_x:, dim_x:] = vect
C_bis = vect_ext.T @ C @ vect_ext
A_bis = vect_ext.T @ coeffs["A"] @ vect_ext
return A_bis, C_bis
def prony_splitting(coeffs, dim_x):
"""
Compute the Kernel under prony series form
"""
Ahv = coeffs["A"][dim_x:, :dim_x]
Avh = coeffs["A"][:dim_x, dim_x:]
eigs, right_vect = np.linalg.eig(coeffs["A"][dim_x:, dim_x:])
right_coeffs = np.linalg.inv(right_vect)
import numpy as np
import scipy.stats
from scipy.signal.windows import *
import datetime
def generateRandomBits(n_bits):
'''
Generates a numpy array of 0's and 1's.
'''
return np.random.randint(0,high=2,size=n_bits,dtype='int')
def bitsToSymbols(bits, M):
'''
Takes an array of bits and converts them to their corresponding symbols.
M is the number of points in the constellation.
e.g. 0101 0000 1111 1010 -> 5 0 15 10
'''
n = int(np.log2(M))
nsym = int(len(bits)/n)
symbols = np.zeros((nsym,),dtype='int')
w = (2**np.arange(n-1,-1,-1)).astype('int')
for i in range(0,nsym):
symbols[i] = sum(bits[i*n:(i+1)*n] * w)
return symbols
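# Example sketch reproducing the docstring example with M=16 (4 bits per symbol):
#   bits = np.array([0,1,0,1, 0,0,0,0, 1,1,1,1, 1,0,1,0])
#   bitsToSymbols(bits, 16)   # -> array([ 5,  0, 15, 10])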
def symbolsToIq(syms, constellation):
"""
Converts symbol indexes to complex values according to the given constellation
"""
return constellation[syms]
def matchedFilter(x, p):
"""
Given a signal x, performs matched filtering based on pulse shape p
"""
return np.convolve(x,np.flip(np.conj(p)))
def symbolsToBits(syms, M):
'''
Takes a series of symbols and converts them to their corresponding bits.
M is the number of points in the constellation.
e.g. 5 0 15 10 -> 0101 0000 1111 1010
'''
n = int(np.log2(M))
bits = np.zeros(len(syms)*n, dtype='int')
for i in range(0,len(syms)):
s = format(syms[i], '0'+str(n)+'b') # represent symbol as binary string
for j in range(0,n):
bits[i*n+j] = s[j]
return bits
def calculateBer(b1,b2):
"""
Calculates the number of nonzero elements in the difference of the two arrays, and computes the bit error rate
"""
return np.count_nonzero(b1 - b2) / len(b1)
def noiseVariance(SNR, Eb):
"""
Given an SNR in dB and an energy per bit Eb, calculate the noise variance N0.
Note: This calculates Eb / gamma, where gamma is the SNR on a linear scale.
"""
return Eb / (10 ** (SNR/10)) # calculates N0
def addNoise(iqs, **kwargs):
'''
adds additive white gaussian noise to an array of complex IQ samples
in **kwargs, you must specify
a. SNR (dB) and Eb (the energy per bit), or
b. N0, the noise variance
'''
if 'SNR' in kwargs and 'Eb' in kwargs:
SNR = kwargs['SNR']
Eb = kwargs['Eb']
N0 = noiseVariance(SNR, Eb)
elif 'N0' in kwargs.keys():
N0 = kwargs['N0']
else:
raise Exception("addNoise(): must specify N0 or SNR & Eb in kwargs.")
var = N0 / 2
nr = np.random.normal(scale=np.sqrt(var), size=(len(iqs),))
ni = np.random.normal(scale=np.sqrt(var), size=(len(iqs),))
return iqs + (nr + 1j*ni)
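# Example sketch (values are arbitrary): either specify SNR (dB) together with Eb,
# or pass the noise variance N0 directly.
#   iqs = np.array([1+0j, -1+0j, 1j, -1j])
#   noisy = addNoise(iqs, SNR=10, Eb=1.0)
#   noisy = addNoise(iqs, N0=0.1)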
def addFrequencyOffset(iqs, nuT=0.0):
'''
Adds a frequency nuT in terms of cycles/sample.
'''
return iqs * np.exp(1j*2.0*np.pi*np.arange(0,len(iqs))*nuT)
def addPhaseOffset(iqs, phase=None):
'''
Adds a random phase to a list of complex values.
If none is specifed, a random phase is chosen.
'''
if phase is None:
phase = 2*np.pi*np.random.rand()
return iqs * np.exp(1j*phase)
def phaseAmbiguity(rx,uw):
'''
Returns angle between received samples and the provided unique word.
'''
return np.angle(np.sum(rx*np.conj(uw)))
def phaseAmbiguityResolution(rx, rxuw, uw):
'''
Returns the received data with the phase ambiguity removed.
rxuw are the received symbols corresponding to the unique word
uw is the unique word itself
'''
a = phaseAmbiguity(rxuw,uw)
return addPhaseOffset(rx, phase=-a)
def makeDecision(iq, constellation):
'''
returns the index of nearest constellation point
'''
return np.argmin(abs(constellation - iq))
def makeDecisions(iqs, constellation):
'''
returns the indexes of the nearest constellation points
'''
idxs = np.zeros(len(iqs), dtype='int8')
for i in range(0,len(iqs)):
idxs[i] = makeDecision(iqs[i], constellation)
return idxs
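# Example sketch with an assumed unit-energy QPSK constellation:
#   constellation = np.array([1+1j, -1+1j, -1-1j, 1-1j]) / np.sqrt(2)
#   makeDecisions(np.array([0.9+1.1j, -1.2-0.8j]), constellation)   # -> indices [0, 2]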
def freqOffsetEstimation16Apsk(rx, mode='gauss'):
'''
Various methods for estimating a frequency offset when using a 16-APSK constellation
Returns the normalized frequency offset in terms of cycles/sample
Available modes:
'coarse'
'gauss'
'interp_1'
'interp_2'
'''
def nonLinearXform(z):
zz_m = z * np.conj(z);
zz_p = 12 * np.angle(z);
return zz_m * np.exp(1j*zz_p);
z = nonLinearXform(rx)
Lfft = 2*len(z)
ZZ = np.fft.fft(z,Lfft)
PP2 = ZZ * np.conj(ZZ)
idx_max = np.argmax(PP2)
if idx_max >= Lfft/2:
vhat2 = (idx_max-Lfft)/(Lfft*12)
else:
vhat2 = idx_max/(Lfft*12)
II1 = abs(PP2[idx_max-1])
II2 = abs(PP2[idx_max])
II3 = abs(PP2[idx_max+1])
II0 = np.maximum(II1, II3)
if mode == 'interp_1':
return vhat2 + 1/(12*Lfft) * 0.5*(II1-II3)/(II1-2*II2+II3) # D'Amico
elif mode == 'interp_2':
return vhat2 + np.sign(II3 - II1) / Lfft * II0 / (II2 - II0) / 2 / 2 / np.pi / 12
elif mode == 'gauss':
return vhat2 + ( (1 / Lfft) * (np.log(II1) - np.log(II3)) / (np.log(II1) - 2*np.log(II2) + np.log(II3)) ) / (24 * np.pi)
elif mode == 'coarse':
return vhat2
else:
raise Exception('Invalid mode.')
def freqOffsetEstimationQpsk(rx, mode='interp_2'):
'''
Various methods for estimating a frequency offset when using a QPSK constellation
Returns the normalized frequency offset in terms of cycles/sample
Available modes:
'coarse'
'gauss'
'interp_1'
'interp_2'
Note: none of these have been derived from first principles. I modified the 16-APSK frequency estimators and they appear to work. There are probably more efficient/better frequency estimation methods available for QPSK. I simply haven't looked for them.
'''
def nonLinearXform(z):
zz_m = z * np.conj(z);
zz_p = 4 * np.angle(z);
return zz_m * np.exp(1j*zz_p);
z = nonLinearXform(rx)
Lfft = 2*len(z)
ZZ = np.fft.fft(z,Lfft)
PP2 = ZZ * np.conj(ZZ)
idx_max = np.argmax(PP2)
if idx_max >= Lfft/2:
vhat2 = (idx_max-Lfft)/(Lfft*4)
else:
vhat2 = idx_max/(Lfft*4)
II1 = abs(PP2[idx_max-1])
II2 = abs(PP2[idx_max])
II3 = abs(PP2[idx_max+1])
II0 = np.maximum(II1, II3)
if mode == 'interp_1':
return vhat2 + 1/(4*Lfft) * 0.5*(II1-II3)/(II1-2*II2+II3) # D'Amico
elif mode == 'interp_2':
return vhat2 + np.sign(II3 - II1) / Lfft * II0 / (II2 - II0) / 2 / 2 / np.pi / 4
elif mode == 'gauss':
return vhat2 + ( (1 / Lfft) * (np.log(II1) - np.log(II3)) / (np.log(II1) - 2*np.log(II2) + np.log(II3)) ) / (2 * 4 * np.pi)
elif mode == 'coarse':
return vhat2
else:
raise Exception('Invalid mode.')
def createDerivativeFilter(N=51,Tsamp=1):
'''
Calculates the coefficients for a derivative filter.
N must be odd
'''
if (N+1)%4 != 0:
raise Exception("createDerivativeFilter: N must be of form 4*n-1")
ndmin = -(N-1)/2
ndmax = (N-1)/2
nd = np.arange(ndmin, ndmax+1)
d = np.zeros(nd.shape)
ndnz = nd != 0 # nonzero indexes
d[ndnz] = 1 / Tsamp * ((-1)**nd[ndnz]) / nd[ndnz]
d = d * blackman(N)
return d
def derivativeFilter2(x, N=51,Tsamp=1,zero_edge=False):
'''
Calculates the derivative of a discrete-time signal x with sample time Tsamp using a filter of length N.
Because convolution results in values that are not correct near the edges, I decided to zero out those values as they can be quite large. So don't be surprised by the zeros at the beginning and end of the array.
'''
d = createDerivativeFilter(N=N,Tsamp=Tsamp)
pad = int((N-1)/2) # this is the number of samples at the beginning/end of the signal that aren't quite correct due to blurring from convolution
xd = (np.convolve(x,d))[pad:-pad]
if zero_edge:
xd[0:pad] = 0
xd[-pad:-1] = 0
xd[-1] = 0
return xd
def derivativeFilter(x,N=51,Tsamp=1):
'''
Calculates the derivative of a discrete-time signal x with sample time Tsamp using a filter of length N.
Because convolution results in values that are not correct near the edges, this function appends a linear extrapolation on either end prior to convolution to avoid strange filter behavior.
This might not work well in the presence of even mild noise, but seems to work better than the original function I wrote.
'''
d = createDerivativeFilter(N=N,Tsamp=Tsamp)
pad = int((N-1)/2) # this is the number of samples at the beginning/end of the signal that aren't quite correct due to blurring from convolution
# extend x with linear extrapolation on both ends
x2 = np.zeros((len(x)+2*pad,))
x2[pad:-pad] = x # insert sequence in middle
x2[0:pad] = x[0] - np.arange(pad,0,step=-1) * (x[1] - x[0]) # left side extrapolation
x2[len(x2)-pad:len(x2)] = x[-1] + np.arange(1,pad+1) * (x[-1] - x[-2]) # right side extrapolation
# valid values
xd = (np.convolve(x2,d))[2*pad:-2*pad]
return xd
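# Example sketch: differentiate a 5 Hz sine sampled at 1 kHz; away from the edges the
# result should approximate 2*pi*5*cos(2*pi*5*t).
#   t = np.arange(0, 1, 1e-3)
#   xd = derivativeFilter(np.sin(2*np.pi*5*t), N=51, Tsamp=1e-3)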
def fractionalDelayCoeffs(T, dT, L):
"""
Produces fractional delay filter coefficients.
"""
n = np.arange(-L,L+1)
# Practice sites
#https://www.machinelearningplus.com/python/101-numpy-exercises-python/
#http://www.cs.umd.edu/~nayeem/courses/MSML605/files/04_Lec4_List_Numpy.pdf
#https://www.gormanalysis.com/blog/python-numpy-for-your-grandma/
#https://nickmccullum.com/advanced-python/numpy-indexing-assignment/
##? 1. Import numpy as np and see the version
# Difficulty Level: L1
# Q. Import numpy as np and print the version number.
import numpy as np
print(np.__version__)
##? 2. How to create a 1D array?
# Difficulty Level: L1
# Q. Create a 1D array of numbers from 0 to 9
arr = np.arange(10)
arr
##? 3. How to create a boolean array?
# Difficulty Level: L1
# Q. Create a 3×3 numpy array of all True’s
arr = np.full((3,3), True, dtype=bool)
arr
##? 4. How to extract items that satisfy a given condition from 1D array?
# Difficulty Level: L1
# Q. Extract all odd numbers from arr
arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
arr[arr % 2 == 1]
##? 5. How to replace items that satisfy a condition with another value in numpy array?
# Difficulty Level: L1
# Q. Replace all odd numbers in arr with -1
arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
arr[arr % 2 == 1] = -1
arr
##? 6. How to replace items that satisfy a condition without affecting the original array?
# Difficulty Level: L2
# Q. Replace all odd numbers in arr with -1 without changing arr
arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
#1 np.where
out = np.where(arr % 2 == 1, -1, arr)
out
#2 list comp
out = np.array([-1 if x % 2 == 1 else x for x in arr])
out
##? 7. How to reshape an array?
# Difficulty Level: L1
# Q. Convert a 1D array to a 2D array with 2 rows
arr = np.arange(10)
arr.reshape(2, -1)
# Setting y to -1 automatically decides number of columns.
# Could do the same with
arr.reshape(2, 5)
##? 8. How to stack two arrays vertically?
# Difficulty Level: L2
# Q. Stack arrays a and b vertically
a = np.arange(10).reshape(2, -1)
b = np.repeat(1, 10).reshape(2, -1)
#1
np.vstack([a, b])
#2
np.concatenate([a, b], axis=0)
#3
np.r_[a, b]
# 9. How to stack two arrays horizontally?
# Difficulty Level: L2
# Q. Stack the arrays a and b horizontally.
a = np.arange(10).reshape(2, -1)
b = np.repeat(1, 10).reshape(2, -1)
#1
np.hstack([a, b])
#2
np.concatenate([a, b], axis=1)
#3
np.c_[a, b]
##? 10. How to generate custom sequences in numpy without hardcoding?
# Difficulty Level: L2
# Q. Create the following pattern without hardcoding.
# Use only numpy functions and the below input array a.
a = np.array([1,2,3])
np.r_[np.repeat(a,3), np.tile(a, 3)]
##? 11. How to get the common items between two python numpy arrays?
# Difficulty Level: L2
# Q. Get the common items between a and b
a = np.array([1,2,3,2,3,4,3,4,5,6])
b = np.array([7,2,10,2,7,4,9,4,9,8])
np.intersect1d(a, b)
##? 12. How to remove from one array those items that exist in another?
# Difficulty Level: L2
# Q. From array a remove all items present in array b
a = np.array([1,2,3,4,5])
b = np.array([5,6,7,8,9])
# From 'a' remove all of 'b'
np.setdiff1d(a,b)
##? 13. How to get the positions where elements of two arrays match?
# Difficulty Level: L2
# Q. Get the positions where elements of a and b match
a = np.array([1,2,3,2,3,4,3,4,5,6])
b = np.array([7,2,10,2,7,4,9,4,9,8])
np.where(a==b)
# 14. How to extract all numbers between a given range from a numpy array?
# Difficulty Level: L2
# Q. Get all items between 5 and 10 from a.
a = np.array([2, 6, 1, 9, 10, 3, 27])
#1
idx = np.where((a>=5) & (a<=10))
a[idx]
#2
idx = np.where(np.logical_and(a >= 5, a <= 10))
a[idx]
#3
a[(a >= 5) & (a <= 10)]
##? 15. How to make a python function that handles scalars to work on numpy arrays?
# Difficulty Level: L2
# Q. Convert the function maxx that works on two scalars, to work on two arrays.
def maxx(x:np.array, y:np.array):
"""Get the maximum of two items"""
if x >= y:
return x
else:
return y
a = np.array([5, 7, 9, 8, 6, 4, 5])
b = np.array([6, 3, 4, 8, 9, 7, 1])
pair_max = np.vectorize(maxx, otypes=[float])
pair_max(a, b)
##? 16. How to swap two columns in a 2d numpy array?
# Difficulty Level: L2
# Q. Swap columns 1 and 2 in the array arr.
arr = np.arange(9).reshape(3,3)
arr
arr[:, [1, 0, 2]]
#by putting brackets inside the column slice. You have access to column indices
##? 17. How to swap two rows in a 2d numpy array?
# Difficulty Level: L2
# Q. Swap rows 1 and 2 in the array arr:
arr = np.arange(9).reshape(3,3)
arr
arr[[0, 2, 1], :]
#same goes here for the rows
##? 18. How to reverse the rows of a 2D array?
# Difficulty Level: L2
# Q. Reverse the rows of a 2D array arr.
# Input
arr = np.arange(9).reshape(3,3)
arr
arr[::-1, :]
#or
arr[::-1]
# 19. How to reverse the columns of a 2D array?
# Difficulty Level: L2
# Q. Reverse the columns of a 2D array arr.
# Input
arr = np.arange(9).reshape(3,3)
arr
arr[:,::-1]
##? 20. How to create a 2D array containing random floats between 5 and 10?
# Difficulty Level: L2
# Q. Create a 2D array of shape 5x3 to contain random decimal numbers between 5 and 10.
arr = np.arange(9).reshape(3,3)
#1
rand_arr = np.random.randint(low=5, high=10, size=(5,3)) + np.random.random((5,3))
rand_arr
#2
rand_arr = np.random.uniform(5, 10, size=(5,3))
rand_arr
##? 21. How to print only 3 decimal places in python numpy array?
# Difficulty Level: L1
# Q. Print or show only 3 decimal places of the numpy array rand_arr.
rand_arr = np.random.random((5,3))
rand_arr
rand_arr = np.random.random([5,3])
np.set_printoptions(precision=3)
rand_arr[:4]
##? 22. How to pretty print a numpy array by suppressing the scientific notation (like 1e10)?
# Difficulty Level: L1
# Q. Pretty print rand_arr by suppressing the scientific notation (like 1e10)
#Reset printoptions
np.set_printoptions(suppress=False)
# Create the random array
np.random.seed(100)
rand_arr = np.random.random([3,3])/1e3
rand_arr
#Set precision and suppress e notation
np.set_printoptions(suppress=True, precision=6)
rand_arr
##? 23. How to limit the number of items printed in output of numpy array?
# Difficulty Level: L1
# Q. Limit the number of items printed in python numpy array a to a maximum of 6 elements.
a = np.arange(15)
#set the elements to print in threshold
np.set_printoptions(threshold=6)
a
# reset the threshold to default
np.set_printoptions(threshold=1000)
##? 24. How to print the full numpy array without truncating
# Difficulty Level: L1
# Q. Print the full numpy array a without truncating.
a = np.arange(15)
# reset the threshold to default
np.set_printoptions(threshold=1000)
a
##? 25. How to import a dataset with numbers and texts keeping the text intact in python numpy?
# Difficulty Level: L2
# Q. Import the iris dataset keeping the text intact.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype="object")
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
iris[:3]
##? 26. How to extract a particular column from 1D array of tuples?
# Difficulty Level: L2
# Q. Extract the text column species from the 1D iris imported in previous question.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_1d = np.genfromtxt(url, delimiter=',', dtype=None, encoding = "UTF-8")
species = np.array([col[4] for col in iris_1d])
species[:5]
##? 27. How to convert a 1d array of tuples to a 2d numpy array?
# Difficulty Level: L2
# Q. Convert the 1D iris to 2D array iris_2d by omitting the species text field.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_1d = np.genfromtxt(url, delimiter=',', dtype=None, encoding = "UTF-8")
#1
no_species_2d = np.array([row.tolist()[:4] for row in iris_1d])
no_species_2d[:3]
#2
# Can directly specify columns to use with the "usecols" method
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
no_species_2d = np.genfromtxt(url, delimiter=',', dtype=None, encoding = "UTF-8", usecols=[0,1,2,3])
no_species_2d[:3]
##? 28. How to compute the mean, median, standard deviation of a numpy array?
# Difficulty: L1
# Q. Find the mean, median, standard deviation of iris's sepallength (1st column)
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_1d = np.genfromtxt(url, delimiter=',', dtype=None, encoding="utf-8")
sepal = np.genfromtxt(url, delimiter=',', dtype=float, usecols=[0])
# or
sepal = np.array([col[0] for col in iris_1d])
# or
sepal = np.array([col.tolist()[0] for col in iris_1d])
mu, med, sd = np.mean(sepal), np.median(sepal), np.std(sepal)
np.set_printoptions(precision=2)
print(f'The mean is {mu} \nThe median is {med} \nThe standard deviation is {sd}')
##? 29. How to normalize an array so the values range exactly between 0 and 1?
# Difficulty: L2
# Q. Create a normalized form of iris's sepallength whose values range exactly between 0 and 1 so that the minimum has value 0 and maximum has value 1.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_1d = np.genfromtxt(url, delimiter=',', dtype=None, encoding="utf-8")
sepal = np.genfromtxt(url, delimiter=',', dtype=float, usecols=[0])
#1
smax, smin = np.max(sepal), np.min(sepal)
S = (sepal-smin)/(smax-smin)
S
#2
S = (sepal-smin)/sepal.ptp()
S
##? 30. How to compute the softmax score?
# Difficulty Level: L3
# Q. Compute the softmax score of sepallength.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
sepal = np.genfromtxt(url, delimiter=',', dtype=float, usecols=[0], encoding="utf-8")
#or
sepal = np.genfromtxt(url, delimiter=',', dtype='object')
sepal = np.array([float(row[0]) for row in sepal])
# https://stackoverflow.com/questions/34968722/how-to-implement-the-softmax-function-in-python"""
#1
def softmax(x):
e_x = np.exp(x - np.max(x))
return e_x/ e_x.sum(axis=0)
softmax(sepal)
##? 31. How to find the percentile scores of a numpy array?
# Difficulty Level: L1
# Q. Find the 5th and 95th percentile of iris's sepallength
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
sepal = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0])
np.percentile(sepal, q=[5, 95])
##? 32. How to insert values at random positions in an array?
# Difficulty Level: L2
# Q. Insert np.nan values at 20 random positions in iris_2d dataset
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', encoding="utf-8")
#Can change object to float if you want
#1
i, j = np.where(iris_2d)
# i, j contain the row numbers and column numbers of the 600 elements of Irix_x
np.random.seed(100)
iris_2d[np.random.choice(i, 20), np.random.choice((j), 20)] = np.nan
#Checking nans in 2nd column
np.isnan(iris_2d[:, 1]).sum()
#Looking over all rows/columns
np.isnan(iris_2d[:, :]).sum()
#2
np.random.seed(100)
iris_2d[np.random.randint(150, size=20), np.random.randint(4, size=20)]=np.nan
#Looking over all rows/columns
np.isnan(iris_2d[:, :]).sum()
##? 33. How to find the position of missing values in numpy array?
# Difficulty Level: L2
# Q. Find the number and position of missing values in iris_2d's sepallength (1st column)
# ehh already did that? Lol. Using above filtered array from method 2 in
# question 32
np.isnan(iris_2d[:, 0]).sum()
#Indexes of which can be found with
np.where(np.isnan(iris_2d[:, 0]))
##? 34. How to filter a numpy array based on two or more conditions?
# Difficulty Level: L3
# Q. Filter the rows of iris_2d that has petallength (3rd column) > 1.5
# and sepallength (1st column) < 5.0
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0,1,2,3])
filt_cond = (iris_2d[:,0] < 5.0) & (iris_2d[:, 2] > 1.5)
iris_2d[filt_cond]
##? 35. How to drop rows that contain a missing value from a numpy array?
# Difficulty Level: L3:
# Q. Select the rows of iris_2d that does not have any nan value.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0,1,2,3])
iris_2d[np.random.randint(150, size=20), np.random.randint(4, size=20)] = np.nan
#1
#No direct numpy implementation
iris_drop = np.array([~np.any(np.isnan(row)) for row in iris_2d])
#Look at first 5 rows of drop
iris_2d[iris_drop][:5]
#2
iris_2d[np.sum(np.isnan(iris_2d), axis=1)==0][:5]
##? 36. How to find the correlation between two columns of a numpy array?
# Difficulty Level: L2
# Q. Find the correlation between SepalLength(1st column) and PetalLength(3rd column) in iris_2d
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0,1,2,3])
#1
np.corrcoef(iris_2d[:, 0], iris_2d[:, 2])[0, 1]
#2
from scipy.stats.stats import pearsonr
corr, p_val = pearsonr(iris_2d[:, 0], iris_2d[:, 2])
print(corr)
# Correlation coef indicates the degree of linear relationship between two numeric variables.
# It can range between -1 to +1.
# The p-value roughly indicates the probability of an uncorrelated system producing
# datasets that have a correlation at least as extreme as the one computed.
# The lower the p-value (<0.01), greater is the significance of the relationship.
# It is not an indicator of the strength.
#> 0.871754157305
##? 37. How to find if a given array has any null values?
# Difficulty Level: L2
# Q. Find out if iris_2d has any missing values.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0,1,2,3])
np.isnan(iris_2d[:, :]).any()
##? 38. How to replace all missing values with 0 in a numpy array?
# Difficulty Level: L2
# Q. Replace all occurrences of nan with 0 in numpy array
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0,1,2,3])
iris_2d[np.random.randint(150, size=20), np.random.randint(4, size=20)] = np.nan
#Check for nans
np.isnan(iris_2d[:, :]).any()
#Set indexes of the nans to 0
iris_2d[np.isnan(iris_2d)] = 0
#Check the same indexes
np.where(iris_2d==0)
#Check first 10 rows
iris_2d[:10]
##? 39. How to find the count of unique values in a numpy array?
# Difficulty Level: L2
# Q. Find the unique values and the count of unique values in iris's species
# Input
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object', encoding="utf-8")
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
#1
species = np.array([row.tolist()[4] for row in iris])
np.unique(species, return_counts=True)
#2
np.unique(iris[:, 4], return_counts=True)
##? 40. How to convert a numeric to a categorical (text) array?
# Difficulty Level: L2
# Q. Bin the petal length (3rd) column of iris_2d to form a text array, such that if petal length is:
# Less than 3 --> 'small'
# 3-5 --> 'medium'
# '>=5 --> 'large'
# Input
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
#1
#Bin the petal length
petal_length_bin = np.digitize(iris[:, 2].astype('float'), [0, 3, 5, 10])
#Map it to respective category.
label_map = {1: 'small', 2: 'medium', 3: 'large', 4: np.nan}
petal_length_cat = [label_map[x] for x in petal_length_bin]
petal_length_cat[:4]
#or
petal_length_cat = np.array(list(map(lambda x: label_map[x], petal_length_bin)))
petal_length_cat[:4]
##? 41. How to create a new column from existing columns of a numpy array?
# Difficulty Level: L2
# Q. Create a new column for volume in iris_2d,
# where volume is (pi x petallength x sepal_length^2)/3
# Input
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='object')
# Compute volume
sepallength = iris_2d[:, 0].astype('float')
petallength = iris_2d[:, 2].astype('float')
volume = (np.pi * petallength*sepallength**2)/3
# Introduce new dimension to match iris_2d's
volume = volume[:, np.newaxis]
# Add the new column
out = np.hstack([iris_2d, volume])
out[:4]
##? 42. How to do probabilistic sampling in numpy?
# Difficulty Level: L3
# Q. Randomly sample iris's species such that setosa
# is twice the number of versicolor and virginica
# Import iris keeping the text column intact
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
#Get species column
species = iris[:, 4]
#1 Generate Probablistically.
np.random.seed(100)
a = np.array(['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'])
out = np.random.choice(a, 150, p=[0.5, 0.25, 0.25])
#Checking counts
np.unique(out[:], return_counts=True)
#2 Probablistic Sampling #preferred
np.random.seed(100)
probs = np.r_[np.linspace(0, 0.500, num=50), np.linspace(0.501, .750, num=50), np.linspace(.751, 1.0, num=50)]
index = np.searchsorted(probs, np.random.random(150))
species_out = species[index]
print(np.unique(species_out, return_counts=True))
# Approach 2 is preferred because it creates an index variable that can be
# used to sample 2d tabular data.
##? 43. How to get the second largest value of an array when grouped by another array?
# Difficulty Level: L2
# Q. What is the value of second longest petallength of species setosa
# Input
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
petal_setosa = iris[iris[:, 4]==b'Iris-setosa', [2]].astype('float')
#1
#Note. Option 1 will return the second largest value 1.7, but with no repeats (np.unique() removes duplicates)
np.unique(np.sort(petal_setosa))[-2]
#Note, options 2 and 3. these will return 1.9 because that is the second largest value.
#2
petal_setosa[np.argpartition(petal_setosa, -2)[-2]]
#3
petal_setosa[petal_setosa.argsort()[-2]]
#4
unq = np.unique(petal_setosa)
unq[np.argpartition(unq, -2)[-2]]
#Note: This method still gives back 1.9. As that is the 2nd largest value,
#So you'd have to filter for unique values. Then do the argpart on the unq array
##? 44. How to sort a 2D array by a column
# Difficulty Level: L2
# Q. Sort the iris dataset based on sepallength column.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
# dtype = [('sepallength', float), ('sepalwidth', float), ('petallength', float), ('petalwidth', float),('species', 'S10')]
iris = np.genfromtxt(url, delimiter=',', dtype="object")
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
#1
print(iris[iris[:,0].argsort()][:20])
#2
#!Only captures first column to sort
np.sort(iris[:, 0], axis=0)
#3
sorted(iris, key=lambda x: x[0])
##? 45. How to find the most frequent value in a numpy array?
# Difficulty Level: L1
# Q. Find the most frequent value of petal length (3rd column) in iris dataset.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
vals, counts = np.unique(iris[:, 2], return_counts=True)
print(vals[np.argmax(counts)])
##? 46. How to find the position of the first occurrence of a value greater than a given value?
# Difficulty Level: L2
# Q. Find the position of the first occurrence of a value greater than 1.0 in petalwidth 4th column of iris dataset.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
#1
np.argwhere(iris[:, 3].astype(float) > 1.0)[0]
# 47. How to replace all values greater than a given value to a given cutoff?
# Difficulty Level: L2
# Q. From the array a, replace all values greater than 30 to 30 and less than 10 to 10.
np.set_printoptions(precision=2)
np.random.seed(100)
a = np.random.uniform(1,50, 20)
#1
np.clip(a, a_min=10, a_max=30)
#2
np.where(a < 10, 10, np.where(a > 30, 30, a))
#Tangent - Filtering condition
#Say we only want the values above 10 and below 30. Or operator | should help there.
filt_cond = (a < 10) | (a > 30)
a[filt_cond]
##? 48. How to get the positions of top n values from a numpy array?
# Difficulty Level: L2
# Q. Get the positions of top 5 maximum values in a given array a.
np.random.seed(100)
a = np.random.uniform(1,50, 20)
#1
a.argsort()[:5]
#2
np.argpartition(-a, 5)[:5]
# or (order is reversed though)
np.argpartition(a, -5)[-5:]
#To get the values.
#1
a[a.argsort()][-5:]
#2
np.sort(a)[-5:]
#3
np.partition(a, kth=-5)[-5:]
#4
a[np.argpartition(-a, 5)][:5]
#or
a[np.argpartition(a, -5)][-5:]
##? 49. How to compute the row wise counts of all possible values in an array?
# Difficulty Level: L4
# Q. Compute the counts of unique values row-wise.
np.random.seed(100)
arr = np.random.randint(1,11,size=(6, 10))
#Add a column of of the counts of each row
#Tangent fun
counts = np.array([np.unique(row).size for row in arr])
counts = counts.reshape(arr.shape[0], 1)
arr = np.hstack([arr, counts])
arr
#1
def row_counts(arr2d):
count_arr = [np.unique(row, return_counts=True) for row in arr2d]
return [[int(b[a==i]) if i in a else 0 for i in np.unique(arr2d)] for a, b in count_arr]
print(np.arange(1, 11))
row_counts(arr)
#2
arr = np.array([np.array(list('<NAME>')), np.array(list('narendramodi')), np.array(list('jjayalalitha'))])
print(np.unique(arr))
row_counts(arr)
##? 50. How to convert an array of arrays into a flat 1d array?
# Difficulty Level: 2
# Q. Convert array_of_arrays into a flat linear 1d array.
# Input:
arr1 = np.arange(3)
arr2 = np.arange(3,7)
arr3 = np.arange(7,10)
array_of_arrays = np.array([arr1, arr2, arr3])
array_of_arrays
#1 - List comp
arr_2d = [a for arr in array_of_arrays for a in arr]
arr_2d
#2 - concatenate
arr_2d = np.concatenate([arr1, arr2, arr3])
arr_2d
#3 - hstack
arr_2d = np.hstack([arr1, arr2, arr3])
arr_2d
#4 - ravel
arr_2d = np.concatenate(array_of_arrays).ravel() #ravel flattens the array
arr_2d
##? 51. How to generate one-hot encodings for an array in numpy?
# Difficulty Level L4
# Q. Compute the one-hot encodings (dummy binary variables for each unique value in the array)
# Input
np.random.seed(101)
arr = np.random.randint(1,11, size=20)
arr
#1
def one_hot_encode(arr):
uniqs = np.unique(arr)
out = np.zeros((arr.shape[0], uniqs.shape[0]))
for i, k in enumerate(arr):
out[i, k-1] = 1
return out
print("\t",np.arange(1, 11))
one_hot_encode(arr)
#2
(arr[:, None] == np.unique(arr)).view(np.int8)
##? 52. How to create row numbers grouped by a categorical variable?
# Difficulty Level: L3
# Q. Create row numbers grouped by a categorical variable.
# Use the following sample from iris species as input.
#Input
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
species = np.genfromtxt(url, delimiter=',', dtype='str', usecols=4)
#choose 20 species randomly
species_small = np.sort(np.random.choice(species, size=20))
species_small
#1
print([i for val in np.unique(species_small) for i, grp in enumerate(species_small[species_small==val])])
##? 53. How to create group ids based on a given categorical variable?
# Difficulty Level: L4
# Q. Create group ids based on a given categorical variable.
# Use the following sample from iris species as input.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
species = np.genfromtxt(url, delimiter=',', dtype='str', usecols=4)
species_small = np.sort(np.random.choice(species, size=20))
species_small
#1
[np.argwhere(np.unique(species_small) == s).tolist()[0][0] for val in np.unique(species_small) for s in species_small[species_small==val]]
#2
# Solution: For Loop version
output = []
uniqs = np.unique(species_small)
for val in uniqs: # uniq values in group
for s in species_small[species_small==val]: # each element in group
groupid = np.argwhere(uniqs == s).tolist()[0][0] # groupid
output.append(groupid)
print(output)
##? 54. How to rank items in an array using numpy?
# Difficulty Level: L2
# Q. Create the ranks for the given numeric array a.
#Input
np.random.seed(10)
a = np.random.randint(20, size=10)
print(a)
a.argsort().argsort()
##? 55. How to rank items in a multidimensional array using numpy?
# Difficulty Level: L3
# Q. Create a rank array of the same shape as a given numeric array a.
#Input
np.random.seed(10)
a = np.random.randint(20, size=[5,5])
print(a)
#1
print(a.ravel().argsort().argsort().reshape(a.shape))
#2
#Ranking the rows
tmp = a.argsort()[::-1]
np.arange(len(a))[tmp]+1
#2b
#Alternate ranking of rows (8x faster)
sidx = np.argsort(a, axis=1)
# Store shape info
m,n = a.shape
# Initialize output array
out = np.empty((m,n),dtype=int)
# Use sidx as column indices, while a range array for the row indices
# to select one element per row. Since sidx is a 2D array of indices
# we need to use a 2D extended range array for the row indices
out[np.arange(m)[:,None], sidx] = np.arange(n)
#3
#Ranking the columns
sidx = np.argsort(a, axis=0)
out[sidx, np.arange(n)] = np.arange(m)[:,None]
#4
#Ranking all the columns
tmp = a.argsort(axis=0).argsort(axis=0)[::-1]
np.arange(len(a))[tmp]+1
#3b Ranks for first column
tmp[:,0]
#3c Ranks for second column
tmp[:,1]
##? 56. How to find the maximum value in each row of a numpy array 2d?
# DifficultyLevel: L2
# Q. Compute the maximum for each row in the given array.
#Input
np.random.seed(100)
a = np.random.randint(1,10, [5,3])
a
#1
[np.max(row) for row in a]
#2
np.amax(a, axis=1)
#3
np.apply_along_axis(np.max, arr=a, axis=1)
from .ConfidenceIntervalsOnlySamples import ConfidenceIntervalsOnlySamples
import numpy as np
class ConfidenceIntervalsOnlySamplesClassification(ConfidenceIntervalsOnlySamples):
def _stats_and_plot(self, baseName, batch_samples_list, real_valu_list, extra_batch_dict):
all_samples = np.concatenate(batch_samples_list, axis=0)
y = np.concatenate(real_valu_list, axis=0)
nb, no, ns = all_samples.shape
cumulative_preds = np.sum(all_samples, axis=2)
predictions_forced = np.argmax(cumulative_preds, axis=1)
# -*- coding: utf-8 -*-
from darkflow.net.build import TFNet
import cv2
import os
import json
import numpy as np
from PIL import Image
import random
import csv
import sys
import math
from scipy import genfromtxt
from sklearn.cluster import KMeans
import pandas as pd
from sklearn.externals import joblib
from sklearn import svm
import matplotlib
save_path = 'sample_img/output/'
open_path = 'sample_img/'
# detection function
def inputdata(image_name):
os.chdir('/var/www/KB_1810/learn/')
options = {"model": "/var/www/KB_1810/learn/cfg/yolov2-voc.cfg", "load": "/var/www/KB_1810/learn/bin/yolo_learn.weights", "threshold": 0.4, "gpu": 0.3}
tfnet = TFNet(options)
input_image = image_name
image_folder = "sample_img"
current_path = os.getcwd()
output_file = "out"
current_path = os.path.join(current_path,image_folder)
output_path = os.path.join(current_path,output_file)
if not os.path.exists(output_path):
print('Creating output path {}'.format(output_path))
os.mkdir(output_path)
src = cv2.imread(os.path.join(current_path,input_image))
dst = src
#cv2.imshow("img", src)
result, result1 = tfnet.return_predict(src,dst)
print(result)
#cv2.imshow("img_out", dst)
cv2.waitKey()
cv2.imwrite(os.path.join(output_path, input_image), dst)
cv2.imwrite("result1.png",dst)
return result1
# crop each detected region into its own image
def image_split(img_name):
global save_path
global open_path
#save_path1 = 'sample_img'
img = img_name
# run detection; boxdata holds the object bounding-box coordinates
boxdata = inputdata(img)
subregion = list()
pic = Image.open(open_path + img)
# split the image into the detected regions
for boxs in boxdata:
box = (int(boxs[0]), int(boxs[2]), int(boxs[1]), int(boxs[3]))
#print(box)
subregion.append(pic.crop(box))
for num in range(len(boxdata)):
subregion[num].save(save_path +str(num) + 'bus.jpg',"JPEG")
return boxdata
# extract the point from the point set ps that is closest to point p0
def serch_neighbourhood(p0, ps):
L = np.array([])
for i in range(ps.shape[0]):
L = np.append(L,np.linalg.norm(ps[i]-p0))
return ps[np.argmin(L)]
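# Sketch of an equivalent vectorized form (not part of the original script), assuming
# ps is an (N, d) array of points:
#   def serch_neighbourhood_fast(p0, ps):
#       return ps[np.argmin(np.linalg.norm(ps - p0, axis=1))]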
import inspect
import logging
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from copy import deepcopy
import gym
import numpy as np
import pybullet as p
from future.utils import with_metaclass
from igibson.controllers import ControlType, create_controller
from igibson.external.pybullet_tools.utils import get_joint_info
from igibson.object_states.utils import clear_cached_states
from igibson.objects.stateful_object import StatefulObject
from igibson.utils.python_utils import assert_valid_key, merge_nested_dicts
from igibson.utils.utils import rotate_vector_3d
log = logging.getLogger(__name__)
# Global dicts that will contain mappings
REGISTERED_ROBOTS = {}
ROBOT_TEMPLATE_CLASSES = {
"BaseRobot",
"ActiveCameraRobot",
"TwoWheelRobot",
"ManipulationRobot",
"LocomotionRobot",
}
def register_robot(cls):
if cls.__name__ not in REGISTERED_ROBOTS and cls.__name__ not in ROBOT_TEMPLATE_CLASSES:
REGISTERED_ROBOTS[cls.__name__] = cls
class BaseRobot(StatefulObject):
"""
Base class for mujoco xml/ROS urdf based robot agents.
This class handles object loading, and provides method interfaces that should be
implemented by subclassed robots.
"""
def __init_subclass__(cls, **kwargs):
"""
Registers all subclasses as part of this registry. This is useful to decouple internal codebase from external
user additions. This way, users can add their custom robot by simply extending this Robot class,
and it will automatically be registered internally. This allows users to then specify their robot
directly in string-from in e.g., their config files, without having to manually set the str-to-class mapping
in our code.
"""
if not inspect.isabstract(cls):
register_robot(cls)
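# Sketch (hypothetical subclass, not part of iGibson): any concrete subclass -- i.e. one
# that implements all abstract methods -- is registered automatically when its class body
# is executed, so it can later be referenced by name (e.g. in config files):
#   class MyRobot(BaseRobot):
#       ...          # implement the abstract properties/methods here
#   "MyRobot" in REGISTERED_ROBOTS   # -> True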
def __init__(
self,
name=None,
control_freq=None,
action_type="continuous",
action_normalize=True,
proprio_obs="default",
reset_joint_pos=None,
controller_config=None,
base_name=None,
scale=1.0,
self_collision=False,
**kwargs
):
"""
:param name: None or str, name of the robot object
:param control_freq: float, control frequency (in Hz) at which to control the robot. If set to be None,
simulator.import_object will automatically set the control frequency to be 1 / render_timestep by default.
:param action_type: str, one of {discrete, continuous} - what type of action space to use
:param action_normalize: bool, whether to normalize inputted actions. This will override any default values
specified by this class.
:param proprio_obs: str or tuple of str, proprioception observation key(s) to use for generating proprioceptive
observations. If str, should be exactly "default" -- this results in the default proprioception observations
being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict for valid key choices
:param reset_joint_pos: None or Array[float], if specified, should be the joint positions that the robot should
be set to during a reset. If None (default), self.default_joint_pos will be used instead.
:param controller_config: None or Dict[str, ...], nested dictionary mapping controller name(s) to specific controller
configurations for this robot. This will override any default values specified by this class.
:param base_name: None or str, robot link name that will represent the entire robot's frame of reference. If not None,
this should correspond to one of the link names found in this robot's corresponding URDF / MJCF file.
None defaults to the base link name used in @model_file
:param scale: float, scaling factor for model (default is 1.0)
:param self_collision: bool, whether to enable self collision
:param **kwargs: see StatefulObject
"""
if type(name) == dict:
raise ValueError(
"Robot name is a dict. You are probably using the deprecated constructor API which takes in robot_config (a dict) as input. Check the new API in BaseRobot."
)
super(BaseRobot, self).__init__(name=name, category="agent", abilities={"robot": {}}, **kwargs)
self.base_name = base_name
self.control_freq = control_freq
self.scale = scale
self.self_collision = self_collision
assert_valid_key(key=action_type, valid_keys={"discrete", "continuous"}, name="action type")
self.action_type = action_type
self.action_normalize = action_normalize
self.proprio_obs = self.default_proprio_obs if proprio_obs == "default" else list(proprio_obs)
self.reset_joint_pos = reset_joint_pos if reset_joint_pos is None else np.array(reset_joint_pos)
self.controller_config = {} if controller_config is None else controller_config
# Initialize internal attributes that will be loaded later
# These will have public interfaces
self.simulator = None
self.model_type = None
self.action_list = None # Array of discrete actions to deploy
self._last_action = None
self._links = None
self._joints = None
self._controllers = None
self._mass = None
self._joint_state = { # This is filled in periodically every time self.update_state() is called
"unnormalized": {
"position": None,
"velocity": None,
"torque": None,
},
"normalized": {
"position": None,
"velocity": None,
"torque": None,
},
"at_limits": None,
}
def _load(self, simulator):
"""
Loads this pybullet model into the simulation. Should return a list of unique body IDs corresponding
to this model.
:param simulator: Simulator, iGibson simulator reference
:return Array[int]: List of unique pybullet IDs corresponding to this model. This will usually
only be a single value
"""
log.debug("Loading robot model file: {}".format(self.model_file))
# A persistent reference to simulator is needed for AG in ManipulationRobot
self.simulator = simulator
# Set the control frequency if one was not provided.
expected_control_freq = 1.0 / simulator.render_timestep
if self.control_freq is None:
log.debug(
"Control frequency is None - being set to default of 1 / render_timestep: %.4f", expected_control_freq
)
self.control_freq = expected_control_freq
else:
assert np.isclose(
expected_control_freq, self.control_freq
), "Stored control frequency does not match environment's render timestep."
# Set flags for loading model
flags = p.URDF_USE_MATERIAL_COLORS_FROM_MTL
if self.self_collision:
flags = flags | p.URDF_USE_SELF_COLLISION | p.URDF_USE_SELF_COLLISION_EXCLUDE_PARENT
# Run some sanity checks and load the model
model_file_type = self.model_file.split(".")[-1]
if model_file_type == "urdf":
self.model_type = "URDF"
body_ids = (p.loadURDF(self.model_file, globalScaling=self.scale, flags=flags),)
else:
self.model_type = "MJCF"
assert self.scale == 1.0, (
"robot scale must be 1.0 because pybullet does not support scaling " "for MJCF model (p.loadMJCF)"
)
body_ids = p.loadMJCF(self.model_file, flags=flags)
# Load into simulator and initialize states
for body_id in body_ids:
simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params)
return body_ids
def load(self, simulator):
# Call the load function on the BaseObject through StatefulObject. This sets up the body_ids member.
body_ids = super(BaseRobot, self).load(simulator)
# Grab relevant references from the body IDs
self._setup_references()
# Disable collisions
for names in self.disabled_collision_pairs:
link_a = self._links[names[0]]
link_b = self._links[names[1]]
p.setCollisionFilterPair(link_a.body_id, link_b.body_id, link_a.link_id, link_b.link_id, 0)
# Load controllers
self._load_controllers()
# Setup action space
self._action_space = (
self._create_discrete_action_space()
if self.action_type == "discrete"
else self._create_continuous_action_space()
)
# Validate this robot configuration
self._validate_configuration()
# Reset the robot and keep all joints still after loading
self.reset()
self.keep_still()
# Return the body IDs
return body_ids
def _setup_references(self):
"""
Parse the set of robot @body_ids to get properties including joint information and mass
"""
# Initialize link and joint dictionaries for this robot
self._links, self._joints, self._mass = OrderedDict(), OrderedDict(), 0.0
# Grab model base info
body_ids = self.get_body_ids()
assert (
self.base_name is not None or len(body_ids) == 1
), "Base name can be inferred only for single-body robots."
for body_id in body_ids:
base_name = p.getBodyInfo(body_id)[0].decode("utf8")
assert (
base_name not in self._links
), "Links of a robot, even if on different bodies, must be uniquely named."
self._links[base_name] = RobotLink(self, base_name, -1, body_id)
# if base_name is unspecified, use this link as robot_body (base_link).
if self.base_name is None:
self.base_name = base_name
# Loop through all robot links and infer relevant link / joint / mass references
for j in range(p.getNumJoints(body_id)):
self._mass += p.getDynamicsInfo(body_id, j)[0]
p.setJointMotorControl2(body_id, j, p.POSITION_CONTROL, positionGain=0.1, velocityGain=0.1, force=0)
_, joint_name, joint_type, _, _, _, _, _, _, _, _, _, link_name, _, _, _, _ = p.getJointInfo(body_id, j)
log.debug("Robot joint: {}".format(p.getJointInfo(body_id, j)))
joint_name = joint_name.decode("utf8")
assert (
joint_name not in self._joints
), "Joints of a robot, even if on different bodies, must be uniquely named."
link_name = link_name.decode("utf8")
assert (
link_name not in self._links
), "Links of a robot, even if on different bodies, must be uniquely named."
self._links[link_name] = RobotLink(self, link_name, j, body_id)
# We additionally create joint references if they are (not) of certain types
if joint_name[:6] == "ignore":
# We don't save a reference to this joint, but we disable its motor
PhysicalJoint(joint_name, j, body_id).disable_motor()
elif joint_name[:8] == "jointfix" or joint_type == p.JOINT_FIXED:
# Fixed joint, so we don't save a reference to this joint
pass
else:
# Default case, we store a reference
self._joints[joint_name] = PhysicalJoint(joint_name, j, body_id)
# Assert that the base link is link -1 of one of the robot's bodies.
assert self._links[self.base_name].link_id == -1, "Robot base link should be link -1 of some body."
# Set up any virtual joints for any non-base bodies.
virtual_joints = {joint.joint_name: joint for joint in self._setup_virtual_joints()}
assert self._joints.keys().isdisjoint(virtual_joints.keys())
self._joints.update(virtual_joints)
# Populate the joint states
self.update_state()
# Update the configs
for group in self.controller_order:
group_controller_name = (
self.controller_config[group]["name"]
if group in self.controller_config and "name" in self.controller_config[group]
else self._default_controllers[group]
)
self.controller_config[group] = merge_nested_dicts(
base_dict=self._default_controller_config[group][group_controller_name],
extra_dict=self.controller_config.get(group, {}),
)
# Update the reset joint pos
if self.reset_joint_pos is None:
self.reset_joint_pos = self.default_joint_pos
def _setup_virtual_joints(self):
"""Create and return any virtual joints a robot might need. Subclasses can implement this as necessary."""
return []
def _validate_configuration(self):
"""
Run any needed sanity checks to make sure this robot was created correctly.
"""
pass
def update_state(self):
"""
Updates the internal proprioceptive state of this robot, and returns the raw values
:return Tuple[Array[float], Array[float]]: The raw joint states, normalized joint states
for this robot
"""
# Grab raw values
joint_states = np.array([j.get_state() for j in self._joints.values()]).astype(np.float32).flatten()
joint_states_normalized = (
np.array([j.get_relative_state() for j in self._joints.values()]).astype(np.float32).flatten()
)
# Get raw joint values and normalized versions
self._joint_state["unnormalized"]["position"] = joint_states[0::3]
self._joint_state["unnormalized"]["velocity"] = joint_states[1::3]
self._joint_state["unnormalized"]["torque"] = joint_states[2::3]
self._joint_state["normalized"]["position"] = joint_states_normalized[0::3]
self._joint_state["normalized"]["velocity"] = joint_states_normalized[1::3]
self._joint_state["normalized"]["torque"] = joint_states_normalized[2::3]
# Infer whether joints are at their limits
self._joint_state["at_limits"] = 1.0 * (np.abs(self.joint_positions_normalized) > 0.99)
# Return the raw joint states
return joint_states, joint_states_normalized
def calc_state(self):
"""
Calculate proprioceptive states for the robot. By default, this is:
[pos, rpy, lin_vel, ang_vel, joint_states]
:return Array[float]: Flat array of proprioceptive states (e.g.: [position, orientation, ...])
"""
# Update states
joint_states, _ = self.update_state()
pos = self.get_position()
rpy = self.get_rpy()
# rotate linear and angular velocities to local frame
lin_vel = rotate_vector_3d(self.base_link.get_linear_velocity(), *rpy)
ang_vel = rotate_vector_3d(self.base_link.get_angular_velocity(), *rpy)
state = np.concatenate([pos, rpy, lin_vel, ang_vel, joint_states])
return state
def can_toggle(self, toggle_position, toggle_distance_threshold):
"""
Returns True if the part of the robot that can toggle a toggleable is within the given range of a
point corresponding to a toggle marker
by default, we assume robot cannot toggle toggle markers
:param toggle_position: Array[float], (x,y,z) cartesian position values as a reference point for evaluating
whether a toggle can occur
:param toggle_distance_threshold: float, distance value below which a toggle is allowed
:return bool: True if the part of the robot that can toggle a toggleable is within the given range of a
point corresponding to a toggle marker. By default, we assume robot cannot toggle toggle markers
"""
return False
def reset(self):
"""
Reset function for each specific robot. Can be overwritten by subclass
By default, sets all joint states (pos, vel) to 0, and resets all controllers.
"""
for joint, joint_pos in zip(self._joints.values(), self.reset_joint_pos):
joint.reset_state(joint_pos, 0.0)
for controller in self._controllers.values():
controller.reset()
def _load_controllers(self):
"""
Loads controller(s) to map inputted actions into executable (pos, vel, and / or torque) signals on this robot.
Stores created controllers as dictionary mapping controller names to specific controller
instances used by this robot.
"""
# Initialize controllers to create
self._controllers = OrderedDict()
# Loop over all controllers, in the order corresponding to @action dim
for name in self.controller_order:
assert_valid_key(key=name, valid_keys=self.controller_config, name="controller name")
cfg = self.controller_config[name]
# If we're using normalized action space, override the inputs for all controllers
if self.action_normalize:
cfg["command_input_limits"] = "default" # default is normalized (-1, 1)
# Create the controller
self._controllers[name] = create_controller(**cfg)
@abstractmethod
def _create_discrete_action_space(self):
"""
Create a discrete action space for this robot. Should be implemented by the subclass (if a subclass does not
support this type of action space, it should raise an error).
:return gym.space: Robot-specific discrete action space
"""
raise NotImplementedError
def _create_continuous_action_space(self):
"""
Create a continuous action space for this robot. By default, this loops over all controllers and
appends their respective input command limits to set the action space.
Any custom behavior should be implemented by the subclass (e.g.: if a subclass does not
support this type of action space, it should raise an error).
:return gym.space.Box: Robot-specific continuous action space
"""
# Action space is ordered according to the order in _default_controller_config control
low, high = [], []
for controller in self._controllers.values():
limits = controller.command_input_limits
low.append(np.array([-np.inf] * controller.command_dim) if limits is None else limits[0])
high.append(np.array([np.inf] * controller.command_dim) if limits is None else limits[1])
return gym.spaces.Box(
shape=(self.action_dim,), low=np.concatenate(low), high=np.concatenate(high), dtype=np.float32
)
def apply_action(self, action):
"""
Converts inputted actions into low-level control signals and deploys them on the robot
:param action: Array[float], n-DOF length array of actions to convert and deploy on the robot
"""
self._last_action = action
# Update state
self.update_state()
# If we're using discrete action space, we grab the specific action and use that to convert to control
if self.action_type == "discrete":
action = np.array(self.action_list[action])
# Run convert actions to controls
control, control_type = self._actions_to_control(action=action)
# Deploy control signals
self._deploy_control(control=control, control_type=control_type)
def _actions_to_control(self, action):
"""
Converts inputted @action into low level control signals to deploy directly on the robot.
This returns two arrays: the converted low level control signals and an array corresponding
to the specific ControlType for each signal.
:param action: Array[float], n-DOF length array of actions to convert and deploy on the robot
:return Tuple[Array[float], Array[ControlType]]: The (1) raw control signals to send to the robot's joints
and (2) control types for each joint
"""
# First, loop over all controllers, and calculate the computed control
control = OrderedDict()
idx = 0
for name, controller in self._controllers.items():
# Compose control_dict
control_dict = self.get_control_dict()
# Set command, then take a controller step
controller.update_command(command=action[idx : idx + controller.command_dim])
control[name] = {
"value": controller.step(control_dict=control_dict),
"type": controller.control_type,
}
# Update idx
idx += controller.command_dim
# Compose controls
u_vec = np.zeros(self.n_joints)
u_type_vec = np.array([ControlType.POSITION] * self.n_joints)
for group, ctrl in control.items():
idx = self._controllers[group].joint_idx
u_vec[idx] = ctrl["value"]
u_type_vec[idx] = ctrl["type"]
# Return control
return u_vec, u_type_vec
def _deploy_control(self, control, control_type):
"""
Deploys control signals @control with corresponding @control_type on this robot
:param control: Array[float], raw control signals to send to the robot's joints
:param control_type: Array[ControlType], control types for each joint
"""
# Run sanity check
joints = self._joints.values()
assert len(control) == len(control_type) == len(joints), (
"Control signals, control types, and number of joints should all be the same!"
"Got {}, {}, and {} respectively.".format(len(control), len(control_type), len(joints))
)
# Loop through all control / types, and deploy the signal
for joint, ctrl, ctrl_type in zip(joints, control, control_type):
if ctrl_type == ControlType.TORQUE:
joint.set_torque(ctrl)
elif ctrl_type == ControlType.VELOCITY:
joint.set_vel(ctrl)
elif ctrl_type == ControlType.POSITION:
joint.set_pos(ctrl)
else:
raise ValueError("Invalid control type specified: {}".format(ctrl_type))
def get_proprioception(self):
"""
:return Array[float]: numpy array of all robot-specific proprioceptive observations.
"""
proprio_dict = self._get_proprioception_dict()
return np.concatenate([proprio_dict[obs] for obs in self.proprio_obs])
def get_position_orientation(self):
"""
:return Tuple[Array[float], Array[float]]: pos (x,y,z) global cartesian coordinates, quat (x,y,z,w) global
orientation in quaternion form of this model's body (as taken at its body_id)
"""
pos, orn = p.getBasePositionAndOrientation(self.base_link.body_id)
return np.array(pos), np.array(orn)
def get_rpy(self):
"""
Return robot orientation in roll, pitch, yaw
:return: roll, pitch, yaw
"""
return self.base_link.get_rpy()
def set_joint_positions(self, joint_positions):
"""Set this robot's joint positions, where @joint_positions is an array"""
for joint, joint_pos in zip(self._joints.values(), joint_positions):
joint.reset_state(pos=joint_pos, vel=0.0)
def set_joint_states(self, joint_states):
"""Set this robot's joint states in the format of Dict[String: (q, q_dot)]]"""
for joint_name, joint in self._joints.items():
joint_position, joint_velocity = joint_states[joint_name]
joint.reset_state(pos=joint_position, vel=joint_velocity)
def get_joint_states(self):
"""Get this robot's joint states in the format of Dict[String: (q, q_dot)]]"""
joint_states = {}
for joint_name, joint in self._joints.items():
joint_position, joint_velocity, _ = joint.get_state()
joint_states[joint_name] = (joint_position, joint_velocity)
return joint_states
def get_linear_velocity(self):
"""
Get linear velocity of this robot (velocity associated with base link)
:return Array[float]: linear (x,y,z) velocity of this robot
"""
return self.base_link.get_linear_velocity()
def get_angular_velocity(self):
"""
Get angular velocity of this robot (velocity associated with base link)
:return Array[float]: angular (ax,ay,az) velocity of this robot
"""
return self.base_link.get_angular_velocity()
def set_position_orientation(self, pos, quat):
"""
Set model's global position and orientation
:param pos: Array[float], corresponding to (x,y,z) global cartesian coordinates to set
:param quat: Array[float], corresponding to (x,y,z,w) global quaternion orientation to set
"""
p.resetBasePositionAndOrientation(self.base_link.body_id, pos, quat)
clear_cached_states(self)
def set_base_link_position_orientation(self, pos, orn):
"""Set object base link position and orientation in the format of Tuple[Array[x, y, z], Array[x, y, z, w]]"""
dynamics_info = p.getDynamicsInfo(self.base_link.body_id, -1)
inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4]
pos, orn = p.multiplyTransforms(pos, orn, inertial_pos, inertial_orn)
self.set_position_orientation(pos, orn)
def get_base_link_position_orientation(self):
"""Get object base link position and orientation in the format of Tuple[Array[x, y, z], Array[x, y, z, w]]"""
dynamics_info = p.getDynamicsInfo(self.base_link.body_id, -1)
inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4]
inv_inertial_pos, inv_inertial_orn = p.invertTransform(inertial_pos, inertial_orn)
pos, orn = p.getBasePositionAndOrientation(self.base_link.body_id)
base_link_position, base_link_orientation = p.multiplyTransforms(pos, orn, inv_inertial_pos, inv_inertial_orn)
return np.array(base_link_position), np.array(base_link_orientation)
def get_control_dict(self):
"""
Grabs all relevant information that should be passed to each controller during each controller step.
:return Dict[str, Array[float]]: Keyword-mapped control values for this robot.
By default, returns the following:
- joint_position: (n_dof,) joint positions
- joint_velocity: (n_dof,) joint velocities
- joint_torque: (n_dof,) joint torques
- base_pos: (3,) (x,y,z) global cartesian position of the robot's base link
- base_quat: (4,) (x,y,z,w) global cartesian orientation of the robot's base link
"""
return {
"joint_position": self.joint_positions,
"joint_velocity": self.joint_velocities,
"joint_torque": self.joint_torques,
"base_pos": self.get_position(),
"base_quat": self.get_orientation(),
}
def dump_action(self):
"""Dump the last action applied to this robot. For use in demo collection."""
return self._last_action
def dump_config(self):
"""Dump robot config"""
return {
"name": self.name,
"control_freq": self.control_freq,
"action_type": self.action_type,
"action_normalize": self.action_normalize,
"proprio_obs": self.proprio_obs,
"reset_joint_pos": self.reset_joint_pos,
"controller_config": self.controller_config,
"base_name": self.base_name,
"scale": self.scale,
"self_collision": self.self_collision,
}
def dump_state(self):
"""Dump the state of the object other than what's not included in pybullet state."""
return {
"parent_state": super(BaseRobot, self).dump_state(),
"controllers": {
controller_name: controller.dump_state() for controller_name, controller in self._controllers.items()
},
}
def load_state(self, dump):
"""Dump the state of the object other than what's not included in pybullet state."""
super(BaseRobot, self).load_state(dump["parent_state"])
controller_dump = dump["controllers"]
for controller_name, controller in self._controllers.items():
controller.load_state(controller_dump[controller_name])
def _get_proprioception_dict(self):
"""
:return dict: keyword-mapped proprioception observations available for this robot. Can be extended by subclasses
"""
return {
"joint_qpos": self.joint_positions,
"joint_qpos_sin": np.sin(self.joint_positions),
"joint_qpos_cos": np.cos(self.joint_positions),
"joint_qvel": self.joint_velocities,
"joint_qtor": self.joint_torques,
"robot_pos": self.get_position(),
"robot_rpy": self.get_rpy(),
"robot_quat": self.get_orientation(),
"robot_lin_vel": self.get_linear_velocity(),
"robot_ang_vel": self.get_angular_velocity(),
}
@property
def proprioception_dim(self):
"""
:return int: Size of self.get_proprioception() vector
"""
return len(self.get_proprioception())
@property
def links(self):
"""
Links belonging to this robot.
:return OrderedDict[str, RobotLink]: Ordered Dictionary mapping robot link names to corresponding
RobotLink objects owned by this robot
"""
return self._links
@property
def joints(self):
"""
Joints belonging to this robot.
:return OrderedDict[str, RobotJoint]: Ordered Dictionary mapping robot joint names to corresponding
RobotJoint objects owned by this robot
"""
return self._joints
@property
def n_links(self):
"""
:return int: Number of links for this robot
"""
return len(list(self._links.keys()))
@property
def n_joints(self):
"""
:return int: Number of joints for this robot
"""
return len(list(self._joints.keys()))
@property
def base_link(self):
"""
Returns the RobotLink body corresponding to the link as defined by self.base_name.
Note that if base_name was not specified during this robot's initialization, this will default to be the
first link in the underlying robot model file.
:return RobotLink: robot's base link corresponding to self.base_name.
"""
assert self.base_name in self._links, "Cannot find base link '{}' in links! Valid options are: {}".format(
self.base_name, list(self._links.keys())
)
return self._links[self.base_name]
@property
def eyes(self):
"""
Returns the RobotLink corresponding to the robot's camera. Assumes that there is a link
with name "eyes" in the underlying robot model. If not, an error will be raised.
:return RobotLink: link containing the robot's camera
"""
assert "eyes" in self._links, "Cannot find 'eyes' in links, current link names are: {}".format(
list(self._links.keys())
)
return self._links["eyes"]
@property
def mass(self):
"""
Returns the mass of this robot. Default is 0.0 kg
:return float: Mass of this robot, in kg
"""
return self._mass
@property
def joint_position_limits(self):
"""
:return Tuple[Array[float], Array[float]]: (min, max) joint position limits, where each is an n-DOF length array
"""
return (self.joint_lower_limits, self.joint_upper_limits)
@property
def joint_velocity_limits(self):
"""
:return Tuple[Array[float], Array[float]]: (min, max) joint velocity limits, where each is an n-DOF length array
"""
return (
-np.array([j.max_velocity for j in self._joints.values()]),
np.array([j.max_velocity for j in self._joints.values()]),
)
@property
def joint_torque_limits(self):
"""
:return Tuple[Array[float], Array[float]]: (min, max) joint torque limits, where each is an n-DOF length array
"""
return (
-np.array([j.max_torque for j in self._joints.values()]),
np.array([j.max_torque for j in self._joints.values()]),
)
@property
def joint_positions(self):
"""
:return Array[float]: n-DOF length array of this robot's joint positions
"""
return deepcopy(self._joint_state["unnormalized"]["position"])
@property
def joint_velocities(self):
"""
:return Array[float]: n-DOF length array of this robot's joint velocities
"""
return deepcopy(self._joint_state["unnormalized"]["velocity"])
@property
def joint_torques(self):
"""
:return Array[float]: n-DOF length array of this robot's joint torques
"""
return deepcopy(self._joint_state["unnormalized"]["torque"])
@property
def joint_positions_normalized(self):
"""
:return Array[float]: n-DOF length array of this robot's normalized joint positions in range [-1, 1]
"""
return deepcopy(self._joint_state["normalized"]["position"])
@property
def joint_velocities_normalized(self):
"""
:return Array[float]: n-DOF length array of this robot's normalized joint velocities in range [-1, 1]
"""
return deepcopy(self._joint_state["normalized"]["velocity"])
@property
def joint_torques_normalized(self):
"""
:return Array[float]: n-DOF length array of this robot's normalized joint torques in range [-1, 1]
"""
return deepcopy(self._joint_state["normalized"]["torque"])
@property
def joint_at_limits(self):
"""
:return Array[float]: n-DOF length array specifying whether joint is at its limit,
with 1.0 --> at limit, otherwise 0.0
"""
return deepcopy(self._joint_state["at_limits"])
@property
def joint_has_limits(self):
"""
:return Array[bool]: n-DOF length array specifying whether joint has a limit or not
"""
return np.array([j.has_limit for j in self._joints.values()])
@property
@abstractmethod
def model_name(self):
"""
:return str: robot model name
"""
raise NotImplementedError
@property
def action_dim(self):
"""
:return int: Dimension of action space for this robot. By default,
is the sum over all controller action dimensions
"""
return sum([controller.command_dim for controller in self._controllers.values()])
@property
def action_space(self):
"""
Action space for this robot.
:return gym.space: Action space, either discrete (Discrete) or continuous (Box)
"""
return deepcopy(self._action_space)
@property
@abstractmethod
def controller_order(self):
"""
:return Tuple[str]: Ordering of the actions, corresponding to the controllers. e.g., ["base", "arm", "gripper"],
to denote that the action vector should be interpreted as first the base action, then arm command, then
gripper command
"""
raise NotImplementedError
@property
def controller_action_idx(self):
"""
:return: Dict[str, Array[int]]: Mapping from controller names (e.g.: head, base, arm, etc.) to corresponding
indices in the action vector
"""
dic = {}
idx = 0
for controller in self.controller_order:
cmd_dim = self._controllers[controller].command_dim
dic[controller] = np.arange(idx, idx + cmd_dim)
idx += cmd_dim
return dic
@property
def control_limits(self):
"""
:return: Dict[str, Any]: Keyword-mapped limits for this robot. Dict contains:
position: (min, max) joint limits, where min and max are N-DOF arrays
velocity: (min, max) joint velocity limits, where min and max are N-DOF arrays
torque: (min, max) joint torque limits, where min and max are N-DOF arrays
has_limit: (n_dof,) array where each element is True if that corresponding joint has a position limit
(otherwise, joint is assumed to be limitless)
"""
return {
"position": (self.joint_lower_limits, self.joint_upper_limits),
"velocity": (-self.max_joint_velocities, self.max_joint_velocities),
"torque": (-self.max_joint_torques, self.max_joint_torques),
"has_limit": self.joint_has_limits,
}
@property
def default_proprio_obs(self):
"""
:return Array[str]: Default proprioception observations to use
"""
return []
@property
@abstractmethod
def default_joint_pos(self):
"""
:return Array[float]: Default joint positions for this robot
"""
raise NotImplementedError
@property
@abstractmethod
def _default_controller_config(self):
"""
:return Dict[str, Any]: default nested dictionary mapping controller name(s) to specific controller
configurations for this robot. Note that the order specifies the sequence of actions to be received
from the environment.
Expected structure is as follows:
group1:
controller_name1:
controller_name1_params
...
controller_name2:
...
group2:
...
The @group keys specify the control type for various aspects of the robot, e.g.: "head", "arm", "base", etc.
@controller_name keys specify the supported controllers for that group. A default specification MUST be
specified for each controller_name. e.g.: IKController, DifferentialDriveController, JointController, etc.
"""
return {}
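# Illustrative example of the expected structure (group and controller names are taken
# from the docstring above; "command_input_limits" is the parameter used elsewhere in
# this class, any other entries would be controller-specific):
#   {
#       "arm": {
#           "JointController": {"command_input_limits": "default"},
#           "IKController": {"command_input_limits": "default"},
#       },
#       "base": {
#           "DifferentialDriveController": {"command_input_limits": "default"},
#       },
#   }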
@property
@abstractmethod
def _default_controllers(self):
"""
:return Dict[str, str]: Maps robot group (e.g. base, arm, etc.) to default controller class name to use
(e.g. IKController, JointController, etc.)
"""
return {}
@property
def joint_damping(self):
"""
:return: Array[float], joint damping values for this robot
"""
return np.array([joint.damping for joint in self._joints.values()])
@property
def joint_lower_limits(self):
"""
:return: Array[float], minimum values for this robot's joints. If joint does not have a range, returns -1000
for that joint
"""
return np.array([joint.lower_limit if joint.has_limit else -1000.0 for joint in self._joints.values()])
@property
def joint_upper_limits(self):
"""
:return: Array[float], maximum values for this robot's joints. If joint does not have a range, returns 1000
for that joint
"""
return np.array([joint.upper_limit if joint.has_limit else 1000.0 for joint in self._joints.values()])
@property
def joint_range(self):
"""
:return: Array[float], joint range values for this robot's joints
"""
return self.joint_upper_limits - self.joint_lower_limits
@property
def max_joint_velocities(self):
"""
:return: Array[float], maximum velocities for this robot's joints
"""
return np.array([joint.max_velocity for joint in self._joints.values()])
@property
def max_joint_torques(self):
"""
:return: Array[float], maximum torques for this robot's joints
"""
return np.array([joint.max_torque for joint in self._joints.values()])
@property
def disabled_collision_pairs(self):
"""
:return Tuple[Tuple[str, str]]: List of collision pairs to disable. Defaults to an empty list
"""
return []
@property
@abstractmethod
def model_file(self):
"""
:return str: absolute path to robot model's URDF / MJCF file
"""
raise NotImplementedError
def keep_still(self):
"""
Keep the robot still. Apply zero velocity to all joints.
"""
for joint in self._joints.values():
joint.set_vel(0.0)
class RobotLink:
"""
Body part (link) of Robots
"""
def __init__(self, robot, link_name, link_id, body_id):
"""
:param robot: BaseRobot, the robot this link belongs to.
:param link_name: str, name of the link corresponding to @link_id
:param link_id: int, ID of this link within the link(s) found in the body corresponding to @body_id
:param body_id: Robot body ID containing this link
"""
# Store args and initialize state
self.robot = robot
self.link_name = link_name
self.link_id = link_id
self.body_id = body_id
self.initial_pos, self.initial_quat = self.get_position_orientation()
self.movement_cid = -1
def get_name(self):
"""
Get name of this link
"""
return self.link_name
def get_position_orientation(self):
"""
Get pose of this link
:return Tuple[Array[float], Array[float]]: pos (x,y,z) cartesian coordinates, quat (x,y,z,w)
orientation in quaternion form of this link
"""
if self.link_id == -1:
pos, quat = p.getBasePositionAndOrientation(self.body_id)
else:
_, _, _, _, pos, quat = p.getLinkState(self.body_id, self.link_id)
return np.array(pos), np.array(quat)
def get_position(self):
"""
:return Array[float]: (x,y,z) cartesian coordinates of this link
"""
return self.get_position_orientation()[0]
def get_orientation(self):
"""
:return Array[float]: (x,y,z,w) orientation in quaternion form of this link
"""
return self.get_position_orientation()[1]
def get_local_position_orientation(self):
"""
Get pose of this link in the robot's base frame.
:return Tuple[Array[float], Array[float]]: pos (x,y,z) cartesian coordinates, quat (x,y,z,w)
orientation in quaternion form of this link
"""
base = self.robot.base_link
return p.multiplyTransforms(
*p.invertTransform(*base.get_position_orientation()), *self.get_position_orientation()
)
def get_rpy(self):
"""
:return Array[float]: (r,p,y) orientation in euler form of this link
"""
return np.array(p.getEulerFromQuaternion(self.get_orientation()))
def set_position(self, pos):
"""
Sets the link's position
:param pos: Array[float], corresponding to (x,y,z) cartesian coordinates to set
"""
old_quat = self.get_orientation()
self.set_position_orientation(pos, old_quat)
def set_orientation(self, quat):
"""
Set the link's global orientation
:param quat: Array[float], corresponding to (x,y,z,w) quaternion orientation to set
"""
old_pos = self.get_position()
self.set_position_orientation(old_pos, quat)
def set_position_orientation(self, pos, quat):
"""
Set model's global position and orientation. Note: only supported if this is the base link (ID = -1!)
:param pos: Array[float], corresponding to (x,y,z) global cartesian coordinates to set
:param quat: Array[float], corresponding to (x,y,z,w) global quaternion orientation to set
"""
assert self.link_id == -1, "Can only set pose for a base link (id = -1)! Got link id: {}.".format(self.link_id)
p.resetBasePositionAndOrientation(self.body_id, pos, quat)
def get_velocity(self):
"""
Get velocity of this link
:return Tuple[Array[float], Array[float]]: linear (x,y,z) velocity, angular (ax,ay,az)
velocity of this link
"""
if self.link_id == -1:
lin, ang = p.getBaseVelocity(self.body_id)
else:
_, _, _, _, _, _, lin, ang = p.getLinkState(self.body_id, self.link_id, computeLinkVelocity=1)
return np.array(lin), np.array(ang)
def get_linear_velocity(self):
"""
Get linear velocity of this link
:return Array[float]: linear (x,y,z) velocity of this link
"""
return self.get_velocity()[0]
def get_angular_velocity(self):
"""
Get angular velocity of this link
:return Array[float]: angular (ax,ay,az) velocity of this link
"""
return self.get_velocity()[1]
def contact_list(self):
"""
Get contact points of the body part
:return Array[ContactPoints]: list of contact points seen by this link
"""
return p.getContactPoints(self.body_id, -1, self.link_id, -1)
def force_wakeup(self):
"""
Forces a wakeup for this robot. Defaults to no-op.
"""
p.changeDynamics(self.body_id, self.link_id, activationState=p.ACTIVATION_STATE_WAKE_UP)
class RobotJoint(with_metaclass(ABCMeta, object)):
"""
Joint of a robot
"""
@property
@abstractmethod
def joint_name(self):
pass
@property
@abstractmethod
def joint_type(self):
pass
@property
@abstractmethod
def lower_limit(self):
pass
@property
@abstractmethod
def upper_limit(self):
pass
@property
@abstractmethod
def max_velocity(self):
pass
@property
@abstractmethod
def max_torque(self):
pass
@property
@abstractmethod
def damping(self):
pass
@abstractmethod
def get_state(self):
"""
Get the current state of the joint
:return Tuple[float, float, float]: (joint_pos, joint_vel, joint_tor) observed for this joint
"""
pass
@abstractmethod
def get_relative_state(self):
"""
Get the normalized current state of the joint
:return Tuple[float, float, float]: Normalized (joint_pos, joint_vel, joint_tor) observed for this joint
"""
pass
@abstractmethod
def set_pos(self, pos):
"""
Set position of joint (in metric space)
:param pos: float, desired position for this joint, in metric space
"""
pass
@abstractmethod
def set_vel(self, vel):
"""
Set velocity of joint (in metric space)
:param vel: float, desired velocity for this joint, in metric space
"""
pass
@abstractmethod
def set_torque(self, torque):
"""
Set torque of joint (in metric space)
:param torque: float, desired torque for this joint, in metric space
"""
pass
@abstractmethod
def reset_state(self, pos, vel):
"""
Reset pos and vel of joint in metric space
:param pos: float, desired position for this joint, in metric space
:param vel: float, desired velocity for this joint, in metric space
"""
pass
@property
def has_limit(self):
"""
:return bool: True if this joint has a limit, else False
"""
return self.lower_limit < self.upper_limit
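# For example, with the URDF defaults noted in PhysicalJoint below
# (lower_limit = 0.0, upper_limit = -1.0), lower < upper is False, so the joint is
# treated as limitless.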
class PhysicalJoint(RobotJoint):
"""
A robot joint that exists in the physics simulation (e.g. in pybullet).
"""
def __init__(self, joint_name, joint_id, body_id):
"""
:param joint_name: str, name of the joint corresponding to @joint_id
:param joint_id: int, ID of this joint within the joint(s) found in the body corresponding to @body_id
:param body_id: Robot body ID containing this link
"""
# Store args and initialize state
self._joint_name = joint_name
self.joint_id = joint_id
self.body_id = body_id
# read joint type and joint limit from the URDF file
# lower_limit, upper_limit, max_velocity, max_torque = <limit lower=... upper=... velocity=... effort=.../>
# "effort" is approximately torque (revolute) / force (prismatic), but not exactly (ref: http://wiki.ros.org/pr2_controller_manager/safety_limits).
# if <limit /> does not exist, the following will be the default value
# lower_limit, upper_limit, max_velocity, max_torque = 0.0, -1.0, 0.0, 0.0
info = get_joint_info(self.body_id, self.joint_id)
self._joint_type = info.jointType
self._lower_limit = info.jointLowerLimit
self._upper_limit = info.jointUpperLimit
self._max_torque = info.jointMaxForce
self._max_velocity = info.jointMaxVelocity
self._damping = info.jointDamping
# if joint torque and velocity limits cannot be found in the model file, set a default value for them
if self._max_torque == 0.0:
self._max_torque = 100.0
if self._max_velocity == 0.0:
# if max_velocity and joint limit are missing for a revolute joint,
# it's likely to be a wheel joint and a high max_velocity is usually supported.
self._max_velocity = 15.0 if self._joint_type == p.JOINT_REVOLUTE and not self.has_limit else 1.0
@property
def joint_name(self):
return self._joint_name
@property
def joint_type(self):
return self._joint_type
@property
def lower_limit(self):
return self._lower_limit
@property
def upper_limit(self):
return self._upper_limit
@property
def max_velocity(self):
return self._max_velocity
@property
def max_torque(self):
return self._max_torque
@property
def damping(self):
return self._damping
def __str__(self):
return "idx: {}, name: {}".format(self.joint_id, self.joint_name)
def get_state(self):
"""
Get the current state of the joint
:return Tuple[float, float, float]: (joint_pos, joint_vel, joint_tor) observed for this joint
"""
x, vx, _, trq = p.getJointState(self.body_id, self.joint_id)
return x, vx, trq
def get_relative_state(self):
"""
Get the normalized current state of the joint
:return Tuple[float, float, float]: Normalized (joint_pos, joint_vel, joint_tor) observed for this joint
"""
pos, vel, trq = self.get_state()
# normalize position to [-1, 1]
if self.has_limit:
mean = (self.lower_limit + self.upper_limit) / 2.0
magnitude = (self.upper_limit - self.lower_limit) / 2.0
pos = (pos - mean) / magnitude
# (trying to) normalize velocity to [-1, 1]
vel /= self.max_velocity
# (trying to) normalize torque / force to [-1, 1]
trq /= self.max_torque
return pos, vel, trq
def set_pos(self, pos):
"""
Set position of joint (in metric space)
:param pos: float, desired position for this joint, in metric space
"""
if self.has_limit:
pos = np.clip(pos, self.lower_limit, self.upper_limit)
import numpy as np
from unittest import expectedFailure
from unittest import TestCase
from zlib import crc32
from pycqed.measurement.randomized_benchmarking.clifford_group import(
clifford_lookuptable, clifford_group_single_qubit,
X,Y,Z, H, S, S2, CZ)
import pycqed.measurement.randomized_benchmarking.randomized_benchmarking \
as rb
from pycqed.measurement.randomized_benchmarking.clifford_decompositions \
import(gate_decomposition, epstein_fixed_length_decomposition)
from pycqed.measurement.randomized_benchmarking import \
two_qubit_clifford_group as tqc
from pycqed.measurement.randomized_benchmarking.generate_clifford_hash_tables import construct_clifford_lookuptable
np.random.seed(0)
test_indices_2Q = np.random.randint(0, high=11520, size=50)
# To test all elements of the 2 qubit clifford group use:
# test_indices_2Q = np.arange(11520)
class TestLookuptable(TestCase):
def test_unique_mapping(self):
for row in clifford_lookuptable:
self.assertFalse(len(row) > len(set(row)))
def test_sum_of_rows(self):
expected_sum = np.sum(range(len(clifford_group_single_qubit)))
for row in clifford_lookuptable:
self.assertEqual(np.sum(row), expected_sum)
def test_element_index_in_group(self):
for row in clifford_lookuptable:
for el in row:
self.assertTrue(el < len(clifford_group_single_qubit))
class TestCalculateNetClifford(TestCase):
def test_identity_does_nothing(self):
id_seq = np.zeros(5)
net_cl = rb.calculate_net_clifford(id_seq)
self.assertEqual(net_cl, 0)
for i in range(len(clifford_group_single_qubit)):
id_seq[3] = i
net_cl = rb.calculate_net_clifford(id_seq)
self.assertEqual(net_cl, i)
def test_pauli_squared_is_ID(self):
for cl in [0, 3, 6, 9, 12]: # 12 is Hadamard
net_cl = rb.calculate_net_clifford([cl, cl])
self.assertEqual(net_cl, 0)
class TestRecoveryClifford(TestCase):
def testInversionRandomSequence(self):
random_cliffords = np.random.randint(0, len(clifford_group_single_qubit), 100)
net_cl = rb.calculate_net_clifford(random_cliffords)
for des_cl in range(len(clifford_group_single_qubit)):
rec_cliff = rb.calculate_recovery_clifford(net_cl, des_cl)
comb_seq = np.append(random_cliffords, rec_cliff)
comb_net_cl_simple = rb.calculate_net_clifford([net_cl, rec_cliff])
comb_net_cl = rb.calculate_net_clifford(comb_seq)
self.assertEqual(comb_net_cl, des_cl)
self.assertEqual(comb_net_cl_simple, des_cl)
class TestRB_sequence(TestCase):
def test_net_cliff(self):
for i in range(len(clifford_group_single_qubit)):
rb_seq = rb.randomized_benchmarking_sequence(500, desired_net_cl=i)
net_cliff = rb.calculate_net_clifford(rb_seq)
self.assertEqual(net_cliff, i)
def test_seed_reproduces(self):
rb_seq_a = rb.randomized_benchmarking_sequence(500, seed=5)
rb_seq_b = rb.randomized_benchmarking_sequence(500, seed=None)
rb_seq_c = rb.randomized_benchmarking_sequence(500, seed=5)
rb_seq_d = rb.randomized_benchmarking_sequence(500, seed=None)
self.assertTrue((rb_seq_a == rb_seq_c).all())
self.assertTrue((rb_seq_a != rb_seq_b).any())
self.assertTrue((rb_seq_c != rb_seq_b).any())
self.assertTrue((rb_seq_b != rb_seq_d).any())
class TestGateDecomposition(TestCase):
def test_unique_elements(self):
for gate in gate_decomposition:
self.assertEqual(gate_decomposition.count(gate), 1)
def test_average_number_of_gates_epst_efficient(self):
from itertools import chain
avg_nr_gates = len(list(chain(*gate_decomposition)))/24
self.assertEqual(avg_nr_gates, 1.875)
def test_average_number_of_gates_epst_fixed_length(self):
from itertools import chain
avg_nr_gates = len(list(chain(*epstein_fixed_length_decomposition)))/24
self.assertEqual(avg_nr_gates, 3)
######################################################################
# Two qubit clifford group below
######################################################################
class TestHashedLookuptables(TestCase):
def test_single_qubit_hashtable_constructed(self):
hash_table = construct_clifford_lookuptable(tqc.SingleQubitClifford,
np.arange(24))
for i in range(24):
Cl = tqc.SingleQubitClifford(i)
target_hash = crc32(Cl.pauli_transfer_matrix.round().astype(int))
table_idx = hash_table.index(target_hash)
self.assertEqual(table_idx, i)
def test_single_qubit_hashtable_file(self):
hash_table = tqc.get_single_qubit_clifford_hash_table()
for i in range(24):
Cl = tqc.SingleQubitClifford(i)
target_hash = crc32(Cl.pauli_transfer_matrix.round().astype(int))
table_idx = hash_table.index(target_hash)
self.assertEqual(table_idx, i)
def test_two_qubit_hashtable_constructed(self):
hash_table = construct_clifford_lookuptable(tqc.TwoQubitClifford,
np.arange(11520))
for i in test_indices_2Q:
Cl = tqc.TwoQubitClifford(i)
target_hash = crc32(Cl.pauli_transfer_matrix.round().astype(int))
table_idx = hash_table.index(target_hash)
self.assertEqual(table_idx, i)
def test_two_qubit_hashtable_file(self):
hash_table = tqc.get_two_qubit_clifford_hash_table()
for i in test_indices_2Q:
Cl = tqc.TwoQubitClifford(i)
target_hash = crc32(Cl.pauli_transfer_matrix.round().astype(int))
table_idx = hash_table.index(target_hash)
self.assertEqual(table_idx, i)
def test_get_clifford_id(self):
for i in range(24):
Cl = tqc.SingleQubitClifford(i)
idx = tqc.get_clifford_id(Cl.pauli_transfer_matrix)
self.assertEqual(idx, Cl.idx)
for i in test_indices_2Q:
Cl = tqc.TwoQubitClifford(i)
idx = tqc.get_clifford_id(Cl.pauli_transfer_matrix)
self.assertEqual(idx, Cl.idx)
class Test_CliffordGroupProperties(TestCase):
def test_single_qubit_group(self):
hash_table = tqc.get_single_qubit_clifford_hash_table()
self.assertEqual(len(hash_table), 24)
self.assertEqual(len(np.unique(hash_table)), 24)
# Testing the subgroups of the Clifford group
def test_single_qubit_like_PTM(self):
hash_table = []
for idx in np.arange(24**2):
clifford = tqc.single_qubit_like_PTM(idx)
hash_val = crc32(clifford.round().astype(int))
hash_table.append(hash_val)
self.assertEqual(len(hash_table), 24**2)
self.assertEqual(len(np.unique(hash_table)), 24**2)
with self.assertRaises(AssertionError):
clifford = tqc.single_qubit_like_PTM(24**2+1)
def test_CNOT_like_PTM(self):
hash_table = []
for idx in np.arange(5184):
clifford = tqc.CNOT_like_PTM(idx)
hash_val = crc32(clifford.round().astype(int))
hash_table.append(hash_val)
self.assertEqual(len(hash_table), 5184)
self.assertEqual(len(np.unique(hash_table)), 5184)
with self.assertRaises(AssertionError):
clifford = tqc.CNOT_like_PTM(5184**2+1)
def test_iSWAP_like_PTM(self):
hash_table = []
for idx in np.arange(5184):
clifford = tqc.iSWAP_like_PTM(idx)
hash_val = crc32(clifford.round().astype(int))
hash_table.append(hash_val)
self.assertEqual(len(hash_table), 5184)
self.assertEqual(len(np.unique(hash_table)), 5184)
with self.assertRaises(AssertionError):
clifford = tqc.iSWAP_like_PTM(5184+1)
def test_SWAP_like_PTM(self):
hash_table = []
for idx in np.arange(24**2):
clifford = tqc.SWAP_like_PTM(idx)
hash_val = crc32(clifford.round().astype(int))
hash_table.append(hash_val)
self.assertEqual(len(hash_table), 24**2)
self.assertEqual(len(np.unique(hash_table)), 24**2)
with self.assertRaises(AssertionError):
clifford = tqc.SWAP_like_PTM(24**2+1)
def test_two_qubit_group(self):
hash_table = tqc.get_two_qubit_clifford_hash_table()
self.assertEqual(len(hash_table), 11520)
self.assertEqual(len(np.unique(hash_table)), 11520)
class TestCliffordCalculus(TestCase):
def test_products(self):
Cl_3 = tqc.SingleQubitClifford(3)
Cl_3*Cl_3
self.assertEqual(Cl_3.idx, 3) # Pauli X
self.assertEqual((Cl_3*Cl_3).idx, 0) # The identity
Cl_3 = tqc.TwoQubitClifford(3)
self.assertEqual(Cl_3.idx, 3) # Pauli X on q0
self.assertEqual((Cl_3*Cl_3).idx, 0) # The identity
product_hash = crc32((Cl_3*Cl_3).pauli_transfer_matrix.round().astype(int))
target_hash = crc32(tqc.TwoQubitClifford(0).pauli_transfer_matrix.round().astype(int))
self.assertEqual(product_hash, target_hash)
def test_product_order(self):
"""
Tests that the order of multiplying matrices is the same as what is
defined in numpy.dot
"""
Cl_528 = tqc.TwoQubitClifford(528)
Cl_9230 = tqc.TwoQubitClifford(9230)
Cliff_prod = Cl_528*Cl_9230
dot_prod = np.dot(Cl_528.pauli_transfer_matrix,
Cl_9230.pauli_transfer_matrix)
np.testing.assert_array_equal(Cliff_prod.pauli_transfer_matrix,
dot_prod)
def test_inverse_single_qubit_clifford(self):
for i in range(24):
Cl = tqc.SingleQubitClifford(i)
Cl_inv = Cl.get_inverse()
self.assertEqual((Cl_inv*Cl).idx, 0)
def test_inverse_two_qubit_clifford(self):
for i in test_indices_2Q:
Cl = tqc.TwoQubitClifford(i)
Cl_inv = Cl.get_inverse()
self.assertEqual((Cl_inv*Cl).idx, 0)
class TestCliffordGateDecomposition(TestCase):
def test_single_qubit_gate_decomposition(self):
for i in range(24):
CL = tqc.SingleQubitClifford(i)
gate_dec = CL.gate_decomposition
self.assertIsInstance(gate_dec, list)
for g in gate_dec:
self.assertIsInstance(g[0], str)
self.assertEqual(g[1], 'q0')
def test_two_qubit_gate_decomposition(self):
for idx in (test_indices_2Q):
CL = tqc.TwoQubitClifford(idx)
gate_dec = CL.gate_decomposition
print(idx, gate_dec)
self.assertIsInstance(gate_dec, list)
for g in gate_dec:
self.assertIsInstance(g[0], str)
if g[0] == 'CZ':
self.assertEqual(g[1], ['q0', 'q1'])
else:
self.assertIn(g[1], ['q0', 'q1'])
def test_gate_decomposition_unique_single_qubit(self):
hash_table = []
for i in range(24):
CL = tqc.SingleQubitClifford(i)
gate_dec = CL.gate_decomposition
hash_table.append(crc32(bytes(str(gate_dec), 'utf-8')))
self.assertEqual(len(hash_table),24)
self.assertEqual(len(np.unique(hash_table)),24)
def test_gate_decomposition_unique_two_qubit(self):
hash_table = []
for i in range(11520):
CL = tqc.TwoQubitClifford(i)
gate_dec = CL.gate_decomposition
hash_table.append(crc32(bytes(str(gate_dec), 'utf-8')))
self.assertEqual(len(hash_table), 11520)
self.assertEqual(len(np.unique(hash_table)), 11520)
class TestCliffordClassRBSeqs(TestCase):
"""
"""
def test_single_qubit_randomized_benchmarking_sequence(self):
"""
"""
seeds = [0, 100, 200, 300, 400]
net_cliffs = np.arange(len(seeds))
for seed, net_cl in zip(seeds, net_cliffs):
cliffords_single_qubit_class = rb.randomized_benchmarking_sequence(
n_cl=20, desired_net_cl=0, number_of_qubits=1, seed=0)
cliffords = rb.randomized_benchmarking_sequence_old(
n_cl=20, desired_net_cl=0, seed=0)
np.testing.assert_array_equal(cliffords_single_qubit_class, cliffords)
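# Consistency note (sketch, derived from the subgroup sizes exercised above): the four
# two-qubit classes partition the full group, since
#   24**2 + 5184 + 5184 + 24**2 == 11520
# which matches the hash-table length checked in test_two_qubit_group.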
import scipy.signal
import numpy as np
import random
import tensorflow as tf
def set_global_seeds(env, seed):
tf.set_random_seed(seed)
np.random.seed(seed)
random.seed(seed)
env_seeds = np.random.randint(low=0, high=1e6, size=env.num_envs)
env.set_random_seed(env_seeds)
class RunningMeanStd(object):
def __init__(self, epsilon=1e-4, shape=()):
self.mean = np.zeros(shape, 'float64')
self.var = np.ones(shape, 'float64')
self.count = epsilon
def update(self, x):
batch_mean = np.mean(x, axis=0)
batch_var = np.var(x, axis=0)
batch_count = x.shape[0]
delta = batch_mean - self.mean
tot_count = self.count + batch_count
new_mean = self.mean + delta * batch_count / tot_count
m_a = self.var * (self.count)
m_b = batch_var * (batch_count)
M2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count)
new_var = M2 / (self.count + batch_count)
new_count = batch_count + self.count
self.mean = new_mean
self.var = new_var
self.count = new_count
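# Minimal usage sketch (illustrative, not part of the original module): feeding
# batches through RunningMeanStd should approach the plain numpy statistics.
#   rms = RunningMeanStd(shape=(3,))
#   data = np.random.randn(1000, 3)
#   for chunk in np.array_split(data, 10):
#       rms.update(chunk)
#   np.allclose(rms.mean, data.mean(axis=0), atol=1e-2)  # -> True (up to the epsilon init)
#   np.allclose(rms.var, data.var(axis=0), atol=1e-2)    # -> True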
def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
def discount(x, gamma):
return scipy.signal.lfilter([1.0], [1.0, -gamma], x[::-1], axis=0)[::-1]
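# Worked example (sketch): discount returns the reversed cumulative discounted sums
# y[t] = sum_k gamma**k * x[t + k]. For x = [1, 1, 1] and gamma = 0.9:
#   discount(np.array([1.0, 1.0, 1.0]), 0.9)  # -> approximately [2.71, 1.9, 1.0]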
# ================================================================
# Network components
# ================================================================
def ortho_init(scale=1.0):
def _ortho_init(shape, dtype, partition_info=None):
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4:
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
# Standard orthogonal-init tail: keep whichever factor matches flat_shape,
# reshape to the target shape and apply the scale.
q = u if u.shape == flat_shape else v
q = q.reshape(shape)
return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init  # the outer ortho_init returns the initializer callable
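# Usage sketch (illustrative): ortho_init returns an initializer callable for
# TF1-style variable creation; the variable name and shape below are made up.
#   w = tf.get_variable("example_w", shape=[64, 32], initializer=ortho_init(np.sqrt(2)))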
import numpy as np
import matplotlib.pyplot as plt
from astropy.coordinates import SkyCoord, SkyOffsetFrame
import astropy.units as u
from astropy.coordinates.erfa_astrom import erfa_astrom, ErfaAstromInterpolator
from fact.analysis.statistics import li_ma_significance
def theta2(
theta2_on,
theta2_off,
scaling,
cut,
threshold="",
source="",
ontime=None,
ax=None,
window=[0, 1],
bins=100,
on_weights=None,
off_weights=None,
):
if on_weights is None:
on_weights = np.full_like(theta2_on, 1).astype('bool')
if off_weights is None:
off_weights = np.full_like(theta2_off, 1).astype('bool')
ax = ax or plt.gca()
bins_=np.linspace(window[0], window[1], bins)
#from IPython import embed; embed()
ax.hist(theta2_on, bins=bins_, range=window, histtype="step", color="r", label="ON")
ax.hist(
theta2_off,
bins=bins_,
range=window,
histtype="stepfilled",
color="tab:blue",
alpha=0.5,
label="OFF",
weights=np.full_like(theta2_off, scaling),
)
print(theta2_on.shape, theta2_off.shape)
print(np.count_nonzero(on_weights), np.count_nonzero(off_weights))
n_off = np.count_nonzero(theta2_off[off_weights] < cut)
n_on = np.count_nonzero(theta2_on[on_weights] < cut)
li_ma = li_ma_significance(n_on, n_off, scaling)
n_exc_mean = n_on - scaling * n_off
n_exc_std = np.sqrt(n_on + scaling ** 2 * n_off)
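# Worked example (sketch, made-up counts): with n_on = 100, n_off = 400 and
# scaling = 0.2, the excess is 100 - 0.2 * 400 = 20 counts with statistical
# uncertainty sqrt(100 + 0.2**2 * 400) = sqrt(116) ≈ 10.8 counts.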
# python3
# Copyright 2021 InstaDeep Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wraps a Flatland MARL environment to be used as a dm_env environment."""
import types as tp
import typing
from functools import partial
from typing import Any, Callable, Dict, List, Sequence, Tuple, Union
import dm_env
import numpy as np
from acme import specs
from acme.wrappers.gym_wrapper import _convert_to_spec
try:
from flatland.envs.observations import GlobalObsForRailEnv, Node, TreeObsForRailEnv
from flatland.envs.rail_env import RailEnv
from flatland.utils.rendertools import AgentRenderVariant, RenderTool
_has_flatland = True
except ModuleNotFoundError:
_has_flatland = False
pass
from gym.spaces import Discrete
from gym.spaces.box import Box
from mava.types import OLT, Observation
from mava.utils.sort_utils import sort_str_num
from mava.utils.wrapper_utils import (
convert_dm_compatible_observations,
convert_np_type,
parameterized_restart,
)
from mava.wrappers.env_wrappers import ParallelEnvWrapper
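# Intended usage sketch (illustrative only; flatland constructor arguments vary by
# version and are abbreviated here, so treat the RailEnv settings as placeholders):
#   rail_env = RailEnv(width=25, height=25, number_of_agents=2,
#                      obs_builder_object=TreeObsForRailEnv(max_depth=2))
#   wrapped = FlatlandEnvWrapper(rail_env, agent_info=True)
#   timestep = wrapped.reset()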
if _has_flatland: # noqa: C901
class FlatlandEnvWrapper(ParallelEnvWrapper):
"""Environment wrapper for Flatland environments.
All environments would require an observation preprocessor, except for
'GlobalObsForRailEnv'. This is because flatland gives users the
flexibility of designing custom observation builders. 'TreeObsForRailEnv'
would use the normalize_observation function from the flatland baselines
if none is supplied.
The supplied preprocessor should return either an array, tuple of arrays or
a dictionary of arrays for an observation input.
The observation, for an agent, returned by this wrapper could consist of both
the agent observation and agent info. This is because flatland also provides
information about the agents at each step. This information includes
'action_required', 'malfunction', 'speed', and 'status', and it can be appended
to the observation, by this wrapper, as an array. action_required is a boolean,
malfunction is an int denoting the number of steps for which the agent would
remain motionless, speed is a float and status can be any of the below;
READY_TO_DEPART = 0
ACTIVE = 1
DONE = 2
DONE_REMOVED = 3
This would be included in the observation if agent_info is set to True
"""
# Note: we don't inherit from base.EnvironmentWrapper because that class
# assumes that the wrapped environment is a dm_env.Environment.
def __init__(
self,
environment: RailEnv,
preprocessor: Callable[
[Any], Union[np.ndarray, Tuple[np.ndarray], Dict[str, np.ndarray]]
] = None,
agent_info: bool = True,
):
"""Wrap Flatland environment.
Args:
environment: underlying RailEnv
preprocessor: optional preprocessor. Defaults to None.
agent_info: include agent info. Defaults to True.
"""
self._environment = environment
decorate_step_method(self._environment)
self._agents = [get_agent_id(i) for i in range(self.num_agents)]
self._possible_agents = self.agents[:]
self._reset_next_step = True
self._step_type = dm_env.StepType.FIRST
self.num_actions = 5
self.action_spaces = {
agent: Discrete(self.num_actions) for agent in self.possible_agents
}
# preprocessor must be for observation builders other than global obs
# treeobs builders would use the default preprocessor if none is
# supplied
self.preprocessor: Callable[
[Dict[int, Any]], Dict[int, Any]
] = self._obtain_preprocessor(preprocessor)
self._include_agent_info = agent_info
# observation space:
# flatland defines no observation space for an agent. Here we try
# to define the observation space. All agents are identical and would
# have the same observation space.
# Infer observation space based on returned observation
obs, _ = self._environment.reset()
obs = self.preprocessor(obs)
self.observation_spaces = {
get_agent_id(i): infer_observation_space(ob) for i, ob in obs.items()
}
self._env_renderer = RenderTool(
self._environment,
agent_render_variant=AgentRenderVariant.ONE_STEP_BEHIND,
show_debug=False,
screen_height=600,  # Adjust these parameters to fit your resolution
screen_width=800,
)
@property
def agents(self) -> List[str]:
"""Return list of active agents."""
return self._agents
@property
def possible_agents(self) -> List[str]:
"""Return list of all possible agents."""
return self._possible_agents
def render(self, mode: str = "human") -> np.ndarray:
"""Renders the environment."""
if mode == "human":
show = True
else:
show = False
return self._env_renderer.render_env(
show=show,
show_observations=False,
show_predictions=False,
return_image=True,
)
def env_done(self) -> bool:
"""Checks if the environment is done."""
return self._environment.dones["__all__"] or not self.agents
def reset(self) -> dm_env.TimeStep:
"""Resets the episode."""
# Reset the rendering system
self._env_renderer.reset()
self._reset_next_step = False
self._agents = self.possible_agents[:]
observe, info = self._environment.reset()
observations = self._create_observations(
observe, info, self._environment.dones
)
rewards_spec = self.reward_spec()
rewards = {
agent: convert_np_type(rewards_spec[agent].dtype, 0)
for agent in self.possible_agents
}
discount_spec = self.discount_spec()
self._discounts = {
agent: convert_np_type(discount_spec[agent].dtype, 1)
for agent in self.possible_agents
}
return parameterized_restart(rewards, self._discounts, observations)
def step(self, actions: Dict[str, np.ndarray]) -> dm_env.TimeStep:
"""Steps the environment."""
self._pre_step()
if self._reset_next_step:
return self.reset()
self._agents = [
agent
for agent in self.agents
if not self._environment.dones[get_agent_handle(agent)]
]
observations, rewards, dones, infos = self._environment.step(actions)
rewards_spec = self.reward_spec()
# Handle empty rewards
if not rewards:
rewards = {
agent: convert_np_type(rewards_spec[agent].dtype, 0)
for agent in self.possible_agents
}
else:
rewards = {
get_agent_id(agent): convert_np_type(
rewards_spec[get_agent_id(agent)].dtype, reward
)
for agent, reward in rewards.items()
}
if observations:
observations = self._create_observations(observations, infos, dones)
if self.env_done():
self._step_type = dm_env.StepType.LAST
self._reset_next_step = True
# Zero discount when env done
discounts = {
agent: convert_np_type(
self.discount_spec()[agent].dtype, 0
) # Zero discount on final step
for agent in self.possible_agents
}
else:
self._step_type = dm_env.StepType.MID
discounts = self._discounts
return dm_env.TimeStep(
observation=observations,
reward=rewards,
discount=discounts,
step_type=self._step_type,
)
# Convert Flatland observation so it's dm_env compatible. Also, the list
# of legal actions must be converted to a legal actions mask.
def _convert_observations(
self,
observes: Dict[str, Tuple[np.ndarray, np.ndarray]],
dones: Dict[str, bool],
) -> Observation:
return convert_dm_compatible_observations(
observes, # type: ignore
dones,
self.observation_spec(),
self.env_done(),
self.possible_agents,
)
# Collate agent info and observation into a tuple, making the agent's observation
# a tuple of the observation from the env and the agent info.
def _collate_obs_and_info(
self, observes: Dict[int, np.ndarray], info: Dict[str, Dict[int, Any]]
) -> Dict[str, Tuple[np.ndarray, np.ndarray]]:
observations: Dict[str, Tuple[np.ndarray, np.ndarray]] = {}
observes = self.preprocessor(observes)
for agent, obs in observes.items():
agent_id = get_agent_id(agent)
agent_info = np.array(
[info[k][agent] for k in sort_str_num(info.keys())],
dtype=np.float32,
)
obs = (obs, agent_info) if self._include_agent_info else obs # type: ignore # noqa: E501
observations[agent_id] = obs # type: ignore
return observations
def _create_observations(
self,
obs: Dict[int, np.ndarray],
info: Dict[str, Dict[int, Any]],
dones: Dict[int, bool],
) -> Observation:
"""Convert observation."""
observations_ = self._collate_obs_and_info(obs, info)
dones_ = {get_agent_id(k): v for k, v in dones.items()}
observations = self._convert_observations(observations_, dones_)
return observations
def _obtain_preprocessor(
self, preprocessor: Any
) -> Callable[[Dict[int, Any]], Dict[int, np.ndarray]]:
"""Obtains the actual preprocessor.
Obtains the actual preprocessor to be used based on the supplied
preprocessor and the env's obs_builder object
"""
if not isinstance(self.obs_builder, GlobalObsForRailEnv):
_preprocessor = preprocessor if preprocessor else lambda x: x
if isinstance(self.obs_builder, TreeObsForRailEnv):
_preprocessor = (
partial(
normalize_observation, tree_depth=self.obs_builder.max_depth
)
if not preprocessor
else preprocessor
)
assert _preprocessor is not None
else:
def _preprocessor(
x: Tuple[np.ndarray, np.ndarray, np.ndarray]
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
return x
def returned_preprocessor(obs: Dict[int, Any]) -> Dict[int, np.ndarray]:
temp_obs = {}
for agent_id, ob in obs.items():
temp_obs[agent_id] = _preprocessor(ob)
return temp_obs
return returned_preprocessor
# set all parameters that should be available before an environment step
# if no available agent, then environment is done and should be reset
def _pre_step(self) -> None:
if not self.agents:
self._step_type = dm_env.StepType.LAST
def observation_spec(self) -> Dict[str, OLT]:
"""Return observation spec."""
observation_specs = {}
for agent in self.agents:
observation_specs[agent] = OLT(
observation=tuple(
(
_convert_to_spec(self.observation_spaces[agent]),
agent_info_spec(),
)
)
if self._include_agent_info
else _convert_to_spec(self.observation_spaces[agent]),
legal_actions=_convert_to_spec(self.action_spaces[agent]),
terminal=specs.Array((1,), np.float32),
)
return observation_specs
def action_spec(
self,
) -> Dict[str, Union[specs.DiscreteArray, specs.BoundedArray]]:
"""Get action spec."""
action_specs = {}
action_spaces = self.action_spaces
for agent in self.possible_agents:
action_specs[agent] = _convert_to_spec(action_spaces[agent])
return action_specs
def reward_spec(self) -> Dict[str, specs.Array]:
"""Get the reward spec."""
reward_specs = {}
for agent in self.possible_agents:
reward_specs[agent] = specs.Array((), np.float32)
return reward_specs
def discount_spec(self) -> Dict[str, specs.BoundedArray]:
"""Get the discount spec."""
discount_specs = {}
for agent in self.possible_agents:
discount_specs[agent] = specs.BoundedArray(
(), np.float32, minimum=0, maximum=1.0
)
return discount_specs
def extra_spec(self) -> Dict[str, specs.BoundedArray]:
"""Get the extras spec."""
return {}
def seed(self, seed: int = None) -> None:
"""Seed the environment."""
self._environment._seed(seed)
@property
def environment(self) -> RailEnv:
"""Returns the wrapped environment."""
return self._environment
@property
def num_agents(self) -> int:
"""Returns the number of trains/agents in the flatland environment"""
return int(self._environment.number_of_agents)
def __getattr__(self, name: str) -> Any:
"""Expose any other attributes of the underlying environment."""
return getattr(self._environment, name)
# Utility functions
def infer_observation_space(
obs: Union[tuple, np.ndarray, dict]
) -> Union[Box, tuple, dict]:
"""Infer a gym Observation space from a sample observation from flatland"""
if isinstance(obs, np.ndarray):
return Box(
-np.inf,
np.inf,
shape=obs.shape,
dtype=obs.dtype,
)
elif isinstance(obs, tuple):
return tuple(infer_observation_space(o) for o in obs)
elif isinstance(obs, dict):
return {key: infer_observation_space(value) for key, value in obs.items()}
else:
raise ValueError(
f"Unexpected observation type: {type(obs)}. "
f"Observation should be of either of this types "
f"(np.ndarray, tuple, or dict)"
)
def agent_info_spec() -> specs.BoundedArray:
"""Create the spec for the agent_info part of the observation"""
return specs.BoundedArray((4,), dtype=np.float32, minimum=0.0, maximum=10)
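# The four bounded entries correspond to the agent info keys in sorted order
# ('action_required', 'malfunction', 'speed', 'status'), matching how
# _collate_obs_and_info stacks them via sort_str_num.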
def get_agent_id(handle: int) -> str:
"""Obtain the string that constitutes the agent id from an agent handle"""
return f"train_{handle}"
def get_agent_handle(id: str) -> int:
"""Obtain an agents handle given its id"""
return int(id.split("_")[-1])
def decorate_step_method(env: RailEnv) -> None:
"""Step method decorator.
Enable the step method of the env to take action dictionaries where agent keys
are the agent ids. Flatland uses the agent handles as keys instead. This
function decorates the step method so that it accepts an action dict where
the keys are the agent ids.
"""
env.step_ = env.step
def _step(
self: RailEnv, actions: Dict[str, Union[int, float, Any]]
) -> dm_env.TimeStep:
actions_ = {get_agent_handle(k): int(v) for k, v in actions.items()}
return self.step_(actions_)
env.step = tp.MethodType(_step, env)
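# Example of the translation performed by the decorated step method
# (illustrative values): env.step({"train_0": 2, "train_1": 0}) is forwarded
# as env.step_({0: 2, 1: 0}).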
# The block of code below is obtained from the flatland starter-kit
# at https://gitlab.aicrowd.com/flatland/flatland-starter-kit/-/blob/master/
# utils/observation_utils.py
# this is done just to obtain the normalize_observation function that would
# serve as the default preprocessor for the Tree obs builder.
def max_lt(seq: Sequence, val: Any) -> Any:
"""Get max in sequence.
Return the greatest non-negative item in seq for which item < val applies.
0 is returned if seq was empty or no such item exists.
"""
max = 0
idx = len(seq) - 1
while idx >= 0:
if seq[idx] < val and seq[idx] >= 0 and seq[idx] > max:
max = seq[idx]
idx -= 1
return max
def min_gt(seq: Sequence, val: Any) -> Any:
"""Gets min in a sequence.
Return the smallest item in seq for which item >= val applies.
np.inf is returned if seq was empty or no such item exists.
"""
min = np.inf
idx = len(seq) - 1
while idx >= 0:
if seq[idx] >= val and seq[idx] < min:
min = seq[idx]
idx -= 1
return min
@typing.no_type_check
def norm_obs_clip(
obs: np.ndarray,
clip_min: int = -1,
clip_max: int = 1,
fixed_radius: int = 0,
normalize_to_range: bool = False,
) -> np.ndarray:
"""Normalize observation.
This function normalizes the observation and clips it to the given range.
:param obs: Observation that should be normalized
:param clip_min: min value where observation will be clipped
:param clip_max: max value where observation will be clipped
:param fixed_radius: if > 0, use this fixed value as the normalization denominator instead of the observation maximum
:param normalize_to_range: if True, shift by the smallest non-negative value before scaling
:return: returns the normalized and clipped observation
"""
if fixed_radius > 0:
max_obs = fixed_radius
else:
max_obs = max(1, max_lt(obs, 1000)) + 1
min_obs = 0 # min(max_obs, min_gt(obs, 0))
if normalize_to_range:
min_obs = min_gt(obs, 0)
if min_obs > max_obs:
min_obs = max_obs
if max_obs == min_obs:
return np.clip(np.array(obs) / max_obs, clip_min, clip_max)
norm = np.abs(max_obs - min_obs)
return np.clip((np.array(obs) - min_obs) / norm, clip_min, clip_max)
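# Worked example (illustrative): with fixed_radius=10,
# norm_obs_clip(np.array([3.0, 7.0, -np.inf]), fixed_radius=10) divides by
# max_obs=10 and clips to [-1, 1], giving approximately [0.3, 0.7, -1.0].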
def _split_node_into_feature_groups(
node: Node,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Splits node into features."""
data = np.zeros(6)
distance = np.zeros(1)
agent_data = np.zeros(4)
data[0] = node.dist_own_target_encountered
data[1] = node.dist_other_target_encountered
data[2] = node.dist_other_agent_encountered
data[3] = node.dist_potential_conflict
data[4] = node.dist_unusable_switch
data[5] = node.dist_to_next_branch
distance[0] = node.dist_min_to_target
agent_data[0] = node.num_agents_same_direction
agent_data[1] = node.num_agents_opposite_direction
agent_data[2] = node.num_agents_malfunctioning
agent_data[3] = node.speed_min_fractional
return data, distance, agent_data
@typing.no_type_check
def _split_subtree_into_feature_groups(
node: Node, current_tree_depth: int, max_tree_depth: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Split subtree."""
if node == -np.inf:
remaining_depth = max_tree_depth - current_tree_depth
# reference:
# https://stackoverflow.com/questions/515214/total-number-of-nodes-in-a-tree-data-structure
num_remaining_nodes = int((4 ** (remaining_depth + 1) - 1) / (4 - 1))
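# e.g. remaining_depth=2 gives (4 ** 3 - 1) / 3 = 21 placeholder nodes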
return (
[-np.inf] * num_remaining_nodes * 6,
[-np.inf] * num_remaining_nodes,
[-np.inf] * num_remaining_nodes * 4,
)
data, distance, agent_data = _split_node_into_feature_groups(node)
if not node.childs:
return data, distance, agent_data
for direction in TreeObsForRailEnv.tree_explored_actions_char:
sub_data, sub_distance, sub_agent_data = _split_subtree_into_feature_groups(
node.childs[direction], current_tree_depth + 1, max_tree_depth
)
data = np.concatenate((data, sub_data))
distance = np.concatenate((distance, sub_distance))
agent_data = np.concatenate((agent_data, sub_agent_data))
# Copyright (c) 2003-2015 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy
import treecorr
import os
from numpy import pi
from test_helper import get_from_wiki
def test_ascii():
nobj = 5000
numpy.random.seed(8675309)
x = numpy.random.random_sample(nobj)
y = numpy.random.random_sample(nobj)
z = numpy.random.random_sample(nobj)
ra = numpy.random.random_sample(nobj)
dec = numpy.random.random_sample(nobj)
r = numpy.random.random_sample(nobj)
w = numpy.random.random_sample(nobj)
g1 = numpy.random.random_sample(nobj)
g2 = numpy.random.random_sample(nobj)
k = numpy.random.random_sample(nobj)
flags = numpy.zeros(nobj).astype(int)
for flag in [ 1, 2, 4, 8, 16 ]:
sub = numpy.random.random_sample(nobj) < 0.1
flags[sub] = numpy.bitwise_or(flags[sub], flag)
file_name = os.path.join('data','test.dat')
with open(file_name, 'w') as fid:
# These are intentionally in a different order from the order we parse them.
fid.write('# ra,dec,x,y,k,g1,g2,w,flag,z,r\n')
for i in range(nobj):
fid.write((('%.8f '*10)+'%d\n')%(
ra[i],dec[i],x[i],y[i],k[i],g1[i],g2[i],w[i],z[i],r[i],flags[i]))
# Check basic input
config = {
'x_col' : 3,
'y_col' : 4,
'z_col' : 9,
'x_units' : 'rad',
'y_units' : 'rad',
'w_col' : 8,
'g1_col' : 6,
'g2_col' : 7,
'k_col' : 5,
}
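# Note (added for clarity): TreeCorr column numbers are 1-based, so
# 'x_col' : 3 refers to the third whitespace-separated column of the ASCII
# file written above.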
cat1 = treecorr.Catalog(file_name, config)
numpy.testing.assert_almost_equal(cat1.x, x)
numpy.testing.assert_almost_equal(cat1.y, y)
numpy.testing.assert_almost_equal(cat1.z, z)
numpy.testing.assert_almost_equal(cat1.w, w)
numpy.testing.assert_almost_equal(cat1.g1, g1)
numpy.testing.assert_almost_equal(cat1.g2, g2)
numpy.testing.assert_almost_equal(cat1.k, k)
# Check flags
config['flag_col'] = 11
cat2 = treecorr.Catalog(file_name, config)
numpy.testing.assert_almost_equal(cat2.w[flags==0], w[flags==0])
numpy.testing.assert_almost_equal(cat2.w[flags!=0], 0.)
# Check ok_flag
config['ok_flag'] = 4
cat3 = treecorr.Catalog(file_name, config)
numpy.testing.assert_almost_equal(cat3.w[numpy.logical_or(flags==0, flags==4)],
w[numpy.logical_or(flags==0, flags==4)])
numpy.testing.assert_almost_equal(cat3.w[numpy.logical_and(flags!=0, flags!=4)], 0.)
# Check ignore_flag
del config['ok_flag']
config['ignore_flag'] = 16
cat4 = treecorr.Catalog(file_name, config)
numpy.testing.assert_almost_equal(cat4.w[flags < 16], w[flags < 16])
numpy.testing.assert_almost_equal(cat4.w[flags >= 16], 0.)
# Check different units for x,y
config['x_units'] = 'arcsec'
config['y_units'] = 'arcsec'
del config['z_col']
cat5 = treecorr.Catalog(file_name, config)
numpy.testing.assert_almost_equal(cat5.x, x * (pi/180./3600.))
numpy.testing.assert_almost_equal(cat5.y, y * (pi/180./3600.))
config['x_units'] = 'arcmin'
config['y_units'] = 'arcmin'
cat5 = treecorr.Catalog(file_name, config)
numpy.testing.assert_almost_equal(cat5.x, x * (pi/180./60.))
numpy.testing.assert_almost_equal(cat5.y, y * (pi/180./60.))
config['x_units'] = 'deg'
config['y_units'] = 'deg'
cat5 = treecorr.Catalog(file_name, config)
numpy.testing.assert_almost_equal(cat5.x, x * (pi/180.))
numpy.testing.assert_almost_equal(cat5.y, y * (pi/180.))
del config['x_units'] # Default is radians
del config['y_units']
cat5 = treecorr.Catalog(file_name, config)
numpy.testing.assert_almost_equal(cat5.x, x)
numpy.testing.assert_almost_equal(cat5.y, y)
# Check ra,dec
del config['x_col']
del config['y_col']
config['ra_col'] = 1
config['dec_col'] = 2
config['r_col'] = 10
config['ra_units'] = 'rad'
config['dec_units'] = 'rad'
cat6 = treecorr.Catalog(file_name, config)
numpy.testing.assert_almost_equal(cat6.ra, ra)
numpy.testing.assert_almost_equal(cat6.dec, dec)
config['ra_units'] = 'deg'
config['dec_units'] = 'deg'
cat6 = treecorr.Catalog(file_name, config)
numpy.testing.assert_almost_equal(cat6.ra, ra * (pi/180.))
numpy.testing.assert_almost_equal(cat6.dec, dec * (pi/180.))
config['ra_units'] = 'hour'
config['dec_units'] = 'deg'
cat6 = treecorr.Catalog(file_name, config)
numpy.testing.assert_almost_equal(cat6.ra, ra * (pi/12.))
numpy.testing.assert_almost_equal(cat6.dec, dec * (pi/180.))
# Check using a different delimiter, comment marker
csv_file_name = os.path.join('data','test.csv')
with open(csv_file_name, 'w') as fid:
# These are intentionally in a different order from the order we parse them.
fid.write('% This file uses commas for its delimiter\n')
fid.write('% And more than one header line.\n')
fid.write('% Plus some extra comment lines every so often.\n')
fid.write('% And we use a weird comment marker to boot.\n')
fid.write('% ra,dec,x,y,k,g1,g2,w,flag\n')
for i in range(nobj):
fid.write((('%.8f,'*10)+'%d\n')%(
ra[i],dec[i],x[i],y[i],k[i],g1[i],g2[i],w[i],z[i],r[i],flags[i]))
if i%100 == 0:
fid.write('%%%% Line %d\n'%i)
config['delimiter'] = ','
config['comment_marker'] = '%'
cat7 = treecorr.Catalog(csv_file_name, config)
numpy.testing.assert_almost_equal(cat7.ra, ra * (pi/12.))
numpy.testing.assert_almost_equal(cat7.dec, dec * (pi/180.))
numpy.testing.assert_almost_equal(cat7.r, r)
numpy.testing.assert_almost_equal(cat7.g1, g1)
numpy.testing.assert_almost_equal(cat7.g2, g2)
numpy.testing.assert_almost_equal(cat7.w[flags < 16], w[flags < 16])
numpy.testing.assert_almost_equal(cat7.w[flags >= 16], 0.)
# Check flip_g1, flip_g2
del config['delimiter']
del config['comment_marker']
config['flip_g1'] = True
cat8 = treecorr.Catalog(file_name, config)
numpy.testing.assert_almost_equal(cat8.g1, -g1)
numpy.testing.assert_almost_equal(cat8.g2, g2)
config['flip_g2'] = 'true'
cat8 = treecorr.Catalog(file_name, config)
numpy.testing.assert_almost_equal(cat8.g1, -g1)
numpy.testing.assert_almost_equal(cat8.g2, -g2)
config['flip_g1'] = 'n'
config['flip_g2'] = 'yes'
cat8 = treecorr.Catalog(file_name, config)
numpy.testing.assert_almost_equal(cat8.g1, g1)
numpy.testing.assert_almost_equal(cat8.g2, -g2)
# Check overriding values with kwargs
cat8 = treecorr.Catalog(file_name, config, flip_g1=True, flip_g2=False)
numpy.testing.assert_almost_equal(cat8.g1, -g1)
numpy.testing.assert_almost_equal(cat8.g2, g2)
def test_fits():
get_from_wiki('Aardvark.fit')
file_name = os.path.join('data','Aardvark.fit')
config = treecorr.read_config('Aardvark.yaml')
config['verbose'] = 1
# Just test a few random particular values
cat1 = treecorr.Catalog(file_name, config)
numpy.testing.assert_equal(len(cat1.ra), 390935)
numpy.testing.assert_equal(cat1.nobj, 390935)
numpy.testing.assert_almost_equal(cat1.ra[0], 56.4195 * (pi/180.))
numpy.testing.assert_almost_equal(cat1.ra[390934], 78.4782 * (pi/180.))
numpy.testing.assert_almost_equal(cat1.dec[290333], 83.1579 * (pi/180.))
#!/usr/bin/env python
import unittest
import numpy as np
import simweights
info_dtype = [
("primary_type", np.int32),
("n_flux_events", np.int32),
("global_probability_scale", np.float64),
("cylinder_radius", np.float64),
("min_zenith", np.float64),
("max_zenith", np.float64),
("min_energy", np.float64),
("max_energy", np.float64),
("power_law_index", np.float64),
]
result_dtype = [("neu", np.int32), ("Ev", np.float64), ("wght", np.float64)]
class TestCorsikaWeighter(unittest.TestCase):
def test_triggered_corsika(self):
nevents = 10000
pdgid = 12
c1 = simweights.CircleInjector(300, 0, 1)
p1 = simweights.PowerLaw(0, 1e3, 1e4)
weight = np.zeros(nevents, dtype=result_dtype)
weight["neu"] = pdgid
weight["Ev"] = p1.ppf(np.linspace(0, 1, nevents))
for event_weight in [1e-6, 1e-3, 1]:
weight["wght"] = event_weight
for nfiles in [1, 5, 50]:
rows = nfiles * [
(
pdgid,
nevents,
1,
c1.radius,
np.arccos(c1.cos_zen_max),
################################################################################
# Copyright (C) 2013-2014 <NAME>
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for `dot` module.
"""
import unittest
import numpy as np
import scipy
from numpy import testing
from ..dot import Dot, SumMultiply
from ..gaussian import Gaussian, GaussianARD
from bayespy.nodes import GaussianGamma
from ...vmp import VB
from bayespy.utils import misc
from bayespy.utils import linalg
from bayespy.utils import random
from bayespy.utils.misc import TestCase
class TestSumMultiply(TestCase):
def test_parent_validity(self):
"""
Test that the parent nodes are validated properly in SumMultiply
"""
V = GaussianARD(1, 1)
X = Gaussian(np.ones(1), np.identity(1))
Y = Gaussian(np.ones(3), np.identity(3))
Z = Gaussian(np.ones(5), np.identity(5))
A = SumMultiply(X, ['i'])
self.assertEqual(A.dims, ((), ()))
A = SumMultiply('i', X)
self.assertEqual(A.dims, ((), ()))
A = SumMultiply(X, ['i'], ['i'])
self.assertEqual(A.dims, ((1,), (1,1)))
A = SumMultiply('i->i', X)
self.assertEqual(A.dims, ((1,), (1,1)))
A = SumMultiply(X, ['i'], Y, ['j'], ['i','j'])
self.assertEqual(A.dims, ((1,3), (1,3,1,3)))
A = SumMultiply('i,j->ij', X, Y)
self.assertEqual(A.dims, ((1,3), (1,3,1,3)))
A = SumMultiply(V, [], X, ['i'], Y, ['i'], [])
self.assertEqual(A.dims, ((), ()))
A = SumMultiply(',i,i->', V, X, Y)
self.assertEqual(A.dims, ((), ()))
# Gaussian-gamma parents
C = GaussianGamma(np.ones(3), np.identity(3), 1, 1)
A = SumMultiply(Y, ['i'], C, ['i'], ['i'])
self.assertEqual(A.dims, ((3,), (3,3), (), ()))
A = SumMultiply('i,i->i', Y, C)
self.assertEqual(A.dims, ((3,), (3,3), (), ()))
C = GaussianGamma(np.ones(3), np.identity(3), 1, 1)
A = SumMultiply(Y, ['i'], C, ['i'], [])
self.assertEqual(A.dims, ((), (), (), ()))
A = SumMultiply('i,i->', Y, C)
self.assertEqual(A.dims, ((), (), (), ()))
# Error: not enough inputs
self.assertRaises(ValueError,
SumMultiply)
self.assertRaises(ValueError,
SumMultiply,
X)
# Error: too many keys
self.assertRaises(ValueError,
SumMultiply,
Y,
['i', 'j'])
self.assertRaises(ValueError,
SumMultiply,
'ij',
Y)
# Error: not broadcastable
self.assertRaises(ValueError,
SumMultiply,
Y,
['i'],
Z,
['i'])
self.assertRaises(ValueError,
SumMultiply,
'i,i',
Y,
Z)
# Error: output key not in inputs
self.assertRaises(ValueError,
SumMultiply,
X,
['i'],
['j'])
self.assertRaises(ValueError,
SumMultiply,
'i->j',
X)
# Error: non-unique input keys
self.assertRaises(ValueError,
SumMultiply,
X,
['i','i'])
self.assertRaises(ValueError,
SumMultiply,
'ii',
X)
# Error: non-unique output keys
self.assertRaises(ValueError,
SumMultiply,
X,
['i'],
['i','i'])
self.assertRaises(ValueError,
SumMultiply,
'i->ii',
X)
# String has too many '->'
self.assertRaises(ValueError,
SumMultiply,
'i->i->i',
X)
# String has too many input nodes
self.assertRaises(ValueError,
SumMultiply,
'i,i->i',
X)
# Same parent several times
self.assertRaises(ValueError,
SumMultiply,
'i,i->i',
X,
X)
# Same parent several times via deterministic node
Xh = SumMultiply('i->i', X)
self.assertRaises(ValueError,
SumMultiply,
'i,i->i',
X,
Xh)
def test_message_to_child(self):
"""
Test the message from SumMultiply to its children.
"""
def compare_moments(u0, u1, *args):
Y = SumMultiply(*args)
u_Y = Y.get_moments()
self.assertAllClose(u_Y[0], u0)
self.assertAllClose(u_Y[1], u1)
# Test constant parent
y = np.random.randn(2,3,4)
compare_moments(y,
linalg.outer(y, y, ndim=2),
'ij->ij',
y)
# Do nothing for 2-D array
Y = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y = Y.get_moments()
compare_moments(y[0],
y[1],
'ij->ij',
Y)
compare_moments(y[0],
y[1],
Y,
[0,1],
[0,1])
# Sum over the rows of a matrix
Y = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y = Y.get_moments()
mu = np.einsum('...ij->...j', y[0])
cov = np.einsum('...ijkl->...jl', y[1])
compare_moments(mu,
cov,
'ij->j',
Y)
compare_moments(mu,
cov,
Y,
[0,1],
[1])
# Inner product of three vectors
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
plates=(),
shape=(2,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x2 = X2.get_moments()
X3 = GaussianARD(np.random.randn(7,6,5,2),
np.random.rand(7,6,5,2),
plates=(7,6,5),
shape=(2,))
x3 = X3.get_moments()
mu = np.einsum('...i,...i,...i->...', x1[0], x2[0], x3[0])
cov = np.einsum('...ij,...ij,...ij->...', x1[1], x2[1], x3[1])
compare_moments(mu,
cov,
'i,i,i',
X1,
X2,
X3)
compare_moments(mu,
cov,
'i,i,i->',
X1,
X2,
X3)
compare_moments(mu,
cov,
X1,
[9],
X2,
[9],
X3,
[9])
compare_moments(mu,
cov,
X1,
[9],
X2,
[9],
X3,
[9],
[])
# Outer product of two vectors
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
plates=(5,),
shape=(2,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x2 = X2.get_moments()
mu = np.einsum('...i,...j->...ij', x1[0], x2[0])
cov = np.einsum('...ik,...jl->...ijkl', x1[1], x2[1])
compare_moments(mu,
cov,
'i,j->ij',
X1,
X2)
compare_moments(mu,
cov,
X1,
[9],
X2,
[7],
[9,7])
# Matrix product
Y1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
plates=(),
shape=(3,2))
y1 = Y1.get_moments()
Y2 = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y2 = Y2.get_moments()
mu = np.einsum('...ik,...kj->...ij', y1[0], y2[0])
cov = np.einsum('...ikjl,...kmln->...imjn', y1[1], y2[1])
compare_moments(mu,
cov,
'ik,kj->ij',
Y1,
Y2)
compare_moments(mu,
cov,
Y1,
['i','k'],
Y2,
['k','j'],
['i','j'])
# Trace of a matrix product
Y1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
plates=(),
shape=(3,2))
y1 = Y1.get_moments()
Y2 = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y2 = Y2.get_moments()
mu = np.einsum('...ij,...ji->...', y1[0], y2[0])
cov = np.einsum('...ikjl,...kilj->...', y1[1], y2[1])
compare_moments(mu,
cov,
'ij,ji',
Y1,
Y2)
compare_moments(mu,
cov,
'ij,ji->',
Y1,
Y2)
compare_moments(mu,
cov,
Y1,
['i','j'],
Y2,
['j','i'])
compare_moments(mu,
cov,
Y1,
['i','j'],
Y2,
['j','i'],
[])
# Vector-matrix-vector product
X1 = GaussianARD(np.random.randn(3),
np.random.rand(3),
plates=(),
shape=(3,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x2 = X2.get_moments()
Y = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
plates=(),
shape=(3,2))
y = Y.get_moments()
mu = np.einsum('...i,...ij,...j->...', x1[0], y[0], x2[0])
cov = np.einsum('...ia,...ijab,...jb->...', x1[1], y[1], x2[1])
compare_moments(mu,
cov,
'i,ij,j',
X1,
Y,
X2)
compare_moments(mu,
cov,
X1,
[1],
Y,
[1,2],
X2,
[2])
# Complex sum-product of 0-D, 1-D, 2-D and 3-D arrays
V = GaussianARD(np.random.randn(7,6,5),
np.random.rand(7,6,5),
plates=(7,6,5),
shape=())
v = V.get_moments()
X = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x = X.get_moments()
Y = GaussianARD(np.random.randn(3,4),
np.random.rand(3,4),
plates=(5,),
shape=(3,4))
y = Y.get_moments()
Z = GaussianARD(np.random.randn(4,2,3),
np.random.rand(4,2,3),
plates=(6,5),
shape=(4,2,3))
z = Z.get_moments()
mu = np.einsum('...,...i,...kj,...jik->...k', v[0], x[0], y[0], z[0])
cov = np.einsum('...,...ia,...kjcb,...jikbac->...kc', v[1], x[1], y[1], z[1])
compare_moments(mu,
cov,
',i,kj,jik->k',
V,
X,
Y,
Z)
compare_moments(mu,
cov,
V,
[],
X,
['i'],
Y,
['k','j'],
Z,
['j','i','k'],
['k'])
# Test with constant nodes
N = 10
D = 5
a = np.random.randn(N, D)
B = Gaussian(
np.random.randn(D),
random.covariance(D),
)
X = SumMultiply('i,i->', B, a)
np.testing.assert_allclose(
X.get_moments()[0],
np.einsum('ni,i->n', a, B.get_moments()[0]),
)
np.testing.assert_allclose(
X.get_moments()[1],
np.einsum('ni,nj,ij->n', a, a, B.get_moments()[1]),
)
#
# Gaussian-gamma parents
#
# Outer product of vectors
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
shape=(2,))
x1 = X1.get_moments()
X2 = GaussianGamma(
np.random.randn(6,1,2),
random.covariance(2),
np.random.rand(6,1),
np.random.rand(6,1),
plates=(6,1)
)
x2 = X2.get_moments()
Y = SumMultiply('i,j->ij', X1, X2)
u = Y._message_to_child()
y = np.einsum('...i,...j->...ij', x1[0], x2[0])
yy = np.einsum('...ik,...jl->...ijkl', x1[1], x2[1])
self.assertAllClose(u[0], y)
self.assertAllClose(u[1], yy)
self.assertAllClose(u[2], x2[2])
self.assertAllClose(u[3], x2[3])
# Test with constant nodes
N = 10
M = 8
D = 5
a = np.random.randn(N, 1, D)
B = GaussianGamma(
np.random.randn(M, D),
random.covariance(D, size=(M,)),
np.random.rand(M),
np.random.rand(M),
ndim=1,
)
X = SumMultiply('i,i->', B, a)
np.testing.assert_allclose(
X.get_moments()[0],
np.einsum('nmi,mi->nm', a, B.get_moments()[0]),
)
np.testing.assert_allclose(
X.get_moments()[1],
np.einsum('nmi,nmj,mij->nm', a, a, B.get_moments()[1]),
)
np.testing.assert_allclose(
X.get_moments()[2],
B.get_moments()[2],
)
np.testing.assert_allclose(
X.get_moments()[3],
B.get_moments()[3],
)
pass
def test_message_to_parent(self):
"""
Test the message from SumMultiply node to its parents.
"""
data = 2
tau = 3
def check_message(true_m0, true_m1, parent, *args, F=None):
if F is None:
A = SumMultiply(*args)
B = GaussianARD(A, tau)
B.observe(data*np.ones(A.plates + A.dims[0]))
else:
A = F
(A_m0, A_m1) = A._message_to_parent(parent)
self.assertAllClose(true_m0, A_m0)
self.assertAllClose(true_m1, A_m1)
pass
# Check: different message to each of multiple parents
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x2 = X2.get_moments()
m0 = tau * data * x2[0]
m1 = -0.5 * tau * x2[1] * np.identity(2)
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
[9],
X2,
[9],
[9])
m0 = tau * data * x1[0]
m1 = -0.5 * tau * x1[1] * np.identity(2)
check_message(m0, m1, 1,
'i,i->i',
X1,
X2)
check_message(m0, m1, 1,
X1,
[9],
X2,
[9],
[9])
# Check: key not in output
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x1 = X1.get_moments()
m0 = tau * data * np.ones(2)
m1 = -0.5 * tau * np.ones((2,2))
check_message(m0, m1, 0,
'i',
X1)
check_message(m0, m1, 0,
'i->',
X1)
check_message(m0, m1, 0,
X1,
[9])
check_message(m0, m1, 0,
X1,
[9],
[])
# Check: key not in some input
X1 = GaussianARD(np.random.randn(),
np.random.rand())
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x2 = X2.get_moments()
m0 = tau * data * np.sum(x2[0], axis=-1)
m1 = -0.5 * tau * np.sum(x2[1] * np.identity(2),
axis=(-1,-2))
check_message(m0, m1, 0,
',i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
[],
X2,
[9],
[9])
m0 = tau * data * x1[0] * np.ones(2)
m1 = -0.5 * tau * x1[1] * np.identity(2)
check_message(m0, m1, 1,
',i->i',
X1,
X2)
check_message(m0, m1, 1,
X1,
[],
X2,
[9],
[9])
# Check: keys in different order
Y1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
ndim=2)
y1 = Y1.get_moments()
Y2 = GaussianARD(np.random.randn(2,3),
np.random.rand(2,3),
ndim=2)
y2 = Y2.get_moments()
m0 = tau * data * y2[0].T
m1 = -0.5 * tau * np.einsum('ijlk->jikl', y2[1] * misc.identity(2,3))
check_message(m0, m1, 0,
'ij,ji->ij',
Y1,
Y2)
check_message(m0, m1, 0,
Y1,
['i','j'],
Y2,
['j','i'],
['i','j'])
m0 = tau * data * y1[0].T
m1 = -0.5 * tau * np.einsum('ijlk->jikl', y1[1] * misc.identity(3,2))
check_message(m0, m1, 1,
'ij,ji->ij',
Y1,
Y2)
check_message(m0, m1, 1,
Y1,
['i','j'],
Y2,
['j','i'],
['i','j'])
# Check: plates when different dimensionality
X1 = GaussianARD(np.random.randn(5),
np.random.rand(5),
shape=(),
plates=(5,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(5,3),
np.random.rand(5,3),
shape=(3,),
plates=(5,))
x2 = X2.get_moments()
m0 = tau * data * np.sum(np.ones((5,3)) * x2[0], axis=-1)
m1 = -0.5 * tau * np.sum(x2[1] * misc.identity(3), axis=(-1,-2))
check_message(m0, m1, 0,
',i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
[],
X2,
['i'],
['i'])
m0 = tau * data * x1[0][:,np.newaxis] * np.ones((5,3))
m1 = -0.5 * tau * x1[1][:,np.newaxis,np.newaxis] * misc.identity(3)
check_message(m0, m1, 1,
',i->i',
X1,
X2)
check_message(m0, m1, 1,
X1,
[],
X2,
['i'],
['i'])
# Check: other parent's moments broadcasts over plates when node has the
# same plates
X1 = GaussianARD(np.random.randn(5,4,3),
np.random.rand(5,4,3),
shape=(3,),
plates=(5,4))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(5,4))
x2 = X2.get_moments()
m0 = tau * data * np.ones((5,4,3)) * x2[0]
m1 = -0.5 * tau * x2[1] * misc.identity(3)
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
['i'],
X2,
['i'],
['i'])
# Check: other parent's moments broadcasts over plates when node does
# not have that plate
X1 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=())
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(5,4))
x2 = X2.get_moments()
m0 = tau * data * np.sum(np.ones((5,4,3)) * x2[0], axis=(0,1))
m1 = -0.5 * tau * np.sum(np.ones((5,4,1,1))
* misc.identity(3)
* x2[1],
axis=(0,1))
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
['i'],
X2,
['i'],
['i'])
# Check: other parent's moments broadcasts over plates when the node
# only broadcasts that plate
X1 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(1,1))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(5,4))
x2 = X2.get_moments()
m0 = tau * data * np.sum(np.ones((5,4,3)) * x2[0], axis=(0,1), keepdims=True)
m1 = -0.5 * tau * np.sum(np.ones((5,4,1,1))
import numpy as np
import numpy.ma as ma
import numpy.testing as npt
import sharppy.sharptab.thermo as thermo
from sharppy.sharptab.constants import *
def test_ctof():
# single pass
input_c = 0
correct_f = 32
returned_f = thermo.ctof(input_c)
npt.assert_almost_equal(returned_f, correct_f)
# array_like pass
input_c = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
input_c = np.asanyarray(input_c)
correct_f = [32, 50, 68, 86, 104, 122, 140, 158, 176, 194, 212]
correct_f = np.asanyarray(correct_f)
returned_f = thermo.ctof(input_c)
npt.assert_almost_equal(returned_f, correct_f)
# single masked
input_c = ma.masked
correct_f = ma.masked
returned_f = thermo.ctof(input_c)
npt.assert_(type(returned_f), type(correct_f))
# array_like pass
inds = [0, 5, 7]
input_c = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
input_c = np.ma.asanyarray(input_c)
correct_f = [32, 50, 68, 86, 104, 122, 140, 158, 176, 194, 212]
correct_f = np.ma.asanyarray(correct_f)
input_c[inds] = ma.masked
correct_f[inds] = ma.masked
returned_f = thermo.ctof(input_c)
npt.assert_almost_equal(returned_f, correct_f)
def test_ftoc():
# single pass
input_f = 32
correct_c = 0
returned_c = thermo.ftoc(input_f)
npt.assert_almost_equal(returned_c, correct_c)
# array_like pass
input_f = [32, 50, 68, 86, 104, 122, 140, 158, 176, 194, 212]
input_f = np.asanyarray(input_f)
correct_c = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
correct_c = np.asanyarray(correct_c)
returned_c = thermo.ftoc(input_f)
npt.assert_almost_equal(returned_c, correct_c)
# single masked
input_f = ma.masked
correct_c = ma.masked
returned_c = thermo.ftoc(input_f)
npt.assert_(type(returned_c), type(correct_c))
# array_like pass
inds = [0, 5, 7]
input_f = [32, 50, 68, 86, 104, 122, 140, 158, 176, 194, 212]
input_f = np.ma.asanyarray(input_f)
correct_c = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
correct_c = np.ma.asanyarray(correct_c)
input_f[inds] = ma.masked
correct_c[inds] = ma.masked
returned_c = thermo.ftoc(input_f)
npt.assert_almost_equal(returned_c, correct_c)
def test_ktoc():
# single pass
input_k = 0
correct_c = -273.15
returned_c = thermo.ktoc(input_k)
npt.assert_almost_equal(returned_c, correct_c)
# array_like pass
input_k = [0, 50, 100, 150, 200, 250, 300]
input_k = np.asanyarray(input_k)
correct_c = [-273.15, -223.15, -173.15, -123.15, -73.15, -23.15, 26.85]
correct_c = np.asanyarray(correct_c)
returned_c = thermo.ktoc(input_k)
npt.assert_almost_equal(returned_c, correct_c)
# single masked
input_k = ma.masked
correct_c = ma.masked
returned_c = thermo.ktoc(input_k)
npt.assert_(type(returned_c), type(correct_c))
# array_like pass
inds = [0, 2, 3]
input_k = [0, 50, 100, 150, 200, 250, 300]
input_k = np.ma.asanyarray(input_k)
correct_c = [-273.15, -223.15, -173.15, -123.15, -73.15, -23.15, 26.85]
correct_c = np.ma.asanyarray(correct_c)
input_k[inds] = ma.masked
correct_c[inds] = ma.masked
returned_c = thermo.ktoc(input_k)
npt.assert_almost_equal(returned_c, correct_c)
def test_ctok():
# single pass
input_c = -273.15
correct_k = 0
returned_k = thermo.ctok(input_c)
npt.assert_almost_equal(returned_k, correct_k)
# array_like pass
input_c = [-273.15, -223.15, -173.15, -123.15, -73.15, -23.15, 26.85]
input_c = np.asanyarray(input_c)
correct_k = [0, 50, 100, 150, 200, 250, 300]
correct_k = np.asanyarray(correct_k)
returned_k = thermo.ctok(input_c)
npt.assert_almost_equal(returned_k, correct_k)
# single masked
input_c = ma.masked
correct_k = ma.masked
returned_k = thermo.ctok(input_c)
npt.assert_(type(returned_k), type(correct_k))
# array_like pass
inds = [0, 2, 3]
input_c = [-273.15, -223.15, -173.15, -123.15, -73.15, -23.15, 26.85]
input_c = np.ma.asanyarray(input_c)
correct_k = [0, 50, 100, 150, 200, 250, 300]
correct_k = np.ma.asanyarray(correct_k)
input_c[inds] = ma.masked
correct_k[inds] = ma.masked
returned_k = thermo.ctok(input_c)
npt.assert_almost_equal(returned_k, correct_k)
def test_ktof():
# single pass
input_k = 0
correct_f = -459.67
returned_f = thermo.ktof(input_k)
npt.assert_almost_equal(returned_f, correct_f)
# array_like pass
input_k = [0, 50, 100, 150, 200, 250, 300]
input_k = np.asanyarray(input_k)
correct_f = [-459.67, -369.67, -279.67, -189.67, -99.67, -9.67, 80.33]
correct_f = np.asanyarray(correct_f)
returned_f = thermo.ktof(input_k)
npt.assert_almost_equal(returned_f, correct_f)
# single masked
input_k = ma.masked
correct_f = ma.masked
returned_f = thermo.ktof(input_k)
npt.assert_(type(returned_f), type(correct_f))
# array_like pass
inds = [0, 2, 3]
input_k = [0, 50, 100, 150, 200, 250, 300]
input_k = np.ma.asanyarray(input_k)
correct_f = [-459.67, -369.67, -279.67, -189.67, -99.67, -9.67, 80.33]
correct_f = np.ma.asanyarray(correct_f)
input_k[inds] = ma.masked
correct_f[inds] = ma.masked
returned_f = thermo.ktof(input_k)
npt.assert_almost_equal(returned_f, correct_f)
def test_ftok():
# single pass
input_f = -459.67
correct_k = 0
returned_k = thermo.ftok(input_f)
npt.assert_almost_equal(returned_k, correct_k)
# array_like pass
input_f = [-459.67, -369.67, -279.67, -189.67, -99.67, -9.67, 80.33]
input_f = np.asanyarray(input_f)
correct_k = [0, 50, 100, 150, 200, 250, 300]
correct_k = np.asanyarray(correct_k)
returned_k = thermo.ftok(input_f)
npt.assert_almost_equal(returned_k, correct_k)
# single masked
input_f = ma.masked
correct_k = ma.masked
returned_k = thermo.ftok(input_f)
npt.assert_(type(returned_k), type(correct_k))
# array_like pass
inds = [0, 2, 3]
input_f = [-459.67, -369.67, -279.67, -189.67, -99.67, -9.67, 80.33]
input_f = np.ma.asanyarray(input_f)
correct_k = [0, 50, 100, 150, 200, 250, 300]
correct_k = np.ma.asanyarray(correct_k)
input_f[inds] = ma.masked
correct_k[inds] = ma.masked
returned_k = thermo.ftok(input_f)
npt.assert_almost_equal(returned_k, correct_k)
def test_theta():
# single
input_p = 940
input_t = 5
input_p2 = 1000.
correct_theta = 9.961049492262532
returned_theta = thermo.theta(input_p, input_t, input_p2)
npt.assert_almost_equal(returned_theta, correct_theta)
# array
input_p = np.asarray([940, 850])
input_t = np.asarray([5, 10])
input_p2 = np.asarray([1000., 1000.])
correct_theta = [9.961049492262532, 23.457812111895066]
returned_theta = thermo.theta(input_p, input_t, input_p2)
npt.assert_almost_equal(returned_theta, correct_theta)
def test_wobf():
input_t = 10
correct_c = 10.192034543230415
returned_c = thermo.wobf(input_t)
npt.assert_almost_equal(returned_c, correct_c)
input_t = [10, 0, -10]
input_t = np.asanyarray(input_t)
correct_c = [10.192034543230415, 6.411053315058521, 3.8633154447163114]
correct_c = np.asanyarray(correct_c)
returned_c = thermo.wobf(input_t)
npt.assert_almost_equal(returned_c, correct_c)
def test_lcltemp():
input_t = 10
input_td = 5
correct_t = 3.89818375
returned_t = thermo.lcltemp(input_t, input_td)
npt.assert_almost_equal(returned_t, correct_t)
input_t = np.asanyarray([20, 10, 0, -5])
input_td = np.asanyarray([15, 8, -1, -10])
correct_t = [13.83558375, 7.54631416, -1.21632173, -11.00791625]
correct_t = np.asanyarray(correct_t)
returned_t = thermo.lcltemp(input_t, input_td)
npt.assert_almost_equal(returned_t, correct_t)
def test_thalvl():
input_theta = 10
input_t = 5
correct_p = 939.5475008003834
returned_p = thermo.thalvl(input_theta, input_t)
npt.assert_almost_equal(returned_p, correct_p)
input_theta = np.asanyarray([5, 12, 25])
input_t = np.asanyarray([5, 10, 0.])
correct_p = [1000., 975.6659847653189, 736.0076986893786]
correct_p = np.asanyarray(correct_p)
returned_p = thermo.thalvl(input_theta, input_t)
npt.assert_almost_equal(returned_p, correct_p)
def test_drylift():
input_p = 950
input_t = 30
input_td = 25
correct_p = 883.4367363248148
correct_t = 23.77298375
returned_p, returned_t = thermo.drylift(input_p, input_t, input_td)
npt.assert_almost_equal(returned_p, correct_p)
npt.assert_almost_equal(returned_t, correct_t)
input_p = np.asarray([950, 975, 1013, 900])
input_t = np.asarray([30, 10, 22, 40])
input_td = np.asarray([25, -10, 18, 0])
correct_p = np.asarray([883.4367363248148, 716.8293994988512,
954.7701032005202, 504.72627541064145])
correct_t = np.asarray([23.77298375, -13.822639999999996,
17.04965568, -7.6987199999999945])
returned_p, returned_t = thermo.drylift(input_p, input_t, input_td)
npt.assert_almost_equal(returned_p, correct_p)
npt.assert_almost_equal(returned_t, correct_t)
def test_satlift():
input_p = 850
input_thetam = 20
correct_t = 13.712979340608157
returned_t = thermo.satlift(input_p, input_thetam)
npt.assert_almost_equal(returned_t, correct_t)
def test_wetlift():
input_p = 700
input_t = 15
input_p2 = 100
correct_t = -81.27400812504021
returned_t = thermo.wetlift(input_p, input_t, input_p2)
npt.assert_almost_equal(returned_t, correct_t)
def test_lifted():
input_p = 950
input_t = 30
input_td = 25
input_lev = 100
correct_t = -79.05621246586672
returned_t = thermo.lifted(input_p, input_t, input_td, input_lev)
npt.assert_almost_equal(returned_t, correct_t)
def test_vappres():
input_t = 25
correct_p = 31.670078513287617
returned_p = thermo.vappres(input_t)
npt.assert_almost_equal(returned_p, correct_p)
input_t = np.asanyarray([0, 5, 10, 15, 20, 25])
correct_p = [6.107954896017587, 8.719365306196854, 12.2722963940349,
17.04353238898728, 23.37237439430437, 31.670078513287617]
correct_p = np.asanyarray(correct_p)
returned_p = thermo.vappres(input_t)
npt.assert_almost_equal(returned_p, correct_p)
def test_mixratio():
input_p = 950
input_t = 25
correct_w = 21.549675456205275
returned_w = thermo.mixratio(input_p, input_t)
npt.assert_almost_equal(returned_w, correct_w)
input_p = np.asanyarray([1013, 1000, 975, 950, 900])
input_t = np.asanyarray([26, 15, 20, 10, 10])
correct_w = [21.448870702611913, 10.834359059077558, 15.346544211592512,
8.17527964576288, 8.633830400361578]
correct_w = np.asanyarray(correct_w)
returned_w = thermo.mixratio(input_p, input_t)
npt.assert_almost_equal(returned_w, correct_w)
def test_temp_at_mixrat():
input_w = 14
input_p = 950
correct_t = 18.25602418045935
returned_t = thermo.temp_at_mixrat(input_w, input_p)
npt.assert_almost_equal(returned_t, correct_t)
"""IO methods for raw wind data, which may come from 4 different sources:
- HFMETARs (high-frequency [1- and 5-minute] meteorological aerodrome reports)
- MADIS (Meteorological Assimilation Data Ingest System)
- Oklahoma Mesonet stations
- Storm Events (dataset with local storm reports)
"""
import copy
import os.path
import numpy
import pandas
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import time_periods
from gewittergefahr.gg_utils import geodetic_utils
from gewittergefahr.gg_utils import longitude_conversion as lng_conversion
from gewittergefahr.gg_utils import number_rounding as rounder
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
TOLERANCE = 1e-6
WIND_DIR_DEFAULT_DEG = 0.
DEGREES_TO_RADIANS = numpy.pi / 180
RADIANS_TO_DEGREES = 180. / numpy.pi
HOURS_TO_SECONDS = 3600
KT_TO_METRES_PER_SECOND = 1.852 / 3.6
TIME_FORMAT_MONTH_YEAR = '%Y%m'
TIME_FORMAT_SECOND = '%Y-%m-%d-%H%M%S'
PROCESSED_FILE_PREFIX = 'wind-observations'
PROCESSED_FILE_EXTENSION = '.csv'
HFMETAR_DATA_SOURCE = 'hfmetar'
MADIS_DATA_SOURCE = 'madis'
OK_MESONET_DATA_SOURCE = 'ok_mesonet'
STORM_EVENTS_DATA_SOURCE = 'storm_events'
MERGED_DATA_SOURCE = 'merged'
PRIMARY_UNMERGED_DATA_SOURCES = [
HFMETAR_DATA_SOURCE, MADIS_DATA_SOURCE, OK_MESONET_DATA_SOURCE,
STORM_EVENTS_DATA_SOURCE
]
PRIMARY_DATA_SOURCES = PRIMARY_UNMERGED_DATA_SOURCES + [MERGED_DATA_SOURCE]
MADIS_COOP_DATA_SOURCE = 'coop'
MADIS_CRN_DATA_SOURCE = 'crn'
MADIS_HCN_DATA_SOURCE = 'hcn'
MADIS_HFMETAR_DATA_SOURCE = 'hfmetar'
MADIS_MARITIME_DATA_SOURCE = 'maritime'
MADIS_MESONET_DATA_SOURCE = 'mesonet'
MADIS_METAR_DATA_SOURCE = 'metar'
MADIS_NEPP_DATA_SOURCE = 'nepp'
MADIS_SAO_DATA_SOURCE = 'sao'
MADIS_URBANET_DATA_SOURCE = 'urbanet'
SECONDARY_DATA_SOURCES = [
MADIS_COOP_DATA_SOURCE, MADIS_CRN_DATA_SOURCE, MADIS_HCN_DATA_SOURCE,
MADIS_HFMETAR_DATA_SOURCE, MADIS_MARITIME_DATA_SOURCE,
MADIS_MESONET_DATA_SOURCE, MADIS_METAR_DATA_SOURCE, MADIS_NEPP_DATA_SOURCE,
MADIS_SAO_DATA_SOURCE, MADIS_URBANET_DATA_SOURCE
]
PRIMARY_SOURCE_COLUMN = 'primary_source'
SECONDARY_SOURCE_COLUMN = 'secondary_source'
MIN_WIND_DIRECTION_DEG = 0.
MAX_WIND_DIRECTION_DEG = 360. - TOLERANCE
MIN_SIGNED_WIND_SPEED_M_S01 = -100. * KT_TO_METRES_PER_SECOND
MIN_ABSOLUTE_WIND_SPEED_M_S01 = 0.
MAX_WIND_SPEED_M_S01 = 100. * KT_TO_METRES_PER_SECOND
MIN_ELEVATION_M_ASL = -418. # Lowest point on land (shore of Dead Sea).
MAX_ELEVATION_M_ASL = 8848. # Highest point on land (Mount Everest).
MIN_LATITUDE_DEG = -90.
MAX_LATITUDE_DEG = 90.
MIN_LONGITUDE_DEG = -180.
MAX_LONGITUDE_DEG = 360.
MIN_LNG_NEGATIVE_IN_WEST_DEG = -180.
MAX_LNG_NEGATIVE_IN_WEST_DEG = 180.
MIN_LNG_POSITIVE_IN_WEST_DEG = 0.
MAX_LNG_POSITIVE_IN_WEST_DEG = 360.
STATION_ID_COLUMN = 'station_id'
STATION_NAME_COLUMN = 'station_name'
LATITUDE_COLUMN = 'latitude_deg'
LONGITUDE_COLUMN = 'longitude_deg'
ELEVATION_COLUMN = 'elevation_m_asl'
UTC_OFFSET_COLUMN = 'utc_offset_hours'
WIND_SPEED_COLUMN = 'wind_speed_m_s01'
WIND_DIR_COLUMN = 'wind_direction_deg'
WIND_GUST_SPEED_COLUMN = 'wind_gust_speed_m_s01'
WIND_GUST_DIR_COLUMN = 'wind_gust_direction_deg'
U_WIND_COLUMN = 'u_wind_m_s01'
V_WIND_COLUMN = 'v_wind_m_s01'
TIME_COLUMN = 'unix_time_sec'
REQUIRED_STATION_METADATA_COLUMNS = [
STATION_ID_COLUMN, STATION_NAME_COLUMN, LATITUDE_COLUMN, LONGITUDE_COLUMN,
ELEVATION_COLUMN
]
STATION_METADATA_COLUMNS = (
REQUIRED_STATION_METADATA_COLUMNS + [UTC_OFFSET_COLUMN]
)
STATION_METADATA_COLUMN_TYPE_DICT = {
STATION_ID_COLUMN: str, STATION_NAME_COLUMN: str,
LATITUDE_COLUMN: numpy.float64, LONGITUDE_COLUMN: numpy.float64,
ELEVATION_COLUMN: numpy.float64, UTC_OFFSET_COLUMN: numpy.float64}
WIND_COLUMNS = REQUIRED_STATION_METADATA_COLUMNS + [
TIME_COLUMN, U_WIND_COLUMN, V_WIND_COLUMN
]
WIND_COLUMN_TYPE_DICT = copy.deepcopy(STATION_METADATA_COLUMN_TYPE_DICT)
WIND_COLUMN_TYPE_DICT.update({
TIME_COLUMN: numpy.int64,
U_WIND_COLUMN: numpy.float64,
V_WIND_COLUMN: numpy.float64
})
def _primary_and_secondary_sources_to_table():
"""Creates pandas DataFrame with all pairs of primary/secondary data source.
:return: primary_and_secondary_source_pairs_as_table: pandas DataFrame with
columns listed below.
primary_and_secondary_source_pairs_as_table.primary_source: Name of primary
data source.
primary_and_secondary_source_pairs_as_table.secondary_source: Name of
secondary source (None if primary_source != "madis").
"""
unique_primary_sources = set(PRIMARY_UNMERGED_DATA_SOURCES)
unique_primary_sources.remove(MADIS_DATA_SOURCE)
unique_primary_sources = list(unique_primary_sources)
num_secondary_sources = len(SECONDARY_DATA_SOURCES)
primary_sources_to_append = [MADIS_DATA_SOURCE] * num_secondary_sources
secondary_sources_to_prepend = [None] * len(unique_primary_sources)
primary_source_by_pair = unique_primary_sources + primary_sources_to_append
secondary_source_by_pair = (
secondary_sources_to_prepend + SECONDARY_DATA_SOURCES)
primary_and_secondary_source_pairs_as_dict = {
PRIMARY_SOURCE_COLUMN: primary_source_by_pair,
SECONDARY_SOURCE_COLUMN: secondary_source_by_pair}
return pandas.DataFrame.from_dict(
primary_and_secondary_source_pairs_as_dict)
def _check_elevations(elevations_m_asl):
"""Finds invalid surface elevations.
N = number of elevations
:param elevations_m_asl: length-N numpy array of elevations (metres above
sea level).
:return: invalid_indices: 1-D numpy array with indices of invalid surface
elevations. For example, if 5th and 12th elevations are invalid, this
array will contain 4 and 11.
"""
error_checking.assert_is_real_numpy_array(elevations_m_asl)
error_checking.assert_is_numpy_array(elevations_m_asl, num_dimensions=1)
valid_flags = numpy.logical_and(elevations_m_asl >= MIN_ELEVATION_M_ASL,
elevations_m_asl <= MAX_ELEVATION_M_ASL)
return numpy.where(numpy.invert(valid_flags))[0]
def _check_wind_directions(wind_directions_deg):
"""Finds invalid wind directions.
N = number of observations
:param wind_directions_deg: length-N numpy array of wind directions (degrees
of origin).
:return: invalid_indices: 1-D numpy array with indices of invalid
directions.
"""
error_checking.assert_is_real_numpy_array(wind_directions_deg)
error_checking.assert_is_numpy_array(wind_directions_deg, num_dimensions=1)
valid_flags = numpy.logical_and(
wind_directions_deg >= MIN_WIND_DIRECTION_DEG,
wind_directions_deg <= MAX_WIND_DIRECTION_DEG)
return numpy.where(numpy.invert(valid_flags))[0]
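# Illustrative example (not part of the original module): for directions
# numpy.array([350., 360., -10.]), indices 1 and 2 are returned as invalid,
# since the valid range is [0, 360) degrees.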
def _remove_duplicate_observations(wind_table):
"""Removes duplicate wind observations.
:param wind_table: See documentation for write_processed_file.
:return: wind_table: Same as input, but maybe with fewer rows.
"""
wind_speeds_m_s01 = numpy.sqrt(
wind_table[U_WIND_COLUMN].values ** 2 +
wind_table[V_WIND_COLUMN].values ** 2)
num_observations = len(wind_speeds_m_s01)
observation_strings = [''] * num_observations
for i in range(num_observations):
observation_strings[i] = '{0:.2f}_{1:.2f}_{2:d}_{3:.2f}'.format(
wind_table[LATITUDE_COLUMN].values[i],
wind_table[LONGITUDE_COLUMN].values[i],
wind_table[TIME_COLUMN].values[i], wind_speeds_m_s01[i])
_, unique_indices = numpy.unique(
numpy.asarray(observation_strings), return_index=True)
return wind_table.iloc[unique_indices]
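# Each observation is reduced to a key like '35.23_262.54_1303912800_12.50'
# (latitude_longitude_time_speed); the values shown are illustrative only.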
def _get_pathless_processed_file_name(start_time_unix_sec=None,
end_time_unix_sec=None,
primary_source=None,
secondary_source=None):
"""Generates pathless name for processed wind file.
:param start_time_unix_sec: Start time.
:param end_time_unix_sec: End time.
:param primary_source: String ID for primary data source.
:param secondary_source: String ID for secondary data source.
:return: pathless_processed_file_name: Pathless name for processed wind
file.
"""
if primary_source == MADIS_DATA_SOURCE:
combined_source = '{0:s}_{1:s}'.format(primary_source, secondary_source)
else:
combined_source = primary_source.replace('_', '-')
return '{0:s}_{1:s}_{2:s}_{3:s}{4:s}'.format(
PROCESSED_FILE_PREFIX, combined_source,
time_conversion.unix_sec_to_string(
start_time_unix_sec, TIME_FORMAT_SECOND),
time_conversion.unix_sec_to_string(
end_time_unix_sec, TIME_FORMAT_SECOND),
PROCESSED_FILE_EXTENSION
)
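# Hypothetical example: primary_source = 'ok_mesonet' yields a name like
# 'wind-observations_ok-mesonet_2011-04-27-000000_2011-04-27-235959.csv'
# (the times shown are illustrative).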
def check_wind_speeds(wind_speeds_m_s01, one_component=False):
"""Finds invalid wind speeds.
N = number of observations.
:param wind_speeds_m_s01: length-N numpy array of wind speeds (m/s).
:param one_component: Boolean flag. If True, wind speeds are only one
component (either u or v), which means that they can be negative. If
False, wind speeds are absolute (vector magnitudes), so they cannot be
negative.
:return: invalid_indices: 1-D numpy array with indices of invalid speeds.
"""
error_checking.assert_is_real_numpy_array(wind_speeds_m_s01)
error_checking.assert_is_numpy_array(wind_speeds_m_s01, num_dimensions=1)
error_checking.assert_is_boolean(one_component)
if one_component:
this_min_wind_speed_m_s01 = MIN_SIGNED_WIND_SPEED_M_S01
else:
this_min_wind_speed_m_s01 = MIN_ABSOLUTE_WIND_SPEED_M_S01
valid_flags = numpy.logical_and(
wind_speeds_m_s01 >= this_min_wind_speed_m_s01,
wind_speeds_m_s01 <= MAX_WIND_SPEED_M_S01)
return numpy.where(numpy.invert(valid_flags))[0]
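# Illustrative example: check_wind_speeds(numpy.array([5., -5.]),
# one_component=False) returns array([1]), since absolute (vector-magnitude)
# speeds cannot be negative.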
def check_data_sources(
primary_source, secondary_source=None, allow_merged=False):
"""Ensures that data sources are valid.
:param primary_source: String ID for primary data source (must be in
PRIMARY_DATA_SOURCES or PRIMARY_UNMERGED_DATA_SOURCES).
:param secondary_source: String ID for secondary source. If primary_source
!= "madis", this should be None. If primary_source == "madis", this
must be in SECONDARY_DATA_SOURCES.
:param allow_merged: Boolean flag. If True, will allow "merged" as primary
data source. If False, will not allow "merged".
:raises: ValueError: if primary_source or secondary_source is invalid.
"""
if allow_merged:
valid_primary_sources = copy.deepcopy(PRIMARY_DATA_SOURCES)
else:
valid_primary_sources = copy.deepcopy(PRIMARY_UNMERGED_DATA_SOURCES)
if primary_source not in valid_primary_sources:
error_string = (
'\n\n' + str(valid_primary_sources) + '\n\nValid primary sources ' +
'(listed above) do not include "' + primary_source + '".')
raise ValueError(error_string)
if (primary_source == MADIS_DATA_SOURCE and
secondary_source not in SECONDARY_DATA_SOURCES):
error_string = (
'\n\n' + str(SECONDARY_DATA_SOURCES) + '\n\nValid secondary ' +
'sources (listed above) do not include "' + secondary_source + '".')
raise ValueError(error_string)
def append_source_to_station_id(station_id, primary_source=None,
secondary_source=None):
"""Appends data source to station ID.
:param station_id: String ID for station.
:param primary_source: String ID for primary data source.
:param secondary_source: String ID for secondary data source.
:return: station_id: Same as input, but with data source appended.
"""
check_data_sources(primary_source, secondary_source, allow_merged=False)
if primary_source == MADIS_DATA_SOURCE:
combined_source = '{0:s}_{1:s}'.format(primary_source, secondary_source)
else:
combined_source = primary_source.replace('_', '-')
return '{0:s}_{1:s}'.format(station_id, combined_source)
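# Illustrative examples (station IDs are hypothetical):
# append_source_to_station_id('KOUN', primary_source='madis',
# secondary_source='metar') returns 'KOUN_madis_metar';
# append_source_to_station_id('NRMN', primary_source='ok_mesonet') returns
# 'NRMN_ok-mesonet'.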
def remove_invalid_rows(input_table, check_speed_flag=False,
check_direction_flag=False, check_u_wind_flag=False,
check_v_wind_flag=False, check_lat_flag=False,
check_lng_flag=False, check_elevation_flag=False,
check_time_flag=False):
"""Removes any row with invalid data from pandas DataFrame.
However, this method does not remove rows with invalid wind direction or
elevation. It simply sets the wind direction or elevation to NaN, so that
it will not be mistaken for valid data. Also, this method converts
longitudes to positive (180...360 deg E) in western hemisphere.
:param input_table: pandas DataFrame.
:param check_speed_flag: Boolean flag. If True, will check wind speed.
:param check_direction_flag: Boolean flag. If True, will check wind
direction.
:param check_u_wind_flag: Boolean flag. If True, will check u-wind.
:param check_v_wind_flag: Boolean flag. If True, will check v-wind.
:param check_lat_flag: Boolean flag. If True, will check latitude.
:param check_lng_flag: Boolean flag. If True, will check longitude.
:param check_elevation_flag: Boolean flag. If True, will check elevation.
:param check_time_flag: Boolean flag. If True, will check time.
:return: output_table: Same as input_table, except that some rows may be
gone.
"""
error_checking.assert_is_boolean(check_speed_flag)
error_checking.assert_is_boolean(check_direction_flag)
error_checking.assert_is_boolean(check_u_wind_flag)
error_checking.assert_is_boolean(check_v_wind_flag)
error_checking.assert_is_boolean(check_lat_flag)
error_checking.assert_is_boolean(check_lng_flag)
error_checking.assert_is_boolean(check_elevation_flag)
error_checking.assert_is_boolean(check_time_flag)
if check_speed_flag:
invalid_sustained_indices = check_wind_speeds(
input_table[WIND_SPEED_COLUMN].values, one_component=False)
input_table[WIND_SPEED_COLUMN].values[
invalid_sustained_indices] = numpy.nan
invalid_gust_indices = check_wind_speeds(
input_table[WIND_GUST_SPEED_COLUMN].values, one_component=False)
input_table[WIND_GUST_SPEED_COLUMN].values[
invalid_gust_indices] = numpy.nan
invalid_indices = list(
set(invalid_gust_indices).intersection(invalid_sustained_indices))
input_table.drop(input_table.index[invalid_indices], axis=0,
inplace=True)
if check_direction_flag:
invalid_indices = _check_wind_directions(
input_table[WIND_DIR_COLUMN].values)
input_table[WIND_DIR_COLUMN].values[invalid_indices] = numpy.nan
invalid_indices = _check_wind_directions(
input_table[WIND_GUST_DIR_COLUMN].values)
input_table[WIND_GUST_DIR_COLUMN].values[invalid_indices] = numpy.nan
if check_u_wind_flag:
invalid_indices = check_wind_speeds(
input_table[U_WIND_COLUMN].values, one_component=True)
input_table.drop(input_table.index[invalid_indices], axis=0,
inplace=True)
if check_v_wind_flag:
invalid_indices = check_wind_speeds(
input_table[V_WIND_COLUMN].values, one_component=True)
input_table.drop(input_table.index[invalid_indices], axis=0,
inplace=True)
if check_lat_flag:
invalid_indices = geodetic_utils.find_invalid_latitudes(
input_table[LATITUDE_COLUMN].values)
input_table.drop(
input_table.index[invalid_indices], axis=0, inplace=True)
if check_lng_flag:
invalid_indices = geodetic_utils.find_invalid_longitudes(
input_table[LONGITUDE_COLUMN].values,
sign_in_western_hemisphere=geodetic_utils.EITHER_SIGN_LONGITUDE_ARG)
input_table.drop(
input_table.index[invalid_indices], axis=0, inplace=True)
input_table[LONGITUDE_COLUMN] = (
lng_conversion.convert_lng_positive_in_west(
input_table[LONGITUDE_COLUMN].values))
if check_elevation_flag:
invalid_indices = _check_elevations(
input_table[ELEVATION_COLUMN].values)
input_table[ELEVATION_COLUMN].values[invalid_indices] = numpy.nan
if check_time_flag:
invalid_flags = numpy.invert(input_table[TIME_COLUMN].values > 0)
invalid_indices = numpy.where(invalid_flags)[0]
input_table.drop(input_table.index[invalid_indices], axis=0,
inplace=True)
return input_table
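# Minimal sketch of how the cleaning routine above is typically driven (added for
# illustration).  `wind_table` is assumed to be a pandas DataFrame whose columns are named
# by the module-level constants used above (WIND_SPEED_COLUMN, LATITUDE_COLUMN, etc.).
def _example_remove_invalid_rows(wind_table):  # hypothetical helper
    return remove_invalid_rows(
        wind_table, check_speed_flag=True, check_lat_flag=True,
        check_lng_flag=True, check_time_flag=True)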
def get_max_of_sustained_and_gust(wind_speeds_m_s01, wind_gust_speeds_m_s01,
wind_directions_deg,
wind_gust_directions_deg):
"""Converts wind data from 4 variables to 2.
Original variables:
- speed of sustained wind
- direction of sustained wind
- speed of wind gust
- direction of wind gust
New variables:
- speed of max wind (whichever is higher between sustained and gust)
- direction of max wind (whichever is higher between sustained and gust)
"Whichever is higher between sustained and gust" sounds like a stupid
phrase, because gust speed should always be >= sustained speed. However,
due to quality issues, this is not always the case.
N = number of wind observations.
:param wind_speeds_m_s01: length-N numpy array of sustained speeds (m/s).
:param wind_gust_speeds_m_s01: length-N numpy array of gust speeds (m/s).
:param wind_directions_deg: length-N numpy array of sustained directions
(degrees of origin).
:param wind_gust_directions_deg: length-N numpy array of gust directions
(degrees of origin).
:return: max_wind_speeds_m_s01: length-N numpy array of max wind speeds
(m/s).
:return: max_wind_directions_deg: length-N numpy array with directions of
max wind (degrees of origin).
"""
error_checking.assert_is_real_numpy_array(wind_speeds_m_s01)
error_checking.assert_is_numpy_array(wind_speeds_m_s01, num_dimensions=1)
num_observations = len(wind_speeds_m_s01)
error_checking.assert_is_real_numpy_array(wind_gust_speeds_m_s01)
error_checking.assert_is_numpy_array(
wind_gust_speeds_m_s01,
exact_dimensions=numpy.array([num_observations]))
error_checking.assert_is_real_numpy_array(wind_directions_deg)
error_checking.assert_is_numpy_array(
wind_directions_deg,
exact_dimensions=numpy.array([num_observations]))
error_checking.assert_is_real_numpy_array(wind_gust_directions_deg)
error_checking.assert_is_numpy_array(
wind_gust_directions_deg,
exact_dimensions=numpy.array([num_observations]))
wind_speed_matrix_m_s01 = numpy.vstack(
(wind_speeds_m_s01, wind_gust_speeds_m_s01))
wind_direction_matrix_deg = numpy.vstack(
(wind_directions_deg, wind_gust_directions_deg))
max_wind_speeds_m_s01 = numpy.nanmax(wind_speed_matrix_m_s01,
axis=0).astype(numpy.float64)
row_indices = numpy.nanargmax(wind_speed_matrix_m_s01, axis=0)
num_observations = len(max_wind_speeds_m_s01)
column_indices = numpy.linspace(0, num_observations - 1,
num=num_observations, dtype=int)
linear_indices = numpy.ravel_multi_index((row_indices, column_indices),
(2, num_observations))
all_wind_directions_deg = numpy.reshape(wind_direction_matrix_deg,
2 * num_observations)
max_wind_directions_deg = all_wind_directions_deg[linear_indices]
nan_flags = numpy.isnan(max_wind_directions_deg)
nan_indices = numpy.where(nan_flags)[0]
for i in nan_indices:
max_wind_directions_deg[i] = numpy.nanmax(
wind_direction_matrix_deg[:, i])
return max_wind_speeds_m_s01, max_wind_directions_deg
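# Worked example (added for illustration only): with sustained speeds [5, 10] m/s and gust
# speeds [8, NaN] m/s, the max speeds are [8, 10] and each direction comes from whichever
# member supplied the max.
def _example_get_max_of_sustained_and_gust():  # hypothetical helper
    these_speeds = numpy.array([5., 10.])
    these_gust_speeds = numpy.array([8., numpy.nan])
    these_directions = numpy.array([180., 270.])
    these_gust_directions = numpy.array([200., numpy.nan])
    max_speeds, max_directions = get_max_of_sustained_and_gust(
        these_speeds, these_gust_speeds, these_directions, these_gust_directions)
    # Expected: max_speeds ~ [8., 10.] and max_directions ~ [200., 270.].
    return max_speeds, max_directions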
def speed_and_direction_to_uv(wind_speeds_m_s01, wind_directions_deg):
"""Converts wind vectors from speed and direction to u- and v-components.
:param wind_speeds_m_s01: numpy array of wind speeds (metres per second).
:param wind_directions_deg: Equivalent-shape numpy array of wind
directions (direction of origin, as per meteorological convention).
:return: u_winds_m_s01: Equivalent-shape numpy array of u-components (metres
per second).
:return: v_winds_m_s01: Equivalent-shape numpy array of v-components.
"""
error_checking.assert_is_geq_numpy_array(wind_speeds_m_s01, 0.)
error_checking.assert_is_numpy_array(
wind_directions_deg,
exact_dimensions=numpy.array(wind_speeds_m_s01.shape))
these_wind_directions_deg = copy.deepcopy(wind_directions_deg)
these_wind_directions_deg[
numpy.isnan(these_wind_directions_deg)] = WIND_DIR_DEFAULT_DEG
u_winds_m_s01 = -1 * wind_speeds_m_s01 * numpy.sin(
these_wind_directions_deg * DEGREES_TO_RADIANS)
v_winds_m_s01 = -1 * wind_speeds_m_s01 * numpy.cos(
these_wind_directions_deg * DEGREES_TO_RADIANS)
return u_winds_m_s01, v_winds_m_s01
def uv_to_speed_and_direction(u_winds_m_s01, v_winds_m_s01):
"""Converts wind vectors from u- and v-components to speed and direction.
:param u_winds_m_s01: numpy array of u-components (metres per second).
:param v_winds_m_s01: Equivalent-shape numpy array of v-components.
:return: wind_speeds_m_s01: Equivalent-shape numpy array of wind speeds
(metres per second).
:return: wind_directions_deg: Equivalent-shape numpy array of wind
directions (direction of origin, as per meteorological convention).
"""
error_checking.assert_is_numpy_array(
v_winds_m_s01, exact_dimensions=numpy.array(u_winds_m_s01.shape))
wind_directions_deg = RADIANS_TO_DEGREES * numpy.arctan2(
-u_winds_m_s01, -v_winds_m_s01)
wind_directions_deg = numpy.mod(wind_directions_deg + 360., 360)
    wind_speeds_m_s01 = numpy.sqrt(u_winds_m_s01 ** 2 + v_winds_m_s01 ** 2)
    return wind_speeds_m_s01, wind_directions_deg
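# Round-trip sketch (added for illustration only): converting speed/direction to u/v and
# back should recover the originals, apart from the special handling of NaN directions.
def _example_uv_round_trip():  # hypothetical helper
    these_speeds_m_s01 = numpy.array([5., 10.])
    these_directions_deg = numpy.array([90., 225.])  # wind FROM the east, FROM the SW
    u_winds_m_s01, v_winds_m_s01 = speed_and_direction_to_uv(
        these_speeds_m_s01, these_directions_deg)
    recovered_speeds, recovered_directions = uv_to_speed_and_direction(
        u_winds_m_s01, v_winds_m_s01)
    # Expected: recovered_speeds ~ [5., 10.] and recovered_directions ~ [90., 225.].
    return recovered_speeds, recovered_directions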
from transformers import GPT2Tokenizer, GPT2LMHeadModel
import torch
import numpy as np
import os
from fuzzywuzzy import fuzz
# from titlecase import titlecase
class Generator(object):
def __init__(
self, model_dir="models", init_epoch=70,
):
# where to get the model files?
self.model_dir = model_dir
self.init_epoch = init_epoch
self.pretrained_path = self._construct_pretrained_path(
self.model_dir, self.init_epoch
)
# model's start and end tokens
self.START_TKN = "<|startoftext|>"
self.END_TKN = "<|endoftext|>"
# load the model and tokenizer
self.tokenizer = GPT2Tokenizer.from_pretrained("distilgpt2")
self.model = GPT2LMHeadModel.from_pretrained("distilgpt2")
# select torch device (cpu/gpu)
self.device = self._select_device()
self.model = self.model.to(self.device)
@staticmethod
def _construct_pretrained_path(model_dir, epoch):
ptp = os.path.join(model_dir, f"distilgpt2_onion_{epoch}.pt")
assert os.path.exists(ptp), "file DNE"
return ptp
@staticmethod
def _select_device():
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
return device
def _load_weights_from_file(self, path):
self.model.load_state_dict(torch.load(path))
def _load_weights_from_epoch(self, epoch: int):
path = self._construct_pretrained_path(self.model_dir, epoch)
self.model.load_state_dict(torch.load(path))
@staticmethod
def _select_a_top_token(slogits, max_candidates):
index = np.argpartition(slogits, -max_candidates)[-max_candidates:]
top_slogits = slogits[index]
        top_slogits = top_slogits / np.sum(top_slogits)
        # Assumed completion (the original fragment is truncated here): sample one of the
        # top candidate token ids in proportion to its renormalized mass.
        return np.random.choice(index, p=top_slogits)
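# Sketch of the top-candidate sampling above (added for illustration; it relies on the
# assumed `return` completion of _select_a_top_token and uses made-up probabilities in
# place of real softmaxed logits).
def _example_top_token_selection():  # hypothetical helper
    fake_probs = np.array([0.05, 0.10, 0.60, 0.25])
    token_id = Generator._select_a_top_token(fake_probs, max_candidates=2)
    # Only the two highest-mass candidates (indices 2 and 3 here) can be returned, each
    # with probability proportional to its renormalized mass.
    return token_id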
from __future__ import division, print_function, absolute_import
import os
import time
import shutil
import numpy as np
import cv2 as cv
import glob
import scipy.io as sio
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.layers.python.layers import initializers
from Ops import Ops
from DataLoaderNormal import DataLoader
from CommonUtil import logger, safe_rm_mkdir, safe_mkdir
from Constants import consts
log = logger.write
class Trainer(object):
def __init__(self, sess):
self.sess = sess
def train(self,
dataset_dir, # path to dataset
              dataset_training_indices,  # indices of training samples
              dataset_testing_indices,  # indices of testing samples
              results_dir='./results/results_depth_multi_normal3',  # directory to store the results
graph_dir='./results/graph_depth_multi_normal3', # directory as tensorboard working space
batch_size=4, # batch size
epoch_num=9, # epoch number
first_channel=8,
bottle_width=4,
dis_reps=1,
mode='retrain', # training mode: 'retrain' or 'finetune'
pre_model_dir=None): # directory to pre-trained model
"""
Train
        Construct the network, data loader and loss function according to the arguments.
"""
assert batch_size > 1 # tf.squeeze is used, so need to make sure that the batch dim won't be removed
self._setup_result_folder(mode, results_dir, graph_dir)
# logger.set_log_file(results_dir + '/log.txt')
# setups data loader
data_loader_num = 4
data_loaders, val_data_loader = self._setup_data_loader(data_loader_num, batch_size, dataset_dir, dataset_training_indices, dataset_testing_indices)
batch_num = len(dataset_training_indices)//batch_size
test_batch_num = 16 // batch_size
log('#epoch = %d, #batch = %d' % (epoch_num, batch_num))
# loads some testing data for visualization and supervision
test_conc_imgs, test_smpl_v_volumes, test_mesh_volumes = [], [], []
safe_rm_mkdir(results_dir + '/test_gt')
for i in range(test_batch_num):
_, conc_imgs, smpl_v_volumes, mesh_volumes = val_data_loader.queue.get()
test_conc_imgs.append(conc_imgs)
test_smpl_v_volumes.append(smpl_v_volumes)
test_mesh_volumes.append(mesh_volumes)
self._save_tuple(conc_imgs, smpl_v_volumes, mesh_volumes, results_dir+'/test_gt', i)
# setups network and training loss
self._build_network(batch_size, first_channel, bottle_width)
loss_collection = self._build_loss(self.v_d[-1], self.Y, self.M_fv, self.M_sv,
self.Ns, self.n_final, self.dis_real_out, self.dis_fake_out,
lamb_sil=self.lamb_sil, lamb_nml_rf=self.lamb_nml,
lamb_dis=self.lamb_dis, w=0.7)
loss_keys = ['vol_loss', 'sil_loss', 'normal_loss', 'nr_loss', 'recon_loss', 'total_loss']
# setups optimizer and visualizer
recon_loss = loss_collection['recon_loss']
nr_loss = loss_collection['nr_loss']
total_loss = loss_collection['total_loss']
dis_d_loss = loss_collection['dis_d_loss']
recon_opt, nr_opt, all_opt, dis_opt = self._build_optimizer(self.lr, recon_loss, nr_loss, total_loss, dis_d_loss)
merged_scalar_loss, writer = self._setup_summary(self.sess, graph_dir, loss_collection)
self.sess.run(tf.global_variables_initializer())
self.sess.run(tf.local_variables_initializer())
saver = self._setup_saver(pre_model_dir)
for epoch_id in range(epoch_num):
log('Running epoch No.%d' % epoch_id)
loss_log_str = ''
for batch_id in range(batch_num):
iter_id = epoch_id*batch_num+batch_id
lrate = 1e-4 if epoch_id <= epoch_num/3*2 else 1e-5
lrate_d = lrate * 0.1
l_sil = consts.lamb_sil
l_dis = consts.lamb_dis
l_nml_rf = consts.lamb_nml_rf
# training ==============================================================
ind, conc_imgs, smpl_v_volumes, mesh_volumes = data_loaders[iter_id % data_loader_num].queue.get()
f_dict = self._construct_feed_dict(conc_imgs, smpl_v_volumes, mesh_volumes, l_sil, l_dis, l_nml_rf, lrate)
f_dict_d = self._construct_feed_dict(conc_imgs, smpl_v_volumes, mesh_volumes, l_sil, l_dis, l_nml_rf, lrate_d)
if epoch_id <= epoch_num / 3:
out = self.sess.run([recon_opt] + [loss_collection[lk] for lk in loss_keys] + [merged_scalar_loss], feed_dict=f_dict)
loss_curr_list = out[1:-1]
graph_results = out[-1]
elif epoch_id <= epoch_num / 3 * 2:
# for _ in range(dis_reps):
# self.sess.run([dis_opt], feed_dict=f_dict_d)
out = self.sess.run([nr_opt] + [loss_collection[lk] for lk in loss_keys] + [merged_scalar_loss],feed_dict=f_dict)
loss_curr_list = out[1:-1]
graph_results = out[-1]
else:
# for _ in range(dis_reps):
# self.sess.run([dis_opt], feed_dict=f_dict_d)
out = self.sess.run([all_opt] + [loss_collection[lk] for lk in loss_keys] + [merged_scalar_loss], feed_dict=f_dict)
                    loss_curr_list = out[1:-1]
graph_results = out[-1]
writer.add_summary(graph_results, epoch_id * batch_num + batch_id)
scale = 1
log('Epoch %d, Batch %d: '
'vol_loss:%.4f, sil_loss:%.4f, normal_loss:%.4f, nr_loss:%.4f, '
'recon_loss:%.4f, total_loss:%.4f' %
(epoch_id, batch_id, loss_curr_list[0] * scale, loss_curr_list[1] * scale,
loss_curr_list[2] * scale, loss_curr_list[3] * scale,
loss_curr_list[4] * scale, loss_curr_list[5] * scale))
# validation ===========================================================
if iter_id % 5 == 0:
_, conc_imgs, smpl_v_volumes, mesh_volumes = val_data_loader.queue.get()
f_dict = self._construct_feed_dict(conc_imgs, smpl_v_volumes, mesh_volumes, l_sil, l_dis, l_nml_rf, lrate)
loss_val_curr = self.sess.run([loss_collection[lk] for lk in loss_keys], feed_dict=f_dict)
loss_log_str += ('%f %f %f %f %f %f ' % (loss_curr_list[0], loss_curr_list[1], loss_curr_list[2],
loss_curr_list[3], loss_curr_list[4], loss_curr_list[5]))
loss_log_str += ('%f %f %f %f %f %f \n' % (loss_val_curr[0], loss_val_curr[1], loss_val_curr[2],
loss_val_curr[3], loss_val_curr[4], loss_val_curr[5]))
log('End of epoch. ')
with open(os.path.join(results_dir, 'loss_log.txt'), 'a') as fp:
fp.write(loss_log_str)
if epoch_id > 0.5 * epoch_num:
test_dir = os.path.join(results_dir, '%04d' % epoch_id)
safe_rm_mkdir(test_dir)
saver.save(self.sess, os.path.join(results_dir, 'model.ckpt'))
# test the network and save the results
for tbi in range(test_batch_num):
f_dict = {self.X: test_smpl_v_volumes[tbi], self.Y: test_mesh_volumes[tbi],
self.R: test_conc_imgs[tbi][:, :, :, :6]}
n0_p, n1_p, n2_p, n3_p = self.sess.run([self.n0_project, self.n1_project,
self.n2_project, self.n3_project],
feed_dict=f_dict)
nps = np.concatenate((n0_p, n1_p, n2_p, n3_p), axis=-1)
res = self.sess.run(self.v_out, feed_dict=f_dict)
res_n = self.sess.run(self.n_final, feed_dict=f_dict)
self._save_results_raw_training(res, res_n, nps, test_dir, tbi)
# backup model
if True: # epoch_id % 10 == 0:
saver.save(self.sess, os.path.join(results_dir, 'model.ckpt'))
for data in data_loaders:
data.stop_queue = True
val_data_loader.stop_queue = True
@staticmethod
def _setup_result_folder(mode,
results_dir='./results/results_depth_multi_normal3',
graph_dir='./results/graph_depth_multi_normal3'):
# create folders
if mode == 'retrain':
if os.path.exists(results_dir):
log('Warning: %s already exists. It will be removed. ' % results_dir)
shutil.rmtree(results_dir)
if os.path.exists(graph_dir):
log('Warning: %s already exists. It will be removed. ' % graph_dir)
shutil.rmtree(graph_dir)
safe_rm_mkdir(results_dir)
safe_rm_mkdir(graph_dir)
safe_rm_mkdir(results_dir + '/code_bk')
pylist = glob.glob(os.path.join('./', '*.py'))
for pyfile in pylist:
shutil.copy(pyfile, results_dir + '/code_bk')
@staticmethod
def _setup_data_loader(data_loader_num, batch_size, dataset_dir,
dataset_training_indices, dataset_testing_indices):
log('Constructing data loader...')
log('#training_data =', len(dataset_training_indices))
log('#testing_data =', len(dataset_testing_indices))
data_loaders = []
for _ in range(data_loader_num):
data = DataLoader(batch_size, dataset_dir, dataset_training_indices, augmentation=True)
data.daemon = True
data.start()
data_loaders.append(data)
val_data_loader = DataLoader(batch_size, dataset_dir, dataset_testing_indices, augmentation=False)
val_data_loader.daemon = True
val_data_loader.start()
log('DataLoaders start. ')
return data_loaders, val_data_loader
def _construct_feed_dict(self, conc_imgs, smpl_v_volumes, mesh_volumes,
l_sil, l_dis, l_nml_rf, lrate):
in_imgs = conc_imgs[:, :, :, :6] # use only first 6 channels as input
m0, m1 = conc_imgs[:, :, :, 6:7], conc_imgs[:, :, :, 7:8]
n0, n1 = conc_imgs[:, :, :, 10:13], conc_imgs[:, :, :, 13:16]
n2, n3 = conc_imgs[:, :, :, 16:19], conc_imgs[:, :, :, 19:22]
f_dict = {self.X: smpl_v_volumes,
self.Y: mesh_volumes,
self.R: in_imgs,
self.M_fv: m0, self.M_sv: m1,
self.N0: n0, self.N1: n1, self.N2: n2, self.N3: n3,
self.lamb_sil: l_sil, self.lamb_dis: l_dis, self.lamb_nml: l_nml_rf,
self.lr: lrate}
return f_dict
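    # Note on the concatenated-image layout assumed by _construct_feed_dict above (added
    # for clarity; it is inferred from the slicing in this fragment, not an authoritative
    # spec of the dataset format):
    #   channels  0:6   RGB image + SMPL vertex map (network input, self.R)
    #   channels  6:7   front-view silhouette mask (self.M_fv)
    #   channels  7:8   side-view silhouette mask (self.M_sv)
    #   channels  8:10  unused in this fragment
    #   channels 10:22  four 3-channel ground-truth normal maps (self.N0 .. self.N3)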
def test(self,
dataset_dir, # path to testing dataset
dataset_prefix_list, # file name prefix of testing data
pre_model_dir, # path to pretrained model
first_channel=8,
             bottle_width=4):
"""
Test
        Construct the network, data loader and loss function according to the arguments.
"""
log = logger.write
batch_size = 1 # batch size
self._build_network(batch_size, first_channel, bottle_width)
self.sess.run(tf.global_variables_initializer())
self.sess.run(tf.local_variables_initializer())
log('Constructing saver...')
saver = self._setup_saver(pre_model_dir)
for dataset_prefix in dataset_prefix_list:
prefix = dataset_dir + '/' + dataset_prefix
img = cv.cvtColor(cv.imread(prefix + 'color.png'), cv.COLOR_BGR2RGB)
# prefix = './TestingData/test_'
# img = cv.cvtColor(cv.imread(prefix + 'input.jpg'), cv.COLOR_BGR2RGB)
img = np.float32(img) / 255.0
img = DataLoader.resize_and_crop_img(img)
vmap = cv.cvtColor(cv.imread(prefix + 'vmap.png'), cv.COLOR_BGR2RGB)
vmap = np.float32(vmap) / 255.0
vmap = DataLoader.resize_and_crop_img(vmap)
smpl_v_volume = sio.loadmat(prefix + 'volume.mat')
smpl_v_volume = smpl_v_volume['smpl_v_volume']
smpl_v_volume = np.transpose(smpl_v_volume, (2, 1, 0, 3))
smpl_v_volume = np.flip(smpl_v_volume, axis=1)
concat_in = np.concatenate((img, vmap), axis=-1)
concat_in = np.expand_dims(concat_in, axis=0)
smpl_v_volume = np.expand_dims(smpl_v_volume, axis=0)
n0_p, n1_p, n2_p, n3_p = self.sess.run([self.n0_project, self.n1_project, self.n2_project, self.n3_project],
feed_dict={self.X: smpl_v_volume, self.R: concat_in})
nps = np.concatenate((n0_p, n1_p, n2_p, n3_p), axis=-1)
res = self.sess.run(self.v_out, feed_dict={self.X: smpl_v_volume, self.R: concat_in})
res_n = self.sess.run(self.n_final, feed_dict={self.X: smpl_v_volume, self.R: concat_in})
log('Testing results saved to', dataset_dir)
self._save_results_raw_testing(res, res_n, nps, dataset_dir, dataset_prefix)
def test_with_gt(self,
dataset_dir, # path to dataset
                     dataset_testing_indices,  # indices of testing samples
pre_model_dir,
output_dir,
first_channel=8,
                     bottle_width=4):
safe_mkdir(output_dir)
loader = DataLoader(1, dataset_dir, dataset_testing_indices, augmentation=False)
self._build_network(1, first_channel, bottle_width)
self.sess.run(tf.global_variables_initializer())
self.sess.run(tf.local_variables_initializer())
saver = self._setup_saver(pre_model_dir)
for i in dataset_testing_indices:
conc_imgs, smpl_v_volumes, mesh_volumes = loader.load_tuple_batch([i])
f_dict = self._construct_feed_dict(conc_imgs, smpl_v_volumes, mesh_volumes, 0,
0, 0, 0)
n0_p, n1_p, n2_p, n3_p = self.sess.run([self.n0_project, self.n1_project,
self.n2_project, self.n3_project],
feed_dict=f_dict)
nps = np.concatenate((n0_p, n1_p, n2_p, n3_p), axis=-1)
res, res_n = self.sess.run([self.v_out, self.n_final],
feed_dict=f_dict)
log('Testing results saved to ', output_dir)
self._save_results_raw_testing(res, res_n, nps, output_dir, '%08d_' % i)
def _build_network(self, batch_size, first_channel, bottle_width):
"""
Builds the image-guided volume-to-volume network
Warning: the network input format: BDHWC for volume, and BHWC for image
"""
log('Constructing network...')
with tf.name_scope('params'):
self.lamb_sil = tf.placeholder(dtype=tf.float32)
self.lamb_dis = tf.placeholder(dtype=tf.float32)
self.lamb_nml = tf.placeholder(dtype=tf.float32)
self.lr = tf.placeholder(dtype=tf.float32)
with tf.name_scope('input'):
self.X = tf.placeholder(shape=[batch_size, consts.dim_w, consts.dim_h, consts.dim_w, 3], dtype=tf.float32)
self.Y = tf.placeholder(shape=[batch_size, consts.dim_w, consts.dim_h, consts.dim_w, 1], dtype=tf.float32)
self.R = tf.placeholder(shape=[batch_size, 2*consts.dim_h, 2*consts.dim_w, 6], dtype=tf.float32)
self.M_fv = tf.placeholder(shape=[batch_size, 2*consts.dim_h, 2*consts.dim_w, 1], dtype=tf.float32)
self.M_sv = tf.placeholder(shape=[batch_size, 2*consts.dim_h, 2*consts.dim_w, 1], dtype=tf.float32)
self.N0 = tf.placeholder(shape=[batch_size, 2 * consts.dim_h, 2 * consts.dim_w, 3], dtype=tf.float32)
self.N1 = tf.placeholder(shape=[batch_size, 2 * consts.dim_h, 2 * consts.dim_w, 3], dtype=tf.float32)
self.N2 = tf.placeholder(shape=[batch_size, 2 * consts.dim_h, 2 * consts.dim_w, 3], dtype=tf.float32)
self.N3 = tf.placeholder(shape=[batch_size, 2 * consts.dim_h, 2 * consts.dim_w, 3], dtype=tf.float32)
self.Ns = tf.concat([self.N0, self.N1, self.N2, self.N3], axis=-1)
with tf.name_scope('network'):
self.i_e = self._build_image_encoder(self.R, first_channel, bottle_width, logger.write)
self.sft_a, self.sft_b = self._build_affine_params(self.i_e, logger.write)
self.v_e = self._build_volume_encoder(self.X, first_channel, bottle_width, self.sft_a, self.sft_b, logger.write)
self.v_d = self._build_volume_decoder(self.v_e, 1, consts.dim_w, self.sft_a, self.sft_b, logger.write)
self.v_out = self.v_d[-1]
self.d0, self.d1, self.d2, self.d3 = self._build_depth_projector(self.v_out)
self.n0_project = self._build_normal_calculator(self.d0)
self.n1_project = self._build_normal_calculator(self.d1)
self.n2_project = self._build_normal_calculator(self.d2)
self.n3_project = self._build_normal_calculator(self.d3)
self.nr0 = self._build_normal_refiner(self.n0_project, self.R, logger.write)
self.n_final_0 = self.nr0[-1]
self.nr1, self.nr2, self.nr3 = self._build_normal_refiner2(self.n1_project, self.n2_project, self.n3_project, logger.write)
self.n_final_1, self.n_final_2, self.n_final_3 = self.nr1[-1], self.nr2[-1], self.nr3[-1]
self.n_final = tf.concat([self.n_final_0, self.n_final_1, self.n_final_2, self.n_final_3], axis=-1)
self.dis_real_out, self.dis_fake_out = self._build_normal_discriminator(self.n_final, self.Ns, self.M_fv, self.M_sv, self.R, logger.write)
log('The whole graph has %d trainable parameters' % Ops.get_variable_num(logger))
@staticmethod
def _build_image_encoder(R, first_channel, bottle_neck_w, print_fn=None):
"""
        Build the image encoder
"""
R_ = tf.image.resize_bilinear(R, (consts.dim_h, consts.dim_w))
r_shape = R_.get_shape().as_list()
r_w = r_shape[2]
if print_fn is None:
print_fn = print
# calculate network parameters
w_e = [r_w // 2]
c_e = [first_channel]
while w_e[-1] > bottle_neck_w:
w_e.append(w_e[-1]//2)
c_e.append(c_e[-1]*2)
print_fn('-- Image encoder layers\' width', w_e)
print_fn('-- Image encoder layers\' channel', c_e)
layers = [R_]
for c in c_e:
with tf.variable_scope('i_e_%d' % (len(layers))):
nin_shape = layers[-1].get_shape().as_list()
net = slim.conv2d(layers[-1], c, [7, 7], 2, padding='SAME',
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
rate=1, normalizer_fn=slim.batch_norm,
activation_fn=tf.nn.leaky_relu, scope='conv0')
print_fn('-- Image encoder layer %d:'%len(layers), nin_shape, '-->', net.get_shape().as_list())
layers.append(net)
return layers
@staticmethod
def _build_affine_params(E_i, print_fn=None):
if print_fn is None:
print_fn = print
sft_a, sft_b = [], []
for li in range(1, len(E_i)):
with tf.variable_scope('a_p_%d' % (len(sft_a)+1)):
nin_shape = E_i[li].get_shape().as_list()
net_a = slim.conv2d(E_i[li], nin_shape[-1], [1, 1], 1, padding='SAME',
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
rate=1, normalizer_fn=slim.batch_norm,
activation_fn=tf.nn.leaky_relu, scope='conv0_pa')
sft_a.append(net_a)
net_b = slim.conv2d(E_i[li], nin_shape[-1], [1, 1], 1, padding='SAME',
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
rate=1, normalizer_fn=slim.batch_norm,
activation_fn=tf.nn.leaky_relu, scope='conv0_pb')
sft_b.append(net_b)
print_fn('-- SFT parameters layer %d:' % len(sft_a), nin_shape, '-->', net_a.get_shape().as_list())
return sft_a, sft_b
@staticmethod
    def _build_volume_encoder(X, first_channel, bottle_neck_w, sft_params_a, sft_params_b, print_fn=None):
"""
Build the volume encoder
"""
x_shape = X.get_shape().as_list() # (batch, x_dim, y_dim, z_dim, channel)
x_w = x_shape[1]
if print_fn is None:
print_fn = print
# calculate network parameters
w_e = [x_w//2]
        c_e = [first_channel]
while w_e[-1] > bottle_neck_w:
w_e.append(w_e[-1]//2)
c_e.append(c_e[-1]*2)
print_fn('-- Volume encoder layers\' width', w_e)
print_fn('-- Volume encoder layers\' channel', c_e)
layers = [X]
for ci, c in enumerate(c_e):
with tf.variable_scope('v_e_%d' % len(layers)):
nin_shape = layers[-1].get_shape().as_list()
net = slim.conv3d(layers[-1], c, [7, 7, 7], 2, padding='SAME',
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
rate=1, normalizer_fn=slim.batch_norm,
activation_fn=tf.nn.leaky_relu, scope='conv0')
net = Ops.featrue_affine(net, sft_params_a[ci], sft_params_b[ci])
print_fn('-- Volume encoder layer %d:' % len(layers), nin_shape, '-->', net.get_shape().as_list())
layers.append(net)
return layers
@staticmethod
def _build_volume_decoder(layers_e, last_channel, out_w, sft_params_a, sft_params_b, print_fn=None):
"""
Build the volume decoder
"""
Z = layers_e[-1]
z_shape = Z.get_shape().as_list()
z_w = z_shape[1]
z_c = z_shape[-1]
if print_fn is None:
print_fn = print
# calculate network parameters
w_d = [z_w*2]
c_d = [z_c//2]
while w_d[-1] < out_w:
w_d.append(w_d[-1]*2)
c_d.append(c_d[-1]//2)
print_fn('-- Volume decoder layers\' width', w_d)
print_fn('-- Volume decoder layers\' channel', c_d)
layers = [Z]
for ci, c in enumerate(c_d):
with tf.variable_scope('v_d_%d' % len(layers)):
if ci == 0:
net = layers[-1]
else:
net = tf.concat([layers[-1], layers_e[-ci-1]], axis=-1) # U-net structure
nin_shape = net.get_shape().as_list()
net = slim.conv3d_transpose(net, c, [7, 7, 7], 2, padding='SAME',
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
normalizer_fn=slim.batch_norm,
activation_fn=tf.nn.leaky_relu, scope='conv0')
print_fn('-- Volume decoder layer %d:' % len(layers), nin_shape, '-->', net.get_shape().as_list())
layers.append(net)
with tf.variable_scope('v_d_out'):
nin_shape = layers[-1].get_shape().as_list()
net = slim.conv3d(layers[-1], last_channel, [1, 1, 1], 1, padding='SAME',
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
rate=1, normalizer_fn=None,
activation_fn=tf.nn.sigmoid, scope='conv0') # output to (0, 1)
print_fn('-- Volume decoder layer %d:' % len(layers), nin_shape, '-->', net.get_shape().as_list())
layers.append(net)
return layers
@staticmethod
def _build_sil_projector(volume):
with tf.name_scope('projector'):
vshape = volume.get_shape().as_list()
v1 = tf.reshape(volume, (vshape[0], vshape[1], vshape[2], vshape[3])) # remove last dim
fv = tf.reduce_max(v1, axis=1) # project along z-axis
fv = tf.squeeze(fv) # remove z-dim
fv = tf.expand_dims(fv, axis=-1) # add channel dim
sv = tf.reduce_max(v1, axis=3) # project along x-axis
sv = tf.squeeze(sv) # remove x-dim
sv = tf.transpose(sv, (0, 2, 1)) # convert to HW format
sv = tf.expand_dims(sv, axis=-1) # add channel dim
return fv, sv
@staticmethod
def _build_depth_projector(volume):
with tf.name_scope('depth_projector'):
vshape = volume.get_shape().as_list()
v1 = tf.reshape(volume, (vshape[0], vshape[1], vshape[2], vshape[3])) # remove last dim
v1 = tf.sigmoid(9999*(v1-0.5))
d_array = np.asarray(range(consts.dim_w), dtype=np.float32)
d_array = (d_array - (consts.dim_w / 2) + 0.5) * consts.voxel_size
d_array = tf.constant(d_array, dtype=tf.float32)
# front view (view 0) projection (along z-axis)
M = -99
d_array_v0 = tf.reshape(d_array, (1, -1, 1, 1)) # BDHW
depth_volume_0 = M*(1-v1) + d_array_v0 * v1
depth_project_0 = tf.reduce_max(depth_volume_0, axis=1) # max along D (Z) --> BHW
depth_project_0 = tf.reshape(depth_project_0, (vshape[0], vshape[2], vshape[3], 1))
# side view (view 1) projection (along x_axis)
M = 99
d_array_v1 = tf.reshape(d_array, (1, 1, 1, -1))
depth_volume_1 = M*(1-v1) + d_array_v1 * v1
depth_project_1 = tf.reduce_min(depth_volume_1, axis=3) # min along W (X) --> BDH
depth_project_1 = -depth_project_1
depth_project_1 = tf.reshape(tf.transpose(depth_project_1, (0, 2, 1)), (vshape[0], vshape[2], vshape[3], 1))
# back view (view 2) projection (along z-axis)
M = 99
depth_volume_2 = M*(1-v1) + d_array_v0 * v1
depth_project_2 = tf.reduce_min(depth_volume_2, axis=1) # max along D (Z) --> BHW
depth_project_2 = -depth_project_2
depth_project_2 = tf.reshape(depth_project_2, (vshape[0], vshape[2], vshape[3], 1))
# size view (view 3) projection (along x-axis)
M = -99
depth_volume_3 = M*(1-v1) + d_array_v1 * v1
depth_project_3 = tf.reduce_max(depth_volume_3, axis=3) # min along W (X) --> BDH
depth_project_3 = tf.reshape(tf.transpose(depth_project_3, (0, 2, 1)), (vshape[0], vshape[2], vshape[3], 1))
return depth_project_0, depth_project_1, depth_project_2, depth_project_3
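    # Explanatory note (added for clarity): the projection above stays differentiable by
    # first pushing the occupancy volume towards {0, 1} with a steep sigmoid, then giving
    # every voxel its metric coordinate along the viewing axis while empty voxels receive
    # a +/-99 sentinel.  A max (or min) reduction along that axis therefore returns the
    # coordinate of the closest occupied voxel, i.e. a depth map.  For example, a single
    # occupied voxel at z-index k produces front-view depth (k - dim_w/2 + 0.5) *
    # voxel_size at its pixel and the sentinel value elsewhere.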
@staticmethod
def _build_normal_calculator(depth):
d_shape = depth.get_shape().as_list()
batch_sz = d_shape[0]
img_h = d_shape[1]
img_w = d_shape[2]
w_array = np.asarray(range(consts.dim_w), dtype=np.float32)
w_array = (w_array - (consts.dim_w / 2) + 0.5) * consts.voxel_size
w_array = np.reshape(w_array, (1, 1, -1, 1)) # BHWC
w_array = np.tile(w_array, (batch_sz, img_h, 1, 1))
w_map = tf.constant(w_array, dtype=tf.float32)
h_array = np.asarray(range(consts.dim_h), dtype=np.float32)
h_array = (h_array - (consts.dim_h / 2) + 0.5) * consts.voxel_size
h_array = np.reshape(h_array, (1, -1, 1, 1)) # BHWC
h_array = np.tile(h_array, (batch_sz, 1, img_w, 1))
h_map = tf.constant(h_array, dtype=tf.float32)
# vmap = tf.concat([w_map, h_map, depth], axis=-1)
sobel_x = tf.constant([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], tf.float32)
sobel_x_filter = tf.reshape(sobel_x, [3, 3, 1, 1])
sobel_y_filter = tf.transpose(sobel_x_filter, [1, 0, 2, 3])
w_map_dx = tf.nn.conv2d(w_map, sobel_x_filter, strides=[1, 1, 1, 1], padding='SAME')
h_map_dx = tf.nn.conv2d(h_map, sobel_x_filter, strides=[1, 1, 1, 1], padding='SAME')
depth_dx = tf.nn.conv2d(depth, sobel_x_filter, strides=[1, 1, 1, 1], padding='SAME')
dx = tf.concat([w_map_dx, h_map_dx, depth_dx], axis=-1)
w_map_dy = tf.nn.conv2d(w_map, sobel_y_filter, strides=[1, 1, 1, 1], padding='SAME')
h_map_dy = tf.nn.conv2d(h_map, sobel_y_filter, strides=[1, 1, 1, 1], padding='SAME')
depth_dy = tf.nn.conv2d(depth, sobel_y_filter, strides=[1, 1, 1, 1], padding='SAME')
dy = tf.concat([w_map_dy, h_map_dy, depth_dy], axis=-1)
normal = tf.cross(dy, dx)
normal = normal / tf.norm(normal, axis=-1, keepdims=True)
return normal
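    # Explanatory note (added for clarity): the depth map is treated as the z-channel of a
    # vertex map (x, y, depth), Sobel filters give two per-pixel tangent vectors dx and dy,
    # and their cross product dy x dx yields a surface normal oriented towards the camera;
    # the final division normalizes each normal to unit length.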
@staticmethod
def _build_normal_refiner(normal_0, rgb, print_fn=None):
conc_d = tf.image.resize_bilinear(normal_0, (consts.dim_h * 2, consts.dim_w * 2))
conc = tf.concat([conc_d, rgb], axis=-1)
w_e = [consts.dim_w//2]
c_e = [16]
bottle_neck_w = 4
while w_e[-1] > bottle_neck_w:
w_e.append(w_e[-1]//2)
c_e.append(c_e[-1]*2)
if print_fn is None:
print_fn = print
print_fn('-- Normal refiner 0 encoder layers\' width', w_e)
print_fn('-- Normal refiner 0 encoder layers\' channel', c_e)
layers = [conc]
for c in c_e:
with tf.variable_scope('nml_rf_e_%d' % (len(layers))):
nin_shape = layers[-1].get_shape().as_list()
net = slim.conv2d(layers[-1], c, [4, 4], 2, padding='SAME',
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
rate=1, normalizer_fn=slim.batch_norm,
activation_fn=tf.nn.leaky_relu, scope='conv0')
print_fn('-- Normal refiner encoder 0 layer %d:'%len(layers), nin_shape, '-->', net.get_shape().as_list())
layers.append(net)
w_d = [w_e[-1]*2]
c_d = [c_e[-1]//2]
while w_d[-1] < consts.dim_w:
w_d.append(w_d[-1]*2)
c_d.append(c_d[-1]//2)
print_fn('-- Normal refiner 0 decoder layers\' width', w_d)
        print_fn('-- Normal refiner 0 decoder layers\' channel', c_d)
for ci, c in enumerate(c_d):
with tf.variable_scope('nml_rf_d_%d' % (len(layers))):
nin_shape = layers[-1].get_shape().as_list()
net = tf.image.resize_bilinear(layers[-1], (nin_shape[1]*2, nin_shape[2]*2))
net = tf.concat([net, layers[len(w_e)-ci-1]], axis=-1) # U-net structure
net = slim.conv2d(net, c, [4, 4], 1, padding='SAME',
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
rate=1, normalizer_fn=slim.batch_norm,
activation_fn=tf.nn.leaky_relu, scope='conv0')
print_fn('-- Normal refiner decoder layer %d:'%len(layers), nin_shape, '-->', net.get_shape().as_list())
layers.append(net)
with tf.variable_scope('nml_rf_d_out'):
nin_shape = layers[-1].get_shape().as_list()
net = slim.conv2d(layers[-1], 3, [1, 1], 1, padding='SAME',
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
rate=1, normalizer_fn=None,
activation_fn=tf.nn.tanh, scope='conv0') # output to (-1, 1)
net = net + conc_d
print_fn('-- Normal refiner 0 decoder layer %d:' % len(layers), nin_shape, '-->', net.get_shape().as_list())
layers.append(net)
return layers
@staticmethod
def _build_normal_refiner2(normal_1, normal_2, normal_3, print_fn=None):
def build_u_net(normal, reuse, print_fn=None):
conc = tf.image.resize_bilinear(normal, (consts.dim_h * 2, consts.dim_w * 2))
w_e = [consts.dim_w // 2]
c_e = [16]
bottle_neck_w = 4
while w_e[-1] > bottle_neck_w:
w_e.append(w_e[-1] // 2)
c_e.append(c_e[-1] * 2)
if print_fn is None:
print_fn = print
if not reuse:
print_fn('-- Normal refiner 1 encoder layers\' width', w_e)
print_fn('-- Normal refiner 1 encoder layers\' channel', c_e)
layers = [conc]
for c in c_e:
with tf.variable_scope('nml_rf2_e_%d' % (len(layers)), reuse=reuse):
nin_shape = layers[-1].get_shape().as_list()
net = slim.conv2d(layers[-1], c, [4, 4], 2, padding='SAME',
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
rate=1, normalizer_fn=slim.batch_norm,
activation_fn=tf.nn.leaky_relu, scope='conv0')
if not reuse:
print_fn('-- Normal refiner 1 encoder layer %d:' % len(layers), nin_shape, '-->',
net.get_shape().as_list())
layers.append(net)
w_d = [w_e[-1] * 2]
c_d = [c_e[-1] // 2]
while w_d[-1] < consts.dim_w:
w_d.append(w_d[-1] * 2)
c_d.append(c_d[-1] // 2)
if not reuse:
print_fn('-- Normal refiner 1 decoder layers\' width', w_d)
                print_fn('-- Normal refiner 1 decoder layers\' channel', c_d)
for ci, c in enumerate(c_d):
with tf.variable_scope('nml_rf2_d_%d' % (len(layers)), reuse=reuse):
nin_shape = layers[-1].get_shape().as_list()
net = tf.image.resize_bilinear(layers[-1], (nin_shape[1] * 2, nin_shape[2] * 2))
net = tf.concat([net, layers[len(w_e) - ci - 1]], axis=-1) # U-net structure
net = slim.conv2d(net, c, [4, 4], 1, padding='SAME',
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
rate=1, normalizer_fn=slim.batch_norm,
activation_fn=tf.nn.leaky_relu, scope='conv0')
if not reuse:
print_fn('-- Normal refiner 1 decoder layer %d:' % len(layers), nin_shape, '-->',
net.get_shape().as_list())
layers.append(net)
with tf.variable_scope('nml_rf2_d_out', reuse=reuse):
nin_shape = layers[-1].get_shape().as_list()
net = slim.conv2d(layers[-1], 3, [1, 1], 1, padding='SAME',
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
rate=1, normalizer_fn=None,
activation_fn=tf.nn.tanh, scope='conv0') # output to (-1, 1)
net = net + conc
if not reuse:
print_fn('-- Normal refiner 1 decoder layer %d:' % len(layers), nin_shape, '-->',
net.get_shape().as_list())
layers.append(net)
return layers
with tf.name_scope('normal_1_R'):
normal_1_r = build_u_net(normal_1, reuse=False, print_fn=print_fn)
with tf.name_scope('normal_2_R'):
normal_2_r = build_u_net(normal_2, reuse=True, print_fn=print_fn)
with tf.name_scope('normal_3_R'):
normal_3_r = build_u_net(normal_3, reuse=True, print_fn=print_fn)
return normal_1_r, normal_2_r, normal_3_r
@staticmethod
def _build_normal_discriminator(d_pred, d_gt, mask_fv_gt, mask_sv_gt, in_img, print_fn=None):
if print_fn is None:
print_fn = print
m_conc = tf.concat([mask_fv_gt, mask_fv_gt, mask_fv_gt,
mask_sv_gt, mask_sv_gt, mask_sv_gt,
mask_fv_gt, mask_fv_gt, mask_fv_gt,
mask_sv_gt, mask_sv_gt, mask_sv_gt], axis=-1)
d_pred_m = m_conc * d_pred # mask out background
d_gt_m = m_conc * d_gt # mask out background
conc_pred = tf.concat([d_pred_m, in_img], axis=-1)
conc_gt = tf.concat([d_gt_m, in_img], axis=-1)
def build_D(conc, reuse=False):
batch_sz = conc.get_shape().as_list()[0]
layer_w = conc.get_shape().as_list()[2]
w_e = [layer_w // 2]
c_e = [16]
while w_e[-1] > 16:
w_e.append(w_e[-1] // 2)
c_e.append(min(c_e[-1] * 2, 64))
if not reuse:
print_fn('-- Normal discriminator encoder layers\' width', w_e)
print_fn('-- Normal discriminator encoder layers\' channel', c_e)
layers = [conc]
for c in c_e:
with tf.variable_scope('nml_dis_e_%d' % (len(layers)), reuse=reuse):
nin_shape = layers[-1].get_shape().as_list()
net = slim.conv2d(layers[-1], c, [3, 3], 1, padding='SAME',
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
rate=1, normalizer_fn=slim.batch_norm,
activation_fn=tf.nn.leaky_relu, scope='conv0')
net = slim.max_pool2d(net, [2, 2], [2, 2], padding='SAME', scope='maxp0')
if not reuse:
print_fn('-- Normal discriminator encoder layer %d:'%len(layers), nin_shape, '-->', net.get_shape().as_list())
layers.append(net)
with tf.variable_scope('nml_dis_out', reuse=reuse):
nin_shape = layers[-1].get_shape().as_list()
net = slim.conv2d(layers[-1], 1, [1, 1], 1, padding='SAME',
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
rate=1, normalizer_fn=None,
activation_fn=tf.nn.sigmoid, scope='conv0')
if not reuse:
print_fn('-- Normal discriminator encoder layer %d:' % len(layers), nin_shape, '-->', net.get_shape().as_list())
layers.append(net)
return layers
with tf.name_scope('Dis_real'):
d_out_gt = build_D(tf.concat([conc_gt[:, :, :, 0:3], conc_gt[:, :, :, 12:15]], axis=-1), reuse=False)
with tf.name_scope('Dis_fake'):
d_out_pred = build_D(tf.concat([conc_pred[:, :, :, 0:3], conc_pred[:, :, :, 12:15]], axis=-1), reuse=True)
# with tf.name_scope('Dis_real_0'):
# d_out_gt0 = build_D(conc_gt[:, :, :, 0:3], reuse=False)
# with tf.name_scope('Dis_real_1'):
# d_out_gt1 = build_D(conc_gt[:, :, :, 3:6], reuse=True)
# with tf.name_scope('Dis_real_2'):
# d_out_gt2 = build_D(conc_gt[:, :, :, 6:9], reuse=True)
# with tf.name_scope('Dis_real_3'):
# d_out_gt3 = build_D(conc_gt[:, :, :, 9:12], reuse=True)
# with tf.name_scope('Dis_fake_0'):
# d_out_pred0 = build_D(conc_pred[:, :, :, 0:3], reuse=True)
# with tf.name_scope('Dis_fake_1'):
# d_out_pred1 = build_D(conc_pred[:, :, :, 3:6], reuse=True)
# with tf.name_scope('Dis_fake_2'):
# d_out_pred2 = build_D(conc_pred[:, :, :, 6:9], reuse=True)
# with tf.name_scope('Dis_fake_3'):
# d_out_pred3 = build_D(conc_pred[:, :, :, 9:12], reuse=True)
# d_out_gt = tf.concat([d_out_gt0[-1], d_out_gt1[-1], d_out_gt2[-1], d_out_gt3[-1]], axis=-1)
# d_out_pred = tf.concat([d_out_pred0[-1], d_out_pred1[-1], d_out_pred2[-1], d_out_pred3[-1]], axis=-1)
return d_out_gt[-1], d_out_pred[-1]
@staticmethod
def _build_loss(vol_pred, vol_gt,
mask_fv_gt, mask_sv_gt,
normal_hd_gt, normal_hd_pred,
dis_real, dis_fake,
lamb_sil=0.1, lamb_nml_rf=0.01, lamb_dis=0.001,
w=0.7):
log('Constructing loss function...')
s = 1000 # to scale the loss
shp = mask_fv_gt.get_shape().as_list()
with tf.name_scope('loss'):
# volume loss
vol_loss = s * tf.reduce_mean(-w * tf.reduce_mean(vol_gt * tf.log(vol_pred + 1e-8))
- (1 - w) * tf.reduce_mean((1 - vol_gt) * tf.log(1 - vol_pred + 1e-8)))
# silhouette loss
mask_fv_pred, mask_sv_pred = Trainer._build_sil_projector(vol_pred)
#mask_fv_gt_p, mask_sv_gt_p = Trainer._build_sil_projector(vol_gt)
mask_fv_gt_rs = tf.image.resize_bilinear(mask_fv_gt, (shp[1]//2, shp[2]//2))
mask_sv_gt_rs = tf.image.resize_bilinear(mask_sv_gt, (shp[1]//2, shp[2]//2))
sil_loss_fv = s * tf.reduce_mean(-tf.reduce_mean(mask_fv_gt_rs * tf.log(mask_fv_pred + 1e-8))
-tf.reduce_mean((1-mask_fv_gt_rs) * tf.log(1 - mask_fv_pred + 1e-8)))
sil_loss_sv = s * tf.reduce_mean(-tf.reduce_mean(mask_sv_gt_rs * tf.log(mask_sv_pred + 1e-8))
-tf.reduce_mean((1-mask_sv_gt_rs) * tf.log(1 - mask_sv_pred + 1e-8)))
sil_loss = sil_loss_fv + sil_loss_sv
# normal refinement loss
normal_loss = 0
for i in range(4):
normal_hd_gt_ = normal_hd_gt[:, :, :, (i*3):(i*3+3)]
normal_hd_pred_ = normal_hd_pred[:, :, :, (i*3):(i*3+3)]
normal_cos = 1 - tf.reduce_sum(normal_hd_gt_*normal_hd_pred_, axis=-1, keepdims=True) \
/ (tf.norm(normal_hd_gt_, axis=-1, keepdims=True)*tf.norm(normal_hd_pred_, axis=-1, keepdims=True))
# mask out invalid areas
if i % 2 == 0:
normal_loss += s * tf.reduce_mean(mask_fv_gt*normal_cos)
normal_loss += s * 0.001 * tf.reduce_mean(mask_fv_gt*tf.square(normal_hd_pred_-normal_hd_gt_))
else:
normal_loss += s * tf.reduce_mean(mask_sv_gt*normal_cos)
normal_loss += s * 0.001 * tf.reduce_mean(mask_sv_gt*tf.square(normal_hd_pred_-normal_hd_gt_))
# normal discriminator loss
            dis_d_real_loss = s * tf.reduce_mean(tf.square(1 - dis_real))
            dis_d_fake_loss = s * tf.reduce_mean(tf.square(dis_fake))
dis_d_loss = dis_d_real_loss + dis_d_fake_loss
dis_g_loss = s * tf.reduce_mean(tf.square(1-dis_fake))
# total loss
recon_loss = vol_loss + lamb_sil * sil_loss # reconstruction loss
nr_loss = lamb_nml_rf * normal_loss + lamb_dis * dis_g_loss # normal refinement loss
total_loss = recon_loss + nr_loss # total loss
loss_collection = {}
loss_collection['vol_loss'] = vol_loss
loss_collection['sil_loss'] = sil_loss
loss_collection['normal_loss'] = normal_loss
loss_collection['dis_d_real_loss'] = dis_d_real_loss
loss_collection['dis_d_fake_loss'] = dis_d_fake_loss
loss_collection['dis_d_loss'] = dis_d_loss
loss_collection['dis_g_loss'] = dis_g_loss
loss_collection['recon_loss'] = recon_loss
loss_collection['nr_loss'] = nr_loss
loss_collection['total_loss'] = total_loss
return loss_collection
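    # Summary of the loss composition above (added for clarity):
    #   recon_loss = vol_loss + lamb_sil * sil_loss
    #   nr_loss    = lamb_nml_rf * normal_loss + lamb_dis * dis_g_loss
    #   total_loss = recon_loss + nr_loss
    # dis_d_loss (the LSGAN discriminator terms) is optimized separately, and every term
    # is scaled by s = 1000 purely to keep the logged numbers readable.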
@staticmethod
def _build_optimizer(lr, recon_loss, nr_loss, total_loss, dis_loss):
log('Constructing optimizer...')
recon_var_list = [var for var in tf.trainable_variables() if not var.name.startswith('nml_rf') and not var.name.startswith('nml_dis')]
nr_var_list = [var for var in tf.trainable_variables() if var.name.startswith('nml_rf') and not var.name.startswith('nml_dis')]
all_var_list = [var for var in tf.trainable_variables() if not var.name.startswith('nml_dis')]
dis_var_list = [var for var in tf.trainable_variables() if var.name.startswith('nml_dis')]
with tf.name_scope('recon_optimizer'):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
recon_opt = tf.train.AdamOptimizer(learning_rate=lr).minimize(recon_loss, var_list=recon_var_list)
with tf.name_scope('nml_rf_optimizer'):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
dr_opt = tf.train.AdamOptimizer(learning_rate=lr).minimize(nr_loss, var_list=nr_var_list)
with tf.name_scope('all_optimizer'):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
all_opt = tf.train.AdamOptimizer(learning_rate=lr).minimize(total_loss, var_list=all_var_list)
with tf.name_scope('dis_optimizer'):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
dis_opt = tf.train.AdamOptimizer(learning_rate=lr).minimize(dis_loss, var_list=dis_var_list)
return recon_opt, dr_opt, all_opt, dis_opt
@staticmethod
def _setup_summary(sess, graph_dir, loss_collection):
loss_scalar_s = []
for lk in loss_collection:
loss_s = tf.summary.scalar('loss/%s' % (lk), loss_collection[lk])
loss_scalar_s.append(loss_s)
merged_scalar_loss = tf.summary.merge([loss_s for loss_s in loss_scalar_s])
writer = tf.summary.FileWriter(graph_dir, sess.graph)
return merged_scalar_loss, writer
def _setup_saver(self, pre_model_dir):
# load pre-trained model to fine-tune or resume training
log('Constructing saver...')
if pre_model_dir is not None:
ckpt_prev = tf.train.get_checkpoint_state(pre_model_dir)
if ckpt_prev:
saver = tf.train.Saver(var_list=[var for var in tf.trainable_variables()])
saver.restore(self.sess, ckpt_prev.model_checkpoint_path)
logger.write('Loaded model %s' % pre_model_dir)
else:
logger.write('Unable to load the pretrained model. ')
saver = tf.train.Saver(max_to_keep=1000)
return saver
@staticmethod
def _save_tuple(conc_imgs, smpl_v_volumes, mesh_volumes, dir, idx):
batch_sz = conc_imgs.shape[0]
for bi in range(batch_sz):
cv.imwrite('%s/color_%d.png' % (dir, batch_sz * idx + bi), cv.cvtColor(np.uint8(conc_imgs[bi, :, :, 0:3] * 255), cv.COLOR_BGRA2RGB))
cv.imwrite('%s/vmap_%d.png' % (dir, batch_sz * idx + bi), np.uint8(conc_imgs[bi, :, :, 3:6] * 255))
cv.imwrite('%s/mask_%d.png' % (dir, batch_sz * idx + bi), np.uint8(conc_imgs[bi, :, :, 6] * 255))
cv.imwrite('%s/normal_%d.png' % (dir, batch_sz * idx + bi), np.uint16(conc_imgs[bi, :, :, 10:13] * 32767.5 + 32767.5))
@staticmethod
def _save_results_raw_training(mesh_volume, refined_normal, orig_normal, test_dir, idx):
batch_sz = mesh_volume.shape[0]
for bi in range(batch_sz):
sio.savemat('%s/mesh_volume_%d.obj' % (test_dir, batch_sz*idx+bi),
{'mesh_volume': mesh_volume[bi, :, :, :, 0]}, do_compression=False)
for bi in range(batch_sz):
for vi in range(4):
refined_normal_ = refined_normal[bi, :, :, (3*vi):(3*vi+3)]
refined_normal_l = np.sqrt(refined_normal_[:, :, 0] * refined_normal_[:, :, 0]+
refined_normal_[:, :, 1] * refined_normal_[:, :, 1] +
refined_normal_[:, :, 2] * refined_normal_[:, :, 2])
refined_normal_ /= np.expand_dims(refined_normal_l, axis=-1)
refined_normal[bi, :, :, (3 * vi):(3 * vi + 3)] = refined_normal_
original_normal_ = orig_normal[bi, :, :, (3*vi):(3*vi+3)]
original_normal_l = np.sqrt(original_normal_[:, :, 0] * original_normal_[:, :, 0] +
original_normal_[:, :, 1] * original_normal_[:, :, 1] +
original_normal_[:, :, 2] * original_normal_[:, :, 2])
original_normal_ /= np.expand_dims(original_normal_l, axis=-1)
orig_normal[bi, :, :, (3 * vi):(3 * vi + 3)] = original_normal_
cv.imwrite('%s/normal_0_%d.png' % (test_dir, batch_sz * idx + bi), np.uint16(refined_normal[bi, :, :, 0:3] * 32767.5 + 32767.5))
cv.imwrite('%s/normal_1_%d.png' % (test_dir, batch_sz * idx + bi), np.uint16(refined_normal[bi, :, :, 3:6] * 32767.5 + 32767.5))
cv.imwrite('%s/normal_2_%d.png' % (test_dir, batch_sz * idx + bi), np.uint16(refined_normal[bi, :, :, 6:9] * 32767.5 + 32767.5))
cv.imwrite('%s/normal_3_%d.png' % (test_dir, batch_sz * idx + bi), np.uint16(refined_normal[bi, :, :, 9:12] * 32767.5 + 32767.5))
cv.imwrite('%s/normal_0_%d_.png' % (test_dir, batch_sz * idx + bi), np.uint16(orig_normal[bi, :, :, 0:3] * 32767.5 + 32767.5))
cv.imwrite('%s/normal_1_%d_.png' % (test_dir, batch_sz * idx + bi), np.uint16(orig_normal[bi, :, :, 3:6] * 32767.5 + 32767.5))
cv.imwrite('%s/normal_2_%d_.png' % (test_dir, batch_sz * idx + bi), np.uint16(orig_normal[bi, :, :, 6:9] * 32767.5 + 32767.5))
cv.imwrite('%s/normal_3_%d_.png' % (test_dir, batch_sz * idx + bi), np.uint16(orig_normal[bi, :, :, 9:12] * 32767.5 + 32767.5))
@staticmethod
def _save_results_raw_testing(mesh_volume, refined_normal, orig_normal, test_dir, prefix):
batch_sz = mesh_volume.shape[0]
assert batch_sz == 1 # only use for testing
# mesh_volume = np.squeeze(mesh_volume)
for bi in range(batch_sz):
sio.savemat('%s/%s_volume_out.mat' % (test_dir, prefix),
{'mesh_volume': mesh_volume[bi, :, :, :, 0]}, do_compression=False)
for bi in range(batch_sz):
for vi in range(4):
refined_normal_ = refined_normal[bi, :, :, (3*vi):(3*vi+3)]
refined_normal_l = np.sqrt(refined_normal_[:, :, 0] * refined_normal_[:, :, 0]+
refined_normal_[:, :, 1] * refined_normal_[:, :, 1] +
refined_normal_[:, :, 2] * refined_normal_[:, :, 2])
refined_normal_ /= np.expand_dims(refined_normal_l, axis=-1)
original_normal_ = orig_normal[bi, :, :, (3*vi):(3*vi+3)]
original_normal_l = np.sqrt(original_normal_[:, :, 0] * original_normal_[:, :, 0] +
original_normal_[:, :, 1] * original_normal_[:, :, 1] +
original_normal_[:, :, 2] * original_normal_[:, :, 2])
original_normal_ /= np.expand_dims(original_normal_l, axis=-1)
                cv.imwrite('%s/%s_normal_%d.png' % (test_dir, prefix, vi),
                           np.uint16(refined_normal_ * 32767.5 + 32767.5))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 4 12:32:53 2017
@author: wroscoe
"""
import os
import sys
import time
import json
import datetime
import random
import glob
import numpy as np
import pandas as pd
from PIL import Image
class Tub(object):
"""
A datastore to store sensor data in a key, value format.
Accepts str, int, float, image_array, image, and array data types.
For example:
#Create a tub to store speed values.
>>> path = '~/mydonkey/test_tub'
>>> inputs = ['user/speed', 'cam/image']
>>> types = ['float', 'image']
>>> t=Tub(path=path, inputs=inputs, types=types)
"""
def __init__(self, path, inputs=None, types=None, user_meta=[]):
self.path = os.path.expanduser(path)
#print('path_in_tub:', self.path)
self.meta_path = os.path.join(self.path, 'meta.json')
self.exclude_path = os.path.join(self.path, "exclude.json")
self.df = None
exists = os.path.exists(self.path)
if exists:
#load log and meta
#print("Tub exists: {}".format(self.path))
try:
with open(self.meta_path, 'r') as f:
self.meta = json.load(f)
except FileNotFoundError:
self.meta = {'inputs': [], 'types': []}
try:
with open(self.exclude_path,'r') as f:
excl = json.load(f) # stored as a list
self.exclude = set(excl)
except FileNotFoundError:
self.exclude = set()
try:
self.current_ix = self.get_last_ix() + 1
except ValueError:
self.current_ix = 0
if 'start' in self.meta:
self.start_time = self.meta['start']
else:
self.start_time = time.time()
self.meta['start'] = self.start_time
elif not exists and inputs:
print('Tub does NOT exist. Creating new tub...')
self.start_time = time.time()
#create log and save meta
os.makedirs(self.path)
self.meta = {'inputs': inputs, 'types': types, 'start': self.start_time}
for kv in user_meta:
kvs = kv.split(":")
if len(kvs) == 2:
self.meta[kvs[0]] = kvs[1]
# else exception? print message?
with open(self.meta_path, 'w') as f:
json.dump(self.meta, f)
self.current_ix = 0
self.exclude = set()
print('New tub created at: {}'.format(self.path))
else:
            msg = "The tub path you provided doesn't exist and you didn't pass any meta info (inputs & types) " + \
                  "to create a new tub. Please check your tub path or provide meta info to create a new tub."
raise AttributeError(msg)
def get_last_ix(self):
index = self.get_index()
return max(index)
def update_df(self):
df = pd.DataFrame([self.get_json_record(i) for i in self.get_index(shuffled=False)])
self.df = df
def get_df(self):
if self.df is None:
self.update_df()
return self.df
def get_index(self, shuffled=True):
files = next(os.walk(self.path))[2]
record_files = [f for f in files if f[:6]=='record']
def get_file_ix(file_name):
try:
name = file_name.split('.')[0]
num = int(name.split('_')[1])
except:
num = 0
return num
nums = [get_file_ix(f) for f in record_files]
if shuffled:
random.shuffle(nums)
else:
nums = sorted(nums)
return nums
@property
def inputs(self):
return list(self.meta['inputs'])
@property
def types(self):
return list(self.meta['types'])
def get_input_type(self, key):
input_types = dict(zip(self.inputs, self.types))
return input_types.get(key)
def write_json_record(self, json_data):
path = self.get_json_record_path(self.current_ix)
try:
with open(path, 'w') as fp:
json.dump(json_data, fp)
#print('wrote record:', json_data)
except TypeError:
print('troubles with record:', json_data)
except FileNotFoundError:
raise
except:
print("Unexpected error:", sys.exc_info()[0])
raise
def get_num_records(self):
import glob
files = glob.glob(os.path.join(self.path, 'record_*.json'))
return len(files)
def make_record_paths_absolute(self, record_dict):
#make paths absolute
d = {}
for k, v in record_dict.items():
if type(v) == str: #filename
if '.' in v:
v = os.path.join(self.path, v)
d[k] = v
return d
def check(self, fix=False):
'''
Iterate over all records and make sure we can load them.
Optionally remove records that cause a problem.
'''
print('Checking tub:%s.' % self.path)
print('Found: %d records.' % self.get_num_records())
problems = False
for ix in self.get_index(shuffled=False):
try:
self.get_record(ix)
except:
problems = True
if fix == False:
print('problems with record:', self.path, ix)
else:
print('problems with record, removing:', self.path, ix)
self.remove_record(ix)
if not problems:
print("No problems found.")
def remove_record(self, ix):
'''
        remove data associated with a record
'''
record = self.get_json_record_path(ix)
os.unlink(record)
def put_record(self, data):
"""
Save values like images that can't be saved in the csv log and
return a record with references to the saved values that can
be saved in a csv.
"""
json_data = {}
self.current_ix += 1
for key, val in data.items():
typ = self.get_input_type(key)
if (val is not None) and (typ == 'float'):
# in case val is a numpy.float32, which json doesn't like
json_data[key] = float(val)
elif typ in ['str', 'float', 'int', 'boolean', 'vector']:
json_data[key] = val
            elif typ == 'image':
path = self.make_file_path(key)
val.save(path)
json_data[key]=path
elif typ == 'image_array':
img = Image.fromarray(np.uint8(val))
name = self.make_file_name(key, ext='.jpg')
img.save(os.path.join(self.path, name))
json_data[key]=name
else:
msg = 'Tub does not know what to do with this type {}'.format(typ)
raise TypeError(msg)
json_data['milliseconds'] = int((time.time() - self.start_time) * 1000)
self.write_json_record(json_data)
return self.current_ix
def erase_last_n_records(self, num_erase):
'''
erase N records from the disc and move current back accordingly
'''
last_erase = self.current_ix
first_erase = last_erase - num_erase
self.current_ix = first_erase - 1
if self.current_ix < 0:
self.current_ix = 0
for i in range(first_erase, last_erase):
if i < 0:
continue
self.erase_record(i)
def erase_record(self, i):
json_path = self.get_json_record_path(i)
if os.path.exists(json_path):
os.unlink(json_path)
img_filename = '%d_cam-image_array_.jpg' % (i)
img_path = os.path.join(self.path, img_filename)
if os.path.exists(img_path):
os.unlink(img_path)
def get_json_record_path(self, ix):
return os.path.join(self.path, 'record_'+str(ix)+'.json')
def get_json_record(self, ix):
path = self.get_json_record_path(ix)
try:
with open(path, 'r') as fp:
json_data = json.load(fp)
except UnicodeDecodeError:
raise Exception('bad record: %d. You may want to run `python manage.py check --fix`' % ix)
except FileNotFoundError:
raise
except:
print("Unexpected error:", sys.exc_info()[0])
raise
record_dict = self.make_record_paths_absolute(json_data)
return record_dict
def get_record(self, ix):
json_data = self.get_json_record(ix)
data = self.read_record(json_data)
return data
def read_record(self, record_dict):
data={}
for key, val in record_dict.items():
typ = self.get_input_type(key)
#load objects that were saved as separate files
if typ == 'image_array':
img = Image.open((val))
                val = np.array(img)
            # Assumed completion (the original fragment is truncated here): keep the
            # loaded value and return the assembled record.
            data[key] = val
        return data
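# Usage sketch (added for illustration, mirroring the class docstring; the path and record
# values are made up, and helpers such as make_file_name that are referenced but not shown
# in this fragment are assumed to exist as in the full project).
def _example_tub_round_trip():  # hypothetical helper
    tub = Tub(path='~/mydonkey/test_tub',
              inputs=['user/speed', 'cam/image_array'],
              types=['float', 'image_array'])
    ix = tub.put_record({'user/speed': 0.5,
                         'cam/image_array': np.zeros((120, 160, 3), dtype=np.uint8)})
    record = tub.get_record(ix)
    return record['user/speed']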
"""
Python API for CSR matrices.
"""
import warnings
import logging
import numpy as np
import scipy.sparse as sps
from numba import config
from numba.experimental import structref
from csr.kernels import get_kernel, releasing
from . import _struct, _rows
INTC = np.iinfo(np.intc)
_log = logging.getLogger(__name__)
# ugly hack for a bug on Numba < 0.53
if config.DISABLE_JIT:
class _csr_base:
def __init__(self, nrows, ncols, nnz, ptrs, inds, vals, _cast=True):
self.nrows = nrows
self.ncols = ncols
self.nnz = nnz
if _cast and np.max(ptrs, initial=0) <= INTC.max:
self.rowptrs = np.require(ptrs, np.intc, 'C')
else:
self.rowptrs = np.require(ptrs, requirements='C')
self.colinds = np.require(inds, np.intc, 'C')
if vals is not None:
self._values = np.require(vals, requirements='C')
else:
self._values = None
def _numba_box_(self, *args):
raise NotImplementedError()
NUMBA_ENABLED = False
else:
_csr_base = structref.StructRefProxy
NUMBA_ENABLED = True
class CSR(_csr_base):
"""
Simple compressed sparse row matrix. This is like :py:class:`scipy.sparse.csr_matrix`, with
a few useful differences:
* The value array is optional, for cases in which only the matrix structure is required.
* The value array, if present, is always double-precision.
* It is usable from code compiled in Numba's nopython mode.
You generally don't want to create this class yourself with the constructor. Instead, use one
of its class or static methods. If you do use the constructor, be advised that the class may
reuse the arrays that you pass, but does not guarantee that they will be used.
Not all methods are available from Numba, and a few have restricted signatures. The
documentation for each method notes deviations when in Numba-compiled code.
At the Numba level, matrices with and without value arrays have different types. For the
most part, this is transparent, but if you want to write a Numba function that works on
the values array but only if it is present, it requires writing two versions of the
function and using :py:func:`numba.extending.overload` to dispatch to the correct one.
There are several examples of doing this in the CSR source code. The method
:py:meth:`CSRType.has_values` lets you quickly see if a CSR type instance has
values or not.
Attributes:
nrows(int): the number of rows.
ncols(int): the number of columns.
nnz(int): the number of entries.
rowptrs(numpy.ndarray): the row pointers.
colinds(numpy.ndarray): the column indices.
values(numpy.ndarray or None): the values.
"""
def __new__(cls, nrows, ncols, nnz, rps, cis, vs, _cast=True):
assert nrows >= 0
assert nrows <= INTC.max
assert ncols >= 0
assert ncols <= INTC.max
assert nnz >= 0
nrows = np.intc(nrows)
ncols = np.intc(ncols)
if _cast:
cis = np.require(cis, np.intc, 'C')
if nnz <= INTC.max:
rps = np.require(rps, np.intc, 'C')
else:
rps = np.require(rps, np.int64, 'C')
if vs is not None:
vs = np.require(vs, requirements='C')
if NUMBA_ENABLED:
return _csr_base.__new__(cls, nrows, ncols, nnz, rps, cis, vs)
else:
return _csr_base.__new__(cls)
@classmethod
def empty(cls, nrows, ncols, row_nnzs=None, values=True):
"""
Create an uninitialized CSR matrix.
Args:
nrows(int): the number of rows.
ncols(int): the number of columns.
row_nnzs(array-like):
the number of nonzero entries for each row, or None for an empty matrix.
values(bool, str, or numpy.dtype):
whether it has values or only structure; can be a NumPy data type to
specify a type other than `f8`.
"""
from .constructors import create_empty
assert nrows >= 0
assert ncols >= 0
if row_nnzs is not None:
assert len(row_nnzs) == nrows
nnz = np.sum(row_nnzs, dtype=np.int64)
assert nnz >= 0
rp_dtype = np.intc if nnz <= INTC.max else np.int64
rps = np.zeros(nrows + 1, dtype=rp_dtype)
np.cumsum(row_nnzs, dtype=rp_dtype, out=rps[1:])
cis = np.zeros(nnz, dtype=np.int32)
if values is True:
vs = np.zeros(nnz)
elif values:
vs = np.zeros(nnz, dtype=values)
else:
vs = None
return cls(nrows, ncols, nnz, rps, cis, vs)
else:
return create_empty(nrows, ncols)
@classmethod
def from_coo(cls, rows, cols, vals, shape=None, *, rpdtype=np.intc):
"""
Create a CSR matrix from data in COO format.
Args:
rows(array-like): the row indices.
cols(array-like): the column indices.
vals(array-like): the data values; can be ``None``.
shape(tuple): the array shape, or ``None`` to infer from row & column indices.
"""
from .structure import from_coo
if shape is not None:
nrows, ncols = shape
assert np.max(rows, initial=0) < nrows
assert np.max(cols, initial=0) < ncols
else:
nrows = np.max(rows) + 1
ncols = np.max(cols) + 1
nnz = len(rows)
assert len(cols) == nnz
assert vals is None or len(vals) == nnz
rowptrs, cols, vals = from_coo(nrows, rows, cols, vals)
return cls(nrows, ncols, nnz, rowptrs, cols, vals)
@classmethod
def from_scipy(cls, mat, copy=True):
"""
Convert a scipy sparse matrix to a CSR.
Args:
mat(scipy.sparse.spmatrix): a SciPy sparse matrix.
copy(bool): if ``False``, reuse the SciPy storage if possible.
Returns:
CSR: a CSR matrix.
"""
if not sps.isspmatrix_csr(mat):
mat = mat.tocsr(copy=copy)
rp = np.require(mat.indptr, np.intc, 'C')
if copy and rp is mat.indptr:
rp = rp.copy()
        cs = np.require(mat.indices, np.intc, 'C')
# AUTOGENERATED! DO NOT EDIT! File to edit: dev/01_dataset_ucf101.ipynb (unless otherwise specified).
__all__ = ['UCF101', 'SingleFrameDataset', 'BatchShower', 'SequenceDataset', 'SequenceBatchShower']
# Cell
import numpy as np
import pathlib
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import utils
import torchvision.transforms as transforms
import torch
import subprocess
import wget
from .avi import AVI
# Cell
class UCF101:
def __init__(self, base_directory=''):
"""
Args:
base_directory: main data folder (e.g. ../data/UCF101)
"""
self.base_directory = pathlib.Path(base_directory)
def getFileList(self, data_type='train', remove_classname = False):
"""
This function uses a text file provided with the dataset
which lists all of the relative paths for the videos for the train/test split.
Args:
data_type: 'train' | 'test'
remove_classname: if True does not include the class name in the filenames.
Returns:
X is a tuple.
The first element is a numpy array with the absolute
filepaths for the videos for training.
The second element is a numpy array of class indices (0-100).
class_names is a list of the action categories.
"""
base_directory = self.base_directory
#print(f'[getFileList] Reading data from: {base_directory}')
# action class labels
class_file = open(base_directory/'annotations/ucfTrainTestlist/classInd.txt','r')
lines = class_file.readlines()
lines = [line.split(' ')[1].strip() for line in lines]
class_file.close()
class_names = np.asarray(lines)
if data_type == 'train':
# training data
train_file = open(base_directory/'annotations/ucfTrainTestlist/trainlist01.txt','r')
lines = train_file.readlines()
if remove_classname:
filenames = ['/UCF-101/' + line.split(' ')[0].split('/')[1] for line in lines]
else:
filenames = ['/UCF-101/' + line.split(' ')[0] for line in lines]
y_train = [int(line.split(' ')[1].strip())-1 for line in lines]
y_train = np.asarray(y_train)
filenames = [base_directory.as_posix() + filename for filename in filenames]
train_file.close()
train = (np.asarray(filenames),y_train)
X = train
print('Number of training files:', len(X[0]))
else:
# testing data
test_file = open(base_directory/'annotations/ucfTrainTestlist/testlist01.txt','r')
lines = test_file.readlines()
filenames = ['/UCF-101/' + line.split(' ')[0].strip() for line in lines]
classnames = [filename.split('/')[2] for filename in filenames]
if remove_classname:
# remove the class name from the filename if needed.
filenames = ['/UCF-101/' + line.split(' ')[0].split('/')[1].strip() for line in lines]
y_test = [np.where(classname == class_names)[0][0] for classname in classnames]
y_test = np.asarray(y_test)
filenames = [base_directory.as_posix() + filename for filename in filenames]
test_file.close()
test = (np.asarray(filenames),y_test)
X = test
print('Number of validation files:', len(X[0]))
#print('[getFileList] Done.')
return X, class_names
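    # A short usage sketch (the path below is an assumption, not part of the class):
    #
    #     ucf = UCF101('../data/UCF101')
    #     (filenames, labels), class_names = ucf.getFileList(data_type='train')
    #     print(filenames[0], class_names[labels[0]])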
def downloadData(self):
"""
Downloads all zip files of the UCF101 dataset.
"""
target_dir = self.base_directory
print(f'[downloadData] 1/2 Beginning file download to {target_dir}')
        compressed_dir = pathlib.Path(target_dir) / 'compressed'
        compressed_dir.mkdir(parents=True, exist_ok=True)
        annotations_dir = pathlib.Path(target_dir) / 'annotations'
        annotations_dir.mkdir(parents=True, exist_ok=True)
        destination_dir = pathlib.Path(target_dir) / 'UCF-101'
destination_dir.mkdir(parents=True, exist_ok=True)
# download annotations for action recognition
if pathlib.Path(compressed_dir/'UCF101TrainTestSplits-RecognitionTask.zip').exists():
print ("[downloadData]File UCF101TrainTestSplits-RecognitionTask.zip exists.")
else:
annotation_url = 'https://www.crcv.ucf.edu/data/UCF101/UCF101TrainTestSplits-RecognitionTask.zip'
filename = wget.download(annotation_url, out=compressed_dir.as_posix(), bar=wget.bar_adaptive)
print(f'[downloadData]File downloaded to {filename}')
if pathlib.Path(compressed_dir/'UCF101TrainTestSplits-DetectionTask.zip').exists():
print ("[downloadData]File UCF101TrainTestSplits-DetectionTask.zip exists.")
else:
# download annotations for action detection
annotation_url = 'https://www.crcv.ucf.edu/data/UCF101/UCF101TrainTestSplits-DetectionTask.zip'
filename =wget.download(annotation_url, out=compressed_dir.as_posix(), bar=wget.bar_adaptive)
print(f'[downloadData]File downloaded to {filename}')
# download videos
if pathlib.Path(compressed_dir/'UCF101.rar').exists():
print ("[downloadData]File UCF101.rar exists.")
else:
video_url = 'https://www.crcv.ucf.edu/data/UCF101/UCF101.rar'
filename =wget.download(video_url, out=compressed_dir.as_posix(), bar=wget.bar_adaptive)
print(f'[downloadData]File downloaded to {filename}')
print('[downloadData] Done.\n')
def extractData(self):
"""
Extracts all zip files of the UCF101 dataset.
It does system calls and it needs unrar (apt-get install unrar-free)
"""
target_dir = self.base_directory
print('[extractData] Extracting data...')
target_dir = pathlib.Path(target_dir)
compressed_dir = pathlib.Path(target_dir/'compressed')
compressed_dir.mkdir(parents=True, exist_ok=True)
annotations_dir = pathlib.Path(target_dir/'annotations')
annotations_dir.mkdir(parents=True, exist_ok=True)
destination_dir = pathlib.Path(target_dir/'UCF-101')
destination_dir.mkdir(parents=True, exist_ok=True)
try:
bash_cmd = 'unrar ' + target_dir.as_posix() + '/UCF101.rar' + ' ' + target_dir.as_posix() + '/UCF-101'
print(bash_cmd)
process = subprocess.Popen(bash_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
print(output)
except Exception as e:
print(e)
print()
bash_cmd = 'cp ' + target_dir.as_posix() + '/compressed/UCF101TrainTestSplits-RecognitionTask.zip ' + annotations_dir.as_posix() + '/UCF101TrainTestSplits-RecognitionTask.zip'
print(bash_cmd)
process = subprocess.Popen(bash_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
if len(output) > 0: print(output)
if len(error) > 0: print(error)
print()
bash_cmd = 'unzip ' + annotations_dir .as_posix() + '/UCF101TrainTestSplits-RecognitionTask.zip -d ' + annotations_dir.as_posix()
print(bash_cmd)
process = subprocess.Popen(bash_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
if len(output) > 0: print(output)
if len(error) > 0: print(error)
print()
bash_cmd = 'cp ' + target_dir.as_posix() + '/compressed/UCF101TrainTestSplits-DetectionTask.zip ' + annotations_dir.as_posix() + '/UCF101TrainTestSplits-DetectionTask.zip'
print(bash_cmd)
process = subprocess.Popen(bash_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
if len(output) > 0: print(output)
if len(error) > 0: print(error)
print()
bash_cmd = 'unzip ' + annotations_dir.as_posix() + '/UCF101TrainTestSplits-DetectionTask.zip -d ' + annotations_dir.as_posix()
print(bash_cmd)
process = subprocess.Popen(bash_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
if len(output) > 0: print(output)
if len(error) > 0: print(error)
print()
bash_cmd = 'rm ' + target_dir.as_posix() + '/annotations/*.zip'
print(bash_cmd)
process = subprocess.Popen(bash_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
if len(output) > 0: print(output)
if len(error) > 0: print(error)
print()
print('[extractData] Done.')
# Cell
class SingleFrameDataset(Dataset):
"""Single frame dataset for the UCF101."""
def __init__(self, dataset_path, training=True, transform=None):
"""
Args:
            dataset_path: path to the main data folder (e.g. ../data/UCF101).
            training: flag to say whether the train or test split is used.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
        self.training = training
ucf = UCF101(dataset_path)
X, class_names = ucf.getFileList(data_type='train' if training else 'test')
self.file_list, self.labels = X[0], X[1]
self.class_names = class_names
self.num_classes = len(self.class_names)
self.transform = transform
def getClassName(self, idx):
return self.class_names[idx]
def __len__(self):
return len(self.file_list)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
video_list = self.file_list[idx]
#video_list = self.file_list[idx % len(self)] # wraps up if an out of index idx is used.
avi = AVI(video_list)
frame = avi.getRandomFrame()
label = self.labels[idx]
label = np.array([label])
if self.transform:
# frame = frame.transpose(2,0,1)
frame = self.transform(frame)
return frame, label
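# A hedged usage sketch for the dataset above; the dataset path and the transform
# pipeline are illustrative assumptions, not taken from this file:
#
#     tfm = transforms.Compose([transforms.ToPILImage(),
#                               transforms.Resize((224, 224)),
#                               transforms.ToTensor()])
#     ds = SingleFrameDataset('../data/UCF101', training=True, transform=tfm)
#     dl = DataLoader(ds, batch_size=16, shuffle=True)
#     frames, labels = next(iter(dl))       # frames: tensor of shape (16, 3, 224, 224)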
# Cell
class BatchShower:
def __init__(self, dl):
self.dl = dl
def showBatch(self, idx, scale=1):
"""Loops through the dataloader and shows only one batch (idx)"""
assert idx >= 0 and idx <= np.floor(len(self.dl.dataset)/self.dl.batch_size), "selected batch index out of batch size range: [0, %d]" % np.floor(len(self.dl.dataset)/self.dl.batch_size)
for i_batch, sample_batched in enumerate(self.dl):
# print(i_batch, sample_batched[0].size())
# observe the idx-th batch and stop
if i_batch == idx:
plt.figure(figsize=(10,10))
image, label = sample_batched[0], sample_batched[1]
class_name = self.dl.dataset.getClassName(sample_batched[1])
self.showThisBatch(image, label, scale)
print(class_name.tolist())
plt.axis('off')
plt.ioff()
plt.show()
break
def showThisBatch(self, images_batch, labels_batch, scale=1):
"""Show image for a batch of samples.
Must be tensors of size (bs x w x h x channels).
"""
batch_size = len(images_batch)
im_size = images_batch.size()
ncols = int(np.ceil(np.sqrt(batch_size)))
for i in range(batch_size):
ax = plt.subplot(ncols, ncols, i+1)
if type(images_batch[i]) == torch.Tensor:
frame = images_batch[i].data.numpy()
frame = frame/255.
if frame.shape[0] <= 3:
frame = frame.transpose(1, 2, 0)
frame_v_mean = np.mean(frame)
frame = scale*frame
frame[frame<0] = 0
if np.mean(frame) < 2:
frame[frame>1] = 1
else:
frame[frame>255] = 255
plt.imshow(frame)
# plt.tight_layout()
ax.axis('off')
# Cell
class SequenceDataset(Dataset):
"""Sequence based dataset for the UCF101.
Output is of shape:
seq_len, H, W, C
Note that when this is passed onto a DataLoader with toTensor() transform, it changes its shape to:
batch_size, seq_length, C, H, W
"""
def __init__(self, dataset_path, sequence_length, sample_interval=1, training=True, transform=None):
"""
Args:
            dataset_path: path to the main data folder (e.g. ../data/UCF101).
            sequence_length: number of frames in each returned sequence.
            sample_interval: interval (in frames) between sampled frames.
            training: flag to say whether the train or test split is used.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
        self.training = training
self.sequence_length = sequence_length
self.sample_interval = sample_interval
ucf = UCF101(dataset_path)
X, class_names = ucf.getFileList(data_type='train' if training else 'test')
self.file_list, self.labels = X[0], X[1]
self.class_names = class_names
self.num_classes = len(self.class_names)
self.transform = transform
def getClassName(self, idx):
return self.class_names[idx]
def __len__(self):
return len(self.file_list)
def __getitem__(self, idx):
"""
returns:
- sequence: list of frames of length self.sequence_length
"""
if torch.is_tensor(idx):
idx = idx.tolist()
#video_list = self.file_list[idx]
video_list = self.file_list[idx % len(self)] # wraps up if an out of index idx is used.
avi = AVI(video_list, verbose=False) # set verbose to True might help debugging.
frames = avi.getRandomSequence(self.sequence_length, self.sample_interval)
# frames is a numpy matrix of shape seq_len, H, W, C
label = self.labels[idx]
label = np.array([label])
# Extract frames as tensors
image_sequence = []
#image_sequence = torch.stack(image_sequence)
# frame = frame.transpose(2,0,1)
if self.transform:
sequence = None
for i, frame in enumerate(frames):
frame = self.transform(frame) # here frame might be a tensor
if sequence is None:
# we still need to allocate it.
                    sequence = np.zeros(np.append(np.array([self.sequence_length]), frame.shape))
"""
pgeometry
---------
A collection of useful functions.
For additional options also see
`numpy <http://numpy.scipy.org/>`_ and `matplotlib <http://matplotlib.sourceforge.net/>`_.
:platform: Unix, Windows
Additions:
Copyright 2012-2016 TNO
Original code:
Copyright 2011 <NAME> <<EMAIL>>
@author: eendebakpt
"""
# %% Load necessary packages
import copy
import logging
import math
import os
import pickle
import pkgutil
import re
import subprocess
import sys
import tempfile
import time
import warnings
from functools import wraps
from typing import List, Optional, Union
import numpy
import numpy as np
import scipy.io
import scipy.optimize
import scipy.ndimage.filters as filters
import scipy.ndimage.morphology as morphology
import shapely.geometry
__version__ = '0.7.0'
# %% Load qt functionality
def qtModules(verbose=0):
""" Return list of Qt modules loaded """
_ll = sys.modules.keys()
qq = [x for x in _ll if x.startswith('Py')]
if verbose:
print('qt modules: %s' % str(qq))
return qq
try:
_applocalqt = None
try:
# by default use qtpy to import Qt
import qtpy
_haveqtpy = True
import qtpy.QtCore as QtCore
import qtpy.QtGui as QtGui
import qtpy.QtWidgets as QtWidgets
from qtpy.QtCore import QObject, Signal, Slot
except ImportError:
_haveqtpy = False
warnings.warn('could not import qtpy, not all functionality available')
pass
_ll = sys.modules.keys()
_pyside = len([_x for _x in _ll if _x.startswith('PySide.QtGui')]) > 0
_pyqt4 = len([_x for _x in _ll if _x.startswith('PyQt4.QtGui')]) > 0
_pyqt5 = len([_x for _x in _ll if _x.startswith('PyQt5.QtGui')]) > 0
def slotTest(txt):
""" Helper function for Qt slots """
class slotObject(QtCore.QObject):
def __init__(self, txt):
QObject.__init__(self)
self.txt = txt
@Slot()
def slot(self, v=None):
if v is None:
print('slotTest: %s' % self.txt)
else:
print('slotTest: %s: %s' % (self.txt, str(v)))
s = slotObject(txt)
return s.slot
class signalTest(QObject):
""" Helper function for Qt signals """
s = Signal()
def __init__(self):
QObject.__init__(self)
def go(self):
self.s.emit()
except Exception as ex:
logging.info('pgeometry: load qt: %s' % ex)
print(ex)
print('pgeometry: no Qt found')
# %% Load other modules
try:
import pylab
import pylab as p
except Exception as inst:
print(inst)
print('could not import pylab, not all functionality available...')
pass
try:
import matplotlib
import matplotlib.pyplot as plt
# needed for 3d plot points, do not remove!
try:
from mpl_toolkits.mplot3d import Axes3D
except BaseException:
pass
except ModuleNotFoundError as ex:
warnings.warn(
'could not find matplotlib, not all functionality available...')
plt = None
pass
try:
import skimage.filters
except ModuleNotFoundError as ex:
warnings.warn(
'could not find skimage.filters, not all functionality is available')
pass
try:
import cv2
_haveOpenCV = True
except (ModuleNotFoundError, ImportError):
_haveOpenCV = False
warnings.warn('could not find or load OpenCV, not all functionality is available')
pass
# %% Utils
try:
import resource
def memUsage():
""" Prints the memory usage in MB
Uses the resource module
"""
# http://chase-seibert.github.io/blog/2013/08/03/diagnosing-memory-leaks-python.html
print('Memory usage: %s (mb)' %
((resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) / 1024., ))
except BaseException:
def memUsage():
print('Memory usage: ? (mb)')
def memory():
""" Return the memory usage in MB
Returns:
float: memory usage in mb
"""
import os
import psutil
process = psutil.Process(os.getpid())
mem = process.memory_info().rss / (1024. * 1024.)
return mem
def list_objects(objectype=None, objectclassname='__123', verbose=1):
""" List all objects in memory of a specific type or with a specific class name
Args:
objectype (None or class)
objectclassname (str)
Returns:
ll (list): list of objects found
"""
import gc
ll = []
for ii, obj in enumerate(gc.get_objects()):
if ii > 1000000:
break
valid = False
if hasattr(obj, '__class__'):
valid = getattr(obj.__class__, '__name__', 'none').startswith(objectclassname)
if objectype is not None and not valid:
if isinstance(obj, objectype):
valid = True
if valid:
if verbose:
print('list_objects: object %s' % (obj, ))
ll.append(obj)
return ll
def package_versions(verbose=1):
""" Report package versions installed """
print('numpy.__version__ %s' % numpy.__version__)
print('scipy.__version__ %s' % scipy.__version__)
print('matplotlib.__version__ %s' % matplotlib.__version__)
try:
import cv2
print('cv2.__version__ %s' % cv2.__version__)
except BaseException:
pass
try:
import qtpy
import qtpy.QtCore
print('qtpy.API_NAME %s' % (qtpy.API_NAME))
print('qtpy.QtCore %s' % (qtpy.QtCore))
print('qtpy.QtCore.__version__ %s' % (qtpy.QtCore.__version__))
except BaseException:
pass
try:
import sip
print('sip %s' % sip.SIP_VERSION_STR)
except BaseException:
pass
def freezeclass(cls):
""" Decorator to freeze a class
This means that no attributes can be added to the class after instantiation.
"""
cls.__frozen = False
def frozensetattr(self, key, value):
if self.__frozen and not hasattr(self, key):
print("Class {} is frozen. Cannot set {} = {}"
.format(cls.__name__, key, value))
else:
object.__setattr__(self, key, value)
def init_decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
func(self, *args, **kwargs)
self.__frozen = True
return wrapper
cls.__setattr__ = frozensetattr
cls.__init__ = init_decorator(cls.__init__)
return cls
def static_var(varname, value):
""" Helper function to create a static variable
Args:
varname (str)
value (anything)
"""
def decorate(func):
setattr(func, varname, value)
return func
return decorate
@static_var("time", {'default': 0})
def tprint(string, dt=1, output=False, tag='default'):
""" Print progress of a loop every dt seconds
Args:
string (str): text to print
dt (float): delta time in seconds
output (bool): if True return whether output was printed or not
tag (str): optional tag for time
Returns:
output (bool)
"""
if (time.time() - tprint.time.get(tag, 0)) > dt:
print(string)
tprint.time[tag] = time.time()
if output:
return True
else:
return
else:
if output:
return False
else:
return
def partiala(method, **kwargs):
""" Function to perform functools.partial on named arguments """
raise Exception('Use functools.partial instead')
def t(x):
return method(x, **kwargs)
return t
def setFontSizes(labelsize=20, fsize=17, titlesize=None, ax=None,):
""" Update font sizes for a matplotlib plot """
if ax is None:
ax = plt.gca()
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(fsize)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(fsize)
for x in [ax.xaxis.label, ax.yaxis.label]: # ax.title,
x.set_fontsize(labelsize)
plt.tick_params(axis='both', which='major', labelsize=fsize)
if titlesize is not None:
ax.title.set_fontsize(titlesize)
plt.draw()
def plotCostFunction(fun, x0, fig=None, marker='.', scale=1, c=None):
""" Plot a cost function on specified data points
Example with variation of Booth's function:
>>> fun = lambda x: 2*(x[0]+2*x[1]-7)**2 + (2*x[0]+x[1]-5)**2
>>> plotCostFunction(fun, np.array([1,3]), fig=100, marker='-')
"""
x0 = np.array(x0).astype(float)
nn = x0.size
if fig is not None:
plt.figure(fig)
scale = np.array(scale)
if scale.size == 1:
scale = scale * np.ones(x0.size)
tt = np.arange(-1, 1, 5e-2)
for ii in range(nn):
val = np.zeros(tt.size)
for jj in range(tt.size):
x = x0.copy()
x[ii] += scale[ii] * tt[jj]
val[jj] = fun(x)
if c is None:
plt.plot(tt, val, marker)
else:
plt.plot(tt, val, marker, color=c[ii])
plt.xlabel('Scaled variables')
plt.ylabel('Value of cost function')
class fps_t:
def __init__(self, nn: int = 40):
""" Class for framerate measurements
Args:
nn: number of time measurements to store
Example usage:
>>> fps = fps_t(nn=8)
>>> for kk in range(12):
... fps.addtime(.2*kk)
>>> fps.show()
framerate: 5.000
"""
self.n = nn
self.tt = np.zeros(self.n)
self.x = np.zeros(self.n)
self.ii = 0
def __repr__(self):
ss = 'fps_t: buffer size %d, framerate %.3f [fps]' % (
self.n, self.framerate())
return ss
def addtime(self, t: Optional[float] = None, x: float = 0):
""" Add a timestamp to the object
Args:
t: Timestamp. If None, use `time.perf_counter`
x: Optional value to store with the timestamp
"""
if t is None:
t = time.perf_counter()
self.ii = self.ii + 1
iim = self.ii % self.n
self.tt[iim] = t
self.x[iim] = x
def value(self) -> float:
""" Return mean of current values """
return self.x.mean()
def iim(self) -> float:
""" Return index modulo number of elements """
return self.ii % self.n
def framerate(self) -> float:
""" Return the current framerate """
iim = self.ii % self.n
iimn = (self.ii + 1) % self.n
dt = self.tt[iim] - self.tt[iimn]
if dt == 0:
return np.NaN
fps = float(self.n - 1) / dt
return fps
def loop(self, s: str = ''):
""" Helper function """
self.addtime(time.time())
        self.showloop(s=s)
def showloop(self, dt: float = 2, s: str = ''):
""" Print current framerate in a loop
The print statement is only executed once every `dt` seconds
"""
fps = self.framerate()
if len(s) == 0:
tprint('loop %d: framerate: %.1f [fps]' % (self.ii, fps), dt=dt)
else:
tprint(
'%s: loop %d: framerate: %.1f [fps]' % (s, self.ii, fps), dt=dt)
def show(self):
""" Print the current framerate """
fps = self.framerate()
print('framerate: %.3f' % fps)
def mkdirc(d):
""" Similar to mkdir, but no warnings if the directory already exists """
try:
os.mkdir(d)
except BaseException:
pass
return d
def projectiveTransformation(H, x):
""" Apply a projective transformation to a kxN array
>>> y = projectiveTransformation( np.eye(3), np.random.rand( 2, 10 ))
"""
k = x.shape[0]
kout = H.shape[0] - 1
xx = x.transpose().reshape((-1, 1, k))
    if np.issubdtype(xx.dtype, np.integer):
xx = xx.astype(np.float32)
if xx.size > 0:
ww = cv2.perspectiveTransform(xx, H)
ww = ww.reshape((-1, kout)).transpose()
return ww
else:
return copy.copy(x)
def rottra2mat(rot, tra):
""" create 4x4 matrix from 3x3 rot and 1x3 tra """
out = np.eye(4)
out[0:3, 0:3] = rot
out[0:3, 3] = tra.transpose()
return out
def breakLoop(wk=None, dt=0.001, verbose=0):
""" Break a loop using OpenCV image feedback """
if wk is None:
wk = cv2.waitKey(1)
time.sleep(dt)
wkm = wk % 256
if wkm == 27 or wkm == ord('q') or wk == 1048689:
if verbose:
print('breakLoop: key q pressed, quitting loop')
return True
return False
def hom(x):
""" Create affine to homogeneous coordinates
Args:
x (kxN array): affine coordinates
Returns:
h ( (k+1xN) array): homogeneous coordinates
"""
nx = x.shape[1]
return np.vstack((x, np.ones(nx)))
def dehom(x):
""" Convert homogeneous points to affine coordinates """
return x[0:-1, :] / x[-1, :]
def null(a, rtol=1e-5):
""" Calculate null space of a matrix """
u, s, v = np.linalg.svd(a)
rank = (s > rtol * s[0]).sum()
return rank, v[rank:].T.copy()
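# Example (illustrative): the null space of [[1, 0, 0], [0, 1, 0]] is spanned by e_z.
#
#     >>> rank, ns = null(np.array([[1., 0, 0], [0, 1, 0]]))
#     >>> rank
#     2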
def intersect2lines(l1, l2):
""" Calculate intersection between 2 lines
Args:
l1 (array): first line in homogeneous format
l2 (array): first line in homogeneous format
Returns:
array: intersection in homogeneous format. To convert to affine coordinates use `dehom`
"""
r = null(np.vstack((l1, l2)))
return r[1]
def runcmd(cmd, verbose=0):
""" Run command and return output """
output = subprocess.check_output(cmd, shell=True)
return output
# %% Geometry functions
def angleDiff(x, y):
""" Return difference between two angles in radians modulo 2* pi
>>> d=angleDiff( 0.01, np.pi+0.02)
>>> d=angleDiff( 0.01, 2*np.pi+0.02)
"""
return np.abs(((x - y + np.pi) % (2 * np.pi)) - np.pi)
def angleDiffOri(x, y):
""" Return difference between two angles in radians modulo pi
>>> d=angleDiff( 0.01, np.pi+0.02)
>>> d=angleDiff( 0.01, 2*np.pi+0.02)
"""
return np.abs(((x - y + np.pi / 2) % (np.pi)) - np.pi / 2)
def opencvpose2attpos(rvecs, tvecs):
tvec = np.array(tvecs).flatten()
rvec = np.array(rvecs).flatten()
R, tmp = cv2.Rodrigues(rvec)
att = RBE2euler(R)
pos = -R.transpose().dot(np.array(tvec.reshape((3, 1))))
return att, pos
def opencv2TX(rvecs, tvecs):
""" Convert OpenCV pose to homogenous transform """
T = np.array(np.eye(4))
R = cv2.Rodrigues(rvecs)[0]
T[0:3, 0:3] = R
T[0:3, 3:4] = tvecs
return T
def opencv2T(rvec, tvec):
""" Convert OpenCV pose to homogenous transform """
T = np.array(np.eye(4))
T[0:3, 0:3] = cv2.Rodrigues(rvec)[0]
T[0:3, 3] = tvec
return T
def T2opencv(T):
""" Convert transformation to OpenCV rvec, tvec pair
Example
-------
>>> rvec, tvec = T2opencv(np.eye(4))
"""
rvec = cv2.Rodrigues(T[0:3, 0:3])[0]
tvec = T[0:3, 3]
return rvec, tvec
def euler2RBE(theta):
""" Convert Euler angles to rotation matrix
Example
-------
>>> np.set_printoptions(precision=4, suppress=True)
>>> euler2RBE( [0,0,np.pi/2] )
array([[ 0., -1., 0.],
[ 1., 0., 0.],
[-0., 0., 1.]])
"""
cr = math.cos(theta[0])
sr = math.sin(theta[0])
cp = math.cos(theta[1])
sp = math.sin(theta[1])
cy = math.cos(theta[2])
sy = math.sin(theta[2])
out = np.array([cp * cy, sr * sp * cy - cr * sy, cr * sp * cy + sr * sy,
cp * sy, sr * sp * sy + cr * cy, cr * sp * sy - sr * cy, -sp, sr * cp, cr * cp])
return out.reshape((3, 3))
def RBE2euler(Rbe):
""" Convert rotation matrix to Euler angles """
out = np.zeros([3, 1])
out[0, 0] = math.atan2(Rbe[2, 1], Rbe[2, 2])
out[1, 0] = -math.asin(Rbe[2, 0])
out[2, 0] = math.atan2(Rbe[1, 0], Rbe[0, 0])
return out
# %% Helper functions
def pg_rotation2H(R):
""" Convert rotation matrix to homogenous transform matrix """
X = np.array(np.eye(R.shape[0] + 1))
X[0:-1, 0:-1] = R
return X
def directionMean(vec):
""" Calculate the mean of a set of directions
The initial direction is determined using the oriented direction. Then a non-linear optimization is done.
Args:
vec: List of directions
        Returns:
            Angle (in radians) of the mean direction
>>> vv=np.array( [[1,0],[1,0.1], [-1,.1]])
>>> a=directionMean(vv)
"""
vec = np.array(vec)
def dist(a, vec):
phi = np.arctan2(vec[:, 0], vec[:, 1])
x = a - phi
x = np.mod(x + np.pi / 2, np.pi) - np.pi / 2
cost = np.linalg.norm(x)
return cost
Nfeval = 1
def callbackF(Xi):
global Nfeval
print(Xi)
print(f'{Nfeval:4d} {Xi[0]: 3.6f}: distance {dist(Xi[0], vec)}')
Nfeval += 1
m = vec.mean(axis=0)
a0 = np.arctan2(m[0], m[1])
def cost_function(a): return dist(a, vec)
r = scipy.optimize.minimize(cost_function, a0, callback=None, options=dict({'disp': False}))
angle = r.x[0]
return angle
def circular_mean(weights, angles):
""" Calculate circular mean of a set of 2D vectors """
x = y = 0.
for angle, weight in zip(angles, weights):
x += math.cos(math.radians(angle)) * weight
y += math.sin(math.radians(angle)) * weight
mean = math.degrees(math.atan2(y, x))
return mean
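# Example (illustrative): two equally weighted headings either side of north
# average out to roughly zero degrees.
#
#     circular_mean([1.0, 1.0], [350.0, 10.0])   # ~ 0.0 (degrees)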
def dir2R(d, a=None):
""" Convert direction to rotation matrix
Note: numerically not stable near singular points!
Arguments:
d (numpy array of size 3): direction to rotation to a
a (numpy array of size 3): target direction
Returns:
R (3x3 numpy array): matrix R such that R*a = d
Example:
>>> d = np.array([0, 1, 0]); a = np.array([0, -1, 0])
>>> R = dir2R(d, a)
<NAME> <<EMAIL>>
"""
# set target vector
if a is None:
a = np.array([0, 0, 1])
# normalize
b = d.reshape((3, 1)) / np.linalg.norm(d)
a = a.reshape((3, 1))
c = np.cross(a.flat, b.flat)
if np.linalg.norm(c) < 1e-12 and a.T.dot(b) < .01:
# deal with singular case
if(np.linalg.norm(a[1:]) < 1e-4):
R0 = np.array([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])
else:
R0 = np.array([[1, 0, 0], [0, 0, 1], [0, -1, 0]])
a = R0.dot(a)
bt = (a + b) / np.linalg.norm(a + b)
R = np.eye(3) - 2 * a.dot(a.T) - 2 * \
(bt.dot(bt.T)).dot(np.eye(3) - 2 * a.dot(a.T))
R = R.dot(R0)
else:
bt = (a + b) / np.linalg.norm(a + b)
R = np.eye(3) - 2 * a.dot(a.T) - 2 * \
(bt.dot(bt.T)).dot(np.eye(3) - 2 * a.dot(a.T))
return R
def frame2T(f):
""" Convert frame into 4x4 transformation matrix """
T = np.array(np.eye(4))
T[0:3, 0:3] = euler2RBE(f[3:7])
T[0:3, 3] = f[0:3].reshape(3, 1)
return T
@static_var("b", np.array(np.zeros((2, 2))))
def rot2D(phi):
""" Return 2x2 rotation matrix from angle
Arguments
---------
phi : float
Angle in radians
Returns
-------
R : array
The 2x2 rotation matrix
Examples
--------
>>> R = rot2D(np.pi)
"""
r = rot2D.b.copy()
c = math.cos(phi)
s = math.sin(phi)
r.itemset(0, c)
r.itemset(1, -s)
r.itemset(2, s)
r.itemset(3, c)
return r
def pg_rotx(phi):
""" Rotate around the x-axis with angle """
c = math.cos(phi)
s = math.sin(phi)
R = np.zeros((3, 3))
R.flat = [1, 0, 0, 0, c, -s, 0, s, c]
return R
def pcolormesh_centre(x, y, im, *args, **kwargs):
""" Wrapper for pcolormesh to plot pixel centres at data points """
dx = np.diff(x)
dy = np.diff(y)
dx = np.hstack((dx[0], dx, dx[-1]))
dy = np.hstack((dy[0], dy, dy[-1]))
xx = np.hstack((x, x[-1] + dx[-1])) - dx / 2
yy = np.hstack((y, y[-1] + dy[-1])) - dy / 2
plt.pcolormesh(xx, yy, im, *args, **kwargs)
def imshowz(im, *args, **kwargs):
""" Show image with interactive z-values """
plt.imshow(im, *args, **kwargs)
sz = im.shape
numrows, numcols = sz[0], sz[1]
def format_coord(x, y):
col = int(x + 0.5)
row = int(y + 0.5)
if col >= 0 and col < numcols and row >= 0 and row < numrows:
z = im[row, col]
try:
if len(z) == 1:
return 'x=%1.4f, y=%1.4f, z=%1.4f' % (x, y, z)
else:
return 'x=%1.4f, y=%1.4f, z=%s' % (x, y, str(z))
except BaseException:
return 'x=%1.4f, y=%1.4f, z=%s' % (x, y, str(z))
else:
return 'x=%1.4f, y=%1.4f' % (x, y)
ax = plt.gca()
ax.format_coord = format_coord
def pg_scaling(scale, cc=None):
""" Create scaling with specified centre
Example
-------
>>> pg_scaling( [1.,2])
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 1.]])
"""
scale = np.array(scale)
scale = np.hstack((scale, 1))
H = np.diag(scale)
if cc is not None:
cc = np.array(cc).flatten()
H = pg_transl2H(cc).dot(H).dot(pg_transl2H(-cc))
return H
def pg_transl2H(tr):
""" Convert translation to homogeneous transform matrix
>>> pg_transl2H( [1,2])
array([[ 1., 0., 1.],
[ 0., 1., 2.],
[ 0., 0., 1.]])
"""
sh = np.array(tr)
H = np.eye(sh.size + 1)
H[0:-1, -1] = sh.flatten()
H = np.array(H)
return H
def setregion(im, subim, pos, mask=None, clip=False):
""" Set region in Numpy image
Arguments
---------
im : Numpy array
image to fill region in
subim : Numpy array
subimage
pos: array
position to place image
mask (None or array): mask to use for the subimage
clip (bool): if True clip the subimage where necessary to fit
"""
h = subim.shape[0]
w = subim.shape[1]
x1 = int(pos[0])
y1 = int(pos[1])
x2 = int(pos[0]) + w
y2 = int(pos[1]) + h
if clip:
x1 = max(x1, 0)
y1 = max(y1, 0)
x2 = min(x2, im.shape[1])
y2 = min(y2, im.shape[0])
w = max(0, x2 - x1)
h = max(0, y2 - y1)
if mask is None:
if len(im.shape) == len(subim.shape):
im[y1:y2, x1:x2, ...] = subim[0:h, 0:w]
else:
im[y1:y2, x1:x2, ...] = subim[0:h, 0:w, np.newaxis]
else:
if len(im.shape) > len(mask.shape):
im[y1:y2, x1:x2] = im[y1:y2, x1:x2] * \
(1 - mask[:, :, np.newaxis]) + (subim * mask[:, :, np.newaxis])
else:
if len(im.shape) == len(subim.shape):
im[y1:y2, x1:x2, ...] = im[y1:y2, x1:x2, ...] * \
(1 - mask[:, :]) + (subim * mask[:, :])
else:
im[y1:y2, x1:x2, ...] = im[y1:y2, x1:x2, ...] * \
(1 - mask[:, :]) + (subim[:, :, np.newaxis] * mask[:, :])
return im
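# Example (illustrative): paste a 10x10 patch into a 100x100 image, clipping the
# part of the patch that falls outside the image.
#
#     >>> im = setregion(np.zeros((100, 100)), np.ones((10, 10)), pos=(95, 95), clip=True)
#     >>> int(im.sum())
#     25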
def region2poly(rr):
""" Convert a region (bounding box xxyy) to polygon """
if isinstance(rr, tuple) or isinstance(rr, list):
# x,y,x2,y2 format
rr = np.array(rr).reshape((2, 2)).transpose()
poly = np.array([rr[:, 0:1], np.array([[rr[0, 1]], [rr[1, 0]]]), rr[
:, 1:2], np.array([[rr[0, 0]], [rr[1, 1]]]), rr[:, 0:1]]).reshape((5, 2)).T
return poly
poly = rr.flat[[0, 1, 1, 0, 0, 2, 2, 3, 3, 2]].reshape((2, 5))
return poly
def plotLabels(xx, *args, **kwargs):
""" Plot labels next to points
Args:
xx (2xN array): points to plot
*kwargs: arguments past to plotting function
Example:
>>> xx=np.random.rand(2, 10)
>>> fig=plt.figure(10); plt.clf()
>>> _ = plotPoints(xx, '.b'); _ = plotLabels(xx)
"""
if len(np.array(xx).shape) == 1 and xx.shape[0] == 2:
xx = xx.reshape((2, 1))
if xx.shape[0] > 2 and xx.shape[1] == 2:
xx = xx.T
if len(args) == 0:
v = range(0, xx.shape[1])
lbl = ['%d' % i for i in v]
else:
lbl = args[0]
if isinstance(lbl, int):
lbl = [str(lbl)]
elif isinstance(lbl, str):
lbl = [str(lbl)]
nn = xx.shape[1]
ax = plt.gca()
th = [None] * nn
for ii in range(nn):
lbltxt = str(lbl[ii])
th[ii] = ax.annotate(lbltxt, xx[:, ii], **kwargs)
return th
def plotPoints(xx, *args, **kwargs):
""" Plot 2D or 3D points
Args:
xx (array): array of points to plot
*args: arguments passed to the plot function of matplotlib
**kwargs: arguments passed to the plot function of matplotlib
Example:
>>> plotPoints(np.random.rand(2,10), '.-b')
"""
if xx.shape[0] == 2:
h = plt.plot(xx[0, :], xx[1, :], *args, **kwargs)
elif xx.shape[0] == 3:
h = plt.plot(xx[0, :], xx[1, :], xx[2, :], *args, **kwargs)
    elif xx.shape[0] == 1:
        h = plt.plot(xx[0, :], *args, **kwargs)
    else:
        h = None
return h
def plot2Dline(line, *args, **kwargs):
""" Plot a 2D line in a matplotlib figure
Args:
line (3x1 array): line to plot
>>> plot2Dline([-1,1,0], 'b')
"""
if np.abs(line[1]) > .001:
xx = plt.xlim()
xx = np.array(xx)
yy = (-line[2] - line[0] * xx) / line[1]
plt.plot(xx, yy, *args, **kwargs)
else:
yy = np.array(plt.ylim())
xx = (-line[2] - line[1] * yy) / line[0]
plt.plot(xx, yy, *args, **kwargs)
# %%
def scaleImage(image, display_min=None, display_max=None):
""" Scale any image into uint8 range
Args:
image (numpy array): input image
display_min (float): value to map to min output range
display_max (float): value to map to max output range
Returns:
image (numpy array): the scaled image
Example:
>>> im=scaleImage(255*np.random.rand( 30,40), 40, 100)
Code modified from: https://stackoverflow.com/questions/14464449/using-numpy-to-efficiently-convert-16-bit-image-data-to-8-bit-for-display-with?noredirect=1&lq=1
"""
image = np.array(image, copy=True)
if display_min is None:
        display_min = np.percentile(image, .15)
# Copyright 2020 Q-CTRL Pty Ltd & Q-CTRL Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Driven control module.
"""
import csv
import json
from typing import (
Dict,
Optional,
)
import numpy as np
from ..utils import (
Coordinate,
FileFormat,
FileType,
check_arguments,
create_repr_from_attributes,
)
class DrivenControl:
r"""
A piecewise-constant driven control for a single qubit.
Parameters
----------
durations : np.ndarray
The durations :math:`\{\delta t_n\}` for each segment, in units of seconds. Every element
must be positive. Represented as a 1D array of length :math:`N`, where :math:`N` is number
of segments.
rabi_rates : np.ndarray, optional
The Rabi rates :math:`\{\Omega_n\}` for each segment, in units of radians per second. Every
element must be non-negative. Represented as a 1D array of length :math:`N`, where :math:`N`
is number of segments. You can omit this field if the Rabi rate is zero on all segments.
azimuthal_angles : np.ndarray, optional
The azimuthal angles :math:`\{\phi_n\}` for each segment. Represented as a 1D array of
length :math:`N`, where :math:`N` is number of segments. You can omit this field if the
azimuthal angle is zero on all segments.
detunings : np.ndarray, optional
The detunings :math:`\{\Delta_n\}` for each segment, in units of radians per second.
Represented as a 1D array of length :math:`N`, where :math:`N` is number of segments. You
can omit this field if the detuning is zero on all segments.
name : string, optional
An optional string to name the control. Defaults to ``None``.
Notes
-----
This class represents a control for a single driven qubit with Hamiltonian:
.. math::
H(t) = \frac{1}{2}\left(\Omega(t) e^{i\phi(t)} \sigma_- +
\Omega(t) e^{-i\phi(t)}\sigma_+\right) +
\frac{1}{2}\Delta(t)\sigma_z,
where :math:`\Omega(t)` is the Rabi rate, :math:`\phi(t)` is the azimuthal angle (or drive
phase), :math:`\Delta(t)` is the detuning, :math:`\sigma_\pm = (\sigma_x \mp \sigma_y)/2`,
and :math:`\sigma_k` are the Pauli matrices.
The controls are piecewise-constant, meaning :math:`\Omega(t)=\Omega_n` for
:math:`t_{n-1}\leq t<t_n`, where :math:`t_0=0` and :math:`t_n=t_{n-1}+\delta t_n` (and similarly
for :math:`\phi(t)` and :math:`\Delta(t)`).
For each segment of the control, the constant Hamiltonian effects unitary time evolution of the
form:
.. math::
U_n = \exp\left[-i\frac{\theta_n}{2} (\mathbf u_n\cdot\boldsymbol \sigma)\right],
where :math:`\theta_n = \sqrt{\Omega_n^2+\Delta_n^2}\delta t_n`,
:math:`\mathbf u_n` is the unit vector in the direction
:math:`(\Omega_n\cos\phi_n, \Omega_n\sin\phi_n, \Delta_n)`, and
:math:`\boldsymbol\sigma=(\sigma_x, \sigma_y, \sigma_z)`. This unitary time evolution
corresponds to a rotation of the Bloch sphere of an angle :math:`\theta_n` about the axis
:math:`\mathbf u_n`.
"""
def __init__(
self,
durations: np.ndarray,
rabi_rates: Optional[np.ndarray] = None,
azimuthal_angles: Optional[np.ndarray] = None,
detunings: Optional[np.ndarray] = None,
name: Optional[str] = None,
):
self.name = name
        durations = np.asarray(durations, dtype=float)
# check if all the durations are greater than zero
check_arguments(
all(durations > 0),
"Duration of driven control segments must all be greater than zero.",
{"durations": durations},
)
# check if all non-None inputs have the same length
input_lengths = {
np.array(v).size
for v in [rabi_rates, azimuthal_angles, detunings, durations]
if v is not None
}
check_arguments(
len(input_lengths) == 1,
"If set, rabi rates, azimuthal angles, detunings and durations "
"must be of same length",
{
"rabi_rates": rabi_rates,
"azimuthal_angles": azimuthal_angles,
"detunings": detunings,
"durations": durations,
},
)
duration_count = len(durations)
if rabi_rates is None:
            rabi_rates = np.zeros(duration_count)
"""
Interval unit commitment
@author:<NAME>
@e-mail:<EMAIL>
"""
from pypower import loadcase, ext2int, makeBdc
from scipy.sparse import csr_matrix as sparse
from numpy import zeros, c_, shape, ix_, ones, r_, arange, sum, concatenate, array, diag, eye
from solvers.mixed_integer_solvers_cplex import mixed_integer_linear_programming as lp
import pandas as pd
def problem_formulation(case, BETA=0.15, BETA_HYDRO=0.05, BETA_LOAD=0.03):
"""
    :param case: The test case for the interval unit commitment problem
:return:
"""
CAP_WIND = 1 # The capacity of wind farm
    # The disturbance range of wind farm
CAPVALUE = 10 # The capacity value
Price_energy = r_[ones(8), 3 * ones(8), ones(8)]
from pypower.idx_brch import F_BUS, T_BUS, BR_X, TAP, SHIFT, BR_STATUS, RATE_A
from pypower.idx_cost import MODEL, NCOST, PW_LINEAR, COST, POLYNOMIAL
from pypower.idx_bus import BUS_TYPE, REF, VA, VM, PD, GS, VMAX, VMIN, BUS_I
from pypower.idx_gen import GEN_BUS, VG, PG, QG, PMAX, PMIN, QMAX, QMIN
mpc = ext2int.ext2int(case)
baseMVA, bus, gen, branch, gencost = mpc["baseMVA"], mpc["bus"], mpc["gen"], mpc["branch"], mpc["gencost"]
nb = shape(mpc['bus'])[0] ## number of buses
nl = shape(mpc['branch'])[0] ## number of branches
ng = shape(mpc['gen'])[0] ## number of dispatchable injections
# Bbus = makeBdc.makeBdc(baseMVA, bus, branch)
# Distribution_factor = Bbus[1] * inv(Bbus[0])
Distribution_factor = array([
[0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[-0.005, -0.005, -0.005, -1.005, -0.005, -0.005, -0.005, -0.005, -0.005, -0.005, -0.005, -0.005, -0.005,
-0.005, ],
[0.47, 0.47, 0.47, 0.47, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03],
[0.47, 0.47, 0.47, 0.47, -0.03, - 0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03],
[0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0.32, 0.32, 0.32, 0.32, 0.32, 0.32, -0.68, -0.68, 0.32, 0.32, 0.32, 0.32, 0.32, 0.32],
[0.32, 0.32, 0.32, 0.32, 0.32, 0.32, 0.32, 0.32, -0.68, -0.68, 0.32, 0.32, 0.32, 0.32],
[0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, -0.84, 0.16, 0.16, 0.16, 0.16],
[-0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -1.16, -0.16, -1.16, -0.16],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0],
[-0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -1.16, -0.16, -0.16],
[-0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -1.08],
])
Distribution_factor = sparse(Distribution_factor)
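    # Distribution_factor is a hard-coded PTDF-style matrix mapping nodal injections
    # to branch flows; the commented makeBdc lines above show how it would normally
    # be derived from the network data.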
# Formulate connection matrix for wind farms
i = []
PWMAX = []
PWMIN = []
for index in range(ng):
if gen[index, PMIN] == 0:
i.append(index)
PWMAX.append(gen[index, PMAX])
PWMIN.append(gen[index, PMIN])
i = array(i)
nw = i.shape[0]
Cw = sparse((ones(nw), (gen[i, GEN_BUS], arange(nw))), shape=(nb, nw))
PWMAX = array(PWMAX).reshape((len(PWMAX), 1))
PWMIN = array(PWMIN).reshape((len(PWMIN), 1))
# Formulate the connection matrix for hydro power plants
i = []
PHMAX = []
PHMIN = []
for index in range(ng):
if gen[index, PMIN] > 0:
i.append(index)
PHMAX.append(gen[index, PMAX])
PHMIN.append(gen[index, PMIN])
i = array(i)
nh = i.shape[0]
Ch = sparse((ones(nh), (gen[i, GEN_BUS], arange(nh))), shape=(nb, nh))
PHMAX = array(PHMAX).reshape((len(PHMAX), 1))
PHMIN = array(PHMIN).reshape((len(PHMIN), 1))
# Formulate the external power systems
i = []
PEXMAX = []
PEXMIN = []
for index in range(ng):
if gen[index, PMIN] < 0:
i.append(index)
PEXMAX.append(gen[index, PMAX])
PEXMIN.append(gen[index, PMIN])
i = array(i)
nex = i.shape[0]
Cex = sparse((ones(nex), (gen[i, GEN_BUS], arange(nex))), shape=(nb, nex))
PEXMAX = array(PEXMAX).reshape((len(PEXMAX), 1))
PEXMIN = array(PEXMIN).reshape((len(PEXMIN), 1))
PLMAX = branch[:, RATE_A].reshape((nl, 1)) # The power flow limitation
T = 24
## Profiles
# Wind profile
WIND_PROFILE = array(
[591.35, 714.50, 1074.49, 505.06, 692.78, 881.88, 858.48, 609.11, 559.95, 426.86, 394.54, 164.47, 27.15, 4.47,
54.08, 109.90, 111.50, 130.44, 111.59, 162.38, 188.16, 216.98, 102.94, 229.53]).reshape((T, 1))
WIND_PROFILE = WIND_PROFILE / WIND_PROFILE.max()
WIND_PROFILE_FORECAST = zeros((T * nw, 1))
Delta_wind = zeros((T * nw, 1))
for i in range(T):
WIND_PROFILE_FORECAST[i * nw:(i + 1) * nw, :] = WIND_PROFILE[i] * PWMAX
Delta_wind[i * nw:(i + 1) * nw, :] = WIND_PROFILE[i] * PWMAX * BETA
# Load profile
LOAD_PROFILE = array([0.632596195634005, 0.598783973523217, 0.580981513054525, 0.574328051348912, 0.584214221241601,
0.631074282084712, 0.708620833751212, 0.797665730618795, 0.877125330124026, 0.926981579915087,
0.947428654208872, 0.921588439808779, 0.884707317888543, 0.877717046100358, 0.880387289807107,
0.892056129442049, 0.909233443653261, 0.926748403704075, 0.968646575067696, 0.999358974358974,
0.979169591816267, 0.913517534182463, 0.806453715775750, 0.699930632166617]).reshape((T, 1))
LOAD_FORECAST = zeros((T * nb, 1))
Delta_load = zeros((T * nb, 1))
load_base = bus[:, PD].reshape(nb, 1)
for i in range(T):
LOAD_FORECAST[i * nb:(i + 1) * nb, :] = load_base * LOAD_PROFILE[i]
Delta_load[i * nb:(i + 1) * nb, :] = load_base * BETA_LOAD
# Hydro information
HYDRO_INJECT = array([6, 2, 4, 3]).reshape((nh, 1))
HYDRO_INJECT_FORECAST = zeros((T * nh, 1))
Delta_hydro = zeros((T * nh, 1))
for i in range(T):
HYDRO_INJECT_FORECAST[i * nh:(i + 1) * nh, :] = HYDRO_INJECT
Delta_hydro[i * nh:(i + 1) * nh, :] = HYDRO_INJECT * BETA_HYDRO
MIN_DOWN = ones((nh, 1))
MIN_UP = ones((nh, 1))
QMIN = array([1.5, 1, 1, 1]).reshape((nh, 1))
QMAX = array([20, 10, 10, 10]).reshape((nh, 1))
VMIN = array([70, 50, 70, 40]).reshape((nh, 1))
VMAX = array([160, 140, 150, 130]).reshape((nh, 1))
V0 = array([110, 90, 100, 80]).reshape((nh, 1))
M_transfer = diag(array([8.8649, 6.4444, 6.778, 7.3333]))
C_TEMP = array([30, 2, 9, 4]).reshape((4, 1))
Q_TEMP = array([1.5, 1, 1, 1]).reshape((4, 1))
# Define the first stage decision variables
ON = 0
OFF = 1
IHG = 2
PHG = 3
RUHG = 4
RDHG = 5
QHG = 6
QUHG = 7
QDHG = 8
V = 9
S = 10
PWC = 11
PLC = 12
PEX = 13
CEX = 14
NX = PWC * nh * T + nw * T + nb * T + nex * T + 1
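    # The first-stage decision vector is laid out block-wise: each hydro-unit block
    # (ON, OFF, IHG, PHG, RUHG, RDHG, QHG, QUHG, QDHG, V, S) takes nh*T entries,
    # followed by the wind (PWC, nw*T), load (PLC, nb*T) and exchange (PEX, nex*T)
    # blocks and a single scalar capacity variable (CEX).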
lb = zeros((NX, 1))
ub = zeros((NX, 1))
c = zeros((NX, 1))
vtypes = ["c"] * NX
for i in range(T):
for j in range(nh):
# lower boundary information
lb[ON * nh * T + i * nh + j] = 0
lb[OFF * nh * T + i * nh + j] = 0
lb[IHG * nh * T + i * nh + j] = 0
lb[PHG * nh * T + i * nh + j] = 0
lb[RUHG * nh * T + i * nh + j] = 0
lb[RDHG * nh * T + i * nh + j] = 0
lb[QHG * nh * T + i * nh + j] = 0
lb[QUHG * nh * T + i * nh + j] = 0
lb[QDHG * nh * T + i * nh + j] = 0
lb[V * nh * T + i * nh + j] = VMIN[j]
lb[S * nh * T + i * nh + j] = 0
# upper boundary information
ub[ON * nh * T + i * nh + j] = 1
ub[OFF * nh * T + i * nh + j] = 1
ub[IHG * nh * T + i * nh + j] = 1
ub[PHG * nh * T + i * nh + j] = PHMAX[j]
ub[RUHG * nh * T + i * nh + j] = PHMAX[j]
ub[RDHG * nh * T + i * nh + j] = PHMAX[j]
ub[QHG * nh * T + i * nh + j] = QMAX[j]
ub[QUHG * nh * T + i * nh + j] = QMAX[j]
ub[QDHG * nh * T + i * nh + j] = QMAX[j]
ub[V * nh * T + i * nh + j] = VMAX[j]
ub[S * nh * T + i * nh + j] = 10 ** 8
# objective value
c[S * nh * T + i * nh + j] = 1
c[RUHG * nh * T + i * nh + j] = -Price_energy[j]
c[RDHG * nh * T + i * nh + j] = Price_energy[j]
# variables types
vtypes[ON * nh * T + i * nh + j] = "D"
vtypes[OFF * nh * T + i * nh + j] = "D"
vtypes[IHG * nh * T + i * nh + j] = "D"
if i == T - 1:
lb[V * nh * T + i * nh + j] = V0[j]
ub[V * nh * T + i * nh + j] = V0[j]
for j in range(nw):
# lower boundary information
lb[PWC * nh * T + i * nw + j] = 0
# upper boundary information
ub[PWC * nh * T + i * nw + j] = WIND_PROFILE_FORECAST[i * nw + j]
# objective value
c[PWC * nh * T + i * nw + j] = 1
for j in range(nb):
# lower boundary information
lb[PWC * nh * T + nw * T + i * nb + j] = 0
# upper boundary information
ub[PWC * nh * T + nw * T + i * nb + j] = bus[j, PD] * LOAD_PROFILE[i]
# objective value
c[PWC * nh * T + nw * T + i * nb + j] = 10 ** 8
for j in range(nex):
# lower boundary information
lb[PWC * nh * T + nw * T + nb * T + i * nex + j] = PEXMIN[j]
# upper boundary information
ub[PWC * nh * T + nw * T + nb * T + i * nex + j] = PEXMAX[j]
# objective value
c[PWC * nh * T + nw * T + nb * T + i * nex + j] = -Price_energy[i]
# lower boundary information
lb[PWC * nh * T + nw * T + nb * T + nex * T] = PEXMIN[0]
# upper boundary information
ub[PWC * nh * T + nw * T + nb * T + nex * T] = PEXMAX[0]
# objective value
# c[PWC * nh * T + nw * T + nb * T + nex * T] = -CAPVALUE
# 2) Constraint set
# 2.1) Power balance equation
Aeq = zeros((T, NX))
beq = zeros((T, 1))
for i in range(T):
# For the hydro units
for j in range(nh):
Aeq[i, PHG * nh * T + i * nh + j] = 1
# For the wind farms
for j in range(nw):
Aeq[i, PWC * nh * T + i * nw + j] = -1
# For the loads
for j in range(nb):
Aeq[i, PWC * nh * T + nw * T + i * nb + j] = 1
# For the power exchange
for j in range(nex):
Aeq[i, PWC * nh * T + nw * T + nb * T + i * nex + j] = -1
beq[i] = sum(load_base) * LOAD_PROFILE[i] - sum(WIND_PROFILE_FORECAST[i * nw:(i + 1) * nw])
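    # In equation form, the hourly balance imposed above is
    #   sum_j PHG[j,i] - sum_j PWC[j,i] + sum_j PLC[j,i] - sum_j PEX[j,i]
    #       = sum_b PD[b]*LOAD_PROFILE[i] - sum_j WIND_PROFILE_FORECAST[j,i]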
# 2.2) Status transformation of each unit
Aeq_temp = zeros((T * nh, NX))
beq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aeq_temp[i * nh + j, ON * nh * T + i * nh + j] = -1
Aeq_temp[i * nh + j, OFF * nh * T + i * nh + j] = 1
Aeq_temp[i * nh + j, IHG * nh * T + i * nh + j] = 1
if i != 0:
Aeq_temp[i * nh + j, IHG * nh * T + (i - 1) * nh + j] = -1
else:
beq_temp[i * T + j] = 0
Aeq = concatenate((Aeq, Aeq_temp), axis=0)
beq = concatenate((beq, beq_temp), axis=0)
# 2.3) water status change
Aeq_temp = zeros((T * nh, NX))
beq_temp = HYDRO_INJECT_FORECAST
for i in range(T):
for j in range(nh):
Aeq_temp[i * nh + j, V * nh * T + i * nh + j] = 1
Aeq_temp[i * nh + j, S * nh * T + i * nh + j] = 1
Aeq_temp[i * nh + j, QHG * nh * T + i * nh + j] = 1
if i != 0:
Aeq_temp[i * nh + j, V * nh * T + (i - 1) * nh + j] = -1
else:
beq_temp[i * T + j] += V0[j]
Aeq = concatenate((Aeq, Aeq_temp), axis=0)
beq = concatenate((beq, beq_temp), axis=0)
# 2.4) Power water transfering
Aeq_temp = zeros((T * nh, NX))
beq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aeq_temp[i * nh + j, PHG * nh * T + i * nh + j] = 1
Aeq_temp[i * nh + j, QHG * nh * T + i * nh + j] = -M_transfer[j, j]
Aeq_temp[i * nh + j, IHG * nh * T + i * nh + j] = -C_TEMP[j] + M_transfer[j, j] * Q_TEMP[j]
Aeq = concatenate((Aeq, Aeq_temp), axis=0)
beq = concatenate((beq, beq_temp), axis=0)
# 2.5) Power range limitation
Aineq = zeros((T * nh, NX))
bineq = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq[i * nh + j, ON * nh * T + i * nh + j] = 1
Aineq[i * nh + j, OFF * nh * T + i * nh + j] = 1
bineq[i * nh + j] = 1
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, IHG * nh * T + i * nh + j] = PHMIN[j]
Aineq_temp[i * nh + j, PHG * nh * T + i * nh + j] = -1
Aineq_temp[i * nh + j, RDHG * nh * T + i * nh + j] = 1
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, IHG * nh * T + i * nh + j] = -PHMAX[j]
Aineq_temp[i * nh + j, PHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, RUHG * nh * T + i * nh + j] = 1
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
# 2.6) Water reserve constraints
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, PHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, RUHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, IHG * nh * T + i * nh + j] = -C_TEMP[j] + M_transfer[j, j] * Q_TEMP[j]
Aineq_temp[i * nh + j, QHG * nh * T + i * nh + j] = -M_transfer[j, j]
Aineq_temp[i * nh + j, QUHG * nh * T + i * nh + j] = -M_transfer[j, j]
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, PHG * nh * T + i * nh + j] = -1
Aineq_temp[i * nh + j, RDHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, IHG * nh * T + i * nh + j] = C_TEMP[j] - M_transfer[j, j] * Q_TEMP[j]
Aineq_temp[i * nh + j, QHG * nh * T + i * nh + j] = M_transfer[j, j]
Aineq_temp[i * nh + j, QDHG * nh * T + i * nh + j] = -M_transfer[j, j]
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
# 2.7) water flow constraints
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, IHG * nh * T + i * nh + j] = QMIN[j]
Aineq_temp[i * nh + j, QHG * nh * T + i * nh + j] = -1
Aineq_temp[i * nh + j, QDHG * nh * T + i * nh + j] = 1
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, IHG * nh * T + i * nh + j] = -QMAX[j]
Aineq_temp[i * nh + j, QHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, QUHG * nh * T + i * nh + j] = 1
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
# 2.8) Water reserve limitation
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, V * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, QHG * nh * T + i * nh + j] = -1
Aineq_temp[i * nh + j, QDHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, S * nh * T + i * nh + j] = -1
bineq_temp[i * nh + j] = VMAX[j] - HYDRO_INJECT_FORECAST[i * nh + j]
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, V * nh * T + i * nh + j] = -1
Aineq_temp[i * nh + j, QHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, QUHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, S * nh * T + i * nh + j] = 1
bineq_temp[i * nh + j] = -VMIN[j] + HYDRO_INJECT_FORECAST[i * nh + j]
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
# 2.9) Line flow limitation
Aineq_temp = zeros((T * nl, NX))
bineq_temp = zeros((T * nl, 1))
for i in range(T):
Aineq_temp[i * nl:(i + 1) * nl, PHG * nh * T + i * nh:PHG * nh * T + (i + 1) * nh] = -(
Distribution_factor * Ch).todense()
Aineq_temp[i * nl:(i + 1) * nl, PWC * nh * T + i * nw:PWC * nh * T + (i + 1) * nw] = (
Distribution_factor * Cw).todense()
Aineq_temp[i * nl:(i + 1) * nl,
PWC * nh * T + nw * T + i * nb:PWC * nh * T + nw * T + (i + 1) * nb] = -Distribution_factor.todense()
Aineq_temp[i * nl:(i + 1) * nl,
PWC * nh * T + nw * T + nb * T + i * nex:PWC * nh * T + nw * T + nb * T + (i + 1) * nex] = (
Distribution_factor * Cex).todense()
bineq_temp[i * nl:(i + 1) * nl, :] = PLMAX - Distribution_factor * (
(bus[:, PD] * LOAD_PROFILE[i]).reshape(nb, 1) - Cw * WIND_PROFILE_FORECAST[i * nw:(i + 1) * nw])
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
Aineq_temp = zeros((T * nl, NX))
bineq_temp = zeros((T * nl, 1))
for i in range(T):
Aineq_temp[i * nl:(i + 1) * nl, PHG * nh * T + i * nh:PHG * nh * T + (i + 1) * nh] = (
Distribution_factor * Ch).todense()
Aineq_temp[i * nl:(i + 1) * nl, PWC * nh * T + i * nw:PWC * nh * T + (i + 1) * nw] = -(
Distribution_factor * Cw).todense()
Aineq_temp[i * nl:(i + 1) * nl,
PWC * nh * T + nw * T + i * nb:PWC * nh * T + nw * T + (i + 1) * nb] = Distribution_factor.todense()
Aineq_temp[i * nl:(i + 1) * nl,
PWC * nh * T + nw * T + nb * T + i * nex:PWC * nh * T + nw * T + nb * T + (i + 1) * nex] = -(
Distribution_factor * Cex).todense()
bineq_temp[i * nl:(i + 1) * nl, :] = PLMAX + Distribution_factor * (
(bus[:, PD] * LOAD_PROFILE[i]).reshape(nb, 1) - Cw * WIND_PROFILE_FORECAST[i * nw:(i + 1) * nw])
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
# 2.10) Capacity limitation
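# Reading note (assumption, rows interpreted as Aineq x <= bineq): each row below encodes
# capacity - pex_t <= 0, i.e. the single traded-capacity variable cannot exceed the
# scheduled power exchange of any period.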
Aineq_temp = zeros((T, NX))
bineq_temp = zeros((T, 1))
for i in range(T):
Aineq_temp[i, PWC * nh * T + nw * T + nb * T + nex * T] = 1
Aineq_temp[i, PWC * nh * T + nw * T + nb * T + i * nex] = -1
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
# 2.11) Up and down reserve for the forecasting errors
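# In every period the aggregate hydro up-reserve (and, in the block after it, the down-reserve)
# must cover the summed wind and load forecast deviations; the rows are written in the
# equivalent form -sum(R) <= -sum(Delta).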
# Up reserve limitation
Aineq_temp = zeros((T, NX))
bineq_temp = zeros((T, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i, RUHG * nh * T + i * nh + j] = -1
for j in range(nw):
bineq_temp[i] -= Delta_wind[i * nw + j]
for j in range(nb):
bineq_temp[i] -= Delta_load[i * nb + j]
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
# Down reserve limitation
Aineq_temp = zeros((T, NX))
bineq_temp = zeros((T, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i, RDHG * nh * T + i * nh + j] = -1
for j in range(nw):
bineq_temp[i] -= Delta_wind[i * nw + j]
for j in range(nb):
bineq_temp[i] -= Delta_load[i * nb + j]
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
model_first_stage = {"c": c,
"lb": lb,
"ub": ub,
"A": Aineq,
"b": bineq,
"Aeq": Aeq,
"beq": beq,
"vtypes": vtypes}
## Formulate the second-stage decision-making problem
phg = 0
qhg = 1
v = 2
s = 3
pwc = 4
plc = 5
pex = 6
cex = 7
nx = pwc * nh * T + nw * T + nb * T + nex * T + 1
# Generate the lower and upper bounds for the second-stage decision variables
lb = zeros((nx, 1))
ub = zeros((nx, 1))
c = zeros((nx, 1))
vtypes = ["c"] * nx
nu = nh * T + nw * T + nb * T
u_mean = concatenate([HYDRO_INJECT_FORECAST, WIND_PROFILE_FORECAST, LOAD_FORECAST])
u_delta = concatenate([Delta_hydro, Delta_wind, Delta_load])
for i in range(T):
for j in range(nh):
# lower boundary information
lb[phg * nh * T + i * nh + j] = 0
lb[qhg * nh * T + i * nh + j] = 0
lb[v * nh * T + i * nh + j] = VMIN[j]
lb[s * nh * T + i * nh + j] = 0
# upper boundary information
ub[phg * nh * T + i * nh + j] = PHMAX[j]
ub[qhg * nh * T + i * nh + j] = QMAX[j]
ub[v * nh * T + i * nh + j] = VMAX[j]
ub[s * nh * T + i * nh + j] = 10 ** 8
# objective value
c[s * nh * T + i * nh + j] = 1
if i == T - 1:
lb[v * nh * T + i * nh + j] = V0[j]
ub[v * nh * T + i * nh + j] = V0[j]
for j in range(nw):
# lower boundary information
lb[pwc * nh * T + i * nw + j] = 0
# upper boundary information
ub[pwc * nh * T + i * nw + j] = 10 ** 4
# objective value
c[pwc * nh * T + i * nw + j] = 1
for j in range(nb):
# lower boundary information
lb[pwc * nh * T + nw * T + i * nb + j] = 0
# upper boundary information
ub[pwc * nh * T + nw * T + i * nb + j] = 10 ** 4
# objective value
c[pwc * nh * T + nw * T + i * nb + j] = 10 ** 6
for j in range(nex):
# lower boundary information
lb[pwc * nh * T + nw * T + nb * T + i * nex + j] = PEXMIN[j]
# upper boundary information
ub[pwc * nh * T + nw * T + nb * T + i * nex + j] = PEXMAX[j]
# objective value
# c[pwc * nh * T + nw * T + nb * T + i * nex + j] = -Price_energy[i]
# lower boundary information
lb[pwc * nh * T + nw * T + nb * T + nex * T] = PEXMIN[0]
# upper boundary information
ub[pwc * nh * T + nw * T + nb * T + nex * T] = PEXMAX[0]
# objective value
c[pwc * nh * T + nw * T + nb * T + nex * T] = -CAPVALUE
# Generate the coupling constraints between the first- and second-stage problems
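# The recourse constraints are assembled in matrix form, with E acting on the first-stage
# variables, M on the uncertain injections/forecast errors, G on the second-stage variables,
# and h as the right-hand side; equality rows are encoded as pairs of opposite inequalities,
# hence the [G, -G] style concatenations below.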
# 3.1) Power balance constraints
E = zeros((T, NX))
M = zeros((T, nu))
G = zeros((T, nx))
h = beq[0:T]
for i in range(T):
# For the hydro units
for j in range(nh):
G[i, phg * nh * T + i * nh + j] = 1
# For the wind farms
for j in range(nw):
G[i, pwc * nh * T + i * nw + j] = -1
# For the loads
for j in range(nb):
G[i, pwc * nh * T + nw * T + i * nb + j] = 1
# For the power exchange
for j in range(nex):
G[i, pwc * nh * T + nw * T + nb * T + i * nex + j] = -1
# Update G,M,E,h
G = concatenate([G, -G])
M = concatenate([M, -M])
E = concatenate([E, -E])
h = concatenate([h, -h])
# 3.2) water status change
E_temp = zeros((T * nh, NX))
M_temp = zeros((T * nh, nu))
G_temp = zeros((T * nh, nx))
h_temp = HYDRO_INJECT_FORECAST
for i in range(T):
for j in range(nh):
G_temp[i * nh + j, v * nh * T + i * nh + j] = 1
G_temp[i * nh + j, s * nh * T + i * nh + j] = 1
G_temp[i * nh + j, qhg * nh * T + i * nh + j] = 1
if i != 0:
G_temp[i * nh + j, v * nh * T + (i - 1) * nh + j] = -1
else:
h_temp[i * T + j] = V0[j]
# M_temp[i * nh + j, i * nh + j] = -1
G = concatenate([G, G_temp, -G_temp])
M = concatenate([M, M_temp, -M_temp])
E = concatenate([E, E_temp, -E_temp])
h = concatenate([h, h_temp, -h_temp])
# 3.3) Power-water transfer
E_temp = zeros((T * nh, NX))
M_temp = zeros((T * nh, nu))
G_temp = zeros((T * nh, nx))
h_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
G_temp[i * nh + j, phg * nh * T + i * nh + j] = 1
G_temp[i * nh + j, qhg * nh * T + i * nh + j] = -M_transfer[j, j]
E_temp[i * nh + j, IHG * nh * T + i * nh + j] = -C_TEMP[j] + M_transfer[j, j] * Q_TEMP[j]
G = concatenate([G, G_temp, -G_temp])
M = concatenate([M, M_temp, -M_temp])
E = concatenate([E, E_temp, -E_temp])
h = concatenate([h, h_temp, -h_temp])
# 3.4) Power range limitation
# Some problem found
E_temp = zeros((T * nh, NX))
M_temp = zeros((T * nh, nu))
G_temp = zeros((T * nh, nx))
h_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
G_temp[i * nh + j, phg * nh * T + i * nh + j] = 1
E_temp[i * nh + j, PHG * nh * T + i * nh + j] = -1
E_temp[i * nh + j, RDHG * nh * T + i * nh + j] = 1
G = concatenate([G, G_temp])
M = concatenate([M, M_temp])
E = concatenate([E, E_temp])
h = concatenate([h, h_temp])
E_temp = zeros((T * nh, NX))
M_temp = zeros((T * nh, nu))
G_temp = zeros((T * nh, nx))
h_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
G_temp[i * nh + j, phg * nh * T + i * nh + j] = -1
E_temp[i * nh + j, PHG * nh * T + i * nh + j] = 1
E_temp[i * nh + j, RUHG * nh * T + i * nh + j] = 1
G = concatenate([G, G_temp])
M = concatenate([M, M_temp])
E = concatenate([E, E_temp])
h = concatenate([h, h_temp])
# 3.5) Water flow constraints
E_temp = zeros((T * nh, NX))
M_temp = | zeros((T * nh, nu)) | numpy.zeros |
#!/usr/bin/env python
# coding: utf-8
# # <center>Lab 1</center>
# ## <center> Optical Digit Recognition </center>
# 
# ### Description:
# The scope of this exercise is the implementation of __an optical digit recognition system__. Our dataset comes from the __US Postal Service__: handwritten digits from 0 to 9 (scanned from postal envelopes), separated into a train and a test set.
# ### Data:
# We are given two text files (train.txt and test.txt). Each line corresponds to a sample digit and each column corresponds to a feature of that digit. For example, the value (i, j) is the j-th feature of the i-th digit. Every digit is described by 257 values: the first value is the class (0, 1, etc.) and the remaining 256 values are the pixels that describe it in grayscale.
# ### Implementation:
# First, we import all the necessary libraries and suppress some unnecessary warnings.
# In[1]:
# various
import numpy as np
from matplotlib import pyplot as plt
import random
import scipy.stats
# sklearn
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.model_selection import KFold, learning_curve, ShuffleSplit, cross_val_score, train_test_split
from sklearn.svm import SVC
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.decomposition import PCA
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, VotingClassifier, BaggingClassifier
# pytorch
from torch.utils.data import Dataset, DataLoader
import torch
from torch import nn
from torch import optim
# In[2]:
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# #### The first 13 steps were implemented as part of the PrepareLab, located in the prepare_lab folder.
# __Step 1:__ Read input data from given text files.
# In[3]:
# Define useful variables
data_path = "./pr_lab1_2016-17_data_0/pr_lab1_2016-17_data"
train_size = 7291
test_size = 2007
n_features = 256
# Initialize X_train, X_test, y_train, y_test
X_train = np.zeros((train_size, n_features), dtype=np.float64)
X_test = np.zeros((test_size, n_features), dtype=np.float64)
y_train = np.zeros(train_size, dtype='int64')
y_test = np.zeros(test_size, dtype='int64')
# Read train data
with open(data_path + "/train.txt") as f:
for i, line in enumerate(f):
# Split i-th line
line = line.split()
# Keep the first column as the class of the i-th digit
y_train[i] = int(float(line[0]))
# Keep the rest 256 values as the pixels of the i-th digit.
for j, pixel in enumerate(line[1:]):
X_train[i][j] = pixel
print("Finished reading training data.")
# Read test data
with open(data_path + "/test.txt") as f:
for i, line in enumerate(f):
# Split i-th line
line = line.split()
# Keep the first column as the class of the i-th digit
y_test[i] = int(float(line[0]))
# Keep the rest 256 values as the pixels of the i-th digit.
for j, pixel in enumerate(line[1:]):
X_test[i][j] = pixel
print("Finished reading test data.")
# __Step 2:__ Display a certain sample (index 131) as an 16x16 image.
# In[4]:
# Reshape the 256 vector in a 16x16 matrix.
img_131 = np.reshape(X_train[131], (16, 16))
# Turn the axis off and display the image.
plt.axis('off')
plt.imshow(img_131)
# __Step 3:__ Display one random image from each digit.
# In[5]:
# Define a figure with 10 plots.
fig = plt.figure(figsize=(15,6))
columns = 5
rows = 2
for digit in range(10):
# Pick all images of current digit
curr_data = []
for j, y in enumerate(y_train):
if y == digit:
curr_data.append(X_train[j])
# Select randomly an image
sample = random.choice(curr_data)
# Display the randomly selected image in a subplot
fig.add_subplot(rows, columns, digit+1)
plt.axis('off')
plt.imshow(np.reshape(sample, (16, 16)))
plt.show()
# __Step 4:__ Compute the mean value of pixel (10,10) of all 0's in the train set.
# In[6]:
# Get indexes of 0's in the train set
idx_0 = [i for i in range(train_size) if y_train[i] == 0]
# Get pixel (10,10) of all 0's
X_train_0_10 = np.take(X_train[:, 10*16+10], idx_0)
# Compute mean
mean_0_10 = np.mean(X_train_0_10)
print("Mean value of pixel (10, 10) of all 0's in the train set is: " + str(mean_0_10))
# __Step 5:__ Compute variance of (10,10) pixel of all 0's in the train set
# In[7]:
var_0_10 = np.var(X_train_0_10)
print("Variance of pixel (10, 10) of all 0's in the train set is: " + str(var_0_10))
# __Step 6:__ Compute mean value and variance of every pixel of 0's in the train set
# In[8]:
# Get pixels of all 0's
X_train_0 = np.take(X_train, idx_0, axis=0)
# Compute mean value along each pixel
mean_0 = np.mean(X_train_0, axis=0, keepdims=True)
# Compute variance along each pixel
var_0 = np.var(X_train_0, axis=0, keepdims=True)
# Verify their shape
print("Shape of mean values: " + str(mean_0.shape))
print("Shape of variances: " + str(var_0.shape))
# __Step 7:__ Display digit '0' using the mean value of each pixel.
# In[9]:
plt.axis("off")
plt.imshow(np.reshape(mean_0, (16, 16)))
# __Step 8:__ Display '0' using the variance of each pixel.
# In[10]:
plt.axis("off")
plt.imshow(np.reshape(var_0, (16, 16)))
# We observe that the digit in the mean-image contains less noise than in the variance-image. However, in both images the digit can be distinguished.
# __Step 9:__
#
# __(a)__ Compute the mean value and the variance for all digits (0-9).
# In[11]:
mean = np.zeros((10, 256))
var = np.zeros((10, 256))
for digit in range(10):
idx_i = [i for i in range(train_size) if y_train[i] == digit]
X_train_i = np.take(X_train, idx_i, axis=0)
mean[digit, :] = np.mean(X_train_i, axis=0, keepdims=True)
var[digit, :] = np.var(X_train_i, axis=0, keepdims=True)
# __(b)__ Display all digits using their computed mean value.
# In[12]:
fig = plt.figure(figsize=(15,6))
columns = 5
rows = 2
for digit in range(10):
fig.add_subplot(rows, columns, digit+1)
plt.axis('off')
plt.imshow(np.reshape(mean[digit, :], (16, 16)))
plt.show()
# __Step 10:__ Classify X_test[101], using Euclidean distance.
# In[13]:
# Define a function that classifies a sample based on the
# euclidean distance.
def predict_eucl(x):
pred = 0
dist = np.linalg.norm(x - mean[0, :])
for i in range(1, 10):
if np.linalg.norm(x - mean[i, :]) < dist:
dist = np.linalg.norm(x - mean[i, :])
pred = i
return pred
print("Prediction: " + str(predict_eucl(X_test[101])))
print("Ground truth: " + str(y_test[101]))
# In[14]:
plt.axis('off')
plt.imshow(np.reshape(X_test[101], (16, 16)))
# We observe that the classification is wrong, since X_test[101] is the digit 6.
# __Step 11:__
#
# __(a)__ Classify test set using Euclidean distance
# In[15]:
# Compute predictions for each test sample
y_pred = np.zeros(test_size)
for i, x in enumerate(X_test):
y_pred[i] = predict_eucl(x)
# __(b)__ Compute accuracy
# In[16]:
# Count number of correct predictions and output the total accuracy.
corr = 0
for i in range(len(y_test)):
if y_test[i] == y_pred[i]:
corr += 1
acc = corr / len(y_test) * 100
print("Accuracy of Euclidean classifier in test set: " + str(acc))
# __Step 12:__ Create a scikit-learn euclidean estimator
# In[17]:
class EuclideanClassifier(BaseEstimator, ClassifierMixin):
"""Classify samples based on the distance from the mean feature value"""
def __init__(self):
self.X_mean_ = None
self.classes_ = None
def fit(self, X, y):
"""
This should fit classifier. All the "work" should be done here.
Calculates self.X_mean_ based on the mean
feature values in X for each class.
self.X_mean_ becomes a numpy.ndarray of shape
(n_classes, n_features)
fit always returns self.
"""
# Compute classes
self.classes_ = np.unique(y)
train_size, n_features = X.shape
n_classes = len(self.classes_)
self.X_mean_ = np.zeros((n_classes, n_features))
for k in range(n_classes):
idx_i = [i for i in range(train_size) if y[i] == k]
X_k = np.take(X, idx_i, axis=0)
self.X_mean_[k, :] = np.mean(X_k, axis=0, keepdims=True)
return self
def predict(self, X):
"""
Make predictions for X based on the
euclidean distance from self.X_mean_
"""
closest = np.argmin(euclidean_distances(X, self.X_mean_), axis=1)
return closest
def score(self, X, y):
"""
Return accuracy score on the predictions
for X based on ground truth y
"""
corr = 0
y_pred = self.predict(X)
corr = sum(int(y[i] == y_pred[i]) for i in range(len(y)))
acc = corr / len(y)
return acc
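# A minimal usage sketch of the estimator above (illustrative only; it reuses the
# train/test splits already loaded in this notebook):
#   clf = EuclideanClassifier().fit(X_train, y_train)
#   print("Test accuracy:", clf.score(X_test, y_test))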
# __Step 13:__
#
# __(a)__ Score the above Euclidean classifier using 5-fold cross-validation
# In[18]:
# Define a custom scorer
def my_scorer(clf, X, y_true):
return clf.score(X, y_true)
# Create the classifier
clf = EuclideanClassifier()
scores = cross_val_score(clf, X_train, y_train,
cv=KFold(n_splits=5, random_state=42),
scoring=my_scorer)
print("Euclidean Classifier score from 5-fold cross-validation = %f +-%f" % (np.mean(scores), np.std(scores)))
# __(b)__ Plot the decision surface of the euclidean classifier
# In[19]:
# Define a function that plots the decision surface of 2-dimensional data
def plot_clf(clf, X, y, labels):
fig, ax = plt.subplots()
# title for the plots
title = ('Decision surface of Classifier')
# Set-up grid for plotting.
X0, X1 = X[:, 0], X[:, 1]
x_min, x_max = X0.min() - 1, X0.max() + 1
y_min, y_max = X1.min() - 1, X1.max() + 1
xx, yy = np.meshgrid( | np.arange(x_min, x_max, .05) | numpy.arange |
import numpy as np
from baselines.deepq.experiments.atari.knn_cuda_fixmem import knn as knn_cuda_fixmem
import copy
import logging
# each action -> a lru_knn buffer
# alpha is for updating the internal reward i.e. count based reward
class LRU_KNN_GPU_PS(object):
def __init__(self, capacity, z_dim, env_name, action, num_actions=6, knn=4, debug=True, gamma=0.99,
alpha=0.1,
beta=0.01):
self.action = action
self.alpha = alpha
self.beta = beta
self.env_name = env_name
self.capacity = capacity
self.num_actions = num_actions
self.rmax = 100000
self.states = np.empty((capacity, z_dim), dtype=np.float32)
# self.hash_table = np.empty((capacity, z_dim), dtype=np.float32)
# self.hashes = {}
self.knn_mean_dist = np.full((capacity,), 0)
self.external_value = | np.full((capacity, num_actions), np.nan) | numpy.full |
import pandas as pd
import time
import random
import numpy as np
from datetime import timedelta
from datetime import datetime
import MAUC
import argparse
parser = argparse.ArgumentParser(usage='python3 evalOneSubmission.py',
description=r'''
TADPOLE Evaluation Script:
The program computes the following metrics:
Clinical diagnosis prediction:
1. Multiclass area under the receiver operating curve (mAUC)
2. Balanced classification accuracy (BCA)
Continuous feature predictions:
3. Mean Absolute Error (MAE)
4. Coverage Probability Accuracy (CPA)
5. Weighted Error Score (WES)
Author: <NAME>, <EMAIL>
''')
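# Illustrative sketch (not part of the official evaluation code): the Weighted Error Score
# computed further below weights each absolute error by the inverse width of the submitted
# 50% confidence interval, so tighter (more confident) intervals carry more weight.
# The helper name and signature are assumptions introduced purely for illustration.
def _wes_example(estim, lower, upper, true):
    # narrower confidence intervals -> larger coefficients -> larger weight
    coeffs = 1.0 / (np.array(upper) - np.array(lower))
    return np.sum(coeffs * np.abs(np.array(estim) - np.array(true))) / np.sum(coeffs)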
def calcBCA(estimLabels, trueLabels, nrClasses):
# Balanced Classification Accuracy
bcaAll = []
for c0 in range(nrClasses):
for c1 in range(c0+1,nrClasses):
# c0 = positive class & c1 = negative class
TP = np.sum((estimLabels == c0) & (trueLabels == c0))
TN = np.sum((estimLabels == c1) & (trueLabels == c1))
FP = np.sum((estimLabels == c1) & (trueLabels == c0))
FN = np.sum((estimLabels == c0) & (trueLabels == c1))
# sometimes the sensitivity or specificity can be NaN, if the user doesn't forecast one of the classes.
# In this case we assume a default value for sensitivity/specificity
if (TP+FN) == 0:
sensitivity = 0.5
else:
sensitivity = TP/(TP+FN)
if (TN+FP) == 0:
specificity = 0.5
else:
specificity = TN/(TN+FP)
bcaCurr = 0.5*(sensitivity+specificity)
bcaAll += [bcaCurr]
# print('bcaCurr %f TP %f TN %f FP %f FN %f' % (bcaCurr, TP, TN, FP, FN))
return np.mean(bcaAll)
def parseData(d4Df, forecastDf, diagLabels):
trueDiag = d4Df['Diagnosis']
trueADAS = d4Df['ADAS13']
trueVents = d4Df['Ventricles']
nrSubj = d4Df.shape[0]
zipTrueLabelAndProbs = []
hardEstimClass = -1 * np.ones(nrSubj, int)
adasEstim = -1 * np.ones(nrSubj, float)
adasEstimLo = -1 * np.ones(nrSubj, float) # lower margin
adasEstimUp = -1 * np.ones(nrSubj, float) # upper margin
ventriclesEstim = -1 * np.ones(nrSubj, float)
ventriclesEstimLo = -1 * np.ones(nrSubj, float) # lower margin
ventriclesEstimUp = -1 * np.ones(nrSubj, float) # upper margin
# print('subDf.keys()', forecastDf['Forecast Date'])
invalidResultReturn = (None,None,None,None,None,None,None,None,None,None,None)
invalidFlag = False
# for each subject in D4 match the closest user forecasts
for s in range(nrSubj):
currSubjMask = d4Df['RID'].iloc[s] == forecastDf['RID']
currSubjData = forecastDf[currSubjMask]
# if subject is missing
if currSubjData.shape[0] == 0:
print('WARNING: Subject RID %s missing from user forecasts' % d4Df['RID'].iloc[s])
invalidFlag = True
continue
# if not all forecast months are present
if currSubjData.shape[0] < 5*12: # check if at least 5 years worth of forecasts exist
print('WARNING: Missing forecast months for subject with RID %s' % d4Df['RID'].iloc[s])
invalidFlag = True
continue
currSubjData = currSubjData.reset_index(drop=True)
timeDiffsScanCog = [d4Df['CognitiveAssessmentDate'].iloc[s] - d for d in currSubjData['Forecast Date']]
# print('Forecast Date 2',currSubjData['Forecast Date'])
indexMin = np.argsort(np.abs(timeDiffsScanCog))[0]
# print('timeDiffsScanMri', indexMin, timeDiffsScanMri)
pCN = currSubjData['CN relative probability'].iloc[indexMin]
pMCI = currSubjData['MCI relative probability'].iloc[indexMin]
pAD = currSubjData['AD relative probability'].iloc[indexMin]
# normalise the relative probabilities by their sum
pSum = (pCN + pMCI + pAD)/3
pCN /= pSum
pMCI /= pSum
pAD /= pSum
hardEstimClass[s] = np.argmax([pCN, pMCI, pAD])
adasEstim[s] = currSubjData['ADAS13'].iloc[indexMin]
adasEstimLo[s] = currSubjData['ADAS13 50% CI lower'].iloc[indexMin]
adasEstimUp[s] = currSubjData['ADAS13 50% CI upper'].iloc[indexMin]
# for the mri scan find the forecast closest to the scan date,
# which might be different from the cognitive assessment date
timeDiffsScanMri = [d4Df['ScanDate'].iloc[s] - d for d in currSubjData['Forecast Date']]
indexMinMri = np.argsort(np.abs(timeDiffsScanMri))[0]
ventriclesEstim[s] = currSubjData['Ventricles_ICV'].iloc[indexMinMri]
ventriclesEstimLo[s] = currSubjData['Ventricles_ICV 50% CI lower'].iloc[indexMinMri]
ventriclesEstimUp[s] = currSubjData['Ventricles_ICV 50% CI upper'].iloc[indexMinMri]
# print('%d probs' % d4Df['RID'].iloc[s], pCN, pMCI, pAD)
if not np.isnan(trueDiag.iloc[s]):
zipTrueLabelAndProbs += [(trueDiag.iloc[s], [pCN, pMCI, pAD])]
if invalidFlag:
# if at least one subject was missing or had missing forecast months
raise ValueError('Submission was incomplete. Please resubmit')
# If there are NaNs in D4, filter them out along with the corresponding user forecasts.
# This can happen if rollover subjects don't come for a visit in ADNI3.
notNanMaskDiag = np.logical_not(np.isnan(trueDiag))
trueDiagFilt = trueDiag[notNanMaskDiag]
hardEstimClassFilt = hardEstimClass[notNanMaskDiag]
notNanMaskADAS = np.logical_not(np.isnan(trueADAS))
trueADASFilt = trueADAS[notNanMaskADAS]
adasEstim = adasEstim[notNanMaskADAS]
adasEstimLo = adasEstimLo[notNanMaskADAS]
adasEstimUp = adasEstimUp[notNanMaskADAS]
notNanMaskVents = np.logical_not(np.isnan(trueVents))
trueVentsFilt = trueVents[notNanMaskVents]
ventriclesEstim = ventriclesEstim[notNanMaskVents]
ventriclesEstimLo = ventriclesEstimLo[notNanMaskVents]
ventriclesEstimUp = ventriclesEstimUp[notNanMaskVents]
assert trueDiagFilt.shape[0] == hardEstimClassFilt.shape[0]
assert trueADASFilt.shape[0] == adasEstim.shape[0] == adasEstimLo.shape[0] == adasEstimUp.shape[0]
assert trueVentsFilt.shape[0] == ventriclesEstim.shape[0] == \
ventriclesEstimLo.shape[0] == ventriclesEstimUp.shape[0]
return zipTrueLabelAndProbs, hardEstimClassFilt, adasEstim, adasEstimLo, adasEstimUp, \
ventriclesEstim, ventriclesEstimLo, ventriclesEstimUp, trueDiagFilt, trueADASFilt, trueVentsFilt
def evalOneSub(d4Df, forecastDf):
"""
Evaluates one submission.
Parameters
----------
d4Df - Pandas data frame containing the D4 dataset
forecastDf - Pandas data frame containing user forecasts for D2 subjects.
Returns
-------
mAUC - multiclass Area Under Curve
bca - balanced classification accuracy
adasMAE - ADAS13 Mean Absolute Error
ventsMAE - Ventricles Mean Absolute Error
adasCovProb - ADAS13 Coverage Probability for 50% confidence interval
ventsCovProb - Ventricles Coverage Probability for 50% confidence interval
"""
forecastDf['Forecast Date'] = [datetime.strptime(x, '%Y-%m') for x in forecastDf['Forecast Date']] # treat each monthly estimate as falling on the first day of that month (e.g. 2017-01 -> 2017-01-01)
if isinstance(d4Df['Diagnosis'].iloc[0], str):
d4Df['CognitiveAssessmentDate'] = [datetime.strptime(x, '%Y-%m-%d') for x in d4Df['CognitiveAssessmentDate']]
d4Df['ScanDate'] = [datetime.strptime(x, '%Y-%m-%d') for x in d4Df['ScanDate']]
mapping = {'CN' : 0, 'MCI' : 1, 'AD' : 2}
d4Df.replace({'Diagnosis':mapping}, inplace=True)
diagLabels = ['CN', 'MCI', 'AD']
zipTrueLabelAndProbs, hardEstimClass, adasEstim, adasEstimLo, adasEstimUp, \
ventriclesEstim, ventriclesEstimLo, ventriclesEstimUp, trueDiagFilt, trueADASFilt, trueVentsFilt = \
parseData(d4Df, forecastDf, diagLabels)
zipTrueLabelAndProbs = list(zipTrueLabelAndProbs)
########## compute metrics for the clinical status #############
##### Multiclass AUC (mAUC) #####
nrClasses = len(diagLabels)
mAUC = MAUC.MAUC(zipTrueLabelAndProbs, num_classes=nrClasses)
### Balanced Classification Accuracy (BCA) ###
# print('hardEstimClass', np.unique(hardEstimClass), hardEstimClass)
trueDiagFilt = trueDiagFilt.astype(int)
# print('trueDiagFilt', np.unique(trueDiagFilt), trueDiagFilt)
bca = calcBCA(hardEstimClass, trueDiagFilt, nrClasses=nrClasses)
####### compute metrics for Ventricles and ADAS13 ##########
#### Mean Absolute Error (MAE) #####
adasMAE = np.mean(np.abs(adasEstim - trueADASFilt))
ventsMAE = np.mean(np.abs(ventriclesEstim - trueVentsFilt))
##### Weighted Error Score (WES) ####
adasCoeffs = 1/(adasEstimUp - adasEstimLo)
adasWES = np.sum(adasCoeffs * np.abs(adasEstim - trueADASFilt))/np.sum(adasCoeffs)
ventsCoeffs = 1/(ventriclesEstimUp - ventriclesEstimLo)
ventsWES = np.sum(ventsCoeffs * np.abs(ventriclesEstim - trueVentsFilt))/ | np.sum(ventsCoeffs) | numpy.sum |
import numpy as np
from keras.models import Model
from keras.models import load_model, model_from_json
from os.path import join
import config.settings as cnst
import plots.plots as plots
from predict.predict import predict_byte, predict_byte_by_section
from predict.predict_args import DefaultPredictArguments, Predict as pObj
from .ati_args import SectionActivationDistribution
import pandas as pd
from analyzers.collect_exe_files import get_partition_data, store_partition_data
import gc
import logging
import pefile
def find_qualified_sections(sd, trend, common_trend, support, fold_index):
""" Function for training Tier-1 model with whole byte sequence data
Args:
sd: object to hold activation distribution of PE sections
trend: plain activation trend found by core ATI process
common_trend: not used here
support: not used here
fold_index: current fold index of cross validation
Returns:
q_sections_by_q_criteria: a dict mapping the q_criterion found for each supplied percentile
to its respective list of qualified sections.
"""
btrend = trend.loc["BENIGN_ACTIVATION_MAGNITUDE"]
mtrend = trend.loc["MALWARE_ACTIVATION_MAGNITUDE"]
# Averaging based on respective benign and malware population
btrend = btrend / sd.b1_b_truth_count
mtrend = mtrend / sd.b1_m_truth_count
btrend[btrend == 0] = 1
mtrend[mtrend == 0] = 1
malfluence = mtrend / btrend
benfluence = btrend / mtrend
mal_q_criteria_by_percentiles = | np.percentile(malfluence, q=cnst.PERCENTILES) | numpy.percentile |
"""
Copyright 2020 Johns Hopkins University (Author: <NAME>)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import logging
import math
import multiprocessing
import yaml
from copy import deepcopy
import numpy as np
from ..hyp_defs import float_cpu
from ..io import RandomAccessAudioReader as AR
class SingleNoiseAugment(object):
"""Class to augment speech with additive noise of a single type,
e.g., music, babble, ...
Attributes:
noise_type: string label indicating the noise type.
noise_path: path to Kaldi style wav.scp file indicating the path
to the noise wav files.
min_snr: minimum SNR (dB) to sample from.
max_snr: maximum SNR (dB) to sample from.
rng: Random number generator returned by
np.random.RandomState (optional).
"""
def __init__(
self, noise_type, noise_path, min_snr, max_snr, random_seed=112358, rng=None
):
logging.info(
"init noise_augment with noise={} noise_path={} snr={}-{}".format(
noise_type, noise_path, min_snr, max_snr
)
)
self.noise_type = noise_type
self.r = AR(noise_path)
self.noise_keys = self.r.keys
self.min_snr = min_snr
self.max_snr = max_snr
self.cache = None
self.lock = multiprocessing.Lock()
if rng is None:
self.rng = np.random.RandomState(seed=random_seed)
else:
self.rng = deepcopy(rng)
logging.info("init noise_augment with noise={} done".format(noise_type))
@staticmethod
def _power(x):
"""Computes power of x in dB."""
return 10 * np.log10((x ** 2).sum())
@staticmethod
def snr(x, n):
"""Computes SNR in dB.
Args:
x: clean speech signal.
n: noise signal.
"""
return SingleNoiseAugment._power(x) - SingleNoiseAugment._power(n)
@staticmethod
def _compute_noise_scale(x, n, target_snr):
snr = SingleNoiseAugment.snr(x, n)
return 10 ** ((snr - target_snr) / 20)
def forward(self, x):
"""Adds noise to signal, SNR is chosen randomly.
Args:
x: clean speech signal.
Returns:
Noisy signal.
Dictionary containing information of noise type and SNR(dB).
"""
num_samples = x.shape[0]
with self.lock:
if self.cache is not None:
if self.cache.shape[0] > num_samples:
noise = self.cache[:num_samples]
self.cache = self.cache[num_samples:]
else:
noise = self.cache
self.cache = None
else:
noise = None
while noise is None or noise.shape[0] < num_samples:
with self.lock:
noise_idx = self.rng.randint(len(self.noise_keys))
key = self.noise_keys[noise_idx]
noise_k, fs_k = self.r.read([key])
noise_k = noise_k[0]
if noise is None:
need_samples = min(x.shape[0], noise_k.shape[0])
noise = noise_k[:need_samples]
else:
need_samples = min(x.shape[0] - noise.shape[0], noise_k.shape[0])
noise = np.concatenate((noise, noise_k[:need_samples]))
if need_samples < noise_k.shape[0]:
with self.lock:
self.cache = noise_k[need_samples:]
with self.lock:
target_snr = self.rng.uniform(self.min_snr, self.max_snr)
scale = self._compute_noise_scale(x, noise, target_snr)
info = {"noise_type": self.noise_type, "snr": target_snr}
return x + scale * noise, info
def __call__(self, x):
return self.forward(x)
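# Minimal usage sketch (the wav.scp path below is a hypothetical placeholder, not a file
# shipped with this module):
#   aug = SingleNoiseAugment("music", "data/music_wav.scp", min_snr=5, max_snr=15)
#   noisy, info = aug(clean_signal)   # info = {"noise_type": "music", "snr": <sampled dB>}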
class NoiseAugment(object):
"""Class to augment speech with additive noise from multiple types,
e.g., music, babble, ...
It will randomly choose which noise type to add.
Attributes:
noise_prob: probability of adding noise.
noise_types: dictionary of options with one entry per noise-type,
Each entry is also a dictiory with the following entries:
weight, max_snr, min_snr, noise_path. The weight parameter
is proportional to how often we want to sample a given noise
type.
rng: Random number generator returned by
np.random.RandomState (optional).
"""
def __init__(self, noise_prob, noise_types, random_seed=112358, rng=None):
logging.info("init noise augment")
self.noise_prob = noise_prob
assert isinstance(noise_types, dict)
# num_noise_types = len(noise_types)
augmenters = []
self.weights = np.zeros((len(noise_types),))
count = 0
for key, opts in noise_types.items():
self.weights[count] = opts["weight"]
aug = SingleNoiseAugment(
key,
opts["noise_path"],
opts["min_snr"],
opts["max_snr"],
random_seed=random_seed,
rng=rng,
)
augmenters.append(aug)
count += 1
self.weights /= | np.sum(self.weights) | numpy.sum |
import contextlib
import os
import time
from collections import defaultdict
from shapely.geometry import Polygon
from shapely.strtree import STRtree
import lovely_logger as logging
import numpy as np
import pandas as pd
import requests
import streamlit as st
from pandas import read_csv
from scipy import stats
from shapely.geometry import LineString, Point
# name
# trajectory
# geometry
examples = {
# , free choice of destination
"Bidirectional corridor (exp)": [
"Multi-Rooms",
"https://fz-juelich.sciebo.de/s/o4D8Va2MtbSeG2v/download",
"https://fz-juelich.sciebo.de/s/FNuSYwOre85km3U/download",
],
"Bottleneck BUW (exp)": [
"030_c_56_h0",
"https://fz-juelich.sciebo.de/s/AsrA465S3wNDNlo/download",
"https://fz-juelich.sciebo.de/s/rVdksQ7yUngiUmw/download",
],
"Bottleneck WDG (exp)": [
"WDG_09",
"https://fz-juelich.sciebo.de/s/oTG7vRCcQyYJ08q/download",
"https://fz-juelich.sciebo.de/s/lDuCQlJkwh9Of1C/download",
],
"Corner (exp)": [
"jps_eo-300-300-300_combined_MB",
"https://fz-juelich.sciebo.de/s/BfNxMk1qM64QqYj/download",
"https://fz-juelich.sciebo.de/s/qNVoD8RZ8UentBB/download",
],
"Crossing 90 (exp)": [
"CROSSING_90_a_10",
"https://fz-juelich.sciebo.de/s/gLfaofmZCNtf5Vx/download",
"https://fz-juelich.sciebo.de/s/f960CoXb26FKpkw/download",
],
"Crossing 120 (exp)": [
"CROSSING_120_A_1",
"https://fz-juelich.sciebo.de/s/X3WTuExdj2HXRVx/download",
"https://fz-juelich.sciebo.de/s/11Cz0bQWZCv23eI/download",
],
"Crossing 120 (exp)": [
"CROSSING_120_C_1",
"https://fz-juelich.sciebo.de/s/vrkGlCDKVTIz8Ch/download",
"https://fz-juelich.sciebo.de/s/11Cz0bQWZCv23eI/download",
],
"Stadium Entrance (exp)": [
"mo11_combine_MB",
"https://fz-juelich.sciebo.de/s/ckzZLnRJCKKgAnZ/download",
"https://fz-juelich.sciebo.de/s/kgXUEyu95FTQlFC/download",
],
"Multi-Rooms (sim)": [
"Multi-Rooms",
"https://fz-juelich.sciebo.de/s/7kwrnAzcv5m7ii2/download",
"https://fz-juelich.sciebo.de/s/VSPgE6Kcfp8qDIa/download",
],
"Bottleneck (sim)": [
"bottleneck",
"https://fz-juelich.sciebo.de/s/HldXLySEfEDMdZo/download",
"https://fz-juelich.sciebo.de/s/FqiSFGr6FajfYLD/download",
],
"HC-BUW (sim)": [
"HC_BUW",
"https://fz-juelich.sciebo.de/s/GgvVjc81lzmhTgv/download",
"https://fz-juelich.sciebo.de/s/NikHJ6TIHCwSoUM/download",
]
}
def get_time(t):
"""Time in min sec
:param t: Run time
:type t: float
:returns: str
"""
minutes = t // 60
seconds = t % 60
return f"""{minutes:.0f} min:{seconds:.0f} sec"""
def selected_traj_geo(text):
"""Returns a list of trajectory and geometry files"""
if text in examples.keys():
return examples[text]
else:
logging.warning(f"Could not find {text}")
logging.info(f"Available examples are {examples}")
return []
def download(url: str, filename: str):
try:
r = requests.get(url, stream=True)
logging.info(f"saving to {filename}")
with open(filename, "wb") as f:
for chunk in r.iter_content(chunk_size=1024 * 8):
if chunk:
f.write(chunk)
f.flush()
os.fsync(f.fileno())
except Exception as e:
st.error(
f"""Download of file {filename} failed.\n
Error: {e}"""
)
@contextlib.contextmanager
def profile(name):
start_time = time.time()
yield # <-- your code will execute here
total_time = time.time() - start_time
logging.info(f"{name}: {total_time * 1000.0:.4f} ms")
def weidmann(rho, v0=1.34, rho_max=5.4, gamma=1.913):
inv_rho = np.empty_like(rho)
mask = rho <= 0.01
inv_rho[mask] = 1 / rho_max
inv_rho[~mask] = 1 / rho[~mask]
return v0 * (1 - np.exp(-gamma * (inv_rho - 1 / rho_max))) # Eq. 6
def inv_weidmann(v, v0=1.34, rho_max=5.4, gamma=1.913):
v0 = np.max(v)
v[v > v0] = v0
s = 1 - v / v0
# np.log(s, where=np.logical_not(zero_mask))
x = -1 / gamma * np.log(s, out=np.zeros_like(s), where=(s != 0)) + 1 / rho_max
return 1 / x
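# Note: weidmann() evaluates the Weidmann speed-density relation
# v(rho) = v0 * (1 - exp(-gamma * (1/rho - 1/rho_max))), and inv_weidmann() inverts that
# relation to estimate a density from observed speeds (speeds are capped at the maximum
# observed value so the logarithm stays defined).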
def get_speed_index(traj_file):
lines = traj_file[:500].split("\n")
for line in lines:
if line.startswith("#ID"):
if "V" in line:
return int(line.split().index("V"))
return -1
def get_header(traj_file):
lines = traj_file[:500].split("\n")
for line in lines:
if line.startswith("#ID"):
if "FR" in line:
return line
return "Not extracted"
# todo: update with more rules for more files
def get_fps(traj_file):
fps = traj_file.split("framerate:")[-1].split("\n")[0]
try:
fps = int(float(fps))
except ValueError:
st.error(f"{fps} in header can not be converted to int")
logging.error(f"{fps} in header can not be converted to int")
st.stop()
return fps
def detect_jpscore(traj_file):
return "#description: jpscore" in traj_file
def get_unit(traj_file):
unit = "NOTHING"
if "#description: jpscore" in traj_file:
unit = "m"
else:
# petrack
unit_list = traj_file.split("unit:")
if len(unit_list) > 1:
unit = unit_list[-1].split("\n")[0]
if "x/" in traj_file:
unit = traj_file.split("x/")[-1].split()[0]
if "<x>" in traj_file:
# <number> <frame> <x> [in m] <y> [in m] <z> [in m]
unit = traj_file.split("<x>")[-1].split()[1].strip("]")
unit = unit.strip()
logging.info(f"Unit detected: <{unit}>")
return unit
def get_transitions(xml_doc, unit):
if unit == "cm":
cm2m = 100
else:
cm2m = 1
transitions = {}
for _, t_elem in enumerate(xml_doc.getElementsByTagName("transition")):
Id = t_elem.getAttribute("id")
n_vertex = len(t_elem.getElementsByTagName("vertex"))
vertex_array = np.zeros((n_vertex, 2))
for v_num, _ in enumerate(t_elem.getElementsByTagName("vertex")):
vertex_array[v_num, 0] = (
t_elem.getElementsByTagName("vertex")[v_num].attributes["px"].value
)
vertex_array[v_num, 1] = (
t_elem.getElementsByTagName("vertex")[v_num].attributes["py"].value
)
transitions[Id] = vertex_array / cm2m
return transitions
def get_measurement_lines(xml_doc, unit):
"""add area_L
https://www.jupedsim.org/jpsreport_inifile#measurement-area
"""
if unit == "cm":
cm2m = 100
else:
cm2m = 1
fake_id = 1000
measurement_lines = {}
for _, t_elem in enumerate(xml_doc.getElementsByTagName("area_L")):
Id = t_elem.getAttribute("id")
logging.info(f"Measurement id = <{Id}>")
if Id == "":
st.warning(f"Got Measurement line with no Id. Setting id = {fake_id}")
logging.info(f"Got Measurement line with no Id. Setting id = {fake_id}")
Id = fake_id
fake_id += 1
n_vertex = 2
vertex_array = np.zeros((n_vertex, 2))
vertex_array[0, 0] = (
t_elem.getElementsByTagName("start")[0].attributes["px"].value
)
vertex_array[0, 1] = (
t_elem.getElementsByTagName("start")[0].attributes["py"].value
)
vertex_array[1, 0] = (
t_elem.getElementsByTagName("end")[0].attributes["px"].value
)
vertex_array[1, 1] = (
t_elem.getElementsByTagName("end")[0].attributes["py"].value
)
measurement_lines[Id] = vertex_array / cm2m
logging.info(f"vertex: {vertex_array}")
return measurement_lines
# def passing_frame(
# ped_data: np.array, line: LineString, fps: int, max_distance: float
# ) -> int:
# """Return frame of first time ped enters the line buffer
# Enlarge the line by eps, a constant that is dependent on fps
# eps = 1/fps * v0, v0 = 1.3 m/s
# :param ped_data: trajectories of ped
# :param line: transition
# :param fps: fps
# : param max_distance: an arbitrary distance to the line
# :returns: frame of entrance. Return negative number if ped did not pass trans
# """
# eps = 1 / fps * 1.3
# line_buffer = line.buffer(eps, cap_style=3)
# p = ped_data[np.abs(ped_data[:, 2] - line.centroid.x) < max_distance]
# for (frame, x, y) in p[:, 1:4]:
# if Point(x, y).within(line_buffer):
# return frame
# return -1
# def passing_frame2(ped_data, line: LineString, fps: int, max_distance: float) -> int:
# s = STRtree([Point(ped_data[i, 2:4]) for i in range(ped_data.shape[0])])
# index = s.nearest_item(line)
# # nearest_point = ped_data[index, 2:4]
# nearest_frame = ped_data[index, 1]
# # print("nearest: ", nearest_point, "at", nearest_frame)
# L1 = line.coords[0]
# L2 = line.coords[1]
# P1 = ped_data[0, 2:4]
# P2 = ped_data[-1, 2:4]
# # print("Ped", P1, P2)
# # print("Line", L1, L2)
# sign1 = np.cross([L1, L2], [L1, P1])[1]
# sign2 = np.cross([L1, L2], [L1, P2])[1]
# if np.sign(sign1) != np.sign(sign2):
# # crossed_line = True
# return nearest_frame
# # crossed_line = False
# return -1
# # print("nearest_frame", nearest_frame)
# # print("Crossed?", crossed_line)
def on_different_sides(L1, L2, P1, P2) -> bool:
"""True is P1 and P2 are on different sides from [L1, L2]
L1
x
|
|
P1 x | x P2
x
L2
--> True
"""
sign1 = np.cross(L1 - L2, L1 - P1)
sign2 = np.cross(L1 - L2, L1 - P2)
return np.sign(sign1) != np.sign(sign2)
def passing_frame(ped_data: np.array, line: LineString, fps: float) -> int:
"""First frame at which the pedestrian is within a buffer around line
fps is used to determine the width of the buffer and is not needed
in the calculations.
Assume a desired speed of 1.3 m/s
:param ped_data: trajectories
:type ped_data: np.array
:param line: measurement line
:type line: LineString
:param fps: frames per second.
:type fps: float
:returns:
"""
XY = ped_data[:, 2:4]
L1 = np.array(line.coords[0])
L2 = np.array(line.coords[1])
P1 = XY[0]
P2 = XY[-1]
i1 = 0 # index of first element
i2 = len(XY) - 1 # index of last element
im = int(len(XY) / 2) # index of the element in the middle
M = XY[im]
i = 0
passed_line_at_frame = -1
sign = -1
if not on_different_sides(L1, L2, P1, P2):
return passed_line_at_frame, sign
while i1 + 1 < i2 and i < 20:
i += 1 # to avoid endless loops! Should be removed!
if on_different_sides(L1, L2, M, P2):
P1 = M
i1 = im
else:
P2 = M
i2 = im
im = int((i1 + i2) / 2)
M = XY[im]
# this is to ensure that the pedestrian really passed *through* the line
line_buffer = line.buffer(1.3/fps, cap_style=2)
if Point(XY[i1]).within(line_buffer):
passed_line_at_frame = ped_data[i1, 1]
sign = np.sign(np.cross(L1 - L2, XY[i1] - XY[i2]))
elif Point(XY[i2]).within(line_buffer):
passed_line_at_frame = ped_data[i2, 1]
sign = np.sign(np.cross(L1 - L2, XY[i1] - XY[i2]))
return passed_line_at_frame, sign
def read_trajectory(input_file):
data = read_csv(input_file, sep=r"\s+", dtype=np.float64, comment="#").values
return data
def read_obstacle(xml_doc, unit):
if unit == "cm":
cm2m = 100
else:
cm2m = 1
# Initialization of a dictionary with obstacles
return_dict = {}
# read in obstacles and combine
# them into an array for polygon representation
for o_num, o_elem in enumerate(xml_doc.getElementsByTagName("obstacle")):
N_polygon = len(o_elem.getElementsByTagName("polygon"))
if N_polygon == 1:
pass
else:
points = np.zeros((0, 2))
for p_num, p_elem in enumerate(o_elem.getElementsByTagName("polygon")):
for v_num, v_elem in enumerate(p_elem.getElementsByTagName("vertex")):
vertex_x = float(
# p_elem.getElementsByTagName("vertex")[v_num].attributes["px"].value
v_elem.attributes["px"].value
)
vertex_y = float(
# p_elem.getElementsByTagName("vertex")[v_num].attributes["py"].value
v_elem.attributes["py"].value
)
points = np.vstack([points, [vertex_x / cm2m, vertex_y / cm2m]])
points = np.unique(points, axis=0)
x = points[:, 0]
y = points[:, 1]
n = len(points)
center_point = [np.sum(x) / n, np.sum(y) / n]
angles = np.arctan2(x - center_point[0], y - center_point[1])
# sorting the points:
sort_tups = sorted(
[(i, j, k) for i, j, k in zip(x, y, angles)], key=lambda t: t[2]
)
return_dict[o_num] = np.array(sort_tups)[:, 0:2]
return return_dict
def read_subroom_walls(xml_doc, unit):
dict_polynom_wall = {}
n_wall = 0
if unit == "cm":
cm2m = 100
else:
cm2m = 1
for _, s_elem in enumerate(xml_doc.getElementsByTagName("subroom")):
for _, p_elem in enumerate(s_elem.getElementsByTagName("polygon")):
if True or p_elem.getAttribute("caption") == "wall":
n_wall = n_wall + 1
n_vertex = len(p_elem.getElementsByTagName("vertex"))
vertex_array = np.zeros((n_vertex, 2))
for v_num, _ in enumerate(p_elem.getElementsByTagName("vertex")):
vertex_array[v_num, 0] = (
p_elem.getElementsByTagName("vertex")[v_num]
.attributes["px"]
.value
)
vertex_array[v_num, 1] = (
p_elem.getElementsByTagName("vertex")[v_num]
.attributes["py"]
.value
)
dict_polynom_wall[n_wall] = vertex_array / cm2m
return dict_polynom_wall
def geo_limits(geo_xml, unit):
geometry_wall = read_subroom_walls(geo_xml, unit)
geominX = 1000
geomaxX = -1000
geominY = 1000
geomaxY = -1000
Xmin = []
Ymin = []
Xmax = []
Ymax = []
for _, wall in geometry_wall.items():
Xmin.append(np.min(wall[:, 0]))
Ymin.append(np.min(wall[:, 1]))
Xmax.append(np.max(wall[:, 0]))
Ymax.append(np.max(wall[:, 1]))
geominX = np.min(Xmin)
geomaxX = np.max(Xmax)
geominY = np.min(Ymin)
geomaxY = np.max(Ymax)
return geominX, geomaxX, geominY, geomaxY
def get_geometry_file(traj_file):
return traj_file.split("geometry:")[-1].split("\n")[0].strip()
def compute_speed(data, fps, df=10):
"""Calculates the speed and the angle from the trajectory points.
Using the forward formula
speed(f) = (X(f+df) - X(f))/df [1]
note: The last df frames are not calculated using [1].
It is assumes that the speed in the last frames
does not change
:param traj: trajectory of ped (x, y). 2D array
:param df: number of frames forwards
:param fps: frames per seconds
:returns: speed, angle
example:
df=4, S=10
0 1 2 3 4 5 6 7 8 9
X * * * * * * * * * *
V + + + + + +
* *
* * X[df:]
X[:S-df] * * │
│ * * ◄─┘
└────────► * *
* *
"""
agents = np.unique(data[:, 0]).astype(int)
once = 1
speeds = np.array([])
for agent in agents:
ped = data[data[:, 0] == agent]
traj = ped[:, 2:4]
size = traj.shape[0]
speed = np.ones(size)
if size < df:
logging.warning(
f"""Compute_speed: The number of frames used to calculate the speed {df}
exceeds the total amount of frames ({size}) in this trajectory."""
)
st.error(
f"""Compute_speed: The number of frames used to calculate the speed {df}
exceeds the total amount of frames ({size}) in this trajectory."""
)
st.stop()
delta = traj[df:, :] - traj[: size - df, :]
delta_square = np.square(delta)
delta_x_square = delta_square[:, 0]
delta_y_square = delta_square[:, 1]
s = np.sqrt(delta_x_square + delta_y_square)
speed[: size - df] = s / df * fps
speed[size - df :] = speed[size - df - 1]
if once:
speeds = speed
once = 0
else:
speeds = np.hstack((speeds, speed))
return speeds
def compute_speed_and_angle(data, fps, df=10):
"""Calculates the speed and the angle from the trajectory points.
Using the forward formula
speed(f) = (X(f+df) - X(f))/df [1]
note: The last df frames are not calculated using [1].
It is assumed that the speed in the last frames
does not change
:param data: trajectory data of all pedestrians (id, frame, x, y, ...). 2D array
:param df: number of frames forwards
:param fps: frames per second
:returns: the input data with angle and speed columns appended
example:
df=4, S=10
0 1 2 3 4 5 6 7 8 9
X * * * * * * * * * *
V + + + + + +
* *
* * X[df:]
X[:S-df] * * │
│ * * ◄─┘
└────────► * *
* *
"""
agents = np.unique(data[:, 0]).astype(int)
once = 1
speeds = np.array([])
for agent in agents:
ped = data[data[:, 0] == agent]
traj = ped[:, 2:4]
size = traj.shape[0]
speed = np.ones(size)
angle = np.zeros(size)
if size < df:
logging.warning(
f"""Compute_speed_and_angle() The number of frames used to calculate the speed {df}
exceeds the total amount of frames ({size}) for pedestrian {agent}"""
)
st.error(
f"""Compute_speed_and_angle() The number of frames used to calculate the speed {df}
exceeds the total amount of frames ({size}) for pedestrian {agent}"""
)
else:
delta = traj[df:, :] - traj[: size - df, :]
delta_x = delta[:, 0]
delta_y = delta[:, 1]
delta_square = np.square(delta)
delta_x_square = delta_square[:, 0]
delta_y_square = delta_square[:, 1]
angle[: size - df] = np.arctan2(delta_y, delta_x) * 180 / np.pi
s = np.sqrt(delta_x_square + delta_y_square)
speed[: size - df] = s / df * fps
speed[size - df :] = speed[size - df - 1]
angle[size - df :] = angle[size - df - 1]
ped = np.hstack((ped, angle.reshape(size, 1)))
ped = np.hstack((ped, speed.reshape(size, 1)))
if once:
data2 = ped
once = 0
else:
data2 = np.vstack((data2, ped))
return data2
def calculate_speed_average(
geominX, geomaxX, geominY, geomaxY, dx, dy, nframes, X, Y, speed
):
"""Calculate speed average over time"""
xbins = np.arange(geominX, geomaxX + dx, dx)
ybins = np.arange(geominY, geomaxY + dy, dy)
ret = stats.binned_statistic_2d(
X,
Y,
speed,
"mean",
bins=[xbins, ybins],
)
return np.nan_to_num(ret.statistic.T)
def calculate_density_average_weidmann(
geominX, geomaxX, geominY, geomaxY, dx, dy, nframes, X, Y, speed
):
"""Calculate density using Weidmann(speed)"""
density = inv_weidmann(speed)
xbins = np.arange(geominX, geomaxX + dx, dx)
ybins = np.arange(geominY, geomaxY + dy, dy)
ret = stats.binned_statistic_2d(
X,
Y,
density,
"mean",
bins=[xbins, ybins],
)
return np.nan_to_num(ret.statistic.T) # / nframes
def calculate_density_average_classic(
geominX, geomaxX, geominY, geomaxY, dx, dy, nframes, X, Y
):
"""Calculate classical method
Density = mean_time(N/A_i)
"""
xbins = np.arange(geominX, geomaxX + dx, dx)
ybins = np.arange(geominY, geomaxY + dy, dy)
area = dx * dy
ret = stats.binned_statistic_2d(
X,
Y,
None,
"count",
bins=[xbins, ybins],
)
return np.nan_to_num(ret.statistic.T) / nframes / area
def calculate_density_frame_classic(geominX, geomaxX, geominY, geomaxY, dx, dy, X, Y):
"""Calculate classical method
Density = mean_time(N/A_i)
"""
xbins = np.arange(geominX, geomaxX + dx, dx)
ybins = np.arange(geominY, geomaxY + dy, dy)
area = dx * dy
ret = stats.binned_statistic_2d(
X,
Y,
None,
"count",
bins=[xbins, ybins],
)
return | np.nan_to_num(ret.statistic.T) | numpy.nan_to_num |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
The polarization.core test suite.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
import unittest
from os.path import dirname, join
import numpy as np
from scipy import signal
import obspy
from obspy.signal import polarization, util
def _create_test_data():
"""
Test data used for some polarization tests.
:return:
"""
x = np.arange(0, 2048 / 20.0, 1.0 / 20.0)
x *= 2. * np.pi
y = np.cos(x)
tr_z = obspy.Trace(data=y)
tr_z.stats.sampling_rate = 20.
tr_z.stats.starttime = obspy.UTCDateTime('2014-03-01T00:00')
tr_z.stats.station = 'POLT'
tr_z.stats.channel = 'HHZ'
tr_z.stats.network = 'XX'
tr_n = tr_z.copy()
tr_n.data *= 2.
tr_n.stats.channel = 'HHN'
tr_e = tr_z.copy()
tr_e.stats.channel = 'HHE'
sz = obspy.Stream()
sz.append(tr_z)
sz.append(tr_n)
sz.append(tr_e)
sz.sort(reverse=True)
return sz
class PolarizationTestCase(unittest.TestCase):
"""
Test cases for polarization analysis
"""
def setUp(self):
path = join(dirname(__file__), 'data')
# setting up sliding window data
data_z = np.loadtxt(join(path, 'MBGA_Z.ASC'))
data_e = np.loadtxt(join(path, 'MBGA_E.ASC'))
data_n = np.loadtxt(join(path, 'MBGA_N.ASC'))
n = 256
fs = 75
inc = int(0.05 * fs)
self.data_win_z, self.nwin, self.no_win = \
util.enframe(data_z, signal.hamming(n), inc)
self.data_win_e, self.nwin, self.no_win = \
util.enframe(data_e, signal.hamming(n), inc)
self.data_win_n, self.nwin, self.no_win = \
util.enframe(data_n, signal.hamming(n), inc)
# global test input
self.fk = [2, 1, 0, -1, -2]
self.norm = pow(np.max(data_z), 2)
self.res = np.loadtxt(join(path, '3cssan.hy.1.MBGA_Z'))
def tearDown(self):
pass
def test_polarization(self):
"""
windowed data
"""
pol = polarization.eigval(self.data_win_e, self.data_win_n,
self.data_win_z, self.fk, self.norm)
rms = np.sqrt(np.sum((pol[0] - self.res[:, 34]) ** 2) /
np.sum(self.res[:, 34] ** 2))
self.assertEqual(rms < 1.0e-5, True)
rms = np.sqrt(np.sum((pol[1] - self.res[:, 35]) ** 2) /
np.sum(self.res[:, 35] ** 2))
self.assertEqual(rms < 1.0e-5, True)
rms = np.sqrt(np.sum((pol[2] - self.res[:, 36]) ** 2) /
np.sum(self.res[:, 36] ** 2))
self.assertEqual(rms < 1.0e-5, True)
rms = np.sqrt(np.sum((pol[3] - self.res[:, 40]) ** 2) /
np.sum(self.res[:, 40] ** 2))
self.assertEqual(rms < 1.0e-5, True)
rms = np.sqrt(np.sum((pol[4] - self.res[:, 42]) ** 2) /
np.sum(self.res[:, 42] ** 2))
self.assertEqual(rms < 1.0e-5, True)
rms = np.sqrt(np.sum((pol[5][:, 0] - self.res[:, 37]) ** 2) /
np.sum(self.res[:, 37] ** 2))
self.assertEqual(rms < 1.0e-5, True)
rms = np.sqrt(np.sum((pol[5][:, 1] - self.res[:, 38]) ** 2) /
| np.sum(self.res[:, 38] ** 2) | numpy.sum |
import numpy as np
import matplotlib.pyplot as plt
import sympy
from sympy import *
import sys
sys.path.append(r'C:\Users\elira\Google Drive\butools2\Python')
sys.path.append('/home/d/dkrass/eliransc/Python')
from tqdm import tqdm
from butools.ph import *
from butools.map import *
from butools.queues import *
from butools.mam import *
from butools.dph import *
from scipy.linalg import expm, sinm, cosm
from sympy import *
from sympy import Symbol
from sympy.physics.quantum import TensorProduct
import pickle as pkl
import pandas as pd
from sympy import diff, sin, exp
from numpy.linalg import matrix_power
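# The helper functions below manipulate Laplace-Stieltjes transforms of a single-server queue
# fed by two Poisson streams (rates lam1, lam2) with exponential services (rates mu1, mu2):
# busy() appears to be the LST of an M/M/1 busy period, ser_lap() the LST of an exponential
# service time, hyper() the LST of the resulting hyperexponential service mixture, rho() the
# utilisation, and w_lap() the Pollaczek-Khinchine formula for the FCFS waiting-time LST.
# tau() composes these into the transform whose derivatives at s = 0 yield the moments
# extracted by get_var() and get_nth_moment().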
def busy(s, lam2, mu2):
return ((lam2 + mu2 + s) - ((lam2 + mu2 + s) ** 2 - 4 * lam2 * mu2) ** 0.5) / (2 * lam2)
def ser_lap(s, mu):
return mu / (s + mu)
def hyper(s, lam1, lam2, mu1, mu2):
return ser_lap(s, mu1) * lam1 / (lam1 + lam2) + ser_lap(s, mu2) * lam2 / (lam1 + lam2)
def rho(lam1, lam2, mu1, mu2):
return (lam1 + lam2) * ((lam1 / ((lam1 + lam2) * mu1)) + (lam2 / ((lam1 + lam2) * mu2)))
def w_lap(s, lam1, lam2, mu1, mu2):
return ((1 - rho(lam1, lam2, mu1, mu2)) * s) / (s - (lam1 + lam2) * (1 - hyper(s, lam1, lam2, mu1, mu2)))
def F(s, lam1, lam2, mu1, mu2):
return w_lap(s, lam1, lam2, mu1, mu2) * ser_lap(s, mu1)
def A(s, lam1, lam2, mu2):
return (lam1 / (lam1 + lam2 - lam2 * (ser_lap(s, mu2))))
def beta(s, lam1, lam2, mu1, mu2):
return (lam1 / (lam1 + lam2 + s) + ((A(s, lam1, lam2, mu2) * lam2) / (lam1 + lam2 + s)) * (
ser_lap(s, mu2) - busy(s + lam1, lam2, mu2))) / (
1 - ((lam2 * busy(s + lam1, lam2, mu2)) / (lam1 + lam2 + s)))
def tau(s, lam1, lam2, mu1, mu2):
return ser_lap(s, mu1) * (A(s, lam1, lam2, mu2) * (
1 - F(lam1 + lam2 - lam2 * busy(s + lam1, lam2, mu2), lam1, lam2, mu1, mu2)) + F(
lam1 + lam2 - lam2 * busy(s + lam1, lam2, mu2), lam1, lam2, mu1, mu2) * beta(s, lam1, lam2, mu1, mu2))
def get_var(lam1, lam2, mu1, mu2):
s = Symbol('s')
y = tau(s, lam1, lam2, mu1, mu2)
dx = diff(y, s)
dxdx = diff(dx, s)
return dxdx.subs(s, 0) - (dx.subs(s, 0)) ** 2
def get_nth_moment(lam1, lam2, mu1, mu2, n):
s = Symbol('s')
y = tau(s, lam1, lam2, mu1, mu2)
for i in range(n):
if i == 0:
dx = diff(y, s)
else:
dx = diff(dx, s)
return dx.subs(s, 0)
def get_first_n_moments(parameters, n=5):
lam1, lam2, mu1, mu2 = parameters
moments = []
for n in range(1, n + 1):
moments.append(get_nth_moment(lam1, lam2, mu1, mu2, n) * (-1) ** n)
moments = np.array([moments], dtype='float')
return moments
def kroneker_sum(G, H):
size_g = G.shape[0]
size_h = H.shape[0]
return np.kron(G, np.identity(size_h)) + np.kron(np.identity(size_g), H)
def give_boundry_probs(R, A0, A1, A, B, C0, ro):
p00, p01, p02, p100, p110, p120, p101, p111, p121 = symbols('p00 p01 p02 p100 p110 p120 p101 p111 p121')
eqns = [np.dot(np.array([p00, p01, p02]), np.ones((A0.shape[0]))) - (1 - ro)]
eq3 = np.dot(np.array([p00, p01, p02]), A0) + np.dot(np.array([p100, p110, p120, p101, p111, p121]), A1)
eq1 = np.dot(np.array([p00, p01, p02]), C0)
eq2 = np.dot(np.array([p100, p110, p120, p101, p111, p121]), B + np.dot(R, A))
for eq_ind in range(B.shape[0]):
eqns.append(eq1[0, eq_ind] + eq2[0, eq_ind])
for eq_ind in range(A0.shape[0]):
eqns.append(eq3[0, eq_ind])
A_mat, b = linear_eq_to_matrix(eqns[:-1], [p00, p01, p02, p100, p110, p120, p101, p111, p121])
return A_mat, b
def get_expect_gph_system(R, p1_arr, xm_max=5000):
expected = 0
for pi_val in range(1, xm_max):
ui = p1_arr.reshape((1, R.shape[0]))
Ri = np.linalg.matrix_power(R, pi_val - 1)
expected += np.dot(np.dot(ui, Ri), np.ones((R.shape[0], 1))) * pi_val
return expected[0, 0]
def get_A0(Ts):
krom_sum = kroneker_sum(Ts[0], Ts[1])
if len(Ts) > 2:
for T_ind in range(2, len(Ts)):
krom_sum = kroneker_sum(krom_sum, Ts[T_ind])
return krom_sum
def get_C_first(T0s, Ts, s):
krom_sum = kroneker_sum(T0s[0], T0s[1])
if len(Ts) > 2:
for T_ind in range(2, len(Ts)):
krom_sum = kroneker_sum(krom_sum, T0s[T_ind])
return krom_sum
def get_B(Ts, s):
krom_sum = kroneker_sum(Ts[0], Ts[1])
if len(Ts) > 2:
for T_ind in range(2, len(Ts)):
krom_sum = kroneker_sum(krom_sum, Ts[T_ind])
return kroneker_sum(krom_sum, s)
def get_A(Ts, new_beta, s0):
kron_sum = kroneker_sum(np.zeros(Ts[0].shape[0]), np.zeros(Ts[1].shape[0]))
if len(Ts) > 2:
for T_ind in range(2, len(Ts)):
kron_sum = kroneker_sum(kron_sum, np.zeros(Ts[T_ind].shape[0]))
kron_sum = kroneker_sum(kron_sum, np.dot(s0, new_beta))
return kron_sum
def compute_s_beta(r, mu, num_stations=2):
s_ = np.array([])
total_arrivals_to_station = np.sum(r[:, station_ind]) + np.sum(r[station_ind, :]) - np.sum(
r[station_ind, station_ind])
beta = np.array([])
for stream_ind in range(r.shape[0]):
if r[station_ind, stream_ind] > 0:
beta = np.append(beta, r[station_ind, stream_ind] / total_arrivals_to_station)
s_ = np.append(s_, -mu[station_ind, stream_ind])
for out_station in range(num_stations):
if out_station != station_ind:
if r[out_station, station_ind] > 0:
beta = np.append(beta, r[out_station, station_ind] / total_arrivals_to_station)
s_ = np.append(s_, -mu[station_ind, station_ind])
new_beta = np.array([])
new_s_ = np.unique(s_)
for val in new_s_:
new_beta = np.append(new_beta, np.sum(beta[np.argwhere(s_ == val)]))
new_beta = new_beta.reshape((1, new_beta.shape[0]))
s = np.identity(new_s_.shape[0]) * new_s_
return s, new_beta, new_s_
def compute_curr_t(curr_ind, r, mu):
r_mismatched = np.sum(r[curr_ind, :]) - r[curr_ind, curr_ind]
r_matched = r[curr_ind, curr_ind]
mu_mismatched = np.mean(np.delete(mu[curr_ind, :], curr_ind, 0))
mu_matched = mu[curr_ind, curr_ind]
parameters = (r_mismatched, r_matched, mu_mismatched, mu_matched)
moments = get_first_n_moments(parameters)
return moments
def get_Ts_alphas(r, mu, station_ind):
alphas = []
Ts = []
T0s = []
for curr_ind in range(r.shape[0]):
if curr_ind != station_ind:
mome = compute_curr_t(curr_ind, r, mu)
curr_alpha, curr_T = PH3From5Moments(mome[0])
alphas.append(curr_alpha)
Ts.append(curr_T)
T0s.append(-np.dot(np.dot(curr_T, np.ones((curr_T.shape[0], 1))), curr_alpha))
for stream_ind in range(r[station_ind, :].shape[0]):
Ts.append(np.array(-r[station_ind, stream_ind]).reshape((1, 1)))
alphas.append(1.)
T0s.append(-np.dot(np.dot(Ts[-1], np.ones(1)), alphas[-1]))
return Ts, T0s, alphas
def total_arrivals_to_station(r):
return np.sum(r[:, station_ind]) + np.sum(r[station_ind, :]) - np.sum(r[station_ind, station_ind])
def get_ro(r, mu, new_beta, new_s_):
return np.sum(new_beta * total_arrivals_to_station(r) * (-1 / new_s_))
def get_ro_2(lam_0, lam_1, new_beta, s0):
return (lam_0 + lam_1) * np.dot(new_beta, 1 / s0)
from numpy.linalg import matrix_power
def get_bound_steady_state(R, A0, A1, AA, B, C0, ro):
u0, u10, u11 = symbols('u0 u10 u11')
eqns = [u0 - (1 - ro[0][0])]
for ind in range(2):
eqns.append(np.dot(u0, C0)[0][ind] + np.dot(np.array([u10, u11]), B)[ind] +
np.dot(np.dot(np.array([u10, u11]), R), AA)[0][0, ind])
A_mat, b = linear_eq_to_matrix(eqns, [u0, u10, u11])
    u0, u10, u11 = np.linalg.solve(np.array(A_mat, dtype=float), np.array(b, dtype=float))
return u0[0], u10[0], u11[0]
def get_Avg_system(R, u10, u11):
p1 = np.array([u10, u11])
total_avg = 0
for ind in range(1, 500):
total_avg += ind * np.sum(np.dot(p1, matrix_power(R, ind - 1)))
return total_avg
def get_steady(lam_0, lam_1, mu_0, mu_1):
T0 = np.array([-lam_0])
T1 = np.array([-lam_1])
Ts = [T0, T1]
T00 = np.array([-np.dot(T0, np.ones(1))])
    T10 = np.array([-np.dot(T1, np.ones(1))])
import sys
from io import StringIO
from typing import Tuple
import numpy as np
from scipy.ndimage import label
def load_map(path: str) -> np.ndarray:
with open(path, 'r') as f:
text = '\n'.join([' '.join(list(s)) for s in f.readlines()])
heightmap = np.loadtxt(StringIO(text), dtype=int)
return heightmap
def find_local_minima(map: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
pm = np.pad(map, pad_width=[(0, 0), (0, 1)], constant_values=10)
minima = np.sign(pm[:, :-1] - pm[:, 1:])
pm = np.pad(map, pad_width=[(0, 0), (1, 0)], constant_values=10)
minima += np.sign(pm[:, 1:] - pm[:, :-1])
pm = np.pad(map, pad_width=[(0, 1), (0, 0)], constant_values=10)
minima += np.sign(pm[:-1] - pm[1:])
    pm = np.pad(map, pad_width=[(1, 0), (0, 0)], constant_values=10)
    minima += np.sign(pm[1:] - pm[:-1])
    return np.where(minima == -4)  # lower than all four (padded) neighbours
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Proof of concept of a minimalist, 3 layers (1 hidden) Neural Network. Code
based on the lectures from the Machine Learning coursework (Stanford -
Coursera) by Prof. Dr. <NAME> and the code implementation of andr0idsensei
(https://gist.github.com/andr0idsensei/92dd7e54a029242690555e84dca93efd).
@author: Dr. <NAME>
"""
from scipy.io import loadmat
import numpy as np
if __name__ == "__main__":
# Setting up parameters.
hidden_size = 30
num_labels = 10
learning_rate = 0.2
J_threshold = 0.0001
regularize = True
n_iter = 8000
# Loading training data.
data = loadmat('data/mnist_sample.mat')
X = np.matrix(data['X'])
y = np.matrix(data['y'])
m = X.shape[0]
# Initializing random parameters (weights) for the activation of
# layers 1 and 2.
    e_init = np.sqrt(6) / np.sqrt(X.shape[1] + hidden_size)
"""Definition of Maxwell spaces."""
import numpy as _np
import numba as _numba
def _is_screen(grid):
"""Check if there is an edge only adjacent to one triangle."""
for e in range(grid.edges.shape[1]):
if len([j for i in grid.element_edges for j in i if j == e]) < 2:
return True
return False
def rwg0_function_space(
grid,
support_elements=None,
segments=None,
swapped_normals=None,
include_boundary_dofs=False,
truncate_at_segment_edge=True,
):
"""Define a space of RWG functions of order 0."""
from .space import SpaceBuilder, _process_segments
from bempp.api.utils.helpers import serialise_list_of_lists
support, normal_multipliers = _process_segments(
grid, support_elements, segments, swapped_normals
)
edge_neighbors, edge_neighbors_ptr = serialise_list_of_lists(grid.edge_neighbors)
(
global_dof_count,
support,
local2global,
local_multipliers,
) = _compute_rwg0_space_data(
support,
edge_neighbors,
edge_neighbors_ptr,
grid.element_edges,
grid.number_of_elements,
grid.number_of_edges,
include_boundary_dofs,
truncate_at_segment_edge,
)
return (
SpaceBuilder(grid)
.set_codomain_dimension(3)
.set_support(support)
.set_normal_multipliers(normal_multipliers)
.set_order(0)
.set_is_localised(False)
.set_shapeset("rwg0")
.set_identifier("rwg0")
.set_local2global(local2global)
.set_local_multipliers(local_multipliers)
.set_barycentric_representation(rwg0_barycentric_function_space)
.set_numba_evaluator(_numba_rwg0_evaluate)
.build()
)
def rwg0_barycentric_function_space(coarse_space):
"""Define a space of RWG functions of order 0 over a barycentric grid."""
from .space import SpaceBuilder
from scipy.sparse import coo_matrix
number_of_support_elements = coarse_space.number_of_support_elements
bary_grid_number_of_elements = 6 * coarse_space.grid.number_of_elements
bary_support_elements = 6 * _np.repeat(coarse_space.support_elements, 6) + _np.tile(
_np.arange(6), number_of_support_elements
)
bary_support_size = len(bary_support_elements)
    support = _np.zeros(6 * coarse_space.grid.number_of_elements, dtype=bool)
support[bary_support_elements] = True
normal_multipliers = _np.repeat(coarse_space.normal_multipliers, 6)
local_coords = _np.array(
[[0, 0], [0.5, 0], [1, 0], [0.5, 0.5], [0, 1], [0, 0.5], [1.0 / 3, 1.0 / 3]]
).T
coeffs = (
_np.array(
[
[1, -1.0 / 3, 0],
[-1.0 / 3, 1, 0],
[0, 1.0 / 3, -1.0 / 6],
[0, 0, 1.0 / 6],
[0, 0, 1.0 / 6],
[1.0 / 3, 0, -1.0 / 6],
]
),
_np.array(
[
[0, 1.0 / 3, -1.0 / 6],
[0, 0, 1.0 / 6],
[0, 0, 1.0 / 6],
[1.0 / 3, 0, -1.0 / 6],
[1, -1.0 / 3, 0],
[-1.0 / 3, 1, 0],
]
),
_np.array(
[
[0, 0, 1.0 / 6],
[1.0 / 3, 0, -1.0 / 6],
[1, -1.0 / 3, 0],
[-1.0 / 3, 1, 0],
[0, 1.0 / 3, -1.0 / 6],
[0, 0, 1.0 / 6],
]
),
)
coarse_dofs, bary_dofs, values = generate_rwg0_map(
coarse_space.grid.data(), coarse_space.support_elements, local_coords, coeffs
)
local2global = _np.zeros((bary_grid_number_of_elements, 3), dtype="uint32")
local_multipliers = _np.zeros((bary_grid_number_of_elements, 3), dtype="uint32")
local2global[support] = _np.arange(3 * bary_support_size).reshape(
bary_support_size, 3
)
local_multipliers[support] = 1
transform = coo_matrix(
(values, (bary_dofs, coarse_dofs)),
shape=(3 * bary_support_size, 3 * number_of_support_elements),
dtype=_np.float64,
).tocsr()
dof_transformation = transform @ coarse_space.map_to_localised_space
return (
SpaceBuilder(coarse_space.grid.barycentric_refinement)
.set_codomain_dimension(3)
.set_support(support)
.set_normal_multipliers(normal_multipliers)
.set_order(0)
.set_is_localised(True)
.set_is_barycentric(True)
.set_shapeset("rwg0")
.set_identifier("rwg0")
.set_local2global(local2global)
.set_local_multipliers(local_multipliers)
.set_dof_transformation(dof_transformation)
.set_numba_evaluator(_numba_rwg0_evaluate)
.build()
)
def snc0_function_space(
grid,
support_elements=None,
segments=None,
swapped_normals=None,
include_boundary_dofs=False,
truncate_at_segment_edge=True,
):
"""Define a space of SNC functions of order 0."""
from .space import SpaceBuilder, _process_segments
from bempp.api.utils.helpers import serialise_list_of_lists
support, normal_multipliers = _process_segments(
grid, support_elements, segments, swapped_normals
)
edge_neighbors, edge_neighbors_ptr = serialise_list_of_lists(grid.edge_neighbors)
(
global_dof_count,
support,
local2global,
local_multipliers,
) = _compute_rwg0_space_data(
support,
edge_neighbors,
edge_neighbors_ptr,
grid.element_edges,
grid.number_of_elements,
grid.number_of_edges,
include_boundary_dofs,
truncate_at_segment_edge,
)
return (
SpaceBuilder(grid)
.set_codomain_dimension(3)
.set_support(support)
.set_normal_multipliers(normal_multipliers)
.set_order(0)
.set_is_localised(False)
.set_shapeset("snc0")
.set_identifier("snc0")
.set_local2global(local2global)
.set_local_multipliers(local_multipliers)
.set_barycentric_representation(snc0_barycentric_function_space)
.set_numba_evaluator(_numba_snc0_evaluate)
.set_numba_surface_curl(_numba_snc0_surface_curl)
.build()
)
def snc0_barycentric_function_space(coarse_space):
"""Define a space of SNC functions of order 0 over a barycentric grid."""
from .space import SpaceBuilder
from scipy.sparse import coo_matrix
number_of_support_elements = coarse_space.number_of_support_elements
bary_grid_number_of_elements = 6 * coarse_space.grid.number_of_elements
bary_support_elements = 6 * _np.repeat(coarse_space.support_elements, 6) + _np.tile(
_np.arange(6), number_of_support_elements
)
bary_support_size = len(bary_support_elements)
    support = _np.zeros(6 * coarse_space.grid.number_of_elements, dtype=bool)
support[bary_support_elements] = True
normal_multipliers = _np.repeat(coarse_space.normal_multipliers, 6)
local_coords = _np.array(
[[0, 0], [0.5, 0], [1, 0], [0.5, 0.5], [0, 1], [0, 0.5], [1.0 / 3, 1.0 / 3]]
).T
coeffs = (
_np.array(
[
[1, -1.0 / 3, 0],
[-1.0 / 3, 1, 0],
[0, 1.0 / 3, -1.0 / 6],
[0, 0, 1.0 / 6],
[0, 0, 1.0 / 6],
[1.0 / 3, 0, -1.0 / 6],
]
),
_np.array(
[
[0, 1.0 / 3, -1.0 / 6],
[0, 0, 1.0 / 6],
[0, 0, 1.0 / 6],
[1.0 / 3, 0, -1.0 / 6],
[1, -1.0 / 3, 0],
[-1.0 / 3, 1, 0],
]
),
_np.array(
[
[0, 0, 1.0 / 6],
[1.0 / 3, 0, -1.0 / 6],
[1, -1.0 / 3, 0],
[-1.0 / 3, 1, 0],
[0, 1.0 / 3, -1.0 / 6],
[0, 0, 1.0 / 6],
]
),
)
coarse_dofs, bary_dofs, values = generate_rwg0_map(
coarse_space.grid.data(), coarse_space.support_elements, local_coords, coeffs
)
local2global = _np.zeros((bary_grid_number_of_elements, 3), dtype="uint32")
local_multipliers = _np.zeros((bary_grid_number_of_elements, 3), dtype="uint32")
local2global[support] = _np.arange(3 * bary_support_size).reshape(
bary_support_size, 3
)
local_multipliers[support] = 1
transform = coo_matrix(
(values, (bary_dofs, coarse_dofs)),
shape=(3 * bary_support_size, 3 * number_of_support_elements),
dtype=_np.float64,
).tocsr()
dof_transformation = transform @ coarse_space.map_to_localised_space
return (
SpaceBuilder(coarse_space.grid.barycentric_refinement)
.set_codomain_dimension(3)
.set_support(support)
.set_normal_multipliers(normal_multipliers)
.set_order(0)
.set_is_localised(True)
.set_is_barycentric(True)
.set_shapeset("rwg0")
.set_identifier("snc0")
.set_local2global(local2global)
.set_local_multipliers(local_multipliers)
.set_dof_transformation(dof_transformation)
.set_numba_evaluator(_numba_snc0_evaluate)
.build()
)
def bc_function_space(
grid,
support_elements=None,
segments=None,
swapped_normals=None,
include_boundary_dofs=False,
truncate_at_segment_edge=True,
):
"""Define a space of BC functions."""
from .space import SpaceBuilder
if _is_screen(grid):
# Grid is a screen, not a polyhedron
raise ValueError("BC spaces not yet supported on screens")
bary_grid = grid.barycentric_refinement
coarse_space = rwg0_function_space(
grid,
support_elements,
segments,
swapped_normals,
include_boundary_dofs=include_boundary_dofs,
truncate_at_segment_edge=truncate_at_segment_edge,
)
(
dof_transformation,
support,
normal_multipliers,
local2global,
local_multipliers,
) = _compute_bc_space_data(
grid, bary_grid, coarse_space, truncate_at_segment_edge, swapped_normals
)
return (
SpaceBuilder(bary_grid)
.set_codomain_dimension(3)
.set_support(support)
.set_normal_multipliers(normal_multipliers)
.set_order(0)
.set_is_localised(True)
.set_is_barycentric(True)
.set_shapeset("rwg0")
.set_identifier("rwg0")
.set_local2global(local2global)
.set_local_multipliers(local_multipliers)
.set_dof_transformation(dof_transformation)
.set_numba_evaluator(_numba_rwg0_evaluate)
.build()
)
def rbc_function_space(
grid,
support_elements=None,
segments=None,
swapped_normals=None,
include_boundary_dofs=False,
truncate_at_segment_edge=True,
):
"""Define a space of RBC functions."""
from .space import SpaceBuilder
if _is_screen(grid):
# Grid is a screen, not a polyhedron
raise ValueError("BC spaces not yet supported on screens")
bary_grid = grid.barycentric_refinement
coarse_space = rwg0_function_space(
grid,
support_elements,
segments,
swapped_normals,
include_boundary_dofs=include_boundary_dofs,
truncate_at_segment_edge=truncate_at_segment_edge,
)
(
dof_transformation,
support,
normal_multipliers,
local2global,
local_multipliers,
) = _compute_bc_space_data(
grid, bary_grid, coarse_space, truncate_at_segment_edge, swapped_normals
)
return (
SpaceBuilder(bary_grid)
.set_codomain_dimension(3)
.set_support(support)
.set_normal_multipliers(normal_multipliers)
.set_order(0)
.set_is_localised(True)
.set_is_barycentric(True)
.set_shapeset("rwg0")
.set_identifier("snc0")
.set_local2global(local2global)
.set_local_multipliers(local_multipliers)
.set_dof_transformation(dof_transformation)
.set_numba_evaluator(_numba_snc0_evaluate)
.build()
)
def _compute_bc_space_data(
grid, bary_grid, coarse_space, truncate_at_segment_edge, swapped_normals
):
"""Generate the BC map."""
from bempp.api.grid.grid import enumerate_vertex_adjacent_elements
from scipy.sparse import coo_matrix
    coarse_support = _np.zeros(grid.entity_count(0), dtype=bool)
coarse_support[coarse_space.support_elements] = True
if not truncate_at_segment_edge:
for global_dof_index in range(coarse_space.global_dof_count):
local_dofs = coarse_space.global2local[global_dof_index]
edge_index = grid.data().element_edges[local_dofs[0][1], local_dofs[0][0]]
for v in range(2):
vertex = grid.data().edges[v, edge_index]
start = grid.vertex_neighbors.indexptr[vertex]
end = grid.vertex_neighbors.indexptr[vertex + 1]
for cell in grid.vertex_neighbors.indices[start:end]:
coarse_support[cell] = True
coarse_support_elements = _np.array([i for i, j in enumerate(coarse_support) if j])
number_of_support_elements = len(coarse_support_elements)
bary_support_elements = 6 * _np.repeat(coarse_support_elements, 6) + _np.tile(
_np.arange(6), number_of_support_elements
)
    support = _np.zeros(bary_grid.number_of_elements, dtype=bool)
support[bary_support_elements] = True
bary_support_size = len(bary_support_elements)
bary_vertex_to_edge = enumerate_vertex_adjacent_elements(
bary_grid, bary_support_elements, swapped_normals
)
edge_vectors = (
bary_grid.vertices[:, bary_grid.edges[0, :]]
- bary_grid.vertices[:, bary_grid.edges[1, :]]
)
edge_lengths = _np.linalg.norm(edge_vectors, axis=0)
normal_multipliers = _np.repeat(coarse_space.normal_multipliers, 6)
local2global = _np.zeros((bary_grid.number_of_elements, 3), dtype="uint32")
    local_multipliers = _np.zeros((bary_grid.number_of_elements, 3), dtype="uint32")
# source ~/tensorflow/bin/activate
from __future__ import print_function
from tensorflow.examples.tutorials.mnist import input_data
data_flag = 1 # 1 for fmnist, 2 for mnist
if data_flag == 1:
mnist = input_data.read_data_sets('fMNIST_data', one_hot=True)
elif data_flag == 2:
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
import tensorflow as tf
import numpy as np
import scipy
from scipy.ndimage.interpolation import zoom
from random import shuffle
def batch_mod(x,y): # adds balanced dimming random images with None label
    x_1 = int(np.round(np.shape(x)[0]))
import cv2
import sys
import os
import numpy as np
import math
import random
import json
import matplotlib.pyplot as plt
#%matplotlib inline
plt.rcParams['figure.figsize'] = (10, 10)
def get_affine_matrix(center, angle, translate, scale, shear):
# Helper method to compute affine transformation
# As it is explained in PIL.Image.rotate
# We need compute affine transformation matrix: M = T * C * RSS * C^-1
# where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
# C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
# RSS is rotation with scale and shear matrix
# RSS(a, scale, shear) = [ cos(a)*sx -sin(a + shear)*sy 0]
# [ sin(a)*sx cos(a + shear)*sy 0]
# [ 0 0 1]
angle = math.radians(angle)
shear = math.radians(shear)
T = np.array([[1, 0, translate[0]], [0, 1, translate[1]], [0, 0, 1]]).astype(np.float32)
C = np.array([[1, 0, center[0]], [0, 1, center[1]], [0, 0, 1]]).astype(np.float32)
RSS = np.array([[ math.cos(angle)*scale[0], -math.sin(angle + shear)*scale[1], 0],
[ math.sin(angle)*scale[0], math.cos(angle + shear)*scale[1], 0],
[ 0, 0, 1]]).astype(np.float32)
C_inv = np.linalg.inv(np.mat(C))
M = T.dot(C).dot(RSS).dot(C_inv)
return M
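def _affine_matrix_example(img, angle=15):
    # Illustrative sketch (not part of the original script): rotate an image about its
    # centre with no translation, scaling or shear; cv2.warpAffine only needs the top
    # 2x3 block of the 3x3 matrix returned by get_affine_matrix().
    h, w = img.shape[:2]
    M = get_affine_matrix(center=(w / 2.0, h / 2.0), angle=angle, translate=(0, 0),
                          scale=(1.0, 1.0), shear=0)
    return cv2.warpAffine(img, np.asarray(M)[:2, :], (w, h))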
def get_inverse_affine_matrix(center, angle, translate, scale, shear):
# Helper method to compute inverse matrix for affine transformation
# As it is explained in PIL.Image.rotate
# We need compute INVERSE of affine transformation matrix: M = T * C * RSS * C^-1
# where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
# C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
# RSS is rotation with scale and shear matrix
# RSS(a, scale, shear) = [ cos(a)*sx -sin(a + shear)*sy 0]
# [ sin(a)*sx cos(a + shear)*sy 0]
# [ 0 0 1]
# Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1
angle = math.radians(angle)
shear = math.radians(shear)
T = np.array([[1, 0, translate[0]], [0, 1, translate[1]], [0, 0, 1]]).astype(np.float32)
C = np.array([[1, 0, center[0]], [0, 1, center[1]], [0, 0, 1]]).astype(np.float32)
RSS = np.array([[ math.cos(angle)*scale[0], -math.sin(angle + shear)*scale[1], 0],
[ math.sin(angle)*scale[0], math.cos(angle + shear)*scale[1], 0],
[ 0, 0, 1]]).astype(np.float32)
T_inv = np.linalg.inv(np.mat(T))
RSS_inv = np.linalg.inv(np.mat(RSS))
C_inv = np.linalg.inv(np.mat(C))
M = C.dot(RSS_inv).dot(C_inv).dot(T_inv)
return M
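def _inverse_affine_check(center=(64.0, 64.0), angle=30, translate=(5, -3)):
    # Illustrative sketch (not part of the original script): since M = T*C*RSS*C^-1 and
    # M^-1 = C*RSS^-1*C^-1*T^-1, the two helpers should be exact inverses for the same
    # parameters, so their product is numerically the identity (float32 tolerance).
    M = get_affine_matrix(center, angle, translate, scale=(1.0, 1.0), shear=0)
    M_inv = get_inverse_affine_matrix(center, angle, translate, scale=(1.0, 1.0), shear=0)
    return np.allclose(np.asarray(M).dot(np.asarray(M_inv)), np.eye(3), atol=1e-4)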
def masks2bboxes(masks):
'''
masks: (N, H, W) or [N](H, W)
'''
bboxes = []
for mask in masks:
if np.max(mask)<=0.5:
continue
        idxs = np.where(mask>0.5)
# Copyright (c) <NAME>, 2007
import numpy as np
from . import wrapped_distances
import inspect
import imp
import pickle
from .isotropic_cov_funs import symmetrize, imul
from copy import copy
import sys,os
from pymc import get_threadpool_size, map_noreturn
import pymc
from pymc import six
xrange = six.moves.xrange
mod_search_path = [pymc.__path__[0]+'/gp/cov_funs', os.getcwd()] + sys.path
__all__ = ['covariance_wrapper', 'covariance_function_bundle']
def regularize_array(A):
"""
Takes an np.ndarray as an input.
- If the array is one-dimensional, it's assumed to be an array of input values.
    - If the array is more than one-dimensional, its last index is assumed to iterate
    over the spatial dimensions.
Either way, the return value is at least two dimensional. A.shape[-1] gives the
number of spatial dimensions.
"""
if not isinstance(A,np.ndarray):
        A = np.array(A, dtype=float)
'''
,,
.M"""bgd db mm
,MI "Y MM
`MMb. `7MMpMMMb. ,pW"Wq. `7M' ,A `MF' .gP"Ya `7MMpMMMb. `7MMpdMAo. `7Mb,od8 ,pW"Wq. `7MM .gP"Ya ,p6"bo mmMMmm
`YMMNq. MM MM 6W' `Wb VA ,VAA ,V ,M' Yb MM MM MM `Wb MM' "' 6W' `Wb MM ,M' Yb 6M' OO MM
. `MM MM MM 8M M8 VA ,V VA ,V 8M"""""" MM MM MM M8 MM 8M M8 MM 8M"""""" 8M MM
Mb dM MM MM YA. ,A9 VVV VVV YM. , MM MM MM ,AP MM YA. ,A9 MM YM. , YM. , MM
P"Ybmmd" .JMML JMML. `Ybmd9' W W `Mbmmd' .JMML JMML. MMbmmd' .JMML. `Ybmd9' MM `Mbmmd' YMbmd' `Mbmo
MM QO MP
.JMML. `bmP
By <NAME>
'''
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
LIBRARIES
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
import os
from os.path import dirname, realpath, join
import glob
from osgeo import gdal, gdalconst, osr
import folium
from folium.raster_layers import ImageOverlay
from folium.plugins import MeasureControl
from folium import plugins
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage
from skimage import measure
from shapely.geometry import mapping, Polygon, Point
import fiona
from fiona.crs import from_epsg
import geopandas as gpd
import pandas as pd
import ogr
import subprocess
from branca.element import Template, MacroElement
from math import radians, sin
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
GENERALS
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
## SAY HELLO ##
def bePolite():
print("HELLO")
def makeFolder(path):
try:
os.makedirs(path, exist_ok=True)
except:
pass
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
STACK OPERATIONS
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def stack(pt, tifs):
return gdal.Translate(pt, gdal.BuildVRT('/vsimem/stacked.vrt', tifs, separate=True))
def stackBands(folder):
tifs = glob.glob(join(folder, '*.tif'))
print(tifs)
#tifs.sort(key=len)
print("stack", join(folder,'stack.tif'))
stack(join(folder,'stack.tif'), tifs)
def loadRasterStack(bandPath, bands): #bands in [2, 3, 4...]
raster_ds = gdal.Open(bandPath, gdal.GA_ReadOnly)
if raster_ds is None:
raise Exception("can't open stack file")
return raster_ds, [raster_ds.GetRasterBand(elem).ReadAsArray() for elem in bands] # raster object and images in those bands
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
SINGLE BAND OPERATION
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
# Read a single raster image
def loadRasterImage(path):
raster_ds = gdal.Open(path, gdal.GA_ReadOnly)
if raster_ds is None:
raise Exception("No se puede abrir el archivo tif")
return raster_ds, raster_ds.GetRasterBand(1).ReadAsArray()#.astype(np.float32)
def saveBandAsTiff(dst, rt, img, tt):
transform = rt.GetGeoTransform()
geotiff = gdal.GetDriverByName('GTiff')
output = geotiff.Create(dst, rt.RasterXSize, rt.RasterYSize, 1,tt)
wkt = rt.GetProjection()
srs = osr.SpatialReference()
srs.ImportFromWkt(wkt)
output.GetRasterBand(1).WriteArray(np.array(img))
output.GetRasterBand(1).SetNoDataValue(-999)
output.SetGeoTransform(transform)
output.SetProjection(srs.ExportToWkt())
output = None
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
INDEX OPERATIONS
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def calcNormIndex(imgs):
np.seterr(invalid='ignore')
return (imgs[1] - imgs[0]) / (imgs[1] + imgs[0])
def norm(img):
    return (img - np.min(img)) / (np.max(img) - np.min(img))
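def _norm_index_example(stackPath, outPath, bandA=4, bandB=8):
    # Illustrative sketch (not part of the original script): read two bands from a stack
    # (the band numbers and paths are placeholders), compute their normalized difference
    # index and save it as a float GeoTIFF with the georeferencing of the source raster.
    raster, (imgA, imgB) = loadRasterStack(stackPath, [bandA, bandB])
    index = calcNormIndex([imgA.astype(float), imgB.astype(float)])
    saveBandAsTiff(outPath, raster, index, gdal.GDT_Float32)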
import numpy as np
from pyDOE import lhs
import dataset_creation
import utils
from definitions import input_upper_bound, input_lower_bound, valid_sampling_methods
# %% Initialization ---------------------------------------------
# These are the models used for N-1 analysis
Nm1_models = dataset_creation.NSWPH_Initialize_Nm1_Models()
def create_input_OPs(N_points_per_dimension: int = 5, sampling_method: str = 'Grid', seed: int = None):
"""
Basic sampler of input data, i.e., the operating points (OPs). At first sampling in the unscaled domain, i.e.,
[0;1] and then scaling using the value of the domain bounds from definitions.py.
    :param N_points_per_dimension: Governs the number of samples, i.e., N**4 for the four input dimensions. For
    non-grid sampling, simply N**4 datapoints are sampled.
:param sampling_method: Defining the sampling method - valid methods in definitions.py
:param seed: Controlling the random sampling of the input OPs (if set)
:return:
"""
assert sampling_method in valid_sampling_methods, \
f'Please choose a valid sampling method.\nAvailable: {valid_sampling_methods}'
num_samples = N_points_per_dimension ** 4
if seed is not None:
np.random.seed(seed=seed)
if sampling_method == 'Grid':
base_linspace = np.linspace(0, 1, N_points_per_dimension)
base_grid = np.meshgrid(base_linspace,
base_linspace,
base_linspace,
base_linspace)
unscaled_samples = np.hstack([grid_dimension.reshape((-1, 1)) for grid_dimension in base_grid])
elif sampling_method == 'LHC':
unscaled_samples = lhs(n=4, samples=num_samples)
elif sampling_method == 'Uniform':
unscaled_samples = np.random.rand(num_samples, 4)
else:
raise Exception('Invalid sampling method.')
input_data = input_lower_bound + unscaled_samples * (input_upper_bound - input_lower_bound)
return input_data
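def _sampling_example():
    # Illustrative sketch (not part of the original module): 3 points per dimension with
    # Latin-hypercube sampling and a fixed seed gives 3**4 = 81 operating points, each a
    # row of four values scaled into [input_lower_bound, input_upper_bound].
    ops = create_input_OPs(N_points_per_dimension=3, sampling_method='LHC', seed=42)
    return ops.shape  # (81, 4)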
def evaluate_input_OPs(input_data):
"""
Evaluation of the physical model (NSWPH) for the given input operating points (OP).
:param input_data: A numpy array with each row corresponding to an OP that consists of power set points and droop
parameters.
:return: The resulting values of the minimum damping ratio as well as the associated Jacobians and eigenvalues.
"""
input_data = utils.ensure_numpy_array(input_data)
output = [dataset_creation.NSWPH_Minimum_Data_Point(input_OP, Nm1_models) for
input_OP in input_data]
    output_damping_ratios = np.vstack([output_tuple[0] for output_tuple in output])
"""
Authors: <<NAME>, <NAME>>
Copyright: (C) 2019-2020 <http://www.dei.unipd.it/
Department of Information Engineering> (DEI), <http://www.unipd.it/ University of Padua>, Italy
License: <http://www.apache.org/licenses/LICENSE-2.0 Apache License, Version 2.0>
"""
import os
import math
import string
import subprocess
import itertools
import pickle
import numpy as np
import xml.etree.ElementTree as ET
from collections import Counter
from functools import reduce
from textwrap import wrap
from whoosh.analysis import SimpleAnalyzer
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm
class Utils(object):
"""utils functions for neural vector space models"""
def __init__(self, seed):
"""set random seed, initialize index variables"""
np.random.seed(seed)
self.term_dict = {}
def build_term_dictionary(self, index, dict_size=65536, oov=False, remove_digits=True, min_doc_freq=2,
max_doc_freq=0.5):
"""create term dictionary"""
reader = index.reader()
# get corpus size
corpus_size = reader.doc_count()
# get unique terms statistics: (term, doc_freq, term_freq)
terms = self.terms_statistics(index)
# initialize count list
count = []
# add terms to count
for term, doc_freq, term_freq in terms:
# check if term does not exceed max_doc_freq (in %)
if doc_freq / corpus_size <= max_doc_freq:
# check if term is not inferior to min_doc_freq (not in %)
if doc_freq >= min_doc_freq:
# check if term does not contain digits
if remove_digits:
if self.has_digit(term): # skip term
continue
else: # keep term
count.extend([(term, term_freq)])
else: # keep terms containing digits
count.extend([(term, term_freq)])
else: # minimum doc freq not reached
# skip term
continue
else: # maximum doc freq exceeded
# skip term
continue
# convert count into Counter object and keep dict_size most frequent terms
count = Counter(dict(count)).most_common(dict_size)
if oov:
# include out of vocabulary token
count.extend([("__UNK__", 1)]) # last index: dict_size
# for each term - that we want in the dictionary - add it and make it the value of the prior dictionary length
for term, term_freq in count:
self.term_dict[term] = len(self.term_dict)
return True
def has_digit(self, term):
"""check whether input term contains digits"""
return any(char.isdigit() for char in term)
def only_digits(self, term):
"""check whether input term contains only digits and/or punctuation"""
return all(char.isdigit() or char in string.punctuation for char in term)
def get_term_dictionary(self):
"""get term dictionary"""
return self.term_dict
def update_term_dictionary(self, term):
"""update term dictionary"""
if term in self.term_dict: # term already in term_dict
return True
else: # update term_dict
self.term_dict[term] = len(self.term_dict)
return True
def find_pos(self, line):
"""split text into terms and return dict {pos: [term, ["__NULL__"]]}"""
pos_terms = {}
terms = line.split()
# define sentence index
index = line.index
running_offset = 0
# loop over terms
for term in terms:
# get term offset
term_offset = index(term, running_offset)
term_len = len(term)
# update running offset
running_offset = term_offset + term_len
# append to term_offset each term + ["__NULL__"] for later use
pos_terms[term_offset] = [term, ["__NULL__"]]
return pos_terms
def terms_statistics(self, index):
"""get unique terms statistics"""
reader = index.reader()
# unique terms
terms = list(reader.field_terms('text'))
# terms statistics
terms_stats = list()
# loop over unique terms
for term in terms:
# term info
term_info = reader.term_info('text', term)
# doc frequency
doc_freq = term_info.doc_frequency()
# term frequency
term_freq = term_info.weight()
# append info to terms statistics
terms_stats.append((term, doc_freq, term_freq))
return terms_stats
def index_statistics(self, index):
"""compute and print index statistics"""
reader = index.reader()
# doc indexes in whoosh
doc_ids = list(reader.all_doc_ids())
# corpus size
corpus_size = reader.doc_count()
# maximum length of given field across all documents
max_length = reader.max_field_length('text')
# minimum length of given field across all documents
min_length = reader.min_field_length('text')
# total number of terms in given field
corpus_length = reader.field_length('text')
# total number of unique terms
terms = list(reader.field_terms('text'))
# number of terms in given field in given document
docs_length = list()
for doc_id in doc_ids:
doc_length = reader.doc_field_length(doc_id, 'text')
if doc_length:
docs_length.append(doc_length)
else:
docs_length.append(0)
# average length of given field across all documents in corpus
avg_length = reduce((lambda x, y: x + y), docs_length) / corpus_size
# print statistics
print('corpus size: {}'.format(corpus_size))
print('maximum length: {}'.format(max_length))
print('minimum length: {}'.format(min_length))
print('average length: {}'.format(avg_length))
print('all terms: {}'.format(corpus_length))
print('unique terms: {}'.format(len(terms)))
return True
def corpus_statistics(self, corpus):
"""compute and print corpus statistics"""
corpus_size = len(corpus)
# compute documents lengths
docs_length = np.array([len(doc) for doc in corpus])
# compute corpus length
corpus_length = [term for doc in corpus for term in doc]
# print statistics
print('corpus size: {}'.format(corpus_size))
        print('maximum length: {}'.format(np.max(docs_length)))
# Anharmonic correction to vibrational frequencies
# Version 1.1 - 16/07/2020
# The file anharm_path.txt must be present in the root folder (the
# one containing the program). The content of anharm_path.txt is the name
# of the folder containing the data (usually, the folder relative to
# the phase to be investigated). Such name is assigned to the abs_path
# variable
# Input file: input_anharm.txt (under the abs_path folder)
# Structure of the input (input_anharm.txt):
#
# 1) folder name where SCAN data from CRYSTAL are stored
# 2) output file name (it will be written in the folder
# specified at line 1)
# 3) minimim, maximum temperatures and number of points
# where the anharmonic Helmholtz function will be
# computed
# 4) order of the polynomial used to fit the Helmholtz
# free energy as a function of V and T. The unit
# of the computed free energy is the hartree.
#
# The output file contains the power of the fitting polynomial
# together with the optimized coefficents to reconstruct the
# Helmholtz free energy as a function of V and T in the specified
# ranges. Volume ranges are from the input files found in the
# specified folder.
# Files required to be found in the specified folder:
# 1) volumes.dat: it contains the volumes at which the SCANMODE's
# where done together with the harmonic frequencies
# computed by CRYSTAL.
# If not both 0., the last two columns, specifies
# the minimum and maximum q to select.
# Volumes of the primitive cell in cubic A;
# frequencies in cm^-1.
# 2) vect.dat: eigenvectors of the normal mode: one column for
#              each volume, in the same order as specified in
# the volumes.dat file
# 3) input.txt: it contains the names of the files where the Q
# energies from the SCANMODE's are stored, as
# they are copied and pasted from the CRYSTAL
# output
# 4) files whose names are stored in the input.txt file.
# NOTE: in order to be used with the BM3_thermal_2 program,
# fits from more than one normal mode must be of the same order
# All the output files produced here must be copied in the relevant
# input folder specified for the BM3_thermal_2.
# The Anharmonic correction in BM3_thermal_2 program is activated
# by the ANH keyword in the input file for that program.
# Usage:
# At the simplest level, just use the helm_fit() function to read
# all the input and to make the relevant fits.
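# For concreteness, a minimal input_anharm.txt could look like the sketch below
# (folder and file names are placeholders, not part of the original distribution):
#
#    scan_data/
#    anharm_fit.txt
#    300. 1000. 24
#    4
#
# i.e. read the SCAN data from "scan_data/", write the fitted polynomial to
# "anharm_fit.txt", sample 24 temperatures between 300 K and 1000 K, and use a
# polynomial of degree 4 in V and T.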
# from IPython import get_ipython
# get_ipython().magic('clear')
# get_ipython().magic('reset -sf')
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import curve_fit
class anh_class():
pass
class data_class():
def __init__(self,dim):
self.dim=dim
self.nlev=int(self.dim/2)
class data_flag():
def __init__(self):
self.comp=np.array([],dtype=bool)
self.setup=False
def load_files():
'''
Loads data files and file names of the SCAN data
'''
data=np.loadtxt(path+'volumes.dat')
volumes=data[:,0]
h_freq=data[:,1]
qmn=data[:,2]
qmx=data[:,3]
nvol=volumes.size
scan_name=np.loadtxt(path+"input.txt", dtype=str)
mode_vect=np.loadtxt(path+"vect.dat", dtype=float)
glob.data=data
glob.volumes=volumes
glob.h_freq=h_freq
glob.nvol=nvol
glob.scan_name=scan_name
glob.mode_vect=mode_vect
glob.qmn=qmn
glob.qmx=qmx
prn_vol=str(volumes)
print("Number of data SCAN's: %3i:" % nvol)
print("Volumes: %s" % prn_vol)
def set_up():
for i in np.arange(glob.nvol):
qmn=glob.qmn[i]
qmx=glob.qmx[i]
anh[i]=anh_class()
anh[i].name=glob.scan_name[i]
anh[i].vol=glob.volumes[i]
anh[i].h_freq=glob.h_freq[i]
energy_data=np.loadtxt(path+glob.scan_name[i])
anh[i].q=energy_data[:,0].astype(float)
anh[i].q_orig=np.copy(anh[i].q)
energy=energy_data[:,1].astype(float)
min_e=np.min(energy)
anh[i].e=energy-min_e
if (qmn != 0.) or (qmx != 0.):
test=((anh[i].q >= qmn) & (anh[i].q <= qmx))
anh[i].q = anh[i].q[test]
anh[i].e = anh[i].e[test]
anh[i].vector=glob.mode_vect[:,i]
fh_crys=anh[i].h_freq*csl
anh[i].omega=2*np.pi*fh_crys
anh[i].qmax=np.sqrt(sum(anh[i].vector**2))
anh[i].q2max=(anh[i].qmax**2)*(bohr**2)
anh[i].red=ht/(anh[i].omega*anh[i].q2max);
anh[i].q=anh[i].q*anh[i].qmax
flag.comp=np.append(flag.comp, False)
flag.setup=True
def energy_func(qq, a, b, c, d):
return a+b*qq**2+c*qq**3+d*qq**4
def energy_quad(qq, a, b):
return a+b*qq**2
def start_fit(iv, npt=40):
q=anh[iv].q
e=anh[iv].e
fit_par,_ =curve_fit(energy_func,q,e)
fit_quad,_ =curve_fit(energy_quad,q,e)
anh[iv].par=fit_par
min_q=np.min(q)
max_q=np.max(q)
q_list=np.linspace(min_q,max_q,npt)
e4_list=np.array([])
e2_list=np.array([])
for iq in q_list:
ieq4=energy_func(iq,*anh[iv].par)
ieq2=energy_quad(iq,*fit_quad)
e4_list=np.append(e4_list,ieq4)
e2_list=np.append(e2_list,ieq2)
plt.figure()
plt.plot(q_list,e4_list,"-",label='Quartic fit')
plt.plot(q_list,e2_list,"--",label='Quadratic fit')
plt.plot(anh[iv].q,anh[iv].e,"*",label='Actual values')
plt.xlabel("Q")
plt.ylabel("E")
plt.legend(frameon=True)
plt.show()
anh[iv].ko=2*anh[iv].par[1]*conv/(bohr**2)
lam=anh[iv].par[3]
d3l=anh[iv].par[2]
anh[iv].zero_l=anh[iv].par[0]
anh[iv].om=np.sqrt(anh[iv].ko/anh[iv].red)
anh[iv].nu=anh[iv].om/(2*np.pi*csl)
anh[iv].lam=lam*conv/(bohr**4);
anh[iv].d3l=d3l*conv/(bohr**3);
anh[iv].fact=(ht/(2*anh[iv].red*anh[iv].om))**2;
anh[iv].factd=(ht/(2*anh[iv].red*anh[iv].om))**(3/2);
anh[iv].fact_1=anh[iv].lam*anh[iv].fact;
anh[iv].factd_1=iun*anh[iv].factd*anh[iv].d3l;
anh[iv].h_omeg=ht*anh[iv].om;
def diag_n(iv, n):
dn=(anh[iv].fact_1*6*(n**2+n+1/2))+(anh[iv].h_omeg*(n+1/2));
return dn
def extra_1(iv, n):
ext1=-3*anh[iv].factd_1*(n+1)*(np.sqrt(n+1));
return ext1
def extra_2(iv, n):
ext2=-2*anh[iv].fact_1*(3+2*n)*(np.sqrt((n+2)*(n+1)));
return ext2
def extra_3(iv, n):
ext3=anh[iv].factd_1*np.sqrt((n+3)*(n+2)*(n+1));
return ext3
def extra_4(iv, n):
ext4=anh[iv].fact_1*np.sqrt((n+4)*(n+3)*(n+2)*(n+1));
return ext4
def H_matrix(iv):
ind=np.arange(glob.dim)
H=np.zeros((glob.dim,glob.dim),dtype=complex)
for ii in ind:
for jj in ind:
if ii==jj:
H[jj][ii]=diag_n(iv, ii)
elif jj==ii+2:
H[jj][ii]=extra_2(iv, ii)
elif jj==ii-2:
H[jj][ii]=extra_2(iv, jj)
elif jj==ii+4:
H[jj][ii]=extra_4(iv, ii)
elif jj==ii-4:
H[jj][ii]=extra_4(iv, jj)
elif jj==ii+1:
H[jj][ii]=extra_1(iv, ii)
elif jj==ii-1:
H[jj][ii]=-1*extra_1(iv, jj)
elif jj==ii+3:
H[jj][ii]=extra_3(iv, ii)
elif jj==ii-3:
H[jj][ii]=-1*extra_3(iv, jj)
return H
def energy_anh(iv):
H_mat=H_matrix(iv)
vals=np.linalg.eigvals(H_mat)
vals=np.real(vals)
anh[iv].vals=np.sort(vals)
anh[iv].e_zero=anh[iv].zero_l+anh[iv].vals/conv
def partition(iv, temp, nl=10):
"""
Computes the partition function by direct summation of the
exponential terms. By default, the number of the energy levels
involved in the summation is in the variable glob.nlev, whose
value is 1/2 of the dimension of the Hamiltonian matrix.
Args:
v: volume index (according to the list of volumes specified
in the volumes.dat file)
temp: temperature (K)
nl: number of energy levels considered in the summation
(default: 10)
"""
lev_list=np.arange(nl)
z=0.
for i in lev_list:
z=z+np.exp(-1*anh[iv].vals[i]/(k*temp))
return z
def helm(iv, temp):
"""
Computes the Helmholtz free energy (in hartree)
Args:
iv: volume index (according to the list of volumes specified
in the volumes.dat file)
temp: temperature (K)
"""
z=partition(iv, temp, nl=glob.nlev)
return -1*k*temp*np.log(z)/conv
def check_partition(iv, temp, from_plot=False):
"""
Checks convergence of the partition function at a given
temperature
Args:
iv: volume index (according to the list of volumes specified
in the volumes.dat file)
temp: temperature (k)
"""
tol_der=0.005
min_lev=5
max_lev=glob.nlev
lev_list=np.arange(min_lev,max_lev)
z_list=np.array([])
for il in lev_list:
iz=partition(iv,temp,il)
z_list=np.append(z_list,iz)
der_z=np.gradient(z_list)
tlt="Partition function: convergence test for T = " + str(temp) + " K"
plt.figure()
plt.plot(lev_list, z_list)
plt.title(tlt)
plt.xlabel('Number of vibrational levels')
plt.ylabel('Partition function')
plt.show()
test=(der_z >= tol_der)
st=sum(test)+min_lev
print("Threshold for convergence (on the variation of Z): %4.4f" % tol_der)
if (st < glob.nlev):
print("Convergence reached at the %3i level" % st)
else:
print("Warning: convergence never reached")
eth=anh[iv].e_zero[st]
test_scan=(eth-anh[iv].e) >= 0.
zero_scan=True
scan_sum=sum(test_scan)
if scan_sum == 0:
zero_scan=False
if zero_scan:
min_q=0.
max_q=0.
q_test=anh[iv].q[test_scan]
min_q=np.min(q_test)
max_q=np.max(q_test)
else:
min_q=np.min(anh[iv].q)*anh[iv].qmax
max_q=np.max(anh[iv].q)*anh[iv].qmax
min_sc=np.min(anh[iv].q)
max_sc=np.max(anh[iv].q)
mn_qmax=min_q/anh[iv].qmax
mx_qmax=max_q/anh[iv].qmax
if from_plot:
print("Minimum and maximum q values: %4.2f, %4.2f" % (mn_qmax, mx_qmax))
else:
print("Minimum and maximum q values: %4.2f, %4.2f" % (min_q, max_q))
if min_q <= min_sc or max_q >= max_sc:
print("Warning: Q-SCAN out of range")
def frequencies(iv, mxl=5, spect=False):
delta_e=np.gradient(anh[iv].vals)
freq=delta_e/(csl*h)
if not spect:
print("\nFrequencies (cm^-1) from the first %2i levels\n" % mxl)
il=0
while il <= mxl:
print(" %6.2f" % freq[il])
il=il+1
else:
return freq
def computation(iv):
if not flag.setup:
set_up()
start_fit(iv)
energy_anh(iv)
flag.comp[iv]=True
def start(temp=300):
set_up()
for ii in np.arange(glob.nvol):
print("\n--------------\nVolume N. %3i" % ii)
print("Volume %6.3f A^3, harmonic freq.: %6.2f cm^-1" %\
(anh[ii].vol, anh[ii].h_freq))
computation(ii)
check_partition(ii,temp)
frequencies(ii)
def helm_fit(temp=300):
"""
    Main function of the program: it produces the final fit of
the F(V,T) surface.
Args:
temp: temperature (in K) used in the test for convergence
of the partition function (default: 300 K)
"""
start(temp)
tl=np.linspace(tmin,tmax,nt)
vl=glob.volumes
helm_val=np.array([])
for it in tl:
for iv in np.arange(glob.nvol):
ih=helm(iv,it)
helm_val=np.append(helm_val,ih)
helm_val=helm_val.reshape(nt,glob.nvol)
vl,tl=np.meshgrid(vl,tl)
pl=np.arange(power_limit+1)
p_list=np.array([],dtype=int)
for ip1 in pl:
for ip2 in pl:
i1=ip2
i2=ip1-ip2
if i2 < 0:
break
ic=(i1, i2)
p_list=np.append(p_list,ic)
psize=p_list.size
pterm=int(psize/2)
glob.p_list=p_list.reshape(pterm,2)
x0=np.ones(pterm)
vl=vl.flatten()
tl=tl.flatten()
helm_val=helm_val.flatten()
fit, pcov = curve_fit(helm_func, [vl, tl], helm_val, p0 = x0)
t_plot=np.linspace(tmin,tmax,40)
v_plot=np.linspace(np.min(vl),np.max(vl),40)
v_plot,t_plot=np.meshgrid(v_plot,t_plot)
v_plot=v_plot.flatten()
t_plot=t_plot.flatten()
h_plot=helm_func([v_plot, t_plot], *fit)
h_plot=h_plot.reshape(40,40)
v_plot=v_plot.reshape(40,40)
t_plot=t_plot.reshape(40,40)
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111,projection='3d', )
ax.scatter(vl,tl,helm_val,c='r')
ax.plot_surface(v_plot, t_plot, h_plot)
ax.set_xlabel("Volume", labelpad=7)
ax.set_ylabel("Temperature", labelpad=7)
ax.set_zlabel('F(V,T)', labelpad=8)
plt.show()
glob.fit=fit
file=open(outfile,"w")
for iw, iff in zip(glob.p_list,fit):
issw0=iw[0]
issw1=iw[1]
siw=str(issw0)+" "+str(issw1)+" "+str(iff)+"\n"
file.write(siw)
file.write('END')
file.close()
print("\nFile %s written" % outfile)
print("V, T polynomial fit of degree %3i" % power_limit)
print("Temperature range: tmin=%4.1f, tmax=%4.1f" % (tmin,tmax))
vmin=np.min(glob.volumes)
vmax=np.max(glob.volumes)
print("Volume range: Vmin=%5.3f, Vmax=%5.3f" % (vmin, vmax))
hc=helm_func([vl, tl],*glob.fit)
df2=(helm_val-hc)**2
mean_error=np.sqrt(sum(df2))/df2.size
max_error=np.max(np.sqrt(df2))
print("Mean error from fit: %6.2e" % mean_error)
print("Maximum error: %6.2e" % max_error)
def helm_func(data,*par):
vv=data[0]
tt=data[1]
nterm=glob.p_list.shape[0]
func=0.
for it in np.arange(nterm):
pv=glob.p_list[it][0]
pt=glob.p_list[it][1]
func=func+par[it]*(vv**pv)*(tt**pt)
return func
def plot_levels(iv, max_lev, qmin=0., qmax=0., tmin=300, tmax=1000, nt=5, \
degree=4,chk=False, temp=300):
"""
Computes and plots vibrational energy levels on top of the
potential curve of the mode.
Args:
iv: Volume index (select the volume according
to the input list)
max_lev: Number of levels to plot
qmin, qmax: Q-range (default: qmin=qmax=0. --> full range)
tmin, tmax: T-range for the computation of probability of
of occupation of the vibrational levels
(default: 300, 1000K)
nt: number of points in the T-range
degree: degree of the polynomial fitting the potential
function (default: 4)
chk: check on the corvengence of the partition function
(default: False)
temp: temperature for the check of the partition function
(default: 300K)
"""
npoint=200
if not flag.setup:
set_up()
if not flag.comp[iv]:
computation(iv)
if chk:
check_partition(iv, temp, from_plot=True)
levels=anh[iv].vals/conv
pot=anh[iv].e
q=anh[iv].q/anh[iv].qmax
t_list=np.linspace(tmin, tmax, nt)
prob=np.array([])
for it in t_list:
z=partition(iv,it)
for idx in np.arange(max_lev):
energy=levels[idx]*conv
iprob=(np.exp(-1*energy/(k*it)))/z
iprob=(iprob*100).round(1)
prob=np.append(prob, iprob)
prob=prob.reshape(nt,max_lev)
df=pd.DataFrame(prob,index=t_list)
df=df.T
print("Energy levels occupation (probabilities) at several")
print("temperatures in the %4.1f - % 4.1f interval\n" % (tmin, tmax))
print(df.to_string(index=False))
if (qmin == 0.) & (qmax == 0.):
qmin=np.min(q)
qmax=np.max(q)
test=((q>=qmin) & (q<=qmax))
pot=pot[test]
q=q[test]
fit=np.polyfit(q,pot,degree)
q_fit=np.linspace(qmin,qmax,npoint)
e_fit=np.polyval(fit,q_fit)
q_l=np.array([])
for idx in np.arange(max_lev):
ie=levels[idx]
test=(e_fit < ie)
iqmin=np.min(q_fit[test])
iqmax=np.max(q_fit[test])
q_l=np.append(q_l,[iqmin,iqmax])
q_l=q_l.reshape(max_lev,2)
plt.figure()
plt.plot(q,pot)
for idx in np.arange(max_lev):
p1=q_l[idx][0]
p2=q_l[idx][1]
qp=(p1,p2)
ep=(levels[idx],levels[idx])
plt.plot(qp,ep,"k--",linewidth=1)
volume=anh[iv].vol.round(3)
tlt="Volume: "+str(volume)+" A^3; Num. of levels: "+str(max_lev)
plt.xlabel("Q (in unit of Q_max)")
plt.ylabel("E (hartree)")
plt.title(tlt)
plt.show()
def spectrum(iv,temp,nline=5,tail=8., head=8., sigma=2., fwhm=2., eta=0., npp=240):
"""
Computes the spectrum of the anharmonic mode by using a specified peak shape
Args:
iv: Volume index
temp: Temperature (K)
nline: Number of lines to be considered
tail, head: the plotted range is [min(freq)-tail. max(freq)+head]
where min(freq) and max(freq) are respectively the minum and
maximum frequencis resulting from the "nline" transitions
sigma: sigma associated to the Gaussian profile
fwhm: full widthat half maximum associated to the Lorentzian profile
eta: Gaussian/Lorentzian ratio;
eta=0: full Gaussian (G) profile
eta=1: full Lorentzian (L) profile
in general: profile=G*(1-eta)+L*eta
npp: number of points used for the plot
Note:
The vertical lines drawn under the spectrum mark the positions
of the transition frequencies. If the number of lines is greater
than 3, a color code is associated to such lines;
blue - transitions involving levels associated to low quantum numbers;
green -transitions at intermediate quantum numbers;
red - transition at high quantum numbers
"""
if not flag.setup:
set_up()
if not flag.comp[iv]:
computation(iv)
freq=frequencies(iv,nline,spect=True)
freq=freq[0:nline]
z=partition(iv,temp)
levels=anh[iv].vals/conv
prob=np.array([])
    for idx in np.arange(nline):
        energy=levels[idx]*conv
        prob=np.append(prob, (np.exp(-1*energy/(k*temp)))/z)
"""
This module provides various methods for cleaning data that has been imported into MAST-ML, prior to model fitting.
DataCleaning:
Class that enables easy use of various data cleaning methods, such as removal of missing values, different
modes of data imputation, or using principal componenet analysis to fill interpolate missing values.
DataUtilities:
Support class used to evaluate some basic statistics of imported data, such as its distribution, mean, etc.
Also provides a means of flagging potential outlier datapoints based on their deviation from the overall data
distribution.
PPCA:
Class used by the PCA data cleaning routine in the DataCleaning class to perform probabilistic PCA to fill in
missing data.
"""
import os
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
from scipy.linalg import orth
from collections import Counter
from datetime import datetime
from mastml.plots import Histogram
class DataCleaning():
"""
Class to perform various data cleaning operations, such as imputation or NaN removal
Args:
None
Methods:
remove: Method that removes a full column or row of data values if one column or row contains NaN or is blank
Args:
X: (pd.DataFrame), dataframe containing X data
y: (pd.Series), series containing y data
axis: (int), whether to remove rows (axis=0) or columns (axis=1)
Returns:
X: (pd.DataFrame): dataframe of cleaned X data
y: (pd.Series): series of cleaned y data
imputation: Method that imputes values to the missing places based on the median, mean, etc. of the data in the column
Args:
X: (pd.DataFrame), dataframe containing X data
y: (pd.Series), series containing y data
strategy: (str), method of imputation, e.g. median, mean, etc.
Returns:
X: (pd.DataFrame): dataframe of cleaned X data
y: (pd.Series): series of cleaned y data
ppca: Method that imputes data using principal component analysis to interpolate missing values
Args:
X: (pd.DataFrame), dataframe containing X data
y: (pd.Series), series containing y data
Returns:
X: (pd.DataFrame): dataframe of cleaned X data
y: (pd.Series): series of cleaned y data
evaluate: Main method to evaluate initial data analysis routines (e.g. flag outliers), perform data cleaning and save output to folder
Args:
X: (pd.DataFrame), dataframe containing X data
y: (pd.Series), series containing y data
method: (str), data cleaning method name, must be one of 'remove', 'imputation' or 'ppca'
savepath: (str), string containing the savepath information
kwargs: additional keyword arguments needed for the remove, imputation or ppca methods
Returns:
X: (pd.DataFrame): dataframe of cleaned X data
y: (pd.Series): series of cleaned y data
_setup_savedir: method to create a savedir based on the provided model, splitter, selector names and datetime
Args:
savepath: (str), string designating the savepath
Returns:
splitdir: (str), string containing the new subdirectory to save results to
"""
def __init__(self):
pass
def remove(self, X, y, axis):
df = pd.concat([X, y], axis=1)
try:
target = y.name
except:
target = y.columns.tolist()[0]
df = df.dropna(axis=axis, how='any')
y = df[target]
X = df[[col for col in df.columns if col != target]]
return X, y
def imputation(self, X, y, strategy):
df = pd.concat([X, y], axis=1)
columns = df.columns.tolist()
df = pd.DataFrame(SimpleImputer(missing_values=np.nan, strategy=strategy).fit_transform(df), columns=columns)
try:
target = y.name
except:
target = y.columns.tolist()[0]
y = df[target]
X = df[[col for col in df.columns if col != target]]
return X, y
def ppca(self, X, y):
df = pd.concat([X, y], axis=1)
try:
target = y.name
except:
target = y.columns.tolist()[0]
columns = df.columns.tolist()
pca_magic = PPCA()
pca_magic.fit(np.array(df))
# Need to un-standardize the pca-transformed data
df = pd.DataFrame(pca_magic.data*pca_magic.stds+pca_magic.means, columns=columns)
y = df[target]
X = df[[col for col in columns if col != target]]
return X, y
def evaluate(self, X, y, method, savepath=None, make_new_dir=True, **kwargs):
if not savepath:
savepath = os.getcwd()
if make_new_dir is True:
splitdir = self._setup_savedir(savepath=savepath)
savepath = splitdir
self.splitdir = splitdir
DataUtilities().flag_columns_with_strings(X=X, y=y, savepath=savepath)
DataUtilities().flag_outliers(X=X, y=y, savepath=savepath, n_stdevs=3)
df_orig = pd.concat([X, y], axis=1)
self.cleaner = getattr(self, method)
X, y = self.cleaner(X, y, **kwargs)
df_cleaned = pd.concat([X, y], axis=1)
df_orig.to_excel(os.path.join(savepath, 'data_original.xlsx'), index=False)
df_cleaned.to_excel(os.path.join(savepath, 'data_cleaned.xlsx'), index=False)
# Make histogram of the input data
Histogram.plot_histogram(df=y, file_name='histogram_target_values', savepath=savepath, x_label='Target values')
return X, y
def _setup_savedir(self, savepath):
now = datetime.now()
dirname = self.__class__.__name__
dirname = f"{dirname}_{now.month:02d}_{now.day:02d}" \
f"_{now.hour:02d}_{now.minute:02d}_{now.second:02d}"
if savepath == None:
splitdir = os.getcwd()
else:
splitdir = os.path.join(savepath, dirname)
if not os.path.exists(splitdir):
os.mkdir(splitdir)
return splitdir
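def _data_cleaning_example(X, y):
    # Illustrative sketch (not part of the original module): impute missing values with
    # the column medians; evaluate() also writes the original/cleaned spreadsheets, the
    # outlier flags and a target histogram into a timestamped DataCleaning_* subfolder.
    cleaner = DataCleaning()
    return cleaner.evaluate(X, y, method='imputation', savepath=os.getcwd(),
                            make_new_dir=True, strategy='median')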
class DataUtilities():
"""
Class that contains some basic data analysis utilities, such as flagging columns that contain problematic string
entries, or flagging potential outlier values based on threshold values
Args:
None
Methods:
flag_outliers: Method that scans values in each X feature matrix column and flags values that are larger than X standard deviations from the average of that column value. The index and column values of potentially problematic points are listed and written to an output file.
Args:
X: (pd.DataFrame), dataframe containing X data
y: (pd.Series), series containing y data
savepath: (str), string containing the save path directory
n_stdevs: (int), number of standard deviations to use as threshold value
Returns:
None
flag_columns_with_strings: Method that ascertains which columns in data contain string entries
Args:
X: (pd.DataFrame), dataframe containing X data
y: (pd.Series), series containing y data
savepath: (str), string containing the save path directory
Returns:
None
"""
@classmethod
def flag_outliers(cls, X, y, savepath, n_stdevs=3):
df = pd.concat([X, y], axis=1)
n_rows = df.shape[0]
outlier_dict = dict()
outlier_rows_all = list()
for col in df.columns:
outlier_rows = list()
outlier_vals = list()
avg = np.average(df[col])
stdev = np.std(df[col])
for row in range(n_rows):
if df[col].iloc[row] > avg + n_stdevs*stdev:
outlier_rows.append(row)
outlier_vals.append(df[col].iloc[row])
elif df[col].iloc[row] < avg - n_stdevs*stdev:
outlier_rows.append(row)
outlier_vals.append(df[col].iloc[row])
else:
pass
outlier_dict[col] = (outlier_rows, outlier_vals)
outlier_rows_all.append(outlier_rows)
# Save data to file
pd.DataFrame().from_dict(data=outlier_dict, orient='index',
columns=['Indices', 'Values']).to_excel(os.path.join(savepath, 'data_outliers_all.xlsx'))
# Also get values of rows that occur most often
outlier_rows_all = np.concatenate(outlier_rows_all).ravel()
outlier_counts = Counter(outlier_rows_all)
# Save summary data of outlier counts to file
pd.DataFrame().from_dict(data=outlier_counts, orient='index',
columns=['Number of occurrences']).to_excel(os.path.join(savepath, 'data_outliers_summary.xlsx'))
return
@classmethod
def flag_columns_with_strings(cls, X, y, savepath):
df = pd.concat([X, y], axis=1)
str_summary = pd.DataFrame(df.applymap(type).eq(str).any())
str_columns = str_summary.index[str_summary[0] == True].tolist()
d = {'columns with strings': str_columns}
pd.DataFrame().from_dict(data=d).to_excel(os.path.join(savepath, 'data_columns_with_strings.xlsx'))
return
class PPCA():
"""
Class to perform probabilistic principal component analysis (PPCA) to fill in missing data.
This PPCA routine was taken directly from https://github.com/allentran/pca-magic. Due to import errors, for ease of use
we have elected to copy the module here. This github repo was last accessed on 8/27/18. The code comprising the PPCA
class below was not developed by and is not owned by the University of Wisconsin-Madison MAST-ML development team.
"""
def __init__(self):
self.raw = None
self.data = None
self.C = None
self.means = None
self.stds = None
self.eig_vals = None
def _standardize(self, X):
if self.means is None or self.stds is None:
raise RuntimeError("Fit model first")
return (X - self.means) / self.stds
def fit(self, data, d=None, tol=1e-4, min_obs=10, verbose=False):
self.raw = data
self.raw[np.isinf(self.raw)] = np.max(self.raw[np.isfinite(self.raw)])
valid_series = np.sum(~np.isnan(self.raw), axis=0) >= min_obs
data = self.raw[:, valid_series].copy()
N = data.shape[0]
D = data.shape[1]
self.means = np.nanmean(data, axis=0)
self.stds = np.nanstd(data, axis=0)
data = self._standardize(data)
observed = ~np.isnan(data)
missing = np.sum(~observed)
data[~observed] = 0
# initial
if d is None:
d = data.shape[1]
if self.C is None:
C = np.random.randn(D, d)
else:
C = self.C
CC = np.dot(C.T, C)
X = np.dot(np.dot(data, C), np.linalg.inv(CC))
recon = np.dot(X, C.T)
recon[~observed] = 0
ss = np.sum((recon - data) ** 2) / (N * D - missing)
v0 = np.inf
counter = 0
while True:
Sx = np.linalg.inv(np.eye(d) + CC / ss)
# e-step
ss0 = ss
if missing > 0:
proj = np.dot(X, C.T)
data[~observed] = proj[~observed]
X = np.dot(np.dot(data, C), Sx) / ss
# m-step
XX = np.dot(X.T, X)
C = np.dot(np.dot(data.T, X), np.linalg.pinv(XX + N * Sx))
CC = np.dot(C.T, C)
recon = np.dot(X, C.T)
recon[~observed] = 0
            ss = (np.sum((recon - data) ** 2) + N * np.sum(CC * Sx) + missing * ss0) / (N * D)
import os.path
import numpy as np
import math
from collections import namedtuple
from typing import Dict, Any, Tuple, List, Optional
from models.adaptive_model import AdaptiveModel
from models.standard_model import StandardModel
from dataset.dataset import Dataset, DataSeries
from utils.file_utils import save_by_file_suffix, read_by_file_suffix
from utils.sequence_model_utils import SequenceModelType
from utils.constants import OUTPUT, LOGITS, SEQ_LENGTH, SKIP_GATES, PHASE_GATES, STOP_OUTPUT_NAME
LOG_FILE_FMT = 'model-{0}-{1}-{2}.jsonl.gz'
ModelResults = namedtuple('ModelResults', ['predictions', 'labels', 'stop_probs', 'accuracy'])
BATCH_SIZE = 64
def clip(x: int, bounds: Tuple[int, int]) -> int:
if x > bounds[1]:
return bounds[1]
elif x < bounds[0]:
return bounds[0]
return x
def save_test_log(accuracy: float, power: float, valid_accuracy: Optional[float], budget: float, system_name: str, key: str, output_file: str):
test_log: Dict[str, Dict[str, Any]] = dict()
if os.path.exists(output_file):
test_log = list(read_by_file_suffix(output_file))[0]
if key not in test_log:
test_log[key] = dict()
log_value = {
'ACCURACY': accuracy,
'AVG_POWER': power,
'VALID_ACCURACY': valid_accuracy,
'BUDGET': budget,
'SYSTEM_NAME': system_name
}
budget_str = '{0:.4f}'.format(budget)
test_log[key][budget_str] = log_value
save_by_file_suffix([test_log], output_file)
def get_budget_index(budget: float, valid_accuracy: np.ndarray, max_time: int, power_estimates: np.ndarray, allow_violations: bool) -> int:
"""
Selects the single model level which should yield the best overall accuracy. This decision
is based on the validation accuracy for each level.
Args:
budget: The current avg power budget
valid_accuracy: A [L] array containing the validation accuracy for each model level
max_time: The number of timesteps
power_estimates: A [L] array of power estimates for each level
        allow_violations: Whether to select the index in a manner that allows budget violations
            when such violations will lead to better end-to-end accuracy.
Returns:
The "optimal" model level.
"""
best_index = 0
best_acc = 0.0
if allow_violations:
num_levels = valid_accuracy.shape[0]
energy_budget = budget * max_time
for level_idx in range(num_levels):
# Estimate the number of timesteps on which we can perform inference with this level
avg_power = power_estimates[level_idx]
projected_timesteps = min(energy_budget / avg_power, max_time)
projected_correct = valid_accuracy[level_idx] * projected_timesteps
estimated_accuracy = projected_correct / max_time
if estimated_accuracy > best_acc:
best_acc = estimated_accuracy
best_index = level_idx
else:
budget_comparison = power_estimates <= budget
if np.any(budget_comparison):
budget_mask = budget_comparison.astype(float)
masked_accuracy = valid_accuracy * budget_mask
best_index = np.argmax(masked_accuracy)
else:
best_index = np.argmin(power_estimates)
return best_index
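# Example sketch (hypothetical values, not from the original file): how get_budget_index might be
# called. The accuracies and power estimates below are made up; with these numbers the function
# is expected to pick level 1.
def _example_get_budget_index():
    valid_accuracy = np.array([0.70, 0.80, 0.85])    # hypothetical per-level validation accuracy
    power_estimates = np.array([10.0, 20.0, 35.0])   # hypothetical avg power per level
    return get_budget_index(budget=22.0,
                            valid_accuracy=valid_accuracy,
                            max_time=100,
                            power_estimates=power_estimates,
                            allow_violations=True)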
def concat_model_results(model_results: List[ModelResults]) -> ModelResults:
"""
    Concatenates the per-model predictions into a single [N, L] array and collects the
    remaining fields as per-model lists. This is useful for Skip RNN and Phased RNN
    systems in which each output is a separate model.
"""
predictions = np.concatenate([r.predictions for r in model_results], axis=1) # [N, L]
labels = model_results[0].labels # [N, 1]
stop_probs = [r.stop_probs for r in model_results]
accuracy = [r.accuracy for r in model_results]
return ModelResults(predictions=predictions,
labels=labels,
stop_probs=stop_probs,
accuracy=accuracy)
def execute_adaptive_model(model: AdaptiveModel, dataset: Dataset, series: DataSeries) -> ModelResults:
"""
Executes the neural network on the given data series. We do this in a separate step
to avoid recomputing for multiple budgets. Executing the neural network is relatively expensive.
Args:
model: The adaptive model used to perform inference
dataset: The dataset to perform inference on
series: The data series to extract. This is usually the TEST set.
Returns:
A model result tuple containing the inference results.
"""
level_predictions: List[np.ndarray] = []
stop_probs: List[np.ndarray] = []
labels: List[np.ndarray] = []
num_outputs = model.num_outputs
# Operations to execute
ops = [LOGITS, STOP_OUTPUT_NAME]
# Make the batch generator. Don't shuffle so we have consistent results.
data_generator = dataset.minibatch_generator(series=series,
batch_size=BATCH_SIZE,
metadata=model.metadata,
should_shuffle=False)
for batch_num, batch in enumerate(data_generator):
# Compute the predicted log probabilities
feed_dict = model.batch_to_feed_dict(batch, is_train=False, epoch_num=0)
model_results = model.execute(feed_dict, ops=ops)
# Concatenate logits into a [B, L, C] array (logit_ops is already ordered by level).
# For reference, L is the number of levels and C is the number of classes
logits = model_results[LOGITS]
stop_output = model_results[STOP_OUTPUT_NAME] # [B, L]
stop_probs.append(stop_output)
# Compute the predictions for each level
level_pred = np.argmax(logits, axis=-1) # [B, L]
level_predictions.append(level_pred)
        labels.append(np.array(batch[OUTPUT]))
"""
Last change: 12.08.2018
"""
# Some imports
from test_error import TestError
from data_preparation_2 import Data
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
import torch
# Set global font for all matplotlib plots
matplotlib.rcParams.update({'font.size': 15})
# Control variables
estimate_errors = True
estimate_acc = False
step = 0.01
# Load data from data_preparation file
loader = Data()
cluster_size = 8 # Setting manually cluster size
input_filename = "input_data2.csv" # Setting input file (for example "input_data2.csv")
X_train, X_val, X_test, X_dev, X_orig, y_train, y_val, y_test, y_dev, y_orig = \
loader.set_loader(input_filename=input_filename,
set='all',
dev_num=1,
cluster_size=cluster_size,
split_arr=[0.6, 0.1, 0.3],)
# X_val, y_val reducing
X_val_sample_number = 512
np.random.seed(1) # every run will generate the same randomize-mask: seed(1)
randomize = np.arange(X_val.shape[0])
np.random.shuffle(randomize)
X_val = X_val[randomize, :, :]
y_val = y_val[randomize, :]
# Split X_val, y_val
X_val, X_val_rest = np.split(X_val, [X_val_sample_number], axis=0)
y_val, y_val_rest = np.split(y_val, [X_val_sample_number], axis=0)
print('\nLoaded TRAIN-set has a shape of: ', X_train.shape)
print('Loaded VAL-set has a shape of: ', X_val.shape)
print('Loaded TEST-set has a shape of: ', X_test.shape)
print('Loaded DEV-set has a shape of: ', X_dev.shape)
print('Loaded ORIGINAL-set has a shape of: ', X_orig.shape)
print() # print empty line
# Transform input from numpy.ndarray to Tensor
X_dev_tensor = torch.from_numpy(X_dev)
y_dev_tensor = torch.from_numpy(y_dev)
X_train_tensor = torch.from_numpy(X_train)
y_train_tensor = torch.from_numpy(y_train)
X_val_tensor = torch.from_numpy(X_val)
y_val_tensor = torch.from_numpy(y_val)
X_test_tensor = torch.from_numpy(X_test)
y_test_tensor = torch.from_numpy(y_test)
X_orig_tensor = torch.from_numpy(X_orig)
y_orig_tensor = torch.from_numpy(y_orig)
# Instantiate TestError
TestErrorObj = TestError()
# Load NN model
model = torch.load('manual_500iter_leakyReLU_best_model.pt')
# Estimate error
if estimate_errors:
print('estimate_errors')
# Define threshold vector
threshold_vec = np.arange(start=0.01, stop=0.99, step=step)
# Iterate through threshold_vec to find the threshold value with minimal err1
err1_vec = []
err2_vec = []
acc_vec = []
current_procent = 0
for thres in threshold_vec:
# Call function to estimate the accuracy
acc, pred_np, y = TestErrorObj.check_accuracy(model=model, X=X_test_tensor, y=y_test_tensor,
threshold_value=thres,
internal_print=False)
# Call function to estimate the error
err1, err2 = TestErrorObj.check_error(model=model, X=X_test_tensor, y=y_test_tensor,
threshold_value=thres,
internal_print=False)
# Print current status
current_procent = current_procent + 100 / threshold_vec.shape[0]
print('Done: %.2f; Threshold %.2f; Acc: %.3f; Err1: %.3f; Err2: %.3f' %
(current_procent, thres, acc, err1, err2))
err1_vec.append(err1)
err2_vec.append(err2)
acc_vec.append(acc)
fig = plt.figure()
plt.plot(threshold_vec, acc_vec, linewidth=2)
plt.plot(threshold_vec, err1_vec, linewidth=2)
plt.plot(threshold_vec, err2_vec, linewidth=2)
plt.xlabel('Threshold value')
plt.autoscale(enable=True, axis='x', tight=True)
plt.ylabel('Accuracy / Error [%]')
plt.grid()
plt.axis([None, None, 0, 100])
plt.legend(labels=['Accuracy', 'Err1: "ground instead of air"', 'Err2: "air instead of ground"'], loc='best')
plt.savefig('err_plot.pdf')
# Estimate the threshold with the best accuracy
if estimate_acc:
print('estimate_acc')
# Define threshold vector
threshold_vec = np.arange(start=0.01, stop=0.99, step=step)
# Iterate through threshold_vec to find the threshold value with max. accuracy
acc_vec = []
current_procent = 0
for thres in threshold_vec:
# Call function to estimate the accuracy
acc, pred_np, y = TestErrorObj.check_accuracy(model=model, X=X_test_tensor, y=y_test_tensor,
threshold_value=thres,
internal_print=False)
# Print current status
current_procent = current_procent + 100 / threshold_vec.shape[0]
print('Done: %.2f percent; Threshold %.2f; Acc: %.3f' % (current_procent, thres, acc))
acc_vec.append(acc)
# Convert acc_vec list to the numpy.ndarray
acc_vec_np = np.asarray(acc_vec)
    max_acc = np.amax(acc_vec_np)
import pytest
from mamosa.synthetic import SyntheticData
import numpy as np
from numpy.testing import assert_array_equal
@pytest.fixture()
def seed():
np.random.seed(123)
@pytest.fixture
def horizons():
horizons = np.array(
[[[0, 1, 1], [0, 1, 0], [1, 1, -1]], [[1, 2, 2], [-1, -1, -1], [-1, -1, -1]]]
)
return horizons
@pytest.mark.filterwarnings("ignore:horizon")
def test_generate_horizons(seed):
shape = (16, 16, 16)
synth = SyntheticData(shape)
n_horizons = 3
min_dist = 3
horizons = synth.generate_horizons(n_horizons, min_dist, fault_size=2)
assert_array_equal(horizons, synth.horizons)
assert np.all(np.isin(horizons, np.arange(-1, shape[2])))
assert horizons.shape == (n_horizons, shape[0], shape[1])
diff = horizons[1:] - horizons[:-1]
oob = horizons[1:] == -1
assert np.all(np.logical_or(diff >= min_dist, oob))
synth.facies = 1
synth.seismic = 1
synth.oob_horizons = [1]
n_horizons = 1
horizons = synth.generate_horizons(n_horizons, min_dist, fault_size=0)
assert horizons.shape[0] == n_horizons
assert synth.facies is None
assert synth.seismic is None
assert synth.oob_horizons == [] # This can actually fail by randomness
def test_generate_oob_horizons(seed):
shape = (16, 16, 16)
synth = SyntheticData(shape)
# Generate out of bound horizons
with pytest.warns(UserWarning, match="horizon"):
synth.generate_horizons(3, shape[2])
assert synth.oob_horizons.__len__() > 0
with pytest.warns(UserWarning, match="horizon"):
h_vol = synth.horizon_volume(2)
assert h_vol is None
def test_horizon_volume(horizons):
synth = SyntheticData((3, 3, 3))
synth.horizons = horizons
h_vol = synth.horizon_volume(0)
h_vol_true = np.array(
[
[[1, 0, 0], [0, 1, 0], [0, 1, 0]],
[[1, 0, 0], [0, 1, 0], [1, 0, 0]],
[[0, 1, 0], [0, 1, 0], [0, 0, 0]],
]
)
assert_array_equal(h_vol, h_vol_true)
def test_ixtn_horizons(horizons):
synth = SyntheticData((3, 3, 3))
synth.horizons = horizons
ixtn = synth.ixtn_horizons()
ixtn_true = np.array(
[
[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 2, 1, 0],
[1, 0, 0, 0],
[1, 1, 1, 0],
[1, 2, 0, 0],
[2, 0, 1, 0],
[2, 1, 1, 0],
[0, 0, 1, 1],
[0, 1, 2, 1],
[0, 2, 2, 1],
]
)
    assert_array_equal(ixtn, ixtn_true)
# Collection of small helper functions
import numpy as np
import pyaudio
from scipy.fftpack import fft
from .codec import audio_read
import logging
import decimal
import math
class _error(Exception):
pass
def linlin(x, smi, sma, dmi, dma):
"""Linear mapping
Parameters
----------
x : float
input value
smi : float
input range's minimum
sma : float
input range's maximum
    dmi : float
        output range's minimum
    dma : float
        output range's maximum
Returns
-------
_ : float
mapped output
"""
return (x - smi) / (sma - smi) * (dma - dmi) + dmi
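# Worked example (not from the original file): mapping 5 from the range [0, 10] onto [0, 100]
# gives (5 - 0) / (10 - 0) * (100 - 0) + 0 = 50, i.e. linlin(5, 0, 10, 0, 100) == 50.0.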
def midicps(m):
"""Convert midi number into cycle per second"""
return 440.0 * 2 ** ((m - 69) / 12.0)
def cpsmidi(c):
"""Convert cycle per second into midi number"""
return 69 + 12 * np.log2(c / 440.0)
def dbamp(db):
"""Convert db to amplitude"""
return 10 ** (db / 20.0)
def ampdb(amp):
"""Convert amplitude to db"""
return 20 * np.log10(amp)
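# Worked example (not from the original file): dbamp and ampdb are inverses, e.g.
# dbamp(0) == 1.0 (0 dB is unit amplitude) and ampdb(dbamp(-6.0)) is approximately -6.0.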
def spectrum(sig, samples, channels, sr):
"""Return spectrum of a given signal. This method return spectrum matrix if input signal is multi-channels.
Parameters
----------
sig : numpy.ndarray
signal array
samples : int
total amount of samples
channels : int
signal channels
sr : int
sampling rate
Returns
---------
frq : numpy.ndarray
frequencies
Y : numpy.ndarray
FFT of the signal.
"""
nrfreqs = samples // 2 + 1
    frq = np.linspace(0, 0.5 * sr, nrfreqs)  # one-sided frequency range
if channels == 1:
Y = fft(sig)[:nrfreqs] # / self.samples
else:
Y = np.array(np.zeros((nrfreqs, channels)), dtype=complex)
for i in range(channels):
Y[:, i] = fft(sig[:, i])[:nrfreqs]
return frq, Y
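# Example sketch (not from the original file): the sample rate and tone frequency below are
# arbitrary illustration values. For a mono 440 Hz tone the peak bin should land at (or very
# near) 440 Hz.
def _example_spectrum():
    sr = 8000
    t = np.arange(sr) / sr                     # one second of samples
    sig = np.sin(2 * np.pi * 440 * t)          # mono sine at 440 Hz
    frq, Y = spectrum(sig, samples=sig.shape[0], channels=1, sr=sr)
    return frq[np.argmax(np.abs(Y))]           # expected to be approximately 440.0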
def normalize(d):
"""Return the normalized input array"""
# d is a (n x dimension) np array
d -= np.min(d, axis=0)
d /= np.ptp(d, axis=0)
return d
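# Worked example (not from the original file): normalize scales each column to [0, 1] in place,
# e.g. normalize(np.array([[1., 3.], [2., 7.]])) returns [[0., 0.], [1., 1.]].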
def audio_from_file(path, dtype=np.float32):
'''Load an audio buffer using audioread.
This loads one block at a time, and then concatenates the results.
'''
y = [] # audio array
with audio_read(path) as input_file:
sr_native = input_file.samplerate
n_channels = input_file.channels
s_start = 0
s_end = np.inf
n = 0
for frame in input_file:
frame = buf_to_float(frame, dtype=dtype)
n_prev = n
n = n + len(frame)
if n_prev <= s_start <= n:
# beginning is in this frame
frame = frame[(s_start - n_prev):]
# tack on the current frame
y.append(frame)
if y:
y = np.concatenate(y)
if n_channels > 1:
y = y.reshape((-1, n_channels))
else:
y = np.empty(0, dtype=dtype)
sr_native = 0
return y, sr_native
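# Usage sketch (hypothetical, not from the original file): the path below is made up.
# y, sr = audio_from_file('example.wav')   # y is float32, shaped (n,) or (n, channels)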
def buf_to_float(x, n_bytes=2, dtype=np.float32):
"""Convert an integer buffer to floating point values.
This is primarily useful when loading integer-valued wav data
into numpy arrays.
See Also
--------
buf_to_float
Parameters
----------
x : np.ndarray [dtype=int]
The integer-valued data buffer
n_bytes : int [1, 2, 4]
The number of bytes per sample in `x`
dtype : numeric type
The target output type (default: 32-bit float)
Returns
-------
x_float : np.ndarray [dtype=float]
The input data buffer cast to floating point
"""
# Invert the scale of the data
scale = 1. / float(1 << ((8 * n_bytes) - 1))
# Construct the format string
fmt = '<i{:d}'.format(n_bytes)
# Rescale and format the data buffer
return scale * np.frombuffer(x, fmt).astype(dtype)
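# Example sketch (not from the original file): converting a little-endian 16-bit PCM buffer.
def _example_buf_to_float():
    pcm = np.array([0, 16384, -32768], dtype='<i2').tobytes()   # raw int16 sample bytes
    return buf_to_float(pcm, n_bytes=2)                         # approximately [0.0, 0.5, -1.0]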
def device_info():
"""Return a formatted string about available audio devices and their info"""
pa = pyaudio.PyAudio()
line1 = (f"idx {'Device Name':25}{'INP':4}{'OUT':4} SR INP-(Lo|Hi) OUT-(Lo/Hi) (Latency in ms)")
devs = [pa.get_device_info_by_index(i) for i in range(pa.get_device_count())]
lines = [line1]
for i, d in enumerate(devs):
p1 = f"{i:<4g}{d['name'].strip():24}{d['maxInputChannels']:4}{d['maxOutputChannels']:4}"
p2 = f" {int(d['defaultSampleRate'])} "
p3 = f"{d['defaultLowInputLatency']*1000:6.2g} {d['defaultHighInputLatency']*1000:6.0f}"
p4 = f"{d['defaultLowOutputLatency']*1000:6.2g} {d['defaultHighOutputLatency']*1000:6.0f}"
lines.append(p1 + p2 + p3 + p4)
print(*lines, sep='\n')
return devs
def find_device(min_input=0, min_output=0):
pa = pyaudio.PyAudio()
res = []
for idx in range(pa.get_device_count()):
dev = pa.get_device_info_by_index(idx)
if dev['maxInputChannels'] >= min_input and dev['maxOutputChannels'] >= min_output:
res.append(dev)
return res
def padding(x, width, tail=True, constant_values=0):
"""Pad signal with certain width, support 1-3D tensors.
Use it to add silence to a signal
TODO: CHECK pad array
Parameters
----------
x : np.ndarray
A numpy array
width : int
The amount of padding.
tail : bool
If true pad to the tail, else pad to the start.
constant_values : int or float or None
The value to be padded, add None will pad nan to the array
Returns
-------
_ : np.ndarray
Padded array
"""
pad = (0, width) if tail else (width, 0)
if x.ndim == 1:
return np.pad(x, (pad), mode='constant', constant_values=constant_values)
elif x.ndim == 2:
return np.pad(x, (pad, (0, 0)), mode='constant', constant_values=constant_values)
elif x.ndim == 3:
        return np.pad(x, ((0, 0), pad, (0, 0)), mode='constant', constant_values=constant_values)
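# Worked example (not from the original file): appending two samples of silence to a mono signal,
# e.g. padding(np.array([1., 2., 3.]), 2) returns [1., 2., 3., 0., 0.].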
import numpy as np
import warnings
warnings.filterwarnings("ignore")
def knee_pt(y, x=None):
x_was_none = False
use_absolute_dev_p = True
res_x = np.nan
idx_of_result = np.nan
if type(y) is not np.ndarray:
print('knee_pt: y must be a numpy 1D vector')
return res_x, idx_of_result
else:
if y.ndim >= 2:
print('knee_pt: y must be 1 dimensional')
return res_x, idx_of_result
if np.size(y) == 0:
print('knee_pt: y can not be an empty vector')
return res_x, idx_of_result
else:
if x is None:
x_was_none = True
            x = np.arange(1, np.amax(y.shape) + 1, dtype=int)  # np.int is removed in recent NumPy releases
if x.shape != y.shape:
print('knee_pt: y and x must have the same dimensions')
return res_x, idx_of_result
if y.size < 3:
res_x, idx_of_result = np.min(y), np.argmin(y)
return res_x, idx_of_result
if np.all(np.diff(x) >= 0) and (not x_was_none):
idx = np.argsort(x)
y = np.sort(y)
x = np.sort(x)
else:
idx = np.arange(0, np.amax(x.shape))
sigma_xy = np.cumsum(np.multiply(x, y), axis=0)
sigma_x = np.cumsum(x, axis=0)
sigma_y = np.cumsum(y, axis=0)
sigma_xx = np.cumsum(np.multiply(x, x), axis=0)
n = np.arange(1, np.amax(y.shape) + 1).conj().T
det = np.multiply(n, sigma_xx) - np.multiply(sigma_x, sigma_x)
mfwd = (np.multiply(n, sigma_xy) -
np.multiply(sigma_x, sigma_y)) / det
bfwd = -1 * ((np.multiply(sigma_x, sigma_xy) -
np.multiply(sigma_xx, sigma_y)) / det)
sigma_xy = np.cumsum(np.multiply(x[::-1], y[::-1]), axis=0)
    sigma_x = np.cumsum(x[::-1], axis=0)
import unittest
import itertools
import numpy
import pytest
import cupy
from cupy import testing
def perm(iterable):
return list(itertools.permutations(iterable))
@testing.parameterize(
*testing.product({
'shape': [(4, 4, 4)],
'indexes': (
perm(([1, 0], slice(None))) +
perm(([1, 0], Ellipsis)) +
perm(([1, 2], None, slice(None))) +
perm(([1, 0], 1, slice(None))) +
perm(([1, 2], slice(0, 2), slice(None))) +
perm((1, [1, 2], 1)) +
perm(([[1, -1], [0, 3]], slice(None), slice(None))) +
perm(([1, 0], [3, 2], slice(None))) +
perm((slice(0, 3, 2), [1, 2], [1, 0])) +
perm(([1, 0], [2, 1], [3, 1])) +
perm(([1, 0], 1, [3, 1])) +
perm(([1, 2], [[1, 0], [0, 1], [-1, 1]], slice(None))) +
perm((None, [1, 2], [1, 0])) +
perm((numpy.array(0), numpy.array(-1))) +
perm((numpy.array(0), None)) +
perm((1, numpy.array(2), slice(None)))
)
})
)
@testing.gpu
class TestArrayAdvancedIndexingGetitemPerm(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_adv_getitem(self, xp, dtype):
a = testing.shaped_arange(self.shape, xp, dtype)
return a[self.indexes]
@testing.parameterize(
{'shape': (2, 3, 4), 'indexes': numpy.array(-1)},
{'shape': (2, 3, 4), 'indexes': (None, [1, 0], [0, 2], slice(None))},
{'shape': (2, 3, 4), 'indexes': (None, [0, 1], None, [2, 1], slice(None))},
{'shape': (2, 3, 4), 'indexes': numpy.array([1, 0])},
{'shape': (2, 3, 4), 'indexes': [1]},
{'shape': (2, 3, 4), 'indexes': [1, 1]},
{'shape': (2, 3, 4), 'indexes': [1, -1]},
{'shape': (2, 3, 4), 'indexes': ([0, 1], slice(None), [[2, 1], [3, 1]])},
# mask
{'shape': (10,), 'indexes': (numpy.random.choice([False, True], (10,)),)},
{'shape': (2, 3, 4), 'indexes': (1, numpy.array([True, False, True]))},
{'shape': (2, 3, 4), 'indexes': (numpy.array([True, False]), 1)},
{'shape': (2, 3, 4),
'indexes': (slice(None), 2, numpy.array([True, False, True, False]))},
{'shape': (2, 3, 4), 'indexes': (slice(None), 2, False)},
{'shape': (2, 3, 4),
'indexes': (numpy.random.choice([False, True], (2, 3, 4)),)},
{'shape': (2, 3, 4),
'indexes': (slice(None), numpy.array([True, False, True]))},
{'shape': (2, 3, 4),
'indexes': (slice(None), slice(None),
numpy.array([True, False, False, True]))},
{'shape': (2, 3, 4),
'indexes': (1, 2,
numpy.array([True, False, False, True]))},
{'shape': (2, 3, 4),
'indexes': (slice(None), numpy.random.choice([False, True], (3, 4)))},
{'shape': (2, 3, 4),
'indexes': numpy.random.choice([False, True], (2, 3))},
# TODO(okuta): pass the following commented out tests
# {'shape': (2, 3, 4),
# 'indexes': (1, None, numpy.array([True, False, True]))},
# empty arrays
{'shape': (2, 3, 4), 'indexes': []},
{'shape': (2, 3, 4), 'indexes': numpy.array([], dtype=numpy.int32)},
{'shape': (2, 3, 4), 'indexes': numpy.array([[]], dtype=numpy.int32)},
{'shape': (2, 3, 4), 'indexes': (slice(None), [])},
{'shape': (2, 3, 4), 'indexes': ([], [])},
{'shape': (2, 3, 4), 'indexes': ([[]],)},
{'shape': (2, 3, 4), 'indexes': numpy.array([], dtype=numpy.bool_)},
{'shape': (2, 3, 4),
'indexes': (slice(None), numpy.array([], dtype=numpy.bool_))},
{'shape': (2, 3, 4), 'indexes': numpy.array([[], []], dtype=numpy.bool_)},
# TODO(okuta): pass the following commented out tests
# {'shape': (2, 3, 4), 'indexes': (True, [True, False])},
# {'shape': (2, 3, 4), 'indexes': (False, [True, False])},
# {'shape': (2, 3, 4), 'indexes': (True, [[1]], slice(1, 2))},
# {'shape': (2, 3, 4), 'indexes': (False, [[1]], slice(1, 2))},
# {'shape': (2, 3, 4), 'indexes': (True, [[1]], slice(1, 2), True)},
# {'shape': (2, 3, 4), 'indexes': (True, [[1]], slice(1, 2), False)},
# zero-dim and zero-sized arrays
{'shape': (), 'indexes': Ellipsis},
{'shape': (), 'indexes': ()},
{'shape': (), 'indexes': None},
{'shape': (), 'indexes': True},
{'shape': (), 'indexes': (True,)},
# TODO(niboshi): pass the following commented out tests
# {'shape': (), 'indexes': (False, True, True)},
{'shape': (), 'indexes': numpy.ones((), dtype=numpy.bool_)},
{'shape': (), 'indexes': numpy.zeros((), dtype=numpy.bool_)},
{'shape': (0,), 'indexes': None},
{'shape': (0,), 'indexes': ()},
{'shape': (2, 0), 'indexes': ([1],)},
{'shape': (0, 3), 'indexes': (slice(None), [1])},
# TODO(niboshi): pass the following commented out tests
# {'shape': (0,), 'indexes': (False, True, True)},
)
@testing.gpu
class TestArrayAdvancedIndexingGetitemParametrized(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_adv_getitem(self, xp, dtype):
a = testing.shaped_arange(self.shape, xp, dtype)
return a[self.indexes]
@testing.parameterize(
# empty arrays (list indexes)
{'shape': (2, 3, 4), 'indexes': [[]]},
{'shape': (2, 3, 4), 'indexes': [[[]]]},
{'shape': (2, 3, 4), 'indexes': [[[[]]]]},
{'shape': (2, 3, 4, 5), 'indexes': [[[[]]]]},
{'shape': (2, 3, 4, 5), 'indexes': [[[[[]]]]]},
# list indexes
{'shape': (2, 3, 4), 'indexes': [[1]]},
{'shape': (2, 3, 4), 'indexes': [[1, 1]]},
{'shape': (2, 3, 4), 'indexes': [[1], [1]]},
{'shape': (2, 3, 4), 'indexes': [[1, 1], 1]},
{'shape': (2, 3, 4), 'indexes': [[1], slice(1, 2)]},
{'shape': (2, 3, 4), 'indexes': [[[1]], slice(1, 2)]},
)
@testing.gpu
class TestArrayAdvancedIndexingGetitemDeprecated(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_adv_getitem(self, xp, dtype):
with testing.assert_warns(FutureWarning):
a = testing.shaped_arange(self.shape, xp, dtype)
return a[self.indexes]
@testing.parameterize(
{'shape': (0,), 'indexes': True},
{'shape': (0,), 'indexes': (True,)},
{'shape': (0,), 'indexes': numpy.ones((), dtype=numpy.bool_)},
{'shape': (0,), 'indexes': numpy.zeros((), dtype=numpy.bool_)},
)
@testing.gpu
class TestArrayAdvancedIndexingGetitemParametrized2(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_adv_getitem(self, xp, dtype):
a = testing.shaped_arange(self.shape, xp, dtype)
return a[self.indexes]
@testing.parameterize(
{'shape': (2, 3, 4), 'transpose': (1, 2, 0),
'indexes': (slice(None), [1, 0])},
{'shape': (2, 3, 4), 'transpose': (1, 0, 2),
'indexes': (None, [1, 2], [0, -1])},
)
@testing.gpu
class TestArrayAdvancedIndexingGetitemParametrizedTransp(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_adv_getitem(self, xp, dtype):
a = testing.shaped_arange(self.shape, xp, dtype)
if self.transpose:
a = a.transpose(self.transpose)
return a[self.indexes]
@testing.gpu
class TestArrayAdvancedIndexingGetitemCupyIndices(unittest.TestCase):
shape = (2, 3, 4)
def test_adv_getitem_cupy_indices1(self):
a = cupy.zeros(self.shape)
index = cupy.array([1, 0])
original_index = index.copy()
b = a[index]
b_cpu = a.get()[index.get()]
testing.assert_array_equal(b, b_cpu)
testing.assert_array_equal(original_index, index)
def test_adv_getitem_cupy_indices2(self):
a = cupy.zeros(self.shape)
index = cupy.array([1, 0])
original_index = index.copy()
b = a[(slice(None), index)]
b_cpu = a.get()[(slice(None), index.get())]
testing.assert_array_equal(b, b_cpu)
testing.assert_array_equal(original_index, index)
def test_adv_getitem_cupy_indices3(self):
a = cupy.zeros(self.shape)
index = cupy.array([True, False])
original_index = index.copy()
b = a[index]
b_cpu = a.get()[index.get()]
testing.assert_array_equal(b, b_cpu)
testing.assert_array_equal(original_index, index)
def test_adv_getitem_cupy_indices4(self):
a = cupy.zeros(self.shape)
index = cupy.array([4, -5])
original_index = index.copy()
b = a[index]
b_cpu = a.get()[index.get() % self.shape[1]]
testing.assert_array_equal(b, b_cpu)
testing.assert_array_equal(original_index, index)
def test_adv_getitem_cupy_indices5(self):
a = cupy.zeros(self.shape)
index = cupy.array([4, -5])
original_index = index.copy()
b = a[[1, 0], index]
b_cpu = a.get()[[1, 0], index.get() % self.shape[1]]
testing.assert_array_equal(b, b_cpu)
testing.assert_array_equal(original_index, index)
@testing.parameterize(
{'shape': (2**3 + 1, 2**4), 'indexes': (
numpy.array([2**3], dtype=numpy.int8),
numpy.array([1], dtype=numpy.int8))},
{'shape': (2**4 + 1, 2**4), 'indexes': (
numpy.array([2**4], dtype=numpy.uint8),
numpy.array([1], dtype=numpy.uint8))},
{'shape': (2**7 + 1, 2**8), 'indexes': (
numpy.array([2**7], dtype=numpy.int16),
numpy.array([1], dtype=numpy.int16))},
{'shape': (2**8 + 1, 2**8), 'indexes': (
numpy.array([2**8], dtype=numpy.uint16),
numpy.array([1], dtype=numpy.uint16))},
{'shape': (2**7 + 1, 2**8), 'indexes': (
numpy.array([2**7], dtype=numpy.int16),
numpy.array([1], dtype=numpy.int32))},
{'shape': (2**7 + 1, 2**8), 'indexes': (
numpy.array([2**7], dtype=numpy.int16),
numpy.array([1], dtype=numpy.int8))},
# Three-dimensional case
{'shape': (2**3 + 1, 3, 2**4), 'indexes': (
numpy.array([2**3], dtype=numpy.int8),
slice(None),
numpy.array([1], dtype=numpy.int8))},
)
@testing.gpu
class TestArrayAdvancedIndexingOverflow(unittest.TestCase):
def test_getitem(self):
a = cupy.arange(numpy.prod(self.shape)).reshape(self.shape)
indexes_gpu = []
for s in self.indexes:
if isinstance(s, numpy.ndarray):
s = cupy.array(s)
indexes_gpu.append(s)
indexes_gpu = tuple(indexes_gpu)
b = a[indexes_gpu]
b_cpu = a.get()[self.indexes]
testing.assert_array_equal(b, b_cpu)
def test_setitem(self):
a_cpu = numpy.arange(numpy.prod(self.shape)).reshape(self.shape)
a = cupy.array(a_cpu)
indexes_gpu = []
for s in self.indexes:
if isinstance(s, numpy.ndarray):
s = cupy.array(s)
indexes_gpu.append(s)
indexes_gpu = tuple(indexes_gpu)
a[indexes_gpu] = -1
a_cpu[self.indexes] = -1
testing.assert_array_equal(a, a_cpu)
@testing.parameterize(
{'shape': (), 'indexes': (-1,)},
{'shape': (), 'indexes': (0,)},
{'shape': (), 'indexes': (1,)},
{'shape': (), 'indexes': ([0],)},
{'shape': (), 'indexes': (numpy.array([0]),)},
{'shape': (), 'indexes': (numpy.array(0),)},
{'shape': (), 'indexes': numpy.array([True])},
{'shape': (), 'indexes': numpy.array([False, True, True])},
{'shape': (), 'indexes': ([False],)},
{'shape': (0,), 'indexes': (-1,)},
{'shape': (0,), 'indexes': (0,)},
{'shape': (0,), 'indexes': (1,)},
{'shape': (0,), 'indexes': ([0],)},
{'shape': (0,), 'indexes': (numpy.array([0]),)},
{'shape': (0,), 'indexes': (numpy.array(0),)},
{'shape': (0,), 'indexes': numpy.array([True])},
{'shape': (0,), 'indexes': numpy.array([False, True, True])},
{'shape': (0, 1), 'indexes': (0, Ellipsis)},
{'shape': (2, 3), 'indexes': (slice(None), [1, 2], slice(None))},
{'shape': (2, 3), 'indexes': numpy.array([], dtype=numpy.float64)},
)
@testing.gpu
class TestArrayInvalidIndexAdvGetitem(unittest.TestCase):
def test_invalid_adv_getitem(self):
for xp in (numpy, cupy):
a = testing.shaped_arange(self.shape, xp)
with pytest.raises(IndexError):
a[self.indexes]
@testing.parameterize(
{'shape': (0,), 'indexes': ([False],)},
{'shape': (2, 3, 4),
'indexes': (slice(None), numpy.random.choice([False, True], (3, 1)))},
{'shape': (2, 3, 4),
'indexes': numpy.random.choice([False, True], (1, 3))},
)
@testing.gpu
class TestArrayInvalidIndexAdvGetitem2(unittest.TestCase):
def test_invalid_adv_getitem(self):
for xp in (numpy, cupy):
a = testing.shaped_arange(self.shape, xp)
with pytest.raises(IndexError):
a[self.indexes]
@testing.parameterize(
{'shape': (2, 3, 4), 'indexes': [1, [1, [1]]]},
)
@testing.gpu
@testing.with_requires('numpy>=1.16')
class TestArrayInvalidValueAdvGetitem(unittest.TestCase):
def test_invalid_adv_getitem(self):
for xp in (numpy, cupy):
a = testing.shaped_arange(self.shape, xp)
with pytest.raises(IndexError):
with testing.assert_warns(FutureWarning):
a[self.indexes]
@testing.parameterize(
# array only
{'shape': (2, 3, 4), 'indexes': numpy.array(-1), 'value': 1},
{'shape': (2, 3, 4), 'indexes': numpy.array([1, 0]), 'value': 1},
{'shape': (2, 3, 4), 'indexes': [1, 0], 'value': 1},
{'shape': (2, 3, 4), 'indexes': [1, -1], 'value': 1},
{'shape': (2, 3, 4), 'indexes': (slice(None), [1, 2]), 'value': 1},
{'shape': (2, 3, 4),
'indexes': (slice(None), [[1, 2], [0, -1]],), 'value': 1},
{'shape': (2, 3, 4),
'indexes': (slice(None), slice(None), [[1, 2], [0, -1]]), 'value': 1},
# slice and array
{'shape': (2, 3, 4),
'indexes': (slice(None), slice(1, 2), [[1, 2], [0, -1]]), 'value': 1},
# None and array
{'shape': (2, 3, 4),
'indexes': (None, [1, -1]), 'value': 1},
{'shape': (2, 3, 4),
'indexes': (None, [1, -1], None), 'value': 1},
{'shape': (2, 3, 4),
'indexes': (None, None, None, [1, -1]), 'value': 1},
# None, slice and array
{'shape': (2, 3, 4),
'indexes': (slice(0, 1), None, [1, -1]), 'value': 1},
{'shape': (2, 3, 4),
'indexes': (slice(0, 1), slice(1, 2), [1, -1]), 'value': 1},
{'shape': (2, 3, 4),
'indexes': (slice(0, 1), None, slice(1, 2), [1, -1]), 'value': 1},
# mask
{'shape': (2, 3, 4),
'indexes': numpy.array([True, False]), 'value': 1},
{'shape': (2, 3, 4),
'indexes': (1, numpy.array([True, False, True])), 'value': 1},
{'shape': (2, 3, 4),
'indexes': (numpy.array([True, False]), 1), 'value': 1},
{'shape': (2, 3, 4),
'indexes': (slice(None), numpy.array([True, False, True])), 'value': 1},
{'shape': (2, 3, 4),
'indexes': (slice(None), 2, numpy.array([True, False, True, False])),
'value': 1},
{'shape': (2, 3, 4),
'indexes': (slice(None), 2, False), 'value': 1},
{'shape': (2, 3, 4),
'indexes': (slice(None), slice(None),
numpy.random.choice([False, True], (4,))),
'value': 1},
{'shape': (2, 3, 4),
'indexes': (numpy.random.choice([False, True], (2, 3)),), 'value': 1},
{'shape': (2, 3, 4),
'indexes': (slice(None), numpy.random.choice([False, True], (3, 4)),),
'value': 1},
{'shape': (2, 3, 4),
'indexes': (numpy.random.choice([False, True], (2, 3, 4)),), 'value': 1},
# multiple arrays
{'shape': (2, 3, 4), 'indexes': ([0, -1], [1, -1]), 'value': 1},
{'shape': (2, 3, 4),
'indexes': ([0, -1], [1, -1], [2, 1]), 'value': 1},
{'shape': (2, 3, 4), 'indexes': ([0, -1], 1), 'value': 1},
{'shape': (2, 3, 4), 'indexes': ([0, -1], slice(None), [1, -1]),
'value': 1},
{'shape': (2, 3, 4), 'indexes': ([0, -1], 1, 2), 'value': 1},
{'shape': (2, 3, 4), 'indexes': ([1, 0], slice(None), [[2, 0], [3, 1]]),
'value': 1},
# multiple arrays and basic indexing
{'shape': (2, 3, 4), 'indexes': ([0, -1], None, [1, 0]), 'value': 1},
{'shape': (2, 3, 4), 'indexes': ([0, -1], slice(0, 2), [1, 0]),
'value': 1},
{'shape': (2, 3, 4), 'indexes': ([0, -1], None, slice(0, 2), [1, 0]),
'value': 1},
{'shape': (1, 1, 2, 3, 4),
'indexes': (None, slice(None), slice(None), [1, 0], [2, -1], 1),
'value': 1},
{'shape': (1, 1, 2, 3, 4),
'indexes': (None, slice(None), 0, [1, 0], slice(0, 2, 2), [2, -1]),
'value': 1},
{'shape': (2, 3, 4),
'indexes': (slice(None), [0, -1], [[1, 0], [0, 1], [-1, 1]]), 'value': 1},
# empty arrays
{'shape': (2, 3, 4), 'indexes': [], 'value': 1},
{'shape': (2, 3, 4), 'indexes': [],
'value': numpy.array([1, 1, 1, 1])},
{'shape': (2, 3, 4), 'indexes': [],
'value': numpy.random.uniform(size=(3, 4))},
{'shape': (2, 3, 4), 'indexes': numpy.array([], dtype=numpy.int32),
'value': 1},
{'shape': (2, 3, 4), 'indexes': numpy.array([[]], dtype=numpy.int32),
'value': numpy.random.uniform(size=(3, 4))},
{'shape': (2, 3, 4), 'indexes': (slice(None), []),
'value': 1},
{'shape': (2, 3, 4), 'indexes': ([], []),
'value': 1},
{'shape': (2, 3, 4), 'indexes': numpy.array([], dtype=numpy.bool_),
'value': 1},
{'shape': (2, 3, 4),
'indexes': (slice(None), numpy.array([], dtype=numpy.bool_)),
'value': 1},
{'shape': (2, 3, 4), 'indexes': numpy.array([[], []], dtype=numpy.bool_),
'value': numpy.random.uniform(size=(4,))},
# zero-dim and zero-sized arrays
{'shape': (), 'indexes': Ellipsis, 'value': 1},
{'shape': (), 'indexes': (), 'value': 1},
{'shape': (), 'indexes': None, 'value': 1},
{'shape': (), 'indexes': True, 'value': 1},
{'shape': (), 'indexes': (True,), 'value': 1},
# TODO(niboshi): pass the following commented out tests
# {'shape': (), 'indexes': (False, True, True), 'value': 1},
{'shape': (), 'indexes': numpy.ones((), dtype=numpy.bool_), 'value': 1},
{'shape': (), 'indexes': numpy.zeros((), dtype=numpy.bool_), 'value': 1},
{'shape': (0,), 'indexes': None, 'value': 1},
{'shape': (0,), 'indexes': (), 'value': 1},
# TODO(niboshi): pass the following commented out tests
# {'shape': (0,), 'indexes': (False, True, True), 'value': 1},
)
@testing.gpu
class TestArrayAdvancedIndexingSetitemScalarValue(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_adv_setitem(self, xp, dtype):
a = xp.zeros(self.shape, dtype=dtype)
a[self.indexes] = self.value
return a
@testing.parameterize(
# empty arrays (list indexes)
{'shape': (2, 3, 4), 'indexes': [[]], 'value': 1},
{'shape': (2, 3, 4), 'indexes': [[[]]], 'value': 1},
{'shape': (2, 3, 4), 'indexes': [[[[]]]], 'value': 1},
{'shape': (2, 3, 4, 5), 'indexes': [[[[]]]], 'value': 1},
{'shape': (2, 3, 4, 5), 'indexes': [[[[[]]]]], 'value': 1},
# list indexes
{'shape': (2, 3, 4), 'indexes': [[1]], 'value': 1},
{'shape': (2, 3, 4), 'indexes': [[1, 0]], 'value': 1},
{'shape': (2, 3, 4), 'indexes': [[1], [0]], 'value': 1},
{'shape': (2, 3, 4), 'indexes': [[1, 0], 2], 'value': 1},
{'shape': (2, 3, 4), 'indexes': [[1], slice(1, 2)], 'value': 1},
{'shape': (2, 3, 4), 'indexes': [[[1]], slice(1, 2)], 'value': 1},
)
@testing.gpu
class TestArrayAdvancedIndexingSetitemScalarValueDeprecated(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_adv_setitem(self, xp, dtype):
a = xp.zeros(self.shape, dtype=dtype)
with testing.assert_warns(FutureWarning):
a[self.indexes] = self.value
return a
@testing.parameterize(
{'shape': (0,), 'indexes': True, 'value': 1},
{'shape': (0,), 'indexes': (True,), 'value': 1},
{'shape': (0,), 'indexes': numpy.ones((), dtype=numpy.bool_), 'value': 1},
{'shape': (0,), 'indexes': numpy.zeros((), dtype=numpy.bool_), 'value': 1},
)
@testing.gpu
class TestArrayAdvancedIndexingSetitemScalarValue2(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_adv_setitem(self, xp, dtype):
a = xp.zeros(self.shape, dtype=dtype)
a[self.indexes] = self.value
return a
@testing.parameterize(
# zero-dim and zero-sized arrays
{'shape': (), 'indexes': numpy.array([True]), 'value': 1},
{'shape': (), 'indexes': numpy.array([False, True, True]), 'value': 1},
{'shape': (0,), 'indexes': numpy.array([True]), 'value': 1},
{'shape': (0,), 'indexes': numpy.array([False, True, True]), 'value': 1},
)
@testing.gpu
class TestArrayAdvancedIndexingSetitemScalarValueIndexError(unittest.TestCase):
def test_adv_setitem(self):
for xp in (numpy, cupy):
a = xp.zeros(self.shape)
with pytest.raises(IndexError):
a[self.indexes] = self.value
@testing.parameterize(
{'shape': (2, 3, 4), 'indexes': numpy.array(1),
'value': numpy.array([1])},
{'shape': (2, 3, 4), 'indexes': numpy.array(1),
'value': numpy.array([1, 2, 3, 4])},
{'shape': (2, 3, 4), 'indexes': (slice(None), [0, -1]),
'value': numpy.arange(2 * 2 * 4).reshape(2, 2, 4)},
{'shape': (2, 5, 4), 'indexes': (slice(None), [[0, 2], [1, -1]]),
'value': numpy.arange(2 * 2 * 2 * 4).reshape(2, 2, 2, 4)},
# mask
{'shape': (2, 3, 4), 'indexes': numpy.random.choice([False, True], (2, 3)),
'value': numpy.arange(4)},
{'shape': (2, 3, 4),
'indexes': (slice(None), numpy.array([True, False, True])),
'value': numpy.arange(2 * 2 * 4).reshape(2, 2, 4)},
{'shape': (2, 3, 4),
'indexes': (numpy.array([[True, False, False], [False, True, True]]),),
'value': numpy.arange(3 * 4).reshape(3, 4)},
{'shape': (2, 2, 2),
'indexes': (slice(None), numpy.array([[True, False], [False, True]]),),
'value': numpy.arange(2 * 2).reshape(2, 2)},
{'shape': (2, 2, 2),
'indexes': (numpy.array(
[[[True, False], [True, False]], [[True, True], [False, False]]]),),
'value': numpy.arange(4)},
{'shape': (5,),
'indexes': numpy.array([True, False, False, True, True]),
'value': numpy.arange(3)},
# multiple arrays
{'shape': (2, 3, 4), 'indexes': ([1, 0], [2, 1]),
'value': numpy.arange(2 * 4).reshape(2, 4)},
{'shape': (2, 3, 4), 'indexes': ([1, 0], slice(None), [2, 1]),
'value': numpy.arange(2 * 3).reshape(2, 3)},
{'shape': (2, 3, 4), 'indexes': ([1, 0], slice(None), [[2, 0], [3, 1]]),
'value': numpy.arange(2 * 2 * 3).reshape(2, 2, 3)},
{'shape': (2, 3, 4),
'indexes': ([[1, 0], [1, 0]], slice(None), [[2, 0], [3, 1]]),
'value': numpy.arange(2 * 2 * 3).reshape(2, 2, 3)},
{'shape': (2, 3, 4),
'indexes': (1, slice(None), [[2, 0], [3, 1]]),
'value': numpy.arange(2 * 2 * 3).reshape(2, 2, 3)},
# list indexes
{'shape': (2, 3, 4), 'indexes': [1],
'value': numpy.arange(3 * 4).reshape(3, 4)},
)
@testing.gpu
class TestArrayAdvancedIndexingVectorValue(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_adv_setitem(self, xp, dtype):
a = xp.zeros(self.shape, dtype=dtype)
a[self.indexes] = self.value.astype(a.dtype)
return a
@testing.gpu
class TestArrayAdvancedIndexingSetitemCupyIndices(unittest.TestCase):
shape = (2, 3)
def test_cupy_indices_integer_array_1(self):
a = cupy.zeros(self.shape)
index = cupy.array([0, 1])
original_index = index.copy()
a[:, index] = cupy.array(1.)
testing.assert_array_equal(
a, cupy.array([[1., 1., 0.], [1., 1., 0.]]))
testing.assert_array_equal(index, original_index)
def test_cupy_indices_integer_array_2(self):
a = cupy.zeros(self.shape)
index = cupy.array([3, -5])
original_index = index.copy()
a[:, index] = cupy.array(1.)
testing.assert_array_equal(
a, cupy.array([[1., 1., 0.], [1., 1., 0.]]))
testing.assert_array_equal(index, original_index)
def test_cupy_indices_integer_array_3(self):
a = cupy.zeros(self.shape)
index = cupy.array([3, -5])
original_index = index.copy()
a[[1, 1], index] = cupy.array(1.)
testing.assert_array_equal(
a, cupy.array([[0., 0., 0.], [1., 1., 0.]]))
testing.assert_array_equal(index, original_index)
def test_cupy_indices_boolean_array(self):
a = cupy.zeros(self.shape)
index = cupy.array([True, False])
original_index = index.copy()
a[index] = cupy.array(1.)
testing.assert_array_equal(
a, cupy.array([[1., 1., 1.], [0., 0., 0.]]))
testing.assert_array_almost_equal(original_index, index)
@testing.gpu
class TestArrayAdvancedIndexingSetitemDifferentDtypes(unittest.TestCase):
@testing.for_all_dtypes_combination(names=['src_dtype', 'dst_dtype'],
no_complex=True)
@testing.numpy_cupy_array_equal()
def test_differnt_dtypes(self, xp, src_dtype, dst_dtype):
shape = (2, 3)
a = xp.zeros(shape, dtype=src_dtype)
indexes = xp.array([0, 1])
a[:, indexes] = xp.array(1, dtype=dst_dtype)
return a
@testing.for_all_dtypes_combination(names=['src_dtype', 'dst_dtype'],
no_complex=True)
@testing.numpy_cupy_array_equal()
def test_differnt_dtypes_mask(self, xp, src_dtype, dst_dtype):
shape = (2, 3)
a = xp.zeros(shape, dtype=src_dtype)
indexes = xp.array([True, False])
a[indexes] = xp.array(1, dtype=dst_dtype)
return a
@testing.gpu
class TestArrayAdvancedIndexingSetitemTranspose(unittest.TestCase):
@testing.numpy_cupy_array_equal()
def test_adv_setitem_transp(self, xp):
shape = (2, 3, 4)
a = xp.zeros(shape).transpose(0, 2, 1)
        slices = (numpy.array([1, 0]),)  # truncated in the source; only the first entry of the original index tuple is preserved
"""
Some codes from https://github.com/Newmu/dcgan_code
"""
import math
import os
import errno
import json
import random
import pprint
import scipy.misc
import numpy as np
from time import gmtime, strftime
import tensorflow as tf
pp = pprint.PrettyPrinter()
get_stddev = lambda x, k_h, k_w: 1/math.sqrt(k_w*k_h*x.get_shape()[-1])
index = 0
def get_image(image_path, image_size, is_crop=True, resize_w=64):
global index
out = transform(imread(image_path), image_size, is_crop, resize_w)
return out
def save_images(images, size, image_path):
return imsave(inverse_transform(images), size, image_path)
def imread(path):
img = scipy.misc.imread(path)
if len(img.shape) == 0:
raise ValueError(path + " got loaded as a dimensionless array!")
    return img.astype(float)  # np.float is removed in recent NumPy releases
def merge_images(images, size):
return inverse_transform(images)
def merge(images, size):
h, w = images.shape[1], images.shape[2]
img = np.zeros((h * size[0], w * size[1], 3))
for idx, image in enumerate(images):
i = idx % size[1]
        j = idx // size[1]  # integer row index; true division would break slicing under Python 3
img[j*h:j*h+h, i*w:i*w+w, :] = image
return img
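# Illustration (not from the original file): with size = (8, 8), merge tiles 64 images of shape
# (h, w, 3) into a single (8*h, 8*w, 3) canvas, filling positions in row-major order.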
def imsave(images, size, path):
return scipy.misc.imsave(path, merge(images, size))
def center_crop(x, crop_h, crop_w=None, resize_w=64):
h, w = x.shape[:2]
crop_h = min(h, w) # we changed this to override the original DCGAN-TensorFlow behavior
# Just use as much of the image as possible while keeping it square
if crop_w is None:
crop_w = crop_h
j = int(round((h - crop_h)/2.))
i = int(round((w - crop_w)/2.))
return scipy.misc.imresize(x[j:j+crop_h, i:i+crop_w],
[resize_w, resize_w])
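# Worked example (not from the original file): for a 100x60 input, crop_h becomes min(100, 60) = 60,
# so a centred 60x60 square starting at j = 20, i = 0 is taken and then resized to resize_w x resize_w.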
def transform(image, npx=64, is_crop=True, resize_w=64):
# npx : # of pixels width/height of image
cropped_image = center_crop(image, npx, resize_w=resize_w)
    return np.array(cropped_image)
"""
Copyright (c) 2018, <NAME>, <NAME>, <NAME>
https://github.com/spagliarini
Mnemosyne team, Inria, Bordeaux, France
https://team.inria.fr/mnemosyne/fr/
Distributed under the BSD-2-Clause License
PLOT: effect of parameter sigma on distance and convergence time
(Fig. 5-6)
"""
import os
import numpy as np
import matplotlib.pyplot as plt
csfont = {'fontname':'Times New Roman'}
#parameters
sim_number=50
sigma=[0.02, 0.05, 0.1, 0.2, 0.3, 0.5, 0.7] #tuning width
#To have the data all together
#inside directory where all the data about sigma>0.02 are stored
os.chdir('C://Users//Mnemosyne//Documents//Python Scripts//InverseModelBirdsong//results//IMsimple_model//SigmaComparison//DeltaT400by400')
conv=np.zeros((sim_number,np.size(sigma)))
final_dist=np.zeros((sim_number,np.size(sigma)))
for i in range (0, sim_number):
conv[i,1:]=np.load('Convergence time'+str(i)+'simulation.npy')
for j in range (1,np.size(sigma)):
final_dist[i,j]=np.load('Error'+str(i)+'simulation.npy')[j-1,int(conv[i,j])-1]
#add the data from 0.02 simulations
os.chdir('C://Users//Mnemosyne//Documents//Python Scripts//InverseModelBirdsong//results//IMsimple_model//SigmaComparison//FixTime0.02//TwentyMillionStep50')
#convergence time
conv[:,0]=20000000 #if fixed conv time
for i in range (0, sim_number): #over 50 simulations
final_dist[i,0]=np.load('Error'+str(i)+'simulation.npy')[0,-1] #if fixed conv time
#change directory back to the directory where all the data are stored
os.chdir('C://Users//Mnemosyne//Documents//Python Scripts//InverseModelBirdsong//results//IMsimple_model//SigmaComparison//UnifyPlot2')
np.save('FinalDistAll.npy',final_dist)
np.save('ConvTimeAll.npy',conv)
#When data are already all together
os.chdir('C://Users//Mnemosyne//Documents//Python Scripts//InverseModelBirdsong//results//IMsimple_model//SigmaComparison//UnifyPlot2')
conv=np.load('ConvTimeAll.npy')
final_dist=np.load('FinalDistAll.npy')
plt.figure()
for j in range (0,np.size(sigma)):
#Histogram of convergence times
plt.hist(conv[:,j],30,label='sigma=' + str(sigma[j]))
plt.legend(fontsize=8)
plt.xscale('log')
plt.xlabel('Convergence time (in number of time steps)',**csfont, fontsize=8)
plt.savefig('Convergence time.pdf')
mean_final_dist=np.mean(final_dist,axis=0)
median_final_dist=np.median(final_dist,axis=0)
std_final_dist=np.std(final_dist,axis=0)
mean_conv=np.mean(conv,axis=0)
std_conv=np.std(conv,axis=0)
plt.figure(figsize=(10,8))
fig, ax1 = plt.subplots()
ax2 = ax1.twinx() #twinx add the secondary axis
ax1.plot(sigma[1:], mean_conv[1::], color='r', label = 'Convergence time (in number of time steps)')
ax1.fill_between(sigma[1:],mean_conv[1:],mean_conv[1:]-std_conv[1:], color='r', alpha=.25)
ax1.fill_between(sigma[1:],mean_conv[1:],mean_conv[1:]+std_conv[1:], color='r', alpha=.25)
ax1.plot(sigma[0:2],mean_conv[0:2],'r--')
ax1.fill_between(sigma[0:2],mean_conv[0:2],mean_conv[0:2]-std_conv[0:2], color='r', alpha=.25)
ax1.fill_between(sigma[0:2],mean_conv[0:2],mean_conv[0:2]+std_conv[0:2], color='r', alpha=.25)
ax2.plot(sigma, mean_final_dist, color='b', label = 'Final distance from the target')
ax2.fill_between(sigma,mean_final_dist,mean_final_dist-std_final_dist, color='b', alpha=.25)
ax2.fill_between(sigma,mean_final_dist,mean_final_dist+std_final_dist, color='b', alpha=.25)
ax1.spines['top'].set_color('none')
ax2.spines['top'].set_color('none')
ax1.set_yscale('log')
ax1.set_xlim(0,0.72)
ax1.set_xticks(sigma)
ax1.set_xticklabels(sigma, rotation=45)
ax1.set_xlabel('Auditory selectivity $\sigma$', **csfont, fontsize=8)
ax1.set_ylabel('Convergence time', **csfont, fontsize=8)
ax2.set_yscale('log')
ax2.set_ylabel('Distance from the target', **csfont, fontsize=8)
fig.legend(bbox_to_anchor=(.1, .95, 0.85, .1), loc=3,
ncol=2, mode="expand", borderaxespad=0, fontsize=8)
fig.tight_layout()  # avoid cutting off the xlabel and the right-hand ylabel
plt.savefig('SigmaVsDistance-mean.pdf')
#To compare networks of different dimension
os.chdir('C://Users//Mnemosyne//Documents//Python Scripts//InverseModelBirdsong//results//IMsimple_model//mBYnNeuronModel//ComparisonFix//end1000000')
neurons_num=[1, 2, 3, 4, 5, 6, 7] #variant dimension (motor or auditory)
neurons_fix=3 #fix dimension (motor or auditory)
conv_M=np.zeros((sim_number,np.size(neurons_num))) #fixed motor dim
final_dist_M=np.zeros((sim_number,np.size(neurons_num)))
conv_A=np.zeros((sim_number,np.size(neurons_num))) #fixed auditory dim
final_dist_A=np.zeros((sim_number,np.size(neurons_num)))
mean_conv=np.zeros((np.size(neurons_num),2)) #mean over M and A fixed
mean_final_dist=np.zeros((np.size(neurons_num),2))
std_final_dist=np.zeros((np.size(neurons_num),2)) #std over M and A fixed
std_conv=np.zeros((np.size(neurons_num),2))
for i in range(0,sim_number):
for j in range(1,np.size(neurons_num)):
conv_M[i,j]=np.load('Convergence time' + str(i) +'simulation'+str(neurons_fix)+ ' ' + str(neurons_num[j])+'.npy')
conv_A[i,j]=np.load('Convergence time' + str(i) +'simulation'+str(neurons_num[j])+ ' ' + str(neurons_fix)+'.npy')
final_dist_M[i,j]=np.load('Error'+str(i)+'simulation'+str(neurons_fix)+ ' ' + str(neurons_num[j])+'.npy')[0,int(conv_M[i,j])-1]
final_dist_A[i,j]=np.load('Error'+str(i)+'simulation'+str(neurons_num[j])+ ' ' + str(neurons_fix)+'.npy')[0,int(conv_A[i,j])-1]
mean_conv[:,0]=np.mean(conv_M,axis=0)
mean_conv[:,1]=np.mean(conv_A,axis=0)
std_conv[:,0]=np.std(conv_M,axis=0)
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
"""
Convert a Bert large model exported by Keras2Onnx to a tiny model for test purpose.
The input model is generated like the following (need install keras2onnx from source):
import numpy
import keras2onnx
from transformers import (TFBertForQuestionAnswering, BertTokenizer)
    tokenizer = BertTokenizer.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad', do_lower_case=True, cache_dir=cache_dir)
model = TFBertForQuestionAnswering.from_pretrained(model_name_or_path, cache_dir=cache_dir)
question, text = "What is ONNX Runtime?", "ONNX Runtime is a performance-focused inference engine for ONNX models."
inputs = tokenizer.encode_plus(question, text, add_special_tokens=True, return_tensors='tf')
output_model_path = os.path.join(output_dir, 'keras_{}.onnx'.format(model_name_or_path))
if not os.path.exists(output_model_path):
model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
keras2onnx.save_model(onnx_model, output_model_path)
"""
import onnx
import onnx.utils
import sys
import argparse
import numpy as np
from onnx import ModelProto, TensorProto, numpy_helper
from onnxruntime_tools.transformers.onnx_model import OnnxModel
import os
import onnxruntime
import random
from pathlib import Path
import timeit
DICT_SIZE = 20
SEQ_LEN = 7
""" This class creates a tiny bert model for test purpose. """
class TinyBertOnnxModel(OnnxModel):
def __init__(self, model, verbose):
super(TinyBertOnnxModel, self).__init__(model, verbose)
self.resize_model()
def resize_weight(self, initializer_name, target_shape):
weight = self.get_initializer(initializer_name)
w = numpy_helper.to_array(weight)
target_w = w
if len(target_shape) == 1:
target_w = w[:target_shape[0]]
elif len(target_shape) == 2:
target_w = w[:target_shape[0], :target_shape[1]]
elif len(target_shape) == 3:
target_w = w[:target_shape[0], :target_shape[1], :target_shape[2]]
else:
print("at most 3 dimensions")
tensor = onnx.helper.make_tensor(name=initializer_name + '_resize',
data_type=TensorProto.FLOAT,
dims=target_shape,
vals=target_w.flatten().tolist())
return tensor
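    # Usage sketch (hypothetical, not from the original file): the initializer name below is made up.
    # Truncating a large [1024, 1024] weight down to the tiny [8, 8] shape would look like
    #     tensor = self.resize_weight('bert/encoder/layer_0/attention/self/query/kernel:0', [8, 8])
    # and keeps only the leading slice of the original values.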
def resize_model(self):
graph = self.model.graph
initializers = graph.initializer
# parameters of input base model.
old_parameters = {
"seq_len": 26,
"hidden_size": 1024,
"num_heads": 16,
"size_per_head": 64,
"word_dict_size": [28996, 30522], # list of supported dictionary size.
"max_word_position": 512
}
# parameters of output tiny model.
new_parameters = {
"seq_len": SEQ_LEN,
"hidden_size": 8,
"num_heads": 2,
"size_per_head": 4,
"word_dict_size": DICT_SIZE,
"max_word_position": 10
}
for input in graph.input:
if (input.type.tensor_type.shape.dim[1].dim_value == old_parameters["seq_len"]):
print("input", input.name, input.type.tensor_type.shape)
input.type.tensor_type.shape.dim[1].dim_value = new_parameters["seq_len"]
print("=>", input.type.tensor_type.shape)
reshapes = {}
for initializer in initializers:
tensor = numpy_helper.to_array(initializer)
dtype = np.float32 if initializer.data_type == 1 else np.int32
if len(tensor.shape) == 1 and tensor.shape[0] == 1:
if tensor == old_parameters["num_heads"]:
print("initializer type={}".format(initializer.data_type), initializer.name,
old_parameters["num_heads"], "=>[", new_parameters["num_heads"], "]")
initializer.CopyFrom(
numpy_helper.from_array(np.asarray([new_parameters["num_heads"]], dtype=dtype),
initializer.name))
elif tensor == old_parameters["seq_len"]:
print("initializer type={}".format(initializer.data_type), initializer.name,
old_parameters["seq_len"], "=>[", new_parameters["seq_len"], "]")
initializer.CopyFrom(
                        numpy_helper.from_array(np.asarray([new_parameters["seq_len"]], dtype=dtype), initializer.name))  # closing arguments mirror the num_heads branch above; the source truncates after np.asarray(...)
"""Module for reading Sentinel-1 data into a SICD model."""
# SarPy imports
from .sicd import MetaNode
from .utils import chipper
from . import Reader as ReaderSuper # Reader superclass
from . import sicd
from . import tiff
from ...geometry import geocoords as gc
from ...geometry import point_projection as point
# Python standard library imports
import copy
import os
import datetime
import xml.etree.ElementTree as ET
# External dependencies
import numpy as np
# We prefer numpy.polynomial.polynomial over numpy.polyval/polyfit since its coefficient
# ordering is consistent with SICD, and because it supports 2D polynomials.
from numpy.polynomial import polynomial as poly
from scipy.interpolate import griddata
# try to import comb from scipy.special.
# If an old version of scipy is being used then import from scipy.misc
from scipy import __version__ as scipy_version
dot_locs = []
for i, version_char in enumerate(scipy_version):
if version_char == '.':
dot_locs.append(i)
major_version = int(scipy_version[0:dot_locs[0]])
if major_version >= 1:
from scipy.special import comb
else:
from scipy.misc import comb
__classification__ = "UNCLASSIFIED"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
DATE_FMT = '%Y-%m-%dT%H:%M:%S.%f' # The datetime format Sentinel1 always uses
def isa(filename):
# Test to see if file is a manifest.safe file
try:
ns = dict([node for _, node in ET.iterparse(filename, events=['start-ns'])])
# Parse everything else
root_node = ET.parse(filename).getroot()
if ((root_node.find('./metadataSection/metadataObject[@ID="platform"]/' +
'metadataWrap/xmlData/safe:platform/safe:familyName', ns).text ==
'SENTINEL-1') and
(root_node.find('./metadataSection/metadataObject[@ID="generalProductInformation"]/' +
'metadataWrap/xmlData/s1sarl1:standAloneProductInformation/' +
's1sarl1:productType', ns).text ==
'SLC')):
return Reader
except Exception:
pass
class Reader(ReaderSuper):
"""Creates a file reader object for an Sentinel Data."""
def __init__(self, manifest_filename):
# print('Opening Sentinel reader object.')
# Read Sentinel Metadata from XML file first
filesets = manifest_files(manifest_filename)
meta_manifest = meta2sicd_manifest(manifest_filename)
self.sicdmeta = []
self.read_chip = []
for current_fs in filesets:
# There will be a set of files (product, data/tiff, noise, and
# calibration) for each swath and polarization. Within each of
# these file set, there may be multiple bursts, and thus SICDs.
basepathname = os.path.dirname(manifest_filename)
tiff_filename = os.path.join(basepathname, current_fs['data'])
meta_tiff = tiff.read_meta(tiff_filename)
if (('product' in current_fs) and
os.path.isfile(os.path.join(basepathname, current_fs['product']))):
product_filename = os.path.join(basepathname, current_fs['product'])
meta_product = meta2sicd_annot(product_filename)
# Extra calibration files
if (('calibration' in current_fs) and
os.path.isfile(os.path.join(basepathname, current_fs['calibration']))):
cal_filename = os.path.join(basepathname, current_fs['calibration'])
meta2sicd_cal(cal_filename, meta_product, meta_manifest)
# Noise metadata computation
if (('noise' in current_fs) and
os.path.isfile(os.path.join(basepathname, current_fs['noise']))):
noise_filename = os.path.join(basepathname, current_fs['noise'])
meta2sicd_noise(noise_filename, meta_product, meta_manifest)
# Image data
symmetry = (False, False, True) # True for all Sentinel-1 data
if len(meta_product) == 1: # Stripmap, single burst, open entire file
self.read_chip.append(tiff.chipper(tiff_filename, symmetry, meta_tiff))
else: # Multiple bursts within a single data file
base_chipper = tiff.chipper(tiff_filename, symmetry, meta_tiff)
num_lines_burst = int(ET.parse(product_filename).getroot().find(
'./swathTiming/linesPerBurst').text)
for j in range(len(meta_product)):
self.read_chip.append(chipper.subset(
base_chipper, [0, meta_tiff['ImageWidth'][0]],
num_lines_burst*j + np.array([0, num_lines_burst])))
for current_mp in meta_product:
# Populate derived SICD fields now that all data has been read in
sicd.derived_fields(current_mp)
# Handle dual-polarization case. Label channel number
# appropriately for ordering in manifest file.
# Should be the same for all burst in a TIFF
current_mp.ImageFormation.RcvChanProc.ChanIndex = 1 + \
[cp.TxRcvPolarization for cp in
meta_manifest.RadarCollection.RcvChannels.ChanParameters].index(
current_mp.ImageFormation.TxRcvPolarizationProc)
current_mp.merge(meta_manifest)
self.sicdmeta.append(current_mp)
# meta should already be set to this from meta_product:
# self.sicdmeta[-1].ImageData.NumRows = meta_tiff['ImageWidth'][0]
self.sicdmeta[-1].native = MetaNode()
self.sicdmeta[-1].native.tiff = meta_tiff
else: # No annotation metadata could be found
self.sicdmeta.append(meta_manifest)
self.sicdmeta[-1].ImageData = MetaNode()
self.sicdmeta[-1].ImageData.NumCols = meta_tiff['ImageLength'][0]
self.sicdmeta[-1].ImageData.NumRows = meta_tiff['ImageWidth'][0]
self.sicdmeta[-1].native = MetaNode()
self.sicdmeta[-1].native.tiff = meta_tiff
def manifest_files(filename):
"""Extract relevant filenames and relative paths for measurement and metadata files
from a Sentinel manifest.safe file and group them together appropriately."""
def _get_file_location(root_node, schema_type, possible_ids):
"""We want the data object that matches both the desired schema type and
        the possible ids from the relevant measurement data unit."""
return [dataobject.find('./byteStream/fileLocation').attrib['href'] # File location
for dataobject in [
root_node.find('dataObjectSection/' +
'dataObject[@repID="' + schema_type + '"]/' +
'[@ID="' + ids + '"]', ns)
for ids in possible_ids] # Attempt to find objects for all ids
if dataobject is not None][0] # ids not found will be None and discarded
# Parse namespaces
ns = dict([node for _, node in ET.iterparse(filename, events=['start-ns'])])
# Parse everything else
root_node = ET.parse(filename).getroot()
files = []
# Iterate through all of the "Measurement Data Units". This should provide each
# data object (measurements), together with its metadata and noise and
# calibration files.
for mdu in root_node.iterfind('./informationPackageMap/xfdu:contentUnit/' +
'xfdu:contentUnit/[@repID="s1Level1MeasurementSchema"]', ns):
# The dmdID references for each measurement data unit are indirect.
# They are equivalently pointers to pointers. Not clear why it was
# done this way, but here we get the real IDs for all files associated
# with this data unit.
associated_ids = [root_node.find('./metadataSection/metadataObject[@ID="' +
dmd + '"]/dataObjectPointer').attrib['dataObjectID']
for dmd in mdu.attrib['dmdID'].split()]
fnames = dict()
# Find data ("measurement") file itself
fnames['data'] = _get_file_location(
root_node, 's1Level1MeasurementSchema',
[mdu.find('./dataObjectPointer').attrib['dataObjectID']])
# Find all metadata files
fnames['product'] = _get_file_location(
root_node, 's1Level1ProductSchema', associated_ids)
fnames['noise'] = _get_file_location(
root_node, 's1Level1NoiseSchema', associated_ids)
fnames['calibration'] = _get_file_location(
root_node, 's1Level1CalibrationSchema', associated_ids)
files.append(fnames)
return files
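# Illustrative usage sketch (not part of the original reader). It relies only on the
# documented return structure of manifest_files(): a list of dicts keyed by 'data',
# 'product', 'noise' and 'calibration'. The SAFE directory path is hypothetical.
def _example_list_manifest_files(safe_dir):
    """Print the measurement TIFF and its associated metadata files for each data unit."""
    import os
    for fnames in manifest_files(os.path.join(safe_dir, 'manifest.safe')):
        print('measurement: ', fnames['data'])
        print('product:     ', fnames['product'])
        print('noise:       ', fnames['noise'])
        print('calibration: ', fnames['calibration'])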
def meta2sicd_manifest(filename):
# Parse namespaces
ns = dict([node for _, node in ET.iterparse(filename, events=['start-ns'])])
# Parse everything else
root_node = ET.parse(filename).getroot()
manifest = MetaNode()
# CollectionInfo
platform = root_node.find('./metadataSection/' +
'metadataObject[@ID="platform"]/' +
'metadataWrap/' +
'xmlData/' +
'safe:platform', ns)
manifest.CollectionInfo = MetaNode()
manifest.CollectionInfo.CollectorName = (platform.find('./safe:familyName', ns).text +
platform.find('./safe:number', ns).text)
manifest.CollectionInfo.RadarMode = MetaNode()
manifest.CollectionInfo.RadarMode.ModeID = platform.find(
'./safe:instrument/safe:extension/s1sarl1:instrumentMode/s1sarl1:mode', ns).text
if manifest.CollectionInfo.RadarMode.ModeID == 'SM':
manifest.CollectionInfo.RadarMode.ModeType = 'STRIPMAP'
else:
# Actually TOPSAR. Not what we normally think of for DYNAMIC STRIPMAP,
# but it is definitely not SPOTLIGHT, and doesn't seem to be regular
# STRIPMAP either.
manifest.CollectionInfo.RadarMode.ModeType = 'DYNAMIC STRIPMAP'
# Image Creation
processing = root_node.find('./metadataSection/' +
'metadataObject[@ID="processing"]/' +
'metadataWrap/' +
'xmlData/' +
'safe:processing', ns)
facility = processing.find('safe:facility', ns)
software = facility.find('./safe:software', ns)
manifest.ImageCreation = MetaNode()
manifest.ImageCreation.Application = software.attrib['name'] + ' ' + software.attrib['version']
manifest.ImageCreation.DateTime = datetime.datetime.strptime(
processing.attrib['stop'], DATE_FMT)
manifest.ImageCreation.Site = (facility.attrib['name'] + ', ' +
facility.attrib['site'] + ', ' +
facility.attrib['country'])
manifest.ImageCreation.Profile = 'Prototype'
# RadarCollection
manifest.RadarCollection = MetaNode()
manifest.RadarCollection.RcvChannels = MetaNode()
manifest.RadarCollection.RcvChannels.ChanParameters = []
for current_pol in root_node.findall('./metadataSection/' +
'metadataObject[@ID="generalProductInformation"]/' +
'metadataWrap/' +
'xmlData/' +
's1sarl1:standAloneProductInformation/' +
's1sarl1:transmitterReceiverPolarisation', ns):
manifest.RadarCollection.RcvChannels.ChanParameters.append(MetaNode())
manifest.RadarCollection.RcvChannels.ChanParameters[-1].TxRcvPolarization = \
current_pol.text[0] + ':' + current_pol.text[1]
return(manifest)
def meta2sicd_annot(filename):
def _polyshift(a, shift):
b = np.zeros(a.size)
for j in range(1, len(a)+1):
for k in range(j, len(a)+1):
b[j-1] = b[j-1] + (a[k-1]*comb(k-1, j-1)*np.power(shift, (k-j)))
return b
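    # _polyshift re-expresses the polynomial p(t) = a[0] + a[1]*t + ... in a shifted
    # variable t' = t - shift, returning b such that p(t' + shift) = b[0] + b[1]*t' + ...
    # (binomial expansion of each term). Worked example with hypothetical coefficients:
    # a = [1, 2, 3] (i.e. 1 + 2t + 3t^2) and shift = 2 gives b = [17, 14, 3],
    # since 1 + 2(t'+2) + 3(t'+2)^2 = 17 + 14t' + 3t'^2.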
# Setup constants
C = 299792458.
# Parse annotation XML (no namespace to worry about)
root_node = ET.parse(filename).getroot()
common_meta = MetaNode()
# CollectionInfo
common_meta.CollectionInfo = MetaNode()
common_meta.CollectionInfo.CollectorName = root_node.find('./adsHeader/missionId').text
common_meta.CollectionInfo.CollectType = 'MONOSTATIC'
common_meta.CollectionInfo.RadarMode = MetaNode()
common_meta.CollectionInfo.RadarMode.ModeID = root_node.find('./adsHeader/mode').text
if common_meta.CollectionInfo.RadarMode.ModeID[0] == 'S':
common_meta.CollectionInfo.RadarMode.ModeType = 'STRIPMAP'
else:
# Actually TOPSAR. Not what we normally think of for DYNAMIC STRIPMAP,
# but it is definitely not SPOTLIGHT (actually counter to the spotlight
# beam motion), and it isn't STRIPMAP with a constant angle between the
# beam and direction of travel either, so we use DYNAMIC STRIPMAP as a
# catchall.
common_meta.CollectionInfo.RadarMode.ModeType = 'DYNAMIC STRIPMAP'
common_meta.CollectionInfo.Classification = 'UNCLASSIFIED'
# ImageData
common_meta.ImageData = MetaNode()
# For SLC, the following test should always hold true:
if root_node.find('./imageAnnotation/imageInformation/pixelValue').text == 'Complex':
common_meta.ImageData.PixelType = 'RE16I_IM16I'
else: # This code only handles SLC
raise(ValueError('SLC data should be 16-bit complex.'))
burst_list = root_node.findall('./swathTiming/burstList/burst')
if burst_list:
numbursts = len(burst_list)
else:
numbursts = 0
# These two definitions of NumRows should always be the same for
# non-STRIPMAP data (For STRIPMAP, samplesPerBurst is set to zero.) Number
# of rows in burst should be the same as the full image. Both of these
# numbers also should match the ImageWidth field of the measurement TIFF.
# The NumCols definition will be different for TOPSAR/STRIPMAP. Since each
# burst is its own coherent data period, and thus SICD, we set the SICD
# metadata to describe each individual burst.
if numbursts > 0: # TOPSAR
common_meta.ImageData.NumRows = int(root_node.find('./swathTiming/samplesPerBurst').text)
        # This is the number of columns in a single burst.
common_meta.ImageData.NumCols = int(root_node.find('./swathTiming/linesPerBurst').text)
else: # STRIPMAP
common_meta.ImageData.NumRows = int(root_node.find(
'./imageAnnotation/imageInformation/numberOfSamples').text)
        # This is the number of columns in the full TIFF measurement file, even
# if it contains multiple bursts.
common_meta.ImageData.NumCols = int(root_node.find(
'./imageAnnotation/imageInformation/numberOfLines').text)
common_meta.ImageData.FirstRow = 0
common_meta.ImageData.FirstCol = 0
common_meta.ImageData.FullImage = MetaNode()
common_meta.ImageData.FullImage.NumRows = common_meta.ImageData.NumRows
common_meta.ImageData.FullImage.NumCols = common_meta.ImageData.NumCols
# SCP pixel within entire TIFF
# Note: numpy round behaves differently than python round and MATLAB round,
# so we avoid it here.
center_cols = np.ceil((0.5 + np.arange(max(numbursts, 1))) *
float(common_meta.ImageData.NumCols))-1
center_rows = round(float(common_meta.ImageData.NumRows)/2.-1.) * \
np.ones_like(center_cols)
    # SCP pixel within a single burst image is the same for all bursts, since
    # each burst is the same size
common_meta.ImageData.SCPPixel = MetaNode()
common_meta.ImageData.SCPPixel.Col = int(center_cols[0])
common_meta.ImageData.SCPPixel.Row = int(center_rows[0])
# GeoData
common_meta.GeoData = MetaNode()
common_meta.GeoData.EarthModel = 'WGS_84'
# Initially, we just seed the SCP coordinate with a rough value. Later
# we will put in something more precise.
geo_grid_point_list = root_node.findall(
'./geolocationGrid/geolocationGridPointList/geolocationGridPoint')
scp_col, scp_row, x, y, z = [], [], [], [], []
for grid_point in geo_grid_point_list:
scp_col.append(float(grid_point.find('./line').text))
scp_row.append(float(grid_point.find('./pixel').text))
lat = float(grid_point.find('./latitude').text)
lon = float(grid_point.find('./longitude').text)
hgt = float(grid_point.find('./height').text)
# Can't interpolate across international date line -180/180 longitude,
        # so move to ECF space before doing the griddata interpolation
ecf = gc.geodetic_to_ecf((lat, lon, hgt))
x.append(ecf[0, 0])
y.append(ecf[0, 1])
z.append(ecf[0, 2])
row_col = np.vstack((scp_col, scp_row)).transpose()
center_row_col = np.vstack((center_cols, center_rows)).transpose()
scp_x = griddata(row_col, x, center_row_col)
scp_y = griddata(row_col, y, center_row_col)
scp_z = griddata(row_col, z, center_row_col)
# Grid
common_meta.Grid = MetaNode()
if root_node.find('./generalAnnotation/productInformation/projection').text == 'Slant Range':
common_meta.Grid.ImagePlane = 'SLANT'
common_meta.Grid.Type = 'RGZERO'
delta_tau_s = 1./float(root_node.find(
'./generalAnnotation/productInformation/rangeSamplingRate').text)
common_meta.Grid.Row = MetaNode()
common_meta.Grid.Col = MetaNode()
# Range Processing
range_proc = root_node.find('./imageAnnotation/processingInformation' +
'/swathProcParamsList/swathProcParams/rangeProcessing')
common_meta.Grid.Row.SS = (C/2.) * delta_tau_s
common_meta.Grid.Row.Sgn = -1
# Justification for Sgn:
# 1) "Sentinel-1 Level 1 Detailed Algorithm Definition" shows last step in
# image formation as IFFT, which would mean a forward FFT (-1 Sgn) would be
# required to transform back.
# 2) The forward FFT of a sliding window shows the Doppler centroid
# increasing as you move right in the image, which must be the case for the
# TOPSAR collection mode which starts in a rear squint and transitions to a
# forward squint (and are always right looking).
fc = float(root_node.find('./generalAnnotation/productInformation/radarFrequency').text)
common_meta.Grid.Row.KCtr = 2.*fc / C
common_meta.Grid.Row.DeltaKCOAPoly = np.atleast_2d(0)
common_meta.Grid.Row.ImpRespBW = 2.*float(range_proc.find('./processingBandwidth').text)/C
common_meta.Grid.Row.WgtType = MetaNode()
common_meta.Grid.Row.WgtType.WindowName = range_proc.find('./windowType').text.upper()
if (common_meta.Grid.Row.WgtType.WindowName == 'NONE'):
common_meta.Grid.Row.WgtType.WindowName = 'UNIFORM'
elif (common_meta.Grid.Row.WgtType.WindowName == 'HAMMING'):
# The usual Sentinel weighting
common_meta.Grid.Row.WgtType.Parameter = MetaNode()
# Generalized Hamming window parameter
common_meta.Grid.Row.WgtType.Parameter.name = 'COEFFICIENT'
common_meta.Grid.Row.WgtType.Parameter.value = range_proc.find('./windowCoefficient').text
# Azimuth Processing
az_proc = root_node.find('./imageAnnotation/processingInformation' +
'/swathProcParamsList/swathProcParams/azimuthProcessing')
common_meta.Grid.Col.SS = float(root_node.find(
'./imageAnnotation/imageInformation/azimuthPixelSpacing').text)
common_meta.Grid.Col.Sgn = -1 # Must be the same as Row.Sgn
common_meta.Grid.Col.KCtr = 0
dop_bw = float(az_proc.find('./processingBandwidth').text) # Doppler bandwidth
# Image column spacing in zero doppler time (seconds)
# Sentinel-1 is always right-looking, so should always be positive
ss_zd_s = float(root_node.find('./imageAnnotation/imageInformation/azimuthTimeInterval').text)
# Convert to azimuth spatial bandwidth (cycles per meter)
common_meta.Grid.Col.ImpRespBW = dop_bw*ss_zd_s/common_meta.Grid.Col.SS
common_meta.Grid.Col.WgtType = MetaNode()
common_meta.Grid.Col.WgtType.WindowName = az_proc.find('./windowType').text.upper()
if (common_meta.Grid.Col.WgtType.WindowName == 'NONE'):
common_meta.Grid.Col.WgtType.WindowName = 'UNIFORM'
    elif (common_meta.Grid.Col.WgtType.WindowName == 'HAMMING'):
# The usual Sentinel weighting
common_meta.Grid.Col.WgtType.Parameter = MetaNode()
# Generalized Hamming window parameter
common_meta.Grid.Col.WgtType.Parameter.name = 'COEFFICIENT'
common_meta.Grid.Col.WgtType.Parameter.value = az_proc.find('./windowCoefficient').text
# We will compute Grid.Col.DeltaKCOAPoly separately per burst later.
# Grid.Row/Col.DeltaK1/2, WgtFunct, ImpRespWid will be computed later in sicd.derived_fields
# Timeline
prf = float(root_node.find(
'./generalAnnotation/downlinkInformationList/downlinkInformation/prf').text)
common_meta.Timeline = MetaNode()
common_meta.Timeline.IPP = MetaNode()
common_meta.Timeline.IPP.Set = MetaNode()
# Because of calibration pulses, it is unlikely this PRF was maintained
# through this entire period, but we don't currently include that detail.
common_meta.Timeline.IPP.Set.IPPPoly = np.array([0, prf])
    # Always the left-most SICD column (of the first burst or the entire STRIPMAP dataset),
# since Sentinel-1 is always right-looking.
azimuth_time_first_line = datetime.datetime.strptime(root_node.find(
'./imageAnnotation/imageInformation/productFirstLineUtcTime').text, DATE_FMT)
# Offset in zero Doppler time from first column to SCP column
eta_mid = ss_zd_s * float(common_meta.ImageData.SCPPixel.Col)
# Position
orbit_list = root_node.findall('./generalAnnotation/orbitList/orbit')
# For ARP vector calculation later on
state_vector_T, state_vector_X, state_vector_Y, state_vector_Z = [], [], [], []
for orbit in orbit_list:
state_vector_T.append(datetime.datetime.strptime(orbit.find('./time').text, DATE_FMT))
state_vector_X.append(float(orbit.find('./position/x').text))
state_vector_Y.append(float(orbit.find('./position/y').text))
state_vector_Z.append(float(orbit.find('./position/z').text))
# We could also have used external orbit file here, instead of orbit state fields
# in SLC annotation file.
# RadarCollection
pol = root_node.find('./adsHeader/polarisation').text
common_meta.RadarCollection = MetaNode()
common_meta.RadarCollection.TxPolarization = pol[0]
common_meta.RadarCollection.TxFrequency = MetaNode()
common_meta.RadarCollection.Waveform = MetaNode()
common_meta.RadarCollection.TxFrequency.Min = fc + float(root_node.find(
'./generalAnnotation/downlinkInformationList/downlinkInformation/' +
'downlinkValues/txPulseStartFrequency').text)
wfp_common = MetaNode()
wfp_common.TxFreqStart = common_meta.RadarCollection.TxFrequency.Min
wfp_common.TxPulseLength = float(root_node.find(
'./generalAnnotation/downlinkInformationList/downlinkInformation/' +
'downlinkValues/txPulseLength').text)
wfp_common.TxFMRate = float(root_node.find(
'./generalAnnotation/downlinkInformationList/downlinkInformation/' +
'downlinkValues/txPulseRampRate').text)
bw = wfp_common.TxPulseLength * wfp_common.TxFMRate
common_meta.RadarCollection.TxFrequency.Max = \
common_meta.RadarCollection.TxFrequency.Min + bw
wfp_common.TxRFBandwidth = bw
wfp_common.RcvDemodType = 'CHIRP'
# RcvFMRate = 0 for RcvDemodType='CHIRP'
wfp_common.RcvFMRate = 0
wfp_common.ADCSampleRate = float(root_node.find(
'./generalAnnotation/productInformation/rangeSamplingRate').text) # Raw not decimated
# After decimation would be:
# wfp_common.ADCSampleRate = \
# product/generalAnnotation/downlinkInformationList/downlinkInformation/downlinkValues/rangeDecimation/samplingFrequencyAfterDecimation
# We could have multiple receive window lengths across the collect
swl_list = root_node.findall('./generalAnnotation/downlinkInformationList/' +
'downlinkInformation/downlinkValues/swlList/swl')
common_meta.RadarCollection.Waveform.WFParameters = []
for swl in swl_list:
common_meta.RadarCollection.Waveform.WFParameters.append(copy.deepcopy(wfp_common))
common_meta.RadarCollection.Waveform.WFParameters[-1].RcvWindowLength = \
float(swl.find('./value').text)
# ImageFormation
common_meta.ImageFormation = MetaNode()
common_meta.ImageFormation.RcvChanProc = MetaNode()
common_meta.ImageFormation.RcvChanProc.NumChanProc = 1
common_meta.ImageFormation.RcvChanProc.PRFScaleFactor = 1
# RcvChanProc.ChanIndex must be populated external to this since it depends
    # on how the polarizations were ordered in the manifest file.
common_meta.ImageFormation.TxRcvPolarizationProc = pol[0] + ':' + pol[1]
# Assume image formation uses all data
common_meta.ImageFormation.TStartProc = 0
common_meta.ImageFormation.TxFrequencyProc = MetaNode()
common_meta.ImageFormation.TxFrequencyProc.MinProc = \
common_meta.RadarCollection.TxFrequency.Min
common_meta.ImageFormation.TxFrequencyProc.MaxProc = \
common_meta.RadarCollection.TxFrequency.Max
common_meta.ImageFormation.ImageFormAlgo = 'RMA'
# From the Sentinel-1 Level 1 Detailed Algorithm Definition document
if common_meta.CollectionInfo.RadarMode.ModeID[0] == 'S': # stripmap mode
common_meta.ImageFormation.STBeamComp = 'NO'
else:
common_meta.ImageFormation.STBeamComp = 'SV' # TOPSAR Mode
common_meta.ImageFormation.ImageBeamComp = 'NO'
common_meta.ImageFormation.AzAutofocus = 'NO'
common_meta.ImageFormation.RgAutofocus = 'NO'
# RMA
# "Sentinel-1 Level 1 Detailed Algorithm Definition" document seems to most
# closely match the RangeDoppler algorithm (with accurate secondary range
# compression or "option 2" as described in the Cumming and Wong book).
common_meta.RMA = MetaNode()
common_meta.RMA.RMAlgoType = 'RG_DOP'
common_meta.RMA.ImageType = 'INCA'
# tau_0 is notation from ESA deramping paper
tau_0 = float(root_node.find('./imageAnnotation/imageInformation/slantRangeTime').text)
common_meta.RMA.INCA = MetaNode()
common_meta.RMA.INCA.R_CA_SCP = ((C/2.) *
(tau_0 +
(float(common_meta.ImageData.SCPPixel.Row) *
delta_tau_s)))
common_meta.RMA.INCA.FreqZero = fc
# If we use the Doppler Centroid as defined directly in the manifest.safe
# metadata, then the center of frequency support Col.DeltaKCOAPoly does not
# correspond to RMA.INCA.DopCentroidPoly. However, we will compute
# TimeCOAPoly later to match a newly computed Doppler Centroid based off of
    # DeltaKCOAPoly, assuming that the COA is at the peak signal (fdop_COA
# = fdop_DC).
common_meta.RMA.INCA.DopCentroidCOA = True
# Doppler Centroid
# Get common (non-burst specific) parameters we will need for Doppler
# centroid and rate computations later
dc_estimate_list = root_node.findall('./dopplerCentroid/dcEstimateList/dcEstimate')
dc_az_time, dc_t0, data_dc_poly = [], [], []
for dc_estimate in dc_estimate_list:
dc_az_time.append(datetime.datetime.strptime(
dc_estimate.find('./azimuthTime').text, DATE_FMT))
dc_t0.append(float(dc_estimate.find('./t0').text))
data_dc_poly.append(np.fromstring(dc_estimate.find('./dataDcPolynomial').text, sep=' '))
azimuth_fm_rate_list = root_node.findall(
'./generalAnnotation/azimuthFmRateList/azimuthFmRate')
az_t, az_t0, k_a_poly = [], [], []
for az_fm_rate in azimuth_fm_rate_list:
az_t.append(datetime.datetime.strptime(
az_fm_rate.find('./azimuthTime').text, DATE_FMT))
az_t0.append(float(az_fm_rate.find('./t0').text))
# Two different ways we have seen in XML for storing the FM Rate polynomial
try:
k_a_poly.append(np.fromstring(az_fm_rate.find(
'./azimuthFmRatePolynomial').text, sep=' '))
except TypeError: # old annotation xml file format
k_a_poly.append(np.array([float(az_fm_rate.find('./c0').text),
float(az_fm_rate.find('./c1').text),
float(az_fm_rate.find('./c2').text)]))
# Azimuth steering rate (constant, not dependent on burst or range)
k_psi = float(root_node.find(
'./generalAnnotation/productInformation/azimuthSteeringRate').text)
k_psi = k_psi*np.pi/180. # Convert from degrees/sec into radians/sec
# Compute per/burst metadata
sicd_meta = []
for count in range(max(numbursts, 1)):
burst_meta = copy.deepcopy(common_meta)
# Collection Info
# Sensor specific portions of metadata
sliceNumber = root_node.find('./imageAnnotation/imageInformation/sliceNumber')
if sliceNumber is not None:
sliceNumber = sliceNumber.text
else:
sliceNumber = 0
swath = root_node.find('./adsHeader/swath').text
burst_meta.CollectionInfo.Parameter = [MetaNode()]
burst_meta.CollectionInfo.Parameter[0].name = 'SLICE'
burst_meta.CollectionInfo.Parameter[0].value = sliceNumber
burst_meta.CollectionInfo.Parameter.append(MetaNode())
burst_meta.CollectionInfo.Parameter[1].name = 'SWATH'
burst_meta.CollectionInfo.Parameter[1].value = swath
burst_meta.CollectionInfo.Parameter.append(MetaNode())
burst_meta.CollectionInfo.Parameter[2].name = 'BURST'
burst_meta.CollectionInfo.Parameter[2].value = str(count+1)
burst_meta.CollectionInfo.Parameter.append(MetaNode())
burst_meta.CollectionInfo.Parameter[3].name = 'ORBIT_SOURCE'
burst_meta.CollectionInfo.Parameter[3].value = 'SLC_INTERNAL' # No external orbit file
# Image Data.ValidData
# Get valid bounds of burst from metadata. Assume a rectangular valid
# area-- not totally true, but all that seems to be defined by the
# product XML metadata.
if numbursts > 0: # Valid data does not seem to be defined for STRIPMAP data
burst = root_node.find('./swathTiming/burstList/burst[' + str(count+1) + ']')
xml_first_cols = np.fromstring(burst.find('./firstValidSample').text, sep=' ')
xml_last_cols = np.fromstring(burst.find('./lastValidSample').text, sep=' ')
valid_cols = np.where((xml_first_cols >= 0) & (xml_last_cols >= 0))[0]
first_row = int(min(xml_first_cols[valid_cols]))
last_row = int(max(xml_last_cols[valid_cols]))
# From SICD spec: Vertices ordered clockwise with vertex 1
# determined by: (1) minimum row index, (2) minimum column index if
# 2 vertices exist with minimum row index.
burst_meta.ImageData.ValidData = MetaNode()
burst_meta.ImageData.ValidData.Vertex = [MetaNode()]
burst_meta.ImageData.ValidData.Vertex[0].Row = first_row
burst_meta.ImageData.ValidData.Vertex[0].Col = int(valid_cols[0])
burst_meta.ImageData.ValidData.Vertex.append(MetaNode())
burst_meta.ImageData.ValidData.Vertex[1].Row = first_row
burst_meta.ImageData.ValidData.Vertex[1].Col = int(valid_cols[-1])
burst_meta.ImageData.ValidData.Vertex.append(MetaNode())
burst_meta.ImageData.ValidData.Vertex[2].Row = last_row
burst_meta.ImageData.ValidData.Vertex[2].Col = int(valid_cols[-1])
burst_meta.ImageData.ValidData.Vertex.append(MetaNode())
burst_meta.ImageData.ValidData.Vertex[3].Row = last_row
burst_meta.ImageData.ValidData.Vertex[3].Col = int(valid_cols[0])
else:
burst_meta.ImageData.ValidData = MetaNode()
burst_meta.ImageData.ValidData.Vertex = [MetaNode()]
burst_meta.ImageData.ValidData.Vertex[0].Row = 0
burst_meta.ImageData.ValidData.Vertex[0].Col = 0
burst_meta.ImageData.ValidData.Vertex.append(MetaNode())
burst_meta.ImageData.ValidData.Vertex[1].Row = 0
burst_meta.ImageData.ValidData.Vertex[1].Col = int(common_meta.ImageData.NumCols)
burst_meta.ImageData.ValidData.Vertex.append(MetaNode())
burst_meta.ImageData.ValidData.Vertex[2].Row = int(common_meta.ImageData.NumRows)
burst_meta.ImageData.ValidData.Vertex[2].Col = int(common_meta.ImageData.NumCols)
burst_meta.ImageData.ValidData.Vertex.append(MetaNode())
burst_meta.ImageData.ValidData.Vertex[3].Row = int(common_meta.ImageData.NumRows)
burst_meta.ImageData.ValidData.Vertex[3].Col = 0
# Timeline
if numbursts > 0:
# This is the first and last zero doppler times of the columns in
# the burst. This isn't really what we mean by CollectStart and
# CollectDuration in SICD (really we want first and last pulse
            # times), but it's all we have.
start = datetime.datetime.strptime(burst.find('./azimuthTime').text, DATE_FMT)
first_line_relative_start = 0 # CollectStart is zero Doppler time of first column
else:
start = datetime.datetime.strptime(root_node.find(
'./generalAnnotation/downlinkInformationList/downlinkInformation/' +
'firstLineSensingTime').text, DATE_FMT)
stop = datetime.datetime.strptime(root_node.find(
'./generalAnnotation/downlinkInformationList/downlinkInformation/' +
'lastLineSensingTime').text, DATE_FMT)
# Maybe CollectStart/CollectDuration should be set by
# product/imageAnnotation/imageInformation/productFirstLineUtcTime
# and productLastLineUtcTime. This would make it consistent with
# non-stripmap which just defines first and last zero doppler
# times, but is not really consistent with what SICD generally
# means by CollectStart/CollectDuration.
burst_meta.Timeline.CollectStart = start
burst_meta.Timeline.CollectDuration = (stop-start).total_seconds()
first_line_relative_start = (azimuth_time_first_line-start).total_seconds()
# After we have start_s, we can generate CoreName
burst_meta.CollectionInfo.CoreName = (
# Prefix with the NGA CoreName standard format
start.strftime('%d%b%y') + common_meta.CollectionInfo.CollectorName +
# The following core name is unique within all Sentinel-1 coherent data periods:
root_node.find('./adsHeader/missionDataTakeId').text + '_' +
('%02d' % int(sliceNumber)) + '_' + swath + '_' + ('%02d' % (count+1)))
# Position
# Polynomial is computed with respect to time from start of burst
state_vector_T_burst = np.array([(t-start).total_seconds() for t in state_vector_T])
        # Some datasets don't include enough state vectors for a 5th-order fit.
        # One could find the polynomial order that most accurately describes
        # this position, using velocity as cross-validation so that the data
        # is not overfit. Orders over 5 often become badly conditioned anyway.
polyorder = 5
burst_meta.Position = MetaNode()
burst_meta.Position.ARPPoly = MetaNode()
burst_meta.Position.ARPPoly.X = poly.polyfit(state_vector_T_burst,
state_vector_X, polyorder)
burst_meta.Position.ARPPoly.Y = poly.polyfit(state_vector_T_burst,
state_vector_Y, polyorder)
burst_meta.Position.ARPPoly.Z = poly.polyfit(state_vector_T_burst,
state_vector_Z, polyorder)
# RMA (still in for statement for each burst)
# Sentinel-1 is always right-looking, so TimeCAPoly should never have
# to be "flipped" for left-looking cases.
burst_meta.RMA.INCA.TimeCAPoly = np.array([first_line_relative_start + eta_mid,
ss_zd_s/float(common_meta.Grid.Col.SS)])
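        # TimeCAPoly gives zero-Doppler (closest-approach) time as a linear function of
        # azimuth (column) position in meters from the SCP:
        #   t_CA(x_col) = (first_line_relative_start + eta_mid) + x_col * (ss_zd_s / Col.SS)
        # i.e. the constant term is the zero-Doppler time of the SCP column relative to
        # CollectStart, and the slope converts meters of azimuth into seconds.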
# Doppler Centroid
# We choose the single Doppler centroid polynomial closest to the
# center of the current burst.
dc_est_times = np.array([(t - start).total_seconds() for t in dc_az_time])
dc_poly_ind = np.argmin(abs(dc_est_times - burst_meta.RMA.INCA.TimeCAPoly[0]))
# Shift polynomial from origin at dc_t0 (reference time for Sentinel
# polynomial) to SCP time (reference time for SICD polynomial)
range_time_scp = (common_meta.RMA.INCA.R_CA_SCP * 2)/C
# The Doppler centroid field in the Sentinel-1 metadata is not
# complete, so we cannot use it directly. That description of Doppler
# centroid by itself does not vary by azimuth although the
# Col.DeltaKCOAPoly we see in the data definitely does. We will define
# DopCentroidPoly differently later down in the code.
# Doppler rate
# Total Doppler rate is a combination of the Doppler FM rate and the
# Doppler rate introduced by the scanning of the antenna.
# We pick a single velocity magnitude at closest approach to represent
# the entire burst. This is valid, since the magnitude of the velocity
# changes very little.
vm_ca = np.linalg.norm([ # Magnitude of the velocity at SCP closest approach
poly.polyval(burst_meta.RMA.INCA.TimeCAPoly[0], # Velocity in X
poly.polyder(burst_meta.Position.ARPPoly.X)),
poly.polyval(burst_meta.RMA.INCA.TimeCAPoly[0], # Velocity in Y
poly.polyder(burst_meta.Position.ARPPoly.Y)),
poly.polyval(burst_meta.RMA.INCA.TimeCAPoly[0], # Velocity in Z
poly.polyder(burst_meta.Position.ARPPoly.Z))])
# Compute FM Doppler Rate, k_a
# We choose the single azimuth FM rate polynomial closest to the
# center of the current burst.
az_rate_times = np.array([(t - start).total_seconds() for t in az_t])
az_rate_poly_ind = np.argmin(abs(az_rate_times - burst_meta.RMA.INCA.TimeCAPoly[0]))
# SICD's Doppler rate seems to be FM Doppler rate, not total Doppler rate
# Shift polynomial from origin at az_t0 (reference time for Sentinel
# polynomial) to SCP time (reference time for SICD polynomial)
DR_CA = _polyshift(k_a_poly[az_rate_poly_ind],
range_time_scp - az_t0[az_rate_poly_ind])
        # Scale 1D polynomial from Hz/s^n to Hz/m^n
DR_CA = DR_CA * ((2./C)**np.arange(len(DR_CA)))
r_ca = np.array([common_meta.RMA.INCA.R_CA_SCP, 1])
# RMA.INCA.DRateSFPoly is a function of Doppler rate.
burst_meta.RMA.INCA.DRateSFPoly = (- np.convolve(DR_CA, r_ca) * # Assumes a SGN of -1
(C / (2 * fc * np.power(vm_ca, 2))))
burst_meta.RMA.INCA.DRateSFPoly = burst_meta.RMA.INCA.DRateSFPoly[:, np.newaxis]
# TimeCOAPoly
# TimeCOAPoly = TimeCA + (DopCentroid/dop_rate); # True if DopCentroidCOA = true
# Since we don't know how to evaluate this equation analytically, we
# could evaluate samples of it across our image and fit a 2D polynomial
# to it later.
POLY_ORDER = 2
grid_samples = POLY_ORDER + 1
cols = np.around(np.linspace(0, common_meta.ImageData.NumCols-1,
num=grid_samples)).astype(int)
rows = np.around(np.linspace(0, common_meta.ImageData.NumRows-1,
num=grid_samples)).astype(int)
coords_az_m = (cols - common_meta.ImageData.SCPPixel.Col).astype(float) *\
common_meta.Grid.Col.SS
coords_rg_m = (rows - common_meta.ImageData.SCPPixel.Row).astype(float) *\
common_meta.Grid.Row.SS
timeca_sampled = poly.polyval(coords_az_m, burst_meta.RMA.INCA.TimeCAPoly)
doprate_sampled = poly.polyval(coords_rg_m, DR_CA)
# Grid.Col.DeltaKCOAPoly
# Reference: Definition of the TOPS SLC deramping function for products
# generated by the S-1 IPF, COPE-GSEG-EOPG-TN-14-0025
tau = tau_0 + delta_tau_s * np.arange(0, int(common_meta.ImageData.NumRows))
# The vm_ca used here is slightly different than the ESA deramp
# document, since the document interpolates the velocity values given
# rather than the position values, which is what we do here.
k_s = (2. * (vm_ca / C)) * fc * k_psi
k_a = poly.polyval(tau - az_t0[az_rate_poly_ind], k_a_poly[az_rate_poly_ind])
k_t = (k_a * k_s)/(k_a - k_s)
f_eta_c = poly.polyval(tau - dc_t0[dc_poly_ind], data_dc_poly[dc_poly_ind])
eta = ((-float(common_meta.ImageData.SCPPixel.Col) * ss_zd_s) +
(np.arange(float(common_meta.ImageData.NumCols))*ss_zd_s))
eta_c = -f_eta_c/k_a # Beam center crossing time. TimeCOA in SICD terminology
eta_ref = eta_c - eta_c[0]
eta_grid, eta_ref_grid = np.meshgrid(eta[cols], eta_ref[rows])
eta_arg = eta_grid - eta_ref_grid
deramp_phase = k_t[rows, np.newaxis] * np.power(eta_arg, 2) / 2
demod_phase = eta_arg * f_eta_c[rows, np.newaxis]
# Sampled phase correction for deramping and demodding
total_phase = deramp_phase + demod_phase
# Least squares fit for 2D polynomial
# A*x = b
[coords_az_m_2d, coords_rg_m_2d] = np.meshgrid(coords_az_m, coords_rg_m)
a = np.zeros(((POLY_ORDER+1)**2, (POLY_ORDER+1)**2))
for k in range(POLY_ORDER+1):
for j in range(POLY_ORDER+1):
a[:, k*(POLY_ORDER+1)+j] = np.multiply(
np.power(coords_az_m_2d.flatten(), j),
np.power(coords_rg_m_2d.flatten(), k))
A = np.zeros(((POLY_ORDER+1)**2, (POLY_ORDER+1)**2))
for k in range((POLY_ORDER+1)**2):
for j in range((POLY_ORDER+1)**2):
A[k, j] = np.multiply(a[:, k], a[:, j]).sum()
b_phase = [np.multiply(total_phase.flatten(), a[:, k]).sum()
for k in range((POLY_ORDER+1)**2)]
x_phase = np.linalg.solve(A, b_phase)
phase = np.reshape(x_phase, (POLY_ORDER+1, POLY_ORDER+1))
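        # The loops above assemble the normal equations for an ordinary least-squares
        # 2D polynomial fit: a is the (grid_samples^2) x (POLY_ORDER+1)^2 design matrix
        # of monomials az^j * rg^k, A = a.T @ a, and b_phase = a.T @ total_phase.flatten().
        # An equivalent one-line sketch (not used here) would be:
        #   x_phase, *_ = np.linalg.lstsq(a, total_phase.flatten(), rcond=None)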
# DeltaKCOAPoly is derivative of phase in Col direction
burst_meta.Grid.Col.DeltaKCOAPoly = poly.polyder(phase, axis=1)
# DopCentroidPoly/TimeCOAPoly
# Another way to derive the Doppler Centroid, which is back-calculated
# from the ESA-documented azimuth deramp phase function.
DopCentroidPoly = (burst_meta.Grid.Col.DeltaKCOAPoly *
(float(common_meta.Grid.Col.SS) / ss_zd_s))
burst_meta.RMA.INCA.DopCentroidPoly = DopCentroidPoly
dopcentroid2_sampled = poly.polyval2d(coords_rg_m_2d, coords_az_m_2d, DopCentroidPoly)
timecoa_sampled = timeca_sampled + dopcentroid2_sampled/doprate_sampled[:, np.newaxis]
# Convert sampled TimeCOA to polynomial
b_coa = [np.multiply(timecoa_sampled.flatten(), a[:, k]).sum()
for k in range((POLY_ORDER+1)**2)]
x_coa = np.linalg.solve(A, b_coa)
burst_meta.Grid.TimeCOAPoly = np.reshape(x_coa, (POLY_ORDER+1, POLY_ORDER+1))
# Timeline
# We don't know the precise start and stop time of each burst (as in
# the times of first and last pulses), so we use the min and max COA
# time, which is a closer approximation than the min and max zero
# Doppler times. At least COA times will not overlap between bursts.
if numbursts > 0:
# STRIPMAP case uses another time origin different from first zero Doppler time
time_offset = timecoa_sampled.min()
burst_meta.Timeline.CollectStart = start + datetime.timedelta(seconds=time_offset)
burst_meta.Timeline.CollectDuration = (timecoa_sampled.max() -
timecoa_sampled.min())
# Adjust all SICD fields that were dependent on start time
# Time is output of polynomial:
burst_meta.Grid.TimeCOAPoly[0, 0] = \
burst_meta.Grid.TimeCOAPoly[0, 0] - time_offset
burst_meta.RMA.INCA.TimeCAPoly[0] = \
burst_meta.RMA.INCA.TimeCAPoly[0] - time_offset
# Time is input of polynomial:
burst_meta.Position.ARPPoly.X = \
_polyshift(burst_meta.Position.ARPPoly.X, time_offset)
burst_meta.Position.ARPPoly.Y = \
_polyshift(burst_meta.Position.ARPPoly.Y, time_offset)
burst_meta.Position.ARPPoly.Z = \
_polyshift(burst_meta.Position.ARPPoly.Z, time_offset)
burst_meta.Timeline.IPP.Set.TStart = 0
burst_meta.Timeline.IPP.Set.TEnd = burst_meta.Timeline.CollectDuration
burst_meta.Timeline.IPP.Set.IPPStart = int(0)
burst_meta.Timeline.IPP.Set.IPPEnd = \
            int(np.floor(burst_meta.Timeline.CollectDuration * prf))
import os
import random
import itertools
import numpy as np
import collections
import matplotlib.pyplot as plt
from collections import Counter
from itertools import chain
from bisect import bisect_right, bisect_left
from reclist.current import current
def statistics(x_train, y_train, x_test, y_test, y_pred):
train_size = len(x_train)
test_size = len(x_test)
# num non-zero preds
num_preds = len([p for p in y_pred if p])
return {
'training_set__size': train_size,
'test_set_size': test_size,
'num_non_null_predictions': num_preds
}
def sample_hits_at_k(y_preds, y_test, x_test=None, k=3, size=3):
hits = []
for idx, (_p, _y) in enumerate(zip(y_preds, y_test)):
if _y[0] in _p[:k]:
hit_info = {
'Y_TEST': [_y[0]],
'Y_PRED': _p[:k],
}
if x_test:
hit_info['X_TEST'] = [x_test[idx][0]]
hits.append(hit_info)
if len(hits) < size or size == -1:
return hits
return random.sample(hits, k=size)
def sample_misses_at_k(y_preds, y_test, x_test=None, k=3, size=3):
misses = []
for idx, (_p, _y) in enumerate(zip(y_preds, y_test)):
if _y[0] not in _p[:k]:
miss_info = {
'Y_TEST': [_y[0]],
'Y_PRED': _p[:k],
}
if x_test:
miss_info['X_TEST'] = [x_test[idx][0]]
misses.append(miss_info)
if len(misses) < size or size == -1:
return misses
return random.sample(misses, k=size)
def sample_all_misses_at_k(y_preds, y_test, x_test=None, k=3, size=3):
misses = []
for idx, (_p, _y) in enumerate(zip(y_preds, y_test)):
missing_y = [item for item in _y if item not in _p[:k]]
if missing_y:
miss_info = {
'Y_TEST': missing_y,
'Y_PRED': _p[:k],
}
if x_test:
miss_info['X_TEST'] = [x_test[idx][0]]
misses.append(miss_info)
if len(misses) < size or size == -1:
return misses
return random.sample(misses, k=size)
def hit_rate_at_k_nep(y_preds, y_test, k=3):
y_test = [[k] for k in y_test]
return hit_rate_at_k(y_preds, y_test, k=k)
def hit_rate_at_k(y_preds, y_test, k=3):
hits = 0
for _p, _y in zip(y_preds, y_test):
if len(set(_p[:k]).intersection(set(_y))) > 0:
hits += 1
return hits / len(y_test)
def mrr_at_k_nep(y_preds, y_test, k=3):
"""
Computes MRR
:param y_preds: predictions, as lists of lists
    :param y_test: target data, as lists of lists (e.g. [[sku1], [sku2], ...])
:param k: top-k
"""
y_test = [[k] for k in y_test]
return mrr_at_k(y_preds, y_test, k=k)
def mrr_at_k(y_preds, y_test, k=3):
"""
Computes MRR
:param y_preds: predictions, as lists of lists
    :param y_test: target data, as lists of lists (e.g. [[sku1], [sku2], ...])
:param k: top-k
"""
rr = []
for _p, _y in zip(y_preds, y_test):
for rank, p in enumerate(_p[:k], start=1):
if p in _y:
rr.append(1 / rank)
break
else:
rr.append(0)
assert len(rr) == len(y_preds)
return np.mean(rr)
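# Worked example (illustrative only, not part of the original metrics module): with a
# single test session whose target is item 'B' and predictions ['A', 'B', 'C'],
# hit_rate_at_k returns 1.0 (the target appears in the top-3) and mrr_at_k returns 0.5
# (the target is first found at rank 2).
def _example_hit_rate_and_mrr():
    y_preds = [['A', 'B', 'C']]
    y_test = [['B']]
    assert hit_rate_at_k(y_preds, y_test, k=3) == 1.0
    assert mrr_at_k(y_preds, y_test, k=3) == 0.5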
def coverage_at_k(y_preds, product_data, k=3):
pred_skus = set(itertools.chain.from_iterable(y_preds[:k]))
all_skus = set(product_data.keys())
nb_overlap_skus = len(pred_skus.intersection(all_skus))
return nb_overlap_skus / len(all_skus)
def popularity_bias_at_k(y_preds, x_train, k=3):
# estimate popularity from training data
pop_map = collections.defaultdict(lambda : 0)
num_interactions = 0
for session in x_train:
for event in session:
pop_map[event] += 1
num_interactions += 1
# normalize popularity
pop_map = {k:v/num_interactions for k,v in pop_map.items()}
all_popularity = []
for p in y_preds:
average_pop = sum(pop_map.get(_, 0.0) for _ in p[:k]) / len(p) if len(p) > 0 else 0
all_popularity.append(average_pop)
return sum(all_popularity) / len(y_preds)
def precision_at_k(y_preds, y_test, k=3):
precision_ls = [len(set(_y).intersection(set(_p[:k]))) / len(_p) if _p else 1 for _p, _y in zip(y_preds, y_test)]
return np.average(precision_ls)
def recall_at_k(y_preds, y_test, k=3):
recall_ls = [len(set(_y).intersection(set(_p[:k]))) / len(_y) if _y else 1 for _p, _y in zip(y_preds, y_test)]
return np.average(recall_ls)
def ndcg_at_k(y_preds, y_test, k=3):
import sklearn.metrics
results = list(reversed(list(range(1, k+1))))
user_ndcgs = []
for _p, _y in zip(y_preds, y_test):
relevance = []
for j in _p[:k]:
if j in _y:
relevance.append(1)
else:
relevance.append(0)
# 0 pad relevance to k if there are fewer than k predictions
if len(relevance) < k:
relevance += [0]*(k-len(relevance))
user_ndcgs.append(sklearn.metrics.ndcg_score([relevance], [results]))
return np.average(np.asarray(user_ndcgs))
def ndcg_at_k_user_differential(y_preds, y_test, y_test_full, k=3,
user_feature='gender'):
pred_breakdown, test_breakdown = _breakdown_preds_by_user_feature(y_test_full, y_preds, y_test,
user_feature=user_feature)
return _apply_func_to_breakdown(ndcg_at_k, pred_breakdown, test_breakdown, k=k)
def _breakdown_preds_by_user_feature(y_test, y_preds, y_test_ids, user_feature='gender'):
from collections import defaultdict
pred_breakdown = defaultdict(list)
test_breakdown = defaultdict(list)
for _t, _p, _t_ids in zip(y_test, y_preds, y_test_ids):
target_user_feature = _t[0][user_feature]
if not target_user_feature:
target_user_feature = 'unknown'
pred_breakdown[target_user_feature].append(_p)
test_breakdown[target_user_feature].append(_t_ids)
return pred_breakdown, test_breakdown
def _apply_func_to_breakdown(func, pred_breakdown, test_breakdown, *args, **kwargs):
retval = {}
for key in sorted(pred_breakdown.keys()):
retval[key] = func(pred_breakdown[key], test_breakdown[key], *args, **kwargs)
return retval
def rec_items_distribution_at_k(y_preds, k=3, bin_width=100, debug=True):
def _calculate_hist(frequencies, bins):
""" Works out the counts of items in each bucket """
counts_per_bin = Counter([bisect_right(bins, item) - 1 for item in frequencies])
counts_per_bin_list = list(counts_per_bin.items())
empty_bins_indices = [ele for ele in np.arange(len(bins) - 1) if ele not in [
index for index, _ in counts_per_bin_list
]]
counts_per_bin_list.extend([(index, 0) for index in empty_bins_indices])
counts_per_bin_sorted = sorted(counts_per_bin_list, key=lambda x: x[0], reverse=False)
return [y for _, y in counts_per_bin_sorted]
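    # Worked example for _calculate_hist (hypothetical numbers): with
    # frequencies = [1, 5, 120] and bins = [1, 101, 201] (bin_width=100),
    # bisect_right places 1 and 5 in bucket 0 and 120 in bucket 1; no bucket is
    # empty, so the returned counts are [2, 1].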
def _format_results(bins, counts_per_bin):
""" Formatting results """
results = {}
for index, (_, count) in enumerate(zip(bins, counts_per_bin)):
if bins[index] != bins[index + 1]:
results[str(bins[index]) + '-' + str(bins[index + 1] - 1)] = count
return results
# estimate frequency of recommended items in predictions
reduce_at_k_preds = [preds[:k] for preds in y_preds]
counts = Counter(chain.from_iterable(reduce_at_k_preds))
frequencies = list(counts.values())
# fixed bin size
bins = np.arange(np.min(frequencies), np.max(frequencies) + bin_width, bin_width)
counts_per_bin_sorted = _calculate_hist(frequencies, bins)
# log bin size
    log_bins = np.logspace(np.log10(bins[0]), np.log10(bins[-1]), len(bins))
import os, sys
import pandas as pd
import numpy as np
import simpledbf
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import arcpy
from arcpy import env
from arcpy.sa import *
try:
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + "\\.site_packages\\riverpy\\")
import config
import fGlobal as fGl
except:
print("ExceptionERROR: Missing RiverArchitect packages (riverpy).")
# Eco series analysis - SHArea
# This python script (1)
# Before you run this code, copy the Excel files from the SHArea folder into a new folder named after the case ("case_name")
#########################
# User defined variables
case_name = "VanillaC4"
fish_periods = ["chsp"] #fish_periods = ["chju", "raju", "raad"]
timeseries_path = "../00_Flows/" + case_name + "/flow_series_" + case_name + ".xlsx"
figure_path = "../SHArC/SHArea/" + case_name + "/"
interptype = 'linear'
scale_to_one = 0
#########################
ind = 0
colors = ["tab:blue", "tab:orange", "tab:green"]
for fish_period in fish_periods:
fish_name = fish_period[0:2]
period = fish_period[2:4]
if fish_name == 'ch':
fish_full = 'Chinook Salmon'
elif fish_name == 'ra':
fish_full = 'Rainbow / Steelhead Trout'
if period == 'sp':
period_full = 'spawning'
elif period == 'ju':
period_full = 'juvenile'
elif period == 'ad':
period_full = 'adult'
fish_period_full = fish_full + ' - ' + period_full
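    # The same mapping could be written with lookup dicts (sketch only, not used below):
    #   FISH_FULL = {'ch': 'Chinook Salmon', 'ra': 'Rainbow / Steelhead Trout'}
    #   PERIOD_FULL = {'sp': 'spawning', 'ju': 'juvenile', 'ad': 'adult'}
    #   fish_period_full = FISH_FULL[fish_name] + ' - ' + PERIOD_FULL[period]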
sharea_path = "../SHArC/SHArea/" + case_name + "/" + case_name + "_sharea_" + fish_name + period + ".xlsx"
######################
# Reading SHARrC data
f1 = pd.read_excel(sharea_path, index_col=None, header=None,usecols="B")[3:].values.tolist()
f2 = pd.read_excel(sharea_path, index_col=None, header=None,usecols="F")[3:].values.tolist()
Flow = np.array(f1).transpose()[0]
CalArea = np.array(f2).transpose()[0]
Flow = np.append(Flow, [0])
CalArea = np.append(CalArea, [0])
######################
# Bankfull wetted area
env.workspace = os.path.abspath("../SHArC/HSI/" + case_name)
BfQ_hsi = "dsi_" + fish_period + fGl.write_Q_str(Flow[0]) + ".tif"
# Check out the ArcGIS Spatial Analyst extension license
arcpy.CheckOutExtension("Spatial")
# Execute ExtractValuesToPoints
rasters = arcpy.ListRasters("*", "tif")
for raster in rasters:
if raster == BfQ_hsi:
print(raster)
outRas = Raster(BfQ_hsi) > -1
outPolygons = "BfQ_polygon.shp"
arcpy.RasterToPolygon_conversion(outRas, outPolygons)
# Set local variables
inZoneData = outPolygons
zoneField = "id"
inClassData = outPolygons
classField = "id"
outTable = "BfQ_polygon_table.dbf"
processingCellSize = 0.01
# Execute TabulateArea
TabulateArea(inZoneData, zoneField, inClassData, classField, outTable,
processingCellSize, "CLASSES_AS_ROWS")
BfQ_area_dbf = simpledbf.Dbf5(env.workspace + '\\' + outTable)
BfQ_partial_area = BfQ_area_dbf.to_dataframe()
    BfQ_area = np.sum(np.array(BfQ_partial_area['Area']))
import os
import numpy as np
import time
import argparse
import sys
from math import ceil
from random import Random
import time
import random
import torch
import torch.distributed as dist
import torch.utils.data.distributed
import torch.nn as nn
import torch.nn.functional as F
from torch.multiprocessing import Process
import torchvision
from torchvision import datasets, transforms
import torch.backends.cudnn as cudnn
import torchvision.models as models
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
import datetime
import LocalSGD as optim
import util_v4 as util
from comm_helpers import SyncAllreduce, SyncAllreduce_1, SyncAllreduce_2
import os
from scipy.io import loadmat
import json
from scipy import io
from dataset.cifar import get_cifar10, get_emnist, get_svhn
from torch.optim.lr_scheduler import LambdaLR
import math
parser = argparse.ArgumentParser(description='CIFAR-10 baseline')
parser.add_argument('--name','-n',
default="default",
type=str,
help='experiment name, used for saving results')
parser.add_argument('--backend',
default="nccl",
type=str,
                    help='distributed communication backend (e.g., nccl or gloo)')
parser.add_argument('--GPU_list',
default='0',
type=str,
help='gpu list')
parser.add_argument('--dataset',
default="cifar10",
type=str,
help='dataset name')
parser.add_argument('--model',
default="res_gn",
type=str,
help='neural network model')
parser.add_argument('--alpha',
default=0.2,
type=float,
help='alpha')
parser.add_argument('--gmf',
default=0,
type=float,
help='global momentum factor')
parser.add_argument('--lr',
default=0.1,
type=float,
help='learning rate')
parser.add_argument('--basicLabelRatio',
default=0.4,
type=float,
help='basicLabelRatio')
parser.add_argument('--bs',
default=64,
type=int,
help='batch size on each worker')
parser.add_argument('--epoch',
default=300,
type=int,
help='total epoch')
parser.add_argument('--cp',
default=8,
type=int,
help='communication period / work per clock')
parser.add_argument('--print_freq',
default=100,
type=int,
help='print info frequency')
parser.add_argument('--rank',
default=0,
type=int,
help='the rank of worker')
parser.add_argument('--size',
default=8,
type=int,
help='number of workers')
parser.add_argument('--seed',
default=1,
type=int,
help='random seed')
parser.add_argument('--num_comm_ue',
default=11,
type=int,
help='communication user number')
parser.add_argument('--iid',
default=1,
type=int,
help='iid')
parser.add_argument('--class_per_device',
default=1,
type=int,
help='class_per_device')
parser.add_argument('--labeled',
default=0,
type=int,
help='labeled all data')
parser.add_argument('--H',
default=0,
type=int,
help='whether use hierarchical method')
parser.add_argument('--save', '-s',
action='store_true',
help='whether save the training results')
parser.add_argument('--ip_address',
default="10.129.2.142",
type=str,
help='ip_address')
parser.add_argument('--master_port',
default="29021",
type=str,
help='master port')
parser.add_argument('--experiment_name',
default="Major1_setting1",
type=str,
help='name of this experiment')
parser.add_argument('--k-img', default=65536, type=int, ### 65536
help='number of examples')
parser.add_argument('--num_data_server', default=1000, type=int,
help='number of samples in server')
parser.add_argument('--num-data-server', default=1000, type=int,
help='number of labeled examples in server')
parser.add_argument('--num-devices', default=10, type=int,
help='num of devices')
args = parser.parse_args()
def get_cosine_schedule_with_warmup(optimizer,
num_warmup_steps,
num_training_steps,
num_cycles=7./16.,
last_epoch=-1,lr_weight=1):
def _lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
no_progress = float(current_step - num_warmup_steps) / \
float(max(1, num_training_steps - num_warmup_steps))
num_cycles = 7.0/16.0*(1024*1024 - num_warmup_steps)/(1024*200 - num_warmup_steps)
return max(0.00000, math.cos(math.pi * num_cycles * no_progress))
return LambdaLR(optimizer, _lr_lambda, last_epoch)
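# The scheduler above scales the base LR: a linear ramp from 0 over num_warmup_steps,
# then a cosine decay clipped at 0 (note that _lr_lambda overrides the num_cycles
# argument with a hard-coded expression). Minimal usage sketch (the optimizer and step
# counts below are assumptions, not taken from this script):
#   scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=0,
#                                               num_training_steps=1024 * 200)
#   for step in range(num_training_steps):
#       optimizer.step(); scheduler.step()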
######### Assign Ranks to different GPUs
GRU_list = [i for i in args.GPU_list]
if args.H:
increase_tmp = args.size//len(GRU_list)
else:
increase_tmp = (args.size+1)//len(GRU_list)
ranks_list = np.arange(0,args.size).tolist()
rank_group = []
for rank_id in range(len(GRU_list)):
if rank_id == len(GRU_list)-1:
ranks = ranks_list[rank_id*increase_tmp:]
else:
ranks = ranks_list[rank_id*increase_tmp:(rank_id+1)*increase_tmp]
rank_group.append(ranks)
for group_id in range(len(GRU_list)):
if args.rank in set(rank_group[group_id]):
os.environ["CUDA_VISIBLE_DEVICES"] = GRU_list[group_id]
break
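# Worked example of the rank-to-GPU assignment above (hypothetical values): with
# args.size = 8, args.H = 1 and GPU_list = '01', increase_tmp = 4, so rank_group
# becomes [[0, 1, 2, 3], [4, 5, 6, 7]]; a process with rank 5 therefore sets
# CUDA_VISIBLE_DEVICES to '1'.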
device = 'cuda' if torch.cuda.is_available() else 'cpu'
DATASET_GETTERS = {'cifar10': get_cifar10, 'emnist': get_emnist, 'svhn':get_svhn}
### generate the index of the server dataset and the device dataset
if args.iid:
path_device_idxs = f'{args.dataset}_post_data/iid/{args.size - 1 - args.H}_{args.num_data_server}'
else:
path_device_idxs = f'{args.dataset}_post_data/noniid/{args.size - 1 - args.H}_{args.num_data_server}_{args.class_per_device}_{args.basicLabelRatio}'
if args.dataset == 'emnist':
if args.iid:
path_device_idxs = f'{args.dataset}_post_data/iid/{47}_{args.num_data_server}'
else:
path_device_idxs = f'{args.dataset}_post_data/noniid/{47}_{args.num_data_server}_{args.class_per_device}_{args.basicLabelRatio}'
device_ids = np.load(path_device_idxs + '/device_idxs' + '.npy', allow_pickle=True).item()
server_idxs = np.load(path_device_idxs + '/server_idxs' + '.npy', allow_pickle=True).item()
device_ids = device_ids['device_idxs']
server_idxs = server_idxs['server_idxs']
if args.num_comm_ue < args.size - 1 - args.H:
    ue_list_epoches = np.load(path_device_idxs + '/ue_list_epoch' + '.npy', allow_pickle=True)
"""Script for sampling COV, burstiness and memory coeficient, and
their uncertainties, on many faults and plotting them
<NAME>
University of Otago
2020
"""
import os, sys
import ast
from glob import glob
from operator import itemgetter
from re import finditer
import numpy as np
from scipy.optimize import curve_fit
from scipy.odr import Model, RealData, ODR
import scipy.odr.odrpack as odrpack
from scipy.stats import expon, gamma, weibull_min, ks_2samp, kstest
# !!! Dangerous hack to swap Weibull for gamma
#from scipy.stats import weibull_min as gamma #
# !!!
from matplotlib import pyplot
from matplotlib.patches import PathPatch
import matplotlib.gridspec as gridspec
from matplotlib.ticker import FormatStrFormatter
from scipy.stats import binom, kde
from adjustText import adjust_text
from QuakeRates.dataman.event_dates import EventSet
from QuakeRates.dataman.parse_oxcal import parse_oxcal
from QuakeRates.dataman.parse_age_sigma import parse_age_sigma
from QuakeRates.dataman.parse_params import parse_param_file, \
get_event_sets, file_len
from QuakeRates.utilities.bilinear import bilinear_reg_zero_slope, \
bilinear_reg_fix, bilinear_reg_fix_zero_slope
from QuakeRates.utilities.memory_coefficient import burstiness, memory_coefficient
filepath = '../params'
param_file_list = glob(os.path.join(filepath, '*.txt'))
param_file_list_NZ = ['Akatore_TaylorSilva_2019.txt',
'AlpineHokuriCk_Berryman_2012_simple.txt',
'AlpineSouthWestland_Cochran_2017_simple.txt',
'AwatereEast_Nicol_2016_simple.txt',
'ClarenceEast_Nicol_2016_simple.txt',
'CloudyFault_Nicol_2016_simple.txt',
'Dunstan_GNS_unpub_simple.txt',
'HopeConway_Hatem_2019_simple.txt',
'Hope_Khajavi_2016_simple.txt',
'Ihaia_Nicol_2016_simple.txt',
'Oaonui_Nicol_2016_simple.txt',
'Ohariu_Nicol_2016_simple.txt',
'Paeroa_Nicol_2016_simple.txt',
'Pihama_Nicol_2016_simple.txt',
'PortersPassEast_Nicol_2016_simple.txt',
'Ngakuru_Nicol_2016_simple.txt',
'Mangatete_Nicol_2016_simple.txt',
'Rangipo_Nicol_2016_simple.txt',
'Rotoitipakau_Nicol_2016_simple.txt',
'Rotohauhau_Nicol_2016_simple.txt',
'Snowden_Nicol_2016_simple.txt',
'Vernon_Nicol_2016_simple.txt',
'WairarapaSouth_Nicol_2016_simple.txt',
'Wairau_Nicol_2018_simple.txt',
'Waimana_Nicol_2016_simple.txt',
'Wellington_Langridge_2011_simple.txt',
'Waitangi_GNS_unpub_simple.txt',
'Whakatane_Nicol_2016_simple.txt',
'Whirinaki_Nicol_2016_simple.txt']
# List of faults in study by Williams et al 2019
# Note this is not entirely the same, as there are some records from
# that study that are not included in ours.
param_file_list_W = ['AlpineHokuriCk_Berryman_2012_simple.txt',
'HaywardTysons_Lienkaemper_2007_simple.txt',
'SanJacintoMysticLake_Onderdonk_2018_simple.txt',
'NorthAnatolianElmacik_Fraser_2010_simple.txt',
'SanAndreasWrightwood_Weldon_2004_simple.txt',
'SanAndreasCarizzo_Akciz_2010_simple.txt',
'SanJacintoHogLake_Rockwell_2015_simple.txt',
'SanAndreasMissionCk_Fumal_2002_simple.txt',
'SanAndreasPalletCk_Scharer_2011_simple.txt',
'Xorkoli_Altyn_Tagh_Yuan_2018.txt',
'NorthAnatolianYaylabeli_Kozaci_2011_simple.txt',
'ElsinoreTemecula_Vaughan_1999_simple.txt',
'DeadSeaJordan_Ferry_2011_simple.txt',
'SanAndreasBigBend_Scharer_2017_simple.txt',
'WasatchBrigham_McCalpin_1996_simple.txt',
'Irpinia_Pantosti_1993_simple.txt',
'WasatchWeber_Duross_2011_simple.txt',
'WasatchNilphi_Duross_2017_simple.txt',
'LomaBlanca_Williams_2017_simple.txt',
'AlaskaPWSCopper_Plafker_1994_simple.txt',
'NankaiTrough_Hori_2004_simple.txt',
'CascadiaNth_Adams_1994_simple.txt',
'CascadiaSth_Goldfinger_2003_simple.txt',
'JavonCanyon_SarnaWojicki_1987_simple.txt',
'NewGuinea_Ota_1996_simple.txt',
'ChileMargin_Moernaut_2018_simple.txt']
#param_file_list = []
#for f in param_file_list_NZ:
#for f in param_file_list_W:
# param_file_list.append(os.path.join(filepath, f))
n_samples = 10000 # Number of Monte Carlo samples of the eq chronologies
half_n = int(n_samples/2)
print(half_n)
annotate_plots = False # If True, label each fault on the plot
plot_folder = './plots'
if not os.path.exists(plot_folder):
os.makedirs(plot_folder)
# Define subset to take
#faulting_styles = ['Reverse']
#faulting_styles = ['Normal']
#faulting_styles = ['Strike_slip']
faulting_styles = ['all']
tectonic_regions = ['all']
#tectonic_regions = ['Intraplate_noncratonic', 'Intraplate_cratonic', 'Near_plate_boundary']
#tectonic_regions = ['Plate_boundary_master', 'Plate_boundary_network']
#tectonic_regions = ['Plate_boundary_network', 'Near_plate_boundary']
#tectonic_regions = ['Plate_boundary_master']
#tectonic_regions = ['Subduction']
#tectonic_regions = ['Near_plate_boundary']
min_number_events = 5 # Use for all other calculations.
min_num_events_mem = 6 # Use for memory coefficient
#Summarise for comment to add to figure filename
fig_comment = ''
#fig_comment = 'NZ_examples_'
#fig_comment = 'Williams2019_'
for f in faulting_styles:
fig_comment += f
fig_comment += '_'
for t in tectonic_regions:
fig_comment += t
fig_comment += '_'
fig_comment += str(min_number_events)
#fig_comment += 'test_add_event_data'
def piecewise_linear(x, x0, y0, k1, k2):
return np.piecewise(x, [x < x0], [lambda x:k1*x + y0-k1*x0, lambda x:k2*x + y0-k2*x0])
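# piecewise_linear is a two-segment (bilinear) model: slope k1 for x < x0 and slope
# k2 for x >= x0, with the two segments meeting at the breakpoint (x0, y0). For
# example (hypothetical numbers), x0=2, y0=1, k1=0, k2=3 gives f(1)=1 and f(4)=7.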
def camel_case_split(identifier):
matches = finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
return [m.group(0) for m in matches]
plot_colours = []
all_ie_times = []
added_events = [] # Store names of records where we've added an event due to
# exceptionally long current open interval
covs = []
cov_bounds = []
burstinesses = []
burstiness_bounds = []
burstiness_stds = []
burstinesses_expon = []
burstinesses_gamma = []
ie_gamma_alpha = []
memory_coefficients = []
memory_bounds = []
memory_stds = []
memory_spearman_coefficients = []
memory_spearman_bounds = []
memory_spearman_lag2_coef = []
memory_spearman_lag2_bounds = []
long_term_rates = []
long_term_rate_stds = []
slip_rates = []
slip_rate_stds = []
slip_rate_bounds = []
max_interevent_times = []
min_interevent_times = []
min_paired_interevent_times = []
std_min_paired_interevent_times = []
std_min_interevent_times = []
std_max_interevent_times = []
max_interevent_times_bounds = []
min_interevent_times_bounds = []
min_paired_interevent_times_bounds = []
ratio_min_pair_max = []
ratio_min_max = []
std_ratio_min_pair_max = []
std_ratio_min_max = []
ratio_min_pair_max_bounds =[]
ratio_min_max_bounds = []
names, event_sets, event_certainties, num_events, tect_regions, fault_styles = \
get_event_sets(param_file_list, tectonic_regions,
faulting_styles, min_number_events)
references = []
# Get citations for each dataset from filename
for s in param_file_list:
sp = s.split('_')
if sp[0].split('/')[2] in names:
references.append(sp[1] + ' ' + sp[2])
n_faults = len(names)
print('Number of faults', n_faults)
for i, event_set in enumerate(event_sets):
    # Handle cases with an uncertain number of events. Where event identification
    # is unsure, event_certainty is given a value of 0, compared with 1 for
    # certain events.
# First generate chronologies assuming all events are certain
# event_set.name = names[i]
event_set.gen_chronologies(n_samples, observation_end=2020, min_separation=1)
event_set.calculate_cov()
event_set.cov_density()
event_set.memory_coefficient()
event_set.memory_spearman_rank_correlation()
# Store all inter-event times for global statistics
all_ie_times.append(event_set.interevent_times)
# Now calculate some statistics on the sampled chronologies
event_set.basic_chronology_stats()
# Plot histogram of interevent times
figfile = os.path.join(plot_folder, ('interevent_times_%s.png' % names[i]))
event_set.plot_interevent_time_hist(fig_filename=figfile)
    # Fit gamma distribution to event set data
event_set.fit_gamma()
ie_gamma_alpha.append(event_set.mean_gamma_alpha_all) # Get mean estimate of alpha
min_paired_interevent_times.append(event_set.mean_minimum_pair_interevent_time)
max_interevent_times.append(event_set.mean_maximum_interevent_time)
min_interevent_times.append(event_set.mean_minimum_interevent_time)
std_min_paired_interevent_times.append(event_set.std_minimum_pair_interevent_time)
std_min_interevent_times.append(event_set.std_minimum_interevent_time)
std_max_interevent_times.append(event_set.std_maximum_interevent_time)
if event_set.std_maximum_interevent_time == 0:
print('Zero std_maximum_interevent_time for ', names[i])
slip_rates.append(event_set.slip_rates[0])
slip_rate_bounds.append([event_set.slip_rates[1], event_set.slip_rates[2]])
slip_rate_stds.append(abs(np.log10(event_set.slip_rates[2]) - \
np.log10(event_set.slip_rates[1]))/4) # Approx from 95% intervals
max_interevent_times_bounds.append([abs(event_set.mean_maximum_interevent_time -
event_set.maximum_interevent_time_lb),
abs(event_set.mean_maximum_interevent_time -
event_set.maximum_interevent_time_ub)])
min_interevent_times_bounds.append([abs(event_set.mean_minimum_interevent_time -
event_set.minimum_interevent_time_lb),
abs(event_set.mean_minimum_interevent_time -
event_set.minimum_interevent_time_ub)])
min_paired_interevent_times_bounds.append([abs(event_set.mean_minimum_pair_interevent_time -
event_set.minimum_pair_interevent_time_lb),
abs(event_set.mean_minimum_pair_interevent_time -
event_set.minimum_pair_interevent_time_ub)])
ratio_min_pair_max.append(event_set.mean_ratio_min_pair_max)
ratio_min_max.append(event_set.mean_ratio_min_max)
std_ratio_min_pair_max.append(event_set.std_ratio_min_pair_max)
std_ratio_min_max.append(event_set.std_ratio_min_max)
ratio_min_pair_max_bounds.append([abs(event_set.mean_ratio_min_pair_max -
event_set.ratio_min_pair_max_lb),
abs(event_set.mean_ratio_min_pair_max -
event_set.ratio_min_pair_max_ub)])
ratio_min_max_bounds.append([abs(event_set.mean_ratio_min_max -
event_set.ratio_min_max_lb),
abs(event_set.mean_ratio_min_max -
event_set.ratio_min_max_ub)])
# Generate random exponentially and gamma distributed samples of length num_events - 1
# i.e. the number of inter-event times in the chronology. These will be used
# later for testing
    scale = 100 # Fix scale, as burstiness is independent of scale for the exponential distribution
ie_times_expon = expon(scale=scale).rvs(size=(n_samples*(event_set.num_events-1)))
ie_times_expon = np.reshape(np.array(ie_times_expon), (n_samples, (event_set.num_events-1)))
ie_times_expon_T = ie_times_expon.T
burst_expon = burstiness(ie_times_expon_T)
# Gamma
alpha_g = 2.3 #2.2 #1.6 ##2.35 #2.4 #2.0
ie_times_g = gamma(alpha_g, scale=scale).rvs(size=(n_samples*(event_set.num_events-1)))
ie_times_g = np.reshape(np.array(ie_times_g), (n_samples, (event_set.num_events-1)))
ie_times_g_T = ie_times_g.T
burst_g = burstiness(ie_times_g_T)
# Now generate chronologies assuming uncertain events did not occur
if sum(event_certainties[i]) < event_set.num_events:
indices = np.where(event_certainties[i] == 1)
indices = list(indices[0])
# print(indices[0], type(indices))
events_subset = list(itemgetter(*indices)(event_set.event_list))
event_set_certain = EventSet(events_subset)
event_set_certain.name = names[i]
event_set_certain.gen_chronologies(n_samples, observation_end=2019, min_separation=1)
event_set_certain.calculate_cov()
event_set_certain.cov_density()
event_set_certain.basic_chronology_stats()
event_set_certain.memory_coefficient()
event_set_certain.memory_spearman_rank_correlation()
# Generate random exponentially distributed samples of length num_events - 1
# i.e. the number of inter-event times in the chronology. These will be used
# later for testing
ie_times_expon_certain = expon(scale=scale).rvs(size=(n_samples*(len(indices)-1)))
ie_times_expon_certain = np.reshape(np.array(ie_times_expon_certain), (n_samples, (len(indices)-1)))
ie_times_expon_certain_T = ie_times_expon_certain.T
burst_expon_certain = burstiness(ie_times_expon_certain_T)
        ie_times_g_certain = gamma(alpha_g, scale=scale).rvs(size=(n_samples*(len(indices)-1)))
        ie_times_g_certain = np.reshape(np.array(ie_times_g_certain), (n_samples, (len(indices)-1)))
        ie_times_g_certain_T = ie_times_g_certain.T
        burst_g_certain = burstiness(ie_times_g_certain_T)
        # Now combine results from certain chronologies with uncertain ones
combined_covs = np.concatenate([event_set.covs[:half_n],
event_set_certain.covs[:half_n]])
combined_burstiness = np.concatenate([event_set.burstiness[:half_n],
event_set_certain.burstiness[:half_n]])
combined_memory = np.concatenate([event_set.mem_coef[:half_n],
event_set_certain.mem_coef[:half_n]])
combined_memory_spearman = np.concatenate([event_set.rhos[:half_n],
event_set_certain.rhos[:half_n]])
combined_memory_spearman_lag2 = np.concatenate([event_set.rhos2[:half_n],
event_set_certain.rhos2[:half_n]])
combined_burst_expon = np.concatenate([burst_expon[:half_n],
burst_expon_certain[:half_n]])
combined_burst_g = np.concatenate([burst_g[:half_n],
burst_g_certain[:half_n]])
covs.append(combined_covs)
burstinesses.append(combined_burstiness)
memory_coefficients.append(combined_memory)
memory_stds.append(np.std(np.array(combined_memory)))
memory_spearman_coefficients.append(combined_memory_spearman)
memory_spearman_lag2_coef.append(combined_memory_spearman_lag2)
burstinesses_expon.append(combined_burst_expon)
burstinesses_gamma.append(combined_burst_g)
cov_bounds.append([abs(np.mean(combined_covs) - \
min(event_set.cov_lb, event_set_certain.cov_lb)),
abs(np.mean(combined_covs) - \
max(event_set.cov_ub, event_set_certain.cov_ub))])
burstiness_bounds.append([abs(np.mean(combined_burstiness) - \
min(event_set.burstiness_lb,
event_set_certain.burstiness_lb)),
abs(np.mean(combined_burstiness) - \
max(event_set.burstiness_ub,
event_set_certain.burstiness_ub))])
memory_bounds.append([abs(np.mean(combined_memory) - \
min(event_set.memory_lb,
event_set_certain.memory_lb)),
abs(np.mean(combined_memory) - \
max(event_set.memory_ub,
event_set_certain.memory_ub))])
memory_spearman_bounds.append([abs(np.mean(combined_memory_spearman) - \
min(event_set.rho_lb,
event_set_certain.rho_lb)),
abs(np.mean(combined_memory_spearman) - \
max(event_set.rho_ub,
event_set_certain.rho_ub))])
memory_spearman_lag2_bounds.append([abs(np.mean(combined_memory_spearman_lag2) - \
min(event_set.rho2_lb,
event_set_certain.rho2_lb)),
abs(np.mean(combined_memory_spearman_lag2) - \
max(event_set.rho2_ub,
event_set_certain.rho2_ub))])
# Combine, taking n/2 samples from each set
combined_ltrs = np.concatenate([event_set.long_term_rates[:half_n],
event_set_certain.long_term_rates[:half_n]])
burstiness_stds.append(np.std(combined_burstiness))
print(len(combined_ltrs))
long_term_rates.append(combined_ltrs)
long_term_rate_stds.append(np.std(combined_ltrs))
else:
covs.append(event_set.covs)
burstinesses.append(event_set.burstiness)
memory_coefficients.append(event_set.mem_coef)
memory_stds.append(np.std(np.array(event_set.mem_coef)))
memory_spearman_coefficients.append(event_set.rhos)
memory_spearman_lag2_coef.append(event_set.rhos2)
long_term_rates.append(event_set.long_term_rates)
burstinesses_expon.append(burst_expon)
burstinesses_gamma.append(burst_g)
cov_bounds.append([abs(event_set.mean_cov - event_set.cov_lb),
abs(event_set.mean_cov - event_set.cov_ub)])
burstiness_bounds.append([abs(event_set.mean_burstiness - event_set.burstiness_lb),
abs(event_set.mean_burstiness - event_set.burstiness_ub)])
memory_bounds.append([abs(event_set.mean_mem_coef - event_set.memory_lb),
abs(event_set.mean_mem_coef - event_set.memory_ub)])
memory_spearman_bounds.append([abs(event_set.mean_rho - event_set.rho_lb),
abs(event_set.mean_rho - event_set.rho_ub)])
memory_spearman_lag2_bounds.append([abs(event_set.mean_rho2 - event_set.rho2_lb),
abs(event_set.mean_rho2 - event_set.rho2_ub)])
burstiness_stds.append(event_set.std_burstiness)
        long_term_rate_stds.append(np.std(event_set.long_term_rates))
# Get colours for plotting later
if event_set.faulting_style == 'Normal':
plot_colours.append('r')
elif event_set.faulting_style == 'Reverse':
plot_colours.append('b')
elif event_set.faulting_style == 'Strike_slip':
plot_colours.append('g')
else:
plot_colours.append('k')
if event_set.add_events: # List of records where we model long open interval
added_events.append(event_set.name)
# Convert to numpy arrays and transpose where necessary
num_events = np.array(num_events)
all_ie_times = np.array(all_ie_times)
max_interevent_times = np.array(max_interevent_times)
min_interevent_times = np.array(min_interevent_times)
min_paired_interevent_times = np.array(min_paired_interevent_times)
std_max_interevent_times = np.array(std_max_interevent_times)
std_min_interevent_times = np.array(std_min_interevent_times)
std_min_paired_interevent_times = np.array(std_min_paired_interevent_times)
max_interevent_times_bounds = np.array(max_interevent_times_bounds).T
min_interevent_times_bounds = np.array(min_interevent_times_bounds).T
min_paired_interevent_times_bounds = np.array(min_paired_interevent_times_bounds).T
long_term_rates_T = np.array(long_term_rates).T
mean_ltr = np.mean(long_term_rates_T, axis = 0)
long_term_rate_stds = np.array(long_term_rate_stds)
slip_rates = np.array(slip_rates).T
slip_rate_bounds = np.array(slip_rate_bounds).T
slip_rate_stds = np.array(slip_rate_stds).T
print('Mean_ltr', mean_ltr)
std_ltr = np.std(long_term_rates_T, axis = 0)
ltr_bounds = np.array([abs(mean_ltr - (np.percentile(long_term_rates_T, 2.5, axis=0))),
abs(mean_ltr - (np.percentile(long_term_rates_T, 97.5, axis=0)))])
ratio_min_pair_max = np.array(ratio_min_pair_max)
ratio_min_max = np.array(ratio_min_max)
std_ratio_min_pair_max = np.array(std_ratio_min_pair_max)
std_ratio_min_max = np.array(std_ratio_min_max)
ratio_min_pair_max_bounds = np.array(ratio_min_pair_max_bounds).T
ratio_min_max_bounds = np.array(ratio_min_max_bounds).T
cov_bounds = np.array(cov_bounds).T
burstiness_bounds = np.array(burstiness_bounds).T
burstiness_stds = np.array(burstiness_stds)
burstiness_expon = np.array(burstinesses_expon)
burstiness_gamma = np.array(burstinesses_gamma)
inds = np.where(num_events >= min_num_events_mem)  # Keep records with at least min_num_events_mem events for memory statistics
memory_coefficients = np.array(memory_coefficients)
memory_coefficients_min = memory_coefficients[inds]
memory_stds = np.array(memory_stds)
memory_stds_min = memory_stds[inds]
memory_bounds_min = np.array(memory_bounds)[inds].T
memory_bounds = np.array(memory_bounds).T
memory_spearman_bounds = np.array(memory_spearman_bounds).T
memory_spearman_lag2_bounds = np.array(memory_spearman_lag2_bounds).T
ie_gamma_alpha = np.array(ie_gamma_alpha)
# Now plot the means and 95% error bars of COV
pyplot.clf()
ax = pyplot.subplot(111)
mean_covs = []
for i, cov_set in enumerate(covs):
mean_cov = np.mean(cov_set)
mean_covs.append(mean_cov)
colours = []
for mean_cov in mean_covs:
if mean_cov <= 0.9:
colours.append('b')
elif mean_cov > 0.9 and mean_cov <= 1.1:
colours.append('g')
else:
colours.append('r')
pyplot.errorbar(mean_ltr, mean_covs,
xerr = ltr_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr, mean_covs,
yerr = cov_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr, mean_covs, marker = 's', c=plot_colours,
s=25, zorder=2)
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_ltr[i], mean_covs[i]),
fontsize=8)
ax.set_ylim([0, 2.5])
ax.set_xlim([1./1000000, 1./40])
ax.set_xscale('log')
ax.set_xlabel('Long-term rate (events per year)')
ax.set_ylabel('COV')
figname = 'mean_cov_vs_lt_rate_%s.png' % fig_comment
pyplot.savefig(figname)
################################
# Plot burstiness against mean ltr
pyplot.clf()
ax = pyplot.subplot(111)
mean_bs = []
for i, b_set in enumerate(burstinesses):
mean_b = np.mean(b_set)
mean_bs.append(mean_b)
colours = []
for mean_b in mean_bs:
if mean_b <= -0.05:
colours.append('b')
elif mean_b > -0.05 and mean_b <= 0.05:
colours.append('g')
else:
colours.append('r')
pyplot.errorbar(mean_ltr, mean_bs,
xerr = ltr_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr, mean_bs,
yerr = burstiness_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr, mean_bs, marker = 's', c=plot_colours,
s=25, zorder=2)
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_ltr[i], mean_bs[i]),
fontsize=8)
# Add B = 0 reference line
pyplot.plot([1./1000000, 1./40], [0, 0], linestyle='dashed', linewidth=1, c='0.5')
ax.set_xscale('log')
ax.set_xlabel('Long-term rate (events per year)')
ax.set_ylabel('B')
# Now do a bi-linear fit to the data
mean_bs = np.array(mean_bs)
indices = np.flatnonzero(mean_ltr > 3e-4)
indices = indices.flatten()
indices_slow_faults = np.flatnonzero(mean_ltr <= 3e-4)
indices_slow_faults = indices_slow_faults.flatten()
# Fit fast rate faults
lf = np.polyfit(np.log10(mean_ltr[indices]),
mean_bs[indices], 1)
# Now force the fit to be a flat line
lf[0] = 0.
lf[1] = np.mean(mean_bs[indices])
std_lf = np.std(mean_bs[indices])
xvals_short = np.arange(1.5e-4, 2e-2, 1e-4)
yvals = lf[0]*np.log10(xvals_short) + lf[1]
pyplot.plot(xvals_short, yvals, c='0.2')
# Fit slow faults
if len(indices_slow_faults) > 1:
lf_slow = np.polyfit(np.log10(mean_ltr[indices_slow_faults]),
mean_bs[indices_slow_faults], 1)
xvals_short = np.arange(1e-6, 1.5e-4, 1e-6)
yvals = lf_slow[0]*np.log10(xvals_short) + lf_slow[1]
pyplot.plot(xvals_short, yvals, c='0.2')
# Add formula for linear fits of data
print('Fits for B vs LTR')
txt = 'Y = {:=+6.2f} +/- {:4.2f}'.format(lf[1], std_lf)
print(txt)
ax.annotate(txt, (2e-4, 0.2), fontsize=8)
try:
txt = 'Y = {:4.2f}Log(x) {:=+6.2f}'.format(lf_slow[0], lf_slow[1])
print(txt)
ax.annotate(txt, (1.5e-6, 0.75), fontsize=8)
except:
pass
# Now try bilinear ODR linear fit
data = odrpack.RealData(np.log10(mean_ltr), mean_bs,
sx=np.log10(long_term_rate_stds), sy=burstiness_stds)
bilin = odrpack.Model(bilinear_reg_zero_slope)
odr = odrpack.ODR(data, bilin, beta0=[-3, -1.0, -4])  # beta0 holds the starting parameter values
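# Reading of the fitted parameters below (inferred from how out.beta is used):
# beta[0] is the slope a and beta[1] the intercept b of the sloping segment in
# log10(rate) space, and beta[2] is hx, the log10 of the hinge rate above which
# the fit is held flat at ylevel = a*hx + b.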
odr.set_job(fit_type=0)
out = odr.run()
print(out.sum_square)
out.pprint()
a = out.beta[0]
b = out.beta[1]
hx = out.beta[2]
xvals = np.arange(1.e-6, 2e-2, 1e-6)
yrng = a*np.log10(xvals) + b #10**(b + a * xvals)
ylevel = a*hx + b #10**(b + a * hx)
print('ylevel', ylevel)
print(10**ylevel)
idx = xvals > 10**hx
yrng[idx] = (ylevel)
print('yrng', yrng)
print('hx', hx)
pyplot.plot(xvals, yrng, c='g')
# Bilinear fixed hinge
hxfix = np.log10(2e-4)
bilin_hxfix_cons_slope = odrpack.Model(bilinear_reg_fix_zero_slope)
odr = odrpack.ODR(data, bilin_hxfix_cons_slope, beta0=[-3, -1.0])
odr.set_job(fit_type=0)
out = odr.run()
print('bilinear hxfix_cons_slope')
print(out.sum_square)
out.pprint()
a = out.beta[0]
b = out.beta[1]
yrng = a*np.log10(xvals) + b
ylevel = a*hxfix + b
print('ylevel hxfix zero slope', ylevel)
print(10**ylevel)
idx = xvals > 10**hxfix
yrng[idx] = (ylevel)
print('yrng', yrng)
print('hx', hxfix)
pyplot.plot(xvals, yrng, c='r')
figname = 'burstiness_vs_lt_rate_%s.png' % fig_comment
pyplot.savefig(figname)
#########################
# Plot burstiness against slip rate
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(slip_rates, mean_bs,
xerr = slip_rate_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(slip_rates, mean_bs,
yerr = burstiness_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(slip_rates, mean_bs, marker = 's', c=plot_colours,
s=25, zorder=2)
ax.set_ylim([-1, 1])
ax.set_xlim([1./1000, 100])
# Add B = 0 reference line
pyplot.plot([1./1000, 100], [0, 0], linestyle='dashed', linewidth=1, c='0.5')
ax.set_xscale('log')
ax.set_xlabel('Slip rate (mm/yr)')
ax.set_ylabel('B')
# Now try linear ODR linear fit
def f(B, x):
return B[0]*x + B[1]
print(slip_rates)
print(np.log10(slip_rates))
print(slip_rate_stds)
print(np.log10(slip_rate_stds))
print(burstiness_stds)
wd = 1./np.power(burstiness_stds, 2)
print(wd)
we = 1./np.power(slip_rate_stds, 2)
print(we)
# Std dev already in log-space
data = odrpack.RealData(np.log10(slip_rates), mean_bs,
sx=np.sqrt(slip_rate_stds), sy=np.sqrt(burstiness_stds))
linear = odrpack.Model(f)
odr = odrpack.ODR(data, linear, beta0=[-1, -1.0,])
odr.set_job(fit_type=0)
out = odr.run()
out.pprint()
a = out.beta[0]
b = out.beta[1]
xvals = np.arange(1.e-4, 1e2, 1e-2)
yrng = a*np.log10(xvals) + b #10**(b + a * xvals)
pyplot.plot(xvals, yrng, c='0.6')
txt = 'Y = {:4.2f}Log(x) {:=+6.2f}'.format(a, b)
print(txt)
ax.annotate(txt, (1e0, 0.9), color='0.6')
# Now try bilinear fixed hinge
bilin = odrpack.Model(bilinear_reg_fix_zero_slope)
odr = odrpack.ODR(data, bilin, beta0=[-1, -1.0])
odr.set_job(fit_type=0)
out = odr.run()
out.pprint()
a = out.beta[0]
b = out.beta[1]
yrng = a*np.log10(xvals) + b
ylevel = a*hxfix + b
print('ylevel hxfix zero slope', ylevel)
print(10**ylevel)
idx = xvals > 10**hxfix
yrng[idx] = (ylevel)
print('yrng', yrng)
print('hx', hxfix)
pyplot.plot(xvals, yrng, c='0.2')
txt = 'Y = {:4.2f}Log(x) {:=+6.2f}, x < {:4.2f}'.format(a, b, np.power(10,hxfix))
print(txt)
ax.annotate(txt, (2e-3, 0.9), color='0.2')
txt = 'Y = {:4.2f}, x >= {:4.2f}'.format(ylevel, np.power(10,hxfix))
print(txt)
ax.annotate(txt, (1.2e-2, 0.8), color='0.2')
figname = 'burstiness_vs_slip_rate_%s.png' % fig_comment
pyplot.savefig(figname)
figname = 'burstiness_vs_slip_rate_%s.pdf' % fig_comment
pyplot.savefig(figname)
# Plot memory coefficients against long term rates
pyplot.clf()
ax = pyplot.subplot(111)
mean_mems = []
mean_ltr_mem = mean_ltr[inds]
ltr_bounds_mem = ltr_bounds.T[inds].T
for i, mem_set in enumerate(memory_coefficients):
mean_mem = np.mean(mem_set)
# print('Mean memory coefficient combined', mean_mem)
mean_mems.append(mean_mem)
mean_mems = np.array(mean_mems)
colours = []
plot_colours_mem = list(np.array(plot_colours)[inds])
for mean_mem in mean_mems:
if mean_mem <= -0.05:
colours.append('b')
elif mean_mem > -0.05 and mean_mem <= 0.05:
colours.append('g')
else:
colours.append('r')
pyplot.errorbar(mean_ltr_mem, mean_mems[inds],
xerr = ltr_bounds_mem,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr_mem, mean_mems[inds],
yerr = memory_bounds_min,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr_mem, mean_mems[inds], marker = 's', c=plot_colours_mem,
s=25, zorder=2)
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_ltr[i], mean_mems[i]),
fontsize=8)
ax.set_xlim([1./1000000, 1./40])
ax.set_xscale('log')
ax.set_xlabel('Long-term rate (events per year)')
ax.set_ylabel('M')
figname = 'memory_coefficient_vs_lt_rate_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot Spearman Rank coefficients against long term rates
pyplot.clf()
ax = pyplot.subplot(111)
mean_mems_L1 = []
for i, mem_set in enumerate(memory_spearman_coefficients):
mean_mem = np.mean(mem_set)
mean_mems_L1.append(mean_mem)
colours = []
for mean_mem in mean_mems_L1:
if mean_mem <= -0.05:
colours.append('b')
elif mean_mem > -0.05 and mean_mem <= 0.05:
colours.append('g')
else:
colours.append('r')
pyplot.errorbar(mean_ltr, mean_mems_L1,
xerr = ltr_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr, mean_mems_L1,
yerr = memory_spearman_bounds,
elinewidth=0.7,
ecolor = '0.3',
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr, mean_mems_L1, marker = 's', c=plot_colours,
s=25, zorder=2)
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_ltr[i], mean_mems_L1[i]),
fontsize=8)
ax.set_xlim([1./1000000, 1./40])
ax.set_xscale('log')
ax.set_xlabel('Long-term rate (events per year)')
ax.set_ylabel('M (Spearman Rank)')
figname = 'memory_coefficient_Spearman_vs_lt_rate_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot Spearman Rank (Lag-2) coefficients against long term rates
pyplot.clf()
ax = pyplot.subplot(111)
mean_mems_L2 = []
for i, mem_set in enumerate(memory_spearman_lag2_coef):
mean_mem = np.mean(mem_set)
mean_mems_L2.append(mean_mem)
colours = []
for mean_mem in mean_mems_L2:
if mean_mem <= -0.05:
colours.append('b')
elif mean_mem > -0.05 and mean_mem <= 0.05:
colours.append('g')
else:
colours.append('r')
pyplot.errorbar(mean_ltr, mean_mems_L2,
xerr = ltr_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr, mean_mems_L2,
yerr = memory_spearman_lag2_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr, mean_mems_L2, marker = 's', c=plot_colours,
s=25, zorder=2)
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_ltr[i], mean_mems_L2[i]),
fontsize=8)
ax.set_xlim([1./1000000, 1./40])
ax.set_xscale('log')
ax.set_xlabel('Long-term rate (events per year)')
ax.set_ylabel('M (Spearman Rank Lag-2)')
figname = 'memory_coefficient_Spearman_Lag2_vs_lt_rate_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot Spearman rank Lag-1 against Lag-2
# Plot Spearman Rank coefficients against long term rates
pyplot.clf()
ax = pyplot.subplot(111)
colours = []
for mean_mem in mean_mems_L1:
if mean_mem <= -0.05:
colours.append('b')
elif mean_mem > -0.05 and mean_mem <= 0.05:
colours.append('g')
else:
colours.append('r')
pyplot.errorbar(mean_mems_L1, mean_mems_L2,
xerr = memory_spearman_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_mems_L1, mean_mems_L2,
yerr = memory_spearman_lag2_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(mean_mems_L1, mean_mems_L2, marker = 's', c=plot_colours,
s=25, zorder=2)
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_mems_L1[i], mean_mems_L2[i]),
fontsize=8)
ax.set_xlabel('M (Spearman Rank Lag-1)')
ax.set_ylabel('M (Spearman Rank Lag-2)')
figname = 'memory_coefficient_Spearman_L1_vs_L2_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot COV against number of events to look at sampling biases
pyplot.clf()
ax = pyplot.subplot(111)
mean_covs = []
for i, cov_set in enumerate(covs):
mean_cov = np.mean(cov_set)
mean_covs.append(mean_cov)
colours = []
for mean_cov in mean_covs:
if mean_cov <= 0.9:
colours.append('b')
elif mean_cov > 0.9 and mean_cov <= 1.1:
colours.append('g')
else:
colours.append('r')
pyplot.errorbar(mean_covs, num_events,
xerr = cov_bounds,
ecolor = '0.6',
linestyle="None")
pyplot.scatter(mean_covs, num_events, marker = 's', c=plot_colours, s=25)
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_covs[i], num_events[i]),
fontsize=8)
ax.set_xlabel('COV')
ax.set_ylabel('Number of events in earthquake record')
figname = 'mean_cov_vs_number_events_%s.png' % fig_comment
pyplot.savefig(figname)
# Now plot basic statistics
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(max_interevent_times, min_interevent_times,
yerr = min_interevent_times_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(max_interevent_times, min_interevent_times,
xerr = max_interevent_times_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(max_interevent_times, min_interevent_times,
marker = 's', c=colours, s=25, zorder=2)
ax.set_xlabel('Maximum interevent time')
ax.set_ylabel('Minimum interevent time')
ax.set_xscale('log')
ax.set_yscale('log')
# Label low-slip rate faults
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(max_interevent_times[i], min_interevent_times[i]),
fontsize=8)
# Linear fit only bottom end of data
indices = np.argwhere(max_interevent_times < 10000).flatten()
indices_slow_faults = np.argwhere(max_interevent_times >= 10000).flatten()
lf = np.polyfit(np.log10(max_interevent_times[indices]),
np.log10(min_interevent_times[indices]), 1)
xvals_short = np.arange(100, 1e4, 100)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals)
# Add formula for linear fit to low-end of data
txt = 'Log(Y) = %.2fLog(x) + %.2f' % (lf[0], lf[1])
print(txt)
ax.annotate(txt, (800, 10000))
figname = 'min_vs_max_interevent_time_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot minimum pairs
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(max_interevent_times, min_paired_interevent_times,
yerr = min_paired_interevent_times_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(max_interevent_times, min_paired_interevent_times,
xerr = max_interevent_times_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(max_interevent_times, min_paired_interevent_times,
marker = 's', c=colours, s=25, zorder=2)
ax.set_xlabel('Maximum interevent time')
ax.set_ylabel('Minimum interevent time \n(mean of two shortest consecutive interevent times)')
ax.set_xscale('log')
ax.set_yscale('log')
# Label low-slip rate faults
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(max_interevent_times[i], min_paired_interevent_times[i]),
fontsize=8)
# Now fit with a regression in log-log space
xvals = np.arange(100, 2e6, 100) # For plotting
# Linear fit
lf = np.polyfit(np.log10(max_interevent_times),
np.log10(min_paired_interevent_times), 1)
log_yvals = lf[0]*np.log10(xvals) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals, yvals)
# Linear fit only bottom end of data
indices = np.argwhere(max_interevent_times < 10000).flatten()
lf = np.polyfit(np.log10(max_interevent_times[indices]),
np.log10(min_paired_interevent_times[indices]), 1)
xvals_short = np.arange(100, 1e4, 100)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals)
# Add formula for linear fit to low-end of data
txt = 'Log(Y) = %.2fLog(x) + %.2f' % (lf[0], lf[1])
print(txt)
ax.annotate(txt, (100, 10000))
# Quadratic fit
qf = np.polyfit(np.log10(max_interevent_times),
np.log10(min_paired_interevent_times), 2)
print(qf)
log_yvals = qf[0]*np.log10(xvals)**2 + qf[1]*np.log10(xvals) + qf[2]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals, yvals)
figname = 'min_pair_vs_max_interevent_time_%s.png' % fig_comment
pyplot.savefig(figname)
# Similar plots, against long term rates
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(mean_ltr, min_interevent_times,
yerr = min_interevent_times_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr, min_interevent_times,
xerr = ltr_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr, min_interevent_times,
marker='s', c=colours, s=25, zorder=2)
ax.set_xlabel('Long-term rate')
ax.set_ylabel('Minimum interevent time')
ax.set_xscale('log')
ax.set_yscale('log')
# Label low-slip rate faults
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_ltr[i], min_interevent_times[i]),
fontsize=8)
# Linear fit only bottom end of data
indices = np.argwhere(mean_ltr > 2e-4).flatten()
lf = np.polyfit(np.log10(mean_ltr[indices]),
np.log10(min_interevent_times[indices]), 1)
xvals_short = np.arange(5e-4, 1e-2, 1e-4)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals)
# Add formula for linear fit to low-end of data
txt = 'Log(Y) = %.2fLog(x) + %.2f' % (lf[0], lf[1])
ax.annotate(txt, (1e-4, 10000))
figname = 'min_interevent_time_vs_ltr_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot long term rate against minimum pair
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(mean_ltr, min_paired_interevent_times,
yerr = min_paired_interevent_times_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr, min_paired_interevent_times,
xerr = ltr_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr, min_paired_interevent_times,
marker='s', c=colours, s=25, zorder=2)
ax.set_xlabel('Long-term rate')
ax.set_ylabel('Minimum interevent time \n(mean of two shortest consecutive interevent times)')
ax.set_xscale('log')
ax.set_yscale('log')
# Label low-slip rate faults
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_ltr[i], min_paired_interevent_times[i]),
fontsize=8)
# Linear fit only bottom end of data
indices = np.argwhere(mean_ltr > 2e-4).flatten()
lf = np.polyfit(np.log10(mean_ltr[indices]),
np.log10(min_paired_interevent_times[indices]), 1)
xvals_short = np.arange(5e-4, 1e-2, 1e-4)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals)
# Add formula for linear fit to low-end of data
txt = 'Log(Y) = %.2fLog(x) + %.2f' % (lf[0], lf[1])
print(txt)
ax.annotate(txt, (1e-4, 10000))
figname = 'min_pair_vs_ltr_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot long term rate against maximum interevent time
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(mean_ltr, max_interevent_times,
yerr = max_interevent_times_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr, max_interevent_times,
xerr = ltr_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr, max_interevent_times,
marker='s', c=plot_colours, s=25, zorder=2)
ax.set_xlabel('Long-term rate')
ax.set_ylabel('Maximum interevent time')
ax.set_xscale('log')
ax.set_yscale('log')
# Label low-slip rate faults
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_ltr[i], max_interevent_times[i]),
fontsize=8)
# Linear fit only bottom end of data
indices = np.argwhere(mean_ltr > 2e-10).flatten() # All data for now
lf = np.polyfit(np.log10(mean_ltr[indices]),
                np.log10(max_interevent_times[indices]), 1)
import numpy as np
import scipy.stats
def normalize(measure: np.ndarray, data: np.ndarray) -> np.ndarray:
r"""
Normalize measure (e.g. :math:`RMSD` or :math:`MAE`) with the range of :math:`data`
:param measure: the measure which should be normalized
:param data: Tensor representing the data by which the measure should be normalized
:return: :math:`\frac{RMSD}{\text{max}(\text{data}) - \text{min}(\text{data})}`
r"""
return measure / (np.max(data) - np.min(data))
def rmsd(estim: np.ndarray, obs: np.ndarray, axis=None) -> np.ndarray:
r"""
Calculate the root of the mean squared deviation between the estimated and the observed data
:param estim: Tensor representing the estimated data
:param obs: Tensor representing the observed data
:param axis: axis to reduce
:return: :math:`\sqrt{\text{mean}{(\text{estim} - \text{obs})^2}}`
r"""
return np.sqrt(np.mean(np.square(estim - obs), axis=axis))
def mae(estim: np.ndarray, obs: np.ndarray, axis=None) -> np.ndarray:
r"""
    Calculate the mean absolute error between the estimated and the observed data
:param estim: Tensor representing the estimated data
:param obs: Tensor representing the observed data
:param axis: axis to reduce
:return: :math:`\text{mean}{|\text{estim} - \text{obs}|}`
r"""
return np.mean(np.abs(estim - obs), axis=axis)
def normalized_rmsd(estim: np.ndarray, obs: np.ndarray, axis=None) -> np.ndarray:
r"""
Calculate the normalized RMSD between estimated and observed data
:param estim: Tensor representing the estimated data
:param obs: Tensor representing the observed data
:param axis: axis to reduce
:return: :math:`\frac{RMSD}{\text{max}(\text{obs}) - \text{min}(\text{obs})}`
r"""
return normalize(rmsd(estim, obs, axis=axis), obs)
def normalized_mae(estim: np.ndarray, obs: np.ndarray, axis=None) -> np.ndarray:
r"""
Calculate the normalized MAE between estimated and observed data
:param estim: Tensor representing the estimated data
:param obs: Tensor representing the observed data
:param axis: axis to reduce
:return: :math:`\frac{MAE}{\text{max}(\text{obs}) - \text{min}(\text{obs})}`
r"""
return normalize(mae(estim, obs, axis=axis), obs)
def mapd(estim: np.ndarray, obs: np.ndarray, axis=None) -> np.ndarray:
r"""
Calculate the mean absolute percentage deviation between the estimated and the observed data
:param estim: ndarray representing the estimated data
:param obs: ndarray representing the observed data
:param axis: axis to reduce
:return: :math:`\text{mean}(|\text{estim} - \text{obs}| / |\text{obs}|)`
r"""
return np.mean(abs_percentage_deviation(estim, obs), axis=axis) * 100
def abs_percentage_deviation(estim: np.ndarray, obs: np.ndarray) -> np.ndarray:
r"""
Calculate the absolute percentage deviation between the estimated and the observed data
:param estim: ndarray representing the estimated data
:param obs: ndarray representing the observed data
    :return: :math:`|\text{estim} - \text{obs}| / |\text{obs}|` (element-wise proportional deviation)
r"""
return abs_proportional_deviation(estim, obs)
def abs_proportional_deviation(estim: np.ndarray, obs: np.ndarray) -> np.ndarray:
r"""
Calculate the absolute proportional deviation between the estimated and the observed data
:param estim: ndarray representing the estimated data
:param obs: ndarray representing the observed data
    :return: :math:`|\text{estim} - \text{obs}| / |\text{obs}|` (element-wise)
r"""
return np.abs(estim - obs) / np.abs(obs)
def welch(mu1, mu2, var1, var2, n1, n2):
s_delta = np.sqrt((var1 / n1) + (var2 / n2))
s_delta = np.asarray(s_delta)
s_delta = np.nextafter(0, 1, out=s_delta, where=s_delta == 0, dtype=s_delta.dtype)
t = (mu1 - mu2) / s_delta
# t = np.asarray(t)
# t = np.nextafter(0, 1, out=t, where=t == 0, dtype=t.dtype)
denom = (np.square(var1 / n1) / (n1 - 1)) + (np.square(var2 / n2) / (n2 - 1))
denom = np.asarray(denom)
denom = np.nextafter(0, 1, out=denom, where=denom == 0, dtype=denom.dtype)
df = np.square((var1 / n1) + (var2 / n2)) / denom
return t, df
def welch_t_test(x1, x2):
r"""
Calculates a Welch-Test with independent mean and variance for two samples.
Tests the null hypothesis (H0) that the two population means are equal.
:param x1: first sample
:param x2: second sample
:return: p-value
r"""
mu1 = np.mean(x1)
var1 = np.var(x1)
mu2 = np.mean(x2)
var2 = np.var(x2)
    n1 = np.size(x1)
    n2 = np.size(x2)
    # Welch t-statistic and Welch-Satterthwaite degrees of freedom, then the
    # two-sided p-value from the corresponding Student t distribution.
    t, df = welch(mu1, mu2, var1, var2, n1, n2)
    return 2 * scipy.stats.t.sf(np.abs(t), df)
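

if __name__ == "__main__":
    # Illustrative usage sketch with synthetic data (values are arbitrary).
    rng = np.random.RandomState(0)
    obs = rng.normal(loc=10.0, scale=2.0, size=100)
    estim = obs + rng.normal(loc=0.0, scale=0.5, size=100)
    print("RMSD:", rmsd(estim, obs))
    print("normalized RMSD:", normalized_rmsd(estim, obs))
    print("MAE:", mae(estim, obs))
    print("MAPD (%):", mapd(estim, obs))
    # Two samples drawn from the same distribution should usually give a large
    # (non-significant) Welch p-value.
    print("Welch t-test p-value:", welch_t_test(rng.normal(size=50), rng.normal(size=60)))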
import os
import sys
import re
import gzip
import tarfile
import io
import scipy
import collections
import argparse
import pandas as pd
import numpy as np
from scipy.stats import binom
from pandas.api.types import is_string_dtype
from pathlib import Path
import numbers
import warnings
class FormatError(Exception):
pass
xls = re.compile("xls")
drop = "series_matrix\.txt\.gz$|filelist\.txt$|readme|\.bam(\.tdf|$)|\.bai(\.gz|$)|\.sam(\.gz|$)|\.csfasta|\.fa(sta)?(\.gz|\.bz2|\.txt\.gz|$)|\.f(a|n)a(\.gz|$)|\.wig|\.big[Ww]ig$|\.bw(\.|$)|\.bed([Gg]raph)?(\.tdf|\.gz|\.bz2|\.txt\.gz|$)|(broad_)?lincs|\.tdf$|\.hic$|\.rds(\.gz|$)|\.tar\.gz$|\.mtx(\.gz$|$)|dge\.txt\.gz$|umis?\.txt\.gz$"
drop = re.compile(drop)
pv_str = "p[^a-zA-Z]{0,4}val"
pv = re.compile(pv_str)
adj = re.compile("adj|fdr|corr|thresh")
ws = re.compile(" ")
mtabs = re.compile("\w+\t{2,}\w+")
tab = re.compile("\t")
fields = ["Type", "Class", "Conversion", "pi0", "FDR_pval", "hist", "note"]
PValSum = collections.namedtuple("PValSum", fields, defaults=[np.nan] * len(fields))
narrowpeak = [
"chrom",
"chromStart",
"chromEnd",
"name",
"score",
"strand",
"signalValue",
"pValue",
"qValue",
"peak",
] # BED6+4
broadpeak = [
"chrom",
"chromStart",
"chromEnd",
"name",
"score",
"strand",
"signalValue",
"pValue",
"qValue",
] # BED6+3
gappedpeak = [
"chrom",
"chromStart",
"chromEnd",
"name",
"score",
"strand",
"thickStart",
"thickEnd",
"itemRgb",
"blockCount",
"blockSizes",
"blockStarts",
"signalValue",
"pValue",
"qValue",
] # BED12+3
peak = re.compile("(narrow|broad|gapped)peak")
class ImportSuppfiles(object):
def __init__(self):
self.out = {}
def from_flat(self, input, tar=None):
if drop.search(input.name.lower() if tar else input.lower()):
key = os.path.basename(input.name if tar else input)
return self.out.update(note(key, "not imported"))
else:
out = {}
try:
if xls.search(input.name if tar else input):
try:
out.update(self.read_excel(input, tar=tar))
except ValueError as e:
out.update(self.read_csv(input, tar=tar))
else:
d = self.read_csv(input, tar=tar)
is_empty = [v.empty for v in d.values()][0]
if is_empty:
raise Exception("empty table")
else:
peakfile = peak.search(
input.name.lower() if tar else input.lower()
)
if peakfile:
key = os.path.basename(input.name if tar else input)
d[key].loc[-1] = d[key].columns
d[key] = d[key].sort_index().reset_index(drop=True)
d[key].columns = eval(peakfile.group(0))
out.update(d)
except Exception as e:
key = os.path.basename(input.name if tar else input)
peakfile = peak.search(input.name.lower() if tar else input.lower())
if peakfile:
e = f"Misspecified '{peakfile.group(0)}' file; {e}"
out.update(note(key, e))
return self.out.update(out)
def from_tar(self, input):
with tarfile.open(input, "r:*") as tar:
for member in tar:
if member.isfile():
self.from_flat(member, tar)
def find_header(self, df, n=20):
head = df.head(n)
matches = [
i[0]
for i in [
[i for i, x in enumerate(head[col].str.contains(pv_str, na=False)) if x]
for col in head
]
if i
]
idx = min(matches) + 1 if matches else 0
if idx == 0:
for index, row in head.iterrows():
if all([isinstance(i, str) for i in row if i is not np.nan]):
idx = index + 1
break
return idx
def csv_helper(self, input, input_name, csv, verbose=0):
# Get comments and set rows to skip
r = pd.read_csv(csv, sep=None, engine="python", iterator=True, nrows=1000)
comment = None
sep = r._engine.data.dialect.delimiter
columns = r._engine.columns
if isinstance(input, (tarfile.ExFileObject)):
with csv as h:
first_line = h.readline()
elif input_name.endswith("gz") or isinstance(input, (gzip.GzipFile)):
with gzip.open(input) as h:
first_line = h.readline().decode("utf-8").rstrip()
else:
with open(input, "r") as h:
first_line = h.readline().rstrip()
more_tabs_than_sep = len(tab.findall(first_line)) > len(
re.findall(sep, first_line)
)
if re.search("^#", first_line) or more_tabs_than_sep:
comment = "#"
# Get delimiter
r = pd.read_csv(
csv, sep=None, engine="python", iterator=True, skiprows=20, nrows=1000
)
sep = r._engine.data.dialect.delimiter
columns = r._engine.columns
if ws.search(sep):
sep = "\s+"
if mtabs.search(first_line):
sep = "\t+"
# Import file
if isinstance(input, (tarfile.ExFileObject)) and input_name.endswith("gz"):
with gzip.open(input) as h:
df = pd.read_csv(h, sep=sep, comment=comment, encoding="unicode_escape")
else:
df = pd.read_csv(input, sep=sep, comment=comment, encoding="unicode_escape")
# Check and fix column names
# Case of extra level of delimiters in column names
if len(df.columns) > len(columns):
df = pd.read_csv(
input,
header=None,
skiprows=[0],
sep=sep,
comment=comment,
encoding="unicode_escape",
).drop([0])
df.columns = columns
unnamed = ["Unnamed" in i for i in df.columns]
# Case of empty rows before header
if all(unnamed):
idx = self.find_header(df)
if idx > 0:
df = pd.read_csv(
input,
sep=sep,
comment=comment,
skiprows=idx,
encoding="unicode_escape",
)
# Case of anonymous row names
if unnamed[-1] & sum(unnamed) == 1:
if any([pv.search(i) for i in df.columns]):
df.columns = [df.columns[-1]] + list(df.columns[:-1])
if verbose > 1:
print("df after import:\n", df)
return {os.path.basename(input_name): df}
def excel_helper(self, input, input_name, verbose=0):
tabs = {}
if input_name.endswith("gz") or isinstance(input, (gzip.GzipFile)):
excel_file = gzip.open(input)
else:
excel_file = input
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
wb = pd.ExcelFile(excel_file)
if len(wb.sheet_names) == 0:
(m,) = [i.message for i in w][0].args
raise FormatError(
f"The data source could not be successfully parsed with warning: '{m}'",
)
sheets = wb.sheet_names
sheets = [i for i in sheets if "README" not in i]
for sheet in sheets:
df = wb.parse(sheet, comment="#")
if df.empty:
df = wb.parse(sheet)
if verbose > 1:
print("df after import:\n", df)
if not df.empty:
pu = sum(["Unnamed" in i for i in list(df.columns)]) / len(df.columns)
if pu >= 2 / 3:
idx = self.find_header(df)
if idx > 0:
df = wb.parse(sheet, skiprows=idx)
tabs.update({os.path.basename(input_name) + "-sheet-" + sheet: df})
return tabs
def read_csv(self, input, tar=None):
if isinstance(input, (tarfile.TarInfo)):
input_name = os.path.basename(input.name)
with tar.extractfile(input) as h:
if input_name.endswith("gz"):
with gzip.open(h) as gz:
csv = io.StringIO(gz.read().decode("unicode_escape"))
else:
csv = io.StringIO(h.read().decode("unicode_escape"))
with tar.extractfile(input) as h:
out = self.csv_helper(h, input_name, csv)
else:
input_name = input
csv = input
out = self.csv_helper(input, input_name, csv)
return out
def read_excel(self, input, tar=None):
if isinstance(input, (tarfile.TarInfo)):
input_name = os.path.basename(input.name)
with tar.extractfile(input) as h:
out = self.excel_helper(h, input_name)
else:
input_name = input
out = self.excel_helper(input, input_name)
return out
def raw_pvalues(i):
return bool(pv.search(i.lower()) and not adj.search(i.lower()))
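# Examples: raw_pvalues("P.Value") and raw_pvalues("pvalue") are True, whereas
# raw_pvalues("adj.P.Val") and raw_pvalues("fdr_pval") are False, because the
# `adj` pattern (adj|fdr|corr|thresh) filters out adjusted/corrected columns.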
def filter_pvalue_tables(input, pv=None, adj=None):
return {
k: v
for k, v in input.items()
if any([raw_pvalues(i) for i in v.columns if not isinstance(i, numbers.Number)])
}
def fix_column_dtype(df):
for col in df.columns:
s = df[col]
if is_string_dtype(s):
if "," in s[:5].astype(str).str.cat(sep=" "):
df[col] = s.apply(lambda x: str(x).replace(",", "."))
            df[col] = pd.to_numeric(df[col], errors="coerce")
return df
def summarise_pvalue_tables(
df, var=["basemean", "value", "fpkm", "logcpm", "rpkm", "aveexpr"]
):
# Drop columns with numeric column names
df = df.filter(regex="^\D")
# Drop columns with NaN column names
df = df.loc[:, df.columns.notnull()]
df.columns = map(str.lower, df.columns)
pval_cols = [i for i in df.columns if raw_pvalues(i)]
pvalues = df[pval_cols].copy()
# Check if there is ANOTHER(!!#?) level of ":" delimiters in p value column(s)
extra_delim = ":"
split_col = [i for i in pvalues.columns if extra_delim in i]
if split_col:
for index, col in enumerate(split_col):
col_count = len(re.findall(extra_delim, col))
obs_count = len(re.findall(extra_delim, str(pvalues.iloc[0, index])))
if obs_count == 0:
pass
elif col_count == obs_count:
new_cols = col.split(extra_delim)
split_pval_col = [i for i in new_cols if raw_pvalues(i)]
cols_split = pvalues.iloc[:, index].str.split(extra_delim, expand=True)
try:
cols_split.columns = new_cols
pvalues[split_pval_col] = cols_split[split_pval_col]
pvalues.drop(col, axis=1, inplace=True)
except ValueError:
pass
pval_cols = [i for i in pvalues.columns if raw_pvalues(i)]
pvalues_check = fix_column_dtype(pvalues)
for v in var:
label = v
if v == "value":
v = "^value_\d"
label = "fpkm"
exprs = df.filter(regex=v, axis=1)
if not exprs.empty:
exprs_check = fix_column_dtype(exprs)
exprs_sum = exprs_check.mean(axis=1, skipna=True)
pvalues_check.loc[:, label] = exprs_sum
break
pv_stacked = (
pvalues_check.melt(id_vars=list(set(pvalues_check.columns) - set(pval_cols)))
.set_index("variable")
.rename(columns={"value": "pvalue"})
)
return pv_stacked.dropna()
# https://stackoverflow.com/a/32681075/1657871
def rle(inarray):
"""run length encoding. Partial credit to R rle function.
Multi datatype arrays catered for including non Numpy
returns: tuple (runlengths, startpositions, values)"""
ia = np.asarray(inarray) # force numpy
n = len(ia)
if n == 0:
return (None, None, None)
else:
y = np.array(ia[1:] != ia[:-1]) # pairwise unequal (string safe)
        i = np.append(np.where(y), n - 1)   # must include last element position
z = np.diff(np.append(-1, i)) # run lengths
p = np.cumsum(np.append(0, z))[:-1] # positions
return (z, p, ia[i])
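# Example: rle([1, 1, 2, 2, 2, 3]) returns
# (array([2, 3, 1]), array([0, 2, 5]), array([1, 2, 3])),
# i.e. run lengths, run start positions and run values.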
def get_hist_class(counts, fdr):
bins = len(counts)
qc = binom.ppf(1 - 1 / bins * fdr, sum(counts), 1 / bins)
counts_over_qc = counts > qc
ru = rle(counts_over_qc)
over_qc = ru[1][ru[2]]
rufl = rle(np.flip(counts_over_qc))
over_qc_fl = rufl[1][rufl[2]]
if all(~counts_over_qc):
Class = "uniform"
elif len(over_qc) == 1:
over_qc_prop = ru[0][ru[2]] / bins
if over_qc == 0 and over_qc_prop < 1 / 3:
Class = "anti-conservative"
elif over_qc_fl == 0:
Class = "conservative"
else:
Class = "other"
elif len(over_qc) == 2:
if over_qc[0] == 0 and over_qc_fl[0] == 0:
Class = "bimodal"
else:
Class = "other"
else:
Class = "other"
return Class
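# Classification logic in brief: bins above the binomial threshold qc are
# run-length encoded. No bins above qc -> "uniform"; a single run starting at the
# left edge (p ~ 0) and spanning less than 1/3 of the bins -> "anti-conservative";
# a single run ending at the right edge -> "conservative"; exactly two runs, one
# at each edge -> "bimodal"; anything else -> "other".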
# https://gdsctools.readthedocs.io/en/master/_modules/gdsctools/qvalue.html#QValue
def estimate_pi0(
pv,
lambdas=None,
pi0=None,
df=3,
method="smoother",
smooth_log_pi0=False,
verbose=True,
):
"""Estimate pi0 based on the pvalues"""
try:
pv = np.array(pv)
except:
pv = pv.copy()
assert pv.min() >= 0 and pv.max() <= 1, "p-values should be between 0 and 1"
if lambdas is None:
epsilon = 1e-8
lambdas = np.arange(0, 0.9 + 1e-8, 0.05)
if len(lambdas) > 1 and len(lambdas) < 4:
raise ValueError(
"""if length of lambda greater than 1, you need at least 4 values"""
)
if len(lambdas) >= 1 and (min(lambdas) < 0 or max(lambdas) >= 1):
raise ValueError("lambdas must be in the range[0, 1[")
m = float(len(pv))
pv = pv.ravel() # flatten array
if pi0 is not None:
pass
elif len(lambdas) == 1:
pi0 = np.mean(pv >= lambdas[0]) / (1 - lambdas[0])
pi0 = min(pi0, 1)
else:
# evaluate pi0 for different lambdas
pi0 = [np.mean(pv >= this) / (1 - this) for this in lambdas]
# in R
# lambda = seq(0,0.09, 0.1)
# pi0 = c(1.0000000, 0.9759067, 0.9674164, 0.9622673, 0.9573241,
# 0.9573241 0.9558824, 0.9573241, 0.9544406, 0.9457901)
# spi0 = smooth.spline(lambda, pi0, df=3, all.knots=F, spar=0)
# predict(spi0, x=max(lambda))$y --> 0.9457946
# spi0 = smooth.spline(lambda, pi0, df=3, all.knots=F)
# predict(spi0, x=max(lambda))$y --> 0.9485383
# In this function, using pi0 and lambdas, we get 0.9457946
# this is not too bad, the difference on the v17 data set
# is about 0.3 %
if method == "smoother":
if smooth_log_pi0:
pi0 = np.log(pi0)
# In R, the interpolation is done with smooth.spline
# within qvalue. However this is done with default
# parameters, and this is different from the Python
# code. Note, however, that smooth.spline has a parameter
# called spar. If set to 0, then we would get the same
# as in scipy. It looks like scipy has no equivalent of
# the smooth.spline function in R if spar is not 0
tck = scipy.interpolate.splrep(lambdas, pi0, k=df)
pi0 = scipy.interpolate.splev(lambdas[-1], tck)
if smooth_log_pi0:
pi0 = np.exp(pi0)
pi0 = min(pi0, 1.0)
elif method == "lfdr":
"""Estimate proportion of null p-values
by average local FDR
<NAME> and <NAME>
23 May 2012. Last revised 30 July 2012."""
n = len(pv)
i = np.array(list(range(1, n + 1)))
i.sort()
i = i[::-1]
pv.sort()
pv = pv[::-1]
q = [min(i, 1) for i in n / np.array(i) * np.array(pv)]
n1 = n + 1
pi0 = sum(np.array(i) * q) / n / n1 * 2
elif method == "bootstrap":
raise NotImplementedError
"""minpi0 = min(pi0)
mse = rep(0, len(lambdas))
pi0.boot = rep(0, len(lambdas))
for i in range(1,100):
p.boot = sample(p, size = m, replace = TRUE)
for i in range(0,len(lambdas)):
pi0.boot[i] <- mean(p.boot > lambdas[i])/(1 - lambdas[i])
mse = mse + (pi0.boot - minpi0)^2
pi0 = min(pi0[mse == min(mse)])
pi0 = min(pi0, 1)"""
if pi0 > 1:
if verbose:
print("got pi0 > 1 (%.3f), setting it to 1" % pi0)
pi0 = 1.0
assert pi0 >= 0 and pi0 <= 1, "pi0 is not between 0 and 1: %f" % pi0
return pi0
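# Example: p-values drawn from Uniform(0, 1) (no true effects) should give an
# estimate close to 1, e.g. estimate_pi0(np.random.uniform(size=10000)) ~ 1.0,
# whereas a mixture containing genuine effects yields pi0 < 1.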
def conversion(x, y):
classes = pd.DataFrame.from_dict(
{
"uniform": ["same good", "improve, effects", "worsen", "worsen", "worsen"],
"anti-conservative": [
"effects lost",
"same good",
"worsen",
"worsen",
"worsen",
],
"conservative": [
"improvement; no effects",
"improvement; effects",
"same bad",
"no improvement",
"no improvement",
],
"other": [
"improvement; no effects",
"improvement; effects",
"no improvement",
"same bad",
"no improvement",
],
"bimodal": [
"improvement; no effects",
"improvement; effects",
"no improvement",
"no improvement",
"same bad",
],
},
orient="index",
columns=["uniform", "anti-conservative", "conservative", "other", "bimodal"],
)
return classes.loc[x, y]
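# Example lookups (first argument selects the row, second the column):
#   conversion("anti-conservative", "uniform") -> "effects lost"
#   conversion("uniform", "anti-conservative") -> "improve, effects"
#   conversion("conservative", "conservative") -> "same bad"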
def summarise_pvalues(
df,
bins=30,
fdr=0.05,
var={
"basemean": 10,
"fpkm": 0.5,
"logcpm": np.log2(0.5),
"rpkm": 0.5,
"aveexpr": | np.log2(10) | numpy.log2 |
"""
*************** INTRINSIC SAMPLING METHOD MODULE *******************
Performs intrinsic sampling analysis on a set of interfacial
simulation configurations
********************************************************************
Created 24/11/2016 by <NAME>
Contributors: <NAME>
Last modified 27/2/2018 by <NAME>
"""
import os
import sys
import time
import tables
import numpy as np
from alias.io.hdf5_io import (
make_hdf5,
load_hdf5,
save_hdf5,
shape_check_hdf5,
frame_check_hdf5,
mode_check_hdf5
)
from alias.io.numpy_io import load_npy
from alias.io.command_line_output import StdOutTable
from alias.src.linear_algebra import update_A_b, lu_decomposition
from alias.src.self_consistent_cycle import (
self_consistent_cycle,
initialise_surface, initialise_recon
)
from alias.src.spectra import intrinsic_area
from alias.src.utilities import create_surface_file_path
from .positions import check_pbc
from .surface_reconstruction import surface_reconstruction
from .utilities import numpy_remove
def build_surface(xmol, ymol, zmol, dim, qm, n0, phi, tau, max_r,
ncube=3, vlim=3, recon=0, surf_0=[0, 0], zvec=None):
"""
Create coefficients for Fourier sum representing intrinsic surface.
Parameters
----------
xmol: float, array_like; shape=(nmol)
Molecular coordinates in x dimension
ymol: float, array_like; shape=(nmol)
Molecular coordinates in y dimension
zmol: float, array_like; shape=(nmol)
Molecular coordinates in z dimension
dim: float, array_like; shape=(3)
XYZ dimensions of simulation cell
mol_sigma: float
Radius of spherical molecular interaction sphere
qm: int
Maximum number of wave frequencies in Fourier Sum
representing intrinsic surface
n0: int
Maximum number of molecular pivot in intrinsic surface
phi: float
Weighting factor of minimum surface area term in surface
optimisation function
tau: float
Tolerance along z axis either side of existing intrinsic
surface for selection of new pivot points
max_r: float
Maximum radius for selection of vapour phase molecules
ncube: int (optional)
Grid size for initial pivot molecule selection
vlim: int (optional)
        Minimum number of molecular neighbours within radius max_r
        required for a molecule NOT to be considered part of the vapour region
recon: bool (optional)
Whether to peform surface reconstruction routine
surf_0: float, array-like; shape=(2) (optional)
Initial guesses for surface plane positions
Returns
-------
coeff: array_like (float); shape=(2, n_waves**2)
Optimised surface coefficients
pivot: array_like (int); shape=(2, n0)
        Indices of pivot molecules in molecular position arrays
"""
nmol = len(xmol)
mol_list = np.arange(nmol)
start = time.time()
coeff, A, b, area_diag = initialise_surface(qm, phi, dim)
if recon:
psi, curve_matrix, H_var = initialise_recon(qm, phi, dim)
# Remove molecules from vapour phase and assign an initial
# grid of pivots furthest away from centre of mass
print('Lx = {:5.3f} Ly = {:5.3f} qm = {:5d}\n'
'phi = {} n_piv = {:5d} vlim = {:5d} max_r = {:5.3f}'.format(
dim[0], dim[1], qm, phi, n0, vlim, max_r))
print('Surface plane initial guess = {} {}'.format(surf_0[0], surf_0[1]))
pivot_search = (ncube > 0)
stdout_table = StdOutTable()
stdout_table.add_section(
'TIMINGS (s)',
['Pivot selection', 'Matrix Formation',
'LU Decomposition', 'TOTAL'])
stdout_table.add_section('PIVOTS', ['n_piv1', 'n_piv2'])
stdout_table.add_section('INT AREA', ['surf1', 'surf2'])
if not pivot_search:
print(stdout_table.table_header())
start1 = time.time()
if zvec is not None:
"Separate pivots based on orientational vector"
piv_n1 = np.argwhere(zvec < 0).flatten()
piv_n2 = np.argwhere(zvec >= 0).flatten()
else:
"Separate pivots based on position"
piv_n1 = np.argwhere(zmol < 0).flatten()
piv_n2 = np.argwhere(zmol >= 0).flatten()
pivot = [piv_n1, piv_n2]
if not (len(pivot[0]) == n0) * (len(pivot[1]) == n0):
# ut.view_surface(
# coeff, pivot, qm, qm, xmol, ymol, zmol, 2, dim)
zmol, pivot = pivot_swap(
xmol, ymol, zmol, pivot, dim, max_r, n0)
zmol = check_pbc(xmol, ymol, zmol, pivot, dim)
pivot = np.array(pivot, dtype=int)
end1 = time.time()
"Update A matrix and b vector"
temp_A, temp_b, fuv = update_A_b(xmol, ymol, zmol, dim, qm, pivot)
A += temp_A
b += temp_b
end2 = time.time()
"Perform LU decomosition to solve Ax = b"
coeff[0] = lu_decomposition(A[0] + area_diag, b[0])
coeff[1] = lu_decomposition(A[1] + area_diag, b[1])
if recon:
coeff[0], _ = surface_reconstruction(
coeff[0], A[0], b[0], area_diag, curve_matrix,
H_var, qm, pivot[0].size, psi)
coeff[1], _ = surface_reconstruction(
coeff[1], A[1], b[1], area_diag, curve_matrix,
H_var, qm, pivot[1].size, psi)
end3 = time.time()
"Calculate surface areas excess"
area1 = intrinsic_area(coeff[0], qm, qm, dim)
area2 = intrinsic_area(coeff[1], qm, qm, dim)
end = time.time()
output = [
end1 - start1, end2 - end1, end3 - end2, end - start1,
len(pivot[0]), len(pivot[1]), area1, area2
]
print(stdout_table.row(output))
# ut.view_surface(coeff, pivot, qm, qm, xmol, ymol, zmol, 50, dim)
else:
piv_n1 = np.arange(ncube**2)
piv_n2 = np.arange(ncube**2)
piv_z1 = np.zeros(ncube**2)
piv_z2 = np.zeros(ncube**2)
dxyz = np.reshape(
np.tile(
np.stack((xmol, ymol, zmol)), (1, nmol)),
(3, nmol, nmol)
)
dxyz = np.transpose(dxyz, axes=(0, 2, 1)) - dxyz
for i, l in enumerate(dim[:2]):
dxyz[i] -= l * np.array(2 * dxyz[i] / l, dtype=int)
dr2 = np.sum(dxyz**2, axis=0)
vapour_list = np.where(np.count_nonzero(dr2 < max_r**2, axis=1) < vlim)
print('Removing {} vapour molecules'.format(vapour_list[0].size))
mol_list = numpy_remove(mol_list, vapour_list)
del dxyz, dr2
print('Selecting initial {} pivots'.format(ncube**2))
index_x = np.array(xmol * ncube / dim[0], dtype=int) % ncube
        index_y = np.array(ymol * ncube / dim[1], dtype=int) % ncube
import torch
import numpy as np
from collections import defaultdict
from ..bbox.geometry import bbox_overlaps
def iou_score(v1, v2, binary=False):
score = bbox_overlaps(torch.Tensor(v1[:, :4]),
torch.Tensor(v2[:, :4])).numpy()
signal = (score > 0.5).astype(np.float32)
if binary:
return signal
else:
return score * signal
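# Example (assuming bbox_overlaps returns pairwise IoU): identical boxes score 1.0,
# disjoint boxes score 0.0, and any pair with IoU <= 0.5 is zeroed out; with
# binary=True the 0/1 match mask is returned instead of the IoU values.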
def scatter_by_classes(inputs, num_classes):
outputs = defaultdict(list)
for x in inputs:
if x is None:
for i in range(num_classes):
outputs[i].append(None)
else:
for i, y in enumerate(x):
outputs[i].append(y)
return outputs
def gather_by_classes(inputs):
outputs = []
num_classes = len(inputs)
num_frames = len(inputs[0])
for i in range(num_frames):
per_frame = []
for j in range(num_classes):
per_frame.append(inputs[j][i])
outputs.append(per_frame)
return outputs
def det_to_track(det_results):
num_classes = len(det_results[0])
outputs = []
for i, bboxes in enumerate(det_results):
output = {}
for j in range(num_classes):
results = bboxes[j]
num_bboxes = results.shape[0]
for k in range(num_bboxes):
_box = results[k, :]
instance_id = _box[-1]
if instance_id >= 0:
output[int(instance_id)] = {'bbox': _box[:-1], 'label': j}
outputs.append(output)
return outputs
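# The returned structure is a list with one dict per frame, mapping
# instance_id -> {"bbox": box coordinates plus score, "label": class index},
# keeping only detections that were assigned a non-negative instance id.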
def check_diff(input1, input2):
for x, y in zip(input1, input2):
for _x, _y in zip(x, y):
score_diff = _x[:, 4] - _y[:, 4]
for score in score_diff:
if score > 0.1:
print(score)
def finding_video_tubes(outputs, dataset):
"""Implementation of `Linking tracklets to object tubes` in the Paper
"Detect to Track and Track to Detect."
Inputs:
track_results (list): -> dict() -> keys:
['bbox_results', 'track_results']
dataset: CLASS DATASET
"""
num_classes = len(dataset.CLASSES)
all_bbox_results = outputs['bbox_results']
all_track_results = outputs['track_results']
all_outputs = defaultdict(list)
count = 0
instance_id = 0
vid_ids = dataset.vid_ids
for vid_id in vid_ids:
vid_name = dataset.bdd.loadVideos(vid_id)
print(vid_name)
img_ids = dataset.bdd.getImgIdsFromVideoId(vid_id)
num_frames = len(img_ids)
vid_bbox_results = all_bbox_results[count:count + num_frames]
vid_track_results = all_track_results[count:count + num_frames]
count += num_frames
        assert vid_track_results[0] is None, 'Videos may have been split incorrectly.'
class_bbox_results = scatter_by_classes(vid_bbox_results, num_classes)
class_track_results = scatter_by_classes(vid_track_results,
num_classes)
outputs = []
for cls_index in range(num_classes):
det, instance_id = finding_video_tubes_greedy_per_class(
class_bbox_results[cls_index], class_track_results[cls_index],
instance_id, cls_index)
outputs.append(det)
track_results = track_gather(outputs, img_ids)
# bbox_results = gather_by_classes(outputs)
# check_diff(bbox_results, vid_bbox_results)
# track_results = det_to_track(bbox_results)
# all_outputs['bbox_results'].extend(bbox_results)
all_outputs['track_results'].extend(track_results)
return all_outputs
def track_gather(outputs, img_ids):
num_frames = len(img_ids)
output_list = []
for k in range(num_frames):
out = defaultdict(list)
for res in outputs:
if k in res.keys():
out.update(res[k])
output_list.append(out)
return output_list
def finding_video_tubes_viterbi_per_class(bbox_results, track_results, path_id,
cls_index):
ori_bboxes = bbox_results.copy()
empty_set = False
for i, _ori_bboxes in enumerate(ori_bboxes):
num_bboxes = _ori_bboxes.shape[0]
if num_bboxes == 0:
empty_set = True
ids = np.zeros((num_bboxes, 1)) - 1
ori_bboxes[i] = np.concatenate((ori_bboxes[i], ids), axis=1)
if empty_set:
return ori_bboxes, path_id
num_frames = len(bbox_results)
    vertices_isempty = np.zeros(num_frames, dtype=bool)
paths = []
while not np.any(vertices_isempty):
data_score = []
data_idx = []
for i in range(num_frames):
num_bboxes = bbox_results[i].shape[0]
data_score.append(np.zeros((num_bboxes, 1)))
data_idx.append(np.zeros((num_bboxes, 1)))
data_idx[i][:] = np.nan
# for i in range(1, num_frames):
# track_results[i] = np.concatenate(
# (track_results[i], bbox_results[i - 1][:, -1][:, None]),
# axis=1)
# edge_score = iou_score(track_results[i],
# bbox_results[i]) #[N_t-1, N_t]
# # TODO: why this
# # edge_score += np.transpose(data_score[i])
# edge_score += bbox_results[i][:, 4][None, :]
# edge_score += track_results[i][:, 4][:, None]
# data_score[i] = np.max(edge_score, axis=1)
# data_idx[i] = np.argmax(edge_score, axis=1)
for i in range(num_frames - 1, 0, -1):
track_results[i] = np.concatenate(
(track_results[i], bbox_results[i - 1][:, -1][:, None]),
axis=1)
edge_score = iou_score(track_results[i],
bbox_results[i]) # [N_t-1, N_t]
if i < num_frames - 2:
edge_score += np.transpose(data_score[i + 1])
edge_score += bbox_results[i][:, 4][None, :]
edge_score += track_results[i][:, 4][:, None]
data_score[i] = np.max(edge_score, axis=1)
data_idx[i] = np.argmax(edge_score, axis=1)
box_index = np.argmax(data_score[1])
boxes = bbox_results[0][box_index, :4]
index = np.array(box_index)
scores = np.array(bbox_results[0][box_index, 4])
for i in range(1, num_frames):
box_index = data_idx[i][box_index]
index = np.hstack((index, np.array(box_index)))
boxes = np.vstack((boxes, bbox_results[i][box_index, :4]))
scores = np.hstack(
(scores, np.array(bbox_results[i][box_index, 4])))
cur_list = [index, boxes, scores]
paths.append(cur_list)
for i in range(num_frames):
            mask = np.ones(bbox_results[i].shape[0], dtype=bool)
mask[index[i]] = False
bbox_results[i] = bbox_results[i][mask, :]
if not i == num_frames - 1:
track_results[i + 1] = track_results[i + 1][mask, :]
vertices_isempty[i] = (bbox_results[i].shape[0] == 0)
unmap_idx_list = []
for i in range(num_frames):
trimmed_idx = [path[0][i] for path in paths]
flag = np.zeros(ori_bboxes[i].shape[0])
ori_idx = []
for j in trimmed_idx:
count = -1
for k in range(ori_bboxes[i].shape[0]):
if not flag[k]:
count += 1
if count == j:
ori_idx.append(k)
flag[k] = True
unmap_idx_list.append(ori_idx)
for cur_path_id, path in enumerate(paths):
path_score = path[2]
path_score_t = sorted(path_score)[len(path_score) // 2:]
ave_score = sum(path_score_t) / len(path_score_t)
for i in range(num_frames):
unmap_idx = unmap_idx_list[i][cur_path_id]
ori_bboxes[i][unmap_idx, 5] = path_id
# score = ori_bboxes[i][unmap_idx, 4]
# if score < ave_score:
ori_bboxes[i][unmap_idx, 4] += ave_score
ori_bboxes[i][unmap_idx, 4] /= 2
path_id += 1
return ori_bboxes, path_id
def finding_video_tubes_greedy_per_class(bbox_results, track_results, path_id,
cls_index):
# ori_bboxes = bbox_results.copy()
#
# empty_set = False
# for i, _ori_bboxes in enumerate(ori_bboxes):
# num_bboxes = _ori_bboxes.shape[0]
# if num_bboxes == 0:
# empty_set = True
# ids = np.zeros((num_bboxes, 1)) - 1
# ori_bboxes[i] = np.concatenate((ori_bboxes[i], ids), axis=1)
# if empty_set:
# return None, path_id
num_frames = len(bbox_results)
paths = []
for i in range(1, num_frames):
track_results[i] = np.concatenate(
(track_results[i], bbox_results[i - 1][:, -1][:, None]), axis=1)
# per frame calculation
for start_t in range(num_frames - 1):
num_iters = bbox_results[start_t].shape[0]
_iter = 0
while _iter < num_iters:
data_score = []
data_idx = []
for i in range(start_t, num_frames):
num_bboxes = bbox_results[i].shape[0]
data_score.append(np.zeros((num_bboxes, 1)))
data_idx.append(np.zeros((num_bboxes, 1)))
data_idx[i - start_t][:] = np.nan
for i in range(start_t + 1, num_frames):
edge_score = iou_score(track_results[i],
bbox_results[i]) # [N_t-1, N_t]
# TODO: why this
# edge_score += np.transpose(data_score[i])
# edge_score += bbox_results[i][:, 4][None, :]
# edge_score += track_results[i][:, 4][:, None]
if (edge_score.shape[0]) > 0 and (edge_score.shape[1] > 0):
data_score[i - start_t] = np.max(edge_score, axis=1)
data_idx[i - start_t] = np.argmax(edge_score, axis=1)
if len(data_score[1]) == 0:
_iter += 1
continue
box_index = np.argmax(data_score[1])
boxes = bbox_results[start_t][box_index, :4]
index = np.array(box_index)
scores = np.array(bbox_results[start_t][box_index, 4])
for i in range(1, num_frames - start_t):
if len(data_score[i]) == 0:
break
iou = data_score[i][box_index]
if iou > 0.75:
box_index = data_idx[i][box_index]
index = np.hstack((index, np.array(box_index)))
boxes = np.vstack(
(boxes, bbox_results[start_t + i][box_index, :4]))
scores = np.hstack(
(scores,
np.array(bbox_results[start_t + i][box_index, 4])))
else:
break
cur_list = [index, boxes, scores, start_t]
end_i = i
if cur_list[0].size > 1 and len(cur_list[0]) > 1:
paths.append(cur_list)
for i in range(start_t, start_t + end_i):
                    mask = np.ones(bbox_results[i].shape[0], dtype=bool)
mask[index[i - start_t]] = False
bbox_results[i] = bbox_results[i][mask, :]
if not i == num_frames - 1:
track_results[i + 1] = track_results[i + 1][mask, :]
_iter += 1
tracklets = defaultdict(list)
for path in paths:
start_t = path[-1]
max_score = path[2].max()
if max_score < 0.8:
continue
for k in range(path[1].shape[0]):
            _bbox = np.append(path[1][k, :], max_score)
#! /usr/bin/env python
import numpy as np
# test passed
def generate_path_with_time(path=None, traj_constant=None, T_seg=None, t_start=None):
    if (path is not None) and (traj_constant is not None) and (T_seg is not None) and (t_start is not None):
time = t_start
time_list = np.array([[time]])
traj_num = T_seg.shape[0] # may be shape[1]
for j in range(1, traj_num+1):
time = time + T_seg[j-1]
time_array = np.array([[time]])
time_list = np.append(time_list, time_array, axis=0)
path_with_time = np.append(path, time_list, axis=1)
timelist = path_with_time[:, 3]
    elif (path is not None) and (traj_constant is not None) and (T_seg is not None):
time = 0.0
time_list = np.array([[time]])
for j in range(1, traj_constant.total_traj_num+1):
time = time + T_seg[j-1]
time_array = np.array([[time]])
            time_list = np.append(time_list, time_array, axis=0)
# MyTT  Mai-language / 通达信 (TDX) / 同花顺 (THS) indicator implementations  https://github.com/mpquant/MyTT
# MyTT advanced-function (verified) edition: https://github.com/mpquant/MyTT/blob/main/MyTT_plus.py
# MyTT variant for Python 2 and older pandas: https://github.com/mpquant/MyTT/blob/main/MyTT_python2.py
# V2.1  2021-6-6   Added BARSLAST; added the SLOPE and FORCAST linear-regression forecasting functions
# V2.3  2021-6-13  Added TRIX, DPO, BRAR, DMA, MTM, MASS, ROC, VR, ASI and other indicators
# V2.4  2021-6-27  Added EXPMA, OBV, MFI; improved the SMA core function (core functions are now completely loop-free)
# V2.7  2021-11-21 Fixed the SLOPE and BARSLAST functions; added FILTER and LONGCROSS. Thanks to qzhjiang for corrections to SLOPE, SMA and other functions
# V2.8  2021-11-23 Fixed the FORCAST and WMA functions; welcome qzhjiang, stanene and bcq to the community to help improve the MyTT library
# V2.9  2021-11-29 Added the HHVBARS, LLVBARS, CONST and VALUEWHEN utility functions
# V2.92 2021-11-30 Added the BARSSINCEN function; the library can now be installed with "pip install MyTT"
# V3.0  2021-12-04 Improved DMA to support a sequence smoothing factor; added the XS2 (Xue Si Channel II) indicator
# V3.1  2021-12-19 Added the TOPRANGE and LOWRANGE level-1 functions
# Unless stated otherwise, the input parameter S of every function below is a numpy array or a Python list, and N is an int
# The level-1 application-layer functions are fully compatible with 通达信 (TDX) / 同花顺 (THS); see the TDX documentation for detailed usage
import numpy as np; import pandas as pd
#------------------ Level 0: core utility functions --------------------------------------------
def RD(N,D=3): return np.round(N,D)        # round to D decimal places (default 3)
def RET(S,N=1): return np.array(S)[-N]     # return the N-th value from the end of the sequence (default: the last one)
def ABS(S): return np.abs(S)               # absolute value of S
def LN(S): return np.log(S)                # natural logarithm (base e)
def POW(S,N): return np.power(S,N)         # S raised to the power N
def SQRT(S): return np.sqrt(S)             # square root of S
def MAX(S1,S2): return np.maximum(S1,S2)   # element-wise max of two sequences
def MIN(S1,S2): return np.minimum(S1,S2)   # element-wise min of two sequences
def IF(S,A,B): return np.where(S,A,B)      # element-wise boolean select: return A where S is True, else B
def REF(S, N=1): #对序列整体下移动N,返回序列(shift后会产生NAN)
return pd.Series(S).shift(N).values
def DIFF(S, N=1): #前一个值减后一个值,前面会产生nan
return pd.Series(S).diff(N).values #np.diff(S)直接删除nan,会少一行
def STD(S,N): #求序列的N日标准差,返回序列
return pd.Series(S).rolling(N).std(ddof=0).values
def SUM(S, N): #对序列求N天累计和,返回序列 N=0对序列所有依次求和
return pd.Series(S).rolling(N).sum().values if N>0 else pd.Series(S).cumsum().values
def CONST(S): #返回序列S最后的值组成常量序列
return np.full(len(S),S[-1])
def HHV(S,N): #HHV(C, 5) 最近5天收盘最高价
return pd.Series(S).rolling(N).max().values
def LLV(S,N): #LLV(C, 5) 最近5天收盘最低价
return pd.Series(S).rolling(N).min().values
def HHVBARS(S,N): #求N周期内S最高值到当前周期数, 返回序列
return pd.Series(S).rolling(N).apply(lambda x: np.argmax(x[::-1]),raw=True).values
def LLVBARS(S,N): #求N周期内S最低值到当前周期数, 返回序列
return pd.Series(S).rolling(N).apply(lambda x: np.argmin(x[::-1]),raw=True).values
def MA(S,N): #求序列的N日简单移动平均值,返回序列
return pd.Series(S).rolling(N).mean().values
def EMA(S,N): #指数移动平均,为了精度 S>4*N EMA至少需要120周期 alpha=2/(span+1)
return pd.Series(S).ewm(span=N, adjust=False).mean().values
def SMA(S, N, M=1): #中国式的SMA,至少需要120周期才精确 (雪球180周期) alpha=1/(1+com)
return pd.Series(S).ewm(alpha=M/N,adjust=False).mean().values #com=N-M/M
def WMA(S, N): #通达信S序列的N日加权移动平均 Yn = (1*X1+2*X2+3*X3+...+n*Xn)/(1+2+3+...+Xn)
return pd.Series(S).rolling(N).apply(lambda x:x[::-1].cumsum().sum()*2/N/(N+1),raw=True).values
def DMA(S, A): #求S的动态移动平均,A作平滑因子,必须 0<A<1 (此为核心函数,非指标)
if isinstance(A,(int,float)): return pd.Series(S).ewm(alpha=A,adjust=False).mean().values
A=np.array(A); A[np.isnan(A)]=1.0; Y= np.zeros(len(S)); Y[0]=S[0]
for i in range(1,len(S)): Y[i]=A[i]*S[i]+(1-A[i])*Y[i-1] #A支持序列 by jqz1226
return Y
def AVEDEV(S, N): #平均绝对偏差 (序列与其平均值的绝对差的平均值)
return pd.Series(S).rolling(N).apply(lambda x: (np.abs(x - x.mean())).mean()).values
def SLOPE(S, N): #返S序列N周期回线性回归斜率
return pd.Series(S).rolling(N).apply(lambda x: np.polyfit(range(N),x,deg=1)[0],raw=True).values
def FORCAST(S, N): #返回S序列N周期回线性回归后的预测值, jqz1226改进成序列出
return pd.Series(S).rolling(N).apply(lambda x:np.polyval(np.polyfit(range(N),x,deg=1),N-1),raw=True).values
def LAST(S, A, B): #从前A日到前B日一直满足S_BOOL条件, 要求A>B & A>0 & B>=0
return np.array(pd.Series(S).rolling(A+1).apply(lambda x:np.all(x[::-1][B:]),raw=True),dtype=bool)
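# Worked example (added for clarity): with S = [1, 2, 4, 7],
#   REF(S, 1)  -> [nan, 1, 2, 4]     (series shifted down by one bar)
#   DIFF(S, 1) -> [nan, 1, 2, 3]     (bar-over-bar difference)
#   SUM(S, 2)  -> [nan, 3, 6, 11]    (rolling 2-bar sum)
#   SUM(S, 0)  -> [1, 3, 7, 14]      (cumulative sum when N == 0)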
#------------------ Level 1: application-layer functions (built from the level-0 core functions); see the 通达信 (TDX) docs for usage --------------------------------
def COUNT(S, N): # COUNT(CLOSE>O, N): 最近N天满足S_BOO的天数 True的天数
return SUM(S,N)
def EVERY(S, N): # EVERY(CLOSE>O, 5) 最近N天是否都是True
return IF(SUM(S,N)==N,True,False)
def EXIST(S, N): # EXIST(CLOSE>3010, N=5) n日内是否存在一天大于3000点
return IF(SUM(S,N)>0,True,False)
def FILTER(S, N): # FILTER函数,S满足条件后,将其后N周期内的数据置为0, FILTER(C==H,5)
for i in range(len(S)): S[i+1:i+1+N]=0 if S[i] else S[i+1:i+1+N]
return S # 例:FILTER(C==H,5) 涨停后,后5天不再发出信号
def BARSLAST(S): #上一次条件成立到当前的周期, BARSLAST(C/REF(C,1)>=1.1) 上一次涨停到今天的天数
M=np.concatenate(([0],np.where(S,1,0)))
for i in range(1, len(M)): M[i]=0 if M[i] else M[i-1]+1
return M[1:]
def BARSLASTCOUNT(S): # 统计连续满足S条件的周期数 by jqz1226
rt = np.zeros(len(S)+1) # BARSLASTCOUNT(CLOSE>OPEN)表示统计连续收阳的周期数
for i in range(len(S)): rt[i+1]=rt[i]+1 if S[i] else rt[i+1]
return rt[1:]
def BARSSINCEN(S, N): # N周期内第一次S条件成立到现在的周期数,N为常量 by jqz1226
return pd.Series(S).rolling(N).apply(lambda x:N-1-np.argmax(x) if np.argmax(x) or x[0] else 0,raw=True).fillna(0).values.astype(int)
def CROSS(S1, S2): # 判断向上金叉穿越 CROSS(MA(C,5),MA(C,10)) 判断向下死叉穿越 CROSS(MA(C,10),MA(C,5))
return np.concatenate(([False], np.logical_not((S1>S2)[:-1]) & (S1>S2)[1:])) # 不使用0级函数,移植方便 by jqz1226
def LONGCROSS(S1,S2,N): # 两条线维持一定周期后交叉,S1在N周期内都小于S2,本周期从S1下方向上穿过S2时返回1,否则返回0
return np.array(np.logical_and(LAST(S1<S2,N,1),(S1>S2)),dtype=bool) # N=1时等同于CROSS(S1, S2)
def VALUEWHEN(S, X): # 当S条件成立时,取X的当前值,否则取VALUEWHEN的上个成立时的X值 by jqz1226
return pd.Series(np.where(S,X,np.nan)).ffill().values
def BETWEEN(S, A, B): # S处于A和B之间时为真。 包括 A<S<B 或 A>S>B
return ((A<S) & (S<B)) | ((A>S) & (S>B))
def TOPRANGE(S): # TOPRANGE(HIGH)表示当前最高价是近多少周期内最高价的最大值 by jqz1226
rt = np.zeros(len(S))
for i in range(1,len(S)): rt[i] = np.argmin(np.flipud(S[:i]<S[i]))
return rt.astype('int')
def LOWRANGE(S): # LOWRANGE(LOW)表示当前最低价是近多少周期内最低价的最小值 by jqz1226
rt = np.zeros(len(S))
for i in range(1,len(S)): rt[i] = np.argmin(np.flipud(S[:i]>S[i]))
return rt.astype('int')
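# Worked example (added for clarity): with S1 = [1, 2, 3, 2] and S2 = [2, 2, 2, 2],
#   CROSS(S1, S2)    -> [False, False, True, False]   (S1 crosses above S2 on bar 2)
# and with a boolean condition C = [True, False, False, True],
#   BARSLAST(C)      -> [0, 1, 2, 0]   (bars since C was last True)
#   BARSLASTCOUNT(C) -> [1, 0, 0, 1]   (length of the current consecutive True run)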
#------------------ Level 2: technical-indicator functions (implemented entirely with level-0 and level-1 functions) ------------------------------
def MACD(CLOSE,SHORT=12,LONG=26,M=9): # EMA的关系,S取120日,和雪球小数点2位相同
DIF = EMA(CLOSE,SHORT)-EMA(CLOSE,LONG);
DEA = EMA(DIF,M); MACD=(DIF-DEA)*2
return RD(DIF),RD(DEA),RD(MACD)
def KDJ(CLOSE,HIGH,LOW, N=9,M1=3,M2=3): # KDJ指标
RSV = (CLOSE - LLV(LOW, N)) / (HHV(HIGH, N) - LLV(LOW, N)) * 100
K = EMA(RSV, (M1*2-1)); D = EMA(K,(M2*2-1)); J=K*3-D*2
return K, D, J
def RSI(CLOSE, N=24): # RSI指标,和通达信小数点2位相同
DIF = CLOSE-REF(CLOSE,1)
return RD(SMA(MAX(DIF,0), N) / SMA(ABS(DIF), N) * 100)
def WR(CLOSE, HIGH, LOW, N=10, N1=6): #W&R 威廉指标
WR = (HHV(HIGH, N) - CLOSE) / (HHV(HIGH, N) - LLV(LOW, N)) * 100
WR1 = (HHV(HIGH, N1) - CLOSE) / (HHV(HIGH, N1) - LLV(LOW, N1)) * 100
return RD(WR), RD(WR1)
def BIAS(CLOSE,L1=6, L2=12, L3=24): # BIAS乖离率
BIAS1 = (CLOSE - MA(CLOSE, L1)) / MA(CLOSE, L1) * 100
BIAS2 = (CLOSE - MA(CLOSE, L2)) / MA(CLOSE, L2) * 100
BIAS3 = (CLOSE - MA(CLOSE, L3)) / MA(CLOSE, L3) * 100
return RD(BIAS1), RD(BIAS2), RD(BIAS3)
def BOLL(CLOSE,N=20, P=2): #BOLL指标,布林带
MID = MA(CLOSE, N);
UPPER = MID + STD(CLOSE, N) * P
LOWER = MID - STD(CLOSE, N) * P
return RD(UPPER), RD(MID), RD(LOWER)
def PSY(CLOSE,N=12, M=6):
PSY=COUNT(CLOSE>REF(CLOSE,1),N)/N*100
PSYMA=MA(PSY,M)
return RD(PSY),RD(PSYMA)
def CCI(CLOSE,HIGH,LOW,N=14):
TP=(HIGH+LOW+CLOSE)/3
return (TP-MA(TP,N))/(0.015*AVEDEV(TP,N))
def ATR(CLOSE,HIGH,LOW, N=20): #真实波动N日平均值
TR = MAX(MAX((HIGH - LOW), ABS(REF(CLOSE, 1) - HIGH)), ABS(REF(CLOSE, 1) - LOW))
return MA(TR, N)
def BBI(CLOSE,M1=3,M2=6,M3=12,M4=20): #BBI多空指标
return (MA(CLOSE,M1)+MA(CLOSE,M2)+MA(CLOSE,M3)+MA(CLOSE,M4))/4
def DMI(CLOSE,HIGH,LOW,M1=14,M2=6): #动向指标:结果和同花顺,通达信完全一致
TR = SUM(MAX(MAX(HIGH - LOW, ABS(HIGH - REF(CLOSE, 1))), ABS(LOW - REF(CLOSE, 1))), M1)
HD = HIGH - REF(HIGH, 1); LD = REF(LOW, 1) - LOW
DMP = SUM(IF((HD > 0) & (HD > LD), HD, 0), M1)
DMM = SUM(IF((LD > 0) & (LD > HD), LD, 0), M1)
PDI = DMP * 100 / TR; MDI = DMM * 100 / TR
ADX = MA(ABS(MDI - PDI) / (PDI + MDI) * 100, M2)
ADXR = (ADX + REF(ADX, M2)) / 2
return PDI, MDI, ADX, ADXR
def TAQ(HIGH,LOW,N): #唐安奇通道(海龟)交易指标,大道至简,能穿越牛熊
UP=HHV(HIGH,N); DOWN=LLV(LOW,N); MID=(UP+DOWN)/2
return UP,MID,DOWN
def KTN(CLOSE,HIGH,LOW,N=20,M=10): #肯特纳交易通道, N选20日,ATR选10日
MID=EMA((HIGH+LOW+CLOSE)/3,N)
ATRN=ATR(CLOSE,HIGH,LOW,M)
UPPER=MID+2*ATRN; LOWER=MID-2*ATRN
return UPPER,MID,LOWER
def TRIX(CLOSE,M1=12, M2=20): #三重指数平滑平均线
TR = EMA(EMA(EMA(CLOSE, M1), M1), M1)
TRIX = (TR - REF(TR, 1)) / REF(TR, 1) * 100
TRMA = MA(TRIX, M2)
return TRIX, TRMA
def VR(CLOSE,VOL,M1=26): #VR容量比率
LC = REF(CLOSE, 1)
return SUM(IF(CLOSE > LC, VOL, 0), M1) / SUM(IF(CLOSE <= LC, VOL, 0), M1) * 100
def EMV(HIGH,LOW,VOL,N=14,M=9): #简易波动指标
VOLUME=MA(VOL,N)/VOL; MID=100*(HIGH+LOW-REF(HIGH+LOW,1))/(HIGH+LOW)
EMV=MA(MID*VOLUME*(HIGH-LOW)/MA(HIGH-LOW,N),N); MAEMV=MA(EMV,M)
return EMV,MAEMV
def DPO(CLOSE,M1=20, M2=10, M3=6): #区间震荡线
DPO = CLOSE - REF(MA(CLOSE, M1), M2); MADPO = MA(DPO, M3)
return DPO, MADPO
def BRAR(OPEN,CLOSE,HIGH,LOW,M1=26): #BRAR-ARBR 情绪指标
AR = SUM(HIGH - OPEN, M1) / SUM(OPEN - LOW, M1) * 100
BR = SUM(MAX(0, HIGH - REF(CLOSE, 1)), M1) / SUM(MAX(0, REF(CLOSE, 1) - LOW), M1) * 100
return AR, BR
def DFMA(CLOSE,N1=10,N2=50,M=10): #平行线差指标
DIF=MA(CLOSE,N1)-MA(CLOSE,N2); DIFMA=MA(DIF,M) #通达信指标叫DMA 同花顺叫新DMA
return DIF,DIFMA
def MTM(CLOSE,N=12,M=6): #动量指标
MTM=CLOSE-REF(CLOSE,N); MTMMA=MA(MTM,M)
return MTM,MTMMA
def MASS(HIGH,LOW,N1=9,N2=25,M=6): #梅斯线
MASS=SUM(MA(HIGH-LOW,N1)/MA(MA(HIGH-LOW,N1),N1),N2)
MA_MASS=MA(MASS,M)
return MASS,MA_MASS
def ROC(CLOSE,N=12,M=6): #变动率指标
ROC=100*(CLOSE-REF(CLOSE,N))/REF(CLOSE,N); MAROC=MA(ROC,M)
return ROC,MAROC
def EXPMA(CLOSE,N1=12,N2=50): #EMA指数平均数指标
return EMA(CLOSE,N1),EMA(CLOSE,N2);
def OBV(CLOSE,VOL): #能量潮指标
return SUM(IF(CLOSE>REF(CLOSE,1),VOL,IF(CLOSE<REF(CLOSE,1),-VOL,0)),0)/10000
def MFI(CLOSE,HIGH,LOW,VOL,N=14): #MFI指标是成交量的RSI指标
TYP = (HIGH + LOW + CLOSE)/3
V1=SUM(IF(TYP>REF(TYP,1),TYP*VOL,0),N)/SUM(IF(TYP<REF(TYP,1),TYP*VOL,0),N)
return 100-(100/(1+V1))
def ASI(OPEN,CLOSE,HIGH,LOW,M1=26,M2=10): #振动升降指标
LC=REF(CLOSE,1); AA=ABS(HIGH-LC); BB=ABS(LOW-LC);
CC=ABS(HIGH-REF(LOW,1)); DD=ABS(LC-REF(OPEN,1));
R=IF( (AA>BB) & (AA>CC),AA+BB/2+DD/4,IF( (BB>CC) & (BB>AA),BB+AA/2+DD/4,CC+DD/4));
X=(CLOSE-LC+(CLOSE-OPEN)/2+LC-REF(OPEN,1));
SI=16*X/R*MAX(AA,BB); ASI=SUM(SI,M1); ASIT=MA(ASI,M2);
return ASI,ASIT
def XSII(CLOSE, HIGH, LOW, N=102, M=7): #薛斯通道II
AA = MA((2*CLOSE + HIGH + LOW)/4, 5) #最新版DMA才支持 2021-12-4
TD1 = AA*N/100; TD2 = AA*(200-N) / 100
CC = ABS((2*CLOSE + HIGH + LOW)/4 - MA(CLOSE,20))/MA(CLOSE,20)
DD = DMA(CLOSE,CC); TD3=(1+M/100)*DD; TD4=(1-M/100)*DD
return TD1, TD2, TD3, TD4
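# Illustrative usage sketch (added; not part of the original library). The `close` series
# below is synthetic random-walk data, used only to show how the level-0/1/2 functions
# compose; replace it with real OHLC data in practice.
if __name__ == '__main__':
    close = 10 + np.cumsum(np.random.randn(300)) * 0.1     # synthetic close prices
    ma5, ma20 = MA(close, 5), MA(close, 20)                # simple moving averages
    dif, dea, macd = MACD(close)                           # standard 12/26/9 MACD
    golden = CROSS(ma5, ma20)                              # True on bars where MA5 crosses above MA20
    print('last MACD value:', RET(macd))                   # RET returns the final element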
# Contributions of more indicators and functions are welcome: https://github.com/mpquant/MyTT
# MyTT  Mai-language / 通达信 (TDX) / 同花顺 (THS) indicator implementations  https://github.com/mpquant/MyTT
# Advanced-function edition: the functions in this file have been verified to give fully correct results and can be used normally, but the code is more complex and intended for advanced use.
# The MyTT team refines every function, striving for efficiency, speed and elegant code; if you have a better implementation, please share it!
# Thanks to the following team members for their effort and contributions: 火焰, jqz1226, stanene, bcq
#------------------------ Utility functions ---------------------------------------------
def HHV(S, N):  # HHV, version that also accepts N given as a sequence
# type: (np.ndarray, Optional[int,float, np.ndarray]) -> np.ndarray
"""
    HHV(C, 5)  # highest close over the last 5 bars
"""
if isinstance(N, (int, float)):
return pd.Series(S).rolling(N).max().values
else:
res = np.repeat(np.nan, len(S))
for i in range(len(S)):
if (not np.isnan(N[i])) and N[i] <= i + 1:
res[i] = S[i + 1 - N[i]:i + 1].max()
return res
def LLV(S, N):  # LLV, version that also accepts N given as a sequence
# type: (np.ndarray, Optional[int,float, np.ndarray]) -> np.ndarray
"""
    LLV(C, 5)  # lowest close over the last 5 bars
"""
if isinstance(N, (int, float)):
return pd.Series(S).rolling(N).min().values
else:
res = np.repeat(np.nan, len(S))
for i in range(len(S)):
if (not np.isnan(N[i])) and N[i] <= i + 1:
res[i] = S[i + 1 - N[i]:i + 1].min()
return res
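# Worked example (added for clarity) of the sequence-N form: with
#   S = np.array([3, 1, 4, 1, 5]) and N = np.array([1, 2, 2, 3, 2]),
# HHV(S, N) takes, bar by bar, the max over the last N[i] values:
#   HHV(S, N) -> [3, 3, 4, 4, 5]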
def DSMA(X, N):  # deviation-scaled adaptive moving average     type: (np.ndarray, int) -> np.ndarray
"""
Deviation Scaled Moving Average (DSMA) Python by: jqz1226, 2021-12-27
Referred function from myTT: SUM, DMA
"""
a1 = math.exp(- 1.414 * math.pi * 2 / N)
b1 = 2 * a1 * math.cos(1.414 * math.pi * 2 / N)
c2 = b1
c3 = -a1 * a1
c1 = 1 - c2 - c3
    Zeros = np.pad(X[2:] - X[:-2], (2, 0), 'constant')
import logging
import numpy as np
import time
import warnings
from qcodes.instrument.base import Instrument
from qcodes.utils import validators as vals
from qcodes.instrument.parameter import ManualParameter
from pycqed.utilities.general import gen_sweep_pts
from pycqed.analysis import measurement_analysis as ma
from pycqed.analysis import fitting_models as fit_mods
class Qubit(Instrument):
'''
Abstract base class for the qubit object.
Contains a template for all methods a qubit (should) has.
N.B. This is not intended to be initialized.
Specific types of qubits should inherit from this class, different
hardware configurations can inherit from those to further specify
the functionality.
Possible inheritance tree
- Qubit (general template class)
- GateMon
- Transmon (contains qubit specific methods)
- transmon in setup a (contains setup specific methods)
Naming conventions for methods
The qubit object is a combination of a parameter holder and a
convenient way of performing measurements. As a convention the qubit
object contains the following types of methods designated by a prefix
- measure_xx() -> bool
A measure_xx method performs a specific experiment such as
a "spectroscopy" or "ramsey".
A measure_xx method typically has a hardware dependent
implementation
- calibrate_xx() -> bool
A calibrate_xx method defines a standard protocol to perform a
specific calibration.
A calibrate_xx method should be blind callable (callable without
specifying any arguments).
A calibrate_xx method should return a boolean indicating the
success of the calibration.
A calibrate_xx method should update the internal parameter it is
related to.
A calibrate_xx method should be defined in the abstract base class
whenever possible and rely on implementations of corresponding
measure_xx methods in the hardware dependent child classes.
- find_xx
similar to calibrate_xx() naming difference is historical
- calculate_
calculates a quantity based on parameters specified in the qubit
object e.g. calculate_frequency
Naming conventions for parameters:
(only for qubit objects after Sept 2017)
Parameters are grouped based on their functionality. This grouping
is achieved through the parameter name.
Prefixes are listed here:
instr_ : references to other instruments
ro_ : parameters relating to RO both CW and TD readout.
mw_ : parameters of single qubit MW control
spec_ : parameters relating to spectroscopy (single qubit CW)
fl_ : parameters relating to flux control, this includes both
flux pulsing as well as flux offset (DC).
tim_ : parameters related to timing, used to set latencies,
these are generally part of a device object (rather
than the qubit objects) but are listed here for
completeness.
cfg_ : configuration, this can be info relevant for compilers
or configurations that determine how the qubit operates.
examples are cfg_qasm and cfg_f_qubit_calc_method.
"" : properties of the qubit do not have a prefix, examples
are T1, T2, etc., F_ssro, F_RB, etc., f_qubit, E_C, etc.
Open for discussion:
- is a split at the level below qubit really required?
- is the name "find_" a good name or should it be merged with measure
or calibrate?
- Should the pulse-parameters be grouped here in some convenient way?
(e.g. parameter prefixes)
'''
def __init__(self, name, **kw):
super().__init__(name, **kw)
self.msmt_suffix = '_' + name # used to append to measurement labels
self._operations = {}
self.add_parameter('operations',
docstring='a list of all operations available on the qubit',
get_cmd=self._get_operations)
def connect_message(self, begin_time=None):
t = time.time() - (begin_time or self._t0)
con_msg = ('Connected to: {repr} '
'in {t:.2f} s'.format(repr=self.__repr__(), t=t))
print(con_msg)
def add_parameters(self):
"""
Add parameters to the qubit object grouped according to the
naming conventions described above
Prefixes are listed here:
instr_ : references to other instruments
ro_ : parameters relating to RO both CW and TD readout.
mw_ : parameters of single qubit MW control
spec_ : parameters relating to spectroscopy (single qubit CW)
fl_ : parameters relating to flux control, this includes both
flux pulsing as well as flux offset (DC).
cfg_ : configuration, this can be info relevant for compilers
or configurations that determine how the qubit operates.
examples are cfg_qasm and cfg_f_qubit_calc_method.
"" : properties of the qubit do not have a prefix, examples
are T1, T2, etc., F_ssro, F_RB, etc., f_qubit, E_C, etc.
"""
self.add_instrument_ref_parameters()
self.add_ro_parameters()
self.add_mw_parameters()
self.add_spec_parameters()
self.add_flux_parameters()
self.add_config_parameters()
self.add_generic_qubit_parameters()
def add_instrument_ref_parameters(self):
pass
def add_ro_parameters(self):
pass
def add_mw_parameters(self):
pass
def add_spec_parameters(self):
pass
def add_flux_parameters(self):
pass
def add_config_parameters(self):
pass
def add_generic_qubit_parameters(self):
pass
def get_idn(self):
return {'driver': str(self.__class__), 'name': self.name}
def _get_operations(self):
return self._operations
def measure_T1(self, times=None, MC=None,
close_fig: bool=True, update: bool=True,
prepare_for_timedomain: bool=True)->float:
"""
Performs a T1 experiment.
Args:
times: array of times to measure at, if None will define a
suitable range based on the last known T1
MC: instance of the MeasurementControl
close_fig: close the figure in plotting
update : update self.T1 with the measured value
returns:
T1 (float) the measured value
"""
# Note: I made all functions lowercase but for T1 it just looks too
# ridiculous
raise NotImplementedError()
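    # Added note: concrete subclasses implement this by sweeping `times`, measuring the
    # excited-state population and fitting an exponential decay. A minimal, hypothetical
    # analysis sketch (the helper name below is illustrative, not part of this interface):
    #
    #   from scipy.optimize import curve_fit
    #   def _fit_T1(times, populations):
    #       decay = lambda t, A, T1, offset: A * np.exp(-t / T1) + offset
    #       p0 = [populations[0] - populations[-1], times[-1] / 3, populations[-1]]
    #       popt, _ = curve_fit(decay, times, populations, p0=p0)
    #       return popt[1]  # fitted T1 in the same units as `times`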
def measure_rabi(self):
raise NotImplementedError()
def measure_flipping(self, number_of_flips=2*np.arange(60),
MC=None, label='',
equator=True,
analyze=True, close_fig=True, verbose=True):
raise NotImplementedError
def measure_ramsey(self):
raise NotImplementedError()
def measure_echo(self, times=None, MC=None,
analyze=True, close_fig=True, update=True):
raise NotImplementedError()
def measure_allxy(self, MC=None, analyze: bool=True,
close_fig: bool=True,
prepare_for_timedomain: bool=True):
"""
Performs an AllXY experiment.
Args:
MC : instance of the MeasurementControl
analyze : perform analysis
close_fig : close the figure in plotting
returns:
T1 (float) the measured value
"""
raise NotImplementedError()
def measure_ssro(self, MC=None, analyze: bool=True, nr_shots: int=1024*8,
cases=('off', 'on'), update_threshold: bool=True,
prepare: bool=True, no_figs: bool=False,
update: bool=True,
verbose: bool=True):
raise NotImplementedError()
def measure_spectroscopy(self, freqs, pulsed=True, MC=None,
analyze=True, close_fig=True):
raise NotImplementedError()
def measure_resonator_power(self, freqs, powers,
MC=None, analyze: bool=True,
close_fig: bool=True):
raise NotImplementedError()
def measure_transients(self, MC=None, analyze: bool=True,
cases=('off', 'on'),
prepare: bool=True, depletion_analysis: bool=True,
depletion_analysis_plot: bool=True,
depletion_optimization_window=None):
'''
Measure transients for the cases specified.
Args:
MC (instr): measurement control
analyze (bool) : run analysis and create figure
cases (list) : list of strings specifying cases to perform
transients for, valid cases are "off" and "on" corresponding
to preparing the qubit in the 0 or 1 state respectively.
prepare (bool) : if True runs prepare for timedomain before
measuring the transients
Returns:
list of numpy arrays containing the transients for the cases
specified.
'''
if prepare:
self.prepare_for_timedomain()
raise NotImplementedError()
def measure_motzoi(self, motzois=np.linspace(-.3, .3, 31),
MC=None, analyze=True, close_fig=True):
raise NotImplementedError()
def find_resonator_frequency(self, use_min=False,
update=True,
freqs=None,
MC=None, close_fig=True):
'''
Finds the resonator frequency by performing a heterodyne experiment
if freqs == None it will determine a default range dependent on the
last known frequency of the resonator.
'''
# This snippet exists to be backwards compatible 9/2017.
try:
freq_res_par = self.freq_res
freq_RO_par = self.ro_freq
except:
warnings.warn("Deprecation warning: rename f_res to freq_res")
freq_res_par = self.f_res
freq_RO_par = self.f_RO
if freqs is None:
f_center = freq_res_par()
if f_center is None:
raise ValueError('Specify "freq_res" to generate a freq span')
f_span = 10e6
f_step = 100e3
freqs = np.arange(f_center-f_span/2, f_center+f_span/2, f_step)
self.measure_heterodyne_spectroscopy(freqs, MC, analyze=False)
a = ma.Homodyne_Analysis(label=self.msmt_suffix, close_fig=close_fig)
if use_min:
f_res = a.min_frequency
else:
f_res = a.fit_results.params['f0'].value*1e9 # fit converts to Hz
if f_res > max(freqs) or f_res < min(freqs):
            logging.warning('extracted frequency outside of range of scan')
elif update: # don't update if the value is out of the scan range
freq_res_par(f_res)
freq_RO_par(f_res)
return f_res
def find_frequency(self, method='spectroscopy', pulsed=False,
steps=[1, 3, 10, 30, 100, 300, 1000],
freqs=None,
f_span=100e6,
use_max=False,
f_step=1e6,
verbose=True,
update=True,
close_fig=True):
"""
Finds the qubit frequency using either the spectroscopy or the Ramsey
method.
Frequency prediction is done using
"""
if method.lower() == 'spectroscopy':
if freqs is None:
f_qubit_estimate = self.calculate_frequency()
freqs = np.arange(f_qubit_estimate - f_span/2,
f_qubit_estimate + f_span/2,
f_step)
# args here should be handed down from the top.
self.measure_spectroscopy(freqs, pulsed=pulsed, MC=None,
analyze=True, close_fig=close_fig)
label = 'spec'
analysis_spec = ma.Qubit_Spectroscopy_Analysis(
label=label, close_fig=True)
if update:
if use_max:
self.freq_qubit(analysis_spec.peaks['peak'])
else:
self.freq_qubit(analysis_spec.fitted_freq)
# TODO: add updating and fitting
elif method.lower() == 'ramsey':
return self.calibrate_frequency_ramsey(
steps=steps, verbose=verbose, update=update,
close_fig=close_fig)
return self.freq_qubit()
def calibrate_motzoi(self, MC=None, verbose=True, update=True):
motzois = gen_sweep_pts(center=0, span=1, num=31)
# large range
a = self.measure_motzoi(MC=MC, motzois=motzois, analyze=True)
opt_motzoi = a.optimal_motzoi
if opt_motzoi > max(motzois) or opt_motzoi < min(motzois):
if verbose:
print('optimal motzoi {:.3f} '.format(opt_motzoi) +
'outside of measured span, aborting')
return False
# fine range around optimum
motzois = gen_sweep_pts(center=a.optimal_motzoi, span=.4, num=31)
a = self.measure_motzoi(motzois)
opt_motzoi = a.optimal_motzoi
if opt_motzoi > max(motzois) or opt_motzoi < min(motzois):
if verbose:
print('optimal motzoi {:.3f} '.format(opt_motzoi) +
'outside of measured span, aborting')
if update:
if verbose:
print('Setting motzoi to {:.3f}'.format(opt_motzoi))
self.motzoi(opt_motzoi)
return opt_motzoi
def calibrate_optimal_weights(self, MC=None, verify: bool=True,
analyze: bool=True, update: bool=True,
no_figs: bool=False)->bool:
raise NotImplementedError()
def calibrate_MW_RO_latency(self, MC=None, update: bool=True)-> bool:
"""
Calibrates parameters:
"latency_MW"
"RO_acq_delay"
Used to calibrate the delay of the MW pulse with respect to the
RO pulse and the RO acquisition delay.
The MW_pulse_latency is calibrated by setting the frequency of
the LO to the qubit frequency such that both the MW and the RO pulse
will show up in the RO.
Measuring the transients will show what the optimal latency is.
Note that a lot of averages may be required when using dedicated drive
lines.
This function does NOT overwrite the values that were set in the qubit
        object and as such can be used to verify the success of the calibration.
Currently (28/6/2017) the experiment has to be analysed by hand.
"""
raise NotImplementedError()
return True
def calibrate_Flux_pulse_latency(self, MC=None, update=True)-> bool:
"""
Calibrates parameter: "latency_Flux"
Used to calibrate the timing between the MW and Flux pulses.
Flux pulse latency is calibrated using a Ram-Z experiment.
The experiment works as follows:
- x90 | square_flux # defines t = 0
- wait (should be slightly longer than the pulse duration)
- x90
- wait
- RO
The position of the square flux pulse is varied to find the
optimal latency.
"""
raise NotImplementedError
return True
def calibrate_frequency_ramsey(self,
steps=[1, 1, 3, 10, 30, 100, 300, 1000],
stepsize:float =20e-9,
verbose: bool=True, update: bool=True,
close_fig: bool=True):
"""
        Runs an iterative procedure of Ramsey experiments to estimate the
        frequency detuning and converge to the qubit frequency, up to the limit
        set by T2*.
        steps:
            multiples of the initial stepsize at which to run Ramsey experiments.
        stepsize:
            smallest stepsize in ns for which to run Ramsey experiments.
"""
cur_freq = self.freq_qubit()
# Steps don't double to be more robust against aliasing
for n in steps:
times = np.arange(self.mw_gauss_width()*4,
50*n*stepsize, n*stepsize)
artificial_detuning = 2.5/times[-1]
self.measure_ramsey(times,
artificial_detuning=artificial_detuning,
freq_qubit=cur_freq,
label='_{}pulse_sep'.format(n),
analyze=False)
a = ma.Ramsey_Analysis(auto=True, close_fig=close_fig,
freq_qubit=cur_freq,
artificial_detuning=artificial_detuning,
close_file=False)
fitted_freq = a.fit_res.params['frequency'].value
measured_detuning = fitted_freq-artificial_detuning
cur_freq = a.qubit_frequency
qubit_ana_grp = a.analysis_group.create_group(self.msmt_suffix)
qubit_ana_grp.attrs['artificial_detuning'] = \
str(artificial_detuning)
qubit_ana_grp.attrs['measured_detuning'] = \
str(measured_detuning)
qubit_ana_grp.attrs['estimated_qubit_freq'] = str(cur_freq)
a.finish() # make sure I close the file
if verbose:
print('Measured detuning:{:.2e}'.format(measured_detuning))
print('Setting freq to: {:.9e}, \n'.format(cur_freq))
if times[-1] > 2.*a.T2_star['T2_star']:
                # If the last step is > T2* then the next one will be too, so stop
                if verbose:
                    print('Breaking off measurement because of T2*')
break
if verbose:
print('Converged to: {:.9e}'.format(cur_freq))
if update:
self.freq_qubit(cur_freq)
return cur_freq
def calculate_frequency(self, calc_method=None, V_per_phi0=None, V=None):
'''
Calculates an estimate for the qubit frequency.
Arguments are optional and parameters of the object are used if not
specified.
Args:
calc_method : can be "latest" or "flux" uses last known frequency
or calculates using the cosine arc model as specified
in fit_mods.Qubit_dac_to_freq
corresponding par. : cfg_qubit_freq_calc_method
V_per_phi0 : dac flux coefficient, converts volts to Flux.
Set to 1 to reduce the model to pure flux.
corresponding par. : fl_dc_V_per_phi
V : dac value used when calculating frequency
corresponding par. : fl_dc_V
Calculates the f01 transition frequency using the cosine arc model.
(function available in fit_mods. Qubit_dac_to_freq)
The parameter cfg_qubit_freq_calc_method determines how it is
calculated.
Parameters of the qubit object are used unless specified.
Flux can be specified both in terms of dac voltage or flux but not
both.
'''
if self.cfg_qubit_freq_calc_method() == 'latest':
qubit_freq_est = self.freq_qubit()
elif self.cfg_qubit_freq_calc_method() == 'flux':
if V is None:
V = self.fl_dc_V()
if V_per_phi0 is None:
V_per_phi0 = self.fl_dc_V_per_phi0()
qubit_freq_est = fit_mods.Qubit_dac_to_freq(
dac_voltage=V,
f_max=self.freq_max(),
E_c=self.E_c(),
dac_sweet_spot=self.fl_dc_V0(),
V_per_phi0=V_per_phi0,
asymmetry=self.asymmetry())
return qubit_freq_est
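    # Added note: fit_mods.Qubit_dac_to_freq implements the standard transmon "cosine arc"
    # model; schematically (the exact pycqed parametrization may differ slightly),
    #
    #   f01(V) = (f_max + E_c) * (cos(phi)**2 + d**2 * sin(phi)**2)**0.25 - E_c,
    #   with phi = pi * (V - dac_sweet_spot) / V_per_phi0 and d the junction asymmetry.
    #
    # For a symmetric SQUID (d = 0) this reduces to
    #   f01(V) = (f_max + E_c) * sqrt(|cos(phi)|) - E_c.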
def calibrate_mixer_offsets_drive(self, update: bool=True)-> bool:
'''
Calibrates the mixer skewness and updates the I and Q offsets in
the qubit object.
'''
raise NotImplementedError()
return True
def measure_heterodyne_spectroscopy(self, freqs, MC=None,
analyze=True, close_fig=True):
raise NotImplementedError()
def add_operation(self, operation_name):
self._operations[operation_name] = {}
def link_param_to_operation(self, operation_name, parameter_name,
argument_name):
"""
Links an existing param to an operation for use in the operation dict.
An example of where to use this would be the flux_channel.
Only one parameter is specified but it is relevant for multiple flux
pulses. You don't want a different parameter that specifies the channel
for the iSWAP and the CZ gate. This can be solved by linking them to
your operation.
Args:
operation_name (str): The operation of which this parameter is an
argument. e.g. mw_control or CZ
parameter_name (str): Name of the parameter
            argument_name (str): Name of the argument as used in the sequencer
**kwargs get passed to the add_parameter function
"""
if parameter_name not in self.parameters:
raise KeyError('Parameter {} needs to be added first'.format(
parameter_name))
if operation_name in self.operations().keys():
self._operations[operation_name][argument_name] = parameter_name
else:
raise KeyError('Unknown operation {}, add '.format(operation_name) +
'first using add operation')
def add_pulse_parameter(self,
operation_name,
parameter_name,
argument_name,
initial_value=None,
vals=vals.Numbers(),
**kwargs):
"""
Add a pulse parameter to the qubit.
Args:
operation_name (str): The operation of which this parameter is an
argument. e.g. mw_control or CZ
parameter_name (str): Name of the parameter
            argument_name (str): Name of the argument as used in the sequencer
**kwargs get passed to the add_parameter function
Raises:
KeyError: if this instrument already has a parameter with this
name.
"""
if parameter_name in self.parameters:
raise KeyError(
'Duplicate parameter name {}'.format(parameter_name))
if operation_name in self.operations().keys():
self._operations[operation_name][argument_name] = parameter_name
else:
raise KeyError('Unknown operation {}, add '.format(operation_name) +
'first using add operation')
self.add_parameter(parameter_name,
initial_value=initial_value,
vals=vals,
parameter_class=ManualParameter, **kwargs)
# for use in RemoteInstruments to add parameters to the server
# we return the info they need to construct their proxy
return
def get_operation_dict(self, operation_dict={}):
for op_name, op in self.operations().items():
operation_dict[op_name + ' ' + self.name] = {'target_qubit':
self.name}
for argument_name, parameter_name in op.items():
operation_dict[op_name + ' ' + self.name][argument_name] = \
self.get(parameter_name)
return operation_dict
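# Added note (illustrative): hardware-specific subclasses use the operations machinery
# above roughly as follows; `SomeHardwareTransmon` is a hypothetical subclass, not
# defined in this file.
#
#   qb = SomeHardwareTransmon('q0')
#   qb.add_operation('mw_control')
#   qb.add_pulse_parameter('mw_control', 'q0_amp180', 'amplitude', initial_value=0.5)
#   qb.get_operation_dict()
#   # -> {'mw_control q0': {'target_qubit': 'q0', 'amplitude': 0.5}}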
class Transmon(Qubit):
'''
circuit-QED Transmon as used in DiCarlo Lab.
Adds transmon specific parameters as well
'''
def __init__(self, name, **kw):
super().__init__(name, **kw)
self.add_parameter('E_c', unit='Hz',
parameter_class=ManualParameter,
vals=vals.Numbers())
self.add_parameter('E_j', unit='Hz',
parameter_class=ManualParameter,
vals=vals.Numbers())
self.add_parameter('anharmonicity', unit='Hz',
label='Anharmonicity',
docstring='Anharmonicity, negative by convention',
parameter_class=ManualParameter,
# typical target value
initial_value=-300e6,
vals=vals.Numbers())
self.add_parameter('T1', unit='s',
parameter_class=ManualParameter,
vals=vals.Numbers(0, 200e-6))
self.add_parameter('T2_echo', unit='s',
parameter_class=ManualParameter,
vals=vals.Numbers())
self.add_parameter('T2_star', unit='s',
parameter_class=ManualParameter,
vals=vals.Numbers())
self.add_parameter('dac_voltage', unit='mV',
parameter_class=ManualParameter)
self.add_parameter('dac_sweet_spot', unit='mV',
parameter_class=ManualParameter)
self.add_parameter('dac_flux_coefficient', unit='',
parameter_class=ManualParameter)
self.add_parameter('asymmetry', unit='',
initial_value=0,
parameter_class=ManualParameter)
self.add_parameter('dac_channel', vals=vals.Ints(),
parameter_class=ManualParameter)
self.add_parameter('f_qubit', label='qubit frequency', unit='Hz',
parameter_class=ManualParameter)
self.add_parameter('f_max', label='qubit frequency', unit='Hz',
parameter_class=ManualParameter)
self.add_parameter('f_res', label='resonator frequency', unit='Hz',
parameter_class=ManualParameter)
self.add_parameter('f_RO', label='readout frequency', unit='Hz',
parameter_class=ManualParameter)
# Sequence/pulse parameters
self.add_parameter('RO_pulse_delay', unit='s',
parameter_class=ManualParameter)
self.add_parameter('RO_pulse_length', unit='s',
parameter_class=ManualParameter)
self.add_parameter('RO_acq_marker_delay', unit='s',
parameter_class=ManualParameter)
self.add_parameter('RO_acq_marker_channel',
parameter_class=ManualParameter,
vals=vals.Strings())
self.add_parameter('RO_amp', unit='V',
parameter_class=ManualParameter)
# Time between start of pulses
self.add_parameter('pulse_delay', unit='s',
initial_value=0,
vals=vals.Numbers(0, 1e-6),
parameter_class=ManualParameter)
self.add_parameter('f_qubit_calc_method',
vals=vals.Enum('latest', 'dac', 'flux'),
# in the future add 'tracked_dac', 'tracked_flux',
initial_value='latest',
parameter_class=ManualParameter)
self.add_parameter('F_ssro',
initial_value=0,
label='RO assignment fidelity',
vals=vals.Numbers(0.0, 1.0),
parameter_class=ManualParameter)
self.add_parameter('F_discr',
initial_value=0,
label='RO discrimination fidelity',
vals=vals.Numbers(0.0, 1.0),
parameter_class=ManualParameter)
self.add_parameter('F_RB',
initial_value=0,
label='RB single qubit Clifford fidelity',
vals=vals.Numbers(0, 1.0),
parameter_class=ManualParameter)
self.add_parameter('V_per_phi0',
initial_value=1,
label='V per phi0',
vals=vals.Numbers(),
docstring='Conversion between flux and voltage. '
'How many volts need to be applied to '
'have a flux of 1 phi0 (pulsed).',
parameter_class=ManualParameter)
self.add_parameter('V_offset',
initial_value=0,
label='V offset',
vals=vals.Numbers(),
docstring='AWG voltage at which the sweet spot is '
'found (pulsed).',
parameter_class=ManualParameter)
def calculate_frequency(self,
dac_voltage=None,
flux=None):
'''
Calculates the f01 transition frequency from the cosine arc model.
(function available in fit_mods. Qubit_dac_to_freq)
Parameters of the qubit object are used unless specified.
Flux can be specified both in terms of dac voltage or flux but not
both.
'''
if dac_voltage is not None and flux is not None:
raise ValueError('Specify either dac voltage or flux but not both')
if self.f_qubit_calc_method() == 'latest':
f_qubit_estimate = self.f_qubit()
elif self.f_qubit_calc_method() == 'dac':
if dac_voltage is None:
dac_voltage = self.IVVI.get_instr().get(
'dac{}'.format(self.dac_channel()))
f_qubit_estimate = fit_mods.Qubit_dac_to_freq(
dac_voltage=dac_voltage,
f_max=self.f_max(),
E_c=self.E_c(),
dac_sweet_spot=self.dac_sweet_spot(),
dac_flux_coefficient=self.dac_flux_coefficient(),
asymmetry=self.asymmetry())
elif self.f_qubit_calc_method() == 'flux':
if flux is None:
flux = self.FluxCtrl.get_instr().get(
'flux{}'.format(self.dac_channel()))
f_qubit_estimate = fit_mods.Qubit_dac_to_freq(
dac_voltage=flux,
f_max=self.f_max(),
E_c=self.E_c(),
dac_sweet_spot=0,
dac_flux_coefficient=1,
asymmetry=self.asymmetry())
return f_qubit_estimate
def calculate_flux(self, frequency):
raise NotImplementedError()
def prepare_for_timedomain(self):
raise NotImplementedError()
def prepare_for_continuous_wave(self):
raise NotImplementedError()
def prepare_readout(self):
"""
Configures the readout. Consists of the following steps
- instantiate the relevant detector functions
- set the microwave frequencies and sources
- generate the RO pulse
- set the integration weights
"""
raise NotImplementedError()
def calibrate_frequency_ramsey(self, steps=[1, 1, 3, 10, 30, 100, 300, 1000],
stepsize=None, verbose=True, update=True,
close_fig=True):
if stepsize is None:
stepsize = abs(1/self.f_pulse_mod.get())
cur_freq = self.f_qubit.get()
# Steps don't double to be more robust against aliasing
for n in steps:
times = np.arange(self.pulse_delay.get(),
50*n*stepsize, n*stepsize)
artificial_detuning = 2.5/times[-1]
self.measure_ramsey(times,
artificial_detuning=artificial_detuning,
f_qubit=cur_freq,
label='_{}pulse_sep'.format(n),
analyze=False)
a = ma.Ramsey_Analysis(auto=True, close_fig=close_fig,
qb_name=self.name,
artificial_detuning=artificial_detuning,
close_file=False)
fitted_freq = a.fit_res.params['frequency'].value
measured_detuning = fitted_freq-artificial_detuning
cur_freq -= measured_detuning
qubit_ana_grp = a.analysis_group.create_group(self.msmt_suffix)
qubit_ana_grp.attrs['artificial_detuning'] = \
str(artificial_detuning)
qubit_ana_grp.attrs['measured_detuning'] = \
str(measured_detuning)
qubit_ana_grp.attrs['estimated_qubit_freq'] = str(cur_freq)
a.finish() # make sure I close the file
if verbose:
print('Measured detuning:{:.2e}'.format(measured_detuning))
print('Setting freq to: {:.9e}, \n'.format(cur_freq))
if times[-1] > 2.*a.T2_star['T2_star']:
                # If the last step is > T2* then the next one will be too, so stop
                if verbose:
                    print('Breaking off measurement because of T2*')
break
if verbose:
print('Converged to: {:.9e}'.format(cur_freq))
if update:
self.f_qubit.set(cur_freq)
return cur_freq
def find_frequency(self, method='spectroscopy', pulsed=False,
steps=[1, 3, 10, 30, 100, 300, 1000],
freqs=None,
f_span=100e6,
use_max=False,
f_step=1e6,
verbose=True,
update=True,
close_fig=True):
"""
Finds the qubit frequency using either the spectroscopy or the Ramsey
method.
Frequency prediction is done using
"""
if method.lower() == 'spectroscopy':
if freqs is None:
f_qubit_estimate = self.calculate_frequency()
freqs = np.arange(f_qubit_estimate - f_span/2,
f_qubit_estimate + f_span/2,
f_step)
# args here should be handed down from the top.
self.measure_spectroscopy(freqs, pulsed=pulsed, MC=None,
analyze=True, close_fig=close_fig)
if pulsed:
label = 'pulsed-spec'
else:
label = 'spectroscopy'
analysis_spec = ma.Qubit_Spectroscopy_Analysis(
label=label, close_fig=True)
if update:
if use_max:
self.f_qubit(analysis_spec.peaks['peak'])
else:
self.f_qubit(analysis_spec.fitted_freq)
# TODO: add updating and fitting
elif method.lower() == 'ramsey':
return self.calibrate_frequency_ramsey(
steps=steps, verbose=verbose, update=update,
close_fig=close_fig)
return self.f_qubit()
def find_frequency_pulsed(self):
raise NotImplementedError()
def find_frequency_cw_spec(self):
raise NotImplementedError()
def calibrate_pulse_amplitude_coarse(self,
amps=np.linspace(-.5, .5, 31),
close_fig=True, verbose=False,
MC=None, update=True,
take_fit_I=False):
"""
Calibrates the pulse amplitude using a single rabi oscillation
"""
self.measure_rabi(amps, n=1, MC=MC, analyze=False)
a = ma.Rabi_Analysis(close_fig=close_fig)
# Decide which quadrature to take by comparing the contrast
if take_fit_I or len(a.measured_values) == 1:
ampl = abs(a.fit_res[0].params['period'].value)/2.
elif (np.abs(max(a.measured_values[0]) -
min(a.measured_values[0]))) > (
np.abs(max(a.measured_values[1]) -
min(a.measured_values[1]))):
ampl = a.fit_res[0].params['period'].value/2.
else:
ampl = a.fit_res[1].params['period'].value/2.
if update:
self.Q_amp180.set(ampl)
return ampl
def calibrate_pulse_amplitude_flipping(self,
MC=None, update: bool=True,
fine_accuracy: float=0.005,
desired_accuracy: float=0.00005,
max_iterations: int=10,
verbose: bool=True):
"""
Calibrates the pulse amplitude using a flipping sequence.
The flipping sequence itself should be implemented using the
"measure_flipping" method.
It converges to the optimal amplitude using first a coarse and then
a finer scan with more pulses.
Args:
MC : The measurement control used, if None
uses the one specified in the qubit object.
updates (bool) : if True updates the Q_amp180 parameter
fine_accuracy (float) : the accuracy to switch to the fine scan
desired_accuracy (float): the accuracy after which to terminate
the optimization
max_iterations (int) : always terminate after this number of
optimizations.
verbose (bool): if true adds additional print statements.
returns:
success (bool): True if optimization converged.
"""
success = False
fine = False
for k in range(max_iterations):
old_Q_amp180 = self.Q_amp180()
if not fine:
number_of_flips = 2*np.arange(60)
if fine:
number_of_flips = 8*np.arange(60)
a = self.measure_flipping(MC=MC, number_of_flips=number_of_flips)
Q_amp180_scale_factor = a.get_scale_factor()
# Check if Q_amp180_scale_factor is within boundaries
if Q_amp180_scale_factor > 1.1:
Q_amp180_scale_factor = 1.1
if verbose:
print('Qubit drive scaling %.3f ' % Q_amp180_scale_factor
+ 'is too high, capping at 1.1')
elif Q_amp180_scale_factor < 0.9:
Q_amp180_scale_factor = 0.9
if verbose:
print('Qubit drive scaling %.3f ' % Q_amp180_scale_factor
+ 'is too low, capping at 0.9')
self.Q_amp180(np.round(Q_amp180_scale_factor * self.Q_amp180(), 7))
if verbose:
print('Q_amp180_scale_factor: {:.4f}, new Q_amp180: {}'.format(
Q_amp180_scale_factor, self.Q_amp180()))
if (abs(Q_amp180_scale_factor-1) < fine_accuracy) and (not fine):
if verbose:
print('Getting close to optimum, increasing sensitivity')
fine = True
if abs(Q_amp180_scale_factor-1) < desired_accuracy:
if verbose:
print('within threshold')
success = True
break
# If converged?
if success and verbose:
print('Drive calibration set to {}'.format(self.Q_amp180()))
if not update or not success:
self.Q_amp180(old_Q_amp180)
return success
def find_pulse_amplitude(self, amps=np.linspace(-.5, .5, 31),
N_steps=[3, 7, 13, 17], max_n=18,
close_fig=True, verbose=False,
MC=None, update=True, take_fit_I=False):
'''
Finds the pulse-amplitude using a Rabi experiment.
Fine tunes by doing a Rabi around the optimum with an odd
multiple of pulses.
Args:
amps: (array or float) amplitudes of the first Rabi if an array,
if a float is specified it will be treated as an estimate
for the amplitude to be found.
N_steps: (list of int) number of pulses used in the fine tuning
            max_n: (int) break off if N > max_n
'''
if MC is None:
MC = self.MC.get_instr()
if np.size(amps) != 1:
ampl = self.calibrate_pulse_amplitude_coarse(
amps=amps, close_fig=close_fig, verbose=verbose,
MC=MC, update=update,
take_fit_I=take_fit_I)
else:
ampl = amps
if verbose:
print('Initial Amplitude:', ampl, '\n')
for n in N_steps:
if n > max_n:
break
else:
old_amp = ampl
ampl_span = 0.5*ampl/n
                amps = np.linspace(ampl-ampl_span, ampl+ampl_span, 15)
from utils import softmax_loss,softmax
from plot_utils import *
import numpy as np
import gzip, pickle
from sklearn.datasets import fetch_mldata
import collections
from sklearn.model_selection import train_test_split
import sys
class TwoLayerNeuralNetwork:
def __init__(self, num_features=784, num_hiddens=1000, num_classes=10):
self.num_hiddens = num_hiddens
self.num_classes = num_classes
# random initialization: create random weights, set all biases to zero
self.params = {}
self.params['W1'] = np.random.randn(num_features, num_hiddens) * 0.001
self.params['W2'] = np.random.randn(num_hiddens, num_classes) * 0.001
self.params['b1'] = np.zeros((num_hiddens,))
self.params['b2'] = np.zeros((num_classes,))
def forward(self, X):
        # unpack parameters
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
# forward step
h_in = X @ W1 + b1 # hidden layer input
h = np.maximum(0, h_in) # hidden layer output (using ReLU)
scores = h @ W2 + b2 # neural net output
return scores
def train_step(self, X, y):
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
# forward step
h_in = X @ W1 + b1 # hidden layer input
h = np.maximum(0, h_in) # hidden layer output (using ReLU)
scores = h @ W2 + b2 # neural net output
#print("scores values is {} ".format(scores))
# compute loss
loss, dscores = softmax_loss(scores, y)
# backward step
db2 = dscores.sum(axis=0)
dW2 = h.T @ dscores
dh = dscores @ W2.T
dh[h_in < 0] = 0.0
db1 = dh.sum(axis=0)
dW1 = X.T @ dh
gradient = {'W1': dW1, 'b1': db1, 'W2': dW2, 'b2': db2}
return loss, gradient
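    # Added note: the backward step above is plain backpropagation for
    # scores = relu(X @ W1 + b1) @ W2 + b2 with a softmax cross-entropy loss. With
    # dscores = dL/dscores returned by softmax_loss, the chain rule gives
    #
    #   dW2 = h.T @ dscores          db2 = dscores.sum(axis=0)
    #   dh  = dscores @ W2.T, zeroed where the ReLU input h_in < 0
    #   dW1 = X.T @ dh               db1 = dh.sum(axis=0)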
def train(self, X_train, y_train, X_valid, y_valid, batch_size=50,
alpha=0.001, lmbda=0.0001, num_epochs=10):
m, n = X_train.shape
num_batches = m // batch_size
report = "{:3d}: training loss = {:.2f} | validation loss = {:.2f}"
losses = []
for epoch in range(num_epochs):
train_loss = 0.0
for _ in range(num_batches):
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
# select a random mini-batch
batch_idx = np.random.choice(m, batch_size, replace=False)
X_batch, y_batch = X_train[batch_idx], y_train[batch_idx]
# train on mini-batch
data_loss, gradient = self.train_step(X_batch, y_batch)
                reg_loss = 0.5 * lmbda * (np.sum(W1 ** 2) + np.sum(W2 ** 2))
from __future__ import division
from __future__ import print_function
from builtins import str
from builtins import range
from past.utils import old_div
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy.interpolate import RegularGridInterpolator
from scipy.interpolate import LinearNDInterpolator
from scipy.interpolate import NearestNDInterpolator
import math
from tqdm import trange
import tqdm
import time
import matplotlib
from matplotlib import ticker
from matplotlib.widgets import Slider, Button, RadioButtons
from scipy.optimize import curve_fit
import datetime
colorbar = True
#matplotlib.rc('axes', color_cycle=['r', 'g', 'b', '#004060'])
mainlabel = ""
units = "$\AA^{-1}$"
xunits = units
yunits = units
zunits = units
contours = 200
DPI = 300
format = ".png"
text = "Structure Factor"
PLOT_EWALDS = True # enable ewald-corrected SF plots
savelog = True
savelin = True
NBINSRAD = 0
normplot = 1
FP_THRESHOLD = 1.0E-12
theta = np.pi / 2
title_fontsize = 9
path = ""
def make_flat_plot(D, xr, yr, zr):
    if len(xr) != 1 and len(yr) != 1 and len(zr) != 1:
        print("error in make_flat_plot! one of these lengths must be 1")
        exit()
    pts = []  # collect (x, y, z, SF) rows along the flat slice
    for ix in xr:
        for iy in yr:
            for iz in zr:
                r = D[ix, iy, iz, :4]
                pts.append(r)
    return pts
def pl(title, obj):
delim = "="*20
print(delim, title, delim)
print(obj)
def pli(obj, title=""):
pl(title, obj)
buf = input("enter q to quit, anything else to continue") # raw_input renamed to input() in python3
if buf == 'q':
exit()
def ple(title, obj):
pl(title, obj)
exit()
def csplot_wlog(X, Y, Z, contours, lab, xlab, ylab, **kwargs):
csplot(X, Y, Z, contours, lab, xlab, ylab, **kwargs)
csplot(X, Y, np.log(Z), contours, "log_"+lab, xlab, ylab, **kwargs)
def csplot(X, Y, Z, contours, lab, xlab, ylab,**kwargs):
title = lab+" S("+xlab+","+ylab+")"
fname = lab+"_"+xlab+"_"+ylab
fig, ax = plt.subplots()
plt.suptitle(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
if normplot == 1:
cax = plt.contourf(X, Y, Z / np.amax(Z), contours, vmin=0.0, vmax=0.05, **kwargs)
else:
cax = plt.contourf(X, Y, Z, contours, vmax=0.01*np.amax(Z), **kwargs)
# ax.set_aspect((np.amax(Y)-np.amin(Y))/(np.amax(X)-np.amin(X)))
# ax.set_aspect('auto')
cbar = fig.colorbar(cax)
plt.savefig(path+fname+format, dpi=DPI)
plt.clf()
def sfplot(data, lcscale, **kwargs):
""" data: plot slice through structure factor"""
if not os.path.exists(path):
os.makedirs(path)
cspos = 0.0
la = []
lb = 0
an = ['x', 'y', 'z'] # axes names
for i in range(data.shape[2] - 1):
if np.unique(data[..., i]).size > 1:
la.append(i)
else:
lb = i
cspos = data[0, 0, i]
title = mainlabel + "\n" + text + "\n" + an[lb] + "=" + str(round(cspos, 2)) + zunits
ltitle = mainlabel + "\n" + "log " + text + "\n" + an[lb] + "=" + str(round(cspos, 2)) + zunits
xlab = an[la[0]]
ylab = an[la[1]]
filename = path + an[lb] + "=" + str(round(cspos, 2))
xlab += "(" + xunits + ")"
ylab += "(" + yunits + ")"
if savelog:
plt.suptitle(ltitle, fontsize=title_fontsize)
plt.xlabel(xlab)
plt.ylabel(ylab)
max_log = np.amax(np.log(data[..., 3]))
plt.contourf(data[..., la[0]], data[..., la[1]], np.log(data[..., 3]), contours, vmax=lcscale*max_log, **kwargs)
plt.savefig(filename+"_log"+format, dpi=DPI)
plt.clf()
if savelin:
plt.suptitle(title, fontsize=title_fontsize)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.contourf(data[..., la[0]], data[..., la[1]], data[..., 3], contours, **kwargs)
plt.savefig(filename+format, dpi=DPI)
plt.clf()
def radial_integrate(D, Nbins, outputname):
SF = D[:, :, :, 3]
R = (D[:, :, :, 0]**2).astype(np.float16) + (D[:, :, :, 1]**2).astype(np.float16) + (D[:, :, :, 2]**2).astype(np.float16)
H, E = np.histogram(R, bins=Nbins, weights=SF)
Hc, E = np.histogram(R, bins=Nbins)
Hc = np.where(Hc != 0, Hc, 1.0)
H /= Hc
H[:1] = 0.0
H /= np.amax(H)
plt.plot(E[:-1], H)
plt.xlim(0, 5)
plt.savefig(outputname, dpi=DPI)
def spherical_integrate(D):
exit()
def Plot_Ewald_Sphere_Correction_old(D, wavelength_angstroms):
""" pass full 3d data,SF,wavelength in angstroms """
X = D[:, 0, 0, 0]
Y = D[0, :, 0, 1]
Z = D[0, 0, :, 2]
SF = D[:, :, :, 3]
K_ES = 2.0*math.pi/wavelength_angstroms # calculate k for incident xrays in inverse angstroms
ES = RegularGridInterpolator((X, Y, Z), SF)
pts = []
for ix in range(D.shape[0]):
xsq = X[ix]**2.0
for iy in range(D.shape[1]):
R = np.sqrt(xsq+Y[iy]**2.0)
theta = np.arctan(old_div(R,K_ES))
xnew = X[ix]*np.cos(theta)
ynew = Y[iy]*np.cos(theta)
znew = K_ES*(1.0-np.cos(theta))
pts.append((X[ix], Y[iy], xnew, ynew, znew))
pts = np.asarray(pts)
EWD = ES(pts[:, 2:])
EWD = EWD.reshape(D.shape[0], D.shape[1])
    plt.contourf(D[:, :, 0, 0], D[:, :, 0, 1], EWD, 200)
plt.savefig("EWxy.png",dpi=300)
plt.clf()
    plt.contourf(D[:, :, 0, 0], D[:, :, 0, 1], np.log(EWD), 200)
plt.savefig("EWxylog.png", dpi=300)
plt.clf()
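# Added note: both Ewald-correction routines map a detector point (q_x, q_y) onto the
# Ewald sphere of radius K_ES = 2*pi/lambda. With R = sqrt(q_x**2 + q_y**2) and
# theta = arctan(R / K_ES), the sampled reciprocal-space point is
#   (q_x*cos(theta), q_y*cos(theta), K_ES*(1 - cos(theta))),
# which lies on the sphere of radius K_ES centred at (0, 0, K_ES). A small standalone
# check of that identity (added for illustration, not used elsewhere):
def _ewald_point_check(qx, qy, wavelength_angstroms):
    k = 2.0 * math.pi / wavelength_angstroms
    theta = math.atan(math.sqrt(qx ** 2 + qy ** 2) / k)
    x, y, z = qx * math.cos(theta), qy * math.cos(theta), k * (1.0 - math.cos(theta))
    # distance from the sphere centre (0, 0, k) should equal the radius k
    return abs(math.sqrt(x ** 2 + y ** 2 + (z - k) ** 2) - k) < 1e-9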
def Plot_Ewald_Sphere_Correction(D, wavelength_angstroms, ucell=[], cscale=1, lcscale=1, **kwargs):
""" pass full 3d data,SF,wavelength in angstroms """
# cscale : factor by which to scale the maximum value of the colorbar
# lcscale : factor by which to scale the maximum value of the colorbar
if not os.path.exists(path):
os.makedirs(path)
X = D[:, 0, 0, 0]
Y = D[0, :, 0, 1]
Z = D[0, 0, :, 2]
SF = D[:, :, :, 3]
K_ES = 2.0*math.pi/wavelength_angstroms # calculate k for incident xrays in inverse angstroms
ES = RegularGridInterpolator((X, Y, Z), SF, bounds_error=False)
xypts = []
for ix in range(D.shape[0]):
xsq = X[ix]**2.0
for iy in range(D.shape[1]):
theta = np.arctan(old_div(np.sqrt(xsq + Y[iy]**2.0),K_ES))
xypts.append((X[ix]*np.cos(theta), Y[iy]*np.cos(theta), K_ES*(1.0 - np.cos(theta))))
xzpts = []
for ix in range(D.shape[0]):
xsq = X[ix]**2.0
for iz in range(D.shape[2]):
theta = np.arctan(old_div(np.sqrt(xsq + Z[iz]**2.0),K_ES))
xzpts.append((X[ix]*np.cos(theta), K_ES*(1.0-np.cos(theta)), Z[iz]*np.cos(theta)))
yzpts = []
for iy in range(D.shape[1]):
ysq = Y[iy]**2.0
for iz in range(D.shape[2]):
theta = np.arctan(old_div(np.sqrt(ysq+Z[iz]**2.0),K_ES))
yzpts.append((K_ES*(1.0-np.cos(theta)), Y[iy]*np.cos(theta), Z[iz]*np.cos(theta)))
xypts = np.asarray(xypts)
xzpts = np.asarray(xzpts)
yzpts = np.asarray(yzpts)
EWDxy = ES(xypts)
EWDxz = ES(xzpts)
EWDyz = ES(yzpts)
EWDxy = EWDxy.reshape(D.shape[0], D.shape[1])
EWDxz = EWDxz.reshape(D.shape[0], D.shape[2])
EWDyz = EWDyz.reshape(D.shape[1], D.shape[2])
title = "Ewald Corrected Structure Factor \n $\lambda=$"+str(wavelength_angstroms)+" $\AA$ $k_{ew}=$"+str(round(K_ES,2))+" $\AA^{-1}$"
ltitle = 'log ' + title
xlab = 'x (' + units + ")"
ylab = 'y (' + units + ")"
zlab = 'z (' + units + ")"
fname = "Ewald_"
plt.figure(1)
plt.suptitle(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
EWDmax_xy = np.amax(EWDxy)
plt.contourf(D[:, :, 0, 0], D[:, :, 0, 1], EWDxy, contours, vmax=cscale*EWDmax_xy, **kwargs)
plt.savefig(path + fname + "xy" + format, dpi=DPI)
plt.clf()
plt.figure(2)
plt.suptitle(ltitle)
plt.xlabel(xlab)
plt.ylabel(ylab)
EWDmax_xylog = np.amax(np.log(EWDxy))
plt.contourf(D[:, :, 0, 0], D[:, :, 0, 1], np.log(EWDxy), contours, vmax=lcscale*EWDmax_xylog, **kwargs)
plt.savefig(path + fname + "xylog" + format, dpi=DPI)
plt.clf()
plt.figure(3)
plt.suptitle(title)
plt.xlabel(xlab)
plt.ylabel(zlab)
EWDmax_xz = np.amax(EWDxz)
plt.contourf(D[:, 0, :, 0], D[:, 0, :, 2], EWDxz, contours, vmax=cscale*EWDmax_xz, **kwargs)
plt.savefig(path + fname + "xz" + format, dpi=DPI)
plt.clf()
plt.figure(4)
plt.suptitle(ltitle)
plt.xlabel(xlab)
plt.ylabel(zlab)
EWDmax_xzlog = np.amax(np.log(EWDxz))
plt.contourf(D[:, 0, :, 0], D[:, 0, :, 2], np.log(EWDxz), contours, vmax=lcscale*EWDmax_xzlog, **kwargs)
lims = [np.amax(D[:, 0, :, 0]), np.amax(D[:, 0, :, 2])]
qmax = min(lims)
plt.xlim([-qmax, qmax])
plt.ylim([-qmax, qmax])
plt.savefig(path + fname + "xzlog" + format, dpi=DPI)
plt.clf()
plt.figure(5)
plt.suptitle(title)
plt.xlabel(ylab)
plt.ylabel(zlab)
EWDmax_yz = np.amax(EWDyz)
plt.contourf(D[0, :, :, 1], D[0, :, :, 2], EWDyz, contours, vmax=cscale*EWDmax_yz, **kwargs)
plt.savefig(path + fname + "yz" + format, dpi=DPI)
plt.clf()
plt.figure(6)
plt.suptitle(ltitle)
plt.xlabel(ylab)
plt.ylabel(zlab)
EWDmax_yzlog = np.amax(np.log(EWDyz))
plt.contourf(D[0, :, :, 1], D[0, :, :, 2], np.log(EWDyz), contours, vmax=lcscale*EWDmax_yzlog, **kwargs)
plt.savefig(path + fname + "yzlog" + format, dpi=DPI)
plt.clf()
def lorentz(points, a, b):
"""
    :param points: positions at which to evaluate the Lorentzian
    :param a: width parameter; the full width at half maximum is w = pi / a
    :param b: position of the maximum
    :return: unit-height Lorentzian evaluated at `points`
"""
w = np.pi / a
x = (b - points) / (w/2)
return 1 / (1 + x**2)
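# Added note (illustrative): `lorentz` is a unit-height Lorentzian with its maximum at `b`
# and full width at half maximum w = pi / a, e.g.
#
#   x = np.linspace(-5, 5, 1001)
#   y = lorentz(x, a=2.0, b=0.0)   # peak at x = 0, FWHM = pi/2
#   # y.max() == 1.0 and the half-maximum points sit at x = +/- pi/4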
def inverse_ft(D, ucell):
X = D[:, 0, 0, 0]
Y = D[0, :, 0, 1]
Z = D[0, 0, :, 2]
Z += Z[np.argmin(abs(Z))]
SF = D[..., 3]
fbin_x = X[1] - X[0] # size of x bins in fourier space
fbin_y = Y[1] - Y[0] # size of y bins in fourier space
fbin_z = Z[1] - Z[0] # size of z bins in fourier space
real_x = 2 * np.pi / fbin_x # largest x dimension in real space
real_y = 2 * np.pi / fbin_y # largest y dimension in real space
real_z = 2 * np.pi / fbin_z # largest z dimension in real space
rbin_x = real_x / X.shape[0]
rbin_y = real_y / Y.shape[0]
rbin_z = real_z / Z.shape[0]
X_real = np.linspace(-real_x / 2, real_x / 2, X.shape[0])
Y_real = np.linspace(-real_y / 2, real_y / 2, Y.shape[0])
Z_real = np.linspace(-real_z / 2, real_z / 2, Z.shape[0])
# reorder lists so they conform to numpy (https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.fft.ifftn.html)
start = list(X).index(0)
X_reordered = np.concatenate((X[start:], X[:start]))
ndx_x = [list(X).index(i) for i in X_reordered]
start = list(Y).index(0)
Y_reordered = np.concatenate((Y[start:], Y[:start]))
ndx_y = [list(Y).index(i) for i in Y_reordered]
start = list(Z).index(0)
Z_reordered = np.concatenate((Z[start:], Z[:start]))
ndx_z = [list(Z).index(i) for i in Z_reordered]
SF_reordered = SF[ndx_x, :, :]
SF_reordered = SF_reordered[:, ndx_y, :]
SF_reordered = SF_reordered[:, :, ndx_z]
# inverse fourier transform
inverse_fft = np.fft.ifftn(SF_reordered)
# reorder again
inverse_fft = inverse_fft[ndx_x, :, :]
inverse_fft = inverse_fft[:, ndx_y, :]
inverse_fft = inverse_fft[:, :, ndx_z]
# fourier transform of inversion as a test
# ft = np.abs(np.fft.fftn(inverse_fft))**2
# ft = ft[ndx_x, :]
# ft = ft[:, ndx_y]
# plt.imshow(ft)
# plt.show()
inverse_fft = inverse_fft.real / np.amax(inverse_fft.real)
final, rfin, zfin = angle_average(X_real, Y_real, Z_real, inverse_fft, ucell=ucell)
rbound1 = 0
rbound2 = 0
while rfin[rbound1] < -15:
rbound1 += 1
while rfin[rbound2] < 15:
rbound2 += 1
zbound1 = 0
zbound2 = 0
while zfin[0][zbound1] < -15:
zbound1 += 1
while zfin[0][zbound2] < 15:
zbound2 += 1
levels = np.linspace(np.amin(final), 0.001 * np.amax(final), 200)
plt.contourf(rfin[rbound1:rbound2], zfin[0][zbound1:zbound2], final[rbound1:rbound2, zbound1:zbound2].T,
levels=levels, cmap='seismic', extend='max')
plt.colorbar()
plt.xlabel('r ($\AA$)')
plt.ylabel('z ($\AA$)')
plt.show()
exit()
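# NOTE (added): the axis/array reordering in inverse_ft above (splitting each axis at its
# zero-frequency index and swapping the halves) mirrors numpy.fft.ifftshift for a symmetric,
# odd-length axis. A minimal commented sketch of that equivalence, kept out of the normal code
# path (illustrative only; the array below is a hypothetical example):
# X = np.linspace(-2, 2, 5)                        # [-2, -1, 0, 1, 2]
# start = list(X).index(0)
# manual = np.concatenate((X[start:], X[:start]))  # [0, 1, 2, -2, -1]
# assert np.allclose(manual, np.fft.ifftshift(X))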
def angle_average(X, Y, Z, SF, ucell=None):
ES = RegularGridInterpolator((X, Y, Z), SF, bounds_error=False)
THETA_BINS_PER_INV_ANG = 20.
MIN_THETA_BINS = 10 # minimum allowed bins
RBINS = 100
if ucell is not None:
a1 = ucell[0]
a2 = ucell[1]
a3 = ucell[2]
b1 = (np.cross(a2, a3)) / (np.dot(a1, np.cross(a2, a3)))
b2 = (np.cross(a3, a1)) / (np.dot(a2, np.cross(a3, a1)))
b3 = (np.cross(a1, a2)) / (np.dot(a3, np.cross(a1, a2)))
b_inv = np.linalg.inv(np.vstack((b1, b2, b3)))
ZBINS = Z.shape[0] # 400
XR = (X[-1] - X[0])
YR = (Y[-1] - Y[0])
Rmax = min(XR, YR) / 2.0
Rmax *= 0.95
rarr, rspace = np.linspace(0.0, Rmax, RBINS, retstep=True)
zar = np.linspace(Z[0], Z[-1], ZBINS)
oa = np.zeros((rarr.shape[0], zar.shape[0]))
circ = 2.*np.pi*rarr # circumference
for ir in range(rarr.shape[0]):
NTHETABINS = max(int(THETA_BINS_PER_INV_ANG*circ[ir]), MIN_THETA_BINS) #calculate number of bins at this r
thetas = np.linspace(0.0, np.pi*2.0, NTHETABINS, endpoint=False) # generate theta array
t, r, z = np.meshgrid(thetas, rarr[ir], zar) # generate grid of cylindrical points
xar = r*np.cos(t) # set up x,y coords
yar = r*np.sin(t)
pts = np.vstack((xar.ravel(), yar.ravel(), z.ravel())).T # reshape for interpolation
if ucell is not None:
# pts = mc_inv(pts, ucell)
pts = np.matmul(pts, b_inv)
oa[ir, :] = np.average(ES(pts).reshape(r.shape), axis=1) # store average values in final array
mn = np.nanmin(oa)
oa = np.where(np.isnan(oa), mn, oa)
rad_avg = np.average(oa)  # average over all (r, z) bins, used as the normalization constant below
oa /= rad_avg # normalize
# set up data for contourf plot by making it symmetrical
final = np.append(oa[::-1, :], oa[1:], axis=0) # SF
rfin = np.append(-rarr[::-1], rarr[1:]) # R
zfin = np.append(z[:, 0, :], z[1:, 0, :], axis=0) # Z
return final, rfin, zfin
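# Hedged usage sketch for angle_average (illustrative only; the grids and the dummy isotropic
# "structure factor" below are hypothetical placeholders, not data produced by this module):
# Xg = Yg = Zg = np.linspace(-2.0, 2.0, 41)
# qx, qy, qz = np.meshgrid(Xg, Yg, Zg, indexing='ij')
# SFg = np.exp(-(qx**2 + qy**2 + qz**2))
# final, rfin, zfin = angle_average(Xg, Yg, Zg, SFg)
# # 'final' is the cylindrically averaged intensity on the symmetric (rfin, zfin) grid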
def Rspots(R, Z, waxs, theta=37, theta_sigma=(7, 5), bounds=(1.256, 1.57), cmap='jet'):
""" Measure intensity of R-spots in specified region """
spots = np.copy(waxs.T)
inner = bounds[0]
outer = bounds[1]
I = []
for i in range(R.shape[0]):
for j in range(Z.shape[0]):
if inner < np.linalg.norm([R[i], Z[j]]) < outer:
angle = (180 / np.pi) * np.arctan(Z[j] / R[i])
if (theta - theta_sigma[0]) < angle < (theta + theta_sigma[1]) or \
(theta - theta_sigma[0]) < (angle - 2*angle) < (theta + theta_sigma[1]):
spots[i, j] = 100
I.append(waxs[j, i])
average_intensity = np.mean(I)
plt.figure()
levels = np.linspace(0, 3.1, 200)
plt.contourf(R, Z, spots.T, cmap=cmap, levels=levels, extend='max')
plt.xlim(-2.5, 2.5)
plt.ylim(-2.5, 2.5)
plt.figure()
plt.hist(I, bins=25)
plt.title('Average intensity of R-spots: %.2f' % average_intensity)
# plt.show()
return average_intensity
def gaussian(points, mean, sigma, amplitude, yshift):
return yshift + (amplitude / np.sqrt(2 * np.pi * sigma ** 2)) * np.exp(
-(points - mean) ** 2 / (2 * sigma ** 2))
def lorentz(points, a, b, c):
"""
:param points: positions at which to evaluate the Lorentzian
:param a: full width at half maximum (FWHM)
:param b: position of the maximum
:param c: area under the peak (sets the maximum height, 2*c/(pi*a))
:return: Lorentzian evaluated at points
"""
w = a / 2
x = (b - points) / w
return (c / (np.pi * w)) / (1 + x ** 2)
def triple_lorentz(x, a0, a1, a2, b0, b1, b2, c0, c1, c2):
return lorentz(x, a0, b0, c0) + lorentz(x, a1, b1, c1) + lorentz(x, a2, b2, c2)
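# Hedged sketch of fitting the Lorentzian helpers above to a 1D intensity profile with
# scipy.optimize.curve_fit (illustrative only; 'q' and 'intensity' are hypothetical arrays and
# the initial guesses are placeholders, not values used elsewhere in this module):
# from scipy.optimize import curve_fit
# p0 = [0.1, 0.1, 0.1, 1.3, 1.5, 1.7, 1.0, 1.0, 1.0]   # [a0, a1, a2, b0, b1, b2, c0, c1, c2]
# popt, pcov = curve_fit(triple_lorentz, q, intensity, p0=p0)
# fit = triple_lorentz(q, *popt)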
def PLOT_RAD_NEW(D, wavelength_angstroms, ucell, format=False, factor=3.1, **kwargs):
"""
:param D: raw structure factor
:param wavelength_angstroms: wavelength of X-ray (angstroms)
:param ucell: 3 x 3 unitcell vectors
:param factor: maximum colorbar value if using formatting from Coscia et al. manuscript
:param format: plot simulated XRD patterns as they appear in Coscia et al. manuscript
:return:
"""
if not os.path.exists(path):
os.makedirs(path)
# inverse_ft(D, ucell)
X = D[:, 0, 0, 0]
Y = D[0, :, 0, 1]
Z = D[0, 0, :, 2]
SF = D[..., 3]
############## Plot z-slice down the middle of the raw structure factor ###################
# plt.plot(Z, SF[len(X)//2, len(Y)//2, :])
# plt.xlabel('q$_z$ ($\AA^{-1}$)')
# plt.ylabel('Intensity')
# plt.savefig('z_section.png')
# plt.show()
# exit()
ES = RegularGridInterpolator((X, Y, Z), SF, bounds_error=False)
THETA_BINS_PER_INV_ANG = 20.
MIN_THETA_BINS = 1 # minimum allowed bins
RBINS = 400
NLEVELS = 200 # number of levels for contour plots
a1 = ucell[0]
a2 = ucell[1]
a3 = ucell[2]
b1 = (np.cross(a2, a3)) / (np.dot(a1, np.cross(a2, a3)))
b2 = (np.cross(a3, a1)) / (np.dot(a2, np.cross(a3, a1)))
b3 = (np.cross(a1, a2)) / (np.dot(a3, np.cross(a1, a2)))
b_inv = np.linalg.inv(np.vstack((b1, b2, b3)))
ZBINS = Z.shape[0] # 400
XR = (X[-1] - X[0])*ucell[0][0]
YR = (Y[-1] - Y[0])*ucell[1][1]
Rmax = min(XR, YR) / 2.0
Rmax *= 0.95
rarr, rspace = np.linspace(0.0, Rmax, RBINS, retstep=True)
zar = np.linspace(Z[0], Z[-1], ZBINS)
oa = np.zeros((rarr.shape[0], zar.shape[0]))
circ = 2.*np.pi*rarr # circumference
for ir in trange(rarr.shape[0]):
NTHETABINS = max(int(THETA_BINS_PER_INV_ANG*circ[ir]), MIN_THETA_BINS) #calculate number of bins at this r
thetas = np.linspace(0.0, np.pi*2.0, NTHETABINS, endpoint=False) # generate theta array
t, r, z = np.meshgrid(thetas, rarr[ir], zar) # generate grid of cylindrical points
xar = r*np.cos(t)
import os
import copy
from tqdm import tqdm
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from stl import mesh
from typing import Dict, List, Optional
from .settings_pyskindose import PhantomDimensions
from pyskindose.plotting.create_ploty_ijk_indices import \
_create_plotly_ijk_indices_for_cuboid_objects
# valid phantom types
VALID_PHANTOM_MODELS = ["plane", "cylinder", "human", "table", "pad"]
class Phantom:
"""Create and handle phantoms for patient, support table and pad.
This class creates a phantom of any of the types specified in
VALID_PHANTOM_MODELS (plane, cylinder or human to represent the patient,
as well as the patient support table and pad). The patient phantoms consist of
a number of skin cells where the skin dose can be calculated.
Attributes
----------
phantom_model : str
Type of phantom, i.e. "plane", "cylinder", "human", "table" or "pad"
r : np.array
n*3 array where n is the number of phantom skin cells. Each row
contains the xyz coordinate of one of the phantom skin cells
ijk : np.array
A matrix containing vertex indices. This is required in order to
plot the phantom using plotly Mesh3D. For more info, see "i", "j", and
"k" at https://plot.ly/python/reference/#mesh3d
dose : np.array
An empty 1d array to store skin dose calculation for each of the n
phantom cells. Only for patient phantom types (plane, cylinder, human)
n : np.array
normal vectors to each of the n phantom skin cells. (only for 3D
patient phantoms, i.e. "cylinder" and "human")
r_ref : np.array
Empty array to store the reference positions of the phantom cells after
the phantom has been aligned in the geometry with the
position_patient_phantom_on_table function in geom_calc.py
table_length : float
length of the patient support table. This is needed for all phantom objects
to select correct rotation origin for At1, At2, and At3.
Methods
-------
rotate(rotation)
Rotating the phantom about any of the x, y, or z axis
translate(dr)
Translates the phantom along the x, y or z direction
save_position
Saves the reference position after the phantom has been properly
positioned in the irradiation geometry. This method is called in the
position_patient_phantom_on_table function
position(data_norm)
Positions the phantom from reference position to actual position
according to the table displacement info in data_norm
"""
def __init__(self,
phantom_model: str, phantom_dim: PhantomDimensions,
human_mesh: Optional[str] = None):
"""Create the phantom of choice.
Parameters
----------
phantom_model : str
Type of phantom to create. Valid selections are 'plane',
'cylinder', 'human', 'table' and 'pad'.
phantom_dim : PhantomDimensions
instance of class PhantomDimensions containing dimensions for
all phantoms models except human phantoms: Length, width, radius,
thickness etc.
human_mesh : str, optional
Choose which human mesh phantom to use. Valid selections are the names
of the *.stl files in the phantom_data folder. (The default is None.)
Raises
------
ValueError
Raises ValueError if an unsupported phantom model is selected,
or if phantom_model='human' is selected without specifying
human_mesh.
"""
self.phantom_model = phantom_model.lower()
# Raise error if invalid phantom model selected
if self.phantom_model not in VALID_PHANTOM_MODELS:
raise ValueError(f"Unknown phantom model selected. Valid type:"
f"{'.'.join(VALID_PHANTOM_MODELS)}")
self.r_ref: np.array
# Save table length for all phantoms in order to choose the correct rotation
# origin when applying At1, At2, and At3
self.table_length = phantom_dim.table_length
# creates a plane phantom (2D grid)
if phantom_model == "plane":
# Use a dense grid if specified by user
if phantom_dim.plane_resolution.lower() == 'dense':
res_length = res_width = 2.0
elif phantom_dim.plane_resolution.lower() == 'sparse':
res_length = res_width = 1.0
# Linearly spaced points along the longitudinal direction
x = np.linspace(-phantom_dim.plane_width / 2,
+phantom_dim.plane_width / 2,
int(res_width * phantom_dim.plane_width + 1))
# Linearly spaced points along the lateral direction
z = np.linspace(0, -phantom_dim.plane_length,
int(res_length * phantom_dim.plane_length))
# Create phantom in form of rectangular grid
x_plane, z_plane = np.meshgrid(x, z)
t = phantom_dim.plane_width
# Create index vectors for plotly mesh3d plotting
i2: List[int] = []
i1 = j1 = k1 = i2
for i in range(len(x) - 1):
for j in range(len(z) - 1):
i1 = i1 + [j * len(x) + i]
j1 = j1 + [j * len(x) + i + 1]
k1 = k1 + [j * len(x) + i + len(x)]
i2 = i2 + [j * len(x) + i + len(x) + 1]
self.r = np.column_stack((x_plane.ravel(),
np.zeros(len(x_plane.ravel())),
z_plane.ravel()))
self.ijk = np.column_stack((i1 + i2, j1 + k1, k1 + j1))
self.dose = np.zeros(len(self.r))
# creates a cylinder phantom (elliptic)
elif phantom_model == "cylinder":
# Use a dense grid if specified by user
if phantom_dim.cylinder_resolution.lower() == 'dense':
res_length = 4
res_width = 0.05
elif phantom_dim.cylinder_resolution.lower() == 'sparse':
res_length = 1.0
res_width = 0.1
# Creates linearly spaced points along an ellipse
# in the lateral direction
t = np.arange(0 * np.pi, 2 * np.pi, res_width)
x = (phantom_dim.cylinder_radii_a * np.cos(t)).tolist()
y = (phantom_dim.cylinder_radii_b * np.sin(t)).tolist()
# calculate normal vectors of a cylinder (pointing outwards)
nx = np.cos(t) / (
np.sqrt(np.square(np.cos(t)) + 4 * np.square(np.sin(t))))
nz = np.zeros(len(t))
ny = 2 * np.sin(t) / (
np.sqrt(np.square(np.cos(t)) + 4 * np.square(np.sin(t))))
nx = nx.tolist()
ny = ny.tolist()
nz = nz.tolist()
n = [[nx[ind], ny[ind], nz[ind]] for ind in range(len(t))]
# Store the coordinates of the cylinder phantom
output: Dict = dict(n=[], x=[], y=[], z=[])
# Extend the ellipse to span the entire length of the phantom,
# thus creating an elliptic cylinder
for index in range(
0, int(res_length) * (phantom_dim.cylinder_length + 2), 1):
output["x"] = output["x"] + x
output["z"] = output["z"] + [-1 / res_length * index] * len(x)
output["y"] = output["y"] + y
output["n"] = output["n"] + n
# Create index vectors for plotly mesh3d plotting
i1 = list(range(0, len(output["x"]) - len(t)))
j1 = list(range(1, len(output["x"]) - len(t) + 1))
k1 = list(range(len(t), len(output["x"])))
i2 = list(range(0, len(output["x"]) - len(t)))
k2 = list(range(len(t) - 1, len(output["x"]) - 1))
j2 = list(range(len(t), len(output["x"])))
for i in range(len(output['y'])):
output['y'][i] -= phantom_dim.cylinder_radii_b
self.r = np.column_stack((output["x"], output["y"], output["z"]))
self.ijk = np.column_stack((i1 + i2, j1 + j2, k1 + k2))
self.dose = np.zeros(len(self.r))
self.n = np.asarray(output["n"])
# creates a human phantom
elif phantom_model == "human":
if human_mesh is None:
raise ValueError('Human model needs to be specified for'
'phantom_model = "human"')
# load selected phantom model from binary .stl file
phantom_path = os.path.join(os.path.dirname(__file__),
'phantom_data', f"{human_mesh}.stl")
phantom_mesh = mesh.Mesh.from_file(phantom_path)
r = phantom_mesh.vectors
n = phantom_mesh.normals
self.r = np.asarray([el for el_list in r for el in el_list])
self.n = np.asarray([x for pair in zip(n, n, n) for x in pair])
# Create index vectors for plotly mesh3d plotting
self.ijk = np.column_stack((
np.arange(0, len(self.r) - 3, 3),
np.arange(1, len(self.r) - 2, 3),
np.arange(2, len(self.r) - 1, 3)))
self.dose = np.zeros(len(self.r))
# Creates the vertices of the patient support table
elif phantom_model == "table":
# Longitudinal position of the vertices
x_tab = [index * phantom_dim.table_width for index in
[+0.5, +0.5, -0.5, -0.5,
+0.5, +0.5, -0.5, -0.5]]
# Vertical position of the vertices
y_tab = [index * phantom_dim.table_thickness for index in
[0, 0, 0, 0, +1, +1, +1, +1]]
# Lateral position of the vertices
z_tab = [index * phantom_dim.table_length for index in
[0, -1, -1, 0, 0, -1, -1, 0]]
# Create index vectors for plotly mesh3d plotting
i_tab, j_tab, k_tab = \
_create_plotly_ijk_indices_for_cuboid_objects()
self.r = np.column_stack((x_tab, y_tab, z_tab))
self.ijk = np.column_stack((i_tab, j_tab, k_tab))
# Creates the vertices of the patient support table
elif phantom_model == "pad":
# Longitudinal position of the vertices
x_pad = [index * phantom_dim.pad_width for index in
[+0.5, +0.5, -0.5, -0.5,
+0.5, +0.5, -0.5, -0.5]]
# Vertical position of the vertices
y_pad = [index * phantom_dim.pad_thickness for index in
[0, 0, 0, 0, -1, -1, -1, -1]]
# Lateral position of the vertices
z_pad = [index * phantom_dim.pad_length for index in
[0, -1, -1, 0, 0, -1, -1, 0]]
# Create index vectors for plotly mesh3d plotting
i_pad, j_pad, k_pad = \
_create_plotly_ijk_indices_for_cuboid_objects()
self.r = np.column_stack((x_pad, y_pad, z_pad))
self.ijk = np.column_stack((i_pad, j_pad, k_pad))
def rotate(self, angles: List[int]) -> None:
"""Rotate the phantom about the angles specified in rotation.
Parameters
----------
angles: List[int]
list of angles in degrees the phantom should be rotated about,
given as [x_rot: <int>, y_rot: <int>, z_rot: <int>]. E.g.
rotation = [0, 90, 0] will rotate the phantom 90 degrees about the
y-axis.
"""
# convert degrees to radians
angles = np.deg2rad(angles)
x_rot = angles[0]
y_rot = angles[1]
z_rot = angles[2]
# Define rotation matrices about the x, y and z axes
Rx = np.array([[+1, +0, +0],
[+0, +np.cos(x_rot), -np.sin(x_rot)],
[+0, +np.sin(x_rot), +np.cos(x_rot)]])
import glob
import os
import json
import numpy as np
import trimesh
import imageio
import openmesh
import cv2
from tqdm import tqdm
import pickle
import time, threading
import scipy.spatial.transform
image_data_root = "/raid/celong/FaceScape/fsmview_images"
landmark_root = "/raid/celong/FaceScape/fsmview_landmarks"
mesh_root = "/raid/celong/FaceScape/textured_meshes"
expressions = {
1: "1_neutral",
2: "2_smile",
3: "3_mouth_stretch",
4: "4_anger",
5: "5_jaw_left",
6: "6_jaw_right",
7: "7_jaw_forward",
8: "8_mouth_left",
9: "9_mouth_right",
10: "10_dimpler",
11: "11_chin_raiser",
12: "12_lip_puckerer",
13: "13_lip_funneler",
14: "14_sadness",
15: "15_lip_roll",
16: "16_grin",
17: "17_cheek_blowing",
18: "18_eye_closed",
19: "19_brow_raiser",
20: "20_brow_lower"
}
lm_list_v10 = np.load("./predef/landmark_indices.npz")['v10']
def get_face_orientation(id_idx, exp_idx, cam_idx, Rt_scale_dict):
x_dir = np.array([1,0,0]).reshape(3,1)
y_dir = np.array([0,1,0]).reshape(3,1)
z_dir = np.array([0,0,1]).reshape(3,1)
Rt_TU = np.array(Rt_scale_dict['%d'%id_idx]['%d'%exp_idx][1])
x_dir = Rt_TU[:3,:3].T @ x_dir
y_dir = Rt_TU[:3,:3].T @ y_dir
z_dir = Rt_TU[:3,:3].T @ z_dir
img_dir = f"{image_data_root}/{id_idx}/{expressions[exp_idx]}"
with open(f"{img_dir}/params.json", 'r') as f:
params = json.load(f)
Rt = np.array(params['%d_Rt' % cam_idx])
R = Rt[:3,:3]
x_dir = R @ x_dir
y_dir = R @ y_dir
z_dir = R @ z_dir
x_dir = x_dir / np.linalg.norm(x_dir)
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
class TestElementwiseAddOp(OpTest):
def init_kernel_type(self):
self.use_mkldnn = False
def setUp(self):
self.op_type = "elementwise_add"
self.init_dtype()
self.init_input_output()
self.init_kernel_type()
self.init_axis()
self.inputs = {
'X': OpTest.np_dtype_to_fluid_dtype(self.x),
'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
}
self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
self.outputs = {'Out': self.out}
def test_check_output(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_output(check_dygraph=(self.use_mkldnn == False))
def test_check_grad_normal(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
if self.dtype == np.float16:
return
self.check_grad(
['X', 'Y'], 'Out', check_dygraph=(self.use_mkldnn == False))
def test_check_grad_ingore_x(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
if self.dtype == np.float16:
return
self.check_grad(
['Y'],
'Out',
no_grad_set=set("X"),
check_dygraph=(self.use_mkldnn == False))
def test_check_grad_ingore_y(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
if self.dtype == np.float16:
return
self.check_grad(
['X'],
'Out',
no_grad_set=set('Y'),
check_dygraph=(self.use_mkldnn == False))
def init_input_output(self):
self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.out = np.add(self.x, self.y)
def init_dtype(self):
self.dtype = np.float64
def init_axis(self):
self.axis = -1
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
def init_dtype(self):
self.dtype = np.float16
def test_check_output(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(
place, atol=1e-3, check_dygraph=(self.use_mkldnn == False))
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseAddOp_scalar(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 3, 4).astype(self.dtype)
self.y = np.random.rand(1).astype(self.dtype)
self.out = self.x + self.y
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 3, 4).astype(self.dtype)
self.y = np.random.rand(1).astype(self.dtype)
self.out = self.x + self.y
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1,1) to test broadcast.")
class TestElementwiseAddOp_scalar2(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 3, 4).astype(self.dtype)
self.y = np.random.rand(1, 1).astype(self.dtype)
self.out = self.x + self.y
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1,1) to test broadcast.")
class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 3, 4).astype(self.dtype)
self.y = np.random.rand(1, 1).astype(self.dtype)
self.out = self.x + self.y
class TestElementwiseAddOp_Vector(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.random((100, )).astype(self.dtype)
self.y = np.random.random((100, )).astype(self.dtype)
self.out = np.add(self.x, self.y)
class TestFP16ElementwiseAddOp_Vector(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.random((100, )).astype(self.dtype)
self.y = np.random.random((100, )).astype(self.dtype)
self.out = np.add(self.x, self.y)
class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(100, 2, 3).astype(self.dtype)
self.y = np.random.rand(100).astype(self.dtype)
self.out = self.x + self.y.reshape(100, 1, 1)
def init_axis(self):
self.axis = 0
class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(100, 2, 3).astype(self.dtype)
self.y = np.random.rand(100).astype(self.dtype)
self.out = self.x + self.y.reshape(100, 1, 1)
def init_axis(self):
self.axis = 0
class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 100, 3).astype(self.dtype)
self.y = np.random.rand(100).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 100, 1)
def init_axis(self):
self.axis = 1
class TestFP16ElementwiseAddOp_broadcast_1(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 100, 3).astype(self.dtype)
self.y = np.random.rand(100).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 100, 1)
def init_axis(self):
self.axis = 1
class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 3, 100).astype(self.dtype)
self.y = np.random.rand(100).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 1, 100)
class TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 3, 100).astype(self.dtype)
self.y = np.random.rand(100).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 1, 100)
class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
self.y = np.random.rand(10, 12).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 10, 12, 1)
def init_axis(self):
self.axis = 1
class TestFP16ElementwiseAddOp_broadcast_3(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
self.y = np.random.rand(10, 12).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 10, 12, 1)
def init_axis(self):
self.axis = 1
class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(100, 2, 3, 4).astype(self.dtype)
self.y = np.random.rand(100, 1).astype(self.dtype)
self.out = self.x + self.y.reshape(100, 1, 1, 1)
def init_axis(self):
self.axis = 0
class TestFP16ElementwiseAddOp_broadcast_4(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(100, 2, 3, 4).astype(self.dtype)
self.y = np.random.rand(100, 1).astype(self.dtype)
self.out = self.x + self.y.reshape(100, 1, 1, 1)
def init_axis(self):
self.axis = 0
class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(10, 3, 12).astype(self.dtype)
self.y = np.random.rand(10, 1, 12).astype(self.dtype)
self.out = self.x + self.y
class TestFP16ElementwiseAddOp_broadcast_5(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(10, 3, 12).astype(self.dtype)
self.y = np.random.rand(10, 1, 12).astype(self.dtype)
self.out = self.x + self.y
class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
self.out = self.x + self.y
class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype)
self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype)
self.out = self.x + self.y
class TestFP16ElementwiseAddOp_broadcast_6(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
self.out = self.x + self.y
class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 10, 12).astype(self.dtype)
self.y = np.random.rand(10, 12).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 10, 12)
def init_axis(self):
self.axis = 1
class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 10, 12).astype(self.dtype)
self.y = np.random.rand(10, 12).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 10, 12)
def init_axis(self):
self.axis = 1
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(100, 1).astype(self.dtype)
self.y = np.random.rand(1).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 1)
def init_axis(self):
self.axis = 1
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(100, 1).astype(self.dtype)
self.y = np.random.rand(1).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 1)
def init_axis(self):
self.axis = 1
class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(100, 2, 3).astype(self.dtype)
self.y = np.random.rand(100, 1, 1).astype(self.dtype)
self.out = self.x + self.y
def init_axis(self):
self.axis = -1
class TestFP16ElementwiseAddOp_channelwise_add(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(100, 2, 3).astype(self.dtype)
self.y = np.random.rand(100, 1, 1).astype(self.dtype)
self.out = self.x + self.y
def init_axis(self):
self.axis = -1
class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 3, 100).astype(self.dtype)
self.y = np.random.rand(1, 1, 100).astype(self.dtype)
self.out = self.x + self.y
def init_axis(self):
self.axis = -1
class TestElementwiseFP16AddOp_commonuse_add1(TestFP16ElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(20, 30, 100).astype(self.dtype)
self.y = np.random.rand(1, 1, 100).astype(self.dtype)
self.out = self.x + self.y
def init_axis(self):
self.axis = -1
class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype)
self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype)
self.out = self.x + self.y
def init_axis(self):
self.axis = -1
class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(10, 12).astype(self.dtype)
# Food Bank Problem
import sys
import importlib
import numpy as np
from scipy.optimize import minimize
import scipy
# ## OPT - via Convex Programming
# Calculates the optimal solution for the offline problem with convex programming
def solve(W, n, k, budget, size):
# Objective function: the Nash social welfare
# Note we take its negative to turn the maximization into a minimization problem
def objective(x, w, n, k, size):
X = np.reshape(x, (n,k))
"""Handling of transducer arrays, grouping multiple transducer elements.
The main class is the `TransducerArray` class, but other classes exist to
simplify the creation of the transducer positions for common array geometries.
.. autosummary::
:nosignatures:
TransducerArray
NormalTransducerArray
RectangularArray
DoublesidedArray
DragonflyArray
"""
import numpy as np
from . import utils
class TransducerArray:
"""Base class to handle transducer arrays.
This class has no notion of the layout. If possible, try to use a more specific
implementation instead.
Parameters
----------
positions : numpy.ndarray
The positions of the transducer elements in the array, shape 3xN.
normals : numpy.ndarray
The normals of the transducer elements in the array, shape 3xN.
transducer
An object of `levitate.transducers.TransducerModel` or a subclass. If passed a class it will create a new instance.
**kwargs :
All additional keyword arguments will be passed to the a transducer class
used when instantiating a new transducer model. Note that this will have
no effect on already instantiated transducer models.
Attributes
----------
num_transducers : int
The number of transducers used.
positions : numpy.ndarray
As above.
normals : numpy.ndarray
As above.
transducer : TransducerModel
An instance of a specific transducer model implementation.
freq : float
Frequency of the transducer model.
omega : float
Angular frequency of the transducer model.
k : float
Wavenumber in air, corresponding to `freq`.
wavelength : float
Wavelength in air, corresponding to `freq`.
"""
_repr_fmt_spec = '{:%cls(transducer=%transducer_full,\n\tpositions=%positions,\n\tnormals=%normals)}'
_str_fmt_spec = '{:%cls(transducer=%transducer): %num_transducers transducers}'
from .visualizers import ArrayVisualizer, ForceDiagram
def __init__(self, positions, normals,
transducer=None, medium=None,
**kwargs
):
if 'transducer_size' in kwargs:
kwargs.setdefault('physical_size', kwargs.pop('transducer_size'))
self._extra_print_args = {}
if transducer is None:
from .transducers import PointSource as transducer
if type(transducer) is type:
self.transducer = transducer(**kwargs)
else:
self.transducer = transducer
if medium is not None:
self.medium = medium
self.positions = positions
self.normals = normals
self.visualize = type(self).ArrayVisualizer(self, 'Transducers')
self.force_diagram = type(self).ForceDiagram(self)
def __format__(self, fmt_spec):
s_out = fmt_spec
s_out = s_out.replace('%cls', self.__class__.__name__).replace('%num_transducers', str(self.num_transducers))
s_out = s_out.replace('%transducer_size', str(self.transducer_size))
s_out = s_out.replace('%medium_full', repr(self.medium)).replace('%medium', str(self.medium))
s_out = s_out.replace('%transducer_full', repr(self.transducer)).replace('%transducer', str(self.transducer))
s_out = s_out.replace('%positions', repr(self.positions)).replace('%normals', repr(self.normals))
for key, value in self._extra_print_args.items():
s_out = s_out.replace('%' + key, str(value))
return s_out
def __eq__(self, other):
return (
isinstance(other, TransducerArray)
and self.num_transducers == other.num_transducers
and np.allclose(self.positions, other.positions)
and np.allclose(self.normals, other.normals)
and self.transducer == other.transducer
)
def __add__(self, other):
if isinstance(other, TransducerArray) and self.transducer == other.transducer:
positions = np.concatenate([self.positions, other.positions], axis=1)
normals = np.concatenate([self.normals, other.normals], axis=1)
return TransducerArray(positions=positions, normals=normals, transducer=self.transducer)
else:
return NotImplemented
def __iadd__(self, other):
if isinstance(other, TransducerArray) and self.transducer == other.transducer:
self.positions = np.concatenate([self.positions, other.positions], axis=1)
self.normals = np.concatenate([self.normals, other.normals], axis=1)
return self
else:
return NotImplemented
def __repr__(self):
return self._repr_fmt_spec.format(self)
def _repr_pretty_(self, p, cycle):
p.text(str(self))
def __str__(self):
return self._str_fmt_spec.format(self)
@property
def k(self):
return self.transducer.k
@k.setter
def k(self, value):
self.transducer.k = value
@property
def omega(self):
return self.transducer.omega
@omega.setter
def omega(self, value):
self.transducer.omega = value
@property
def freq(self):
return self.transducer.freq
@freq.setter
def freq(self, value):
self.transducer.freq = value
@property
def wavelength(self):
return self.transducer.wavelength
@wavelength.setter
def wavelength(self, value):
self.transducer.wavelength = value
@property
def medium(self):
return self.transducer.medium
@medium.setter
def medium(self, val):
self.transducer.medium = val
@property
def transducer_size(self):
return self.transducer.physical_size
@transducer_size.setter
def transducer_size(self, value):
self.transducer.physical_size = value
@property
def positions(self):
return self._positions
@positions.setter
def positions(self, val):
val = np.asarray(val)
if not val.shape[0] == 3:
raise ValueError('Cannot set position to these values, the first axis must have length 3 and represent the [x,y,z] coordinates!')
self._positions = val
self._num_transducers = val.shape[1]
@property
def normals(self):
return self._normals
@normals.setter
def normals(self, val):
val = np.asarray(val)
if not val.shape[0] == 3:
raise ValueError('Cannot set normals to these values, the first axis must have length 3 and represent the [x,y,z] components!')
if self.num_transducers == 0:
raise ValueError('Set the array positions before setting the normals!')
if val.ndim == 1:
val = np.tile(val.reshape(3, 1), (1, self.num_transducers))
elif val.shape[1] != self.num_transducers:
raise ValueError('The array needs to have the same number of normals as transducers!')
self._normals = val / np.sum(val**2, axis=0)**0.5
@property
def num_transducers(self):
try:
return self._num_transducers
except AttributeError:
return 0
def focus_phases(self, focus):
"""Focuses the phases to create a focus point.
Parameters
----------
focus : array_like
Three element array with a location where to focus.
Returns
-------
phases : numpy.ndarray
Array with the phases for the transducer elements.
"""
focus = np.asarray(focus)
phase = -np.sum((self.positions - focus.reshape([3, 1]))**2, axis=0)**0.5 * self.k
phase = np.mod(phase + np.pi, 2 * np.pi) - np.pi # Wrap phase to [-pi, pi]
return phase
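# Hedged usage sketch (illustrative only; 'array' is assumed to be an existing TransducerArray
# instance and the focus position/units below are hypothetical):
# focus_point = np.array([0, 0, 50e-3])
# phases = array.focus_phases(focus_point)
# complex_amplitudes = np.exp(1j * phases)  # unit-amplitude drive signals with focusing phases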
def signature(self, position, phases, stype=None):
"""Calculate the phase signature of the array.
The signature of an array if the phase of the transducer elements
when the phase required to focus all elements to a specific point
has been removed.
Parameters
----------
position : array_like
Three element array with a position for where the signature is relative to.
phases : numpy.ndarray
The phases of which to calculate the signature.
Returns
-------
signature : numpy.ndarray
The signature wrapped to the interval [-pi, pi].
"""
if stype is not None:
raise NotImplementedError("Unknown phase signature '{}' for array of type `{}`".format(stype, self.__class__.__name__))
focus_phases = self.focus_phases(position)
return np.mod(phases - focus_phases + np.pi, 2 * np.pi) - np.pi
def pressure_derivs(self, positions, orders=3):
"""Calculate derivatives of the pressure.
Calculates the spatial derivatives of the pressure from all individual
transducers in a Cartesian coordinate system.
Parameters
----------
positions : numpy.ndarray
The location(s) at which to evaluate the derivatives, shape (3, ...).
The first dimension must have length 3 and represent the coordinates of the points.
orders : int
How many orders of derivatives to calculate. Currently three orders are supported.
Returns
-------
derivatives : ndarray
Array with the calculated derivatives. Has the shape (M, N, ...) where M is the number of spatial derivatives,
and N is the number of transducers, see `num_spatial_derivatives` and `spatial_derivative_order`,
and the remaining dimensions are the same as the `positions` input with the first dimension removed.
"""
return self.transducer.pressure_derivs(self.positions, self.normals, positions, orders)
def spherical_harmonics(self, positions, orders=0):
"""Spherical harmonics expansion of transducer sound fields.
The sound fields generated by the individual transducers in the array are expanded
in spherical harmonics around the positions specified. The coefficients are calculated
using analytical translation of the transducer radiation patterns. This is a simplified
calculation which will not account for the local directivity curve, only an overall
scaling for each transducer-position combination.
Parameters
----------
positions : numpy.ndarray
The location(s) at which to evaluate the derivatives, shape (3, ...).
The first dimension must have length 3 and represent the coordinates of the points.
orders : int, default 0
The maximum order to expand to.
Return
------
spherical_harmonics_coefficients : numpy.ndarray
Array with the calculated expansion coefficients. The order of the coefficients
are described in `~levitate.utils.SphericalHarmonicsIndexer`.
Has shape (M, N, ...) where `M=len(SphericalHarmonicsIndexer(orders))`,
`N` is the number of transducers in the array, and the remaining dimensions are
the same as the `positions` input with the first dimension removed.
"""
return self.transducer.spherical_harmonics(self.positions, self.normals, positions, orders)
def request(self, requests, position):
"""Evaluate a set of requests.
This takes a mapping (e.g. dict) of requests, and evaluates them
at a given position. This is independent of the current transducer state.
If a certain quantity should be calculated with regards to the current
transducer state, use a `FieldImplementation` from the `fields` module.
Parameters
----------
position: ndarray
The position where to calculate the requirements needed, shape (3,...).
requests : mapping, e.g. dict
A mapping of the desired requests. The keys in the mapping should
start with the desired output, and the value indicates some kind of
parameter set. Possible requests listed below:
pressure_derivs
A number of spatial derivatives of the pressure. Should contain the
maximum order of differentiation, see `pressure_derivs`.
spherical_harmonics
Spherical harmonics coefficients for an expansion of the pressure.
Should contain the maximum order of expansion, see `spherical_harmonics`.
Returns
-------
evaluated_requests : dict
A dictionary of the set of calculated data, according to the requests.
"""
position = np.asarray(position)
parsed_requests = {}
for key, value in requests.items():
if key.find('pressure_derivs') > -1:
parsed_requests['pressure_derivs'] = max(value, parsed_requests.get('pressure_derivs', -1))
elif key.find('spherical_harmonics_gradient') > -1:
parsed_requests['spherical_harmonics'] = max(value + 1, parsed_requests.get('spherical_harmonics', -1))
parsed_requests['spherical_harmonics_gradient'] = max(value, parsed_requests.get('spherical_harmonics_gradient', -1))
elif key.find('spherical_harmonics') > -1:
parsed_requests['spherical_harmonics'] = max(value, parsed_requests.get('spherical_harmonics', -1))
elif key != 'complex_transducer_amplitudes':
raise ValueError("Unknown request from `TransducerArray`: '{}'".format(key))
evaluated_requests = {}
if 'pressure_derivs' in parsed_requests:
evaluated_requests['pressure_derivs'] = self.pressure_derivs(position, orders=parsed_requests.pop('pressure_derivs'))
if 'spherical_harmonics' in parsed_requests:
evaluated_requests['spherical_harmonics'] = self.spherical_harmonics(position, orders=parsed_requests.pop('spherical_harmonics'))
if 'spherical_harmonics_gradient' in parsed_requests:
gradient_order = parsed_requests.pop('spherical_harmonics_gradient')
sph_idx = utils.SphericalHarmonicsIndexer(gradient_order)
def A(n, m):
return ((n + m + 1) * (n + m + 2) / (2 * n + 1) / (2 * n + 3)) ** 0.5
def B(n, m):
return -((n + m + 1) * (n - m + 1) / (2 * n + 1) / (2 * n + 3)) ** 0.5
S = evaluated_requests['spherical_harmonics']
dS_dxpiy = np.zeros((len(sph_idx), self.num_transducers) + position.shape[1:], dtype=complex)
dS_dxmiy = np.zeros((len(sph_idx), self.num_transducers) + position.shape[1:], dtype=complex)
dS_dz = np.zeros((len(sph_idx), self.num_transducers) + position.shape[1:], dtype=complex)
for idx, (n, m) in enumerate(sph_idx):
dS_dxpiy[idx] = A(n, -m) * S[sph_idx(n + 1, m - 1)]
dS_dxmiy[idx] = -A(n, m) * S[sph_idx(n + 1, m + 1)]
dS_dz[idx] = -B(n, m) * S[sph_idx(n + 1, m)]
try:
dS_dxpiy[idx] += A(n - 1, m - 1) * S[sph_idx(n - 1, m - 1)]
except ValueError:
pass
try:
dS_dxmiy[idx] -= A(n - 1, - m - 1) * S[sph_idx(n - 1, m + 1)]
except ValueError:
pass
try:
dS_dz[idx] += B(n - 1, m) * S[sph_idx(n - 1, m)]
except ValueError:
pass
dS_dx = 0.5 * (dS_dxpiy + dS_dxmiy)
dS_dy = -0.5j * (dS_dxpiy - dS_dxmiy)
dS = np.stack([dS_dx, dS_dy, dS_dz], axis=0) * self.k
evaluated_requests['spherical_harmonics_gradient'] = dS
if len(parsed_requests) > 0:
raise ValueError('Unevaluated requests: {}'.format(parsed_requests))
return evaluated_requests
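# Hedged usage sketch of the request API (illustrative only; 'array' is an existing
# TransducerArray instance and 'pos' a 3-element position):
# requests = {'pressure_derivs': 1, 'spherical_harmonics': 2}
# evaluated = array.request(requests, pos)
# p_derivs = evaluated['pressure_derivs']       # shape (M, num_transducers, ...)
# sph_coefs = evaluated['spherical_harmonics']  # expansion coefficients up to order 2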
class NormalTransducerArray(TransducerArray):
"""Transducer array with a clearly defined normal.
This is mostly intended as a base class for other implementations.
The advantage is that a simple arrangement can be created assuming a normal
along the z-axis, which is then rotated and moved to the desired orientation.
The positions and normals of the transducers should be input assuming that
the overall normal for the array is along the z-axis. The positions and normals
will be rotated around the origin to give the desired overall normal.
This rotation will take place along the intersection line of the plane specified
by the desired normal, and the xy-plane.
If rotation is desired, the positions are further rotated using the normal
as the rotation axis. Finally an offset is applied to the entire array.
Parameters
----------
positions : numpy.ndarray
The positions of the transducer elements in the array, shape 3xN.
normals : numpy.ndarray
The normals of the transducer elements in the array, shape 3xN (or 3 elements which will broadcast).
offset : 3 element array_like, default (0, 0, 0)
The location of the center of the array.
normal : 3 element array_like, default (0, 0, 1)
The normal of the overall array.
rotation : float, default 0
The in-plane rotation of the array around the normal.
"""
_str_fmt_spec = '{:%cls(transducer=%transducer, offset=%offset, normal=%normal, rotation=%rotation)}'
def __init__(self, positions, normals, offset=(0, 0, 0), normal=(0, 0, 1), rotation=0, **kwargs):
normal = np.asarray(normal, dtype=float)
normal /= (normal**2).sum()**0.5
self._overall_normal = normal
self._overall_offset = offset
self._overall_rotation = rotation
if normal[0] != 0 or normal[1] != 0:
# We need to rotate the grid to get the correct normal
rotation_vector = np.cross(normal, (0, 0, 1))
rotation_vector /= (rotation_vector**2).sum()**0.5
cross_product_matrix = np.array([[0, rotation_vector[2], -rotation_vector[1]],
[-rotation_vector[2], 0, rotation_vector[0]],
[rotation_vector[1], -rotation_vector[0], 0]])
cos = normal[2]
sin = (1 - cos**2)**0.5
rotation_matrix = (cos * np.eye(3) + sin * cross_product_matrix + (1 - cos) * np.outer(rotation_vector, rotation_vector))
elif normal[2] == -1:
rotation_matrix = np.zeros((3, 3))
# -*- coding: utf-8 -*-
"""Routines for multiple scattering. The first half of the module contains functions to explicitly compute the
coupling matrix entries. The second half of the module contains functions for the preparation of lookup tables that
are used to approximate the coupling matrices by interpolation."""
from numba import complex128,int64,jit
from scipy.signal.filter_design import bessel
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import scipy.interpolate
import scipy.special
import smuthi.coordinates as coord
import smuthi.cuda_sources as cu
import smuthi.field_expansion as fldex
import smuthi.layers as lay
import smuthi.spherical_functions as sf
import smuthi.vector_wave_functions as vwf
import sys
try:
import pycuda.autoinit
import pycuda.driver as drv
from pycuda import gpuarray
from pycuda.compiler import SourceModule
import pycuda.cumath
except:
pass
@jit(complex128(complex128[:], complex128[:]),
nopython=True, cache=True, nogil=True)
def numba_trapz(y, x):
out = 0.0 + 0.0j
#TODO implement some (optional) advanced summation?
#e.g. https://github.com/nschloe/accupy/blob/master/accupy/sums.py
#or better Sum2 from https://doi.org/10.1137/030601818 (Algorithm 4.4)
#Note, that this may need to have exact summation for x and y, and exact product.
for i in range( len(y) - 2 ):
out += (x[i+1]-x[i]) * (y[i+1] + y[i])/2.0
return out
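# Hedged sanity-check sketch (illustrative only, not part of the library): numba_trapz follows the
# trapezoidal rule for complex samples on a possibly non-uniform grid. Note that the loop bound
# above is range(len(y) - 2), so the final interval is not accumulated, unlike numpy.trapz.
# x = np.linspace(0.0, 1.0, 101).astype(complex)
# y = np.exp(1j * 2 * np.pi * x.real)
# approx = numba_trapz(y, x)  # compare against np.trapz(y, x) to gauge the difference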
@jit((complex128[:], complex128[:,:,:],
complex128[:,:,:,:],complex128[:,:,:], int64),
nopython=True,cache=True
,nogil=True
# , parallel=True
)
def eval_BeLBe(BeLBe, BeL, B1, ejkz, n2):
for k in range(len(BeLBe)):
for iplmn2 in range(2):
for pol in range(2):
BeLBe[k] += BeL[pol, iplmn2, k] * B1[pol, iplmn2, n2, k
] * ejkz[1, 1 - iplmn2, k]
def layer_mediated_coupling_block(vacuum_wavelength, receiving_particle, emitting_particle, layer_system,
k_parallel='default', show_integrand=False):
"""Layer-system mediated particle coupling matrix :math:`W^R` for two particles. This routine is explicit, but slow.
Args:
vacuum_wavelength (float): Vacuum wavelength :math:`\lambda` (length unit)
receiving_particle (smuthi.particles.Particle): Particle that receives the scattered field
emitting_particle (smuthi.particles.Particle): Particle that emits the scattered field
layer_system (smuthi.layers.LayerSystem): Stratified medium in which the coupling takes place
k_parallel (numpy ndarray): In-plane wavenumbers for Sommerfeld integral
If 'default', use smuthi.coordinates.default_k_parallel
show_integrand (bool): If True, the norm of the integrand is plotted.
Returns:
Layer mediated coupling matrix block as numpy array.
"""
if type(k_parallel) == str and k_parallel == 'default':
k_parallel = coord.default_k_parallel
omega = coord.angular_frequency(vacuum_wavelength)
# index specs
lmax1 = receiving_particle.l_max
mmax1 = receiving_particle.m_max
lmax2 = emitting_particle.l_max
mmax2 = emitting_particle.m_max
blocksize1 = fldex.blocksize(lmax1, mmax1)
blocksize2 = fldex.blocksize(lmax2, mmax2)
# cylindrical coordinates of relative position vectors
rs1 = np.array(receiving_particle.position)
rs2 = np.array(emitting_particle.position)
rs2s1 = rs1 - rs2
rhos2s1 = np.linalg.norm(rs2s1[0:2])
phis2s1 = np.arctan2(rs2s1[1], rs2s1[0])
is1 = layer_system.layer_number(rs1[2])
ziss1 = rs1[2] - layer_system.reference_z(is1)
is2 = layer_system.layer_number(rs2[2])
ziss2 = rs2[2] - layer_system.reference_z(is2)
# wave numbers
kis1 = omega * layer_system.refractive_indices[is1]
kis2 = omega * layer_system.refractive_indices[is2]
kzis1 = coord.k_z(k_parallel=k_parallel, k=kis1)
kzis2 = coord.k_z(k_parallel=k_parallel, k=kis2)
# phase factors
ejkz = np.zeros((2, 2, len(k_parallel)), dtype=complex) # indices are: particle, plus/minus, kpar_idx
ejkz[0, 0, :] = np.exp(1j * kzis1 * ziss1)
ejkz[0, 1, :] = np.exp(- 1j * kzis1 * ziss1)
ejkz[1, 0, :] = np.exp(1j * kzis2 * ziss2)
ejkz[1, 1, :] = np.exp(- 1j * kzis2 * ziss2)
# layer response
L = np.zeros((2, 2, 2, len(k_parallel)), dtype=complex) # polarization, pl/mn1, pl/mn2, kpar_idx
for pol in range(2):
L[pol, :, :, :] = lay.layersystem_response_matrix(pol, layer_system.thicknesses,
layer_system.refractive_indices, k_parallel, omega, is2, is1)
# transformation coefficients
B = [np.zeros((2, 2, blocksize1, len(k_parallel)), dtype=complex),
np.zeros((2, 2, blocksize2, len(k_parallel)), dtype=complex)]
# list index: particle, np indices: pol, plus/minus, n, kpar_idx
m_vec = [np.zeros(blocksize1, dtype=int), np.zeros(blocksize2, dtype=int)]
# precompute spherical functions
ct = kzis1 / kis1
st = k_parallel / kis1
_, pilm_list_pl, taulm_list_pl = sf.legendre_normalized(ct, st, lmax1)
_, pilm_list_mn, taulm_list_mn = sf.legendre_normalized(-ct, st, lmax1)
pilm = (pilm_list_pl, pilm_list_mn)
taulm = (taulm_list_pl, taulm_list_mn)
for tau in range(2):
for m in range(-mmax1, mmax1 + 1):
for l in range(max(1, abs(m)), lmax1 + 1):
n = fldex.multi_to_single_index(tau, l, m, lmax1, mmax1)
m_vec[0][n] = m
for iplmn in range(2):
for pol in range(2):
B[0][pol, iplmn, n, :] = vwf.transformation_coefficients_vwf(tau, l, m, pol, pilm_list=pilm[iplmn],
taulm_list=taulm[iplmn], dagger=True)
ct = kzis2 / kis2
st = k_parallel / kis2
_, pilm_list_pl, taulm_list_pl = sf.legendre_normalized(ct, st, lmax2)
_, pilm_list_mn, taulm_list_mn = sf.legendre_normalized(-ct, st, lmax2)
pilm = (pilm_list_pl, pilm_list_mn)
taulm = (taulm_list_pl, taulm_list_mn)
for tau in range(2):
for m in range(-mmax2, mmax2 + 1):
for l in range(max(1, abs(m)), lmax2 + 1):
n = fldex.multi_to_single_index(tau, l, m, lmax2, mmax2)
m_vec[1][n] = m
for iplmn in range(2):
for pol in range(2):
B[1][pol, iplmn, n, :] = vwf.transformation_coefficients_vwf(tau, l, m, pol, pilm_list=pilm[iplmn],
taulm_list=taulm[iplmn], dagger=False)
# bessel function and jacobi factor
bessel_list = []
for dm in range(lmax1 + lmax2 + 1):
bessel_list.append(scipy.special.jv(dm, k_parallel * rhos2s1))
jacobi_vector = k_parallel / (kzis2 * kis2)
m2_minus_m1 = m_vec[1] - m_vec[0][np.newaxis].T
wr_const = 4 * (1j) ** abs(m2_minus_m1) * np.exp(1j * m2_minus_m1 * phis2s1)
integral = np.zeros((blocksize1, blocksize2), dtype=complex)
for n1 in range(blocksize1):
BeL = np.zeros((2, 2, len(k_parallel)), dtype=complex) # indices are: pol, plmn2, n1, kpar_idx
for iplmn1 in range(2):
for pol in range(2):
BeL[pol, :, :] += (L[pol, iplmn1, :, :]
* B[0][pol, iplmn1, n1, :]
* ejkz[0, iplmn1, :])
for n2 in range(blocksize2):
bessel_full = bessel_list[abs(m_vec[0][n1] - m_vec[1][n2])]
BeLBe = np.zeros((len(k_parallel)), dtype=complex)
eval_BeLBe(BeLBe, BeL, B[1], ejkz, n2)
integrand = bessel_full * jacobi_vector * BeLBe
integral[n1,n2] = numba_trapz(integrand, k_parallel)
wr = wr_const * integral
return wr
def layer_mediated_coupling_matrix(vacuum_wavelength, particle_list, layer_system, k_parallel='default'):
"""Layer system mediated particle coupling matrix W^R for a particle collection in a layered medium.
Args:
vacuum_wavelength (float): Wavelength in length unit
particle_list (list of smuthi.particles.Particle obejcts: Scattering particles
layer_system (smuthi.layers.LayerSystem): The stratified medium
k_parallel (numpy.ndarray or str): In-plane wavenumber for Sommerfeld integrals.
If 'default', smuthi.coordinates.default_k_parallel
Returns:
Ensemble coupling matrix as numpy array.
"""
# indices
blocksizes = [fldex.blocksize(particle.l_max, particle.m_max) for particle in particle_list]
# initialize result
wr = np.zeros((sum(blocksizes), sum(blocksizes)), dtype=complex)
for s1, particle1 in enumerate(particle_list):
idx1 = np.array(range(sum(blocksizes[:s1]), sum(blocksizes[:s1]) + blocksizes[s1]))
for s2, particle2 in enumerate(particle_list):
idx2 = range(sum(blocksizes[:s2]), sum(blocksizes[:s2]) + blocksizes[s2])
wr[idx1[:, None], idx2] = layer_mediated_coupling_block(vacuum_wavelength, particle1, particle2,
layer_system, k_parallel)
return wr
def direct_coupling_block(vacuum_wavelength, receiving_particle, emitting_particle, layer_system):
"""Direct particle coupling matrix :math:`W` for two particles. This routine is explicit, but slow.
Args:
vacuum_wavelength (float): Vacuum wavelength :math:`\lambda` (length unit)
receiving_particle (smuthi.particles.Particle): Particle that receives the scattered field
emitting_particle (smuthi.particles.Particle): Particle that emits the scattered field
layer_system (smuthi.layers.LayerSystem): Stratified medium in which the coupling takes place
Returns:
Direct coupling matrix block as numpy array.
"""
omega = coord.angular_frequency(vacuum_wavelength)
# index specs
lmax1 = receiving_particle.l_max
mmax1 = receiving_particle.m_max
lmax2 = emitting_particle.l_max
mmax2 = emitting_particle.m_max
blocksize1 = fldex.blocksize(lmax1, mmax1)
blocksize2 = fldex.blocksize(lmax2, mmax2)
# initialize result
w = np.zeros((blocksize1, blocksize2), dtype=complex)
# check if particles are in same layer
rS1 = receiving_particle.position
rS2 = emitting_particle.position
iS1 = layer_system.layer_number(rS1[2])
iS2 = layer_system.layer_number(rS2[2])
if iS1 == iS2 and not emitting_particle == receiving_particle:
k = omega * layer_system.refractive_indices[iS1]
dx = rS1[0] - rS2[0]
dy = rS1[1] - rS2[1]
dz = rS1[2] - rS2[2]
d = np.sqrt(dx**2 + dy**2 + dz**2)
cos_theta = dz / d
sin_theta = np.sqrt(dx**2 + dy**2) / d
phi = np.arctan2(dy, dx)
# spherical functions
bessel_h = [sf.spherical_hankel(n, k * d) for n in range(lmax1 + lmax2 + 1)]
legendre, _, _ = sf.legendre_normalized(cos_theta, sin_theta, lmax1 + lmax2)
# the particle coupling operator is the transpose of the SVWF translation operator
# therefore, (l1,m1) and (l2,m2) are interchanged:
for m1 in range(-mmax1, mmax1 + 1):
for m2 in range(-mmax2, mmax2 + 1):
eimph = np.exp(1j * (m2 - m1) * phi)
for l1 in range(max(1, abs(m1)), lmax1 + 1):
for l2 in range(max(1, abs(m2)), lmax2 + 1):
A, B = complex(0), complex(0)
for ld in range(max(abs(l1 - l2), abs(m1 - m2)), l1 + l2 + 1): # if ld<abs(m1-m2) then P=0
a5, b5 = vwf.ab5_coefficients(l2, m2, l1, m1, ld)
A += a5 * bessel_h[ld] * legendre[ld][abs(m1 - m2)]
B += b5 * bessel_h[ld] * legendre[ld][abs(m1 - m2)]
A, B = eimph * A, eimph * B
for tau1 in range(2):
n1 = fldex.multi_to_single_index(tau1, l1, m1, lmax1, mmax1)
for tau2 in range(2):
n2 = fldex.multi_to_single_index(tau2, l2, m2, lmax2, mmax2)
if tau1 == tau2:
w[n1, n2] = A
else:
w[n1, n2] = B
return w
def direct_coupling_matrix(vacuum_wavelength, particle_list, layer_system):
"""Return the direct particle coupling matrix W for a particle collection in a layered medium.
Args:
vacuum_wavelength (float): Wavelength in length unit
particle_list (list of smuthi.particles.Particle obejcts: Scattering particles
layer_system (smuthi.layers.LayerSystem): The stratified medium
Returns:
Ensemble coupling matrix as numpy array.
"""
# indices
blocksizes = [fldex.blocksize(particle.l_max, particle.m_max)
for particle in particle_list]
# initialize result
w = np.zeros((sum(blocksizes), sum(blocksizes)), dtype=complex)
for s1, particle1 in enumerate(particle_list):
idx1 = np.array(range(sum(blocksizes[:s1]), sum(blocksizes[:s1+1])))
for s2, particle2 in enumerate(particle_list):
idx2 = range(sum(blocksizes[:s2]), sum(blocksizes[:s2+1]))
w[idx1[:, None], idx2] = direct_coupling_block(vacuum_wavelength, particle1, particle2, layer_system)
return w
def volumetric_coupling_lookup_table(vacuum_wavelength, particle_list, layer_system, k_parallel='default',
resolution=None):
"""Prepare Sommerfeld integral lookup table to allow for a fast calculation of the coupling matrix by interpolation.
This function is called when not all particles are on the same z-position.
Args:
vacuum_wavelength (float): Vacuum wavelength in length units
particle_list (list): List of particle objects
layer_system (smuthi.layers.LayerSystem): Stratified medium
k_parallel (numpy.ndarray or str): In-plane wavenumber for Sommerfeld integrals.
If 'default', smuthi.coordinates.default_k_parallel
resolution (float): Spatial resolution of lookup table in length units. (default: vacuum_wavelength / 100)
Smaller means more accurate but higher memory footprint
Returns:
(tuple): tuple containing:
w_pl (ndarray): Coupling lookup for z1 + z2, indices are [rho, z, n1, n2]. Includes layer mediated coupling.
w_mn (ndarray): Coupling lookup for z1 - z2, indices are [rho, z, n1, n2]. Includes layer mediated and
direct coupling.
rho_array (ndarray): Values for the radial distance considered for the lookup (starting from negative
numbers to allow for simpler cubic interpolation without distinction of cases
at the lookup edges)
sz_array (ndarray): Values for the sum of z-coordinates (z1 + z2) considered for the lookup
dz_array (ndarray): Values for the difference of z-coordinates (z1 - z2) considered for the lookup
"""
sys.stdout.write('Prepare 3D particle coupling lookup:\n')
sys.stdout.flush()
if resolution is None:
resolution = vacuum_wavelength / 100
sys.stdout.write('Setting lookup resolution to %f\n'%resolution)
sys.stdout.flush()
l_max = max([particle.l_max for particle in particle_list])
m_max = max([particle.m_max for particle in particle_list])
blocksize = fldex.blocksize(l_max, m_max)
particle_x_array = np.array([particle.position[0] for particle in particle_list])
particle_y_array = np.array([particle.position[1] for particle in particle_list])
particle_z_array = np.array([particle.position[2] for particle in particle_list])
particle_rho_array = np.sqrt((particle_x_array[:, None] - particle_x_array[None, :]) ** 2
+ (particle_y_array[:, None] - particle_y_array[None, :]) ** 2)
dz_min = particle_z_array.min() - particle_z_array.max()
dz_max = particle_z_array.max() - particle_z_array.min()
sz_min = 2 * particle_z_array.min()
sz_max = 2 * particle_z_array.max()
rho_array = np.arange(- 3 * resolution, particle_rho_array.max() + 3 * resolution, resolution)
sz_array = np.arange(sz_min - 3 * resolution, sz_max + 3 * resolution, resolution)
dz_array = np.arange(dz_min - 3 * resolution, dz_max + 3 * resolution, resolution)
len_rho = len(rho_array)
len_sz = len(sz_array)
len_dz = len(dz_array)
assert len_sz == len_dz
i_s = layer_system.layer_number(particle_list[0].position[2])
k_is = layer_system.wavenumber(i_s, vacuum_wavelength)
z_is = layer_system.reference_z(i_s)
# direct -----------------------------------------------------------------------------------------------------------
w = np.zeros((len_rho, len_dz, blocksize, blocksize), dtype=np.complex64)
sys.stdout.write('Lookup table memory footprint: ' + size_format(2 * w.nbytes) + '\n')
sys.stdout.flush()
r_array = np.sqrt(dz_array[None, :]**2 + rho_array[:, None]**2)
r_array[r_array==0] = 1e-20
ct = dz_array[None, :] / r_array
st = rho_array[:, None] / r_array
legendre, _, _ = sf.legendre_normalized(ct, st, 2 * l_max)
bessel_h = []
for dm in tqdm(range(2 * l_max + 1), desc='Spherical Hankel lookup ', file=sys.stdout,
bar_format='{l_bar}{bar}| elapsed: {elapsed} remaining: {remaining}'):
bessel_h.append(sf.spherical_hankel(dm, k_is * r_array))
pbar = tqdm(total=blocksize**2,
desc='Direct coupling ',
file=sys.stdout,
bar_format='{l_bar}{bar}| elapsed: {elapsed} remaining: {remaining}')
for m1 in range(-m_max, m_max+1):
for m2 in range(-m_max, m_max+1):
for l1 in range(max(1, abs(m1)), l_max + 1):
for l2 in range(max(1, abs(m2)), l_max + 1):
A = np.zeros((len_rho, len_dz), dtype=complex)
B = np.zeros((len_rho, len_dz), dtype=complex)
for ld in range(max(abs(l1 - l2), abs(m1 - m2)), l1 + l2 + 1): # if ld<abs(m1-m2) then P=0
a5, b5 = vwf.ab5_coefficients(l2, m2, l1, m1, ld) # remember that w = A.T
A += a5 * bessel_h[ld] * legendre[ld][abs(m1 - m2)] # remember that w = A.T
B += b5 * bessel_h[ld] * legendre[ld][abs(m1 - m2)] # remember that w = A.T
for tau1 in range(2):
n1 = fldex.multi_to_single_index(tau1, l1, m1, l_max, m_max)
for tau2 in range(2):
n2 = fldex.multi_to_single_index(tau2, l2, m2, l_max, m_max)
if tau1 == tau2:
w[:, :, n1, n2] = A
else:
w[:, :, n1, n2] = B
pbar.update()
pbar.close()
# switch off direct coupling contribution near rho=0:
w[rho_array < particle_rho_array[~np.eye(particle_rho_array.shape[0],dtype=bool)].min() / 2, :, :, :] = 0
# layer mediated ---------------------------------------------------------------------------------------------------
sys.stdout.write('Layer mediated coupling : ...')
sys.stdout.flush()
if type(k_parallel) == str and k_parallel == 'default':
k_parallel = coord.default_k_parallel
kz_is = coord.k_z(k_parallel=k_parallel, k=k_is)
len_kp = len(k_parallel)
# phase factors
epljksz = np.exp(1j * kz_is[None, :] * (sz_array[:, None] - 2 * z_is)) # z, k
emnjksz = np.exp(- 1j * kz_is[None, :] * (sz_array[:, None] - 2 * z_is))
epljkdz = np.exp(1j * kz_is[None, :] * dz_array[:, None])
emnjkdz = np.exp(- 1j * kz_is[None, :] * dz_array[:, None])
# layer response
L = np.zeros((2, 2, 2, len_kp), dtype=complex) # pol, pl/mn1, pl/mn2, kp
for pol in range(2):
L[pol, :, :, :] = lay.layersystem_response_matrix(pol, layer_system.thicknesses,
layer_system.refractive_indices, k_parallel,
coord.angular_frequency(vacuum_wavelength), i_s, i_s)
# transformation coefficients
B_dag = np.zeros((2, 2, blocksize, len_kp), dtype=complex) # pol, pl/mn, n, kp
B = np.zeros((2, 2, blocksize, len_kp), dtype=complex) # pol, pl/mn, n, kp
ct_k = kz_is / k_is
st_k = k_parallel / k_is
_, pilm_pl, taulm_pl = sf.legendre_normalized(ct_k, st_k, l_max)
_, pilm_mn, taulm_mn = sf.legendre_normalized(-ct_k, st_k, l_max)
m_list = [None for i in range(blocksize)]
for tau in range(2):
for m in range(-m_max, m_max + 1):
for l in range(max(1, abs(m)), l_max + 1):
n = fldex.multi_to_single_index(tau, l, m, l_max, m_max)
m_list[n] = m
for pol in range(2):
B_dag[pol, 0, n, :] = vwf.transformation_coefficients_vwf(tau, l, m, pol, pilm_list=pilm_pl,
taulm_list=taulm_pl, dagger=True)
B_dag[pol, 1, n, :] = vwf.transformation_coefficients_vwf(tau, l, m, pol, pilm_list=pilm_mn,
taulm_list=taulm_mn, dagger=True)
B[pol, 0, n, :] = vwf.transformation_coefficients_vwf(tau, l, m, pol, pilm_list=pilm_pl,
taulm_list=taulm_pl, dagger=False)
B[pol, 1, n, :] = vwf.transformation_coefficients_vwf(tau, l, m, pol, pilm_list=pilm_mn,
taulm_list=taulm_mn, dagger=False)
# pairs of (n1, n2), listed by abs(m1-m2)
n1n2_combinations = [[] for dm in range(2*m_max+1)]
for n1 in range(blocksize):
m1 = m_list[n1]
for n2 in range(blocksize):
m2 = m_list[n2]
n1n2_combinations[abs(m1-m2)].append((n1,n2))
wr_pl = np.zeros((len_rho, len_dz, blocksize, blocksize), dtype=np.complex64)
wr_mn = np.zeros((len_rho, len_dz, blocksize, blocksize), dtype=np.complex64)
dkp = np.diff(k_parallel)
if cu.use_gpu:
re_dkp_d = gpuarray.to_gpu(np.float32(dkp.real))
im_dkp_d = gpuarray.to_gpu(np.float32(dkp.imag))
kernel_source_code = cu.volume_lookup_assembly_code %(blocksize, len_rho, len_sz, len_kp)
helper_function = SourceModule(kernel_source_code).get_function("helper")
cuda_blocksize = 128
cuda_gridsize = (len_rho * len_sz + cuda_blocksize - 1) // cuda_blocksize
re_dwr_d = gpuarray.to_gpu(np.zeros((len_rho, len_sz), dtype=np.float32))
im_dwr_d = gpuarray.to_gpu(np.zeros((len_rho, len_sz), dtype=np.float32))
pbar = tqdm(total=blocksize**2,
desc='Layer mediated coupling ',
file=sys.stdout,
bar_format='{l_bar}{bar}| elapsed: {elapsed} remaining: {remaining}')
for dm in range(2*m_max+1):
bessel = scipy.special.jv(dm, (k_parallel[None,:]*rho_array[:,None]))
besjac = bessel * (k_parallel / (kz_is * k_is))[None,:]
for n1n2 in n1n2_combinations[dm]:
n1 = n1n2[0]
m1 = m_list[n1]
n2 = n1n2[1]
m2 = m_list[n2]
belbee_pl = np.zeros((len_dz, len_kp), dtype=complex)
belbee_mn = np.zeros((len_dz, len_kp), dtype=complex)
for pol in range(2):
belbee_pl += ((L[pol, 0, 1, :] * B_dag[pol, 0, n1, :] * B[pol, 1, n2, :])[None, :] * epljksz
+ (L[pol, 1, 0, :] * B_dag[pol, 1, n1, :] * B[pol, 0, n2, :])[None, :] * emnjksz)
belbee_mn += ((L[pol, 0, 0, :] * B_dag[pol, 0, n1, :] * B[pol, 0, n2, :])[None, :] * epljkdz
+ (L[pol, 1, 1, :] * B_dag[pol, 1, n1, :] * B[pol, 1, n2, :])[None, :] * emnjkdz)
if cu.use_gpu:
re_belbee_pl_d = gpuarray.to_gpu(np.float32(belbee_pl[None, :, :].real))
im_belbee_pl_d = gpuarray.to_gpu(np.float32(belbee_pl[None, :, :].imag))
re_belbee_mn_d = gpuarray.to_gpu(np.float32(belbee_mn[None, :, :].real))
im_belbee_mn_d = gpuarray.to_gpu(np.float32(belbee_mn[None, :, :].imag))
re_besjac_d = gpuarray.to_gpu(np.float32(besjac[:, None, :].real))
im_besjac_d = gpuarray.to_gpu(np.float32(besjac[:, None, :].imag))
helper_function(re_besjac_d.gpudata, im_besjac_d.gpudata, re_belbee_pl_d.gpudata,
im_belbee_pl_d.gpudata, re_dkp_d.gpudata, im_dkp_d.gpudata, re_dwr_d.gpudata,
im_dwr_d.gpudata, block=(cuda_blocksize, 1, 1), grid=(cuda_gridsize, 1))
wr_pl[:, :, n1, n2] = 4 * (1j)**abs(m2 - m1) * (re_dwr_d.get() + 1j * im_dwr_d.get())
helper_function(re_besjac_d.gpudata, im_besjac_d.gpudata, re_belbee_mn_d.gpudata,
im_belbee_mn_d.gpudata, re_dkp_d.gpudata, im_dkp_d.gpudata, re_dwr_d.gpudata,
im_dwr_d.gpudata, block=(cuda_blocksize, 1, 1), grid=(cuda_gridsize, 1))
wr_mn[:, :, n1, n2] = 4 * (1j)**abs(m2 - m1) * (re_dwr_d.get() + 1j * im_dwr_d.get())
else:
integrand = besjac[:, None, :] * belbee_pl[None, :, :]
wr_pl[:, :, n1, n2] = 2 * (1j)**abs(m2 - m1) * ((integrand[:, :, :-1] + integrand[:, :, 1:])
* dkp[None, None, :]).sum(axis=-1) # trapezoidal rule
integrand = besjac[:, None, :] * belbee_mn[None, :, :]
wr_mn[:, :, n1, n2] = 2 * (1j)**abs(m2 - m1) * ((integrand[:, :, :-1] + integrand[:, :, 1:])
* dkp[None, None, :]).sum(axis=-1)
pbar.update()
pbar.close()
return wr_pl, w + wr_mn, rho_array, sz_array, dz_array
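def _example_volumetric_lookup_interpolation(w_pl, w_mn, rho_array, sz_array, dz_array, n1=0, n2=0):
    # Hedged sketch (not part of the original module): evaluate one (n1, n2) entry of the volumetric
    # lookup at an arbitrary particle pair by bivariate spline interpolation over (rho, z1+z2) and
    # (rho, z1-z2). The sample geometry below is an illustrative assumption; the classes that consume
    # these tables in practice are not shown here.
    from scipy.interpolate import RectBivariateSpline
    rho, sz, dz = 150.0, 420.0, -35.0  # in-plane distance, z1+z2 and z1-z2 in length units
    pl_re = RectBivariateSpline(rho_array, sz_array, w_pl[:, :, n1, n2].real)
    pl_im = RectBivariateSpline(rho_array, sz_array, w_pl[:, :, n1, n2].imag)
    mn_re = RectBivariateSpline(rho_array, dz_array, w_mn[:, :, n1, n2].real)
    mn_im = RectBivariateSpline(rho_array, dz_array, w_mn[:, :, n1, n2].imag)
    coupling = (pl_re(rho, sz)[0, 0] + 1j * pl_im(rho, sz)[0, 0]
                + mn_re(rho, dz)[0, 0] + 1j * mn_im(rho, dz)[0, 0])
    return coupling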
def radial_coupling_lookup_table(vacuum_wavelength, particle_list, layer_system, k_parallel='default', resolution=None):
"""Prepare Sommerfeld integral lookup table to allow for a fast calculation of the coupling matrix by interpolation.
This function is called when all particles are on the same z-position.
Args:
vacuum_wavelength (float): Vacuum wavelength in length units
particle_list (list): List of particle objects
layer_system (smuthi.layers.LayerSystem): Stratified medium
k_parallel (numpy.ndarray or str): In-plane wavenumber for Sommerfeld integrals.
If 'default', smuthi.coordinates.default_k_parallel
resolution (float): Spatial resolution of lookup table in length units. (default: vacuum_wavelength / 100)
Smaller means more accurate but higher memory footprint
Returns:
(tuple): tuple containing:
lookup_table (ndarray): Coupling lookup, indices are [rho, n1, n2].
rho_array (ndarray): Values for the radial distance considered for the lookup (starting from negative
numbers to allow for simpler cubic interpolation without distinction of cases
at rho=0)
"""
sys.stdout.write('Prepare radial particle coupling lookup:\n')
sys.stdout.flush()
if resolution is None:
resolution = vacuum_wavelength / 100
sys.stdout.write('Setting lookup resolution to %f\n'%resolution)
sys.stdout.flush()
l_max = max([particle.l_max for particle in particle_list])
m_max = max([particle.m_max for particle in particle_list])
blocksize = fldex.blocksize(l_max, m_max)
x_array = np.array([particle.position[0] for particle in particle_list])
y_array = np.array([particle.position[1] for particle in particle_list])
rho_array = np.sqrt((x_array[:, None] - x_array[None, :]) ** 2 + (y_array[:, None] - y_array[None, :]) ** 2)
radial_distance_array = np.arange(- 3 * resolution, rho_array.max() + 3 * resolution, resolution)
z = particle_list[0].position[2]
i_s = layer_system.layer_number(z)
k_is = layer_system.wavenumber(i_s, vacuum_wavelength)
dz = z - layer_system.reference_z(i_s)
len_rho = len(radial_distance_array)
# direct -----------------------------------------------------------------------------------------------------------
w = np.zeros((len_rho, blocksize, blocksize), dtype=np.complex64)
sys.stdout.write('Memory footprint: ' + size_format(w.nbytes) + '\n')
sys.stdout.flush()
ct = np.array([0.0])
st = np.array([1.0])
bessel_h = []
for n in range(2* l_max + 1):
bessel_h.append(sf.spherical_hankel(n, k_is * radial_distance_array))
bessel_h[-1][radial_distance_array <= 0] = np.nan
legendre, _, _ = sf.legendre_normalized(ct, st, 2 * l_max)
pbar = tqdm(total=blocksize**2,
desc='Direct coupling ',
file=sys.stdout,
bar_format='{l_bar}{bar}| elapsed: {elapsed} remaining: {remaining}')
for m1 in range(-m_max, m_max+1):
for m2 in range(-m_max, m_max+1):
for l1 in range(max(1, abs(m1)), l_max + 1):
for l2 in range(max(1, abs(m2)), l_max + 1):
A = np.zeros(len_rho, dtype=complex)
B = np.zeros(len_rho, dtype=complex)
for ld in range(max(abs(l1 - l2), abs(m1 - m2)), l1 + l2 + 1): # if ld<abs(m1-m2) then P=0
a5, b5 = vwf.ab5_coefficients(l2, m2, l1, m1, ld)
A += a5 * bessel_h[ld] * legendre[ld][abs(m1 - m2)]
B += b5 * bessel_h[ld] * legendre[ld][abs(m1 - m2)]
for tau1 in range(2):
n1 = fldex.multi_to_single_index(tau1, l1, m1, l_max, m_max)
for tau2 in range(2):
n2 = fldex.multi_to_single_index(tau2, l2, m2, l_max, m_max)
if tau1 == tau2:
w[:, n1, n2] = A # remember that w = A.T
else:
w[:, n1, n2] = B
pbar.update()
pbar.close()
close_to_zero = radial_distance_array < rho_array[~np.eye(rho_array.shape[0],dtype=bool)].min() / 2
w[close_to_zero, :, :] = 0 # switch off direct coupling contribution near rho=0
# layer mediated ---------------------------------------------------------------------------------------------------
sys.stdout.write('Layer mediated coupling : ...')
sys.stdout.flush()
if type(k_parallel) == str and k_parallel == 'default':
k_parallel = coord.default_k_parallel
kz_is = coord.k_z(k_parallel=k_parallel, k=k_is)
len_kp = len(k_parallel)
# phase factors
epl2jkz = np.exp(2j * kz_is * dz)
emn2jkz = np.exp(-2j * kz_is * dz)
# layer response
L = np.zeros((2,2,2,len_kp), dtype=complex) # pol, pl/mn1, pl/mn2, kp
for pol in range(2):
L[pol,:,:,:] = lay.layersystem_response_matrix(pol, layer_system.thicknesses,
layer_system.refractive_indices, k_parallel,
coord.angular_frequency(vacuum_wavelength), i_s, i_s)
# transformation coefficients
B_dag = np.zeros((2, 2, blocksize, len_kp), dtype=complex) # pol, pl/mn, n, kp
B = np.zeros((2, 2, blocksize, len_kp), dtype=complex) # pol, pl/mn, n, kp
ct = kz_is / k_is
st = k_parallel / k_is
_, pilm_pl, taulm_pl = sf.legendre_normalized(ct, st, l_max)
_, pilm_mn, taulm_mn = sf.legendre_normalized(-ct, st, l_max)
m_list = [None for n in range(blocksize)]
for tau in range(2):
for m in range(-m_max, m_max + 1):
for l in range(max(1, abs(m)), l_max + 1):
n = fldex.multi_to_single_index(tau, l, m, l_max, m_max)
m_list[n] = m
for pol in range(2):
B_dag[pol,0,n,:] = vwf.transformation_coefficients_vwf(tau, l, m, pol, pilm_list=pilm_pl,
taulm_list=taulm_pl, dagger=True)
B_dag[pol,1,n,:] = vwf.transformation_coefficients_vwf(tau, l, m, pol, pilm_list=pilm_mn,
taulm_list=taulm_mn, dagger=True)
B[pol,0,n,:] = vwf.transformation_coefficients_vwf(tau, l, m, pol, pilm_list=pilm_pl,
taulm_list=taulm_pl, dagger=False)
B[pol,1,n,:] = vwf.transformation_coefficients_vwf(tau, l, m, pol, pilm_list=pilm_mn,
taulm_list=taulm_mn, dagger=False)
# pairs of (n1, n2), listed by abs(m1-m2)
n1n2_combinations = [[] for dm in range(2*m_max+1)]
for n1 in range(blocksize):
m1 = m_list[n1]
for n2 in range(blocksize):
m2 = m_list[n2]
n1n2_combinations[abs(m1-m2)].append((n1,n2))
wr = np.zeros((len_rho, blocksize, blocksize), dtype=complex)
dkp = np.diff(k_parallel)
if cu.use_gpu:
re_dkp_d = gpuarray.to_gpu(np.float32(dkp.real))
im_dkp_d = gpuarray.to_gpu(np.float32(dkp.imag))
kernel_source_code = cu.radial_lookup_assembly_code %(blocksize, len_rho, len_kp)
helper_function = SourceModule(kernel_source_code).get_function("helper")
cuda_blocksize = 128
cuda_gridsize = (len_rho + cuda_blocksize - 1) // cuda_blocksize
re_dwr_d = gpuarray.to_gpu(np.zeros(len_rho, dtype=np.float32))
im_dwr_d = gpuarray.to_gpu(np.zeros(len_rho, dtype=np.float32))
n1n2_combinations = [[] for dm in range(2*m_max+1)]
for n1 in range(blocksize):
m1 = m_list[n1]
for n2 in range(blocksize):
m2 = m_list[n2]
n1n2_combinations[abs(m1-m2)].append((n1,n2))
pbar = tqdm(total=blocksize**2,
desc='Layer mediated coupling ',
file=sys.stdout,
bar_format='{l_bar}{bar}| elapsed: {elapsed} remaining: {remaining}')
for dm in range(2*m_max+1):
bessel = scipy.special.jv(dm, (k_parallel[None,:]*radial_distance_array[:,None]))
besjac = bessel * (k_parallel / (kz_is * k_is))[None,:]
for n1n2 in n1n2_combinations[dm]:
n1 = n1n2[0]
m1 = m_list[n1]
n2 = n1n2[1]
m2 = m_list[n2]
belbe = np.zeros(len_kp, dtype=complex) # n1, n2, kp
for pol in range(2):
belbe += L[pol,0,0,:] * B_dag[pol,0,n1,:] * B[pol,0,n2,:]
belbe += L[pol,1,0,:] * B_dag[pol,1,n1,:] * B[pol,0,n2,:] * emn2jkz
belbe += L[pol,0,1,:] * B_dag[pol,0,n1,:] * B[pol,1,n2,:] * epl2jkz
belbe += L[pol,1,1,:] * B_dag[pol,1,n1,:] * B[pol,1,n2,:]
if cu.use_gpu:
re_belbe_d = gpuarray.to_gpu(np.float32(belbe[None, :].real))
im_belbe_d = gpuarray.to_gpu(np.float32(belbe[None, :].imag))
re_besjac_d = gpuarray.to_gpu(np.float32(besjac.real))
im_besjac_d = gpuarray.to_gpu(np.float32(besjac.imag))
helper_function(re_besjac_d.gpudata, im_besjac_d.gpudata,
re_belbe_d.gpudata, im_belbe_d.gpudata,
re_dkp_d.gpudata, im_dkp_d.gpudata,
re_dwr_d.gpudata, im_dwr_d.gpudata,
block=(cuda_blocksize, 1, 1), grid=(cuda_gridsize, 1))
wr[:,n1,n2] = 4 * (1j)**abs(m2-m1) * (re_dwr_d.get() + 1j*im_dwr_d.get())
else:
integrand = besjac * belbe[None, :] # rho, kp
wr[:,n1,n2] = 2 * (1j)**abs(m2-m1) * ((integrand[:,:-1] + integrand[:,1:])
* dkp[None,:]).sum(axis=-1) # trapezoidal rule
pbar.update()
pbar.close()
return w + wr, radial_distance_array
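def _example_radial_lookup_interpolation(lookup_table, radial_distance_array, n1=0, n2=0):
    # Hedged sketch (not part of the original module): evaluate one (n1, n2) entry of the radial
    # lookup at an arbitrary in-plane distance via cubic interpolation. The sample distance is an
    # illustrative assumption.
    from scipy.interpolate import interp1d
    rho = 220.0  # center-to-center distance in length units
    f_re = interp1d(radial_distance_array, lookup_table[:, n1, n2].real, kind='cubic')
    f_im = interp1d(radial_distance_array, lookup_table[:, n1, n2].imag, kind='cubic')
    return f_re(rho) + 1j * f_im(rho)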
def size_format(b):
if b < 1000:
return '%i' % b + 'B'
elif 1000 <= b < 1000000:
return '%.1f' % float(b/1000) + 'KB'
elif 1000000 <= b < 1000000000:
return '%.1f' % float(b/1000000) + 'MB'
elif 1000000000 <= b < 1000000000000:
return '%.1f' % float(b/1000000000) + 'GB'
elif 1000000000000 <= b:
return '%.1f' % float(b/1000000000000) + 'TB'
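# Illustrative values for size_format (decimal, 1000-based units):
# size_format(512) -> '512B', size_format(2.5e6) -> '2.5MB', size_format(3.2e9) -> '3.2GB'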
def spheroids_closest_points(ab_halfaxis1, c_halfaxis1, center1, orientation1, ab_halfaxis2, c_halfaxis2, center2,
orientation2):
""" Computation of the two closest points of two adjacent spheroids.
For details, see: <NAME>, Algorithms for Ellipsoids, Sibley School of Mechanical & Aerospace Engineering,
Cornell University, Ithaca, New York, February 2008
Args:
ab_halfaxis1 (float): Half axis orthogonal to symmetry axis of spheroid 1
c_halfaxis1 (float): Half axis parallel to symmetry axis of spheroid 1
center1 (numpy.array): Center coordinates of spheroid 1
orientation1 (numpy.array): Orientation angles of spheroid 1
ab_halfaxis2 (float): Half axis orthogonal to symmetry axis of spheroid 2
c_halfaxis2 (float): Half axis parallel to symmetry axis of spheroid 2
center2 (numpy.array): Center coordinates of spheroid 2
orientation2 (numpy.array): Orientation angles of spheroid 2
Returns:
Tuple containing:
- closest point on first particle (numpy.array)
- closest point on second particle (numpy.array)
- first rotation Euler angle alpha (float)
- second rotation Euler angle beta (float)
"""
def rotation_matrix(ang):
rot_mat = (np.array([[np.cos(ang[0]) * np.cos(ang[1]), -np.sin(ang[0]), np.cos(ang[0]) * np.sin(ang[1])],
[np.sin(ang[0]) * np.cos(ang[1]), np.cos(ang[0]), np.sin(ang[0]) * np.sin(ang[1])],
[-np.sin(ang[1]), 0, np.cos(ang[1])]]))
return rot_mat
rot_matrix_1 = rotation_matrix(orientation1)
rot_matrix_2 = rotation_matrix(orientation2)
a1, a2 = ab_halfaxis1, ab_halfaxis2
c1, c2 = c_halfaxis1, c_halfaxis2
ctr1, ctr2 = np.array(center1), np.array(center2)
eigenvalue_matrix_1 = np.array([[1 / a1 ** 2, 0, 0], [0, 1 / a1 ** 2, 0], [0, 0, 1 / c1 ** 2]])
eigenvalue_matrix_2 = np.array([[1 / a2 ** 2, 0, 0], [0, 1 / a2 ** 2, 0], [0, 0, 1 / c2 ** 2]])
E1 = np.dot(rot_matrix_1, np.dot(eigenvalue_matrix_1, np.transpose(rot_matrix_1)))
E2 = np.dot(rot_matrix_2, np.dot(eigenvalue_matrix_2, np.transpose(rot_matrix_2)))
S = np.matrix.getH(np.linalg.cholesky(E1))
# transformation of spheroid E1 into the unit-sphere with its center at origin / same transformation on E2
# E1_prime = np.dot(np.transpose(np.linalg.inv(S)), np.dot(E1, np.linalg.inv(S)))
# ctr1_prime = ctr1 - ctr1
E2_prime = np.dot(np.transpose(np.linalg.inv(S)), np.dot(E2, np.linalg.inv(S)))
ctr2_prime = -(np.dot(S, (ctr1 - ctr2)))
E2_prime_L = np.linalg.cholesky(E2_prime)
H = np.dot(np.linalg.inv(E2_prime_L), np.transpose(np.linalg.inv(E2_prime_L)))
p = np.array([0, 0, 0])
f = np.dot(np.transpose(ctr2_prime - p), np.transpose(np.linalg.inv(E2_prime_L)))
def minimization_fun(y_vec):
fun = 0.5 * np.dot(np.dot(np.transpose(y_vec), H), y_vec) + np.dot(f, y_vec)
return fun
def constraint_fun(x):
eq_constraint = (x[0] ** 2 + x[1] ** 2 + x[2] ** 2) ** 0.5 - 1
return eq_constraint
bnds = ((-1, 1), (-1, 1), (-1, 1))
length_constraints = {'type' : 'eq', 'fun' : constraint_fun}
flag = False
while not flag:
x0 = -1 + 2 * np.random.rand(3)
optimization_result = scipy.optimize.minimize(minimization_fun, x0, method='SLSQP', bounds=bnds,
constraints=length_constraints, tol=None, callback=None, options=None)
x_vec = np.transpose(np.dot(np.transpose(np.linalg.inv(E2_prime_L)), optimization_result['x'])
+ np.transpose(ctr2_prime))
if optimization_result['success'] == True:
if np.linalg.norm(x_vec) <= 1:
raise ValueError("particle error: particles intersect")
elif np.linalg.norm(x_vec) < np.linalg.norm(ctr2_prime):
flag = True
else:
print('wrong minimum ...')
else:
print('No minimum found ...')
p2_prime = x_vec
p2 = np.dot(np.linalg.inv(S), p2_prime) + ctr1
E1_L = np.linalg.cholesky(E1)
H = np.dot(np.linalg.inv(E1_L), np.transpose(np.linalg.inv(E1_L)))
p = p2
f = np.dot(np.transpose(ctr1 - p), np.transpose(np.linalg.inv(E1_L)))
flag = False
while not flag:
x0 = -1 + 2 * np.random.rand(3)
optimization_result2 = scipy.optimize.minimize(minimization_fun, x0, method='SLSQP', bounds=bnds,
constraints=length_constraints, tol=None, callback=None, options=None)
p1 = np.transpose(np.dot(np.transpose(np.linalg.inv(E1_L)), optimization_result2['x']) + np.transpose(ctr1))
if optimization_result2['success'] == True:
if np.linalg.norm(p1 - p) < np.linalg.norm(ctr1 - p):
flag = True
else:
print('wrong minimum ...')
else:
print('No minimum found ...')
p1p2 = p2 - p1
azimuth = np.arctan2(p1p2[1], p1p2[0])
elevation = np.arctan2(p1p2[2], (p1p2[0] ** 2 + p1p2[1] ** 2) ** 0.5)
if p1p2[2] < 0:
beta = (np.pi / 2) + elevation
else:
beta = (-np.pi / 2) + elevation
alpha = -azimuth
return p1, p2, alpha, beta
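def _example_spheroids_closest_points():
    # Hedged usage sketch with made-up geometry: two identical prolate spheroids whose centers are
    # separated along x, far enough apart not to intersect. All values are illustrative assumptions.
    p1, p2, alpha, beta = spheroids_closest_points(
        ab_halfaxis1=100, c_halfaxis1=200, center1=np.array([0., 0., 0.]),
        orientation1=np.array([0., 0., 0.]),
        ab_halfaxis2=100, c_halfaxis2=200, center2=np.array([600., 0., 0.]),
        orientation2=np.array([0., 0., 0.]))
    return p1, p2, alpha, beta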
def direct_coupling_block_pvwf_mediated(vacuum_wavelength, receiving_particle, emitting_particle, layer_system,
k_parallel):
"""Direct particle coupling matrix :math:`W` for two particles (via plane vector wave functions).
For details, see:
<NAME> al., Phys. Rev. A 96, 033822, DOI: 10.1103/PhysRevA.96.033822 or arXiv:1708.04808
Args:
vacuum_wavelength (float): Vacuum wavelength :math:`\lambda` (length unit)
receiving_particle (smuthi.particles.Particle): Particle that receives the scattered field
emitting_particle (smuthi.particles.Particle): Particle that emits the scattered field
layer_system (smuthi.layers.LayerSystem): Stratified medium in which the coupling takes place
k_parallel (numpy.array): In-plane wavenumber for plane wave expansion
Returns:
Direct coupling matrix block (numpy array).
"""
if type(receiving_particle).__name__ != 'Spheroid' or type(emitting_particle).__name__ != 'Spheroid':
raise NotImplementedError('plane wave coupling currently implemented only for spheroids')
lmax1 = receiving_particle.l_max
mmax1 = receiving_particle.m_max
assert lmax1 == mmax1, 'PVWF coupling requires lmax == mmax for each particle.'
lmax2 = emitting_particle.l_max
mmax2 = emitting_particle.m_max
assert lmax2 == mmax2, 'PVWF coupling requires lmax == mmax for each particle.'
lmax = max([lmax1, lmax2])
m_max = max([mmax1, mmax2])
blocksize1 = fldex.blocksize(lmax1, mmax1)
blocksize2 = fldex.blocksize(lmax2, mmax2)
n_medium = layer_system.refractive_indices[layer_system.layer_number(receiving_particle.position[2])]
# finding the orientation of a plane separating the spheroids
_, _, alpha, beta = spheroids_closest_points(
emitting_particle.semi_axis_a, emitting_particle.semi_axis_c, emitting_particle.position,
emitting_particle.euler_angles, receiving_particle.semi_axis_a, receiving_particle.semi_axis_c,
receiving_particle.position, receiving_particle.euler_angles)
# positions
r1 = np.array(receiving_particle.position)
r2 = np.array(emitting_particle.position)
# -*- coding: utf-8 -*-
"""
@author: <NAME>
This is a unit test. If you would like to further develop pahmc_ode_cpu, you
should visit here frequently. You should also be familiar with the Python (3.7)
built-in module 'unittest'.
To run this unit test, copy this file into its parent directory and run it.
"""
import time
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
from pahmc_ode_cpu.data_preparation import Data # import module to be tested
from pahmc_ode_cpu import lib_dynamics
name = 'lorenz96'
# specify the data below
D = 20
length = 1000
dt = 0.025
noise = 0.4 * np.ones(D)
par_true = 8.17
# stimuli = np.ones((D,2*length)) * np.arange(2*length) * 1e-9
stimuli = np.zeros((D,2*length))
x0 = np.ones(D)
x0[0] = 0.01
# instantiate
dyn = getattr(lib_dynamics, f'Builtin_{name}')(name, stimuli)
# get (noisy) data from the module being tested
t0 = time.perf_counter()
data_noisy, stimuli \
= Data().generate(dyn, D, length, dt, noise, par_true, x0, True)
print(f'Time elapsed = {time.perf_counter()-t0:.2f} seconds.')
noiselessfile = np.load(Path.cwd()/'user_data'/f'{dyn.name}_noiseless.npz')
data_noiseless = noiselessfile['data']
noiselessfile.close()
print(f'\nChi-squared = {np.sum((data_noisy-data_noiseless)**2)}')
"""
GAIL file
"""
import numpy as np
import torch
from torch import nn
# from torch.nn import utils
import torch.nn.functional as f
import random
from policy import MlpNetwork, SoftQLearning
from grid_mdp import GridMDP, MazeWorld, WindyMazeWorld, ToroidWorld
from rnd import RND
import matplotlib.pyplot as plt
from buffers import ReplayBuffer
import argparse
import os
from os import path
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--seed', help='random seed', type=int, default=1123)
parser.add_argument('--rnd', help='random network distillation', type=bool, default=False)
parser.add_argument('--reward', help="reward function to use ['gail', 'airl', 'fairl', 'aim', 'none']", type=str,
default='aim')
parser.add_argument('--dir', help="directory to save results in", type=str,
default='aim_results')
args = parser.parse_args()
torch.set_default_dtype(torch.float32)
# Set random seeds
seed = 42 * args.seed
print(args.seed)
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
reward_to_use = args.reward # use one of ['gail', 'airl', 'fairl', 'none']
print(reward_to_use)
def wasserstein_reward(d):
"""
return the wasserstein reward
"""
return d
def gail_reward(d):
"""
Take discriminator output and return the GAIL reward
:param d:
:return:
"""
d = torch.sigmoid(d)
return d.log() # - (1 - d).log()
def airl_reward(d):
"""
Take discriminator output and return the AIRL reward
:param d:
:return:
"""
s = torch.sigmoid(d)
reward = s.log() - (1 - s).log()
return reward
def fairl_reward(d):
"""
Take discriminator output and return FAIRL reward
:param d:
:return:
"""
d = torch.sigmoid(d)
h = d.log() - (1 - d).log()
return h.exp() * (-h)
reward_dict = {'gail': gail_reward, 'airl': airl_reward, 'fairl': fairl_reward, 'aim': wasserstein_reward,
'none': None}
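def _example_reward_transforms():
    # Hedged sketch: evaluate each reward shaping on a few illustrative discriminator logits to see
    # how the transforms differ; the logit values are arbitrary assumptions.
    logits = torch.tensor([[-2.0], [0.0], [2.0]])
    return {name: fn(logits) for name, fn in reward_dict.items() if fn is not None}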
class Discriminator(nn.Module):
"""
The discriminator used to learn the potentials or the reward functions
"""
def __init__(self, x_dim=1, max_state=10., min_state=0):
super(Discriminator, self).__init__()
self.mean_state = torch.tensor((max_state - min_state) / 2 + min_state, dtype=torch.float32)
self.diff_state = torch.tensor(max_state - min_state, dtype=torch.float32)
self.input_dim = x_dim
self.d = MlpNetwork(self.input_dim, n_units=64) # , activ=f.tanh)
def normalize(self, x: torch.Tensor) -> torch.Tensor:
"""
normalize input
:param x:
:return:
"""
x = x.type(torch.float32)
x = (x - self.mean_state) / self.diff_state
return x
def forward(self, x: torch.Tensor) -> (torch.Tensor, torch.Tensor):
"""
return discriminator output
:param x:
:return:
"""
x = self.normalize(x)
output = self.d(x)
return output
def to_one_hot(x: torch.Tensor, num_vals) -> torch.Tensor:
"""
Convert tensor to one-hot encoding
"""
if type(x) is not torch.Tensor:
x = torch.tensor(x)
x = x.type(torch.long)
x_one_hot = torch.zeros((x.shape[0], num_vals), dtype=torch.float32)
x_one_hot = x_one_hot.scatter(1, x, 1.)
return x_one_hot
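def _example_to_one_hot():
    # Hedged sketch: encode three discrete actions out of four possible values as one-hot rows.
    actions = torch.tensor([[0], [2], [3]])
    return to_one_hot(actions, num_vals=4)  # tensor of shape (3, 4)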
class GAIL:
"""
Class to take the continuous MDP and use gail to match given target distribution
"""
def __init__(self):
self.env = MazeWorld()
# self.env = ToroidWorld()
self.policy = SoftQLearning(x_dim=self.env.dims, out_dim=len(self.env.action_space),
max_state=self.env.max_state, min_state=self.env.min_state,
ent_coef=.3, target_update=3e-2)
self.discriminator = Discriminator(x_dim=self.env.dims, max_state=self.env.max_state,
min_state=self.env.min_state)
self.discount = 0.99
self.check_state = set()
self.agent_buffer = ReplayBuffer(size=5000)
self.policy_optimizer = torch.optim.Adam(self.policy.parameters()) # , lr=3e-4)
self.discriminator_optimizer = torch.optim.Adam(self.discriminator.parameters()) # , lr=1e-4)
if args.rnd:
self.rnd = RND(x_dim=self.env.dims)
else:
self.rnd = None
self.max_r = 0.
self.min_r = -1.
def gather_data(self, num_trans=100) -> None:
"""
Gather data from current policy
used to:
* fit value function
* update policy
* plot histograms
:param num_trans:
:return:
"""
t = 0
while t < num_trans:
s = self.env.reset()
s = torch.tensor(s).type(torch.float32).reshape([-1, self.env.dims])
done = False
while not done:
# self.states.append(deepcopy(s))
action = self.policy.sample_action(s)
# self.actions.append(a)
a = np.squeeze(action.data.detach().numpy())
s_p, r, done, _ = self.env.step(a)
s_p = torch.tensor(s_p).type(torch.float32).reshape([-1, self.env.dims])
# d = self.discriminator(sp)
# i_r = gail_reward(d)
# self.next_states.append(deepcopy(s))
# self.rewards.append(i_r) # deepcopy(r))
# self.dones.append(deepcopy(done))
self.agent_buffer.add(s.squeeze(), action.reshape([-1]).detach(), r, s_p.squeeze(), done)
# if s_p not in self.check_state:
# self.check_state.add(s_p)
# self.target_buffer.add(s, a, r, s_p, done)
s = s_p
t += 1
# self.states.append(s)
def compute_td_targets(self, states, next_states, dones, rewards=None):
"""
Compute the value of the current states and
the TD target based on one step reward
and value of next states
:return: value of current states v, TD target targets
"""
states = states.reshape([-1, self.env.dims])
next_states = next_states.reshape([-1, self.env.dims])
v = self.policy(states)[0]
v_prime = self.policy(next_states)[-1]
if rewards is not None:
dones = rewards.type(torch.float32).reshape([-1, 1])
else:
dones = dones.type(torch.float32).reshape([-1, 1])
reward_func = reward_dict[reward_to_use]
if reward_func is not None:
# d0 = self.discriminator(states)
d1 = self.discriminator(next_states)
# Compute rewards
# r0 = reward_func(d0)
r1 = reward_func(d1)
rewards = rewards.type(torch.float32).reshape([-1, 1]) + ((r1 - self.max_r) / (self.max_r - self.min_r))
targets = rewards.type(torch.float32).reshape([-1, 1])
targets += (1. - dones) * self.discount * v_prime.reshape([-1, 1])
return v, targets.detach()
def fit_v_func(self):
"""
This function will train the value function using the collected data
:return:
"""
self.policy_optimizer.zero_grad()
s, a, r, s_p, dones = self.agent_buffer.sample(100)
if args.rnd:
spn = s_p.detach().numpy()
self.rnd.update(spn)
r += 0.3 * self.rnd.reward(spn)
q, targets = self.compute_td_targets(s, s_p, dones, rewards=r)
actions = torch.tensor(a, dtype=torch.long)
v = q.gather(dim=-1, index=actions)
loss = torch.mean(0.5 * (targets - v) ** 2)
loss.backward()
self.policy_optimizer.step()
self.policy.update_target()
return
# def optimize_policy(self):
# """
# This function will optimize the policy to maximize returns
# Based on collected data
# :return:
# """
# self.policy_optimizer.zero_grad()
# s, a, r, s_p, dones = self.agent_buffer.sample(100)
# v, targets = self.compute_td_targets(s, s_p, dones, rewards=r)
# advantages = (targets - v).detach()
# a = a.reshape([-1, 1]).detach()
# neg_log_pi = -1. * self.policy.pi_loss(s.reshape([-1, self.env.dims]), a)
# entropy_kl = self.policy.entropy(s.reshape([-1, self.env.dims]))
# loss = torch.mean(advantages * neg_log_pi) + 1e-1 * torch.mean(entropy_kl)
# loss.backward()
# self.policy_optimizer.step()
# return
def compute_aim_pen(self,
target_state: torch.Tensor,
prev_state: torch.Tensor,
next_state_state: torch.Tensor, lambda_=10.):
"""
Computes values of the discriminator at different points
and constraints the difference to be 0.1
"""
prev_out = self.discriminator(prev_state)
next_out = self.discriminator(next_state_state)
penalty = lambda_ * torch.max(torch.abs(next_out - prev_out) - 0.1, torch.tensor(0.)).pow(2).mean()
return penalty
def compute_grad_pen(self,
target_state: torch.Tensor,
policy_state: torch.Tensor,
lambda_=10.):
"""
Computes the gradients by mixing the data randomly
and creates a loss for the magnitude of the gradients.
"""
alpha = torch.rand(target_state.size(0), 1)
# expert_data = torch.cat([expert_state, expert_action], dim=1)
# policy_data = torch.cat([policy_state, policy_action], dim=1)
alpha = alpha.expand_as(target_state).to(target_state.device)
mixup_data = alpha * target_state + (1 - alpha) * policy_state
mixup_data.requires_grad = True
disc = self.discriminator(mixup_data)
ones = torch.ones(disc.size()).to(disc.device)
grad = torch.autograd.grad(
outputs=disc,
inputs=mixup_data,
grad_outputs=ones,
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
grad_pen = lambda_ * (torch.max(grad.norm(2, dim=1) - 0.01, torch.tensor(0.))).pow(2).mean()
return grad_pen
def optimize_discriminator(self):
"""
Optimize the discriminator based on the memory and
target_distribution
:return:
"""
num_samples = 100
self.discriminator_optimizer.zero_grad()
# _, _, _, target_distribution, _ = self.target_buffer.sample(100)
target_dist = np.reshape(self.env.target_distribution(), (-1,))
target_distribution = np.random.choice(target_dist.shape[0], num_samples, p=target_dist)
states, _, _, next_states, _ = self.agent_buffer.sample(num_samples)
# target_distribution = sample_target_distribution(mean=self.env.target_mean, std=self.env.target_std,
# num=100)
target_distribution = target_distribution.reshape([-1, 1])
if self.env.dims > 1:
target_distribution = np.concatenate([target_distribution, target_distribution], axis=-1)
target_distribution[:, 0] = target_distribution[:, 0] // self.env.y_dim
target_distribution[:, 1] = target_distribution[:, 1] % self.env.y_dim
next_states = next_states.reshape([-1, self.env.dims])
ones = torch.tensor(target_distribution).type(torch.float32).reshape([-1, self.env.dims])
zeros = torch.tensor(next_states).type(torch.float32).reshape([-1, self.env.dims])
zeros_prev = torch.tensor(states).type(torch.float32).reshape([-1, self.env.dims])
# ########## GAIL loss
if reward_to_use != 'aim':
labels_ones = torch.ones((num_samples, 1)) * 0.9
labels_zeros = torch.ones((num_samples, 1)) * 0.1
data = torch.cat([ones, zeros])
pred = self.discriminator(data)
labels = torch.cat([labels_ones, labels_zeros])
gail_loss = f.binary_cross_entropy_with_logits(pred, labels)
grad_penalty = self.compute_grad_pen(ones, zeros)
loss = gail_loss + grad_penalty
else:
# ####### WGAN loss
pred_ones = self.discriminator(ones)
pred_zeros = self.discriminator(zeros)
preds = torch.cat([pred_zeros, pred_ones], dim=0)
self.max_r = torch.max(preds).detach().cpu().numpy() + 0.1
self.min_r = torch.min(preds).detach().cpu().numpy() - 0.1
wgan_loss = torch.mean(pred_zeros) + torch.mean(pred_ones * (-1.))
aim_penalty = self.compute_aim_pen(ones, zeros_prev, zeros)
# grad_penalty = self.compute_grad_pen(ones, zeros)
loss = wgan_loss + aim_penalty # + grad_penalty
# loss = torch.mean(- labels * pred.log() - (1 - labels) * (1. - pred).log())
loss.backward()
# utils.clip_grad_norm_(self.discriminator.parameters(), max_norm=0.5)
self.discriminator_optimizer.step()
def plot_dist(self, num_samples=100, it=0, dname='aim'):
"""
plot the two distributions as histograms
:return:
"""
# dname = 'r_neg'
if not path.exists(dname):
os.mkdir(dname)
# _, _, _, target_distribution, _ = self.target_buffer.sample(num_samples)
states, _, _, next_states, _ = self.agent_buffer.sample(num_samples)
target_dist = np.reshape(self.env.target_distribution(), (-1,))
target_distribution = np.random.choice(target_dist.shape[0], num_samples, p=target_dist)
target_distribution = target_distribution.reshape([-1, 1]).astype(np.float32)
if self.env.dims > 1:
target_distribution = np.concatenate([target_distribution, target_distribution], axis=-1)
target_distribution[:, 0] = target_distribution[:, 0] // self.env.y_dim
target_distribution[:, 1] = target_distribution[:, 1] % self.env.y_dim
# target_distribution += np.random.normal(loc=0, scale=0.5, size=target_distribution.shape)
next_states = next_states.numpy().reshape([-1, self.env.dims]).astype(np.float32)
# next_states += np.random.normal(loc=0., scale=0.01, size=next_states.shape)
q, v, qt, vt = self.policy(states)
print(f"q: {np.mean(q.detach().numpy())}, v: {np.mean(v.detach().numpy())},"
f" qt: {np.mean(qt.detach().numpy())}, vt: {np.mean(vt.detach().numpy())}")
if self.env.dims == 1:
xloc = np.arange(0, self.env.num_states)
"""
Adapated from Vertex frequency codebase. Credit to <NAME>.
Algorithms based on https://arxiv.org/pdf/1905.09758.pdf
Goal is to estimate the density of eigenvalues over a known range.
"""
import numpy as np
import scipy.sparse as ss
import scipy.io as sio
import numpy.random as nr
import matplotlib.pyplot as plt
import graphtools
import sklearn.datasets
import pygsp
import sklearn
import ot
def moments_cheb_dos(A, n, nZ=100, N=10, kind=1):
"""
Compute a column vector of Chebyshev moments of the form c(k) = tr(T_k(A))
for k = 0 to N-1. This routine does no scaling; the spectrum of A should
already lie in [-1,1]. The traces are computed via a stochastic estimator
with nZ probe vectors.
Args:
A: Matrix or function apply matrix (to multiple RHS)
n: Dimension of the space
nZ: Number of probe vectors with which we compute moments
N: Number of moments to compute
kind: 1 or 2 for first or second kind Chebyshev functions
(default = 1)
Output:
c: a column vector of N moment estimates
cs: standard deviation of the moment estimator
(std/sqrt(nZ))
"""
# Create a function handle if given a matrix
if callable(A):
Afun = A
else:
if isinstance(A, np.ndarray):
A = ss.csr_matrix(A)
Afun = lambda x: A * x
if N < 2:
N = 2
# Set up random probe vectors (allowed to be passed in)
if not isinstance(nZ, int):
Z = nZ
nZ = Z.shape[1]
else:
Z = np.sign(nr.randn(n, nZ))
# Estimate moments for each probe vector
cZ = moments_cheb(Afun, Z, N, kind)
c = np.mean(cZ, 1)
cs = np.std(cZ, 1, ddof=1) / np.sqrt(nZ)
c = c.reshape([N, -1])
cs = cs.reshape([N, -1])
return c, cs
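def _example_moments_cheb_dos():
    # Hedged sketch: estimate Chebyshev DOS moments for a small symmetric matrix whose spectrum
    # already lies inside [-1, 1] (here by construction). Matrix size, probe count and moment count
    # are arbitrary assumptions.
    n = 200
    A = np.diag(np.linspace(-0.9, 0.9, n))
    c, cs = moments_cheb_dos(A, n, nZ=50, N=20)
    return c, cs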
def moments_cheb(A, V, N=10, kind=1):
"""
Compute a column vector of Chebyshev moments of the form c(k) = v'*T_k(A)*v
for k = 0 to N-1. This routine does no scaling; the spectrum of A should
already lie in [-1,1]
Args:
A: Matrix or function apply matrix (to multiple RHS)
V: Starting vectors
N: Number of moments to compute
kind: 1 or 2 for first or second kind Chebyshev functions
(default = 1)
Output:
c: a length N vector of moments
"""
if N < 2:
N = 2
if not isinstance(V, np.ndarray):
V = V.toarray()
# Create a function handle if given a matrix
if callable(A):
Afun = A
else:
if isinstance(A, np.ndarray):
A = ss.csr_matrix(A)
Afun = lambda x: A * x
n, p = V.shape
c = np.zeros((N, p))
# Run three-term recurrence to compute moments
TVp = V # x
TVk = kind * Afun(V) # Ax
c[0] = np.sum(V * TVp, 0) # xx
c[1] = np.sum(V * TVk, 0) # xAx
for i in range(2, N):
TV = 2 * Afun(TVk) - TVp # A*2T_1 - T_o
TVp = TVk
TVk = TV
c[i] = np.sum(V * TVk, 0)
return c
def plot_cheb_argparse(npts, c, xx0=-1, ab=np.array([1, 0])):
"""
Handle argument parsing for plotting routines. Should not be called directly
by users.
Args:
npts: Number of points in a default mesh
c: Vector of moments
xx0: Input sampling mesh (original coordinates)
ab: Scaling map parameters
Output:
c: Vector of moments
xx: Input sampling mesh ([-1,1] coordinates)
xx0: Input sampling mesh (original coordinates)
ab: Scaling map parameters
"""
if isinstance(xx0, int):
# only c is given
xx0 = np.linspace(-1 + 1e-8, 1 - 1e-8, npts)
xx = xx0
else:
if len(xx0) == 2:
# parameters are c, ab
ab = xx0
xx = np.linspace(-1 + 1e-8, 1 - 1e-8, npts)
xx0 = ab[0] * xx + ab[1]
else:
# parameteres are c, xx0
xx = xx0
# All parameters specified
if not (ab == [1, 0]).all():
xx = (xx0 - ab[1]) / ab[0]
return c, xx, xx0, ab
def plot_chebint(varargin, npts=1001, pflag=True):
"""
Given a (filtered) set of first-kind Chebyshev moments, compute the integral
of the density:
int_0^s (2/pi)*sqrt(1-x^2)*( c(0)/2+sum_{n=1}^{N-1}c_nT_n(x) )
Output a plot of cumulative density function by default.
Args:
c: Array of Chebyshev moments (on [-1,1])
xx: Evaluation points (defaults to mesh of 1001 pts)
ab: Mapping parameters (default to identity)
pflag: Option to output the plot
Output:
yy: Estimated cumulative density up to each xx point
"""
# Parse arguments
c, xx, xx0, ab = plot_cheb_argparse(npts, *varargin)
N = len(c)
txx = np.arccos(xx)
yy = c[0] * (txx - np.pi) / 2
for idx in np.arange(1, N):
yy += c[idx] * np.sin(idx * txx) / idx
yy *= -2 / np.pi
# Plot by default
if pflag:
plt.plot(xx0, yy)
# plt.ion()
plt.show()
# plt.pause(1)
# plt.clf()
return [xx0, yy]
def plot_chebhist(varargin, pflag=True, npts=21):
"""
Given a (filtered) set of first-kind Chebyshev moments, compute the integral
of the density:
int_0^s (2/pi)*sqrt(1-x^2)*( c(0)/2+sum_{n=1}^{N-1}c_nT_n(x) )
Output a histogram of cumulative density function by default.
Args:
c: Vector of Chebyshev moments (on [-1,1])
xx: Evaluation points (defaults to mesh of 21 pts)
ab: Mapping parameters (default to identity)
pflag: Option to output the plot
Output:
yy: Estimated counts on buckets between xx points
"""
# Parse arguments
c, xx, xx0, ab = plot_cheb_argparse(npts, *varargin)
# Compute CDF and bin the difference
yy = plot_chebint((c, xx0, ab), pflag=False)
yy = yy[1:] - yy[:-1]
xm = (xx0[1:] + xx0[:-1]) / 2
# Plot by default
if pflag:
plt.bar(xm + 1, yy, align="center", width=0.1)
# plt.ion()
plt.show()
# plt.pause(1)
# plt.clf()
return [xm + 1, yy]
def matrix_normalize(W, mode="s"):
"""
Normalize an adjacency matrix.
Args:
W: weighted adjacency matrix
mode: string indicating the style of normalization;
's': Symmetric scaling by the degree (default)
'r': Normalize to row-stochastic
'c': Normalize to col-stochastic
Output:
N: a normalized adjacency matrix or stochastic matrix (in sparse form)
"""
dc = np.asarray(W.sum(0)).squeeze()
dr = np.asarray(W.sum(1)).squeeze()
[i, j, wij] = ss.find(W)
# Normalize in desired style
if mode in "sl":
wij = wij / np.sqrt(dr[i] * dc[j])
elif mode == "r":
wij = wij / dr[i]
elif mode == "c":
wij = wij / dc[j]
else:
raise ValueError("Unknown mode!")
N = ss.csr_matrix((wij, (i, j)), shape=W.shape)
return N
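def _example_graph_dos_workflow(W):
    # Hedged sketch of the intended pipeline: symmetrically normalize a weighted adjacency matrix
    # (its spectrum then lies in [-1, 1]), estimate Chebyshev moments stochastically, and evaluate
    # the cumulative spectral density. W can be any scipy sparse adjacency matrix; the probe and
    # moment counts are arbitrary assumptions.
    N_op = matrix_normalize(W, mode='s')
    c, _ = moments_cheb_dos(N_op, N_op.shape[0], nZ=20, N=30)
    xx, cdf = plot_chebint((c,), npts=501, pflag=False)
    return xx, cdf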
def simple_diffusion_embeddings(graph, distribution_labels, subsample=False, scales=7):
"""
The plain version, without any frills.
Return the vectors whose L1 distances are the EMD between the given distributions.
The graph supplied (a PyGSP graph) should encompass both distributions.
The distributions themselves should be one-hot encoded with the distribution_labels parameter.
"""
heat_filter = pygsp.filters.Heat(
graph, tau=[2 ** i for i in range(1, scales + 1)], normalize=False
)
diffusions = heat_filter.filter(distribution_labels, method="chebyshev", order=32)
print(diffusions.shape)
if subsample:
rng = np.random.default_rng(42)
if len(diffusions.shape) == 2:
n_samples = 1
n, n_scales = diffusions.shape
else:
n, n_samples, n_scales = diffusions.shape
embeddings = []
for i in range(n_scales):
d = diffusions[..., i]
weight = 0.5 ** (n_scales - i)
if subsample:
subsample_idx = rng.integers(n, size=n // 10)
lvl_embed = weight * d[subsample_idx].T
else:
lvl_embed = weight * d.T
embeddings.append(lvl_embed)
if len(diffusions.shape) == 2:
embeddings = np.concatenate(embeddings)
else:
embeddings = np.concatenate(embeddings, axis=1)
return embeddings
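def _example_diffusion_emd():
    # Hedged sketch: diffuse two one-hot "distributions" over a random sensor graph and compare them
    # through the L1 distance of their multiscale embeddings (an EMD surrogate). Graph type, sizes
    # and scale count are illustrative assumptions.
    graph = pygsp.graphs.Sensor(N=200, seed=42)
    graph.estimate_lmax()
    labels = np.zeros((graph.N, 2))
    labels[:100, 0] = 1.0 / 100   # distribution supported on the first half of the nodes
    labels[100:, 1] = 1.0 / 100   # distribution supported on the second half
    emb = simple_diffusion_embeddings(graph, labels, scales=5)
    return np.sum(np.abs(emb[0] - emb[1]))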
def l1_distance_matrix(embeddings):
"""
Gives a square distance matrix with the L1 distances between the provided embeddings
"""
D = np.zeros((len(embeddings), len(embeddings)))
for i, embed1 in enumerate(embeddings):
for j, embed2 in enumerate(embeddings):
D[i][j] = np.sum(np.abs(embed1 - embed2))
return D
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 26 11:00:07 2020
@author: <NAME>
"""
import matplotlib.pyplot as plt
from scipy.spatial import distance
from scipy import signal
import numpy as np
# Constants
DEFAULT_NEURONUM = 500
DEFAULT_TEND = 7000
DEFAULT_IDRIVE = 3
DEFAULT_XNUME = 20
DEFAULT_YNUME = 20
DEFAULT_XNUMI = 10
DEFAULT_YNUMI = 10
DEFAULT_DEGREE_EE = 40
DEFAULT_DEGREE_EI = 10
DEFAULT_DEGREE_IE = 400
DEFAULT_DEGREE_II = 100
DEFAULT_WEIGHT_EE = 0.01
DEFAULT_WEIGHT_EI = 0.05
DEFAULT_WEIGHT_IE = 0.04
DEFAULT_WEIGHT_II = 0.04
DEFAULT_TAU_SYN = 3
DEFAULT_GKS_MIN = 0.2
DEFAULT_GKS_MAX = 1.5
# Class
class NeuroNet():
def __init__(self,
neuroNum = DEFAULT_NEURONUM,
tEnd = DEFAULT_TEND,
Idrive = DEFAULT_IDRIVE,
tauSyn = DEFAULT_TAU_SYN,
gKsMin = DEFAULT_GKS_MIN,
gKsMax = DEFAULT_GKS_MAX):
'''
Parameters
----------
neuroNum : TYPE, optional
DESCRIPTION. The default is DEFAULT_NEURONUM.
tEnd : TYPE, optional
DESCRIPTION. The default is DEFAULT_TEND.
Idrive : TYPE, optional
DESCRIPTION. The default is DEFAULT_IDRIVE.
tauSyn : TYPE, optional
DESCRIPTION. The default is DEFAULT_TAU_SYN.
Returns
-------
None.
'''
# simulation properties
self.tEnd = tEnd # ms
self.tStep = 0.05 # ms
self.tPoints = np.arange(0,self.tEnd,self.tStep)
# ensemble properties
self.neuroNum = neuroNum
self.Idrive = Idrive*np.ones(shape=(self.neuroNum,1))
# neuronal properties
self.gKsMin = gKsMin
self.gKsMax = gKsMax
self.randomInitialStates()
self.gKs = self.gKsMax
# initial adjMat
self.adjMat = np.zeros(shape=(self.neuroNum,self.neuroNum))
#----------------------------------------------------------------------------------------------------
# This essentially generates the context [left_ctx, right_ctx] for each
# entity mention in each sentence of the validation and test set and then converts that context into
# a GloVe context vector.
#----------------------------------------------------------------------------------------------------
from generate_feature_vectors_and_class_labels.options import Options
import json
import os
from gensim.parsing.preprocessing import strip_punctuation
from gensim.parsing.preprocessing import preprocess_string
import numpy as np
import scipy as sp
CUSTOM_FILTERS = [lambda x: x.lower(), strip_punctuation]
my_options = Options()
dataset='val'
#-------------------------------------------------------
# Supporting Functions
#-------------------------------------------------------
def clean_type_hierarchy(complete_type):
complete_type.discard('/religion/religion')
complete_type.discard('/computer')
complete_type.add('/computer_science')
complete_type.discard('/computer/algorithm')
complete_type.add('/computer_science/algorithm')
complete_type.discard('/computer/programming_language')
complete_type.add('/computer_science/programming_language')
complete_type.discard('/government/government')
complete_type.add('/government/administration')
return complete_type
#-------------------------------------------------------
def generate_conflicting_labels_dict():
conflicting_labels_dict = {}
conflicting_labels_dict['/computer'] = '/computer_science'
conflicting_labels_dict['/computer/algorithm'] = '/computer_science/algorithm'
conflicting_labels_dict['/computer/programming_language'] = '/computer_science/programming_language'
conflicting_labels_dict['/government/government'] = '/government/administration'
return conflicting_labels_dict
#-------------------------------------------------------
def extract_leaf_and_internal_nodes(complete_type):
for type in complete_type:
type_path = type.split('/')[1:]
leaf_nodes.add(type_path[-1])
for i in range(len(type_path) - 1):
internal_nodes.add(type_path[i])
leaf_nodes_list = list(leaf_nodes)
for node in leaf_nodes_list:
if node in internal_nodes:
leaf_nodes.remove(node)
print(leaf_nodes)
print(len(leaf_nodes))
leaf_nodes_list = list(leaf_nodes)
leaf_nodes_list.sort()
internal_nodes_list = list(internal_nodes)
internal_nodes_list.sort()
total_nodes_list = leaf_nodes_list + internal_nodes_list
parent_of = [0] * (len(leaf_nodes_list) + len(internal_nodes_list))
is_leaf_node_list = [1] * len(leaf_nodes_list) + [0] * len(internal_nodes_list)
for type in complete_type:
type_path = type.split('/')[1:]
if len(type_path) == 1:
index = total_nodes_list.index(type_path[0])
parent_of[index] = -1
else:
for i in range(1, len(type_path)):
if type_path[i] in leaf_nodes_list:
index = total_nodes_list.index(type_path[i])
parent_of[index] = total_nodes_list.index(type_path[i - 1])
return leaf_nodes_list, internal_nodes_list, total_nodes_list, parent_of, is_leaf_node_list
#-------------------------------------------------------
def generate_context_embedding(context_words_list, token_feature_vectors, token_list, method="average"):
if context_words_list == []:
return token_feature_vectors[-2]
else:
context_embd_list = []
no_context_words = 0
for word in context_words_list:
no_context_words += 1
if word in token_list:
index = token_list.index(word)
context_embd_list.append(token_feature_vectors[index])
else:
context_embd_list.append(token_feature_vectors[-1])
if method == 'average':
context_embd = sum(context_embd_list) / no_context_words
return context_embd
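def _toy_context_embedding_example():
    # Hedged illustration (not part of the original pipeline): average token vectors for a tiny
    # context. The 4-d toy vocabulary is a made-up assumption; the real script loads the 300-d
    # matrix token_embedding_300d.npy further down.
    toy_tokens = ['the', 'city', 'of', 'london']
    toy_vectors = np.random.RandomState(0).randn(len(toy_tokens) + 2, 4)  # +2: no-context and OOV rows
    return generate_context_embedding(['city', 'london'], toy_vectors, toy_tokens)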
#--------------------------------------------------------
# Generating Type Labels Lists
#--------------------------------------------------------
if dataset=='val':
with open(os.path.join(my_options.raw_input_dir,my_options.val_data_file)) as json_file:
sentences_val = json.load(json_file)
else:
with open(os.path.join(my_options.raw_input_dir,my_options.test_data_file)) as json_file:
sentences_val = json_file.readlines()
complete_type=set()
conflicting_labels_dict={}
internal_nodes=set()
leaf_nodes=set()
#-----------------------------------------------------------------
# ----collecting all type paths across all mentions of entities
#-----------------------------------------------------------------
if my_options.with_non_leaf:
with open(os.path.join(my_options.feature_output_dir, 'with_non_leaf_type_name_train_split.json'), 'r') as filehandle:
total_nodes_list = json.load(filehandle)
else:
with open(os.path.join(my_options.feature_output_dir, 'without_non_leaf_type_name_train_split.json'), 'r') as filehandle:
total_nodes_list = json.load(filehandle)
'''
for sentence in sentences_val:
for mentions in json.loads(sentence)['mentions']:
for label in mentions['labels']:
complete_type.add(label)
complete_type = clean_type_hierarchy(complete_type)
conflicting_labels_dict = generate_conflicting_labels_dict()
leaf_nodes_list, internal_nodes_list, total_nodes_list, parent_of, is_leaf_node_list = extract_leaf_and_internal_nodes(complete_type)
'''
#--------------------------------------------------------
# Generating Type Labels and Context Feature Vectors for Entities
#--------------------------------------------------------
entities=[]
#entity_type_matrix = []
entities_labels=[]
entities_total_context_feature_vectors = []
entities_left_context_feature_vectors = []
entities_right_context_feature_vectors = []
entities_left_right_context_feature_vectors = []
val_data=[]
#count_only_non_leaf = 0
#count_both_leaf_and_non_leaf = 0
token_feature_vectors=np.load(os.path.join(my_options.feature_output_dir,'token_embedding_300d.npy'))
with open(os.path.join(my_options.feature_output_dir,'token_list.json'), 'r') as filehandle:
token_list = json.load(filehandle)
token_dict={}
for idx, token in enumerate(token_list):
token_dict[token] = idx
for index, sentence in enumerate(sentences_val):
sentence=json.loads(sentence)
if index%100000==0:
print("#sentences processed so far ", index)
# if index != 50607:
# continue
for mention in sentence['mentions']:
example_dict={}
# --------------------------------------------------------------
# -----Generating context vectors for entities mentions ------------
# --------------------------------------------------------------
start_index=mention['start']
end_index = mention['end']
if start_index==end_index:
continue
entity='_'.join(sentence['tokens'][start_index:end_index])
total_context_words_no_punctuations = []
left_context_words_no_punctuations = []
right_context_words_no_punctuations = []
#---------------------------------------------------------------------------
# If we have to include entity also in the left and right context then uncomment
# following two lines and comment above two lines
# ---------------------------------------------------------------------------
#left_context_words.extend(sentence['tokens'][:start_index])
#right_context_words.extend(sentence['tokens'][end_index:])
# left_context_words.extend(sentence['tokens'][:end_index])
# right_context_words.extend(sentence['tokens'][start_index:])
# total_context_words.extend(sentence['tokens'][:start_index])
# total_context_words.extend(sentence['tokens'][end_index:])
total_ctx_embed = np.zeros((my_options.feature_dim))
left_ctx_embed = np.zeros((my_options.feature_dim))
right_ctx_embed = np.zeros((my_options.feature_dim))
for idx, word in enumerate(sentence['tokens']):
new_word = preprocess_string(word, CUSTOM_FILTERS)
if new_word == []:
continue
else:
for temp_word in new_word:
# if temp_word in token_list:
# index = token_list.index(temp_word)
# temp_word_embed = token_feature_vectors[index]
# else:
# temp_word_embed = token_feature_vectors[-1]
temp_word_embed = token_feature_vectors[token_dict.get(temp_word, -1)]
if idx < start_index:
left_context_words_no_punctuations.append(temp_word)
total_context_words_no_punctuations.append(temp_word)
left_ctx_embed = left_ctx_embed + temp_word_embed
total_ctx_embed = total_ctx_embed + temp_word_embed
elif idx >= start_index and idx < end_index:
left_context_words_no_punctuations.append(temp_word)
right_context_words_no_punctuations.append(temp_word)
left_ctx_embed = left_ctx_embed + temp_word_embed
right_ctx_embed = right_ctx_embed + temp_word_embed
else:
right_context_words_no_punctuations.append(temp_word)
total_context_words_no_punctuations.append(temp_word)
right_ctx_embed = right_ctx_embed + temp_word_embed
total_ctx_embed = total_ctx_embed + temp_word_embed
if len(left_context_words_no_punctuations) > 0:
left_ctx_embed = left_ctx_embed/ len(left_context_words_no_punctuations)
if len(right_context_words_no_punctuations) > 0:
right_ctx_embed = right_ctx_embed / len(right_context_words_no_punctuations)
if len(total_context_words_no_punctuations) > 0:
total_ctx_embed = total_ctx_embed / len(total_context_words_no_punctuations)
# total_ctx_embed = generate_context_embedding(total_context_words_no_punctuations, token_feature_vectors, token_list, method="average")
# left_ctx_embed = generate_context_embedding(left_context_words_no_punctuations, token_feature_vectors, token_list, method="average")
# right_ctx_embed = generate_context_embedding(right_context_words_no_punctuations, token_feature_vectors, token_list, method="average")
left_right_ctx_embed = np.concatenate((left_ctx_embed, right_ctx_embed))
# Released under The MIT License (MIT)
# http://opensource.org/licenses/MIT
# Copyright (c) 2013-2015 SCoT Development Team
import unittest
import numpy as np
from numpy.testing import assert_allclose
from scot.datatools import dot_special
from scot.csp import csp
try:
from generate_testdata import generate_covsig
except ImportError:
from .generate_testdata import generate_covsig
epsilon = 1e-10
class TestFunctionality(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testComponentSeparation(self):
A = generate_covsig([[10,5,2],[5,10,2],[2,2,10]], 500)
B = generate_covsig([[10,2,2],[2,10,5],[2,5,10]], 500)
X = np.concatenate([A[np.newaxis], B[np.newaxis]], axis=0)
W, V = csp(X, [1, 2])
C1a = np.cov(np.dot(W.T, X[0, :, :]))
C2a = np.cov(np.dot(W.T, X[1, :, :]))
Y = np.concatenate([B[np.newaxis], A[np.newaxis]], axis=0)
W, V = csp(Y, [1, 2])
C1b = np.cov(np.dot(W.T, Y[0, :, :]))
C2b = np.cov(np.dot(W.T, Y[1, :, :]))
# check symmetric case
assert_allclose(C1a.diagonal(), C2a.diagonal()[::-1])
assert_allclose(C1b.diagonal(), C2b.diagonal()[::-1])
# swapping class labels (or in this case, trials) should not change the result
assert_allclose(C1a, C1b, rtol=1e-9, atol=1e-9)
assert_allclose(C2a, C2b, rtol=1e-9, atol=1e-9)
# variance of first component should be greatest for class 1
self.assertGreater(C1a[0, 0], C2a[0, 0])
# variance of last component should be greatest for class 2
self.assertLess(C1a[2, 2], C2a[2, 2])
# variance of central component should be equal for both classes
assert_allclose(C1a[1, 1], C2a[1, 1])
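# Standalone sketch (not part of the scot test suite) of why the assertions above
# hold: one common CSP formulation solves the generalized eigenvalue problem
# C1 w = lambda (C1 + C2) w, so filters sorted by descending eigenvalue put the
# class-1-dominant component first and the class-2-dominant component last.
# `csp_sketch`, `C1`, and `C2` are names local to this illustration only.
def csp_sketch(C1, C2):
    from scipy.linalg import eigh
    evals, evecs = eigh(C1, C1 + C2)       # generalized eigendecomposition
    order = np.argsort(evals)[::-1]        # descending class-1 variance ratio
    return evecs[:, order]                 # columns are the spatial filters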
class TestDefaults(unittest.TestCase):
def setUp(self):
self.X = np.random.randn(10,5,100)
self.C = [0,0,0,0,0,1,1,1,1,1]
self.Y = self.X.copy()
self.D = list(self.C)
self.T, self.M, self.N = self.X.shape
self.W, self.V = csp(self.X, self.C)
def tearDown(self):
pass
def testInvalidInput(self):
# pass only 2d data
self.assertRaises(AttributeError, csp, np.random.randn(3,10), [1,1,0,0] )
# number of class labels does not match number of trials
self.assertRaises(AttributeError, csp, np.random.randn(5,3,10), [1,1,0,0] )
def testInputSafety(self):
# function must not change input variables
self.assertTrue((self.X == self.Y).all())
self.assertEqual(self.C, self.D)
def testOutputSizes(self):
# output matrices must have the correct size
self.assertTrue(self.W.shape == (self.M, self.M))
self.assertTrue(self.V.shape == (self.M, self.M))
def testInverse(self):
# V should be the inverse of W
I = np.abs(self.V.dot(self.W))
self.assertTrue(np.abs(np.mean(I.diagonal())) - 1 < epsilon)
self.assertTrue(np.abs(np.sum(I) - I.trace()) < epsilon)
class TestDimensionalityReduction(unittest.TestCase):
def setUp(self):
self.n_comps = 5
self.X = np.random.rand(10,6,100)
self.C = np.asarray([0,0,0,0,0,1,1,1,1,1])
self.X[self.C == 0, 0, :] *= 10
self.X[self.C == 0, 2, :] *= 5
self.X[self.C == 1, 1, :] *= 10
self.X[self.C == 1, 3, :] *= 2
self.Y = self.X.copy()
self.D = list(self.C)
self.T, self.M, self.N = self.X.shape
self.W, self.V = csp(self.X, self.C, numcomp=self.n_comps)
def tearDown(self):
pass
def testOutputSizes(self):
# output matrices must have the correct size
self.assertTrue(self.W.shape == (self.M, 5))
self.assertTrue(self.V.shape == (5, self.M))
def testPseudoInverse(self):
# V should be the pseudo inverse of W
I = self.V.dot(self.W)
assert_allclose(I, np.eye(self.n_comps), rtol=1e-9, atol=1e-9)
def testOutput(self):
x = dot_special(self.W.T, self.X)
v1 = sum(np.var(x[np.array(self.C)==0], axis=2))
v2 = sum(np.var(x[ | np.array(self.C) | numpy.array |
import h5py
import numpy as np
from scipy.io import loadmat
from operator import itemgetter
import math
import scipy as sp
import cv2
import matplotlib.pyplot as plt
import os, sys
import time
import multiprocessing
import random
# Generate Observation Map
def func(theta, m, I, imax, L, w, N, anglemask):
print('*',end='')
rotmat = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])
p = 0.5*(L[:,0]+1)*(w-1) #x 0:w-1
q = 0.5*(L[:,1]+1)*(w-1) #y 0:w-1
x = [p-0.5*(w-1), q-0.5*(w-1)]
x_ = np.dot(rotmat, x)
p = x_[0,:]+0.5*(w-1);
q = x_[1,:]+0.5*(w-1);
p = np.int32(p)
q = np.int32(q)
light_idx = q*w + p # 0:w*w-1
x = [N[:,0], N[:,1]]
x_ = np.dot(rotmat, x)
pn = x_[0,:];
qn = x_[1,:];
normal = [np.transpose(pn), np.transpose(qn), N[:,2]]
normal = np.transpose(normal)
temp = I*anglemask/np.transpose(imax)
embed = np.zeros((m, w*w), np.float32)
embed[:, light_idx] = temp
embed = np.reshape(embed, (m, w, w))
mask = np.zeros((m, w*w), np.bool_)
mask[:, light_idx] = anglemask
mask = np.reshape(mask, (m, w, w))
return embed, mask, normal, rotmat
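# Standalone illustration of the light-direction-to-pixel mapping used in func():
# the (x, y) components of a unit lighting vector, which lie in [-1, 1], are
# scaled onto a w-by-w grid and optionally rotated about the grid centre.
# `light_to_pixel` and `L_xy` are names local to this sketch.
def light_to_pixel(L_xy, w, theta=0.0):
    rot = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta),  np.cos(theta)]])
    centred = 0.5 * (L_xy + 1.0) * (w - 1) - 0.5 * (w - 1)   # shift grid centre to 0
    pq = centred @ rot.T + 0.5 * (w - 1)                     # rotate, shift back
    return np.int32(pq)                                      # truncate as above

# A light from straight overhead ([0, 0, 1]) lands near the grid centre:
# light_to_pixel(np.array([[0.0, 0.0]]), w=32) -> [[15 15]]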
def wrapper(args):
return func(*args)
# for multi core cpu
def light_embedding_2d_rot_invariant_multi(I, imax, L, w, N, div, isRandomThresh):
m = I.shape[0]
rows = w
cols = w
embed_rot = []
normal_rot = []
mask_rot = []
rot = []
anglemask = np.zeros((I.shape[0],I.shape[1]),np.float32)
for k in range(I.shape[0]): # numpixel
angle1 = 180*np.arccos(L[:,2])/np.pi
if isRandomThresh == True:
tgt = np.where(angle1<random.randint(20,90))
tgtrandom = np.random.permutation(tgt[0])
tgt = tgtrandom[:random.randint(50,np.min([1000,L.shape[0]]))]
else:
tgt = np.where(angle1<90)
anglemask[k,tgt] = 1
n = multiprocessing.cpu_count()
p = multiprocessing.Pool(n)
params = [(np.pi*(i*360.0/div)/180, m, I, imax, L, w, N, anglemask) for i in range(np.int32(div))]
result = p.map(wrapper, params)
p.close()
embed_list = []
mask_list = []
nml_list = []
rot_list = []
for i in range(div):
embed_list.append(result[i][0].copy())
mask_list.append(result[i][1].copy())
nml_list.append(result[i][2].copy())
rot_list.append(result[i][3].copy())
embed_list = np.array(embed_list)
embed_list = | np.transpose(embed_list, (1,0,2,3)) | numpy.transpose |
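# Self-contained illustration of the wrapper + Pool.map pattern used above:
# Pool.map passes exactly one argument per call, so the per-rotation parameters
# are packed into tuples and unpacked inside the wrapper (Pool.starmap is the
# modern shorthand). `_rotate_job` and its inputs are illustrative names only.
import multiprocessing

def _rotate_job(theta, scale):
    return theta * scale

def _rotate_job_wrapper(args):
    return _rotate_job(*args)   # unpack the packed parameter tuple

if __name__ == '__main__':
    with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
        demo = pool.map(_rotate_job_wrapper, [(t, 2.0) for t in range(8)])
    print(demo)   # [0.0, 2.0, 4.0, ..., 14.0]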
from src.modelling.LoNGAE.train_lp_with_feats import run
from .KCG import get_documents_kcg
import paths
import numpy as np
from src.utils.datasets import name_of_dataset
from tensorflow import keras
from src.modelling.LoNGAE.models.ae import autoencoder_with_node_features
class GAE:
def __init__(self, dataset_path, big_graph):
self.dataset_path = dataset_path
self.big_graph = big_graph
self.nodes, self._adjacency, self.doc_to_node_mapping, self.documents_labels = \
get_documents_kcg(self.dataset_path, big_graph)
self._nodes_features = | np.array([node['feature'] for node in self.nodes]) | numpy.array |
# Common libs
import numpy as np
import matplotlib.pyplot as plt
from os.path import isfile, join, exists
from os import listdir, remove, getcwd
from sklearn.metrics import confusion_matrix
# My libs
from utils.config import Config
from utils.metrics import IoU_from_confusions, smooth_metrics
from utils.ply import read_ply
# Datasets
from datasets.Scannet import ScannetDataset
# ----------------------------------------------------------------------------------------------------------------------
#
# Utility functions
# \***********************/
#
def running_mean(signal, n, axis=0):
signal = np.array(signal)
if signal.ndim == 1:
signal_sum = np.convolve(signal, np.ones((2*n+1,)), mode='same')
signal_num = np.convolve(signal*0+1, np.ones((2*n+1,)), mode='same')
return signal_sum/signal_num
elif signal.ndim == 2:
smoothed = np.empty(signal.shape)
if axis == 0:
for i, sig in enumerate(signal):
sig_sum = np.convolve(sig, np.ones((2*n+1,)), mode='same')
sig_num = np.convolve(sig*0+1, np.ones((2*n+1,)), mode='same')
smoothed[i, :] = sig_sum / sig_num
elif axis == 1:
for i, sig in enumerate(signal.T):
sig_sum = np.convolve(sig, np.ones((2*n+1,)), mode='same')
sig_num = np.convolve(sig*0+1, np.ones((2*n+1,)), mode='same')
smoothed[:, i] = sig_sum / sig_num
else:
print('wrong axis')
return smoothed
else:
print('wrong dimensions')
return None
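# Quick illustrative check of running_mean: dividing the windowed sum by a
# windowed count of ones keeps the edges unbiased, whereas a fixed 1/(2n+1)
# kernel in 'same' mode would shrink values near the boundaries.
def _running_mean_demo():
    sig = np.ones(4)
    smoothed = running_mean(sig, 1)
    assert np.allclose(smoothed, 1.0)   # a fixed ones(3)/3 kernel would dip at the ends
    return smoothed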
def IoU_multi_metrics(all_IoUs, smooth_n):
# Get mean IoU for consecutive epochs to directly get a mean
all_mIoUs = [np.hstack([np.mean(obj_IoUs, axis=1) for obj_IoUs in epoch_IoUs]) for epoch_IoUs in all_IoUs]
smoothed_mIoUs = []
for epoch in range(len(all_mIoUs)):
i0 = max(epoch - smooth_n, 0)
i1 = min(epoch + smooth_n + 1, len(all_mIoUs))
smoothed_mIoUs += [np.mean(np.hstack(all_mIoUs[i0:i1]))]
# Get mean for each class
all_objs_mIoUs = [[np.mean(obj_IoUs, axis=1) for obj_IoUs in epoch_IoUs] for epoch_IoUs in all_IoUs]
smoothed_obj_mIoUs = []
for epoch in range(len(all_objs_mIoUs)):
i0 = max(epoch - smooth_n, 0)
i1 = min(epoch + smooth_n + 1, len(all_objs_mIoUs))
epoch_obj_mIoUs = []
for obj in range(len(all_objs_mIoUs[0])):
epoch_obj_mIoUs += [np.mean(np.hstack([objs_mIoUs[obj] for objs_mIoUs in all_objs_mIoUs[i0:i1]]))]
smoothed_obj_mIoUs += [epoch_obj_mIoUs]
return np.array(smoothed_mIoUs), np.array(smoothed_obj_mIoUs)
def IoU_class_metrics(all_IoUs, smooth_n):
# Get mean IoU per class for consecutive epochs to directly get a mean without further smoothing
smoothed_IoUs = []
for epoch in range(len(all_IoUs)):
i0 = max(epoch - smooth_n, 0)
i1 = min(epoch + smooth_n + 1, len(all_IoUs))
smoothed_IoUs += [np.mean(np.vstack(all_IoUs[i0:i1]), axis=0)]
smoothed_IoUs = np.vstack(smoothed_IoUs)
smoothed_mIoUs = np.mean(smoothed_IoUs, axis=1)
return smoothed_IoUs, smoothed_mIoUs
def load_confusions(filename, n_class):
with open(filename, 'r') as f:
lines = f.readlines()
confs = np.zeros((len(lines), n_class, n_class))
for i, line in enumerate(lines):
C = np.array([int(value) for value in line.split()])
confs[i, :, :] = C.reshape((n_class, n_class))
return confs
def load_training_results(path):
filename = join(path, 'training.txt')
with open(filename, 'r') as f:
lines = f.readlines()
steps = []
#L_out = []
L_reg = []
L_p = []
#acc = []
t = []
memory = []
L_out0=[]
L_out1=[]
L_out2=[]
L_out3=[]
acc0=[]
acc1=[]
acc2=[]
acc3=[]
for line in lines[1:]:
line_info = line.split()
if (len(line) > 0):
steps += [int(line_info[0])]
L_out0 += [float(line_info[1])]
L_out1 += [float(line_info[2])]
L_out2 += [float(line_info[3])]
L_out3 += [float(line_info[4])]
L_reg += [float(line_info[5])]
L_p += [float(line_info[6])]
acc0 += [float(line_info[7])]
acc1 += [float(line_info[8])]
acc2 += [float(line_info[9])]
acc3 += [float(line_info[10])]
t += [float(line_info[11])]
memory += [float(line_info[12])]
else:
break
return steps, L_out0, L_out1, L_out2, L_out3, L_reg, L_p, acc0, acc1, acc2, acc3, t, memory
def load_single_IoU(filename, n_parts):
with open(filename, 'r') as f:
lines = f.readlines()
# Load all IoUs
all_IoUs = []
for i, line in enumerate(lines):
all_IoUs += [np.reshape([float(IoU) for IoU in line.split()], [-1, n_parts])]
return all_IoUs
def load_snap_clouds(path, dataset, only_last=False):
cloud_folders = np.array([join(path, f) for f in listdir(path) if f.startswith('val_preds')])
cloud_epochs = np.array([int(f.split('_')[-1]) for f in cloud_folders])
epoch_order = np.argsort(cloud_epochs)
cloud_epochs = cloud_epochs[epoch_order]
cloud_folders = cloud_folders[epoch_order]
Confs = np.zeros((len(cloud_epochs), dataset.num_classes, dataset.num_classes), dtype=np.int32)
for c_i, cloud_folder in enumerate(cloud_folders):
if only_last and c_i < len(cloud_epochs) - 1:
continue
# Load confusion if previously saved
conf_file = join(cloud_folder, 'conf.txt')
if isfile(conf_file):
Confs[c_i] += np.loadtxt(conf_file, dtype=np.int32)
else:
for f in listdir(cloud_folder):
if f.endswith('.ply') and not f.endswith('sub.ply'):
data = read_ply(join(cloud_folder, f))
labels = data['class']
preds = data['preds']
Confs[c_i] += confusion_matrix(labels, preds, dataset.label_values).astype(np.int32)
np.savetxt(conf_file, Confs[c_i], '%12d')
# Erase ply to save disk memory
if c_i < len(cloud_folders) - 1:
for f in listdir(cloud_folder):
if f.endswith('.ply'):
remove(join(cloud_folder, f))
# Remove ignored labels from confusions
for l_ind, label_value in reversed(list(enumerate(dataset.label_values))):
if label_value in dataset.ignored_labels:
Confs = np.delete(Confs, l_ind, axis=1)
Confs = np.delete(Confs, l_ind, axis=2)
return cloud_epochs, IoU_from_confusions(Confs)
def load_multi_IoU(filename, n_parts):
with open(filename, 'r') as f:
lines = f.readlines()
# Load all IoUs
all_IoUs = []
for i, line in enumerate(lines):
obj_IoUs = [[float(IoU) for IoU in s.split()] for s in line.split('/')]
obj_IoUs = [np.reshape(IoUs, [-1, n_parts[obj]]) for obj, IoUs in enumerate(obj_IoUs)]
all_IoUs += [obj_IoUs]
return all_IoUs
def compare_trainings(list_of_paths, list_of_labels=None):
# Parameters
# **********
steps_per_epoch = 0
smooth_epochs = 1
if list_of_labels is None:
list_of_labels = [str(i) for i in range(len(list_of_paths))]
# Read Training Logs
# ******************
all_epochs = []
all_loss0 = []
all_loss1 = []
all_loss2 = []
all_loss3 = []
all_lr = []
all_times = []
all_acc0 = []
all_acc1 = []
all_acc2 = []
all_acc3 = []
for path in list_of_paths:
# Load parameters
config = Config()
config.load(path)
# Compute number of steps per epoch
if config.epoch_steps is None:
if config.dataset == 'ModelNet40':
steps_per_epoch = np.ceil(9843 / int(config.batch_num))
else:
raise ValueError('Unsupported dataset')
else:
steps_per_epoch = config.epoch_steps
smooth_n = int(steps_per_epoch * smooth_epochs)
# Load results
steps, L_out0,L_out1,L_out2,L_out3,L_reg,L_p,acc0,acc1,acc2,acc3,t, memory = load_training_results(path)
all_epochs += [np.array(steps) / steps_per_epoch]
all_loss0 += [running_mean(L_out0, smooth_n)]
all_loss1 += [running_mean(L_out1, smooth_n)]
all_loss2 += [running_mean(L_out2, smooth_n)]
all_loss3 += [running_mean(L_out3, smooth_n)]
all_acc0 += [running_mean(acc0, smooth_n)]
all_acc1 += [running_mean(acc1, smooth_n)]
all_acc2 += [running_mean(acc2, smooth_n)]
all_acc3 += [running_mean(acc3, smooth_n)]
all_times += [t]
# Learning rate
lr_decay_v = np.array([lr_d for ep, lr_d in config.lr_decays.items()])
lr_decay_e = np.array([ep for ep, lr_d in config.lr_decays.items()])
max_e = max(np.max(all_epochs[-1]) + 1, np.max(lr_decay_e) + 1)
lr_decays = np.ones(int(np.ceil(max_e)), dtype=np.float32)
lr_decays[0] = float(config.learning_rate)
lr_decays[lr_decay_e] = lr_decay_v
lr = np.cumprod(lr_decays)
all_lr += [lr[np.floor(all_epochs[-1]).astype(np.int32)]]
# Plots learning rate
# *******************
if False:
# Figure
fig = plt.figure('lr')
for i, label in enumerate(list_of_labels):
plt.plot(all_epochs[i], all_lr[i], linewidth=1, label=label)
# Set names for axes
plt.xlabel('epochs')
plt.ylabel('lr')
plt.yscale('log')
# Display legends and title
plt.legend(loc=1)
# Customize the graph
ax = fig.gca()
ax.grid(linestyle='-.', which='both')
# ax.set_yticks(np.arange(0.8, 1.02, 0.02))
# Plots loss
# **********
'''
# Figure
fig = plt.figure('loss')
for i, label in enumerate(list_of_labels):
plt.plot(all_epochs[i], all_loss[i], linewidth=1, label=label)
# Set names for axes
plt.xlabel('epochs')
plt.ylabel('loss')
plt.yscale('log')
# Display legends and title
plt.legend(loc=1)
plt.title('Losses compare')
# Customize the graph
ax = fig.gca()
ax.grid(linestyle='-.', which='both')
# ax.set_yticks(np.arange(0.8, 1.02, 0.02))
'''
# Plots acc
# **********
# Figure
fig = plt.figure('acc')
for i, label in enumerate(list_of_labels):
plt.plot(all_epochs[i], all_acc0[i], linewidth=1, label=label)
for i, label in enumerate(list_of_labels):
plt.plot(all_epochs[i], all_acc1[i], linewidth=1, label=label)
for i, label in enumerate(list_of_labels):
plt.plot(all_epochs[i], all_acc2[i], linewidth=1, label=label)
for i, label in enumerate(list_of_labels):
plt.plot(all_epochs[i], all_acc3[i], linewidth=1, label=label)
# Set names for axes
plt.xlabel('epochs')
plt.ylabel('acc')
plt.yscale('log')
# Display legends and title
plt.legend(loc=0)
plt.title('Acc compare')
# Customize the graph
ax = fig.gca()
ax.grid(linestyle='-.', which='both')
# ax.set_yticks(np.arange(0.8, 1.02, 0.02))
# Plot Times
# **********
# Figure
fig = plt.figure('time')
for i, label in enumerate(list_of_labels):
plt.plot(all_epochs[i], np.array(all_times[i]) / 3600, linewidth=1, label=label)
# Set names for axes
plt.xlabel('epochs')
plt.ylabel('time')
# plt.yscale('log')
# Display legends and title
plt.legend(loc=0)
# Customize the graph
ax = fig.gca()
ax.grid(linestyle='-.', which='both')
# ax.set_yticks(np.arange(0.8, 1.02, 0.02))
# Show all
plt.show()
def compare_convergences_multisegment(list_of_paths, list_of_labels=None):
# Parameters
# **********
steps_per_epoch = 0
smooth_n = 10
if list_of_labels is None:
list_of_labels = [str(i) for i in range(len(list_of_paths))]
# Read Logs
# *********
all_pred_epochs = []
all_instances_mIoUs = []
all_objs_mIoUs = []
all_objs_IoUs = []
all_parts = []
obj_list = ['Air', 'Bag', 'Cap', 'Car', 'Cha', 'Ear', 'Gui', 'Kni',
'Lam', 'Lap', 'Mot', 'Mug', 'Pis', 'Roc', 'Ska', 'Tab']
print('Objs | Inst | Air Bag Cap Car Cha Ear Gui Kni Lam Lap Mot Mug Pis Roc Ska Tab')
print('-----|------|--------------------------------------------------------------------------------')
for path in list_of_paths:
# Load parameters
config = Config()
config.load(path)
# Get the number of classes
n_parts = [4, 2, 2, 4, 4, 3, 3, 2, 4, 2, 6, 2, 3, 3, 3, 3]
part = config.dataset.split('_')[-1]
# Get validation confusions
file = join(path, 'val_IoUs.txt')
val_IoUs = load_multi_IoU(file, n_parts)
file = join(path, 'vote_IoUs.txt')
vote_IoUs = load_multi_IoU(file, n_parts)
#print(len(val_IoUs[0]))
#print(val_IoUs[0][0].shape)
# Get mean IoU
#instances_mIoUs, objs_mIoUs = IoU_multi_metrics(val_IoUs, smooth_n)
# Get mean IoU
instances_mIoUs, objs_mIoUs = IoU_multi_metrics(vote_IoUs, smooth_n)
# Aggregate results
all_pred_epochs += [np.array([i for i in range(len(val_IoUs))])]
all_instances_mIoUs += [instances_mIoUs]
all_objs_IoUs += [objs_mIoUs]
all_objs_mIoUs += [np.mean(objs_mIoUs, axis=1)]
if part == 'multi':
s = '{:4.1f} | {:4.1f} | '.format(100 * np.mean(objs_mIoUs[-1]), 100 * instances_mIoUs[-1])
for obj_mIoU in objs_mIoUs[-1]:
s += '{:4.1f} '.format(100 * obj_mIoU)
print(s)
else:
s = ' -- | -- | '
for obj_name in obj_list:
if part.startswith(obj_name):
s += '{:4.1f} '.format(100 * instances_mIoUs[-1])
else:
s += ' -- '
print(s)
all_parts += [part]
# Plots
# *****
if 'multi' in all_parts:
# Figure
fig = plt.figure('Instances mIoU')
for i, label in enumerate(list_of_labels):
if all_parts[i] == 'multi':
plt.plot(all_pred_epochs[i], all_instances_mIoUs[i], linewidth=1, label=label)
plt.xlabel('epochs')
plt.ylabel('IoU')
# Set limits for y axis
#plt.ylim(0.55, 0.95)
# Display legends and title
plt.legend(loc=4)
# Customize the graph
ax = fig.gca()
ax.grid(linestyle='-.', which='both')
#ax.set_yticks(np.arange(0.8, 1.02, 0.02))
# Figure
fig = plt.figure('mean of categories mIoU')
for i, label in enumerate(list_of_labels):
if all_parts[i] == 'multi':
plt.plot(all_pred_epochs[i], all_objs_mIoUs[i], linewidth=1, label=label)
plt.xlabel('epochs')
plt.ylabel('IoU')
# Set limits for y axis
#plt.ylim(0.8, 1)
# Display legends and title
plt.legend(loc=4)
# Customize the graph
ax = fig.gca()
ax.grid(linestyle='-.', which='both')
#ax.set_yticks(np.arange(0.8, 1.02, 0.02))
for obj_i, obj_name in enumerate(obj_list):
if np.any([part.startswith(obj_name) for part in all_parts]):
# Figure
fig = plt.figure(obj_name + ' mIoU')
for i, label in enumerate(list_of_labels):
if all_parts[i] == 'multi':
plt.plot(all_pred_epochs[i], all_objs_IoUs[i][:, obj_i], linewidth=1, label=label)
elif all_parts[i].startswith(obj_name):
plt.plot(all_pred_epochs[i], all_objs_mIoUs[i], linewidth=1, label=label)
plt.xlabel('epochs')
plt.ylabel('IoU')
# Set limits for y axis
#plt.ylim(0.8, 1)
# Display legends and title
plt.legend(loc=4)
# Customize the graph
ax = fig.gca()
ax.grid(linestyle='-.', which='both')
#ax.set_yticks(np.arange(0.8, 1.02, 0.02))
# Show all
plt.show()
def compare_convergences_segment(dataset, list_of_paths, list_of_names=None):
# Parameters
# **********
smooth_n = 10
if list_of_names is None:
list_of_names = [str(i) for i in range(len(list_of_paths))]
# Read Logs
# *********
all_pred_epochs = []
all_mIoUs = []
all_class_IoUs = []
all_snap_epochs = []
all_snap_IoUs = []
# Load parameters
config = Config()
config.load(list_of_paths[0])
class_list = [dataset.label_to_names[label] for label in dataset.label_values
if label not in dataset.ignored_labels]
s = '{:^10}|'.format('mean')
for c in class_list:
s += '{:^10}'.format(c)
print(s)
print(10*'-' + '|' + 10*config.num_classes*'-')
for path in list_of_paths:
# Get validation IoUs
file = join(path, 'val_IoUs.txt')
val_IoUs = load_single_IoU(file, config.num_classes)
# Get mean IoU
class_IoUs, mIoUs = IoU_class_metrics(val_IoUs, smooth_n)
# Aggregate results
all_pred_epochs += [np.array([i for i in range(len(val_IoUs))])]
all_mIoUs += [mIoUs]
all_class_IoUs += [class_IoUs]
s = '{:^10.1f}|'.format(100*mIoUs[-1])
for IoU in class_IoUs[-1]:
s += '{:^10.1f}'.format(100*IoU)
print(s)
# Get optional full validation on clouds
snap_epochs, snap_IoUs = load_snap_clouds(path, dataset)
all_snap_epochs += [snap_epochs]
all_snap_IoUs += [snap_IoUs]
print(10*'-' + '|' + 10*config.num_classes*'-')
for snap_IoUs in all_snap_IoUs:
if len(snap_IoUs) > 0:
s = '{:^10.1f}|'.format(100* | np.mean(snap_IoUs[-1]) | numpy.mean |
import numpy as np
import matplotlib.pylab as plt
import sys
def run():
visualizeTarget = sys.argv[1]
print(visualizeTarget)
if(visualizeTarget=='step'):
x=np.arange(-5.0,5.0,0.1)
y=step(x)
plt.plot(x,y)
plt.ylim(-0.1,1.1)
plt.show()
elif(visualizeTarget=='sigmoid'):
x=np.arange(-5.0,5.0,0.1)
y=sigmoid(x)
plt.plot(x,y)
plt.ylim(-0.1,1.1)
plt.show()
elif(visualizeTarget=='relu'):
x=np.arange(-5.0,5.0,0.1)
y=relu(x)
plt.plot(x,y)
# plt.ylim(-0.1,1.1)
plt.show()
elif(visualizeTarget=='all'):
x=np.arange(-5.0,5.0,0.1)
y=step(x)
plt.plot(x,y)
# plt.ylim(-0.1,1.1)
x=np.arange(-5.0,5.0,0.1)
y=sigmoid(x)
plt.plot(x,y)
x=np.arange(-5.0,5.0,0.1)
y=relu(x)
plt.plot(x,y)
# plt.ylim(-0.1,3.0)
plt.show()
# for x in sys.argv:
# print(x)
class variable():
def __init__(self, value):
self.data = value
pass
def read(self):
return self.data
def test():
v = variable(424)
print(v.read() == 424)
a = np.array([2,3,1,4,2])
print(a)
print(sigmoid(a))
def TestSimpleANDGate():
print('simple AND gate test')
print(SimpleANDGate(0,0))
print(SimpleANDGate(0,1))
print(SimpleANDGate(1,0))
print(SimpleANDGate(1,1))
def SimpleANDGate(x1,x2):
w1,w2,theta = 0.5,0.5,0.7
tmp = x1*w1+x2*w2
if(tmp<=theta): return 0
elif(tmp>theta): return 1
def TestANDGate():
print('and gate test')
print(ANDGate(0,0))
print(ANDGate(0,1))
print(ANDGate(1,0))
print(ANDGate(1,1))
def ANDGate(x1,x2):
x = np.array([x1,x2])
w=np.array([0.5,0.5])
b=-0.7
tmp=np.sum(w*x)+b
if(tmp<=0): return 0
else: return 1
def TestNANDGate():
print('nand gate test')
print(NANDGate(0,0))
print(NANDGate(0,1))
print(NANDGate(1,0))
print(NANDGate(1,1))
def NANDGate(x1,x2):
x = np.array([x1,x2])
w=np.array([-0.5,-0.5])
b=0.7
tmp=np.sum(w*x)+b
if(tmp<=0): return 0
else: return 1
def TestORGate():
print('OR gate test')
print(ORGate(0,0))
print(ORGate(0,1))
print(ORGate(1,0))
print(ORGate(1,1))
def ORGate(x1,x2):
x = np.array([x1,x2])
w=np.array([0.5,0.5])
b=-0.2
tmp=np.sum(w*x)+b
if(tmp<=0): return 0
else: return 1
def XORGate(x1,x2):
a = ORGate(x1,x2)
b = NANDGate(x1,x2)
return ANDGate(a,b)
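# Quick truth-table check (illustrative) for the composed gate above:
# XOR(x1, x2) = AND(OR(x1, x2), NAND(x1, x2)), so only mixed inputs return 1.
def TestXORGate():
    print('XOR gate test')
    for x1, x2 in [(0, 0), (0, 1), (1, 0), (1, 1)]:
        print((x1, x2), '->', XORGate(x1, x2))   # expect 0, 1, 1, 0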
def step(x):
y=x>0
return y.astype(int)
def simple_step(value):
if(value <= 0): return 0
else: return 1
def sigmoid(value):
return 1/(1+np.exp(-value))
def relu(x):
return np.maximum(0,x)
class MultiplyLayer:
def __init__(self):
self.x = None
self.y = None
def forward(self, x, y):
self.x=x
self.y=y
out=x*y
return out
def backward(self, dout):
dx=dout*self.y
dy=dout*self.x
return dx,dy
def matrixTest1():
print('mat')
b = np.array([[1,2],[3,4],[5,6]])
print(b)
print(np.ndim(b)) # number of array dimensions
print(b.shape) # array shape (the length of each dimension)
def matrixMultiplyTest():
print('multiply')
a = | np.array([[1,2],[3,4]]) | numpy.array |
import ops.utils
import networkx as nx
import pandas as pd
import numpy as np
import scipy.spatial.kdtree
from collections import Counter
from scipy.spatial.distance import cdist
from scipy.interpolate import UnivariateSpline
from statsmodels.stats.multitest import multipletests
def format_stats_wide(df_stats):
index = ['gene_symbol']
columns = ['stat_name', 'stimulant']
values = ['statistic', 'pval', 'pval_FDR_10']
stats = (df_stats
.pivot_table(index=index, columns=columns, values=values)
.pipe(ops.utils.flatten_cols))
counts = (df_stats
.pivot_table(index=index, columns='stimulant', values='count')
.rename(columns=lambda x: 'cells_' + x))
return pd.concat([stats, counts], axis=1)
def distribution_difference(df):
col = 'dapi_gfp_corr_early'
y_neg = (df
.query('gene_symbol == "non-targeting"')
[col]
)
return df.groupby('gene_symbol').apply(lambda x:
scipy.stats.wasserstein_distance(x[col], y_neg))
def add_est_timestamps(df_all):
s_per_frame = 24 * 60
sites_per_frame = 2 * 364
s_per_site = s_per_frame / sites_per_frame
starting_time = 3 * 60
cols = ['frame', 'well', 'site']
df_ws = df_all[cols].drop_duplicates().sort_values(cols)
est_timestamps = [(starting_time + i*s_per_site) / 3600
for i in range(len(df_ws))]
df_ws['timestamp'] = est_timestamps
return df_all.join(df_ws.set_index(cols), on=cols)
def add_dapi_diff(df_all):
index = ['well', 'site', 'cell_ph']
dapi_diff = (df_all
.pivot_table(index=index, columns='frame',
values='dapi_max')
.pipe(lambda x: x/x.mean())
.pipe(lambda x: x.max(axis=1) - x.min(axis=1))
.rename('dapi_diff')
)
return df_all.join(dapi_diff, on=index)
def add_spline_diff(df, s=25):
T_neg, Y_neg = (df
.query('gene_symbol == "non-targeting"')
.groupby('timestamp')
['dapi_gfp_corr'].mean()
.reset_index().values.T
)
ix = np.argsort(T_neg)
spl = UnivariateSpline(T_neg[ix], Y_neg[ix], s=s)
return (df
.assign(splined=lambda x: spl(df['timestamp']))
.assign(spline_diff=lambda x: x.eval('dapi_gfp_corr - splined'))
)
def get_stats(df, col='spline_diff'):
df_diff = (df
.groupby(['gene_symbol', 'cell'])
[col].mean()
.sort_values(ascending=False)
.reset_index())
negative_vals = (df_diff
.query('gene_symbol == "non-targeting"')
[col]
)
test = lambda x: scipy.stats.ttest_ind(x, negative_vals).pvalue
stats = (df_diff.groupby('gene_symbol')
[col]
.pipe(ops.utils.groupby_reduce_concat, 'mean', 'count',
pval=lambda x: x.apply(test))
.assign(pval_FDR_10=lambda x:
multipletests(x['pval'], 0.1)[1]))
return stats
# track nuclei nearest neighbor
def initialize_graph(df):
arr_df = [x for _, x in df.groupby('frame')]
nodes = df[['frame', 'label']].values
nodes = [tuple(x) for x in nodes]
G = nx.DiGraph()
G.add_nodes_from(nodes)
edges = []
for df1, df2 in zip(arr_df, arr_df[1:]):
edges = get_edges(df1, df2)
G.add_weighted_edges_from(edges)
return G
def get_edges(df1, df2):
neighboring_points = 3
get_label = lambda x: tuple(int(y) for y in x[[2, 3]])
x1 = df1[['i', 'j', 'frame', 'label']].values
x2 = df2[['i', 'j', 'frame', 'label']].values
kdt = scipy.spatial.kdtree.KDTree(df1[['i', 'j']])
points = df2[['i', 'j']]
result = kdt.query(points, neighboring_points)
edges = []
for i2, (ds, ns) in enumerate(zip(*result)):
end_node = get_label(x2[i2])
for d, i1 in zip(ds, ns):
start_node = get_label(x1[i1])
w = d
edges.append((start_node, end_node, w))
return edges
def displacement(x):
d = np.sqrt(np.diff(x['x'])**2 + np.diff(x['y'])**2)
return d
def analyze_graph(G, cutoff=100):
"""Trace a path forward from each nucleus in the starting frame. Only keep
the paths that reach the final frame.
"""
start_nodes = [n for n in G.nodes if n[0] == 0]
max_frame = max([frame for frame, _ in G.nodes])
cost, path = nx.multi_source_dijkstra(G, start_nodes, cutoff=cutoff)
cost = {k:v for k,v in cost.items() if k[0] == max_frame}
path = {k:v for k,v in path.items() if k[0] == max_frame}
return cost, path
def filter_paths(cost, path, threshold=35):
"""Remove intersecting paths.
returns list of one [(frame, label)] per trajectory
"""
# remove intersecting paths (node in more than one path)
node_count = Counter(sum(path.values(), []))
bad = set(k for k,v in node_count.items() if v > 1)
print('bad', len(bad), len(node_count))
# remove paths with cost over threshold
too_costly = [k for k,v in cost.items() if v > threshold]
bad = bad | set(too_costly)
relabel = [v for v in path.values() if not (set(v) & bad)]
assert(len(relabel) > 0)
return relabel
def relabel_nuclei(nuclei, relabel):
nuclei_ = nuclei.copy()
max_label = nuclei.max() + 1
for i, nodes in enumerate(zip(*relabel)):
labels = [n[1] for n in nodes]
table = np.zeros(max_label).astype(int)
table[labels] = range(len(labels))
nuclei_[i] = table[nuclei_[i]]
return nuclei_
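# Illustrative end-to-end wrapper around the nearest-neighbour tracker above.
# `df` is assumed to hold one row per nucleus with 'frame', 'label', 'i', 'j'
# columns and `nuclei` to be the matching stack of label images; both names
# (and this wrapper itself) are assumptions, not part of the original module.
def track_nuclei_nn(df, nuclei, cutoff=100, threshold=35):
    G = initialize_graph(df)                        # node per (frame, label)
    cost, path = analyze_graph(G, cutoff=cutoff)    # paths reaching the last frame
    relabel = filter_paths(cost, path, threshold=threshold)
    return relabel_nuclei(nuclei, relabel)          # consistent labels across frames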
# track nuclei trackmate
def call_TrackMate_centroids(input_path, output_path='trackmate_output.csv', fiji_path=None, threads=1, tracker_settings=dict()):
'''warnings: - `threads` is probably not actually setting the max threads for fiji.
- to allow multiple instances of fiji to run concurrently (e.g., launched from snakemake pipeline), likely have
to set `allowMultiple` parameter in Fiji.app/Contents/Info.plist to true.
`CUTOFF_PERCENTILE` parameter in tracker_settings changes the alternative cost to gap closing/merging/splitting. Higher values ->
more gap closures/merges/splits.
'''
import subprocess, json
if fiji_path is None:
import sys
if sys.platform == "darwin":
fiji_path = '/Applications/Fiji.app/Contents/MacOS/ImageJ-macosx'
elif sys.platform == "linux":
fiji_path = '~/Fiji.app/ImageJ-linux64'
else:
raise ValueError("Currently only OS X and linux systems can infer Fiji install location.")
tracker_defaults = {"LINKING_MAX_DISTANCE":60.,"GAP_CLOSING_MAX_DISTANCE":60.,
"ALLOW_TRACK_SPLITTING":True,"SPLITTING_MAX_DISTANCE":60.,
"ALLOW_TRACK_MERGING":True,"MERGING_MAX_DISTANCE":60.,
"MAX_FRAME_GAP":2,"CUTOFF_PERCENTILE":0.90}
for key, val in tracker_defaults.items():
_ = tracker_settings.setdefault(key,val)
trackmate_call = ('''{fiji_path} --ij2 --headless --console --run {ops_path}/external/TrackMate/track_centroids.py'''
.format(fiji_path=fiji_path,ops_path=ops.__path__[0]))
variables = ('''"input_path='{input_path}',output_path='{output_path}',threads={threads},tracker_settings='{tracker_settings}'"'''
.format(input_path=input_path,output_path=output_path,
threads=int(threads),tracker_settings=json.dumps(tracker_settings)))
output = subprocess.check_output(' '.join([trackmate_call,variables]), shell=True)
print(output.decode("utf-8"))
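# Illustrative call (paths and values are placeholders): tighter linking distances
# and a higher CUTOFF_PERCENTILE permit more gap closures, merges, and splits,
# as noted in the docstring above.
# call_TrackMate_centroids('process/tile_0.csv', 'process/tile_0_tracked.csv',
#                          threads=2,
#                          tracker_settings={'LINKING_MAX_DISTANCE': 45.,
#                                            'CUTOFF_PERCENTILE': 0.95})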
def format_trackmate(df):
import ast
df = (pd.concat([df,
pd.DataFrame(df['parent_ids'].apply(lambda x: ast.literal_eval(x)).tolist(),
index = df.index,columns=['parent_id_0','parent_id_1'])
],axis=1)
.fillna(value=-1)
.drop(columns=['parent_ids'])
.assign(relabel=-1,parent_cell_0=-1,parent_cell_1=-1)
.astype(int)
.set_index('id')
)
lookup = np.zeros((df.index.max()+2,3),dtype=int)
lookup[df.index] = (df
[['cell','parent_id_0','parent_id_1']]
.values
)
lookup[-1] = np.array([-1,-1,-1])
set_cols = ['relabel','parent_cell_0','parent_cell_1']
current = 1
arr_frames = []
for frame,df_frame in df.groupby('frame'):
df_frame = df_frame.copy()
if frame==0:
arr_frames.append(df_frame.assign(relabel = list(range(current,current+df_frame.pipe(len))),
parent_cell_0 = -1,
parent_cell_1 = -1))
current += df_frame.pipe(len)
continue
# unique child from single parent
idx_propagate = ((df_frame.duplicated(['parent_id_0','parent_id_1'],keep=False)==False)
&
((df_frame[['parent_id_0','parent_id_1']]==-1).sum(axis=1)==1)
).values
lookup[df_frame[idx_propagate].index.values] = df_frame.loc[idx_propagate,set_cols] = lookup[df_frame.loc[idx_propagate,'parent_id_0'].values]
# split, merge, or new
idx_new = ((df_frame.duplicated(['parent_id_0','parent_id_1'],keep=False))
|
((df_frame[['parent_id_0','parent_id_1']]==-1).sum(axis=1)!=1)
).values
lookup[df_frame[idx_new].index.values] = df_frame.loc[idx_new,set_cols] = np.array([list(range(current,current+idx_new.sum())),
lookup[df_frame.loc[idx_new,'parent_id_0'].values,0],
lookup[df_frame.loc[idx_new,'parent_id_1'].values,0]
]).T
current += idx_new.sum()
arr_frames.append(df_frame)
return pd.concat(arr_frames).reset_index()
# recover parent relationships
## During some iterations of TrackMate, saving of parent cell identities was unintentionally
## commented out; the functions below infer those relationships. For a single tile, they assigned
## the same parent-child relationships as TrackMate for >99.8% of cells, so the problem is well constrained.
def recover_parents(df_tracked,threshold=60, cell='cell', ij=('i','j'), keep_cols=['well','tile','track_id','cell']):
# to be run on a table from a single tile
# get junction cells
df_pre_junction = (df_tracked
.groupby(['track_id',cell],group_keys=False)
.apply(lambda x: x.nlargest(1,'frame'))
)
df_post_junction = (df_tracked
.groupby(['track_id',cell],group_keys=False)
.apply(lambda x: x.nsmallest(1,'frame'))
)
arr = []
# assign frame 0 cells or un-tracked cells with no parents
arr.append(df_post_junction
.query('frame==0 | track_id==-1')
[keep_cols]
.assign(parent_cell_0=-1,parent_cell_1=-1)
)
# clean up tables
last_frame = int(df_tracked['frame'].nlargest(1))
df_pre_junction = df_pre_junction.query('frame!=@last_frame & track_id!=-1')
df_post_junction = df_post_junction.query('frame!=0 & track_id!=-1')
# categorize frames to avoid issues with no-cell junction frames
df_pre_junction.loc[:,'frame'] = pd.Categorical(df_pre_junction['frame'],
categories=np.arange(0,last_frame),
ordered=True)
df_post_junction.loc[:,'frame'] = pd.Categorical(df_post_junction['frame'],
categories=np.arange(1,last_frame+1),
ordered=True)
for (frame_pre,df_pre),(frame_post,df_post) in zip(df_pre_junction.groupby('frame'),
df_post_junction.groupby('frame')):
if df_post.pipe(len)==0:
continue
elif df_pre.pipe(len)==0:
arr.append(df_post[keep_cols].assign(parent_cell_0=-1,parent_cell_1=-1))
else:
arr.extend(junction_parent_assignment(pd.concat([df_pre,df_post]),
frame_0=frame_pre,
threshold=threshold,
ij=ij,
cell=cell,
keep_cols=keep_cols
)
)
return pd.concat(arr,ignore_index=True)
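# Illustrative per-tile usage of the recovery above (since recover_parents expects
# a table from a single tile, the full table is grouped by well and tile first).
# The name `df_tracked_all` is an assumption, not part of the original module.
# df_parents = (df_tracked_all
#               .groupby(['well', 'tile'], group_keys=False)
#               .apply(lambda x: recover_parents(x, threshold=60))
#               )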
def junction_parent_assignment(df_junction, frame_0, threshold, ij, cell, keep_cols):
arr = []
i,j = ij
for track,df_track_junction in df_junction.groupby('track_id'):
if (df_track_junction['frame'].nunique()==1):
if df_track_junction.iloc[0]['frame']==(frame_0+1):
# only post-junction cells -> parents = -1
arr.append(df_track_junction[keep_cols].assign(parent_cell_0=-1,parent_cell_1=-1))
elif df_track_junction.iloc[0]['frame']==frame_0:
# only pre-junction cells -> ends of tracks, don't have to assign
continue
else:
before,after = (g[[cell,i,j]].values
for _,g
in df_track_junction.groupby('frame')
)
distances = cdist(after[:,1:],before[:,1:])
edges = distances<threshold
edges = resolve_conflicts(edges,distances, conflict_type='extra')
edges = resolve_conflicts(edges,distances,conflict_type='tangle')
parents = tuple(before[edge,0]
if edge.sum()>0 else np.array([-1,-1])
for edge in edges)
if len(parents) != edges.shape[0]:
raise ValueError('Length of parents tuple does not match number of post-junction cells')
if max([len(p) for p in parents])>2:
raise ValueError(f'''Conflict resolution error; too many parents selected for at least one cell
for track {track} in frame {frame_0}
''')
parents = np.array([np.concatenate([p, | np.array([-1]) | numpy.array |
import numpy as np
import matplotlib as mpl
mpl.use("agg")  # noqa
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.metrics.pairwise
import scipy.cluster.hierarchy as sch
import scipy.sparse as spsp
import scedar.eda as eda
import pytest
class TestSampleDistanceMatrix(object):
"""docstring for TestSampleDistanceMatrix"""
x_3x2 = [[0, 0], [1, 1], [2, 2]]
x_2x4_arr = np.array([[0, 1, 2, 3], [1, 2, 0, 6]])
def test_valid_init(self):
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric='euclidean')
dist_mat = np.array([[0, np.sqrt(2), np.sqrt(8)],
[np.sqrt(2), 0, np.sqrt(2)],
[np.sqrt(8), np.sqrt(2), 0]])
np.testing.assert_allclose(sdm.d, dist_mat)
sdm2 = eda.SampleDistanceMatrix(
self.x_2x4_arr, metric='euclidean', nprocs=5)
sdm2_d1 = np.sqrt(
np.power(self.x_2x4_arr[0] - self.x_2x4_arr[1], 2).sum())
np.testing.assert_allclose(sdm2.d,
np.array([[0, sdm2_d1], [sdm2_d1, 0]]))
sdm3 = eda.SampleDistanceMatrix(
self.x_2x4_arr, metric='correlation', nprocs=5)
sdm3_corr_d = (1 - np.dot(
self.x_2x4_arr[0] - self.x_2x4_arr[0].mean(),
self.x_2x4_arr[1] - self.x_2x4_arr[1].mean()) /
(np.linalg.norm(self.x_2x4_arr[0] - self.x_2x4_arr[0].mean(),
2) *
np.linalg.norm(self.x_2x4_arr[1] - self.x_2x4_arr[1].mean(),
2)))
np.testing.assert_allclose(sdm3.d,
np.array([[0, 0.3618551],
[0.3618551, 0]]))
np.testing.assert_allclose(sdm3.d,
np.array([[0, sdm3_corr_d],
[sdm3_corr_d, 0]]))
sdm4 = eda.SampleDistanceMatrix(self.x_3x2, dist_mat)
sdm5 = eda.SampleDistanceMatrix(
self.x_3x2, dist_mat, metric='euclidean')
sdm5 = eda.SampleDistanceMatrix([[1, 2]], metric='euclidean')
assert sdm5.tsne(n_iter=250).shape == (1, 2)
def test_empty_init(self):
with pytest.raises(ValueError) as excinfo:
eda.SampleDistanceMatrix(np.empty(0), metric='euclidean')
sdm = eda.SampleDistanceMatrix(np.empty((0, 0)), metric='euclidean')
assert len(sdm.sids) == 0
assert len(sdm.fids) == 0
assert sdm._x.shape == (0, 0)
assert sdm._d.shape == (0, 0)
assert sdm._col_sorted_d.shape == (0, 0)
assert sdm._col_argsorted_d.shape == (0, 0)
assert sdm.tsne(n_iter=250).shape == (0, 0)
def test_init_wrong_metric(self):
# when d is None, metric cannot be precomputed
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, metric='precomputed')
# lazy load d
eda.SampleDistanceMatrix(self.x_3x2, metric='unknown')
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, metric='unknown').d
eda.SampleDistanceMatrix(self.x_3x2, metric=1)
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, metric=1).d
eda.SampleDistanceMatrix(self.x_3x2, metric=1.)
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, metric=1.).d
eda.SampleDistanceMatrix(self.x_3x2, metric=('euclidean', ))
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, metric=('euclidean', )).d
eda.SampleDistanceMatrix(self.x_3x2, metric=['euclidean'])
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, metric=['euclidean']).d
def test_init_wrong_d_type(self):
d_3x3 = np.array([[0, np.sqrt(2), np.sqrt(8)],
['1a1', 0, np.sqrt(2)],
[np.sqrt(8), np.sqrt(2), 0]])
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, d_3x3)
def test_init_wrong_d_size(self):
d_2x2 = np.array([[0, np.sqrt(2)],
[np.sqrt(2), 0]])
d_2x2 = np.array([[0, np.sqrt(2)],
[np.sqrt(2), 0]])
d_1x6 = np.arange(6)
d_3x2 = np.array([[0, np.sqrt(2)],
[np.sqrt(2), 0],
[1, 2]])
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, d_2x2)
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, d_3x2)
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, d_3x2)
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, d_1x6)
def test_to_classified(self):
sdm = eda.SampleDistanceMatrix(np.arange(100).reshape(50, -1),
metric='euclidean')
# initialize cached results
sdm.tsne_plot()
sdm.pca_plot()
sdm.s_knn_graph(2)
sdm.s_ith_nn_d(1)
sdm.s_ith_nn_ind(1)
labs = [0]*10 + [1]*20 + [0]*10 + [2]*10
slcs = sdm.to_classified(labs)
assert slcs.labs == labs
assert slcs._lazy_load_d is sdm._lazy_load_d
assert slcs._lazy_load_d is not None
assert slcs._metric == sdm._metric
assert slcs._nprocs == sdm._nprocs
assert slcs.sids == sdm.sids
assert slcs.fids == sdm.fids
# tsne
assert slcs._tsne_lut is not None
assert slcs._tsne_lut == sdm._tsne_lut
assert slcs._lazy_load_last_tsne is not None
assert slcs._lazy_load_last_tsne is sdm._lazy_load_last_tsne
# knn
assert slcs._lazy_load_col_sorted_d is not None
assert slcs._lazy_load_col_sorted_d is sdm._lazy_load_col_sorted_d
assert slcs._lazy_load_col_argsorted_d is not None
assert (slcs._lazy_load_col_argsorted_d is
sdm._lazy_load_col_argsorted_d)
assert slcs._knn_ng_lut is not None
assert slcs._knn_ng_lut == sdm._knn_ng_lut
# pca
assert slcs._pca_n_components is not None
assert slcs._lazy_load_skd_pca is not None
assert slcs._lazy_load_pca_x is not None
assert slcs._pca_n_components == sdm._pca_n_components
assert slcs._lazy_load_skd_pca is sdm._lazy_load_skd_pca
assert slcs._lazy_load_pca_x is sdm._lazy_load_pca_x
def test_sort_x_by_d(self):
x1 = np.array([[0, 5, 30, 10],
[1, 5, 30, 10],
[0, 5, 33, 10],
[2, 5, 30, 7],
[2, 5, 30, 9]])
x2 = x1.copy()
opt_inds = eda.HClustTree.sort_x_by_d(
x=x2.T, metric='euclidean', optimal_ordering=True)
assert opt_inds == [2, 3, 1, 0]
np.testing.assert_equal(x1, x2)
x3 = np.array([[0, 0, 30, 10],
[1, 2, 30, 10],
[0, 3, 33, 10],
[2, 4, 30, 7],
[2, 5, 30, 9]])
x4 = x3.copy()
opt_inds = eda.HClustTree.sort_x_by_d(
x=x4.T, metric='euclidean', optimal_ordering=True)
assert opt_inds == [2, 3, 1, 0]
np.testing.assert_equal(x3, x4)
def test_sort_features(self):
x = np.array([[0, 2, 30, 10],
[1, 2, 30, 10],
[0, 3, 33, 10],
[2, 5, 30, 7],
[2, 5, 30, 9]])
sdm = eda.SampleDistanceMatrix(
x, metric='euclidean')
sdm2 = eda.SampleDistanceMatrix(
x, metric='euclidean')
sdm2.sort_features(fdist_metric='euclidean', optimal_ordering=True)
assert sdm2.fids == [2, 3, 1, 0]
def test_get_tsne_kv(self):
tmet = 'euclidean'
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
assert sdm.get_tsne_kv(1) is None
assert sdm.get_tsne_kv(1) is None
assert sdm.get_tsne_kv(0) is None
assert sdm.get_tsne_kv(2) is None
def test_get_tsne_kv_wrong_args(self):
tmet = 'euclidean'
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
with pytest.raises(ValueError) as excinfo:
sdm.get_tsne_kv([1, 2, 3])
with pytest.raises(ValueError) as excinfo:
sdm.get_tsne_kv({1: 2})
def test_put_tsne_wrong_args(self):
tmet = 'euclidean'
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
with pytest.raises(ValueError) as excinfo:
sdm.put_tsne(1, [1, 2, 3])
with pytest.raises(ValueError) as excinfo:
sdm.put_tsne({1: 2}, [1, 2, 3])
def test_tsne(self):
tmet = 'euclidean'
tsne_kwargs = {'metric': tmet, 'n_iter': 250,
'random_state': 123}
ref_tsne = eda.tsne(self.x_3x2, **tsne_kwargs)
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
assert sdm.tsne_lut == {}
tsne1 = sdm.tsne(n_iter=250, random_state=123)
np.testing.assert_allclose(ref_tsne, tsne1)
np.testing.assert_allclose(ref_tsne, sdm._last_tsne)
assert tsne1.shape == (3, 2)
assert len(sdm.tsne_lut) == 1
tsne2 = sdm.tsne(store_res=False, **tsne_kwargs)
np.testing.assert_allclose(ref_tsne, tsne2)
assert len(sdm.tsne_lut) == 1
with pytest.raises(Exception) as excinfo:
wrong_metric_kwargs = tsne_kwargs.copy()
wrong_metric_kwargs['metric'] = 'correlation'
sdm.tsne(**wrong_metric_kwargs)
assert len(sdm.tsne_lut) == 1
tsne3 = sdm.tsne(store_res=True, **tsne_kwargs)
np.testing.assert_allclose(ref_tsne, tsne3)
# (param, ind) as key, so same params get an extra entry.
assert len(sdm.tsne_lut) == 2
np.testing.assert_allclose(tsne1, sdm.get_tsne_kv(1)[1])
np.testing.assert_allclose(tsne3, sdm.get_tsne_kv(2)[1])
assert tsne1 is not sdm.get_tsne_kv(1)[1]
assert tsne3 is not sdm.get_tsne_kv(2)[1]
tsne4 = sdm.tsne(store_res=True, n_iter=250, random_state=123)
np.testing.assert_allclose(ref_tsne, tsne4)
np.testing.assert_allclose(sdm.get_tsne_kv(3)[1], tsne4)
assert len(sdm.tsne_lut) == 3
tsne5 = sdm.tsne(store_res=True, n_iter=251, random_state=123)
tsne6 = sdm.tsne(store_res=True, n_iter=251, random_state=123)
np.testing.assert_allclose(tsne6, tsne5)
np.testing.assert_allclose(tsne5, sdm.get_tsne_kv(4)[1])
np.testing.assert_allclose(tsne6, sdm.get_tsne_kv(5)[1])
assert len(sdm.tsne_lut) == 5
def test_par_tsne(self):
tmet = 'euclidean'
param_list = [{'metric': tmet, 'n_iter': 250, 'random_state': 123},
{'metric': tmet, 'n_iter': 250, 'random_state': 125},
{'metric': tmet, 'n_iter': 250, 'random_state': 123}]
ref_tsne = eda.tsne(self.x_3x2, **param_list[0])
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
# If not store, should not update lut
sdm.par_tsne(param_list, store_res=False)
assert sdm._lazy_load_last_tsne is None
assert sdm.tsne_lut == {}
# store results
tsne1, tsne2, tsne3 = sdm.par_tsne(param_list)
np.testing.assert_allclose(ref_tsne, tsne1)
np.testing.assert_allclose(ref_tsne, tsne3)
np.testing.assert_allclose(ref_tsne, sdm._last_tsne)
assert tsne1.shape == (3, 2)
assert len(sdm.tsne_lut) == 3
np.testing.assert_allclose(tsne1, sdm.get_tsne_kv(1)[1])
np.testing.assert_allclose(tsne2, sdm.get_tsne_kv(2)[1])
np.testing.assert_allclose(tsne3, sdm.get_tsne_kv(3)[1])
np.testing.assert_allclose(tsne3, sdm.get_tsne_kv(1)[1])
def test_par_tsne_mp(self):
tmet = 'euclidean'
param_list = [{'metric': tmet, 'n_iter': 250, 'random_state': 123},
{'metric': tmet, 'n_iter': 250, 'random_state': 125},
{'metric': tmet, 'n_iter': 250, 'random_state': 123}]
ref_tsne = eda.tsne(self.x_3x2, **param_list[0])
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
# If not store, should not update lut
sdm.par_tsne(param_list, store_res=False, nprocs=3)
assert sdm._lazy_load_last_tsne is None
assert sdm.tsne_lut == {}
# store results
tsne1, tsne2, tsne3 = sdm.par_tsne(param_list, nprocs=3)
np.testing.assert_allclose(ref_tsne, tsne1)
np.testing.assert_allclose(ref_tsne, tsne3)
np.testing.assert_allclose(ref_tsne, sdm._last_tsne)
assert tsne1.shape == (3, 2)
assert len(sdm.tsne_lut) == 3
np.testing.assert_allclose(tsne1, sdm.get_tsne_kv(1)[1])
np.testing.assert_allclose(tsne2, sdm.get_tsne_kv(2)[1])
np.testing.assert_allclose(tsne3, sdm.get_tsne_kv(3)[1])
np.testing.assert_allclose(tsne3, sdm.get_tsne_kv(1)[1])
def test_tsne_default_init(self):
tmet = 'euclidean'
tsne_kwargs = {'metric': tmet, 'n_iter': 250,
'random_state': 123}
ref_tsne = eda.tsne(self.x_3x2, **tsne_kwargs)
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
init_tsne = sdm._last_tsne
assert init_tsne.shape == (3, 2)
assert len(sdm.tsne_lut) == 1
tsne2 = sdm.tsne(store_res=True, **tsne_kwargs)
np.testing.assert_allclose(ref_tsne, tsne2)
assert len(sdm.tsne_lut) == 2
def test_ind_x(self):
sids = list("abcdef")
fids = list(range(10, 20))
sdm = eda.SampleDistanceMatrix(
np.random.ranf(60).reshape(6, -1), sids=sids, fids=fids)
# select sf
ss_sdm = sdm.ind_x([0, 5], list(range(9)))
assert ss_sdm._x.shape == (2, 9)
assert ss_sdm.sids == ['a', 'f']
assert ss_sdm.fids == list(range(10, 19))
np.testing.assert_equal(
ss_sdm.d, sdm._d[np.ix_((0, 5), (0, 5))])
# select with Default
ss_sdm = sdm.ind_x()
assert ss_sdm._x.shape == (6, 10)
assert ss_sdm.sids == list("abcdef")
assert ss_sdm.fids == list(range(10, 20))
np.testing.assert_equal(ss_sdm.d, sdm._d)
# select with None
ss_sdm = sdm.ind_x(None, None)
assert ss_sdm._x.shape == (6, 10)
assert ss_sdm.sids == list("abcdef")
assert ss_sdm.fids == list(range(10, 20))
np.testing.assert_equal(ss_sdm.d, sdm._d)
# select non-existent inds
with pytest.raises(IndexError) as excinfo:
sdm.ind_x([6])
with pytest.raises(IndexError) as excinfo:
sdm.ind_x(None, ['a'])
def test_ind_x_empty(self):
sids = list("abcdef")
fids = list(range(10, 20))
sdm = eda.SampleDistanceMatrix(
| np.random.ranf(60) | numpy.random.ranf |
from collections import OrderedDict
import sklearn
import sklearn.metrics
import numpy as np
import torch
import torch.nn.functional as F
from . mol2graph import mol2torchdata
from torch_geometric.data import DataLoader
def clear_model(model):
del model
torch.cuda.empty_cache()
def get_dataloader(df, index, target, mol_column, batch_size, y_scaler):
y_values = df.loc[index, target].values.reshape(-1, 1)
y = y_scaler.transform(y_values).ravel().astype(np.float32)
x = df.loc[index, mol_column].progress_apply(mol2torchdata).tolist()
for data, y_i in zip(x, y):
data.y = torch.tensor([y_i], dtype=torch.float)
data_loader = DataLoader(x, batch_size=batch_size,
shuffle=True, drop_last=True)
return data_loader
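# Illustrative setup for get_dataloader: y_scaler is assumed to be a fitted
# scikit-learn scaler (e.g. StandardScaler) so eval_step can inverse-transform
# predictions, and progress_apply assumes tqdm.pandas() has been registered.
# Column names and indices below are placeholders.
# from sklearn.preprocessing import StandardScaler
# y_scaler = StandardScaler().fit(df.loc[train_index, target].values.reshape(-1, 1))
# train_loader = get_dataloader(df, train_index, target, 'mol', batch_size=32,
#                               y_scaler=y_scaler)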
def train_step(model, data_loader, optimizer, scheduler, device):
model.train()
loss_sum = 0
for data in data_loader:
data = data.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.mse_loss(output, data.y)
loss.backward()
loss_sum += loss.item() * data.num_graphs
optimizer.step()
n = float(sum([data.num_graphs for data in data_loader]))
stats = {'train_loss': loss_sum / n}
if scheduler:
scheduler.step(loss_sum)
return stats
def reg_stats(y_true, y_pred):
r2 = sklearn.metrics.r2_score(y_true, y_pred)
mae = sklearn.metrics.mean_absolute_error(y_true, y_pred)
return r2, mae
def eval_step(model, data_loader, y_scaler, device, cv_result,
best_value):
with torch.no_grad():
model.eval()
loss_sum = 0
y_pred = []
y_true = []
for data in data_loader:
data = data.to(device)
output = model(data)
y_pred.extend(output.cpu().numpy())
y_true.extend(data.y.cpu().numpy())
loss = F.mse_loss(output, data.y)
loss_sum += loss.item() * data.num_graphs
y_pred = y_scaler.inverse_transform(
np.array(y_pred).reshape(-1, 1)).ravel()
y_true = y_scaler.inverse_transform(
np.array(y_true).reshape(-1, 1)).ravel()
n = float(sum([data.num_graphs for data in data_loader]))
stats = OrderedDict({'test_loss': loss_sum / n})
stats['test_r2'], stats['test_mae'] = reg_stats(y_true, y_pred)
if stats['test_r2'] >= best_value:
best_value = stats['test_r2']
cv_result['target'] = y_true
cv_result['pred'] = y_pred
return stats
def get_embeddings(model, data_loader, y_scaler, device):
with torch.no_grad():
model.eval()
z = []
y = []
for data in data_loader:
data = data.to(device)
z_data = model.forward_gnn(data)
y_data = model.pred(z_data)
y.append(y_data.cpu().numpy())
z.append(z_data.cpu().numpy())
y = y_scaler.inverse_transform(np.vstack(y).reshape(-1, 1)).ravel()
z = | np.vstack(z) | numpy.vstack |
"""
## Script for evaluating the ICSG3D reconstructions
## Example:
## >> python3 eval.py --name heusler --samples 5000
## Plots the reconstructed lattice params and EMD of atomic sites
--------------------------------------------------
## Author: <NAME>.
## Email: <EMAIL>
## Version: 1.0.0
--------------------------------------------------
## License: MIT
## Copyright: Copyright <NAME> & <NAME>rim 2020, ICSG3D
-------------------------------------------------
"""
import argparse
import os
import re
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rc
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
from unet.unet import AtomUnet
from utils import (
create_crystal,
data_split,
get_sites,
to_lattice_params,
to_voxel_params,
)
from vae.data import VAEDataGenerator
from vae.lattice_vae import LatticeDFCVAE
from watershed import watershed_clustering
font = {"family": "serif"}
rc("font", **font)
rc("text", usetex=True)
rc("text.latex", preamble=r"\usepackage{cmbright}")
def emd(x, y):
"""
Computes the Earth Mover Distance between two point sets
--------------------------------------------------------
params: point sets x and y (N x M)
"""
dist = cdist(x, y)
assign = linear_sum_assignment(dist)
return dist[assign].sum() / min(len(x), len(y))
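# Illustrative sanity check for emd(), not used by the evaluation below.
def _emd_sanity_check():
    """Identical point sets have zero EMD, and rigidly shifting every site by
    (1, 0, 0) Angstrom gives an average per-site distance of exactly 1."""
    x = np.random.rand(5, 3)
    assert np.isclose(emd(x, x), 0.0)
    assert np.isclose(emd(x, x + np.array([1.0, 0.0, 0.0])), 1.0)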
if __name__ == "__main__":
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument("--name", metavar="name", type=str, help="Name of data folder")
parser.add_argument(
"--batch_size", metavar="batch_size", type=int, help="Batch size", default=10
)
parser.add_argument(
"--samples",
metavar="samples",
type=int,
help="Number of samples",
default=78750,
)
parser.add_argument(
"--eps_frac",
metavar="eps_frac",
type=float,
help="Eps of lattice vector",
default=0.25,
)
parser.add_argument(
"--ncond",
metavar="ncond",
type=int,
help="Number of condition bins",
default=10,
)
parser.add_argument(
"--clus_iters",
metavar="clus_iters",
type=int,
help="Number of iterations for watershed clustering",
default=5,
)
parser.add_argument(
"--split",
metavar="split",
type=float,
help="Train-test split fraction",
default=0.8,
)
parser.add_argument(
"--d",
metavar="d",
type=int,
help="Dimension of density matrices (number of voxels)",
default=32,
)
namespace = parser.parse_args()
mode = namespace.name
ncond = namespace.ncond
data_path = os.path.join("data", mode, "matrices")
cif_path = os.path.join("data", mode, "cifs")
csv_path = os.path.join("data", mode, mode + ".csv")
d = namespace.d
input_shape = (d, d, d, 4)
n = namespace.samples
batch_size = namespace.batch_size
eps = namespace.eps_frac
vae_weights = os.path.join(
"saved_models", "vae", mode, "vae_weights_" + mode + ".best.hdf5"
)
unet_weights = os.path.join(
"saved_models", "unet", mode, "unet_weights_" + mode + ".best.hdf5"
)
perceptual_model = os.path.join(
"saved_models", "unet", mode, "unet_weights_" + mode + ".best.h5"
)
clustering_max_iters = namespace.clus_iters
os.makedirs(os.path.join("output", "eval", mode), exist_ok=True)
# Split the data
training_ids, validation_ids = data_split(
data_path, n, frac=namespace.split, n_rot=0
)
validation_generator = VAEDataGenerator(
validation_ids,
data_path=data_path,
property_csv=csv_path,
batch_size=batch_size,
n_channels=input_shape[-1],
shuffle=False,
n_bins=ncond,
)
# Create the VAE
vae = LatticeDFCVAE(perceptual_model=perceptual_model, cond_shape=ncond)
vae._set_model(weights=vae_weights, batch_size=batch_size)
# Create the Unet
unet = AtomUnet(weights=unet_weights)
true_num_atoms = []
pred_num_atoms = []
true_species = []
pred_species = []
true_lc = []
pred_lc = []
true_coords = []
pred_coords = []
emds = []
c = 0
for M, cond in validation_generator: # Density matrix, condition
# Get the reconstruction
M_prime = vae.model.predict([M, cond])
coords_prime = M_prime[:, :, :, :, 1:]
# Compute the reconstructed species matrix
S_prime, S_b_prime = unet.model.predict(M_prime)
S_prime = np.argmax(S_prime, axis=-1).reshape(batch_size, 32, 32, 32, 1)
S_b_prime[S_b_prime >= 0.8] = 1.0
S_b_prime[S_b_prime < 0.8] = 0.0
S_prime_coords = np.concatenate([S_prime, coords_prime], axis=-1)
# Calculate reconstructed lattice params
l_pred = to_lattice_params(coords_prime)
# Reconstructed voxel params
dv_pred = to_voxel_params(l_pred)
ids = validation_generator.list_IDs_temp
for i, S_prime_i in enumerate(S_prime_coords):
print(ids[i])
# True data
true_id = ids[i]
crystal = create_crystal(
os.path.join(cif_path, re.split("_|\.", true_id)[0] + ".cif"),
primitive=False,
)
N, z, r = get_sites(crystal)
lpt = [crystal.lattice.a, crystal.lattice.b, crystal.lattice.c]
N = np.multiply(N, lpt[:3])
dist = np.linalg.norm(N, ord=2, axis=1)
N = N[np.argsort(dist)]
# Predicted
try:
species, mu = watershed_clustering(
M_prime[i, :, :, :, 0], S_prime[i], S_b_prime[i]
)
except Exception:
print(ids[i], "failed")
continue
for s in N:
true_coords.append(s)
true_lc.append(lpt)
true_num_atoms.append(len(N))
true_species.append(np.unique(z))
pred_lc.append(l_pred[i])
lpp = eps * l_pred[i, :3].reshape(1, 3)
mu = mu * dv_pred[i] - (lpp) + (dv_pred[i] / 2.0)
dist = np.linalg.norm(mu, ord=2, axis=1)
mu = mu[np.argsort(dist)]
dist = emd(mu, N)
emds.append(dist)
# sort pred coords by dist from 0
pred_num_atoms.append(len(species))
pred_species.append(np.unique(species))
c += 1
true_num_atoms = np.array(true_num_atoms)
pred_num_atoms = np.array(pred_num_atoms)
true_lc = np.array(true_lc)
pred_lc = np.array(pred_lc)
print("\nMEAN EMD: ", np.mean(emds))
print("\nMEAN DAtoms: ", np.mean(np.abs(true_num_atoms - pred_num_atoms)))
# Plots
plt.figure()
plt.hist(emds, bins=50, color="tab:cyan")
plt.axvline(
x=np.mean(emds), linestyle="--", color="r", label="Mean = %.3f" % np.mean(emds)
)
plt.xlabel("EMD (Angstrom)")
plt.ylabel("Count")
plt.legend(loc="best")
plt.savefig(os.path.join("output", "eval", mode, "emd.svg"), format="svg")
plt.close()
plt.figure()
plt.hist(np.abs(true_lc - pred_lc)[:, 0], bins=50, color="tab:cyan")
plt.axvline(
x=np.mean(np.abs(true_lc - pred_lc)[:, 0]),
linestyle="--",
color="tab:red",
label="Mean = %.3f" % np.mean(np.abs(true_lc - pred_lc)[:, 0]),
)
plt.xlabel("$|a_{true}$ - $a_{pred}|$ (Angstrom)")
plt.ylabel("Count")
plt.legend(loc="best")
plt.savefig(os.path.join("output", "eval", mode, "lattice_a.svg"), format="svg")
plt.close()
plt.figure()
plt.hist(np.abs(true_lc - pred_lc)[:, 1], bins=50, color="tab:cyan")
plt.axvline(
x=np.mean(np.abs(true_lc - pred_lc)[:, 1]),
linestyle="--",
color="tab:red",
label="Mean = %.3f" % np.mean(np.abs(true_lc - pred_lc)[:, 1]),
)
plt.xlabel("$|b_{true}$ - $b_{pred}|$ (Angstrom)")
plt.ylabel("Count")
plt.legend(loc="best")
plt.savefig(os.path.join("output", "eval", mode, "lattice_b.svg"), format="svg")
plt.close()
plt.figure()
plt.hist(np.abs(true_lc - pred_lc)[:, 2], bins=50, color="tab:cyan")
plt.axvline(
x=np.mean(np.abs(true_lc - pred_lc)[:, 2]),
linestyle="--",
color="tab:red",
label="Mean = %.3f" % np.mean(np.abs(true_lc - pred_lc)[:, 2]),
)
plt.xlabel("$|c_{true}$ - $c_{pred}|$ (Angstrom)")
plt.ylabel("Count")
plt.legend(loc="best")
plt.savefig(os.path.join("output", "eval", mode, "lattice_c.svg"), format="svg")
plt.close()
plt.figure()
plt.hist(np.abs(true_num_atoms - pred_num_atoms), bins=50, color="tab:cyan")
plt.axvline(
x=np.mean(np.abs(true_num_atoms - pred_num_atoms)),
linestyle="--",
color="tab:red",
label="Mean = %.3f" % np.mean( | np.abs(true_num_atoms - pred_num_atoms) | numpy.abs |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import scipy.sparse as sps
from mars.tensor.execution.core import Executor
from mars import tensor as mt
from mars.tensor.expressions.datasource import tensor, ones, zeros, arange
from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, \
expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, \
hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, \
flip, flipud, fliplr, repeat, tile, isin
from mars.tensor.expressions.merge import stack
from mars.tensor.expressions.reduction import all as tall
class Test(unittest.TestCase):
def setUp(self):
self.executor = Executor('numpy')
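        # Executor('numpy') evaluates the Mars tensor graphs locally with the NumPy backend used throughout these tests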
def testRechunkExecution(self):
raw = np.random.random((11, 8))
arr = tensor(raw, chunks=3)
arr2 = arr.rechunk(4)
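        # an (11, 8) tensor rechunked to size-4 chunks produces 3 x 2 = 6 chunks, verified piecewise below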
res = self.executor.execute_tensor(arr2)
self.assertTrue(np.array_equal(res[0], raw[:4, :4]))
self.assertTrue(np.array_equal(res[1], raw[:4, 4:]))
self.assertTrue(np.array_equal(res[2], raw[4:8, :4]))
self.assertTrue(np.array_equal(res[3], raw[4:8, 4:]))
self.assertTrue(np.array_equal(res[4], raw[8:, :4]))
self.assertTrue(np.array_equal(res[5], raw[8:, 4:]))
def testCopytoExecution(self):
a = ones((2, 3), chunks=1)
b = tensor([3, -1, 3], chunks=2)
copyto(a, b, where=b > 1)
res = self.executor.execute_tensor(a, concat=True)[0]
expected = np.array([[3, 1, 3], [3, 1, 3]])
np.testing.assert_equal(res, expected)
def testAstypeExecution(self):
raw = np.random.random((10, 5))
arr = tensor(raw, chunks=3)
arr2 = arr.astype('i8')
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0], raw.astype('i8')))
raw = sps.random(10, 5, density=.2)
arr = tensor(raw, chunks=3)
arr2 = arr.astype('i8')
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0].toarray(), raw.astype('i8').toarray()))
def testTransposeExecution(self):
raw = np.random.random((11, 8, 5))
arr = tensor(raw, chunks=3)
arr2 = transpose(arr)
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0], raw.T))
arr3 = transpose(arr, axes=(-2, -1, -3))
res = self.executor.execute_tensor(arr3, concat=True)
self.assertTrue(np.array_equal(res[0], raw.transpose(1, 2, 0)))
raw = sps.random(11, 8)
arr = tensor(raw, chunks=3)
arr2 = transpose(arr)
self.assertTrue(arr2.issparse())
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0].toarray(), raw.T.toarray()))
def testSwapaxesExecution(self):
raw = np.random.random((11, 8, 5))
arr = tensor(raw, chunks=3)
arr2 = arr.swapaxes(2, 0)
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0], raw.swapaxes(2, 0)))
raw = sps.random(11, 8, density=.2)
arr = tensor(raw, chunks=3)
arr2 = arr.swapaxes(1, 0)
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0].toarray(), raw.toarray().swapaxes(1, 0)))
def testMoveaxisExecution(self):
x = zeros((3, 4, 5), chunks=2)
t = moveaxis(x, 0, -1)
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (4, 5, 3))
t = moveaxis(x, -1, 0)
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 3, 4))
t = moveaxis(x, [0, 1], [-1, -2])
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 4, 3))
t = moveaxis(x, [0, 1, 2], [-1, -2, -3])
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 4, 3))
def testBroadcastToExecution(self):
raw = np.random.random((10, 5, 1))
arr = tensor(raw, chunks=2)
arr2 = broadcast_to(arr, (5, 10, 5, 6))
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0], np.broadcast_to(raw, (5, 10, 5, 6))))
def testBroadcastArraysExecutions(self):
x_data = [[1, 2, 3]]
x = tensor(x_data, chunks=1)
y_data = [[1], [2], [3]]
y = tensor(y_data, chunks=2)
a = broadcast_arrays(x, y)
res = [self.executor.execute_tensor(arr, concat=True)[0] for arr in a]
expected = np.broadcast_arrays(x_data, y_data)
for r, e in zip(res, expected):
            np.testing.assert_equal(r, e)
import argparse, cv2, imutils, numpy as np, os, time
from datetime import datetime, timedelta
from tabulate import tabulate
np.random.seed(42)
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True)
ap.add_argument("-o", "--output", required=True)
args = vars(ap.parse_args())
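# Example usage (script and file names are illustrative only):
#   python yolo_video.py --input videos/traffic.mp4 --output output/traffic_out.avi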
LABELS = open("model/coco.names").read().strip().split("\n")
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")
net = cv2.dnn.readNetFromDarknet("model/yolov3.cfg", "model/yolov3.weights")
ln = net.getLayerNames()
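# keep only the names of the unconnected output layers (the i[0] - 1 indexing matches older OpenCV releases)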
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
vs = cv2.VideoCapture(args["input"])
writer = None
(W, H) = (None, None)
prop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() else cv2.CAP_PROP_FRAME_COUNT
total = int(vs.get(prop))
while True:
(grabbed, frame) = vs.read()
if not grabbed:
break
if W is None or H is None:
(H, W) = frame.shape[:2]
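    # build a 416x416 blob scaled to [0, 1] with BGR->RGB swap, as the YOLO network expects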
blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
layerOutputs = net.forward(ln)
end = time.time()
boxes, confidences, classIDs = [], [], []
for output in layerOutputs:
for detection in output:
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            # keep only confident detections of COCO class 2 ("car")
            if classID == 2 and confidence > 0.5:
                box = detection[0:4] * np.array([W, H, W, H])
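                # detection[0:4] holds the normalized box center/size; scaling by [W, H, W, H] converts it to pixel units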
"""
kkpy.io
========================
Functions to read and write files
.. currentmodule:: io
.. autosummary::
kkpy.io.get_fname
kkpy.io.read_aws
kkpy.io.read_2dvd_rho
kkpy.io.read_mxpol_rhi_with_hc
kkpy.io.read_dem
kkpy.io.read_wissdom
"""
import numpy as np
import pandas as pd
import datetime
import glob
import os
import sys
def read_aws(time, date_range=True, datadir='/disk/STORAGE/OBS/AWS/', stnid=None, dask=True):
"""
Read AWS_MIN files into dataframe.
Examples
---------
>>> import datetime
>>> df_aws = kkpy.io.read_aws(time=datetime.datetime(2018,2,28,6,0))
>>> df_aws = kkpy.io.read_aws(time=[datetime.datetime(2018,2,28,6,0),datetime.datetime(2018,3,1,12,0)], datadir='/path/to/aws/files/')
Parameters
----------
time : datetime or array_like of datetime
Datetime of the data you want to read.
If this is array of two elements, it will read all data within two datetimes by default.
If this is array of elements and keyword *date_range* is False, it will read the data of specific time of each element.
date_range : bool, optional
False if argument *time* contains element of specific time you want to read.
datadir : str, optional
Directory of data.
stnid : list, optional
List of station id you want to read. Read all site if None.
dask : boolean, optional
Return a dask dataframe if True, otherwise return a pandas dataframe.
Returns
---------
df_aws : dataframe
Return dataframe of aws data.
"""
import dask.dataframe as dd
if time is None:
sys.exit(f'{__name__}: Check time argument')
    if isinstance(time, datetime.datetime):  # allow a single datetime, as in the docstring examples
        time = [time]
    if len(time) == 1:
        date_range = False
if date_range:
if len(time) != 2:
sys.exit(f'{__name__}: Check time and date_range arguments')
if time[0] >= time[1]:
sys.exit(f'{__name__}: time[1] must be greater than time[0]')
dt_start = datetime.datetime(time[0].year, time[0].month, time[0].day, time[0].hour, time[0].minute)
dt_finis = datetime.datetime(time[1].year, time[1].month, time[1].day, time[1].hour, time[1].minute)
# Get file list
filearr = np.array([])
_dt = dt_start
while _dt <= dt_finis:
_filearr = np.sort(glob.glob(f'{datadir}/{_dt:%Y%m}/{_dt:%d}/AWS_MIN_{_dt:%Y%m%d%H%M}'))
filearr = np.append(filearr, _filearr)
_dt = _dt + datetime.timedelta(minutes=1)
yyyy_filearr = [int(os.path.basename(x)[-12:-8]) for x in filearr]
mm_filearr = [int(os.path.basename(x)[-8:-6]) for x in filearr]
dd_filearr = [int(os.path.basename(x)[-6:-4]) for x in filearr]
hh_filearr = [int(os.path.basename(x)[-4:-2]) for x in filearr]
ii_filearr = [int(os.path.basename(x)[-2:]) for x in filearr]
dt_filearr = np.array([datetime.datetime(yyyy,mm,dd,hh,ii) for (yyyy,mm,dd,hh,ii) in zip(yyyy_filearr, mm_filearr, dd_filearr, hh_filearr, ii_filearr)])
filearr = filearr[(dt_filearr >= dt_start) & (dt_filearr <= dt_finis)]
dt_filearr = dt_filearr[(dt_filearr >= dt_start) & (dt_filearr <= dt_finis)]
else:
list_dt_yyyymmddhhii = np.unique(np.array([datetime.datetime(_time.year, _time.month, _time.day, _time.hour, _time.minute) for _time in time]))
filearr = np.array([f'{datadir}/{_dt:%Y%m}/{_dt:%d}/AWS_MIN_{_dt:%Y%m%d%H%M}' for _dt in list_dt_yyyymmddhhii])
dt_filearr = list_dt_yyyymmddhhii
if len(filearr) == 0:
sys.exit(f'{__name__}: No matched data for the given time period')
df_list = []
names = ['ID', 'YMDHI', 'LON', 'LAT', 'HGT',
'WD', 'WS', 'T', 'RH',
'PA', 'PS', 'RE',
'R60mAcc', 'R1d', 'R15m', 'R60m',
'WDS', 'WSS', 'dummy']
df_aws = dd.read_csv(filearr.tolist(), delimiter='#', names=names, header=None, na_values=[-999,-997])
df_aws = df_aws.drop('dummy', axis=1)
    # use item assignment so the scaling is applied to the columns themselves
    # ('T' accessed as an attribute would collide with the DataFrame transpose attribute)
    for col in ['WD', 'WS', 'T', 'RH', 'PA', 'PS', 'RE',
                'R60mAcc', 'R1d', 'R15m', 'R60m', 'WDS', 'WSS']:
        df_aws[col] = df_aws[col] / 10.
if stnid:
df_aws = df_aws[df_aws['ID'].isin(stnid)]
df_aws = df_aws.set_index(dd.to_datetime(df_aws['YMDHI'], format='%Y%m%d%H%M'))
df_aws = df_aws.drop('YMDHI', axis=1)
if dask:
return df_aws
else:
return df_aws.compute()
def read_2dvd_rho(time, date_range=True, datadir='/disk/common/kwonil_rainy/RHO_2DVD/', filename='2DVD_Dapp_v_rho_201*Deq.txt'):
"""
Read 2DVD density files into dataframe.
Examples
---------
>>> import datetime
>>> df_2dvd_drop = kkpy.io.read_2dvd_rho(time=datetime.datetime(2018,2,28)) # automatically date_range=False
>>> df_2dvd_drop = kkpy.io.read_2dvd_rho(time=[datetime.datetime(2018,2,28,6),datetime.datetime(2018,3,1,12)], datadir='/path/to/2dvd/files/')
>>> df_2dvd_drop = kkpy.io.read_2dvd_rho(time=list_of_many_datetimes, date_range=False)
>>> df_2dvd_drop = kkpy.io.read_2dvd_rho(time=datetime.datetime(2018,2,28), filename='2DVD_rho_test_*.txt')
Parameters
----------
time : datetime or array_like of datetime
Datetime of the data you want to read.
If this is array of two elements, it will read all data within two datetimes by default.
If this is array of elements and keyword *date_range* is False, it will read the data of specific time of each element.
date_range : bool, optional
False if argument *time* contains element of specific time you want to read.
datadir : str, optional
Directory of data.
filename : str, optional
File naming of data.
Returns
---------
df_2dvd_drop : dataframe
Return dataframe of 2dvd data.
"""
# Get file list
filearr = np.array(np.sort(glob.glob(f'{datadir}/**/{filename}', recursive=True)))
yyyy_filearr = [int(os.path.basename(x)[-27:-23]) for x in filearr]
mm_filearr = [int(os.path.basename(x)[-23:-21]) for x in filearr]
dd_filearr = [int(os.path.basename(x)[-21:-19]) for x in filearr]
dt_filearr = np.array([datetime.datetime(yyyy,mm,dd) for (yyyy, mm, dd) in zip(yyyy_filearr, mm_filearr, dd_filearr)])
if time is None:
sys.exit(f'{__name__}: Check time argument')
    if isinstance(time, datetime.datetime):  # allow a single datetime, as in the docstring examples
        time = [time]
    if len(time) == 1:
        date_range = False
if date_range:
if len(time) != 2:
sys.exit(f'{__name__}: Check time and date_range arguments')
if time[0] >= time[1]:
sys.exit(f'{__name__}: time[1] must be greater than time[0]')
dt_start = datetime.datetime(time[0].year, time[0].month, time[0].day)
dt_finis = datetime.datetime(time[1].year, time[1].month, time[1].day)
filearr = filearr[(dt_filearr >= dt_start) & (dt_filearr <= dt_finis)]
dt_filearr = dt_filearr[(dt_filearr >= dt_start) & (dt_filearr <= dt_finis)]
else:
list_dt_yyyymmdd = np.unique(np.array([datetime.datetime(_time.year, _time.month, _time.day) for _time in time]))
filearr = filearr[np.isin(dt_filearr, list_dt_yyyymmdd)]
dt_filearr = dt_filearr[np.isin(dt_filearr, list_dt_yyyymmdd)]
if len(filearr) == 0:
sys.exit(f'{__name__}: No matched data for the given time period')
# # READ DATA
columns = ['hhmm', 'Dapp', 'VEL', 'RHO', 'AREA', 'WA', 'HA', 'WB', 'HB', 'Deq']
dflist = []
for i_file, (file, dt) in enumerate(zip(filearr, dt_filearr)):
_df = pd.read_csv(file, skiprows=1, names=columns, header=None, delim_whitespace=True)
_df['year'] = dt.year
_df['month'] = dt.month
_df['day'] = dt.day
_df['hour'] = np.int_(_df['hhmm'] / 100)
_df['minute'] = _df['hhmm'] % 100
_df['jultime'] = pd.to_datetime(_df[['year','month','day','hour','minute']])
_df = _df.drop(['hhmm','year','month','day','hour','minute'], axis=1)
dflist.append(_df)
print(i_file+1, filearr.size, file)
df_2dvd_drop = pd.concat(dflist, sort=False, ignore_index=True)
df_2dvd_drop.set_index('jultime', inplace=True)
if date_range:
if np.sum([np.sum([_time.hour, _time.minute, _time.second]) for _time in time]) != 0:
df_2dvd_drop = df_2dvd_drop.loc[time[0]:time[1]]
return df_2dvd_drop
def read_mxpol_rhi_with_hc(rhifile_nc, hcfile_mat):
"""
Read MXPOL RHI with hydrometeor classification into py-ART radar object.
Examples
---------
>>> rhifile = '/disk/WORKSPACE/kwonil/MXPOL/RAW/2018/02/28/MXPol-polar-20180228-065130-RHI-225_8.nc'
>>> hidfile = '/disk/WORKSPACE/kwonil/MXPOL/HID/2018/02/28/MXPol-polar-20180228-065130-RHI-225_8_zdrcorr_demix.mat'
>>> radar_mxp = kkpy.io.read_mxpol_rhi_with_hc(rhifile, hcfile)
Parameters
----------
rhifile_nc : str or array_like of str
Filepath of RHI data to read.
The number and the order of elements should match with `hcfile_mat`.
hcfile_mat : str or array_like of str
Filepath of hydrometeor classification file to read.
The number and the order of elements should match with `rhifile_nc`.
Returns
---------
radar : py-ART radar object
Return py-ART radar object.
"""
os.environ['PYART_QUIET'] = "True"
import pyart
import scipy.io
from netCDF4 import Dataset
# HC file
HC_proportion = scipy.io.loadmat(hcfile_mat)
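    # loadmat returns a dict of per-class proportion arrays plus MATLAB metadata keys (filtered out below via the '_' check)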
# RHI file
mxpol = Dataset(rhifile_nc,'r')
El = mxpol.variables['Elevation'][:]
wh_hc = np.logical_and(El>5,El<175)
El = El[wh_hc]
R = mxpol.variables['Range'][:]
radar = pyart.testing.make_empty_rhi_radar(HC_proportion['AG'].shape[1], HC_proportion['AG'].shape[0], 1)
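    # empty single-sweep RHI radar container sized from the hydrometeor-class proportion array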
######## HIDs ########
# find most probable habit
for i, _HC in HC_proportion.items():
if '_' in i: continue
if i in 'AG':
            HC3d_proportion = np.array(HC_proportion[i])
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet Train/Eval module.
"""
import os
import six
import subprocess
import sys
import time
import cifar_input
import numpy as np
import resnet_model
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('dataset', 'cifar10', 'cifar10 or cifar100.')
tf.app.flags.DEFINE_string('mode', 'train', 'train or eval.')
tf.app.flags.DEFINE_string('model', '', 'model to train.')
tf.app.flags.DEFINE_string('data_format', 'NHWC',
"""Data layout to use: NHWC (TF native)
or NCHW (cuDNN native).""")
tf.app.flags.DEFINE_string('train_data_path', '',
'Filepattern for training data.')
tf.app.flags.DEFINE_string('eval_data_path', '',
'Filepattern for eval data')
tf.app.flags.DEFINE_integer('image_size', 32, 'Image side length.')
tf.app.flags.DEFINE_string('train_dir', '',
'Directory to keep training outputs.')
tf.app.flags.DEFINE_string('eval_dir', '',
'Directory to keep eval outputs.')
tf.app.flags.DEFINE_integer('eval_batch_count', 50,
'Number of batches to eval.')
tf.app.flags.DEFINE_bool('eval_once', False,
'Whether evaluate the model only once.')
tf.app.flags.DEFINE_string('log_root', '',
'Should be a parent directory of FLAGS.train_dir/eval_dir.')
tf.app.flags.DEFINE_string('checkpoint_dir', '',
'Directory to store the checkpoints')
tf.app.flags.DEFINE_integer('num_gpus', 0,
'Number of gpus used for training. (0 or 1)')
tf.app.flags.DEFINE_bool('use_bottleneck', False,
'Use bottleneck module or not.')
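# Example invocation (script name and paths are illustrative only):
#   python resnet_main.py --mode=train --dataset=cifar10 \
#     --train_data_path='cifar10/data_batch*' --log_root=/tmp/resnet \
#     --train_dir=/tmp/resnet/train --checkpoint_dir=/tmp/resnet/ckpt --num_gpus=1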
def train(hps):
"""Training loop."""
images, labels = cifar_input.build_input(
FLAGS.dataset, FLAGS.train_data_path, hps.batch_size, FLAGS.mode, hps.data_format)
model = resnet_model.ResNet(hps, images, labels, FLAGS.mode)
model.build_graph()
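  # report the number of trainable parameters and the float-op count of the graph via tfprof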
param_stats = tf.contrib.tfprof.model_analyzer.print_model_analysis(
tf.get_default_graph(),
tfprof_options=tf.contrib.tfprof.model_analyzer.
TRAINABLE_VARS_PARAMS_STAT_OPTIONS)
sys.stdout.write('total_params: %d\n' % param_stats.total_parameters)
tf.contrib.tfprof.model_analyzer.print_model_analysis(
tf.get_default_graph(),
tfprof_options=tf.contrib.tfprof.model_analyzer.FLOAT_OPS_OPTIONS)
truth = tf.argmax(model.labels, axis=1)
predictions = tf.argmax(model.predictions, axis=1)
precision = tf.reduce_mean(tf.to_float(tf.equal(predictions, truth)))
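  # batch-level top-1 accuracy, fed to the summary and logging hooks below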
summary_hook = tf.train.SummarySaverHook(
save_steps=100,
output_dir=FLAGS.train_dir,
summary_op=tf.summary.merge([model.summaries,
tf.summary.scalar('Precision', precision)]))
num_steps_per_epoch = 391 # TODO: Don't hardcode this.
logging_hook = tf.train.LoggingTensorHook(
tensors={'step': model.global_step,
'loss': model.cost,
'precision': precision},
every_n_iter=100)
class _LearningRateSetterHook(tf.train.SessionRunHook):
"""Sets learning_rate based on global step."""
def begin(self):
self._lrn_rate = 0.01
def before_run(self, run_context):
return tf.train.SessionRunArgs(
model.global_step, # Asks for global step value.
feed_dict={model.lrn_rate: self._lrn_rate}) # Sets learning rate
def after_run(self, run_context, run_values):
train_step = run_values.results
if train_step < num_steps_per_epoch:
self._lrn_rate = 0.01
elif train_step < (91 * num_steps_per_epoch):
self._lrn_rate = 0.1
elif train_step < (136 * num_steps_per_epoch):
self._lrn_rate = 0.01
elif train_step < (181 * num_steps_per_epoch):
self._lrn_rate = 0.001
else:
self._lrn_rate = 0.0001
class _SaverHook(tf.train.SessionRunHook):
"""Sets learning_rate based on global step."""
def begin(self):
self.saver = tf.train.Saver(max_to_keep=10000)
subprocess.call("rm -rf %s; mkdir -p %s" % (FLAGS.checkpoint_dir,
FLAGS.checkpoint_dir), shell=True)
self.f = open(os.path.join(FLAGS.checkpoint_dir, "times.log"), 'w')
def after_create_session(self, sess, coord):
self.sess = sess
self.start_time = time.time()
def before_run(self, run_context):
return tf.train.SessionRunArgs(
model.global_step # Asks for global step value.
)
def after_run(self, run_context, run_values):
train_step = run_values.results
      epoch = train_step // num_steps_per_epoch  # integer epoch index
if train_step % num_steps_per_epoch == 0:
end_time = time.time()
directory = os.path.join(FLAGS.checkpoint_dir, ("%5d" % epoch).replace(' ', '0'))
subprocess.call("mkdir -p %s" % directory, shell=True)
ckpt_name = 'model.ckpt'
self.saver.save(self.sess, os.path.join(directory, ckpt_name),
global_step=train_step)
self.f.write("Step: %d\tTime: %s\n" % (train_step, end_time - self.start_time))
print("Saved checkpoint after %d epoch(s) to %s..." % (epoch, directory))
sys.stdout.flush()
self.start_time = time.time()
def end(self, sess):
self.f.close()
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.log_root,
hooks=[logging_hook, _LearningRateSetterHook()],
chief_only_hooks=[summary_hook, _SaverHook()],
save_checkpoint_secs=None,
# Since we provide a SummarySaverHook, we need to disable default
# SummarySaverHook. To do that we set save_summaries_steps to 0.
save_summaries_steps=None,
save_summaries_secs=None,
config=tf.ConfigProto(allow_soft_placement=True)) as mon_sess:
for i in range(num_steps_per_epoch * 181):
mon_sess.run(model.train_op)
def evaluate(hps):
"""Eval loop."""
images, labels = cifar_input.build_input(
FLAGS.dataset, FLAGS.eval_data_path, hps.batch_size, FLAGS.mode, hps.data_format)
model = resnet_model.ResNet(hps, images, labels, FLAGS.mode)
model.build_graph()
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(FLAGS.eval_dir)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
tf.train.start_queue_runners(sess)
best_precision = 0.0
while True:
try:
ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root)
except tf.errors.OutOfRangeError as e:
tf.logging.error('Cannot restore checkpoint: %s', e)
continue
if not (ckpt_state and ckpt_state.model_checkpoint_path):
tf.logging.info('No model to eval yet at %s', FLAGS.log_root)
break
tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)
saver.restore(sess, ckpt_state.model_checkpoint_path)
global_step = ckpt_state.model_checkpoint_path.split('/')[-1].split('-')[-1]
if not global_step.isdigit():
global_step = 0
else:
global_step = int(global_step)
total_prediction, correct_prediction, correct_prediction_top5 = 0, 0, 0
for _ in six.moves.range(FLAGS.eval_batch_count):
(summaries, loss, predictions, truth, train_step) = sess.run(
[model.summaries, model.cost, model.predictions,
model.labels, model.global_step])
for (indiv_truth, indiv_prediction) in zip(truth, predictions):
indiv_truth = np.argmax(indiv_truth)
top5_prediction = np.argsort(indiv_prediction)[-5:]
        top1_prediction = np.argsort(indiv_prediction)[-1:]
from flask import Flask,render_template,request,send_file,send_from_directory
import numpy as np
import pandas as pd
import sklearn.metrics as m
from keras.utils.np_utils import to_categorical
import os
import cv2
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense,Conv2D,Flatten,Activation,MaxPooling2D
from keras.preprocessing import image
from keras.models import load_model
import keras.backend.tensorflow_backend as tb
from skimage import transform
import argparse
from keras.applications.vgg16 import VGG16
from keras.models import Model
import tensorflow as tf
tb._SYMBOLIC_SCOPE.value = True
model = load_model('model-facemask.h5')
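# load the trained face-mask classifier once at startup so every request reuses it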
def processesing(arr):
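    # return 0 if the first class score is the larger one, otherwise 1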
for i in arr:
if(i[0]>i[1]):
return 0
else:
return 1
def images(img):
image_read=[]
image1=image.load_img(img)
image2=image.img_to_array(image1)
image3=cv2.resize(image2,(224,224))
image_read.append(image3)
img_array=np.asarray(image_read)
return img_array
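# e.g. images('face.jpg') returns an array of shape (1, 224, 224, 3), ready for model.predict (file name illustrative)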
app = Flask(__name__,static_folder='static',template_folder='templates')
@app.route('/')
def home():
return render_template("index.html")
def percentage(u,pre):
sum=u[0][0]+u[0][1]
return 100*u[0][pre]/sum
@app.route('/predict',methods=['POST','GET'])
def predict():
if request.method=='POST':
img=request.files['ima'].read()
print(img)
        npimg = np.frombuffer(img, np.uint8)  # decode the raw upload bytes into a uint8 buffer
# convert numpy array to image
img = cv2.imdecode(npimg,cv2.IMREAD_COLOR)
cv2.imwrite("images/output.png",img)
image3=cv2.resize(img,(224,224))
        image = np.expand_dims(image3, axis=0)