from __future__ import division
import argparse
import multiprocessing
import numpy as np
import chainer
from chainer import iterators
import chainermn
from chainercv.evaluations import calc_semantic_segmentation_confusion
from chainercv.evaluations import calc_semantic_segmentation_iou
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook
from eval_semantic_segmentation import get_dataset_and_model
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--dataset', choices=('cityscapes', 'ade20k', 'camvid'))
parser.add_argument(
'--model', choices=(
'pspnet_resnet101', 'segnet'))
parser.add_argument('--pretrained-model')
parser.add_argument('--input-size', type=int, default=None)
args = parser.parse_args()
# https://docs.chainer.org/en/stable/chainermn/tutorial/tips_faqs.html#using-multiprocessiterator
if hasattr(multiprocessing, 'set_start_method'):
multiprocessing.set_start_method('forkserver')
p = multiprocessing.Process()
p.start()
p.join()
comm = chainermn.create_communicator()
device = comm.intra_rank
if args.input_size is None:
input_size = None
else:
input_size = (args.input_size, args.input_size)
dataset, label_names, model = get_dataset_and_model(
args.dataset, args.model, args.pretrained_model,
input_size)
assert len(dataset) % comm.size == 0, \
"The size of the dataset should be a multiple "\
"of the number of GPUs"
chainer.cuda.get_device_from_id(device).use()
model.to_gpu()
if comm.rank == 0:
indices = np.arange(len(dataset))
else:
indices = None
indices = chainermn.scatter_dataset(indices, comm)
dataset = dataset.slice[indices]
it = iterators.SerialIterator(
dataset, 1, repeat=False, shuffle=False)
in_values, out_values, rest_values = apply_to_iterator(
model.predict, it, hook=ProgressHook(len(dataset)))
# Delete an iterator of images to save memory usage.
del in_values
pred_labels, = out_values
gt_labels, = rest_values
confusion = calc_semantic_segmentation_confusion(pred_labels, gt_labels)
confusion = comm.allreduce(confusion)
if comm.rank == 0:
iou = calc_semantic_segmentation_iou(confusion)
pixel_accuracy = np.diag(confusion).sum() / confusion.sum()
class_accuracy = np.diag(confusion) / np.sum(confusion, axis=1)
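# The original snippet ends at the line above. A hedged sketch of how such a
# script typically finishes (the report format below is an assumption, not
# part of the source): print per-class and aggregate scores on rank 0 and
# guard the entry point.
for name, class_iou, class_acc in zip(label_names, iou, class_accuracy):
    print('{:>23} : {:.4f} {:.4f}'.format(name, class_iou, class_acc))
print('mean IoU: {:.4f}'.format(np.nanmean(iou)))
print('pixel accuracy: {:.4f}'.format(pixel_accuracy))
print('mean class accuracy: {:.4f}'.format(np.nanmean(class_accuracy)))
if __name__ == '__main__':
    main()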
import os
import pickle
import librosa
import warnings
import numpy as np
import pandas as pd
warnings.filterwarnings('ignore')
from scipy.stats import skew, kurtosis
from pychorus import find_and_output_chorus
from flask import Flask, request, json, render_template
# Create flask app
app = Flask(__name__)
# Load pkl model
model = pickle.load(open('Your model name here', 'rb'))
@app.route('/')
def home():
return render_template('index.html')
@app.route('/predict', methods = ['POST'])
def predict():
song_link = list(request.form.values())[0]
# get features from songs
data = []
d, cols = extract_features(song_link)
data.append(d)
dataset = pd.DataFrame(data, columns=cols)
# select features which we used in ml model
df = pd.read_csv('Data/bestfeatures.csv')
columns = list(df['feature'][df['type']=='best'])
X = dataset[columns]
# making prediction
prediction = model.predict(X)
output = 'Unpopular' if prediction[0] == 0 else 'Popular'
return render_template('index.html', prediction_text = f'The song is {output}')
def statistics(list, feature, columns_name, data):
i = 0
for ele in list:
_skew = skew(ele)
columns_name.append(f'{feature}_skew_{i}')
min = np.min(ele)
columns_name.append(f'{feature}_min_{i}')
max = np.max(ele)
columns_name.append(f'{feature}_max_{i}')
std = np.std(ele)
columns_name.append(f'{feature}_std_{i}')
mean = np.mean(ele)
columns_name.append(f'{feature}_mean_{i}')
median = np.median(ele)
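# The snippet is truncated here. A hedged completion that mirrors the pattern
# above (the remaining statistic, the appending to `data`, the counter update
# and the return value are assumptions):
columns_name.append(f'{feature}_median_{i}')
_kurtosis = kurtosis(ele)
columns_name.append(f'{feature}_kurtosis_{i}')
data.extend([_skew, min, max, std, mean, median, _kurtosis])  # still inside the loop over `list`
i += 1
return data, columns_name  # after the loop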
# OpenSeesPy visualization module
# Author: <NAME>
# Faculty of Civil Engineering and Architecture
# Opole University of Technology, Poland
# ver. 0.94, 2020 August
# License: MIT
# Notes:
# 1. matplotlib's plt.axis('equal') does not work for 3d plots
# therefore right angles are not guaranteed to be 90 degrees on the
# plots
import openseespy.opensees as ops # installed from pip
# import opensees as ops # local compilation
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from matplotlib.patches import Circle, Polygon
from matplotlib.animation import FuncAnimation
import matplotlib.tri as tri
# default settings
# fmt: format string setting color, marker and linestyle
# check documentation on matplotlib's plot
# continuous interpolated shape line
fmt_interp = 'b-' # blue solid line, no markers
# element end nodes
fmt_nodes = 'rs' # red square markers, no line
# undeformed model
fmt_undefo = 'g--' # green dashed line, no markers
# section forces
fmt_secforce = 'b-' # blue solid line
# figure left right bottom top offsets
fig_lbrt = (.04, .04, .96, .96)
# azimuth and elevation in degrees
az_el = (-60., 30.)
# figure width and height in centimeters
fig_wi_he = (16., 10.)
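# The settings above are only module-level defaults; each public plotting
# function also accepts them as keyword arguments, so they can be overridden
# per call, e.g. (a usage sketch, not part of the original module):
# opsv.plot_defo(sfac=2., fmt_undefo='k:', fig_wi_he=(20., 12.))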
def _plot_model_2d(node_labels, element_labels, offset_nd_label, axis_off):
max_x_crd, max_y_crd, max_crd = -np.inf, -np.inf, -np.inf
node_tags = ops.getNodeTags()
ele_tags = ops.getEleTags()
nen = np.shape(ops.eleNodes(ele_tags[0]))[0]
# truss and beam/frame elements
if nen == 2:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
if x_crd > max_x_crd:
max_x_crd = x_crd
if y_crd > max_y_crd:
max_y_crd = y_crd
max_crd = np.amax([max_x_crd, max_y_crd])
_offset = 0.005 * max_crd
for i, ele_tag in enumerate(ele_tags):
nd1, nd2 = ops.eleNodes(ele_tag)
# element node1-node2, x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0], ops.nodeCoord(nd2)[0]])
ey = np.array([ops.nodeCoord(nd1)[1], ops.nodeCoord(nd2)[1]])
# location of label
xt = sum(ex)/nen
yt = sum(ey)/nen
plt.plot(ex, ey, 'bo-')
if element_labels:
if ex[1]-ex[0] == 0:
va = 'center'
ha = 'left'
offset_x, offset_y = _offset, 0.0
elif ey[1]-ey[0] == 0:
va = 'bottom'
ha = 'center'
offset_x, offset_y = 0.0, _offset
else:
va = 'bottom'
ha = 'left'
offset_x, offset_y = 0.03, 0.03
plt.text(xt+offset_x, yt+offset_y, f'{ele_tag}', va=va, ha=ha,
color='red')
if node_labels:
for node_tag in node_tags:
if not offset_nd_label == 'above':
offset_nd_label_x, offset_nd_label_y = _offset, _offset
va = 'bottom'
ha = 'left'
else:
offset_nd_label_x, offset_nd_label_y = 0.0, _offset
va = 'bottom'
ha = 'center'
plt.text(ops.nodeCoord(node_tag)[0]+offset_nd_label_x,
ops.nodeCoord(node_tag)[1]+offset_nd_label_y,
f'{node_tag}', va=va, ha=ha, color='blue')
# plt.axis('equal')
# 2d triangular (tri31) elements
elif nen == 3:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
if x_crd > max_x_crd:
max_x_crd = x_crd
if y_crd > max_y_crd:
max_y_crd = y_crd
max_crd = np.amax([max_x_crd, max_y_crd])
_offset = 0.005 * max_crd
_offnl = 0.003 * max_crd
for i, ele_tag in enumerate(ele_tags):
nd1, nd2, nd3 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0],
ops.nodeCoord(nd3)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1],
ops.nodeCoord(nd3)[1]])
# location of label
xt = sum(ex)/nen
yt = sum(ey)/nen
plt.plot(np.append(ex, ex[0]), np.append(ey, ey[0]), 'bo-')
if element_labels:
va = 'center'
ha = 'center'
plt.text(xt, yt, f'{ele_tag}', va=va, ha=ha, color='red')
if node_labels:
for node_tag in node_tags:
if not offset_nd_label == 'above':
offset_nd_label_x, offset_nd_label_y = _offnl, _offnl
va = 'bottom'
# va = 'center'
ha = 'left'
else:
offset_nd_label_x, offset_nd_label_y = 0.0, _offnl
va = 'bottom'
ha = 'center'
plt.text(ops.nodeCoord(node_tag)[0]+offset_nd_label_x,
ops.nodeCoord(node_tag)[1]+offset_nd_label_y,
f'{node_tag}', va=va, ha=ha, color='blue')
# 2d quadrilateral (quad) elements
elif nen == 4:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
if x_crd > max_x_crd:
max_x_crd = x_crd
if y_crd > max_y_crd:
max_y_crd = y_crd
max_crd = np.amax([max_x_crd, max_y_crd])
_offset = 0.005 * max_crd
_offnl = 0.003 * max_crd
for i, ele_tag in enumerate(ele_tags):
nd1, nd2, nd3, nd4 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0],
ops.nodeCoord(nd3)[0],
ops.nodeCoord(nd4)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1],
ops.nodeCoord(nd3)[1],
ops.nodeCoord(nd4)[1]])
# location of label
xt = sum(ex)/nen
yt = sum(ey)/nen
# plt.plot(np.append(ex, ex[0]), np.append(ey, ey[0]), 'bo-')
plt.plot(np.append(ex, ex[0]), np.append(ey, ey[0]), 'b-', lw=0.4)
if element_labels:
va = 'center'
ha = 'center'
plt.text(xt, yt, f'{ele_tag}', va=va, ha=ha, color='red')
if node_labels:
for node_tag in node_tags:
if not offset_nd_label == 'above':
offset_nd_label_x, offset_nd_label_y = _offnl, _offnl
va = 'bottom'
# va = 'center'
ha = 'left'
else:
offset_nd_label_x, offset_nd_label_y = 0.0, _offnl
va = 'bottom'
ha = 'center'
plt.text(ops.nodeCoord(node_tag)[0]+offset_nd_label_x,
ops.nodeCoord(node_tag)[1]+offset_nd_label_y,
f'{node_tag}', va=va, ha=ha, color='blue')
plt.axis('equal')
def _plot_model_3d(node_labels, element_labels, offset_nd_label, axis_off,
az_el, fig_wi_he, fig_lbrt):
node_tags = ops.getNodeTags()
ele_tags = ops.getEleTags()
azim, elev = az_el
fig_wi, fig_he = fig_wi_he
fleft, fbottom, fright, ftop = fig_lbrt
fig = plt.figure(figsize=(fig_wi/2.54, fig_he/2.54))
fig.subplots_adjust(left=.08, bottom=.08, right=.985, top=.94)
ax = fig.add_subplot(111, projection=Axes3D.name)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.view_init(azim=azim, elev=elev)
max_x_crd, max_y_crd, max_z_crd, max_crd = -np.inf, -np.inf, \
-np.inf, -np.inf
nen = np.shape(ops.eleNodes(ele_tags[0]))[0]
# truss and beam/frame elements
if nen == 2:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
z_crd = ops.nodeCoord(node_tag)[2]
if x_crd > max_x_crd:
max_x_crd = x_crd
if y_crd > max_y_crd:
max_y_crd = y_crd
if z_crd > max_z_crd:
max_z_crd = z_crd
if offset_nd_label == 0 or offset_nd_label == 0.:
_offset = 0.
else:
max_crd = np.amax([max_x_crd, max_y_crd, max_z_crd])
_offset = 0.005 * max_crd
# # work-around fix because of aspect equal bug
# _max_overall = 1.1*max_crd
# _min_overall = -0.1*max_crd
# ax.set_xlim(_min_overall, _max_overall)
# ax.set_ylim(_min_overall, _max_overall)
# ax.set_zlim(_min_overall, _max_overall)
for i, ele_tag in enumerate(ele_tags):
nd1, nd2 = ops.eleNodes(ele_tag)
# element node1-node2, x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0], ops.nodeCoord(nd2)[0]])
ey = np.array([ops.nodeCoord(nd1)[1], ops.nodeCoord(nd2)[1]])
ez = np.array([ops.nodeCoord(nd1)[2], ops.nodeCoord(nd2)[2]])
# location of label
xt = sum(ex)/nen
yt = sum(ey)/nen
zt = sum(ez)/nen
ax.plot(ex, ey, ez, 'bo-')
# fixme: placement of node_tag labels
if element_labels:
if ex[1]-ex[0] == 0:
va = 'center'
ha = 'left'
offset_x, offset_y, offset_z = _offset, 0.0, 0.0
elif ey[1]-ey[0] == 0:
va = 'bottom'
ha = 'center'
offset_x, offset_y, offset_z = 0.0, _offset, 0.0
elif ez[1]-ez[0] == 0:
va = 'bottom'
ha = 'center'
offset_x, offset_y, offset_z = 0.0, 0.0, _offset
else:
va = 'bottom'
ha = 'left'
offset_x, offset_y, offset_z = 0.03, 0.03, 0.03
ax.text(xt+offset_x, yt+offset_y, zt+offset_z, f'{ele_tag}',
va=va, ha=ha, color='red')
if node_labels:
for node_tag in node_tags:
ax.text(ops.nodeCoord(node_tag)[0]+_offset,
ops.nodeCoord(node_tag)[1]+_offset,
ops.nodeCoord(node_tag)[2]+_offset,
f'{node_tag}', va='bottom', ha='left', color='blue')
# quad in 3d
elif nen == 4:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
z_crd = ops.nodeCoord(node_tag)[2]
if x_crd > max_x_crd:
max_x_crd = x_crd
if y_crd > max_y_crd:
max_y_crd = y_crd
if z_crd > max_z_crd:
max_z_crd = z_crd
# ax.plot(np.array([x_crd]),
# np.array([y_crd]),
# np.array([z_crd]), 'ro')
max_crd = np.amax([max_x_crd, max_y_crd, max_z_crd])
_offset = 0.002 * max_crd
for i, ele_tag in enumerate(ele_tags):
nd1, nd2, nd3, nd4 = ops.eleNodes(ele_tag)
# element node1-node2, x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0],
ops.nodeCoord(nd3)[0],
ops.nodeCoord(nd4)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1],
ops.nodeCoord(nd3)[1],
ops.nodeCoord(nd4)[1]])
ez = np.array([ops.nodeCoord(nd1)[2],
ops.nodeCoord(nd2)[2],
ops.nodeCoord(nd3)[2],
ops.nodeCoord(nd4)[2]])
# location of label
xt = sum(ex)/nen
yt = sum(ey)/nen
zt = sum(ez)/nen
ax.plot(np.append(ex, ex[0]),
np.append(ey, ey[0]),
np.append(ez, ez[0]), 'bo-')
# fixme: placement of node_tag labels
if element_labels:
if ex[1]-ex[0] == 0:
va = 'center'
ha = 'left'
offset_x, offset_y, offset_z = _offset, 0.0, 0.0
elif ey[1]-ey[0] == 0:
va = 'bottom'
ha = 'center'
offset_x, offset_y, offset_z = 0.0, _offset, 0.0
elif ez[1]-ez[0] == 0:
va = 'bottom'
ha = 'center'
offset_x, offset_y, offset_z = 0.0, 0.0, _offset
else:
va = 'bottom'
ha = 'left'
offset_x, offset_y, offset_z = 0.03, 0.03, 0.03
ax.text(xt+offset_x, yt+offset_y, zt+offset_z, f'{ele_tag}',
va=va, ha=ha, color='red')
if node_labels:
for node_tag in node_tags:
ax.text(ops.nodeCoord(node_tag)[0]+_offset,
ops.nodeCoord(node_tag)[1]+_offset,
ops.nodeCoord(node_tag)[2]+_offset,
f'{node_tag}', va='bottom', ha='left', color='blue')
# 8-node brick, 3d model
elif nen == 8:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
z_crd = ops.nodeCoord(node_tag)[2]
if x_crd > max_x_crd:
max_x_crd = x_crd
if y_crd > max_y_crd:
max_y_crd = y_crd
if z_crd > max_z_crd:
max_z_crd = z_crd
# ax.plot(np.array([x_crd]),
# np.array([y_crd]),
# np.array([z_crd]), 'ro')
max_crd = np.amax([max_x_crd, max_y_crd, max_z_crd])
_offset = 0.005 * max_crd
# work-around fix because of aspect equal bug
_max_overall = 1.1*max_crd
_min_overall = -0.1*max_crd
ax.set_xlim(_min_overall, _max_overall)
ax.set_ylim(_min_overall, _max_overall)
ax.set_zlim(_min_overall, _max_overall)
for i, ele_tag in enumerate(ele_tags):
nd1, nd2, nd3, nd4, nd5, nd6, nd7, nd8 = ops.eleNodes(ele_tag)
# element node1-node2, x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0],
ops.nodeCoord(nd3)[0],
ops.nodeCoord(nd4)[0],
ops.nodeCoord(nd5)[0],
ops.nodeCoord(nd6)[0],
ops.nodeCoord(nd7)[0],
ops.nodeCoord(nd8)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1],
ops.nodeCoord(nd3)[1],
ops.nodeCoord(nd4)[1],
ops.nodeCoord(nd5)[1],
ops.nodeCoord(nd6)[1],
ops.nodeCoord(nd7)[1],
ops.nodeCoord(nd8)[1]])
ez = np.array([ops.nodeCoord(nd1)[2],
ops.nodeCoord(nd2)[2],
ops.nodeCoord(nd3)[2],
ops.nodeCoord(nd4)[2],
ops.nodeCoord(nd5)[2],
ops.nodeCoord(nd6)[2],
ops.nodeCoord(nd7)[2],
ops.nodeCoord(nd8)[2]])
# location of label
xt = sum(ex)/nen
yt = sum(ey)/nen
zt = sum(ez)/nen
ax.plot(np.append(ex[0:4], ex[0]),
np.append(ey[0:4], ey[0]),
np.append(ez[0:4], ez[0]), 'bo-')
ax.plot(np.append(ex[4:8], ex[4]),
np.append(ey[4:8], ey[4]),
np.append(ez[4:8], ez[4]), 'bo-')
ax.plot(np.array([ex[0], ex[4]]),
np.array([ey[0], ey[4]]),
np.array([ez[0], ez[4]]), 'bo-')
ax.plot(np.array([ex[1], ex[5]]),
np.array([ey[1], ey[5]]),
np.array([ez[1], ez[5]]), 'bo-')
ax.plot(np.array([ex[2], ex[6]]),
np.array([ey[2], ey[6]]),
np.array([ez[2], ez[6]]), 'bo-')
ax.plot(np.array([ex[3], ex[7]]),
np.array([ey[3], ey[7]]),
np.array([ez[3], ez[7]]), 'bo-')
# fixme: placement of node_tag labels
if element_labels:
if ex[1]-ex[0] == 0:
va = 'center'
ha = 'left'
offset_x, offset_y, offset_z = _offset, 0.0, 0.0
elif ey[1]-ey[0] == 0:
va = 'bottom'
ha = 'center'
offset_x, offset_y, offset_z = 0.0, _offset, 0.0
elif ez[1]-ez[0] == 0:
va = 'bottom'
ha = 'center'
offset_x, offset_y, offset_z = 0.0, 0.0, _offset
else:
va = 'bottom'
ha = 'left'
offset_x, offset_y, offset_z = 0.03, 0.03, 0.03
ax.text(xt+offset_x, yt+offset_y, zt+offset_z, f'{ele_tag}',
va=va, ha=ha, color='red')
if node_labels:
for node_tag in node_tags:
ax.text(ops.nodeCoord(node_tag)[0]+_offset,
ops.nodeCoord(node_tag)[1]+_offset,
ops.nodeCoord(node_tag)[2]+_offset,
f'{node_tag}', va='bottom', ha='left', color='blue')
def plot_model(node_labels=1, element_labels=1, offset_nd_label=False,
axis_off=0, az_el=az_el, fig_wi_he=fig_wi_he,
fig_lbrt=fig_lbrt):
"""Plot defined model of the structure.
Args:
node_labels (int): 1 - plot node labels, 0 - do not plot them;
(default: 1)
element_labels (int): 1 - plot element labels, 0 - do not plot
them; (default: 1)
offset_nd_label (bool): False - do not offset node labels from the
actual node location; offsetting the labels can enhance their
visibility.
axis_off (int): 1 - turn the axes off, 0 - display the axes;
(default: 0)
az_el (tuple): contains azimuth and elevation for 3d plots
fig_wi_he (tuple): contains width and height of the figure
fig_lbrt (tuple): a tuple containing the left, bottom, right and top offsets
Usage:
``plot_model()`` - plot the model with node and element labels using
the default parameters.
``plot_model(node_labels=0, element_labels=0)`` - plot the model
without node and element labels.
``plot_model(axis_off=1)`` - plot the model with the axes turned off.
"""
# az_el - azimuth, elevation used for 3d plots only
node_tags = ops.getNodeTags()
ndim = np.shape(ops.nodeCoord(node_tags[0]))[0]
if ndim == 2:
_plot_model_2d(node_labels, element_labels, offset_nd_label, axis_off)
if axis_off:
plt.axis('off')
elif ndim == 3:
_plot_model_3d(node_labels, element_labels, offset_nd_label, axis_off,
az_el, fig_wi_he, fig_lbrt)
if axis_off:
plt.axis('off')
else:
print(f'\nWarning! ndim: {ndim} not supported yet.')
# plt.show() # call this from main py file for more control
def _plot_defo_mode_2d(modeNo, sfac, nep, unDefoFlag, fmt_undefo, interpFlag,
endDispFlag, fmt_interp, fmt_nodes):
ele_tags = ops.getEleTags()
nen = np.shape(ops.eleNodes(ele_tags[0]))[0]
# truss and beam/frame elements
if nen == 2:
ndf = np.shape(ops.nodeDOFs(ops.eleNodes(ele_tags[0])[0]))[0]
# truss element
if ndf == 2:
for ele_tag in ele_tags:
nd1, nd2 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1]])
if modeNo:
eux = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
ops.nodeEigenvector(nd2, modeNo)[0]])
euy = np.array([ops.nodeEigenvector(nd1, modeNo)[1],
ops.nodeEigenvector(nd2, modeNo)[1]])
else:
eux = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd2)[0]])
euy = np.array([ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd2)[1]])
# displaced element coordinates (scaled by sfac factor)
edx = np.array([ex[0] + sfac*eux[0], ex[1] + sfac*eux[1]])
edy = np.array([ey[0] + sfac*euy[0], ey[1] + sfac*euy[1]])
if unDefoFlag:
plt.plot(ex, ey, fmt_undefo)
plt.plot(edx, edy, fmt_interp)
# beam/frame element
elif ndf == 3:
for ele_tag in ele_tags:
nd1, nd2 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1]])
if modeNo:
ed = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
ops.nodeEigenvector(nd1, modeNo)[1],
ops.nodeEigenvector(nd1, modeNo)[2],
ops.nodeEigenvector(nd2, modeNo)[0],
ops.nodeEigenvector(nd2, modeNo)[1],
ops.nodeEigenvector(nd2, modeNo)[2]])
else:
ed = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd1)[2],
ops.nodeDisp(nd2)[0],
ops.nodeDisp(nd2)[1],
ops.nodeDisp(nd2)[2]])
if unDefoFlag:
plt.plot(ex, ey, fmt_undefo)
# interpolated displacement field
if interpFlag:
xcdi, ycdi = beam_defo_interp_2d(ex, ey, ed, sfac, nep)
plt.plot(xcdi, ycdi, fmt_interp)
# translations of ends
if endDispFlag:
xdi, ydi = beam_disp_ends(ex, ey, ed, sfac)
plt.plot(xdi, ydi, fmt_nodes)
plt.axis('equal')
# plt.show() # call this from main py file for more control
# 2d triangular (tri31) elements
elif nen == 3:
for ele_tag in ele_tags:
nd1, nd2, nd3 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0],
ops.nodeCoord(nd3)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1],
ops.nodeCoord(nd3)[1]])
if modeNo:
ed = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
ops.nodeEigenvector(nd1, modeNo)[1],
ops.nodeEigenvector(nd2, modeNo)[0],
ops.nodeEigenvector(nd2, modeNo)[1],
ops.nodeEigenvector(nd3, modeNo)[0],
ops.nodeEigenvector(nd3, modeNo)[1]])
else:
ed = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd2)[0],
ops.nodeDisp(nd2)[1],
ops.nodeDisp(nd3)[0],
ops.nodeDisp(nd3)[1]])
if unDefoFlag:
plt.plot(np.append(ex, ex[0]), np.append(ey, ey[0]),
fmt_undefo)
# xcdi, ycdi = beam_defo_interp_2d(ex, ey, ed, sfac, nep)
# xdi, ydi = beam_disp_ends(ex, ey, ed, sfac)
# # interpolated displacement field
# plt.plot(xcdi, ycdi, 'b.-')
# # translations of ends only
# plt.plot(xdi, ydi, 'ro')
# xc = [x, x[0, :]]
# yc = [x, x[0, :]]
# test it with one element
x = ex+sfac*ed[[0, 2, 4]]
y = ey+sfac*ed[[1, 3, 5]]
# x = ex+sfac*ed[[0, 2, 4, 6]]
# y = ey+sfac*ed[[1, 3, 5, 7]]
plt.plot(np.append(x, x[0]), np.append(y, y[0]), 'b.-')
plt.axis('equal')
# 2d quadrilateral (quad) elements
elif nen == 4:
for ele_tag in ele_tags:
nd1, nd2, nd3, nd4 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0],
ops.nodeCoord(nd3)[0],
ops.nodeCoord(nd4)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1],
ops.nodeCoord(nd3)[1],
ops.nodeCoord(nd4)[1]])
if modeNo:
ed = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
ops.nodeEigenvector(nd1, modeNo)[1],
ops.nodeEigenvector(nd2, modeNo)[0],
ops.nodeEigenvector(nd2, modeNo)[1],
ops.nodeEigenvector(nd3, modeNo)[0],
ops.nodeEigenvector(nd3, modeNo)[1],
ops.nodeEigenvector(nd4, modeNo)[0],
ops.nodeEigenvector(nd4, modeNo)[1]])
else:
ed = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd2)[0],
ops.nodeDisp(nd2)[1],
ops.nodeDisp(nd3)[0],
ops.nodeDisp(nd3)[1],
ops.nodeDisp(nd4)[0],
ops.nodeDisp(nd4)[1]])
if unDefoFlag:
plt.plot(np.append(ex, ex[0]), np.append(ey, ey[0]),
fmt_undefo)
# xcdi, ycdi = beam_defo_interp_2d(ex, ey, ed, sfac, nep)
# xdi, ydi = beam_disp_ends(ex, ey, ed, sfac)
# # interpolated displacement field
# plt.plot(xcdi, ycdi, 'b.-')
# # translations of ends only
# plt.plot(xdi, ydi, 'ro')
# test it with one element
x = ex+sfac*ed[[0, 2, 4, 6]]
y = ey+sfac*ed[[1, 3, 5, 7]]
plt.plot(np.append(x, x[0]), np.append(y, y[0]), 'b.-')
plt.axis('equal')
# 2d 8-node quadratic elements
# elif nen == 8:
# x = ex+sfac*ed[:, [0, 2, 4, 6, 8, 10, 12, 14]]
# y = ex+sfac*ed[:, [1, 3, 5, 7, 9, 11, 13, 15]]
# t = -1
# n = 0
# for s in range(-1, 1.4, 0.4):
# n += 1
# ...
else:
print(f'\nWarning! Elements not supported yet. nen: {nen}; must be: 2, 3, 4, 8.') # noqa: E501
def _plot_defo_mode_3d(modeNo, sfac, nep, unDefoFlag, fmt_undefo, interpFlag,
endDispFlag, fmt_interp, fmt_nodes, az_el, fig_wi_he,
fig_lbrt):
ele_tags = ops.getEleTags()
azim, elev = az_el
fig_wi, fig_he = fig_wi_he
fleft, fbottom, fright, ftop = fig_lbrt
fig = plt.figure(figsize=(fig_wi/2.54, fig_he/2.54))
fig.subplots_adjust(left=.08, bottom=.08, right=.985, top=.94)
ax = fig.add_subplot(111, projection=Axes3D.name)
# ax.axis('equal')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.view_init(azim=azim, elev=elev)
nen = np.shape(ops.eleNodes(ele_tags[0]))[0]
# plot: truss and beam/frame elements in 3d
if nen == 2:
ndf = np.shape(ops.nodeDOFs(ops.eleNodes(ele_tags[0])[0]))[0]
# plot: beam/frame element in 3d
if ndf == 6:
for i, ele_tag in enumerate(ele_tags):
nd1, nd2 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1]])
ez = np.array([ops.nodeCoord(nd1)[2],
ops.nodeCoord(nd2)[2]])
if modeNo:
ed = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
ops.nodeEigenvector(nd1, modeNo)[1],
ops.nodeEigenvector(nd1, modeNo)[2],
ops.nodeEigenvector(nd1, modeNo)[3],
ops.nodeEigenvector(nd1, modeNo)[4],
ops.nodeEigenvector(nd1, modeNo)[5],
ops.nodeEigenvector(nd2, modeNo)[0],
ops.nodeEigenvector(nd2, modeNo)[1],
ops.nodeEigenvector(nd2, modeNo)[2],
ops.nodeEigenvector(nd2, modeNo)[3],
ops.nodeEigenvector(nd2, modeNo)[4],
ops.nodeEigenvector(nd2, modeNo)[5]])
else:
ed = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd1)[2],
ops.nodeDisp(nd1)[3],
ops.nodeDisp(nd1)[4],
ops.nodeDisp(nd1)[5],
ops.nodeDisp(nd2)[0],
ops.nodeDisp(nd2)[1],
ops.nodeDisp(nd2)[2],
ops.nodeDisp(nd2)[3],
ops.nodeDisp(nd2)[4],
ops.nodeDisp(nd2)[5]])
# eo = Eo[i, :]
xloc = ops.eleResponse(ele_tag, 'xlocal')
yloc = ops.eleResponse(ele_tag, 'ylocal')
zloc = ops.eleResponse(ele_tag, 'zlocal')
g = np.vstack((xloc, yloc, zloc))
if unDefoFlag:
plt.plot(ex, ey, ez, fmt_undefo)
# interpolated displacement field
if interpFlag:
xcd, ycd, zcd = beam_defo_interp_3d(ex, ey, ez, g,
ed, sfac, nep)
ax.plot(xcd, ycd, zcd, fmt_interp)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
# translations of ends
if endDispFlag:
xd, yd, zd = beam_disp_ends3d(ex, ey, ez, ed, sfac)
ax.plot(xd, yd, zd, fmt_nodes)
# # work-around fix because of aspect equal bug
# xmin, xmax = ax.get_xlim()
# ymin, ymax = ax.get_ylim()
# zmin, zmax = ax.get_zlim()
# min_overall = np.amax([np.abs(xmin), np.abs(ymin), np.abs(zmin)])
# max_overall = np.amax([np.abs(xmax), np.abs(ymax), np.abs(zmax)])
# minmax_overall = max(min_overall, max_overall)
# _max_overall = 1.1 * minmax_overall
# _min_overall = -1.1 * minmax_overall
# ax.set_xlim(_min_overall, _max_overall)
# ax.set_ylim(_min_overall, _max_overall)
# # ax.set_zlim(_min_overall, _max_overall)
# ax.set_zlim(0.0, _max_overall)
# plot: quad in 3d
elif nen == 4:
ndf = np.shape(ops.nodeDOFs(ops.eleNodes(ele_tags[0])[0]))[0]
# plot: shell in 3d
if ndf == 6:
for i, ele_tag in enumerate(ele_tags):
nd1, nd2, nd3, nd4 = ops.eleNodes(ele_tag)
# element node1-node2, x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0],
ops.nodeCoord(nd3)[0],
ops.nodeCoord(nd4)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1],
ops.nodeCoord(nd3)[1],
ops.nodeCoord(nd4)[1]])
ez = np.array([ops.nodeCoord(nd1)[2],
ops.nodeCoord(nd2)[2],
ops.nodeCoord(nd3)[2],
ops.nodeCoord(nd4)[2]])
if modeNo:
ed = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
ops.nodeEigenvector(nd1, modeNo)[1],
ops.nodeEigenvector(nd1, modeNo)[2],
ops.nodeEigenvector(nd2, modeNo)[0],
ops.nodeEigenvector(nd2, modeNo)[1],
ops.nodeEigenvector(nd2, modeNo)[2],
ops.nodeEigenvector(nd3, modeNo)[0],
ops.nodeEigenvector(nd3, modeNo)[1],
ops.nodeEigenvector(nd3, modeNo)[2],
ops.nodeEigenvector(nd4, modeNo)[0],
ops.nodeEigenvector(nd4, modeNo)[1],
ops.nodeEigenvector(nd4, modeNo)[2]])
else:
ed = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd1)[2],
ops.nodeDisp(nd2)[0],
ops.nodeDisp(nd2)[1],
ops.nodeDisp(nd2)[2],
ops.nodeDisp(nd3)[0],
ops.nodeDisp(nd3)[1],
ops.nodeDisp(nd3)[2],
ops.nodeDisp(nd4)[0],
ops.nodeDisp(nd4)[1],
ops.nodeDisp(nd4)[2]])
if unDefoFlag:
ax.plot(np.append(ex, ex[0]),
np.append(ey, ey[0]),
np.append(ez, ez[0]),
fmt_undefo)
x = ex+sfac*ed[[0, 3, 6, 9]]
y = ey+sfac*ed[[1, 4, 7, 10]]
z = ez+sfac*ed[[2, 5, 8, 11]]
# ax.plot(np.append(x, x[0]),
# np.append(y, y[0]),
# np.append(z, z[0]),
# 'b.-')
# ax.axis('equal')
pts = [[x[0], y[0], z[0]],
[x[1], y[1], z[1]],
[x[2], y[2], z[2]],
[x[3], y[3], z[3]]]
verts = [[pts[0], pts[1], pts[2], pts[3]]]
ax.add_collection3d(Poly3DCollection(verts, linewidths=1,
edgecolors='k',
alpha=.25))
ax.scatter(x, y, z, s=0)
# 8-node brick, 3d model
elif nen == 8:
for i, ele_tag in enumerate(ele_tags):
nd1, nd2, nd3, nd4, nd5, nd6, nd7, nd8 = ops.eleNodes(ele_tag)
# element node1-node2, x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0],
ops.nodeCoord(nd3)[0],
ops.nodeCoord(nd4)[0],
ops.nodeCoord(nd5)[0],
ops.nodeCoord(nd6)[0],
ops.nodeCoord(nd7)[0],
ops.nodeCoord(nd8)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1],
ops.nodeCoord(nd3)[1],
ops.nodeCoord(nd4)[1],
ops.nodeCoord(nd5)[1],
ops.nodeCoord(nd6)[1],
ops.nodeCoord(nd7)[1],
ops.nodeCoord(nd8)[1]])
ez = np.array([ops.nodeCoord(nd1)[2],
ops.nodeCoord(nd2)[2],
ops.nodeCoord(nd3)[2],
ops.nodeCoord(nd4)[2],
ops.nodeCoord(nd5)[2],
ops.nodeCoord(nd6)[2],
ops.nodeCoord(nd7)[2],
ops.nodeCoord(nd8)[2]])
if modeNo:
ed = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
ops.nodeEigenvector(nd1, modeNo)[1],
ops.nodeEigenvector(nd1, modeNo)[2],
ops.nodeEigenvector(nd2, modeNo)[0],
ops.nodeEigenvector(nd2, modeNo)[1],
ops.nodeEigenvector(nd2, modeNo)[2],
ops.nodeEigenvector(nd3, modeNo)[0],
ops.nodeEigenvector(nd3, modeNo)[1],
ops.nodeEigenvector(nd3, modeNo)[2],
ops.nodeEigenvector(nd4, modeNo)[0],
ops.nodeEigenvector(nd4, modeNo)[1],
ops.nodeEigenvector(nd4, modeNo)[2],
ops.nodeEigenvector(nd5, modeNo)[0],
ops.nodeEigenvector(nd5, modeNo)[1],
ops.nodeEigenvector(nd5, modeNo)[2],
ops.nodeEigenvector(nd6, modeNo)[0],
ops.nodeEigenvector(nd6, modeNo)[1],
ops.nodeEigenvector(nd6, modeNo)[2],
ops.nodeEigenvector(nd7, modeNo)[0],
ops.nodeEigenvector(nd7, modeNo)[1],
ops.nodeEigenvector(nd7, modeNo)[2],
ops.nodeEigenvector(nd8, modeNo)[0],
ops.nodeEigenvector(nd8, modeNo)[1],
ops.nodeEigenvector(nd8, modeNo)[2]])
else:
ed = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd1)[2],
ops.nodeDisp(nd2)[0],
ops.nodeDisp(nd2)[1],
ops.nodeDisp(nd2)[2],
ops.nodeDisp(nd3)[0],
ops.nodeDisp(nd3)[1],
ops.nodeDisp(nd3)[2],
ops.nodeDisp(nd4)[0],
ops.nodeDisp(nd4)[1],
ops.nodeDisp(nd4)[2],
ops.nodeDisp(nd5)[0],
ops.nodeDisp(nd5)[1],
ops.nodeDisp(nd5)[2],
ops.nodeDisp(nd6)[0],
ops.nodeDisp(nd6)[1],
ops.nodeDisp(nd6)[2],
ops.nodeDisp(nd7)[0],
ops.nodeDisp(nd7)[1],
ops.nodeDisp(nd7)[2],
ops.nodeDisp(nd8)[0],
ops.nodeDisp(nd8)[1],
ops.nodeDisp(nd8)[2]])
if unDefoFlag:
ax.plot(np.append(ex[0:4], ex[0]),
np.append(ey[0:4], ey[0]),
np.append(ez[0:4], ez[0]), fmt_undefo)
ax.plot(np.append(ex[4:8], ex[4]),
np.append(ey[4:8], ey[4]),
np.append(ez[4:8], ez[4]), fmt_undefo)
ax.plot(np.array([ex[0], ex[4]]),
np.array([ey[0], ey[4]]),
np.array([ez[0], ez[4]]), fmt_undefo)
ax.plot(np.array([ex[1], ex[5]]),
np.array([ey[1], ey[5]]),
np.array([ez[1], ez[5]]), fmt_undefo)
ax.plot(np.array([ex[2], ex[6]]),
np.array([ey[2], ey[6]]),
np.array([ez[2], ez[6]]), fmt_undefo)
ax.plot(np.array([ex[3], ex[7]]),
np.array([ey[3], ey[7]]),
np.array([ez[3], ez[7]]), fmt_undefo)
x = ex+sfac*ed[[0, 3, 6, 9, 12, 15, 18, 21]]
y = ey+sfac*ed[[1, 4, 7, 10, 13, 16, 19, 22]]
z = ez+sfac*ed[[2, 5, 8, 11, 14, 17, 20, 23]]
ax.plot(np.append(x[:4], x[0]),
np.append(y[:4], y[0]),
np.append(z[:4], z[0]),
'b.-')
ax.plot(np.append(x[4:8], x[4]),
np.append(y[4:8], y[4]),
np.append(z[4:8], z[4]),
'b.-')
ax.plot(np.array([x[0], x[4]]),
np.array([y[0], y[4]]),
np.array([z[0], z[4]]), 'b.-')
ax.plot(np.array([x[1], x[5]]),
np.array([y[1], y[5]]),
np.array([z[1], z[5]]), 'b.-')
ax.plot(np.array([x[2], x[6]]),
np.array([y[2], y[6]]),
np.array([z[2], z[6]]), 'b.-')
ax.plot(np.array([x[3], x[7]]),
np.array([y[3], y[7]]),
np.array([z[3], z[7]]), 'b.-')
# ax.axis('equal')
# work-around fix because of aspect equal bug
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
zmin, zmax = ax.get_zlim()
min_overall = np.amax([np.abs(xmin), np.abs(ymin), np.abs(zmin)])
max_overall = np.amax([np.abs(xmax), np.abs(ymax), np.abs(zmax)])
minmax_overall = max(min_overall, max_overall)
_min_overall = -1.1 * minmax_overall
_max_overall = 1.1 * minmax_overall
ax.set_xlim(0.3*_min_overall, 0.3*_max_overall)
ax.set_ylim(0.3*_min_overall, 0.3*_max_overall)
# ax.set_zlim(_min_overall, _max_overall)
ax.set_zlim(0.0, _max_overall)
def plot_defo(sfac=False, nep=17, unDefoFlag=1, fmt_undefo=fmt_undefo,
interpFlag=1, endDispFlag=1, fmt_interp=fmt_interp,
fmt_nodes=fmt_nodes, Eo=0, az_el=az_el, fig_wi_he=fig_wi_he,
fig_lbrt=fig_lbrt):
"""Plot deformed shape of the structure.
Args:
sfac (float): scale factor to increase/decrease displacements obtained
from FE analysis. If not specified (False), sfac is calculated
automatically so that the maximum overall displacement is plotted as
10 percent (hardcoded ratio) of the maximum model dimension.
interpFlag (int): 1 - use interpolated deformation using shape
function, 0 - do not use interpolation, just show displaced element
nodes (default is 1)
nep (int): number of evaluation points for shape function interpolation
(default: 17)
Usage:
``plot_defo()`` - plot deformed shape with default parameters and
automatically calculated scale factor.
``plot_defo(interpFlag=0)`` - plot simplified deformation: the displaced
element end nodes are connected with straight lines (no shape function
interpolation).
``plot_defo(sfac=1.5)`` - plot with specified scale factor
``plot_defo(unDefoFlag=0, endDispFlag=0)`` - plot without showing
undeformed (original) mesh and without showing markers at the
element ends.
"""
node_tags = ops.getNodeTags()
# calculate sfac
min_x, min_y, min_z = np.inf, np.inf, np.inf
max_x, max_y, max_z = -np.inf, -np.inf, -np.inf
max_ux, max_uy, max_uz = -np.inf, -np.inf, -np.inf
ratio = 0.1
ndim = np.shape(ops.nodeCoord(node_tags[0]))[0]
if ndim == 2:
if not sfac:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
ux = ops.nodeDisp(node_tag)[0]
uy = ops.nodeDisp(node_tag)[1]
min_x = min(min_x, x_crd)
min_y = min(min_y, y_crd)
max_x = max(max_x, x_crd)
max_y = max(max_y, y_crd)
max_ux = max(max_ux, np.abs(ux))
max_uy = max(max_uy, np.abs(uy))
dxmax = max_x - min_x
dymax = max_y - min_y
dlmax = max(dxmax, dymax)
edmax = max(max_ux, max_uy)
sfac = ratio * dlmax/edmax
if sfac > 1000.:
print("""\nWarning!\nsfac is quite large - perhaps try to specify \
sfac value yourself.
This usually happens when translational DOFs are too small\n\n""")
_plot_defo_mode_2d(0, sfac, nep, unDefoFlag, fmt_undefo, interpFlag,
endDispFlag, fmt_interp, fmt_nodes)
elif ndim == 3:
if not sfac:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
z_crd = ops.nodeCoord(node_tag)[2]
ux = ops.nodeDisp(node_tag)[0]
uy = ops.nodeDisp(node_tag)[1]
uz = ops.nodeDisp(node_tag)[2]
min_x = min(min_x, x_crd)
min_y = min(min_y, y_crd)
min_z = min(min_z, z_crd)
max_x = max(max_x, x_crd)
max_y = max(max_y, y_crd)
max_z = max(max_z, z_crd)
max_ux = max(max_ux, np.abs(ux))
max_uy = max(max_uy, np.abs(uy))
max_uz = max(max_uz, np.abs(uz))
dxmax = max_x - min_x
dymax = max_y - min_y
dzmax = max_z - min_z
dlmax = max(dxmax, dymax, dzmax)
edmax = max(max_ux, max_uy, max_uz)
sfac = ratio * dlmax/edmax
_plot_defo_mode_3d(0, sfac, nep, unDefoFlag, fmt_undefo, interpFlag,
endDispFlag, fmt_interp, fmt_nodes, az_el,
fig_wi_he, fig_lbrt)
else:
print(f'\nWarning! ndim: {ndim} not supported yet.')
def _anim_mode_2d(modeNo, sfac, nep, unDefoFlag, fmt_undefo, interpFlag,
endDispFlag, fmt_interp, fmt_nodes, fig_wi_he, xlim, ylim,
lw):
fig_wi, fig_he = fig_wi_he
ele_tags = ops.getEleTags()
nen = np.shape(ops.eleNodes(ele_tags[0]))[0]
# truss and beam/frame elements
if nen == 2:
ndf = np.shape(ops.nodeDOFs(ops.eleNodes(ele_tags[0])[0]))[0]
# truss element
if ndf == 2:
for ele_tag in ele_tags:
nd1, nd2 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1]])
if modeNo:
eux = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
ops.nodeEigenvector(nd2, modeNo)[0]])
euy = np.array([ops.nodeEigenvector(nd1, modeNo)[1],
ops.nodeEigenvector(nd2, modeNo)[1]])
else:
eux = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd2)[0]])
euy = np.array([ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd2)[1]])
# displaced element coordinates (scaled by sfac factor)
edx = np.array([ex[0] + sfac*eux[0], ex[1] + sfac*eux[1]])
edy = np.array([ey[0] + sfac*euy[0], ey[1] + sfac*euy[1]])
if unDefoFlag:
plt.plot(ex, ey, fmt_undefo)
plt.plot(edx, edy, fmt_interp)
# beam/frame element anim eigen
elif ndf == 3:
fig, ax = plt.subplots(figsize=(fig_wi/2.54, fig_he/2.54))
ax.axis('equal')
ax.set_xlim(xlim[0], xlim[1])
ax.set_ylim(ylim[0], ylim[1])
nel = len(ele_tags)
Ex = np.zeros((nel, 2))
Ey = np.zeros((nel, 2))
Ed = np.zeros((nel, 6))
# time vector for one cycle (period)
n_frames = 32 + 1
t = np.linspace(0., 2*np.pi, n_frames)
lines = []
for i, ele_tag in enumerate(ele_tags):
nd1, nd2 = ops.eleNodes(ele_tag)
# element x, y coordinates
Ex[i, :] = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0]])
Ey[i, :] = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1]])
Ed[i, :] = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
ops.nodeEigenvector(nd1, modeNo)[1],
ops.nodeEigenvector(nd1, modeNo)[2],
ops.nodeEigenvector(nd2, modeNo)[0],
ops.nodeEigenvector(nd2, modeNo)[1],
ops.nodeEigenvector(nd2, modeNo)[2]])
lines.append(ax.plot([], [], fmt_nodes, lw=lw)[0])
def init():
for j, ele_tag in enumerate(ele_tags):
lines[j].set_data([], [])
return lines
def animate(i):
for j, ele_tag in enumerate(ele_tags):
if interpFlag:
xcdi, ycdi = beam_defo_interp_2d(Ex[j, :],
Ey[j, :],
Ed[j, :],
sfac*np.cos(t[i]),
nep)
lines[j].set_data(xcdi, ycdi)
else:
xdi, ydi = beam_disp_ends(Ex[j, :], Ey[j, :], Ed[j, :],
sfac*np.cos(t[i]))
lines[j].set_data(xdi, ydi)
# plt.plot(xcdi, ycdi, fmt_interp)
return lines
# keep a reference to the animation object; otherwise it can be garbage
# collected and nothing is animated
anim = FuncAnimation(fig, animate, init_func=init,
frames=n_frames, interval=50, blit=True)
return anim
# plt.axis('equal')
# plt.show() # call this from main py file for more control
# 2d triangular elements - todo
# elif nen == 3:
# x = ex+sfac*ed[:, [0, 2, 4]]
# y = ex+sfac*ed[:, [1, 3, 5]]
# xc = [x, x[0, :]]
# yc = [x, x[0, :]]
# 2d quadrilateral (quad) elements
elif nen == 4:
for ele_tag in ele_tags:
nd1, nd2, nd3, nd4 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0],
ops.nodeCoord(nd3)[0],
ops.nodeCoord(nd4)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1],
ops.nodeCoord(nd3)[1],
ops.nodeCoord(nd4)[1]])
if modeNo:
ed = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
ops.nodeEigenvector(nd1, modeNo)[1],
ops.nodeEigenvector(nd2, modeNo)[0],
ops.nodeEigenvector(nd2, modeNo)[1],
ops.nodeEigenvector(nd3, modeNo)[0],
ops.nodeEigenvector(nd3, modeNo)[1],
ops.nodeEigenvector(nd4, modeNo)[0],
ops.nodeEigenvector(nd4, modeNo)[1]])
else:
ed = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd2)[0],
ops.nodeDisp(nd2)[1],
ops.nodeDisp(nd3)[0],
ops.nodeDisp(nd3)[1],
ops.nodeDisp(nd4)[0],
ops.nodeDisp(nd4)[1]])
if unDefoFlag:
plt.plot(np.append(ex, ex[0]), np.append(ey, ey[0]),
fmt_undefo)
# xcdi, ycdi = beam_defo_interp_2d(ex, ey, ed, sfac, nep)
# xdi, ydi = beam_disp_ends(ex, ey, ed, sfac)
# # interpolated displacement field
# plt.plot(xcdi, ycdi, 'b.-')
# # translations of ends only
# plt.plot(xdi, ydi, 'ro')
# test it with one element
x = ex+sfac*ed[[0, 2, 4, 6]]
y = ey+sfac*ed[[1, 3, 5, 7]]
plt.plot(np.append(x, x[0]), np.append(y, y[0]), 'b.-')
plt.axis('equal')
# 2d 8-node quadratic elements
# elif nen == 8:
# x = ex+sfac*ed[:, [0, 2, 4, 6, 8, 10, 12, 14]]
# y = ex+sfac*ed[:, [1, 3, 5, 7, 9, 11, 13, 15]]
# t = -1
# n = 0
# for s in range(-1, 1.4, 0.4):
# n += 1
# ...
else:
print(f'\nWarning! Elements not supported yet. nen: {nen}; must be: 2, 3, 4, 8.') # noqa: E501
def anim_mode(modeNo, sfac=False, nep=17, unDefoFlag=1, fmt_undefo=fmt_undefo,
interpFlag=1, endDispFlag=1, fmt_interp=fmt_interp,
fmt_nodes='b-', Eo=0, az_el=az_el, fig_wi_he=fig_wi_he,
fig_lbrt=fig_lbrt, xlim=[0, 1], ylim=[0, 1], lw=3.):
"""Make animation of a mode shape obtained from eigenvalue solution.
Args:
modeNo (int): indicates which mode shape to animate.
sfac (float): scale factor; if False (default), it is calculated
automatically from the mode shape amplitudes and the model size.
nep (integer): number of evaluation points inside the element and
including both element ends
unDefoFlag (integer): 1 - plot the undeformed model (mesh), 0 - do not
plot the mesh
interpFlag (integer): 1 - interpolate deformation inside element,
0 - no interpolation
endDispFlag (integer): 1 - plot marks at element ends, 0 - no marks
fmt_interp (string): format line string for interpolated (continuous)
deformed shape. The format contains information on line color,
style and marks as in the standard matplotlib plot function.
fmt_nodes (string): format string for the marks of element ends
az_el (tuple): a tuple containing the azimuth and elevation
fig_lbrt (tuple): a tuple containing the left, bottom, right and top offsets
fig_wi_he (tuple): contains width and height of the figure
Examples:
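A minimal sketch, assuming an eigenvalue analysis has already been run
(e.g. ``ops.eigen(2)``) and that the ``xlim``/``ylim`` values roughly
match the model extents (both values here are assumptions)::
    anim = opsv.anim_mode(1, xlim=[-1., 7.], ylim=[-1., 5.])
    plt.show()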
Notes:
See also:
"""
node_tags = ops.getNodeTags()
# calculate sfac
# min_x, min_y, min_z = np.inf, np.inf, np.inf
# max_x, max_y, max_z = -np.inf, -np.inf, -np.inf
# max_ux, max_uy, max_uz = -np.inf, -np.inf, -np.inf
min_x, min_y = np.inf, np.inf
max_x, max_y = -np.inf, -np.inf
max_ux, max_uy = -np.inf, -np.inf
ratio = 0.1
ndim = np.shape(ops.nodeCoord(node_tags[0]))[0]
if ndim == 2:
if not sfac:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
ux = ops.nodeEigenvector(node_tag, modeNo)[0]
uy = ops.nodeEigenvector(node_tag, modeNo)[1]
min_x = min(min_x, x_crd)
min_y = min(min_y, y_crd)
max_x = max(max_x, x_crd)
max_y = max(max_y, y_crd)
max_ux = max(max_ux, np.abs(ux))
max_uy = max(max_uy, np.abs(uy))
dxmax = max_x - min_x
dymax = max_y - min_y
dlmax = max(dxmax, dymax)
edmax = max(max_ux, max_uy)
sfac = ratio * dlmax/edmax
anim = _anim_mode_2d(modeNo, sfac, nep, unDefoFlag, fmt_undefo,
interpFlag, endDispFlag, fmt_interp, fmt_nodes,
fig_wi_he, xlim, ylim, lw)
return anim
# elif ndim == 3:
# if not sfac:
# for node_tag in node_tags:
# x_crd = ops.nodeCoord(node_tag)[0]
# y_crd = ops.nodeCoord(node_tag)[1]
# z_crd = ops.nodeCoord(node_tag)[2]
# ux = ops.nodeEigenvector(node_tag, modeNo)[0]
# uy = ops.nodeEigenvector(node_tag, modeNo)[1]
# uz = ops.nodeEigenvector(node_tag, modeNo)[2]
# min_x = min(min_x, x_crd)
# min_y = min(min_y, y_crd)
# min_z = min(min_z, z_crd)
# max_x = max(max_x, x_crd)
# max_y = max(max_y, y_crd)
# max_z = max(max_z, z_crd)
# max_ux = max(max_ux, np.abs(ux))
# max_uy = max(max_uy, np.abs(uy))
# max_uz = max(max_uz, np.abs(uz))
# dxmax = max_x - min_x
# dymax = max_y - min_y
# dzmax = max_z - min_z
# dlmax = max(dxmax, dymax, dzmax)
# edmax = max(max_ux, max_uy, max_uz)
# sfac = ratio * dlmax/edmax
# _plot_defo_mode_3d(modeNo, sfac, nep, unDefoFlag, fmt_undefo,
# interpFlag, endDispFlag, fmt_interp, fmt_nodes,
# Eo, az_el, fig_wi_he, fig_lbrt)
else:
print(f'\nWarning! ndim: {ndim} not supported yet.')
def plot_mode_shape(modeNo, sfac=False, nep=17, unDefoFlag=1,
fmt_undefo=fmt_undefo, interpFlag=1, endDispFlag=1,
fmt_interp=fmt_interp, fmt_nodes=fmt_nodes, Eo=0,
az_el=az_el, fig_wi_he=fig_wi_he, fig_lbrt=fig_lbrt):
"""Plot mode shape of the structure obtained from eigenvalue analysis.
Args:
modeNo (int): indicates which mode shape to plot
sfac (float): scale factor to increase/decrease displacements obtained
from FE analysis. If not specified (False), sfac is calculated
automatically so that the maximum overall displacement is plotted as
10 percent (hardcoded ratio) of the maximum model dimension.
interpFlag (int): 1 - use interpolated deformation using shape
function, 0 - do not use interpolation, just show displaced element
nodes (default is 1)
nep (int): number of evaluation points for shape function interpolation
(default: 17)
Usage:
``plot_mode_shape(1)`` - plot the first mode shape with default parameters
and automatically calculated scale factor.
``plot_mode_shape(2, interpFlag=0)`` - plot the 2nd mode shape by
displacing the element end nodes connected with straight lines (no shape
function interpolation).
``plot_mode_shape(3, sfac=1.5)`` - plot the 3rd mode shape with specified
scale factor
``plot_mode_shape(4, unDefoFlag=0, endDispFlag=0)`` - plot the 4th mode
shape without showing undeformed (original) mesh and without showing
markers at the element ends.
Examples:
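A minimal sketch, assuming an eigenvalue analysis has already been run
(e.g. ``ops.eigen(3)``)::
    opsv.plot_mode_shape(1)
    plt.show()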
Notes:
See also:
"""
node_tags = ops.getNodeTags()
# calculate sfac
min_x, min_y, min_z = np.inf, np.inf, np.inf
max_x, max_y, max_z = -np.inf, -np.inf, -np.inf
max_ux, max_uy, max_uz = -np.inf, -np.inf, -np.inf
ratio = 0.1
ndim = np.shape(ops.nodeCoord(node_tags[0]))[0]
if ndim == 2:
if not sfac:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
ux = ops.nodeEigenvector(node_tag, modeNo)[0]
uy = ops.nodeEigenvector(node_tag, modeNo)[1]
min_x = min(min_x, x_crd)
min_y = min(min_y, y_crd)
max_x = max(max_x, x_crd)
max_y = max(max_y, y_crd)
max_ux = max(max_ux, np.abs(ux))
max_uy = max(max_uy, np.abs(uy))
dxmax = max_x - min_x
dymax = max_y - min_y
dlmax = max(dxmax, dymax)
edmax = max(max_ux, max_uy)
sfac = ratio * dlmax/edmax
_plot_defo_mode_2d(modeNo, sfac, nep, unDefoFlag, fmt_undefo,
interpFlag, endDispFlag, fmt_interp, fmt_nodes)
elif ndim == 3:
if not sfac:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
z_crd = ops.nodeCoord(node_tag)[2]
ux = ops.nodeEigenvector(node_tag, modeNo)[0]
uy = ops.nodeEigenvector(node_tag, modeNo)[1]
uz = ops.nodeEigenvector(node_tag, modeNo)[2]
min_x = min(min_x, x_crd)
min_y = min(min_y, y_crd)
min_z = min(min_z, z_crd)
max_x = max(max_x, x_crd)
max_y = max(max_y, y_crd)
max_z = max(max_z, z_crd)
max_ux = max(max_ux, np.abs(ux))
max_uy = max(max_uy, np.abs(uy))
max_uz = max(max_uz, np.abs(uz))
dxmax = max_x - min_x
dymax = max_y - min_y
dzmax = max_z - min_z
dlmax = max(dxmax, dymax, dzmax)
edmax = max(max_ux, max_uy, max_uz)
sfac = ratio * dlmax/edmax
_plot_defo_mode_3d(modeNo, sfac, nep, unDefoFlag, fmt_undefo,
interpFlag, endDispFlag, fmt_interp, fmt_nodes,
az_el, fig_wi_he, fig_lbrt)
else:
print(f'\nWarning! ndim: {ndim} not supported yet.')
def rot_transf_3d(ex, ey, ez, g):
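# Return the 12x12 global-to-local transformation matrix G (a block-diagonal
# repetition of the 3x3 local-axes matrix g, whose rows are the local x, y, z
# unit vectors) and the element length L for a 3d beam with end coordinates
# ex, ey, ez.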
Lxyz = np.array([ex[1]-ex[0], ey[1]-ey[0], ez[1]-ez[0]])
L = np.sqrt(Lxyz @ Lxyz)
z = np.zeros((3, 3))
G = np.block([[g, z, z, z],
[z, g, z, z],
[z, z, g, z],
[z, z, z, g]])
return G, L
def beam_defo_interp_2d(ex, ey, u, sfac, nep=17):
"""
Interpolate element displacements at nep points.
Parameters:
ex, ey : element x, y coordinates,
u : element nodal displacements
sfac : scale factor for deformation plot
nep : number of evaluation points (including end nodes)
Returns:
crd_xc, crd_yc : x, y coordinates of interpolated (at nep points)
beam deformation required for plot_defo() function
"""
Lxy = np.array([ex[1]-ex[0], ey[1]-ey[0]])
L = np.sqrt(Lxy @ Lxy)
cosa, cosb = Lxy / L
G = np.array([[cosa, cosb, 0., 0., 0., 0.],
[-cosb, cosa, 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0.],
[0., 0., 0., cosa, cosb, 0.],
[0., 0., 0., -cosb, cosa, 0.],
[0., 0., 0., 0., 0., 1.]])
u_l = G @ u
xl = np.linspace(0., L, num=nep)
one = np.ones(xl.shape)
# longitudinal deformation (1)
N_a = np.column_stack((one - xl/L, xl/L))
u_ac = N_a @ np.array([u_l[0], u_l[3]])
# transverse deformation (2)
N_t = np.column_stack((one - 3*xl**2/L**2 + 2*xl**3/L**3,
xl - 2*xl**2/L + xl**3/L**2,
3*xl**2/L**2 - 2*xl**3/L**3,
-xl**2/L + xl**3/L**2))
u_tc = N_t @ np.array([u_l[1], u_l[2], u_l[4], u_l[5]])
# combined two row vectors
# 1-st vector longitudinal deformation (1)
# 2-nd vector transverse deformation (2)
u_atc = np.vstack((u_ac, u_tc))
# project longitudinal (u_ac) and transverse deformation
# (local u and v) to (global u and v)
G1 = np.array([[cosa, -cosb],
[cosb, cosa]])
u_xyc = G1 @ u_atc
# discretize element coordinates
# first row: x coordinates evenly spaced from ex[0] to ex[1]
# second row: y coordinates evenly spaced from ey[0] to ey[1]
xy_c = np.vstack((np.linspace(ex[0], ex[1], num=nep),
np.linspace(ey[0], ey[1], num=nep)))
# Continuous x, y displacement coordinates
crd_xc = xy_c[0, :] + sfac * u_xyc[0, :]
crd_yc = xy_c[1, :] + sfac * u_xyc[1, :]
# latex_array(ecrd_xc)
# latex_array(ecrd_yc)
return crd_xc, crd_yc
def beam_defo_interp_3d(ex, ey, ez, g, u, sfac, nep=17):
"""
3d beam version of beam_defo_interp_2d.
"""
G, L = rot_transf_3d(ex, ey, ez, g)
ul = G @ u
_, crd_yc = beam_defo_interp_2d(np.array([0., L]),
np.array([0., 0.]),
np.array([ul[0], ul[1], ul[5], ul[6],
ul[7], ul[11]]), sfac, nep)
crd_xc, crd_zc = beam_defo_interp_2d(np.array([0., L]),
np.array([0., 0.]),
np.array([ul[0], ul[2], -ul[4], ul[6],
ul[8], -ul[10]]), sfac, nep)
xl = np.linspace(0., L, num=nep)
crd_xc = crd_xc - xl
crd_xyzc = np.vstack([crd_xc, crd_yc, crd_zc])
u_xyzc = np.transpose(g) @ crd_xyzc
xyz_c = np.vstack((np.linspace(ex[0], ex[1], num=nep),
np.linspace(ey[0], ey[1], num=nep),
np.linspace(ez[0], ez[1], num=nep)))
crd_xc = xyz_c[0, :] + u_xyzc[0, :]
crd_yc = xyz_c[1, :] + u_xyzc[1, :]
crd_zc = xyz_c[2, :] + u_xyzc[2, :]
return crd_xc, crd_yc, crd_zc
def beam_disp_ends(ex, ey, d, sfac):
"""
Calculate the element deformation at element ends only.
"""
# indx: 0 1 2 3 4 5
# Ed = ux1 uy1 ur1 ux2 uy2 ur2
exd = np.array([ex[0] + sfac*d[0], ex[1] + sfac*d[3]])
eyd = np.array([ey[0] + sfac*d[1], ey[1] + sfac*d[4]])
return exd, eyd
def beam_disp_ends3d(ex, ey, ez, d, sfac):
"""
Calculate the element deformation at element ends only.
"""
# indx: 0 1 2 3 4 5 6 7 8 9 10 11
# Ed = ux1 uy1 uz1 rx1 ry1 rz1 ux2 uy2 uz2 rx2 ry2 rz2
exd = np.array([ex[0] + sfac*d[0], ex[1] + sfac*d[6]])
eyd = np.array([ey[0] + sfac*d[1], ey[1] + sfac*d[7]])
ezd = np.array([ez[0] + sfac*d[2], ez[1] + sfac*d[8]])
return exd, eyd, ezd
# plot_fiber_section is inspired by Matlab ``plotSection.zip``
# written by <NAME> available at
# http://users.ntua.gr/divamva/software.html
def plot_fiber_section(fib_sec_list, fillflag=1,
matcolor=['y', 'b', 'r', 'g', 'm', 'k']):
"""Plot fiber cross-section.
Args:
fib_sec_list (list): list of lists in a format similar to the input of
the OpenSees fiber section commands (``section('Fiber', ...)``,
``patch()``, ``layer()``); see also ``fib_sec_list_to_cmds()``.
fillflag (int): 1 - filled fibers with color specified in matcolor
list, 0 - no color, only the outline of fibers
matcolor (list): sequence of colors for various material tags
assigned to fibers
Examples:
::
fib_sec_1 = [['section', 'Fiber', 1, '-GJ', 1.0e6],
['patch', 'quad', 1, 4, 1, 0.032, 0.317, -0.311, 0.067, -0.266, 0.005, 0.077, 0.254], # noqa: E501
['patch', 'quad', 1, 1, 4, -0.075, 0.144, -0.114, 0.116, 0.075, -0.144, 0.114, -0.116], # noqa: E501
['patch', 'quad', 1, 4, 1, 0.266, -0.005, -0.077, -0.254, -0.032, -0.317, 0.311, -0.067] # noqa: E501
]
opsv.fib_sec_list_to_cmds(fib_sec_1)
matcolor = ['r', 'lightgrey', 'gold', 'w', 'w', 'w']
opsv.plot_fiber_section(fib_sec_1, matcolor=matcolor)
plt.axis('equal')
# plt.savefig(f'{kateps}fibsec_rc.png')
plt.show()
Notes:
``fib_sec_list`` can be reused by means of a python helper function
``ops_vis.fib_sec_list_to_cmds(fib_sec_list_1)``
See also:
``ops_vis.fib_sec_list_to_cmds()``
"""
fig, ax = plt.subplots()
ax.set_xlabel('z')
ax.set_ylabel('y')
ax.grid(False)
for item in fib_sec_list:
if item[0] == 'layer':
matTag = item[2]
if item[1] == 'straight':
n_bars = item[3]
As = item[4]
Iy, Iz, Jy, Jz = item[5], item[6], item[7], item[8]
r = np.sqrt(As / np.pi)
Y = np.linspace(Iy, Jy, n_bars)
Z = np.linspace(Iz, Jz, n_bars)
for zi, yi in zip(Z, Y):
bar = Circle((zi, yi), r, ec='k', fc='k', zorder=10)
ax.add_patch(bar)
if item[0] == 'patch':
matTag, nIJ, nJK = item[2], item[3], item[4]
if item[1] == 'quad' or item[1] == 'quadr':
Iy, Iz, Jy, Jz = item[5], item[6], item[7], item[8]
Ky, Kz, Ly, Lz = item[9], item[10], item[11], item[12]
if item[1] == 'rect':
Iy, Iz, Ky, Kz = item[5], item[6], item[7], item[8]
Jy, Jz, Ly, Lz = Ky, Iz, Iy, Kz
# check for convexity (vector products)
outIJxIK = (Jy-Iy)*(Kz-Iz) - (Ky-Iy)*(Jz-Iz)
outIKxIL = (Ky-Iy)*(Lz-Iz) - (Ly-Iy)*(Kz-Iz)
# check if I, J, L points are colinear
outIJxIL = (Jy-Iy)*(Lz-Iz) - (Ly-Iy)*(Jz-Iz)
# outJKxJL = (Ky-Jy)*(Lz-Jz) - (Ly-Jy)*(Kz-Jz)
if outIJxIK <= 0 or outIKxIL <= 0 or outIJxIL <= 0:
print('\nWarning! Patch quad is non-convex, defined counter-clockwise or has at least 3 collinear points') # noqa: E501
IJz, IJy = np.linspace(Iz, Jz, nIJ+1), np.linspace(Iy, Jy, nIJ+1)
JKz, JKy = np.linspace(Jz, Kz, nJK+1), np.linspace(Jy, Ky, nJK+1)
LKz, LKy = np.linspace(Lz, Kz, nIJ+1), np.linspace(Ly, Ky, nIJ+1)
ILz, ILy = np.linspace(Iz, Lz, nJK+1), np.linspace(Iy, Ly, nJK+1)
if fillflag:
Z = np.zeros((nIJ+1, nJK+1))
Y = np.zeros((nIJ+1, nJK+1))
for j in range(nIJ+1):
Z[j, :] = np.linspace(IJz[j], LKz[j], nJK+1)
Y[j, :] = np.linspace(IJy[j], LKy[j], nJK+1)
for j in range(nIJ):
for k in range(nJK):
zy = np.array([[Z[j, k], Y[j, k]],
[Z[j, k+1], Y[j, k+1]],
[Z[j+1, k+1], Y[j+1, k+1]],
[Z[j+1, k], Y[j+1, k]]])
poly = Polygon(zy, True, ec='k', fc=matcolor[matTag-1])
ax.add_patch(poly)
else:
# horizontal lines
for az, bz, ay, by in zip(IJz, LKz, IJy, LKy):
plt.plot([az, bz], [ay, by], 'b-', zorder=1)
# vertical lines
for az, bz, ay, by in zip(JKz, ILz, JKy, ILy):
plt.plot([az, bz], [ay, by], 'b-', zorder=1)
def fib_sec_list_to_cmds(fib_sec_list):
"""Reuses fib_sec_list to define fiber section in OpenSees.
At present it is not possible to extract fiber section data from
the OpenSees domain, this function is a workaround. The idea is to
prepare data similar to the one the regular OpenSees commands
(``section('Fiber', ...)``, ``fiber()``, ``patch()`` and/or
``layer()``) require.
Args:
fib_sec_list (list): is a list of fiber section data. First sub-list
also defines the torsional stiffness (GJ).
Warning:
If you use this function, do not issue the regular OpenSees:
section, Fiber, Patch or Layer commands.
See also:
``ops_vis.plot_fiber_section()``
"""
for dat in fib_sec_list:
if dat[0] == 'section':
secTag, GJ = dat[2], dat[4]
ops.section('Fiber', secTag, '-GJ', GJ)
if dat[0] == 'layer':
matTag = dat[2]
if dat[1] == 'straight':
n_bars = dat[3]
As = dat[4]
Iy, Iz, Jy, Jz = dat[5], dat[6], dat[7], dat[8]
ops.layer('straight', matTag, n_bars, As, Iy, Iz, Jy, Jz)
if dat[0] == 'patch':
matTag = dat[2]
nIJ = dat[3]
nJK = dat[4]
if dat[1] == 'quad' or dat[1] == 'quadr':
Iy, Iz, Jy, Jz = dat[5], dat[6], dat[7], dat[8]
Ky, Kz, Ly, Lz = dat[9], dat[10], dat[11], dat[12]
ops.patch('quad', matTag, nIJ, nJK, Iy, Iz, Jy, Jz, Ky, Kz,
Ly, Lz)
if dat[1] == 'rect':
Iy, Iz, Ky, Kz = dat[5], dat[6], dat[7], dat[8]
Jy, Jz, Ly, Lz = Ky, Iz, Iy, Kz
ops.patch('rect', matTag, nIJ, nJK, Iy, Iz, Ky, Kz)
def _anim_defo_2d(Eds, timeV, sfac, nep, unDefoFlag, fmt_undefo,
interpFlag, endDispFlag, fmt_interp, fmt_nodes, fig_wi_he,
xlim, ylim):
fig_wi, fig_he = fig_wi_he
ele_tags = ops.getEleTags()
nen = np.shape(ops.eleNodes(ele_tags[0]))[0]
# truss and beam/frame elements
if nen == 2:
ndf = np.shape(ops.nodeDOFs(ops.eleNodes(ele_tags[0])[0]))[0]
# truss element
if ndf == 2:
for ele_tag in ele_tags:
nd1, nd2 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1]])
eux = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd2)[0]])
euy = np.array([ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd2)[1]])
# displaced element coordinates (scaled by sfac factor)
edx = np.array([ex[0] + sfac*eux[0], ex[1] + sfac*eux[1]])
edy = np.array([ey[0] + sfac*euy[0], ey[1] + sfac*euy[1]])
if unDefoFlag:
plt.plot(ex, ey, fmt_undefo)
plt.plot(edx, edy, fmt_interp)
# beam/frame element anim defo
elif ndf == 3:
fig, ax = plt.subplots(figsize=(fig_wi/2.54, fig_he/2.54))
ax.axis('equal')
ax.set_xlim(xlim[0], xlim[1])
ax.set_ylim(ylim[0], ylim[1])
# ax.grid()
nel = len(ele_tags)
Ex = np.zeros((nel, 2))
Ey = np.zeros((nel, 2))
# no of frames equal to time intervals
n_frames, _, _ = np.shape(Eds)
lines = []
# time_text = ax.set_title('') # does not work
time_text = ax.text(.05, .95, '', transform=ax.transAxes)
for i, ele_tag in enumerate(ele_tags):
nd1, nd2 = ops.eleNodes(ele_tag)
# element x, y coordinates
Ex[i, :] = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0]])
Ey[i, :] = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1]])
lines.append(ax.plot([], [], fmt_nodes, lw=3)[0])
def init():
for j, ele_tag in enumerate(ele_tags):
lines[j].set_data([], [])
time_text.set_text('')
return tuple(lines) + (time_text,)
def animate(i):
for j, ele_tag in enumerate(ele_tags):
if interpFlag:
xcdi, ycdi = beam_defo_interp_2d(Ex[j, :],
Ey[j, :],
Eds[i, j, :],
sfac,
nep)
lines[j].set_data(xcdi, ycdi)
else:
xdi, ydi = beam_disp_ends(Ex[j, :], Ey[j, :],
Eds[i, j, :], sfac)
lines[j].set_data(xdi, ydi)
# plt.plot(xcdi, ycdi, fmt_interp)
# time_text.set_text(f'f')
time_text.set_text(f'frame: {i+1}/{n_frames}, \
time: {timeV[i]:.3f} s')
return tuple(lines) + (time_text,)
# keep a reference to the animation object; otherwise it can be garbage
# collected and nothing is animated
anim = FuncAnimation(fig, animate, init_func=init, frames=n_frames,
interval=50, blit=True, repeat=False)
return anim
# plt.axis('equal')
# plt.show() # call this from main py file for more control
# 2d triangular elements
# elif nen == 3:
# x = ex+sfac*ed[:, [0, 2, 4]]
# y = ex+sfac*ed[:, [1, 3, 5]]
# xc = [x, x[0, :]]
# yc = [x, x[0, :]]
# 2d quadrilateral (quad) elements
elif nen == 4:
for ele_tag in ele_tags:
nd1, nd2, nd3, nd4 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0],
ops.nodeCoord(nd3)[0],
ops.nodeCoord(nd4)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1],
ops.nodeCoord(nd3)[1],
ops.nodeCoord(nd4)[1]])
# if modeNo:
# ed = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
# ops.nodeEigenvector(nd1, modeNo)[1],
# ops.nodeEigenvector(nd2, modeNo)[0],
# ops.nodeEigenvector(nd2, modeNo)[1],
# ops.nodeEigenvector(nd3, modeNo)[0],
# ops.nodeEigenvector(nd3, modeNo)[1],
# ops.nodeEigenvector(nd4, modeNo)[0],
# ops.nodeEigenvector(nd4, modeNo)[1]])
# else:
ed = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd2)[0],
ops.nodeDisp(nd2)[1],
ops.nodeDisp(nd3)[0],
ops.nodeDisp(nd3)[1],
ops.nodeDisp(nd4)[0],
ops.nodeDisp(nd4)[1]])
if unDefoFlag:
plt.plot(np.append(ex, ex[0]), np.append(ey, ey[0]),
fmt_undefo)
# xcdi, ycdi = beam_defo_interp_2d(ex, ey, ed, sfac, nep)
# xdi, ydi = beam_disp_ends(ex, ey, ed, sfac)
# # interpolated displacement field
# plt.plot(xcdi, ycdi, 'b.-')
# # translations of ends only
# plt.plot(xdi, ydi, 'ro')
# test it with one element
x = ex+sfac*ed[[0, 2, 4, 6]]
y = ey+sfac*ed[[1, 3, 5, 7]]
plt.plot(np.append(x, x[0]), np.append(y, y[0]), 'b.-')
plt.axis('equal')
# 2d 8-node quadratic elements
# elif nen == 8:
# x = ex+sfac*ed[:, [0, 2, 4, 6, 8, 10, 12, 14]]
# y = ex+sfac*ed[:, [1, 3, 5, 7, 9, 11, 13, 15]]
# t = -1
# n = 0
# for s in range(-1, 1.4, 0.4):
# n += 1
# ...
else:
print(f'\nWarning! Elements not supported yet. nen: {nen}; must be: 2, 3, 4, 8.') # noqa: E501
def anim_defo(Eds, timeV, sfac, nep=17, unDefoFlag=1, fmt_undefo=fmt_undefo,
interpFlag=1, endDispFlag=1, fmt_interp=fmt_interp,
fmt_nodes='b-', az_el=az_el, fig_lbrt=fig_lbrt,
fig_wi_he=fig_wi_he, xlim=[0, 1], ylim=[0, 1]):
"""Make animation of the deformed shape computed by transient analysis
Args:
        Eds (ndarray): An array (n_steps x n_eles x n_dof_per_element)
            containing the element nodal displacements for each analysis step.
timeV (1darray): vector of discretized time values
sfac (float): scale factor
nep (integer): number of evaluation points inside the element and
including both element ends
unDefoFlag (integer): 1 - plot the undeformed model (mesh), 0 - do not
plot the mesh
interpFlag (integer): 1 - interpolate deformation inside element,
0 - no interpolation
endDispFlag (integer): 1 - plot marks at element ends, 0 - no marks
fmt_interp (string): format line string for interpolated (continuous)
            deformed shape. The format contains information on line color,
style and marks as in the standard matplotlib plot function.
fmt_nodes (string): format string for the marks of element ends
az_el (tuple): a tuple containing the azimuth and elevation
        fig_lbrt (tuple): a tuple containing left, bottom, right and top offsets
fig_wi_he (tuple): contains width and height of the figure
    Returns:
        FuncAnimation: the matplotlib animation object (for 2d beam/frame
        models). Keep a reference to it until plt.show() is called.
    Examples:
        See the commented usage sketch following this function definition.
    Notes:
    See also:
"""
node_tags = ops.getNodeTags()
ndim = np.shape(ops.nodeCoord(node_tags[0]))[0]
if ndim == 2:
        return _anim_defo_2d(Eds, timeV, sfac, nep, unDefoFlag, fmt_undefo,
                             interpFlag, endDispFlag, fmt_interp, fmt_nodes,
                             fig_wi_he, xlim, ylim)
else:
print(f'\nWarning! ndim: {ndim} not supported yet.')
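# Usage sketch for anim_defo (editorial; n_steps, dt, the scale factor and the
# axis limits are illustrative assumptions). During a transient analysis the
# element end displacements are collected into Eds and then animated:
#
#     ele_tags = ops.getEleTags()
#     Eds = np.zeros((n_steps, len(ele_tags), 6))
#     timeV = np.zeros(n_steps)
#     for step in range(n_steps):
#         ops.analyze(1, dt)
#         timeV[step] = ops.getTime()
#         for i, ele_tag in enumerate(ele_tags):
#             nd1, nd2 = ops.eleNodes(ele_tag)
#             Eds[step, i, :] = [ops.nodeDisp(nd1)[0], ops.nodeDisp(nd1)[1],
#                                ops.nodeDisp(nd1)[2], ops.nodeDisp(nd2)[0],
#                                ops.nodeDisp(nd2)[1], ops.nodeDisp(nd2)[2]]
#     anim = anim_defo(Eds, timeV, sfac=20., xlim=[-1, 7], ylim=[-2, 5])
#     plt.show()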
def section_force_distribution_2d(ex, ey, pl, nep=2,
ele_load_data=['-beamUniform', 0., 0.]):
"""
Calculate section forces (N, V, M) for an elastic 2D Euler-Bernoulli beam.
    Input:
    ex, ey - x, y element coordinates in global system
    pl - nodal forces of the element in the local coordinate system
        (e.g. from ops.eleResponse(ele_tag, 'localForces'))
    nep - number of evaluation points, by default (2) at element ends
    ele_load_data - list of transverse and longitudinal element load
        syntax: [ele_load_type, Wy, Wx]
        For now only '-beamUniform' element load type is acceptable
Output:
s = [N V M]; shape: (nep,3)
section forces at nep points along local x
xl: coordinates of local x-axis; shape: (nep,)
Use it with dia_sf to draw N, V, M diagrams.
TODO: add '-beamPoint' element load type
"""
# eload_type, Wy, Wx = ele_load_data[0], ele_load_data[1], ele_load_data[2]
Wy, Wx = ele_load_data[1], ele_load_data[2]
nlf = len(pl)
if nlf == 2: # trusses
N_1 = pl[0]
elif nlf == 6: # plane frames
# N_1, V_1, M_1 = pl[0], pl[1], pl[2]
N_1, V_1, M_1 = pl[:3]
else:
        print(f'\nWarning! Not supported. Number of nodal forces: {nlf}')
Lxy = np.array([ex[1]-ex[0], ey[1]-ey[0]])
L = np.sqrt(Lxy @ Lxy)
xl = np.linspace(0., L, nep)
one = np.ones(nep)
N = -1.*(N_1 * one + Wx * xl)
if nlf == 6:
V = V_1 * one + Wy * xl
M = -M_1 * one + V_1 * xl + 0.5 * Wy * xl**2
s = np.column_stack((N, V, M))
    elif nlf == 2:
        # note the trailing comma: column_stack needs a tuple of arrays here
        # to return the documented (nep, 1) shape
        s = np.column_stack((N,))
# if eload_type == '-beamUniform':
# else:
return s, xl
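# Usage sketch (editorial; the element tag, coordinates and load value are
# illustrative assumptions). For one analysed 2d frame element:
#
#     nd1, nd2 = ops.eleNodes(1)
#     ex = np.array([ops.nodeCoord(nd1)[0], ops.nodeCoord(nd2)[0]])
#     ey = np.array([ops.nodeCoord(nd1)[1], ops.nodeCoord(nd2)[1]])
#     pl = ops.eleResponse(1, 'localForces')
#     s, xl = section_force_distribution_2d(ex, ey, pl, nep=17,
#                                           ele_load_data=['-beamUniform',
#                                                          -10.e3, 0.])
#     N, V, M = s[:, 0], s[:, 1], s[:, 2]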
def section_force_distribution_3d(ex, ey, ez, pl, nep=2,
ele_load_data=['-beamUniform', 0., 0., 0.]):
"""
Calculate section forces (N, Vy, Vz, T, My, Mz) for an elastic 3d beam.
    Section forces are evaluated at nep points along the element local x-axis
    from the nodal forces pl and the uniform element load.
Parameters
----------
ex : list
x element coordinates
ey : list
y element coordinates
ez : list
z element coordinates
    pl : ndarray
        nodal forces of the element in the local coordinate system
        (e.g. from ops.eleResponse(ele_tag, 'localForces'))
    nep : int
number of evaluation points, by default (2) at element ends
    ele_load_data : list
list of transverse and longitudinal element load
syntax: [ele_load_type, Wy, Wz, Wx]
For now only '-beamUniform' element load type is acceptable.
Returns
-------
    s : ndarray
        [N Vy Vz T My Mz]; shape: (nep, 6)
        column vectors of section forces along the local x-axis
    xl : ndarray
        coordinates of local x-axis; shape (nep,)
Notes
-----
Todo: add '-beamPoint' element load type
"""
# eload_type = ele_load_data[0]
Wy, Wz, Wx = ele_load_data[1], ele_load_data[2], ele_load_data[3]
N1, Vy1, Vz1, T1, My1, Mz1 = pl[:6]
Lxyz = np.array([ex[1]-ex[0], ey[1]-ey[0], ez[1]-ez[0]])
L = np.sqrt(Lxyz @ Lxyz)
xl = np.linspace(0., L, nep)
one = np.ones(nep)
N = -1.*(N1*one + Wx*xl)
Vy = Vy1*one + Wy*xl
Vz = Vz1*one + Wz*xl
T = -T1*one
Mz = -Mz1*one + Vy1*xl + 0.5*Wy*xl**2
My = My1*one + Vz1*xl + 0.5*Wz*xl**2
s = np.column_stack((N, Vy, Vz, T, My, Mz))
return s, xl
def section_force_diagram_2d(sf_type, Ew, sfac=1., nep=17,
fmt_secforce=fmt_secforce):
"""Display section forces diagram for 2d beam column model.
This function plots a section forces diagram for 2d beam column elements
with or without element loads. For now only '-beamUniform' constant
transverse or axial element loads are supported.
Args:
sf_type (str): type of section force: 'N' - normal force,
'V' - shear force, 'M' - bending moments.
        Ew (dict): Ew Python dictionary contains information on non-zero
            element loads, therefore each item of the Python dictionary
            is in the form: 'ele_tag: ['-beamUniform', Wy, Wx]'.
        sfac (float): scale factor by which the values of section forces are
            multiplied.
nep (int): number of evaluation points including both end nodes
(default: 17)
fmt_secforce (str): format line string for section force distribution
curve. The format contains information on line color, style and
marks as in the standard matplotlib plot function.
(default: fmt_secforce = 'b-' # blue solid line)
Usage:
::
Wy, Wx = -10.e+3, 0.
Ew = {3: ['-beamUniform', Wy, Wx]}
sfacM = 5.e-5
plt.figure()
minVal, maxVal = opsv.section_force_diagram_2d('M', Ew, sfacM)
plt.title('Bending moments')
Todo:
Add support for other element loads available in OpenSees: partial
(trapezoidal) uniform element load, and 'beamPoint' element load.
"""
maxVal, minVal = -np.inf, np.inf
ele_tags = ops.getEleTags()
for ele_tag in ele_tags:
# by default no element load
eload_data = ['', 0., 0.]
if ele_tag in Ew:
eload_data = Ew[ele_tag]
nd1, nd2 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1]])
Lxy = np.array([ex[1]-ex[0], ey[1]-ey[0]])
L = np.sqrt(Lxy @ Lxy)
cosa, cosb = Lxy / L
pl = ops.eleResponse(ele_tag, 'localForces')
s_all, xl = section_force_distribution_2d(ex, ey, pl, nep, eload_data)
if sf_type == 'N' or sf_type == 'axial':
s = s_all[:, 0]
elif sf_type == 'V' or sf_type == 'shear' or sf_type == 'T':
s = s_all[:, 1]
elif sf_type == 'M' or sf_type == 'moment':
s = s_all[:, 2]
minVal = min(minVal, np.min(s))
maxVal = max(maxVal, np.max(s))
s = s*sfac
s_0 = np.zeros((nep, 2))
s_0[0, :] = [ex[0], ey[0]]
s_0[1:, 0] = s_0[0, 0] + xl[1:] * cosa
s_0[1:, 1] = s_0[0, 1] + xl[1:] * cosb
s_p = np.copy(s_0)
# positive M are opposite to N and V
if sf_type == 'M' or sf_type == 'moment':
s *= -1.
s_p[:, 0] -= s * cosb
s_p[:, 1] += s * cosa
plt.axis('equal')
# section force curve
plt.plot(s_p[:, 0], s_p[:, 1], fmt_secforce,
solid_capstyle='round', solid_joinstyle='round',
dash_capstyle='butt', dash_joinstyle='round')
# model
plt.plot(ex, ey, 'k-', solid_capstyle='round', solid_joinstyle='round',
dash_capstyle='butt', dash_joinstyle='round')
# reference perpendicular lines
for i in np.arange(nep):
plt.plot([s_0[i, 0], s_p[i, 0]], [s_0[i, 1], s_p[i, 1]],
fmt_secforce, solid_capstyle='round',
solid_joinstyle='round', dash_capstyle='butt',
dash_joinstyle='round')
return minVal, maxVal
def section_force_diagram_3d(sf_type, Ew, sfac=1., nep=17,
fmt_secforce=fmt_secforce):
"""Display section forces diagram of a 3d beam column model.
This function plots section forces diagrams for 3d beam column elements
with or without element loads. For now only '-beamUniform' constant
transverse or axial element loads are supported.
Args:
sf_type (str): type of section force: 'N' - normal force,
'Vy' or 'Vz' - shear force, 'My' or 'Mz' - bending moments,
'T' - torsional moment.
        Ew (dict): Ew Python dictionary contains information on non-zero
            element loads, therefore each item of the Python dictionary
            is in the form: 'ele_tag: ['-beamUniform', Wy, Wz, Wx]'.
        sfac (float): scale factor by which the values of section forces are
            multiplied.
nep (int): number of evaluation points including both end nodes
(default: 17)
fmt_secforce (str): format line string for section force distribution
curve. The format contains information on line color, style and
marks as in the standard matplotlib plot function.
(default: fmt_secforce = 'b-' # blue solid line)
Usage:
::
Wy, Wz, Wx = -5., 0., 0.
Ew = {3: ['-beamUniform', Wy, Wz, Wx]}
sfacMz = 1.e-1
plt.figure()
minY, maxY = opsv.section_force_diagram_3d('Mz', Ew, sfacMz)
plt.title(f'Bending moments Mz, max = {maxY:.2f}, min = {minY:.2f}')
Todo:
Add support for other element loads available in OpenSees: partial
(trapezoidal) uniform element load, and 'beamPoint' element load.
"""
maxVal, minVal = -np.inf, np.inf
ele_tags = ops.getEleTags()
azim, elev = az_el
fig_wi, fig_he = fig_wi_he
fleft, fbottom, fright, ftop = fig_lbrt
fig = plt.figure(figsize=(fig_wi/2.54, fig_he/2.54))
fig.subplots_adjust(left=.08, bottom=.08, right=.985, top=.94)
ax = fig.add_subplot(111, projection=Axes3D.name)
# ax.axis('equal')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.view_init(azim=azim, elev=elev)
for i, ele_tag in enumerate(ele_tags):
# by default no element load
eload_data = ['-beamUniform', 0., 0., 0.]
if ele_tag in Ew:
eload_data = Ew[ele_tag]
nd1, nd2 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1]])
ez = np.array([ops.nodeCoord(nd1)[2],
ops.nodeCoord(nd2)[2]])
# eo = Eo[i, :]
xloc = ops.eleResponse(ele_tag, 'xlocal')
yloc = ops.eleResponse(ele_tag, 'ylocal')
zloc = ops.eleResponse(ele_tag, 'zlocal')
g = np.vstack((xloc, yloc, zloc))
G, _ = rot_transf_3d(ex, ey, ez, g)
g = G[:3, :3]
pl = ops.eleResponse(ele_tag, 'localForces')
s_all, xl = section_force_distribution_3d(ex, ey, ez, pl, nep,
eload_data)
# 1:'y' 2:'z'
if sf_type == 'N':
s = s_all[:, 0]
dir_plt = 1
elif sf_type == 'Vy':
s = s_all[:, 1]
dir_plt = 1
elif sf_type == 'Vz':
s = s_all[:, 2]
dir_plt = 2
elif sf_type == 'T':
s = s_all[:, 3]
dir_plt = 1
elif sf_type == 'My':
s = s_all[:, 4]
dir_plt = 2
elif sf_type == 'Mz':
s = s_all[:, 5]
dir_plt = 1
minVal = min(minVal, np.min(s))
maxVal = max(maxVal, np.max(s))
s = s*sfac
# FIXME - can be simplified
s_0 = np.zeros((nep, 3))
s_0[0, :] = [ex[0], ey[0], ez[0]]
s_0[1:, 0] = s_0[0, 0] + xl[1:] * g[0, 0]
s_0[1:, 1] = s_0[0, 1] + xl[1:] * g[0, 1]
s_0[1:, 2] = s_0[0, 2] + xl[1:] * g[0, 2]
s_p = np.copy(s_0)
# positive M are opposite to N and V
# if sf_type == 'Mz' or sf_type == 'My':
if sf_type == 'Mz':
s *= -1.
s_p[:, 0] += s * g[dir_plt, 0]
s_p[:, 1] += s * g[dir_plt, 1]
s_p[:, 2] += s * g[dir_plt, 2]
# plt.axis('equal')
# section force curve
plt.plot(s_p[:, 0], s_p[:, 1], s_p[:, 2], fmt_secforce,
solid_capstyle='round', solid_joinstyle='round',
dash_capstyle='butt', dash_joinstyle='round')
# model
plt.plot(ex, ey, ez, 'k-', solid_capstyle='round',
solid_joinstyle='round', dash_capstyle='butt',
dash_joinstyle='round')
# reference perpendicular lines
for i in np.arange(nep):
plt.plot([s_0[i, 0], s_p[i, 0]],
[s_0[i, 1], s_p[i, 1]],
[s_0[i, 2], s_p[i, 2]], fmt_secforce,
solid_capstyle='round', solid_joinstyle='round',
dash_capstyle='butt', dash_joinstyle='round')
return minVal, maxVal
def quad_sig_out_per_node():
"""Return a 2d numpy array of stress components per OpenSees node.
Returns:
sig_out (ndarray): a 2d array of stress components per node with
the following components: sxx, syy, sxy, svm, s1, s2, angle.
Size (n_nodes x 7).
Examples:
sig_out = opsv.quad_sig_out_per_node()
Notes:
s1, s2: principal stresses
angle: angle of the principal stress s1
"""
ele_tags = ops.getEleTags()
node_tags = ops.getNodeTags()
n_nodes = len(node_tags)
# initialize helper arrays
sig_out = np.zeros((n_nodes, 7))
nodes_tag_count = np.zeros((n_nodes, 2), dtype=int)
nodes_tag_count[:, 0] = node_tags
for i, ele_tag in enumerate(ele_tags):
nd1, nd2, nd3, nd4 = ops.eleNodes(ele_tag)
ind1 = node_tags.index(nd1)
ind2 = node_tags.index(nd2)
ind3 = node_tags.index(nd3)
ind4 = node_tags.index(nd4)
nodes_tag_count[[ind1, ind2, ind3, ind4], 1] += 1
sig_ip_el = ops.eleResponse(ele_tag, 'stress')
sigM_ip = np.vstack(([sig_ip_el[0:3],
sig_ip_el[3:6],
sig_ip_el[6:9],
sig_ip_el[9:12]]))
sigM_nd = quad_extrapolate_ip_to_node(sigM_ip)
# sxx
sig_out[ind1, 0] += sigM_nd[0, 0]
sig_out[ind2, 0] += sigM_nd[1, 0]
sig_out[ind3, 0] += sigM_nd[2, 0]
sig_out[ind4, 0] += sigM_nd[3, 0]
# syy
sig_out[ind1, 1] += sigM_nd[0, 1]
sig_out[ind2, 1] += sigM_nd[1, 1]
sig_out[ind3, 1] += sigM_nd[2, 1]
sig_out[ind4, 1] += sigM_nd[3, 1]
# sxy
sig_out[ind1, 2] += sigM_nd[0, 2]
sig_out[ind2, 2] += sigM_nd[1, 2]
sig_out[ind3, 2] += sigM_nd[2, 2]
sig_out[ind4, 2] += sigM_nd[3, 2]
indxs, = np.where(nodes_tag_count[:, 1] > 1)
# n_indxs < n_nodes: e.g. 21<25 (bous), 2<6 (2el) etc.
n_indxs = np.shape(indxs)[0]
# divide summed stresses by the number of common nodes
sig_out[indxs, :] = \
sig_out[indxs, :]/nodes_tag_count[indxs, 1].reshape(n_indxs, 1)
# warning reshape from (pts,ncomp) to (ncomp,pts)
vm_out = vm_stress(np.transpose(sig_out[:, :3]))
sig_out[:, 3] = vm_out
princ_sig_out = princ_stress(np.transpose(sig_out[:, :3]))
sig_out[:, 4:7] = np.transpose(princ_sig_out)
return sig_out
def quad_extrapolate_ip_to_node(yip):
"""
    Extrapolate values at 4 integration points to 4 nodes of a quad element.
    Integration points of Gauss quadrature.
    Useful for: stress components (sxx, syy, sxy)
yip - either a single vector (4,) or array (4,3) /sxx syy sxy/
or array (4, n)
"""
xep = np.sqrt(3.)/2
X = np.array([[1.+xep, -1/2., 1.-xep, -1/2.],
[-1/2., 1.+xep, -1/2., 1.-xep],
[1.-xep, -1/2., 1.+xep, -1/2.],
[-1/2., 1.-xep, -1/2., 1.+xep]])
ynp = X @ yip
return ynp
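# Sanity check (editorial): each row of X sums to 1, so a field that is
# constant at the four Gauss points is reproduced exactly at the nodes:
#
#     quad_extrapolate_ip_to_node(np.ones(4))   # -> array([1., 1., 1., 1.])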
def quad_9n_extrapolate_ip_to_node(yip):
"""
    Extrapolate values at 9 integration points to 9 nodes of a quad element.
    Integration points of Gauss quadrature.
    Useful for: stress components (sxx, syy, sxy)
    yip - either a single vector (9,) or array (9, 3) /sxx syy sxy/
          or array (9, n)
"""
a = 1./np.sqrt(0.6)
a10 = 1 - a*a
a9 = a10 * a10
a11, a12 = 1 + a, 1 - a
a1, a2, a3 = a11 * a11, a11 * a12, a12 * a12
a4, a5 = a10 * a11, a10 * a12
# 1
n5, n6, n7, n8 = a4/2-a9/2, a5/2-a9/2, a5/2-a9/2, a4/2-a9/2
n1 = a1/4 - (n8 + n5)/2 - a9/4
n2 = a2/4 - (n5 + n6)/2 - a9/4
n3 = a3/4 - (n6 + n7)/2 - a9/4
n4 = a2/4 - (n7 + n8)/2 - a9/4
r1 = np.array([n1, n2, n3, n4, n5, n6, n7, n8, a9])
# 2
n5, n6, n7, n8 = a4/2-a9/2, a4/2-a9/2, a5/2-a9/2, a5/2-a9/2
n1 = a2/4 - (n8 + n5)/2 - a9/4
n2 = a1/4 - (n5 + n6)/2 - a9/4
n3 = a2/4 - (n6 + n7)/2 - a9/4
n4 = a3/4 - (n7 + n8)/2 - a9/4
r2 = np.array([n1, n2, n3, n4, n5, n6, n7, n8, a9])
# 3
n5, n6, n7, n8 = a5/2-a9/2, a4/2-a9/2, a4/2-a9/2, a5/2-a9/2
n1 = a3/4 - (n8 + n5)/2 - a9/4
n2 = a2/4 - (n5 + n6)/2 - a9/4
n3 = a1/4 - (n6 + n7)/2 - a9/4
n4 = a2/4 - (n7 + n8)/2 - a9/4
r3 = np.array([n1, n2, n3, n4, n5, n6, n7, n8, a9])
# 4
n5, n6, n7, n8 = a5/2-a9/2, a5/2-a9/2, a4/2-a9/2, a4/2-a9/2
n1 = a2/4 - (n8 + n5)/2 - a9/4
n2 = a3/4 - (n5 + n6)/2 - a9/4
n3 = a2/4 - (n6 + n7)/2 - a9/4
n4 = a1/4 - (n7 + n8)/2 - a9/4
r4 = np.array([n1, n2, n3, n4, n5, n6, n7, n8, a9])
# 5
n5, n6, n7, n8 = a11/2-a10/2, a10/2-a10/2, a12/2-a10/2, a10/2-a10/2
n1 = a11/4 - (n8 + n5)/2 - a10/4
n2 = a11/4 - (n5 + n6)/2 - a10/4
n3 = a12/4 - (n6 + n7)/2 - a10/4
n4 = a12/4 - (n7 + n8)/2 - a10/4
r5 = np.array([n1, n2, n3, n4, n5, n6, n7, n8, a10])
# 6
n5, n6, n7, n8 = a10/2-a10/2, a11/2-a10/2, a10/2-a10/2, a12/2-a10/2
n1 = a12/4 - (n8 + n5)/2 - a10/4
n2 = a11/4 - (n5 + n6)/2 - a10/4
n3 = a11/4 - (n6 + n7)/2 - a10/4
n4 = a12/4 - (n7 + n8)/2 - a10/4
r6 = np.array([n1, n2, n3, n4, n5, n6, n7, n8, a10])
# 7
n5, n6, n7, n8 = a12/2-a10/2, a10/2-a10/2, a11/2-a10/2, a10/2-a10/2
n1 = a12/4 - (n8 + n5)/2 - a10/4
n2 = a12/4 - (n5 + n6)/2 - a10/4
n3 = a11/4 - (n6 + n7)/2 - a10/4
n4 = a11/4 - (n7 + n8)/2 - a10/4
r7 = np.array([n1, n2, n3, n4, n5, n6, n7, n8, a10])
# 8
n5, n6, n7, n8 = a10/2-a10/2, a12/2-a10/2, a10/2-a10/2, a11/2-a10/2
n1 = a11/4 - (n8 + n5)/2 - a10/4
n2 = a12/4 - (n5 + n6)/2 - a10/4
n3 = a12/4 - (n6 + n7)/2 - a10/4
n4 = a11/4 - (n7 + n8)/2 - a10/4
r8 = np.array([n1, n2, n3, n4, n5, n6, n7, n8, a10])
r9 = np.array([0., 0., 0., 0., 0., 0., 0., 0., 1.])
X = np.vstack((r1, r2, r3, r4, r5, r6, r7, r8, r9))
ynp = X @ yip
# ynp = 1.0
return ynp
def quad_8n_extrapolate_ip_to_node(yip):
"""
    Extrapolate values at 8 integration points to 8 nodes of a quad element.
    Integration points of Gauss quadrature.
    Useful for: stress components (sxx, syy, sxy)
    yip - either a single vector (8,) or array (8, 3) /sxx syy sxy/
          or array (8, n)
"""
a = 1./np.sqrt(0.6)
a0 = 1 - a**2
a4, a5 = -(1-a)**2*(1+2*a)/4, -(1+a)**2*(1-2*a)/4
a7 = -a0/4
a11, a12 = a0*(1+a)/2, a0*(1-a)/2
a1, a2, a3 = (1+a)/2, (1-a)/2, (1-a**2)/2
X = np.array([[a5, a7, a4, a7, a11, a12, a12, a11],
[a7, a5, a7, a4, a11, a11, a12, a12],
[a4, a7, a5, a7, a12, a11, a11, a12],
[a7, a4, a7, a5, a12, a12, a11, a11],
[a7, a7, a7, a7, a1, a3, a2, a3],
[a7, a7, a7, a7, a3, a1, a3, a2],
[a7, a7, a7, a7, a2, a3, a1, a3],
[a7, a7, a7, a7, a3, a2, a3, a1]])
ynp = X @ yip
# ynp = 1.0
return ynp
def quad_interpolate_node_to_ip(ynp):
"""
    Interpolate values at 4 nodes to 4 integration points of a quad element.
    Integration points of Gauss quadrature.
    Useful for: stress components (sxx, syy, sxy)
ynp - either a single vector (4,) or array (4,3) /sxx syy sxy/
or array (4, n)
"""
jsz = 1./6.
jtr = 1./3.
p2 = jsz * np.sqrt(3.)
X = np.array([[jtr+p2, jsz, jtr-p2, jsz],
[jsz, jtr+p2, jsz, jtr-p2],
[jtr-p2, jsz, jtr+p2, jsz],
[jsz, jtr-p2, jsz, jtr+p2]])
yip = X @ ynp
return yip
def princ_stress(sig):
"""Return a tuple (s1, s2, angle): principal stresses (plane stress) and angle
Args:
sig (ndarray): input array of stresses at nodes: sxx, syy, sxy (tau)
Returns:
out (ndarray): 1st row is first principal stress s1, 2nd row is second
principal stress s2, 3rd row is the angle of s1
"""
sx, sy, tau = sig[0], sig[1], sig[2]
ds = (sx-sy)/2
R = np.sqrt(ds**2 + tau**2)
s1 = (sx+sy)/2. + R
s2 = (sx+sy)/2. - R
angle = np.arctan2(tau, ds)/2
out = np.vstack((s1, s2, angle))
return out
def vm_stress(sig):
    """Return the von Mises stress for stress components given per point.
    sig - array of stress components, shape (3, n_pts) for plane stress
    (sxx, syy, sxy) or (6, n_pts) for the full 3d stress state.
    """
    n_sig_comp, n_pts = np.shape(sig)
if n_sig_comp > 3:
x, y, z, xy, xz, yz = sig
else:
x, y, xy = sig
z, xz, yz = 0., 0., 0.
_a = 0.5*((x-y)**2 + (y-z)**2 + (z-x)**2 + 6.*(xy**2 + xz**2 + yz**2))
return np.sqrt(_a)
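# Worked check (editorial) for a single point given as a (3, 1) column:
#
#     sig = np.array([[100.], [0.], [0.]])   # uniaxial tension, sxx = 100
#     princ_stress(sig)   # -> s1 = 100., s2 = 0., angle = 0.
#     vm_stress(sig)      # -> array([100.])
#
# For pure shear (sxx = syy = 0, sxy = tau) the principal stresses are
# +tau and -tau at 45 degrees and the von Mises stress is sqrt(3)*tau.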
def quad_crds_node_to_ip():
"""
Return global coordinates of 4 quad ip nodes and corner nodes.
It also returns quad connectivity.
"""
node_tags, ele_tags = ops.getNodeTags(), ops.getEleTags()
n_nodes, n_eles = len(node_tags), len(ele_tags)
# idiom coordinates as ordered in node_tags
nds_crd = np.zeros((n_nodes, 2))
for i, node_tag in enumerate(node_tags):
nds_crd[i] = ops.nodeCoord(node_tag)
quads_conn = np.zeros((n_eles, 4), dtype=int)
# quads_conn_ops = np.zeros((n_eles, 4), dtype=int)
eles_nds_crd = np.zeros((n_eles, 4, 2))
eles_ips_crd = np.zeros((n_eles, 4, 2))
for i, ele_tag in enumerate(ele_tags):
nd1, nd2, nd3, nd4 = ops.eleNodes(ele_tag)
ind1 = node_tags.index(nd1)
ind2 = node_tags.index(nd2)
ind3 = node_tags.index(nd3)
ind4 = node_tags.index(nd4)
quads_conn[i] = np.array([ind1, ind2, ind3, ind4])
# quads_conn_ops[i] = np.array([nd1, nd2, nd3, nd4])
eles_nds_crd[i] = np.array([[ops.nodeCoord(nd1, 1),
ops.nodeCoord(nd1, 2)],
[ops.nodeCoord(nd2, 1),
ops.nodeCoord(nd2, 2)],
[ops.nodeCoord(nd3, 1),
ops.nodeCoord(nd3, 2)],
[ops.nodeCoord(nd4, 1),
ops.nodeCoord(nd4, 2)]])
eles_ips_crd[i] = quad_interpolate_node_to_ip(eles_nds_crd[i])
return eles_ips_crd, eles_nds_crd, nds_crd, quads_conn
def quad_sig_out_per_ele():
"""
Extract stress components for all elements from OpenSees analysis.
Returns:
eles_ips_sig_out, eles_nds_sig_out (tuple of ndarrays):
eles_ips_sig_out - values at integration points of elements
(n_eles x 4 x 4)
eles_nds_sig_out - values at nodes of elements (n_eles x 4 x 4)
Examples:
eles_ips_sig_out, eles_nds_sig_out = opsv.quad_sig_out_per_ele()
Notes:
        Stress components in array columns are: Sxx, Syy, Sxy, Svmis
Used e.g. by plot_mesh_with_ips_2d function
"""
node_tags, ele_tags = ops.getNodeTags(), ops.getEleTags()
n_nodes, n_eles = len(node_tags), len(ele_tags)
eles_ips_sig_out = np.zeros((n_eles, 4, 4))
eles_nds_sig_out = np.zeros((n_eles, 4, 4))
# array (n_nodes, 2):
# node_tags, number of occurrence in quad elements)
# correspondence indx and node_tag is in node_tags.index
# (a) data in np.array of integers
nodes_tag_count = np.zeros((n_nodes, 2), dtype=int)
nodes_tag_count[:, 0] = node_tags
for i, ele_tag in enumerate(ele_tags):
nd1, nd2, nd3, nd4 = ops.eleNodes(ele_tag)
ind1 = node_tags.index(nd1)
ind2 = node_tags.index(nd2)
ind3 = node_tags.index(nd3)
ind4 = node_tags.index(nd4)
nodes_tag_count[[ind1, ind2, ind3, ind4], 1] += 1
sig_ip_el = ops.eleResponse(ele_tag, 'stress')
sigM_ip = np.vstack(([sig_ip_el[0:3],
sig_ip_el[3:6],
sig_ip_el[6:9],
sig_ip_el[9:12]]))
sigM_nd = quad_extrapolate_ip_to_node(sigM_ip)
eles_ips_sig_out[i, :, :3] = sigM_ip
eles_nds_sig_out[i, :, :3] = sigM_nd
vm_ip_out = vm_stress(np.transpose(eles_ips_sig_out[i, :, :3]))
vm_nd_out = vm_stress(np.transpose(eles_nds_sig_out[i, :, :3]))
eles_ips_sig_out[i, :, 3] = vm_ip_out
eles_nds_sig_out[i, :, 3] = vm_nd_out
return eles_ips_sig_out, eles_nds_sig_out
def quads_to_4tris(quads_conn, nds_crd, nds_val):
"""
Get triangles connectivity, coordinates and new values at quad centroids.
Args:
quads_conn (ndarray):
nds_crd (ndarray):
nds_val (ndarray):
Returns:
tris_conn, nds_c_crd, nds_c_val (tuple):
Notes:
Triangles connectivity array is based on
quadrilaterals connectivity.
Each quad is split into four triangles.
New nodes are created at the quad centroid.
See also:
function: quads_to_8tris_9n, quads_to_8tris_8n
"""
n_quads, _ = quads_conn.shape
n_nds, _ = nds_crd.shape
# coordinates and values at quad centroids _c_
nds_c_crd = np.zeros((n_quads, 2))
nds_c_val = np.zeros(n_quads)
tris_conn = np.zeros((4*n_quads, 3), dtype=int)
for i, quad_conn in enumerate(quads_conn):
j = 4*i
n0, n1, n2, n3 = quad_conn
# quad centroids
nds_c_crd[i] = np.array([np.sum(nds_crd[[n0, n1, n2, n3], 0])/4.,
np.sum(nds_crd[[n0, n1, n2, n3], 1])/4.])
nds_c_val[i] = np.sum(nds_val[[n0, n1, n2, n3]])/4.
# triangles connectivity
tris_conn[j] = np.array([n0, n1, n_nds+i])
tris_conn[j+1] = np.array([n1, n2, n_nds+i])
tris_conn[j+2] = np.array([n2, n3, n_nds+i])
tris_conn[j+3] = np.array([n3, n0, n_nds+i])
return tris_conn, nds_c_crd, nds_c_val
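# Example (editorial): for a single quad with corner indices [0, 1, 2, 3] and
# four rows in nds_crd, a centroid node with index 4 is appended and the
# returned connectivity is [[0, 1, 4], [1, 2, 4], [2, 3, 4], [3, 0, 4]].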
def plot_mesh_2d(nds_crd, eles_conn, lw=0.4, ec='k'):
"""
Plot 2d mesh (quads or triangles) outline.
"""
for ele_conn in eles_conn:
x = nds_crd[ele_conn, 0]
y = nds_crd[ele_conn, 1]
plt.fill(x, y, edgecolor=ec, lw=lw, fill=False)
def plot_stress_2d(nds_val, mesh_outline=1, cmap='jet'):
"""
Plot stress distribution of a 2d elements of a 2d model.
Args:
nds_val (ndarray): the values of a stress component, which can
be extracted from sig_out array (see quad_sig_out_per_node
function)
mesh_outline (int): 1 - mesh is plotted, 0 - no mesh plotted.
cmap (str): Matplotlib color map (default is 'jet')
Usage:
::
sig_out = opsv.quad_sig_out_per_node()
j, jstr = 3, 'vmis'
nds_val = sig_out[:, j]
opsv.plot_stress_2d(nds_val)
plt.xlabel('x [m]')
plt.ylabel('y [m]')
plt.title(f'{jstr}')
plt.show()
See also:
:ref:`ops_vis_quad_sig_out_per_node`
"""
node_tags, ele_tags = ops.getNodeTags(), ops.getEleTags()
n_nodes, n_eles = len(node_tags), len(ele_tags)
# idiom coordinates as ordered in node_tags
# use node_tags.index(tag) for correspondence
nds_crd = np.zeros((n_nodes, 2))
for i, node_tag in enumerate(node_tags):
nds_crd[i] = ops.nodeCoord(node_tag)
# from utils / quad_sig_out_per_node
# fixme: if this can be simplified
# index (starts from 0) to node_tag correspondence
# (a) data in np.array of integers
# nodes_tag_count = np.zeros((n_nodes, 2), dtype=int)
# nodes_tag_count[:, 0] = node_tags
#
# correspondence indx and node_tag is in node_tags.index
# after testing remove the above
quads_conn = np.zeros((n_eles, 4), dtype=int)
for i, ele_tag in enumerate(ele_tags):
nd1, nd2, nd3, nd4 = ops.eleNodes(ele_tag)
ind1 = node_tags.index(nd1)
ind2 = node_tags.index(nd2)
ind3 = node_tags.index(nd3)
ind4 = node_tags.index(nd4)
quads_conn[i] = np.array([ind1, ind2, ind3, ind4])
tris_conn, nds_c_crd, nds_c_val = \
quads_to_4tris(quads_conn, nds_crd, nds_val)
nds_crd_all = np.vstack((nds_crd, nds_c_crd))
# nds_val_all = np.concatenate((nds_val, nds_c_val))
nds_val_all = np.hstack((nds_val, nds_c_val))
# 1. plot contour maps
triangulation = tri.Triangulation(nds_crd_all[:, 0],
nds_crd_all[:, 1],
tris_conn)
plt.tricontourf(triangulation, nds_val_all, 50, cmap=cmap)
# 2. plot original mesh (quad) without subdivision into triangles
if mesh_outline:
plot_mesh_2d(nds_crd, quads_conn)
# plt.colorbar()
plt.axis('equal')
def plot_stress_9n_2d(nds_val, cmap='jet'):
    """Plot stress distribution for quad elements (9-node variant).
    Note: the body currently mirrors plot_stress_2d and assumes four nodes
    per element.
    """
    node_tags, ele_tags = ops.getNodeTags(), ops.getEleTags()
n_nodes, n_eles = len(node_tags), len(ele_tags)
# idiom coordinates as ordered in node_tags
# use node_tags.index(tag) for correspondence
nds_crd = np.zeros((n_nodes, 2))
for i, node_tag in enumerate(node_tags):
nds_crd[i] = ops.nodeCoord(node_tag)
# from utils / quad_sig_out_per_node
# fixme: if this can be simplified
# index (starts from 0) to node_tag correspondence
# (a) data in np.array of integers
# nodes_tag_count = np.zeros((n_nodes, 2), dtype=int)
# nodes_tag_count[:, 0] = node_tags
#
# correspondence indx and node_tag is in node_tags.index
# after testing remove the above
quads_conn = np.zeros((n_eles, 4), dtype=int)
for i, ele_tag in enumerate(ele_tags):
nd1, nd2, nd3, nd4 = ops.eleNodes(ele_tag)
ind1 = node_tags.index(nd1)
ind2 = node_tags.index(nd2)
ind3 = node_tags.index(nd3)
ind4 = node_tags.index(nd4)
quads_conn[i] = np.array([ind1, ind2, ind3, ind4])
tris_conn, nds_c_crd, nds_c_val = \
quads_to_4tris(quads_conn, nds_crd, nds_val)
nds_crd_all = np.vstack((nds_crd, nds_c_crd))
# nds_val_all = np.concatenate((nds_val, nds_c_val))
nds_val_all = np.hstack((nds_val, nds_c_val))
# 1. plot contour maps
triangulation = tri.Triangulation(nds_crd_all[:, 0],
nds_crd_all[:, 1],
tris_conn)
plt.tricontourf(triangulation, nds_val_all, 50, cmap=cmap)
# 2. plot original mesh (quad) without subdivision into triangles
plot_mesh_2d(nds_crd, quads_conn)
# plt.colorbar()
plt.axis('equal')
def plot_extruded_model_rect_section_3d(b, h, az_el=az_el,
fig_wi_he=fig_wi_he,
fig_lbrt=fig_lbrt):
"""Plot an extruded 3d model based on cross-section dimenions.
Three arrows present local section axes: green - local x-axis,
red - local z-axis, blue - local y-axis.
Args:
b (float): section width
h (float): section height
az_el (tuple): azimuth and elevation
fig_wi_he: figure width and height in centimeters
fig_lbrt: figure left, bottom, right, top boundaries
Usage:
::
plot_extruded_model_rect_section_3d(0.3, 0.4)
Notes:
- For now only rectangular cross-section is supported.
"""
b2, h2 = b/2, h/2
ele_tags = ops.getEleTags()
azim, elev = az_el
fig_wi, fig_he = fig_wi_he
fleft, fbottom, fright, ftop = fig_lbrt
fig = plt.figure(figsize=(fig_wi/2.54, fig_he/2.54))
fig.subplots_adjust(left=.08, bottom=.08, right=.985, top=.94)
ax = fig.add_subplot(111, projection=Axes3D.name)
# ax.axis('equal')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.view_init(azim=azim, elev=elev)
for i, ele_tag in enumerate(ele_tags):
nd1, nd2 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1]])
ez = np.array([ops.nodeCoord(nd1)[2],
ops.nodeCoord(nd2)[2]])
# eo = Eo[i, :]
xloc = ops.eleResponse(ele_tag, 'xlocal')
yloc = ops.eleResponse(ele_tag, 'ylocal')
zloc = ops.eleResponse(ele_tag, 'zlocal')
g = np.vstack((xloc, yloc, zloc))
# G, L = rot_transf_3d(ex, ey, ez, eo)
G, L = rot_transf_3d(ex, ey, ez, g)
# g = G[:3, :3]
Xi, Yi, Zi = ex[0], ey[0], ez[0]
Xj, Yj, Zj = ex[1], ey[1], ez[1]
g10, g11, g12 = g[1, 0]*h2, g[1, 1]*h2, g[1, 2]*h2
g20, g21, g22 = g[2, 0]*b2, g[2, 1]*b2, g[2, 2]*b2
# beg node cross-section vertices
Xi1, Yi1, Zi1 = Xi - g10 - g20, Yi - g11 - g21, Zi - g12 - g22
Xi2, Yi2, Zi2 = Xi + g10 - g20, Yi + g11 - g21, Zi + g12 - g22
Xi3, Yi3, Zi3 = Xi + g10 + g20, Yi + g11 + g21, Zi + g12 + g22
Xi4, Yi4, Zi4 = Xi - g10 + g20, Yi - g11 + g21, Zi - g12 + g22
# end node cross-section vertices
Xj1, Yj1, Zj1 = Xj - g10 - g20, Yj - g11 - g21, Zj - g12 - g22
Xj2, Yj2, Zj2 = Xj + g10 - g20, Yj + g11 - g21, Zj + g12 - g22
Xj3, Yj3, Zj3 = Xj + g10 + g20, Yj + g11 + g21, Zj + g12 + g22
Xj4, Yj4, Zj4 = Xj - g10 + g20, Yj - g11 + g21, Zj - g12 + g22
# mesh outline
ax.plot(ex, ey, ez, 'k--', solid_capstyle='round',
solid_joinstyle='round', dash_capstyle='butt',
dash_joinstyle='round')
# collected i-beg, j-end node coordinates, counter-clockwise order
pts = [[Xi1, Yi1, Zi1],
[Xi2, Yi2, Zi2],
[Xi3, Yi3, Zi3],
[Xi4, Yi4, Zi4],
[Xj1, Yj1, Zj1],
[Xj2, Yj2, Zj2],
[Xj3, Yj3, Zj3],
[Xj4, Yj4, Zj4]]
# list of 4-node sides
verts = [[pts[0], pts[1], pts[2], pts[3]], # beg
[pts[4], pts[5], pts[6], pts[7]], # end
[pts[0], pts[4], pts[5], pts[1]], # bottom
[pts[3], pts[7], pts[6], pts[2]], # top
[pts[0], pts[4], pts[7], pts[3]], # front
[pts[1], pts[5], pts[6], pts[2]]] # back
# plot 3d element composed with sides
ax.add_collection3d(Poly3DCollection(verts, linewidths=1,
edgecolors='k', alpha=.25))
Xm, Ym, Zm = sum(ex)/2, sum(ey)/2, sum(ez)/2
alen = 0.1*L
# plot local axis directional vectors: workaround quiver = arrow
plt.quiver(Xm, Ym, Zm, g[0, 0], g[0, 1], g[0, 2], color='g',
lw=2, length=alen, alpha=.8, normalize=True)
plt.quiver(Xm, Ym, Zm, g[1, 0], g[1, 1], g[1, 2], color='b',
lw=2, length=alen, alpha=.8, normalize=True)
plt.quiver(Xm, Ym, Zm, g[2, 0], g[2, 1], g[2, 2], color='r',
lw=2, length=alen, alpha=.8, normalize=True)
def plot_mesh_with_ips_2d(nds_crd, eles_ips_crd, eles_nds_crd, quads_conn,
eles_ips_sig_out, eles_nds_sig_out, sig_out_indx):
"""
Plot 2d element mesh with the values at gauss and nodal points.
Args:
nds_crd (ndarray): nodes coordinates (n_nodes x 2)
eles_ips_crd (ndarray): integration points coordinates of elements
(n_eles x 4 x 2)
eles_nds_crd (ndarray): nodal coordinates of elements (n_eles x 4 x 2)
quads_conn (ndarray): connectivity array (n_eles x 4)
        eles_ips_sig_out (ndarray): stress component values at integration
            points (n_eles x 4 x 4)
        eles_nds_sig_out (ndarray): stress component values at element nodes
            (n_eles x 4 x 4)
sig_out_indx (int): which sig_out component
Notes: This function is suitable for small models for illustration
purposes.
"""
plot_mesh_2d(nds_crd, quads_conn, lw=1.2, ec='b')
ele_tags = ops.getEleTags()
n_eles = len(ele_tags)
for i in range(n_eles):
plt.plot(eles_ips_crd[i, :, 0], eles_ips_crd[i, :, 1],
'kx', markersize=3)
ips_val = eles_ips_sig_out[i, :, sig_out_indx]
nds_val = eles_nds_sig_out[i, :, sig_out_indx]
# show ips values
plt.text(eles_ips_crd[i, 0, 0], eles_ips_crd[i, 0, 1],
f'{ips_val[0]:.2f}', {'color': 'C0'},
ha='center', va='bottom')
plt.text(eles_ips_crd[i, 1, 0], eles_ips_crd[i, 1, 1],
f'{ips_val[1]:.2f}', {'color': 'C1'},
ha='center', va='bottom')
plt.text(eles_ips_crd[i, 2, 0], eles_ips_crd[i, 2, 1],
f'{ips_val[2]:.2f}', {'color': 'C2'},
ha='center', va='top')
plt.text(eles_ips_crd[i, 3, 0], eles_ips_crd[i, 3, 1],
f'{ips_val[3]:.2f}', {'color': 'C3'},
ha='center', va='top')
# show node values
plt.text(eles_nds_crd[i, 0, 0], eles_nds_crd[i, 0, 1],
f' {nds_val[0]:.2f}', {'color': 'C0'},
ha='left', va='bottom')
plt.text(eles_nds_crd[i, 1, 0], eles_nds_crd[i, 1, 1],
f'{nds_val[1]:.2f} ', {'color': 'C1'},
ha='right', va='bottom')
plt.text(eles_nds_crd[i, 2, 0], eles_nds_crd[i, 2, 1],
f'{nds_val[2]:.2f} ', {'color': 'C2'},
ha='right', va='top')
plt.text(eles_nds_crd[i, 3, 0], eles_nds_crd[i, 3, 1],
f' {nds_val[3]:.2f}', {'color': 'C3'},
ha='left', va='top')
plt.axis('equal')
# see also quads_to_8tris_9n
def quads_to_8tris_8n(quads_conn, nds_crd, nds_val):
"""
Get triangles connectivity, coordinates and new values at quad centroids.
Args:
quads_conn (ndarray):
nds_crd (ndarray):
nds_val (ndarray):
Returns:
tris_conn, nds_c_crd, nds_c_val (tuple):
Notes:
Triangles connectivity array is based on
quadrilaterals connectivity.
Each quad is split into eight triangles.
New nodes are created at the quad centroid.
See also:
function: quads_to_8tris_9n, quads_to_4tris
"""
n_quads, _ = quads_conn.shape
n_nds, _ = nds_crd.shape
# coordinates and values at quad centroids _c_
nds_c_crd = np.zeros((n_quads, 2))
nds_c_val = np.zeros(n_quads)
tris_conn = np.zeros((8*n_quads, 3), dtype=int)
for i, quad_conn in enumerate(quads_conn):
j = 8*i
n0, n1, n2, n3, n4, n5, n6, n7 = quad_conn
# quad centroids
# nds_c_crd[i] = np.array([np.sum(nds_crd[[n0, n1, n2, n3], 0])/4.,
# np.sum(nds_crd[[n0, n1, n2, n3], 1])/4.])
# nds_c_val[i] = np.sum(nds_val[[n0, n1, n2, n3]])/4.
nds_c_crd[i] = quad_8n_val_at_center(nds_crd[[n0, n1, n2, n3,
n4, n5, n6, n7]])
nds_c_val[i] = quad_8n_val_at_center(nds_val[[n0, n1, n2, n3,
n4, n5, n6, n7]])
# triangles connectivity
tris_conn[j] = np.array([n0, n4, n_nds+i])
tris_conn[j+1] = np.array([n4, n1, n_nds+i])
tris_conn[j+2] = np.array([n1, n5, n_nds+i])
tris_conn[j+3] = np.array([n5, n2, n_nds+i])
tris_conn[j+4] = np.array([n2, n6, n_nds+i])
tris_conn[j+5] = np.array([n6, n3, n_nds+i])
tris_conn[j+6] = np.array([n3, n7, n_nds+i])
        tris_conn[j+7] = np.array([n7, n0, n_nds+i])
    return tris_conn, nds_c_crd, nds_c_val
#! /usr/bin/env python
"""
Implementation of a median subtraction algorithm for model PSF subtraction in
high-contrast imaging sequences. In the case of ADI, the algorithm is based on
[MAR06]_. The ADI+IFS method, is an extension of this basic idea to
multi-spectral cubes.
.. [MAR06]
| Marois et al. 2006
| **Angular Differential Imaging: A Powerful High-Contrast Imaging
Technique**
| *The Astrophysical Journal, Volume 641, Issue 1, pp. 556-564*
| `https://arxiv.org/abs/astro-ph/0512335
<https://arxiv.org/abs/astro-ph/0512335>`_
"""
__author__ = '<NAME>'
__all__ = ['median_sub']
import numpy as np
from multiprocessing import cpu_count
from ..config import time_ini, timing
from ..var import get_annulus_segments, mask_circle
from ..preproc import (cube_derotate, cube_collapse, check_pa_vector,
check_scal_vector)
from ..preproc import cube_rescaling_wavelengths as scwave
from ..config.utils_conf import pool_map, iterable, print_precision
from ..preproc.derotation import _find_indices_adi, _define_annuli
from ..preproc.rescaling import _find_indices_sdi
def median_sub(cube, angle_list, scale_list=None, flux_sc_list=None, fwhm=4,
radius_int=0, asize=4, delta_rot=1, delta_sep=(0.1, 1),
mode='fullfr', nframes=4, sdi_only=False, imlib='vip-fft',
interpolation='lanczos4', collapse='median', nproc=1,
full_output=False, verbose=True, **rot_options):
""" Implementation of a median subtraction algorithm for model PSF
subtraction in high-contrast imaging sequences. In the case of ADI, the
algorithm is based on [MAR06]_. The ADI+IFS method is an extension of this
basic idea to multi-spectral cubes.
Parameters
----------
cube : numpy ndarray, 3d
Input cube.
angle_list : numpy ndarray, 1d
Corresponding parallactic angle for each frame.
scale_list : numpy ndarray, 1d, optional
If provided, triggers mSDI reduction. These should be the scaling
factors used to re-scale the spectral channels and align the speckles
in case of IFS data (ADI+mSDI cube). Usually, these can be approximated
by the last channel wavelength divided by the other wavelengths in the
cube (more thorough approaches can be used to get the scaling factors,
e.g. with ``vip_hci.preproc.find_scal_vector``).
flux_sc_list : numpy ndarray, 1d
In the case of IFS data (ADI+SDI), this is the list of flux scaling
factors applied to each spectral frame after geometrical rescaling.
These should be set to either the ratio of stellar fluxes between the
last spectral channel and the other channels, or to the second output
of `preproc.find_scal_vector` (when using 2 free parameters). If not
provided, the algorithm will still work, but with a lower efficiency
at subtracting the stellar halo.
fwhm : float or 1d numpy array
Known size of the FHWM in pixels to be used. Default is 4.
radius_int : int, optional
The radius of the innermost annulus. By default is 0, if >0 then the
central circular area is discarded.
asize : int, optional
The size of the annuli, in pixels.
delta_rot : int, optional
Factor for increasing the parallactic angle threshold, expressed in
FWHM. Default is 1 (excludes 1 FHWM on each side of the considered
frame).
delta_sep : float or tuple of floats, optional
The threshold separation in terms of the mean FWHM (for ADI+mSDI data).
If a tuple of two values is provided, they are used as the lower and
upper intervals for the threshold (grows as a function of the
separation).
mode : {'fullfr', 'annular'}, str optional
In ``fullfr`` mode only the median frame is subtracted, in ``annular``
mode also the 4 closest frames given a PA threshold (annulus-wise) are
subtracted.
nframes : int or None, optional
Number of frames (even value) to be used for building the optimized
        reference PSF when working in ``annular`` mode (default: 4). If set to
        None, all frames, excluding the thresholded ones, are used.
sdi_only: bool, optional
In the case of IFS data (ADI+SDI), whether to perform median-SDI, or
median-ASDI (default).
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
collapse : {'median', 'mean', 'sum', 'trimmean'}, str optional
Sets the way of collapsing the frames for producing a final image.
nproc : None or int, optional
Number of processes for parallel computing. If None the number of
processes will be set to cpu_count()/2. By default the algorithm works
in single-process mode.
full_output: bool, optional
Whether to return the final median combined image only or with other
intermediate arrays.
verbose : bool, optional
If True prints to stdout intermediate info.
rot_options: dictionary, optional
Dictionary with optional keyword values for "border_mode", "mask_val",
"edge_blend", "interp_zeros", "ker" (see documentation of
``vip_hci.preproc.frame_rotate``)
Returns
-------
cube_out : numpy ndarray, 3d
[full_output=True] The cube of residuals.
cube_der : numpy ndarray, 3d
[full_output=True] The derotated cube of residuals.
frame : numpy ndarray, 2d
Median combination of the de-rotated cube.
"""
global ARRAY
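    # Usage sketch (editorial, hedged): for a 3d ADI cube `cube` with matching
    # parallactic angles `angs` in degrees, a basic reduction could look like
    #
    #     frame = median_sub(cube, angs, fwhm=4, mode='annular', nframes=4)
    #
    # For 4d IFS data also pass `scale_list` (one factor per spectral channel,
    # e.g. scale_list = wl[-1] / wl) and optionally `flux_sc_list`.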
ARRAY = cube.copy()
if not (ARRAY.ndim == 3 or ARRAY.ndim == 4):
raise TypeError('Input array is not a 3d or 4d array')
if verbose:
start_time = time_ini()
if nproc is None:
nproc = cpu_count() // 2 # Hyper-threading doubles the # of cores
angle_list = check_pa_vector(angle_list)
if ARRAY.ndim == 3:
n, y, _ = ARRAY.shape
if ARRAY.shape[0] != angle_list.shape[0]:
msg = 'Input vector or parallactic angles has wrong length'
raise TypeError(msg)
# The median frame is first subtracted from each frame
model_psf = np.median(ARRAY, axis=0)
ARRAY -= model_psf
# Depending on the ``mode``
cube_out = ARRAY
if mode == 'fullfr':
# MASK AFTER DEROTATION TO AVOID ARTEFACTS
# if radius_int > 0:
# cube_out = mask_circle(ARRAY, radius_int, fillwith=np.nan)
# else:
# cube_out = ARRAY
if verbose:
print('Median psf reference subtracted')
elif mode == 'annular':
if nframes is not None:
if nframes % 2 != 0:
raise TypeError('`nframes` argument must be even value')
n_annuli = int((y / 2 - radius_int) / asize)
if verbose:
print('N annuli = {}, FWHM = {}'.format(n_annuli, fwhm))
res = pool_map(nproc, _median_subt_ann_adi,
iterable(range(n_annuli)), angle_list, n_annuli,
fwhm, radius_int, asize, delta_rot, nframes,
verbose=verbose, msg='Processing annuli:',
progressbar_single=True)
res = np.array(res, dtype=object)
mres = res[:, 0]
yy = res[:, 1]
xx = res[:, 2]
#cube_out = np.zeros_like(ARRAY)
#cube_out[:] = np.nan
for ann in range(n_annuli):
cube_out[:, yy[ann], xx[ann]] = mres[ann]
if verbose:
print('Optimized median psf reference subtracted')
else:
raise RuntimeError('Mode not recognized')
cube_der = cube_derotate(cube_out, angle_list, nproc=nproc, imlib=imlib,
interpolation=interpolation, **rot_options)
if radius_int:
cube_out = mask_circle(cube_out, radius_int)
cube_der = mask_circle(cube_der, radius_int)
frame = cube_collapse(cube_der, mode=collapse)
elif ARRAY.ndim == 4:
z, n, y_in, x_in = ARRAY.shape
if scale_list is None:
raise ValueError('Scaling factors vector must be provided')
else:
if np.array(scale_list).ndim > 1:
raise ValueError('Scaling factors vector is not 1d')
if not scale_list.shape[0] == z:
raise ValueError('Scaling factors vector has wrong length')
if flux_sc_list is not None:
if np.array(flux_sc_list).ndim > 1:
raise ValueError('Scaling factors vector is not 1d')
if not flux_sc_list.shape[0] == z:
raise ValueError('Scaling factors vector has wrong length')
# Exploiting spectral variability (radial movement)
fwhm = int(np.round(np.mean(fwhm)))
n_annuli = int((y_in / 2 - radius_int) / asize)
if nframes is not None:
if nframes % 2 != 0:
raise TypeError('`nframes` argument must be even value')
if verbose:
print('{} spectral channels per IFS frame'.format(z))
print('First median subtraction exploiting spectral variability')
if mode == 'annular':
print('N annuli = {}, mean FWHM = {:.3f}'.format(n_annuli,
fwhm))
res = pool_map(nproc, _median_subt_fr_sdi, iterable(range(n)),
scale_list, flux_sc_list, n_annuli, fwhm, radius_int,
asize, delta_sep, nframes, imlib, interpolation,
collapse, mode)
residuals_cube_channels = np.array(res)
if verbose:
timing(start_time)
print('{} ADI frames'.format(n))
print('Median subtraction in the ADI fashion')
if sdi_only:
cube_out = residuals_cube_channels
else:
if mode == 'fullfr':
median_frame = np.nanmedian(residuals_cube_channels, axis=0)
cube_out = residuals_cube_channels - median_frame
elif mode == 'annular':
if verbose:
print('N annuli = {}, mean FWHM = {:.3f}'.format(n_annuli,
fwhm))
ARRAY = residuals_cube_channels
res = pool_map(nproc, _median_subt_ann_adi,
iterable(range(n_annuli)), angle_list, n_annuli,
fwhm, radius_int, asize, delta_rot, nframes)
res = np.array(res, dtype=object)
mres = res[:, 0]
yy = res[:, 1]
xx = res[:, 2]
pa_thrs = np.array(res[:, 3])
if verbose:
print('PA thresholds: ')
print_precision(pa_thrs)
                cube_out = np.zeros_like(ARRAY)
"""
factor.py
Defines variables, variable sets, and dense factors over discrete variables (tables) for graphical models
Version 0.1.0 (2021-03-25)
(c) 2015-2021 <NAME> under the FreeBSD license; see license.txt for details.
"""
import numpy as np
#import autograd.numpy as np
from sortedcontainers import SortedSet as sset
## Under testing: cython-compiled variable sets for faster operations
try:
from pyGMs.varset_c import Var,VarSet
except ImportError:
#print "Compiled version not loaded; importing python version"
from pyGMs.varset_py import Var,VarSet # sortedcontainers version
#from .varset_py2 import Var,VarSet # numpy array version
inf = float('inf')
orderMethod = 'F' # TODO: currently stores in fortran order (as Matlab); should be trivially changable
#orderMethod = 'C' # Can we make this "seamless" to the user, and/or force them to do something consistent?
# Notes: column-major (order=F) puts last index sequentially ("big endian"): t[0 0 0], t[0 0 1], t[0 1 0] ...
# row major (order=C) puts 1st index sequentially ("little endian"): t[0 0 0], t[1 0 0], t[0 1 0], ...
class Factor(object):
"""A basic factor<float> class
Factors are the basic building block of our graphical model representations. In general, a factor
consists of a set of variables (its "scope"), and a table of values indicating f(x) for each
joint configuration x (a tuple of values) of its variables.
Variables are stored in sorted order; most of the time, factors are constructed by reading from files,
but if built by hand it is safest to use indexing to set the values, e.g.,
>>> f = Factor( [X,Y,Z], 0.0 ) # builds a factor over X,Y,Z filled with zeros
>>> f[0,1,0] = 1.5 # set f(X=0,Y=1,Z=0) to 1.5
Useful attributes are f.vars (the scope) and f.table (the table, a numpy array).
Factors are imbued with many basic operations for manipulation:
Operators: *, +, /, -, **, exp, log, abs, etc.
In-place versions: *=, +=, /=, -=, **=, expIP, logIP, etc.
Elimination: max, min, sum, lse (log-sum-exp), etc.
Conditioning: return a factor defined by a sub-table, assigning some variables to values
Other: argmax, argmin, sample, etc., return configurations of X (tuples)
"""
#v = VarSet([]) # internal storage for variable set (VarSet)
#t = np.ndarray([]) # internal storage for table (numpy array)
def __init__(self,vars=VarSet(),vals=1.0):
"""Constructor for Factor class
>>> f = Factor( [X,Y,Z],[vals] ) # creates factor over [X,Y,Z] with table [vals]
[vals] should be a correctly sized numpy array, or something that can be cast to the same.
"""
# TODO: add user-specified order method for values (order=)
# TODO: accept out-of-order vars list (=> permute vals as req'd)
try:
self.v = VarSet(vars) # try building varset with args
except TypeError: # if not iterable (e.g. single variable)
self.v = VarSet() # try just adding it
self.v.add(vars)
#assert( self.v.nrStates() > 0)
#if self.v.nrStatesDouble() > 1e8: raise ValueError("Too big!");
try:
self.t = np.empty(self.v.dims(), float, orderMethod);
self.t[:] = vals # try filling factor with "vals"
except ValueError: # if it's an incompatible shape,
self.t = np.reshape(np.array(vals, float), self.v.dims(), orderMethod) # try again using reshape
def __build(self,vs,ndarray):
"""Internal build function from numpy ndarray"""
self.v = vs
self.t = ndarray
return self
#TODO: def assign(self, F) : set self equal to rhs F, e.g., *this = F
def copy(self):
"""Copy constructor; make a copy of a factor"""
return Factor().__build(self.v.copy(),self.t.copy('K')) # order=orderMethod?
def changeVars(self, vars, copy=True):
"""Copy a factor but change its arguments (scope).
>>> f = Factor([X0,X1], table)
>>> g = changeVars( f, [X7,X5]) # now, g(X5=b,X7=a) = f(X0=a,X1=b)
"""
v = VarSet(vars)
        newOrder = [vars.index(x) for x in v]  # list, as required by transpose()
if copy: ret = Factor(v, self.t.transpose(newOrder))
else: ret = Factor().__build(v, self.t.transpose(newOrder)) # try not to copy if possible
return ret
def __repr__(self):
"""Detailed representation: scope (varset) + table memory location"""
return 'Factor({:s},[0x{:x}])'.format(str(self.v),self.t.ctypes.data)
def __str__(self):
"""Basic string representation: scope (varset) only"""
return 'Factor({:s})'.format(str(self.v))
def latex(self, valueformat="0.4f", factorname="$f(x)$", varnames=None):
"""Return string containing latex code for table values.
Arguments:
valueformat : string formatter for values in value column; default "0.4f"
factorname : string for header of value column
varnames : dict mapping variable ID to string for that column (defaults to $x_i$ if None)
"""
tex = "\\begin{tabular}[t]{" + "".join(["c" for v in self.v]) + "|c}\n"
#tex += " & ".join(["$x"+str(int(v))+"$" for v in self.v]) + " & $f_{"+"".join([str(int(v)) for v in self.v])+"}$ \\\\ \\hline \n"
if varnames is None: varnames = {v:"$x_{"+str(int(v))+"}$" for v in self.v}
tex += " & ".join([varnames[v] for v in self.v]) + " & "+factorname+" \\\\ \\hline \n"
for s in range(self.numel()):
tex += " & ".join([str(si) for si in self.v.ind2sub(s)]) + " & " + ("{:"+valueformat+"}").format(self[s]) + "\\\\ \n"
tex += "\\end{tabular} \n"
return tex
@property
def vars(self):
"""Variables (scope) of the factor; read-only"""
return self.v
@vars.setter
def vars(self,value):
raise AttributeError("Read-only attribute")
@property
def table(self):
"""Table (values, as numpy array) of the factor"""
return self.t
@table.setter
def table(self,values):
try:
self.t[:] = values # try filling factor with "values"
except ValueError: # if it's an incompatible shape,
self.t = np.array(values,dtype=float).reshape(self.v.dims(),order=orderMethod) # try again using reshape
@property
def nvar(self):
"""Number of arguments (variables, scope size) for the factor"""
return len(self.v)
#@property
def dims(self):
"""Dimensions (table shape) of the tabular factor"""
return self.t.shape
#@property # TODO: make property?
def numel(self):
"""Number of elements (size) of the tabular factor"""
return self.t.size
################## METHODS ##########################################
def __getitem__(self,loc):
"""Accessor: F[x1,x2] = F[sub2ind(x1,x2)] = F(X1=x1,X2=x2)"""
if isinstance(loc, dict): return self.valueMap(loc)
if self.t.ndim == 1 or isinstance(loc, (tuple, list)):
return self.t[loc]
else:
try:
return self.t[self.v.ind2sub(loc)]
except ValueError:
raise IndexError("Index {} invalid for table with size {}".format(loc,self.t.shape))
def __setitem__(self,loc,val):
"""Assign values of the factor: F[i,j,k] = F[idx] = val if idx=sub2ind(i,j,k)"""
if isinstance(loc, dict): return self.setValueMap(loc,val)
if self.t.ndim == 1 or isinstance(loc, (tuple, list)):
self.t[loc] = val
else:
try:
self.t[self.v.ind2sub(loc)] = val
#self.t.flat[loc] = val # uses c-contiguous order...
except ValueError:
raise IndexError("Index {} invalid for table with size {}".format(loc,self.t.shape))
#value = __getitem__ # def f.value(loc): Alternate name for __getitem__
def value(self,x):
"""Type-safe version of __getitem__: returns scalar float entry of table at tuple x, or exception"""
if self.nvar == 0: return self.t[0]
return self.t.item(x)
def setValue(self,x,val):
"""Type-safe version of __setitem__: sets a scalar float entry of table at tuple x, or exception"""
self.t.itemset(x,val)
def valueMap(self,x):
"""Accessor: F[x[i],x[j]] where i,j = F.vars, i.e, x is a map from variables to their state values"""
if self.nvar == 0: return self.t[0] # if a scalar f'n, nothing to index
return self.t[tuple(x[v] for v in self.v)] # otherwise, find entry of table
def setValueMap(self,x,val):
"""Set F[x[i],x[j]] = val, where i,j = F.vars, i.e, x is a map from variables to their state values"""
self.t[tuple(x[v] for v in self.v) if len(self.v) else 0] = val # lookup location to set, or 0 if scalar f'n
def __float__(self):
"""Convert factor F to scalar float if possible; otherwise raises ValueError"""
if (self.nvar == 0): return self.t[0]
else: raise ValueError("Factor is not a scalar; scope {}".format(self.v))
# TODO missing comparator functions?
def isnan(self):
"""Check for NaN (not-a-number) entries in the factor's values; true if any NaN present"""
return self.isAny( (lambda x: np.isnan(x)) )
def isfinite(self):
"""Check for infinite (-inf, inf) or NaN values in the factor; false if any present"""
return not self.isAny( (lambda x: not np.isfinite(x)) )
def isAny(self,test):
"""Generic check for any entries satisfying lambda-expression "test" in the factor"""
for x in np.nditer(self.t, op_flags=['readonly']):
if test(x):
return True
return False
#### UNARY OPERATIONS ####
def __abs__(self):
"""Return the absolute value of F: G = F.abs() => G(x) = | F(x) | for all x"""
return Factor().__build( self.v.copy() , np.fabs(self.t) )
abs = __abs__
def __neg__(self):
"""Return the negative of F: G = -F => G(x) = -F(x) for all x"""
return Factor().__build( self.v.copy() , np.negative(self.t) )
def exp(self):
"""Return the exponential of F: G = F.exp() => G(x) = exp(F(x)) for all x"""
return Factor().__build( self.v.copy() , np.exp(self.t) )
def __pow__(self,power):
"""Return F raised to a power: G = F.power(p) => G(x) = ( F(x) )^p for all x"""
return Factor().__build( self.v.copy() , np.power(self.t,power) )
power = __pow__
def log(self): # just use base?
"""Return the natural log of F: G = F.log() => G(x) = log( F(x) ) for all x"""
with np.errstate(divide='ignore'):
return Factor().__build( self.v.copy() , np.log(self.t) )
def log2(self):
"""Return the log base 2 of F: G = F.log2() => G(x) = log2( F(x) ) for all x"""
with np.errstate(divide='ignore'):
return Factor().__build( self.v.copy() , np.log2(self.t) )
def log10(self):
"""Return the log base 10 of F: G = F.log10() => G(x) = log10( F(x) ) for all x"""
with np.errstate(divide='ignore'):
return Factor().__build( self.v.copy() , np.log10(self.t) )
#### IN-PLACE UNARY OPERATIONS ####
# always return "self" for chaining: f.negIP().expIP() = exp(-f(x)) in-place
def absIP(self):
"""Take the absolute value of F: F.absIP() => F(x) <- |F(x)| (in-place)"""
np.fabs(self.t, out=self.t)
return self
def expIP(self):
"""Take the exponential of F: F.expIP() => F(x) <- exp(F(x)) (in-place)"""
        np.exp(self.t, out=self.t)
        return self
import numpy as np
from sstcam_sandbox import get_data, get_plot
from CHECLabPy.core.io import HDF5Reader
from IPython import embed
def process(path, output_dir):
with HDF5Reader(path) as reader:
df = reader.read('data')
dtack = []
stale = []
fci = []
for _, group in df.groupby("ipath"):
dtack.append(np.diff(group['tack']))
stale.append(group['stale'][1:])
fci.append(group['fci'][1:])
dtack = np.concatenate(dtack)
    stale = np.concatenate(stale).astype(bool)  # np.bool was removed in NumPy 1.24
fci = np.concatenate(fci)
    istale = np.where(stale)
## SOLVING ODEs WITH EULER FORWARD/MODIFIED
#ODE
#dy/dx = x/y, y(0) = 1
import numpy as np
import matplotlib.pyplot as plt
## analytic solution
xa = np.linspace(0,0.3,1000)
ya = np.sqrt(xa**2 + 1)
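## Euler forward sketch (editorial addition): the step size and plotting
## choices below are illustrative assumptions, not from the original script.
h = 0.01
xs = np.arange(0.0, 0.3 + h, h)
ys = np.zeros_like(xs)
ys[0] = 1.0                                   # initial condition y(0) = 1
for i in range(len(xs) - 1):
    ys[i + 1] = ys[i] + h * (xs[i] / ys[i])   # y_{n+1} = y_n + h*f(x_n, y_n)
plt.plot(xa, ya, label='analytic')
plt.plot(xs, ys, 'o', ms=3, label='Euler forward')
plt.legend()
plt.show()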
import tensorflow as tf
import numpy as np
import pickle
#from reward_model.utils import LimitedRunningStat, RunningStat
from utils import DynamicRunningStat, LimitedRunningStat, RunningStat
import random
from math import sqrt
from utils import *
eps = 1e-12
class RewardModel:
def __init__(self, actions_size, policy, sess = None, gamma=0.99, lr=1e-5, batch_size=32, num_itr=20,
use_vairl=False, mutual_information=0.5, alpha=0.0005, with_action=False, name='reward_model',
entropy_weight = 0.5, with_value=True, fixed_reward_model=False,
vs=None, **kwargs):
# Initialize some model attributes
# RunningStat to normalize reward from the model
if not fixed_reward_model:
self.r_norm = DynamicRunningStat()
else:
self.r_norm = RunningStat(1)
# Discount factor
self.gamma = gamma
# Policy agent needed to compute the discriminator
self.policy = policy
# Demonstrations buffer
self.expert_traj = None
self.validation_traj = None
# Num of actions available in the environment
self.actions_size = actions_size
# If is state-only or state-action discriminator
self.with_action = with_action
# TF parameters
self.sess = sess
self.lr = lr
self.batch_size = batch_size
self.num_itr = num_itr
self.entropy_weight = entropy_weight
        # Use Variational Information Bottleneck (V-AIRL)
self.use_vairl = use_vairl
self.mutual_information = mutual_information
self.alpha = alpha
self.name = name
# Buffer of policy experience with which train the reward model
self.buffer = dict()
self.create_buffer()
with tf.compat.v1.variable_scope(name) as vs:
with tf.compat.v1.variable_scope('irl'):
# Input spec for both reward and value function
# Current state (DeepCrawl spec)
self.global_state = tf.compat.v1.placeholder(tf.float32, [None, 10, 10, 52], name='global_state')
self.local_state = tf.compat.v1.placeholder(tf.float32, [None, 5, 5, 52], name='local_state')
self.local_two_state = tf.compat.v1.placeholder(tf.float32, [None, 3, 3, 52], name='local_two_state')
self.agent_stats = tf.compat.v1.placeholder(tf.int32, [None, 16], name='agent_stats')
self.target_stats = tf.compat.v1.placeholder(tf.int32, [None, 15], name='target_stats')
if self.with_action:
self.acts = tf.compat.v1.placeholder(tf.int32, [None, 1], name='acts')
# Next state (DeepCrawl spec) - for discriminator
self.global_state_n = tf.compat.v1.placeholder(tf.float32, [None, 10, 10, 52], name='global_state_n')
self.local_state_n = tf.compat.v1.placeholder(tf.float32, [None, 5, 5, 52], name='local_state_n')
self.local_two_state_n = tf.compat.v1.placeholder(tf.float32, [None, 3, 3, 52], name='local_two_state_n')
self.agent_stats_n = tf.compat.v1.placeholder(tf.int32, [None, 16], name='agent_stats_n')
self.target_stats_n = tf.compat.v1.placeholder(tf.int32, [None, 15], name='target_stats_n')
# Probability distribution and labels - whether or not this state belongs to expert buffer
self.probs = tf.compat.v1.placeholder(tf.float32, [None, 1], name='probs')
self.labels = tf.compat.v1.placeholder(tf.float32, [None, 1], name='labels')
# For V-AIRL
self.use_noise = tf.compat.v1.placeholder(
shape=[1], dtype=tf.float32, name="noise"
)
self.z_sigma_g = None
self.z_sigma_h = None
if self.use_vairl:
self.z_sigma_g = tf.compat.v1.get_variable(
'z_sigma_g',
100,
dtype=tf.float32,
initializer=tf.compat.v1.ones_initializer(),
)
self.z_sigma_g_sq = self.z_sigma_g * self.z_sigma_g
self.z_log_sigma_g_sq = tf.compat.v1.log(self.z_sigma_g_sq + eps)
self.z_sigma_h = tf.compat.v1.get_variable(
"z_sigma_h",
100,
dtype=tf.float32,
initializer=tf.compat.v1.ones_initializer(),
)
self.z_sigma_h_sq = self.z_sigma_h * self.z_sigma_h
self.z_log_sigma_h_sq = tf.compat.v1.log(self.z_sigma_h_sq + eps)
# Reward Function
with tf.compat.v1.variable_scope('reward'):
self.reward, self.z_g = self.conv_net(self.global_state, self.local_state, self.local_two_state,
self.agent_stats, self.target_stats, with_action = self.with_action,
z_sigma=self.z_sigma_g, use_noise=self.use_noise)
# Value Function
if with_value:
with tf.compat.v1.variable_scope('value'):
self.value, self.z_h = self.conv_net(self.global_state, self.local_state, self.local_two_state,
self.agent_stats, self.target_stats,
z_sigma=self.z_sigma_h, use_noise=self.use_noise, with_action=False)
with tf.compat.v1.variable_scope('value', reuse=True):
self.value_n, self.z_1_h = self.conv_net(self.global_state_n, self.local_state_n,
self.local_two_state_n,
self.agent_stats_n, self.target_stats_n,
z_sigma=self.z_sigma_h, use_noise=self.use_noise, with_action=False)
self.f = self.reward + self.gamma * self.value_n - self.value
else:
self.f = self.reward
# Discriminator
self.discriminator = tf.math.divide(tf.math.exp(self.f), tf.math.add(tf.math.exp(self.f), self.probs))
# Loss Function
self.loss = -tf.reduce_mean((self.labels * tf.math.log(self.discriminator + eps)) + (
(1 - self.labels) * tf.math.log(1 - self.discriminator + eps)))
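# The two expressions above follow the AIRL-style discriminator
#   D(s,a) = exp(f(s,a)) / (exp(f(s,a)) + pi(a|s)),
# where self.probs carries the policy probability pi(a|s); the loss is the binary
# cross-entropy that pushes D toward 1 on expert samples (labels == 1) and toward 0
# on policy samples (labels == 0).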
# Loss function modification for V-AIRL
if self.use_vairl:
# Define beta
self.beta = tf.compat.v1.get_variable(
"airl_beta",
[],
trainable=False,
dtype=tf.float32,
initializer=tf.compat.v1.ones_initializer(),
)
# Number of batch element
self.batch = tf.compat.v1.shape(self.z_g)[0]
self.batch_index = tf.dtypes.cast(self.batch / 2, tf.int32)
self.kl_loss = tf.reduce_mean(
-tf.reduce_sum(
1
+ self.z_log_sigma_g_sq
- 0.5 * tf.square(
self.z_g[0:self.batch_index, :] * self.z_h[0:self.batch_index, :] * self.z_1_h[
0:self.batch_index,
:])
- 0.5 * tf.square(
self.z_g[self.batch_index:, :] * self.z_h[self.batch_index:, :] * self.z_1_h[
self.batch_index:, :])
- tf.exp(self.z_log_sigma_g_sq),
1,
)
)
self.loss = self.beta * (self.kl_loss - self.mutual_information) + self.loss
# Adam optimizer with gradient clipping
optimizer = tf.compat.v1.train.AdamOptimizer(self.lr)
gradients, variables = zip(*optimizer.compute_gradients(self.loss))
gradients, _ = tf.compat.v1.clip_by_global_norm(gradients, 1.0)
self.step = optimizer.apply_gradients(zip(gradients, variables))
#self.step = tf.compat.v1.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss)
if self.use_vairl:
self.make_beta_update()
self.vs = vs
self.saver = tf.compat.v1.train.Saver(max_to_keep=None)
## Layers
def linear(self, inp, inner_size, name='linear', bias=True, activation = None, init = None):
with tf.compat.v1.variable_scope(name):
lin = tf.compat.v1.layers.dense(inp, inner_size, name=name, activation=activation, use_bias=bias,
kernel_initializer=init)
return lin
def conv_layer_2d(self, input, filters, kernel_size, strides=(1, 1), padding="SAME", name='conv', activation=None,
bias = True):
with tf.compat.v1.variable_scope(name):
conv = tf.compat.v1.layers.conv2d(input, filters, kernel_size, strides, padding=padding, name=name,
activation=activation, use_bias=bias)
return conv
def embedding(self, input, indices, size, name='embs'):
with tf.compat.v1.variable_scope(name):
shape = (indices, size)
stddev = min(0.1, sqrt(2.0 / (product(xs=shape[:-1]) + shape[-1])))
initializer = tf.random.normal(shape=shape, stddev=stddev, dtype=tf.float32)
W = tf.Variable(
initial_value=initializer, trainable=True, validate_shape=True, name='W',
dtype=tf.float32, shape=shape
)
return tf.nn.tanh(tf.compat.v1.nn.embedding_lookup(params=W, ids=input, max_norm=None))
# Network specification
def conv_net(self, global_state, local_state, local_two_state, agent_stats, target_stats, z_sigma=None,
use_noise=None, with_action=False):
conv_10 = self.conv_layer_2d(global_state, 32, [1, 1], name='conv_10', activation=tf.nn.tanh)
conv_11 = self.conv_layer_2d(conv_10, 32, [3, 3], name='conv_11', activation=tf.nn.leaky_relu)
conv_12 = self.conv_layer_2d(conv_11, 32, [3, 3], name='conv_12', activation=tf.nn.leaky_relu)
fc11 = tf.reshape(conv_12, [-1,10*10*32])
embs_41 = tf.nn.tanh(self.embedding(agent_stats, 129, 32, name='embs_41'))
embs_41 = tf.reshape(embs_41, [-1, 16 * 32])
fc_41 = self.linear(embs_41, 100, name = 'fc_41', activation=tf.nn.leaky_relu)
embs_51 = self.embedding(target_stats, 125, 32, name='embs_51')
embs_51 = tf.reshape(embs_51, [-1, 15 * 32])
fc_51 = self.linear(embs_51, 100, name = 'fc_51', activation = tf.nn.leaky_relu)
all_flat = tf.concat([fc11, fc_41, fc_51], axis=1)
all_flat = self.linear(all_flat, 32, name='fc1', activation=tf.nn.leaky_relu)
if with_action:
hot_acts = tf.one_hot(self.acts, self.actions_size)
hot_acts = tf.reshape(hot_acts, [-1, self.actions_size])
all_flat = tf.concat([all_flat, hot_acts], axis=1)
z_mean = None
fc2 = self.linear(all_flat, 32, name='fc2', activation = tf.nn.leaky_relu)
# In case we want to use V-AIRL
if self.use_vairl:
z_mean = self.linear(fc2, 32, name='z_mean', init=tf.compat.v1.initializers.variance_scaling(0.01))
noise = tf.compat.v1.random_normal(tf.compat.v1.shape(z_mean), dtype=tf.float32)
z = z_mean + z_sigma * noise * use_noise
fc2 = z
return self.linear(fc2, 1, name='out'), z_mean
else:
return self.linear(fc2, 1, name='out'), None
# Train method of the discriminator
def train(self):
losses = []
# Update discriminator
for it in range(self.num_itr):
expert_batch_idxs = random.sample(range(len(self.expert_traj['obs'])), self.batch_size)
policy_batch_idxs = random.sample(range(len(self.buffer['obs'])), self.batch_size)
#expert_batch_idxs = np.random.randint(0, len(expert_traj['obs']), batch_size)
#policy_batch_idxs = np.random.randint(0, len(policy_traj['obs']), batch_size)
expert_obs = [self.expert_traj['obs'][id] for id in expert_batch_idxs]
policy_obs = [self.buffer['obs'][id] for id in policy_batch_idxs]
expert_obs_n = [self.expert_traj['obs_n'][id] for id in expert_batch_idxs]
policy_obs_n = [self.buffer['obs_n'][id] for id in policy_batch_idxs]
expert_acts = [self.expert_traj['acts'][id] for id in expert_batch_idxs]
policy_acts = [self.buffer['acts'][id] for id in policy_batch_idxs]
expert_probs = []
for (index, state) in enumerate(expert_obs):
_, probs = self.select_action(state)
expert_probs.append(probs[expert_acts[index]])
policy_probs = []
for (index, state) in enumerate(policy_obs):
_, probs = self.select_action(state)
policy_probs.append(probs[policy_acts[index]])
expert_probs = np.asarray(expert_probs)
policy_probs = np.asarray(policy_probs)
labels = np.ones((self.batch_size, 1))
labels = np.concatenate([labels, np.zeros((self.batch_size, 1))])
e_states = self.obs_to_state(expert_obs)
p_states = self.obs_to_state(policy_obs)
all_global = np.concatenate([e_states[0], p_states[0]], axis=0)
all_local = np.concatenate([e_states[1], p_states[1]], axis=0)
all_local_two = np.concatenate([e_states[2], p_states[2]], axis=0)
import argparse
import numpy as np
import os, sys
from numpy import linalg as LA
import math
from PIL import Image
from matplotlib import pyplot as plt
import random
try:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
except:
pass
import cv2
def circle(x,y,map,x_center,y_center,radius):
if (x-x_center)**2+(y-y_center)**2<=(radius+Robot_dia/2)**2:
map[x,y,:]=[0,0,0]
return map
def rectrangle(x,y,map,x_min,y_min,x_max,y_max):
if x in range (int(x_min-Robot_dia/2),int(x_max+Robot_dia/2)+1) and y in range(int(y_min-Robot_dia/2),int(y_max+Robot_dia/2)+1):
map[x,y,:]=[0,0,0]
return map
def Map():
map=255*np.array(np.ones((1110,1010,3)),dtype=np.uint8)
for X in range (map.shape[0]):
for Y in range(map.shape[1]):
map=circle(X,Y,map,390,965,81/2) # 1
map=circle(X,Y,map,438,736,81/2) # 2
map=circle(X,Y,map,438,274,81/2) # 3
map=circle(X,Y,map,390,45,81/2) # 4
map=circle(X,Y,map,149.95,830.05,159.9/2) # 5
map=circle(X,Y,map,309.73,830.05,159.9/2) # 6
map=rectrangle(X,Y,map,149.95,750.1,309.73,910) # 7
map=rectrangle(X,Y,map,438,315,529,315+183) # 8
map=rectrangle(X,Y,map,438+91,35+152+78,438+91+183,35+152+78+76) # 9
map=rectrangle(X,Y,map,1110-636,35,1110-636+274,35+152) # 10
map=rectrangle(X,Y,map,1110-425,0,1110,35) # 11
map=rectrangle(X,Y,map,1110-183,35,1110,35+76) # 12
map=rectrangle(X,Y,map,1110-117-31-183,35,1110-31-183,35+58) # 13
map=rectrangle(X,Y,map,1110-58,1010-313-76-55.5-117-86-67.25-117,1110,1010-313-76-55.5-117-86-67.25) # 14
map=rectrangle(X,Y,map,438+91+183+72.5,35+152+80,438+91+183+72.5+152,35+152+80+117) # 15
map=rectrangle(X,Y,map,1110-91,1010-313-76-55.5-117-86,1110,1010-313-76-55.5-117) # 16
map=rectrangle(X,Y,map,1110-58,1010-313-76-55.5-117,1110,1010-313-76-55.5) # 17
map=rectrangle(X,Y,map,1110-366,1010-313-76,1110,1010-313) # 18
map=rectrangle(X,Y,map,1110-192-86,1010-183,1110-192,1010) # 19
map=rectrangle(X,Y,map,1110-84-43,1010-91,1110-84,1010) # 20
if X in range(int(Robot_dia/2)+1) or X in range(map.shape[0]-int(Robot_dia/2)-1,map.shape[0]):
map[X,Y,:]=[0,0,0]
if Y in range(int(Robot_dia/2)+1) or Y in range(map.shape[1]-int(Robot_dia/2)-1,map.shape[1]):
map[X,Y,:]=[0,0,0]
return map
def Huerastic_distance(x_src,y_src,x_dest,y_dest):
return np.sqrt((x_src-x_dest)**2+(y_src-y_dest)**2)
def Step(x,y,theta,RPM_L,RPM_R):
instants=50
for i in range(instants):
x=x+Wheel_Dia*math.pi*(RPM_L+RPM_R)*math.cos(theta)*time/(120*instants)
y=y+Wheel_Dia*math.pi*(RPM_L+RPM_R)*math.sin(theta)*time/(120*instants)
theta=theta+Wheel_Dia*math.pi*(RPM_R-RPM_L)*time/(60*Wheel_distance*instants)
return x , y ,theta
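# Step() integrates standard differential-drive kinematics over `time` seconds split
# into `instants` sub-steps: linear speed v = pi*Wheel_Dia*(RPM_L+RPM_R)/120 (cm/s)
# and angular speed w = pi*Wheel_Dia*(RPM_R-RPM_L)/(60*Wheel_distance) (rad/s).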
def NodeCheck(Master_mat,X,Y):
flag_n=True
threshold=2 # less than buffer
explore_limit=5000
if len(Master_mat)<=explore_limit:
for i in range(len(Master_mat)):
if X<Master_mat[i,2]+threshold and X>Master_mat[i,2]-threshold and Y<Master_mat[i,3]+threshold and Y>Master_mat[i,3]-threshold:
flag_n=False
else :
l=len(Master_mat)
for i in range(1,explore_limit):
if X<Master_mat[l-i,2]+threshold and X>Master_mat[l-i,2]-threshold and Y<Master_mat[l-i,3]+threshold and Y>Master_mat[l-i,3]-threshold:
flag_n=False
return flag_n
def boundry_check(x,y):
flag=0
X=int(x)
Y=int(y)
if int(x) in range(0,1110) and int(y) in range(0,1010):
flag=1
return flag
def Explore_choice(Master_mat,Parent_X,Parent_Y,Parent_Theta,Parent_Cost,Parent_index,RPM_L,RPM_R,weight,flag):
x_new,y_new,theta_new=Step(Parent_X,Parent_Y,Parent_Theta,RPM_L,RPM_R)
if(boundry_check(x_new,y_new)==1 and Map[int(x_new),int(y_new),0]==255 and flag==True and NodeCheck(Master_mat,x_new,y_new)==True):
#cost=Parent_Cost+weight
Map[int(x_new),int(y_new),:]=[0,0,200]
cost=Parent_Cost+Huerastic_distance(Parent_X,Parent_Y,x_new,y_new)
Distance=Huerastic_distance(x_new,y_new,Goal[0],Goal[1])
stack=np.mat([len(Master_mat),Parent_index,x_new,y_new,theta_new,cost,cost+Distance,(RPM_L*2*22)/(7*60),(RPM_R*2*22)/(7*60)])
Master_mat=np.vstack((Master_mat,stack))
if int(x_new) in range(Goal[0]-buffer,Goal[0]+buffer) and int(y_new) in range(Goal[1]-buffer,Goal[1]+buffer):
flag=False
return Master_mat,flag
def pointexplore(index,Master_mat,Map,flag):
Parent_index=Master_mat[index,0]
Parent_X=Master_mat[index,2]
Parent_Y=Master_mat[index,3]
Parent_Theta=Master_mat[index,4]
Parent_Cost=Master_mat[index,5]
RPM_L,RPM_R,weight=RPM1,0,cost_of_step[0,0]
Master_mat,flag=Explore_choice(Master_mat,Parent_X,Parent_Y,Parent_Theta,Parent_Cost,Parent_index,RPM_L,RPM_R,weight,flag)
RPM_L,RPM_R,weight=RPM2,0,cost_of_step[0,1]
Master_mat,flag=Explore_choice(Master_mat,Parent_X,Parent_Y,Parent_Theta,Parent_Cost,Parent_index,RPM_L,RPM_R,weight,flag)
RPM_L,RPM_R,weight=RPM2,RPM1,cost_of_step[0,2]
Master_mat,flag=Explore_choice(Master_mat,Parent_X,Parent_Y,Parent_Theta,Parent_Cost,Parent_index,RPM_L,RPM_R,weight,flag)
RPM_L,RPM_R,weight=RPM1,RPM1,cost_of_step[0,3]
Master_mat,flag=Explore_choice(Master_mat,Parent_X,Parent_Y,Parent_Theta,Parent_Cost,Parent_index,RPM_L,RPM_R,weight,flag)
RPM_L,RPM_R,weight=RPM2,RPM2,cost_of_step[0,4]
Master_mat,flag=Explore_choice(Master_mat,Parent_X,Parent_Y,Parent_Theta,Parent_Cost,Parent_index,RPM_L,RPM_R,weight,flag)
RPM_L,RPM_R,weight=RPM1,RPM2,cost_of_step[0,5]
Master_mat,flag=Explore_choice(Master_mat,Parent_X,Parent_Y,Parent_Theta,Parent_Cost,Parent_index,RPM_L,RPM_R,weight,flag)
RPM_L,RPM_R,weight=0,RPM1,cost_of_step[0,6]
Master_mat,flag=Explore_choice(Master_mat,Parent_X,Parent_Y,Parent_Theta,Parent_Cost,Parent_index,RPM_L,RPM_R,weight,flag)
RPM_L,RPM_R,weight=0,RPM2,cost_of_step[0,7]
Master_mat,flag=Explore_choice(Master_mat,Parent_X,Parent_Y,Parent_Theta,Parent_Cost,Parent_index,RPM_L,RPM_R,weight,flag)
Master_mat[index,6]=math.inf
return Master_mat,flag
def backtrack(Master_mat):
back_mat=[Master_mat[-1,0]]
Found=1
while(Found!=0):
pointer=int(back_mat[-1])
Found=Master_mat[pointer,1]
print(Found)
back_mat=np.append([back_mat],[Found])
return back_mat
# function returns matrix with final path
def backtrackmat(back_mat,Node,Map):
backtrackmat=np.array(np.zeros((len(back_mat),3)))
Velocity_matrix=np.array(np.zeros((len(back_mat),2)))
for a in range(len(back_mat)):
i=len(back_mat)-a-1
value=int(back_mat[a])
backtrackmat[i,0]=Node[value,2]
backtrackmat[i,1]=Node[value,3]
backtrackmat[i,2]=Node[value,4]
Velocity_matrix[i,0]=Node[value,7]
Velocity_matrix[i,1]=Node[value,8]
return backtrackmat,Velocity_matrix,Map
def Step_plot(Path,Velocity_matrix,Map):
instants=50
for i in range(1,len(Path)):
theta=Path[i-1,2]
x=Path[i-1,0]
y=Path[i-1,1]
RPM_L=Velocity_matrix[i,0]*60*7/(2*22)
RPM_R=Velocity_matrix[i,1]*60*7/(2*22)
for j in range(instants):
x=x+Wheel_Dia*math.pi*(RPM_L+RPM_R)*math.cos(theta)*time/(120*instants)
y=y+Wheel_Dia*math.pi*(RPM_L+RPM_R)*math.sin(theta)*time/(120*instants)
theta=theta+Wheel_Dia*math.pi*(RPM_R-RPM_L)*time/(60*Wheel_distance*instants)
Map[int(x),int(y),:]=[0,200,0]
Map_path=cv2.rotate(Map.copy(), cv2.ROTATE_90_COUNTERCLOCKWISE)
scale_p = cv2.resize(Map_path,None,fx=0.5, fy=0.5, interpolation = cv2.INTER_CUBIC)
cv2.imshow('map_p',scale_p)
cv2.waitKey(10)
return Map
######----------------------------------------------------------
# Initialize
RPM1= 15 # in RPM
RPM2= 20 # in RPM
#Goal=[100,500] # in cm
#Start=[100,100] # in cm
Wheel_Dia=7.6 # in cm
Wheel_distance=23 # in cm
Robot_dia=35.4 # in cm
Allowable_clerance=2 # in cm
time=2 # in sec
theta=0 # in degrees
sampling_frequency=1 # samples in a sec.
buffer=3 # in cm
####-----------------------------------------------------------
print('Map is getting generated')
Map=Map()
error=0
print("Input co-ordinates has origin at bottom left" )
print("Input co-ordinates of x between 20 & 1090")
print("Input co-ordinates of y between 20 & 990")
Startx=float(input('Value of Start x \n'))
Starty=float(input('Value of Start y \n'))
Endx=float(input('Value of Goal x \n'))
Endy=float(input('Value of Goal y \n'))
Startx=int(Startx)
Starty=int(Starty)
Endx=int(Endx)
Endy=int(Endy)
# Error conditions
if Startx not in range(1110) and Starty not in range(1010):
print('Start point out of bound')
error=1
if Endx not in range(1110) or Endy not in range(1010):
print('End point out of bound')
error=1
if (error==0):
if (Map[Startx,Starty,0]==0):
print('Start point is in obstacle')
error=1
if (Map[Endx,Endy,0]==0):
print('End point is in obstacle')
error=1
Start=[Startx,Starty]
Goal=[Endx,Endy]
#######------------------------------------------------------------------
cost_of_step = np.mat([1,1,1,1,1,1,1,1,1])
#!/usr/bin/python -B
# -*- coding: utf-8 -*-
"""
Created on Apr 2014
@author: <NAME>
"""
import site
site.addsitedir('.')
import sys
import argparse
from time import time
import numpy as np
from clmat import HAS_PYOPENCL, CPU, GPU, DEVICE_TYPE_MAP, REDUCTION_ENUM, Computer, Mat, get_gpu
import warnings
# Verbose levels vl must be >= to this
vl_error_summary = 0
vl_full_summary = 1
vl_times = 2
vl_cmds = 3
vl_data = 4
dt_names = {np.single: 's', np.double: 'd'}
dt_colors = {np.single: 36, np.double: 46}
def correct_nan_inf(x0, x1):
nans = np.isnan(x0) * np.isnan(x1)
posinfs = np.isposinf(x0) * np.isposinf(x1)
neginfs = np.isneginf(x0) * np.isneginf(x1)
if isinstance(x0, np.ndarray):
x0 = x0.copy()
x1 = x1.copy()
x0[nans + posinfs + neginfs] = 0
x1[nans + posinfs + neginfs] = 0
elif nans or posinfs or neginfs:
x0 *= 0
x1 *= 0
return x0, x1
def rel_err(x0, x1, x0_baseline=False):
x0 = np.array(x0)
x1 = np.array(x1)
if x0_baseline:
normalizer = np.linalg.norm(x0.flatten(), 2)
else:
normalizer = (np.linalg.norm(x0.flatten(), 2) +
np.linalg.norm(x1.flatten(), 2))/2
if normalizer == 0:
normalizer = 1
return np.linalg.norm((x1-x0).flatten(), 2)/normalizer
def max_rel_diff(x0, x1, x0_baseline=False):
x0 = np.array(x0)
__all__ = ["Scalar", "Vector", "Matrix"]
class Scalar(object):
"""
Scalar variable type.
It holds a 64-bits floating point value, stored via a zero-dimensional
``ndl``, listen to changes, and fix or unfix its value.
Parameters
----------
value : float
Initial value.
"""
__slots__ = [
"raw",
"_fixed",
"value",
"__array_interface__",
"__array_struct__",
"_bounds",
]
def __init__(self, value):
from ndarray_listener import ndl
from numpy import float64, inf
self._bounds = (-inf, +inf)
self._fixed = False
value = ndl(float64(value))
self.raw = value
self.__array_interface__ = value.__array_interface__
self.__array_struct__ = value.__array_struct__
@property
def bounds(self):
return self._bounds
@bounds.setter
def bounds(self, v):
self._bounds = v
def copy(self):
"""Return a copy."""
return Scalar(self.raw)
@property
def shape(self):
"""
Shape according to :mod:`numpy`.
"""
return self.raw.shape
@property
def ndim(_):
"""
Number of dimensions.
"""
return 0
@property
def size(self):
"""
Size according to :mod:`numpy`.
"""
return self.raw.size
def asarray(self):
"""
Return a :class:`numpy.ndarray` representation.
"""
from numpy import array
return array(self.raw)
@property
def isfixed(self):
"""
Return whether it is fixed or not.
"""
return self._fixed
def fix(self):
"""
Set it fixed.
"""
self._fixed = True
def unfix(self):
"""
Set it unfixed.
"""
self._fixed = False
def listen(self, you):
"""
Request a callback for value modification.
Parameters
----------
you : object
An instance having ``__call__`` attribute.
"""
self.raw.talk_to(you)
def __setattr__(self, name, value):
from numpy import float64
if name == "value":
try:
value = float64(value)
except TypeError:
value = value[0]
self.raw.itemset(value)
else:
Scalar.__dict__[name].__set__(self, value)
def __getattr__(self, name):
if name == "value":
name = "raw"
return Scalar.__dict__[name].__get__(self)
def __str__(self):
return "Scalar(" + str(self.raw) + ")"
def __repr__(self):
return repr(self.raw)
def __ge__(self, that):
return self.raw >= that.raw
def __gt__(self, that):
return self.raw > that.raw
def __le__(self, that):
return self.raw <= that.raw
def __lt__(self, that):
return self.raw < that.raw
def __eq__(self, that):
return self.raw == that.raw
def __ne__(self, that):
return self.raw != that.raw
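# Usage sketch for Scalar (illustrative only; `on_change` is a hypothetical callable):
#   s = Scalar(1.5)
#   s.listen(on_change)   # on_change is invoked when the underlying value changes
#   s.value = 2.0         # updates the zero-dimensional ndl in place
#   s.fix(); s.isfixed    # -> True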
class Vector(object):
"""
Vector variable type.
It holds an array of 64-bits floating point values, via an one-dimensional
``ndl``, listen to changes, and fix or unfix its values.
Parameters
----------
value : float
Initial value.
"""
__slots__ = [
"raw",
"_fixed",
"__array_interface__",
"__array_struct__",
"value",
"_bounds",
]
def __init__(self, value):
from numpy import asarray, atleast_1d, inf
from ndarray_listener import ndl
self._bounds = [(-inf, +inf)] * len(value)
self._fixed = False
value = asarray(value, float)
value = ndl(atleast_1d(value).ravel())
self.raw = value
self.__array_interface__ = value.__array_interface__
self.__array_struct__ = value.__array_struct__
@property
def bounds(self):
return self._bounds
@bounds.setter
def bounds(self, v):
self._bounds = v
def copy(self):
"""
Return a copy.
"""
return Vector(self.raw)
@property
def shape(self):
"""
Shape according to :mod:`numpy`.
"""
return self.raw.shape
@property
def ndim(self):
"""
Number of dimensions.
"""
return len(self.shape)
@property
def size(self):
"""
Size according to :mod:`numpy`.
"""
return self.raw.size
def asarray(self):
"""
Return a :class:`numpy.ndarray` representation.
"""
from numpy import array
return array(self.raw)
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 2 13:41:28 2016
@author: <NAME>
steele{AT}cbs{dot}mpg{dot}de
"""
import numpy as np
from os.path import sep as pathsep
import sys
#TODO: hardcoded for now, make relative before release
sys.path.append('/home/chris/Documents/code/python/cbstools-python/cbstoolsjcc-3.1.0.1-py2.7-linux-x86_64.egg')
import cbstoolsjcc as cj
from defaults import * #ATLAS_DIR and TOPOLOGY_LUT_DIR
def normalise(img_d):
return (img_d - np.min(img_d))/np.max(img_d)
def setup_JVM(JVM_initialheap = '4000M', JVM_maxheap = '4000M'):
"""
initialise the JVM, setting the initial and maximum heap sizes with reasonable defaults for memory
:param JVM_initialheap:
:param JVM_maxheap:
:return:
"""
try:
res=cj.initVM(initialheap=JVM_initialheap,maxheap=JVM_maxheap)
print(res)
print("Java virtual machine successfully started.")
except ValueError:
print("A java virtual machine is already running.")
def ExtractBrainRegion():
pass
def Mp2rageSkullStripping():
pass
def IntensityBackgroundEstimator():
pass
def SurfaceProbabilityToLevelset():
pass
def get_affine_orientation_slice(a):
# get the orientation of the affine, and the slice order
import nibabel as nb
ori=nb.aff2axcodes(a)
if ori[-1] == "I" or ori[-1] == "S":
slc = "AXIAL"
elif ori[-1] == "L" or ori[-1] == "R":
slc="SAGITTAL"
else:
slc="CORONAL"
return ori, slc
def get_affine_orientation(a):
import nibabel.orientations as orient
return orient.io_orientation(a) #orientation of the x, y, z
def flip_affine_data_orientation(d,a,flipLR = False,flipAP = False, flipIS = False):
if flipLR:
a[1,1]=a[1,1]*-1
if flipAP:
a[2,2] = a[2,2] * -1
#d=d[:,::-1,:]
if flipIS:
a[3,3] = a[3,3]*-1
return d,a
def MGDMBrainSegmentation(input_filename_type_list, output_dir = None, num_steps = 5, atlas_file=None,
topology_lut_dir = None):
"""
Perform MGDM segmentation
:param input_filename_type_list: list of [[fname1,type1],[fname2,type2],...] - for a maximum of 4 inputs
:param output_dir: full path to the output directory
:param num_steps: number of steps for (default 5, set to 0 for testing)
:param atlas_file: full path to the atlas file, default set in defaults.py
:param topology_lut_dir: full path to the directory with the topology files, default set in defaults.py
:return:
"""
from nibabel.orientations import io_orientation, inv_ornt_aff, apply_orientation, ornt_transform
import os
print("Thank you for choosing the MGDM segmentation from the cbstools for your brain segmentation needs")
print("Sit back and relax, let the magic of algorithms happen...")
print("")
if output_dir is None:
output_dir = os.path.dirname(input_filename_type_list[0][0])
if atlas_file is None:
atlas = os.path.join(ATLAS_DIR,'brain-atlas-3.0.3.txt')
else:
atlas = atlas_file
if topology_lut_dir is None:
topology_lut_dir = TOPOLOGY_LUT_DIR # grabbing this from the default settings in defaults.py
else:
if not(topology_lut_dir[-1] == os.sep): #if we don't end in a path sep, we need to make sure that we add it
topology_lut_dir += os.sep
print("Atlas file: " + atlas)
print("Topology LUT durectory: " + topology_lut_dir)
print("")
if not any(isinstance(el, list) for el in input_filename_type_list): #make into list of lists
input_filename_type_list = [input_filename_type_list]
#now we setup the mgdm specfic settings
mgdm = cj.BrainMgdmMultiSegmentation2()
mgdm.setAtlasFile(atlas)
mgdm.setTopologyLUTdirectory(topology_lut_dir)
mgdm.setOutputImages('segmentation')
# --> mgdm.setOrientations(mgdm.AXIAL, mgdm.R2L, mgdm.A2P, mgdm.I2S) # this is the default for MGDM, <--
# mgdm.setOrientations(mgdm.AXIAL, mgdm.L2R, mgdm.P2A, mgdm.I2S) #LR,PA,IS is always how they are returned from nibabel
mgdm.setAdjustIntensityPriors(False) # default is True
mgdm.setComputePosterior(False)
mgdm.setDiffuseProbabilities(False)
mgdm.setSteps(num_steps)
mgdm.setTopology('wcs') # {'wcs','no'} no=off for testing, wcs=default
for idx,con in enumerate(input_filename_type_list):
print("Input files and filetypes:")
print(" " + str(idx+1) + " "),
print(con)
#flipLR = False
#flipAP = False
#flipIS = False
fname = con[0]
type = con[1]
d,d_aff,d_head = niiLoad(fname,return_header=True)
## usage example in the proc_file function of : https://github.com/nipy/nibabel/blob/master/bin/parrec2nii
ornt_orig = io_orientation(d_aff)
ornt_mgdm = io_orientation(np.diag([-1, -1, 1, 1]).dot(d_aff)) # -1 -1 1 LPS (mgdm default); 1 1 1 is RAS
ornt_chng = ornt_transform(ornt_mgdm, ornt_orig) # to get from MGDM to our original input
# convert orientation information to mgdm slice and orientation info
aff_orients,aff_slc = get_affine_orientation_slice(d_aff)
print("data orientation: " + str(aff_orients)),
print("slice settings: " + aff_slc)
print("mgdm orientation: " + str(ornt_mgdm))
print("data orientation: " + str(ornt_orig))
if aff_slc == "AXIAL":
SLC=mgdm.AXIAL
elif aff_slc == "SAGITTAL":
SLC=mgdm.SAGITTAL
else:
SLC=mgdm.CORONAL
for aff_orient in aff_orients: #TODO: if anything is different from the default MGDM settings, we need to flip axes of the data at the end
if aff_orient == "L":
LR=mgdm.R2L
elif aff_orient == "R":
LR = mgdm.L2R
# flipLR = True
elif aff_orient == "A":
AP = mgdm.P2A
#flipAP = True
elif aff_orient == "P":
AP = mgdm.A2P
elif aff_orient == "I":
IS = mgdm.S2I
#flipIS = True
elif aff_orient == "S":
IS = mgdm.I2S
mgdm.setOrientations(SLC, LR, AP, IS) #L2R,P2A,I2S is nibabel default (i.e., RAS)
if idx+1 == 1:
# we use the first image to set the dimensions and resolutions
res = d_head.get_zooms()
res = [a1.item() for a1 in res] # cast to regular python float type
mgdm.setDimensions(d.shape[0], d.shape[1], d.shape[2])
mgdm.setResolutions(res[0], res[1], res[2])
# keep the shape and affine from the first image for saving
d_shape = np.array(d.shape)
out_root_fname = os.path.basename(fname)[0:os.path.basename(fname).find('.')] #assumes no periods in filename, :-/
mgdm.setContrastImage1(cj.JArray('float')((d.flatten('F')).astype(float)))
mgdm.setContrastType1(type)
elif idx+1 == 2:
mgdm.setContrastImage2(cj.JArray('float')((d.flatten('F')).astype(float)))
mgdm.setContrastType2(type)
elif idx + 1 == 3:
mgdm.setContrastImage3(cj.JArray('float')((d.flatten('F')).astype(float)))
mgdm.setContrastType3(type)
elif idx + 1 == 4:
mgdm.setContrastImage4(cj.JArray('float')((d.flatten('F')).astype(float)))
mgdm.setContrastType4(type)
try:
print("Executing MGDM on your inputs")
print("Don't worry, the magic is happening!")
mgdm.execute()
print(os.path.join(output_dir, out_root_fname + '_seg_cjs.nii.gz'))
# outputs
# reshape fortran stype to convert back to the format the nibabel likes
seg_im = np.reshape(np.array(mgdm.getSegmentedBrainImage(), dtype=np.uint32), d_shape,'F')
lbl_im = np.reshape(np.array(mgdm.getPosteriorMaximumLabels4D(), dtype=np.uint32), d_shape, 'F')
ids_im = np.reshape(np.array(mgdm.getSegmentedIdsImage(), dtype=np.uint32), d_shape, 'F')
# fix orientation back to the input orientation :-/ not really working
# seg_im = apply_orientation(seg_im, ornt_chng) # this takes care of the orientations between mipav and input
# lbl_im = apply_orientation(lbl_im, ornt_chng) # TODO: fix the origin point offset?, 2x check possible RL flip
# ids_im = apply_orientation(ids_im, ornt_chng) # alternative: register? https://github.com/pyimreg
#
# save
seg_file = os.path.join(output_dir, out_root_fname + '_seg_cjs.nii.gz')
lbl_file = os.path.join(output_dir, out_root_fname + '_lbl_cjs.nii.gz')
ids_file = os.path.join(output_dir, out_root_fname + '_ids_cjs.nii.gz')
## this will work, but the solution with nibabel.orientations is much cleaner
# if our settings were not the same as MGDM likes, we need to flip the relevant settings:
#d_aff_new = flip_affine_orientation(d_aff, flipLR=flipLR, flipAP=flipAP, flipIS=flipIS)
d_head['data_type'] = np.array(32).astype('uint32') #convert the header as well
d_head['cal_max'] = np.max(seg_im) #max for display
niiSave(seg_file, seg_im, d_aff, header=d_head, data_type='uint32')
d_head['cal_max'] = np.max(lbl_im)
niiSave(lbl_file, lbl_im, d_aff, header=d_head, data_type='uint32')
d_head['cal_max'] = np.max(ids_im) # convert the header as well
niiSave(ids_file, ids_im, d_aff, header=d_head, data_type='uint32')
print("Data stored in: " + output_dir)
except:
print("--- MGDM failed. Go cry. ---")
return
print("Execution completed")
return seg_im,d_aff,d_head
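# Illustrative call (the input file name and output directory are hypothetical):
#   seg_im, aff, head = MGDMBrainSegmentation([['sub01_mprage.nii.gz', 'Mprage3T']],
#                                             output_dir='/tmp/mgdm_out', num_steps=5)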
def MGDMBrainSegmentation_v2(con1_files, con1_type, con2_files=None, con2_type=None,
con3_files=None, con3_type=None, con4_files=None, con4_type=None,
output_dir = None, num_steps = 5, topology = 'wcs', atlas_file=None,
topology_lut_dir = None, adjust_intensity_priors = False, compute_posterior = False,
diffuse_probabilities = False, file_suffix = None):
"""
Perform MGDM segmentation
simplified inputs
adjust_intensity_priors is supposed to be True??? totally screws up :-/
:param con1_files: List of files for contrast 1, required
:param con1_type: Contrast 1 type (from get_MGDM_seg_contrast_names(atlas_file))
:param con2_files: List of files for contrast 2, optional, must be matched to con1_files
:param con2_type: Contrast 2 type
:param con3_files: List of files for contrast 3, optional, must be matched to con1_files
:param con3_type: Contrast 3 type
:param con4_files: List of files for contrast 4, optional, must be matched to con1_files
:param con4_type: Contrast 4 type
:param output_dir: Directory to place output, defaults to input directory if = None
:param num_steps: Number of steps for MGDM, default = 5, set to 0 for quicker testing (but worse quality segmentation)
:param topology: Topology setting {'wcs', 'no'} ('no' for no topology)
:param atlas_file: Atlas file full path and filename
:param topology_lut_dir: Directory for topology files
:param adjust_intensity_priors: Adjust intensity priors based on dataset: True/False
:param compute_posterior: Copmute posterior: True/False
:param diffuse_probabilities: Compute diffuse probabilities: True/False
:param file_suffix: Distinguishing text to add to the end of the filename
:return:
"""
#from nibabel.orientations import io_orientation, inv_ornt_aff, apply_orientation, ornt_transform
import os
print("Thank you for choosing the MGDM segmentation from the cbstools for your brain segmentation needs")
print("Sit back and relax, let the magic of algorithms happen...")
print("")
out_files_seg = []
out_files_lbl = []
out_files_ids = []
if output_dir is None:
output_dir = os.path.dirname(con1_files[0])
if atlas_file is None:
atlas = os.path.join(ATLAS_DIR,'brain-atlas-3.0.3.txt')
else:
atlas = atlas_file
create_dir(output_dir)
if topology_lut_dir is None:
topology_lut_dir = TOPOLOGY_LUT_DIR # grabbing this from the default settings in defaults.py
else:
if not(topology_lut_dir[-1] == pathsep): #if we don't end in a path sep, we need to make sure that we add it
topology_lut_dir += pathsep
print("Atlas file: " + atlas)
print("Topology LUT durectory: " + topology_lut_dir)
print("")
if not isinstance(con1_files, list): # make into lists if they were not
con1_files = [con1_files]
if con2_files is not None and not isinstance(con2_files, list): # make into list of lists
con2_files = [con2_files]
if con3_files is not None and not isinstance(con3_files, list): # make into list of lists
con3_files = [con3_files]
if con4_files is not None and not isinstance(con4_files, list): # make into list of lists
con4_files = [con4_files]
#now we setup the mgdm specfic settings
mgdm = cj.BrainMgdmMultiSegmentation2()
mgdm.setAtlasFile(atlas)
mgdm.setTopologyLUTdirectory(topology_lut_dir)
mgdm.setOutputImages('segmentation')
# --> mgdm.setOrientations(mgdm.AXIAL, mgdm.R2L, mgdm.A2P, mgdm.I2S) # this is the default for MGDM, <--
# mgdm.setOrientations(mgdm.AXIAL, mgdm.L2R, mgdm.P2A, mgdm.I2S) #LR,PA,IS is always how they are returned from nibabel
mgdm.setAdjustIntensityPriors(adjust_intensity_priors) # default is True
mgdm.setComputePosterior(compute_posterior)
mgdm.setDiffuseProbabilities(diffuse_probabilities)
mgdm.setSteps(num_steps)
mgdm.setTopology(topology) # {'wcs','no'} no=off for testing, wcs=default
for idx,con1 in enumerate(con1_files):
print("Input files and filetypes:")
print(con1_type + ":\t" + con1.split(pathsep)[-1])
fname = con1
type = con1_type
d,d_aff,d_head = niiLoad(fname,return_header=True)
# convert orientation information to mgdm slice and orientation info
# aff_orients,aff_slc = get_affine_orientation_slice(d_aff)
# print("data orientation: " + str(aff_orients)),
# print("slice settings: " + aff_slc)
# if aff_slc == "AXIAL":
# SLC=mgdm.AXIAL
# elif aff_slc == "SAGITTAL":
# SLC=mgdm.SAGITTAL
# else:
# SLC=mgdm.CORONAL
# for aff_orient in aff_orients: #TODO: if anything is different from the default MGDM settings, we need to flip axes of the data at the end
# if aff_orient == "L":
# LR=mgdm.R2L
# elif aff_orient == "R":
# LR = mgdm.L2R
# # flipLR = True
# elif aff_orient == "A":
# AP = mgdm.P2A
# #flipAP = True
# elif aff_orient == "P":
# AP = mgdm.A2P
# elif aff_orient == "I":
# IS = mgdm.S2I
# #flipIS = True
# elif aff_orient == "S":
# IS = mgdm.I2S
#mgdm.setOrientations(SLC, LR, AP, IS) #L2R,P2A,I2S is nibabel default (i.e., RAS)
# we use the first image to set the dimensions and resolutions
res = d_head.get_zooms()
res = [a1.item() for a1 in res] # cast to regular python float type
mgdm.setDimensions(d.shape[0], d.shape[1], d.shape[2])
mgdm.setResolutions(res[0], res[1], res[2])
# keep the shape and affine from the first image for saving
d_shape = np.array(d.shape)
out_root_fname = os.path.basename(fname)[0:os.path.basename(fname).find('.')] # assumes no periods in filename, :-/
mgdm.setContrastImage1(cj.JArray('float')((d.flatten('F')).astype(float)))
mgdm.setContrastType1(type)
if con2_files is not None: #only bother with the other contrasts if something is in the one before it
print(con2_type + ":\t" + con2_files[idx].split(pathsep)[-1])
d, a = niiLoad(con2_files[idx], return_header=False)
mgdm.setContrastImage2(cj.JArray('float')((d.flatten('F')).astype(float)))
mgdm.setContrastType2(con2_type)
if con3_files is not None:
print(con3_type + ":\t" + con3_files[idx].split(pathsep)[-1])
d, a = niiLoad(con3_files[idx], return_header=False)
mgdm.setContrastImage3(cj.JArray('float')((d.flatten('F')).astype(float)))
mgdm.setContrastType3(con3_type)
if con4_files is not None:
print(con4_type + ":\t" + con4_files[idx].split(pathsep)[-1])
d, a = niiLoad(con4_files[idx], return_header=False)
mgdm.setContrastImage4(cj.JArray('float')((d.flatten('F')).astype(float)))
mgdm.setContrastType4(con4_type)
try:
print("Executing MGDM on your inputs")
print("Don't worry, the magic is happening!")
## ---------------------------- MGDM MAGIC START ---------------------------- ##
mgdm.execute()
## ---------------------------- MGDM MAGIC END ---------------------------- ##
# outputs
# reshape fortran stype to convert back to the format the nibabel likes
seg_im = np.reshape(np.array(mgdm.getSegmentedBrainImage(), dtype=np.uint32), d_shape,'F')
lbl_im = np.reshape(np.array(mgdm.getPosteriorMaximumLabels4D(), dtype=np.uint32), d_shape, 'F')
ids_im = np.reshape(np.array(mgdm.getSegmentedIdsImage(), dtype=np.uint32), d_shape, 'F')
# filenames for saving
if file_suffix is not None:
seg_file = os.path.join(output_dir, out_root_fname + '_seg' + file_suffix + '.nii.gz')
lbl_file = os.path.join(output_dir, out_root_fname + '_lbl' + file_suffix + '.nii.gz')
ids_file = os.path.join(output_dir, out_root_fname + '_ids' + file_suffix + '.nii.gz')
else:
seg_file = os.path.join(output_dir, out_root_fname + '_seg.nii.gz')
lbl_file = os.path.join(output_dir, out_root_fname + '_lbl.nii.gz')
ids_file = os.path.join(output_dir, out_root_fname + '_ids.nii.gz')
d_head['data_type'] = np.array(32).astype('uint32') #convert the header as well
d_head['cal_max'] = np.max(seg_im) #max for display
niiSave(seg_file, seg_im, d_aff, header=d_head, data_type='uint32')
d_head['cal_max'] = np.max(lbl_im)
niiSave(lbl_file, lbl_im, d_aff, header=d_head, data_type='uint32')
d_head['cal_max'] = np.max(ids_im) # convert the header as well
niiSave(ids_file, ids_im, d_aff, header=d_head, data_type='uint32')
print("Data stored in: " + output_dir)
print("")
out_files_seg.append(seg_file)
out_files_lbl.append(lbl_file)
out_files_ids.append(ids_file)
except:
print("--- MGDM failed. Go cry. ---")
return
print("Execution completed")
return out_files_seg, out_files_lbl, out_files_ids
def compare_atlas_segs_priors(seg_file_orig,seg_file_new,atlas_file_orig=None,atlas_file_new=None,
metric_contrast_name=None,background_idx=1,seg_null_value=0):
"""
Compare a new segmentation and atlas priors to another. Comparison is made relative to the orig
:param seg_file_orig:
:param atlas_file_orig:
:param seg_file_new:
:param atlas_file_new:
:param metric_contrast_name: Contrast type from atlas file
:return:
"""
import numpy as np
d1, a1 = niiLoad(seg_file_orig,return_header=False)
d2, a2 = niiLoad(seg_file_new, return_header=False)
idxs1 = np.unique(d1)
idxs2 = np.unique(d2)
[lut1, con_idx1, lut_rows1, priors1] = extract_lut_priors_from_atlas(atlas_file_orig, metric_contrast_name)
#TODO: make sure that all indices are in both segs? or just base it all on the gold standard?
for struc_idx in lut1.Index:
# for struc_idx in idxs1:
if not(struc_idx == background_idx):
print("Structure index: {0}, {1}").format(struc_idx,lut1.index[lut1.Index==struc_idx][0])
bin_vol = np.zeros_like(d1)
bin_vol[d1 == struc_idx] = 1
dice = np.sum(bin_vol[d2 == struc_idx]) * 2.0 / (np.sum(bin_vol) + np.sum(d2 == struc_idx))
print("Dice similarity: {}").format(dice)
#identify misclassifications
bin_vol = np.ones_like(d1) * seg_null_value
bin_vol[d1==struc_idx] = 1
overlap = np.multiply(bin_vol,d2)
overlap_idxs = np.unique(overlap)
overlap_idxs = np.delete(overlap_idxs,np.where(overlap_idxs == struc_idx)) #remove the idx that we should be at the moment
overlap_idxs = np.delete(overlap_idxs,np.where(overlap_idxs == seg_null_value)) #remove the null value, now left with the overlap with things we don't want :-(
#print overlap_idxs
#TODO: overlap comparison here
#[lut2, con_idx2, lut_rows2, priors2] = extract_lut_priors_from_atlas(atlas_file_new, metric_contrast_name)
#TODO: based on overlap comparison, adjust intensity priors
return lut1
def seg_erode(seg_d, iterations=1, background_idx=1,
structure=None, min_vox_count=5, seg_null_value=0,
VERBOSE=False):
"""
Binary erosion (or dilation) of integer type segmentation data (np.array) with options
If iterations < 0, performs binary dilation
:param seg_d: np.array of segmentation, integers
:param iterations: number of erosion iterations, if negative, provides the number of dilations (in this case, min_vox_count not used)
:param background_idx: value for background index, currently ignored (TODO: remove)
:param structure: binary structure for erosion from scipy.ndimage (ndimage.morphology.generate_binary_structure(3,1))
:param min_vox_count: minimun number of voxels to allow to be in a segmentation, if less, does not erode
:param seg_null_value: value to set as null for binary erosion step (i.e., a value NOT in your segmentation index)
:param VERBOSE: spit out loads of text to stdout, because you can.
:return: seg_shrunk_d eroded (or dilated) version of segmentation
"""
import scipy.ndimage as ndi
import numpy as np
if iterations >= 0:
pos_iter = True
else:
iterations = iterations*-1
pos_iter = False
if structure is None:
structure = ndi.morphology.generate_binary_structure(3, 1)
if seg_null_value == 0:
seg_shrunk_d = np.zeros_like(seg_d)
temp_d = np.zeros_like(seg_d)
else:
seg_shrunk_d = np.ones_like(seg_d) * seg_null_value
temp_d = np.ones_like(seg_d) * seg_null_value
seg_idxs = np.unique(seg_d)
if seg_null_value in seg_idxs:
print("Shit, your null value is also an index. This will not work.")
print("Set it to a suitably strange value that is not already an index. {0,999}")
return None
if VERBOSE:
print("Indices:")
for seg_idx in seg_idxs:
if VERBOSE:
print(seg_idx),
if (background_idx is not None) and (background_idx == seg_idx):
seg_shrunk_d[seg_d == seg_idx] = seg_idx # just set the value to the bckgrnd value, and be done with it
if VERBOSE:
print("[bckg]"),
else:
temp_d[seg_d == seg_idx] = 1
for idx in range(0, iterations): # messy, does not exit the loop when already gone too far. but it still works
if pos_iter:
temp_temp_d = ndi.binary_erosion(temp_d, iterations=1, structure=structure)
else:
temp_temp_d = ndi.binary_dilation(temp_d, iterations=1, structure=structure)
if np.sum(temp_temp_d) >= min_vox_count:
temp_d = temp_temp_d
if VERBOSE:
print("[y]"),
else:
if VERBOSE:
print("[no]"),
seg_shrunk_d[temp_d == 1] = seg_idx
temp_d[:, :, :] = seg_null_value
if VERBOSE:
print(seg_idx)
if VERBOSE:
print("")
return seg_shrunk_d
def extract_metrics_from_seg(seg_d, metric_d, seg_idxs=None,norm_data=True,
background_idx=1, seg_null_value=0,
percentile_top_bot=[75, 25],
return_normed_metric_d=False):
"""
Extract median and interquartile range from metric file given a co-registered segmentation
:param seg_d: segmentation data (integers)
:param metric_d: metric data to extract seg-specific values from
:param seg_idxs: indices of segmentation, usually taken from LUT but can be generated based on seg_d
:param norm_data: perform data normalisation on metric_d prior to extracting values from metric
:param background_idx: index for background data, currently treated as just another index (TODO: remove)
:param seg_null_value: value to set as null for binary erosion step, not included in metric extraction
:param percentile_top_bot: top and bottom percentiles to extract from each seg region
:param return_normed_metric_d: return the normalised metric as an np matrix, must also set norm_data=True
:return: seg_idxs, res segmentation indices and results matrix of median, 75, 25 percentliles
(metric_d) optional metric_d scaled between 0 and 1
"""
import numpy as np
if seg_idxs is None:
seg_idxs = np.unique(seg_d)
if (seg_null_value is not None) and (seg_null_value in seg_idxs): #remove the null value from the idxs so we don't look
np.delete(seg_idxs,np.where(seg_idxs==seg_null_value))
res = np.zeros((len(seg_idxs), 3))
if norm_data: # rescale the data to 0
if background_idx is not None: # we need to exclude the background data from the norming
metric_d[seg_d != background_idx] = (metric_d[seg_d != background_idx] - np.min(
metric_d[seg_d != background_idx])) / (np.max(metric_d[seg_d != background_idx]) - np.min(
metric_d[seg_d != background_idx]))
else:
metric_d = (metric_d - np.min(metric_d)) / (np.max(metric_d) - np.min(metric_d))
for idx, seg_idx in enumerate(seg_idxs):
d_1d = np.ndarray.flatten(metric_d[seg_d == seg_idx])
res[idx, :] = [np.median(d_1d),
np.percentile(d_1d, np.max(percentile_top_bot)),
np.percentile(d_1d, np.min(percentile_top_bot))]
if return_normed_metric_d:
return seg_idxs, res, metric_d
else:
return seg_idxs, res
def extract_lut_priors_from_atlas(atlas_file,contrast_name):
"""
Given an MGDM segmentation priors atlas file, extract the lut and identify the start index (in the file) of the
contrast of interest, and the number of rows of priors that it should have. Returns pandas dataframe of lut,
contrast index, number of rows in prior definition, and pd.DataFrame of priors,
:param atlas_file: full path to atlas file for lut and metric index extraction
:param contrast_name: intensity prior contrast name as listed in the metric file
:return: lut, con_idx, lut_rows, priors
"""
import pandas as pd
fp = open(atlas_file)
for i, line in enumerate(fp):
if "Structures:" in line: # this is the beginning of the LUT
lut_idx = i
lut_rows = map(int, [line.split()[1]])[0] + 1 #+1 to ensure that the last line is included
if "Intensity Prior:" in line:
if contrast_name in line:
con_idx = i
fp.close()
# dump lut and priors values into pandas dataframes
lut = pd.read_csv(atlas_file, sep="\t+",
skiprows=lut_idx + 1, nrows=lut_rows, engine='python',
names=["Index", "Type"])
priors = pd.read_csv(atlas_file, sep="\t+",
skiprows=con_idx + 1, nrows=lut_rows, engine='python',
names=["Median", "Spread", "Weight"])
return lut,con_idx,lut_rows,priors
def write_priors_to_atlas(prior_medians,prior_quart_diffs,atlas_file,new_atlas_file,metric_contrast_name):
"""
Write modified priors of given metric contrast to new_atlas
Assumes that the ordering of indices and the ordering of the priors are the same
(could add prior_weights as well, in future, and use something more structured than just line reading and writing)
:param prior_medians: 2xN list of prior medians
:param prior_quart_diffs: 2xN list of prior quartile differences
:param atlas_file: full path to original atlas file
:param new_atlas_file: full path to new atlas file to be written to
:param metric_contrast_name: name of MGDM metric contrast from atlas_file
"""
import pandas as pd
#get the relevant information from the old atlas file
[lut, con_idx, lut_rows, priors] = extract_lut_priors_from_atlas(atlas_file, metric_contrast_name)
seg_idxs = lut.Index.get_values() #np vector of index values
priors_new = pd.DataFrame.copy(priors)
#update the priors with the new ones that were passed
#TODO: double-check this
for idx in lut.Index:
priors_new[lut["Index"] == idx] = [prior_medians[seg_idxs == idx], prior_quart_diffs[seg_idxs == idx],1]
priors_new_string = priors_new.to_csv(sep="\t", header=False, float_format="%.2f")
priors_new_string_lines = priors_new_string.split("\n")[0:-1] # convert to list of lines, cut the last empty '' line
fp = open(atlas_file)
fp_new = open(new_atlas_file, "w")
ii = 0
# only replace the lines that we changed
for i, line in enumerate(fp):
if i > con_idx and i < con_idx + lut_rows:
fp_new.write(priors_new_string_lines[ii] + "\n")
ii += 1
else:
fp_new.write(line)
fp.close()
fp_new.close()
print('New atlas file written to: \n' + fp_new.name)
return fp_new.name
def filter_sigmoid(d, x0=0.002, slope=0.0005, output_fname=None):
"""
Pass data through a sigmoid filter (scaled between 0 and 1). Defaults set for MD rescaling
If you are lazy and pass it a filename, it will pass you back the data with affine and header
:param d:
:param x0:
:param slope:
:return:
"""
import numpy as np
from scipy.stats import linregress
return_nii_parts = False
if not isinstance(d, (np.ndarray, np.generic) ):
try:
[d,a,h]=niiLoad(d,return_header=True)
return_nii_parts = True
except:
print("niiLoad tried to load this is a file and failed, are you calling it properly?")
return
# if x0 is None: #we can see what we can do to generate the mean , not a great solution TODO: improve x0,slope calc
# d_subset = d[d>0]
# d_subset = d_subset[ np.where(np.logical_and(d_subset < np.percentile(d_subset, 95),d_subset > np.percentile(d_subset,75)))]
# x0 = np.median(d_subset)
# print("x0 calculated from the data: %.6F") %x0
# if slope is None:
# x=d_subset[d_subset>x0]
# y=d_subset[d_subset<x0]
# print((linregress(x,y)))
# slope = np.abs(linregress(x,y)[0])
# print("Slope calculated from the data: %.6F") %slope
if output_fname is not None and return_nii_parts:
niiSave(output_fname,d,a,h)
if return_nii_parts:
return 1 / (1 + np.exp(-1 * (d - x0) / slope)), a, h
else:
return 1/(1 + np.exp(-1 * (d - x0) / slope))
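# Usage sketch (the input file name is hypothetical): passing a filename makes the
# function load the image, apply the sigmoid rescaling, optionally save it, and
# return the filtered data together with its affine and header:
#   md_filt, aff, head = filter_sigmoid('md_map.nii.gz', output_fname='md_sigmoid.nii.gz')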
def niiLoad(nii_fname,return_header=False):
"""
Load nii data into numpy array, along with aff and header as desired
:param nii_fname:
:param return_affine:
:param return_header:
:return:
"""
import nibabel as nb
img=nb.load(nii_fname)
if return_header:
return img.get_data(), img.affine, img.header
else:
return img.get_data(), img.affine
def niiSave(nii_fname,d,affine,header=None,data_type=None):
"""
Save nifti image to file
:param nii_fname:
:param d:
:param affine:
:param header: text of numpy data_type (e.g. 'uint32','float32')
:param data_type:
:return:
"""
import nibabel as nb
if data_type is not None:
d.astype(data_type)
img=nb.Nifti1Image(d,affine,header=header)
if data_type is not None:
img.set_data_dtype(data_type)
img.to_filename(nii_fname)
return nii_fname
def create_dir(some_directory):
"""
Create directory recursively if it does not exist
- uses os.mkdirs
"""
import os
if not os.path.exists(some_directory):
os.makedirs(some_directory)
def get_MGDM_seg_contrast_names(atlas_file):
"""
Return a list of contrast names that are available as intensity priors in the MGDM atlas that you are using
:param atlas_file: atlas file
:return: seg_contrast_names list of names of contrasts that have intensity priors available
"""
seg_contrast_names = []
fp = open(atlas_file)
for i, line in enumerate(fp):
if "Structures:" in line: # this is the beginning of the LUT
lut_idx = i
lut_rows = map(int, [line.split()[1]])[0]
if "Intensity Prior:" in line:
seg_contrast_names.append(line.split()[-1])
fp.close()
return seg_contrast_names
def generate_group_intensity_priors(orig_seg_files,metric_files,metric_contrast_name,
atlas_file,erosion_iterations=1, min_quart_diff=0.1,
seg_null_value = 0, background_idx = 1,
VERBOSE=False, intermediate_output_dir=None):
"""
generates group intensity priors for metric_files based on orig_seg files (i.e., orig_seg could be Mprage3T and metric_files could be DWIFA3T)
does not do the initial segmentation for you, that needs to be done first :-)
we assume that you already did due-diligence and have matched lists of inputs (orig_seg_files and metric_files)
:param orig_seg_files: segmentation from other modality
:param metric_files: metric files in same space as orig_seg_files
:param metric_contrast_name: name of contrast from priors atlas file, not used currently
:param atlas_file: prior atlas file (use os.path.join(ATLAS_DIR,DEFAULT_ATLAS))
:param erosion_iterations: number of voxels to erode from each segmented region prior to metric extraction
:param min_quart_diff: minimum difference between quartiles to accept, otherwise replace with this
:param seg_null_value: null value for segmentation results (choose a value that is not in your seg, usually 0)
:param background_idx: background index value (usually 1, to leave 0 as a seg_null_value)
:param VERBOSE:
:return: medians, spread metric-specific prior medians and spread for atlas file
"""
import nibabel as nb
import numpy as np
import os
MGDM_contrast_names = get_MGDM_seg_contrast_names(atlas_file)
if metric_contrast_name not in MGDM_contrast_names:
print("You have not chosen a valid contrast for your metric_contrast_name, please choose from: ")
print(", ".join(MGDM_contrast_names))
return [None, None]
[lut,con_idx,lut_rows,priors] = extract_lut_priors_from_atlas(atlas_file, metric_contrast_name)
seg_idxs = lut.Index
all_Ss_priors_median = np.array(seg_idxs) #always put the seg_idxs on top row!
all_Ss_priors_spread = np.array(seg_idxs)
#seg_null_value = 0 #value to fill in when we are NOT using the voxels at all (not background and not other index)
#background_idx = 1
#min_quart_diff = 0.10 #minimun spread allowed in priors atlas
# make a list if we only input one dataset
if len(orig_seg_files) == 1:
orig_seg_files = [orig_seg_files]
if len(metric_files) == 1:
metric_files = [metric_files]
if not(len(orig_seg_files) == len(metric_files)):
print("You do not have the same number of segmentation and metric files. Bad!")
print("Exiting")
return [None, None]
if erosion_iterations >0:
print("Performing segmentation erosion on each segmented region with %i step(s)" % erosion_iterations)
for idx, seg_file in enumerate(orig_seg_files):
metric_file = metric_files[idx]
img=nb.load(metric_file)
d_metric = img.get_data()
a_metric = img.affine #not currently using the affine and header, but could also output the successive steps
h_metric = img.header
print(seg_file.split(pathsep)[-1])
print(metric_file.split(pathsep)[-1])
d_seg = nb.load(seg_file).get_data()
#erode our data
if erosion_iterations>0:
d_seg_ero = seg_erode(d_seg,iterations=erosion_iterations,
background_idx=background_idx,
seg_null_value=seg_null_value)
else:
d_seg_ero = d_seg
#extract summary metrics (median, 75 and 25 percentile) from metric file
[seg_idxs, seg_stats] = extract_metrics_from_seg(d_seg_ero, d_metric, seg_idxs=seg_idxs,
seg_null_value=seg_null_value,
return_normed_metric_d=False)
prior_medians = seg_stats[:, 0]
prior_quart_diffs = np.squeeze(np.abs(np.diff(seg_stats[:, 1:3])))
prior_quart_diffs[prior_quart_diffs < min_quart_diff] = min_quart_diff
#now place this output into a growing array for use on the group level
all_Ss_priors_median = np.vstack((all_Ss_priors_median, prior_medians))
all_Ss_priors_spread = np.vstack((all_Ss_priors_spread, prior_quart_diffs))
import os
import sys
import yaml
import numpy as np
import torch
import torch.utils.data as data
import numpy as np
import numpy.random as npr
import cv2
import copy
import glob
import scipy
import datasets
from config.config import cfg
from transforms3d.quaternions import mat2quat, quat2mat
from utils.se3 import *
from utils.pose_error import *
from utils.cython_bbox import bbox_overlaps
_SUBJECTS = [
'20200709-subject-01',
'20200813-subject-02',
'20200820-subject-03',
'20200903-subject-04',
'20200908-subject-05',
'20200918-subject-06',
'20200928-subject-07',
'20201002-subject-08',
'20201015-subject-09',
'20201022-subject-10',
]
_SERIALS = [
'836212060125',
'839512060362',
'840412060917',
'841412060263',
'932122060857',
'932122060861',
'932122061900',
'932122062010',
]
_YCB_CLASSES = {
1: '002_master_chef_can',
2: '003_cracker_box',
3: '004_sugar_box',
4: '005_tomato_soup_can',
5: '006_mustard_bottle',
6: '007_tuna_fish_can',
7: '008_pudding_box',
8: '009_gelatin_box',
9: '010_potted_meat_can',
10: '011_banana',
11: '019_pitcher_base',
12: '021_bleach_cleanser',
13: '024_bowl',
14: '025_mug',
15: '035_power_drill',
16: '036_wood_block',
17: '037_scissors',
18: '040_large_marker',
19: '051_large_clamp',
20: '052_extra_large_clamp',
21: '061_foam_brick',
}
_MANO_JOINTS = [
'wrist',
'thumb_mcp',
'thumb_pip',
'thumb_dip',
'thumb_tip',
'index_mcp',
'index_pip',
'index_dip',
'index_tip',
'middle_mcp',
'middle_pip',
'middle_dip',
'middle_tip',
'ring_mcp',
'ring_pip',
'ring_dip',
'ring_tip',
'little_mcp',
'little_pip',
'little_dip',
'little_tip'
]
_MANO_JOINT_CONNECT = [
[0, 1], [ 1, 2], [ 2, 3], [ 3, 4],
[0, 5], [ 5, 6], [ 6, 7], [ 7, 8],
[0, 9], [ 9, 10], [10, 11], [11, 12],
[0, 13], [13, 14], [14, 15], [15, 16],
[0, 17], [17, 18], [18, 19], [19, 20],
]
_BOP_EVAL_SUBSAMPLING_FACTOR = 4
class dex_ycb_dataset(data.Dataset):
def __init__(self, setup, split, obj_list):
self._setup = setup
self._split = split
self._color_format = "color_{:06d}.jpg"
self._depth_format = "aligned_depth_to_color_{:06d}.png"
self._label_format = "labels_{:06d}.npz"
self._height = 480
self._width = 640
# paths
self._name = 'dex_ycb_' + setup + '_' + split
self._image_set = split
self._dex_ycb_path = self._get_default_path()
path = os.path.join(self._dex_ycb_path, 'data')
self._data_dir = path
self._calib_dir = os.path.join(self._data_dir, "calibration")
self._model_dir = os.path.join(self._data_dir, "models")
self._obj_file = {
k: os.path.join(self._model_dir, v, "textured_simple.obj")
for k, v in _YCB_CLASSES.items()
}
# define all the classes
self._classes_all = ('002_master_chef_can', '003_cracker_box', '004_sugar_box', '005_tomato_soup_can', '006_mustard_bottle', \
'007_tuna_fish_can', '008_pudding_box', '009_gelatin_box', '010_potted_meat_can', '011_banana', '019_pitcher_base', \
'021_bleach_cleanser', '024_bowl', '025_mug', '035_power_drill', '036_wood_block', '037_scissors', '040_large_marker', \
'051_large_clamp', '052_extra_large_clamp', '061_foam_brick')
self._num_classes_all = len(self._classes_all)
self._class_colors_all = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255), \
(128, 0, 0), (0, 128, 0), (0, 0, 128), (128, 128, 0), (128, 0, 128), (0, 128, 128), \
(64, 0, 0), (0, 64, 0), (0, 0, 64), (64, 64, 0), (64, 0, 64), (0, 64, 64),
(192, 0, 0), (0, 192, 0), (0, 0, 192)]
self._extents_all = self._load_object_extents()
self._posecnn_class_indexes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 21]
# compute class index
class_index = []
for name in obj_list:
for i in range(self._num_classes_all):
if name == self._classes_all[i]:
class_index.append(i)
break
print('class index:', class_index)
self._class_index = class_index
# select a subset of classes
self._classes = obj_list
self._num_classes = len(self._classes)
self._class_colors = [self._class_colors_all[i] for i in class_index]
self._extents = self._extents_all[class_index]
self._points, self._points_all = self._load_object_points(self._classes, self._extents)
# Seen subjects, camera views, grasped objects.
if self._setup == 's0':
if self._split == 'train':
subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
serial_ind = [0, 1, 2, 3, 4, 5, 6, 7]
sequence_ind = [i for i in range(100) if i % 5 != 4]
if self._split == 'val':
subject_ind = [0, 1]
serial_ind = [0, 1, 2, 3, 4, 5, 6, 7]
sequence_ind = [i for i in range(100) if i % 5 == 4]
if self._split == 'test':
subject_ind = [2, 3, 4, 5, 6, 7, 8, 9]
serial_ind = [0, 1, 2, 3, 4, 5, 6, 7]
sequence_ind = [i for i in range(100) if i % 5 == 4]
# Unseen subjects.
if self._setup == 's1':
if self._split == 'train':
subject_ind = [0, 1, 2, 3, 4, 5, 9]
serial_ind = [0, 1, 2, 3, 4, 5, 6, 7]
sequence_ind = list(range(100))
if self._split == 'val':
subject_ind = [6]
serial_ind = [0, 1, 2, 3, 4, 5, 6, 7]
sequence_ind = list(range(100))
if self._split == 'test':
subject_ind = [7, 8]
serial_ind = [0, 1, 2, 3, 4, 5, 6, 7]
sequence_ind = list(range(100))
# Unseen camera views.
if self._setup == 's2':
if self._split == 'train':
subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
serial_ind = [0, 1, 2, 3, 4, 5]
sequence_ind = list(range(100))
if self._split == 'val':
subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
serial_ind = [6]
sequence_ind = list(range(100))
if self._split == 'test':
subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
serial_ind = [7]
sequence_ind = list(range(100))
# Unseen grasped objects.
if self._setup == 's3':
if self._split == 'train':
subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
serial_ind = [0, 1, 2, 3, 4, 5, 6, 7]
sequence_ind = [
i for i in range(100) if i // 5 not in (3, 7, 11, 15, 19)
]
if self._split == 'val':
subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
serial_ind = [0, 1, 2, 3, 4, 5, 6, 7]
sequence_ind = [i for i in range(100) if i // 5 in (3, 19)]
if self._split == 'test':
subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
serial_ind = [0, 1, 2, 3, 4, 5, 6, 7]
sequence_ind = [i for i in range(100) if i // 5 in (7, 11, 15)]
self._subjects = [_SUBJECTS[i] for i in subject_ind]
self._serials = [_SERIALS[i] for i in serial_ind]
self._intrinsics = []
for s in self._serials:
intr_file = os.path.join(self._calib_dir, "intrinsics", "{}_{}x{}.yml".format(s, self._width, self._height))
with open(intr_file, 'r') as f:
intr = yaml.load(f, Loader=yaml.FullLoader)
intr = intr['color']
self._intrinsics.append(intr)
# build mapping
self._sequences = []
self._mapping = []
self._ycb_ids = []
offset = 0
for n in self._subjects:
seq = sorted(os.listdir(os.path.join(self._data_dir, n)))
seq = [os.path.join(n, s) for s in seq]
assert len(seq) == 100
seq = [seq[i] for i in sequence_ind]
self._sequences += seq
for i, q in enumerate(seq):
meta_file = os.path.join(self._data_dir, q, "meta.yml")
with open(meta_file, 'r') as f:
meta = yaml.load(f, Loader=yaml.FullLoader)
c = np.arange(len(self._serials))
f = np.arange(meta['num_frames'])
f, c = np.meshgrid(f, c)
c = c.ravel()
f = f.ravel()
s = (offset + i) * np.ones_like(c)
m = np.vstack((s, c, f)).T
self._mapping.append(m)
self._ycb_ids.append(meta['ycb_ids'])
offset += len(seq)
self._mapping = np.vstack(self._mapping)
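        # each row of self._mapping is a (sequence index, camera index, frame index) triplet;
        # __getitem__ and get_bop_id_from_idx unpack these triplets to locate a single image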
# sample a subset for training
if split == 'train':
self._mapping = self._mapping[::10]
# dataset size
self._size = len(self._mapping)
print('dataset %s with images %d' % (self._name, self._size))
def __len__(self):
return self._size
def get_bop_id_from_idx(self, idx):
s, c, f = map(lambda x: x.item(), self._mapping[idx])
scene_id = s * len(self._serials) + c
im_id = f
return scene_id, im_id
def __getitem__(self, idx):
s, c, f = self._mapping[idx]
is_testing = f % _BOP_EVAL_SUBSAMPLING_FACTOR == 0
if self._split == 'test' and not is_testing:
sample = {'is_testing': is_testing}
return sample
scene_id, im_id = self.get_bop_id_from_idx(idx)
video_id = '%04d' % (scene_id)
image_id = '%06d' % (im_id)
# posecnn result path
posecnn_result_path = os.path.join(self._dex_ycb_path, 'results_posecnn', self._name, 'vgg16_dex_ycb_epoch_16.checkpoint.pth',
video_id + '_' + image_id + '.mat')
d = os.path.join(self._data_dir, self._sequences[s], self._serials[c])
roidb = {
'color_file': os.path.join(d, self._color_format.format(f)),
'depth_file': os.path.join(d, self._depth_format.format(f)),
'label_file': os.path.join(d, self._label_format.format(f)),
'intrinsics': self._intrinsics[c],
'ycb_ids': self._ycb_ids[s],
'posecnn': posecnn_result_path,
}
# Get the input image blob
im_color, im_depth = self._get_image_blob(roidb['color_file'], roidb['depth_file'])
# build the label blob
im_label, intrinsic_matrix, poses, gt_boxes, poses_result, rois_result, labels_result \
= self._get_label_blob(roidb, self._num_classes)
is_syn = 0
im_scale = 1.0
im_info = np.array([im_color.shape[1], im_color.shape[2], im_scale, is_syn], dtype=np.float32)
sample = {'image_color': im_color[:, :, (2, 1, 0)],
'image_depth': im_depth,
'label': im_label,
'intrinsic_matrix': intrinsic_matrix,
'gt_poses': poses,
'gt_boxes': gt_boxes,
'poses_result': poses_result,
'rois_result': rois_result,
'labels_result': labels_result,
'extents': self._extents,
'points': self._points_all,
'im_info': im_info,
'video_id': video_id,
'image_id': image_id}
if self._split == 'test':
sample['is_testing'] = is_testing
return sample
def _get_image_blob(self, color_file, depth_file):
# rgba
rgba = cv2.imread(color_file, cv2.IMREAD_UNCHANGED)
if rgba.shape[2] == 4:
im = np.copy(rgba[:,:,:3])
alpha = rgba[:,:,3]
I = np.where(alpha == 0)
im[I[0], I[1], :] = 0
else:
im = rgba
im_color = im.astype('float') / 255.0
# depth image
im_depth = cv2.imread(depth_file, cv2.IMREAD_UNCHANGED)
im_depth = im_depth.astype('float') / 1000.0
return im_color, im_depth
def _get_label_blob(self, roidb, num_classes):
""" build the label blob """
# parse data
cls_indexes = roidb['ycb_ids']
classes = np.array(self._class_index)
fx = roidb['intrinsics']['fx']
fy = roidb['intrinsics']['fy']
px = roidb['intrinsics']['ppx']
py = roidb['intrinsics']['ppy']
intrinsic_matrix = np.eye(3, dtype=np.float32)
intrinsic_matrix[0, 0] = fx
intrinsic_matrix[1, 1] = fy
intrinsic_matrix[0, 2] = px
intrinsic_matrix[1, 2] = py
label = np.load(roidb['label_file'])
# label image
im_label = label['seg']
# poses
poses = label['pose_y']
if len(poses.shape) == 2:
poses = np.reshape(poses, (1, 3, 4))
num = poses.shape[0]
assert num == len(cls_indexes), 'number of poses not equal to number of objects'
# bounding boxes
gt_boxes = np.zeros((num, 5), dtype=np.float32)
for i in range(num):
cls = int(cls_indexes[i]) - 1
ind = np.where(classes == cls)[0]
if len(ind) > 0:
R = poses[i, :, :3]
T = poses[i, :, 3]
# compute box
x3d = np.ones((4, self._points_all.shape[1]), dtype=np.float32)
x3d[0, :] = self._points_all[ind,:,0]
x3d[1, :] = self._points_all[ind,:,1]
x3d[2, :] = self._points_all[ind,:,2]
RT = np.zeros((3, 4), dtype=np.float32)
RT[:3, :3] = R
RT[:, 3] = T
x2d = np.matmul(intrinsic_matrix, np.matmul(RT, x3d))
x2d[0, :] = np.divide(x2d[0, :], x2d[2, :])
x2d[1, :] = np.divide(x2d[1, :], x2d[2, :])
gt_boxes[i, 0] = np.min(x2d[0, :])
gt_boxes[i, 1] = np.min(x2d[1, :])
gt_boxes[i, 2] = np.max(x2d[0, :])
gt_boxes[i, 3] = np.max(x2d[1, :])
gt_boxes[i, 4] = ind
# load posecnn result if available
if os.path.exists(roidb['posecnn']):
result = scipy.io.loadmat(roidb['posecnn'])
n = result['poses'].shape[0]
poses_result = np.zeros((n, 9), dtype=np.float32)
poses_result[:, 0] = 1
poses_result[:, 1] = result['rois'][:, 1]
poses_result[:, 2:] = result['poses']
rois_result = result['rois'].copy()
labels_result = result['labels'].copy()
# select the classes, one object per class
index = []
flags = np.zeros((self._num_classes, ), dtype=np.int32)
for i in range(poses_result.shape[0]):
cls = self._posecnn_class_indexes[int(poses_result[i, 1])] - 1
ind = np.where(classes == cls)[0]
if len(ind) > 0 and flags[ind] == 0:
index.append(i)
poses_result[i, 1] = ind
rois_result[i, 1] = ind
flags[ind] = 1
poses_result = poses_result[index, :]
rois_result = rois_result[index, :]
else:
# print('no posecnn result %s' % (roidb['posecnn']))
poses_result = np.zeros((0, 9), dtype=np.float32)
rois_result = np.zeros((0, 7), dtype=np.float32)
labels_result = np.zeros((0, 1), dtype=np.float32)
poses = poses.transpose((1, 2, 0))
return im_label, intrinsic_matrix, poses, gt_boxes, poses_result, rois_result, labels_result
def _get_default_path(self):
"""
        Return the default path where the DexYCB dataset is expected to be installed.
"""
return os.path.join(datasets.ROOT_DIR, 'data', 'DEX_YCB')
def _load_object_extents(self):
extents = np.zeros((self._num_classes_all, 3), dtype=np.float32)
for i in range(self._num_classes_all):
point_file = os.path.join(self._model_dir, self._classes_all[i], 'points.xyz')
print(point_file)
assert os.path.exists(point_file), 'Path does not exist: {}'.format(point_file)
points = np.loadtxt(point_file)
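            # extent = full bounding-box size along each axis (2 * max absolute coordinate),
            # assuming the model points are roughly centered at the origin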
extents[i, :] = 2 * np.max(np.absolute(points), axis=0)
return extents
def _load_object_points(self, classes, extents):
points = [[] for _ in range(len(classes))]
num = np.inf
num_classes = len(classes)
for i in range(num_classes):
point_file = os.path.join(self._model_dir, classes[i], 'points.xyz')
print(point_file)
assert os.path.exists(point_file), 'Path does not exist: {}'.format(point_file)
points[i] = np.loadtxt(point_file)
if points[i].shape[0] < num:
num = points[i].shape[0]
points_all = np.zeros((num_classes, num, 3), dtype=np.float32)
for i in range(num_classes):
points_all[i, :, :] = points[i][:num, :]
return points, points_all
def write_dop_results(self, output_dir, modality):
# only write the result file
filename = os.path.join(output_dir, 'poserbpf_' + self._name + '_' + modality + '.csv')
f = open(filename, 'w')
f.write('scene_id,im_id,obj_id,score,R,t,time\n')
# list the mat file
filename = os.path.join(output_dir, '*.mat')
files = sorted(glob.glob(filename))
# for each image
for i in range(len(files)):
filename = os.path.basename(files[i])
# parse filename
pos = filename.find('_')
scene_id = int(filename[:pos])
im_id = int(filename[pos+1:-4])
# load result
print(files[i])
result = scipy.io.loadmat(files[i])
if len(result['rois']) == 0:
continue
rois = result['rois']
num = rois.shape[0]
for j in range(num):
obj_id = self._class_index[int(rois[j, 1])] + 1
if obj_id == 0:
continue
score = rois[j, -1]
run_time = -1
# pose from network
R = quat2mat(result['poses'][j, :4].flatten())
t = result['poses'][j, 4:] * 1000
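                # BOP-style CSV row: R is written as 9 space-separated values (row-major flatten),
                # t in millimeters (hence the * 1000 above)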
line = '{scene_id},{im_id},{obj_id},{score},{R},{t},{time}\n'.format(
scene_id=scene_id,
im_id=im_id,
obj_id=obj_id,
score=score,
R=' '.join(map(str, R.flatten().tolist())),
t=' '.join(map(str, t.flatten().tolist())),
time=run_time)
f.write(line)
# close file
f.close()
# compute box
def compute_box(self, cls, intrinsic_matrix, RT):
        ind = np.where(np.array(self._class_index) == cls)[0]
x3d = np.ones((4, self._points_all.shape[1]), dtype=np.float32)
x3d[0, :] = self._points_all[ind,:,0]
x3d[1, :] = self._points_all[ind,:,1]
x3d[2, :] = self._points_all[ind,:,2]
x2d = np.matmul(intrinsic_matrix, np.matmul(RT, x3d))
x2d[0, :] = np.divide(x2d[0, :], x2d[2, :])
x2d[1, :] = np.divide(x2d[1, :], x2d[2, :])
x1 = np.min(x2d[0, :])
y1 = np.min(x2d[1, :])
x2 = np.max(x2d[0, :])
y2 = np.max(x2d[1, :])
return [x1, y1, x2, y2]
def evaluation(self, output_dir, modality):
self.write_dop_results(output_dir, modality)
filename = os.path.join(output_dir, 'results_poserbpf.mat')
if os.path.exists(filename):
results_all = scipy.io.loadmat(filename)
print('load results from file')
print(filename)
distances_sys = results_all['distances_sys']
distances_non = results_all['distances_non']
errors_rotation = results_all['errors_rotation']
errors_translation = results_all['errors_translation']
results_seq_id = results_all['results_seq_id'].flatten()
results_frame_id = results_all['results_frame_id'].flatten()
results_object_id = results_all['results_object_id'].flatten()
results_cls_id = results_all['results_cls_id'].flatten()
else:
# save results
num_max = 200000
num_results = 1
distances_sys = np.zeros((num_max, num_results), dtype=np.float32)
distances_non = np.zeros((num_max, num_results), dtype=np.float32)
errors_rotation = np.zeros((num_max, num_results), dtype=np.float32)
            errors_translation = np.zeros((num_max, num_results), dtype=np.float32)
#!/usr/bin/env python3
import os
import time
import h5py
import numpy as np
import pandas as pd
from pprint import pprint
import matplotlib.pyplot as plt
plt.style.use("../../pygama/clint.mpl")
from pygama import DataSet, read_lh5, get_lh5_header
import pygama.analysis.histograms as pgh
def main():
"""
this is the high-level part of the code, something that a user might
write (even on the interpreter) for processing with a specific config file.
"""
# process_data()
plot_data()
# plot_waveforms()
def process_data():
from pygama import DataSet
ds = DataSet(0, md="config.json")
ds.daq_to_raw(overwrite=True, test=False)
# ds.raw_to_dsp(....)
def plot_data():
"""
read the lh5 output.
"""
f_lh5 = "/Users/wisecg/Data/L200/tier1/t1_run0.lh5"
df = get_lh5_header(f_lh5)
# df = read_lh5(f_lh5)
# print(df)
exit()
# hf = h5py.File("/Users/wisecg/Data/L200/tier1/t1_run0.lh5")
# # 1. energy histogram
# wf_max = hf['/daqdata/wf_max'][...] # slice reads into memory
# wf_bl = hf['/daqdata/baseline'][...]
# wf_max = wf_max - wf_bl
# xlo, xhi, xpb = 0, 5000, 10
# hist, bins = pgh.get_hist(wf_max, range=(xlo, xhi), dx=xpb)
# plt.semilogy(bins, hist, ls='steps', c='b')
# plt.xlabel("Energy (uncal)", ha='right', x=1)
# plt.ylabel("Counts", ha='right', y=1)
# # plt.show()
# # exit()
# plt.cla()
# 2. energy vs time
# ts = hf['/daqdata/timestamp']
# plt.plot(ts, wf_max, '.b')
# plt.show()
# 3. waveforms
nevt = hf['/daqdata/waveform/values/cumulative_length'].size
# create a waveform block compatible w/ pygama
# and yeah, i know, for loops are inefficient. i'll optimize when it matters
wfs = []
wfidx = hf["/daqdata/waveform/values/cumulative_length"] # where each wf starts
wfdata = hf["/daqdata/waveform/values/flattened_data"] # adc values
    wfsel = np.arange(2000)
import torch
import torchvision
import torchvision.transforms as tvt
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
from torch import optim
import torch.nn.functional as F
import math as m
import time
import os
#from google.colab import drive
import random
from PIL import Image
from torch.autograd import Variable, variable
import numpy
import tensorflow as tf
from pathlib import Path
import pickle
import text_model
import test_retrieval
import torch_functions
#import datasets
from tqdm import tqdm as tqdm
import PIL
import argparse
import datasets
import img_text_composition_models
Path1=r"C:\MMaster\Files"
Path1=r"D:\personal\master\MyCode\files"
#Path1=r"C:\MMaster\Files"
################# Support Functions Section #################
def dataset(batch_size_all):
    trainset = datasets.Fashion200k(
path=Path1,
split='train',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size_all,
shuffle=False, num_workers=2)
return trainset,trainloader
def euclideandistance(signature,signatureimg):
from scipy.spatial import distance
return distance.euclidean(signature, signatureimg)
#.detach().numpy()
def testvaluessame():
train = datasets.Fashion200k(
path=Path1,
split='train',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
])
trig= img_text_composition_models.TIRG([t.encode().decode('utf-8') for t in train.get_all_texts()],512)
trig.load_state_dict(torch.load(Path1+r'\fashion200k.tirg.iter160k.pth' , map_location=torch.device('cpu') )['model_state_dict'])
trig.eval()
query='women/tops/blouses/91422080/91422080_0.jpeg'
qttext='replace sunrise with pleat-neck'
target='women/tops/sleeveless_and_tank_tops/90068628/90068628_0.jpeg'
text=[]
text.append(qttext)
text.append(qttext)
img = Image.open(Path1+'/'+query)
img = img.convert('RGB')
img=transform(img)
img2 = Image.open(Path1+'/'+target)
img2 = img2.convert('RGB')
img2=transform(img2)
img=img.unsqueeze_(0)
img2=img2.unsqueeze_(0)
images=torch.cat([img, img2], dim=0)
trigdataQ=trig.compose_img_text(images,text)
trigdataQ1=trig.compose_img_text(images,text)
print('...........')
print(trigdataQ)
print(trigdataQ1)
def getbetatrainNot():
train = datasets.Fashion200k(
path=Path1,
split='train',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
trig= img_text_composition_models.TIRG([t.encode().decode('utf-8') for t in train.get_all_texts()],512)
trig.load_state_dict(torch.load(Path1+r'\fashion200k.tirg.iter160k.pth' , map_location=torch.device('cpu') )['model_state_dict'])
trig.eval()
imgs = []
mods = []
trigdata=[]
target=[]
imgdata=[]
for Data in tqdm(train):
imgs += [Data['source_img_data']]
mods += [Data['mod']['str']]
target +=[Data['target_img_data']]
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)
f = trig.compose_img_text(imgs, mods).data.cpu().numpy()
target = torch.stack(target).float()
target = torch.autograd.Variable(target)
f2 = trig.extract_img_feature(target).data.cpu().numpy()
trigdata.append(f[0])
imgdata.append(f2[0])
imgs = []
mods = []
target = []
trigdata=np.array(trigdata)
imgdata=np.array(imgdata)
Ntrigdata=trigdata
Nimgdata=imgdata
Ntrig2=[]
for i in range(Ntrigdata.shape[0]):
Ntrigdata[i, :] /= np.linalg.norm(Ntrigdata[i, :])
for i in range(Nimgdata.shape[0]):
Nimgdata[i, :] /= np.linalg.norm(Nimgdata[i, :])
for i in range(Ntrigdata.shape[0]):
Ntrig2.append(np.insert(Ntrigdata[i],0, 1))
Ntrig2=np.array(Ntrig2)
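    # closed-form least squares (normal equations): with X the bias-augmented query features
    # and Y the target image features, Beta = (X^T X)^-1 X^T Y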
Ntrigdata1=Ntrig2.transpose()
X1=np.matmul(Ntrigdata1,Ntrig2)
X2=np.linalg.inv(X1)
X3=np.matmul(X2,Ntrigdata1)
Nbeta=np.matmul(X3,Nimgdata)
with open(Path1+r"/"+'BetaNot.txt', 'wb') as fp:
pickle.dump(Nbeta, fp)
def GetValuestrain15time():
with open (Path1+"/trainBetaNormalized.txt", 'rb') as fp:
BetaNormalize = pickle.load(fp)
trainset = datasets.Fashion200k(
path=Path1,
split='train',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
trainloader = trainset.get_loader(
batch_size=2,
shuffle=True,
drop_last=True,
num_workers=0)
    testset = datasets.Fashion200k(
path=Path1,
split='test',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
    trig= img_text_composition_models.TIRG([t.encode().decode('utf-8') for t in trainset.get_all_texts()],512)
trig.load_state_dict(torch.load(Path1+r'\checkpoint_fashion200k.pth' , map_location=torch.device('cpu') )['model_state_dict'])
opt = argparse.ArgumentParser()
opt.add_argument('--batch_size', type=int, default=2)
opt.add_argument('--dataset', type=str, default='fashion200k')
opt.batch_size =1
opt.dataset='fashion200k'
Results=[]
for i in range(15):
for name, dataset in [ ('train', trainset)]: #,('test', testset)]:
# betaNor="['1 ---> 5.27', '5 ---> 14.39', '10 ---> 21.6', '50 ---> 43.830000000000005', '100 ---> 55.33']"
# Results.append('No.'+str(i)+' DataSet='+name+' Type= BetaNormalized '+' Result=' +betaNor)
try:
betaNor = test_retrieval.testbetanormalizednot(opt, trig, dataset,BetaNormalize)
print(name,' BetaNormalized: ',betaNor)
Results.append('No.'+str(i)+' DataSet='+name+' Type= BetaNormalized '+' Result=' +betaNor)
except:
print('ERROR')
try:
asbook = test_retrieval.test(opt, trig, dataset)
print(name,' As PaPer: ',asbook)
                Results.append('No.'+str(i)+' DataSet='+name+' Type= As PaPer '+' Result=' +asbook)
except:
print('ERROR')
with open(Path1+r"/"+'Results15time.txt', 'wb') as fp:
pickle.dump(Results, fp)
def distanceBetaand():
with open (Path1+"/Beta.txt", 'rb') as fp:
Beta = pickle.load(fp)
trainset = datasets.Fashion200k(
path=Path1,
split='train',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
test = datasets.Fashion200k(
path=Path1,
split='test',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
trig= img_text_composition_models.TIRG([t.encode().decode('utf-8') for t in trainset.get_all_texts()],512)
trig.load_state_dict(torch.load(Path1+r'\fashion200k.tirg.iter160k.pth' , map_location=torch.device('cpu') )['model_state_dict'])
trig.eval()
imgs = []
mods = []
target = []
batchsize=2
Distance=[]
sourceid=[]
targetid=[]
countbeta=0
counttrig=0
for Data in tqdm(trainset):
imgs += [Data['source_img_data']]
mods += [Data['mod']['str']]
target +=[Data['target_img_data']]
sourceid.append(Data['source_img_id'])
targetid.append(Data['target_img_id'])
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)
f = trig.compose_img_text(imgs, mods).data.cpu().numpy()
target = torch.stack(target).float()
target = torch.autograd.Variable(target)
f2 = trig.extract_img_feature(target).data.cpu().numpy()
trigdata=f[0]
trigbeta = np.insert(trigdata,0, 1)
trigbeta=np.matmul(trigbeta,Beta)
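        # trigbeta is now the regressed estimate of the target feature: y_hat = [1, q] @ Beta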
Targetdata = f2[0]
SourceTarget=euclideandistance(trigdata,Targetdata)
betaTarget=euclideandistance(trigbeta,Targetdata)
if(SourceTarget > betaTarget):
countbeta= countbeta+1
else:
counttrig=counttrig+1
# opsig={'source':sourceid[0],'target':targetid[0],'disbeta':betaTarget,'disorig':SourceTarget}
# Distance.append(opsig )
imgs = []
mods = []
target = []
sourceid=[]
targetid=[]
with open(Path1+r"/"+'Distance.txt', 'wb') as fp:
pickle.dump(Distance, fp)
    print('Train Data: beta-projected query closer to target for', countbeta, 'pairs, original TIRG query closer for', counttrig, 'pairs')
imgs = []
mods = []
target = []
batchsize=2
Distance=[]
sourceid=[]
targetid=[]
countbeta=0
counttrig=0
for Data in tqdm(test.get_test_queries()):
imgs += [test.get_img(Data['source_img_id'])]
mods += [Data['mod']['str']]
target +=[test.get_img(Data['target_id'])]
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)
f = trig.compose_img_text(imgs, mods).data.cpu().numpy()
target = torch.stack(target).float()
target = torch.autograd.Variable(target)
f2 = trig.extract_img_feature(target).data.cpu().numpy()
trigdata=f[0]
trigbeta = np.insert(trigdata,0, 1)
trigbeta=np.matmul(trigbeta,Beta)
Targetdata = f2[0]
SourceTarget=euclideandistance(trigdata,Targetdata)
betaTarget=euclideandistance(trigbeta,Targetdata)
if(SourceTarget > betaTarget):
countbeta= countbeta+1
else:
counttrig=counttrig+1
imgs = []
mods = []
target = []
sourceid=[]
targetid=[]
    print('Test Data: beta-projected query closer to target for', countbeta, 'pairs, original TIRG query closer for', counttrig, 'pairs')
################# Beta From Test Set Section #################
def getbeta():
train = datasets.Fashion200k(
path=Path1,
split='train',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
test = datasets.Fashion200k(
path=Path1,
split='test',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
trig= img_text_composition_models.TIRG([t.encode().decode('utf-8') for t in train.get_all_texts()],512)
trig.load_state_dict(torch.load(Path1+r'\fashion200k.tirg.iter160k.pth' , map_location=torch.device('cpu') )['model_state_dict'])
trig.eval()
imgs = []
mods = []
trigdata=[]
target=[]
imgdata=[]
all_source_captions=[]
all_target_captions=[]
for Data in tqdm(test.get_test_queries()):
imgs += [test.get_img(Data['source_img_id'])]
mods += [Data['mod']['str']]
target +=[test.get_img(Data['target_id'])]
        all_source_captions += [Data['source_caption']]
        all_target_captions += [Data['target_caption']]
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)
f = trig.compose_img_text(imgs, mods).data.cpu().numpy()
target = torch.stack(target).float()
target = torch.autograd.Variable(target)
f2 = trig.extract_img_feature(target).data.cpu().numpy()
trigdata.append(f[0])
imgdata.append(f2[0])
imgs = []
mods = []
target = []
with open(Path1+r"/"+'test_all_source_captionsG.pkl', 'wb') as fp:
pickle.dump(all_source_captions, fp)
with open(Path1+r"/"+'test_all_target_captionsG.pkl', 'wb') as fp:
pickle.dump(all_target_captions, fp)
trigdata=np.array(trigdata)
imgdata=np.array(imgdata)
with open(Path1+r"/"+'test_all_queriesG.pkl', 'wb') as fp:
pickle.dump(trigdata, fp)
with open(Path1+r"/"+'test_all_imgsG.pkl', 'wb') as fp:
pickle.dump(imgdata, fp)
Ntrigdata=trigdata
Nimgdata=imgdata
Ntrig2=[]
trigdata2=[]
for i in range(Ntrigdata.shape[0]):
Ntrigdata[i, :] /= np.linalg.norm(Ntrigdata[i, :])
for i in range(Nimgdata.shape[0]):
Nimgdata[i, :] /= np.linalg.norm(Nimgdata[i, :])
for i in range(Ntrigdata.shape[0]):
Ntrig2.append(np.insert(Ntrigdata[i],0, 1))
Ntrig2=np.array(Ntrig2)
Ntrigdata1=Ntrig2.transpose()
X1=np.matmul(Ntrigdata1,Ntrig2)
X2=np.linalg.inv(X1)
X3=np.matmul(X2,Ntrigdata1)
Nbeta=np.matmul(X3,Nimgdata)
with open(Path1+r"/"+'testBetaNormalizedG.txt', 'wb') as fp:
pickle.dump(Nbeta, fp)
def GetValues():
with open (Path1+"/testBetaNormalized.txt", 'rb') as fp:
Nbeta = pickle.load(fp)
train = datasets.Fashion200k(
path=Path1,
split='train',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
test = datasets.Fashion200k(
path=Path1,
split='test',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
trig= img_text_composition_models.TIRG([t.encode().decode('utf-8') for t in train.get_all_texts()],512)
trig.load_state_dict(torch.load(Path1+r'\fashion200k.tirg.iter160k.pth' , map_location=torch.device('cpu') )['model_state_dict'])
opt = argparse.ArgumentParser()
opt.add_argument('--batch_size', type=int, default=2)
opt.add_argument('--dataset', type=str, default='fashion200k')
opt.batch_size =1
opt.dataset='fashion200k'
for name, dataset in [ ('train', train),('test', test)]: #('train', trainset),
betaNor = test_retrieval.testWbeta(opt, trig, dataset,Nbeta)
print(name,' BetaNormalized: ',betaNor)
asbook = test_retrieval.test(opt, trig, dataset)
print(name,' As PaPer: ',asbook)
################# Beta From Train Set Section #################
def getbetatrain():
train = datasets.Fashion200k(
path=Path1,
split='train',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
trig= img_text_composition_models.TIRG([t.encode().decode('utf-8') for t in train.get_all_texts()],512)
trig.load_state_dict(torch.load(Path1+r'\fashion200k.tirg.iter160k.pth' , map_location=torch.device('cpu') )['model_state_dict'])
trig.eval()
imgs = []
mods = []
trigdata=[]
target=[]
imgdata=[]
#m = nn.ReLU()
for i in range(172048): #172048
print('get images=',i,end='\r')
item = train[i]
imgs += [item['source_img_data']]
mods += [item['mod']['str']]
target += [item['target_img_data']]
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)
f = trig.compose_img_text(imgs, mods).data.cpu().numpy()
target = torch.stack(target).float()
target = torch.autograd.Variable(target)
f2 = trig.extract_img_feature(target).data.cpu().numpy()
trigdata.append(f[0])
imgdata.append(f2[0])
imgs = []
mods = []
target = []
trigdata=np.array(trigdata)
imgdata=np.array(imgdata)
Ntrig2=[]
for i in range(trigdata.shape[0]):
trigdata[i, :] /= np.linalg.norm(trigdata[i, :])
for i in range(imgdata.shape[0]):
imgdata[i, :] /= np.linalg.norm(imgdata[i, :])
for i in range(trigdata.shape[0]):
Ntrig2.append(np.insert(trigdata[i],0, 1))
print("Ntrig2 shape %d first elemnt %d",Ntrig2[0] )
Ntrig2=np.array(Ntrig2)
Ntrigdata1=Ntrig2.transpose()
X1=np.matmul(Ntrigdata1,Ntrig2)
X2=np.linalg.inv(X1)
X3=np.matmul(X2,Ntrigdata1)
Nbeta=np.matmul(X3,imgdata)
with open(Path1+r"/"+'Betatrain.txt', 'wb') as fp:
pickle.dump(Nbeta, fp)
def GetValuestrain():
with open (Path1+"\\Betatrain.txt", 'rb') as fp:
BetaNormalize = pickle.load(fp)
trainset = datasets.Fashion200k(
path=Path1,
split='train',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
testset = datasets.Fashion200k(
path=Path1,
split='test',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
trig= img_text_composition_models.TIRG([t.encode().decode('utf-8') for t in trainset.get_all_texts()],512)
trig.load_state_dict(torch.load(Path1+r'\fashion200k.tirg.iter160k.pth' , map_location=torch.device('cpu') )['model_state_dict'])
opt = argparse.ArgumentParser()
opt.add_argument('--batch_size', type=int, default=2)
opt.add_argument('--dataset', type=str, default='fashion200k')
opt.batch_size =1
opt.dataset='fashion200k'
for name, dataset in [ ('train', trainset),('test', testset)]: #('train', trainset),
betaNor = test_retrieval.testWbeta(opt, trig, dataset,BetaNormalize)
print(name,' BetaNormalized: ',betaNor)
# asbook = test_retrieval.test(opt, trig, dataset)
# print(name,' As PaPer: ',asbook)
################# Get Average Beta #################
def GetAverageBeta():
with open (Path1+"/Beta.txt", 'rb') as fp:
BetaTrain = pickle.load(fp)
with open (Path1+"/testBetaNormalized.txt", 'rb') as fp:
BetaTest = pickle.load(fp)
BetaAvg1= np.add(BetaTrain, BetaTest)
BetaAvg2=BetaAvg1/2
trainset = datasets.Fashion200k(
path=Path1,
split='train',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
testset = datasets.Fashion200k(
path=Path1,
split='test',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
trig= img_text_composition_models.TIRG([t.encode().decode('utf-8') for t in trainset.get_all_texts()],512)
trig.load_state_dict(torch.load(Path1+r'\fashion200k.tirg.iter160k.pth' , map_location=torch.device('cpu') )['model_state_dict'])
opt = argparse.ArgumentParser()
opt.add_argument('--batch_size', type=int, default=2)
opt.add_argument('--dataset', type=str, default='fashion200k')
opt.batch_size =1
opt.dataset='fashion200k'
for name, dataset in [ ('train', trainset),('test', testset)]:
betaNor = test_retrieval.testWbeta(opt, trig, dataset,BetaAvg2)
print(name,' Beta Avg: ',betaNor)
asbook = test_retrieval.test(opt, trig, dataset)
print(name,' As PaPer: ',asbook)
################# Beta From Train & Test Set Section #################
def getbetaall():
test = datasets.Fashion200k(
path=Path1,
split='test',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
train = datasets.Fashion200k(
path=Path1,
split='train',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
trig= img_text_composition_models.TIRG([t.encode().decode('utf-8') for t in train.get_all_texts()],512)
trig.load_state_dict(torch.load(Path1+r'\fashion200k.tirg.iter160k.pth' , map_location=torch.device('cpu') )['model_state_dict'])
trig.eval()
imgs = []
mods = []
trigdata=[]
target=[]
imgdata=[]
for Data in tqdm(train):
imgs += [train.get_img(Data['source_img_id'])]
mods += [Data['mod']['str']]
target +=[train.get_img(Data['target_img_id'])]
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)
f = trig.compose_img_text(imgs, mods).data.cpu().numpy()
target = torch.stack(target).float()
target = torch.autograd.Variable(target)
f2 = trig.extract_img_feature(target).data.cpu().numpy()
trigdata.append(f[0])
imgdata.append(f2[0])
imgs = []
mods = []
target = []
for Data in tqdm(test.get_test_queries()):
imgs += [test.get_img(Data['source_img_id'])]
mods += [Data['mod']['str']]
target +=[test.get_img(Data['target_id'])]
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)
f = trig.compose_img_text(imgs, mods).data.cpu().numpy()
target = torch.stack(target).float()
target = torch.autograd.Variable(target)
f2 = trig.extract_img_feature(target).data.cpu().numpy()
trigdata.append(f[0])
imgdata.append(f2[0])
imgs = []
mods = []
target = []
trigdata=np.array(trigdata)
imgdata=np.array(imgdata)
Ntrigdata=trigdata
Nimgdata=imgdata
Ntrig2=[]
for i in range(Ntrigdata.shape[0]):
Ntrigdata[i, :] /= np.linalg.norm(Ntrigdata[i, :])
for i in range(Nimgdata.shape[0]):
Nimgdata[i, :] /= np.linalg.norm(Nimgdata[i, :])
for i in range(Ntrigdata.shape[0]):
Ntrig2.append(np.insert(Ntrigdata[i],0, 1))
Ntrig2=np.array(Ntrig2)
Ntrigdata1=Ntrig2.transpose()
X1=np.matmul(Ntrigdata1,Ntrig2)
X2=np.linalg.inv(X1)
X3=np.matmul(X2,Ntrigdata1)
Nbeta=np.matmul(X3,Nimgdata)
with open(Path1+r"/"+'Betaall.txt', 'wb') as fp:
pickle.dump(Nbeta, fp)
def GetValuesall():
with open (Path1+"/Betaall.txt", 'rb') as fp:
BetaNormalize = pickle.load(fp)
trainset = datasets.Fashion200k(
path=Path1,
split='train',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
testset = datasets.Fashion200k(
path=Path1,
split='test',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
trig= img_text_composition_models.TIRG([t.encode().decode('utf-8') for t in trainset.get_all_texts()],512)
trig.load_state_dict(torch.load(Path1+r'\fashion200k.tirg.iter160k.pth' , map_location=torch.device('cpu') )['model_state_dict'])
opt = argparse.ArgumentParser()
opt.add_argument('--batch_size', type=int, default=2)
opt.add_argument('--dataset', type=str, default='fashion200k')
opt.batch_size =1
opt.dataset='fashion200k'
for name, dataset in [ ('train', trainset)]: #('train', trainset), ,('test', testset)
betaNor = test_retrieval.testWbeta(opt, trig, dataset,BetaNormalize)
print(name,' BetaNormalized: ',betaNor)
# asbook = test_retrieval.test(opt, trig, dataset)
# print(name,' As PaPer: ',asbook)
def getvaluespdf():
train = datasets.Fashion200k(
path=Path1,
split='train',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
trig= img_text_composition_models.TIRG([t.encode().decode('utf-8') for t in train.get_all_texts()],512)
trig.load_state_dict(torch.load(Path1+r'\fashion200k.tirg.iter160k.pth' , map_location=torch.device('cpu') )['model_state_dict'])
trig.eval()
imgs = []
mods = []
trigdata=[]
target=[]
imgdata=[]
#m = nn.ReLU()
for i in range(172048): #172048
print('get images=',i,end='\r')
item = train[i]
imgs += [item['source_img_data']]
mods += [item['mod']['str']]
target += [item['target_img_data']]
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)
f = trig.compose_img_text(imgs, mods).data.cpu().numpy()
target = torch.stack(target).float()
target = torch.autograd.Variable(target)
f2 = trig.extract_img_feature(target).data.cpu().numpy()
trigdata.append(f[0])
imgdata.append(f2[0])
imgs = []
mods = []
target=[]
    # convert the accumulated lists to arrays before normalizing
    trigdata = np.array(trigdata)
    imgdata = np.array(imgdata)
    for i in range(trigdata.shape[0]):
        trigdata[i, :] /= np.linalg.norm(trigdata[i, :])
    for i in range(imgdata.shape[0]):
        imgdata[i, :] /= np.linalg.norm(imgdata[i, :])
print(trigdata)
print(imgdata)
with open(Path1+r"/"+'traindata.txt', 'wb') as fp:
pickle.dump(trigdata, fp)
with open(Path1+r"/"+'imgdata.txt', 'wb') as fp:
pickle.dump(imgdata, fp)
class NLR(nn.Module):
def __init__(self,insize,outsize,hidden):
super().__init__()
self.nlmodel= torch.nn.Sequential(torch.nn.Linear(insize, hidden),torch.nn.Sigmoid(),torch.nn.Linear(hidden, outsize))
def myforward (self,x11):
p=self.nlmodel(x11)
return p
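# Minimal usage sketch for NLR (sizes assumed to match getNLP below, not taken from elsewhere):
#   model = NLR(513, 512, 700)       # 513 = 1 bias term + 512-d TIRG feature
#   x = torch.randn(513)             # bias-augmented composed query feature
#   y_hat = model.myforward(x)       # predicted 512-d target image feature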
def getNLP():
train = datasets.Fashion200k(
path=Path1,
split='train',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
trig= img_text_composition_models.TIRG([t.encode().decode('utf-8') for t in train.get_all_texts()],512)
trig.load_state_dict(torch.load(Path1+r'\fashion200k.tirg.iter160k.pth' , map_location=torch.device('cpu') )['model_state_dict'])
trig.eval()
imgs = []
mods = []
trigdata=[]
target=[]
imgdata=[]
dtsz, indm, hddm, oudm = 172048, 513, 700, 512
loss_fn = torch.nn.MSELoss(reduction='sum')
torch.manual_seed(3)
model=NLR(indm,oudm,hddm)
#model=model.cuda()
torch.manual_seed(3)
criterion=nn.MSELoss()
optimizer=torch.optim.SGD(model.parameters(), lr=0.001)
epoch=50
losses=[]
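    # training loop: effectively batch size 1; each composed (image, text) feature is L2-normalized,
    # bias-augmented to 513-d, and regressed onto the normalized target image feature with MSE loss
    # and plain SGD (lr=0.001) for `epoch` passes over the training set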
for j in range(epoch):
for l in range(dtsz): #172048
print('Epoch:',j,' get images=',l,end='\r')
item = train[l]
imgs += [item['source_img_data']]
mods += [item['mod']['str']]
target += [item['target_img_data']]
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)#.cuda()
target = torch.stack(target).float()
target = torch.autograd.Variable(target)#.cuda()
f = trig.compose_img_text(imgs, mods).data.cpu().numpy()
f2 = trig.extract_img_feature(target).data.cpu().numpy()
for i in range(f.shape[0]):
f[i, :] /= np.linalg.norm(f[i, :])
for i in range(f2.shape[0]):
f2[i, :] /= np.linalg.norm(f2[i, :])
for i in range(f.shape[0]):
trigdata =np.insert(f[i],0, 1)
trigdata=torch.from_numpy(trigdata)
f2=torch.from_numpy(f2)
yp=model.myforward(trigdata)
loss=criterion(yp,f2)
if(l%20000 == 0):
print("epoch ",j, "loss ", loss.item())
losses.append(loss)
optimizer.zero_grad()
loss.backward()
optimizer.step()
imgs = []
mods = []
trigdata=[]
target=[]
imgdata=[]
print('Finished Training')
torch.save(model.state_dict(), Path1+r'\NLP2.pth')
def resultsNLP():
dtsz, indm, hddm, oudm = 172048, 513, 700, 512
model=NLR(indm,oudm,hddm)
model.load_state_dict(torch.load(Path1+r'\NLP.pth' , map_location=torch.device('cpu') ))
model.eval()
trainset = datasets.Fashion200k(
path=Path1,
split='train',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
testset = datasets.Fashion200k(
path=Path1,
split='test',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
trig= img_text_composition_models.TIRG([t.encode().decode('utf-8') for t in trainset.get_all_texts()],512)
trig.load_state_dict(torch.load(Path1+r'\fashion200k.tirg.iter160k.pth' , map_location=torch.device('cpu') )['model_state_dict'])
opt = argparse.ArgumentParser()
opt.add_argument('--batch_size', type=int, default=2)
opt.add_argument('--dataset', type=str, default='fashion200k')
opt.batch_size =1
opt.dataset='fashion200k'
for name, dataset in [ ('train', trainset),('test', testset)]: #('train', trainset),
NLP = test_retrieval.testNLP(opt, trig, dataset,model)
print(name,' NLP: ',NLP)
asbook = test_retrieval.test(opt, trig, dataset)
print(name,' As PaPer: ',asbook)
def savevaluestofile():
train = datasets.Fashion200k(
path=Path1,
split='train',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
trig= img_text_composition_models.TIRG([t.encode().decode('utf-8') for t in train.get_all_texts()],512)
trig.load_state_dict(torch.load(Path1+r'\fashion200k.tirg.iter160k.pth' , map_location=torch.device('cpu') )['model_state_dict'])
trig.eval()
imgs = []
mods = []
trigdata=[]
target=[]
imgdata=[]
alldata=[]
#m = nn.ReLU()
for i in range(172048): #172048
print('get images=',i,end='\r')
item = train[i]
imgs += [item['source_img_data']]
mods += [item['mod']['str']]
target += [item['target_img_data']]
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)
f = trig.compose_img_text(imgs, mods).data.cpu().numpy()
target = torch.stack(target).float()
target = torch.autograd.Variable(target)
f2 = trig.extract_img_feature(target).data.cpu().numpy()
# trigdata.append(f[0])
# imgdata.append(f2[0])
opsig={
'SourceTrig':f[0],
'TargetData':f2[0],
'IDX':i
}
alldata.append(opsig)
imgs = []
mods = []
trigdata=[]
target=[]
imgdata=[]
with open(Path1+r"/"+'TrigImgData172.txt', 'wb') as fp:
pickle.dump(alldata, fp)
def Savevaluestest():
train = datasets.Fashion200k(
path=Path1,
split='train',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
test = datasets.Fashion200k(
path=Path1,
split='test',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
trig= img_text_composition_models.TIRG([t.encode().decode('utf-8') for t in train.get_all_texts()],512)
trig.load_state_dict(torch.load(Path1+r'\fashion200k.tirg.iter160k.pth' , map_location=torch.device('cpu') )['model_state_dict'])
trig.eval()
imgs = []
mods = []
trigdata=[]
target=[]
imgdata=[]
alldata=[]
for Data in tqdm(test.get_test_queries()):
imgs += [test.get_img(Data['source_img_id'])]
mods += [Data['mod']['str']]
target +=[test.get_img(Data['target_id'])]
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)
f = trig.compose_img_text(imgs, mods).data.cpu().numpy()
target = torch.stack(target).float()
target = torch.autograd.Variable(target)
f2 = trig.extract_img_feature(target).data.cpu().numpy()
opsig={
'SourceTrig':f[0],
'TargetData':f2[0],
'IDX':Data['source_img_id']
}
alldata.append(opsig)
all_captions = [img['captions'][0] for img in test.imgs]
imgs = []
mods = []
trigdata=[]
target=[]
imgdata=[]
with open(Path1+r"/"+'allcaptions.txt', 'wb') as fp:
pickle.dump(all_captions, fp)
with open(Path1+r"/"+'TrigImgDatatestset.txt', 'wb') as fp:
pickle.dump(alldata, fp)
def trainsaveddataresultsa():
with open (Path1+"\\TrigImgData172.txt", 'rb') as fp:
Datasaved172 = pickle.load(fp)
with open (Path1+"\\TrigImgDatatestset.txt", 'rb') as fp:
Datasavedtest = pickle.load(fp)
with open (Path1+"\\Betatrain.txt", 'rb') as fp:
BetaNormalize = pickle.load(fp)
#betaNor = test_retrieval.testWbetaWsaveddataa(BetaNormalize,Datasaved172)
#print('trained',' BetaNormalized: ',betaNor)
betaNor = test_retrieval.testWbetaWsaveddataa(BetaNormalize,Datasavedtest)
print('test',' BetaNormalized: ',betaNor)
def trainsaveddataresults():
with open (Path1+"\\TrigImgData172.txt", 'rb') as fp:
Datasaved172 = pickle.load(fp)
with open (Path1+"\\TrigImgDatatestset.txt", 'rb') as fp:
Datasavedtest = pickle.load(fp)
with open (Path1+"\\Betatrain.txt", 'rb') as fp:
BetaNormalize = pickle.load(fp)
trainset = datasets.Fashion200k(
path=Path1,
split='train',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
testset = datasets.Fashion200k(
path=Path1,
split='test',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
trig= img_text_composition_models.TIRG([t.encode().decode('utf-8') for t in trainset.get_all_texts()],512)
trig.load_state_dict(torch.load(Path1+r'\fashion200k.tirg.iter160k.pth' , map_location=torch.device('cpu') )['model_state_dict'])
opt = argparse.ArgumentParser()
opt.add_argument('--batch_size', type=int, default=2)
opt.add_argument('--dataset', type=str, default='fashion200k')
opt.batch_size =1
opt.dataset='fashion200k'
for name, dataset in [ ('train', trainset),('test', testset)]: #('train', trainset),
betaNor = test_retrieval.testWbetaWsaveddata(opt, trig, dataset,BetaNormalize,Datasaved172,Datasavedtest)
print(name,' BetaNormalized: ',betaNor)
def Save_GetValues():
train = datasets.Fashion200k(
path=Path1,
split='train',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
test = datasets.Fashion200k(
path=Path1,
split='test',
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(224),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]))
trig= img_text_composition_models.TIRG([t.encode().decode('utf-8') for t in train.get_all_texts()],512)
trig.load_state_dict(torch.load(Path1+r'\fashion200k.tirg.iter160k.pth' , map_location=torch.device('cpu') )['model_state_dict'])
opt = argparse.ArgumentParser()
opt.add_argument('--batch_size', type=int, default=2)
opt.add_argument('--dataset', type=str, default='fashion200k')
opt.batch_size =1
opt.dataset='fashion200k'
#for name, dataset in [ ('test', test),('train', train)]: #('train', trainset),
for name, dataset in [ ('test', test)]: #('train', trainset),
asbook = test_retrieval.test_and_save(opt, trig, dataset)
print(name,' As PaPer: ',asbook)
def print_results(sourceFile,out,test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld):
print(' Experiment setup : ', file = sourceFile)
if (test_train==1):
print('Dataset:Training Data set', file = sourceFile)
else:
print('Dataset:Testing Data set', file = sourceFile)
if (normal_beta==0):
print(' Trig', file = sourceFile)
else:
print(' Trig followed by Regression network', file = sourceFile)
if (normal_beta==1):
if (create_load==0):
print(' Regression Network Created, save to file', file = sourceFile)
else:
print(' Regression Network Loaded from file ', file = sourceFile)
print(' = ',filename, file = sourceFile)
if (normal_normalize==0):
print(' Regression done without normalization ', file = sourceFile)
else:
print(' Regression done on normalized vectors ', file = sourceFile)
else:
print(' ', file=sourceFile)
if (dot_eucld==0):
print(' Distance: Cos Angle between vectors ', file = sourceFile)
else:
print(' Distance: Eucledian ', file = sourceFile)
print(' Dataset size Divider ', set_size_divider, file = sourceFile)
print(' Experiment Outcome: - ','\n',out,'\n', file = sourceFile)
def results():
sourceFile = open(Path1+r"/"+'results'+time.strftime("%Y%m%d-%H%M%S")+'.txt', 'w')
test_train=0
normal_beta=0
set_size_divider=1
normal_normalize=0
create_load=0
filename='na'
dot_eucld=0
# 1
print(' 1', file=sourceFile)
out =test_retrieval.test_on_saved(test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
print_results(sourceFile,out,test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
test_train=1
set_size_divider=17.2
# 2
print(' 2', file=sourceFile)
out =test_retrieval.test_on_saved(test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
print_results(sourceFile,out,test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
test_train=1
normal_beta=1
create_load=0
filename='REGTR10ND.BTA'
# 3
print(' 3', file=sourceFile)
out =test_retrieval.test_on_saved(test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
print_results(sourceFile,out,test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
test_train=0
set_size_divider=1
normal_beta=1
create_load=1
filename='REGTR10ND.BTA'
# 4
print(' 4', file=sourceFile)
out =test_retrieval.test_on_saved(test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
print_results(sourceFile,out,test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
test_train=0
set_size_divider=1
normal_beta=1
create_load=0
filename='REGTS33ND.BTA'
# 5
print(' 5', file=sourceFile)
out =test_retrieval.test_on_saved(test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
print_results(sourceFile,out,test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
test_train=1
set_size_divider=1
normal_beta=0
create_load=0
filename='na'
# 6
print(' 6', file=sourceFile)
out =test_retrieval.test_on_saved(test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
print_results(sourceFile,out,test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
test_train=1
set_size_divider=1
normal_beta=1
create_load=0
filename='REGTR172ND,BTA'
# 7
print(' 7', file=sourceFile)
out =test_retrieval.test_on_saved(test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
print_results(sourceFile,out,test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
test_train=0
set_size_divider=1
normal_beta=1
create_load=1
filename='REGTR172ND,BTA'
# 8
print(' 8', file=sourceFile)
out =test_retrieval.test_on_saved(test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
print_results(sourceFile,out,test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
###################NNORMALIZED BETA##############################################################
test_train=3
normal_beta=1
set_size_divider=17.2
normal_normalize=0
create_load=0
filename='REGTR10NND.BTA'
dot_eucld=0
test_train=1
# 3NN
print(' 3NN', file=sourceFile)
out =test_retrieval.test_on_saved(test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
print_results(sourceFile,out,test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
test_train=0
set_size_divider=1
normal_beta=1
create_load=1
filename='REGTR10NND.BTA'
# 4 NN
print(' 4NN', file=sourceFile)
out =test_retrieval.test_on_saved(test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
print_results(sourceFile,out,test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
test_train=1
set_size_divider=1
normal_beta=1
create_load=0
filename='REGTR172NND,BTA'
# 7 NN
print(' 7NN', file=sourceFile)
out =test_retrieval.test_on_saved(test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
print_results(sourceFile,out,test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
test_train=0
set_size_divider=1
normal_beta=1
create_load=1
filename='REGTR172NND,BTA'
# 8 NN
print(' 8NN', file=sourceFile)
out =test_retrieval.test_on_saved(test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
print_results(sourceFile,out,test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
###################eucledian##############################################################
test_train=0
normal_beta=0
set_size_divider=1
normal_normalize=0
create_load=0
filename='na'
dot_eucld=1
# 1 E
print(' 1 E', file=sourceFile)
out =test_retrieval.test_on_saved(test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
print_results(sourceFile,out,test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
test_train=1
set_size_divider=17.2
# 2 E
print(' 2 E', file=sourceFile)
out =test_retrieval.test_on_saved(test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
print_results(sourceFile,out,test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
test_train=1
normal_beta=1
create_load=0
filename='REGTR10NE.BTA'
# 3 E
print(' 3 E', file=sourceFile)
out =test_retrieval.test_on_saved(test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
print_results(sourceFile,out,test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
test_train=0
set_size_divider=1
normal_beta=1
create_load=1
filename='REGTR10NE.BTA'
# 4 E
print(' 4 E', file=sourceFile)
out =test_retrieval.test_on_saved(test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
print_results(sourceFile,out,test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
test_train=0
set_size_divider=1
normal_beta=1
create_load=0
filename='REGTS33NE.BTA'
# 5 E
print(' 5 E', file=sourceFile)
out =test_retrieval.test_on_saved(test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
print_results(sourceFile,out,test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
test_train=1
set_size_divider=1
normal_beta=0
create_load=0
filename='na'
# 6 E
print(' 6 E', file=sourceFile)
out =test_retrieval.test_on_saved(test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
print_results(sourceFile,out,test_train,normal_beta,create_load,filename,normal_normalize, set_size_divider, dot_eucld)
sourceFile.close()
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluates the retrieval model."""
import numpy as np
import pickle
import torch
from tqdm import tqdm as tqdm
from scipy.spatial import distance
def test(opt, model, testset):
"""Tests a model over the given testset."""
model.eval()
test_queries = testset.get_test_queries()
all_imgs = []
all_captions = []
all_queries = []
all_target_captions = []
if test_queries:
# compute test query features
imgs = []
mods = []
for t in tqdm(test_queries):
imgs += [testset.get_img(t['source_img_id'])]
mods += [t['mod']['str']]
if len(imgs) >= opt.batch_size or t is test_queries[-1]:
if 'torch' not in str(type(imgs[0])):
imgs = [torch.from_numpy(d).float() for d in imgs]
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)#.cuda()
f = model.compose_img_text(imgs, mods).data.cpu().numpy()
all_queries += [f]
imgs = []
mods = []
all_queries = np.concatenate(all_queries)
all_target_captions = [t['target_caption'] for t in test_queries]
# compute all image features
imgs = []
for i in tqdm(range(len(testset.imgs))):
imgs += [testset.get_img(i)]
if len(imgs) >= opt.batch_size or i == len(testset.imgs) - 1:
if 'torch' not in str(type(imgs[0])):
imgs = [torch.from_numpy(d).float() for d in imgs]
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)#.cuda()
imgs = model.extract_img_feature(imgs).data.cpu().numpy()
all_imgs += [imgs]
imgs = []
all_imgs = np.concatenate(all_imgs)
all_captions = [img['captions'][0] for img in testset.imgs]
else:
# use training queries to approximate training retrieval performance
imgs0 = []
imgs = []
mods = []
for i in range(10000):
print('get images=',i,end='\r')
item = testset[i]
imgs += [item['source_img_data']]
mods += [item['mod']['str']]
if len(imgs) >= opt.batch_size or i == 9999:
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)
f = model.compose_img_text(imgs, mods).data.cpu().numpy() #.cuda()
all_queries += [f]
imgs = []
mods = []
imgs0 += [item['target_img_data']]
if len(imgs0) >= opt.batch_size or i == 9999:
imgs0 = torch.stack(imgs0).float()
imgs0 = torch.autograd.Variable(imgs0)
imgs0 = model.extract_img_feature(imgs0).data.cpu().numpy() #.cuda()
all_imgs += [imgs0]
imgs0 = []
all_captions += [item['target_caption']]
all_target_captions += [item['target_caption']]
all_imgs = np.concatenate(all_imgs)
all_queries = np.concatenate(all_queries)
# feature normalization
for i in range(all_queries.shape[0]):
all_queries[i, :] /= np.linalg.norm(all_queries[i, :])
for i in range(all_imgs.shape[0]):
all_imgs[i, :] /= np.linalg.norm(all_imgs[i, :])
# match test queries to target images, get nearest neighbors
nn_result = []
for i in tqdm(range(all_queries.shape[0])):
sims = all_queries[i:(i+1), :].dot(all_imgs.T)
if test_queries:
sims[0, test_queries[i]['source_img_id']] = -10e10 # remove query image
nn_result.append(np.argsort(-sims[0, :])[:110])
# compute recalls
out = []
nn_result = [[all_captions[nn] for nn in nns] for nns in nn_result]
for k in [1, 5, 10, 50, 100]:
r = 0.0
for i, nns in enumerate(nn_result):
if all_target_captions[i] in nns[:k]:
r += 1
r /= len(nn_result)
#out += [('recall_top' + str(k) + '_correct_composition', r)]
out.append(str(k) + ' ---> '+ str(r*100))
if opt.dataset == 'mitstates':
r = 0.0
for i, nns in enumerate(nn_result):
if all_target_captions[i].split()[0] in [c.split()[0] for c in nns[:k]]:
r += 1
r /= len(nn_result)
out += [('recall_top' + str(k) + '_correct_adj', r)]
r = 0.0
for i, nns in enumerate(nn_result):
if all_target_captions[i].split()[1] in [c.split()[1] for c in nns[:k]]:
r += 1
r /= len(nn_result)
out += [('recall_top' + str(k) + '_correct_noun', r)]
return out
def testWbeta(opt, model, testset,beta):
"""Tests a model over the given testset."""
model.eval()
test_queries = testset.get_test_queries()
all_imgs = []
all_captions = []
all_queries = []
all_target_captions = []
if test_queries:
# compute test query features
imgs = []
mods = []
for t in tqdm(test_queries):
imgs += [testset.get_img(t['source_img_id'])]
mods += [t['mod']['str']]
if len(imgs) >= opt.batch_size or t is test_queries[-1]:
if 'torch' not in str(type(imgs[0])):
imgs = [torch.from_numpy(d).float() for d in imgs]
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)
f = model.compose_img_text(imgs, mods).data.cpu().numpy()
for j in range(len(f)):
# for i in range(f.shape[0]):
# f[i, :] /= np.linalg.norm(f[i, :])
f[j, :] /= np.linalg.norm(f[j, :])
X1 = np.insert(f[j],0, 1)
X2=np.matmul(X1,beta)
f[j]=X2
all_queries += [f]
imgs = []
mods = []
all_queries = np.concatenate(all_queries)
all_target_captions = [t['target_caption'] for t in test_queries]
# compute all image features
imgs = []
for i in tqdm(range(len(testset.imgs))):
imgs += [testset.get_img(i)]
if len(imgs) >= opt.batch_size or i == len(testset.imgs) - 1:
if 'torch' not in str(type(imgs[0])):
imgs = [torch.from_numpy(d).float() for d in imgs]
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)
imgs = model.extract_img_feature(imgs).data.cpu().numpy()
all_imgs += [imgs]
imgs = []
all_imgs = np.concatenate(all_imgs)
all_captions = [img['captions'][0] for img in testset.imgs]
else:
# use training queries to approximate training retrieval performance
imgs0 = []
imgs = []
mods = []
for i in range(10000):
print('get images=',i,end='\r')
item = testset[i]
imgs += [item['source_img_data']]
mods += [item['mod']['str']]
if len(imgs) >= opt.batch_size or i == 9999:
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)
f = model.compose_img_text(imgs, mods).data.cpu().numpy()
for j in range(len(f)):
#for i in range(f.shape[0]):
#f[i, :] /= np.linalg.norm(f[i, :])
f[j, :] /= np.linalg.norm(f[j, :])
X1 = np.insert(f[j],0, 1)
X2=np.matmul(X1,beta)
f[j]=X2
all_queries += [f]
imgs = []
mods = []
imgs0 += [item['target_img_data']]
if len(imgs0) >= opt.batch_size or i == 9999:
imgs0 = torch.stack(imgs0).float()
imgs0 = torch.autograd.Variable(imgs0)
imgs0 = model.extract_img_feature(imgs0).data.cpu().numpy()
all_imgs += [imgs0]
imgs0 = []
all_captions += [item['target_caption']]
all_target_captions += [item['target_caption']]
all_imgs = np.concatenate(all_imgs)
all_queries = np.concatenate(all_queries)
# feature normalization
for i in range(all_queries.shape[0]):
all_queries[i, :] /= np.linalg.norm(all_queries[i, :])
for i in range(all_imgs.shape[0]):
all_imgs[i, :] /= np.linalg.norm(all_imgs[i, :])
# match test queries to target images, get nearest neighbors
nn_result = []
for i in tqdm(range(all_queries.shape[0])):
sims = all_queries[i:(i+1), :].dot(all_imgs.T)
if test_queries:
sims[0, test_queries[i]['source_img_id']] = -10e10 # remove query image
nn_result.append(np.argsort(-sims[0, :])[:110])
# compute recalls
out = []
nn_result = [[all_captions[nn] for nn in nns] for nns in nn_result]
for k in [1, 5, 10, 50, 100]:
r = 0.0
for i, nns in enumerate(nn_result):
if all_target_captions[i] in nns[:k]:
r += 1
r /= len(nn_result)
#out += [('recall_top' + str(k) + '_correct_composition', r)]
out.append(str(k) + ' ---> '+ str(r*100))
if opt.dataset == 'mitstates':
r = 0.0
for i, nns in enumerate(nn_result):
if all_target_captions[i].split()[0] in [c.split()[0] for c in nns[:k]]:
r += 1
r /= len(nn_result)
out += [('recall_top' + str(k) + '_correct_adj', r)]
r = 0.0
for i, nns in enumerate(nn_result):
if all_target_captions[i].split()[1] in [c.split()[1] for c in nns[:k]]:
r += 1
r /= len(nn_result)
out += [('recall_top' + str(k) + '_correct_noun', r)]
return out
def testNLP(opt, model, testset,model2):
"""Tests a model over the given testset."""
model.eval()
test_queries = testset.get_test_queries()
all_imgs = []
all_captions = []
all_queries = []
all_target_captions = []
if test_queries:
# compute test query features
imgs = []
mods = []
for t in tqdm(test_queries):
imgs += [testset.get_img(t['source_img_id'])]
mods += [t['mod']['str']]
if len(imgs) >= opt.batch_size or t is test_queries[-1]:
if 'torch' not in str(type(imgs[0])):
imgs = [torch.from_numpy(d).float() for d in imgs]
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)
f = model.compose_img_text(imgs, mods).data.cpu().numpy()
for i in range(f.shape[0]):
f[i, :] /= np.linalg.norm(f[i, :])
f =np.insert(f,0, 1)
f=np.expand_dims(f, axis=0)
f=torch.from_numpy(f)
f=model2.myforward(f).data.cpu().numpy()
all_queries += [f]
imgs = []
mods = []
all_queries = np.concatenate(all_queries)
all_target_captions = [t['target_caption'] for t in test_queries]
# compute all image features
imgs = []
for i in tqdm(range(len(testset.imgs))):
imgs += [testset.get_img(i)]
if len(imgs) >= opt.batch_size or i == len(testset.imgs) - 1:
if 'torch' not in str(type(imgs[0])):
imgs = [torch.from_numpy(d).float() for d in imgs]
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)
imgs = model.extract_img_feature(imgs).data.cpu().numpy()
all_imgs += [imgs]
imgs = []
all_imgs = np.concatenate(all_imgs)
all_captions = [img['captions'][0] for img in testset.imgs]
else:
# use training queries to approximate training retrieval performance
imgs0 = []
imgs = []
mods = []
for i in range(10000):
print('get images=',i,end='\r')
item = testset[i]
imgs += [item['source_img_data']]
mods += [item['mod']['str']]
if len(imgs) >= opt.batch_size or i == 9999:
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)
f = model.compose_img_text(imgs, mods).data.cpu().numpy()
for i in range(f.shape[0]):
f[i, :] /= np.linalg.norm(f[i, :])
f =np.insert(f,0, 1)
f=np.expand_dims(f, axis=0)
f=torch.from_numpy(f)
f=model2.myforward(f).data.cpu().numpy()
all_queries += [f]
imgs = []
mods = []
imgs0 += [item['target_img_data']]
if len(imgs0) >= opt.batch_size or i == 9999:
imgs0 = torch.stack(imgs0).float()
imgs0 = torch.autograd.Variable(imgs0)
imgs0 = model.extract_img_feature(imgs0).data.cpu().numpy()
all_imgs += [imgs0]
imgs0 = []
all_captions += [item['target_caption']]
all_target_captions += [item['target_caption']]
all_imgs = np.concatenate(all_imgs)
all_queries = np.concatenate(all_queries)
# feature normalization
# for i in range(all_queries.shape[0]):
# all_queries[i, :] /= np.linalg.norm(all_queries[i, :])
for i in range(all_imgs.shape[0]):
all_imgs[i, :] /= np.linalg.norm(all_imgs[i, :])
# match test queries to target images, get nearest neighbors
nn_result = []
for i in tqdm(range(all_queries.shape[0])):
sims = all_queries[i:(i+1), :].dot(all_imgs.T)
if test_queries:
sims[0, test_queries[i]['source_img_id']] = -10e10 # remove query image
nn_result.append(np.argsort(-sims[0, :])[:110])
# compute recalls
out = []
nn_result = [[all_captions[nn] for nn in nns] for nns in nn_result]
for k in [1, 5, 10, 50, 100]:
r = 0.0
for i, nns in enumerate(nn_result):
if all_target_captions[i] in nns[:k]:
r += 1
r /= len(nn_result)
#out += [('recall_top' + str(k) + '_correct_composition', r)]
out.append(str(k) + ' ---> '+ str(r*100))
if opt.dataset == 'mitstates':
r = 0.0
for i, nns in enumerate(nn_result):
if all_target_captions[i].split()[0] in [c.split()[0] for c in nns[:k]]:
r += 1
r /= len(nn_result)
out += [('recall_top' + str(k) + '_correct_adj', r)]
r = 0.0
for i, nns in enumerate(nn_result):
if all_target_captions[i].split()[1] in [c.split()[1] for c in nns[:k]]:
r += 1
r /= len(nn_result)
out += [('recall_top' + str(k) + '_correct_noun', r)]
return out
def testWbetaWsaveddata(opt, model, testset,beta,savedtrain,savedtest):
"""Tests a model over the given testset."""
model.eval()
test_queries = testset.get_test_queries()
all_imgs = []
all_captions = []
all_queries = []
all_target_captions = []
if test_queries:
# compute test query features
imgs = []
mods = []
for t in range(len(savedtest)):
print('get testdata=',t,end='\r')
f=savedtest[t]['SourceTrig']
f=np.expand_dims(f, axis=0)
for j in range(len(f)):
f[j, :] /= np.linalg.norm(f[j, :])
X1 = np.insert(f[j],0, 1)
X2=np.matmul(X1,beta)
f[j]=X2
all_queries += [f]
imgs = []
mods = []
all_queries = np.concatenate(all_queries)
all_target_captions = [t['target_caption'] for t in test_queries]
# compute all image features
imgs = []
for i in tqdm(range(len(testset.imgs))):
imgs += [testset.get_img(i)]
if len(imgs) >= opt.batch_size or i == len(testset.imgs) - 1:
if 'torch' not in str(type(imgs[0])):
imgs = [torch.from_numpy(d).float() for d in imgs]
imgs = torch.stack(imgs).float()
imgs = torch.autograd.Variable(imgs)
imgs = model.extract_img_feature(imgs).data.cpu().numpy()
all_imgs += [imgs]
imgs = []
all_imgs = np.concatenate(all_imgs)
all_captions = [img['captions'][0] for img in testset.imgs]
else:
# use training queries to approximate training retrieval performance
imgs0 = []
imgs = []
mods = []
for i in range(10000):
print('get images=',i,end='\r')
item = testset[i]
f=savedtrain[i]['SourceTrig']
f=np.expand_dims(f, axis=0)
for j in range(len(f)):
f[j, :] /= np.linalg.norm(f[j, :])
from __future__ import print_function
import matplotlib
matplotlib.use('tkAgg')
import matplotlib.pyplot as plt
from scipy.sparse import csr_matrix
from dolfin import *
import scipy
import numpy as np
import pyshtools
from deepsphere import utils
# Test for PETSc and SLEPc
if not has_linear_algebra_backend("PETSc"):
print("DOLFIN has not been configured with PETSc. Exiting.")
exit()
if not has_slepc():
print("DOLFIN has not been configured with SLEPc. Exiting.")
exit()
def to_array(f, bw):
"""From a 1-d vector to a 2D grid necessary to initiate a pyshtools.SHGrid object"""
height, width = 2*bw, 2*bw
array = np.zeros((height, width)) # shape=(longitude, latitude)
f = np.append([f[0]]*(2*bw-1), f) # correct! the first line is the North pole repeated 2bw times
# now we need to undo the meshgrid
assert f.size == array.size
for n, fx in enumerate(f):
j = n%width
i = n//width
array[i, j] = fx
return array
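# Example: for bw=4 the flat input has 2*4*(2*4-1)+1 = 57 samples (the North
# pole stored once); after repeating the pole 2*bw-1 times it fills an
# 8 x 8 equiangular grid.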
spectral_content = dict()
spectral_content_reordered = dict()
bws = [4, 8]
for bw in bws:
lmax = bw-1
N = np.cumsum(np.arange(1, 2*lmax+2, 2))[-1]
npix = 2*bw*(2*bw-1)+1
# Define mesh, function space
mesh = Mesh("meshes/equi_{}.xml".format(bw))
global_normal = Expression(("x[0]", "x[1]", "x[2]"), degree=1)
mesh.init_cell_orientations(global_normal)
V = FunctionSpace(mesh, "Lagrange", 1)
# Define basis and bilinear form
u = TrialFunction(V)
v = TestFunction(V)
a = dot(grad(u), grad(v))*dx
b = dot(u, v)*dx
# Assemble stiffness form
A = PETScMatrix()
B = PETScMatrix()
assemble(a, tensor=A)
assemble(b, tensor=B)
# Create eigensolver
eigensolver = SLEPcEigenSolver(A, B)
eigensolver.parameters['spectrum'] = 'target real'
eigensolver.parameters['tolerance'] = 1.e-3
eigensolver.parameters['maximum_iterations'] = 100
# Compute all eigenvalues of A x = \lambda x
print("Computing eigenvalues. This can take a minute.")
eigensolver.solve(N)
print('Done. Extracting results...')
eig_vectors = np.ndarray((npix, N), dtype='float')
eig_values = np.ndarray(N, dtype='float')
for i in range(N):
# Extract largest (first) eigenpair
r, c, rx, cx = eigensolver.get_eigenpair(i)
# ----- keeping the dof ordering -----
eig_vectors[:, i] = np.asarray(rx)
eig_values[i] = r
# ---------------------------------------------------------
# ---------------------------------------------------------
cl = np.empty((N, lmax+1))
spectral_content[bw] = np.empty((lmax+1, lmax+1))
for i in range(N):
eigenvector = eig_vectors[:,i]
# ---------ANAFAST ON THIS SAMPLING DOES NOT WORK ANYMORE-------
### cl[i] = hp.sphtfunc.anafast(eigenvector, lmax=lmax, iter=8)
eig_array = to_array(eigenvector, bw)
g = pyshtools.SHGrid.from_array(eig_array)
clm = g.expand(normalization='unnorm')
cl[i] = clm.spectrum()
start = 0
for ell in range(lmax+1):
end = start + (2 * ell + 1)
spectral_content[bw][ell] = np.sum(cl[start:end,:], axis=0)/ np.sum(cl[start:end,:])
start = end
# ---------------------------------------------------------
# ---------- reordering ----------
reordered_mask = np.load('15_reordering_masks/reordering_mask_{}.npy'.format(bw))
eig_vectors = eig_vectors[reordered_mask]
# ---------------------------------------------------------
# ---------------------------------------------------------
cl_reordered = np.empty((N, lmax+1))
spectral_content_reordered[bw] = np.empty((lmax+1, lmax+1))
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def get_errors(arr,x):
e = [np.linalg.norm(arr[:,i] - x)/np.linalg.norm(x) for i in range(arr.shape[1])]
e.insert(0,1)
return e
def get_x_opt(X,y,gamma):
H = X.T@X + gamma*np.eye(X.shape[1])
x_opt = np.linalg.solve(H,X.T@y)
return x_opt
# Find where errors are > t
def greater_than_thld(arr, thld):
'''
Given an input array arr, find the first index where arr > thld.
'''
i_thld = np.where(arr>thld)[0][0]
return i_thld
def linear_fit(x, a, b):
"""
Wrapper function for scipy to fit a line of best fit to datapoints.
"""
return a*x + b
def reciprocal_fit(x, a, b):
"""
Wrapper function for scipy to fit a curve of a/x + b to datapoints.
"""
return a/x + b
def gaussian_projection(data,sketch_size,random_seed=10):
"""
evaluates the linear random projection SA with S sampled from a
standard Gaussian distribution with appropriate scaling.
"""
rng = np.random.seed(random_seed)
[n,d] = data.shape
S = np.random.randn(sketch_size,n) / np.sqrt(sketch_size)
return S@data
def sparse_projection(data,sketch_size,sparsity=1,random_seed=10):
"""
Performs the sparse johnson lindenstrauss transform of Kane and Nelson
"""
[n,d] = data.shape
sketch = np.zeros((sketch_size,n),dtype=float)
for i in range(n):
nnz_loc = np.random.choice(sketch_size,size=sparsity,replace=False)
nnz_sign = np.random.choice([-1,1],size=sparsity,replace=True)
sketch[nnz_loc,i] = nnz_sign
return (1./np.sqrt(sparsity))*sketch@data
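# Usage sketch (hypothetical data): sketch a tall 10000 x 50 matrix down to
# 500 rows; both random projections approximately preserve inner products
# between columns of the data.
# A = np.random.randn(10000, 50)
# SA_gauss = gaussian_projection(A, sketch_size=500)
# SA_sparse = sparse_projection(A, sketch_size=500, sparsity=2)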
def get_covariance_bound(X,k,sketch_dimension):
'''
Given input data X and rank k, evaluate the covariance error bound:
||X - Xk||_F^2 / (sketch_dimension - k)
'''
U, S, Vt = np.linalg.svd(X, full_matrices=False)
Xk = U[:,:k]@(np.diag(S[:k])@Vt[:k,:])
delta_k = np.linalg.norm(X- Xk,ord='fro')**2
return delta_k / (sketch_dimension - k )
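# Note: by Eckart-Young, ||X - Xk||_F^2 equals the sum of squared singular
# values beyond the k-th, so this bound shrinks as the sketch size grows.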
def get_covariance_bound_vary_sketch_size(X,k,sketch_sizes):
'''
Returns the range of covariance bounds as the sketch size is varied
'''
U, S, Vt = np.linalg.svd(X, full_matrices=False)
Xk = U[:,:k]@(np.diag(S[:k])@Vt[:k,:])
delta_k = np.linalg.norm(X- Xk,ord='fro')**2
bounds = delta_k*np.ones_like(sketch_sizes,dtype=float)
from __future__ import annotations
import scipy
import numpy as np
from warnings import warn
from .arrays import ImgArray, PropArray
from .arrays.utils import _docs
from .arrays.utils._corr import subpixel_pcc
from .utils.axesop import *
from .utils.utilcls import Progress
from .utils.deco import dims_to_spatial_axes
from ._cupy import xp, asnumpy, xp_ndi
from ._types import Dims
__all__ = ["fsc", "fourier_shell_correlation", "ncc", "zncc", "fourier_ncc", "fourier_zncc",
"nmi", "pcc_maximum", "ft_pcc_maximum", "pearson_coloc", "manders_coloc"]
@_docs.write_docs
@dims_to_spatial_axes
def fsc(img0: ImgArray,
img1: ImgArray,
nbin: int = 32,
r_max: float = None,
*,
squeeze: bool = True,
dims: Dims = None) -> PropArray:
r"""
Calculate Fourier Shell Correlation (FSC; or Fourier Ring Correlation, FRC, for 2-D images)
between two images. FSC is defined as:
.. math::
FSC(r) = \frac{Re(\sum_{r<r'<r+dr}[F_0(r') \cdot \bar{F_1}(r')])}
{\sqrt{\sum_{r<r'<r+dr}|F_0(r')|^2 \cdot \sum_{r<r'<r+dr}|F_1(r')|^2}}
Parameters
----------
{inputs_of_correlation}
nbin : int, default is 32
Number of bins.
r_max : float, optional
Maximum radius to make profile. Region 0 <= r < r_max will be split into `nbin` rings
(or shells). **Scale must be considered** because scales of each axis may vary.
{squeeze}
{dims}
Returns
-------
PropArray
FSC stored in x-axis by default. If input images have tzcyx-axes, then an array with
tcx-axes will be returned. Make sure x-axis no longer means length in x because images
are Fourier transformed.
"""
img0, img1 = _check_inputs(img0, img1)
spatial_shape = img0.sizesof(dims)
inds = xp.indices(spatial_shape)
center = [s/2 for s in spatial_shape]
r = xp.sqrt(sum(((x - c)/img0.scale[a])**2 for x, c, a in zip(inds, center, dims)))
r_lim = r.max()
# check r_max
if r_max is None:
r_max = r_lim
elif r_max > r_lim or r_max <= 0:
raise ValueError(f"`r_max` must be in range of 0 < r_max <= {r_lim} with this image.")
with Progress("fsc"):
# make radially separated labels
r_rel = r/r_max
labels = (nbin * r_rel).astype(np.uint16)
labels[r_rel >= 1] = 0
c_axes = complement_axes(dims, img0.axes)
nlabels = int(asnumpy(labels.max()))
out = xp.empty(img0.sizesof(c_axes)+(nlabels,), dtype=xp.float32)
def radial_sum(arr):
arr = xp.asarray(arr)
return xp_ndi.sum_labels(arr, labels=labels, index=xp.arange(1, nlabels+1))
f0 = img0.fft(dims=dims)
f1 = img1.fft(dims=dims)
for sl, f0_, f1_ in iter2(f0, f1, c_axes, exclude=dims):
cov = f0_.real*f1_.real + f0_.imag*f1_.imag
pw0 = f0_.real**2 + f0_.imag**2
pw1 = f1_.real**2 + f1_.imag**2
out[sl] = radial_sum(cov)/xp.sqrt(radial_sum(pw0)*radial_sum(pw1))
if out.ndim == 0 and squeeze:
out = out[()]
out = PropArray(asnumpy(out), dtype=np.float32, axes=c_axes+dims[-1],
dirpath=img0.dirpath, metadata=img0.metadata, propname="fsc")
return out
# alias
fourier_shell_correlation = fsc
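# Note: FSC is close to 1 at radii where the two images share the same signal
# and decays toward 0 at frequencies where they are uncorrelated (e.g. noise).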
def _ncc(img0: ImgArray, img1: ImgArray, dims: Dims):
# Basic Normalized Cross Correlation with batch processing
n = np.prod(img0.sizesof(dims))
if isinstance(dims, str):
dims = tuple(img0.axisof(a) for a in dims)
img0 = xp.asarray(img0)
img1 = xp.asarray(img1)
corr = xp.sum(img0 * img1, axis=dims) / (
xp.std(img0, axis=dims)*xp.std(img1, axis=dims)) / n
return asnumpy(corr)
def _masked_ncc(img0: ImgArray, img1: ImgArray, dims: Dims, mask: ImgArray):
if mask.ndim < img0.ndim:
mask = add_axes(img0.axes, img0.shape, mask, mask.axes)
n = np.prod(img0.sizesof(dims))
img0ma = np.ma.array(img0.value, mask=mask)
img1ma = np.ma.array(img1.value, mask=mask)
axis = tuple(img0.axisof(a) for a in dims)
return np.ma.sum(img0ma * img1ma, axis=axis) / (
np.ma.std(img0ma, axis=axis)*np.ma.std(img1ma, axis=axis)) / n
def _zncc(img0: ImgArray, img1: ImgArray, dims: Dims):
# Basic Zero-Normalized Cross Correlation with batch processing.
# Inputs must be already zero-normalized.
if isinstance(dims, str):
dims = tuple(img0.axisof(a) for a in dims)
img0 = xp.asarray(img0)
img1 = xp.asarray(img1)
corr = xp.sum(img0 * img1, axis=dims) / (
xp.sqrt(xp.sum(img0**2, axis=dims)*xp.sum(img1**2, axis=dims)))
return asnumpy(corr)
def _masked_zncc(img0: ImgArray, img1: ImgArray, dims: Dims, mask: ImgArray):
if mask.ndim < img0.ndim:
mask = add_axes(img0.axes, img0.shape, mask, mask.axes)
img0ma = np.ma.array(img0.value, mask=mask)
img1ma = np.ma.array(img1.value, mask=mask)
axis = tuple(img0.axisof(a) for a in dims)
return np.sum(img0ma * img1ma, axis=axis) / (
np.sqrt(np.sum(img0ma**2, axis=axis)*np.sum(img1ma**2, axis=axis)))
@_docs.write_docs
@dims_to_spatial_axes
def ncc(img0: ImgArray,
img1: ImgArray,
mask: ImgArray | None = None,
squeeze: bool = True,
*,
dims: Dims = None) -> PropArray | float:
"""
Normalized Cross Correlation.
Parameters
----------
{inputs_of_correlation}
mask : boolean ImgArray, optional
If provided, True regions will be masked and will not be taken into account when calculate
correlation.
{squeeze}
{dims}
Returns
-------
PropArray or float
Correlation value(s).
"""
with Progress("ncc"):
img0, img1 = _check_inputs(img0, img1)
if mask is None:
corr = _ncc(img0, img1, dims)
else:
corr = _masked_ncc(img0, img1, dims, mask)
return _make_corr_output(corr, img0, "ncc", squeeze, dims)
@_docs.write_docs
@dims_to_spatial_axes
def zncc(img0: ImgArray,
img1: ImgArray,
mask: ImgArray | None = None,
squeeze: bool = True,
*,
dims: Dims = None) -> PropArray | float:
"""
Zero-Normalized Cross Correlation.
Parameters
----------
{inputs_of_correlation}
mask : boolean ImgArray, optional
If provided, True regions will be masked and will not be taken into account when calculate
correlation.
{squeeze}
{dims}
Returns
-------
PropArray or float
Correlation value(s).
"""
with Progress("zncc"):
img0, img1 = _check_inputs(img0, img1)
img0zn = img0 - np.mean(img0, axis=dims, keepdims=True)
img1zn = img1 - np.mean(img1, axis=dims, keepdims=True)
if mask is None:
corr = _zncc(img0zn, img1zn, dims)
else:
corr = _masked_zncc(img0zn, img1zn, dims, mask)
return _make_corr_output(corr, img0, "zncc", squeeze, dims)
# alias
pearson_coloc = zncc
@_docs.write_docs
@dims_to_spatial_axes
def nmi(img0: ImgArray,
img1: ImgArray,
mask: ImgArray | None = None,
bins: int = 100,
squeeze: bool = True,
*,
dims: Dims = None) -> PropArray | float:
r"""
Normalized Mutual Information.
:math:`Y(A, B) = \frac{H(A) + H(B)}{H(A, B)}`
See "Elegant SciPy"
Parameters
----------
{inputs_of_correlation}
mask : boolean ImgArray, optional
If provided, True regions will be masked and will not be taken into account when calculate
correlation.
bins : int, default is 100
Number of bins to construct histograms.
{squeeze}
{dims}
Returns
-------
PropArray or float
Correlation value(s).
"""
from scipy.stats import entropy
img0, img1 = _check_inputs(img0, img1)
c_axes = complement_axes(dims, img0.axes)
out = np.empty(img0.sizesof(c_axes), dtype=np.float32)
if mask is not None and mask.ndim < img0.ndim:
mask = add_axes(img0.axes, img0.shape, mask, mask.axes)
for sl, img0_, img1_ in iter2(img0, img1, c_axes):
mask_ = mask[sl] if mask is not None else np.ones(img0_.shape, dtype=bool)
hist, edges = np.histogramdd([np.ravel(img0_[mask_]),
np.ravel(img1_[mask_])], bins=bins)
import numpy as np
import sys
import monai
import ponai
# sys.path.append('/nfs/home/pedro/portio')
from torchvision import datasets, transforms, models
from torch.utils.data import DataLoader, Dataset
from PIL import Image
import pandas as pd
import os
import argparse
import torchvision
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from model.model import nnUNet
import random
from model.metric import DiceLoss
import glob
import time
import nibabel as nib
import re
import monai.visualize.img2tensorboard as img2tensorboard
sys.path.append('/nfs/home/pedro/RangerLARS/over9000')
# from over9000 import RangerLars
os.chdir('/nfs/home/pedro/PhysicsPyTorch')
import porchio
from early_stopping import pytorchtools
import runai.hpo
strategy = runai.hpo.Strategy.GridSearch
runai.hpo.init('/nfs/home/pedro/', 'stratification')
# import yaml
# print(f'The pyyaml version is {yaml.__version__}')
class PairwiseMeasures(object):
def __init__(self, seg_img, ref_img,
measures=None, num_neighbors=8, pixdim=(1, 1, 1),
empty=False, list_labels=None):
self.m_dict = {
'ref volume': (self.n_pos_ref, 'Volume (Ref)'),
'seg volume': (self.n_pos_seg, 'Volume (Seg)'),
'ref bg volume': (self.n_neg_ref, 'Volume (Ref bg)'),
'seg bg volume': (self.n_neg_seg, 'Volume (Seg bg)'),
'list_labels': (self.list_labels, 'List Labels Seg'),
'fp': (self.fp, 'FP'),
'fn': (self.fn, 'FN'),
'tp': (self.tp, 'TP'),
'tn': (self.tn, 'TN'),
'n_intersection': (self.n_intersection, 'Intersection'),
'n_union': (self.n_union, 'Union'),
'sensitivity': (self.sensitivity, 'Sens'),
'specificity': (self.specificity, 'Spec'),
'accuracy': (self.accuracy, 'Acc'),
'fpr': (self.false_positive_rate, 'FPR'),
'ppv': (self.positive_predictive_values, 'PPV'),
'npv': (self.negative_predictive_values, 'NPV'),
'dice': (self.dice_score, 'Dice'),
'IoU': (self.intersection_over_union, 'IoU'),
'jaccard': (self.jaccard, 'Jaccard'),
'informedness': (self.informedness, 'Informedness'),
'markedness': (self.markedness, 'Markedness'),
'vol_diff': (self.vol_diff, 'VolDiff'),
'ave_dist': (self.measured_average_distance, 'AveDist'),
'haus_dist': (self.measured_hausdorff_distance, 'HausDist'),
'connected_elements': (self.connected_elements, 'TPc,FPc,FNc'),
'outline_error': (self.outline_error, 'OER,OEFP,OEFN'),
'detection_error': (self.detection_error, 'DE,DEFP,DEFN')
}
self.seg = seg_img
self.ref = ref_img
self.list_labels = list_labels
self.flag_empty = empty
self.measures = measures if measures is not None else self.m_dict
self.neigh = num_neighbors
self.pixdim = pixdim
def check_binary(self):
"""
Checks whether self.seg and self.ref are binary. This is to enable
measurements such as 'false positives', which only have meaning in
the binary case (what is positive/negative for multiple class?)
"""
is_seg_binary, is_ref_binary = [((x > 0.5) == x).all()
for x in [self.seg, self.ref]]
# if (not is_ref_binary) or (not is_seg_binary):
# raise ValueError("The input segmentation/reference images"
# " must be binary for this function.")
def __FPmap(self):
"""
This function calculates the false positive map from binary
segmentation and reference maps
:return: FP map
"""
self.check_binary()
return np.asarray((self.seg - self.ref) > 0.0, dtype=np.float32)
def __FNmap(self):
"""
This function calculates the false negative map
:return: FN map
"""
self.check_binary()
return np.asarray((self.ref - self.seg) > 0.0, dtype=np.float32)
def __TPmap(self):
"""
This function calculates the true positive map (i.e. how many
reference voxels are positive)
:return: TP map
"""
self.check_binary()
return np.logical_and(self.ref > 0.5, self.seg > 0.5).astype(float)
def __TNmap(self):
"""
This function calculates the true negative map
:return: TN map
"""
self.check_binary()
return np.logical_and(self.ref < 0.5, self.seg < 0.5).astype(float)
def __union_map(self):
"""
This function calculates the union map between segmentation and
reference image
:return: union map
"""
self.check_binary()
return np.logical_or(self.ref, self.seg).astype(float)
def __intersection_map(self):
"""
This function calculates the intersection between segmentation and
reference image
:return: intersection map
"""
self.check_binary()
return np.multiply(self.ref, self.seg)
def n_pos_ref(self):
return np.sum(self.ref)
def n_neg_ref(self):
self.check_binary()
return np.sum(self.ref == 0)
def n_pos_seg(self):
return np.sum(self.seg)
def n_neg_seg(self):
return np.sum(1 - self.seg)
def fp(self):
return np.sum(self.__FPmap())
def fn(self):
return np.sum(self.__FNmap())
def tp(self):
return np.sum(self.__TPmap())
def tn(self):
return np.sum(self.__TNmap())
def n_intersection(self):
return np.sum(self.__intersection_map())
def n_union(self):
return np.sum(self.__union_map())
def sensitivity(self):
return self.tp() / self.n_pos_ref()
def specificity(self):
return self.tn() / self.n_neg_ref()
def accuracy(self):
return (self.tn() + self.tp()) / \
(self.tn() + self.tp() + self.fn() + self.fp())
def false_positive_rate(self):
return self.fp() / self.n_neg_ref()
def positive_predictive_values(self):
if self.flag_empty:
return -1
return self.tp() / (self.tp() + self.fp())
def negative_predictive_values(self):
"""
This function calculates the negative predictive value ratio between
the number of true negatives and the total number of negative elements
:return:
"""
return self.tn() / (self.fn() + self.tn())
def dice_score(self):
"""
This function returns the dice score coefficient between a reference
and segmentation images
:return: dice score
"""
return 2 * self.tp() / np.sum(self.ref + self.seg)
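# Note: Dice and Jaccard are monotonically related: dice = 2*jaccard/(1 + jaccard).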
def intersection_over_union(self):
"""
This function the intersection over union ratio - Definition of
jaccard coefficient
:return:
"""
return self.n_intersection() / self.n_union()
def jaccard(self):
"""
This function returns the jaccard coefficient (defined as
intersection over union)
:return: jaccard coefficient
"""
return self.intersection_over_union()
def informedness(self):
"""
This function calculates the informedness between the segmentation
and the reference
:return: informedness
"""
return self.sensitivity() + self.specificity() - 1
def markedness(self):
"""
This functions calculates the markedness
:return:
"""
return self.positive_predictive_values() + \
self.negative_predictive_values() - 1
def list_labels(self):
if self.list_labels is None:
return ()
return tuple(np.unique(self.list_labels))
def vol_diff(self):
"""
This function calculates the ratio of difference in volume between
the reference and segmentation images.
:return: vol_diff
"""
return np.abs(self.n_pos_ref() - self.n_pos_seg()) / self.n_pos_ref()
# @CacheFunctionOutput
# def _boundaries_dist_mat(self):
# dist = DistanceMetric.get_metric('euclidean')
# border_ref = MorphologyOps(self.ref, self.neigh).border_map()
# border_seg = MorphologyOps(self.seg, self.neigh).border_map()
# coord_ref = np.multiply(np.argwhere(border_ref > 0), self.pixdim)
# coord_seg = np.multiply(np.argwhere(border_seg > 0), self.pixdim)
# pairwise_dist = dist.pairwise(coord_ref, coord_seg)
# return pairwise_dist
def measured_distance(self):
"""
This functions calculates the average symmetric distance and the
hausdorff distance between a segmentation and a reference image
:return: hausdorff distance and average symmetric distance
"""
ref_border_dist, seg_border_dist, ref_border, \
seg_border = self.border_distance()
average_distance = (np.sum(ref_border_dist) + np.sum(
seg_border_dist)) / (np.sum(self.ref + self.seg))
hausdorff_distance = np.max(
[np.max(ref_border_dist), np.max(seg_border_dist)])
return hausdorff_distance, average_distance
def measured_average_distance(self):
"""
This function returns only the average distance when calculating the
distances between segmentation and reference
:return:
"""
return self.measured_distance()[1]
def measured_hausdorff_distance(self):
"""
This function returns only the hausdorff distance when calculated the
distances between segmentation and reference
:return:
"""
return self.measured_distance()[0]
# def average_distance(self):
# pairwise_dist = self._boundaries_dist_mat()
# return (np.sum(np.min(pairwise_dist, 0)) + \
# np.sum(np.min(pairwise_dist, 1))) / \
# (np.sum(self.ref + self.seg))
#
# def hausdorff_distance(self):
# pairwise_dist = self._boundaries_dist_mat()
# return np.max((np.max(np.min(pairwise_dist, 0)),
# np.max(np.min(pairwise_dist, 1))))
def connected_elements(self):
"""
This function returns the number of FP FN and TP in terms of
connected components.
:return: Number of true positive connected components, Number of
false positives connected components, Number of false negatives
connected components
"""
blobs_ref, blobs_seg, init = self._connected_components()
list_blobs_ref = range(1, blobs_ref[1])
list_blobs_seg = range(1, blobs_seg[1])
mul_blobs_ref = np.multiply(blobs_ref[0], init)
mul_blobs_seg = np.multiply(blobs_seg[0], init)
list_TP_ref = np.unique(mul_blobs_ref[mul_blobs_ref > 0])
list_TP_seg = np.unique(mul_blobs_seg[mul_blobs_seg > 0])
list_FN = [x for x in list_blobs_ref if x not in list_TP_ref]
list_FP = [x for x in list_blobs_seg if x not in list_TP_seg]
return len(list_TP_ref), len(list_FP), len(list_FN)
def connected_errormaps(self):
"""
This functions calculates the error maps from the connected components
:return:
"""
blobs_ref, blobs_seg, init = self._connected_components()
list_blobs_ref = range(1, blobs_ref[1])
list_blobs_seg = range(1, blobs_seg[1])
mul_blobs_ref = np.multiply(blobs_ref[0], init)
mul_blobs_seg = np.multiply(blobs_seg[0], init)
list_TP_ref = np.unique(mul_blobs_ref[mul_blobs_ref > 0])
list_TP_seg = np.unique(mul_blobs_seg[mul_blobs_seg > 0])
list_FN = [x for x in list_blobs_ref if x not in list_TP_ref]
list_FP = [x for x in list_blobs_seg if x not in list_TP_seg]
# print(np.max(blobs_ref),np.max(blobs_seg))
tpc_map = np.zeros_like(blobs_ref[0])
fpc_map = np.zeros_like(blobs_ref[0])
fnc_map = np.zeros_like(blobs_ref[0])
for i in list_TP_ref:
tpc_map[blobs_ref[0] == i] = 1
for i in list_TP_seg:
tpc_map[blobs_seg[0] == i] = 1
for i in list_FN:
fnc_map[blobs_ref[0] == i] = 1
for i in list_FP:
fpc_map[blobs_seg[0] == i] = 1
return tpc_map, fnc_map, fpc_map
def outline_error(self):
"""
This function calculates the outline error as defined in Wack et al.
:return: OER: Outline error ratio, OEFP: number of false positive
outline error voxels, OEFN: number of false negative outline error
elements
"""
TPcMap, _, _ = self.connected_errormaps()
OEFMap = self.ref - np.multiply(TPcMap, self.seg)
unique, counts = np.unique(OEFMap, return_counts=True)
# print(counts)
OEFN = counts[unique == 1]
OEFP = counts[unique == -1]
OEFN = 0 if len(OEFN) == 0 else OEFN[0]
OEFP = 0 if len(OEFP) == 0 else OEFP[0]
OER = 2 * (OEFN + OEFP) / (self.n_pos_seg() + self.n_pos_ref())
return OER, OEFP, OEFN
def detection_error(self):
"""
This function calculates the volume of detection error as defined in
Wack et al.
:return: DE: Total volume of detection error, DEFP: Detection error
false positives, DEFN: Detection error false negatives
"""
TPcMap, FNcMap, FPcMap = self.connected_errormaps()
DEFN = np.sum(FNcMap)
DEFP = np.sum(FPcMap)
return DEFN + DEFP, DEFP, DEFN
def header_str(self):
result_str = [self.m_dict[key][1] for key in self.measures]
result_str = ',' + ','.join(result_str)
return result_str
def to_string(self, fmt='{:.4f}'):
result_str = ""
list_space = ['com_ref', 'com_seg', 'list_labels']
for key in self.measures:
result = self.m_dict[key][0]()
if key in list_space:
result_str += ' '.join(fmt.format(x) for x in result) \
if isinstance(result, tuple) else fmt.format(result)
else:
result_str += ','.join(fmt.format(x) for x in result) \
if isinstance(result, tuple) else fmt.format(result)
result_str += ','
return result_str[:-1] # trim the last comma
class PairwiseMeasuresRegression(object):
def __init__(self, reg_img, ref_img, measures=None):
self.reg = reg_img
self.ref = ref_img
self.measures = measures
self.m_dict = {
'mse': (self.mse, 'MSE'),
'rmse': (self.rmse, 'RMSE'),
'mae': (self.mae, 'MAE'),
'r2': (self.r2, 'R2')
}
def mse(self):
return np.mean(np.square(self.reg - self.ref))
def rmse(self):
return np.sqrt(self.mse())
def mae(self):
return np.mean(np.abs(self.ref - self.reg))
def r2(self):
ref_var = np.sum(np.square(self.ref - np.mean(self.ref)))
reg_var = np.sum(np.square(self.reg - np.mean(self.reg)))
cov_refreg = np.sum(
(self.reg - np.mean(self.reg)) * (self.ref - np.mean(self.ref)))
# assumed completion: r2 as the squared Pearson correlation between reg and ref
return np.square(cov_refreg) / (ref_var * reg_var)
import numpy as np
from scipy.interpolate import CubicSpline
class WaypointTraj(object):
"""
"""
def __init__(self, points):
"""
This is the constructor for the Trajectory object. A fresh trajectory
object will be constructed before each mission. For a waypoint
trajectory, the input argument is an array of 3D destination
coordinates. You are free to choose the times of arrival and the path
taken between the points in any way you like.
You should initialize parameters and pre-compute values such as
polynomial coefficients here.
Inputs:
points, (N, 3) array of N waypoint coordinates in 3D
"""
self.v = 2 #m/s
self.points = points
self.t = np.zeros(len(points),)
if np.shape(self.points) == (3,) or np.shape(self.points) == (1,3):
pass
elif np.shape(self.points) != (3,) or np.shape(self.points) != (1,3):
for i in range(len(self.t)-1):
self.t[(i+1)] = np.linalg.norm((points[(i+1)]-points[i]))/self.v
self.point_t = np.zeros(len(points),)
for i in range(int(len(self.t)-1)):
self.point_t[(i+1)] = self.point_t[i] + self.t[i+1]
self.f = CubicSpline(self.point_t,self.points,axis = 0)
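# Segment durations are distance/self.v, so the fitted cubic spline traverses
# the waypoints at a roughly constant speed of self.v m/s.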
def update(self, t):
"""
Given the present time, return the desired flat output and derivatives.
Inputs
t, time, s
Outputs
flat_output, a dict describing the present desired flat outputs with keys
x, position, m
x_dot, velocity, m/s
x_ddot, acceleration, m/s**2
x_dddot, jerk, m/s**3
x_ddddot, snap, m/s**4
yaw, yaw angle, rad
yaw_dot, yaw rate, rad/s
"""
x = np.zeros((3,))
x_dot = np.zeros((3,))
x_ddot = np.zeros((3,))
x_dddot = np.zeros((3,))
x_ddddot = np.zeros((3,))
yaw = 0
yaw_dot = 0
if np.shape(self.points) == (3,) or np.shape(self.points) == (1,3):
import warnings
import numpy as np
from scipy import linspace
from scipy.ndimage import affine_transform
'''Automatically determine whether background is darker than foreground'''
MODE_AUTO = "auto"
'''Background is darker than foreground'''
MODE_DARK = "dark"
'''Background is brighter than foreground'''
MODE_BRIGHT = "bright"
'''Some foreground is darker, some is lighter'''
MODE_GRAY = "gray"
def prcntiles(x,percents):
'''Equivalent to matlab prctile(x,p), uses linear interpolation.'''
x=np.array(x).flatten()
listx = np.sort(x)
xpcts=[]
lenlistx=len(listx)
refs=[]
for i in range(0,lenlistx):
r=100*((.5+i)/lenlistx) #refs[i] is percentile of listx[i] in matrix x
refs.append(r)
rpcts=[]
for p in percents:
if p<refs[0]:
rpcts.append(listx[0])
elif p>refs[-1]:
rpcts.append(listx[-1])
else:
for j in range(0,lenlistx): #lenlistx=len(refs)
if refs[j]<=p and refs[j+1]>=p:
my=listx[j+1]-listx[j]
mx=refs[j+1]-refs[j]
m=my/mx #slope of line between points
rpcts.append((m*(p-refs[j]))+listx[j])
break
xpcts.append(rpcts)
return np.array(xpcts).transpose()
def automode(data):
'''Tries to guess if the image contains dark objects on a bright background (1)
or if the image contains bright objects on a dark background (-1),
or if it contains both dark and bright objects on a gray background (0).'''
pct=prcntiles(np.array(data),[1,20,80,99])
upper=pct[3]-pct[2]
mid=pct[2]-pct[1]
lower=pct[1]-pct[0]
##print 'upper = '+str(upper)
##print 'mid = '+str(mid)
##print 'lower = '+str(lower)
#upper objects
if upper>mid:
uo=1
else:
uo=0
##print 'uo = '+str(uo)
#lower objects
if lower>mid:
lo=1
else:
lo=0
##print 'lo = '+str(lo)
if uo==1:
if lo==1:
mode=0
#both upper and lower objects
else:
mode=-1
#only upper objects
else:
if lo==1:
mode=1
#only lower objects
else:
mode=0
#no objects at all
return mode
def spline_factors(u):
'''u is np.array'''
X = np.array([(1.-u)**3 , 4-(6.*(u**2))+(3.*(u**3)) , 1.+(3.*u)+(3.*(u**2))-(3.*(u**3)) , u**3]) * (1./6)
return X
def pick(picklist,val):
'''Index to first value in picklist that is larger than val.
If none is larger, index=len(picklist).'''
assert np.all(np.sort(picklist) == picklist), "pick list is not ordered correctly"
val = np.array(val)
i_pick, i_val = np.mgrid[0:len(picklist),0:len(val)]
#
# Mark a picklist entry as 1 if the value is before or at,
# mark it as zero if it is afterward
#
is_not_larger = picklist[i_pick] <= val[i_val]
#
# The index is the number of entries that are 1
#
p = np.sum(is_not_larger, 0)
return p
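# Worked example: pick(np.array([0, 2, 5]), [1, 6]) returns [1, 3] -- the
# count of picklist entries <= each value, so a value beyond the largest
# entry maps to len(picklist).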
def confine(x,low,high):
'''Confine x to [low,high]. Values outside are set to low/high.
See also restrict.'''
y=x.copy()
y[y < low] = low
y[y > high] = high
return y
def gauss(x,m_y,sigma):
'''returns the gaussian with mean m_y and std. dev. sigma,
calculated at the points of x.'''
e_y = [np.exp((1.0/(2*float(sigma)**2)*-(n-m_y)**2)) for n in np.array(x)]
y = [1.0/(float(sigma) * np.sqrt(2 * np.pi)) * e for e in e_y]
return np.array(y)
def d2gauss(x,m_y,sigma):
'''returns the second derivative of the gaussian with mean m_y,
and standard deviation sigma, calculated at the points of x.'''
return gauss(x,m_y,sigma)*[-1/sigma**2 + (n-m_y)**2/sigma**4 for n in x]
def spline_matrix(x,px):
n=len(px)
lx=len(x)
# Assign each x to an interval. Subtract 1 to get the beginning of the interval.
px = np.array(px)
x = np.array(x)
j = np.array(pick(px,x)) - 1
#
# We need at least one entry before and two after for the four factors
# of the cubic spline
#
j = confine(j, 1, n-3)
u = (x-px[j]) / (px[j+1]-px[j]) #how far are we on the line segment px[j]->px[j+1], 0<=u<1
spf=spline_factors(u)
#
# Set up to broadcast spf to the correct spline factors
# The cubic has four factors that broadcast starting at j-1 to j+2
#
ii, jj = np.mgrid[0:spf.shape[0], 0:lx]
V = np.zeros((n, lx))
V[j[jj] - 1 + ii, jj] = spf[ii, jj]
return V
def spline_matrix2d(x,y,px,py,mask=None):
'''For boundary constraints, the first two and last two spline pieces are constrained
to be part of the same cubic curve.'''
V = np.kron(spline_matrix(x,px),spline_matrix(y,py))
lenV = len(V)
if mask is not None:
indices = np.nonzero(mask.T.flatten())
if len(indices)>1:
indices = np.nonzero(mask.T.flatten())[1][0]
newV=V.T[indices]
V=newV.T
V=V.reshape((V.shape[0],V.shape[1]))
return V
def splinefit2d(x, y, z, px, py, mask=None):
'''Make a least squares fit of the spline (px,py,pz) to the surface (x,y,z).
If mask is given, only masked points are used for the regression.'''
if mask is None:
V = np.array(spline_matrix2d(x, y, px, py))
a = np.array(z.T.flatten())
pz = np.linalg.lstsq(V.T, a.T)
import numpy as np
import vectormath as vmath
from sortedcontainers import SortedList
# constants
INFINITY = 2 ** 63 # integer max (python 3 has no bound)
DEBUG = False
EPSILON = 1e-12 # assumed small-pivot constant; referenced by _LUDecompose
############################################################################
# sub classes for algorithm
#
############################################################################
# sub triangle data (vertex indexes, coordinates, scales, precomputed Matrix)
class Triangle:
def __init__(self, v1, v2, v3): # 3 vertices for a trangle
self.nVerts = [v1, v2, v3]
self.vTriCoords = [] # 2D position (x,y)
self.vScaled = np.zeros((3, 2), dtype=float) # un-scaled triangle
# GMatrix: pre-computed matrices for triangle scaling step
self.mF = self.mC = [[]]
# simply 2D coordinate
# class Vertex:
# def __init__(self, x, y):
# self.x, self.y = x, y
class Constraint:
def __init__(self, nVertex, vec):
self.nVertex = nVertex
self.vConstrainedPos = vec
def __lt__(self, other):
return self.nVertex < other.nVertex
# LU-decomp, matrix and pivot
class LUData: # information of LU decompositions
def __init__(self, matrix, vPivots):
self.mLU = matrix
self.vPivots = vPivots
##############################################################################
# global variables : m is member variable
# @TODO make it as a class
###############################################################################
m_bSetupValid = None
m_mFirstMatrix = None # G' matrix
m_vConstraints = SortedList()
m_vInitialVerts = [] # initial positions of points
m_vDeformedVerts = [] # current deformed positions of points
m_vTriangles = [] # contains deformed triangles
m_vVertexMap = [] # m_vVertexMap
m_mHXPrime, m_mHYPrime = None, None # m_mHXPrime, m_mHYPrime
m_mDX, m_mDY = None, None # m_mDX, m_mDY
m_mLUDecompX, m_mLUDecompY = None, None # m_mLUDecompX, m_mLUDecompY
# functions
def Error():
print("ERROR")
exit()
def _invalidateSetup():
# global m_bSetupValid
m_bSetupValid = False
def _getInitialVert(nVert, Verts):
ret = vmath.Vector2(float(Verts[nVert][0]), float(Verts[nVert][1]))
return ret
def _normalize(vec):
l = vec.length
return vec / l
def _squared_length(vec):
return vec.length * vec.length
def _extractSubMatrix(mFrom, nRowOffset, nColOffset, row, col):
ret = np.zeros((row, col), dtype=float)
for i in range(row):
for j in range(col):
ret[i][j] = mFrom[i + nRowOffset][j + nColOffset]
return ret
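# Equivalent to the numpy slice
# mFrom[nRowOffset:nRowOffset+row, nColOffset:nColOffset+col].copy()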
####################################################################
# Static Matrices
#
####################################################################
#
# 1. scale-free transfrom matrix
#
def _precomputeOrientationMatrix():
if DEBUG:
print("\nprecomputeOrientationMatrix()")
# m_vConstraints = shared.m_vConstraints
# put constraints into vConstraintVec
vConstraintVec = []
for i in range(len(m_vConstraints)):
vConstraintVec.append(m_vConstraints[i])
# resize matrix and clear to zero
nVerts = len(m_vDeformedVerts)
G = np.zeros((nVerts * 2, nVerts * 2), dtype=float) # G' matrix in eqn (8)
nConstraints = len(vConstraintVec)
nFreeVerts = nVerts - nConstraints
if DEBUG:
print("nConstraints =", nConstraints, ", Free =", nFreeVerts)
# figure out vertices ordering. First free vertices and then constraints
nRow = 0
m_vVertexMap = np.zeros(nVerts, dtype=int)
for i in range(nVerts):
c = Constraint(i, [0.0, 0.0])
if m_vConstraints.count(c) > 0:
continue
m_vVertexMap[i] = nRow
nRow += 1
if nRow != nFreeVerts:
Error()
for i in range(nConstraints):
m_vVertexMap[vConstraintVec[i].nVertex] = nRow
nRow += 1
if nRow != nVerts:
Error()
# test vectors
gUTest = np.zeros(nVerts * 2, dtype=float)
for i in range(nVerts):
c = Constraint(i, [0.0, 0.0])
if m_vConstraints.count(c) > 0:
continue
Row = m_vVertexMap[i]
gUTest[Row * 2] = m_vInitialVerts[i][0]
gUTest[Row * 2 + 1] = m_vInitialVerts[i][1]
for i in range(nConstraints):
Row = m_vVertexMap[vConstraintVec[i].nVertex]
gUTest[Row * 2] = vConstraintVec[i].vConstrainedPos[0]
gUTest[Row * 2 + 1] = vConstraintVec[i].vConstrainedPos[1]
# fill matrix
line = 1
nTri = len(m_vTriangles)
for i in range(nTri):
t = m_vTriangles[i]
fTriSumErr = 0 # Error of the triangles
for j in range(3):
fTriErr = 0 # Error of the subtriangles
n0x = 2 * m_vVertexMap[t.nVerts[j]]
n0y = n0x + 1
n1x = 2 * m_vVertexMap[t.nVerts[(j + 1) % 3]]
n1y = n1x + 1
n2x = 2 * m_vVertexMap[t.nVerts[(j + 2) % 3]]
n2y = n2x + 1
x, y = t.vTriCoords[j][0], t.vTriCoords[j][1]
v0 = vmath.Vector2(float(gUTest[n0x]), float(gUTest[n0y]))
v1 = vmath.Vector2(float(gUTest[n1x]), float(gUTest[n1y]))
v2 = vmath.Vector2(float(gUTest[n2x]), float(gUTest[n2y]))
v01 = v1 - v0
v01Perp = vmath.Vector2(v01[1], -v01[0])
vTest = v0 + x * v01 + y * v01Perp
fDist = (vTest - v2).dot(vTest - v2)
"""
add line = 1 for debug
print("debug line", line, ":", x, y)
print("debug line", line, ":", v0[0], v0[1])
print("debug line", line, ":", v1[0], v1[1])
print("debug line", line, ":", v2[0], v2[1])
print("debug line", line, ":", v01[0], v01[1])
print("debug line", line, ":", v01Perp[0], v01Perp[1])
print("debug line", line, ":", vTest[0], vTest[1])
line += 1
if fDist > 0.0001:
Error()
"""
G[n0x][n0x] += 1 - 2 * x + x * x + y * y
G[n0x][n1x] += 2 * x - 2 * x * x - 2 * y * y
G[n0x][n1y] += 2 * y
G[n0x][n2x] += -2 + 2 * x
G[n0x][n2y] += -2 * y
fTriErr += (1 - 2 * x + x * x + y * y) * gUTest[n0x] * gUTest[n0x]
fTriErr += (2 * x - 2 * x * x - 2 * y * y) * \
gUTest[n0x] * gUTest[n1x]
fTriErr += (2 * y) * gUTest[n0x] * gUTest[n1y]
fTriErr += (-2 + 2 * x) * gUTest[n0x] * gUTest[n2x]
fTriErr += (-2 * y) * gUTest[n0x] * gUTest[n2y]
G[n0y][n0y] += 1 - 2 * x + x * x + y * y
G[n0y][n1x] += -2 * y
G[n0y][n1y] += 2 * x - 2 * x * x - 2 * y * y
G[n0y][n2x] += 2 * y
G[n0y][n2y] += -2 + 2 * x
fTriErr += (1 - 2 * x + x * x + y * y) * gUTest[n0y] * gUTest[n0y]
fTriErr += (-2 * y) * gUTest[n0y] * gUTest[n1x]
fTriErr += (2 * x - 2 * x * x - 2 * y * y) * \
gUTest[n0y] * gUTest[n1y]
fTriErr += (2 * y) * gUTest[n0y] * gUTest[n2x]
fTriErr += (-2 + 2 * x) * gUTest[n0y] * gUTest[n2y]
G[n1x][n1x] += x * x + y * y
G[n1x][n2x] += -2 * x
G[n1x][n2y] += 2 * y
fTriErr += (x * x + y * y) * gUTest[n1x] * gUTest[n1x]
fTriErr += (-2 * x) * gUTest[n1x] * gUTest[n2x]
fTriErr += (2 * y) * gUTest[n1x] * gUTest[n2y]
G[n1y][n1y] += x * x + y * y
G[n1y][n2x] += -2 * y
G[n1y][n2y] += -2 * x
fTriErr += (x * x + y * y) * gUTest[n1y] * gUTest[n1y]
fTriErr += (-2 * y) * gUTest[n1y] * gUTest[n2x]
fTriErr += (-2 * x) * gUTest[n1y] * gUTest[n2y]
G[n2x][n2x] += 1
G[n2y][n2y] += 1
fTriErr += gUTest[n2x] * gUTest[n2x] + gUTest[n2y] * gUTest[n2y]
fTriSumErr += fTriErr
gUTemp = np.matmul(G, gUTest)
fSum = gUTemp.dot(gUTest)
# print("(test) Residual =", fSum)
# extract G00 matrix
G00 = np.zeros((2 * nFreeVerts, 2 * nFreeVerts), dtype=float)
dim = np.shape(G00)
row, col = dim[0], dim[1]
G00 = _extractSubMatrix(G, 0, 0, row, col)
# extract G01 and G10 matrices
G01 = np.zeros((2 * nFreeVerts, 2 * nConstraints), dtype=float)
dim = np.shape(G01)
row, col = dim[0], dim[1]
G01 = _extractSubMatrix(G, 0, 2 * nFreeVerts, row, col)
G10 = np.zeros((2 * nConstraints, 2 * nFreeVerts), dtype=float)
dim = np.shape(G10)
row, col = dim[0], dim[1]
G10 = _extractSubMatrix(G, 2 * nFreeVerts, 0, row, col)
# compute GPrime = G00 + Transpose(G00) and B = G01 + Transpose(G10) eqn (8)
GPrime = G00 + np.transpose(G00)
B = G01 + np.transpose(G10)
# invert GPrime and final result = -GPrimeInverse * B
GPrimeInverse = np.linalg.inv(GPrime)
mFinal = np.matmul(GPrimeInverse, B)
return -mFinal
# checked: gUTest, m_vVertexMap, G, G00, G01, G10, GPrime, B, GPrimeInverse, mFinal
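# The returned matrix maps the constrained vertex positions directly to the
# error-minimizing free vertex positions, i.e. u_free = -GPrime^{-1} * B * u_constrained.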
#
# LU decomposition for scale matrix calculation
#
def _LUDecompose(mMatrix, vDecomp): # return tuple(ifSquare, vDecomp)
dim = np.shape(mMatrix)
row, col = dim[0], dim[1]
if row != col:
return False, vDecomp
# initialize vDecomp
vDecomp = LUData(np.zeros((row, row), dtype=float), np.zeros(row, int))
vPivots = vDecomp.vPivots # need to assign value back
mLUMatrix = vDecomp.mLU # need to assign value back
mLUMatrix = mMatrix
# scaling of each row
dRowSwaps, dTemp = 1, None
vScale = np.zeros(row, dtype=float)
for i in range(row):
dLargest = 0.0
for j in range(row):
dTemp = abs(float(mLUMatrix[i][j]))
if (dTemp > dLargest):
dLargest = dTemp
if dLargest == 0:
return False, vDecomp
vScale[i] = 1.0 / dLargest
niMax = 0
for j in range(row):
for i in range(j):
dSum = mLUMatrix[i][j]
for k in range(i):
dSum -= mLUMatrix[i][k] * mLUMatrix[k][j]
mLUMatrix[i][j] = dSum
dLargestPivot = 0.0
for i in range(j, row):
dSum = mLUMatrix[i][j]
for k in range(j):
dSum -= mLUMatrix[i][k] * mLUMatrix[k][j]
mLUMatrix[i][j] = dSum
dTemp = vScale[i] * abs(float(dSum))
if dTemp > dLargestPivot:
dLargestPivot = dTemp
niMax = i
if j != niMax:
for k in range(row):
dSwap = mLUMatrix[niMax][k]
mLUMatrix[niMax][k] = mLUMatrix[j][k]
mLUMatrix[j][k] = dSwap
dRowSwaps = -dRowSwaps
vScale[niMax] = vScale[j]
vPivots[j] = niMax
if mLUMatrix[j][j] == 0:
mLUMatrix[j][j] = EPSILON
if j != row - 1:
dScale = 1.0 / mLUMatrix[j][j]
for i in range(j + 1, row):
mLUMatrix[i][j] *= dScale
vDecomp = LUData(mLUMatrix, vPivots)
return True, vDecomp
#
# 2. scaling matrix
#
#
def _precomputeScalingMatrices(nTriangle):
if DEBUG:
print("precomputeScalingMatrices(", nTriangle, ")")
t = m_vTriangles[nTriangle]
t.mF = np.zeros((4, 4), dtype=float)
t.mC = np.zeros((4, 6), dtype=float)
# precompute coefficients
x01 = t.vTriCoords[0][0]
y01 = t.vTriCoords[0][1]
x12 = t.vTriCoords[1][0]
y12 = t.vTriCoords[1][1]
x20 = t.vTriCoords[2][0]
y20 = t.vTriCoords[2][1]
k1 = x12 * y01 + (-1 + x01) * y12
k2 = -x12 + x01 * x12 - y01 * y12
k3 = -y01 + x20 * y01 + x01 * y20
k4 = -y01 + x01 * y01 + x01 * y20
k5 = -x01 + x01 * x20 - y01 * y20
a = -1 + x01
a1 = pow(-1 + x01, 2) + pow(y01, 2)
a2 = pow(x01, 2) + pow(y01, 2)
b = -1 + x20
b1 = pow(-1 + x20, 2) + pow(y20, 2)
c2 = pow(x12, 2) + pow(y12, 2)
r1 = 1 + 2 * a * x12 + a1 * pow(x12, 2) - 2 * y01 * y12 + a1 * pow(y12, 2)
r2 = -(b * x01) - b1 * pow(x01, 2) + y01 * (-(b1 * y01) + y20)
r3 = -(a * x12) - a1 * pow(x12, 2) + y12 * (y01 - a1 * y12)
r5 = a * x01 + pow(y01, 2)
r6 = -(b * y01) - x01 * y20
r7 = 1 + 2 * b * x01 + b1 * pow(x01, 2) + b1 * pow(y01, 2) - 2 * y01 * y20
# setup F matrix
t.mF[0][0] = 2 * a1 + 2 * a1 * c2 + 2 * r7
t.mF[0][1] = 0
t.mF[0][2] = 2 * r2 + 2 * r3 - 2 * r5
t.mF[0][3] = 2 * k1 + 2 * r6 + 2 * y01
t.mF[1][0] = 0
t.mF[1][1] = 2 * a1 + 2 * a1 * c2 + 2 * r7
t.mF[1][2] = -2 * k1 + 2 * k3 - 2 * y01
t.mF[1][3] = 2 * r2 + 2 * r3 - 2 * r5
t.mF[2][0] = 2 * r2 + 2 * r3 - 2 * r5
t.mF[2][1] = -2 * k1 + 2 * k3 - 2 * y01
t.mF[2][2] = 2 * a2 + 2 * a2 * b1 + 2 * r1
t.mF[2][3] = 0
t.mF[3][0] = 2 * k1 - 2 * k3 + 2 * y01
t.mF[3][1] = 2 * r2 + 2 * r3 - 2 * r5
t.mF[3][2] = 0
t.mF[3][3] = 2 * a2 + 2 * a2 * b1 + 2 * r1
mFInverse = np.linalg.inv(t.mF)
mFInverse *= -1.0
t.mF = mFInverse
# setup C matrix
t.mC[0][0] = 2 * k2
t.mC[0][1] = -2 * k1
t.mC[0][2] = 2 * (-1 - k5)
t.mC[0][3] = 2 * k3
t.mC[0][4] = 2 * a
t.mC[0][5] = -2 * y01
t.mC[1][0] = 2 * k1
t.mC[1][1] = 2 * k2
t.mC[1][2] = -2 * k3
t.mC[1][3] = 2 * (-1 - k5)
t.mC[1][4] = 2 * y01
t.mC[1][5] = 2 * a
t.mC[2][0] = 2 * (-1 - k2)
t.mC[2][1] = 2 * k1
t.mC[2][2] = 2 * k5
t.mC[2][3] = 2 * r6
t.mC[2][4] = -2 * x01
t.mC[2][5] = 2 * y01
t.mC[3][0] = 2 * k1
t.mC[3][1] = 2 * (-1 - k2)
t.mC[3][2] = -2 * k3
t.mC[3][3] = 2 * k5
t.mC[3][4] = -2 * y01
t.mC[3][5] = -2 * x01
# np.set_printoptions(precision = 4, suppress = True)
# print("t.mC:", t.mC)
# print("t.mF:", t.mF)
return t
#
# 3. Fitting Matrix
#
def _precomputeFittingMatrices():
if DEBUG:
print("precomputeFittingMatrices()")
global m_mHXPrime, m_mHYPrime, m_mDX, m_mDY, m_vConstraints
# put constraints into vConstraintVec
vConstraintVec = []
for i in range(len(m_vConstraints)):
vConstraintVec.append(m_vConstraints[i])
# resize matrix and clear to zero
nVerts = len(m_vDeformedVerts)
nConstraints = len(vConstraintVec)
nFreeVerts = nVerts - nConstraints
# figure out vertices ordering. First free vertices and then constraints
nRow = 0
global m_vVertexMap
m_vVertexMap = np.zeros(nVerts, dtype=int)
for i in range(nVerts):
c = Constraint(i, [0.0, 0.0])
if m_vConstraints.count(c) > 0:
continue
m_vVertexMap[i] = nRow
nRow += 1
if nRow != nFreeVerts:
Error()
for i in range(nConstraints):
m_vVertexMap[vConstraintVec[i].nVertex] = nRow
nRow += 1
if nRow != nVerts:
Error()
# test vectors
gUTestX = np.zeros(nVerts, dtype=float)
gUTestY = np.zeros(nVerts, dtype=float)
for i in range(nVerts):
c = Constraint(i, [0.0, 0.0])
if m_vConstraints.count(c) > 0:
continue
row = m_vVertexMap[i]
gUTestX[row] = m_vInitialVerts[i][0]
gUTestY[row] = m_vInitialVerts[i][1]
for i in range(nConstraints):
row = m_vVertexMap[vConstraintVec[i].nVertex]
gUTestX[row] = vConstraintVec[i].vConstrainedPos[0]
gUTestY[row] = vConstraintVec[i].vConstrainedPos[1]
# construct Hy and Hx matrices
mHX = np.zeros((nVerts, nVerts), dtype=float)
mHY = np.zeros((nVerts, nVerts), dtype=float)
nTri = len(m_vTriangles)
for i in range(nTri):
t = m_vTriangles[i]
for j in range(3):
nA = m_vVertexMap[t.nVerts[j]]
nB = m_vVertexMap[t.nVerts[(j + 1) % 3]]
mHX[nA][nA] += 2
mHX[nA][nB] += -2
mHX[nB][nA] += -2
mHX[nB][nB] += 2
mHY[nA][nA] += 2
mHY[nA][nB] += -2
mHY[nB][nA] += -2
mHY[nB][nB] += 2
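# each triangle edge contributes +2 to the two diagonal entries and -2 to the two off-diagonal
# entries of mHX/mHY, so both matrices are graph-Laplacian-like over the reordered vertices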
# extract HX00 and HY00 matrices
mHX00 = np.zeros((nFreeVerts, nFreeVerts), dtype=float)
mHY00 = np.zeros((nFreeVerts, nFreeVerts), dtype=float)
dim = np.shape(mHX00)
row, col = dim[0], dim[1]
mHX00 = _extractSubMatrix(mHX, 0, 0, row, col)
dim = np.shape(mHY00)
row, col = dim[0], dim[1]
mHY00 = _extractSubMatrix(mHY, 0, 0, row, col)
# extract HX01 and HX10 matrices
mHX01 = np.zeros((nFreeVerts, nConstraints), dtype=float)
mHX10 = np.zeros((nConstraints, nFreeVerts), dtype=float)
dim = np.shape(mHX01)
row, col = dim[0], dim[1]
mHX01 = _extractSubMatrix(mHX, 0, nFreeVerts, row, col)
dim = np.shape(mHX10)
row, col = dim[0], dim[1]
mHX10 = _extractSubMatrix(mHX, nFreeVerts, 0, row, col)
# extract HY01 and HY10 matrices
mHY01 = np.zeros((nFreeVerts, nConstraints), dtype=float)
mHY10 = np.zeros((nConstraints, nFreeVerts), dtype=float)
# -*- coding: iso-8859-1 -*-
"""
This script plots the pure-scattering limit test (w0 -> 1) of our two-stream radiative transfer code.
"""
########################
###Import useful libraries
########################
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pdb
import pickle
deg2rad=np.pi/180.
def cm2inch(cm): #function to convert cm to inches; useful for complying with Astrobiology size guidelines
return cm/2.54
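#e.g. (illustrative sizes only): fig=plt.figure(figsize=(cm2inch(16.), cm2inch(10.))) #matplotlib figsize expects inches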
########################
###A=0
########################
###z=0
fnetdict=pickle.load(open('./TwoStreamOutput/rugheimer_earth_epoch0_w0=1-1e-12_a=0_z=0.p','rb'))
F_net=fnetdict['F_net'] #net flux in each layer, 0th layer is TOA, erg/s/cm2/nm
wav_leftedges=fnetdict['wav_leftedges'] #nm
wav_rightedges=fnetdict['wav_rightedges'] #nm
wav_centers=fnetdict['wav_centers'] #nm
z_lower=fnetdict['z_lower'] #cm, 0th layer is TOA
z_upper=fnetdict['z_upper'] #cm
z_center=fnetdict['z_center'] #cm
flux_toa=fnetdict['flux_toa'] #TOA "flux" (really intensity) in erg/s/cm2/nm (cgs)
solarzenithangle=fnetdict['solarzenithangle'] #radians
N_wavelengths=np.size(wav_centers) ###NOTE: We assume wavelength structure is the same for all of these (!!!)
direct_flux_toa=np.cos(solarzenithangle)*flux_toa #true TOA flux
F_net_deviation=np.zeros(np.shape(F_net))
F_net_deviation_max=np.zeros(N_wavelengths)
F_net_deviation_stddevs=np.zeros(N_wavelengths)
F_net_deviation_median=np.zeros(N_wavelengths)
for ind in range(0, N_wavelengths):
median_val=np.median(F_net[:,ind])
F_net_deviation_median[ind]=median_val
F_net_deviation[:,ind]=F_net[:,ind]-median_val
F_net_deviation_max[ind]=np.max(np.abs(F_net_deviation[:,ind]))
F_net_deviation_stddevs[ind]=np.std(F_net[:,ind])
F_net_deviation_max_normalized_0_0=F_net_deviation_max/(direct_flux_toa)
F_net_deviation_stddevs_normalized_0_0=F_net_deviation_stddevs/(direct_flux_toa)
###z=60
fnetdict=pickle.load(open('./TwoStreamOutput/rugheimer_earth_epoch0_w0=1-1e-12_a=0_z=60.p','rb'))
F_net=fnetdict['F_net'] #net flux in each layer, 0th layer is TOA, erg/s/cm2/nm
wav_leftedges=fnetdict['wav_leftedges'] #nm
wav_rightedges=fnetdict['wav_rightedges'] #nm
wav_centers=fnetdict['wav_centers'] #nm
z_lower=fnetdict['z_lower'] #cm, 0th layer is TOA
z_upper=fnetdict['z_upper'] #cm
z_center=fnetdict['z_center'] #cm
flux_toa=fnetdict['flux_toa'] #TOA "flux" (really intensity) in erg/s/cm2/nm (cgs)
solarzenithangle=fnetdict['solarzenithangle'] #radians
direct_flux_toa=np.cos(solarzenithangle)*flux_toa #true TOA flux
F_net_deviation=np.zeros(np.shape(F_net))
F_net_deviation_max=np.zeros(N_wavelengths)
F_net_deviation_stddevs=np.zeros(N_wavelengths)
F_net_deviation_median=np.zeros(N_wavelengths)
for ind in range(0, N_wavelengths):
median_val=np.median(F_net[:,ind])
F_net_deviation_median[ind]=median_val
F_net_deviation[:,ind]=F_net[:,ind]-median_val
F_net_deviation_max[ind]=np.max(np.abs(F_net_deviation[:,ind]))
F_net_deviation_stddevs[ind]=np.std(F_net[:,ind])
F_net_deviation_max_normalized_0_60=F_net_deviation_max/(direct_flux_toa)
F_net_deviation_stddevs_normalized_0_60=F_net_deviation_stddevs/(direct_flux_toa)
###z=85
fnetdict=pickle.load(open('./TwoStreamOutput/rugheimer_earth_epoch0_w0=1-1e-12_a=0_z=85.p','rb'))
F_net=fnetdict['F_net'] #net flux in each layer, 0th layer is TOA, erg/s/cm2/nm
wav_leftedges=fnetdict['wav_leftedges'] #nm
wav_rightedges=fnetdict['wav_rightedges'] #nm
wav_centers=fnetdict['wav_centers'] #nm
z_lower=fnetdict['z_lower'] #cm, 0th layer is TOA
z_upper=fnetdict['z_upper'] #cm
z_center=fnetdict['z_center'] #cm
flux_toa=fnetdict['flux_toa'] #TOA "flux" (really intensity) in erg/s/cm2/nm (cgs)
solarzenithangle=fnetdict['solarzenithangle'] #radians
direct_flux_toa=np.cos(solarzenithangle)*flux_toa #true TOA flux
F_net_deviation=np.zeros(np.shape(F_net))
F_net_deviation_max=np.zeros(N_wavelengths)
F_net_deviation_stddevs=np.zeros(N_wavelengths)
F_net_deviation_median=np.zeros(N_wavelengths)
for ind in range(0, N_wavelengths):
median_val=np.median(F_net[:,ind])
F_net_deviation_median[ind]=median_val
F_net_deviation[:,ind]=F_net[:,ind]-median_val
F_net_deviation_max[ind]=np.max(np.abs(F_net_deviation[:,ind]))
F_net_deviation_stddevs[ind]=np.std(F_net[:,ind])
F_net_deviation_max_normalized_0_85=F_net_deviation_max/(direct_flux_toa)
F_net_deviation_stddevs_normalized_0_85=F_net_deviation_stddevs/(direct_flux_toa)
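########################
###NOTE (editor, illustrative sketch): the pickle-loading and deviation-statistics steps above are
###repeated verbatim for every (albedo, zenith angle) case in this script. A helper like the one
###below could replace the copies; the file-name pattern is taken from the paths used above.
########################
def load_normalized_deviations(albedo_tag, zenith_tag, n_wav):
    """Return (max, stddev) of the net-flux deviation from the median in each band, normalized by the direct TOA flux."""
    fname='./TwoStreamOutput/rugheimer_earth_epoch0_w0=1-1e-12_a='+albedo_tag+'_z='+zenith_tag+'.p'
    with open(fname, 'rb') as f:
        d=pickle.load(f)
    F_net=d['F_net'] #net flux in each layer, 0th layer is TOA, erg/s/cm2/nm
    direct_flux_toa=np.cos(d['solarzenithangle'])*d['flux_toa'] #true TOA flux
    dev_max=np.zeros(n_wav)
    dev_std=np.zeros(n_wav)
    for ind in range(0, n_wav):
        median_val=np.median(F_net[:,ind])
        dev_max[ind]=np.max(np.abs(F_net[:,ind]-median_val))
        dev_std[ind]=np.std(F_net[:,ind])
    return dev_max/direct_flux_toa, dev_std/direct_flux_toa
#e.g. F_net_deviation_max_normalized_0_85, F_net_deviation_stddevs_normalized_0_85 = load_normalized_deviations('0', '85', N_wavelengths)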
########################
###A=0.20
########################
###z=0
fnetdict=pickle.load(open('./TwoStreamOutput/rugheimer_earth_epoch0_w0=1-1e-12_a=0.2_z=0.p','rb'))
F_net=fnetdict['F_net'] #net flux in each layer, 0th layer is TOA, erg/s/cm2/nm
wav_leftedges=fnetdict['wav_leftedges'] #nm
wav_rightedges=fnetdict['wav_rightedges'] #nm
wav_centers=fnetdict['wav_centers'] #nm
z_lower=fnetdict['z_lower'] #cm, 0th layer is TOA
z_upper=fnetdict['z_upper'] #cm
z_center=fnetdict['z_center'] #cm
flux_toa=fnetdict['flux_toa'] #TOA "flux" (really intensity) in erg/s/cm2/nm (cgs)
solarzenithangle=fnetdict['solarzenithangle'] #radians
direct_flux_toa=np.cos(solarzenithangle)*flux_toa #true TOA flux
F_net_deviation=np.zeros(np.shape(F_net))
F_net_deviation_max=np.zeros(N_wavelengths)
F_net_deviation_stddevs=np.zeros(N_wavelengths)
F_net_deviation_median=np.zeros(N_wavelengths)
for ind in range(0, N_wavelengths):
median_val=np.median(F_net[:,ind])
F_net_deviation_median[ind]=median_val
F_net_deviation[:,ind]=F_net[:,ind]-median_val
F_net_deviation_max[ind]=np.max(np.abs(F_net_deviation[:,ind]))
F_net_deviation_stddevs[ind]=np.std(F_net[:,ind])
F_net_deviation_max_normalized_p245_0=F_net_deviation_max/(direct_flux_toa)
F_net_deviation_stddevs_normalized_p245_0=F_net_deviation_stddevs/(direct_flux_toa)
###z=60
fnetdict=pickle.load(open('./TwoStreamOutput/rugheimer_earth_epoch0_w0=1-1e-12_a=0.2_z=60.p','rb'))
F_net=fnetdict['F_net'] #net flux in each layer, 0th layer is TOA, erg/s/cm2/nm
wav_leftedges=fnetdict['wav_leftedges'] #nm
wav_rightedges=fnetdict['wav_rightedges'] #nm
wav_centers=fnetdict['wav_centers'] #nm
z_lower=fnetdict['z_lower'] #cm, 0th layer is TOA
z_upper=fnetdict['z_upper'] #cm
z_center=fnetdict['z_center'] #cm
flux_toa=fnetdict['flux_toa'] #TOA "flux" (really intensity) in erg/s/cm2/nm (cgs)
solarzenithangle=fnetdict['solarzenithangle'] #radians
direct_flux_toa=np.cos(solarzenithangle)*flux_toa #true TOA flux
F_net_deviation=np.zeros(np.shape(F_net))
F_net_deviation_max=np.zeros(N_wavelengths)
F_net_deviation_stddevs=np.zeros(N_wavelengths)
F_net_deviation_median=np.zeros(N_wavelengths)
for ind in range(0, N_wavelengths):
median_val=np.median(F_net[:,ind])
F_net_deviation_median[ind]=median_val
F_net_deviation[:,ind]=F_net[:,ind]-median_val
F_net_deviation_max[ind]=np.max(np.abs(F_net_deviation[:,ind]))
F_net_deviation_stddevs[ind]=np.std(F_net[:,ind])
F_net_deviation_max_normalized_p245_60=F_net_deviation_max/(direct_flux_toa)
F_net_deviation_stddevs_normalized_p245_60=F_net_deviation_stddevs/(direct_flux_toa)
###z=85
fnetdict=pickle.load(open('./TwoStreamOutput/rugheimer_earth_epoch0_w0=1-1e-12_a=0.2_z=85.p','rb'))
F_net=fnetdict['F_net'] #net flux in each layer, 0th layer is TOA, erg/s/cm2/nm
wav_leftedges=fnetdict['wav_leftedges'] #nm
wav_rightedges=fnetdict['wav_rightedges'] #nm
wav_centers=fnetdict['wav_centers'] #nm
z_lower=fnetdict['z_lower'] #cm, 0th layer is TOA
z_upper=fnetdict['z_upper'] #cm
z_center=fnetdict['z_center'] #cm
flux_toa=fnetdict['flux_toa'] #TOA "flux" (really intensity) in erg/s/cm2/nm (cgs)
solarzenithangle=fnetdict['solarzenithangle'] #radians
direct_flux_toa=np.cos(solarzenithangle)*flux_toa #true TOA flux
F_net_deviation=np.zeros(np.shape(F_net))
F_net_deviation_max=np.zeros(N_wavelengths)
F_net_deviation_stddevs=np.zeros(N_wavelengths)
F_net_deviation_median=np.zeros(N_wavelengths)
# -*- coding: utf-8 -*-
import os #module for OS-related processing
import sys #module for system-related processing
import time #module for time-related processing
import numpy as np #module for matrix processing
import math as mt #module for miscellaneous calculations
import cv2 #module for image processing
import glob #module for collecting file paths in bulk
from PySide2 import QtCore, QtGui, QtWidgets #modules for GUI processing
from MIIL_DATASET_CREATER_A_GUI import Ui_MainWindow #file created with Qt Designer and converted to Python
from getRectanglePos import getRectanglePos #library that decides which of two points becomes the start (top left) and which the end (bottom right) of the selection, and checks that the end point lies inside the specified range
from getRotatedRectanglePos import getRotatedRectanglePos #module that returns the coordinates contained in a rectangle after rotation
from getRotatedPos import getRotatedPos #module that returns rotated coordinates
import shutil #module for drive/file operations
import random
#####Global variables########################################
cap = 0
#cap = cv2.VideoCapture(0) #create the capture object
#cap.set(3, 320) #3 = CV_CAP_PROP_FRAME_WIDTH
#cap.set(4, 240) #4 = CV_CAP_PROP_FRAME_HEIGHT
capLoop = 0 #flag indicating whether video is currently being displayed
#camWidth = 320 #video width
#camHeight = 240 #video height
sStartFlag = 0 #flag: region selection has started
sFlag = 0 #flag: region selection succeeded
mX1 = 0 #x coordinate where the mouse button was pressed
mY1 = 0 #y coordinate where the mouse button was pressed
mX2 = 0 #x coordinate where the mouse button was released
mY2 = 0 #y coordinate where the mouse button was released
ssX = 0 #x coordinate of the selection start point (top left)
ssY = 0 #y coordinate of the selection start point (top left)
seX = 0 #x coordinate of the selection end point (bottom right) (unused by the framework by default)
seY = 0 #y coordinate of the selection end point (bottom right) (unused by the framework by default)
sXL = 0 #length of the selection along x (subtract 1 when computing the end point as start + length)
sYL = 0 #length of the selection along y (subtract 1 when computing the end point as start + length)
######Global variables outside the framework########################################
FileNum = 0 #number of files read so far
DirPath = "" #path of the folder holding the pictures
SettingDataDir = "" #folder for the raw region (setting) data
AnnotationDir = "" #folder for the annotation data
#DarknetAnnotationDir = "" #folder for the Darknet annotation data
SettingList = [] #list holding the setting data
CurPic = "" #image data currently loaded
CurPicWidth = 0
CurPicHeight = 0
CapWidth = 320 #capture width
CapHeight = 240 #capture height
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
aviOut = cv2.VideoWriter()
cFlag = 0 #flag used to read a single frame at a time
rnFlag = 0 #when set to 1, listWidget2 change events are ignored
trimMode = 0 #trim-mode flag
tw = 0 #trim-mode width
th = 0 #trim-mode height
label_color ={} #dictionary holding the color code of each label
color_code = 255 #current color code
label_pos = {} #dictionary holding the color pattern of each label
color_pos = 0 #current color pattern
curLabel = 0 #index of the currently selected label
#####Functions for the various operations########################################
#=====Main loop========================================
#Started by the start button
def mainLoop():
global capLoop
global sStartFlag
global sFlag
global ssX
global ssY
global sXL
global sYL
global FileNum
global DirPath
global cFlag
global aviOut
global CurPic
global CurPicWidth
global CurPicHeight
global label_color
global label_pos
global color_pos
global color_code
global curLabel
curLabelColor1 = 2550 #display color for the currently selected label
curLabelColor2 = 1550 #display color for the currently selected label
while(True):
if capLoop == 1:
##########
#Mode: capture pictures from the camera
##########
if win.ui.radioButton1.isChecked() == True: ##########CAMERA INPUT MODE##########
ret, frame = cap.read()
if ret == True:
#frame = gray(frame)
########## If we simply did frameB = frame, every operation on frameB would also show up in frame
########## creating an empty image of the same size and copying frame into it avoids that aliasing problem
frameB = np.copy(frame) #copy the frame into a new image
##########
##########
#!!!!!!!!!!Do the OpenCV processing here!!!!!!!!!!
if trimMode == 1: #when trim mode is enabled and a trim region size has been set
trimY = int((CapHeight - int(th)) / 2)
trimX = int((CapWidth - int(tw)) / 2)
frameB = frameB[trimY:trimY + int(th), trimX:trimX + int(tw)] #指定したサイズに画像をトリム
cv2.imshow("MIIL MDC CAMERA MODE",frameB)
cvKey = cv2.waitKey(1)
if cvKey == 32: ##########SPACE KEY##########
cv2.imwrite(DirPath + '/' + str(FileNum) + '.jpg', frameB)
time.sleep(0.5)
f = open(DirPath + '/' + str(FileNum) + '.txt', "w")
f.write('')
f.close()
win.ui.listWidget2.addItem(str(FileNum))
Lpos = win.ui.listWidget2.count() - 1
win.ui.listWidget2.setCurrentRow(Lpos)
app.processEvents() #ボタン処理のタイミング確認用
if capLoop == 1:
font_size = 2
font = cv2.FONT_HERSHEY_PLAIN
cv2.putText(frameB, str(FileNum) + '.jpg SAVED.' , (5, 25), font, font_size,(0, 0, 255), 1)
cv2.imshow("MIIL MDC CAMERA MODE",frameB)
app.processEvents()
time.sleep(1)
app.processEvents() #ボタン処理のタイミング確認用
if capLoop == 1:
cv2.imshow("MIIL MDC CAMERA MODE",frameB)
FileNum += 1
else:
msgbox = QtWidgets.QMessageBox()
msgbox.setWindowTitle("MDC")
msgbox.setText("Failed to process video.\nPlease change camera ID or camera resolution to capture.") #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
break
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
##########
#認識領域描画モード
##########
elif win.ui.radioButton2.isChecked() == True: ##########LABEL DRAWING ON PICTURE MODE##########
frame = np.copy(CurPic)
if capLoop == 1:
#frame = gray(frame)
########## If we simply did frameB = frame, every operation on frameB would also show up in frame
########## creating an empty image of the same size and copying frame into it avoids that aliasing problem
frameB = np.copy(frame) #copy the frame into a new image
##########
##########
#!!!!!!!!!!Do the OpenCV processing here!!!!!!!!!!
curLabelProcess = 0
if len(SettingList) > 0:
for x in SettingList:
ROW, LABEL,TX, TY, BX, BY = x.split(',')
if LABEL not in label_color: #check whether this label is not yet in the label-color dictionary
label_color[LABEL] = color_code #store the color assigned to this label
label_pos[LABEL] = color_pos #store the color pattern assigned to this label
color_pos += 1
if color_pos == 6: #after every 6 patterns, lower the color brightness
color_pos = 0
color_code -= 10
TX = int(TX)
TY = int(TY)
BX = int(BX)
BY = int(BY)
font_size = 1 #font size
#pix_size = 10
font = cv2.FONT_HERSHEY_PLAIN #font
if curLabelProcess == curLabel:
curCol1 = int(curLabelColor1 / 10)
curCol2 = int(curLabelColor2 / 10)
cv2.putText(frameB, LABEL,(TX - 6, TY - 1), font, font_size, (curCol2, curCol2, curCol2), 1) #ラベル名の影を描画
cv2.putText(frameB, LABEL,(TX - 7, TY - 2), font, font_size, (curCol1, curCol1, curCol1), 1) #ラベル名を描画
cv2.rectangle(frameB, (TX + 1, TY + 1), (BX, BY), (curCol2, curCol2, curCol2), 1) #検出領域に枠の影を描画
cv2.rectangle(frameB, (TX, TY), (BX - 1, BY - 1), (curCol1, curCol1, curCol1), 1) #検出領域に枠を描画
else:
if label_pos[LABEL] == 0: #パターン0の色設定
cv2.putText(frameB, LABEL,(TX - 6, TY - 1), font, font_size, (0, 0, 0), 1) #ラベル名の影を描画
cv2.putText(frameB, LABEL,(TX - 7, TY - 2), font, font_size, (label_color[LABEL], 0, 0), 1) #ラベル名を描画
cv2.rectangle(frameB, (TX + 1, TY + 1), (BX, BY), (0, 0, 0), 1) #検出領域に枠の影を描画
cv2.rectangle(frameB, (TX, TY), (BX - 1, BY - 1), (label_color[LABEL], 0, 0), 1) #検出領域に枠を描画
elif label_pos[LABEL] == 1: #パターン1の色設定
cv2.putText(frameB, LABEL,(TX - 6, TY - 1), font, font_size, (0, 0, 0), 1) #ラベル名の影を描画
cv2.putText(frameB, LABEL,(TX - 7, TY - 2), font, font_size, (0, label_color[LABEL], 0), 1) #ラベル名を描画
cv2.rectangle(frameB, (TX + 1, TY + 1), (BX, BY), (0, 0, 0), 1) #検出領域に枠の影を描画
cv2.rectangle(frameB, (TX, TY), (BX - 1, BY - 1), (0, label_color[LABEL], 0), 1) #検出領域に枠を描画
elif label_pos[LABEL] == 2: #パターン2の色設定
cv2.putText(frameB, LABEL,(TX - 6, TY - 1), font, font_size, (0, 0, 0), 1) #ラベル名の影を描画
cv2.putText(frameB, LABEL,(TX - 7, TY - 2), font, font_size, (0, 0, label_color[LABEL]), 1) #ラベル名を描画
cv2.rectangle(frameB, (TX + 1, TY + 1), (BX, BY), (0, 0, 0), 1) #検出領域に枠の影を描画
cv2.rectangle(frameB, (TX, TY), (BX - 1, BY - 1), (0, 0, label_color[LABEL]), 1) #検出領域に枠を描画
elif label_pos[LABEL] == 3: #パターン3の色設定
cv2.putText(frameB, LABEL,(TX - 6, TY - 1), font, font_size, (0, 0, 0), 1) #ラベル名の影を描画
cv2.putText(frameB, LABEL,(TX - 7, TY - 2), font, font_size, (label_color[LABEL], label_color[LABEL], 0), 1) #ラベル名を描画
cv2.rectangle(frameB, (TX + 1, TY + 1), (BX, BY), (0, 0, 0), 1) #検出領域に枠の影を描画
cv2.rectangle(frameB, (TX, TY), (BX - 1, BY - 1), (label_color[LABEL], label_color[LABEL], 0), 1) #検出領域に枠を描画
elif label_pos[LABEL] == 4: #パターン4の色設定
cv2.putText(frameB, LABEL,(TX - 6, TY - 1), font, font_size, (0, 0, 0), 1) #ラベル名の影を描画
cv2.putText(frameB, LABEL,(TX - 7, TY - 2), font, font_size, (label_color[LABEL], 0, label_color[LABEL]), 1) #ラベル名を描画
cv2.rectangle(frameB, (TX + 1, TY + 1), (BX, BY), (0, 0, 0), 1) #検出領域に枠の影を描画
cv2.rectangle(frameB, (TX, TY), (BX - 1, BY - 1), (label_color[LABEL], 0, label_color[LABEL]), 1) #検出領域に枠を描画
elif label_pos[LABEL] == 5: #パターン5の色設定
cv2.putText(frameB, LABEL,(TX - 6, TY - 1), font, font_size, (0, 0, 0), 1) #ラベル名の影を描画
cv2.putText(frameB, LABEL,(TX - 7, TY - 2), font, font_size, (0, label_color[LABEL], label_color[LABEL]), 1) #ラベル名を描画
cv2.rectangle(frameB, (TX + 1, TY + 1), (BX, BY), (0, 0, 0), 1) #検出領域に枠の影を描画
cv2.rectangle(frameB, (TX, TY), (BX - 1, BY - 1), (0, label_color[LABEL], label_color[LABEL]), 1) #検出領域に枠を描画
curLabelProcess += 1
curLabelColor1 -= 5
curLabelColor2 -= 5
if curLabelColor1 < 1550:
curLabelColor1 = 2550
if curLabelColor2 < 550:
curLabelColor2 = 1550
else:
cv2.rectangle(frameB, (0, 0), (CurPicWidth - 1, CurPicHeight - 1), (0, 0, 255), 1)
font_size = 1
font = cv2.FONT_HERSHEY_PLAIN
cv2.putText(frameB, 'EXCLUDE', (10, 20),font, font_size,(0,0,255),1)
if sStartFlag == 1: #processing after region selection has started
frameB = cv2.rectangle(frameB, (ssX, ssY), (sXL, sYL), (0, 0, 255), 1)
if trimMode == 1:
cv2.rectangle(frameB, (mX1, mY1), (mX1 + int(tw) - 1, mY1 + int(th) - 1), (255, 0, 0), 1)
cvKey = cv2.waitKey(1)
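# Key bindings in draw mode (read from the handlers below):
#   Q / A : previous / next label in the label list (listWidget1)
#   W / S : previous / next picture in the picture list (listWidget2)
#   E / D : previous / next annotation box on the current picture
#   B     : delete the highlighted annotation box and rewrite the setting / XML / txt files
#   T     : (trim mode) crop the picture to the preset size at the clicked position and drop its annotations
#   ESC   : delete all annotations of the current picture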
Lcnt1 = win.ui.listWidget1.count()
cur1 = win.ui.listWidget1.currentRow()
Lcnt2 = win.ui.listWidget2.count()
cur2 = win.ui.listWidget2.currentRow()
if cvKey == 113 and Lcnt1 > 0: ##########KEY Q##########
if cur1 - 1 >= 0:
cur1 = cur1 - 1
win.ui.listWidget1.setCurrentRow(cur1)
elif cvKey == 97 and Lcnt1 > 0: ##########KEY A##########
if cur1 + 1 <= Lcnt1 - 1:
cur1 = cur1 + 1
win.ui.listWidget1.setCurrentRow(cur1)
elif cvKey == 119 and Lcnt2 > 0: ##########KEY W##########
if cur2 - 1 >= 0:
cur2 = cur2 - 1
win.ui.listWidget2.setCurrentRow(cur2)
elif cvKey == 115 and Lcnt2 > 0: ##########KEY S##########
if cur2 + 1 <= Lcnt2 - 1:
cur2 = cur2 + 1
win.ui.listWidget2.setCurrentRow(cur2)
elif cvKey == 101 and Lcnt2 > 0: ##########KEY E##########
if curLabel - 1 >= 0:
curLabel = curLabel - 1
elif cvKey == 100 and Lcnt2 > 0: ##########KEY D##########
if curLabel + 1 <= len(SettingList) - 1:
curLabel = curLabel + 1
elif cvKey == 98 and Lcnt2 > 0 and len(SettingList) > 0: ##########KEY B##########
del SettingList[curLabel]
filepath = SettingDataDir + '/' + win.ui.listWidget2.currentItem().text() + '.set'
filepath2 = AnnotationDir + '/' + win.ui.listWidget2.currentItem().text() + '.xml'
filepath3 = DirPath + '/' + win.ui.listWidget2.currentItem().text() + '.txt'
if len(SettingList) > 0:
text = ""
for x in SettingList:
text = text + x + "\n"
f = open(filepath, "w")
f.writelines(text)
f.close()
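# rebuild the Pascal-VOC-style XML annotation for this picture from the remaining boxes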
fY = CurPic.shape[0]
fX = CurPic.shape[1]
text2 = '<annotation>' + '\n<filename>' + win.ui.listWidget2.currentItem().text() + '.jpg</filename>\n<size>\n<width>' + str(fX) + '</width>' + '\n' + '<height>' + str(fY) + '</height>\n</size>\n'
for x in SettingList:
ROW, LABEL,TX, TY, BX, BY = x.split(',')
text2 = text2 + '<object>\n<name>' + LABEL + '</name>\n<bndbox>' + '\n' + '<xmin>' + str(int(TX)) + '</xmin>' + '\n' + '<ymin>' + str(int(TY)) + '</ymin>' + '\n' + '<xmax>' + str(int(BX)) + '</xmax>' + '\n' + '<ymax>' + str(int(BY)) + '</ymax>\n</bndbox>\n</object>\n'
text2 = text2 + '</annotation>\n'
f = open(filepath2, "w")
f.writelines(text2)
f.close()
text3 = ""
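# rebuild the Darknet/YOLO txt annotation: one line per box with the class index followed by the
# box center and size, all normalized to [0,1] by the picture width and height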
for x in SettingList:
ROW, LABEL,TX, TY, BX, BY = x.split(',')
cw = 1 / CurPicWidth
ch = 1 / CurPicHeight
cnx = (int(TX) + int(BX)) / 2
cny = (int(TY) + int(BY)) / 2
cnw = int(BX) - int(TX)
cnh = int(BY) - int(TY)
cnx = cnx * cw
cny = cny * ch
cnw = cnw * cw
cnh = cnh * ch
text3 = text3 + ROW + ' ' + str(cnx) + ' ' + str(cny) + ' ' + str(cnw) + ' ' + str(cnh) + '\n'
f = open(filepath3, "w")
f.writelines(text3)
f.close()
else:
if os.path.isfile(filepath):
os.remove(filepath)
if os.path.isfile(filepath2):
os.remove(filepath2)
if os.path.isfile(filepath3) == False:
f = open(filepath3, "w")
f.write('')
f.close()
curLabel = 0
elif cvKey == 116 and Lcnt2 > 0 and trimMode == 1: ##########KEY T##########
if mX1 > 0 and mY1 > 0 and mX1 + int(tw) - 1 <= CurPicWidth and mY1 + int(th) - 1 <= CurPicHeight:
frameB = frame[mY1:mY1 + int(th), mX1:mX1 + int(tw)]
cv2.imwrite(DirPath + '/' + win.ui.listWidget2.currentItem().text() +'.jpg', frameB)
CurPic = frameB
CurPicHeight = CurPic.shape[0]
CurPicWidth = CurPic.shape[1]
sFile = SettingDataDir + '/' + win.ui.listWidget2.currentItem().text() +'.set'
aFile = AnnotationDir + '/' + win.ui.listWidget2.currentItem().text() +'.xml'
dFile = DirPath + '/' + win.ui.listWidget2.currentItem().text() +'.txt'
if os.path.isfile(sFile):
os.remove(sFile)
SettingList.clear()
if os.path.isfile(aFile):
os.remove(aFile)
if os.path.isfile(dFile):
os.remove(dFile)
if os.path.isfile(dFile) == False:
f = open(dFile, "w")
f.write('')
f.close()
elif cvKey == 27 and Lcnt2 > 0: ##########KEY ESC##########
sFile = SettingDataDir + '/' + win.ui.listWidget2.currentItem().text() +'.set'
aFile = AnnotationDir + '/' + win.ui.listWidget2.currentItem().text() +'.xml'
dFile = DirPath + '/' + win.ui.listWidget2.currentItem().text() +'.txt'
if os.path.isfile(sFile):
os.remove(sFile)
SettingList.clear()
if os.path.isfile(aFile):
os.remove(aFile)
if os.path.isfile(dFile):
os.remove(dFile)
if os.path.isfile(dFile) == False:
f = open(dFile, "w")
f.write('')
f.close()
app.processEvents() #ボタン処理のタイミング確認用
if capLoop == 1:
currentListIndex = win.ui.listWidget2.currentRow()
if currentListIndex != -1:
cv2.imshow("MIIL MDC DRAW MODE",frameB)
cv2.setMouseCallback("MIIL MDC DRAW MODE", onInput)
else:
break
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
##########
#Mode: record video from the camera
##########
elif win.ui.radioButton3.isChecked() == True: ##########VIDEO RECORDING MODE##########
ret, frame = cap.read()
if ret == True:
frameB = np.copy(frame) #画像を画像にコピー
if capLoop == 1:
cv2.imshow("MIIL MDC VIDEO MODE",frameB)
aviOut.write(frameB)
app.processEvents() #ボタン処理のタイミング確認用
else:
msgbox = QtWidgets.QMessageBox()
msgbox.setWindowTitle("MDC")
msgbox.setText("Failed to process video.\nPlease change camera ID or camera resolution to capture.") #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
break
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
##########
#Mode: capture pictures from a video file
##########
elif win.ui.radioButton4.isChecked() == True: ##########GET PICTURE FROM VIDEO FILE##########
if cFlag == 1:
ret, frame = cap.read()
cFlag = 0
if ret == True:
frameB = np.copy(frame) #画像を画像にコピー
else:
msgbox = QtWidgets.QMessageBox()
msgbox.setWindowTitle("MDC")
msgbox.setText("Failed to process video.\nIf reading from the video is not done, please change camera ID or camera resolution to capture.") #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
break
cvKey = cv2.waitKey(1)
if cvKey == 32: ##########KEY SPACE##########
cv2.imwrite(DirPath + '/' + str(FileNum) + '.jpg', frame)
f = open(DirPath + '/' + str(FileNum) + '.txt', "w")
f.write('')
f.close()
win.ui.listWidget2.addItem(str(FileNum))
Lpos = win.ui.listWidget2.count() -1
win.ui.listWidget2.setCurrentRow(Lpos)
app.processEvents() #ボタン処理のタイミング確認用
if capLoop == 1:
font_size = 2
font = cv2.FONT_HERSHEY_PLAIN
cv2.putText(frameB, str(FileNum) + '.jpg SAVED.' , (5, 25), font, font_size,(0, 0, 255), 1)
cv2.imshow("MIIL MDC FILE MODE",frameB)
app.processEvents()
frameB = np.copy(frame) #画像を画像にコピー
app.processEvents() #ボタン処理のタイミング確認用
if capLoop == 1:
cv2.imshow("MIIL MDC FILE MODE",frameB)
FileNum += 1
elif cvKey == 116 and trimMode == 1: ##########KEY T##########
fY = frame.shape[0]
fX = frame.shape[1]
if mX1 > 0 and mY1 > 0 and mX1 + int(tw) - 1 <= fX and mY1 + int(th) - 1 <= fY:
frameB = frame[mY1:mY1 + int(th), mX1:mX1 + int(tw)]
cv2.imwrite(DirPath + '/' + str(FileNum) +'.jpg', frameB)
f = open(DirPath + '/' + str(FileNum) + '.txt', "w")
f.write('')
f.close()
app.processEvents()
if capLoop == 1:
cv2.imshow("MIIL MDC FILE MODE",frameB)
app.processEvents()
time.sleep(1)
win.ui.listWidget2.addItem(str(FileNum))
Lpos = win.ui.listWidget2.count() -1
win.ui.listWidget2.setCurrentRow(Lpos)
FileNum += 1
frameB = np.copy(frame) #画像を画像にコピー
elif cvKey == 122: ##########KEY Z##########
cFlag = 1
app.processEvents() #ボタン処理のタイミング確認用
if capLoop == 1:
if trimMode == 1:
cv2.rectangle(frameB, (mX1, mY1), (mX1 + int(tw) - 1, mY1 + int(th) - 1), (255, 0, 0), 1)
cv2.imshow("MIIL MDC FILE MODE", frameB)
cv2.setMouseCallback("MIIL MDC FILE MODE", onInput)
else:
break
app.processEvents()
#####PySide window-handling class########################################
class MainWindow1(QtWidgets.QMainWindow): #inherits QtWidgets.QMainWindow
#=====Boilerplate for subclassing the GUI class========================================
def __init__(self, parent = None): #run only when the class is instantiated (the constructor)
super(MainWindow1, self).__init__(parent) #call the parent-class constructor (reused here); pass the parent constructor's arguments minus self
self.ui = Ui_MainWindow() #create the ui object; "MainWindow" in Ui_MainWindow is the objectName set in Qt Designer
self.ui.setupUi(self) #set up the ui
self.ui.comboBox1.addItems(["320x240", "640x480", "800x600", "1024x768", "1280x960", "1400x1050", "2592x1944", "320x180", "640x360", "1280x720", "1600x900", "1920x1080"]) #####add the camera resolution items to the combo box
self.ui.comboBox1.setCurrentIndex(0) #####select the default item
self.ui.comboBox2.addItems(["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]) #add the camera ID items to the combo box
self.ui.comboBox2.setCurrentIndex(0) #select the default item
#self.ui.comboBox2.addItems(["0.1", "0.2", "0.3", "0.4", "0.5", "0.6", "0.7", "0.8", "0.9", "1.0"]) #####add items to the combo box
#self.ui.comboBox2.setCurrentIndex(6) #####select an item in the combo box
#-----Connect the widget signals to their handler methods----------------------------------------
self.ui.listWidget2.currentRowChanged.connect(self.listWidget2_changed) #listWidget2_changed is an arbitrary name (new-style syntax)
self.ui.comboBox1.currentIndexChanged.connect(self.comboBox1_changed) #comboBox1_changed is an arbitrary name
self.ui.checkBox1.clicked.connect(self.checkBox1_clicked) #checkBox1_clicked is an arbitrary name
#the push buttons are connected with the same new-style syntax (PySide2 does not provide the old QObject.connect/SIGNAL form)
self.ui.pushButton1.clicked.connect(self.pushButton1_clicked)
self.ui.pushButton2.clicked.connect(self.pushButton2_clicked)
self.ui.pushButton3.clicked.connect(self.pushButton3_clicked)
self.ui.pushButton4.clicked.connect(self.pushButton4_clicked)
self.ui.pushButton5.clicked.connect(self.pushButton5_clicked)
self.ui.pushButton6.clicked.connect(self.pushButton6_clicked)
self.ui.pushButton7.clicked.connect(self.pushButton7_clicked)
self.ui.pushButton8.clicked.connect(self.pushButton8_clicked)
self.ui.pushButton9.clicked.connect(self.pushButton9_clicked)
self.ui.pushButton10.clicked.connect(self.pushButton10_clicked)
self.ui.pushButton11.clicked.connect(self.pushButton11_clicked)
self.ui.pushButton12.clicked.connect(self.pushButton12_clicked)
self.ui.pushButton13.clicked.connect(self.pushButton13_clicked)
self.ui.pushButton14.clicked.connect(self.pushButton14_clicked)
self.ui.pushButton15.clicked.connect(self.pushButton15_clicked)
self.ui.pushButton16.clicked.connect(self.pushButton16_clicked)
self.ui.pushButton17.clicked.connect(self.pushButton17_clicked)
self.ui.pushButton18.clicked.connect(self.pushButton18_clicked)
self.ui.pushButton19.clicked.connect(self.pushButton19_clicked)
self.ui.pushButton20.clicked.connect(self.pushButton20_clicked)
self.ui.pushButton21.clicked.connect(self.pushButton21_clicked)
self.ui.pushButton22.clicked.connect(self.pushButton22_clicked)
self.ui.pushButton23.clicked.connect(self.pushButton23_clicked)
self.ui.pushButton24.clicked.connect(self.pushButton24_clicked)
self.ui.pushButton25.clicked.connect(self.pushButton25_clicked)
self.ui.pushButton26.clicked.connect(self.pushButton26_clicked)
self.ui.pushButton27.clicked.connect(self.pushButton27_clicked)
self.ui.pushButton28.clicked.connect(self.pushButton28_clicked)
self.ui.pushButton29.clicked.connect(self.pushButton29_clicked)
self.ui.pushButton30.clicked.connect(self.pushButton30_clicked)
self.ui.pushButton31.clicked.connect(self.pushButton31_clicked)
self.ui.pushButton32.clicked.connect(self.pushButton32_clicked)
self.ui.pushButton33.clicked.connect(self.pushButton33_clicked)
self.ui.pushButton34.clicked.connect(self.pushButton34_clicked)
self.ui.pushButton35.clicked.connect(self.pushButton35_clicked)
self.ui.pushButton36.clicked.connect(self.pushButton36_clicked)
self.ui.pushButton37.clicked.connect(self.pushButton37_clicked)
#=====Methods handling the widget signals========================================
#-----Event handler for checkBox1----------------------------------------
def checkBox1_clicked(self):
global tw
global th
global trimMode
if self.ui.checkBox1.isChecked() == True:
tw = self.ui.lineEdit4.text()
th = self.ui.lineEdit5.text()
if tw.isdigit() == False or th.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText("Need digits.") #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
self.ui.checkBox1.setChecked(False) #チェックを外す
elif int(tw) < 100 or int(th) < 100:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText("Value should be more than 100.") #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
self.ui.checkBox1.setChecked(False) #チェックを外す
#elif int(tw) > CapWidth or int(th) > CapHeight:
#msgbox = QtWidgets.QMessageBox(self)
#msgbox.setWindowTitle("MDC")
#msgbox.setText("value should be less than the source size.") #メッセージボックスのテキストを設定
#ret = msgbox.exec_() #メッセージボックスを表示
#self.ui.checkBox1.setChecked(False) #チェックを外す
else:
self.ui.lineEdit4.setEnabled(False)
self.ui.lineEdit5.setEnabled(False)
#self.ui.comboBox1.setEnabled(False)
trimMode = 1
else:
self.ui.lineEdit4.setEnabled(True)
self.ui.lineEdit5.setEnabled(True)
#self.ui.comboBox1.setEnabled(True)
trimMode = 0
#-----Event handler for listWidget2----------------------------------------
def listWidget2_changed(self):
global SettingDataDir #folder for the raw region (setting) data
global SettingList #list holding the setting data
global CurPic
global CurPicWidth
global CurPicHeight
global sStartFlag
global sFlag
global curLabel
curLabel = 0
currentListIndex = self.ui.listWidget2.currentRow()
self.ui.lineEdit6.setText(str(currentListIndex))
if rnFlag == 0 and currentListIndex != -1: #only when listWidget2 events may be processed
sStartFlag = 0
sFlag = 0
SettingList.clear()
picpath = DirPath + '/' + self.ui.listWidget2.currentItem().text() + '.jpg'
if os.path.isfile(picpath):
CurPic = cv2.imread(picpath)
CurPicHeight = CurPic.shape[0]
CurPicWidth = CurPic.shape[1]
filepath = SettingDataDir + '/' + self.ui.listWidget2.currentItem().text() + '.set'
if os.path.isfile(filepath):
#####read the saved region settings for this picture
f = open(filepath, "r")
text = f.readlines() #includes the newline characters
f.close()
if len(text) > 0:
for setting in text:
SettingList.append(setting.replace("\n", ""))
else:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText("Something is wrong with setting data.") #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
#####
else:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText("Something is wrong with picture data.") #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
#-----Event handler for comboBox1----------------------------------------
def comboBox1_changed(self):
#global cap
global CapWidth
global CapHeight
res = self.ui.comboBox1.currentText()
rx, ry = res.split('x')
#cap.set(3, int(rx)) #3 = CV_CAP_PROP_FRAME_WIDTH
#cap.set(4, int(ry)) #4 = CV_CAP_PROP_FRAME_HEIGHT
CapWidth = int(rx)
CapHeight = int(ry)
#-----Event handler for pushButton1 (start)----------------------------------------
def pushButton1_clicked(self):
global capLoop
global aviOut
global cap
global CapWidth
global CapHeight
global cFlag
global curLabel
curLabel = 0
currentListIndex = self.ui.listWidget1.currentRow()
currentListIndex2 = self.ui.listWidget2.currentRow()
if self.ui.lineEdit1.text() == "" and self.ui.radioButton3.isChecked() != True:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText("Please set a picture folder path.") #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif self.ui.radioButton2.isChecked() == True and currentListIndex2 == -1: #FileNum == 0:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText("No file in the directory.") #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif self.ui.radioButton2.isChecked() == True and currentListIndex == -1:
msgbox = QtWidgets.QMessageBox() #####メッセージボックスを準備
msgbox.setWindowTitle("MDC")
msgbox.setText("No label selected.") #####メッセージボックスのテキストを設定
ret = msgbox.exec_() #####メッセージボックスを表示
elif self.ui.radioButton3.isChecked() == True and self.ui.lineEdit3.text() == "":
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText("Please set a video file name to save.") #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif self.ui.radioButton4.isChecked() == True and self.ui.lineEdit3.text() == "":
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText("Please open a movie file.") #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif self.ui.radioButton1.isChecked() == True and self.ui.checkBox1.isChecked() == True:
if int(tw) > CapWidth or int(th) > CapHeight:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText("Triming size should be less than the source size.") #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif self.ui.radioButton4.isChecked() == True and self.ui.checkBox1.isChecked() == True:
if int(tw) > CapWidth or int(th) > CapHeight:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText("Triming size should be less than the source size.") #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
else:
self.ui.pushButton1.setEnabled(False)
self.ui.pushButton2.setEnabled(True)
self.ui.pushButton3.setEnabled(False)
self.ui.pushButton4.setEnabled(False)
self.ui.pushButton5.setEnabled(False)
self.ui.pushButton6.setEnabled(False)
self.ui.pushButton7.setEnabled(False)
self.ui.pushButton8.setEnabled(False)
self.ui.pushButton9.setEnabled(False)
self.ui.pushButton11.setEnabled(False)
self.ui.pushButton12.setEnabled(False)
self.ui.pushButton17.setEnabled(False)
self.ui.pushButton20.setEnabled(False)
self.ui.pushButton31.setEnabled(False)
if win.ui.radioButton2.isChecked() == True:
self.ui.pushButton10.setEnabled(True)
self.ui.pushButton13.setEnabled(True)
self.ui.pushButton14.setEnabled(True)
self.ui.pushButton15.setEnabled(True)
self.ui.pushButton16.setEnabled(True)
self.ui.pushButton18.setEnabled(True)
self.ui.pushButton19.setEnabled(True)
self.ui.pushButton21.setEnabled(True)
self.ui.pushButton22.setEnabled(True)
self.ui.pushButton23.setEnabled(True)
self.ui.pushButton24.setEnabled(True)
self.ui.pushButton25.setEnabled(True)
self.ui.pushButton26.setEnabled(True)
self.ui.pushButton27.setEnabled(True)
self.ui.pushButton28.setEnabled(True)
self.ui.pushButton29.setEnabled(True)
self.ui.pushButton30.setEnabled(True)
self.ui.pushButton32.setEnabled(True)
self.ui.pushButton33.setEnabled(True)
self.ui.pushButton34.setEnabled(True)
self.ui.pushButton35.setEnabled(True)
self.ui.pushButton36.setEnabled(True)
self.ui.pushButton37.setEnabled(True)
self.ui.radioButton1.setEnabled(False)
self.ui.radioButton2.setEnabled(False)
self.ui.radioButton3.setEnabled(False)
self.ui.radioButton4.setEnabled(False)
self.ui.lineEdit2.setEnabled(False)
self.ui.lineEdit4.setEnabled(False)
self.ui.lineEdit5.setEnabled(False)
self.ui.checkBox1.setEnabled(False)
self.ui.comboBox1.setEnabled(False)
self.ui.comboBox2.setEnabled(False)
if self.ui.radioButton1.isChecked() == True:
cap = cv2.VideoCapture(int(self.ui.comboBox2.currentText()))
res = self.ui.comboBox1.currentText()
rx, ry = res.split('x')
#cap.set(6,cv2.VideoWriter_fourcc(*'MJPG'))
#cap.set(5,10)
cap.set(3, int(rx)) #3 = CV_CAP_PROP_FRAME_WIDTH
cap.set(4, int(ry)) #4 = CV_CAP_PROP_FRAME_HEIGHT
CapWidth = int(rx)
CapHeight = int(ry)
elif self.ui.radioButton3.isChecked() == True:
cap = cv2.VideoCapture(int(self.ui.comboBox2.currentText()))
res = self.ui.comboBox1.currentText()
rx, ry = res.split('x')
cap.set(3, int(rx)) #3 = CV_CAP_PROP_FRAME_WIDTH
cap.set(4, int(ry)) #4 = CV_CAP_PROP_FRAME_HEIGHT
CapWidth = int(rx)
CapHeight = int(ry)
rate, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Please choose frame rate.", ["1", "2", "3", "4", "5", "10", "20", "30", "60", "120", "144", "240"], 4, False)
if buttonState:
aviOut = cv2.VideoWriter(self.ui.lineEdit3.text(), fourcc, int(rate), (CapWidth, CapHeight))
else:
msgbox = QtWidgets.QMessageBox(self) #####メッセージボックスを準備
msgbox.setWindowTitle("MDC")
msgbox.setText("Set frame rate to 20.") #####メッセージボックスのテキストを設定
ret = msgbox.exec_() #####メッセージボックスを表示
aviOut = cv2.VideoWriter(self.ui.lineEdit3.text(), fourcc, 20, (CapWidth, CapHeight))
elif self.ui.radioButton4.isChecked() == True:
#cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture(self.ui.lineEdit3.text())
cFlag = 1
if capLoop == 0:
capLoop = 1
mainLoop()
#-----Event handler for pushButton2 (stop)----------------------------------------
def pushButton2_clicked(self):
global capLoop
global sStartFlag
global sFlag
if capLoop == 1:
capLoop = 0
time.sleep(0.2)
sStartFlag = 0
sFlag = 0
self.ui.pushButton1.setEnabled(True)
self.ui.pushButton2.setEnabled(False)
self.ui.pushButton3.setEnabled(True)
self.ui.pushButton4.setEnabled(True)
self.ui.pushButton5.setEnabled(True)
self.ui.pushButton6.setEnabled(True)
self.ui.pushButton7.setEnabled(True)
self.ui.pushButton8.setEnabled(True)
self.ui.pushButton9.setEnabled(True)
self.ui.pushButton10.setEnabled(False)
self.ui.pushButton11.setEnabled(True)
self.ui.pushButton12.setEnabled(True)
self.ui.pushButton13.setEnabled(False)
self.ui.pushButton14.setEnabled(False)
self.ui.pushButton15.setEnabled(False)
self.ui.pushButton16.setEnabled(False)
self.ui.pushButton17.setEnabled(True)
self.ui.pushButton18.setEnabled(False)
self.ui.pushButton19.setEnabled(False)
self.ui.pushButton20.setEnabled(True)
self.ui.pushButton21.setEnabled(False)
self.ui.pushButton22.setEnabled(False)
self.ui.pushButton23.setEnabled(False)
self.ui.pushButton24.setEnabled(False)
self.ui.pushButton25.setEnabled(False)
self.ui.pushButton26.setEnabled(False)
self.ui.pushButton27.setEnabled(False)
self.ui.pushButton28.setEnabled(False)
self.ui.pushButton29.setEnabled(False)
self.ui.pushButton30.setEnabled(False)
self.ui.pushButton32.setEnabled(False)
self.ui.pushButton33.setEnabled(False)
self.ui.pushButton34.setEnabled(False)
self.ui.pushButton35.setEnabled(False)
self.ui.pushButton36.setEnabled(False)
self.ui.pushButton37.setEnabled(False)
self.ui.pushButton31.setEnabled(True)
self.ui.radioButton1.setEnabled(True)
self.ui.radioButton2.setEnabled(True)
self.ui.radioButton3.setEnabled(True)
self.ui.radioButton4.setEnabled(True)
self.ui.lineEdit2.setEnabled(True)
self.ui.lineEdit4.setEnabled(True)
self.ui.lineEdit5.setEnabled(True)
self.ui.checkBox1.setEnabled(True)
#if trimMode == 0:
self.ui.comboBox1.setEnabled(True)
self.ui.comboBox2.setEnabled(True)
if self.ui.radioButton1.isChecked() == True:
cap.release() #キャプチャー用オブジェクトを廃棄
elif self.ui.radioButton3.isChecked() == True:
cap.release() #キャプチャー用オブジェクトを廃棄
aviOut.release() #録画用オブジェクトを廃棄
elif self.ui.radioButton4.isChecked() == True:
cap.release() #キャプチャー用オブジェクトを廃棄
cv2.destroyAllWindows()
#-----Start of processing: disable the controls----------------------------------------
def process_start(self):
self.ui.pushButton2.setEnabled(False)
self.ui.pushButton10.setEnabled(False)
self.ui.pushButton13.setEnabled(False)
self.ui.pushButton14.setEnabled(False)
self.ui.pushButton15.setEnabled(False)
self.ui.pushButton16.setEnabled(False)
self.ui.pushButton18.setEnabled(False)
self.ui.pushButton19.setEnabled(False)
self.ui.pushButton21.setEnabled(False)
self.ui.pushButton22.setEnabled(False)
self.ui.pushButton23.setEnabled(False)
self.ui.pushButton24.setEnabled(False)
self.ui.pushButton25.setEnabled(False)
self.ui.pushButton26.setEnabled(False)
self.ui.pushButton27.setEnabled(False)
self.ui.pushButton28.setEnabled(False)
self.ui.pushButton29.setEnabled(False)
self.ui.pushButton30.setEnabled(False)
self.ui.pushButton32.setEnabled(False)
self.ui.pushButton33.setEnabled(False)
self.ui.pushButton34.setEnabled(False)
self.ui.pushButton35.setEnabled(False)
self.ui.pushButton36.setEnabled(False)
self.ui.pushButton37.setEnabled(False)
self.ui.listWidget1.setEnabled(False)
self.ui.listWidget2.setEnabled(False)
self.ui.lineEdit7.setEnabled(False)
self.ui.lineEdit8.setEnabled(False)
#-----End of processing: re-enable the controls----------------------------------------
def process_end(self):
self.ui.pushButton2.setEnabled(True)
self.ui.pushButton10.setEnabled(True)
self.ui.pushButton13.setEnabled(True)
self.ui.pushButton14.setEnabled(True)
self.ui.pushButton15.setEnabled(True)
self.ui.pushButton16.setEnabled(True)
self.ui.pushButton18.setEnabled(True)
self.ui.pushButton19.setEnabled(True)
self.ui.pushButton21.setEnabled(True)
self.ui.pushButton22.setEnabled(True)
self.ui.pushButton23.setEnabled(True)
self.ui.pushButton24.setEnabled(True)
self.ui.pushButton25.setEnabled(True)
self.ui.pushButton26.setEnabled(True)
self.ui.pushButton27.setEnabled(True)
self.ui.pushButton28.setEnabled(True)
self.ui.pushButton29.setEnabled(True)
self.ui.pushButton30.setEnabled(True)
self.ui.pushButton32.setEnabled(True)
self.ui.pushButton33.setEnabled(True)
self.ui.pushButton34.setEnabled(True)
self.ui.pushButton35.setEnabled(True)
self.ui.pushButton36.setEnabled(True)
self.ui.pushButton37.setEnabled(True)
self.ui.listWidget1.setEnabled(True)
self.ui.listWidget2.setEnabled(True)
self.ui.lineEdit7.setEnabled(True)
self.ui.lineEdit8.setEnabled(True)
#-----Event handler for pushButton3----------------------------------------
def pushButton3_clicked(self):
global FileNum #読込んだファイル数を記憶
global DirPath #写真が保存してあるフォルダパスを記憶
global SettingDataDir #領域生データ保存用フォルダ
global AnnotationDir #アノテーションデータ保存用フォルダ
global rnFlag
#####Select the picture directory
DirPath = QtWidgets.QFileDialog.getExistingDirectory(self) #写真が保存してあるフォルダを選択
if DirPath: #フォルダが選択された場合
rnFlag = 1 #listWidget2のイベント発生時に処理をしないようにする。
self.ui.listWidget2.clear()
self.ui.lineEdit1.setText(DirPath) #フォルダパスを表示
DN = DirPath.rsplit('/', 1) #フォルダパスの文字列右側から指定文字列で分割
SettingDataDir = DN[0] + '/' + DN[1] + '_setting'
AnnotationDir = DN[0] + '/' + DN[1] + '_annotation'
if os.path.isdir(SettingDataDir) == False:
os.mkdir(SettingDataDir)
msgbox = QtWidgets.QMessageBox(self)
msgbox.setText(DN[1] + '_setting directory created.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
if os.path.isdir(AnnotationDir) == False:
os.mkdir(AnnotationDir)
msgbox = QtWidgets.QMessageBox(self)
msgbox.setText(DN[1] + '_annotation directory created.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
NumList = [] #ファイルの連番名記憶用リスト
FileList = glob.glob(DirPath + '/*.jpg') #フォルダ内の各ファイルパスをリスト形式で取得
for FN in FileList:
FN1 = FN.rsplit(".", 1) #ファイルパスの文字列右側から指定文字列で分割
FN1[0] = FN1[0].replace('\\', '/') #globのバグを修正
FN2 = FN1[0].rsplit('/', 1) #ファイルパスの文字列右側から指定文字列で分割
if FN2[1].isdigit() == True: #ファイル名が数字か確認
if os.path.isfile(FN1[0] + '.txt') == False: #画像と同名のテキストファイルがあるか確認
f = open(FN1[0] + '.txt', "w")
f.write('')
f.close()
NumList.append(int(FN2[1])) #ファイルの連番名を取得
NumList.sort()
else:
print("\nNOTICE : A file whose name is not digit was found while reading jpg files.")
print("NOTICE : Please check " + FN2[1] + ".jpg\n")
if len(NumList) > 0:
FileNum = max(NumList) + 1#ファイルの連番名の最大値を取得
self.ui.listWidget2.clear()
for x in NumList:
self.ui.listWidget2.addItem(str(x))
rnFlag = 0
self.ui.listWidget2.setCurrentRow(0)
else:
rnFlag = 0
FileNum = 0
#-----Event handler for pushButton4----------------------------------------
def pushButton4_clicked(self):
#####Load a label (.lab) file
filepath, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Open File", "",'lab File (*.lab)')
if filepath:
#####ファイル名のみの取得
filename1 = filepath.rsplit(".", 1) #ファイルパスの文字列右側から指定文字列で分割
filename2 = filename1[0].rsplit("/", 1) #ファイルパスの文字列右側から指定文字列で分割
#os.chdir(filename2[0] + "/") #カレントディレクトリをファイルパスへ変更
f = open(filepath, "r")
text = f.readlines() #改行コードも含む
f.close()
self.ui.listWidget1.clear()
if len(text) > 0:
for Label in text:
self.ui.listWidget1.addItem(Label.replace("\n", ""))
self.ui.listWidget1.setCurrentRow(0)
#####
#####
#-----Event handler for pushButton5----------------------------------------
def pushButton5_clicked(self):
#####Write the label (.lab) file
Lcount = self.ui.listWidget1.count()
if Lcount == 0:
msgbox = QtWidgets.QMessageBox(self) #####メッセージボックスを準備
msgbox.setWindowTitle("MDC")
msgbox.setText("No item in the list.") #####メッセージボックスのテキストを設定
ret = msgbox.exec_() #####メッセージボックスを表示
return
filepath, _ = QtWidgets.QFileDialog.getSaveFileName(self, "Save File", "",'lab File (*.lab)')
if filepath:
#####ファイル名のみの取得
filename1 = filepath.rsplit(".", 1) #ファイルパスの文字列右側から指定文字列で分割
filename2 = filename1[0].rsplit("/", 1) #ファイルパスの文字列右側から指定文字列で分割
#os.chdir(filename2[0] + "/") #カレントディレクトリをファイルパスへ変更
f = open(filepath, "w")
CurRow = 0
text = ""
for CurRow in range(Lcount):
text = text + self.ui.listWidget1.item(CurRow).text() + "\n"
CurRow += 1
f.writelines(text)
f.close()
msgbox = QtWidgets.QMessageBox(self) #####メッセージボックスを準備
msgbox.setWindowTitle("MDC")
msgbox.setText("FILE : Saved.") #####メッセージボックスのテキストを設定
ret = msgbox.exec_() #####メッセージボックスを表示
#####
#####
#-----Event handler for pushButton6----------------------------------------
def pushButton6_clicked(self):
#####Add an item to the list widget
if self.ui.lineEdit2.text() == "":
msgbox = QtWidgets.QMessageBox(self) #####メッセージボックスを準備
msgbox.setWindowTitle("MDC")
msgbox.setText("No text to add.") #####メッセージボックスのテキストを設定
ret = msgbox.exec_() #####メッセージボックスを表示
else:
self.ui.listWidget1.addItem(self.ui.lineEdit2.text())
self.ui.listWidget1.setCurrentRow(0)
#####
#-----Event handler for pushButton7----------------------------------------
def pushButton7_clicked(self):
#####Remove the selected item from the list widget
currentListIndex = self.ui.listWidget1.currentRow()
if currentListIndex == -1:
msgbox = QtWidgets.QMessageBox(self) #####メッセージボックスを準備
msgbox.setWindowTitle("MDC")
msgbox.setText("No item selected.") #####メッセージボックスのテキストを設定
ret = msgbox.exec_() #####メッセージボックスを表示
else:
self.ui.listWidget1.takeItem(currentListIndex)
#####
#-----Event handler for pushButton8----------------------------------------
def pushButton8_clicked(self):
#####Choose the avi file path to save to
filepath, _ = QtWidgets.QFileDialog.getSaveFileName(self, "Open File", "",'avi File (*.avi)')
if filepath:
self.ui.lineEdit3.setText(filepath)
#-----Event handler for pushButton9----------------------------------------
def pushButton9_clicked(self):
#####Open an existing avi file
filepath, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Open File", "",'avi File (*.avi)')
if filepath:
self.ui.lineEdit3.setText(filepath)
#-----Event handler for pushButton10----------------------------------------
def pushButton10_clicked(self):
#####Remove the selected picture from the list and delete its files
currentListIndex = self.ui.listWidget2.currentRow()
if currentListIndex == -1:
msgbox = QtWidgets.QMessageBox(self) #####メッセージボックスを準備
msgbox.setWindowTitle("MDC")
msgbox.setText("No item selected.") #####メッセージボックスのテキストを設定
ret = msgbox.exec_() #####メッセージボックスを表示
else:
fName = self.ui.listWidget2.currentItem().text()
self.ui.listWidget2.takeItem(currentListIndex)
pFile = DirPath + '/' + fName +'.jpg'
sFile = SettingDataDir + '/' + fName +'.set'
aFile = AnnotationDir + '/' + fName +'.xml'
dFile = DirPath + '/' + fName +'.txt'
if os.path.isfile(pFile):
os.remove(pFile)
if os.path.isfile(sFile):
os.remove(sFile)
if os.path.isfile(aFile):
os.remove(aFile)
if os.path.isfile(dFile):
os.remove(dFile)
cv2.destroyAllWindows()
#####
#-----Event handler for pushButton11----------------------------------------
def pushButton11_clicked(self):
msgbox = QtWidgets.QMessageBox(self)
ret = msgbox.question(None, "MDC", "Renumber file names.", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) #選択用メッセージボックスを表示
if ret == QtWidgets.QMessageBox.Yes: #メッセージボックスでYESが選択された場合
cn, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please input starting number.", 0, 0, 9999999, 1)
if buttonState:
global DirPath #写真が保存してあるフォルダパスを記憶
global SettingDataDir #領域生データ保存用フォルダ
global AnnotationDir #アノテーションデータ保存用フォルダ
#global DarknetAnnotationDir #アノテーションデータ保存用フォルダ
global rnFlag
global FileNum
lCount = self.ui.listWidget2.count()
if lCount == 0:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('No item in the list.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
#elif cn.isdigit() == False:
#msgbox = QtWidgets.QMessageBox(self)
#msgbox.setWindowTitle("MDC")
#msgbox.setText('Please input digits.') #メッセージボックスのテキストを設定
#ret = msgbox.exec_() #メッセージボックスを表示
else:
rnFlag = 1
iNum = int(cn)
cNum = 0
LWitems = []
progC = 0
progP = 0
prog = QtWidgets.QProgressDialog('Renaming files.', None, 0, 100, None, QtCore.Qt.Window | QtCore.Qt.WindowTitleHint | QtCore.Qt.CustomizeWindowHint)
prog.setWindowTitle("MDC")
prog.setFixedSize(prog.sizeHint())
prog.setValue(progP)
prog.show()
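# Renumbering is done in two passes: every file is first renamed to a temporary 'a<N>' name and
# then to the final '<N>' name, so a new name can never clobber a file that has not been renamed yet.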
while(True):
self.ui.listWidget2.setCurrentRow(cNum)
LWitems.append(iNum)
pFile = DirPath + '/' + win.ui.listWidget2.currentItem().text() +'.jpg'
npFile = DirPath + '/a' + str(iNum) +'.jpg'
sFile = SettingDataDir + '/' + win.ui.listWidget2.currentItem().text() +'.set'
nsFile = SettingDataDir + '/a' + str(iNum) +'.set'
aFile = AnnotationDir + '/' + win.ui.listWidget2.currentItem().text() +'.xml'
naFile = AnnotationDir + '/a' + str(iNum) +'.xml'
daFile = DirPath + '/' + win.ui.listWidget2.currentItem().text() +'.txt'
dnaFile = DirPath + '/a' + str(iNum) +'.txt'
if os.path.isfile(pFile):
os.rename(pFile, npFile)
if os.path.isfile(sFile):
os.rename(sFile, nsFile)
if os.path.isfile(aFile):
os.rename(aFile, naFile)
if os.path.isfile(daFile):
os.rename(daFile, dnaFile)
iNum += 1
cNum += 1
progC += 1
progP = int(100 * progC / (lCount * 2))
prog.setValue(progP)
app.processEvents()
if cNum > lCount - 1:
break
iNum = int(cn)
cNum = 0
while(True):
pFile = DirPath + '/a' + str(iNum) +'.jpg'
npFile = DirPath + '/' + str(iNum) +'.jpg'
sFile = SettingDataDir + '/a' + str(iNum) +'.set'
nsFile = SettingDataDir + '/' + str(iNum) +'.set'
aFile = AnnotationDir + '/a' + str(iNum) +'.xml'
naFile = AnnotationDir + '/' + str(iNum) +'.xml'
daFile = DirPath + '/a' + str(iNum) +'.txt'
dnaFile = DirPath + '/' + str(iNum) +'.txt'
if os.path.isfile(pFile):
os.rename(pFile, npFile)
if os.path.isfile(sFile):
os.rename(sFile, nsFile)
if os.path.isfile(aFile):
os.rename(aFile, naFile)
f = open(naFile, "r")
text = f.readlines() #改行コードも含む
f.close()
text2 = ""
if len(text) > 0:
for line in text:
if(("<filename>" in line) == True):
line = '<filename>' + str(iNum) +'.jpg' + '</filename>\n'
text2 = text2 + line
f = open(naFile, "w")
f.write(text2)
f.close()
if os.path.isfile(daFile):
os.rename(daFile, dnaFile)
iNum += 1
cNum += 1
progC += 1
progP = int(100 * progC / (lCount * 2))
prog.setValue(progP)
app.processEvents()
if cNum > lCount - 1:
break
self.ui.listWidget2.clear()
LWitems.sort()
for x in LWitems:
self.ui.listWidget2.addItem(str(x))
FileNum = iNum
rnFlag = 0
self.ui.listWidget2.setCurrentRow(0)
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Renumbering done.') #set the message box text
ret = msgbox.exec_() #メッセージボックスを表示
#-----Event handler for pushButton12----------------------------------------
def pushButton12_clicked(self):
##########
#Renumber the file names in a chosen directory to serial numbers
##########
msgbox = QtWidgets.QMessageBox(self)
ret = msgbox.question(None, "MDC", "If you would like to renumber file names to serial number, press yes.\nDo not renumber the files in the folder you are currently editing or the folder you have edited.\nOtherwise the data is going to be corrupted!!!\n\nThis function renumbers any files in the folder.\nPlease only place files you want to renumber in the folder.", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) #選択用メッセージボックスを表示
if ret == QtWidgets.QMessageBox.Yes: #メッセージボックスでYESが選択された場合
#####Select the directory to renumber
tmpPath = QtWidgets.QFileDialog.getExistingDirectory(self) #写真が保存してあるフォルダを選択
if tmpPath: #フォルダが選択された場合
cn, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please input starting number.", 0, 0, 9999999, 1)
if buttonState:
FileList = glob.glob(tmpPath + '/*.jpg') #フォルダ内の各ファイルパスをリスト形式で取得
FileList2 = []
lCount = len(FileList)
progC = 0
progP = 0
prog = QtWidgets.QProgressDialog('Renaming files.', None, 0, 100, None, QtCore.Qt.Window | QtCore.Qt.WindowTitleHint | QtCore.Qt.CustomizeWindowHint)
prog.setWindowTitle("MDC")
prog.setFixedSize(prog.sizeHint())
prog.setValue(progP)
prog.show()
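# Same two-pass renaming as above: temporary prefixed names first, then the final '<N>.jpg' names,
# so existing files are never overwritten during renumbering.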
for FN in FileList:
FileList2.append(FN.replace('\\', '/')) #globのバグを修正
print(str(len(FileList)))
iNum = cn
rF = []
for FN in FileList2:
npFile = tmpPath + '/abcdefghijklmnopqrstuvqxyz' + str(iNum) +'.jpg'
os.rename(FN, npFile)
iNum += 1
progC += 1
progP = int(100 * progC / (lCount * 2))
prog.setValue(progP)
app.processEvents()
rF.append(npFile)
iNum = cn
for FN in rF:
npFile = tmpPath + '/' + str(iNum) +'.jpg'
os.rename(FN, npFile)
iNum += 1
progC += 1
progP = int(100 * progC / (lCount * 2))
prog.setValue(progP)
app.processEvents()
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Renumbering done.') #set the message box text
ret = msgbox.exec_() #メッセージボックスを表示
#####
#-----Event handler for pushButton13----------------------------------------
def pushButton13_clicked(self):
##########
#Rotate the current picture and save the result
##########
degree, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Please choose degrees to rotate the pictures.", ["1", "2", "3", "4", "5", "8", "10", "15", "30", "60", "90", "120", "180"], 2, False)
if buttonState:
self.process_start()
if self.ui.listWidget2.count() != -1:
self.rotatePic(int(degree))
self.process_end()
#-----Event handler for pushButton14----------------------------------------
def pushButton14_clicked(self):
##########
#Change the brightness and color of the current picture and save the result
##########
text, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Please choose what to change and add to the list.", ["GAMMA", "COLOR", "GAMMA AND COLOR"], 0, False)
if buttonState:
if text == "GAMMA":
flag = 0
elif text == "COLOR":
flag = 1
else:
flag = 2
self.process_start()
if self.ui.listWidget2.count() != -1:
self.cngGamma(flag)
self.process_end()
    #-----Event handler for pushButton15----------------------------------------
    def pushButton15_clicked(self):
        ##########
        #Rotate every image file in the list widget and save the results
        ##########
SP = self.ui.lineEdit7.text()
EP = self.ui.lineEdit8.text()
if self.ui.listWidget2.count() == -1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('No file in the directory.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif SP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for start position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif EP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > int(EP):
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(EP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('End position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
else:
SP = int(SP)
EP = int(EP) + 1
msgbox = QtWidgets.QMessageBox(self)
ret = msgbox.question(None, "MDC", "Create rotated files from pictures in the list?", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) #選択用メッセージボックスを表示
if ret == QtWidgets.QMessageBox.Yes: #メッセージボックスでYESが選択された場合
                degree, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Please choose degrees to rotate the pictures.", ["1", "2", "3", "4", "5", "8", "10", "15", "30", "60", "90", "120", "180"], 2, False)
if buttonState:
step, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please choose steps.", 1, 0, 100, 1)
if buttonState:
self.process_start()
#listItemCount = self.ui.listWidget2.count()
#if self.ui.listWidget2.count() != -1:
count = 0
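                        # Step pattern used by the batch buttons: only every 'step'-th row in the
                        # selected range is processed. For example, start 0, end 9 and step 3
                        # rotates the pictures at rows 2, 5 and 8.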
for x in range(SP, EP):
self.ui.listWidget2.setCurrentRow(x)
count += 1
if count == step:
self.rotatePic(int(degree))
app.processEvents()
count = 0
self.process_end()
    #-----Event handler for pushButton16----------------------------------------
    def pushButton16_clicked(self):
        ##########
        #Change the brightness and color of every image file in the list widget and save the results
        ##########
SP = self.ui.lineEdit7.text()
EP = self.ui.lineEdit8.text()
if self.ui.listWidget2.count() == -1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('No file in the directory.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif SP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for start position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif EP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > int(EP):
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(EP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('End position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
else:
SP = int(SP)
EP = int(EP) + 1
msgbox = QtWidgets.QMessageBox(self)
ret = msgbox.question(None, "MDC", "Create colored files from pictures in the list?", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) #選択用メッセージボックスを表示
if ret == QtWidgets.QMessageBox.Yes: #メッセージボックスでYESが選択された場合
text, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Please choose what to change and add to the list.", ["GAMMA", "COLOR", "GAMMA AND COLOR"], 0, False)
if buttonState:
if text == "GAMMA":
flag = 0
elif text == "COLOR":
flag = 1
else:
flag = 2
step, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please choose steps.", 1, 0, 100, 1)
if buttonState:
self.process_start()
count = 0
for x in range(SP, EP):
self.ui.listWidget2.setCurrentRow(x)
count += 1
if count == step:
self.cngGamma(flag)
app.processEvents()
count = 0
self.process_end()
    #-----Event handler for pushButton17----------------------------------------
    def pushButton17_clicked(self):
        ##########
        #Automatically save image files extracted from a video
        ##########
msgbox = QtWidgets.QMessageBox(self)
        ret = msgbox.question(None, "MDC", "Get pictures from a video file automatically?\nPlease do not choose the folder you are currently editing.", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) #Show the confirmation message box
if ret == QtWidgets.QMessageBox.Yes: #メッセージボックスでYESが選択された場合
filepath, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Load File", "",'avi File (*.avi)')
if filepath: #ファイルパスが選択されているか確認
DirPath = QtWidgets.QFileDialog.getExistingDirectory(self) #フォルダの選択
if DirPath: #フォルダが選択された場合
intVal, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please set first file number.", 0, 0, 1000000, 1)
if buttonState:
step, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please choose steps.", 1, 0, 100, 1)
if buttonState:
cap = cv2.VideoCapture(filepath)
capNum = intVal
count = 0
while(True):
ret, frame = cap.read()
if ret == True:
count += 1
if count == step:
if trimMode == 1: #トリムモードで領域が選択されている場合
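                                            # Center crop: trimH/trimW are the top-left offsets that keep a
                                            # th x tw window centred in the frame. th, tw (and trimMode) are
                                            # assumed to be globals set by the trim-region selection elsewhere
                                            # in this file.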
vidHeight, vidWidth = frame.shape[:2]
trimH = int((vidHeight - int(th)) / 2)
trimW = int((vidWidth - int(tw)) / 2)
frame = frame[trimH:trimH + int(th), trimW:trimW + int(tw)] #指定したサイズに画像をトリム
cv2.imshow("CAPTURE",frame)
cv2.imwrite(DirPath + '/' + str(capNum) + '.jpg', frame)
capNum += 1
count = 0
app.processEvents() #ボタン処理のタイミング確認用
else:
cap.release() #キャプチャー用オブジェクトを廃棄
cv2.destroyAllWindows()
msgbox = QtWidgets.QMessageBox()
msgbox.setWindowTitle("MDC")
msgbox.setText("Done.") #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
break
    #-----Event handler for pushButton18----------------------------------------
    def pushButton18_clicked(self):
        ##########
        #Apply a perspective transform to the current image and save the result
        ##########
self.process_start()
if self.ui.listWidget2.count() != -1:
self.persPic()
self.process_end()
    #-----Event handler for pushButton19----------------------------------------
    def pushButton19_clicked(self):
        ##########
        #Apply a perspective transform to every image in the list widget and save the results
        ##########
SP = self.ui.lineEdit7.text()
EP = self.ui.lineEdit8.text()
if self.ui.listWidget2.count() == -1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('No file in the directory.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif SP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for start position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif EP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > int(EP):
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(EP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('End position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
else:
SP = int(SP)
EP = int(EP) + 1
step, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please choose steps.", 1, 0, 100, 1)
if buttonState:
self.process_start()
count = 0
for x in range(SP, EP):
self.ui.listWidget2.setCurrentRow(x)
count += 1
if count == step:
self.persPic()
app.processEvents()
count = 0
self.process_end()
    #-----Event handler for pushButton20----------------------------------------
    def pushButton20_clicked(self):
        ##########
        #Resize every image in the chosen directory
        ##########
msgbox = QtWidgets.QMessageBox(self)
        ret = msgbox.question(None, "MDC", "If you would like to resize pictures, press yes.\nDo not resize the files in the folder you are currently editing or the folder you have edited.\nOtherwise the data is going to be corrupted!!!\n\nThis function resizes all pictures in the folder.\nPlease only place files you want to resize in the folder.", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) #Show the confirmation message box
if ret == QtWidgets.QMessageBox.Yes: #メッセージボックスでYESが選択された場合
#####ディレクトリ選択
tmpPath = QtWidgets.QFileDialog.getExistingDirectory(self) #写真が保存してあるフォルダを選択
if tmpPath: #フォルダが選択された場合
text, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Choose a way to resize pictures", ["CHOOSE PERCENTAGE", "INPUT PERCENTAGE", "INPUT SIZE"], 0, False)
if buttonState:
if text == "CHOOSE PERCENTAGE":
percentage, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Please choose percentage to resize the pictures.", ["10", "20", "30", "40", "50", "60", "70", "80", "90", "110", "120", "130", "140", "150", "160", "170", "180", "190", "200"], 4, False)
if buttonState:
percentage = int(percentage) * 0.01
FileList = glob.glob(tmpPath + '/*.jpg') #フォルダ内の各ファイルパスをリスト形式で取得
FileList2 = []
lCount = len(FileList)
progC = 0
progP = 0
prog = QtWidgets.QProgressDialog('Resize files.', None, 0, 100, None, QtCore.Qt.Window | QtCore.Qt.WindowTitleHint | QtCore.Qt.CustomizeWindowHint)
prog.setWindowTitle("MDC")
prog.setFixedSize(prog.sizeHint())
prog.setValue(progP)
prog.show()
for FN in FileList:
FileList2.append(FN.replace('\\', '/')) #globのバグを修正
for FN in FileList2:
img = cv2.imread(FN)
height = img.shape[0]
width = img.shape[1]
img2 = cv2.resize(img , (int(width * percentage), int(height * percentage)))
cv2.imwrite(FN, img2)
progC += 1
progP = int(100 * progC / (lCount))
prog.setValue(progP)
app.processEvents()
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Resizing done.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
if text == "INPUT PERCENTAGE":
percentage, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please input percentage to resize the pictures.", 50, 10, 200, 10)
if buttonState:
percentage = int(percentage) * 0.01
FileList = glob.glob(tmpPath + '/*.jpg') #フォルダ内の各ファイルパスをリスト形式で取得
FileList2 = []
lCount = len(FileList)
progC = 0
progP = 0
prog = QtWidgets.QProgressDialog('Resize files.', None, 0, 100, None, QtCore.Qt.Window | QtCore.Qt.WindowTitleHint | QtCore.Qt.CustomizeWindowHint)
prog.setWindowTitle("MDC")
prog.setFixedSize(prog.sizeHint())
prog.setValue(progP)
prog.show()
for FN in FileList:
FileList2.append(FN.replace('\\', '/')) #globのバグを修正
for FN in FileList2:
img = cv2.imread(FN)
height = img.shape[0]
width = img.shape[1]
img2 = cv2.resize(img , (int(width * percentage), int(height * percentage)))
cv2.imwrite(FN, img2)
progC += 1
progP = int(100 * progC / (lCount))
prog.setValue(progP)
app.processEvents()
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Resizing done.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
if text == "INPUT SIZE":
width, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please input width to resize the pictures.", 608, 1, 99999, 1)
if buttonState:
height, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please input height to resize the pictures.", 608, 1, 99999, 1)
if buttonState:
FileList = glob.glob(tmpPath + '/*.jpg') #フォルダ内の各ファイルパスをリスト形式で取得
FileList2 = []
lCount = len(FileList)
progC = 0
progP = 0
prog = QtWidgets.QProgressDialog('Resize files.', None, 0, 100, None, QtCore.Qt.Window | QtCore.Qt.WindowTitleHint | QtCore.Qt.CustomizeWindowHint)
prog.setWindowTitle("MDC")
prog.setFixedSize(prog.sizeHint())
prog.setValue(progP)
prog.show()
for FN in FileList:
FileList2.append(FN.replace('\\', '/')) #globのバグを修正
for FN in FileList2:
img = cv2.imread(FN)
#height = img.shape[0]
#width = img.shape[1]
img2 = cv2.resize(img , (width, height))
cv2.imwrite(FN, img2)
progC += 1
progP = int(100 * progC / (lCount))
prog.setValue(progP)
app.processEvents()
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Resizing done.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
#####
    #-----Event handler for pushButton21----------------------------------------
    def pushButton21_clicked(self):
        ##########
        #Resize the current image
        ##########
if self.ui.listWidget2.count() != -1:
self.process_start()
msgbox = QtWidgets.QMessageBox(self)
ret = msgbox.question(None, "MDC", "Resize the picture currently selected?", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) #選択用メッセージボックスを表示
if ret == QtWidgets.QMessageBox.Yes: #メッセージボックスでYESが選択された場合
text, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Choose a way to resize pictures", ["CHOOSE PERCENTAGE", "INPUT PERCENTAGE", "INPUT SIZE"], 0, False)
if buttonState:
if text == "CHOOSE PERCENTAGE":
percentage, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Please choose percentage to resize the pictures.", ["10", "20", "30", "40", "50", "60", "70", "80", "90", "110", "120", "130", "140", "150", "160", "170", "180", "190", "200"], 4, False)
if buttonState:
self.resizePic(percentage, 0, 0, mode = 0)
if text == "INPUT PERCENTAGE":
percentage, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please input percentage to resize the pictures.", 50, 10, 200, 10)
if buttonState:
self.resizePic(percentage, 0, 0, mode = 0)
if text == "INPUT SIZE":
width, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please input width to resize the pictures.", 608, 1, 99999, 1)
if buttonState:
height, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please input height to resize the pictures.", 608, 1, 99999, 1)
if buttonState:
self.resizePic(0, width, height, mode = 1)
self.process_end()
#####
    #-----Event handler for pushButton22----------------------------------------
    def pushButton22_clicked(self):
        ##########
        #Resize every image in the list widget
        ##########
SP = self.ui.lineEdit7.text()
EP = self.ui.lineEdit8.text()
if self.ui.listWidget2.count() == -1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('No file in the directory.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif SP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for start position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif EP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > int(EP):
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(EP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('End position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
else:
SP = int(SP)
EP = int(EP) + 1
#if self.ui.listWidget2.count() != -1:
self.process_start()
msgbox = QtWidgets.QMessageBox(self)
ret = msgbox.question(None, "MDC", "Resize all pictures in the folder?", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) #選択用メッセージボックスを表示
if ret == QtWidgets.QMessageBox.Yes: #メッセージボックスでYESが選択された場合
text, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Choose a way to resize pictures", ["CHOOSE PERCENTAGE", "INPUT PERCENTAGE", "INPUT SIZE"], 0, False)
if buttonState:
if text == "CHOOSE PERCENTAGE":
percentage, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Please choose percentage to resize the pictures.", ["10", "20", "30", "40", "50", "60", "70", "80", "90", "110", "120", "130", "140", "150", "160", "170", "180", "190", "200"], 4, False)
if buttonState:
for x in range(SP, EP):
self.ui.listWidget2.setCurrentRow(x)
self.resizePic(percentage, 0, 0, mode = 0)
app.processEvents()
if text == "INPUT PERCENTAGE":
percentage, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please input percentage to resize the pictures.", 50, 10, 200, 10)
if buttonState:
for x in range(SP, EP):
self.ui.listWidget2.setCurrentRow(x)
self.resizePic(percentage, 0, 0, mode = 0)
app.processEvents()
if text == "INPUT SIZE":
width, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please input width to resize the pictures.", 608, 1, 99999, 1)
if buttonState:
height, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please input height to resize the pictures.", 608, 1, 99999, 1)
if buttonState:
for x in range(SP, EP):
self.ui.listWidget2.setCurrentRow(x)
self.resizePic(0, width, height, mode = 1)
app.processEvents()
self.process_end()
#####
    #-----Event handler for pushButton23----------------------------------------
    def pushButton23_clicked(self):
        ##########
        #Cut out the region, rotate it, paste it onto a background picture, and save
        ##########
        #####Select directory
msgbox = QtWidgets.QMessageBox(self)
        ret = msgbox.question(None, "MDC", "Create rotated files from a picture in the list with a different background?\nPlease choose a folder that contains background pictures.", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) #Show the confirmation message box
if ret == QtWidgets.QMessageBox.Yes: #メッセージボックスでYESが選択された場合
tmpPath = QtWidgets.QFileDialog.getExistingDirectory(self) #写真が保存してあるフォルダを選択
if tmpPath: #フォルダが選択された場合
FileList = glob.glob(tmpPath + '/*.jpg') #フォルダ内の各ファイルパスをリスト形式で取得
if len(FileList) == 0:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('JPEG file not found in the folder.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
else:
                    degree, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Please choose degrees to rotate the pictures.", ["1", "2", "3", "4", "5", "8", "10", "15", "30", "60", "90", "120", "180"], 2, False)
if buttonState:
FileList2 = []
for FN in FileList:
FileList2.append(FN.replace('\\', '/')) #globのバグを修正
self.pasteRotatePic(int(degree), FileList2)
    #-----Event handler for pushButton24----------------------------------------
    def pushButton24_clicked(self):
        ##########
        #For every picture in the folder, cut out the region, rotate it, paste it onto a background picture, and save
        ##########
SP = self.ui.lineEdit7.text()
EP = self.ui.lineEdit8.text()
if self.ui.listWidget2.count() == -1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('No file in the directory.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif SP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for start position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif EP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > int(EP):
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(EP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('End position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
else:
SP = int(SP)
EP = int(EP) + 1
msgbox = QtWidgets.QMessageBox(self)
            ret = msgbox.question(None, "MDC", "Create rotated files from pictures in the list with a different background?\nPlease choose a folder that contains background pictures.", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) #Show the confirmation message box
if ret == QtWidgets.QMessageBox.Yes: #メッセージボックスでYESが選択された場合
tmpPath = QtWidgets.QFileDialog.getExistingDirectory(self) #写真が保存してあるフォルダを選択
if tmpPath: #フォルダが選択された場合
FileList = glob.glob(tmpPath + '/*.jpg') #フォルダ内の各ファイルパスをリスト形式で取得
if len(FileList) == 0:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('JPEG file not found in the folder.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
else:
                        degree, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Please choose degrees to rotate the pictures.", ["1", "2", "3", "4", "5", "8", "10", "15", "30", "60", "90", "120", "180"], 2, False)
if buttonState:
step, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please choose steps.", 1, 0, 100, 1)
if buttonState:
self.process_start()
FileList2 = []
for FN in FileList:
FileList2.append(FN.replace('\\', '/')) #globのバグを修正
count = 0
for x in range(SP, EP):
self.ui.listWidget2.setCurrentRow(x)
count += 1
if count == step:
self.pasteRotatePic(int(degree), FileList2)
app.processEvents()
count = 0
self.process_end()
    #-----Event handler for pushButton25----------------------------------------
    def pushButton25_clicked(self):
        ##########
        #Cut out the region, rotate it, and save
        ##########
        #####Select directory
        degree, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Please choose degrees to rotate the pictures.", ["1", "2", "3", "4", "5", "8", "10", "15", "30", "60", "90", "120", "180"], 2, False)
if buttonState:
self.cutRotatePic(int(degree))
    #-----Event handler for pushButton26----------------------------------------
    def pushButton26_clicked(self):
        ##########
        #For every picture in the folder, cut out the region, rotate it, and save
        ##########
SP = self.ui.lineEdit7.text()
EP = self.ui.lineEdit8.text()
if self.ui.listWidget2.count() == -1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('No file in the directory.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif SP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for start position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif EP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > int(EP):
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(EP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('End position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
else:
SP = int(SP)
EP = int(EP) + 1
msgbox = QtWidgets.QMessageBox(self)
ret = msgbox.question(None, "MDC", "Create rotated files from pictures in the list with black background?", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) #選択用メッセージボックスを表示
if ret == QtWidgets.QMessageBox.Yes: #メッセージボックスでYESが選択された場合
                degree, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Please choose degrees to rotate the pictures.", ["1", "2", "3", "4", "5", "8", "10", "15", "30", "60", "90", "120", "180"], 2, False)
if buttonState:
step, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please choose steps.", 1, 0, 100, 1)
if buttonState:
self.process_start()
count = 0
for x in range(SP, EP):
self.ui.listWidget2.setCurrentRow(x)
count += 1
if count == step:
self.cutRotatePic(int(degree))
app.processEvents()
count = 0
self.process_end()
    #-----Event handler for pushButton27----------------------------------------
    def pushButton27_clicked(self):
        ##########
        #Cut out the region, rotate it, and save
        ##########
        #####Select directory
        degree, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Please choose degrees to rotate the pictures.", ["1", "2", "3", "4", "5", "8", "10", "15", "30", "60", "90", "120", "180"], 2, False)
if buttonState:
self.cutBRotatePic(int(degree))
    #-----Event handler for pushButton28----------------------------------------
    def pushButton28_clicked(self):
        ##########
        #For every picture in the folder, cut out the region, rotate it, and save
        ##########
SP = self.ui.lineEdit7.text()
EP = self.ui.lineEdit8.text()
if self.ui.listWidget2.count() == -1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('No file in the directory.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif SP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for start position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif EP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > int(EP):
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(EP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('End position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
else:
SP = int(SP)
EP = int(EP) + 1
msgbox = QtWidgets.QMessageBox(self)
ret = msgbox.question(None, "MDC", "Create rotated files from pictures in the list with black background?", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) #選択用メッセージボックスを表示
if ret == QtWidgets.QMessageBox.Yes: #メッセージボックスでYESが選択された場合
                degree, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Please choose degrees to rotate the pictures.", ["1", "2", "3", "4", "5", "8", "10", "15", "30", "60", "90", "120", "180"], 2, False)
if buttonState:
step, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please choose steps.", 1, 0, 100, 1)
if buttonState:
self.process_start()
count = 0
for x in range(SP, EP):
self.ui.listWidget2.setCurrentRow(x)
count += 1
if count == step:
self.cutBRotatePic(int(degree))
app.processEvents()
count = 0
self.process_end()
    #-----Event handler for pushButton29----------------------------------------
    def pushButton29_clicked(self):
        ##########
        #Cut out the region, rotate it, paste it onto a background picture, and save
        ##########
        #####Select directory
msgbox = QtWidgets.QMessageBox(self)
        ret = msgbox.question(None, "MDC", "Create rotated files from a picture in the list with a different background?\nPlease choose a folder that contains background pictures.", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) #Show the confirmation message box
if ret == QtWidgets.QMessageBox.Yes: #メッセージボックスでYESが選択された場合
tmpPath = QtWidgets.QFileDialog.getExistingDirectory(self) #写真が保存してあるフォルダを選択
if tmpPath: #フォルダが選択された場合
FileList = glob.glob(tmpPath + '/*.jpg') #フォルダ内の各ファイルパスをリスト形式で取得
if len(FileList) == 0:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('JPEG file not found in the folder.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
else:
                    degree, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Please choose degrees to rotate the pictures.", ["1", "2", "3", "4", "5", "8", "10", "15", "30", "60", "90", "120", "180"], 2, False)
if buttonState:
FileList2 = []
for FN in FileList:
FileList2.append(FN.replace('\\', '/')) #globのバグを修正
self.pasteBRotatePic(int(degree), FileList2)
    #-----Event handler for pushButton30----------------------------------------
    def pushButton30_clicked(self):
        ##########
        #For every picture in the folder, cut out the region, rotate it, paste it onto a background picture, and save
        ##########
SP = self.ui.lineEdit7.text()
EP = self.ui.lineEdit8.text()
if self.ui.listWidget2.count() == -1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('No file in the directory.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif SP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for start position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif EP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > int(EP):
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(EP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('End position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
else:
SP = int(SP)
EP = int(EP) + 1
msgbox = QtWidgets.QMessageBox(self)
            ret = msgbox.question(None, "MDC", "Create rotated files from pictures in the list with a different background?\nPlease choose a folder that contains background pictures.", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) #Show the confirmation message box
if ret == QtWidgets.QMessageBox.Yes: #メッセージボックスでYESが選択された場合
tmpPath = QtWidgets.QFileDialog.getExistingDirectory(self) #写真が保存してあるフォルダを選択
if tmpPath: #フォルダが選択された場合
FileList = glob.glob(tmpPath + '/*.jpg') #フォルダ内の各ファイルパスをリスト形式で取得
if len(FileList) == 0:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('JPEG file not found in the folder.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
else:
                        degree, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Please choose degrees to rotate the pictures.", ["1", "2", "3", "4", "5", "8", "10", "15", "30", "60", "90", "120", "180"], 2, False)
if buttonState:
step, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please choose steps.", 1, 0, 100, 1)
if buttonState:
self.process_start()
FileList2 = []
for FN in FileList:
FileList2.append(FN.replace('\\', '/')) #globのバグを修正
count = 0
for x in range(SP, EP):
self.ui.listWidget2.setCurrentRow(x)
count += 1
if count == step:
self.pasteBRotatePic(int(degree), FileList2)
app.processEvents()
count = 0
self.process_end()
    #-----Event handler for pushButton31----------------------------------------
    def pushButton31_clicked(self):
        #####Create a video from pictures
filepath, _ = QtWidgets.QFileDialog.getSaveFileName(self, "Open File", "",'avi File (*.avi)')
if filepath:
dirPath = QtWidgets.QFileDialog.getExistingDirectory(self) #写真が保存してあるフォルダを選択
if dirPath: #フォルダが選択された場合
FileList = glob.glob(dirPath + '/*.jpg') #フォルダ内の各ファイルパスをリスト形式で取得
if len(FileList) == 0:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('JPEG file not found in the folder.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
else:
rate, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Please choose frame rate.", ["1", "2", "3", "4", "5", "10", "20", "30", "60", "120", "144", "240"], 4, False)
if buttonState:
msgbox = QtWidgets.QMessageBox(self)
                        ans = msgbox.question(None, "MDC", "Put file names on the pictures?", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) #Show the confirmation message box
tmp = FileList[0]
tmp = tmp.replace('\\', '/') #globのバグを修正
tmpImg = cv2.imread(tmp)
imgHeight = tmpImg.shape[0]
imgWidth = tmpImg.shape[1]
aviOut = cv2.VideoWriter(filepath, fourcc, int(rate), (imgWidth, imgHeight))
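                        # 'fourcc' is assumed to be a global codec id defined elsewhere in this file
                        # (e.g. cv2.VideoWriter_fourcc(*'XVID') for .avi output). Every frame written
                        # below must match the (imgWidth, imgHeight) size passed here, so all source
                        # pictures are expected to share the size of the first one.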
for FN in FileList:
fPath = FN.replace('\\', '/') #globのバグを修正
img = cv2.imread(fPath)
if ans == QtWidgets.QMessageBox.Yes: #メッセージボックスでYESが選択された場合
fPath1 = fPath.rsplit(".", 1) #ファイルパスの文字列右側から指定文字列で分割
fPath2 = fPath1[0].rsplit("/", 1) #ファイルパスの文字列右側から指定文字列で分割
font_size = 1
font = cv2.FONT_HERSHEY_PLAIN
cv2.putText(img, fPath2[1], (5, 25), font, font_size, (255, 255, 255), 1)
cv2.imshow("MIIL MDC", img)
app.processEvents() #ボタン処理のタイミング確認用
aviOut.write(img)
cv2.destroyAllWindows()
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Video file created.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
    #-----Event handler for pushButton32----------------------------------------
    def pushButton32_clicked(self):
        ##########
        #Rotate the current image about the picture center and save the results
        ##########
        degree, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Please choose degrees to rotate the pictures.", ["1", "2", "3", "4", "5", "8", "10", "15", "30", "60", "90", "120", "180"], 2, False)
if buttonState:
self.process_start()
if self.ui.listWidget2.count() != -1:
self.rotatePicPC(int(degree))
self.process_end()
    #-----Event handler for pushButton33----------------------------------------
    def pushButton33_clicked(self):
        ##########
        #Rotate every image file in the list widget and save the results
        ##########
SP = self.ui.lineEdit7.text()
EP = self.ui.lineEdit8.text()
if self.ui.listWidget2.count() == -1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('No file in the directory.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif SP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for start position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif EP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > int(EP):
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(EP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('End position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
else:
SP = int(SP)
EP = int(EP) + 1
msgbox = QtWidgets.QMessageBox(self)
ret = msgbox.question(None, "MDC", "Create rotated files from pictures in the list?", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) #選択用メッセージボックスを表示
if ret == QtWidgets.QMessageBox.Yes: #メッセージボックスでYESが選択された場合
                degree, buttonState = QtWidgets.QInputDialog.getItem(self, "MDC", "Please choose degrees to rotate the pictures.", ["1", "2", "3", "4", "5", "8", "10", "15", "30", "60", "90", "120", "180"], 2, False)
if buttonState:
step, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please choose steps.", 1, 0, 100, 1)
if buttonState:
self.process_start()
#listItemCount = self.ui.listWidget2.count()
#if self.ui.listWidget2.count() != -1:
count = 0
for x in range(SP, EP):
self.ui.listWidget2.setCurrentRow(x)
count += 1
if count == step:
self.rotatePicPC(int(degree))
app.processEvents()
count = 0
self.process_end()
    #-----Event handler for pushButton34----------------------------------------
    def pushButton34_clicked(self):
        ##########
        #Flip the current image and save the result
        ##########
self.process_start()
if self.ui.listWidget2.count() != -1:
self.reversePic()
self.process_end()
    #-----Event handler for pushButton35----------------------------------------
    def pushButton35_clicked(self):
        ##########
        #Flip every image in the selected range and save the results
        ##########
SP = self.ui.lineEdit7.text()
EP = self.ui.lineEdit8.text()
if self.ui.listWidget2.count() == -1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('No file in the directory.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif SP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for start position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif EP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > int(EP):
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(EP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('End position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
else:
SP = int(SP)
EP = int(EP) + 1
msgbox = QtWidgets.QMessageBox(self)
ret = msgbox.question(None, "MDC", "Flip the picture?", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) #選択用メッセージボックスを表示
if ret == QtWidgets.QMessageBox.Yes: #メッセージボックスでYESが選択された場合
step, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please choose steps.", 1, 0, 100, 1)
if buttonState:
self.process_start()
#listItemCount = self.ui.listWidget2.count()
#if self.ui.listWidget2.count() != -1:
count = 0
for x in range(SP, EP):
self.ui.listWidget2.setCurrentRow(x)
count += 1
if count == step:
self.reversePic()
app.processEvents()
count = 0
self.process_end()
    #-----Event handler for pushButton36----------------------------------------
    def pushButton36_clicked(self):
        ##########
        #Shift the current image and save the result
        ##########
self.process_start()
if self.ui.listWidget2.count() != -1:
self.movePic()
self.process_end()
    #-----Event handler for pushButton37----------------------------------------
    def pushButton37_clicked(self):
        ##########
        #Shift every image in the selected range and save the results
        ##########
SP = self.ui.lineEdit7.text()
EP = self.ui.lineEdit8.text()
if self.ui.listWidget2.count() == -1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('No file in the directory.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif SP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for start position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif EP.isdigit() == False:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Please input digit for end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > int(EP):
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than end position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(SP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('Start position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
elif int(EP) > self.ui.listWidget2.count() - 1:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setWindowTitle("MDC")
msgbox.setText('End position should be equal or smaller than max picture position.') #メッセージボックスのテキストを設定
ret = msgbox.exec_() #メッセージボックスを表示
else:
SP = int(SP)
EP = int(EP) + 1
msgbox = QtWidgets.QMessageBox(self)
ret = msgbox.question(None, "MDC", "Shift the picture?", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) #選択用メッセージボックスを表示
if ret == QtWidgets.QMessageBox.Yes: #メッセージボックスでYESが選択された場合
step, buttonState = QtWidgets.QInputDialog.getInt(self, "MDC", "Please choose steps.", 1, 0, 100, 1)
if buttonState:
self.process_start()
#listItemCount = self.ui.listWidget2.count()
#if self.ui.listWidget2.count() != -1:
count = 0
for x in range(SP, EP):
self.ui.listWidget2.setCurrentRow(x)
count += 1
if count == step:
self.movePic()
app.processEvents()
count = 0
self.process_end()
    #-----Image flip-and-save function----------------------------------------
    def reversePic(self):
        ##########
        #Flip the current image horizontally and save it
        ##########
global FileNum
items = []
cIndex = self.ui.listWidget2.currentRow()
frameR = np.copy(CurPic)
fPic = cv2.flip(frameR, 1)
cv2.imwrite(DirPath + '/' + str(FileNum) + '.jpg', fPic)
text1 = ""
text2 = ""
text3 = ""
fY = CurPic.shape[0]
fX = CurPic.shape[1]
xmlHeader = '<annotation>' + '\n<filename>' + str(FileNum) + '.jpg</filename>\n<size>\n<width>' + str(fX) + '</width>' + '\n' + '<height>' + str(fY) + '</height>\n</size>\n'
xmlfooter = '</annotation>\n'
for x in SettingList:
ROW, LABEL,TX, TY, BX, BY = x.split(',')
ROW = ROW.replace(" ", "")
LABEL = LABEL.replace(" ", "")
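            # Horizontal flip of the bounding box: a pixel column x maps to (width - 1) - x,
            # so the corners are mirrored here and then presumably re-ordered/clipped by
            # getRectanglePos (defined earlier in this file) so that xmin < xmax again.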
fsx = abs(int(TX) - (CurPicWidth -1))
fsy = int(TY)
esx = abs(int(BX) - (CurPicWidth -1))
esy = int(BY)
_ , fsx, fsy, esx, esy, _ , _ = getRectanglePos(fsx, fsy, esx, esy, CurPicWidth, CurPicHeight)
filepath = SettingDataDir + '/' + str(FileNum) + '.set'
filepath2 = AnnotationDir + '/' + str(FileNum) + '.xml'
filepath3 = DirPath + '/' + str(FileNum) + '.txt'
text1 = text1 + ROW + ', ' + LABEL + ', ' + str(fsx) + ', ' + str(fsy) + ', ' + str(esx) + ', ' + str(esy) + '\n'
text2 = text2 + '<object>\n<name>' + LABEL + '</name>\n<bndbox>' + '\n' + '<xmin>' + str(fsx) + '</xmin>' + '\n' + '<ymin>' + str(fsy) + '</ymin>' + '\n' + '<xmax>' + str(esx) + '</xmax>' + '\n' + '<ymax>' + str(esy) + '</ymax>\n</bndbox>\n</object>\n'
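            # The block below also writes the box in Darknet/YOLO txt format:
            # "<class> <cx> <cy> <w> <h>" with the centre and size normalised to [0, 1].
            # Example: a 100x50 box centred at (320, 240) in a 640x480 picture becomes
            # "0 0.5 0.5 0.15625 0.10416...".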
cw = 1 / CurPicWidth
ch = 1 / CurPicHeight
cnx = (fsx + esx) / 2
cny = (fsy + esy) / 2
cnw = esx - fsx
cnh = esy - fsy
cnx = cnx * cw
cny = cny * ch
cnw = cnw * cw
cnh = cnh * ch
text3 = text3 + ROW + ' ' + str(cnx) + ' ' + str(cny) + ' ' + str(cnw) + ' ' + str(cnh) + '\n'
cv2.rectangle(fPic, (fsx, fsy), (esx, esy), (0, 255, 0), 1)
font_size = 1
font = cv2.FONT_HERSHEY_PLAIN
cv2.putText(fPic, LABEL, (fsx + 2, fsy - 2), font, font_size,(0,255,0),1)
f = open(filepath, "w")
f.writelines(text1)
f.close()
text2 = xmlHeader + text2 + xmlfooter
f = open(filepath2, "w")
f.writelines(text2)
f.close()
f = open(filepath3, "w")
f.writelines(text3)
f.close()
cv2.imshow("MIIL MDC DRAW MODE", fPic)
items.append(str(FileNum))
FileNum += 1
#Lpos = win.ui.listWidget2.count() - 1
#self.ui.listWidget2.setCurrentRow(Lpos)
app.processEvents() #ボタン処理のタイミング確認用
self.ui.listWidget2.addItems(items)
self.ui.listWidget2.setCurrentRow(cIndex)
    #-----Image rotate-and-save function----------------------------------------
    def pasteBRotatePic(self, degree, BackgroundList):
        ##########
        #Cut out the region, rotate it, paste it onto a random background picture, and save
        ##########
global FileNum
items = []
rTimes = int(360 / degree)
cIndex = self.ui.listWidget2.currentRow()
for x in SettingList:
ROW, LABEL,TX, TY, BX, BY = x.split(',')
ROW = ROW.replace(" ", "")
LABEL = LABEL.replace(" ", "")
if len(SettingList) == 1:
fy = CurPic.shape[0]
fx = CurPic.shape[1]
copyPic = np.copy(CurPic)
imageArray = np.zeros((fy, fx, 3), np.uint8)
cutPic = copyPic[int(TY):int(BY), int(TX):int(BX)]
imageArray[int(TY):int(BY), int(TX):int(BX)] = cutPic
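                # imageArray is a black canvas the size of the original picture with only the
                # labelled region copied in, so rotating it later isolates the object from the
                # rest of the picture.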
for i in range(1, rTimes):
deg = i * degree
cx = int(TX) + int((int(BX) - int(TX)) / 2)
cy = int(TY) + int((int(BY) - int(TY)) / 2)
fsx, fsy, esx, esy, _ , _ = getRotatedRectanglePos(deg, cx, cy, int(TX), int(TY), int(BX), int(BY), CurPicWidth, CurPicHeight)
frameR = np.copy(imageArray)
M = cv2.getRotationMatrix2D((cx, cy), deg, 1)
fPic = cv2.warpAffine(frameR,M,(CurPicWidth, CurPicHeight), borderValue=(255, 255, 255))
bgPic = cv2.imread(random.choice(BackgroundList))
cutPicB = fPic[fsy:esy, fsx:esx]
bgPic[fsy:esy, fsx:esx] = cutPicB
cv2.imwrite(DirPath + '/' + str(FileNum) + '.jpg', bgPic)
filepath = SettingDataDir + '/' + str(FileNum) + '.set'
filepath2 = AnnotationDir + '/' + str(FileNum) + '.xml'
filepath3 = DirPath + '/' + str(FileNum) + '.txt'
text = ROW + ', ' + LABEL + ', ' + str(fsx) + ', ' + str(fsy) + ', ' + str(esx) + ', ' + str(esy) + '\n'
f = open(filepath, "w")
f.writelines(text)
f.close()
text = '<annotation>' + '\n<filename>' + str(FileNum) + '.jpg</filename>\n<size>\n<width>' + str(fx) + '</width>' + '\n' + '<height>' + str(fy) + '</height>\n</size>\n'
text = text + '<object>\n<name>' + LABEL + '</name>\n<bndbox>' + '\n' + '<xmin>' + str(fsx) + '</xmin>' + '\n' + '<ymin>' + str(fsy) + '</ymin>' + '\n' + '<xmax>' + str(esx) + '</xmax>' + '\n' + '<ymax>' + str(esy) + '</ymax>\n</bndbox>\n</object>\n'
text = text + '</annotation>\n'
f = open(filepath2, "w")
f.writelines(text)
f.close()
cw = 1 / CurPicWidth
ch = 1 / CurPicHeight
cnx = (fsx + esx) / 2
cny = (fsy + esy) / 2
cnw = esx - fsx
cnh = esy - fsy
cnx = cnx * cw
cny = cny * ch
cnw = cnw * cw
cnh = cnh * ch
text = ROW + ' ' + str(cnx) + ' ' + str(cny) + ' ' + str(cnw) + ' ' + str(cnh) + '\n'
f = open(filepath3, "w")
f.writelines(text)
f.close()
cv2.rectangle(bgPic, (fsx, fsy), (esx, esy), (0, 255, 0), 1)
font_size = 1
font = cv2.FONT_HERSHEY_PLAIN
cv2.putText(bgPic, LABEL,(fsx + 2, fsy - 2),font, font_size,(0,255,0),1)
cv2.imshow("MIIL MDC DRAW MODE", bgPic)
items.append(str(FileNum))
FileNum += 1
#Lpos = win.ui.listWidget2.count() - 1
#self.ui.listWidget2.setCurrentRow(Lpos)
app.processEvents() #ボタン処理のタイミング確認用
self.ui.listWidget2.addItems(items)
self.ui.listWidget2.setCurrentRow(cIndex)
    #-----Image rotate-and-save function----------------------------------------
    def cutBRotatePic(self, degree):
        ##########
        #Cut out the region, rotate it, paste it back onto the rotated original picture, and save
        ##########
global FileNum
items = []
rTimes = int(360 / degree)
cIndex = self.ui.listWidget2.currentRow()
for x in SettingList:
ROW, LABEL,TX, TY, BX, BY = x.split(',')
ROW = ROW.replace(" ", "")
LABEL = LABEL.replace(" ", "")
if len(SettingList) == 1:
fy = CurPic.shape[0]
fx = CurPic.shape[1]
copyPic = np.copy(CurPic)
imageArray = np.zeros((fy, fx, 3), np.uint8)
cutPic = copyPic[int(TY):int(BY), int(TX):int(BX)]
imageArray[int(TY):int(BY), int(TX):int(BX)] = cutPic
for i in range(1, rTimes):
deg = i * degree
cx = int(TX) + int((int(BX) - int(TX)) / 2)
cy = int(TY) + int((int(BY) - int(TY)) / 2)
fsx, fsy, esx, esy, _ , _ = getRotatedRectanglePos(deg, cx, cy, int(TX), int(TY), int(BX), int(BY), CurPicWidth, CurPicHeight)
frameR = np.copy(CurPic)
frameRB = np.copy(imageArray)
M = cv2.getRotationMatrix2D((cx, cy), deg, 1)
fPic = cv2.warpAffine(frameR,M,(CurPicWidth, CurPicHeight), borderValue=(255, 255, 255))
fPicB = cv2.warpAffine(frameRB,M,(CurPicWidth, CurPicHeight), borderValue=(255, 255, 255))
cutPicB = fPicB[fsy:esy, fsx:esx]
fPic[fsy:esy, fsx:esx] = cutPicB
cv2.imwrite(DirPath + '/' + str(FileNum) + '.jpg', fPic)
filepath = SettingDataDir + '/' + str(FileNum) + '.set'
filepath2 = AnnotationDir + '/' + str(FileNum) + '.xml'
filepath3 = DirPath + '/' + str(FileNum) + '.txt'
text = ROW + ', ' + LABEL + ', ' + str(fsx) + ', ' + str(fsy) + ', ' + str(esx) + ', ' + str(esy) + '\n'
f = open(filepath, "w")
f.writelines(text)
f.close()
text = '<annotation>' + '\n<filename>' + str(FileNum) + '.jpg</filename>\n<size>\n<width>' + str(fx) + '</width>' + '\n' + '<height>' + str(fy) + '</height>\n</size>\n'
text = text + '<object>\n<name>' + LABEL + '</name>\n<bndbox>' + '\n' + '<xmin>' + str(fsx) + '</xmin>' + '\n' + '<ymin>' + str(fsy) + '</ymin>' + '\n' + '<xmax>' + str(esx) + '</xmax>' + '\n' + '<ymax>' + str(esy) + '</ymax>\n</bndbox>\n</object>\n'
text = text + '</annotation>\n'
f = open(filepath2, "w")
f.writelines(text)
f.close()
cw = 1 / CurPicWidth
ch = 1 / CurPicHeight
cnx = (fsx + esx) / 2
cny = (fsy + esy) / 2
cnw = esx - fsx
cnh = esy - fsy
cnx = cnx * cw
cny = cny * ch
cnw = cnw * cw
cnh = cnh * ch
text = ROW + ' ' + str(cnx) + ' ' + str(cny) + ' ' + str(cnw) + ' ' + str(cnh) + '\n'
f = open(filepath3, "w")
f.writelines(text)
f.close()
cv2.rectangle(fPic, (fsx, fsy), (esx, esy), (0, 255, 0), 1)
font_size = 1
font = cv2.FONT_HERSHEY_PLAIN
cv2.putText(fPic, LABEL,(fsx + 2, fsy - 2),font, font_size,(0,255,0),1)
cv2.imshow("MIIL MDC DRAW MODE", fPic)
items.append(str(FileNum))
FileNum += 1
#Lpos = win.ui.listWidget2.count() - 1
#self.ui.listWidget2.setCurrentRow(Lpos)
app.processEvents() #ボタン処理のタイミング確認用
self.ui.listWidget2.addItems(items)
self.ui.listWidget2.setCurrentRow(cIndex)
    #-----Image rotate-and-save function----------------------------------------
    def cutRotatePic(self, degree):
        ##########
        #Cut out the region, rotate it on a black background, and save
        ##########
global FileNum
items = []
rTimes = int(360 / degree)
cIndex = self.ui.listWidget2.currentRow()
for x in SettingList:
ROW, LABEL,TX, TY, BX, BY = x.split(',')
ROW = ROW.replace(" ", "")
LABEL = LABEL.replace(" ", "")
if len(SettingList) == 1:
fy = CurPic.shape[0]
fx = CurPic.shape[1]
copyPic = np.copy(CurPic)
imageArray = np.zeros((fy, fx, 3), np.uint8)
cutPic = copyPic[int(TY):int(BY), int(TX):int(BX)]
imageArray[int(TY):int(BY), int(TX):int(BX)] = cutPic
for i in range(1, rTimes):
deg = i * degree
cx = int(TX) + int((int(BX) - int(TX)) / 2)
cy = int(TY) + int((int(BY) - int(TY)) / 2)
fsx, fsy, esx, esy, _ , _ = getRotatedRectanglePos(deg, cx, cy, int(TX), int(TY), int(BX), int(BY), CurPicWidth, CurPicHeight)
frameR = np.copy(imageArray)
M = cv2.getRotationMatrix2D((cx, cy), deg, 1)
fPic = cv2.warpAffine(frameR,M,(CurPicWidth, CurPicHeight), borderValue=(255, 255, 255))
cv2.imwrite(DirPath + '/' + str(FileNum) + '.jpg', fPic)
filepath = SettingDataDir + '/' + str(FileNum) + '.set'
filepath2 = AnnotationDir + '/' + str(FileNum) + '.xml'
filepath3 = DirPath + '/' + str(FileNum) + '.txt'
text = ROW + ', ' + LABEL + ', ' + str(fsx) + ', ' + str(fsy) + ', ' + str(esx) + ', ' + str(esy) + '\n'
f = open(filepath, "w")
f.writelines(text)
f.close()
text = '<annotation>' + '\n<filename>' + str(FileNum) + '.jpg</filename>\n<size>\n<width>' + str(fx) + '</width>' + '\n' + '<height>' + str(fy) + '</height>\n</size>\n'
text = text + '<object>\n<name>' + LABEL + '</name>\n<bndbox>' + '\n' + '<xmin>' + str(fsx) + '</xmin>' + '\n' + '<ymin>' + str(fsy) + '</ymin>' + '\n' + '<xmax>' + str(esx) + '</xmax>' + '\n' + '<ymax>' + str(esy) + '</ymax>\n</bndbox>\n</object>\n'
text = text + '</annotation>\n'
f = open(filepath2, "w")
f.writelines(text)
f.close()
cw = 1 / CurPicWidth
ch = 1 / CurPicHeight
cnx = (fsx + esx) / 2
cny = (fsy + esy) / 2
cnw = esx - fsx
cnh = esy - fsy
cnx = cnx * cw
cny = cny * ch
cnw = cnw * cw
cnh = cnh * ch
text = ROW + ' ' + str(cnx) + ' ' + str(cny) + ' ' + str(cnw) + ' ' + str(cnh) + '\n'
f = open(filepath3, "w")
f.writelines(text)
f.close()
cv2.rectangle(fPic, (fsx, fsy), (esx, esy), (0, 255, 0), 1)
font_size = 1
font = cv2.FONT_HERSHEY_PLAIN
cv2.putText(fPic, LABEL,(fsx + 2, fsy - 2),font, font_size,(0,255,0),1)
cv2.imshow("MIIL MDC DRAW MODE", fPic)
items.append(str(FileNum))
FileNum += 1
#Lpos = win.ui.listWidget2.count() - 1
#self.ui.listWidget2.setCurrentRow(Lpos)
app.processEvents() #ボタン処理のタイミング確認用
self.ui.listWidget2.addItems(items)
self.ui.listWidget2.setCurrentRow(cIndex)
    #-----Image rotate-and-save function----------------------------------------
    def pasteRotatePic(self, degree, BackgroundList):
        ##########
        #Cut out the region, rotate it, composite it onto a random background picture, and save
        ##########
global FileNum
items = []
rTimes = int(360 / degree)
cIndex = self.ui.listWidget2.currentRow()
for x in SettingList:
ROW, LABEL,TX, TY, BX, BY = x.split(',')
ROW = ROW.replace(" ", "")
LABEL = LABEL.replace(" ", "")
if len(SettingList) == 1:
fy = CurPic.shape[0]
fx = CurPic.shape[1]
#copyPic = np.copy(CurPic)
#imageArray = np.zeros((fy, fx, 3), np.uint8)
#cutPic = copyPic[int(TY):int(BY), int(TX):int(BX)]
#imageArray[int(TY):int(BY), int(TX):int(BX)] = cutPic
for i in range(1, rTimes):
deg = i * degree
cx = int(TX) + int((int(BX) - int(TX)) / 2)
cy = int(TY) + int((int(BY) - int(TY)) / 2)
fsx, fsy, esx, esy, _ , _ = getRotatedRectanglePos(deg, cx, cy, int(TX), int(TY), int(BX), int(BY), CurPicWidth, CurPicHeight)
frameR = np.copy(CurPic)
M = cv2.getRotationMatrix2D((cx, cy), deg, 1)
fPic = cv2.warpAffine(frameR,M,(CurPicWidth, CurPicHeight), borderValue=(255, 255, 255))
x1, y1, x2, y2, x3 , y3, x4, y4 = getRotatedPos(deg, cx, cy, int(TX), int(TY), int(BX), int(BY))
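                    # Composite via a polygon mask: fill the rotated rectangle (corners from
                    # getRotatedPos) white on a black mask, then np.where keeps the rotated
                    # picture inside the polygon and the background picture outside it. The
                    # background picture is assumed to be at least fy x fx pixels, since roi
                    # slices it to the size of the current picture.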
maskPic = np.zeros((fy, fx, 3), np.uint8)
                    # Vertices of the polygon to fill: the four rotated corners of the bounding rectangle
contours = np.array(
[
[x1, y1],
[x2, y2],
[x3, y3],
[x4, y4],
]
)
cv2.fillConvexPoly(maskPic, points = contours, color=(255, 255, 255))
bgPic = cv2.imread(random.choice(BackgroundList))
roi = bgPic[0:fy, 0:fx, :]
rPic = np.where(maskPic==255, fPic, roi)
cv2.imwrite(DirPath + '/' + str(FileNum) + '.jpg', rPic)
filepath = SettingDataDir + '/' + str(FileNum) + '.set'
filepath2 = AnnotationDir + '/' + str(FileNum) + '.xml'
filepath3 = DirPath + '/' + str(FileNum) + '.txt'
text = ROW + ', ' + LABEL + ', ' + str(fsx) + ', ' + str(fsy) + ', ' + str(esx) + ', ' + str(esy) + '\n'
f = open(filepath, "w")
f.writelines(text)
f.close()
text = '<annotation>' + '\n<filename>' + str(FileNum) + '.jpg</filename>\n<size>\n<width>' + str(fx) + '</width>' + '\n' + '<height>' + str(fy) + '</height>\n</size>\n'
text = text + '<object>\n<name>' + LABEL + '</name>\n<bndbox>' + '\n' + '<xmin>' + str(fsx) + '</xmin>' + '\n' + '<ymin>' + str(fsy) + '</ymin>' + '\n' + '<xmax>' + str(esx) + '</xmax>' + '\n' + '<ymax>' + str(esy) + '</ymax>\n</bndbox>\n</object>\n'
text = text + '</annotation>\n'
f = open(filepath2, "w")
f.writelines(text)
f.close()
cw = 1 / CurPicWidth
ch = 1 / CurPicHeight
cnx = (fsx + esx) / 2
cny = (fsy + esy) / 2
cnw = esx - fsx
cnh = esy - fsy
cnx = cnx * cw
cny = cny * ch
cnw = cnw * cw
cnh = cnh * ch
text = ROW + ' ' + str(cnx) + ' ' + str(cny) + ' ' + str(cnw) + ' ' + str(cnh) + '\n'
f = open(filepath3, "w")
f.writelines(text)
f.close()
cv2.rectangle(rPic, (fsx, fsy), (esx, esy), (0, 255, 0), 1)
font_size = 1
font = cv2.FONT_HERSHEY_PLAIN
cv2.putText(rPic, LABEL,(fsx + 2, fsy - 2),font, font_size,(0,255,0),1)
cv2.imshow("MIIL MDC DRAW MODE", rPic)
items.append(str(FileNum))
FileNum += 1
#Lpos = win.ui.listWidget2.count() - 1
#self.ui.listWidget2.setCurrentRow(Lpos)
app.processEvents() #ボタン処理のタイミング確認用
self.ui.listWidget2.addItems(items)
self.ui.listWidget2.setCurrentRow(cIndex)
    #-----Image rotate-and-save function (about the region center)----------------------------------------
    def rotatePic(self, degree):
        ##########
        #Rotate the current image about the region center and save the results
        ##########
global FileNum
items = []
rTimes = int(360 / degree)
cIndex = self.ui.listWidget2.currentRow()
for x in SettingList:
ROW, LABEL,TX, TY, BX, BY = x.split(',')
ROW = ROW.replace(" ", "")
LABEL = LABEL.replace(" ", "")
if len(SettingList) == 1:
for i in range(1, rTimes):
deg = i * degree
cx = int(TX) + int((int(BX) - int(TX)) / 2)
cy = int(TY) + int((int(BY) - int(TY)) / 2)
fsx, fsy, esx, esy, _ , _ = getRotatedRectanglePos(deg, cx, cy, int(TX), int(TY), int(BX), int(BY), CurPicWidth, CurPicHeight)
frameR = np.copy(CurPic)
M = cv2.getRotationMatrix2D((cx, cy), deg, 1)
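                    # M is the 2x3 affine matrix for a rotation of 'deg' degrees about the region
                    # centre (cx, cy) at scale 1; cv2.warpAffine applies it and fills uncovered
                    # pixels with white via borderValue.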
print(M)
fPic = cv2.warpAffine(frameR,M,(CurPicWidth, CurPicHeight), borderValue=(255, 255, 255))
cv2.imwrite(DirPath + '/' + str(FileNum) + '.jpg', fPic)
filepath = SettingDataDir + '/' + str(FileNum) + '.set'
filepath2 = AnnotationDir + '/' + str(FileNum) + '.xml'
filepath3 = DirPath + '/' + str(FileNum) + '.txt'
text = ROW + ', ' + LABEL + ', ' + str(fsx) + ', ' + str(fsy) + ', ' + str(esx) + ', ' + str(esy) + '\n'
f = open(filepath, "w")
f.writelines(text)
f.close()
fY = CurPic.shape[0]
fX = CurPic.shape[1]
text = '<annotation>' + '\n<filename>' + str(FileNum) + '.jpg</filename>\n<size>\n<width>' + str(fX) + '</width>' + '\n' + '<height>' + str(fY) + '</height>\n</size>\n'
text = text + '<object>\n<name>' + LABEL + '</name>\n<bndbox>' + '\n' + '<xmin>' + str(fsx) + '</xmin>' + '\n' + '<ymin>' + str(fsy) + '</ymin>' + '\n' + '<xmax>' + str(esx) + '</xmax>' + '\n' + '<ymax>' + str(esy) + '</ymax>\n</bndbox>\n</object>\n'
text = text + '</annotation>\n'
f = open(filepath2, "w")
f.writelines(text)
f.close()
cw = 1 / CurPicWidth
ch = 1 / CurPicHeight
cnx = (fsx + esx) / 2
cny = (fsy + esy) / 2
cnw = esx - fsx
cnh = esy - fsy
cnx = cnx * cw
cny = cny * ch
cnw = cnw * cw
cnh = cnh * ch
text = ROW + ' ' + str(cnx) + ' ' + str(cny) + ' ' + str(cnw) + ' ' + str(cnh) + '\n'
f = open(filepath3, "w")
f.writelines(text)
f.close()
cv2.rectangle(fPic, (fsx, fsy), (esx, esy), (0, 255, 0), 1)
font_size = 1
font = cv2.FONT_HERSHEY_PLAIN
cv2.putText(fPic, LABEL,(fsx + 2, fsy - 2),font, font_size,(0,255,0),1)
cv2.imshow("MIIL MDC DRAW MODE", fPic)
items.append(str(FileNum))
FileNum += 1
#Lpos = win.ui.listWidget2.count() - 1
#self.ui.listWidget2.setCurrentRow(Lpos)
app.processEvents() # let the UI process pending events (button handling)
self.ui.listWidget2.addItems(items)
self.ui.listWidget2.setCurrentRow(cIndex)
#----- Function to rotate and save the current image (around the image center) -----
def rotatePicPC(self, degree):
##########
# Rotate the current image and save the results
##########
global FileNum
items = []
rTimes = int(360 / degree)
cIndex = self.ui.listWidget2.currentRow()
for i in range(1, rTimes):
deg = i * degree
cx = int((CurPicWidth -1) / 2)
cy = int((CurPicHeight -1) / 2)
frameR = np.copy(CurPic)
M = cv2.getRotationMatrix2D((cx, cy), deg, 1)
fPic = cv2.warpAffine(frameR,M,(CurPicWidth, CurPicHeight), borderValue=(255, 255, 255))
cv2.imwrite(DirPath + '/' + str(FileNum) + '.jpg', fPic)
text1 = ""
text2 = ""
text3 = ""
fY = CurPic.shape[0]
fX = CurPic.shape[1]
xmlHeader = '<annotation>' + '\n<filename>' + str(FileNum) + '.jpg</filename>\n<size>\n<width>' + str(fX) + '</width>' + '\n' + '<height>' + str(fY) + '</height>\n</size>\n'
xmlfooter = '</annotation>\n'
if len(SettingList) > 0:
for x in SettingList:
ROW, LABEL,TX, TY, BX, BY = x.split(',')
ROW = ROW.replace(" ", "")
LABEL = LABEL.replace(" ", "")
fsx, fsy, esx, esy, _ , _ = getRotatedRectanglePos(deg, cx, cy, int(TX), int(TY), int(BX), int(BY), CurPicWidth, CurPicHeight)
filepath = SettingDataDir + '/' + str(FileNum) + '.set'
filepath2 = AnnotationDir + '/' + str(FileNum) + '.xml'
filepath3 = DirPath + '/' + str(FileNum) + '.txt'
text1 = text1 + ROW + ', ' + LABEL + ', ' + str(fsx) + ', ' + str(fsy) + ', ' + str(esx) + ', ' + str(esy) + '\n'
text2 = text2 + '<object>\n<name>' + LABEL + '</name>\n<bndbox>' + '\n' + '<xmin>' + str(fsx) + '</xmin>' + '\n' + '<ymin>' + str(fsy) + '</ymin>' + '\n' + '<xmax>' + str(esx) + '</xmax>' + '\n' + '<ymax>' + str(esy) + '</ymax>\n</bndbox>\n</object>\n'
cw = 1 / CurPicWidth
ch = 1 / CurPicHeight
cnx = (fsx + esx) / 2
cny = (fsy + esy) / 2
cnw = esx - fsx
cnh = esy - fsy
cnx = cnx * cw
cny = cny * ch
cnw = cnw * cw
cnh = cnh * ch
text3 = text3 + ROW + ' ' + str(cnx) + ' ' + str(cny) + ' ' + str(cnw) + ' ' + str(cnh) + '\n'
cv2.rectangle(fPic, (fsx, fsy), (esx, esy), (0, 255, 0), 1)
font_size = 1
font = cv2.FONT_HERSHEY_PLAIN
cv2.putText(fPic, LABEL, (fsx + 2, fsy - 2), font, font_size,(0,255,0),1)
f = open(filepath, "w")
f.writelines(text1)
f.close()
text2 = xmlHeader + text2 + xmlfooter
f = open(filepath2, "w")
f.writelines(text2)
f.close()
f = open(filepath3, "w")
f.writelines(text3)
f.close()
else:
filepath3 = DirPath + '/' + str(FileNum) + '.txt'
f = open(filepath3, "w")
f.writelines("")
f.close()
cv2.imshow("MIIL MDC DRAW MODE", fPic)
items.append(str(FileNum))
FileNum += 1
#Lpos = win.ui.listWidget2.count() - 1
#self.ui.listWidget2.setCurrentRow(Lpos)
app.processEvents() # let the UI process pending events (button handling)
self.ui.listWidget2.addItems(items)
self.ui.listWidget2.setCurrentRow(cIndex)
#----- Function 1 for saving brightness/hue variations --------------------------
def cngGamma(self, flag):
##########
# Change the brightness and hue of the current image and save the results
##########
lookUpTable = np.empty((1,256), np.uint8) # look-up table built from a gamma value
currentListIndex = self.ui.listWidget2.currentRow()
if currentListIndex != -1: # only when an item in listWidget2 is selected
currentListText = self.ui.listWidget2.currentItem().text()
sFile = SettingDataDir + '/' + currentListText +'.set'
xFile = AnnotationDir + '/' + currentListText +'.xml'
tFile = DirPath + '/' + currentListText +'.txt'
CurPicB = np.copy(CurPic) # copy the current image
if flag == 0 or flag == 2:
CurPicC = np.copy(CurPicB) # copy the image
self.saveFile(CurPicC, sFile, xFile, tFile, 1.4)
#CurPicC = np.copy(CurPicB) # copy the image
#self.saveFile(CurPicC, sFile, xFile, tFile, 1.2)
#CurPicC = np.copy(CurPicB) # copy the image
#self.saveFile(CurPicC, sFile, xFile, tFile, 0.8)
CurPicC = np.copy(CurPicB) # copy the image
self.saveFile(CurPicC, sFile, xFile, tFile, 0.6)
if flag == 1 or flag == 2:
b,g,r = cv2.split(CurPicB) # split into color channels
gamma = 0.8 # set the gamma value
for i in range(256):
lookUpTable[0,i] = np.clip(pow(i / 255.0, gamma) * 255.0, 0, 255)
bL = cv2.LUT(b, lookUpTable) # remap pixel intensities through the look-up table
gamma = 1.2 # set the gamma value
for i in range(256):
lookUpTable[0,i] = np.clip(pow(i / 255.0, gamma) * 255.0, 0, 255)
bH = cv2.LUT(b, lookUpTable) # remap pixel intensities through the look-up table
gamma = 0.8 # set the gamma value
for i in range(256):
lookUpTable[0,i] = np.clip(pow(i / 255.0, gamma) * 255.0, 0, 255)
gL = cv2.LUT(g, lookUpTable) # remap pixel intensities through the look-up table
gamma = 1.2 # set the gamma value
for i in range(256):
lookUpTable[0,i] = np.clip(pow(i / 255.0, gamma) * 255.0, 0, 255)
gH = cv2.LUT(g, lookUpTable) # remap pixel intensities through the look-up table
gamma = 0.8 # set the gamma value
for i in range(256):
lookUpTable[0,i] = np.clip(pow(i / 255.0, gamma) * 255.0, 0, 255)
rL = cv2.LUT(r, lookUpTable) # remap pixel intensities through the look-up table
gamma = 1.2 # set the gamma value
for i in range(256):
lookUpTable[0,i] = np.clip(pow(i / 255.0, gamma) * 255.0, 0, 255)
rH = cv2.LUT(r, lookUpTable) # remap pixel intensities through the look-up table
CurPicC = cv2.merge((bL, gL, r)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((bL, gL, rH)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((bL, g, rL)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((bL, g, r)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((bL, g, rH)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((bL, gH, rL)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((bL, gH, r)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((bL, gH, rH)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((b, gL, rL)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((b, gL, r)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((b, gL, rH)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((b, g, rL)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((b, g, rH)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((b, gH, rL)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((b, gH, r)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((b, gH, rH)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((bH, gL, rL)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((bH, gL, r)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((bH, gL, rH)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((bH, g, rL)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((bH, g, r)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((bH, g, rH)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((bH, gH, rL)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
CurPicC = cv2.merge((bH, gH, r)) # merge color channels
self.saveFile(CurPicC, sFile, xFile, tFile, 0)
self.ui.listWidget2.setCurrentRow(currentListIndex)
#----- Function 2 for saving brightness/hue variations (write a single variant) -----
def saveFile(self, CurPicC, sFile, xFile, tFile, gamma):
global FileNum
if gamma > 0:
lookUpTable = np.empty((1,256), np.uint8)
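# Illustrative helper (not part of the original tool; the body of saveFile is cut
# off above): the gamma-correction pattern used throughout cngGamma()/saveFile(),
# factored into a standalone function. The name apply_gamma and its signature are
# assumptions added for clarity; cv2 and np are the imports already used above.
def apply_gamma(image, gamma):
    """Return a copy of `image` remapped through a gamma look-up table."""
    lut = np.empty((1, 256), np.uint8)
    for i in range(256):
        lut[0, i] = np.clip(pow(i / 255.0, gamma) * 255.0, 0, 255)
    return cv2.LUT(image, lut)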
import numpy as np
import cv2
import dlib
import math
import imutils
def warp_affine(image, points, scale=1.0):
dis1,dis2 = getDis(points[2][0],points[2][1],points[0][0],points[0][1],points[1][0],points[1][1])
eye_center = ((points[0][0] + points[1][0]) / 2, (points[0][1] + points[1][1]) / 2)
dy = points[1][1] - points[0][1]
dx = points[1][0] - points[0][0]
center=(points[2][0],points[2][1])
# compute the rotation angle
angle = cv2.fastAtan2(dy, dx) # rotation angle: angle = cv2.fastAtan2((y2 - y1), (x2 - x1))
print("angle:",angle)
rot = cv2.getRotationMatrix2D(center, angle, scale=scale) # build the rotation matrix
rot_img = cv2.warpAffine(image, rot, dsize=(image.shape[1], image.shape[0]))
delta_width = dis2*1
delta_height1 = dis1*3
delta_height2 = dis1*2
x1 = max(round(center[0]-delta_width),0)
y1 = max(round(center[1]-delta_height1),0)
x2 = min(x1+round(delta_width*2),rot_img.shape[1])
y2 = min(round(y1+delta_height1+delta_height2),rot_img.shape[0])
return rot_img,(x1,y1,x2,y2)
def detect(image):
detector = dlib.get_frontal_face_detector()
# convert to grayscale
img_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# rects holds the detected faces
rects = detector(img_gray, 0)
return rects
def landmark(image,rects):
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
landmarksList=[]
for i in range(len(rects)):
landmarks = np.matrix([[p.x, p.y] for p in predictor(image,rects[i]).parts()])
landmarksList.append(landmarks)
return landmarksList
def vis_landmark(landmarksList,image):
img = image.copy()
for landmarks in landmarksList:
    for idx, point in enumerate(landmarks):
        # coordinates of each of the 68 landmark points
        pos = (point[0, 0], point[0, 1])
        print(idx,pos)
        # draw a circle around each of the 68 feature points with cv2.circle
        cv2.circle(img, pos, 5, color=(0, 255, 0))
        # label the points 1-68 with cv2.putText
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img, str(idx+1), pos, font, 0.8, (0, 0, 255), 1,cv2.LINE_AA)
return img
def dlib_points(landmark):
eye_left=np.mean(np.asarray(landmark[36:42]), axis=0)
eye_right=np.mean(np.asarray(landmark[42:48]), axis=0)
nose_tip = np.asarray(landmark[30])
nose_tip = np.squeeze(nose_tip)
points=[]
points.append([eye_left[0],eye_left[1]])
points.append([eye_right[0],eye_right[1]])
points.append([nose_tip[0],nose_tip[1]])
return points
def getDis(pointX,pointY,lineX1,lineY1,lineX2,lineY2):
a=lineY2-lineY1
b=lineX1-lineX2
c=lineX2*lineY1-lineX1*lineY2
dis1=(math.fabs(a*pointX+b*pointY+c))/(math.pow(a*a+b*b,0.5))
dis2=math.sqrt(a*a+b*b)
return dis1,dis2
def crop_face(image, points):
dis1,dis2 = getDis(points[2][0],points[2][1],points[0][0],points[0][1],points[1][0],points[1][1])
dy = points[1][1] - points[0][1]
dx = points[1][0] - points[0][0]
center=(points[2][0],points[2][1])
print("center:",center)
cv2.circle(image,center,radius =3,
color = (0,0,255), thickness = 2)
print(dis1,dis2)
# compute the rotation angle
angle = cv2.fastAtan2(dy, dx) # rotation angle: angle = cv2.fastAtan2((y2 - y1), (x2 - x1))
delta_width = dis2*1
delta_height1 = dis1*3
delta_height2 = dis1*2
x1 = max(round(center[0]-delta_width),0)
y1 = max(round(center[1]-delta_height1),0)
x2 = min(x1+round(delta_width*2),image.shape[1])
y2 = min(round(y1+delta_height1+delta_height2),image.shape[0])
polygon = np.array([(x1,y1),
(x2,y1),
(x2,y2),
(x1,y2),],np.int32)
print("polygon:",polygon)
#cv2.circle(image,(int(center[0]-delta_width),int(center[1]-delta_height)),radius =3,
# color = (0,0,255), thickness = 2)
#cv2.circle(image,(int(center[0]+delta_width),int(center[1]+delta_height)),radius =3,
# color = (0,0,255), thickness = 2)
# magic that makes sense if one understands numpy arrays
poly = np.reshape(polygon,(4,1,2))
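# Illustrative sketch (crop_face is cut off above): one common way to use the
# reshaped polygon is to rasterize it into a mask and crop the face region.
# The helper below is an assumption added for illustration, not the original code.
def mask_and_crop(image, poly, x1, y1, x2, y2):
    mask = np.zeros(image.shape[:2], dtype=np.uint8)     # blank single-channel mask
    cv2.fillPoly(mask, [poly.astype(np.int32)], 255)     # rasterize the polygon
    masked = cv2.bitwise_and(image, image, mask=mask)    # keep only pixels inside it
    return masked[y1:y2, x1:x2]                          # crop the bounding box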
import os
import re
import glob
import numpy as np
import matplotlib.pylab as plt
import matplotlib
from scipy.spatial import ConvexHull
from scipy.interpolate import interp1d
from itertools import chain, count
from collections import defaultdict
from os import makedirs
from os.path import isdir, isfile, join
from plot_util import *
from plot_other import *
# ------------------------------------------------------------------------------
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
method_labels_map = {
'FH': 'FH',
'FH_Minus': 'FH$^-$',
'NH': 'NH',
'FH_wo_S': 'FH-wo-S',
'FH_Minus_wo_S': 'FH$^{-}$-wo-S',
'NH_wo_S': 'NH-wo-S',
'EH': 'EH',
'Orig_EH': 'EH',
'BH': 'BH',
'Orig_BH': 'BH',
'MH': 'MH',
'Orig_MH': 'MH',
'Random_Scan': 'Random-Scan',
'Sorted_Scan': 'Sorted-Scan',
'Linear': 'Linear-Scan'
}
dataset_labels_map = {
'Yelp': 'Yelp',
'Music': 'Music-100',
'GloVe100': 'GloVe',
'Tiny1M': 'Tiny-1M',
'Msong': 'Msong',
'MovieLens150': 'MovieLens',
'Netflix300': 'Netflix',
'Yahoo300': 'Yahoo',
'Mnist': 'Mnist',
'Sift': 'Sift',
'Gaussian': 'Gaussian',
'Gist': 'Gist',
}
# datasets = ['Yelp', 'GloVe100']
datasets = ['Yelp', 'Music', 'GloVe100', 'Tiny1M', 'Msong']
dataset_labels = [dataset_labels_map[dataset] for dataset in datasets]
method_colors = ['red', 'blue', 'green', 'purple', 'deepskyblue', 'darkorange',
'olive', 'deeppink', 'dodgerblue', 'dimgray']
method_markers = ['o', '^', 's', 'd', '*', 'p', 'x', 'v', 'D', '>']
# ------------------------------------------------------------------------------
def calc_width_and_height(n_datasets, n_rows):
'''
calc the width and height of figure
:params n_datasets: number of dataset (integer)
:params n_rows: number of rows (integer)
:returns: width and height of figure
'''
fig_width = 0.55 + 3.333 * n_datasets
fig_height = 0.80 + 2.5 * n_rows
return fig_width, fig_height
# ------------------------------------------------------------------------------
def get_filename(input_folder, dataset_name, method_name):
'''
get the file prefix 'dataset_method'
:params input_folder: input folder (string)
:params dataset_name: name of dataset (string)
:params method_name: name of method (string)
:returns: file prefix (string)
'''
name = '%s%s_%s.out' % (input_folder, dataset_name, method_name)
return name
# ------------------------------------------------------------------------------
def parse_res(filename, chosen_top_k):
'''
parse result and get info such as ratio, qtime, recall, index_size,
chosen_k, and the setting of different methods
BH: m=2, l=8, b=0.90
Indexing Time: 2.708386 Seconds
Estimated Memory: 347.581116 MB
cand=10000
1 5.948251 2.960960 0.000000 0.000000 0.844941
5 4.475743 2.954690 0.400000 0.000200 0.845279
10 3.891794 2.953910 0.900000 0.000899 0.845703
20 3.289422 2.963460 0.950000 0.001896 0.846547
50 2.642880 2.985980 0.900000 0.004478 0.849082
100 2.244649 3.012860 0.800000 0.007922 0.853307
cand=50000
1 3.905541 14.901140 6.000000 0.000120 4.222926
5 2.863510 14.905370 4.800000 0.000480 4.223249
10 2.626913 14.910181 5.300000 0.001061 4.223649
20 2.392440 14.913270 4.850000 0.001941 4.224458
50 2.081206 14.931760 4.560000 0.004558 4.227065
100 1.852284 14.964050 4.500000 0.008987 4.231267
'''
setting_pattern = re.compile(r'\S+\s+.*=.*')
setting_m = re.compile(r'.*(m)=(\d+).*')
setting_l = re.compile(r'.*(l)=(\d+).*')
setting_M = re.compile(r'.*(M)=(\d+).*')
setting_s = re.compile(r'.*(s)=(\d+).*')
setting_b = re.compile(r'.*(b)=(\d+\.\d+).*')
param_settings = [setting_m, setting_l, setting_M, setting_s, setting_b]
index_time_pattern = re.compile(r'Indexing Time: (\d+\.\d+).*')
memory_usage_pattern = re.compile(r'Estimated Memory: (\d+\.\d+).*')
candidate_pattern = re.compile(r'.*cand=(\d+).*')
records_pattern = re.compile(r'(\d+)\s*(\d+\.\d+)\s*(\d+\.\d+)\s*(\d+\.\d+)\s*(\d+\.\d+)\s*(\d+\.\d+)')
params = {}
with open(filename, 'r') as f:
for line in f:
res = setting_pattern.match(line)
if res:
for param_setting in param_settings:
tmp_res = param_setting.match(line)
if tmp_res is not None:
# print(tmp_res.groups())
params[tmp_res.group(1)] = tmp_res.group(2)
# print("setting=", line)
res = index_time_pattern.match(line)
if res:
chosen_k = float(res.group(1))
# print('chosen_k=', chosen_k)
res = memory_usage_pattern.match(line)
if res:
memory_usage = float(res.group(1))
# print('memory_usage=', memory_usage)
res = candidate_pattern.match(line)
if res:
cand = int(res.group(1))
# print('cand=', cand)
res = records_pattern.match(line)
if res:
top_k = int(res.group(1))
ratio = float(res.group(2))
qtime = float(res.group(3))
recall = float(res.group(4))
precision = float(res.group(5))
fraction = float(res.group(6))
# print(top_k, ratio, qtime, recall, precision, fraction)
if top_k == chosen_top_k:
yield ((cand, params), (top_k, chosen_k, memory_usage,
ratio, qtime, recall, precision, fraction))
# ------------------------------------------------------------------------------
def getindexingtime(res):
return res[1]
def getindexsize(res):
return res[2]
def getratio(res):
return res[3]
def gettime(res):
return res[4]
def getrecall(res):
return res[5]
def getprecision(res):
return res[6]
def getfraction(res):
return res[7]
def get_cand(res):
return int(res[0][0])
def get_l(res):
return int(res[0][1]['l'])
def get_m(res):
return int(res[0][1]['m'])
def get_s(res):
return int(res[0][1]['s'])
def get_time(res):
return float(res[1][4])
def get_recall(res):
return float(res[1][5])
def get_precision(res):
return float(res[1][6])
def get_fraction(res):
return float(res[1][7])
# ------------------------------------------------------------------------------
def lower_bound_curve(xys):
'''
get the time-recall curve by convex hull and interpolation
:params xys: 2-dim array (np.array)
:returns: time-recall curve with interpolation
'''
# add noise and conduct convex hull to find the curve
eps = np.random.normal(size=xys.shape) * 1e-6
xys += eps
# print(xys)
hull = ConvexHull(xys)
hull_vs = xys[hull.vertices]
# hull_vs = np.array(sorted(hull_vs, key=lambda x:x[1]))
# print("hull_vs: ", hull_vs)
# find max pair (maxv0) and min pairs (v1s) from the convex hull
v1s = []
maxv0 = [-1, -1]
for v0, v1 in zip(hull_vs, chain(hull_vs[1:], hull_vs[:1])):
# print(v0, v1)
if v0[1] > v1[1] and v0[0] > v1[0]:
v1s = np.append(v1s, v1, axis=-1)
if v0[1] > maxv0[1]:
maxv0 = v0
# print(v1s, maxv0)
# interpolation: vs[:, 1] -> recall (x), vs[:, 0] -> time (y)
vs = np.array(np.append(maxv0, v1s))
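# Illustrative sketch (lower_bound_curve is cut off above): with interp1d already
# imported, the selected hull vertices are typically turned into a recall -> time
# curve roughly as below. The reshape/sort details are assumptions, not the
# original code.
def interpolate_lower_bound(vs):
    vs = np.asarray(vs).reshape(-1, 2)     # rows of (time, recall)
    vs = vs[np.argsort(vs[:, 1])]          # sort by recall for interpolation
    return interp1d(vs[:, 1], vs[:, 0], bounds_error=False, fill_value='extrapolate')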
###########################################################
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import scikitplot as skplt
import pandas as pd
from sklearn.metrics import classification_report
#Util functions
def Extract(lst):
return [item[0] for item in lst]
fontdict = {'fontsize': 9, 'fontweight': 'medium'}
def training_roc(roc):
roc = roc.toPandas()
plt.plot(roc['FPR'], roc['TPR'])
plt.ylabel('True Positive Rate', fontdict=fontdict, color='black')
plt.xlabel('False Positive Rate', fontdict=fontdict, color='black')
plt.title('Training - ROC Curve', fontsize=10, weight='bold', color='steelblue')
plt.plot([0, 1], [0, 1], 'k--')
plt.xticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], fontsize=8, weight='bold', color='darkslategrey')
plt.yticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], fontsize=8, weight='bold', color='darkslategrey')
plt.show()
def training_pr(pr):
pr = pr.toPandas()
plt.plot(pr['recall'], pr['precision'])
plt.ylabel('Precision', fontdict=fontdict, color='black')
plt.xlabel('Recall', fontdict=fontdict, color='black')
plt.title('Training - PR Curve', fontsize=10, weight='bold', color='steelblue')
plt.plot([0, 1], [0, 1], 'r--')
plt.xticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], fontsize=8, weight='bold', color='darkslategrey')
plt.yticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], fontsize=8, weight='bold', color='darkslategrey')
plt.show()
def plot_cm(cm):
fig_len = 1 + cm.shape[0]
fig_wid = round(fig_len * 0.75)
fig, ax = plt.subplots(figsize=(fig_len, fig_wid))
sns.set(font_scale=0.75)
cm_sum = np.sum(cm, axis=1, keepdims=True)
cm_perc = cm / cm_sum.astype(float) * 100
annot = np.empty_like(cm)
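# Illustrative sketch (plot_cm is cut off above): a typical way to finish this
# kind of confusion-matrix plot is to build count + percentage annotations and
# pass them to seaborn's heatmap. The helper name and formatting are assumptions.
def annotated_heatmap(cm, cm_perc, ax):
    labels = np.empty(cm.shape, dtype=object)
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            labels[i, j] = '%d\n%.1f%%' % (cm[i, j], cm_perc[i, j])
    sns.heatmap(cm, annot=labels, fmt='', cmap='Blues', ax=ax)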
#!/usr/bin/env python
import roslib
roslib.load_manifest('crazyflie_control')
import rospy
import sys
from geometry_msgs.msg import Vector3
from nav_msgs.msg import Odometry
from crazyflie_driver.msg import RPYT
import dynamic_reconfigure.server
from crazyflie_control.cfg import CrazyflieControlConfig
from math import *
import numpy as np
class CrazyflieControlNode(object):
mass = 1.0
gravity = 9.801
kpz = 1.0
kdz = 1.0
kpx = 1.0
kpy = 1.0
kdx = 1.0
kdy = 1.0
xd = 0.0
yd = 0.0
zd = 0.0
xp = 0.0
yp = 0.0
zp = 0.0
x = 0.0
y = 0.0
z = 0.0
q0 = 1.0
q1 = 0.0
q2 = 0.0
q3 = 0.0
last_odometry_update = rospy.Time()
def __init__(self, default_name='apollo', default_update_rate=100):
self.default_name = default_name
self.default_update_rate = default_update_rate
rospy.init_node('crazyflie_control')
self._init_params()
self._init_pubsub()
dynamic_reconfigure.server.Server(CrazyflieControlConfig, self.reconfigure)
self.last_odometry_update = rospy.get_rostime()
def _init_params(self):
self.name = rospy.get_param('~name', self.default_name)
self.update_rate = rospy.get_param('~update_rate', self.default_update_rate)
def _init_pubsub(self):
self.vicon_sub = rospy.Subscriber('/' + self.name + '/odom', Odometry, self.set_odometry)
self.rotation_desired_pub = rospy.Publisher('/' + self.name + '/rotation_desired', RPYT)
self.rotation_actual_pub = rospy.Publisher('/' + self.name + '/rotation_actual', Vector3)
def set_odometry(self, msg):
now = rospy.get_rostime()
dt = now - self.last_odometry_update
x_old = self.x
y_old = self.y
z_old = self.z
self.x = msg.pose.pose.position.x * 0.001
self.y = msg.pose.pose.position.y * 0.001
self.z = msg.pose.pose.position.z * 0.001
self.q1 = msg.pose.pose.orientation.x
self.q2 = msg.pose.pose.orientation.y
self.q3 = msg.pose.pose.orientation.z
self.q0 = msg.pose.pose.orientation.w
self.xp = (2.0/dt.to_sec())*(self.x - x_old) - self.xp
self.yp = (2.0/dt.to_sec())*(self.y - y_old) - self.yp
self.zp = (2.0/dt.to_sec())*(self.z - z_old) - self.zp
self.last_odometry_update = now
def reconfigure(self, config, level):
self.kpx = config['kpx']
self.kpy = config['kpy']
self.kpz = config['kpz']
self.kdx = config['kdx']
self.kdy = config['kdy']
self.kdz = config['kdz']
self.xd = config['xd']
self.yd = config['yd']
self.zd = config['zd']
self.power = config['power']
return config
def spin(self):
rospy.loginfo("Spinning")
r = rospy.Rate(self.update_rate)
while not rospy.is_shutdown():
gx = 2 * (self.q1*self.q3 - self.q0*self.q2);
gy = 2 * (self.q0*self.q1 + self.q2*self.q3);
gz = self.q0*self.q0 - self.q1*self.q1 - self.q2*self.q2 + self.q3*self.q3;
yaw = atan2(2*self.q1*self.q2 - 2*self.q0*self.q3, 2*self.q0*self.q0 + 2*self.q1*self.q1 - 1) * 180 /pi;
pitch = atan(gx / sqrt(gy*gy + gz*gz)) * 180 / pi;
roll = atan(gy / sqrt(gx*gx + gz*gz)) * 180 / pi;
msg_actual = Vector3()
msg_actual.x = roll
msg_actual.y = pitch
msg_actual.z = yaw
self.rotation_actual_pub.publish(msg_actual)
R = [[0.0]*3 for _ in range(3)]
R[0][0] = pow(self.q0,2) + pow(self.q1,2) - pow(self.q2,2) - pow(self.q3,2)
R[0][1] = 2*self.q1*self.q2 - 2*self.q0*self.q3
R[0][2] = 2*self.q1*self.q3 + 2*self.q0*self.q2
R[1][0] = 2*self.q1*self.q2 + 2*self.q0*self.q3
R[1][1] = pow(self.q0,2) - pow(self.q1,2) + pow(self.q2,2) - pow(self.q3,2)
R[1][2] = 2*self.q2*self.q3 - 2*self.q0*self.q1
R[2][0] = 2*self.q1*self.q3 - 2*self.q0*self.q2
R[2][1] = 2*self.q2*self.q3 + 2*self.q0*self.q1
R[2][2] = pow(self.q0,2) - pow(self.q1,2) - pow(self.q2,2) + pow(self.q3,2)
r_matrix = np.matrix(R)
# This is the thrust, should be also placed in the function below...
f = self.mass / R[2][2] * ( self.gravity - self.kpz*(self.z-self.zd) - self.kdz*self.zp )
r13d = self.mass / f * ( -self.kpx*(self.x-self.xd) - self.kdx*self.xp )
r23d = self.mass / f * ( -self.kpy*(self.y-self.yd) - self.kdy*self.yp )
r33d = sqrt(1-pow(r13d,2)-pow(r23d,2))
v = [0]*3
v[0] = -r23d
v[1] = r13d
v[2] = 0.0
angle = acos(r33d)
ca = cos(angle)
sa = sin(angle)
A = [[0.0]*3 for _ in range(3)]
A[0][0] = ca + pow(v[0],2)*(1-ca)
A[0][1] = v[0]*v[1]*(1-ca) - v[2]*sa
A[0][2] = v[0]*v[2]*(1-ca) + v[1]*sa
A[1][0] = v[0]*v[1]*(1-ca) + v[2]*sa
A[1][1] = ca + pow(v[1],2)*(1-ca)
A[1][2] = v[1]*v[2]*(1-ca) - v[0]*sa
A[2][0] = v[0]*v[2]*(1-ca) - v[1]*sa
A[2][1] = v[1]*v[2]*(1-ca) + v[0]*sa
A[2][2] = ca + pow(v[2],2)*(1-ca)
a_matrix = np.matrix(A)
rd = [0]*3
rd[0] = r13d
rd[1] = r23d
rd[2] = r33d
rd_matrix = np.matrix(rd)
gd = np.transpose(r_matrix)
from math import radians, degrees, sin, cos, tan, asin, acos, atan2
import numpy as np
import matplotlib.pyplot as plt
#Define Latitude in radians
lat=radians(49.3978620896919)
#Define hoirzontal limit in altitude in degree
horizon_limit=12
def equ_to_altaz(ha,dec):
""" Transforms equatorial coordinates (hourangle, declination)
to horizontal coordinates (azimuth,altitude).
Input: ha in hours as float, dec in degree as float.
Returns altitude and azimuth as float in degrees.
"""
#Check if Input arrays have same dimensions
if not np.isscalar(ha) and not np.isscalar(dec):
if (len(ha)!=len(dec) or ha.ndim!=1 or dec.ndim!=1):
return 0
#Convert hour angle to radians
#Convert hour angle to degree first and convert negative hour angles to
#positive ones (e.g. -11 to 13)
ha=ha+24*(ha<0)
ha=np.radians(ha*15.)
#Convert declination to radians
dec=np.radians(dec)
#Calculate altitude and azimuth (formulaes from celestial mechanics script
#of <NAME>)
#For altitude we have the formula:
#sin(alt)=cos(ha)*cos(lat)*cos(dec)+sin(lat)*sin(dec)
alt=np.arcsin(np.sin(lat)*np.sin(dec)+np.cos(lat)*np.cos(dec)*np.cos(ha))
#For azimuth we have the formula
#tan(az)=-sin(ha)/(cos(lat)*tan(dec)-sin(lat)*cos(ha))
az=np.arctan2(np.sin(ha),(-np.cos(lat)*np.tan(dec)+np.sin(lat)*np.cos(ha)))
#Convert alt and az to degrees
alt=np.degrees(alt)
az=np.degrees(az)
#If Input was an array longer than 1 return the float arrays
if not np.isscalar(alt):
return (alt,az)
#If Input was single values than also format the Output
#In that case transform arrays to float
alt=float(alt)
az=float(az)
formated_coord_list=[]
#Also Format alt/az to +dd°mm'ss" as string
#Get the sign of ha_float
for coord in [alt,az]:
if coord>=0:
sign='+'
elif coord<0:
sign='-'
#Calculate the absolute of coord to convert it to hh mm ss
coord=abs(coord)
#Format hour angle to hh:mm:ss
deg=int(coord)
rest=abs(coord-deg)*60
minutes=int(rest)
rest=abs(rest-minutes)*60
#We want to round seconds to get a more continous updating of seconds
seconds=round(rest)
#But we have to take care of rounding up to 60. Increase minutes by one in that case.
if seconds==60:
seconds=0
minutes=minutes+1
coord='''{}{:02}°{:02}'{:02}"'''.format(sign,deg,minutes,seconds)
formated_coord_list.append(coord)
#Return altitude and azimuth
return (alt,az,formated_coord_list[0],formated_coord_list[1])
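# Example usage (illustrative values, not from the original module):
#   alt, az, alt_str, az_str = equ_to_altaz(-3.0, 20.0)    # scalar input -> 4 return values
#   alts, azs = equ_to_altaz(np.array([-3.0, 2.0]), np.array([20.0, -5.0]))  # array input -> 2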
def altaz_to_equ(alt,az):
""" Transforms horizontal coordinates (azimuth,altitude).
to equatorial coordinates (hourangle, declination).
Input: alt in degrees as float or array of floats,
az in degrees as float or array of floats.
Returns ha as float in hours and dec as float in degrees.
"""
#Convert alt and az to radians
alt=np.radians(alt)
az=np.radians(az)
#Calculate hour angle and declination (formulaes from celestial mechanics script
#of Genevieve Parmentier)
#For hour angle we have the formula:
#tan(ha)=(sin(az))/(cos(lat)*tan(alt)+cos(az)*sin(lat))
ha=np.arctan2(np.sin(az),np.cos(lat)*np.tan(alt)+np.cos(az)*np.sin(lat))
#For declination we have the formula:
#sin(dec)=sin(lat)*sin(alt)-cos(lat)*cos(alt)*cos(az)
dec=np.arcsin(np.sin(lat)*np.sin(alt)-np.cos(lat)*np.cos(alt)*np.cos(az))
#Convert ha to hours
ha=np.degrees(ha)/15.
#Convert dec to degrees
dec=np.degrees(dec)
return (ha, dec)
def check_coordinates(alt,az):
"""Checks if coordinates are observable and safe to slew.
Returns True if the coordinates are above the altitude limit (safe to slew).
Returns False if the coordinates violate the limit.
"""
#Check if alt and az are set as floats
if (not isinstance(alt, (int, float))
or not isinstance(az, (int, float))):
return False
#Calculate altitude limit
alt_limit=calc_alt_limit(az)
#Check if altitude is above or below limit
if alt>=alt_limit:
return True
else:
return False
def calc_alt_limit(az):
""" Calculates altitude limits.
Returns Altitude limit in degrees.
Input: Array of az as floats between -180 and 180
"""
#Check Input: If int or float make array
if isinstance(az,(float,int)):
az=np.array([az])
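# Illustrative sketch (the rest of calc_alt_limit is not shown): a minimal
# placeholder consistent with the docstring would return the constant
# horizon_limit for every azimuth; the real implementation presumably encodes an
# azimuth-dependent horizon profile for the site.
#   alt_limit = np.full(az.shape, horizon_limit, dtype=float)
#   return alt_limit if alt_limit.size > 1 else float(alt_limit[0])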
import imageio
import numpy as np
import cv2
from PIL import Image
from scipy.spatial import distance as dist
import pyautogui
import time
import os
pyautogui.PAUSE = 0.1
DIFFICULTIES = ['medium']
CELL_DEF = {
"medium": (14,18)
}
# Background Color Definitions
BACKGROUND_COLORS = [(229,193,161), (215,183,155), (136,174,70), (171,213,94), (163, 207, 86)]
UNCLICKABLE = [(229,193,161), (215,183,155)]
CLICKABLE = [(171,213,94), (163, 207, 86)]
# Cell grid number color definitions
NUMBER_COLORS = [(27,121,206), (63,142,69), (210,51,54), (134,54,158), (254,146,0), (14,152,166)]
class Cell(object):
def __init__(self, value, left, top, width,height):
self.value = value
self.left = int(left)
self.top = int(top)
self.width = int(width)
self.height = int(height)
self.mouse_center = (left+width/2, top+height/2)
class SweeperGrid(object):
def __init__(self, difficulty='medium'):
if difficulty not in DIFFICULTIES:
raise Exception("Only {} difficulties supported. You passed: {}".format(DIFFICULTIES, difficulty))
medium_grid = cv2.imread('resources/medium_grid.png', cv2.IMREAD_GRAYSCALE)
# Visualization Params
self.screen_vis = None
# Locate the grid and save where it is
(self.x_min, self.y_min),(self.x_max, self.y_max) = self.getGridPosition(medium_grid)
self.grid_w, self.grid_h = (self.x_max-self.x_min, self.y_max-self.y_min)
# Compute and initialze each grid cell
self.rows, self.cols = CELL_DEF[difficulty]
self.cell_w, self.cell_h = (self.grid_w/self.cols, self.grid_h/self.rows)
x_grid = np.linspace(0, self.grid_w, num = self.cols, endpoint=False)
y_grid = np.linspace(0, self.grid_h, num = self.rows, endpoint=False)
self.cells = [[Cell(-1, self.x_min+x, self.y_min+y, self.cell_w, self.cell_h) for x in x_grid] for y in y_grid]
def getGridPosition(self, grid_template):
screen = np.asarray(imageio.imread('<screen>'))
self.screen_vis = screen
screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
(tH, tW) = grid_template.shape[:2]
found = None
for scale in np.linspace(0.2, 1.0, 100)[::-1]:
resized = np.array(Image.fromarray(screen).resize( (int(screen.shape[1] * scale), int(screen.shape[0] * scale)) ))
r = screen.shape[0] / float(resized.shape[0])
if resized.shape[0] < tH or resized.shape[1] < tW:
break
result = cv2.matchTemplate(resized, grid_template, cv2.TM_CCOEFF_NORMED)
(_, maxVal, _, maxLoc) = cv2.minMaxLoc(result)
if found is None or maxVal > found[0]:
found = (maxVal, maxLoc, r)
if maxVal > 0.99:
break
(maxVal, maxLoc, r) = found
if maxVal < 0.9:
raise Exception("Unable to find a suitable playing grid")
(startX, startY) = (int(maxLoc[0] * r), int(maxLoc[1] * r))
(endX, endY) = (int((maxLoc[0] + tW) * r), int((maxLoc[1] + tH) * r))
return (startX, startY), (endX, endY)
def updateGrid(self):
screen = np.asarray(imageio.imread('<screen>'))
self.screen_vis = screen
cells = []
for row in self.cells:
for col in row:
cells.append(screen[col.top:col.top+col.height, col.left: col.left+col.width].copy())
cells = np.stack(cells)
cell_masks = self._getMaskedCells(cells, BACKGROUND_COLORS)
for i, (cell, mask) in enumerate(zip(cells, cell_masks)):
row, col = (int(i/self.cols), int(i%self.cols))
mean = cv2.mean(cell, mask = mask.astype('uint8'))[:3]
if np.sum(mean) > 50:
minDist = self._getClosestColor(mean, NUMBER_COLORS)
self.cells[row][col].value = minDist[1] + 1
else:
mean = cv2.mean(cell)[:3]
minDist = self._getClosestColor(mean, CLICKABLE + UNCLICKABLE)
if minDist[1]<=1:
self.cells[row][col].value = -1
else:
self.cells[row][col].value = -2
def updateMines(self, mine_locations):
for mines in mine_locations:
self.cells[mines[0]][mines[1]].value = -3
def _getMaskedCells(self, cells, background_colors):
final_mask = np.zeros(cells.shape[:-1])
for color in background_colors:
final_mask = np.logical_or(final_mask, np.all(cells == color, axis=-1))
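# Illustrative sketch (the method is cut off above): the loop presumably keeps
# OR-ing the per-color matches into final_mask and then inverts it, so the
# returned masks select only the non-background (number) pixels of each cell.
# A standalone version, written as an assumption for illustration:
def masked_cells_sketch(cells, background_colors):
    final_mask = np.zeros(cells.shape[:-1], dtype=bool)
    for color in background_colors:
        final_mask = np.logical_or(final_mask, np.all(cells == color, axis=-1))
    return np.logical_not(final_mask)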
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.constants import golden
mpl.rc("text", usetex=True)
mpl.rc("font", family="serif")
x = np.array([-1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1])
t = np.array([-4.9, -3.5, -2.8, 0.8, 0.3, -1.6, -1.3, 0.5, 2.1, 2.9, 5.6])
def f(x):
return 3*np.sin((1/2)*np.pi * x) - 2*np.sin((3/2) * np.pi * x)
Ms = [2, 4, 6, 8]
fig = plt.figure(figsize=(8, 8/golden))
for i, M in enumerate(Ms):
N = len(x)
X = np.zeros((N, M+1))
for m in range(M+1):
X[:, m] = x**m
w = np.linalg.inv(X.T @ X) @ X.T @ t
h = np.poly1d(np.flip(w, 0))
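# Illustrative sketch (the loop body is cut off above): each iteration presumably
# evaluates the fitted polynomial h on a dense grid and draws one subplot per M,
# e.g. (details are assumptions, not the original script):
#   ax = fig.add_subplot(2, 2, i + 1)
#   xx = np.linspace(-1, 1, 200)
#   ax.plot(x, t, 'o', label='data')
#   ax.plot(xx, h(xx), label='fit, M = %d' % M)
#   ax.plot(xx, f(xx), '--', label='true f(x)')
#   ax.legend()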
#!/usr/bin/env python
# coding: utf-8
# # Parts-of-Speech Tagging - Working with tags and Numpy
# In this lecture notebook you will create a matrix using some tag information and then modify it using different approaches.
# This will serve as hands-on experience working with Numpy and as an introduction to some elements used for POS tagging.
# In[1]:
import numpy as np
import pandas as pd
# ### Some information on tags
# For this notebook you will be using a toy example including only three tags (or states). In a real world application there are many more tags which can be found [here](https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html).
# In[2]:
# Define tags for Adverb, Noun and To (the preposition) , respectively
tags = ['RB', 'NN', 'TO']
# In this week's assignment you will construct some dictionaries that provide useful information of the tags and words you will be working with.
#
# One of these dictionaries is the `transition_counts` which counts the number of times a particular tag happened next to another. The keys of this dictionary have the form `(previous_tag, tag)` and the values are the frequency of occurrences.
#
# Another one is the `emission_counts` dictionary which will count the number of times a particular pair of `(tag, word)` appeared in the training dataset.
#
# In general think of `transition` when working with tags only and of `emission` when working with tags and words.
#
# In this notebook you will be looking at the first one:
# In[3]:
# Define 'transition_counts' dictionary
# Note: values are the same as the ones in the assignment
transition_counts = {
('NN', 'NN'): 16241,
('RB', 'RB'): 2263,
('TO', 'TO'): 2,
('NN', 'TO'): 5256,
('RB', 'TO'): 855,
('TO', 'NN'): 734,
('NN', 'RB'): 2431,
('RB', 'NN'): 358,
('TO', 'RB'): 200
}
# Notice that there are 9 combinations of the 3 tags used. Each tag can appear after the same tag so you should include those as well.
#
# ### Using Numpy for matrix creation
#
# Now you will create a matrix that includes these frequencies using Numpy arrays:
# In[4]:
# Store the number of tags in the 'num_tags' variable
num_tags = len(tags)
# Initialize a 3X3 numpy array with zeros
transition_matrix = np.zeros((num_tags, num_tags))
# Print matrix
transition_matrix
# Visually you can see the matrix has the correct dimensions. Don't forget you can check this too using the `shape` attribute:
# In[5]:
# Print shape of the matrix
transition_matrix.shape
# Before filling this matrix with the values of the `transition_counts` dictionary you should sort the tags so that their placement in the matrix is consistent:
# In[6]:
# Create sorted version of the tag's list
sorted_tags = sorted(tags)
# Print sorted list
sorted_tags
# To fill this matrix with the correct values you can use a `double for loop`. You could also use `itertools.product` to one line this double loop:
# In[7]:
# Loop rows
for i in range(num_tags):
# Loop columns
for j in range(num_tags):
# Define tag pair
tag_tuple = (sorted_tags[i], sorted_tags[j])
# Get frequency from transition_counts dict and assign to (i, j) position in the matrix
transition_matrix[i, j] = transition_counts.get(tag_tuple)
# Print matrix
transition_matrix
# Looks like this worked fine. However the matrix can be hard to read as `Numpy` is more about efficiency, rather than presenting values in a pretty format.
#
# For this you can use a `Pandas DataFrame`. In particular, a function that takes the matrix as input and prints out a pretty version of it will be very useful:
# In[8]:
# Define 'print_matrix' function
def print_matrix(matrix):
print(pd.DataFrame(matrix, index=sorted_tags, columns=sorted_tags))
# Notice that the tags are not a parameter of the function. This is because the `sorted_tags` list will not change in the rest of the notebook so it is safe to use the variable previously declared. To test this function simply run:
# In[9]:
# Print the 'transition_matrix' by calling the 'print_matrix' function
print_matrix(transition_matrix)
# That is a lot better, isn't it?
#
# As you may have already deducted this matrix is not symmetrical.
# ### Working with Numpy for matrix manipulation
# Now that you got the matrix set up it is time to see how a matrix can be manipulated after being created.
#
# `Numpy` allows vectorized operations which means that operations that would normally include looping over the matrix can be done in a simpler manner. This is consistent with treating numpy arrays as matrices since you get support for common matrix operations. You can do matrix multiplication, scalar multiplication, vector addition and many more!
#
# For instance try scaling each value in the matrix by a factor of $\frac{1}{10}$. Normally you would loop over each value in the matrix, updating them accordingly. But in Numpy this is as easy as dividing the whole matrix by 10:
# In[10]:
# Scale transition matrix
transition_matrix = transition_matrix/10
# Print scaled matrix
print_matrix(transition_matrix)
# Another trickier example is to normalize each row so that each value is equal to $\frac{value}{sum \,of \,row}$.
#
# This can be easily done with vectorization. First you will compute the sum of each row:
# In[11]:
# Compute sum of row for each row
rows_sum = transition_matrix.sum(axis=1, keepdims=True)
# Print sum of rows
rows_sum
# Notice that the `sum()` method was used. This method does exactly what its name implies. Since the sum of the rows was desired the axis was set to `1`. In Numpy `axis=1` refers to the columns so the sum is done by summing each column of a particular row, for each row.
#
# Also the `keepdims` parameter was set to `True` so the resulting array had shape `(3, 1)` rather than `(3,)`. This was done so that the axes were consistent with the desired operation.
#
# When working with Numpy, always remember to check the shape of the arrays you are working with, many unexpected errors happen because of axes not being consistent. The `shape` attribute is your friend for these cases.
# In[12]:
# Normalize transition matrix
transition_matrix = transition_matrix / rows_sum
# Print normalized matrix
print_matrix(transition_matrix)
# Notice that the normalization that was carried out forces the sum of each row to be equal to `1`. You can easily check this by running the `sum` method on the resulting matrix:
# In[13]:
transition_matrix.sum(axis=1, keepdims=True)
# For a final example you are asked to modify each value of the diagonal of the matrix so that they are equal to the `log` of the sum of the current row plus the current value. When doing mathematical operations like this one don't forget to import the `math` module.
#
# This can be done using a standard `for loop` or `vectorization`. You'll see both in action:
# In[14]:
import math
# Copy transition matrix for for-loop example
t_matrix_for = np.copy(transition_matrix)
# Copy transition matrix for numpy functions example
t_matrix_np = np.copy(transition_matrix)
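# In[15]: (hypothetical continuation; the notebook is cut off here.) A sketch of
# the described diagonal update -- each diagonal entry becomes its current value
# plus the log of the corresponding row sum -- in both styles:
# for-loop version
for i in range(num_tags):
    t_matrix_for[i, i] = t_matrix_for[i, i] + math.log(float(rows_sum[i]))
# vectorized version
d = np.diag(t_matrix_np).reshape(num_tags, 1)   # extract the diagonal as a column
d = d + np.log(rows_sum)                        # add the log of each row sum
np.fill_diagonal(t_matrix_np, d)                # write it back in place
print_matrix(t_matrix_np)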
import json
import os
import time
from copy import deepcopy
import TransportMaps.Distributions as dist
import TransportMaps.Likelihoods as like
from typing import List, Dict
from matplotlib import pyplot as plt
from factors.Factors import Factor, ExplicitPriorFactor, ImplicitPriorFactor, \
LikelihoodFactor, BinaryFactorMixture, KWayFactor
from sampler.NestedSampling import GlobalNestedSampler
from sampler.SimulationBasedSampler import SimulationBasedSampler
from slam.Variables import Variable, VariableType
from slam.FactorGraph import FactorGraph
from slam.BayesTree import BayesTree, BayesTreeNode
import numpy as np
from sampler.sampler_utils import JointFactor
from utils.Functions import sort_pair_lists
from utils.Visualization import plot_2d_samples
from utils.Functions import sample_dict_to_array, array_order_to_dict
class SolverArgs:
def __init__(self,
elimination_method: str = "natural",
posterior_sample_num: int = 500,
local_sample_num: int = 500,
store_clique_samples: bool = False,
local_sampling_method="direct",
adaptive_posterior_sampling=None,
*args, **kwargs
):
# graph-related and tree-related params
self.elimination_method = elimination_method
self.posterior_sample_num = posterior_sample_num
self.store_clique_samples = store_clique_samples
self.local_sampling_method = local_sampling_method
self.local_sample_num = local_sample_num
self.adaptive_posterior_sampling = adaptive_posterior_sampling
def jsonStr(self):
return json.dumps(self.__dict__)
class CliqueSeparatorFactor(ImplicitPriorFactor):
def sample(self, num_samples: int, **kwargs):
return NotImplementedError("implementation depends on density models")
class ConditionalSampler:
def conditional_sample_given_observation(self, conditional_dim,
obs_samples=None,
sample_number=None):
"""
This method returns samples with the dimension of conditional_dim.
If sample_number is given, samples of the first conditional_dim variables are return.
If obs_samples is given, samples of the first conditional_dim variables after
the dimension of obs_samples will be returned. obs_samples.shape = (sample num, dim)
Note that the dims here are of the vectorized point on manifolds not the dim of manifold.
"""
raise NotImplementedError("Implementation depends on density estimation method.")
class FactorGraphSolver:
"""
This is the abstract class of factor graph solvers.
It mainly works as:
1. the interface for users to define and solve factor graphs.
2. the maintainer of factor graphs and Bayes tree for incremental inference
3. fitting probabilistic models to the working part of factor graph and Bayes tree
4. inference (sampling) on the entire Bayes tree
The derived class may rely on different probabilistic modeling approaches.
"""
def __init__(self, args: SolverArgs):
"""
Parameters
----------
elimination_method : string
option of heuristics for variable elimination ordering.
TODO: this can be a dynamic parameter when updating Bayes tree
"""
self._args = args
self._physical_graph = FactorGraph()
self._working_graph = FactorGraph()
self._physical_bayes_tree = None
self._working_bayes_tree = None
self._conditional_couplings = {} # map from Bayes tree clique to flows
self._implicit_factors = {} # map from Bayes tree clique to factor
self._samples = {} # map from variable to samples
self._new_nodes = []
self._new_factors = []
self._clique_samples = {} # map from Bayes tree clique to samples
self._clique_true_obs = {} # map from Bayes tree clique to observations which augments flow models
self._clique_density_model = {} # map from Bayes tree clique to flow model
# map from Bayes tree clique to variable pattern; (Separator,Frontal) in reverse elimination order
self._clique_variable_pattern = {}
self._elimination_ordering = []
self._reverse_ordering_map = {}
self._temp_training_loss = {}
def set_args(self, args: SolverArgs):
raise NotImplementedError("Implementation depends on probabilistic modeling approaches.")
@property
def elimination_method(self) -> str:
return self._args.elimination_method
@property
def elimination_ordering(self) -> List[Variable]:
return self._elimination_ordering
@property
def physical_vars(self) -> List[Variable]:
return self._physical_graph.vars
@property
def new_vars(self) -> List[Variable]:
return self._new_nodes
@property
def working_vars(self) -> List[Variable]:
return self._working_graph.vars
@property
def physical_factors(self) -> List[Factor]:
return self._physical_graph.factors
@property
def new_factors(self) -> List[Factor]:
return self._new_factors
@property
def working_factors(self) -> List[Factor]:
return self._working_graph.factors
@property
def working_factor_graph(self) -> FactorGraph:
return self._working_graph
@property
def physical_factor_graph(self) -> FactorGraph:
return self._physical_graph
@property
def working_bayes_tree(self) -> BayesTree:
return self._working_bayes_tree
@property
def physical_bayes_tree(self) -> BayesTree:
return self._physical_bayes_tree
def generate_natural_ordering(self) -> None:
"""
Generate the ordering by which nodes are added
"""
self._elimination_ordering = self._physical_graph.vars + self._new_nodes
def generate_pose_first_ordering(self) -> None:
"""
Generate the ordering by which nodes are added and lmk eliminated later
"""
natural_order = self._physical_graph.vars + self._new_nodes
pose_list = []
lmk_list = []
for node in natural_order:
if node._type == VariableType.Landmark:
lmk_list.append(node)
else:
pose_list.append(node)
self._elimination_ordering = pose_list + lmk_list
def generate_ccolamd_ordering(self) -> None:
"""
"""
physical_graph_ordering = [var for var in self._elimination_ordering if var not in self._working_graph.vars]
working_graph_ordering = self._working_graph.analyze_elimination_ordering(
method="ccolamd", last_vars=
[[var for var in self._working_graph.vars if
var.type == VariableType.Pose][-1]])
self._elimination_ordering = physical_graph_ordering + working_graph_ordering
def generate_ordering(self) -> None:
"""
Generate the ordering by which Bayes tree should be generated
"""
if self._args.elimination_method == "natural":
self.generate_natural_ordering()
elif self._args.elimination_method == "ccolamd":
self.generate_ccolamd_ordering()
elif self._args.elimination_method == "pose_first":
self.generate_pose_first_ordering()
self._reverse_ordering_map = {
var: index for index, var in
enumerate(self._elimination_ordering[::-1])}
# TODO: Add other ordering methods
def add_node(self, var: Variable = None, name: str = None,
dim: int = None) -> "FactorGraphSolver":
"""
Add a new node
The node has not been added to the physical or current factor graphs
:param var:
:param name: used only when variable is not specified
:param dim: used only when variable is not specified
:return: the current problem
"""
if var:
self._new_nodes.append(var)
else:
self._new_nodes.append(Variable(name, dim))
return self
def add_factor(self, factor: Factor) -> "FactorGraphSolver":
"""
Add a prior factor to specified nodes
The factor has not been added to physical or current factor graphs
:param factor
:return: the current problem
"""
self._new_factors.append(factor)
return self
def add_prior_factor(self, vars: List[Variable],
distribution: dist.Distribution) -> "FactorGraphSolver":
"""
Add a prior factor to specified nodes
The factor has not been added to physical or current factor graphs
:param vars
:param distribution
:return: the current problem
"""
self._new_factors.append(ExplicitPriorFactor(
vars=vars, distribution=distribution))
return self
def add_likelihood_factor(self, vars: List[Variable],
likelihood: like.LikelihoodBase) -> "FactorGraphSolver":
"""
Add a likelihood factor to specified nodes
The factor has not been added to physical or current factor graphs
:param vars
:param likelihood
:return: the current problem
"""
self._new_factors.append(LikelihoodFactor(
vars=vars, log_likelihood=likelihood))
return self
def update_physical_and_working_graphs(self, timer: List[float] = None, device: str = "cpu"
) -> "FactorGraphSolver":
"""
Add all new nodes and factors into the physical factor graph,
retrieve the working factor graph, update Bayes trees
:return: the current problem
"""
start = time.time()
# Determine the affected variables in the physical Bayes tree
old_nodes = set(self.physical_vars)
nodes_of_new_factors = set.union(*[set(factor.vars) for
factor in self._new_factors])
old_nodes_of_new_factors = set.intersection(old_nodes,
nodes_of_new_factors)
# Get the working factor graph
if self._physical_bayes_tree: # if not first step, get sub graph
affected_nodes, sub_bayes_trees = \
self._physical_bayes_tree. \
get_affected_vars_and_partial_bayes_trees(
vars=old_nodes_of_new_factors)
self._working_graph = self._physical_graph.get_sub_factor_graph_with_prior(
variables=affected_nodes,
sub_trees=sub_bayes_trees,
clique_prior_dict=self._implicit_factors)
else:
sub_bayes_trees = set()
for node in self._new_nodes:
self._working_graph.add_node(node)
for factor in self._new_factors:
self._working_graph.add_factor(factor)
# Get the working Bayes tree
old_ordering = self._elimination_ordering
self.generate_ordering()
self._working_bayes_tree = self._working_graph.get_bayes_tree(
ordering=[var for var in self._elimination_ordering
if var in set(self.working_vars)])
# Update the physical factor graph
for node in self._new_nodes:
self._physical_graph.add_node(node)
for factor in self._new_factors:
self._physical_graph.add_factor(factor)
# Update the physical Bayesian tree
self._physical_bayes_tree = self._working_bayes_tree.__copy__()
self._physical_bayes_tree.append_child_bayes_trees(sub_bayes_trees)
# Delete legacy conditional samplers in the old tree and
# convert the density model w/o separator at leaves to density model w/ separator.
cliques_to_delete = set()
for old_clique in set(self._clique_density_model.keys()).difference(self._physical_bayes_tree.clique_nodes):
for new_clique in self._working_bayes_tree.clique_nodes:
if old_clique.vars == new_clique.vars and [var for var in old_ordering if var in old_clique.vars] == \
[var for var in self._elimination_ordering if var in new_clique.vars]:
# This clique was the root in the old tree but is leaf in the new tree.
# If the ordering of variables remains the same, its density model can be re-used.
# Update the clique to density model dict
self._clique_true_obs[new_clique] = self._clique_true_obs[old_clique]
if old_clique in self._clique_variable_pattern:
self._clique_variable_pattern[new_clique] = self._clique_variable_pattern[old_clique]
if old_clique in self._clique_samples:
self._clique_samples[new_clique] = self._clique_samples[old_clique]
self._clique_density_model[new_clique] = \
self.root_clique_density_model_to_leaf(old_clique, new_clique, device)
# since new clique will be skipped, related factors shall be eliminated beforehand.
# TODO: update _clique_density_model.keys() in which some clique parents change
# TODO: this currently has no impact on results
# TODO: if we store all models or clique-depend values on cliques, this issue will disappear
new_separator_factor = None
if new_clique.separator:
# extract new factor over separator
separator_var_list = sorted(new_clique.separator, key=lambda x: self._reverse_ordering_map[x])
new_separator_factor = \
self.clique_density_to_separator_factor(separator_var_list,
self._clique_density_model[new_clique],
self._clique_true_obs[old_clique])
self._implicit_factors[new_clique] = new_separator_factor
self._working_graph = self._working_graph.eliminate_clique_variables(clique=new_clique,
new_factor=new_separator_factor)
break
cliques_to_delete.add(old_clique)
for old_clique in cliques_to_delete:
del self._clique_density_model[old_clique]
del self._clique_true_obs[old_clique]
if old_clique in self._clique_variable_pattern:
del self._clique_variable_pattern[old_clique]
if old_clique in self._clique_samples:
del self._clique_samples[old_clique]
# Clear all newly added variables and factors
self._new_nodes = []
self._new_factors = []
end = time.time()
if timer is not None:
timer.append(end - start)
return self
def root_clique_density_model_to_leaf(self,
old_clique: BayesTreeNode,
new_clique: BayesTreeNode,
device) -> "ConditionalSampler":
"""
when old clique and new clique have same variables but different division of frontal and separator vars,
recycle the density model in the old clique and convert it to that in the new clique.
"""
raise NotImplementedError("Implementation depends on probabilistic modeling")
def clique_density_to_separator_factor(self,
separator_var_list: List[Variable],
density_model,
true_obs: np.ndarray) -> CliqueSeparatorFactor:
"""
extract marginal of separator variables from clique density as separator factor
"""
raise NotImplementedError("Implementation depends on probabilistic modeling")
def incremental_inference(self,
timer: List[float] = None,
clique_dim_timer: List[List[float]] = None,
*args, **kwargs
):
self.fit_tree_density_models(timer=timer,
clique_dim_timer=clique_dim_timer,
*args, **kwargs)
if self._args.adaptive_posterior_sampling is None:
self._samples = self.sample_posterior(timer=timer, *args, **kwargs)
else:
self._samples = self.adaptive_posterior(timer=timer, *args, **kwargs)
return self._samples
def fit_clique_density_model(self,
clique,
samples,
var_ordering,
timer,
*args, **kwargs) -> "ConditionalSampler":
raise NotImplementedError("Implementation depends on probabilistic modeling.")
def adaptive_posterior(self, timer: List[float] = None, *args, **kwargs
) -> Dict[Variable, np.ndarray]:
"""
Generate samples for all variables
"""
raise NotImplementedError("implementation depends on density models.")
def fit_tree_density_models(self,
timer: List[float] = None,
clique_dim_timer: List[List[float]] = None,
*args, **kwargs):
"""
By the order of Bayes tree, perform local sampling and training
on all cliques
:return:
"""
self._temp_training_loss = {}
clique_ordering = self._working_bayes_tree.clique_ordering()
total_clique_num = len(clique_ordering)
clique_cnt = 1
before_clique_time = time.time()
while clique_ordering:
start_clique_time = time.time()
clique = clique_ordering.pop()
if clique in self._clique_density_model:
end_clique_time = time.time()
print(f"\tTime for clique {clique_cnt}/{total_clique_num}: " + str(
end_clique_time - start_clique_time) + " sec, "
"total time elapsed: " + str(
end_clique_time - before_clique_time) + " sec")
clique_cnt += 1
if (clique_dim_timer is not None):
clique_dim_timer.append([clique.dim, end_clique_time - before_clique_time])
continue
# local sampling
sampler_start = time.time()
local_samples, sample_var_ordering, true_obs = \
self.clique_training_sampler(clique,
num_samples=self._args.local_sample_num,
method=self._args.local_sampling_method)
sampler_end = time.time()
if timer is not None:
timer.append(sampler_end - sampler_start)
self._clique_true_obs[clique] = true_obs
if self._args.store_clique_samples:
self._clique_samples[clique] = local_samples
local_density_model = \
self.fit_clique_density_model(clique=clique,
samples=local_samples,
var_ordering=sample_var_ordering,
timer=timer)
self._clique_density_model[clique] = local_density_model
new_separator_factor = None
if clique.separator:
# extract new factor over separator
separator_list = sorted(clique.separator,
key=lambda x:
self._reverse_ordering_map[x])
new_separator_factor = self.clique_density_to_separator_factor(separator_list,
local_density_model,
true_obs)
self._implicit_factors[clique] = new_separator_factor
self._working_graph = self._working_graph.eliminate_clique_variables(clique=clique,
new_factor=new_separator_factor)
end_clique_time = time.time()
print(f"\tTime for clique {clique_cnt}/{total_clique_num}: " + str(
end_clique_time - start_clique_time) + " sec, "
"total time elapsed: " + str(
end_clique_time - before_clique_time) + " sec" + ", clique_dim is " + str(clique.dim))
if (clique_dim_timer is not None):
clique_dim_timer.append([clique.dim, end_clique_time - before_clique_time])
clique_cnt += 1
def clique_training_sampler(self, clique: BayesTreeNode, num_samples: int, method: str):
r""" This function returns training samples, simulated variables, and unused observations
"""
graph = self._working_graph.get_clique_factor_graph(clique)
variable_pattern = \
self._working_bayes_tree.clique_variable_pattern(clique)
if method == "direct":
sampler = SimulationBasedSampler(factors=graph.factors, vars=variable_pattern)
samples, var_list, unused_obs = sampler.sample(num_samples)
elif method == "nested" or method == "dynamic nested":
ns_sampler = GlobalNestedSampler(nodes=variable_pattern, factors=graph.factors)
samples = ns_sampler.sample(live_points=num_samples, sampling_method=method)
var_list = variable_pattern
unused_obs = np.array([])
else:
raise ValueError("Unknown sampling method.")
return samples, var_list, unused_obs
def sample_posterior(self, timer: List[float] = None, *args, **kwargs
) -> Dict[Variable, np.ndarray]:
"""
Generate samples for all variables
"""
num_samples = self._args.posterior_sample_num
start = time.time()
stack = [self._physical_bayes_tree.root]
samples = {}
while stack:
# Retrieve the working clique
clique = stack.pop()
# Local sampling
frontal_list = sorted(clique.frontal,
key=lambda x: self._reverse_ordering_map[x])
separator_list = sorted(clique.separator,
key=lambda x: self._reverse_ordering_map[x])
clique_density_model = self._clique_density_model[clique]
obs = self._clique_true_obs[clique]
aug_separator_samples = np.zeros(shape=(num_samples, 0))
if len(obs) != 0:
aug_separator_samples = np.tile(obs, (num_samples, 1))
for var in separator_list:
aug_separator_samples = np.hstack((aug_separator_samples,
samples[var]))
if aug_separator_samples.shape[1] != 0:
frontal_samples = clique_density_model. \
conditional_sample_given_observation(conditional_dim=clique.frontal_dim,
obs_samples=aug_separator_samples)
else: # the root clique
frontal_samples = clique_density_model. \
conditional_sample_given_observation(conditional_dim=clique.frontal_dim,
sample_number=num_samples)
# Dispatch samples
cur_index = 0
for var in frontal_list:
samples[var] = frontal_samples[:,
cur_index: cur_index + var.dim]
cur_index += var.dim
if clique.children:
for child in clique.children:
stack.append(child)
end = time.time()
if timer is not None:
timer.append(end - start)
return samples
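# Note: `samples` maps each Variable to a (posterior_sample_num x var.dim) array;
# cliques are visited root-first, so a child clique's separator samples are always
# available before its frontal variables are drawn.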
def plot2d_posterior(self, title: str = None, xlim=None, ylim=None,
marker_size: float = 1, if_legend: bool = False):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
for i in range(len_var):
cur_sample = self._samples[vars[i]]
plt.scatter(cur_sample[:, 0], cur_sample[:, 1], marker=".",
s=marker_size)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
if if_legend:
plt.legend([var.name for var in vars])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
plt.title(title)
fig_handle = plt.gcf()
plt.show()
return fig_handle
def results(self):
return list(self._samples.values()), list(self._samples.keys())
def plot2d_mean_points(self, title: str = None, xlim=None, ylim=None,
if_legend: bool = False):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
x_list = []
y_list = []
for i in range(len_var):
cur_sample = self._samples[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
if if_legend:
plt.legend([var.name for var in vars])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
plt.title(title)
fig_handle = plt.gcf()
plt.show()
return fig_handle
def plot2d_mean_rbt_only(self, title: str = None, xlim=None, ylim=None,
if_legend: bool = False, fname=None, front_size=None, show_plot=False, **kwargs):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
x_list = []
y_list = []
lmk_list = []
for i in range(len_var):
if vars[i]._type == VariableType.Landmark:
lmk_list.append(vars[i])
else:
cur_sample = self._samples[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
for var in lmk_list:
cur_sample = self._samples[var]
plt.scatter(cur_sample[:, 0], cur_sample[:, 1], label=var.name)
if if_legend:
if front_size is not None:
plt.legend(fontsize=front_size)
else:
plt.legend()
if front_size is not None:
plt.xlabel('x (m)', fontsize=front_size)
plt.ylabel('y (m)', fontsize=front_size)
else:
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
if front_size is not None:
plt.title(title, fontsize=front_size)
else:
plt.title(title)
fig_handle = plt.gcf()
if fname is not None:
plt.savefig(fname)
if show_plot:
plt.show()
return fig_handle
def plot2d_MAP_rbt_only(self, title: str = None, xlim=None, ylim=None,
if_legend: bool = False, fname=None, front_size=None):
# xlim and ylim are tuples
vars = self._elimination_ordering
jf = JointFactor(self.physical_factors, vars)
# list(self._samples.keys())
all_sample = sample_dict_to_array(self._samples, vars)
log_pdf = jf.log_pdf(all_sample)
max_idx = np.argmax(log_pdf)
map_sample = all_sample[max_idx:max_idx+1]
map_sample_dict = array_order_to_dict(map_sample, vars)
len_var = len(vars)
x_list = []
y_list = []
lmk_list = []
for i in range(len_var):
if vars[i]._type == VariableType.Landmark:
lmk_list.append(vars[i])
else:
cur_sample = map_sample_dict[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
for var in lmk_list:
cur_sample = map_sample_dict[var]
plt.scatter(cur_sample[:, 0], cur_sample[:, 1], label=var.name)
if if_legend:
if front_size is not None:
plt.legend(fontsize=front_size)
else:
plt.legend()
if front_size is not None:
plt.xlabel('x (m)', fontsize=front_size)
plt.ylabel('y (m)', fontsize=front_size)
else:
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
if front_size is not None:
plt.title(title, fontsize=front_size)
else:
plt.title(title)
fig_handle = plt.gcf()
if fname is not None:
plt.savefig(fname)
plt.show()
return fig_handle
def plot2d_mean_poses(self, title: str = None, xlim=None, ylim=None,
width: float = 0.05, if_legend: bool = False):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
x_list = []
y_list = []
for i in range(len_var):
cur_sample = self._samples[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
# th_mean = circmean(cur_sample[:,2])
# dx, dy = np.cos(th_mean), np.sin(th_mean)
# plt.arrow(x-dx/2, y-dy/2, dx, dy,
# head_width=4*width,
# width=0.05)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
if if_legend:
plt.legend([var.name for var in vars])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
plt.title(title)
fig_handle = plt.gcf()
plt.show()
return fig_handle
def plot_factor_graph(self):
pass
def plot_bayes_tree(self):
pass
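# Hedged usage sketch (constructor arguments are hypothetical; the method names below
# mirror those used by run_incrementally and sample_posterior in this file):
#     solver = FactorGraphSolver(args)                     # hypothetical args object
#     for node in step_nodes: solver.add_node(node)
#     for factor in step_factors: solver.add_factor(factor)
#     solver.update_physical_and_working_graphs(timer=[])
#     cur_sample = solver.incremental_inference(timer=[])  # Dict[Variable, np.ndarray]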
def run_incrementally(case_dir: str, solver: FactorGraphSolver, nodes_factors_by_step, truth=None, traj_plot=False,
plot_args=None, check_root_transform=False) -> None:
run_count = 1
while os.path.exists(f"{case_dir}/run{run_count}"):
run_count += 1
os.mkdir(f"{case_dir}/run{run_count}")
run_dir = f"{case_dir}/run{run_count}"
print("create run dir: " + run_dir)
file = open(f"{run_dir}/parameters", "w+")
params = solver._args.jsonStr()
print(params)
file.write(params)
file.close()
num_batches = len(nodes_factors_by_step)
observed_nodes = []
step_timer = []
step_list = []
posterior_sampling_timer = []
fitting_timer = []
mixture_factor2weights = {}
show_plot = True
if "show_plot" in plot_args and not plot_args["show_plot"]:
show_plot = False
for i in range(num_batches):
step_nodes, step_factors = nodes_factors_by_step[i]
for node in step_nodes:
solver.add_node(node)
for factor in step_factors:
solver.add_factor(factor)
if isinstance(factor, BinaryFactorMixture):
mixture_factor2weights[factor] = []
observed_nodes += step_nodes
step_list.append(i)
step_file_prefix = f"{run_dir}/step{i}"
detailed_timer = []
clique_dim_timer = []
start = time.time()
solver.update_physical_and_working_graphs(timer=detailed_timer)
cur_sample = solver.incremental_inference(timer=detailed_timer, clique_dim_timer=clique_dim_timer)
end = time.time()
step_timer.append(end - start)
print(f"step {i}/{num_batches} time: {step_timer[-1]} sec, "
f"total time: {sum(step_timer)}")
file = open(f"{step_file_prefix}_ordering", "w+")
file.write(" ".join([var.name for var in solver.elimination_ordering]))
file.close()
file = open(f"{step_file_prefix}_split_timing", "w+")
file.write(" ".join([str(t) for t in detailed_timer]))
file.close()
file = open(f"{step_file_prefix}_step_training_loss", "w+")
last_training_loss = json.dumps(solver._temp_training_loss)
file.write(last_training_loss)
file.close()
posterior_sampling_timer.append(detailed_timer[-1])
fitting_timer.append(sum(detailed_timer[1:-1]))
X = np.hstack([cur_sample[var] for var in solver.elimination_ordering])
np.savetxt(fname=step_file_prefix, X=X)
# check transformation
if check_root_transform:
root_clique = solver.physical_bayes_tree.root
root_clique_model = solver._clique_density_model[root_clique]
y = root_clique_model.prior.sample((3000,))
tx = deepcopy(y)
if hasattr(root_clique_model, "flows"):
for f in root_clique_model.flows[::-1]:
tx = f.inverse_given_separator(tx, None)
y = y.detach().numpy()
tx = tx.detach().numpy()
np.savetxt(fname=step_file_prefix + '_root_normal_data', X=y)
| np.savetxt(fname=step_file_prefix + '_root_transformed', X=tx) | numpy.savetxt |
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the python library parsing Revisited Oxford/Paris datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from delf.python.detect_to_retrieve import dataset
class DatasetTest(tf.test.TestCase):
def testParseEasyMediumHardGroundTruth(self):
# Define input.
ground_truth = [{
'easy': np.array([10, 56, 100]),
'hard': np.array([0]),
'junk': np.array([6, 90])
}, {
'easy': np.array([], dtype='int64'),
'hard': [5],
'junk': [99, 100]
}, {
'easy': [33],
'hard': [66, 99],
'junk': np.array([], dtype='int64')
}]
# Run tested function.
(easy_ground_truth, medium_ground_truth,
hard_ground_truth) = dataset.ParseEasyMediumHardGroundTruth(ground_truth)
# Define expected outputs.
expected_easy_ground_truth = [{
'ok': np.array([10, 56, 100]),
'junk': np.array([6, 90, 0])
}, {
'ok': np.array([], dtype='int64'),
'junk': np.array([99, 100, 5])
}, {
'ok': np.array([33]),
'junk': np.array([66, 99])
}]
expected_medium_ground_truth = [{
'ok': np.array([10, 56, 100, 0]),
'junk': np.array([6, 90])
}, {
'ok': np.array([5]),
'junk': np.array([99, 100])
}, {
'ok': np.array([33, 66, 99]),
'junk': np.array([], dtype='int64')
}]
expected_hard_ground_truth = [{
'ok': np.array([0]),
'junk': np.array([6, 90, 10, 56, 100])
}, {
'ok': np.array([5]),
'junk': np.array([99, 100])
}, {
'ok': np.array([66, 99]),
'junk': np.array([33])
}]
# Compare actual versus expected.
def _AssertListOfDictsOfArraysAreEqual(ground_truth, expected_ground_truth):
"""Helper function to compare ground-truth data.
Args:
ground_truth: List of dicts of arrays.
expected_ground_truth: List of dicts of arrays.
"""
self.assertEqual(len(ground_truth), len(expected_ground_truth))
for i, ground_truth_entry in enumerate(ground_truth):
self.assertEqual(sorted(ground_truth_entry.keys()), ['junk', 'ok'])
self.assertAllEqual(ground_truth_entry['junk'],
expected_ground_truth[i]['junk'])
self.assertAllEqual(ground_truth_entry['ok'],
expected_ground_truth[i]['ok'])
_AssertListOfDictsOfArraysAreEqual(easy_ground_truth,
expected_easy_ground_truth)
_AssertListOfDictsOfArraysAreEqual(medium_ground_truth,
expected_medium_ground_truth)
_AssertListOfDictsOfArraysAreEqual(hard_ground_truth,
expected_hard_ground_truth)
def testAdjustPositiveRanksWorks(self):
# Define inputs.
positive_ranks = np.array([0, 2, 6, 10, 20])
junk_ranks = np.array([1, 8, 9, 30])
# Run tested function.
adjusted_positive_ranks = dataset.AdjustPositiveRanks(
positive_ranks, junk_ranks)
# Define expected output.
expected_adjusted_positive_ranks = [0, 1, 5, 7, 17]
# Compare actual versus expected.
self.assertAllEqual(adjusted_positive_ranks,
expected_adjusted_positive_ranks)
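# The expected values follow from dropping junk images out of the ranking: each
# positive rank is decreased by the number of junk ranks smaller than it, e.g.
# rank 10 has junk ranks {1, 8, 9} below it, hence 10 - 3 = 7.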
def testComputeAveragePrecisionWorks(self):
# Define input.
positive_ranks = [0, 2, 5]
# Run tested function.
average_precision = dataset.ComputeAveragePrecision(positive_ranks)
# Define expected output.
expected_average_precision = 0.677778
# Compare actual versus expected.
self.assertAllClose(average_precision, expected_average_precision)
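# One way to reproduce the expected value (assuming the trapezoidal interpolation of
# the Revisited Oxford protocol): average the (left + right) / 2 precision at each
# positive rank, i.e. (1.0 + (1/2 + 2/3)/2 + (2/5 + 3/6)/2) / 3
# = (1.0 + 0.583333 + 0.45) / 3 ~= 0.677778.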
def testComputePRAtRanksWorks(self):
# Define inputs.
positive_ranks = np.array([0, 2, 5])
desired_pr_ranks = np.array([1, 5, 10])
# Run tested function.
precisions, recalls = dataset.ComputePRAtRanks(positive_ranks,
desired_pr_ranks)
# Define expected outputs.
expected_precisions = [1.0, 0.4, 0.5]
expected_recalls = [0.333333, 0.666667, 1.0]
# Compare actual versus expected.
self.assertAllClose(precisions, expected_precisions)
self.assertAllClose(recalls, expected_recalls)
def testComputeMetricsWorks(self):
# Define inputs: 3 queries. For the last one, there are no expected images
# to be retrieved
sorted_index_ids = np.array([[4, 2, 0, 1, 3], [0, 2, 4, 1, 3],
[0, 1, 2, 3, 4]])
ground_truth = [{
'ok': np.array([0, 1]),
'junk': np.array([2])
}, {
'ok': np.array([0, 4]),
'junk': np.array([], dtype='int64')
}, {
'ok': np.array([], dtype='int64'),
'junk': np.array([], dtype='int64')
}]
desired_pr_ranks = [1, 2, 5]
# Run tested function.
(mean_average_precision, mean_precisions, mean_recalls, average_precisions,
precisions, recalls) = dataset.ComputeMetrics(sorted_index_ids,
ground_truth,
desired_pr_ranks)
# Define expected outputs.
expected_mean_average_precision = 0.604167
expected_mean_precisions = [0.5, 0.5, 0.666667]
expected_mean_recalls = [0.25, 0.5, 1.0]
expected_average_precisions = [0.416667, 0.791667, float('nan')]
expected_precisions = [[0.0, 0.5, 0.666667], [1.0, 0.5, 0.666667],
[float('nan'),
float('nan'),
float('nan')]]
expected_recalls = [[0.0, 0.5, 1.0], [0.5, 0.5, 1.0],
[float('nan'), float('nan'),
float('nan')]]
# Compare actual versus expected.
self.assertAllClose(mean_average_precision, expected_mean_average_precision)
self.assertAllClose(mean_precisions, expected_mean_precisions)
self.assertAllClose(mean_recalls, expected_mean_recalls)
self.assertAllClose(average_precisions, expected_average_precisions)
self.assertAllClose(precisions, expected_precisions)
self.assertAllClose(recalls, expected_recalls)
def testSaveMetricsFileWorks(self):
# Define inputs.
mean_average_precision = {'hard': 0.7, 'medium': 0.9}
mean_precisions = {
'hard': np.array([1.0, 0.8]),
'medium': np.array([1.0, 1.0])
}
mean_recalls = {
'hard': | np.array([0.5, 0.8]) | numpy.array |
"""Film Mode Matching Mode Solver
Implementation of the Film Mode Matching (FMM) algorithm, as described in:
- Sudbo, "Film mode matching a versatile numerical method for vector mode field calculations in dielectric waveguides", Pure App. Optics, 2 (1993), 211-233
- Sudbo, "Improved formulation of the film mode matching method for mode field calculations in dielectric waveguides", Pure App. Optics, 3 (1994), 381-388
Examples
========
See L{FMM1d} and L{FMM2d}.
"""
from __future__ import print_function
from builtins import zip
from builtins import range
from builtins import object
from functools import reduce
__author__ = '<NAME> & <NAME>'
import numpy
import scipy
import scipy.optimize
import copy
import EMpy.utils
from EMpy.modesolvers.interface import *
import pylab
class Message(object):
def __init__(self, msg, verbosity=0):
self.msg = msg
self.verbosity = verbosity
def show(self, verbosity=0):
if self.verbosity <= verbosity:
print((self.verbosity - 1) * '\t' + self.msg)
class Struct(object):
"""Empty class to fill with whatever I want. Maybe a dictionary would do?"""
pass
class Boundary(object):
"""Boundary conditions.
Electric and Magnetic boundary conditions are translated to Symmetric
and Antisymmetric for each field.
@ivar xleft: Left bc on x.
@ivar xright: Right bc on x.
@ivar yleft: Left bc on y.
@ivar yright: Right bc on y.
"""
def __init__(self, xleft='Electric Wall',
yleft='Magnetic Wall',
xright='Electric Wall',
yright='Magnetic Wall'):
"""Set the boundary conditions, validate and translate."""
self.xleft = xleft
self.yleft = yleft
self.xright = xright
self.yright = yright
self.validate()
self.translate()
def validate(self):
"""Validate the input.
@raise ValueError: Unknown boundary.
"""
if not reduce(lambda x, y: x & y,
[(x == 'Electric Wall') | (x == 'Magnetic Wall') for x in [self.xleft, self.yleft, self.xright, self.yright]]):
raise ValueError('Unknown boundary.')
def translate(self):
"""Translate for each field.
@raise ValueError: Unknown boundary.
"""
self.xh = ''
self.xe = ''
self.yh = ''
self.ye = ''
if self.xleft == 'Electric Wall':
self.xh += 'A'
self.xe += 'S'
elif self.xleft == 'Magnetic Wall':
self.xh += 'S'
self.xe += 'A'
else:
raise ValueError('Unknown boundary.')
if self.xright == 'Electric Wall':
self.xh += 'A'
self.xe += 'S'
elif self.xright == 'Magnetic Wall':
self.xh += 'S'
self.xe += 'A'
else:
raise ValueError('Unknown boundary.')
if self.yleft == 'Electric Wall':
self.yh += 'A'
self.ye += 'S'
elif self.yleft == 'Magnetic Wall':
self.yh += 'S'
self.ye += 'A'
else:
raise ValueError('Unknown boundary.')
if self.yright == 'Electric Wall':
self.yh += 'A'
self.ye += 'S'
elif self.yright == 'Magnetic Wall':
self.yh += 'S'
self.ye += 'A'
else:
raise ValueError('Unknown boundary.')
def __str__(self):
return 'xleft = %s, xright = %s, yleft = %s, yright = %s' % (self.xleft, self.xright, self.yleft, self.yright)
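# Example (illustrative sketch): with the default walls, translate() above gives
#     b = Boundary()   # xleft/xright = 'Electric Wall', yleft/yright = 'Magnetic Wall'
#     b.xh, b.xe       # -> 'AA', 'SS'  (H antisymmetric, E symmetric at electric walls)
#     b.yh, b.ye       # -> 'SS', 'AA'  (H symmetric, E antisymmetric at magnetic walls)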
class Slice(object):
"""One dimensional arrangement of layers and 1d modes.
A slice is made of a stack of layers, i.e. refractive indeces with a thickness,
with given boundary conditions.
It holds 1d modes, both TE and TM.
@ivar x1: start point of the slice in x.
@ivar x2: end point of the slice in x.
@ivar Uy: array of points delimiting the layers.
@ivar boundary: boundary conditions.
@ivar modie: E modes.
@ivar modih: H modes.
@ivar Ux: array of points delimiting the slices in x (internally set).
@ivar refractiveindex: refractive index of all the slices (internally set).
@ivar epsilon: epsilon of all the slices (internally set).
@ivar wl: vacuum wavelength.
"""
def __init__(self, x1, x2, Uy, boundary, modie, modih):
self.x1 = x1
self.x2 = x2
self.Uy = Uy
self.boundary = boundary
self.modie = modie
self.modih = modih
def __str__(self):
return 'x1 = %g, x2 = %g\nUy = %s\nboundary = %s' % (self.x1, self.x2, self.Uy, self.boundary)
class FMMMode1d(Mode):
"""One dimensional mode.
Note
====
Virtual class.
"""
pass
class FMMMode1dx(FMMMode1d):
"""Matching coefficients in the x-direction.
L{FMMMode1dy}s are weighted by these coefficients to assure continuity.
"""
def __str__(self):
return 'sl = %s\nsr = %s\nal = %s\nar = %s\nk = %s\nU = %s' % \
(self.sl.__str__(),
self.sr.__str__(),
self.al.__str__(),
self.ar.__str__(),
self.k.__str__(),
self.U.__str__())
class FMMMode1dy(FMMMode1d):
"""One dimensional mode.
It holds the coefficients that describe the mode in the FMM expansion.
Note
====
The mode is supposed to be one dimensional, in the y direction.
@ivar sl: array of values of the mode at the lhs of each slice.
@ivar sr: array of values of the mode at the rhs of each slice.
@ivar al: array of values of the derivative of the mode at the lhs of each slice.
@ivar ar: array of values of the derivative of the mode at the rhs of each slice.
@ivar k: wavevector inside each layer.
@ivar keff: effective wavevector.
@ivar zero: how good the mode is? it must be as close to zero as possible!
@ivar Uy: array of points delimiting the layers.
"""
def eval(self, y_):
"""Evaluate the mode at y."""
y = numpy.atleast_1d(y_)
ny = len(y)
f = numpy.zeros(ny, dtype=complex)
for iU in range(len(self.U) - 1):
k = self.k[iU]
sl = self.sl[iU]
al = self.al[iU]
Ul = self.U[iU]
Ur = self.U[iU+1]
idx = numpy.where((Ul <= y) & (y <= Ur))
yy = y[idx] - Ul
f[idx] = sl * numpy.cos(k * yy) + al * sinxsux(k * yy) * yy
return f
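# Within layer iU the field evaluated above is f(y) = sl*cos(k*(y - Ul)) + al*sin(k*(y - Ul))/k,
# assuming the helper sinxsux(x) returns sin(x)/x; sl and al are the value and the
# derivative of the mode at the left edge of that layer.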
def plot(self, y):
f = self.eval(y)
pylab.plot(y, numpy.real(f), y, numpy.imag(f))
pylab.legend(('real', 'imag'))
pylab.xlabel('y')
pylab.ylabel('mode1d')
pylab.show()
def __str__(self):
return 'sl = %s\nsr = %s\nal = %s\nar = %s\nk = %s\nkeff = %s\nzero = %s\nU = %s' % \
(self.sl.__str__(),
self.sr.__str__(),
self.al.__str__(),
self.ar.__str__(),
self.k.__str__(),
self.keff.__str__(),
self.zero.__str__(),
self.U.__str__())
class FMMMode2d(Mode):
"""Two dimensional mode.
It holds the coefficients that describe the mode in the FMM expansion.
"""
def get_x(self, n=100):
return numpy.linspace(self.slicesx[0].Ux[0], self.slicesx[0].Ux[-1], n)
def get_y(self, n=100):
return numpy.linspace(self.slicesx[0].Uy[0], self.slicesx[0].Uy[-1], n)
def eval(self, x_=None, y_=None):
"""Evaluate the mode at x,y."""
if x_ is None:
x = self.get_x()
else:
x = numpy.atleast_1d(x_)
if y_ is None:
y = self.get_y()
else:
y = numpy.atleast_1d(y_)
nmodi = len(self.modie)
lenx = len(x)
leny = len(y)
k0 = 2. * numpy.pi / self.slicesx[0].wl
kz = self.keff
uh = numpy.zeros((nmodi, lenx), dtype=complex)
ue = numpy.zeros_like(uh)
udoth = numpy.zeros_like(uh)
udote = numpy.zeros_like(uh)
Exsh = numpy.zeros((leny, nmodi), dtype=complex)
Exah = numpy.zeros_like(Exsh)
Exse = numpy.zeros_like(Exsh)
Exae = numpy.zeros_like(Exsh)
Eysh = numpy.zeros_like(Exsh)
Eyah = numpy.zeros_like(Exsh)
Eyse = numpy.zeros_like(Exsh)
Eyae = numpy.zeros_like(Exsh)
Ezsh = numpy.zeros_like(Exsh)
Ezah = numpy.zeros_like(Exsh)
Ezse = numpy.zeros_like(Exsh)
Ezae = numpy.zeros_like(Exsh)
cBxsh = numpy.zeros_like(Exsh)
cBxah = numpy.zeros_like(Exsh)
cBxse = numpy.zeros_like(Exsh)
cBxae = numpy.zeros_like(Exsh)
cBysh = numpy.zeros_like(Exsh)
cByah = numpy.zeros_like(Exsh)
cByse = numpy.zeros_like(Exsh)
cByae = numpy.zeros_like(Exsh)
cBzsh = numpy.zeros_like(Exsh)
cBzah = numpy.zeros_like(Exsh)
cBzse = numpy.zeros_like(Exsh)
cBzae = numpy.zeros_like(Exsh)
ExTE = numpy.zeros((leny,lenx), dtype=complex)
EyTE = numpy.zeros_like(ExTE)
EzTE = numpy.zeros_like(ExTE)
ExTM = numpy.zeros_like(ExTE)
EyTM = numpy.zeros_like(ExTE)
EzTM = numpy.zeros_like(ExTE)
cBxTE = numpy.zeros_like(ExTE)
cByTE = numpy.zeros_like(ExTE)
cBzTE = numpy.zeros_like(ExTE)
cBxTM = numpy.zeros_like(ExTE)
cByTM = numpy.zeros_like(ExTE)
cBzTM = numpy.zeros_like(ExTE)
for mx, slice in enumerate(self.slicesx):
idx = numpy.where((slice.x1 <= x) & (x < slice.x2))
x2 = x[idx] - slice.x1
x1 = slice.x2 - x[idx]
dx = slice.x2 - slice.x1
for n in range(nmodi):
fi = slice.modih[n].eval(y)
fidot = dot(slice.modih[n]).eval(y)
psi = slice.modie[n].eval(y)
psisueps = sueps(slice.modie[n]).eval(y)
psidotsueps = sueps(dot(slice.modie[n])).eval(y)
kfh = self.modih[n].k[mx]
kxh = scipy.sqrt(kfh**2 - kz**2)
sl = self.modih[n].sl[mx] * (k0/kfh)**2
al = self.modih[n].al[mx]
sr = self.modih[n].sr[mx] * (k0/kfh)**2
ar = self.modih[n].ar[mx]
uh[n,idx] = (numpy.sin(kxh * x1) * sl + numpy.sin(kxh * x2) * sr) / numpy.sin(kxh * dx)
udoth[n,idx] = (numpy.sin(kxh * x1) * al + numpy.sin(kxh * x2) * ar) / numpy.sin(kxh * dx)
kfe = self.modie[n].k[mx]
kxe = scipy.sqrt(kfe**2 - kz**2)
sl = self.modie[n].sl[mx] * (k0/kfe)**2
al = self.modie[n].al[mx]
sr = self.modie[n].sr[mx] * (k0/kfe)**2
ar = self.modie[n].ar[mx]
ue[n,idx] = (numpy.sin(kxe * x1) * sl + numpy.sin(kxe * x2) * sr) / numpy.sin(kxe * dx)
udote[n,idx] = (numpy.sin(kxe * x1) * al + numpy.sin(kxe * x2) * ar) / numpy.sin(kxe * dx)
Exsh[:,n] = (kz/k0) * fi
Exah[:,n] = 0
Exse[:,n] = 0
Exae[:,n] = -psidotsueps / k0**2
Eysh[:,n] = 0
Eyah[:,n] = 0
Eyse[:,n] = -(kfe/k0)**2 * psisueps
Eyae[:,n] = 0
Ezsh[:,n] = 0
Ezah[:,n] = -1j * fi / k0
Ezse[:,n] = 1j * kz / k0**2 * psidotsueps
Ezae[:,n] = 0
cBxsh[:,n] = 0
cBxah[:,n] = fidot / k0**2
cBxse[:,n] = kz / k0 * psi
cBxae[:,n] = 0
cBysh[:,n] = (kfh/k0)**2 * fi
cByah[:,n] = 0
cByse[:,n] = 0
cByae[:,n] = 0
cBzsh[:,n] = -1j * kz / k0**2 * fidot
cBzah[:,n] = 0
cBzse[:,n] = 0
cBzae[:,n] = -1j * psi / k0
ExTE[:,idx] = numpy.tensordot(Exsh, uh[:,idx], axes=1) + numpy.tensordot(Exah, udoth[:,idx], axes=1)
ExTM[:,idx] = numpy.tensordot(Exse, ue[:,idx], axes=1) + numpy.tensordot(Exae, udote[:,idx], axes=1)
EyTE[:,idx] = numpy.tensordot(Eysh, uh[:,idx], axes=1) + numpy.tensordot(Eyah, udoth[:,idx], axes=1)
EyTM[:,idx] = numpy.tensordot(Eyse, ue[:,idx], axes=1) + numpy.tensordot(Eyae, udote[:,idx], axes=1)
EzTE[:,idx] = numpy.tensordot(Ezsh, uh[:,idx], axes=1) + numpy.tensordot(Ezah, udoth[:,idx], axes=1)
EzTM[:,idx] = numpy.tensordot(Ezse, ue[:,idx], axes=1) + numpy.tensordot(Ezae, udote[:,idx], axes=1)
cBxTE[:,idx] = numpy.tensordot(cBxsh, uh[:,idx], axes=1) + numpy.tensordot(cBxah, udoth[:,idx], axes=1)
cBxTM[:,idx] = numpy.tensordot(cBxse, ue[:,idx], axes=1) + numpy.tensordot(cBxae, udote[:,idx], axes=1)
cByTE[:,idx] = numpy.tensordot(cBysh, uh[:,idx], axes=1) + numpy.tensordot(cByah, udoth[:,idx], axes=1)
cByTM[:,idx] = numpy.tensordot(cByse, ue[:,idx], axes=1) + numpy.tensordot(cByae, udote[:,idx], axes=1)
cBzTE[:,idx] = numpy.tensordot(cBzsh, uh[:,idx], axes=1) + numpy.tensordot(cBzah, udoth[:,idx], axes=1)
cBzTM[:,idx] = numpy.tensordot(cBzse, ue[:,idx], axes=1) + numpy.tensordot(cBzae, udote[:,idx], axes=1)
return (ExTE, ExTM, EyTE, EyTM, EzTE, EzTM, cBxTE, cBxTM, cByTE, cByTM, cBzTE, cBzTM)
def fields(self, x=None, y=None):
ExTE, ExTM, EyTE, EyTM, EzTE, EzTM, cBxTE, cBxTM, cByTE, cByTM, cBzTE, cBzTM = self.eval(x, y)
Ex = ExTE + ExTM
Ey = EyTE + EyTM
Ez = EzTE + EzTM
cBx = cBxTE + cBxTM
cBy = cByTE + cByTM
cBz = cBzTE + cBzTM
return (Ex, Ey, Ez, cBx, cBy, cBz)
def intensity(self, x=None, y=None):
Ex, Ey, Ez, cBx, cBy, cBz = self.fields(x, y)
cSz = .5 * (Ex * numpy.conj(cBy) - Ey * numpy.conj(cBx))
return cSz
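# cSz is, up to normalization by the free-space impedance (cB is used in place of H),
# the longitudinal component of the complex Poynting vector S_z = (E x H*)_z / 2;
# it is integrated over the cross-section elsewhere to normalize and compare modes.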
def TEfrac_old(self, x_=None, y_=None):
if x_ is None:
x = self.get_x()
else:
x = numpy.atleast_1d(x_)
if y_ is None:
y = self.get_y()
else:
y = numpy.atleast_1d(y_)
Ex, Ey, Ez, cBx, cBy, cBz = self.fields(x, y)
cSTE = .5 * EMpy.utils.trapz2(Ex * numpy.conj(cBy), y, x)
cSTM = .5 * EMpy.utils.trapz2(-Ey * numpy.conj(cBx), y, x)
return numpy.abs(cSTE) / (numpy.abs(cSTE) + numpy.abs(cSTM))
def TEfrac(self):
Sx, Sy = self.__overlap(self)
return Sx / (Sx - Sy)
def overlap_old(self, m, x_=None, y_=None):
if x_ is None:
x = self.get_x()
else:
x = numpy.atleast_1d(x_)
if y_ is None:
y = self.get_y()
else:
y = numpy.atleast_1d(y_)
Ex, Ey, Ez, cBx, cBy, cBz = self.fields(x, y)
cSz = self.intensity(x, y)
norm = scipy.sqrt(EMpy.utils.trapz2(cSz, y, x))
Ex1, Ey1, Ez1, cBx1, cBy1, cBz1 = m.fields(x, y)
cSz1 = m.intensity(x, y)
norm1 = scipy.sqrt(EMpy.utils.trapz2(cSz1, y, x))
return .5 * EMpy.utils.trapz2(Ex/norm * numpy.conj(cBy1/norm1) - Ey/norm * numpy.conj(cBx1/norm1), y, x)
def __overlap_old(self, mode):
nmodi = len(self.modie)
k0 = 2. * numpy.pi / self.slicesx[0].wl
kz = self.keff
Sx = 0j
Sy = 0j
for mx, slice in enumerate(self.slicesx):
for n1 in range(nmodi):
phi_n1 = slice.modih[n1]
phidot_n1 = dot(phi_n1)
psi_n1 = slice.modie[n1]
psisueps_n1 = sueps(psi_n1)
psidotsueps_n1 = sueps(dot(psi_n1))
uh_n1 = copy.deepcopy(self.modih[n1])
# reduce to a single slice
kfh_n1 = uh_n1.k[mx]
uh_n1.k = numpy.atleast_1d(scipy.sqrt(kfh_n1**2 - kz**2))
uh_n1.sl = numpy.atleast_1d(uh_n1.sl[mx] * (k0/kfh_n1)**2)
uh_n1.al = numpy.atleast_1d(uh_n1.al[mx])
uh_n1.sr = numpy.atleast_1d(uh_n1.sr[mx] * (k0/kfh_n1)**2)
uh_n1.ar = numpy.atleast_1d(uh_n1.ar[mx])
uh_n1.U = numpy.atleast_1d(uh_n1.U[mx:mx+2])
uhdot_n1 = dot(uh_n1)
ue_n1 = copy.deepcopy(self.modie[n1])
# reduce to a single slice
kfe_n1 = ue_n1.k[mx]
ue_n1.k = numpy.atleast_1d(scipy.sqrt(kfe_n1**2 - kz**2))
ue_n1.sl = numpy.atleast_1d(ue_n1.sl[mx] * (k0/kfe_n1)**2)
ue_n1.al = numpy.atleast_1d(ue_n1.al[mx])
ue_n1.sr = numpy.atleast_1d(ue_n1.sr[mx] * (k0/kfe_n1)**2)
ue_n1.ar = numpy.atleast_1d(ue_n1.ar[mx])
ue_n1.U = numpy.atleast_1d(ue_n1.U[mx:mx+2])
uedot_n1 = dot(ue_n1)
for n2 in range(nmodi):
phi_n2 = mode.slicesx[mx].modih[n2]
phidot_n2 = dot(phi_n2)
psi_n2 = mode.slicesx[mx].modie[n2]
psisueps_n2 = sueps(psi_n2)
psidotsueps_n2 = sueps(dot(psi_n2))
uh_n2 = copy.deepcopy(mode.modih[n2])
# reduce to a single slice
kfh_n2 = uh_n2.k[mx]
uh_n2.k = numpy.atleast_1d(scipy.sqrt(kfh_n2**2 - kz**2))
uh_n2.sl = numpy.atleast_1d(uh_n2.sl[mx] * (k0/kfh_n2)**2)
uh_n2.al = numpy.atleast_1d(uh_n2.al[mx])
uh_n2.sr = numpy.atleast_1d(uh_n2.sr[mx] * (k0/kfh_n2)**2)
uh_n2.ar = numpy.atleast_1d(uh_n2.ar[mx])
uh_n2.U = numpy.atleast_1d(uh_n2.U[mx:mx+2])
uhdot_n2 = dot(uh_n2)
ue_n2 = copy.deepcopy(mode.modie[n2])
# reduce to a single slice
kfe_n2 = ue_n2.k[mx]
ue_n2.k = numpy.atleast_1d(scipy.sqrt(kfe_n2**2 - kz**2))
ue_n2.sl = numpy.atleast_1d(ue_n2.sl[mx] * (k0/kfe_n2)**2)
ue_n2.al = numpy.atleast_1d(ue_n2.al[mx])
ue_n2.sr = numpy.atleast_1d(ue_n2.sr[mx] * (k0/kfe_n2)**2)
ue_n2.ar = numpy.atleast_1d(ue_n2.ar[mx])
ue_n2.U = | numpy.atleast_1d(ue_n2.U[mx:mx+2]) | numpy.atleast_1d |
import numpy as np
import biorbd
try:
import BiorbdViz
biorbd_viz_found = True
except ModuleNotFoundError:
biorbd_viz_found = False
#
# This examples shows how to
# 1. Load a model
# 2. Generate data (should be acquired via real data)
# 3. Create a Kalman filter
# 4. Apply the Kalman filter (inverse kinematics)
# 5. Plot the kinematics (Q), velocity (Qdot) and acceleration (Qddot)
#
# Please note that this example will work only with the Eigen backend.
# Please also note that kalman will be VERY slow if compiled in debug
#
# Load a predefined model
model = biorbd.Model("../pyomecaman.bioMod")
nq = model.nbQ()
nb_mus = model.nbMuscles()
n_frames = 20
# Generate clapping gesture data
qinit = np.array([0, 0, -.3, 0.35, 1.15, -0.35, 1.15, 0, 0, 0, 0, 0, 0])
qmid = | np.array([0, 0, -.3, 0.5, 1.15, -0.5, 1.15, 0, 0, 0, 0, 0, 0]) | numpy.array |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 28 09:27:49 2020
@author: <NAME>
"""
import pickle
import pandas as pd
import numpy as np
from country import country
from scipy.integrate import solve_ivp
from scipy.optimize import minimize
from scipy.optimize import dual_annealing
from scipy.optimize import brute
from scipy.interpolate import interp1d
from scipy.ndimage.filters import uniform_filter1d
import psutil
from functools import partial
import multiprocessing as mp
from tqdm import tqdm_notebook as tqdm
import pdb
from datetime import date, datetime, timedelta
import time
from pathlib import Path
from matplotlib import pyplot as plt
import statsmodels.api as sm
from sklearn import linear_model
import matplotlib.patches as mpatches
import country_converter as coco
import math
import seaborn as sns
# --------------------------------------------------------
# Global variables, chosen cohorts of data and estimates
# --------------------------------------------------------
from param_simple import *
# ----------------------
# Main class
# ----------------------
class solveCovid:
def __init__(self,iso2: str): # eg 'US'
self.iso2 = iso2
# Policy strategies for forecast
self.policy = 'optim' # ['optim', 'linear']
self.phi_option = 'fit' # ['fit','exo']: Fit phi to latest data or specify as exogenous
self.phi_exo = 2.5e-9 # weight on mobility in social welfare function
self.phi_min = 1e-13 # Lowerbound for phi - authorities care about output
# Infection rate model for forecast
self.gamma_tilde_model = 'AR1' # ['AR1','AR2','shock']
self.gamma_shock_length = 10 # Shock gamma_tilde for x days
self.gamma_shock_depth = 0.5 # Daily increment of gamma
self.default_init_single = default_init_single
self.default_bounds_single = default_bounds_single
# Vaccine assumptions
self.vac_assump = 'vac_base' # Vaccination scenarios: ['vac_base','vac_worse','vac_better']
self.vac_receiver = 'S+R' # Vaccines given to S or S+R? ['S only','S+R']
self.effi_one = 0.5 # Efficacy after one dose (fraction, i.e. 50%)
self.effi_two = 0.95 # Efficacy after two doses (fraction, i.e. 95%)
self.target_weight = 0.7 # How targeted vaccine distribution is (1 = sequenced from eldest to youngest, 0 is random)
self.vac_base_cover = 1 # Baseline: (already started): % of effective coverage by December 2021 (to be controlled by country-specific scaling factor below)
self.vac_base_delayedstart = '2021-06-30' # Baseline: (hasn't started): first date of vaccination
self.vac_base_delayedcover = 0.75 # Baseline: (hasn't started): % of contracted dosages deployed by December 2021
self.vac_worse_cover = 0.3 # Worse (started): Use by end of 2021
self.vac_worse_delayedstart = '2021-09-30' # Worse (hasn't started): Starting date
self.vac_worse_delayedcover = 0.3 # Worse (hasn't started): Use by end of 2021
self.vac_better_cover = 1.3
self.vac_better_delayedstart = '2021-06-30'
self.vac_better_delayedcover = 1
# Reinfection and loss of immunity
self.reinfect = 'immune' # ['immune','reinfect']
self.r_re1_R = np.log(2)/10000 # Baseline: R immunity decays with a half-life of 10,000 days (effectively permanent)
self.r_re1_V = np.log(2)/10000 # Baseline: V immunity decays with a half-life of 10,000 days (effectively permanent)
self.r_re2_R = np.log(2)/60 # Downside risk: R loses immunity after 60 days, approx 1% of R lose immunity each day
self.r_re2_V = np.log(2)/60 # Downside risk: V loses immunity after 60 days, approx 1% of V lose immunity each day
# Death probabilities
self.pdth_assump = 'martingale' # ['martingale','treatment']
self.pdth_min = 0.005 # Lowerbound on death probability - countries with very few cases still think there is death probability
self.pdth_halflife = 60 # Halflife for treatment case; no. of days it takes to close half the gap of current and assumed minimum death prob
self.pdth_theta = np.exp(-np.log(2)/self.pdth_halflife)
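# Note: pdth_theta = exp(-ln(2)/pdth_halflife), so pdth_theta**pdth_halflife = 0.5;
# under the 'treatment' assumption the gap between the current death probability
# and pdth_min is therefore halved every pdth_halflife (= 60) days.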
# --------------- 1. Preliminary: Get the data ------------------------
def prelim(self):
iso2 = self.iso2
self.N = df1.fillna(method='ffill')['population'][iso2].iloc[-1]
df2 = df1.iloc[:,df1.columns.get_level_values(1)==iso2][[
'total_cases','total_deaths','new_cases','new_deaths',
'google_smooth','icu_patients','hosp_patients','reproduction_rate',
'new_tests','tests_per_case','aged_70_older',
'vac_total','vac_people',
'vac_fully']][df1['total_cases'][iso2] > virus_thres]
df2 = df2.droplevel('iso2',axis=1)
df2['vac_total'] = df2['vac_total'].interpolate()
df2['vac_people'] = df2['vac_people'].interpolate()
if iso2 == 'AU' or iso2 == 'SA': # Countries with no breakdowns; do manual approximation
df2['vac_partial'] = 0.8 * df2['vac_total']
df2['vac_fully'] = 0.2 * df2['vac_total']
else : # For most countries,
date1 = df2['vac_fully'].first_valid_index() # Next 2 lines fill NA in 'vac_fully', so vac_partial is defined
df2['vac_fully'].iloc[:df2.index.get_loc(date1)-1] = 0
df2['vac_fully'] = df2['vac_fully'].interpolate()
df2['vac_partial'] = df2['vac_people'] - df2['vac_fully']
df2 = df2.fillna(0) # Replace NaN by 0 - deaths and vaccinations
PopulationI = df2['total_cases'][0]
PopulationD = df2['total_deaths'][0]
if PopulationD==0:
PopulationD = 0
PopulationR = 5
else:
PopulationR = PopulationD * 5
PopulationCI = PopulationI - PopulationD - PopulationR # Undetected and infectious cases
self.cases_data_fit = df2['total_cases'].tolist()
self.deaths_data_fit = df2['total_deaths'].tolist()
self.newcases_data_fit = df2['new_cases'].tolist()
self.newdeaths_data_fit = df2['new_deaths'].tolist()
self.balance = self.cases_data_fit[-1] / max(self.deaths_data_fit[-1], 10) / 3
date_day_since100 = pd.to_datetime(df2.index[0])
self.maxT = (default_maxT - date_day_since100).days + 1
self.mobility_vec = df2['google_smooth'].values
self.T = len(df2)
self.t_cases = np.arange(0,self.T)
self.mobility_interp = interp1d(self.t_cases,self.mobility_vec,bounds_error=False,fill_value=0.,kind='cubic')
self.GLOBAL_PARAMS = (self.N, PopulationCI, PopulationR, PopulationD, PopulationI, p_d, p_h, p_v)
self.gamma_0_days = 1 # average of gamma_t during first n days becomes the target
# Compute vaccination parameters
self.vac_partial = df2['vac_partial'].values
self.vac_fully = df2['vac_fully'].values
#self.vac_contracted = 1000*df_vac.loc[iso2]['No. of people covered (thousands)']/self.N
df2['V_'] = self.N * (self.effi_one*df2['vac_partial']
+ self.effi_two*df2['vac_fully'])/100 # V = expected number of effectively vaccinated persons
ix = pd.date_range(start=df2.index[0], end=default_maxT, freq='D') # Expand time-sample, to include forecast later
df_v = df2.reindex(ix)
# Vaccination assumptions
if self.iso2 in ['GB','US']:
vac_scale = 1
elif self.iso2 in ['BE','FR','DE','IT','NL','PL','SG','ES','CH','RO','CL','CA']:
vac_scale = 0.8
elif self.iso2 in ['AU','SA','SE','TR']:
vac_scale = 0.65
elif self.iso2 in ['AR','BR','MX','RU']:
vac_scale = 0.50
elif self.iso2 in ['ID','IN','JP','KR','MY','TH']:
vac_scale = 0.25
elif self.iso2 in ['ZA']:
vac_scale = 0.10
else:
vac_scale = 0.50
print('Missing vaccine assumption for selected country')
if self.vac_assump == 'vac_base':
if df2['V_'][-1] > 0: # already started
df_v['V_'].loc['2021-12-31'] = self.vac_base_cover * vac_scale * self.N
elif df2['V_'][-1] == 0: # If has not started, assume starting by xxx and cover xxx at year end
df_v['V_'].loc[self.vac_base_delayedstart] = 100 # 100 = assumed number of effectively vaccinated on first day
df_v['V_'].loc['2021-12-31'] = self.vac_base_delayedcover* vac_scale*self.N # partial orders filled by year end
elif self.vac_assump == 'vac_worse':
if df2['V_'][-1] > 0:
df_v['V_'].loc['2021-12-31'] = self.vac_worse_cover * vac_scale * self.N
elif df2['V_'][-1] == 0:
df_v['V_'].loc[self.vac_worse_delayedstart] = 100
df_v['V_'].loc['2021-12-31'] = self.vac_worse_delayedcover* vac_scale*self.N
elif self.vac_assump == 'vac_better':
if df2['V_'][-1]>0:
df_v['V_'].loc['2021-12-31'] = self.vac_better_cover * vac_scale * self.N
elif df2['V_'][-1] == 0:
df_v['V_'].loc[self.vac_better_delayedstart] = 100
df_v['V_'].loc['2021-12-31'] = self.vac_better_delayedcover* vac_scale*self.N
df_v['V_'] = df_v['V_'].interpolate()
df_v['V_'] = df_v['V_'].clip(0,self.N)
self.df2 = df2
self.df_v = df_v
print(f'Data preparation for {iso2} done')
# --------------------------3 . SEIR model ------------------
def step_seir(self, t, x, gamma_t, p_dth) -> list:
"""
SEIR model building on DELPHI v.3
Features 17 distinct states, taking into account undetected, deaths, hospitalized,
recovered and vaccinated cases
[0 S, 1 E, 2 I, 3 UR, 4 DHR, 5 DQR, 6 UD, 7 DHD, 8 DQD, 9 R, 10 D,
11 TH, 12 DVR, 13 DVD, 14 DD, 15 DT, 16 V]
"""
S, E, I, AR, DHR, DQR, AD, DHD, DQD, R, D, TH, DVR, DVD, DD, DT, V = x
r_v = self.df_v['V_'].iloc[t+1] - self.df_v['V_'].iloc[t]
# Reinfection parameters
if self.reinfect == 'immune':
r_re_R = self.r_re1_R
r_re_V = self.r_re1_V
elif self.reinfect == 'reinfect':
if t <= self.T:
r_re_R = self.r_re1_R
r_re_V = self.r_re1_V
else:
r_re_R = self.r_re2_R
r_re_V = self.r_re2_V
# Vaccination recipients (S, or S+R)
if self.vac_receiver == 'S only':
zeta = 1
elif self.vac_receiver == 'S+R':
zeta = S/(S+R)
else:
print('Re-specify vaccine recipient choice')
# Main equations
S1 = S - gamma_t * S * I / self.N + r_re_R*R +r_re_V*V - r_v * zeta
if S1 < 0: # Vaccination reaches saturating point
S1 = 0
r_v = (S - gamma_t * S * I / self.N + r_re_R*R +r_re_V*V) /zeta
E1 = E + gamma_t * S * I / self.N - r_i * E
I1 = I + r_i * E - r_d * I
AR1 = AR + r_d * (1 - p_dth) * (1 - p_d) * I - r_ri * AR
DHR1 = DHR + r_d * (1 - p_dth) * p_d * p_h * I - r_rh * DHR
DQR1 = DQR + r_d * (1 - p_dth) * p_d * (1 - p_h) * I - r_ri * DQR
AD1 = AD + r_d * p_dth * (1 - p_d) * I - r_dth * AD
DHD1 = DHD + r_d * p_dth * p_d * p_h * I - r_dth * DHD
DQD1 = DQD + r_d * p_dth * p_d * (1 - p_h) * I - r_dth * DQD
R1 = R + r_ri * (AR + DQR) + r_rh * DHR - r_re_R*R - r_v * (1-zeta)
D1 = D + r_dth * (AD + DQD + DHD)
# Helper states
TH1 = TH + r_d * p_d * p_h * I
DVR1 = DVR + r_d * (1 - p_dth) * p_d * p_h * p_v * I - r_rv * DVR
DVD1 = DVD + r_d * p_dth * p_d * p_h * p_v * I - r_dth * DVD
DD1 = DD + r_dth * (DHD + DQD)
DT1 = DT + r_d * p_d * I
V1 = V + r_v -r_re_V*V
x1 = [S1, E1, I1, AR1, DHR1, DQR1, AD1, DHD1, DQD1,
R1, D1, TH1, DVR1, DVD1, DD1, DT1, V1]
return x1
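# Each call advances the 17-state system by one day with forward-Euler-style difference
# updates; r_v (that day's newly effective vaccinations) is read from df_v, and the
# vaccination flow is capped above so that S cannot become negative.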
# ------------------ X. Construct initial conditions
def initial_states_func(self,k):
N, PopulationCI, PopulationR, PopulationD, PopulationI, p_d, p_h, p_v = self.GLOBAL_PARAMS
p_dth0 = self.newdeaths_data_fit[0]/(r_dth*PopulationCI) # Set p_dth0 to match D1-D0 to newdeaths_data_fit
E_0 = PopulationCI / p_d * k
I_0 = PopulationCI / p_d * k
UR_0 = (PopulationCI / p_d - PopulationCI) * (1 - p_dth0)
DHR_0 = (PopulationCI * p_h) * (1 - p_dth0)
DQR_0 = PopulationCI * (1 - p_h) * (1 - p_dth0)
UD_0 = (PopulationCI / p_d - PopulationCI) * p_dth0
DHD_0 = PopulationCI * p_h * p_dth0
DQD_0 = PopulationCI * (1 - p_h) * p_dth0
R_0 = PopulationR / p_d
D_0 = PopulationD / p_d
S_0 = N - (E_0 +I_0 +UR_0 +DHR_0 +DQR_0 +UD_0 +DHD_0 +DQD_0 +R_0 +D_0)
TH_0 = PopulationCI * p_h
DVR_0 = (PopulationCI * p_h * p_v) * (1 - p_dth0)
DVD_0 = (PopulationCI * p_h * p_v) * p_dth0
DD_0 = PopulationD
DT_0 = PopulationI
V_0 = 0
x_init = [
S_0, E_0, I_0, UR_0, DHR_0, DQR_0, UD_0, DHD_0, DQD_0, R_0,
D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0
]
return x_init
# Find k=k1,k2 that matches gamma_0 to 2.08 (R0=6 equivalent)
def loss_gamma0(self,k):
newcases = np.array(self.newcases_data_fit)
newdeaths = np.array(self.newdeaths_data_fit)
newcases_sm = uniform_filter1d(newcases, size=21, mode='nearest')
newdeaths_sm = uniform_filter1d(newdeaths, size=21, mode='nearest')
gamma_t_vec = []
x_init = self.initial_states_func(k)
(S_0, E_0, I_0, UR_0, DHR_0, DQR_0, UD_0, DHD_0, DQD_0, R_0,
D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0) = x_init
newcases_sm2 = np.append(newcases_sm, newcases_sm[-2:]) # Extend the list for forward projection below
newdeaths_sm2 = np.append(newdeaths_sm, newdeaths_sm[-1])
x_0 = x_init.copy()
for t in range(self.gamma_0_days): # Target first n days
gamma_t = (newcases_sm2[t+2]/(r_d*p_d) - (1-r_d)**2 *I_0 - r_i*(2-r_d-r_i)*E_0 )*self.N/(r_i*S_0*I_0)
p_dth = (newdeaths_sm2[t+1] - r_dth*(1-r_dth)*(DHD_0 + DQD_0))/(r_dth*r_d*p_d*I_0)
gamma_t = np.clip(gamma_t, 0.01, 10)
p_dth = np.clip(p_dth,0,1) # Probability limit [0,1]
x_1 = self.step_seir(t, x_0, gamma_t, p_dth)
x_0 = x_1
gamma_t_vec.append(gamma_t)
gamma_0 = np.mean(gamma_t_vec)
loss = (gamma_0 - (r_d*6) )**2 # gamma_0 equivalent to R0=6 is 2.08
return loss
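# The target gamma_0 = r_d*6 uses the SIR-type identity R0 = gamma / r_d, i.e. an
# initial reproduction number of 6; the in-line "2.08" then corresponds to
# r_d ~= 0.35, inferred here from that comment (r_d itself is set in param_simple).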
def fit_gamma0(self):
output = dual_annealing(
self.loss_gamma0,
x0 = [5],
bounds = [(1,50)],
)
k_star = output.x
return k_star
def get_initial_conditions(self):
if Path(f'../params/param_fixed/kstar.csv').exists():
df = pd.read_csv(f'../params/param_fixed/kstar.csv')
kstar = df[self.iso2].values[0]
else:
kstar = self.fit_gamma0()[0] # find kstar that matches gamma_0 to target
x_init = self.initial_states_func(kstar)
return x_init
# -------------------- x. Implied gamma_t and pdth_t in-sample -------------------
def gamma_t_compute(self):
newcases = np.array(self.newcases_data_fit)
newdeaths = np.array(self.newdeaths_data_fit)
newcases_sm = uniform_filter1d(newcases, size=21, mode='nearest')
newdeaths_sm = uniform_filter1d(newdeaths, size=21, mode='nearest')
gamma_t_vec = []
p_dth_vec = []
x_init = self.get_initial_conditions()
S_0, E_0, I_0, AR_0, DHR_0, DQR_0, AD_0, DHD_0, DQD_0, R_0, D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0 = x_init
S_vec = [S_0]
E_vec = [E_0]
I_vec = [I_0]
DT_vec = [DT_0]
DD_vec = [DD_0]
DHR_vec = [DHR_0]
DHD_vec = [DHD_0]
newcases_sm2 = np.append(newcases_sm, newcases_sm[-2:]) # Extend the list for forward projection below
newdeaths_sm2 = np.append(newdeaths_sm, newdeaths_sm[-1])
x_0 = x_init.copy()
for t in range(len(newcases)):
# Work backwards to compute 'exact' gamma_t and p_dth
gamma_t = (newcases_sm2[t+2]/(r_d*p_d) - (1-r_d)**2 *I_0 - r_i*(2-r_d-r_i)*E_0 )*self.N/(r_i*S_0*I_0)
p_dth = (newdeaths_sm2[t+1] - r_dth*(1-r_dth)*(DHD_0 + DQD_0))/(r_dth*r_d*p_d*I_0)
gamma_t = np.clip(gamma_t, 0.01, 10)
p_dth = np.clip(p_dth,0,1) # Probability limit [0,1]
x_1 = self.step_seir(t, x_0, gamma_t, p_dth)
S_0, E_0, I_0, AR_0, DHR_0, DQR_0, AD_0, DHD_0, DQD_0, R_0, D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0 = x_1
x_0 = x_1
gamma_t_vec.append(gamma_t)
p_dth_vec.append(p_dth)
S_vec.append(S_0)
I_vec.append(I_0)
E_vec.append(E_0)
DT_vec.append(DT_0)
DD_vec.append(DD_0)
DHR_vec.append(DHR_0)
DHD_vec.append(DHD_0)
self.df2['gamma_t'] = gamma_t_vec
self.df2['pdth_t'] = p_dth_vec
self.S_vec = S_vec # In-sample estmates, useful for phi calculation later on
self.I_vec = I_vec
self.DHR_vec = DHR_vec # For fitting death probability
self.DHD_vec = DHD_vec
HD_HR = np.array(self.DHR_vec) + np.array(self.DHD_vec)
self.df2['HD_HR'] = 100*HD_HR[:-1]/self.N
# gamma_t_sm = uniform_filter1d(gamma_t_vec, size=6, mode='nearest')
# self.df2['gamma_sm'] = gamma_t_sm
return gamma_t_vec, p_dth_vec
# -------------------- x. Estimating the model -----------
def gamma_func(self, params):
m_t = self.df2['google_smooth'].values
tvec = np.arange(len(m_t))
beta0, beta1 = params
gamma_vec = beta0*np.exp(beta1* m_t)
return gamma_vec
def loss_betas(self, params) -> float:
gamma_model = self.gamma_func(params)
loss = sum( (self.df2['gamma_t'].values[:len(gamma_model)] - gamma_model)**2 )
return loss
def fitmodel(self):
# A. Fit beta0 and beta1
x0 = self.default_init_single
bounds_0 = self.default_bounds_single
output = dual_annealing(
self.loss_betas,
x0 = x0,
bounds = bounds_0,
)
best_betas = output.x
self.best_betas = best_betas
# B. Fit the residual (gamma_tilde) to AR models
m_t = self.df2['google_smooth'].values
tvec = np.arange(len(self.df2))
beta0, beta1 = self.best_betas
self.df2['gamma_mob'] = beta0*np.exp(beta1* m_t)
self.df2['gamma_tilde'] = self.df2['gamma_t'] - self.df2['gamma_mob']
self.df2['gamma_tilde_sm'] = uniform_filter1d(self.df2['gamma_tilde'],
size=21, mode='reflect')
self.df2['gamma_tilde_resid'] = self.df2['gamma_tilde'] - self.df2['gamma_tilde_sm']
y = self.df2['gamma_tilde_sm']
self.df2['gamma_tilde_sm_lag1'] = self.df2['gamma_tilde_sm'].shift(1) # No constant term
self.df2['gamma_tilde_sm_lag2'] = self.df2['gamma_tilde_sm'].shift(2)
reg_AR1 = sm.OLS(y,self.df2['gamma_tilde_sm_lag1'],missing='drop').fit()
reg_AR2 = sm.OLS(y,self.df2[['gamma_tilde_sm_lag1','gamma_tilde_sm_lag2']],missing='drop').fit()
best_rho1 = reg_AR1.params[0]
best_rho1 = np.clip(best_rho1, 0.1, 0.99) #Assume stationarity
best_rho2 = reg_AR2.params[:]
best_params = np.array([beta0, beta1, best_rho1, best_rho2[0], best_rho2[1]])
self.best_rho1 = best_rho1
self.best_rho2 = best_rho2
self.best_params = best_params
# C. Empirically fit phi for optimal policy to last observation
if self.phi_option == 'fit':
m = self.df2['google_smooth'][-15:].mean() # Take average of last 15 days to smooth volatility
s = self.S_vec[-1]/self.N
i = self.I_vec[-1]/self.N
gamma_tilde = self.df2['gamma_tilde'][-1]
pdth = self.df2['pdth_t'][-1]
pdth = max(pdth, self.pdth_min) # Get around cases where pdth=0 for countries with very few cases
LHS1 = pdth*r_d*i*s*(beta0*beta1*np.exp(beta1*m))
LHS2 = pdth*r_d*i*(1 - r_d + s*(gamma_tilde + beta0*np.exp(beta1*m)))
phi = -(LHS1 * LHS2)/m
self.phi = max(phi, self.phi_min)
elif self.phi_option == 'exo':
self.phi = self.phi_exo
return best_params
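# fitmodel() thus produces: (A) beta0, beta1 linking mobility to transmission via
# gamma_mob = beta0*exp(beta1*m), fitted by dual annealing; (B) AR(1)/AR(2)
# coefficients for the smoothed residual gamma_tilde; and (C) phi, the mobility
# weight in the welfare function, fitted to the latest observation or set via phi_exo.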
# ------------------ x. Forecasts ---------------------------
def step_gamma_tilde(self, gamma_tilde_lag1, gamma_tilde_lag2, model='AR1'):
if model =='AR1':
return self.best_rho1*gamma_tilde_lag1
elif model =='AR2':
return self.best_rho2[0]*gamma_tilde_lag1 + self.best_rho2[1]*gamma_tilde_lag2
def mobility_choice(self,x,gamma_tilde,pdth):
if self.policy == 'constant':
mob = self.poparam_constant
elif self.policy == 'linear-I': # Respond linearly to infection level
mob = self.poparam_linear_I[0] + self.poparam_linear_I[1]*x[2]
elif self.policy == 'linear-dI': # Respond to new infections
dI = r_i*x[1] - r_d*x[2] # x[1]=E, x[2]=I
mob = self.poparam_linear_dI[0] + self.poparam_linear_dI[1]*dI
elif self.policy == 'optim': # Analytical optimal policy based on simplified model and quadratic losses
beta0 = self.best_params[0]
beta1 = self.best_params[1]
phi = self.phi
s = x[0]/self.N
i = x[2]/self.N
m_set = np.linspace(-1,0,101)
RHS = -phi*m_set
LHS1 = pdth*r_d*i*s*(beta0*beta1*np.exp(beta1*m_set))
LHS2 = pdth*r_d*i*(1 - r_d + s*(gamma_tilde + beta0*np.exp(beta1*m_set)))
LHS = LHS1 * LHS2
m_id = np.argmin(np.abs(RHS-LHS))
mob = m_set[m_id]
return mob
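# The 'optim' branch solves the planner's first-order condition by grid search over
# m in [-1, 0]: the marginal mobility cost phi*|m| (RHS) is matched against the
# marginal expected deaths from the extra transmission that mobility induces (LHS),
# and the m bringing the two sides closest together is returned.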
def fatality_factor(self,V): # Factor to adjust 'base' fatality prob
idx = (f_table[self.iso2]['vaccine_%'] - V/self.N).abs().argmin() # Find idx to look up in fatality table
factor = f_table[self.iso2]['fatality_ratio'][idx]
return factor
def sim_seir(self):
df2 = self.df2
ix = pd.date_range(start=df2.index[0], end=default_maxT, freq='D') # Expand time-sample, to include forecast later
df3 = df2.reindex(ix)
x_init = self.get_initial_conditions()
x_data = np.array(x_init)
gamma_tilde_fc = self.df2['gamma_tilde'].values
gamma_tilde_sm_fc = self.df2['gamma_tilde_sm'].values
pdth_t_targ = [] # Death prob when vaccines are targeted
pdth_t_base = [] # Base death prob if vaccines are given randomly
pdth_t_fc = self.df2['pdth_t'].values
pdth_t_base_fc = pdth_t_fc.copy()
gamma_mob_fc = self.df2['gamma_mob'].values
mob_fc = self.df2['google_smooth'].values
# Load parameters
if hasattr(self, 'best_params'):
beta0, beta1, rho, rhos_1, rhos_2 = self.best_params
else:
df_param = pd.read_csv(f'../params/{param_load_folder}/param_est.csv')
beta0, beta1, rho, rhos_1, rhos_2 = df_param[self.iso2]
for t in range(self.maxT):
factor = self.fatality_factor(x_init[-1])
eta = self.target_weight
if t<len(self.df2): # In sample
pdth_t = pdth_t_fc[t]
pdth_base = pdth_t/(eta*factor + 1-eta)
pdth_targ = factor*pdth_base
# if t==len(self.df2): # Parse pdth_base of hospitalised/N
# y = pdth_t_base
# X = self.df2['HD_HR'].shift(30) # Use lagged hospitalised as the predictor
# X = sm.add_constant(X)
# reg_pdth = sm.OLS(y,X, missing='drop').fit()
# thetas = reg_pdth.params
# self.best_theta = thetas
# pdb.set_trace()
# pdth_t_basex = y - thetas[0] - thetas[1]*X # Base death prob, parsed of hospitalisation wave
# self.df2['pdth_base'] = pdth_t_base
# self.df2['pdth_base_x'] = pdth_t_basex
if t>len(self.df2)-1: # Out of sample
# Death probability
if self.pdth_assump == 'martingale': # Martingale death rate
pdth_base = pdth_t_base[-1]
elif self.pdth_assump == 'treatment': # Death prob slowly declines to assumed minimum and assumed halflife
pdth_base = self.pdth_theta*pdth_t_base[-1] + (1-self.pdth_theta)*self.pdth_min
pdth_base = max(pdth_base, self.pdth_min) # To get around pdth=0 for countries with very few cases
pdth_t = (eta*factor + 1-eta)*pdth_base
pdth_targ = factor*pdth_base
# Gamma_tilde
if self.gamma_tilde_model == 'AR1':
gamma_tilde = rho*gamma_tilde_sm_fc[t-1]
elif self.gamma_tilde_model == 'AR2':
gamma_tilde = rhos_1*gamma_tilde_sm_fc[t-1] + rhos_2*gamma_tilde_sm_fc[t-2]
elif self.gamma_tilde_model =='shock':
if t < len(self.df2) + self.gamma_shock_length:
gamma_tilde = gamma_tilde_sm_fc[len(self.df2)-1] + self.gamma_shock_depth
else:
gamma_tilde = rho*gamma_tilde_sm_fc[t-1]
# Mobility and overall gamma_t
mob_t = self.mobility_choice(x_init, gamma_tilde, pdth_t)
mob_t = max(mob_t, max_lockdown)
gamma_mob_t = beta0*np.exp(beta1*mob_t)
gamma_t = gamma_tilde + gamma_mob_t
# Append to data array
gamma_tilde_sm_fc = np.append(gamma_tilde_sm_fc, gamma_tilde)
gamma_tilde_fc = np.append(gamma_tilde_fc, gamma_tilde)
gamma_mob_fc = np.append(gamma_mob_fc, gamma_mob_t)
mob_fc = np.append(mob_fc, mob_t)
pdth_t_fc = np.append(pdth_t_fc, pdth_t)
pdth_t_base.append(pdth_base)
pdth_t_targ.append(pdth_targ)
# For in sample, use 'true' inputs
gamma_t = gamma_tilde_fc[t] + gamma_mob_fc[t]
p_dth = pdth_t_fc[t]
if t < range(self.maxT)[-1]: # Stop forecasting at the final period
x_next = self.step_seir(t, x_init, gamma_t, p_dth)
x_data = np.vstack((x_data, np.array(x_next)))
x_init = x_next
# Fill dataframe
col_temp = ['S', 'E', 'I', 'AR', 'DHR', 'DQR', 'AD', 'DHD', 'DQD', 'R', 'D', 'TH', 'DVR', 'DVD', 'DD', 'DT', 'V']
df4 = pd.DataFrame(x_data, columns=col_temp, index=df3.index)
df3 = df3.merge(df4, how='left', left_index=True, right_index=True)
df3['gamma_tilde_fc'] = gamma_tilde_fc
df3['gamma_mob_fc'] = gamma_mob_fc
df3['gamma_t_fc'] = df3['gamma_tilde_fc'] + df3['gamma_mob_fc']
df3['mob_fc'] = mob_fc
df3['pdth_t_fc'] = pdth_t_fc
df3['pdth_t_base'] = | np.array(pdth_t_base) | numpy.array |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Deep Recurrent Reinforcement Learning: 1 LSTM layer and 4 Dense layers, tanh activation function, 12 episodes, 50 iterations
drnnLSTMtanhMakespan0=[799, 798, 799, 799, 805, 806, 799, 805, 805, 800, 798, 798]
drnnLSTMtanhMakespan1=[800, 798, 796, 800, 796, 794, 795, 798, 800, 798, 805, 798]
drnnLSTMtanhMakespan2=[796, 800, 798, 804, 800, 798, 798, 798, 800, 800, 802, 797]
drnnLSTMtanhMakespan3=[805, 800, 800, 803, 794, 802, 800, 798, 799, 804, 799, 806]
drnnLSTMtanhMakespan4=[796, 798, 795, 798, 796, 799, 800, 796, 796, 798, 806, 800]
drnnLSTMtanhMakespan5=[798, 798, 799, 800, 800, 808, 798, 798, 801, 796, 799, 798]
drnnLSTMtanhMakespan6=[800, 796, 805, 798, 798, 796, 799, 800, 803, 800, 798, 800]
drnnLSTMtanhMakespan7=[799, 805, 802, 805, 800, 799, 800, 799, 805, 800, 794, 796]
drnnLSTMtanhMakespan8=[799, 798, 800, 798, 798, 800, 800, 800, 804, 799, 800, 804]
drnnLSTMtanhMakespan9=[795, 800, 795, 796, 798, 796, 797, 800, 797, 798, 796, 795]
drnnLSTMtanhMakespan10=[804, 799, 805, 798, 798, 798, 805, 800, 796, 804, 796, 799]
drnnLSTMtanhMakespan11=[795, 803, 805, 798, 795, 801, 798, 798, 804, 803, 799, 804]
drnnLSTMtanhMakespan12=[798, 798, 799, 800, 798, 798, 799, 799, 801, 796, 799, 798]
drnnLSTMtanhMakespan13=[798, 798, 799, 797, 796, 796, 800, 797, 805, 800, 800, 794]
drnnLSTMtanhMakespan14=[800, 798, 798, 796, 800, 800, 798, 798, 802, 798, 802, 798]
drnnLSTMtanhMakespan15=[796, 796, 800, 801, 800, 800, 796, 794, 796, 800, 796, 798]
drnnLSTMtanhMakespan16=[798, 798, 795, 797, 795, 799, 800, 796, 795, 796, 800, 800]
drnnLSTMtanhMakespan17=[794, 795, 800, 798, 795, 796, 798, 796, 795, 794, 798, 796]
drnnLSTMtanhMakespan18=[797, 795, 794, 794, 800, 796, 796, 795, 798, 795, 798, 794]
drnnLSTMtanhMakespan19=[797, 795, 795, 796, 798, 799, 795, 799, 795, 794, 795, 795]
drnnLSTMtanhMakespan20=[796, 794, 798, 797, 798, 799, 795, 795, 797, 795, 795, 792]
drnnLSTMtanhMakespan21=[797, 795, 797, 793, 794, 794, 800, 794, 798, 795, 797, 795]
drnnLSTMtanhMakespan22=[794, 800, 798, 795, 795, 796, 796, 799, 795, 794, 795, 795]
drnnLSTMtanhMakespan23=[795, 795, 794, 795, 794, 794, 797, 799, 796, 794, 794, 795]
drnnLSTMtanhMakespan24=[798, 795, 795, 795, 792, 794, 795, 794, 794, 795, 795, 795]
drnnLSTMtanhMakespan25=[794, 792, 794, 795, 795, 794, 794, 794, 794, 795, 794, 793]
drnnLSTMtanhMakespan26=[794, 794, 795, 796, 798, 795, 794, 794, 794, 794, 795, 794]
drnnLSTMtanhMakespan27=[795, 794, 795, 795, 795, 794, 794, 794, 794, 794, 795, 795]
drnnLSTMtanhMakespan28=[795, 794, 794, 795, 794, 795, 795, 795, 795, 794, 795, 794]
drnnLSTMtanhMakespan29=[792, 794, 795, 794, 794, 795, 794, 793, 795, 794, 795, 792]
drnnLSTMtanhMakespan30=[795, 794, 795, 795, 794, 794, 794, 795, 794, 794, 794, 794]
drnnLSTMtanhMakespan31=[794, 794, 795, 794, 795, 793, 795, 795, 795, 792, 794, 794]
drnnLSTMtanhMakespan32=[795, 795, 794, 793, 795, 795, 795, 795, 794, 794, 795, 794]
drnnLSTMtanhMakespan33=[793, 794, 795, 793, 792, 795, 794, 794, 794, 794, 794, 795]
drnnLSTMtanhMakespan34=[794, 795, 795, 794, 794, 794, 794, 793, 794, 794, 794, 794]
drnnLSTMtanhMakespan35=[794, 794, 797, 793, 792, 794, 793, 794, 795, 794, 795, 792]
drnnLSTMtanhMakespan36=[794, 794, 793, 794, 795, 797, 795, 795, 794, 795, 793, 794]
drnnLSTMtanhMakespan37=[795, 793, 795, 794, 795, 798, 795, 794, 795, 793, 795, 794]
drnnLSTMtanhMakespan38=[794, 795, 793, 795, 794, 794, 794, 794, 794, 794, 797, 795]
drnnLSTMtanhMakespan39=[794, 794, 795, 794, 795, 795, 794, 795, 794, 795, 798, 797]
drnnLSTMtanhMakespan40=[795, 795, 794, 795, 794, 795, 795, 794, 794, 794, 795, 795]
drnnLSTMtanhMakespan41=[794, 795, 792, 794, 794, 798, 795, 794, 794, 794, 793, 795]
drnnLSTMtanhMakespan42=[793, 795, 794, 793, 794, 794, 792, 794, 795, 794, 794, 793]
drnnLSTMtanhMakespan43=[793, 792, 793, 794, 794, 795, 792, 794, 795, 794, 795, 794]
drnnLSTMtanhMakespan44=[793, 794, 795, 795, 794, 794, 795, 798, 794, 792, 795, 794]
drnnLSTMtanhMakespan45=[795, 794, 794, 794, 794, 792, 794, 795, 794, 796, 795, 794]
drnnLSTMtanhMakespan46=[794, 793, 793, 795, 795, 794, 794, 794, 794, 796, 794, 794]
drnnLSTMtanhMakespan47=[794, 794, 795, 794, 794, 795, 792, 795, 794, 795, 795, 794]
drnnLSTMtanhMakespan48=[794, 795, 794, 794, 794, 792, 794, 795, 796, 794, 794, 795]
drnnLSTMtanhMakespan49=[794, 794, 794, 794, 794, 794, 792, 794, 793, 794, 795, 794]
drnnLSTMtanhRewards0=[-0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17725973169122497, -0.1759911894273128, -0.177078750549934, -0.177078750549934, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnLSTMtanhRewards1=[-0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765]
drnnLSTMtanhRewards2=[-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.1768976897689769, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17653532907770195, -0.17562802996914942]
drnnLSTMtanhRewards3=[-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17671654929577466, -0.17508269018743108, -0.17653532907770195, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.1768976897689769, -0.1759911894273128, -0.17725973169122497]
drnnLSTMtanhRewards4=[-0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17725973169122497, -0.17617264919621228]
drnnLSTMtanhRewards5=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.1776214552648934, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drnnLSTMtanhRewards6=[-0.17617264919621228, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17671654929577466, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228]
drnnLSTMtanhRewards7=[-0.1759911894273128, -0.177078750549934, -0.17653532907770195, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387]
drnnLSTMtanhRewards8=[-0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1759911894273128, -0.17617264919621228, -0.1768976897689769]
drnnLSTMtanhRewards9=[-0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026]
drnnLSTMtanhRewards10=[-0.1768976897689769, -0.1759911894273128, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17544633017412387, -0.1759911894273128]
drnnLSTMtanhRewards11=[-0.17526455026455026, -0.17671654929577466, -0.177078750549934, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17580964970257765, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.1759911894273128, -0.1768976897689769]
drnnLSTMtanhRewards12=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1763540290620872, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drnnLSTMtanhRewards13=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108]
drnnLSTMtanhRewards14=[-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765]
drnnLSTMtanhRewards15=[-0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.1763540290620872, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drnnLSTMtanhRewards16=[-0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17617264919621228]
drnnLSTMtanhRewards17=[-0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387]
drnnLSTMtanhRewards18=[-0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108]
drnnLSTMtanhRewards19=[-0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards20=[-0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards21=[-0.17562802996914942, -0.17526455026455026, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026]
drnnLSTMtanhRewards22=[-0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards23=[-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.1759911894273128, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards24=[-0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards25=[-0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221]
drnnLSTMtanhRewards26=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards27=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards28=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards29=[-0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards30=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards31=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards32=[-0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards33=[-0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards34=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards35=[-0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards36=[-0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108]
drnnLSTMtanhRewards37=[-0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards38=[-0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drnnLSTMtanhRewards39=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942]
drnnLSTMtanhRewards40=[-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards41=[-0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026]
drnnLSTMtanhRewards42=[-0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221]
drnnLSTMtanhRewards43=[-0.1749007498897221, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards44=[-0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards45=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards46=[-0.17508269018743108, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards47=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards48=[-0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards49=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
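# The "...Rewards..." lists in this file mirror the corresponding "...Makespan..."
# lists element by element: each reward appears to equal -makespan / (makespan + 3741)
# (e.g. 794 -> -0.17508269018743108, 800 -> -0.17617264919621228). The offset 3741 is
# inferred from the numbers themselves and is not stated anywhere in this script, so
# treat this helper as a sketch of the apparent mapping, not as the original reward
# definition used during training.
def makespan_to_reward(makespan, offset=3741):
    """Reproduce the reward paired with a given makespan in these lists."""
    return -makespan / (makespan + offset)

# Quick self-check against values listed above.
assert abs(makespan_to_reward(794) - (-0.17508269018743108)) < 1e-12
assert abs(makespan_to_reward(800) - (-0.17617264919621228)) < 1e-12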
# Deep Recurrent Reinforcement Learning: 1 LSTM layer and 4 Dense layers, relu activation function, 12 episodes, 50 iterations
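# Illustrative sketch (not taken from the original experiment code): a minimal Keras
# builder for the network families labelled in the comments of this file -- one
# recurrent layer (LSTM or GRU) followed by 4 Dense layers, using the named activation.
# The layer width, input shape, output size, and the choice to count the linear output
# head as the 4th Dense layer are illustrative assumptions; only the layer counts, cell
# types and activations come from the comments themselves.
def build_drnn(cell="LSTM", activation="relu",
               timesteps=12, n_features=1, n_actions=10, units=64):
    """One LSTM/GRU layer plus 4 Dense layers, matching the labelled variants."""
    from tensorflow import keras          # local import: only needed if the sketch is called
    from tensorflow.keras import layers
    Recurrent = layers.LSTM if cell == "LSTM" else layers.GRU
    return keras.Sequential([
        keras.Input(shape=(timesteps, n_features)),
        Recurrent(units, activation=activation),
        layers.Dense(units, activation=activation),
        layers.Dense(units, activation=activation),
        layers.Dense(units, activation=activation),
        layers.Dense(n_actions),          # counted here as the 4th Dense layer
    ])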
drnnLSTMreluMakespan0=[805, 800, 800, 800, 794, 800, 798, 809, 795, 800, 798, 798]
drnnLSTMreluMakespan1=[798, 798, 796, 799, 800, 796, 796, 798, 798, 794, 798, 800]
drnnLSTMreluMakespan2=[805, 805, 798, 799, 806, 799, 806, 799, 800, 798, 805, 795]
drnnLSTMreluMakespan3=[800, 800, 800, 796, 800, 800, 799, 806, 808, 798, 797, 798]
drnnLSTMreluMakespan4=[805, 805, 795, 796, 799, 804, 798, 794, 798, 794, 796, 810]
drnnLSTMreluMakespan5=[798, 798, 798, 795, 800, 798, 796, 802, 800, 800, 805, 801]
drnnLSTMreluMakespan6=[800, 798, 798, 795, 800, 796, 800, 798, 799, 796, 805, 800]
drnnLSTMreluMakespan7=[800, 800, 800, 799, 798, 798, 800, 805, 800, 799, 800, 801]
drnnLSTMreluMakespan8=[799, 800, 800, 799, 795, 795, 805, 795, 798, 800, 798, 800]
drnnLSTMreluMakespan9=[800, 796, 805, 798, 798, 795, 805, 800, 799, 795, 800, 805]
drnnLSTMreluMakespan10=[805, 798, 805, 800, 801, 805, 799, 805, 798, 800, 800, 798]
drnnLSTMreluMakespan11=[798, 803, 800, 797, 795, 796, 794, 799, 800, 800, 800, 796]
drnnLSTMreluMakespan12=[799, 798, 799, 795, 798, 795, 798, 798, 798, 795, 798, 798]
drnnLSTMreluMakespan13=[798, 798, 799, 796, 798, 796, 800, 799, 796, 794, 796, 795]
drnnLSTMreluMakespan14=[796, 798, 806, 799, 804, 798, 805, 798, 800, 805, 794, 800]
drnnLSTMreluMakespan15=[806, 795, 800, 796, 798, 796, 810, 798, 799, 798, 800, 800]
drnnLSTMreluMakespan16=[799, 796, 798, 798, 798, 800, 798, 810, 796, 805, 800, 795]
drnnLSTMreluMakespan17=[798, 798, 798, 794, 798, 805, 801, 798, 800, 799, 798, 798]
drnnLSTMreluMakespan18=[795, 800, 794, 798, 797, 798, 794, 800, 797, 796, 794, 794]
drnnLSTMreluMakespan19=[798, 802, 794, 798, 799, 795, 797, 795, 800, 796, 797, 796]
drnnLSTMreluMakespan20=[794, 797, 795, 794, 799, 795, 795, 795, 800, 797, 794, 798]
drnnLSTMreluMakespan21=[799, 798, 796, 795, 794, 798, 795, 795, 798, 798, 795, 794]
drnnLSTMreluMakespan22=[794, 794, 795, 797, 795, 795, 795, 792, 794, 795, 794, 794]
drnnLSTMreluMakespan23=[794, 794, 794, 794, 795, 796, 793, 794, 795, 794, 797, 795]
drnnLSTMreluMakespan24=[794, 792, 792, 794, 796, 792, 794, 795, 794, 792, 796, 795]
drnnLSTMreluMakespan25=[794, 795, 795, 794, 794, 792, 795, 792, 795, 794, 794, 794]
drnnLSTMreluMakespan26=[795, 794, 794, 795, 794, 794, 793, 794, 797, 795, 794, 795]
drnnLSTMreluMakespan27=[794, 794, 795, 796, 795, 797, 794, 794, 795, 801, 794, 795]
drnnLSTMreluMakespan28=[795, 795, 795, 795, 794, 792, 794, 797, 794, 795, 795, 795]
drnnLSTMreluMakespan29=[794, 792, 798, 794, 797, 795, 793, 795, 795, 794, 795, 795]
drnnLSTMreluMakespan30=[795, 794, 798, 794, 794, 795, 792, 796, 794, 796, 794, 794]
drnnLSTMreluMakespan31=[794, 795, 795, 794, 795, 794, 795, 795, 794, 794, 795, 795]
drnnLSTMreluMakespan32=[798, 794, 794, 794, 798, 792, 795, 795, 795, 796, 794, 795]
drnnLSTMreluMakespan33=[794, 796, 794, 794, 794, 795, 794, 794, 797, 793, 793, 795]
drnnLSTMreluMakespan34=[794, 794, 795, 794, 794, 793, 794, 795, 793, 795, 795, 794]
drnnLSTMreluMakespan35=[798, 796, 795, 794, 795, 795, 795, 795, 794, 795, 797, 795]
drnnLSTMreluMakespan36=[794, 796, 794, 794, 794, 794, 795, 795, 797, 796, 795, 795]
drnnLSTMreluMakespan37=[795, 794, 796, 795, 795, 795, 795, 794, 792, 797, 794, 793]
drnnLSTMreluMakespan38=[794, 798, 794, 792, 794, 792, 795, 797, 793, 794, 794, 797]
drnnLSTMreluMakespan39=[792, 794, 794, 794, 792, 795, 795, 795, 794, 794, 795, 794]
drnnLSTMreluMakespan40=[792, 795, 795, 792, 795, 795, 794, 795, 794, 795, 794, 795]
drnnLSTMreluMakespan41=[794, 797, 795, 794, 795, 795, 798, 794, 795, 796, 796, 794]
drnnLSTMreluMakespan42=[794, 795, 795, 795, 794, 795, 795, 794, 794, 795, 793, 795]
drnnLSTMreluMakespan43=[795, 794, 795, 794, 795, 795, 792, 794, 794, 795, 794, 795]
drnnLSTMreluMakespan44=[795, 794, 792, 795, 794, 794, 795, 794, 796, 795, 796, 794]
drnnLSTMreluMakespan45=[795, 794, 793, 794, 793, 795, 794, 794, 795, 794, 795, 794]
drnnLSTMreluMakespan46=[794, 796, 793, 794, 794, 795, 799, 795, 794, 794, 794, 794]
drnnLSTMreluMakespan47=[794, 794, 794, 794, 795, 793, 795, 795, 794, 795, 795, 795]
drnnLSTMreluMakespan48=[794, 794, 795, 794, 795, 795, 795, 794, 794, 795, 795, 794]
drnnLSTMreluMakespan49=[795, 795, 795, 794, 795, 795, 794, 795, 793, 793, 792, 792]
drnnLSTMreluRewards0=[-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.1778021978021978, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards1=[-0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17617264919621228]
drnnLSTMreluRewards2=[-0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17725973169122497, -0.1759911894273128, -0.17725973169122497, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.177078750549934, -0.17526455026455026]
drnnLSTMreluRewards3=[-0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17725973169122497, -0.1776214552648934, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765]
drnnLSTMreluRewards4=[-0.177078750549934, -0.177078750549934, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.1768976897689769, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.17798286090969018]
drnnLSTMreluRewards5=[-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17653532907770195, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.1763540290620872]
drnnLSTMreluRewards6=[-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.177078750549934, -0.17617264919621228]
drnnLSTMreluRewards7=[-0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.1763540290620872]
drnnLSTMreluRewards8=[-0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.177078750549934, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228]
drnnLSTMreluRewards9=[-0.17617264919621228, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17526455026455026, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17617264919621228, -0.177078750549934]
drnnLSTMreluRewards10=[-0.177078750549934, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drnnLSTMreluRewards11=[-0.17580964970257765, -0.17671654929577466, -0.17617264919621228, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387]
drnnLSTMreluRewards12=[-0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards13=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.1759911894273128, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnLSTMreluRewards14=[-0.17544633017412387, -0.17580964970257765, -0.17725973169122497, -0.1759911894273128, -0.1768976897689769, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17508269018743108, -0.17617264919621228]
drnnLSTMreluRewards15=[-0.17725973169122497, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17798286090969018, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnLSTMreluRewards16=[-0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17798286090969018, -0.17544633017412387, -0.177078750549934, -0.17617264919621228, -0.17526455026455026]
drnnLSTMreluRewards17=[-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.177078750549934, -0.1763540290620872, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards18=[-0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17562802996914942, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards19=[-0.17580964970257765, -0.17653532907770195, -0.17508269018743108, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387]
drnnLSTMreluRewards20=[-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17562802996914942, -0.17508269018743108, -0.17580964970257765]
drnnLSTMreluRewards21=[-0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards22=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards23=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drnnLSTMreluRewards24=[-0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17508269018743108, -0.17544633017412387, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17544633017412387, -0.17526455026455026]
drnnLSTMreluRewards25=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards26=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards27=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1763540290620872, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards28=[-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards29=[-0.17508269018743108, -0.17471872931833224, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards30=[-0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards31=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards32=[-0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards33=[-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026]
drnnLSTMreluRewards34=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards35=[-0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026]
drnnLSTMreluRewards36=[-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards37=[-0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17562802996914942, -0.17508269018743108, -0.1749007498897221]
drnnLSTMreluRewards38=[-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942]
drnnLSTMreluRewards39=[-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards40=[-0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards41=[-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17508269018743108]
drnnLSTMreluRewards42=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026]
drnnLSTMreluRewards43=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards44=[-0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drnnLSTMreluRewards45=[-0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards46=[-0.17508269018743108, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards47=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards48=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards49=[-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.1749007498897221, -0.17471872931833224, -0.17471872931833224]
# Deep Recurrent Reinforcement Learning: 1 GRU layer and 4 Dense layers, tanh activation function, 12 episodes, 50 iterations
drnnGRUtanhMakespan0 = [798, 799, 798, 804, 805, 799, 801, 801, 801, 799, 798, 796]
drnnGRUtanhMakespan1 = [800, 798, 798, 798, 798, 798, 801, 798, 795, 796, 800, 796]
drnnGRUtanhMakespan2 = [795, 804, 805, 800, 800, 796, 804, 800, 795, 798, 798, 801]
drnnGRUtanhMakespan3 = [806, 796, 794, 797, 798, 800, 800, 808, 805, 798, 800, 809]
drnnGRUtanhMakespan4 = [805, 801, 795, 798, 798, 800, 796, 796, 805, 798, 799, 798]
drnnGRUtanhMakespan5 = [804, 799, 798, 804, 796, 799, 798, 805, 796, 805, 798, 800]
drnnGRUtanhMakespan6 = [800, 799, 794, 801, 799, 796, 800, 804, 797, 796, 800, 798]
drnnGRUtanhMakespan7 = [798, 800, 810, 810, 805, 800, 795, 798, 800, 805, 799, 800]
drnnGRUtanhMakespan8 = [798, 797, 800, 800, 804, 805, 798, 798, 801, 795, 798, 809]
drnnGRUtanhMakespan9 = [803, 800, 800, 805, 805, 798, 804, 803, 805, 801, 810, 801]
drnnGRUtanhMakespan10 = [798, 799, 798, 798, 805, 804, 805, 798, 799, 798, 800, 800]
drnnGRUtanhMakespan11 = [796, 795, 805, 800, 800, 798, 795, 804, 805, 798, 800, 800]
drnnGRUtanhMakespan12 = [799, 799, 809, 800, 799, 799, 797, 805, 799, 800, 798, 795]
drnnGRUtanhMakespan13 = [805, 800, 800, 805, 800, 799, 798, 801, 798, 797, 805, 800]
drnnGRUtanhMakespan14 = [800, 798, 800, 800, 800, 804, 804, 799, 799, 800, 798, 798]
drnnGRUtanhMakespan15 = [805, 800, 795, 800, 804, 795, 800, 798, 799, 798, 800, 796]
drnnGRUtanhMakespan16 = [806, 795, 801, 799, 799, 796, 796, 794, 802, 796, 800, 802]
drnnGRUtanhMakespan17 = [796, 800, 798, 800, 794, 800, 804, 805, 798, 810, 800, 798]
drnnGRUtanhMakespan18 = [798, 800, 794, 794, 797, 798, 800, 805, 798, 798, 804, 798]
drnnGRUtanhMakespan19 = [796, 800, 806, 799, 796, 800, 798, 805, 798, 799, 797, 805]
drnnGRUtanhMakespan20 = [805, 800, 799, 796, 805, 805, 805, 794, 809, 796, 800, 797]
drnnGRUtanhMakespan21 = [798, 800, 800, 800, 798, 801, 796, 801, 801, 801, 795, 799]
drnnGRUtanhMakespan22 = [798, 801, 797, 800, 799, 795, 799, 799, 800, 801, 800, 799]
drnnGRUtanhMakespan23 = [800, 798, 799, 805, 794, 800, 798, 796, 796, 804, 800, 794]
drnnGRUtanhMakespan24 = [800, 800, 798, 805, 804, 799, 798, 801, 800, 798, 798, 798]
drnnGRUtanhMakespan25 = [798, 798, 798, 795, 800, 803, 798, 798, 800, 799, 796, 798]
drnnGRUtanhMakespan26 = [796, 798, 798, 798, 805, 796, 798, 798, 805, 795, 801, 796]
drnnGRUtanhMakespan27 = [794, 796, 796, 800, 800, 798, 800, 798, 802, 798, 797, 798]
drnnGRUtanhMakespan28 = [799, 799, 800, 800, 798, 802, 799, 798, 795, 795, 794, 798]
drnnGRUtanhMakespan29 = [798, 796, 796, 797, 796, 798, 800, 800, 796, 798, 800, 795]
drnnGRUtanhMakespan30 = [799, 798, 795, 795, 800, 795, 798, 798, 799, 798, 805, 799]
drnnGRUtanhMakespan31 = [795, 799, 794, 794, 796, 795, 795, 794, 798, 797, 798, 795]
drnnGRUtanhMakespan32 = [797, 798, 795, 796, 798, 795, 797, 798, 795, 794, 795, 796]
drnnGRUtanhMakespan33 = [799, 795, 794, 794, 798, 795, 798, 797, 800, 796, 795, 794]
drnnGRUtanhMakespan34 = [798, 795, 798, 796, 798, 794, 796, 798, 798, 798, 796, 797]
drnnGRUtanhMakespan35 = [795, 798, 796, 798, 794, 801, 795, 800, 795, 800, 794, 800]
drnnGRUtanhMakespan36 = [798, 799, 796, 797, 795, 794, 800, 795, 795, 794, 795, 795]
drnnGRUtanhMakespan37 = [799, 798, 795, 795, 794, 795, 795, 796, 805, 795, 798, 796]
drnnGRUtanhMakespan38 = [798, 794, 795, 795, 795, 796, 795, 796, 800, 798, 797, 796]
drnnGRUtanhMakespan39 = [794, 795, 795, 797, 795, 795, 794, 794, 798, 795, 794, 798]
drnnGRUtanhMakespan40 = [795, 795, 795, 795, 795, 795, 794, 794, 793, 797, 794, 795]
drnnGRUtanhMakespan41 = [794, 794, 795, 793, 795, 795, 792, 794, 795, 794, 794, 794]
drnnGRUtanhMakespan42 = [795, 795, 795, 796, 794, 797, 795, 795, 792, 795, 796, 793]
drnnGRUtanhMakespan43 = [794, 795, 795, 794, 795, 794, 798, 794, 797, 795, 794, 794]
drnnGRUtanhMakespan44 = [795, 795, 793, 794, 795, 794, 795, 795, 794, 794, 795, 794]
drnnGRUtanhMakespan45 = [794, 794, 794, 794, 794, 794, 795, 794, 794, 794, 796, 795]
drnnGRUtanhMakespan46 = [795, 794, 795, 794, 794, 794, 793, 794, 795, 795, 794, 797]
drnnGRUtanhMakespan47 = [794, 794, 794, 794, 795, 794, 795, 792, 794, 795, 794, 794]
drnnGRUtanhMakespan48 = [795, 794, 794, 794, 795, 798, 794, 794, 794, 795, 794, 794]
drnnGRUtanhMakespan49 = [795, 795, 794, 795, 793, 795, 796, 794, 795, 794, 794, 797]
drnnGRUtanhRewards0 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.1768976897689769, -0.177078750549934, -0.1759911894273128, -0.1763540290620872, -0.1763540290620872, -0.1763540290620872, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387]
drnnGRUtanhRewards1 = [-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387]
drnnGRUtanhRewards2 = [-0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872]
drnnGRUtanhRewards3 = [-0.17725973169122497, -0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.1776214552648934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.1778021978021978]
drnnGRUtanhRewards4 = [-0.177078750549934, -0.1763540290620872, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUtanhRewards5 = [-0.1768976897689769, -0.1759911894273128, -0.17580964970257765, -0.1768976897689769, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17617264919621228]
drnnGRUtanhRewards6 = [-0.17617264919621228, -0.1759911894273128, -0.17508269018743108, -0.1763540290620872, -0.1759911894273128, -0.17544633017412387, -0.17617264919621228, -0.1768976897689769, -0.17562802996914942, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765]
drnnGRUtanhRewards7 = [-0.17580964970257765, -0.17617264919621228, -0.17798286090969018, -0.177078750549934, -0.17798286090969018, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17617264919621228]
drnnGRUtanhRewards8 = [-0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17580964970257765, -0.1778021978021978]
drnnGRUtanhRewards9 = [-0.17671654929577466, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.177078750549934, -0.1763540290620872, -0.17798286090969018, -0.1763540290620872]
drnnGRUtanhRewards10 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnGRUtanhRewards11 = [-0.17544633017412387, -0.17526455026455026, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnGRUtanhRewards12 = [-0.1759911894273128, -0.1759911894273128, -0.1778021978021978, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17562802996914942, -0.177078750549934, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026]
drnnGRUtanhRewards13 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17562802996914942, -0.177078750549934, -0.17617264919621228]
drnnGRUtanhRewards14 = [-0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1768976897689769, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnGRUtanhRewards15 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17526455026455026, -0.1768976897689769, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387]
drnnGRUtanhRewards16 = [-0.17725973169122497, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17508269018743108, -0.17653532907770195, -0.17544633017412387, -0.17617264919621228, -0.17653532907770195]
drnnGRUtanhRewards17 = [-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17798286090969018, -0.17617264919621228, -0.17580964970257765]
drnnGRUtanhRewards18 = [-0.17580964970257765, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.1768976897689769, -0.17580964970257765]
drnnGRUtanhRewards19 = [-0.17544633017412387, -0.17617264919621228, -0.17725973169122497, -0.1759911894273128, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17562802996914942, -0.1759911894273128, -0.177078750549934]
drnnGRUtanhRewards20 = [-0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17544633017412387, -0.177078750549934, -0.177078750549934, -0.177078750549934, -0.17508269018743108, -0.1778021978021978, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942]
drnnGRUtanhRewards21 = [-0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.17544633017412387, -0.1763540290620872, -0.1763540290620872, -0.1763540290620872, -0.17526455026455026, -0.1759911894273128]
drnnGRUtanhRewards22 = [-0.17580964970257765, -0.1763540290620872, -0.17562802996914942, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.1763540290620872, -0.17617264919621228, -0.1759911894273128]
drnnGRUtanhRewards23 = [-0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17508269018743108]
drnnGRUtanhRewards24 = [-0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.177078750549934, -0.1768976897689769, -0.17580964970257765, -0.1763540290620872, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765]
drnnGRUtanhRewards25 = [-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17671654929577466, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765]
drnnGRUtanhRewards26 = [-0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17526455026455026, -0.1763540290620872, -0.17544633017412387]
drnnGRUtanhRewards27 = [-0.17508269018743108, -0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765]
drnnGRUtanhRewards28 = [-0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drnnGRUtanhRewards29 = [-0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026]
drnnGRUtanhRewards30 = [-0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.1759911894273128]
drnnGRUtanhRewards31 = [-0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026]
drnnGRUtanhRewards32 = [-0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnGRUtanhRewards33 = [-0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drnnGRUtanhRewards34 = [-0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942]
drnnGRUtanhRewards35 = [-0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.1763540290620872, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228]
drnnGRUtanhRewards36 = [-0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnGRUtanhRewards37 = [-0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.177078750549934, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drnnGRUtanhRewards38 = [-0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17562802996914942, -0.17544633017412387]
drnnGRUtanhRewards39 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765]
drnnGRUtanhRewards40 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026]
drnnGRUtanhRewards41 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards42 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17544633017412387, -0.1749007498897221]
drnnGRUtanhRewards43 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards44 = [-0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnGRUtanhRewards45 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnGRUtanhRewards46 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942]
drnnGRUtanhRewards47 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards48 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards49 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942]
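# Optional summary sketch: the mean makespan of each of the 50 iterations for the
# GRU/tanh configuration above, handy for eyeballing convergence. The variable names
# looked up below are exactly the ones defined in this file; the summary list name
# itself is new.
drnnGRUtanhMeanMakespans = [
    sum(vals) / len(vals)
    for vals in (globals()[f"drnnGRUtanhMakespan{i}"] for i in range(50))
]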
# Deep Recurrent Reinforcement Learning: 1 GRU layer and 4 Dense layers, relu activation function, 12 episodes, 50 iterations
drnnGRUreluMakespan0 = [800, 799, 798, 797, 798, 800, 800, 796, 800, 794, 800, 800]
drnnGRUreluMakespan1 = [798, 800, 805, 795, 799, 808, 795, 800, 796, 798, 799, 798]
drnnGRUreluMakespan2 = [799, 800, 806, 800, 800, 805, 805, 798, 799, 807, 800, 800]
drnnGRUreluMakespan3 = [798, 795, 799, 800, 800, 796, 798, 800, 800, 804, 805, 800]
drnnGRUreluMakespan4 = [811, 800, 799, 800, 805, 798, 798, 799, 796, 804, 805, 804]
drnnGRUreluMakespan5 = [799, 795, 797, 800, 798, 800, 800, 798, 800, 797, 800, 798]
drnnGRUreluMakespan6 = [798, 800, 798, 799, 797, 798, 800, 796, 801, 799, 795, 798]
drnnGRUreluMakespan7 = [800, 804, 795, 801, 796, 806, 805, 798, 800, 799, 799, 804]
drnnGRUreluMakespan8 = [800, 799, 799, 800, 805, 796, 800, 800, 810, 796, 800, 798]
drnnGRUreluMakespan9 = [794, 800, 799, 805, 800, 800, 798, 798, 796, 795, 798, 796]
drnnGRUreluMakespan10 = [798, 800, 798, 801, 795, 802, 796, 809, 800, 800, 798, 795]
drnnGRUreluMakespan11 = [804, 800, 799, 799, 798, 803, 798, 798, 805, 803, 800, 796]
drnnGRUreluMakespan12 = [800, 799, 805, 797, 798, 796, 799, 794, 799, 805, 799, 800]
drnnGRUreluMakespan13 = [796, 800, 798, 800, 795, 799, 800, 804, 800, 794, 805, 805]
drnnGRUreluMakespan14 = [800, 795, 796, 798, 798, 801, 805, 794, 800, 801, 801, 796]
drnnGRUreluMakespan15 = [798, 800, 796, 796, 798, 794, 797, 800, 796, 801, 795, 799]
drnnGRUreluMakespan16 = [800, 805, 794, 800, 799, 800, 805, 801, 798, 800, 801, 799]
drnnGRUreluMakespan17 = [797, 803, 801, 808, 794, 799, 799, 800, 805, 796, 801, 796]
drnnGRUreluMakespan18 = [805, 800, 800, 804, 799, 798, 800, 799, 804, 796, 800, 804]
drnnGRUreluMakespan19 = [804, 798, 800, 799, 799, 799, 805, 795, 801, 799, 799, 805]
drnnGRUreluMakespan20 = [799, 804, 796, 798, 796, 798, 800, 805, 799, 810, 800, 800]
drnnGRUreluMakespan21 = [798, 799, 799, 805, 798, 798, 805, 798, 794, 799, 798, 798]
drnnGRUreluMakespan22 = [799, 798, 798, 796, 798, 805, 799, 798, 798, 799, 796, 798]
drnnGRUreluMakespan23 = [798, 805, 808, 798, 798, 805, 810, 796, 804, 799, 800, 799]
drnnGRUreluMakespan24 = [798, 796, 798, 795, 800, 798, 799, 798, 797, 805, 798, 800]
drnnGRUreluMakespan25 = [799, 796, 799, 798, 805, 798, 798, 800, 796, 794, 810, 798]
drnnGRUreluMakespan26 = [799, 798, 805, 800, 802, 798, 799, 799, 799, 794, 802, 797]
drnnGRUreluMakespan27 = [798, 800, 805, 796, 798, 795, 802, 796, 798, 800, 798, 794]
drnnGRUreluMakespan28 = [796, 805, 798, 800, 800, 798, 810, 798, 798, 798, 796, 796]
drnnGRUreluMakespan29 = [800, 798, 798, 802, 794, 798, 796, 808, 800, 800, 798, 799]
drnnGRUreluMakespan30 = [798, 796, 798, 798, 794, 798, 794, 800, 796, 794, 800, 800]
drnnGRUreluMakespan31 = [794, 802, 797, 799, 798, 800, 799, 799, 796, 796, 798, 798]
drnnGRUreluMakespan32 = [799, 798, 794, 795, 798, 805, 804, 797, 795, 800, 796, 798]
drnnGRUreluMakespan33 = [803, 799, 805, 796, 794, 798, 797, 798, 798, 794, 794, 798]
drnnGRUreluMakespan34 = [810, 796, 795, 798, 799, 798, 796, 795, 795, 797, 798, 798]
drnnGRUreluMakespan35 = [799, 799, 799, 799, 795, 798, 795, 800, 796, 795, 795, 796]
drnnGRUreluMakespan36 = [795, 797, 798, 799, 799, 799, 800, 794, 796, 795, 798, 800]
drnnGRUreluMakespan37 = [800, 798, 799, 794, 800, 796, 798, 798, 797, 800, 794, 798]
drnnGRUreluMakespan38 = [800, 799, 794, 796, 795, 800, 796, 804, 800, 795, 800, 798]
drnnGRUreluMakespan39 = [794, 798, 795, 804, 805, 799, 798, 800, 796, 798, 795, 794]
drnnGRUreluMakespan40 = [799, 798, 796, 798, 798, 799, 800, 796, 798, 798, 799, 798]
drnnGRUreluMakespan41 = [796, 798, 800, 797, 799, 796, 797, 796, 799, 804, 805, 798]
drnnGRUreluMakespan42 = [798, 794, 795, 799, 799, 798, 797, 798, 798, 798, 798, 795]
drnnGRUreluMakespan43 = [799, 798, 794, 794, 795, 794, 795, 799, 799, 800, 799, 794]
drnnGRUreluMakespan44 = [795, 796, 795, 799, 794, 795, 794, 796, 795, 794, 795, 796]
drnnGRUreluMakespan45 = [794, 797, 794, 795, 796, 795, 794, 799, 795, 794, 798, 798]
drnnGRUreluMakespan46 = [795, 795, 794, 795, 794, 794, 792, 794, 795, 797, 794, 794]
drnnGRUreluMakespan47 = [798, 796, 797, 798, 794, 798, 794, 797, 794, 803, 798, 798]
drnnGRUreluMakespan48 = [795, 794, 796, 798, 795, 794, 796, 795, 796, 794, 796, 796]
drnnGRUreluMakespan49 = [798, 798, 796, 798, 798, 796, 796, 798, 798, 798, 796, 798]
drnnGRUreluRewards0 = [-0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards1 = [-0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17526455026455026, -0.1759911894273128, -0.1776214552648934, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUreluRewards2 = [-0.1759911894273128, -0.17617264919621228, -0.17725973169122497, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.1774406332453826, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards3 = [-0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.177078750549934, -0.17617264919621228]
drnnGRUreluRewards4 = [-0.1781634446397188, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.1768976897689769, -0.177078750549934, -0.1768976897689769]
drnnGRUreluRewards5 = [-0.1759911894273128, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards6 = [-0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765]
drnnGRUreluRewards7 = [-0.17617264919621228, -0.1768976897689769, -0.17526455026455026, -0.1763540290620872, -0.17544633017412387, -0.17725973169122497, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.1768976897689769]
drnnGRUreluRewards8 = [-0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17798286090969018, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards9 = [-0.17508269018743108, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drnnGRUreluRewards10 = [-0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.17526455026455026, -0.17653532907770195, -0.17544633017412387, -0.1778021978021978, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026]
drnnGRUreluRewards11 = [-0.1768976897689769, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17580964970257765, -0.17671654929577466, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17671654929577466, -0.17617264919621228, -0.17544633017412387]
drnnGRUreluRewards12 = [-0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17508269018743108, -0.1759911894273128, -0.177078750549934, -0.1759911894273128, -0.17617264919621228]
drnnGRUreluRewards13 = [-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.1768976897689769, -0.17617264919621228, -0.17508269018743108, -0.177078750549934, -0.177078750549934]
drnnGRUreluRewards14 = [-0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.1763540290620872, -0.1763540290620872, -0.17544633017412387]
drnnGRUreluRewards15 = [-0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.1763540290620872, -0.17526455026455026, -0.1759911894273128]
drnnGRUreluRewards16 = [-0.17617264919621228, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.1763540290620872, -0.17580964970257765, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128]
drnnGRUreluRewards17 = [-0.17562802996914942, -0.17671654929577466, -0.1763540290620872, -0.1776214552648934, -0.17508269018743108, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17544633017412387, -0.1763540290620872, -0.17544633017412387]
drnnGRUreluRewards18 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1768976897689769, -0.17544633017412387, -0.17617264919621228, -0.1768976897689769]
drnnGRUreluRewards19 = [-0.1768976897689769, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128, -0.1759911894273128, -0.177078750549934]
drnnGRUreluRewards20 = [-0.1759911894273128, -0.1768976897689769, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17798286090969018, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards21 = [-0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17508269018743108, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards22 = [-0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.177078750549934, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765]
drnnGRUreluRewards23 = [-0.17580964970257765, -0.177078750549934, -0.1776214552648934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17798286090969018, -0.17544633017412387, -0.1768976897689769, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128]
drnnGRUreluRewards24 = [-0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.177078750549934, -0.17580964970257765, -0.17617264919621228]
drnnGRUreluRewards25 = [-0.1759911894273128, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17798286090969018, -0.17580964970257765]
drnnGRUreluRewards26 = [-0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.17653532907770195, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17508269018743108, -0.17653532907770195, -0.17562802996914942]
drnnGRUreluRewards27 = [-0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17653532907770195, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17508269018743108]
drnnGRUreluRewards28 = [-0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17798286090969018, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387]
drnnGRUreluRewards29 = [-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17653532907770195, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.1776214552648934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128]
drnnGRUreluRewards30 = [-0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards31 = [-0.17508269018743108, -0.17653532907770195, -0.17562802996914942, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards32 = [-0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.1768976897689769, -0.177078750549934, -0.17562802996914942, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drnnGRUreluRewards33 = [-0.17671654929577466, -0.1759911894273128, -0.177078750549934, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765]
drnnGRUreluRewards34 = [-0.17798286090969018, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards35 = [-0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387]
drnnGRUreluRewards36 = [-0.17526455026455026, -0.17562802996914942, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228]
drnnGRUreluRewards37 = [-0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765]
drnnGRUreluRewards38 = [-0.17617264919621228, -0.1759911894273128, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards39 = [-0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108]
drnnGRUreluRewards40 = [-0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUreluRewards41 = [-0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.1759911894273128, -0.1768976897689769, -0.177078750549934, -0.17580964970257765]
drnnGRUreluRewards42 = [-0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026]
drnnGRUreluRewards43 = [-0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.17508269018743108]
drnnGRUreluRewards44 = [-0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387]
drnnGRUreluRewards45 = [-0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards46 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108]
drnnGRUreluRewards47 = [-0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17671654929577466, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards48 = [-0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387]
drnnGRUreluRewards49 = [-0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765]
# Deep Reinforcement Learning: 5 Dense layers, tanh activation function, 12 episodes, 50 iterations
drlTanhMakespan0 = [794, 794, 805, 799, 810, 800, 794, 810, 804, 806, 812, 808]
drlTanhMakespan1 = [796, 795, 795, 798, 799, 800, 800, 795, 797, 796, 797, 799]
drlTanhMakespan2 = [800, 797, 798, 801, 799, 800, 796, 795, 797, 796, 794, 798]
drlTanhMakespan3 = [800, 795, 799, 796, 799, 798, 795, 799, 795, 799, 798, 796]
drlTanhMakespan4 = [809, 795, 795, 800, 797, 795, 798, 798, 799, 799, 798, 798]
drlTanhMakespan5 = [795, 795, 795, 799, 795, 798, 795, 800, 795, 796, 795, 805]
drlTanhMakespan6 = [794, 800, 795, 793, 798, 795, 794, 798, 795, 799, 795, 796]
drlTanhMakespan7 = [795, 795, 795, 795, 798, 795, 797, 797, 795, 795, 798, 797]
drlTanhMakespan8 = [795, 795, 795, 794, 800, 800, 794, 795, 794, 794, 797, 795]
drlTanhMakespan9 = [793, 794, 796, 795, 796, 800, 794, 797, 793, 795, 798, 795]
drlTanhMakespan10 = [795, 795, 797, 794, 795, 798, 797, 795, 798, 794, 794, 794]
drlTanhMakespan11 = [795, 795, 795, 795, 797, 795, 795, 794, 795, 795, 795, 794]
drlTanhMakespan12 = [794, 798, 795, 794, 795, 795, 795, 797, 799, 795, 795, 795]
drlTanhMakespan13 = [795, 797, 795, 800, 796, 795, 796, 795, 795, 795, 798, 794]
drlTanhMakespan14 = [795, 795, 796, 794, 794, 794, 797, 795, 798, 795, 795, 793]
drlTanhMakespan15 = [799, 794, 795, 795, 795, 796, 801, 797, 795, 794, 795, 799]
drlTanhMakespan16 = [795, 795, 796, 798, 795, 795, 795, 795, 795, 798, 798, 796]
drlTanhMakespan17 = [800, 798, 795, 795, 798, 794, 795, 795, 797, 795, 796, 794]
drlTanhMakespan18 = [797, 800, 798, 797, 796, 794, 799, 797, 795, 796, 799, 798]
drlTanhMakespan19 = [797, 800, 795, 794, 794, 796, 795, 798, 796, 798, 797, 795]
drlTanhMakespan20 = [794, 795, 795, 799, 798, 797, 795, 795, 798, 795, 798, 795]
drlTanhMakespan21 = [796, 795, 795, 795, 795, 797, 798, 794, 797, 795, 796, 794]
drlTanhMakespan22 = [799, 796, 795, 795, 795, 795, 796, 795, 796, 798, 796, 795]
drlTanhMakespan23 = [799, 799, 795, 796, 796, 799, 796, 797, 794, 794, 798, 796]
drlTanhMakespan24 = [795, 795, 797, 800, 797, 795, 795, 796, 795, 795, 798, 799]
drlTanhMakespan25 = [795, 797, 795, 795, 795, 795, 800, 796, 795, 797, 795, 795]
drlTanhMakespan26 = [795, 795, 799, 794, 797, 794, 794, 798, 794, 796, 795, 798]
drlTanhMakespan27 = [796, 796, 795, 796, 798, 797, 794, 795, 794, 794, 794, 798]
drlTanhMakespan28 = [795, 795, 794, 798, 796, 796, 800, 797, 797, 796, 795, 794]
drlTanhMakespan29 = [795, 795, 798, 800, 797, 794, 796, 794, 792, 794, 794, 795]
drlTanhMakespan30 = [798, 797, 795, 799, 797, 800, 798, 799, 797, 800, 794, 796]
drlTanhMakespan31 = [794, 795, 800, 798, 800, 794, 800, 798, 799, 798, 798, 798]
drlTanhMakespan32 = [795, 795, 795, 794, 794, 794, 793, 795, 794, 793, 794, 795]
drlTanhMakespan33 = [794, 797, 792, 794, 795, 795, 797, 795, 795, 794, 792, 795]
drlTanhMakespan34 = [795, 794, 795, 798, 795, 796, 794, 795, 794, 794, 795, 794]
drlTanhMakespan35 = [796, 794, 797, 793, 794, 798, 795, 794, 793, 793, 795, 794]
drlTanhMakespan36 = [795, 795, 794, 795, 795, 795, 794, 795, 795, 793, 795, 794]
drlTanhMakespan37 = [794, 794, 798, 794, 794, 796, 795, 794, 793, 795, 795, 792]
drlTanhMakespan38 = [794, 796, 795, 794, 798, 798, 795, 795, 794, 794, 795, 794]
drlTanhMakespan39 = [794, 795, 795, 796, 792, 794, 795, 794, 795, 794, 794, 795]
drlTanhMakespan40 = [798, 795, 794, 795, 794, 794, 793, 795, 794, 794, 797, 794]
drlTanhMakespan41 = [795, 792, 795, 794, 794, 795, 794, 795, 792, 797, 795, 795]
drlTanhMakespan42 = [792, 794, 794, 795, 794, 794, 795, 794, 792, 794, 794, 794]
drlTanhMakespan43 = [794, 796, 794, 793, 795, 795, 793, 798, 794, 794, 798, 794]
drlTanhMakespan44 = [794, 794, 794, 794, 795, 794, 793, 794, 794, 795, 795, 794]
drlTanhMakespan45 = [790, 794, 793, 794, 793, 794, 795, 794, 791, 795, 795, 794]
drlTanhMakespan46 = [792, 794, 794, 794, 794, 794, 794, 793, 794, 794, 794, 794]
drlTanhMakespan47 = [794, 794, 794, 794, 794, 794, 794, 794, 792, 795, 793, 795]
drlTanhMakespan48 = [794, 794, 792, 792, 797, 794, 792, 794, 794, 795, 794, 795]
drlTanhMakespan49 = [795, 794, 794, 796, 794, 797, 794, 794, 794, 794, 794, 794]
drlTanhMakespan50 = [794, 792, 795, 794, 794, 794, 794, 794, 795, 794, 795, 794]
drlTanhMakespan51 = [794, 792, 796, 795, 794, 794, 795, 794, 795, 795, 795, 794]
drlTanhMakespan52 = [794, 794, 795, 792, 795, 795, 795, 792, 794, 793, 795, 794]
drlTanhMakespan53 = [794, 792, 794, 792, 794, 794, 794, 795, 795, 794, 794, 792]
drlTanhMakespan54 = [795, 793, 794, 794, 794, 792, 795, 794, 794, 792, 794, 796]
drlTanhMakespan55 = [795, 794, 794, 795, 795, 793, 794, 795, 794, 797, 795, 792]
drlTanhMakespan56 = [795, 795, 792, 795, 794, 795, 794, 794, 794, 795, 795, 795]
drlTanhMakespan57 = [795, 792, 795, 794, 795, 795, 792, 795, 794, 797, 792, 792]
drlTanhMakespan58 = [795, 795, 794, 795, 792, 794, 794, 794, 792, 792, 792, 793]
drlTanhMakespan59 = [795, 794, 792, 794, 794, 794, 792, 794, 794, 794, 793, 795]
drlTanhMakespan60 = [794, 795, 795, 795, 798, 794, 794, 794, 794, 794, 794, 792]
drlTanhMakespan61 = [792, 795, 794, 794, 795, 794, 792, 795, 795, 794, 794, 795]
drlTanhMakespan62 = [795, 794, 794, 794, 799, 794, 792, 794, 795, 795, 794, 793]
drlTanhMakespan63 = [791, 795, 792, 796, 794, 794, 792, 795, 793, 794, 792, 794]
drlTanhRewards0 = [-0.17508269018743108, -0.17508269018743108, -0.177078750549934, -0.1759911894273128, -0.17798286090969018, -0.17617264919621228, -0.17508269018743108, -0.17798286090969018, -0.1768976897689769, -0.17725973169122497, -0.17834394904458598, -0.1776214552648934]
drlTanhRewards1 = [-0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.17526455026455026, -0.17562802996914942, -0.17544633017412387, -0.17562802996914942, -0.1759911894273128]
drlTanhRewards2 = [-0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.1763540290620872, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17544633017412387, -0.17580964970257765]
drlTanhRewards3 = [-0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards4 = [-0.1778021978021978, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drlTanhRewards5 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.177078750549934]
drlTanhRewards6 = [-0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17544633017412387]
drlTanhRewards7 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942]
drlTanhRewards8 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drlTanhRewards9 = [-0.1749007498897221, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026]
drlTanhRewards10 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards11 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards12 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards13 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards14 = [-0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.1749007498897221]
drlTanhRewards15 = [-0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17562802996914942, -0.1763540290620872, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128]
drlTanhRewards16 = [-0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards17 = [-0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drlTanhRewards18 = [-0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.17562802996914942, -0.17544633017412387, -0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drlTanhRewards19 = [-0.17562802996914942, -0.17617264919621228, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026]
drlTanhRewards20 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026]
drlTanhRewards21 = [-0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drlTanhRewards22 = [-0.1759911894273128, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026]
drlTanhRewards23 = [-0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards24 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128]
drlTanhRewards25 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards26 = [-0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765]
drlTanhRewards27 = [-0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards28 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards29 = [-0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards30 = [-0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.1759911894273128, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387]
drlTanhRewards31 = [-0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drlTanhRewards32 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards33 = [-0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026]
drlTanhRewards34 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards35 = [-0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards36 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards37 = [-0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224]
drlTanhRewards38 = [-0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards39 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards40 = [-0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108]
drlTanhRewards41 = [-0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards42 = [-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards43 = [-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards44 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards45 = [-0.1749007498897221, -0.17435444714191128, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17453662842012357, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards46 = [-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards47 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026]
drlTanhRewards48 = [-0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards49 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards50 = [-0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards51 = [-0.17508269018743108, -0.17471872931833224, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards52 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards53 = [-0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224]
drlTanhRewards54 = [-0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17544633017412387]
drlTanhRewards55 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17471872931833224]
drlTanhRewards56 = [-0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards57 = [-0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17471872931833224]
drlTanhRewards58 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17471872931833224, -0.1749007498897221]
drlTanhRewards59 = [-0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026]
drlTanhRewards60 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224]
drlTanhRewards61 = [-0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards62 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1759911894273128, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221]
drlTanhRewards63 = [-0.17453662842012357, -0.17471872931833224, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108]
# Deep Reinforcement Learning: 5 Dense layers, relu activation function, 12 episodes, 50 iterations
drlReluMakespan0 = [796, 798, 809, 798, 796, 800, 798, 799, 800, 794, 800, 798]
drlReluMakespan1 = [800, 800, 801, 806, 804, 806, 808, 798, 796, 796, 798, 800]
drlReluMakespan2 = [805, 805, 798, 800, 800, 798, 801, 799, 800, 806, 800, 800]
drlReluMakespan3 = [798, 799, 798, 795, 798, 808, 803, 800, 798, 795, 799, 800]
drlReluMakespan4 = [805, 805, 799, 796, 798, 803, 799, 800, 800, 800, 795, 794]
drlReluMakespan5 = [799, 796, 795, 800, 801, 796, 800, 795, 803, 800, 800, 805]
drlReluMakespan6 = [799, 795, 798, 794, 805, 796, 795, 799, 798, 795, 804, 796]
drlReluMakespan7 = [795, 798, 799, 798, 798, 799, 795, 794, 796, 794, 795, 805]
drlReluMakespan8 = [805, 794, 794, 795, 798, 795, 798, 795, 799, 800, 796, 798]
drlReluMakespan9 = [797, 797, 797, 794, 795, 794, 794, 797, 796, 795, 801, 799]
drlReluMakespan10 = [799, 794, 797, 795, 794, 794, 795, 795, 795, 796, 797, 799]
drlReluMakespan11 = [796, 798, 800, 795, 805, 794, 798, 796, 795, 794, 798, 795]
drlReluMakespan12 = [800, 795, 794, 798, 800, 805, 800, 798, 804, 799, 794, 803]
drlReluMakespan13 = [796, 799, 798, 794, 800, 794, 795, 796, 798, 795, 794, 799]
drlReluMakespan14 = [795, 798, 798, 798, 805, 798, 798, 798, 795, 794, 800, 796]
drlReluMakespan15 = [795, 798, 795, 805, 798, 794, 795, 798, 796, 794, 795, 796]
drlReluMakespan16 = [798, 795, 796, 799, 796, 798, 798, 795, 795, 795, 795, 799]
drlReluMakespan17 = [794, 798, 796, 798, 795, 801, 794, 798, 797, 795, 796, 801]
drlReluMakespan18 = [798, 795, 798, 798, 801, 798, 795, 795, 797, 800, 794, 800]
drlReluMakespan19 = [795, 798, 794, 800, 796, 795, 798, 797, 795, 794, 796, 796]
drlReluMakespan20 = [794, 794, 795, 795, 795, 795, 796, 798, 799, 799, 799, 795]
drlReluMakespan21 = [802, 796, 794, 797, 797, 800, 794, 794, 804, 803, 798, 797]
drlReluMakespan22 = [794, 795, 795, 795, 798, 795, 794, 799, 794, 803, 795, 794]
drlReluMakespan23 = [794, 798, 799, 794, 795, 795, 799, 795, 796, 795, 797, 799]
drlReluMakespan24 = [795, 794, 797, 800, 794, 795, 795, 795, 795, 800, 800, 798]
drlReluMakespan25 = [795, 794, 797, 796, 798, 795, 795, 794, 799, 795, 794, 798]
drlReluMakespan26 = [801, 795, 800, 794, 794, 796, 800, 798, 798, 799, 794, 796]
drlReluMakespan27 = [796, 795, 796, 795, 796, 795, 795, 800, 794, 794, 794, 796]
drlReluMakespan28 = [794, 794, 795, 796, 794, 795, 795, 797, 794, 794, 796, 795]
drlReluMakespan29 = [793, 794, 795, 800, 795, 795, 794, 798, 798, 796, 795, 794]
drlReluMakespan30 = [802, 794, 794, 798, 794, 796, 805, 794, 800, 794, 796, 794]
drlReluMakespan31 = [797, 794, 794, 794, 800, 800, 794, 794, 798, 795, 794, 798]
drlReluMakespan32 = [794, 798, 794, 795, 794, 795, 798, 794, 794, 795, 794, 798]
drlReluMakespan33 = [798, 794, 798, 795, 794, 793, 797, 798, 794, 794, 801, 793]
drlReluMakespan34 = [794, 798, 794, 795, 794, 793, 798, 795, 794, 800, 794, 795]
drlReluMakespan35 = [794, 796, 794, 796, 806, 795, 795, 795, 796, 795, 795, 799]
drlReluMakespan36 = [795, 794, 794, 796, 796, 798, 794, 796, 794, 795, 794, 795]
drlReluMakespan37 = [795, 794, 795, 798, 794, 794, 794, 794, 794, 794, 795, 797]
drlReluMakespan38 = [794, 798, 794, 798, 797, 794, 794, 795, 795, 794, 795, 795]
drlReluMakespan39 = [797, 794, 795, 796, 796, 796, 798, 794, 794, 795, 794, 798]
drlReluMakespan40 = [798, 795, 795, 798, 792, 795, 795, 794, 795, 794, 798, 794]
drlReluMakespan41 = [795, 794, 794, 794, 794, 794, 798, 793, 794, 794, 794, 793]
drlReluMakespan42 = [794, 794, 794, 794, 799, 794, 795, 794, 796, 794, 794, 794]
drlReluMakespan43 = [794, 797, 795, 794, 795, 794, 794, 795, 794, 794, 793, 794]
drlReluMakespan44 = [794, 792, 793, 794, 794, 796, 794, 798, 795, 794, 794, 796]
drlReluMakespan45 = [795, 794, 799, 794, 794, 793, 794, 795, 795, 793, 796, 794]
drlReluMakespan46 = [794, 796, 794, 794, 794, 794, 794, 793, 799, 792, 794, 794]
drlReluMakespan47 = [795, 794, 793, 794, 796, 797, 794, 794, 795, 794, 794, 794]
drlReluMakespan48 = [794, 794, 794, 792, 794, 794, 795, 794, 794, 794, 794, 794]
drlReluMakespan49 = [794, 794, 795, 792, 797, 797, 794, 794, 792, 800, 795, 795]
drlReluRewards0 = [-0.17544633017412387, -0.17580964970257765, -0.1778021978021978, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drlReluRewards1 = [-0.17617264919621228, -0.17617264919621228, -0.1763540290620872, -0.17725973169122497, -0.1768976897689769, -0.17725973169122497, -0.1776214552648934, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228]
drlReluRewards2 = [-0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.1759911894273128, -0.17617264919621228, -0.17725973169122497, -0.17617264919621228, -0.17617264919621228]
drlReluRewards3 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.1776214552648934, -0.17671654929577466, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228]
drlReluRewards4 = [-0.177078750549934, -0.177078750549934, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17671654929577466, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17526455026455026, -0.17508269018743108]
drlReluRewards5 = [-0.1759911894273128, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.1763540290620872, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17671654929577466, -0.17617264919621228, -0.17617264919621228, -0.177078750549934]
drlReluRewards6 = [-0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.177078750549934, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.17544633017412387]
drlReluRewards7 = [-0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.177078750549934]
drlReluRewards8 = [-0.177078750549934, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drlReluRewards9 = [-0.17562802996914942, -0.17562802996914942, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128]
drlReluRewards10 = [-0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17562802996914942, -0.1759911894273128]
drlReluRewards11 = [-0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.177078750549934, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026]
drlReluRewards12 = [-0.17617264919621228, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.17580964970257765, -0.1768976897689769, -0.1759911894273128, -0.17508269018743108, -0.17671654929577466]
drlReluRewards13 = [-0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128]
drlReluRewards14 = [-0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387]
drlReluRewards15 = [-0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.177078750549934, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387]
drlReluRewards16 = [-0.17580964970257765, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128]
drlReluRewards17 = [-0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.1763540290620872]
drlReluRewards18 = [-0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228]
drlReluRewards19 = [-0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387]
drlReluRewards20 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17526455026455026]
drlReluRewards21 = [-0.17653532907770195, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.17562802996914942]
drlReluRewards22 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17671654929577466, -0.17526455026455026, -0.17508269018743108]
drlReluRewards23 = [-0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.1759911894273128]
drlReluRewards24 = [-0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drlReluRewards25 = [-0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards26 = [-0.1763540290620872, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17544633017412387]
drlReluRewards27 = [-0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387]
drlReluRewards28 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drlReluRewards29 = [-0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drlReluRewards30 = [-0.17653532907770195, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108]
drlReluRewards31 = [-0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765]
drlReluRewards32 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards33 = [-0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.1763540290620872, -0.1749007498897221]
drlReluRewards34 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026]
drlReluRewards35 = [-0.17508269018743108, -0.17544633017412387, -0.17725973169122497, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128]
drlReluRewards36 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlReluRewards37 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942]
drlReluRewards38 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drlReluRewards39 = [-0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards40 = [-0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlReluRewards41 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221]
drlReluRewards42 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards43 = [-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108]
drlReluRewards44 = [-0.17508269018743108, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387]
drlReluRewards45 = [-0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17544633017412387, -0.17508269018743108]
drlReluRewards46 = [-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.1759911894273128, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108]
drlReluRewards47 = [-0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards48 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards49 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17562802996914942, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026]
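# The reward lists above appear to be a deterministic function of the matching
# makespan lists: every (makespan, reward) pair checked satisfies
# reward == -makespan / (makespan + 3741). The helper below is only an
# illustrative sketch of that observed mapping; the constant 3741 is inferred
# from the listed values and is not documented in the original experiments.
def makespan_to_reward(makespan, offset=3741):
    """Map a makespan value to the reward value that the lists above appear to use."""
    return -makespan / (makespan + offset)
# Example: makespan_to_reward(794) == -0.17508269018743108, matching the entries above.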
if __name__ == "__main__":
##############################################
##############################################
##############################################
# Deep Recurrent Reinforcement Learning with 1 GRU layer and 4 Dense layers
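# Each drnnGRUtanhMakespanN / drnnGRUtanhRewardsN list defined above holds the 12
# per-episode results of run N (N = 0..49). The blocks below reduce each run to its
# mean and also collect the raw per-run lists into the aggregate containers.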
drnnGRUtanhMakespan = []
drnnGRUtanhRewards = []
drnnGRUtanhMakespanList = []
drnnGRUtanhRewardsList = []
drnnGRUtanhMakespanValues = []
drnnGRUtanhRewardsValues = []
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan0))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan1))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan2))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan3))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan4))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan5))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan6))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan7))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan8))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan9))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan10))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan11))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan12))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan13))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan14))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan15))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan16))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan17))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan18))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan19))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan20))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan21))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan22))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan23))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan24))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan25))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan26))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan27))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan28))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan29))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan30))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan31))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan32))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan33))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan34))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan35))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan36))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan37))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan38))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan39))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan40))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan41))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan42))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan43))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan44))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan45))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan46))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan47))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan48))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan49))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards0))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards1))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards2))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards3))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards4))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards5))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards6))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards7))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards8))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards9))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards10))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards11))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards12))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards13))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards14))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards15))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards16))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards17))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards18))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards19))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards20))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards21))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards22))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards23))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards24))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards25))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards26))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards27))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards28))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards29))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards30))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards31))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards32))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards33))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards34))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards35))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards36))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards37))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards38))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards39))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards40))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards41))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards42))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards43))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards44))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards45))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards46))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards47))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards48))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards49))
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan0)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan1)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan2)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan3)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan4)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan5)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan6)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan7)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan8)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan9)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan10)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan11)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan12)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan13)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan14)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan15)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan16)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan17)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan18)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan19)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan20)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan21)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan22)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan23)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan24)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan25)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan26)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan27)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan28)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan29)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan30)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan31)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan32)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan33)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan34)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan35)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan36)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan37)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan38)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan39)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan40)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan41)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan42)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan43)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan44)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan45)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan46)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan47)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan48)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan49)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards0)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards1)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards2)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards3)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards4)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards5)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards6)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards7)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards8)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards9)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards10)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards11)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards12)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards13)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards14)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards15)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards16)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards17)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards18)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards19)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards20)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards21)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards22)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards23)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards24)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards25)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards26)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards27)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards28)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards29)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards30)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards31)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards32)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards33)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards34)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards35)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards36)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards37)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards38)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards39)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards40)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards41)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards42)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards43)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards44)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards45)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards46)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards47)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards48)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards49)
drnnGRUreluMakespan = []
drnnGRUreluRewards = []
drnnGRUreluMakespanList = []
drnnGRUreluRewardsList = []
drnnGRUreluMakespanValues = []
drnnGRUreluRewardsValues = []
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan0))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan1))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan2))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan3))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan4))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan5))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan6))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan7))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan8))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan9))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan10))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan11))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan12))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan13))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan14))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan15))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan16))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan17))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan18))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan19))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan20))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan21))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan22))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan23))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan24))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan25))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan26))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan27))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan28))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan29))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan30))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan31))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan32))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan33))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan34))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan35))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan36))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan37))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan38))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan39))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan40))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan41))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan42))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan43))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan44))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan45))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan46))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan47))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan48))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan49))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards0))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards1))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards2))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards3))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards4))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards5))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards6))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards7))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards8))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards9))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards10))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards11))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards12))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards13))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards14))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards15))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards16))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards17))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards18))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards19))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards20))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards21))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards22))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards23))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards24))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards25))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards26))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards27))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards28))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards29))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards30))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards31))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards32))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards33))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards34))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards35))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards36))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards37))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards38))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards39))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards40))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards41))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards42))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards43))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards44))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards45))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards46))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards47))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards48))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards49))
drnnGRUreluMakespanList.append(drnnGRUreluMakespan0)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan1)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan2)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan3)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan4)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan5)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan6)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan7)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan8)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan9)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan10)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan11)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan12)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan13)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan14)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan15)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan16)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan17)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan18)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan19)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan20)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan21)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan22)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan23)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan24)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan25)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan26)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan27)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan28)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan29)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan30)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan31)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan32)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan33)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan34)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan35)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan36)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan37)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan38)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan39)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan40)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan41)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan42)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan43)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan44)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan45)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan46)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan47)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan48)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan49)
drnnGRUreluRewardsList.append(drnnGRUreluRewards0)
drnnGRUreluRewardsList.append(drnnGRUreluRewards1)
drnnGRUreluRewardsList.append(drnnGRUreluRewards2)
drnnGRUreluRewardsList.append(drnnGRUreluRewards3)
drnnGRUreluRewardsList.append(drnnGRUreluRewards4)
drnnGRUreluRewardsList.append(drnnGRUreluRewards5)
drnnGRUreluRewardsList.append(drnnGRUreluRewards6)
drnnGRUreluRewardsList.append(drnnGRUreluRewards7)
drnnGRUreluRewardsList.append(drnnGRUreluRewards8)
drnnGRUreluRewardsList.append(drnnGRUreluRewards9)
drnnGRUreluRewardsList.append(drnnGRUreluRewards10)
drnnGRUreluRewardsList.append(drnnGRUreluRewards11)
drnnGRUreluRewardsList.append(drnnGRUreluRewards12)
drnnGRUreluRewardsList.append(drnnGRUreluRewards13)
drnnGRUreluRewardsList.append(drnnGRUreluRewards14)
drnnGRUreluRewardsList.append(drnnGRUreluRewards15)
drnnGRUreluRewardsList.append(drnnGRUreluRewards16)
drnnGRUreluRewardsList.append(drnnGRUreluRewards17)
drnnGRUreluRewardsList.append(drnnGRUreluRewards18)
drnnGRUreluRewardsList.append(drnnGRUreluRewards19)
drnnGRUreluRewardsList.append(drnnGRUreluRewards20)
drnnGRUreluRewardsList.append(drnnGRUreluRewards21)
drnnGRUreluRewardsList.append(drnnGRUreluRewards22)
drnnGRUreluRewardsList.append(drnnGRUreluRewards23)
drnnGRUreluRewardsList.append(drnnGRUreluRewards24)
drnnGRUreluRewardsList.append(drnnGRUreluRewards25)
drnnGRUreluRewardsList.append(drnnGRUreluRewards26)
drnnGRUreluRewardsList.append(drnnGRUreluRewards27)
drnnGRUreluRewardsList.append(drnnGRUreluRewards28)
drnnGRUreluRewardsList.append(drnnGRUreluRewards29)
drnnGRUreluRewardsList.append(drnnGRUreluRewards30)
drnnGRUreluRewardsList.append(drnnGRUreluRewards31)
drnnGRUreluRewardsList.append(drnnGRUreluRewards32)
drnnGRUreluRewardsList.append(drnnGRUreluRewards33)
drnnGRUreluRewardsList.append(drnnGRUreluRewards34)
drnnGRUreluRewardsList.append(drnnGRUreluRewards35)
drnnGRUreluRewardsList.append(drnnGRUreluRewards36)
drnnGRUreluRewardsList.append(drnnGRUreluRewards37)
drnnGRUreluRewardsList.append(drnnGRUreluRewards38)
drnnGRUreluRewardsList.append(drnnGRUreluRewards39)
drnnGRUreluRewardsList.append(drnnGRUreluRewards40)
drnnGRUreluRewardsList.append(drnnGRUreluRewards41)
drnnGRUreluRewardsList.append(drnnGRUreluRewards42)
drnnGRUreluRewardsList.append(drnnGRUreluRewards43)
drnnGRUreluRewardsList.append(drnnGRUreluRewards44)
drnnGRUreluRewardsList.append(drnnGRUreluRewards45)
drnnGRUreluRewardsList.append(drnnGRUreluRewards46)
drnnGRUreluRewardsList.append(drnnGRUreluRewards47)
drnnGRUreluRewardsList.append(drnnGRUreluRewards48)
drnnGRUreluRewardsList.append(drnnGRUreluRewards49)
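# Flatten the per-run episode lists into single sequences so that a rolling mean can be
# applied below to produce smoothed learning curves for plotting.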
for vector in drnnGRUtanhMakespanList:
for element in vector:
drnnGRUtanhMakespanValues.append(element)
for vector in drnnGRUtanhRewardsList:
for element in vector:
drnnGRUtanhRewardsValues.append(element)
##################
for vector in drnnGRUreluMakespanList:
for element in vector:
drnnGRUreluMakespanValues.append(element)
for vector in drnnGRUreluRewardsList:
for element in vector:
drnnGRUreluRewardsValues.append(element)
#####################
smoothGRUtanhMakespanValues = pd.Series(drnnGRUtanhMakespanValues).rolling(12).mean()
plt.plot(smoothGRUtanhMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' con red neuronal profunda que incluye 1 capa GRU")
plt.show()
smoothGRUtanhRewardsValues = pd.Series(drnnGRUtanhRewardsValues).rolling(12).mean()
plt.plot(smoothGRUtanhRewardsValues)
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' con red neuronal profunda que incluye 1 capa GRU")
plt.show()
#####################
smoothGRUreluMakespanValues = pd.Series(drnnGRUreluMakespanValues).rolling(12).mean()
plt.plot(smoothGRUreluMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' con red neuronal profunda que incluye 1 capa GRU y ReLU")
plt.show()
smoothGRUreluRewardsValues = pd.Series(drnnGRUreluRewardsValues).rolling(12).mean()
plt.plot(smoothGRUreluRewardsValues)
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' con red neuronal profunda que incluye 1 capa GRU y ReLU")
plt.show()
###################
plt.plot(smoothGRUtanhMakespanValues, color='blue', label='tanh')
plt.plot(smoothGRUreluMakespanValues, color='orange', label='relu')
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' con red neuronal profunda que incluye 1 capa GRU")
plt.legend()
plt.show()
###################
plt.plot(smoothGRUtanhRewardsValues, color='blue', label='tanh')
plt.plot(smoothGRUreluRewardsValues, color='orange', label='relu')
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' con red neuronal profunda que incluye 1 capa GRU")
plt.legend()
plt.show()
###################
drnnLSTMtanhMakespan = []
drnnLSTMtanhRewards = []
drnnLSTMtanhMakespanList = []
drnnLSTMtanhRewardsList = []
drnnLSTMtanhMakespanValues = []
drnnLSTMtanhRewardsValues = []
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan0))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan1))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan2))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan3))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan4))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan5))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan6))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan7))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan8))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan9))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan10))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan11))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan12))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan13))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan14))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan15))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan16))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan17))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan18))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan19))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan20))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan21))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan22))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan23))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan24))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan25))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan26))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan27))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan28))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan29))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan30))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan31))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan32))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan33))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan34))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan35))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan36))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan37))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan38))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan39))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan40))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan41))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan42))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan43))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan44))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan45))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan46))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan47))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan48))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan49))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards0))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards1))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards2))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards3))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards4))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards5))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards6))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards7))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards8))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards9))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards10))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards11))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards12))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards13))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards14))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards15))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards16))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards17))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards18))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards19))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards20))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards21))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards22))
drnnLSTMtanhRewards.append( | np.mean(drnnLSTMtanhRewards23) | numpy.mean |
import numpy as np
import cv2 as cv2
import glob as glob
import os
from math import ceil
from datetime import datetime
from skimage.feature import hog
import joblib
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split
def get_resize_info(input_path):
# loop through every training image to find the median shape
name_list = glob.glob(input_path, recursive=True)
shape_matrix = np.zeros((len(name_list), 2))
for i in range(len(name_list)):
img = cv2.imread(name_list[i], 0)
shape_matrix[i, :] = img.shape
median = np.ceil( | np.median(shape_matrix, axis=0) | numpy.median |
import pytest
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
import networkx as nx
from mossspider import NetworkTMLE
@pytest.fixture
def sm_network():
"""Loads a small network for short test runs and checks of data set creations"""
G = nx.Graph()
G.add_nodes_from([(1, {'W': 1, 'A': 1, 'Y': 1, 'C': 1}),
(2, {'W': 0, 'A': 0, 'Y': 0, 'C': -1}),
(3, {'W': 0, 'A': 1, 'Y': 0, 'C': 5}),
(4, {'W': 0, 'A': 0, 'Y': 1, 'C': 0}),
(5, {'W': 1, 'A': 0, 'Y': 0, 'C': 0}),
(6, {'W': 1, 'A': 0, 'Y': 1, 'C': 0}),
(7, {'W': 0, 'A': 1, 'Y': 0, 'C': 10}),
(8, {'W': 0, 'A': 0, 'Y': 0, 'C': -5}),
(9, {'W': 1, 'A': 1, 'Y': 0, 'C': -5})])
G.add_edges_from([(1, 2), (1, 3), (1, 9),
(2, 3), (2, 6),
(3, 4),
(4, 7),
(5, 7), (5, 9)
])
return G
@pytest.fixture
def r_network():
"""Loads network from the R library tmlenet for comparison"""
df = pd.read_csv("tests/tmlenet_r_data.csv")
df['IDs'] = df['IDs'].str[1:].astype(int)
df['NETID_split'] = df['Net_str'].str.split()
G = nx.DiGraph()
G.add_nodes_from(df['IDs'])
for i, c in zip(df['IDs'], df['NETID_split']):
if type(c) is list:
for j in c:
G.add_edge(i, int(j[1:]))
# Adding attributes
for node in G.nodes():
        G.nodes[node]['W'] = int(df.loc[df['IDs'] == node, 'W1'].iloc[0])
        G.nodes[node]['A'] = int(df.loc[df['IDs'] == node, 'A'].iloc[0])
        G.nodes[node]['Y'] = int(df.loc[df['IDs'] == node, 'Y'].iloc[0])
return G
class TestNetworkTMLE:
def test_error_node_ids(self):
G = nx.Graph()
G.add_nodes_from([(1, {'A': 1, 'Y': 1}), (2, {'A': 0, 'Y': 1}), ("N", {'A': 1, 'Y': 0}), (4, {'A': 0, 'Y': 0})])
with pytest.raises(ValueError):
NetworkTMLE(network=G, exposure='A', outcome='Y')
def test_error_self_loops(self):
G = nx.Graph()
G.add_nodes_from([(1, {'A': 1, 'Y': 1}), (2, {'A': 0, 'Y': 1}), (3, {'A': 1, 'Y': 0}), (4, {'A': 0, 'Y': 0})])
G.add_edges_from([(1, 1), (1, 2), (3, 4)])
with pytest.raises(ValueError):
NetworkTMLE(network=G, exposure='A', outcome='Y')
def test_error_nonbinary_a(self):
G = nx.Graph()
G.add_nodes_from([(1, {'A': 2, 'Y': 1}), (2, {'A': 5, 'Y': 1}), (3, {'A': 1, 'Y': 0}), (4, {'A': 0, 'Y': 0})])
with pytest.raises(ValueError):
NetworkTMLE(network=G, exposure='A', outcome='Y')
def test_error_degree_restrictions(self, r_network):
with pytest.raises(ValueError):
NetworkTMLE(network=r_network, exposure='A', outcome='Y', degree_restrict=2)
with pytest.raises(ValueError):
NetworkTMLE(network=r_network, exposure='A', outcome='Y', degree_restrict=[0, 1, 2])
with pytest.raises(ValueError):
NetworkTMLE(network=r_network, exposure='A', outcome='Y', degree_restrict=[2, 0])
def test_error_fit_gimodel(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
# tmle.exposure_model('W')
tmle.exposure_map_model('W', distribution=None)
tmle.outcome_model('A + W')
with pytest.raises(ValueError):
tmle.fit(p=0.0, samples=10)
def test_error_fit_gsmodel(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
tmle.exposure_model('W')
# tmle.exposure_map_model('W', distribution=None)
tmle.outcome_model('A + W')
with pytest.raises(ValueError):
tmle.fit(p=0.0, samples=10)
def test_error_gs_distributions(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
with pytest.raises(ValueError):
tmle.exposure_map_model('W', measure='mean', distribution=None)
with pytest.raises(ValueError):
tmle.exposure_map_model('W', measure='mean', distribution='multinomial')
def test_error_fit_qmodel(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
tmle.exposure_model('W')
tmle.exposure_map_model('W', distribution=None)
# tmle.outcome_model('A + W')
with pytest.raises(ValueError):
tmle.fit(p=0.0, samples=10)
def test_error_p_bound(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
tmle.exposure_model('W')
tmle.exposure_map_model('W', distribution=None)
tmle.outcome_model('A + W')
# For single 'p'
with pytest.raises(ValueError):
tmle.fit(p=1.5, samples=10)
# For multiple 'p'
with pytest.raises(ValueError):
tmle.fit(p=[0.1, 1.5, 0.1,
0.1, 0.1, 0.1,
0.1, 0.1, 0.1], samples=100)
def test_error_p_type(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
tmle.exposure_model('W')
tmle.exposure_map_model('W', distribution=None)
tmle.outcome_model('A + W')
with pytest.raises(ValueError):
tmle.fit(p=5, samples=10)
def test_error_summary(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
tmle.exposure_model('W')
tmle.exposure_map_model('W', distribution=None)
tmle.outcome_model('A + W')
with pytest.raises(ValueError):
tmle.summary()
def test_df_creation(self, sm_network):
columns = ["_original_id_", "W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"]
expected = pd.DataFrame([[1, 1, 1, 1, 2, 2/3, 1, 1/3, 3],
[2, 0, 0, 0, 2, 2/3, 2, 2/3, 3],
[3, 0, 1, 0, 1, 1/3, 1, 1/3, 3],
[4, 0, 0, 1, 2, 1, 0, 0, 2],
[5, 1, 0, 0, 2, 1, 1, 1/2, 2],
[6, 1, 0, 1, 0, 0, 0, 0, 1],
[7, 0, 1, 0, 0, 0, 1, 1/2, 2],
[8, 0, 0, 0, 0, 0, 0, 0, 0],
[9, 1, 1, 0, 1, 1/2, 2, 1, 2]],
columns=columns,
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y')
created = tmle.df
# Checking that expected is the same as the created
assert tmle._continuous_outcome is False
pdt.assert_frame_equal(expected,
created[columns],
check_dtype=False)
def test_df_creation_restricted(self, sm_network):
expected = pd.DataFrame([[1, 1, 1, 2, 2/3, 1, 1/3, 3],
[0, 0, 0, 2, 2/3, 2, 2/3, 3],
[0, 1, 0, 1, 1/3, 1, 1/3, 3],
[0, 0, 1, 2, 1, 0, 0, 2],
[1, 0, 0, 2, 1, 1, 1/2, 2],
[1, 0, 1, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 1, 1/2, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 1/2, 2, 1, 2]],
columns=["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
expected_r = pd.DataFrame([[0, 0, 1, 2, 1, 0, 0, 2],
[1, 0, 0, 2, 1, 1, 1/2, 2],
[1, 0, 1, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 1, 1/2, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 1/2, 2, 1, 2]],
columns=["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"],
index=[3, 4, 5, 6, 7, 8])
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y', degree_restrict=[0, 2])
created = tmle.df
created_r = tmle.df_restricted
# Checking that expected is the same as the created
pdt.assert_frame_equal(expected,
created[["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"]],
check_dtype=False)
pdt.assert_frame_equal(expected_r,
created_r[["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"]],
check_dtype=False)
def test_restricted_number(self, sm_network):
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y', degree_restrict=[0, 2])
n_created = tmle.df.shape[0]
n_created_r = tmle.df_restricted.shape[0]
assert 6 == n_created_r
assert 3 == n_created - n_created_r
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y', degree_restrict=[1, 3])
n_created = tmle.df.shape[0]
n_created_r = tmle.df_restricted.shape[0]
assert 8 == n_created_r
assert 1 == n_created - n_created_r
def test_continuous_processing(self):
G = nx.Graph()
y_list = [1, -1, 5, 0, 0, 0, 10, -5]
G.add_nodes_from([(1, {'A': 0, 'Y': y_list[0]}), (2, {'A': 1, 'Y': y_list[1]}),
(3, {'A': 1, 'Y': y_list[2]}), (4, {'A': 0, 'Y': y_list[3]}),
(5, {'A': 1, 'Y': y_list[4]}), (6, {'A': 1, 'Y': y_list[5]}),
(7, {'A': 0, 'Y': y_list[6]}), (8, {'A': 0, 'Y': y_list[7]})])
tmle = NetworkTMLE(network=G, exposure='A', outcome='Y', continuous_bound=0.0001)
# Checking all flagged parts are correct
assert tmle._continuous_outcome is True
assert tmle._continuous_min_ == -5.0001
assert tmle._continuous_max_ == 10.0001
assert tmle._cb_ == 0.0001
# Checking that TMLE bounding works as intended
maximum = 10.0001
minimum = -5.0001
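        # TMLE bounds a continuous outcome onto the unit interval via y* = (y - min) / (max - min),
        # where min and max are the observed extremes widened by the continuous_bound.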
y_bound = (np.array(y_list) - minimum) / (maximum - minimum)
pdt.assert_series_equal(pd.Series(y_bound, index=[0, 1, 2, 3, 4, 5, 6, 7]),
tmle.df['Y'],
check_dtype=False, check_names=False)
def test_df_creation_continuous(self, sm_network):
expected = pd.DataFrame([[1, 1, 2, 1, 3],
[0, 0, 2, 2, 3],
[0, 1, 1, 1, 3],
[0, 0, 2, 0, 2],
[1, 0, 2, 1, 2],
[1, 0, 0, 0, 1],
[0, 1, 0, 1, 2],
[0, 0, 0, 0, 0],
[1, 1, 1, 2, 2]],
columns=["W", "A", "A_sum", "W_sum", "degree"],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
expected["C"] = [4.00001333e-01, 2.66669778e-01, 6.66664444e-01, 3.33335556e-01, 3.33335556e-01,
3.33335556e-01, 9.99993333e-01, 6.66657778e-06, 6.66657778e-06]
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='C', continuous_bound=0.0001)
created = tmle.df
# Checking that expected is the same as the created
assert tmle._continuous_outcome is True
pdt.assert_frame_equal(expected[["W", "A", "C", "A_sum", "W_sum", "degree"]],
created[["W", "A", "C", "A_sum", "W_sum", "degree"]],
check_dtype=False)
def test_no_consecutive_ids(self):
G = nx.Graph()
G.add_nodes_from([(1, {'W': 1, 'A': 1, 'Y': 1}), (2, {'W': 0, 'A': 0, 'Y': 0}),
(3, {'W': 0, 'A': 1, 'Y': 0}), (4, {'W': 0, 'A': 0, 'Y': 1}),
(5, {'W': 1, 'A': 0, 'Y': 0}), (7, {'W': 1, 'A': 0, 'Y': 1}),
(9, {'W': 0, 'A': 1, 'Y': 0}), (11, {'W': 0, 'A': 0, 'Y': 0}),
(12, {'W': 1, 'A': 1, 'Y': 0})])
G.add_edges_from([(1, 2), (1, 3), (1, 12), (2, 3), (2, 7),
(3, 4), (4, 9), (5, 9), (5, 12)])
expected = pd.DataFrame([[1, 1, 1, 1, 2, 2 / 3, 1, 1 / 3, 3],
[2, 0, 0, 0, 2, 2/3, 2, 2/3, 3],
[3, 0, 1, 0, 1, 1 / 3, 1, 1 / 3, 3],
[4, 0, 0, 1, 2, 1, 0, 0, 2],
[5, 1, 0, 0, 2, 1, 1, 1 / 2, 2],
[7, 1, 0, 1, 0, 0, 0, 0, 1],
[8, 0, 1, 0, 0, 0, 1, 1 / 2, 2],
[11, 0, 0, 0, 0, 0, 0, 0, 0],
[12, 1, 1, 0, 1, 1 / 2, 2, 1, 2]
],
columns=["_original_id_", "W", "A", "Y", "A_sum",
"A_mean", "W_sum", "W_mean", "degree"],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
tmle = NetworkTMLE(network=G, exposure='A', outcome='Y')
created = tmle.df.sort_values(by='_original_id_').reset_index()
pdt.assert_frame_equal(expected[["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"]],
created[["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"]],
check_dtype=False)
def test_df_creation_nonparametric(self, sm_network):
columns = ["_original_id_", "A", "A_map1", "A_map2", "A_map3"]
expected = pd.DataFrame([[1, 1, 0, 1, 1],
[2, 0, 1, 1, 0],
[3, 1, 1, 0, 0],
[4, 0, 1, 1, 0],
[5, 0, 1, 1, 0],
[6, 0, 0, 0, 0],
[7, 1, 0, 0, 0],
[8, 0, 0, 0, 0],
[9, 1, 1, 0, 0]],
columns=columns,
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y')
created = tmle.df.sort_values(by='_original_id_').reset_index()
# Checking that expected is the same as the created
pdt.assert_frame_equal(expected[columns], created[columns], check_dtype=False)
def test_summary_measures_creation(self, sm_network):
columns = ["_original_id_", "A_sum", "A_mean", "A_var", "W_sum", "W_mean", "W_var"]
neighbors_w = {1: np.array([0, 0, 1]), 2: np.array([0, 1, 1]), 3: np.array([0, 0, 1]), 4: np.array([0, 0]),
5: np.array([0, 1]), 6: np.array([0]), 7: np.array([0, 1]), 9: np.array([1, 1])}
neighbors_a = {1: np.array([0, 1, 1]), 2: np.array([0, 1, 1]), 3: np.array([0, 0, 1]), 4: np.array([1, 1]),
5: np.array([1, 1]), 6: np.array([0]), 7: np.array([0, 0]), 9: np.array([0, 1])}
expected = pd.DataFrame([[1, np.sum(neighbors_a[1]), np.mean(neighbors_a[1]), np.var(neighbors_a[1]),
np.sum(neighbors_w[1]), np.mean(neighbors_w[1]), np.var(neighbors_w[1])],
[2, np.sum(neighbors_a[2]), np.mean(neighbors_a[2]), np.var(neighbors_a[2]),
np.sum(neighbors_w[2]), np.mean(neighbors_w[2]), np.var(neighbors_w[2])],
[3, np.sum(neighbors_a[3]), np.mean(neighbors_a[3]), np.var(neighbors_a[3]),
np.sum(neighbors_w[3]), np.mean(neighbors_w[3]), np.var(neighbors_w[3])],
[4, np.sum(neighbors_a[4]), np.mean(neighbors_a[4]), np.var(neighbors_a[4]),
np.sum(neighbors_w[4]), np.mean(neighbors_w[4]), np.var(neighbors_w[4])],
[5, np.sum(neighbors_a[5]), np.mean(neighbors_a[5]), np.var(neighbors_a[5]),
np.sum(neighbors_w[5]), np.mean(neighbors_w[5]), np.var(neighbors_w[5])],
[6, np.sum(neighbors_a[6]), np.mean(neighbors_a[6]), np.var(neighbors_a[6]),
np.sum(neighbors_w[6]), np.mean(neighbors_w[6]), np.var(neighbors_w[6])],
[7, np.sum(neighbors_a[7]), np.mean(neighbors_a[7]), np.var(neighbors_a[7]),
np.sum(neighbors_w[7]), np.mean(neighbors_w[7]), np.var(neighbors_w[7])],
[8, 0, 0, 0, 0, 0, 0], # Isolates are = 0
[9, np.sum(neighbors_a[9]), np.mean(neighbors_a[9]), np.var(neighbors_a[9]),
np.sum(neighbors_w[9]), np.mean(neighbors_w[9]), np.var(neighbors_w[9])]],
columns=columns,
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y')
created = tmle.df
# Checking that expected is the same as the created
assert tmle._continuous_outcome is False
pdt.assert_frame_equal(expected,
created[columns],
check_dtype=False)
def test_distance_measures_creation(self, sm_network):
columns = ["_original_id_", "A_mean_dist", "A_var_dist", "W_mean_dist", "W_var_dist"]
neighbors_w = {1: np.array([-1, -1, 0]), 2: np.array([0, 1, 1]), 3: np.array([0, 0, 1]), 4: np.array([0, 0]),
5: np.array([-1, 0]), 6: np.array([-1]), 7: np.array([0, 1]), 9: np.array([0, 0])}
neighbors_a = {1: np.array([-1, 0, 0]), 2: np.array([0, 1, 1]), 3: np.array([-1, -1, 0]), 4: np.array([1, 1]),
5: np.array([1, 1]), 6: np.array([0]), 7: np.array([-1, -1]), 9: np.array([-1, 0])}
expected = pd.DataFrame([[1, np.mean(neighbors_a[1]), np.var(neighbors_a[1]),
np.mean(neighbors_w[1]), np.var(neighbors_w[1])],
[2, np.mean(neighbors_a[2]), np.var(neighbors_a[2]),
np.mean(neighbors_w[2]), np.var(neighbors_w[2])],
[3, np.mean(neighbors_a[3]), np.var(neighbors_a[3]),
np.mean(neighbors_w[3]), np.var(neighbors_w[3])],
[4, np.mean(neighbors_a[4]), | np.var(neighbors_a[4]) | numpy.var |
import os
import time
import numpy as np
import pybullet as p
from surrol.tasks.psm_env import PsmsEnv
from surrol.utils.pybullet_utils import (
get_link_pose,
step
)
from surrol.utils.robotics import get_matrix_from_pose_2d
from surrol.const import ASSET_DIR_PATH
from stable_baselines3.common.env_checker import check_env
class NeedleRegrasp_custom(PsmsEnv):
ACTION_MODE = 'pitch'
WORKSPACE_LIMITS1 = ((0.55, 0.6), (0.01, 0.08), (0.695, 0.745))
WORKSPACE_LIMITS2 = ((0.55, 0.6), (-0.08, -0.01), (0.695, 0.745))
SCALING = 5.
def _env_setup(self):
super(NeedleRegrasp_custom, self)._env_setup()
self.has_object = True
self._waypoint_goal = True
# robot
for psm, workspace_limits in ((self.psm1, self.workspace_limits1), (self.psm2, self.workspace_limits2)):
pos = (workspace_limits[0].mean(),
workspace_limits[1].mean(),
workspace_limits[2].mean())
# orn = p.getQuaternionFromEuler(np.deg2rad([0, np.random.uniform(-45, -135), -90]))
orn = p.getQuaternionFromEuler(np.deg2rad([0, -90, -90])) # reduce difficulty
# psm.reset_joint(self.QPOS_PSM1)
joint_positions = psm.inverse_kinematics((pos, orn), psm.EEF_LINK_INDEX)
psm.reset_joint(joint_positions)
self.block_gripper = False # set the constraint
psm = self.psm1
workspace_limits = self.workspace_limits1
# needle
limits_span = (workspace_limits[:, 1] - workspace_limits[:, 0]) / 3
sample_space = workspace_limits.copy()
sample_space[:, 0] += limits_span
sample_space[:, 1] -= limits_span
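        # Restrict sampling to the middle third of the workspace so the needle spawns away
        # from the workspace limits and stays reachable for the initial grasp.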
# Load the needle
obj_id = p.loadURDF(os.path.join(ASSET_DIR_PATH, 'needle/needle_40mm.urdf'),
(0.01 * self.SCALING, 0, 0),
(0, 0, 0, 1),
useFixedBase=False,
globalScaling=self.SCALING)
# Needle appearance
p.changeVisualShape(obj_id, -1, specularColor=(80, 80, 80))
# Make needle rigid
self.obj_ids['rigid'].append(obj_id)
self.obj_id, self.obj_link1, self.obj_link2 = self.obj_ids['rigid'][0], 4, 5
while True:
# open the jaw
psm.open_jaw()
# TODO: strange thing that if we use --num_env=1 with openai baselines, the qs vary before and after step!
step(0.5)
# set the position until the psm can grasp it
pos_needle = np.random.uniform(low=sample_space[:, 0], high=sample_space[:, 1])
pitch = np.random.uniform(low=-105., high=-75.) # reduce difficulty
orn_needle = p.getQuaternionFromEuler(np.deg2rad([-90, pitch, 90]))
p.resetBasePositionAndOrientation(obj_id, pos_needle, orn_needle)
# record the needle pose and move the psm to grasp the needle
pos_waypoint, orn_waypoint = get_link_pose(obj_id, self.obj_link2) # the right side waypoint
orn_waypoint = np.rad2deg(p.getEulerFromQuaternion(orn_waypoint))
p.resetBasePositionAndOrientation(obj_id, (0, 0, 0.01 * self.SCALING), (0, 0, 0, 1))
# get the eef pose according to the needle pose
orn_tip = p.getQuaternionFromEuler(np.deg2rad([90, -90 - orn_waypoint[1], 90]))
pose_tip = [pos_waypoint + np.array([0.0015 * self.SCALING, 0, 0]), orn_tip]
pose_eef = psm.pose_tip2eef(pose_tip)
# move the psm
pose_world = get_matrix_from_pose_2d(pose_eef)
action_rcm = psm.pose_world2rcm(pose_world)
success = psm.move(action_rcm)
if success is False:
continue
step(1)
p.resetBasePositionAndOrientation(obj_id, pos_needle, orn_needle)
cid = p.createConstraint(obj_id, -1, -1, -1,
p.JOINT_FIXED, [0, 0, 0], [0, 0, 0], pos_needle,
childFrameOrientation=orn_needle)
psm.close_jaw()
step(0.5)
p.removeConstraint(cid)
self._activate(0)
self._step_callback()
step(1)
self._step_callback()
if self._activated >= 0:
break
def _sample_goal(self) -> np.ndarray:
""" Samples a new goal and returns it.
"""
workspace_limits = self.workspace_limits2
goal = workspace_limits.mean(axis=1) + np.random.randn(3) * 0.005 * self.SCALING
        goal = goal.clip(workspace_limits[:, 0], workspace_limits[:, 1])
return goal.copy()
def _sample_goal_callback(self):
""" Define waypoints
"""
super()._sample_goal_callback()
# Initialising 6 waypoints
self._waypoints = [None, None, None, None, None, None]
# Position of one grasping point on the needle
pos_obj1, _ = get_link_pose(self.obj_id, self.obj_link2)
# Position of second grasping point on the needle
pos_obj2, _ = get_link_pose(self.obj_id, self.obj_link1)
# Making positions ndarrays
pos_obj1, pos_obj2 = np.array(pos_obj1), np.array(pos_obj2)
# Distance between the two grasping points
pos_dis = np.linalg.norm(pos_obj1 - pos_obj2)
        # Same pitch angles for both grasps
pitch1, pitch2 = np.deg2rad(-30), np.deg2rad(-30)
# Open gripper
jaw = 0.8
####################################################################
############# MOVING BOTH PSMS TO THE MIDDLE #######################
####################################################################
# Position of the tip for the PSM1 (with needle)
pos_tip1 = (pos_obj1[0] + 0.002 * self.SCALING, pos_dis / 2, pos_obj1[2])
# Orientation of the tip for the PSM1 (with needle)
orn_tip1 = p.getQuaternionFromEuler(np.deg2rad([90, -30, 90]))
# Pose of the tip for the PSM1 (with needle)
pose_tip1 = [pos_tip1, orn_tip1]
# Position of the EE for the PSM1 (with needle)
pos_eef1, _ = self.psm1.pose_tip2eef(pose_tip1)
# Position of the tip for the PSM2 (without needle)
pos_tip2 = (pos_obj1[0] - 0.002 * self.SCALING, - pos_dis / 2, pos_obj1[2])
# Orientation of the tip for the PSM2 (without needle)
orn_tip2 = p.getQuaternionFromEuler(np.deg2rad([90, -150, 90]))
# Pose of the tip for the PSM2 (without needle)
pose_tip2 = [pos_tip2, orn_tip2]
# Position of the EE for the PSM2 (without needle)
pos_eef2, _ = self.psm2.pose_tip2eef(pose_tip2)
# Move both PSMs to the middle
self._waypoints[0] = np.array([pos_eef1[0], pos_eef1[1], pos_eef1[2], pitch1, -jaw,
pos_eef2[0], pos_eef2[1], pos_eef2[2], pitch2, jaw])
####################################################################
################## PSM2 APPROACH NEEDLE ############################
####################################################################
# Move PSM1 a little back
pose_tip1[0] = (pos_obj1[0], pos_dis / 2, pos_obj1[2])
pos_eef1, _ = self.psm1.pose_tip2eef(pose_tip1)
# Move PSM2 forwards towards the needle
pose_tip2[0] = (pos_obj1[0] + 0.002 * self.SCALING, - pos_dis / 2, pos_obj1[2])
pos_eef2, _ = self.psm2.pose_tip2eef(pose_tip2)
self._waypoints[1] = np.array([pos_eef1[0], pos_eef1[1], pos_eef1[2], pitch1, -jaw,
pos_eef2[0], pos_eef2[1], pos_eef2[2], pitch2, jaw])
####################################################################
################## PSM2 GRASP, PSM1 RELEASE ########################
####################################################################
# PSM2 grasp
self._waypoints[2] = np.array([pos_eef1[0], pos_eef1[1], pos_eef1[2], pitch1, -jaw,
pos_eef2[0], pos_eef2[1], pos_eef2[2], pitch2, -jaw])
# PSM1 RELEASE
self._waypoints[3] = np.array([pos_eef1[0], pos_eef1[1], pos_eef1[2], pitch1, jaw,
pos_eef2[0], pos_eef2[1], pos_eef2[2], pitch2, -jaw])
####################################################################
############### ADJUST POSES TO AVOID COLLISION ####################
####################################################################
# Move PSM1 a little back and to the right
pose_tip1[0] = (pos_obj1[0] - 0.005 * self.SCALING, pos_dis / 2 + 0.01 * self.SCALING, pos_obj1[2])
pos_eef1, _ = self.psm1.pose_tip2eef(pose_tip1)
# Move PSM2 a little to the front
pose_tip2[0] = (pos_obj1[0] + 0.005 * self.SCALING, - pos_dis / 2, pos_obj1[2])
pos_eef2, _ = self.psm2.pose_tip2eef(pose_tip2)
self._waypoints[4] = np.array([pos_eef1[0], pos_eef1[1], pos_eef1[2], pitch1, jaw,
pos_eef2[0], pos_eef2[1], pos_eef2[2], pitch2, -jaw])
####################################################################
#################### MOVE PSM2 TO RED DOT ##########################
####################################################################
# PSM2 tip position must match goal position
pose_tip2[0] = (self.goal[0], self.goal[1], self.goal[2])
# Convert to EE position
pos_eef2, _ = self.psm2.pose_tip2eef(pose_tip2)
# Place
self._waypoints[5] = np.array([pos_eef1[0], pos_eef1[1], pos_eef1[2], pitch1, jaw,
pos_eef2[0], pos_eef2[1], pos_eef2[2], pitch2, -jaw])
def _meet_contact_constraint_requirement(self):
""" add a contact constraint to the grasped needle to make it stable
"""
return True
def get_oracle_action(self, obs) -> np.ndarray:
"""
Define a human expert strategy
"""
# six waypoints executed in sequential order
action = np.zeros(10)
action[4], action[9] = 0.8, -0.8
pitch_scaling = np.deg2rad(15)
for i, waypoint in enumerate(self._waypoints):
time.sleep(0.2)
if waypoint is None:
continue
delta_pos1 = (waypoint[0: 3] - obs['observation'][0: 3]) / 0.01 / self.SCALING
delta_pitch1 = ((waypoint[3] - obs['observation'][4]) / pitch_scaling).clip(-1, 1)
delta_pos2 = (waypoint[5: 8] - obs['observation'][7: 10]) / 0.01 / self.SCALING
delta_pitch2 = ((waypoint[8] - obs['observation'][11]) / pitch_scaling).clip(-1, 1)
if np.abs(delta_pos1).max() > 1:
delta_pos1 /= np.abs(delta_pos1).max()
if np.abs(delta_pos2).max() > 1:
delta_pos2 /= np.abs(delta_pos2).max()
scale_factor = 0.5
delta_pos1 *= scale_factor
delta_pos2 *= scale_factor
action = np.array([delta_pos1[0], delta_pos1[1], delta_pos1[2], delta_pitch1, waypoint[4],
delta_pos2[0], delta_pos2[1], delta_pos2[2], delta_pitch2, waypoint[9]])
if np.linalg.norm(delta_pos1) * 0.01 / scale_factor < 1e-4 and np.abs(delta_pitch1) < 2. \
and np.linalg.norm(delta_pos2) * 0.01 / scale_factor < 1e-4 and | np.abs(delta_pitch2) | numpy.abs |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorrt as trt
import os
import sys
import platform
import onnx
import ctypes
import struct
import numpy as np
sys.path.insert(0, os.getcwd())
from importlib import import_module
from code.common import logging, dict_get, BENCHMARKS
from code.common import get_system
from code.common.builder import BenchmarkBuilder
import pycuda.autoinit
RN50Calibrator = import_module("code.resnet50.tensorrt.calibrator").RN50Calibrator
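# Select the plugin build matching the GPU architecture: compute capability major > 7
# (e.g. Ampere, SM 8.x) loads the Ampere library, otherwise the Turing (SM 7.x) build.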
AUTOSINIAN_CNN_PLUGIN_LIBRARY = "code/resnet50/tensorrt/libautosiniancnnplugin_ampere.so" if pycuda.autoinit.device.compute_capability()[0] > 7 else "code/resnet50/tensorrt/libautosiniancnnplugin_turing.so"
if not os.path.isfile(AUTOSINIAN_CNN_PLUGIN_LIBRARY):
raise IOError("{}\n".format(
"Failed to load library ({}).".format(AUTOSINIAN_CNN_PLUGIN_LIBRARY)
))
ctypes.CDLL(AUTOSINIAN_CNN_PLUGIN_LIBRARY)
class ResNet50(BenchmarkBuilder):
"""Resnet50 engine builder."""
def __init__(self, args):
workspace_size = dict_get(args, "workspace_size", default=(1 << 30))
logging.info("Use workspace_size: {:}".format(workspace_size))
super().__init__(args, name=BENCHMARKS.ResNet50, workspace_size=workspace_size)
# Model path
self.model_path = dict_get(args, "model_path", default="code/resnet50/tensorrt/ofa_autosinian_is176.onnx")
logging.info("Using AutoSinian optimized once-for-all network")
self.cache_file = None
self.need_calibration = False
if self.precision == "int8":
# Get calibrator variables
calib_batch_size = dict_get(self.args, "calib_batch_size", default=1)
calib_max_batches = dict_get(self.args, "calib_max_batches", default=500)
force_calibration = dict_get(self.args, "force_calibration", default=False)
cache_file = dict_get(self.args, "cache_file", default="code/resnet50/tensorrt/calibrator.cache")
preprocessed_data_dir = dict_get(self.args, "preprocessed_data_dir", default="build/preprocessed_data")
calib_data_map = dict_get(self.args, "calib_data_map", default="data_maps/imagenet/cal_map.txt")
calib_image_dir = os.path.join(preprocessed_data_dir, "imagenet/ResNet50/fp32")
# Set up calibrator
self.calibrator = RN50Calibrator(calib_batch_size=calib_batch_size, calib_max_batches=calib_max_batches,
force_calibration=force_calibration, cache_file=cache_file,
image_dir=calib_image_dir, calib_data_map=calib_data_map)
self.builder_config.int8_calibrator = self.calibrator
self.cache_file = cache_file
self.need_calibration = force_calibration or not os.path.exists(cache_file)
def initialize(self):
"""
Parse input ONNX file to a TRT network. Apply layer optimizations and fusion plugins on network.
"""
# Query system id for architecture
self.system = get_system()
self.gpu_arch = self.system.arch
# Create network.
self.network = self.builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
# Parse from onnx file.
parser = trt.OnnxParser(self.network, self.logger)
with open(self.model_path, "rb") as f:
model = f.read()
success = parser.parse(model)
if not success:
raise RuntimeError("ofa_autusinian onnx model processing failed! Error: {:}".format(parser.get_error(0).desc()))
# Set input dtype and format
input_tensor = self.network.get_input(0)
if self.input_dtype == "int8":
input_tensor.dtype = trt.int8
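            # The hex literal below is a hard-coded fp32 calibration scale for the input tensor
            # (decoded big-endian, roughly 0.02); the int8 dynamic range is then +/- scale * 127.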
scale = struct.unpack('!f', bytes.fromhex('3caa5293'))[0]
input_tensor.dynamic_range = (-scale*127.0, scale*127.0)
if self.input_format == "linear":
input_tensor.allowed_formats = 1 << int(trt.TensorFormat.LINEAR)
elif self.input_format == "chw4":
input_tensor.allowed_formats = 1 << int(trt.TensorFormat.CHW4)
# Get the layers we care about.
nb_layers = self.network.num_layers
while self.network.num_outputs > 0:
logging.info("Unmarking output: {:}".format(self.network.get_output(0).name))
self.network.unmark_output(self.network.get_output(0))
#add top-k
last_fc_layer = self.network.get_layer(nb_layers - 1)
topk_layer = self.network.add_topk(last_fc_layer.get_output(0), trt.TopKOperation.MAX, 1, 2)
topk_layer.name = "topk_layer"
topk_layer.get_output(0).name = "topk_layer_output_value"
topk_layer.get_output(1).name = "topk_layer_output_index"
self.network.mark_output(topk_layer.get_output(1))
if self.network.num_outputs != 1:
logging.warning("num outputs should be 1 after unmarking! Has {:}".format(self.network.num_outputs))
raise Exception
if self.precision == "int8" and self.batch_size > 1 and (not self.need_calibration):
self.autosinian_optimize()
self.initialized = True
def autosinian_optimize(self):
logging.info("Applying AutoSinian Optimization...")
optimize_points = [(10,15), (21,26), (27,32), (38,43), (44,49), (55,60), (61,66), (67,72), (78,83), (84,89), (90,95), (0,4), (5,9), (16,20), (33,37), (50,54), (73,77), (96,100)]
optimizer = AutoSinian_Optimizer(self.cache_file)
for point in optimize_points:
optimizer.optimize(self.network, point)
class AutoSinian_Optimizer:
'''AutoSinian optimizer, optimize the hardware implementation of the layers.'''
def __init__(self, cache_file = None):
self.plugin_registery = trt.get_plugin_registry()
foundPlugin = False
for plugin_creator in self.plugin_registery.plugin_creator_list:
if plugin_creator.name == self.name:
self.creator = self.plugin_registery.get_plugin_creator(self.name,'1','')
foundPlugin = True if self.creator else False
break
assert(foundPlugin), "fail to found %s!" % self.name
self.scale_map = {}
with open(cache_file, "r") as f:
for line in f:
pair = line.rstrip().split(':')
if len(pair) == 2:
self.scale_map[pair[0]] = struct.unpack('!f', bytes.fromhex(pair[1]))[0]
self.count = 0
@property
def name(self):
return "AutoSinianCNN_TRT"
def optimize(self, network, point):
fields = trt.PluginFieldCollection()
saved = [] #values must be alive when creating the plugin.
inputs = [network.get_layer(point[0]).get_input(0)]
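        # Gather parameters from the three convolution layers of the span (offsets 0, 2 and 4);
        # the layers in between are assumed to be their activations, folded into the fused plugin.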
append_fields(network, point[0], fields, saved, self.scale_map)
append_fields(network, point[0]+2, fields, saved, self.scale_map)
append_fields(network, point[0]+4, fields, saved, self.scale_map)
plugin=self.creator.create_plugin(self.name, fields)
if plugin is None:
raise Exception("Plugin creation failed")
plugin_layer = network.add_plugin_v2(inputs, plugin)
plugin_layer.name = self.name + "_%d" % self.count
self.count += 1
origin_output = network.get_layer(point[1]).get_output(0)
plugin_output = plugin_layer.get_output(0)
assert(origin_output.name in self.scale_map), "%s not found!" % origin_output.name
dynamic_range=self.scale_map[origin_output.name]*127.0
plugin_output.set_dynamic_range(-dynamic_range, dynamic_range)
for j in range(network.num_layers):
layer = network.get_layer(j)
if layer.name==plugin_layer.name :
continue
for k in range(layer.num_inputs):
if layer.get_input(k) == origin_output:
layer.set_input(k, plugin_output)
def append_fields(network, index, fields, saved, scale_map):
layer = network.get_layer(index)
assert(isinstance(layer, trt.ILayer) and (layer.type == trt.LayerType.CONVOLUTION)), "must be a conv layer"
layer.__class__ = trt.IConvolutionLayer
output_layer = layer
npa1 = np.array([layer.kernel_size.h], dtype=np.int32)
saved.append(npa1)
npa2 = np.array([layer.num_output_maps], dtype=np.int32)
saved.append(npa2)
npa3 = np.array([layer.num_groups], dtype=np.int32)
saved.append(npa3)
npa4 = np.array([layer.stride.h], dtype=np.int32)
saved.append(npa4)
npa5 = np.array([layer.pre_padding[0]], dtype=np.int32)
saved.append(npa5)
npa6 = | np.array([layer.post_padding[0]], dtype=np.int32) | numpy.array |
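# Illustrative sketch, not part of the original file above: the calibration cache parsed in
# AutoSinian_Optimizer stores per-tensor scales as big-endian hex-encoded IEEE-754 floats.
# Decoding one entry and turning it into a symmetric int8 dynamic range looks like this
# (the hex string is the same one hard-coded for the input tensor above).
import struct
scale = struct.unpack('!f', bytes.fromhex('3caa5293'))[0]
dynamic_range = (-scale * 127.0, scale * 127.0)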
#
# Author: <NAME>
# and <NAME> <<EMAIL>>
# License: Academic Free License (AFL) v3.0
#
import numpy as np
from math import pi
from mpi4py import MPI
try:
from scipy import comb
except ImportError:
from scipy.special import comb
import prosper.em as em
import prosper.utils.parallel as parallel
import prosper.utils.tracing as tracing
from prosper.utils.datalog import dlog
from prosper.em.camodels import CAModel
class BSC_ET(CAModel):
"""Binary Sparse Coding
Implements learning and inference of a Binary Sparse coding model under a variational approximation
Attributes
----------
comm : MPI communicator
D : int
number of features
gamma : int
approximation parameter for maximum number of non-zero states
H : int
number of latent variables
Hprime : int
approximation parameter for latent space trunctation
K : int
number of different values the latent variables can take
no_states : (..., Hprime) ndarray
number of different states of latent variables except singleton states and zero state
single_state_matrix : ((K-1)*H, H) ndarray
matrix that holds all possible singleton states
state_abs : (no_states, ) ndarray
number of non-zero elements in the rows of the state_matrix
state_matrix : (no_states, Hprime) ndarray
latent variable states taken into account during the em algorithm
states : (K,) ndarray
        the different values that a latent variable can take; must include 0 and one more integer
to_learn : list
list of strings included in model_params.keys() that specify which parameters are going to be optimized
References
----------
[1] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME> (2010). Binary Sparse Coding. Proc. LVA/ICA 2010, LNCS 6365, 450-457.
[2] <NAME> and <NAME> (2010). Expectation Truncation and the Benefits of Preselection in Training Generative Models. Journal of Machine Learning Research 11:2855-2900.
"""
def __init__(self, D, H, Hprime, gamma, to_learn=['W', 'pi', 'sigma'], comm=MPI.COMM_WORLD):
CAModel.__init__(self, D, H, Hprime, gamma, to_learn, comm)
@tracing.traced
def generate_from_hidden(self, model_params, my_hdata):
""" Generate data according to the MCA model while the latents are
given in my_hdata['s'].
This method does _not_ obey gamma: The generated data may have more
than gamma active causes for a given datapoint.
"""
W = model_params['W'].T
pies = model_params['pi']
sigma = model_params['sigma']
H, D = W.shape
s = my_hdata['s']
my_N, _ = s.shape
# Create output arrays, y is data
y = np.zeros( (my_N, D) )
for n in range(my_N):
            # Combine the active causes by linear superposition
for h in range(H):
if s[n,h]:
y[n] += W[h]
# Add noise according to the model parameters
y += np.random.normal( scale=sigma, size=(my_N, D) )
# Build return structure
return { 'y': y, 's': s }
@tracing.traced
def select_Hprimes(self, model_params, data):
"""
Return a new data-dictionary which has been annotated with
a data['candidates'] dataset. A set of self.Hprime candidates
will be selected.
"""
my_y = data['y']
W = model_params['W'].T
Hprime = self.Hprime
my_N, D = my_y.shape
candidates = np.zeros( (my_N, Hprime), dtype=np.int )
for n in range(my_N):
sim = np.inner(W,my_y[n])/ np.sqrt(np.diag(np.inner(W,W)))/ np.sqrt(np.inner(my_y[n],my_y[n]))
candidates[n] = np.argsort(sim)[-Hprime:]
data['candidates'] = candidates
return data
@tracing.traced
def E_step(self, anneal, model_params, my_data):
""" BSC E_step
my_data variables used:
my_data['y'] Datapoints
my_data['can'] Candidate H's according to selection func.
Annealing variables used:
anneal['T'] Temperature for det. annealing
anneal['N_cut_factor'] 0.: no truncation; 1. trunc. according to model
"""
comm = self.comm
my_y = my_data['y'].copy()
my_cand = my_data['candidates']
my_N, D = my_data['y'].shape
H = self.H
SM = self.state_matrix # shape: (no_states, Hprime)
state_abs = self.state_abs # shape: (no_states,)
W = model_params['W'].T
pies = model_params['pi']
sigma = model_params['sigma']
try:
mu = model_params['mu']
except:
mu = np.zeros(D)
model_params['mu'] = mu
# Precompute
beta = 1./anneal['T']
pre1 = -1./2./sigma/sigma
pil_bar = np.log( pies/(1.-pies) )
# Allocate return structures
F = np.empty( [my_N, 1+H+self.no_states] )
pre_F = np.empty( [my_N, 1+H+ self.no_states] )
denoms = np.zeros(my_N)
# Pre-fill pre_F:
pre_F[:,0] = 0.
pre_F[:,1:H+1] = pil_bar
pre_F[:,1+H:] = pil_bar * state_abs # is (no_states,)
# Iterate over all datapoints
tracing.tracepoint("E_step:iterating")
for n in range(my_N):
y = my_data['y'][n,:] - mu
cand = my_data['candidates'][n,:]
# Zero active hidden causes
log_prod_joint = pre1 * (y**2).sum()
F[n,0] = log_prod_joint
# Hidden states with one active cause
log_prod_joint = pre1 * ((W-y)**2).sum(axis=1)
F[n,1:H+1] = log_prod_joint
# Handle hidden states with more than 1 active cause
W_ = W[cand] # is (Hprime x D)
Wbar = np.dot(SM,W_)
log_prod_joint = pre1 * ((Wbar-y)**2).sum(axis=1)
F[n,1+H:] = log_prod_joint
if anneal['anneal_prior']:
F = beta * (pre_F + F)
else:
F = pre_F + beta * F
return { 'logpj': F }
@tracing.traced
def M_step(self, anneal, model_params, my_suff_stat, my_data):
""" BSC M_step
my_data variables used:
my_data['y'] Datapoints
my_data['candidates'] Candidate H's according to selection func.
Annealing variables used:
anneal['T'] Temperature for det. annealing
anneal['N_cut_factor'] 0.: no truncation; 1. trunc. according to model
"""
comm = self.comm
H, Hprime = self.H, self.Hprime
gamma = self.gamma
W = model_params['W'].T
pies = model_params['pi']
sigma = model_params['sigma']
mu = model_params['mu']
# Read in data:
my_y = my_data['y'].copy()
candidates = my_data['candidates']
logpj_all = my_suff_stat['logpj']
all_denoms = np.exp(logpj_all).sum(axis=1)
my_N, D = my_y.shape
N = comm.allreduce(my_N)
# Joerg's data noise idea
data_noise_scale = anneal['data_noise']
if data_noise_scale > 0:
my_y += my_data['data_noise']
SM = self.state_matrix # shape: (no_states, Hprime)
# To compute et_loglike:
my_ldenom_sum = 0.0
ldenom_sum = 0.0
# Precompute factor for pi update
A_pi_gamma = 0
B_pi_gamma = 0
for gamma_p in range(gamma+1):
A_pi_gamma += comb(H,gamma_p) * (pies**gamma_p) * ((1-pies)**(H-gamma_p))
B_pi_gamma += gamma_p * comb(H,gamma_p) * (pies**gamma_p) * ((1-pies)**(H-gamma_p))
E_pi_gamma = pies * H * A_pi_gamma / B_pi_gamma
# Truncate data
if anneal['Ncut_factor'] > 0.0:
tracing.tracepoint("M_step:truncating")
#alpha = 0.9 # alpha from ET paper
#N_use = int(alpha * (N * (1 - (1 - A_pi_gamma) * anneal['Ncut_factor'])))
N_use = int(N * (1 - (1 - A_pi_gamma) * anneal['Ncut_factor']))
cut_denom = parallel.allsort(all_denoms)[-N_use]
which = np.array(all_denoms >= cut_denom)
candidates = candidates[which]
logpj_all = logpj_all[which]
my_y = my_y[which]
my_N, D = my_y.shape
N_use = comm.allreduce(my_N)
else:
N_use = N
dlog.append('N', N_use)
# Calculate truncated Likelihood
L = H * | np.log(1-pies) | numpy.log |
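# Illustrative sketch, not part of the original module: select_Hprimes above ranks the H
# dictionary elements by a cosine-like similarity with a datapoint y and keeps the Hprime
# best-matching ones. The shapes and values below are made up.
import numpy as np
rng = np.random.default_rng(0)
W = rng.normal(size=(10, 5))                      # H x D dictionary
y = rng.normal(size=5)                            # one datapoint
sim = W @ y / np.sqrt(np.diag(W @ W.T)) / np.sqrt(y @ y)
candidates = np.argsort(sim)[-3:]                 # indices of the Hprime = 3 best matches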
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import math
import unittest
from data_manager import DataManager
from hd_cells import HDCells
from place_cells import PlaceCells
class DataManagerTest(unittest.TestCase):
def setUp(self):
self.data_manager = DataManager()
def test_value_range(self):
        # Pos range is -4.5 ~ 4.5 (in practice roughly -4.03 to 4.03)
self.assertLessEqual( np.max(self.data_manager.pos_xs), 4.5)
self.assertGreaterEqual(np.min(self.data_manager.pos_xs), -4.5)
# Angle range is -pi ~ pi
self.assertLessEqual( | np.max(self.data_manager.angles) | numpy.max |
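# Illustrative sketch, not part of the original test: the range checks above simply compare
# the array extremes against the expected bounds.
import numpy as np
angles = np.array([-3.1, 0.0, 2.9])
assert np.max(angles) <= np.pi and np.min(angles) >= -np.pi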
import numpy as np
a=np.array([[1,2,3,4],[5,6,7,8]])
print(a)
# Get dimensions
print(a.ndim)
#Get shape
print(a.shape)
b= np.array([3,4,5,6],dtype='int16')
print(a*b)
print(b.ndim)
print(b.dtype)
print(b.itemsize)
print(b.nbytes)
a=np.array([[1,2,3,4,5,6,7],[8,9,10,11,12,13,14]])
print(a)
print(a.shape)
print(a[0,5])
print(a[0,:])
print(a[:,3])
print(a[0,1:7:2])
a[1,:]=22
print(a)
b= np.array([[[1,2],[3,4]],[[5,6],[7,8]]])
print(b)
print(b[0,1,1])
print(b[:,1,:])
# Initialize all zero matrix
a=np.zeros((2,3))
print(a)
b=np.ones((2,2))
print(b)
# initialize with any random value
b=np.full((2,2),98,dtype='float32')
print(b)
#full_like method
b=np.full_like(b,4)
print(b)
# initialize random decimal numbers
b=np.random.rand(4,2)
print(b)
b= np.random.random_sample(b.shape)
print(b)
b=np.random.randint(-2,20,size=(3,3))
print(b)
# identity matrix
b=np.identity(5)
print(b)
#Repeat an array
a=np.array([1,2,3])
a=np.repeat(a,3)
print(a)
a=np.array([[1,2,3],[4,5,6]])
a=np.array([[1,2,3]])
a=np.repeat(a,4,axis=0)
print(a)
# print
# [[1. 1. 1. 1. 1.]
# [1. 0. 0. 0. 1.]
# [1. 0. 9. 0. 1.]
# [1. 0. 0. 0. 1.]
# [1. 1. 1. 1. 1.]]
a= | np.ones((5,5)) | numpy.ones |
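# Illustrative continuation, not part of the original script: one way to finish the exercise
# sketched in the comment above (ones on the border, a 9 in the centre, zeros elsewhere).
import numpy as np
a = np.ones((5, 5))
z = np.zeros((3, 3))
z[1, 1] = 9
a[1:4, 1:4] = z
print(a)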
import json
import os
from collections import OrderedDict
from copy import deepcopy
import SimpleITK as sitk
from batchgenerators.augmentations.utils import resize_segmentation # resize_softmax_output
from skimage.transform import resize
from torch.optim import lr_scheduler
from torch import nn
import numpy as np
import torch
from scipy.ndimage import binary_fill_holes
'''
This code is not intended to be looked at by anyone. It is messy. It is undocumented.
And the entire training pipeline is missing.
'''
max_num_filters_3d = 320
max_num_filters_2d = 480
join = os.path.join
def load_json(file):
with open(file, 'r') as f:
a = json.load(f)
return a
def resize_image(image, old_spacing, new_spacing, order=3, cval=0):
new_shape = (int(np.round(old_spacing[0]/new_spacing[0]*float(image.shape[0]))),
int(np.round(old_spacing[1]/new_spacing[1]*float(image.shape[1]))),
int(np.round(old_spacing[2]/new_spacing[2]*float(image.shape[2]))))
if any([i != j for i, j in zip(image.shape, new_shape)]):
res = resize(image, new_shape, order=order, mode='edge', cval=cval)
else:
res = image
return res
class ConvDropoutNormNonlin(nn.Module):
def __init__(self, input_channels, output_channels,
conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None):
super(ConvDropoutNormNonlin, self).__init__()
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}
self.nonlin_kwargs = nonlin_kwargs
self.nonlin = nonlin
self.dropout_op = dropout_op
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.conv_op = conv_op
self.norm_op = norm_op
self.conv = self.conv_op(input_channels, output_channels, **self.conv_kwargs)
if self.dropout_op is not None and self.dropout_op_kwargs['p'] is not None and self.dropout_op_kwargs[
'p'] > 0:
self.dropout = self.dropout_op(**self.dropout_op_kwargs)
else:
self.dropout = None
self.instnorm = self.norm_op(output_channels, **self.norm_op_kwargs)
self.lrelu = nn.LeakyReLU(**self.nonlin_kwargs)
def forward(self, x):
x = self.conv(x)
if self.dropout is not None:
x = self.dropout(x)
return self.lrelu(self.instnorm(x))
def pad_nd_image(image, new_shape=None, mode="edge", kwargs=None, return_slicer=False, shape_must_be_divisible_by=None):
if kwargs is None:
kwargs = {}
if new_shape is not None:
old_shape = np.array(image.shape[-len(new_shape):])
else:
assert shape_must_be_divisible_by is not None
assert isinstance(shape_must_be_divisible_by, (list, tuple, np.ndarray))
new_shape = image.shape[-len(shape_must_be_divisible_by):]
old_shape = new_shape
num_axes_nopad = len(image.shape) - len(new_shape)
new_shape = [max(new_shape[i], old_shape[i]) for i in range(len(new_shape))]
if not isinstance(new_shape, np.ndarray):
new_shape = np.array(new_shape)
if shape_must_be_divisible_by is not None:
if not isinstance(shape_must_be_divisible_by, (list, tuple, np.ndarray)):
shape_must_be_divisible_by = [shape_must_be_divisible_by] * len(new_shape)
else:
assert len(shape_must_be_divisible_by) == len(new_shape)
for i in range(len(new_shape)):
if new_shape[i] % shape_must_be_divisible_by[i] == 0:
new_shape[i] -= shape_must_be_divisible_by[i]
new_shape = np.array([new_shape[i] + shape_must_be_divisible_by[i] - new_shape[i] % shape_must_be_divisible_by[i] for i in range(len(new_shape))])
difference = new_shape - old_shape
pad_below = difference // 2
pad_above = difference // 2 + difference % 2
pad_list = [[0, 0]]*num_axes_nopad + list([list(i) for i in zip(pad_below, pad_above)])
res = np.pad(image, pad_list, mode, **kwargs)
if not return_slicer:
return res
else:
pad_list = np.array(pad_list)
pad_list[:, 1] = np.array(res.shape) - pad_list[:, 1]
slicer = list(slice(*i) for i in pad_list)
return res, slicer
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
def get_device(self):
if next(self.parameters()).device == "cpu":
return "cpu"
else:
return next(self.parameters()).device.index
def set_device(self, device):
if device == "cpu":
self.cpu()
else:
self.cuda(device)
def forward(self, x):
raise NotImplementedError
class SegmentationNetwork(NeuralNetwork):
def __init__(self):
self.input_shape_must_be_divisible_by = None
self.conv_op = None
super(NeuralNetwork, self).__init__()
self.inference_apply_nonlin = lambda x:x
def predict_3D(self, x, do_mirroring, num_repeats=1, use_train_mode=False, batch_size=1, mirror_axes=(2, 3, 4),
tiled=False, tile_in_z=True, step=2, patch_size=None, regions_class_order=None, use_gaussian=False,
pad_border_mode="edge", pad_kwargs=None):
"""
:param x: (c, x, y , z)
:param do_mirroring:
:param num_repeats:
:param use_train_mode:
:param batch_size:
:param mirror_axes:
:param tiled:
:param tile_in_z:
:param step:
:param patch_size:
:param regions_class_order:
:param use_gaussian:
:return:
"""
current_mode = self.training
if use_train_mode is not None and use_train_mode:
self.train()
elif use_train_mode is not None and not use_train_mode:
self.eval()
else:
pass
assert len(x.shape) == 4, "data must have shape (c,x,y,z)"
if self.conv_op == nn.Conv3d:
if tiled:
res = self._internal_predict_3D_3Dconv_tiled(x, num_repeats, batch_size, tile_in_z, step, do_mirroring,
mirror_axes, patch_size, regions_class_order, use_gaussian,
pad_border_mode, pad_kwargs=pad_kwargs)
else:
res = self._internal_predict_3D_3Dconv(x, do_mirroring, num_repeats, patch_size, batch_size,
mirror_axes, regions_class_order, pad_border_mode, pad_kwargs=pad_kwargs)
elif self.conv_op == nn.Conv2d:
if tiled:
res = self._internal_predict_3D_2Dconv_tiled(x, do_mirroring, num_repeats, batch_size, mirror_axes,
step, patch_size, regions_class_order, use_gaussian,
pad_border_mode, pad_kwargs=pad_kwargs)
else:
res = self._internal_predict_3D_2Dconv(x, do_mirroring, num_repeats, patch_size, batch_size,
mirror_axes, regions_class_order, pad_border_mode, pad_kwargs=pad_kwargs)
else:
raise RuntimeError("Invalid conv op, cannot determine what dimensionality (2d/3d) the network is")
if use_train_mode is not None:
self.train(current_mode)
return res
def _internal_maybe_mirror_and_pred_3D(self, x, num_repeats, mirror_axes, do_mirroring=True):
with torch.no_grad():
a = torch.zeros(x.shape).float()
if self.get_device() == "cpu":
a = a.cpu()
else:
a = a.cuda(self.get_device())
if do_mirroring:
mirror_idx = 8
else:
mirror_idx = 1
all_preds = []
for i in range(num_repeats):
for m in range(mirror_idx):
data_for_net = np.array(x)
do_stuff = False
if m == 0:
do_stuff = True
pass
if m == 1 and (4 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, :, :, ::-1]
if m == 2 and (3 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, :, ::-1, :]
if m == 3 and (4 in mirror_axes) and (3 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, :, ::-1, ::-1]
if m == 4 and (2 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, :, :]
if m == 5 and (2 in mirror_axes) and (4 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, :, ::-1]
if m == 6 and (2 in mirror_axes) and (3 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, ::-1, :]
if m == 7 and (2 in mirror_axes) and (3 in mirror_axes) and (4 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, ::-1, ::-1]
if do_stuff:
_ = a.data.copy_(torch.from_numpy(np.copy(data_for_net)))
p = self.inference_apply_nonlin(self(a))
p = p.data.cpu().numpy()
if m == 0:
pass
if m == 1 and (4 in mirror_axes):
p = p[:, :, :, :, ::-1]
if m == 2 and (3 in mirror_axes):
p = p[:, :, :, ::-1, :]
if m == 3 and (4 in mirror_axes) and (3 in mirror_axes):
p = p[:, :, :, ::-1, ::-1]
if m == 4 and (2 in mirror_axes):
p = p[:, :, ::-1, :, :]
if m == 5 and (2 in mirror_axes) and (4 in mirror_axes):
p = p[:, :, ::-1, :, ::-1]
if m == 6 and (2 in mirror_axes) and (3 in mirror_axes):
p = p[:, :, ::-1, ::-1, :]
if m == 7 and (2 in mirror_axes) and (3 in mirror_axes) and (4 in mirror_axes):
p = p[:, :, ::-1, ::-1, ::-1]
all_preds.append(p)
return | np.vstack(all_preds) | numpy.vstack |
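# Illustrative sketch, not part of the original file: the mirrored forward passes collected
# above are stacked along the first axis and are typically averaged afterwards. Dummy shapes.
import numpy as np
all_preds = [np.ones((1, 2, 4, 4, 4)), 2 * np.ones((1, 2, 4, 4, 4))]
stacked = np.vstack(all_preds)          # shape (2, 2, 4, 4, 4)
mean_pred = stacked.mean(axis=0)        # average over the mirror configurations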
import sys
import time
import numpy as np
from psychopy import visual, core, event #import some libraries from PsychoPy
import optotrak.ndiapiconstants
import optotrak.client as oclient
import optotrak.datarecorder as dr
import linalg.routines as lr
ndi = optotrak.ndiapiconstants.NDI
opt = oclient.connect(oclient.InstanceType.Emulator)
# pygame seems to be faster than pyglet
# pyglet is the default library
windowed = True
if windowed:
mywin = visual.Window([800,600], monitor="testMonitor", units="deg")
else:
mywin = visual.Window(size=(1920, 1080), fullscr=True, screen=0, allowGUI=False, allowStencil=False,
winType='pyglet', # 'pyglet', 'pygame'
monitor=u'testMonitor',
color=[0, 0, 0],
colorSpace='rgb',
blendMode='avg',
useFBO=False,
units="cm",
#waitBlanking=True
)
#create some stimuli
nmarkers = 4
fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0,0], sf=0, rgb=-1)
circles = [visual.Circle(win=mywin,
radius=0.5,
edges=100,
lineColor=[1.0, 0.0, 0.0],
fillColor=[1.0, 0.0, 0.0],
autoDraw=False)
for i in range(nmarkers)]
for i in range(nmarkers):
circles[i].setPos([0.0, 0.0])
arm_endpoint = visual.Circle(win=mywin,
radius=0.5,
edges=100,
lineColor=[1.0, 0.0, 0.0],
fillColor=[1.0, 0.0, 0.0],
autoDraw=False)
if True:
recorder = dr.OptotrakDataRecorder(opt)
recorder.sleep_period = 0.001
recorder.init_optotrak(nummarkers=nmarkers, collecttime=10.0, datafps=120)
recorder.start_pulling_thread()
while recorder.realtimedatabuffer.is_empty():
time.sleep(0.1)
bias = | np.array([0.0, 0.0, -2500.0]) | numpy.array |
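# Illustrative sketch, not part of the original script: marker positions come back in mm in
# the Optotrak frame; subtracting a fixed bias like the one above re-centres them before they
# are mapped to screen coordinates. The marker value is made up.
import numpy as np
bias = np.array([0.0, 0.0, -2500.0])
marker = np.array([12.0, -40.0, -2600.0])
centred = marker - bias                 # array([ 12., -40., -100.])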
#!/usr/bin/env python
import sys
import numpy as np
import argparse
Res31 = {'ALA': 'A', 'CYS': 'C', 'ASP': 'D', 'GLU': 'E', 'PHE': 'F',
'GLY': 'G', 'HIS': 'H', 'ILE': 'I', 'LYS': 'K', 'LEU': 'L',
'MET': 'M', 'ASN': 'N', 'PRO': 'P', 'GLN': 'Q', 'ARG': 'R',
'SER': 'S', 'THR': 'T', 'VAL': 'V', 'TRP': 'W', 'TYR': 'Y',
'ASX': 'N', 'GLX': 'Q', 'UNK': 'X', 'INI': 'K', 'AAR': 'R',
'ACE': 'X', 'ACY': 'G', 'AEI': 'T', 'AGM': 'R', 'ASQ': 'D',
'AYA': 'A', 'BHD': 'D', 'CAS': 'C', 'CAY': 'C', 'CEA': 'C',
'CGU': 'E', 'CME': 'C', 'CMT': 'C', 'CSB': 'C', 'CSD': 'C',
'CSE': 'C', 'CSO': 'C', 'CSP': 'C', 'CSS': 'C', 'CSW': 'C',
'CSX': 'C', 'CXM': 'M', 'CYG': 'C', 'CYM': 'C', 'DOH': 'D',
'EHP': 'F', 'FME': 'M', 'FTR': 'W', 'GL3': 'G', 'H2P': 'H',
'HIC': 'H', 'HIP': 'H', 'HTR': 'W', 'HYP': 'P', 'KCX': 'K',
'LLP': 'K', 'LLY': 'K', 'LYZ': 'K', 'M3L': 'K', 'MEN': 'N',
'MGN': 'Q', 'MHO': 'M', 'MHS': 'H', 'MIS': 'S', 'MLY': 'K',
'MLZ': 'K', 'MSE': 'M', 'NEP': 'H', 'NPH': 'C', 'OCS': 'C',
'OCY': 'C', 'OMT': 'M', 'OPR': 'R', 'PAQ': 'Y', 'PCA': 'Q',
'PHD': 'D', 'PRS': 'P', 'PTH': 'Y', 'PYX': 'C', 'SEP': 'S',
'SMC': 'C', 'SME': 'M', 'SNC': 'C', 'SNN': 'D', 'SVA': 'S',
'TPO': 'T', 'TPQ': 'Y', 'TRF': 'W', 'TRN': 'W', 'TRO': 'W',
'TYI': 'Y', 'TYN': 'Y', 'TYQ': 'Y', 'TYS': 'Y', 'TYY': 'Y',
'YOF': 'Y', 'FOR': 'X', '---': '-', 'PTR': 'Y', 'LCX': 'K',
'SEC': 'D', 'MCL': 'K', 'LDH': 'K'}
typecode = "ARNDCQEGHILKMFPSTWYVX"
Ntype = 21
code_dict = dict()
def read_pdb(pdb_file, hydrogen=False):
main_chain_atoms = ['N', 'CA', 'C', 'O']
fp_file = open(pdb_file)
lines = fp_file.readlines()
fp_file.close()
chain_dict = dict()
for line in lines:
if line[:6] == 'ATOM ':
chain_id = line[21]
res_idx = line[22:27] # not integer
atom_idx = int(line[6:11].strip())
atomname = line[12:16]
atomname2 = atomname.strip()
res_name = line[17:20]
altloc = line[16]
coor = np.array([float(line[30:38]), float(line[38:46]),
float(line[46:54])], dtype=np.float32)
if altloc != ' ' and altloc != 'A':
continue
if atomname2[0] == 'H' and not hydrogen:
continue
if chain_id not in chain_dict:
chain_dict[chain_id] = dict()
if res_idx not in chain_dict[chain_id]:
chain_dict[chain_id][res_idx] = {
'res_name': res_name, 'atom': dict()}
chain_dict[chain_id][res_idx]['atom'][atom_idx] = {
'atomname': atomname, 'coor': coor,
'line': line}
chain_keys = sorted(chain_dict.keys())
for chain_id in chain_keys:
chain = chain_dict[chain_id]
res_idx_keys = sorted(chain.keys())
for res_idx in res_idx_keys:
residue = chain[res_idx]
res_name = residue['res_name']
atom_dict = residue['atom']
atom_keys = sorted(atom_dict.keys())
sg_coor = []
for atom_idx in atom_keys:
atom = atom_dict[atom_idx]
atomname = atom['atomname']
coor = atom['coor']
if atomname == ' CA ':
chain_dict[chain_id][res_idx]['CA'] = coor
if res_name != 'GLY':
chain_dict[chain_id][res_idx]['CB'] = coor
if atomname == ' CB ':
chain_dict[chain_id][res_idx]['CB'] = coor
atomname2 = atomname.strip()
if res_name != 'GLY':
if (atomname2 not in main_chain_atoms) and (atomname2[0] != 'H'):
sg_coor += [coor]
if res_name != 'GLY':
sg_coor = | np.concatenate([sg_coor]) | numpy.concatenate |
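# Illustrative sketch, not part of the original script: the side-chain atom coordinates
# gathered above end up in an (n_atoms, 3) array; a common follow-up is to use their mean as
# a pseudo side-chain centre. The coordinates are made up.
import numpy as np
sg_coor = [np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])]
sg = np.concatenate([sg_coor])          # shape (2, 3)
sg_center = sg.mean(axis=0)             # array([0.5, 0.5, 0. ])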
import sys, numpy, scipy.ndimage
from collections import defaultdict, deque
import itertools
import copy
import re
file_name = "Input.txt"
size = 12 # 3 or 12 sqrt num_tiles
tile_size = 10
'''
with open(file_name, "r") as fp:
lines = fp.readlines()
lines = [x.strip() for x in lines]
line = lines[0].strip()
toks = line.split(",")
nums = [int(n) for n in toks]
'''
'''
with open(file_name, "r") as fp:
lines = fp.readlines()
lines = [x.strip() for x in lines]
'''
with open(file_name, "r") as fp:
lines = fp.readlines()
lines = [x.strip() for x in lines]
tiles = {}
tiles_data = {}
index = 0
# hash of sides and reversed sides: ids
tile_sides = defaultdict(list)
def R(s):
return ''.join(reversed(s))
while index < len(lines):
tile_no = int(lines[index][5:9])
index += 1
tile_data = []
for _ in range(10):
data = ['0' if x == '.' else '1' for x in lines[index]]
tile_data.append(data)
index += 1
index += 1
tiles_data[tile_no] = tile_data
# Initially left to right, top to bottom
l = ''
for i in range(tile_size):
l += tile_data[i][0]
r = ''
for i in range(tile_size):
r += tile_data[i][-1]
t = ''
for i in range(tile_size):
t += tile_data[0][i]
b = ''
for i in range(tile_size):
b += tile_data[-1][i]
tiles[tile_no] = (l, t, r, b)
tile_sides[l].append((tile_no, (l, t, r, b), '0'))
tile_sides[R(l)].append((tile_no, (R(l), b, R(r), t), 'h'))
tile_sides[r].append((tile_no, (r, R(t), l, R(b)), 'v'))
tile_sides[R(r)].append((tile_no, (R(r), R(b), R(l), R(t)), '180'))
tile_sides[t].append((tile_no, (t, l, b, r), '90h'))
tile_sides[R(t)].append((tile_no, (R(t), r, R(b), l), '90'))
tile_sides[b].append((tile_no, (b, R(l), t, R(r)), '270'))
tile_sides[R(b)].append((tile_no, (R(b), R(r), R(t), R(l)), '270h'))
print(tiles)
print(tiles_data)
print(len(tiles))
print(len(tile_sides))
def find_runs(tile_data, tile_key, tile_dict, run_size, so_far, results):
# print(f"Level {run_size} {tile_data} {so_far}")
if run_size == 1:
results.append(so_far)
# print(f"RETURN {so_far}")
return
# print(f"find_runs {tile_data} rs = {run_size}")
l1,t1,r1,b1 = tile_data
# for k,t in tile_dict.items()
# l2,t2,r2,b2 = t
for opts in tile_sides[r1]:
key2 = opts[0]
if key2 == tile_key:
continue
if key2 not in tile_dict:
continue
l2,t2,r2,b2 = opts[1]
if r1 == l2: # r == l
# r -> l
op = opts[2]
run = (l2, t2, r2, b2) #
tile_copy = dict(tile_dict)
del tile_copy[key2]
find_runs(run, key2, tile_copy, run_size-1, so_far + [(key2, run, op)], results)
'''
if r1 == l2: # r == l
# r -> l
op = '0'
run = (l2, t2, r2, b2) #
find_runs(run, key2, tile_copy, run_size-1, so_far + [(key2, run, op)], results)
if r1 == R(l2): # flip horiz around x axis
op = 'h'
run = (R(l2), b2, R(r2), t2) #
find_runs(run, key2, tile_copy, run_size-1, so_far + [(key2, run, op)], results)
if r1 == r2: # flipv around y axis
op = 'v'
run = (r2, R(t2), l2, R(b2)) #
find_runs(run, key2, tile_copy, run_size-1, so_far + [(key2, run, op)], results)
if r1 == R(r2): # rot 180
op = '180'
run = (R(r2), R(b2), R(l2), R(t2)) #
find_runs(run, key2, tile_copy, run_size-1, so_far + [(key2, run, op)], results)
if r1 == t2: # rot90+fliph
op = '90h'
run = (t2, l2, b2, r2)
find_runs(run, key2, tile_copy, run_size-1, so_far + [(key2, run, op)], results)
if r1 == R(t2): # rot90
op = '90'
run = (R(t2), r2, R(b2), l2)
find_runs(run, key2, tile_copy, run_size-1, so_far + [(key2, run, op)], results)
if r1 == b2: # rot 270
op = '270'
run = (b2, R(l2), t2, R(r2)) #
find_runs(run, key2, tile_copy, run_size-1, so_far + [(key2, run, op)], results)
if r1 == R(b2): # rot 270 + fliph
op = '270h'
run = (R(b2), R(l2), R(t2), R(r2))
find_runs(run, key2, tile_copy, run_size-1, so_far + [(key2, run, op)], results)
'''
return
def find_vertical(run, run_size, all_results, results):
if len(run) == run_size:
results.append(run)
return
if run == [3]:
print("3")
pass
for next_row_index in range(len(all_results)):
if run == [3] and next_row_index == 19:
print("19")
if next_row_index in run:
# Already in current run_size
continue
used_keys = set()
for run_index in run:
for res in all_results[run_index]:
used_keys.add(res[0])
next_row_keys = set()
for res in all_results[next_row_index]:
next_row_keys.add(res[0])
if not used_keys.isdisjoint(next_row_keys):
# common keys
continue
# check if stitched
ok = True
for col in range(size):
if all_results[run[-1]][col][1][3] != all_results[next_row_index][col][1][1]:
ok = False
break
if not ok:
continue
# Recurse
find_vertical(run + [next_row_index], run_size, all_results, results)
return
all_results = []
for k,v in tiles.items():
l,t,r,b = v
tiles_copy = copy.deepcopy(tiles)
del tiles_copy[k]
results = []
# print(f"k= '{k}'")
run = (l, t, r, b)
op = '0'
find_runs(run, k, tiles_copy, size, [(k, run, op)], results) #
for res in results:
all_results.append(res)
results = []
run = (R(l), b, R(r), t) #
op = 'h'
find_runs(run, k, tiles_copy, size, [(k, run, op)], results) #
for res in results:
all_results.append(res)
results = []
run = (r, R(t), l, R(b)) #
op = 'v'
find_runs(run, k, tiles_copy, size, [(k, run, op)], results) #
for res in results:
all_results.append(res)
results = []
run = (R(r), R(b), R(l), R(t)) #
op = '180'
find_runs(run, k, tiles_copy, size, [(k, run, op)], results) #
for res in results:
all_results.append(res)
results = []
run = (t, l, b, r)
op = '90h'
find_runs(run, k, tiles_copy, size, [(k, run, op)], results) #
for res in results:
all_results.append(res)
results = []
run = (R(t), r, R(b), l)
op = '90'
find_runs(run, k, tiles_copy, size, [(k, run, op)], results) ##
for res in results:
all_results.append(res)
results = []
run = (b, R(l), t, R(r)) #
op = '270'
find_runs(run, k, tiles_copy, size, [(k, run, op)], results) ##
for res in results:
all_results.append(res)
results = []
run = (R(b), R(r), R(t), R(l))
op = '270h'
find_runs(run, k, tiles_copy, size, [(k, run, op)], results) #
for res in results:
all_results.append(res)
#print(f"ALL ({len(all_results)}) = ")
#for i in range(len(all_results)):
# print(f" {i}: {all_results[i]}")
#print(f"len = {len(all_results)}")
print(f"ALL_RESULTS=")
for _index,ar in enumerate(all_results):
for _ar in ar:
print(f" {_index}: {_ar[0]} {_ar[2]} ", end='')
print('')
print('')
print(f"Got {len(all_results)} all_results")
tile_keys = set([k for k,_ in tiles_data.items()])
print(f"tile_keys = {tile_keys}")
results = []
for i in range(len(all_results)):
find_vertical([i], size, all_results, results)
if results:
r0 = results[0]
good_squares = [all_results[x] for x in r0]
print(f"FOUND {len(results)} RESULTS = {results}")
# print(f"good_squares {good_squares}")
print("Solutions:")
for res in results:
for index in res:
for r in all_results[index]:
print(f"{r[0]} ", end='')
print('')
print('')
for i in range(size):
print(f"{i}: {good_squares[i]}")
solution = good_squares[0][0][0] * good_squares[0][-1][0] * good_squares[-1][0][0] * good_squares[-1][-1][0]
print(f"Solution1 = {solution}")
def apply_op(data, op):
data = numpy.array(data)
if op == '0':
pass
elif op == 'h':
data = numpy.flip(data, 0)
elif op == 'v':
data = numpy.flip(data, 1)
elif op == '180':
data = numpy.rot90(data, 2)
elif op == '90h':
data = numpy.rot90(data, 1)
data = numpy.flip(data, 0)
elif op == '90':
data = numpy.rot90(data, 1)
elif op == '270':
data = numpy.rot90(data, 3)
elif op == '270h':
data = numpy.rot90(data, 3)
data = numpy.flip(data, 0)
else:
0/0
return data
# #####
'''
big_image = numpy.zeros((size*tile_size, size*tile_size), int)
for row in range(size):
for y in range(0, tile_size):
for col in range(size):
tile_data = tiles_data[good_squares[row][col][0]]
tdata = []
for _r in tile_data:
tdata.append([0 if _x == '0' else 1 for _x in _r])
op = good_squares[row][col][2]
if good_squares[row][col][0] == 1951:
print(f"key = 1951 tdata=\n{tdata} col={col} y={y}")
tdata = apply_op(tdata, op)
if good_squares[row][col][0] == 1951:
print(f"op = {op} tdata=\n{tdata}")
for x in range(0, tile_size):
big_image[row*tile_size + y][col*(tile_size) + (x)] = tdata[y][x]
print(f"BIG_IMAGE = \n{big_image}")
'''
# #####
# Create stiched image
image = numpy.zeros((size*(tile_size-2), size*(tile_size-2)), int)
for row in range(size):
for y in range(1, tile_size-1):
for col in range(size):
tile_data = tiles_data[good_squares[row][col][0]]
tdata = []
for _r in tile_data:
tdata.append([0 if _x == '0' else 1 for _x in _r])
op = good_squares[row][col][2]
tdata = apply_op(tdata, op)
for x in range(1, tile_size-1):
image[row*(tile_size-2) + y-1][col*(tile_size-2) + (x-1)] = tdata[y][x]
print(f"IMAGE = \n{image}")
'''
image = []
for row in range(size):
for y in range(1, tile_size-1):
row_data = [0 for _ in range(size * (tile_size-2))]
for col in range(size):
tile_data = tiles_data[good_squares[row][col][0]]
tdata = []
for _r in tile_data:
tdata.append([0 if _x == '0' else 1 for _x in _r])
op = good_squares[row][col][2]
tdata = apply_op(tdata, op)
for x in range(1, tile_size-1):
row_data[col*(tile_size-2) + (x-1)] = tdata[y][x]
image.append(row_data)
'''
'''
print("Image=")
for row in image:
for c in row:
# print('.' if c == 0 else '#', end='')
print(c, end='')
print('')
print('')
'''
array = image
print(f"ARRAY = \n{array}")
kernel = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
[0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0]]
kernel = numpy.array(kernel)
kernel_count = 15
print(f"Kernel = \n{kernel}")
'''
#
# ## ## ###
# # # # # #
'''
def find_max_matches(convolve, kernel_count):
max_vals = 0
max = 0
for row in convolve:
for col in row:
if col == kernel_count:
max_vals += 1
if col > max:
max = col
print(f"max_vals={max_vals} max = {max}")
return max_vals
answer = 0
while True:
arr = array # 0
convolve = scipy.ndimage.convolve(arr, kernel, None, 'constant', 0)
answer = find_max_matches(convolve, kernel_count)
if answer:
break
arr = numpy.flip(array, 0) # h
convolve = scipy.ndimage.convolve(arr, kernel, None, 'constant', 0)
answer = find_max_matches(convolve, kernel_count)
if answer:
break
arr = numpy.flip(array, 1) # v
convolve = scipy.ndimage.convolve(arr, kernel, None, 'constant', 0)
answer = find_max_matches(convolve, kernel_count)
if answer:
break
arr = numpy.rot90(array, 2) # 180
convolve = scipy.ndimage.convolve(arr, kernel, None, 'constant', 0)
answer = find_max_matches(convolve, kernel_count)
if answer:
break
arr = numpy.rot90(array) # 90h
arr = numpy.flip(arr, 0)
convolve = scipy.ndimage.convolve(arr, kernel, None, 'constant', 0)
answer = find_max_matches(convolve, kernel_count)
if answer:
break
arr = numpy.rot90(array, 1) # 90
convolve = scipy.ndimage.convolve(arr, kernel, None, 'constant', 0)
answer = find_max_matches(convolve, kernel_count)
if answer:
break
arr = numpy.rot90(array, 3) # 270
convolve = scipy.ndimage.convolve(arr, kernel, None, 'constant', 0)
answer = find_max_matches(convolve, kernel_count)
if answer:
break
arr = numpy.rot90(array, 3) # 270h
arr = | numpy.flip(arr, 0) | numpy.flip |
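# Illustrative sketch, not part of the original solution: the eight square orientations tried
# one by one above can also be generated compactly from rot90 and flip.
import numpy as np
def orientations(a):
    for k in range(4):
        r = np.rot90(a, k)
        yield r
        yield np.flip(r, 0)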
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for the bfloat16 Python type."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
# pylint: disable=unused-import,g-bad-import-order
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
bfloat16 = pywrap_tensorflow.TF_bfloat16_type()
class Bfloat16Test(test.TestCase):
def float_values(self):
"""Returns values that should round trip exactly to float and back."""
epsilon = float.fromhex("1.0p-7")
return [
0.0, 1.0, -1, 0.5, -0.5, epsilon, 1.0 + epsilon, 1.0 - epsilon,
-1.0 - epsilon, -1.0 + epsilon, 3.5, 42.0, 255.0, 256.0,
float("inf"), float("-inf"), float("nan")]
def _assertFloatIdentical(self, v, w):
if math.isnan(v):
self.assertTrue(math.isnan(w))
else:
self.assertEqual(v, w)
def testRoundTripToFloat(self):
for v in self.float_values():
self._assertFloatIdentical(v, float(bfloat16(v)))
def testRoundTripToInt(self):
for v in [-256, -255, -34, -2, -1, 0, 1, 2, 10, 47, 128, 255, 256, 512]:
self.assertEqual(v, int(bfloat16(v)))
def testStr(self):
self.assertEqual("0", str(bfloat16(0.0)))
self.assertEqual("1", str(bfloat16(1.0)))
self.assertEqual("-3.5", str(bfloat16(-3.5)))
self.assertEqual("0.0078125", str(bfloat16(float.fromhex("1.0p-7"))))
self.assertEqual("inf", str(bfloat16(float("inf"))))
self.assertEqual("-inf", str(bfloat16(float("-inf"))))
self.assertEqual("nan", str(bfloat16(float("nan"))))
def testRepr(self):
self.assertEqual("bfloat16(0)", repr(bfloat16(0)))
self.assertEqual("bfloat16(1)", repr(bfloat16(1)))
self.assertEqual("bfloat16(-3.5)", repr(bfloat16(-3.5)))
self.assertEqual("bfloat16(0.0078125)",
repr(bfloat16(float.fromhex("1.0p-7"))))
self.assertEqual("bfloat16(inf)", repr(bfloat16(float("inf"))))
self.assertEqual("bfloat16(-inf)", repr(bfloat16(float("-inf"))))
self.assertEqual("bfloat16(nan)", repr(bfloat16(float("nan"))))
def testHash(self):
self.assertEqual(0, hash(bfloat16(0.0)))
self.assertEqual(0x3f80, hash(bfloat16(1.0)))
self.assertEqual(0x7fc0, hash(bfloat16(float("nan"))))
# Tests for Python operations
def testNegate(self):
for v in self.float_values():
self._assertFloatIdentical(-v, float(-bfloat16(v)))
def testAdd(self):
self._assertFloatIdentical(0, float(bfloat16(0) + bfloat16(0)))
self._assertFloatIdentical(1, float(bfloat16(1) + bfloat16(0)))
self._assertFloatIdentical(0, float(bfloat16(1) + bfloat16(-1)))
self._assertFloatIdentical(5.5, float(bfloat16(2) + bfloat16(3.5)))
self._assertFloatIdentical(1.25, float(bfloat16(3.5) + bfloat16(-2.25)))
self._assertFloatIdentical(float("inf"),
float(bfloat16(float("inf")) + bfloat16(-2.25)))
self._assertFloatIdentical(float("-inf"),
float(bfloat16(float("-inf")) + bfloat16(-2.25)))
self.assertTrue(math.isnan(float(bfloat16(3.5) + bfloat16(float("nan")))))
def testSub(self):
self._assertFloatIdentical(0, float(bfloat16(0) - bfloat16(0)))
self._assertFloatIdentical(1, float(bfloat16(1) - bfloat16(0)))
self._assertFloatIdentical(2, float(bfloat16(1) - bfloat16(-1)))
self._assertFloatIdentical(-1.5, float(bfloat16(2) - bfloat16(3.5)))
self._assertFloatIdentical(5.75, float(bfloat16(3.5) - bfloat16(-2.25)))
self._assertFloatIdentical(float("-inf"),
float(bfloat16(-2.25) - bfloat16(float("inf"))))
self._assertFloatIdentical(float("inf"),
float(bfloat16(-2.25) - bfloat16(float("-inf"))))
self.assertTrue(math.isnan(float(bfloat16(3.5) - bfloat16(float("nan")))))
def testMul(self):
self._assertFloatIdentical(0, float(bfloat16(0) * bfloat16(0)))
self._assertFloatIdentical(0, float(bfloat16(1) * bfloat16(0)))
self._assertFloatIdentical(-1, float(bfloat16(1) * bfloat16(-1)))
self._assertFloatIdentical(-7.875, float(bfloat16(3.5) * bfloat16(-2.25)))
self._assertFloatIdentical(float("-inf"),
float(bfloat16(float("inf")) * bfloat16(-2.25)))
self._assertFloatIdentical(float("inf"),
float(bfloat16(float("-inf")) * bfloat16(-2.25)))
self.assertTrue(math.isnan(float(bfloat16(3.5) * bfloat16(float("nan")))))
def testDiv(self):
self.assertTrue(math.isnan(float(bfloat16(0) / bfloat16(0))))
self._assertFloatIdentical(float("inf"), float(bfloat16(1) / bfloat16(0)))
self._assertFloatIdentical(-1, float(bfloat16(1) / bfloat16(-1)))
self._assertFloatIdentical(-1.75, float(bfloat16(3.5) / bfloat16(-2)))
self._assertFloatIdentical(float("-inf"),
float(bfloat16(float("inf")) / bfloat16(-2.25)))
self._assertFloatIdentical(float("inf"),
float(bfloat16(float("-inf")) / bfloat16(-2.25)))
self.assertTrue(math.isnan(float(bfloat16(3.5) / bfloat16(float("nan")))))
def testLess(self):
for v in self.float_values():
for w in self.float_values():
self.assertEqual(v < w, bfloat16(v) < bfloat16(w))
def testLessEqual(self):
for v in self.float_values():
for w in self.float_values():
self.assertEqual(v <= w, bfloat16(v) <= bfloat16(w))
def testGreater(self):
for v in self.float_values():
for w in self.float_values():
self.assertEqual(v > w, bfloat16(v) > bfloat16(w))
def testGreaterEqual(self):
for v in self.float_values():
for w in self.float_values():
self.assertEqual(v >= w, bfloat16(v) >= bfloat16(w))
def testEqual(self):
for v in self.float_values():
for w in self.float_values():
self.assertEqual(v == w, bfloat16(v) == bfloat16(w))
def testNotEqual(self):
for v in self.float_values():
for w in self.float_values():
self.assertEqual(v != w, bfloat16(v) != bfloat16(w))
def testNan(self):
a = np.isnan(bfloat16(float("nan")))
self.assertTrue(a)
np.testing.assert_allclose(np.array([1.0, a]), | np.array([1.0, a]) | numpy.array |
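# Illustrative sketch, not part of the original test: np.testing.assert_allclose treats NaNs
# in matching positions as equal (equal_nan defaults to True), unlike an ordinary elementwise
# comparison.
import numpy as np
x = np.array([1.0, np.nan])
np.testing.assert_allclose(x, x)        # passes despite the NaN
assert not (x == x).all()               # elementwise == says NaN != NaN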
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 5 12:34:57 2019
@author: yijin
"""
import numpy as np
# machine limits epsilon
eps = np.finfo(float).eps
def gradient_descent(w, x, y):
"Logistic regression: e(w) = ln(1+e**(-y*wT*x)"
err0 = -y*x[0]/(1 + np.exp(y*np.dot(w, x)))
err1 = -y*x[1]/(1 + np.exp(y*np.dot(w, x)))
err2 = -y*x[2]/(1 + np.exp(y*np.dot(w, x)))
return np.array([err0, err1, err2])
sum_out = 0
sum_iter = 0
for i in range(100):
data_set = np.random.uniform(low=-1, high=1.0+eps, size=(2,2))
# y = m*x + c
A = np.column_stack((data_set[:,0], np.ones(2)))
y = data_set[:,1]
m, c = | np.linalg.lstsq(A, y, rcond=None) | numpy.linalg.lstsq |
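# Illustrative sketch, not part of the original script: np.linalg.lstsq solves A @ [m, c] ~ y
# in the least-squares sense; with only two points the line fit is exact. The points are made up.
import numpy as np
A = np.column_stack(([0.0, 1.0], np.ones(2)))
y = np.array([1.0, 3.0])
(m, c), *_ = np.linalg.lstsq(A, y, rcond=None)   # m = 2.0, c = 1.0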
import os
import re
import unittest
import numpy as np
import wfdb
class TestAnnotation(unittest.TestCase):
"""
Testing read and write of WFDB annotations, including Physionet
streaming.
Target files created using the original WFDB Software Package
version 10.5.24
"""
def test_1(self):
"""
Target file created with:
rdann -r sample-data/100 -a atr > ann-1
"""
annotation = wfdb.rdann("sample-data/100", "atr")
# This is not the fault of the script. The annotation file specifies a
# length 3
annotation.aux_note[0] = "(N"
# aux_note field with a null written after '(N' which the script correctly picks up. I am just
# getting rid of the null in this unit test to compare with the regexp output below which has
# no null to detect in the output text file of rdann.
# Target data from WFDB software package
lines = tuple(open("tests/target-output/ann-1", "r"))
nannot = len(lines)
target_time = [None] * nannot
target_sample = np.empty(nannot, dtype="object")
target_symbol = [None] * nannot
target_subtype = np.empty(nannot, dtype="object")
target_chan = np.empty(nannot, dtype="object")
target_num = np.empty(nannot, dtype="object")
target_aux_note = [None] * nannot
RXannot = re.compile(
"[ \t]*(?P<time>[\[\]\w\.:]+) +(?P<sample>\d+) +(?P<symbol>.) +(?P<subtype>\d+) +(?P<chan>\d+) +(?P<num>\d+)\t?(?P<aux_note>.*)"
)
for i in range(0, nannot):
(
target_time[i],
target_sample[i],
target_symbol[i],
target_subtype[i],
target_chan[i],
target_num[i],
target_aux_note[i],
) = RXannot.findall(lines[i])[0]
# Convert objects into integers
target_sample = target_sample.astype("int")
target_num = target_num.astype("int")
target_subtype = target_subtype.astype("int")
target_chan = target_chan.astype("int")
# Compare
comp = [
np.array_equal(annotation.sample, target_sample),
np.array_equal(annotation.symbol, target_symbol),
np.array_equal(annotation.subtype, target_subtype),
np.array_equal(annotation.chan, target_chan),
np.array_equal(annotation.num, target_num),
annotation.aux_note == target_aux_note,
]
# Test file streaming
pn_annotation = wfdb.rdann(
"100",
"atr",
pn_dir="mitdb",
return_label_elements=["label_store", "symbol"],
)
pn_annotation.aux_note[0] = "(N"
pn_annotation.create_label_map()
# Test file writing
annotation.wrann(write_fs=True)
write_annotation = wfdb.rdann(
"100", "atr", return_label_elements=["label_store", "symbol"]
)
write_annotation.create_label_map()
assert comp == [True] * 6
assert annotation.__eq__(pn_annotation)
assert annotation.__eq__(write_annotation)
def test_2(self):
"""
Annotation file with many aux_note strings.
Target file created with:
rdann -r sample-data/100 -a atr > ann-2
"""
annotation = wfdb.rdann("sample-data/12726", "anI")
# Target data from WFDB software package
lines = tuple(open("tests/target-output/ann-2", "r"))
nannot = len(lines)
target_time = [None] * nannot
target_sample = np.empty(nannot, dtype="object")
target_symbol = [None] * nannot
target_subtype = np.empty(nannot, dtype="object")
target_chan = np.empty(nannot, dtype="object")
target_num = np.empty(nannot, dtype="object")
target_aux_note = [None] * nannot
RXannot = re.compile(
"[ \t]*(?P<time>[\[\]\w\.:]+) +(?P<sample>\d+) +(?P<symbol>.) +(?P<subtype>\d+) +(?P<chan>\d+) +(?P<num>\d+)\t?(?P<aux_note>.*)"
)
for i in range(0, nannot):
(
target_time[i],
target_sample[i],
target_symbol[i],
target_subtype[i],
target_chan[i],
target_num[i],
target_aux_note[i],
) = RXannot.findall(lines[i])[0]
# Convert objects into integers
target_sample = target_sample.astype("int")
target_num = target_num.astype("int")
target_subtype = target_subtype.astype("int")
target_chan = target_chan.astype("int")
# Compare
comp = [
np.array_equal(annotation.sample, target_sample),
np.array_equal(annotation.symbol, target_symbol),
np.array_equal(annotation.subtype, target_subtype),
np.array_equal(annotation.chan, target_chan),
np.array_equal(annotation.num, target_num),
annotation.aux_note == target_aux_note,
]
# Test file streaming
pn_annotation = wfdb.rdann(
"12726",
"anI",
pn_dir="prcp",
return_label_elements=["label_store", "symbol"],
)
pn_annotation.create_label_map()
# Test file writing
annotation.wrann(write_fs=True)
write_annotation = wfdb.rdann(
"12726", "anI", return_label_elements=["label_store", "symbol"]
)
write_annotation.create_label_map()
assert comp == [True] * 6
assert annotation.__eq__(pn_annotation)
assert annotation.__eq__(write_annotation)
def test_3(self):
"""
Annotation file with custom annotation types
Target file created with:
rdann -r sample-data/1003 -a atr > ann-3
"""
annotation = wfdb.rdann("sample-data/1003", "atr")
# Target data from WFDB software package
lines = tuple(open("tests/target-output/ann-3", "r"))
nannot = len(lines)
target_time = [None] * nannot
target_sample = np.empty(nannot, dtype="object")
target_symbol = [None] * nannot
target_subtype = np.empty(nannot, dtype="object")
target_chan = np.empty(nannot, dtype="object")
target_num = | np.empty(nannot, dtype="object") | numpy.empty |
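# Illustrative sketch, not part of the original test: the regexp above captures the numeric
# fields as strings into object arrays; astype('int') converts them before comparison.
import numpy as np
target_sample = np.empty(3, dtype="object")
target_sample[:] = ["18", "77", "370"]
target_sample = target_sample.astype("int")      # array([ 18,  77, 370])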
import numpy as np
import scipy.io as sio
import torch.utils.data
from torch.utils.data import DataLoader
import pdb
class NeuralData(torch.utils.data.Dataset):
def __init__(self, data, data2, num_trials_per_class=91):
self.data = data
self.data2 = data2
self.num_trials_per_class = num_trials_per_class
self.size = data.shape[0]
def __getitem__(self, index):
input1_data = self.data[index]
input2_data = self.data2[index]
target = index // self.num_trials_per_class
return input1_data, input2_data, target
def __len__(self):
return self.size
def break_correlations(data):
# data is a TxN matrix, representing trials by neurons (and I want to permute the neurons across trials differently to break single trial correlations)
permuted_data = np.zeros_like(data)
for i in range(data.shape[1]):
permuted_data[:, i] = np.random.permutation(data[:, i])
return permuted_data
def get_neural_nocorr_loader(workers=0, batch_size=10, time1=None, time2=None, deltat=None):
data = sio.loadmat('data/ps4_realdata.mat') # load the .mat file.
NumTrainData = data['train_trial'].shape[0]
NumClass = data['train_trial'].shape[1]
NumTestData = data['test_trial'].shape[0]
trainDataArr = np.zeros((NumClass, NumTrainData, 97)) # contains the firing rates for all neurons on all 8 x 91 trials in the training set
testDataArr = np.zeros((NumClass, NumTestData, 97)) # for the testing set.
for classIX in range(NumClass):
for trainDataIX in range(NumTrainData):
trainDataArr[classIX, trainDataIX, :] = np.sum(data['train_trial'][trainDataIX, classIX][1][:, 350:550], 1)
for testDataIX in range(NumTestData):
testDataArr[classIX, testDataIX, :] = np.sum(data['test_trial'][testDataIX, classIX][1][:, 350:550], 1)
# permute the data to break the single trial correlations
trainDataArrNoCorr = np.zeros((NumClass, NumTrainData, 97))
for classIX in range(NumClass):
trainDataArrNoCorr[classIX, :, :] = break_correlations(trainDataArr[classIX, :, :])
trainData = trainDataArr.reshape(-1, 97)
trainDataNoCorr = trainDataArrNoCorr.reshape(-1, 97)
testData = testDataArr.reshape(-1, 97)
trainset = NeuralData(data=trainData, data2=trainDataNoCorr)
testset = NeuralData(data=testData, data2=testData)
trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=workers)
testloader = DataLoader(testset, batch_size=100, shuffle=False, num_workers=workers)
return trainloader, testloader
# get different time windows
def get_neural_time_loader(workers=0, batch_size=10, time1=150, time2=350, deltat=100):
data = sio.loadmat('data/ps4_realdata.mat') # load the .mat file.
NumTrainData = data['train_trial'].shape[0]
NumClass = data['train_trial'].shape[1]
NumTestData = data['test_trial'].shape[0]
trainDataArr = np.zeros((NumClass, NumTrainData, 97)) # contains the firing rates for all neurons on all 8 x 91 trials in the training set
trainDataArr2 = np.zeros((NumClass, NumTrainData, 97))
testDataArr = np.zeros((NumClass, NumTestData, 97)) # for the testing set.
testDataArr2 = np.zeros((NumClass, NumTestData, 97)) # for the testing set.
for classIX in range(NumClass):
for trainDataIX in range(NumTrainData):
trainDataArr[classIX, trainDataIX, :] = np.sum(data['train_trial'][trainDataIX, classIX][1][:, time1:time1 + deltat], 1)
trainDataArr2[classIX, trainDataIX, :] = np.sum(data['train_trial'][trainDataIX, classIX][1][:, time2:time2 + deltat], 1)
for testDataIX in range(NumTestData):
testDataArr[classIX, testDataIX, :] = np.sum(data['test_trial'][testDataIX, classIX][1][:, time1:time1 + deltat], 1)
testDataArr2[classIX, testDataIX, :] = | np.sum(data['test_trial'][testDataIX, classIX][1][:, time2:time2 + deltat], 1) | numpy.sum |
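# Illustrative sketch, not part of the original loader: the firing-rate features above are
# spike counts obtained by summing a neurons x time raster over a time window. Dummy data only.
import numpy as np
spikes = np.random.default_rng(0).integers(0, 2, size=(97, 700))   # neurons x ms
counts = np.sum(spikes[:, 350:550], axis=1)                         # one count per neuron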
'''
A simple Bax-Sneppen 2D model with basic functions to populate and advance the model.
This code was built by <NAME>, <NAME>, <NAME> and <NAME>,
all master students Computational Science at the University of Amsterdam.
'''
import itertools
from copy import deepcopy
from scipy.stats import multivariate_normal
import numpy as np
class BaxSneppen2D(object):
'''
A simple Bax-Sneppen 2D model with basic functions to populate and advance the model.
'''
def __init__(self, slum_size=(15, 15), empty_percent=0.3, cell_decrease_factor=0.8):
# Set the cell decrease factor parameter.
self.cell_decrease_factor = cell_decrease_factor
# Set some variables to keep track of the slum.
self.state = np.ones(slum_size) * 2
self.ages = np.ones(slum_size) * -1
self.neighbour_counts = {}
# Populate the grid.
self.populate(empty_percent, slum_size)
# Get the neighbour counts of the empty cells
self.init_neighbour_counts()
# Normal distribution used to add values to the grid.
x_mean = slum_size[0]*0.5
y_mean = slum_size[1]*0.5
cov = np.array([[x_mean*0.8, 0], [0, y_mean*0.8]])
self.mvn = multivariate_normal([x_mean, y_mean], cov)
self.slum_size = slum_size
def populate(self, empty_percent, slum_size):
'''
        Populates the slum grid with uniform random cell values
        and marks a given fraction of the cells as empty.
PARAMETERS
===================================================
empty_percent: float
The percent of cells which have to become empty.
Range between 0 and 1.
slum_size: (int, int)
The size of the slum which has to be populated.
'''
if empty_percent == 1:
return
for x_cell in range(slum_size[0]):
for y_cell in range(slum_size[1]):
                self.state[x_cell][y_cell] = np.random.uniform(0, 1)
                self.ages[x_cell][y_cell] = 0
        empty = np.random.choice(range(slum_size[0] * slum_size[1]),
                                 int(empty_percent * slum_size[0] * slum_size[1]), replace=False)
for i in empty:
self.state[i % slum_size[0]][i // slum_size[0]] = 2
self.ages[i % slum_size[0]][i // slum_size[0]] = -1
def get_min_val(self):
'''
Returns the minimum cell value in the state.
PARAMETERS
===================================================
None
RETURNS
===================================================
float
The minimum cell value in the state. Range between
0 and 1.
'''
return np.min(self.state)
# This is a logical function to have within the class.
# pylint: disable=no-self-use
def count_neighbours(self, state, x_cell, y_cell):
'''
        Counts the number of occupied neighbours of the cell
        at (x_cell, y_cell) in the given state.
PARAMETERS
===================================================
state: numpy.ndarray
The state in which the cell resides.
x_cell: integer
The x-coordinate of the cell.
y_cell: integer
The y-coordinate of the cell
RETURNS
===================================================
integer
The number of neighbours of the cell at
(x_cell, y_cell).
'''
neighbours = 0
combinations = [[-1, 0], [1, 0], [0, -1], [0, 1]]
for x_dif, y_dif in combinations:
x_coor = (x_cell + x_dif) % len(state)
y_coor = (y_cell + y_dif) % len(state[0])
if state[x_coor][y_coor] != 2:
neighbours += 1
return neighbours
def init_neighbour_counts(self):
'''
Initialises the neighbour counts of all cells.
PARAMETERS
===================================================
None
'''
for x_cell in range(len(self.state)):
for y_cell in range(len(self.state[0])):
if self.state[x_cell][y_cell] == 2:
count = self.count_neighbours(self.state, x_cell, y_cell)
self.neighbour_counts[(x_cell, y_cell)] = count
def update_neighbour_counts(self, state, x_cell, y_cell, update_value):
'''
        Updates the neighbour counts of the empty cells adjacent
        to the cell at (x_cell, y_cell).
PARAMETERS
===================================================
state: numpy.ndarray
The state in which the cell resides.
x_cell: integer
The x-coordinate of the cell.
y_cell: integer
The y-coordinate of the cell
update_value: integer
The change in the neighbour count.
'''
combinations = [[-1, 0], [1, 0], [0, -1], [0, 1]]
if update_value < 0:
self.neighbour_counts[(x_cell, y_cell)] = self.count_neighbours(state, x_cell, y_cell)
for x_dif, y_dif in combinations:
x_coor = (x_cell + x_dif) % len(state)
y_coor = (y_cell + y_dif) % len(state[0])
if state[x_coor][y_coor] == 2:
self.neighbour_counts[(x_coor, y_coor)] += update_value
def get_min_val_index(self):
'''
Returns the index of the cell with the minimum cell
value in the state.
PARAMETERS
===================================================
None
RETURNS
===================================================
        integer
The index of the cell with the minimum cell value
in the state.
'''
return np.argmin(self.state)
def get_avg_val(self):
'''
Returns the average cell value in the state.
PARAMETERS
===================================================
None
RETURNS
===================================================
float
The average cell value in the state. Range between
0 and 1.
'''
non_empty = self.state[self.state != 2]
if non_empty.any():
return np.mean(non_empty)
return 0
def has_empty(self):
'''
Returns if there is an empty cell in the state.
PARAMETERS
===================================================
None
RETURNS
===================================================
boolean
Whether the state has an empty cell.
'''
empty = np.where(self.state == 2)
if not empty[0].any():
return False
return True
def get_density(self):
'''
Returns the density of the current state.
PARAMETERS
===================================================
None
RETURNS
===================================================
float
The density of the current state. If the state is
empty, -1 is returned.
'''
non_empty_size = | np.sum(self.state != 2) | numpy.sum |
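# Illustrative sketch, not part of the original model: get_density above counts occupied cells
# with a boolean mask; the value 2 marks an empty cell in this model.
import numpy as np
state = np.array([[0.3, 2.0], [0.7, 0.1]])
occupied = np.sum(state != 2)           # 3 occupied cells
density = occupied / state.size         # 0.75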
#!/usr/bin/env python
# coding: utf-8
"""
Tools for plotting data from Dimitris' global high resolution
model once read into xarray Dataset.
"""
import numpy as np
import xarray as xr
import gsw
import scipy as sp
from scipy import interpolate
def distance(lon, lat, p=np.array([0]), axis=-1):
"""
From gsw: Great-circle distance in m between lon, lat points.
Parameters
----------
lon, lat : array-like, 1-D or 2-D (shapes must match)
Longitude, latitude, in degrees.
p : array-like, scalar, 1-D or 2-D, optional, default is 0
Sea pressure (absolute pressure minus 10.1325 dbar), dbar
axis : int, -1, 0, 1, optional
The axis or dimension along which *lat and lon* vary.
This differs from most functions, for which axis is the
dimension along which p increases.
Returns
-------
distance : 1-D or 2-D array
distance in meters between adjacent points.
"""
earth_radius = 6371e3
if not lon.shape == lat.shape:
raise ValueError('lon, lat shapes must match; found %s, %s'
% (lon.shape, lat.shape))
if not (lon.ndim in (1, 2) and lon.shape[axis] > 1):
raise ValueError('lon, lat must be 1-D or 2-D with more than one point'
' along axis; found shape %s and axis %s'
% (lon.shape, axis))
if lon.ndim == 1:
one_d = True
lon = lon[np.newaxis, :]
lat = lat[np.newaxis, :]
axis = -1
else:
one_d = False
one_d = one_d and p.ndim == 1
if axis == 0:
indm = (slice(0, -1), slice(None))
indp = (slice(1, None), slice(None))
else:
indm = (slice(None), slice(0, -1))
indp = (slice(None), slice(1, None))
if np.all(p == 0):
z = 0
else:
lon, lat, p = np.broadcast_arrays(lon, lat, p)
p_mid = 0.5 * (p[indm] + p[indp])
lat_mid = 0.5 * (lat[indm] + lat[indp])
        z = gsw.z_from_p(p_mid, lat_mid)
lon = np.radians(lon)
lat = np.radians(lat)
dlon = np.diff(lon, axis=axis)
dlat = np.diff(lat, axis=axis)
a = ((np.sin(dlat / 2)) ** 2 + np.cos(lat[indm]) *
np.cos(lat[indp]) * (np.sin(dlon / 2)) ** 2)
angles = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
distance = (earth_radius + z) * angles
if one_d:
distance = distance[0]
return distance
def model_bathy_section(lon,lat,d,res=1,ext=0):
"""Extract Samoan Passage bathymetry along sections defined by lon/lat
coordinates.
Parameters
----------
lon : arraylike
Longitude position along section
lat : arraylike
Latitude position along section
d : Dataset
Model Dataset to access lon/lat/BottomDepth
res : float
Bathymetry resolution
ext : float
Extension on both sides in km. Set to 0 for no extension
Returns
-------
out : dict
Dictionary with output variables
"""
# Make sure lon and lat have the same dimensions
assert lon.shape==lat.shape, 'lat and lon must have the same size'
    # Make sure lon and lat have at least 2 elements
assert len(lon)>1 and len(lat)>1, 'lon/lat must have at least 2 elements'
# Load bathymetry
# plon = b['lon']
plon = d.lon.values
# plat = b['lat']
plat = d.lat.values
# ptopo = -b['merged']
ptopo = d.BottomDepth.values
# 2D interpolation function used below
    f = interpolate.RectBivariateSpline(plat, plon, ptopo)
# calculate distance between original points
dist = np.cumsum(distance(lon, lat, np.array([0]))/1000)
# distance 0 as first element
dist = np.insert(dist,0,0)
# Extend lon and lat if ext>0
if ext:
'''
Linear fit to start and end points. Do two separate fits if more than
4 data points are give. Otherwise fit all points together.
Need to calculate distance first and then scale the lon/lat extension with distance.
'''
if len(lon)<5:
# only one fit for 4 or less data points
dlo = np.abs(lon[0]-lon[-1])
dla = np.abs(lat[0]-lat[-1])
dd = np.abs(dist[0]-dist[-1])
# fit either against lon or lat, depending on orientation of section
if dlo>dla:
bfit = np.polyfit(lon,lat,1)
# extension expressed in longitude (scale dist to lon)
lonext = 1.1*ext/dd*dlo
if lon[0]<lon[-1]:
elon = np.array([lon[0]-lonext,lon[-1]+lonext])
else:
elon = np.array([lon[0]+lonext,lon[-1]-lonext])
blat = np.polyval(bfit,elon)
nlon = np.hstack((elon[0],lon,elon[-1]))
nlat = np.hstack((blat[0],lat,blat[-1]))
else:
bfit = np.polyfit(lat,lon,1)
# extension expressed in latitude (scale dist to lat)
latext = 1.1*ext/dd*dla
if lat[0]<lat[-1]:
                    elat = np.array([lat[0]-latext, lat[-1]+latext])
                else:
                    elat = np.array([lat[0]+latext, lat[-1]-latext])
blon = np.polyval(bfit,elat)
nlon = np.hstack((blon[0],lon,blon[-1]))
nlat = np.hstack((elat[0],lat,elat[-1]))
else:
# one fit on each side of the section as it may change direction
dlo1 = np.abs(lon[0]-lon[2])
dla1 = np.abs(lat[0]-lat[2])
dd1 = np.abs(dist[0]-dist[2])
dlo2 = np.abs(lon[-3]-lon[-1])
dla2 = np.abs(lat[-3]-lat[-1])
dd2 = np.abs(dist[-3]-dist[-1])
# deal with one side first
if dlo1>dla1:
bfit1 = np.polyfit(lon[0:3],lat[0:3],1)
lonext1 = 1.1*ext/dd1*dlo1
if lon[0]<lon[2]:
elon1 = np.array([lon[0]-lonext1,lon[0]])
else:
elon1 = np.array([lon[0]+lonext1,lon[0]])
elat1 = np.polyval(bfit1,elon1)
else:
bfit1 = np.polyfit(lat[0:3],lon[0:3],1)
latext1 = 1.1*ext/dd1*dla1
if lat[0]<lat[2]:
elat1 = np.array([lat[0]-latext1,lat[0]])
else:
elat1 = np.array([lat[0]+latext1,lat[0]])
elon1 = np.polyval(bfit1,elat1)
# now the other side
if dlo2>dla2:
bfit2 = np.polyfit(lon[-3:],lat[-3:],1)
lonext2 = 1.1*ext/dd2*dlo2
if lon[-3]<lon[-1]:
elon2 = np.array([lon[-1],lon[-1]+lonext2])
else:
elon2 = np.array([lon[-1],lon[-1]-lonext2])
elat2 = np.polyval(bfit2,elon2)
else:
bfit2 = np.polyfit(lat[-3:],lon[-3:],1)
latext2 = 1.1*ext/dd2*dla2
if lat[-3]<lat[-1]:
elat2 = np.array([lat[-1],lat[-1]+latext2])
else:
elat2 = np.array([lat[-1],lat[-1]-latext2])
elon2 = np.polyval(bfit2,elat2)
# combine everything
nlon = np.hstack((elon1[0],lon,elon2[1]))
nlat = np.hstack((elat1[0],lat,elat2[1]))
lon = nlon
lat = nlat
# Original points (but including extension if there are any)
olat = lat
olon = lon
# Interpolated points
ilat = []
ilon = []
# Output dict
out = {}
# calculate distance between points
    dist2 = distance(lon, lat, np.array([0]))
import os
from PIL import Image
from lib.detect import detect
import numpy as np
from tqdm import tqdm
import cv2
from lib.detect import resize_img,load_pil,adjust_ratio,restore_polys,plot_boxes
import torch
import lanms
import shapely
from shapely.geometry import Polygon
from lib.swt import SWT
def get_negative_sample(score_map,negative_thresh=0.5):
'''Get the third negative samples
reduce FN
Input:
score map: <numpy.ndarray, (m,n)>
Output:
negative map: <numpy.ndarray, (m,n)>
'''
score_map = score_map[0, :, :]
negative_map = np.zeros(score_map.shape)
postive_score = (score_map>negative_thresh)*1.0
    # dilate the positive region so negatives are sampled away from text pixels
kernel = np.ones((4, 4), np.uint8)
postive_score = cv2.dilate(postive_score, kernel, iterations=1)
xy_negative = np.argwhere(score_map*(1-postive_score))
xy_negative = xy_negative[np.argsort(xy_negative[:, 0])]
    valid_score = score_map[xy_negative[:, 0], xy_negative[:, 1]]  # scores at the candidate negative locations, shape (n,)
xy_negative = xy_negative[np.argsort(valid_score)]
# negative_number = xy_negative.shape[0]
    xy_negative = xy_negative[:int(xy_negative.shape[0] / 3), :].T  # keep the lowest-scoring third as negative samples for supervision
negative_map[xy_negative[0], xy_negative[1]] = 1
return negative_map
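# Illustrative sketch (not in the original file): calling get_negative_sample on a
# dummy score map; shapes follow the function above, i.e. score_map is (1, H, W).
def _example_negative_sampling():
    dummy_score = np.random.rand(1, 128, 128).astype(np.float32)
    neg_map = get_negative_sample(dummy_score, negative_thresh=0.5)
    # neg_map marks the lowest-scoring third of pixels outside the dilated text region
    return neg_map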
def get_boxes(score, geo, score_thresh=0.9, nms_thresh=0.2):
'''get boxes from feature map
Input:
score : score map from model <numpy.ndarray, (1,row,col)>
geo : geo map from model <numpy.ndarray, (5,row,col)>
score_thresh: threshold to segment score map
nms_thresh : threshold in nms
Output:
boxes : final polys <numpy.ndarray, (n,9)>
'''
score = score[0,:,:]
xy_text = np.argwhere(score > score_thresh) # n x 2, format is [r, c]
if xy_text.size == 0:
return None,None
    xy_text = xy_text[np.argsort(xy_text[:, 0])]
from PIL import Image
import numpy as np
def get_size_and_grad(height_len, width_len):
correct = True
flag = 0
while correct:
if flag > 0:
print("размеру мозаики должен быть делителем для ширины и высоты изображения, "
"а градация серого должна быть меньше 128")
print('Введите размер мозаики и градацию серого через пробел: ')
flag += 1
array = input().split(' ')
array = [int(x) for x in array]
if height_len % array[0] == 0 and width_len % array[0] == 0 and array[1] < 128:
correct = False
            print('The input is valid; look for the result in the folder.')
return(array[0], array[1])
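# Illustrative sketch (not in the original script): the same acceptance rule as
# get_size_and_grad, usable without console input, e.g.
# _is_valid_choice(1080, 1920, 8, 64) returns True.
def _is_valid_choice(height_len, width_len, size, grad):
    # mosaic size must tile the image exactly and the gray gradation must stay below 128
    return height_len % size == 0 and width_len % size == 0 and grad < 128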
def search_grey(i, j, array, size, grad):
    sum = int(np.sum(array[i:i + size, j:j + size, 0]))
"""
Generate a bunch of trimesh objects, in meter radian
"""
import math
import numpy as np
import basis.trimesh.primitives as tp
import basis.trimesh as trm
import basis.robot_math as rm
import shapely.geometry as shpg
def gen_box(extent=np.array([1, 1, 1]), homomat=np.eye(4)):
"""
:param extent: x, y, z (origin is 0)
:param homomat: rotation and translation
:return: a Trimesh object (Primitive)
author: weiwei
date: 20191228osaka
"""
return tp.Box(box_extents=extent, box_transform=homomat)
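# Illustrative sketch (not part of the original module): a 0.1 m cube shifted 0.2 m
# along x; rm.homomat_from_posrot is the same helper used by the sticks below.
def _example_box():
    homomat = rm.homomat_from_posrot(np.array([0.2, 0, 0]), np.eye(3))
    return gen_box(extent=np.array([0.1, 0.1, 0.1]), homomat=homomat)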
def gen_stick(spos=np.array([0, 0, 0]), epos=np.array([0.1, 0, 0]), thickness=0.005, type="rect", sections=8):
"""
interface to genrectstick/genroundstick
:param spos: 1x3 nparray
:param epos: 1x3 nparray
:param thickness: 0.005 m by default
:param type: rect or round
:param sections: # of discretized sectors used to approximate a cylinder
:return:
author: weiwei
date: 20191228osaka
"""
if type == "rect":
return gen_rectstick(spos, epos, thickness, sections=sections)
if type == "round":
return gen_roundstick(spos, epos, thickness, count=[sections / 2.0, sections / 2.0])
def gen_rectstick(spos=np.array([0, 0, 0]), epos=np.array([0.1, 0, 0]), thickness=.005, sections=8):
"""
:param spos: 1x3 nparray
:param epos: 1x3 nparray
:param thickness: 0.005 m by default
:param sections: # of discretized sectors used to approximate a cylinder
:return: a Trimesh object (Primitive)
author: weiwei
date: 20191228osaka
"""
pos = spos
height = np.linalg.norm(epos - spos)
if np.allclose(height, 0):
rotmat = np.eye(3)
else:
rotmat = rm.rotmat_between_vectors(np.array([0, 0, 1]), epos - spos)
homomat = rm.homomat_from_posrot(pos, rotmat)
return tp.Cylinder(height=height, radius=thickness / 2.0, sections=sections, homomat=homomat)
def gen_roundstick(spos=np.array([0, 0, 0]), epos=np.array([0.1, 0, 0]), thickness=0.005, count=[8, 8]):
"""
:param spos:
:param epos:
:param thickness:
:return: a Trimesh object (Primitive)
author: weiwei
date: 20191228osaka
"""
pos = spos
height = np.linalg.norm(epos - spos)
if np.allclose(height, 0):
rotmat = np.eye(3)
else:
rotmat = rm.rotmat_between_vectors(np.array([0, 0, 1]), epos - spos)
homomat = rm.homomat_from_posrot(pos, rotmat)
return tp.Capsule(height=height, radius=thickness / 2.0, count=count, homomat=homomat)
def gen_dashstick(spos=np.array([0, 0, 0]), epos=np.array([0.1, 0, 0]), thickness=0.005, lsolid=None, lspace=None,
sections=8, sticktype="rect"):
"""
:param spos: 1x3 nparray
:param epos: 1x3 nparray
:param thickness: 0.005 m by default
    :param lsolid: length of the solid section, 1.6*thickness if None
    :param lspace: length of the empty section, 1.07*thickness if None
:return:
author: weiwei
date: 20191228osaka
"""
solidweight = 1.6
spaceweight = 1.07
if not lsolid:
lsolid = thickness * solidweight
if not lspace:
lspace = thickness * spaceweight
length, direction = rm.unit_vector(epos - spos, toggle_length=True)
nstick = math.floor(length / (lsolid + lspace))
vertices = np.empty((0, 3))
faces = np.empty((0, 3))
for i in range(0, nstick):
tmp_spos = spos + (lsolid * direction + lspace * direction) * i
tmp_stick = gen_stick(spos=tmp_spos,
epos=tmp_spos + lsolid * direction,
thickness=thickness,
type=sticktype,
sections=sections)
tmp_stick_faces = tmp_stick.faces + len(vertices)
vertices = np.vstack((vertices, tmp_stick.vertices))
faces = np.vstack((faces, tmp_stick_faces))
# wrap up the last segment
tmp_spos = spos + (lsolid * direction + lspace * direction) * nstick
tmp_epos = tmp_spos + lsolid * direction
final_length, _ = rm.unit_vector(tmp_epos - spos, toggle_length=True)
if final_length > length:
tmp_epos = epos
tmp_stick = gen_stick(spos=tmp_spos,
epos=tmp_epos,
thickness=thickness,
type=sticktype,
sections=sections)
tmp_stick_faces = tmp_stick.faces + len(vertices)
vertices = np.vstack((vertices, tmp_stick.vertices))
faces = np.vstack((faces, tmp_stick_faces))
return trm.Trimesh(vertices=vertices, faces=faces)
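# Illustrative sketch (not part of the original module): with the defaults above
# (lsolid = 1.6*thickness, lspace = 1.07*thickness) a 0.1 m dashed stick of
# thickness 0.005 m yields floor(0.1 / (0.008 + 0.00535)) = 7 full dashes plus the
# wrap-up segment handled at the end of gen_dashstick.
def _example_dashstick():
    return gen_dashstick(spos=np.array([0, 0, 0]),
                         epos=np.array([0.1, 0, 0]),
                         thickness=0.005)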
def gen_sphere(pos=np.array([0, 0, 0]), radius=0.02, subdivisions=2):
"""
:param pos: 1x3 nparray
:param radius: 0.02 m by default
:param subdivisions: levels of icosphere discretization
:return:
author: weiwei
date: 20191228osaka
"""
return tp.Sphere(sphere_radius=radius, sphere_center=pos, subdivisions=subdivisions)
def gen_ellipsoid(pos=np.array([0, 0, 0]), axmat=np.eye(3), subdivisions=5):
"""
:param pos:
:param axmat: 3x3 mat, each column is an axis of the ellipse
:param subdivisions: levels of icosphere discretization
:return:
author: weiwei
date: 20191228osaka
"""
homomat = rm.homomat_from_posrot(pos, axmat)
sphere = tp.Sphere(sphere_radius=1, sphere_center=pos, subdivisions=subdivisions)
vertices = rm.homomat_transform_points(homomat, sphere.vertices)
return trm.Trimesh(vertices=vertices, faces=sphere.faces)
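# Illustrative sketch (not part of the original module): the columns of axmat act as
# the ellipsoid's semi-axes, so a diagonal axmat simply scales the unit sphere.
def _example_ellipsoid():
    axmat = np.diag([0.05, 0.03, 0.01])  # semi-axes of 5 cm, 3 cm and 1 cm
    return gen_ellipsoid(pos=np.array([0, 0, 0]), axmat=axmat)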
def gen_dumbbell(spos=np.array([0, 0, 0]), epos=np.array([0.1, 0, 0]), thickness=0.005, sections=8, subdivisions=1):
"""
    NOTE: return stick+spos_ball+epos_ball also works, but it is a bit slower
:param spos: 1x3 nparray
:param epos: 1x3 nparray
:param thickness: 0.005 m by default
:param sections:
:param subdivisions: levels of icosphere discretization
:return:
author: weiwei
date: 20191228osaka
"""
stick = gen_rectstick(spos=spos, epos=epos, thickness=thickness, sections=sections)
spos_ball = gen_sphere(pos=spos, radius=thickness, subdivisions=subdivisions)
epos_ball = gen_sphere(pos=epos, radius=thickness, subdivisions=subdivisions)
vertices = np.vstack((stick.vertices, spos_ball.vertices, epos_ball.vertices))
sposballfaces = spos_ball.faces + len(stick.vertices)
endballfaces = epos_ball.faces + len(spos_ball.vertices) + len(stick.vertices)
faces = np.vstack((stick.faces, sposballfaces, endballfaces))
return trm.Trimesh(vertices=vertices, faces=faces)
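# Illustrative sketch (not part of the original module): the same face-offset idiom
# used in gen_dashstick/gen_dumbbell above; appended face indices must be shifted by
# the number of vertices already stacked.
def _example_concat_two_spheres():
    a = gen_sphere(pos=np.array([0, 0, 0]), radius=0.01)
    b = gen_sphere(pos=np.array([0.05, 0, 0]), radius=0.01)
    vertices = np.vstack((a.vertices, b.vertices))
    faces = np.vstack((a.faces, b.faces + len(a.vertices)))
    return trm.Trimesh(vertices=vertices, faces=faces)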
def gen_cone(spos=np.array([0, 0, 0]),
import numpy as np #for array processing
from PIL import Image #for Image Array conversion
Image.MAX_IMAGE_PIXELS = None #to remove limit on maximum number of pixels to be processed
# 3x3 edge detection
def edgeDetection():
    kernel = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
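# Illustrative sketch (not in the original file): edgeDetection above is truncated
# after defining a 3x3 Sobel-style kernel; one common way to apply such a kernel is
# a plain 2D convolution, shown here with scipy as an assumed dependency (the
# original implementation may differ).
def _example_apply_kernel(gray_array, kernel):
    from scipy.ndimage import convolve  # assumption: scipy available for this sketch
    return convolve(gray_array.astype(float), kernel, mode='nearest')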
from torch import optim, nn
import coloredlogs
import logging
import os
import torch
import numpy as np
from datetime import datetime
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from transformers import AdamW, get_constant_schedule_with_warmup
import datasets.utils
import models.utils
from datasets.episode import EpisodeDataset
from models.base_models import BERTSequenceModel
from models.seq_meta import SeqMetaModel
logger = logging.getLogger('MAML Log')
coloredlogs.install(logger=logger, level='DEBUG', fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
class MAML:
def __init__(self, config):
now = datetime.now()
date_time = now.strftime("%m-%d-%H-%M-%S")
self.tensorboard_writer = SummaryWriter(log_dir='runs/MAML-' + date_time)
self.base_path = config['base_path']
self.stamp = config['stamp']
self.updates = config['num_updates']
self.meta_epochs = config['num_meta_epochs']
self.early_stopping = config['early_stopping']
self.meta_lr = config.get('meta_lr', 1e-3)
self.meta_weight_decay = config.get('meta_weight_decay', 0.0)
self.stopping_threshold = config.get('stopping_threshold', 1e-3)
self.meta_batch_size = config.get('meta_batch_size', 16)
self.fomaml = config.get('fomaml', False)
self.multi_gpu = torch.cuda.device_count() > 1 and config.get('multi_gpu', False)
if self.multi_gpu:
self.n_devices = torch.cuda.device_count()
logger.info('Using {} GPUs'.format(self.n_devices))
if 'seq' in config['meta_model']:
self.meta_model = SeqMetaModel(config)
if self.fomaml:
logger.info('FOMAML instantiated')
else:
logger.info('MAML instantiated')
def _replicate_model(self):
replica_meta_models = models.utils.replicate_model_to_gpus(self.meta_model, list(range(self.n_devices)))
return replica_meta_models
def _multi_gpu_training(self, train_episodes):
chunked_train_episodes = []
for i in range(self.n_devices):
chunk = train_episodes[i::self.n_devices]
chunked_train_episodes.append([chunk])
kwargs_tup = ({'updates': self.updates}, ) * self.n_devices
parallel_outputs = nn.parallel.parallel_apply(self.replica_meta_models, chunked_train_episodes,
kwargs_tup, list(range(self.n_devices)))
losses, accuracies, precisions, recalls, f1s = [], [], [], [], []
for i in range(self.n_devices):
losses.extend(parallel_outputs[i][0])
accuracies.extend(parallel_outputs[i][1])
precisions.extend(parallel_outputs[i][2])
recalls.extend(parallel_outputs[i][3])
f1s.extend(parallel_outputs[i][4])
target_device = next(self.meta_model.learner.parameters()).device
for name, param in self.meta_model.learner.named_parameters():
if param.requires_grad:
grad_sum = 0
for r in self.replica_meta_models:
for n, p in r.learner.named_parameters():
if n == name:
grad_sum += p.grad.to(target_device)
param.grad = grad_sum / self.n_devices
return losses, accuracies, precisions, recalls, f1s
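    # Illustrative note (not in the original file): episodes are split round-robin
    # across devices via train_episodes[i::n_devices]; each replica accumulates
    # learner gradients locally, and the loop above averages those gradients onto
    # the primary learner before the meta-optimizer step runs.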
def _synchronize_weights(self):
for rm in self.replica_meta_models[1:]:
rm.learner.load_state_dict(self.meta_model.learner.state_dict())
rm.learner.zero_grad()
def initialize_optimizer_scheduler(self):
learner_params = [p for p in self.meta_model.learner.parameters() if p.requires_grad]
if isinstance(self.meta_model.learner, BERTSequenceModel):
meta_optimizer = AdamW(learner_params, lr=self.meta_lr, weight_decay=self.meta_weight_decay)
lr_scheduler = get_constant_schedule_with_warmup(meta_optimizer, num_warmup_steps=100)
else:
meta_optimizer = optim.Adam(learner_params, lr=self.meta_lr, weight_decay=self.meta_weight_decay)
lr_scheduler = optim.lr_scheduler.StepLR(meta_optimizer, step_size=200, gamma=0.5)
return meta_optimizer, lr_scheduler
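    # Illustrative note (not in the original file): the choice above depends only on
    # the learner type; a BERTSequenceModel learner gets AdamW with constant warmup,
    # while any other learner gets Adam with a StepLR schedule. meta_lr and
    # meta_weight_decay come from the config defaults set in __init__.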
def training(self, train_episodes, val_episodes):
meta_optimizer, lr_scheduler = self.initialize_optimizer_scheduler()
best_loss = float('inf')
best_f1 = 0
patience = 0
global_step = 0
model_path = os.path.join(self.base_path, 'saved_models', 'MetaModel-{}.h5'.format(self.stamp))
logger.info('Model name: MetaModel-{}.h5'.format(self.stamp))
episode_train_dataset = EpisodeDataset(train_episodes)
episode_train_dataloader = DataLoader(episode_train_dataset, batch_size=self.meta_batch_size,
collate_fn=datasets.utils.prepare_task_batch, shuffle=True)
if self.multi_gpu:
self.replica_meta_models = self._replicate_model()
for epoch in range(self.meta_epochs):
logger.info('Starting epoch {}'.format(epoch+1))
losses, accuracies, precisions, recalls, f1s = [], [], [], [], []
for idx, epts in enumerate(episode_train_dataloader):
logger.info('Iteration {}/{}'.format(idx+1, len(episode_train_dataloader)))
meta_optimizer.zero_grad()
if not self.multi_gpu:
ls, acc, prec, rcl, f1 = self.meta_model(epts, self.updates)
else:
ls, acc, prec, rcl, f1 = self._multi_gpu_training(epts)
meta_optimizer.step()
lr_scheduler.step()
global_step += 1
if self.multi_gpu:
self._synchronize_weights()
losses.extend(ls)
accuracies.extend(acc)
precisions.extend(prec)
recalls.extend(rcl)
f1s.extend(f1)
# Log params and grads into tensorboard
# for name, param in self.meta_model.named_parameters():
# if param.requires_grad and param.grad is not None:
# self.tensorboard_writer.add_histogram('Params/' + name, param.data.view(-1), global_step=global_step)
# self.tensorboard_writer.add_histogram('Grads/' + name, param.grad.data.view(-1),
# global_step=global_step)
avg_loss = np.mean(losses)
avg_accuracy = np.mean(accuracies)
avg_precision = np.mean(precisions)
avg_recall = np.mean(recalls)
avg_f1 = np.mean(f1s)
logger.info('Meta train epoch {}: Avg loss = {:.5f}, avg accuracy = {:.5f}, avg precision = {:.5f}, '
'avg recall = {:.5f}, avg F1 score = {:.5f}'.format(epoch + 1, avg_loss, avg_accuracy,
avg_precision, avg_recall, avg_f1))
self.tensorboard_writer.add_scalar('Loss/train', avg_loss, global_step=epoch+1)
self.tensorboard_writer.add_scalar('F1/train', avg_f1, global_step=epoch+1)
losses, accuracies, precisions, recalls, f1s = self.meta_model(val_episodes, self.updates, testing=True)
avg_loss = np.mean(losses)
avg_accuracy = np.mean(accuracies)
avg_precision = np.mean(precisions)
avg_recall = np.mean(recalls)
            avg_f1 = np.mean(f1s)
#!/usr/bin/env python
"""
tidal_energy.py
State Estimation and Analysis for PYthon
Module to compute tidal energy from a column of data.
Written by <NAME> on 03/30/16
Copyright (c)2019 University of Hawaii under the MIT-License.
Notes
-----
Barotropic to Baroclinic conversion is given by:
.. math::
C=1 / T_t \int_0^T_t P'_t * wbar_t * dt, (1)
where T_t is the tidal period for constituent t, P' is the pressure perturbation,
wbar is the vertical velocity. Hence, conversion is the time average of the
vertical motion of the bottom pressure perturbation. We can do it spectrally if
we represent P'_t and wbar_t as waves:
.. math::
P'_t = Amp_P'_t * sin( 2 * pi * t / T_t + Pha_P'_t ) (2) \\
wbar_t = Amp_wbar_t * sin( 2 * pi * t / T_t + Pha_wbar_t ) (3)
If we substitute (2) and (3) into (1) using trig. identity and integrate over
the period (recall that integrating a wave over one period is zero):
.. math::
Conversion = 0.5 * Amp_P'_t * Amp_wbar_t * cos( Pha_P'_t - Pha_wbar_t )(4)
Energy Flux is given by:
.. math::
Flux_u = 1 / T_t * \int_0^T_t u'_t * P'_t * dt, (5) \\
Flux_v = 1 / T_t * \int_0^T_t v'_t * P'_t * dt, (6)
where u' and v' are the velocity anomalies for the constituent, t. As per
above, we can express as waves to yield:
.. math::
Flux_u = 0.5 * Amp_u'_t * Amp_P'_t * cos( Pha_u'_t - Pha_P'_t ) (7) \\
Flux_v = 0.5 * Amp_v'_t * Amp_P'_t * cos( Pha_v'_t - Pha_P'_t ) (8)
Displacement is given by:
.. math::
Displace = \int_0^T_t/2 g * rho'_t / ( rho0 * N_t**2 ) * dt, (9)
where rho' is the density anomaly and N**2 is the Brunt-Vaisala. NOTE:
this is integrated over one-half period because (by definition) it would
integrate to zero. However, if we know the tidal vertical velocity, then
we can integrate it for one-half period for the tidal displacement:
.. math::
Displace = \int_0^T_t/2 w_t * dt \\
= \int_0^T_t/2 Amp_w_t * sin( 2 * pi * t / T_t ) (10) \\
= Amp_w_t * T_t / pi
Horizontal Kinetic Energy is given by:
.. math::
HKE = 0.5 * rho0 * 1 / T_t * \int_0^T_t (u'_t**2 + v'_t**2) * dt (11)
substitute u' and v' as waveforms and integrate over a period,
.. math::
HKE = 0.5 * rho0 * 0.5 * ( Amp_u'_t**2 + Amp_v'_t**2 ) (12)
Available Potential Energy is given by:
.. math::
APE = 0.5 * rho0 * 1 / T_t * \int_0^T_t N_t**2 * Displace_t**2 * dt (13)
For this, we will use the time-average N**2 (not at the specific tidal
frequency) and use (10); hence, it becomes:
.. math::
APE = 0.5 * rho0 * (Amp_w_t * T_t / pi)**2 * 1/T_t \int_0^T_t N**2 * dt (14)
"""
import numpy as np
import seapy
_rho0 = 1000
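# Illustrative sketches (not part of the original module) of the spectral forms in
# the module docstring: conversion as in eq. (4) and energy flux as in eqs. (7)-(8),
# given constituent amplitudes and phases (phases in radians).
def _conversion_from_harmonics(amp_p, amp_w, pha_p, pha_w):
    # eq. (4): 0.5 * Amp_P' * Amp_wbar * cos(Pha_P' - Pha_wbar)
    return 0.5 * amp_p * amp_w * np.cos(pha_p - pha_w)
def _flux_from_harmonics(amp_u, amp_p, pha_u, pha_p):
    # eqs. (7)/(8): 0.5 * Amp_u' * Amp_P' * cos(Pha_u' - Pha_P')
    return 0.5 * amp_u * amp_p * np.cos(pha_u - pha_p)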
class energetics():
"""
This class is a container for the energetics produced by the tidal_energy
calculation to simplify access to the resulting data.
"""
def __init__(self, tides, energy, integrals, ellipse):
try:
self.tides = tides.tolist()
except AttributeError:
self.tides = tides
self.energy = energy
if len(tides) != energy.shape[0]:
raise ValueError(
"The number of tides and energy values are inconsistent")
self.integrals = integrals
self.ellipse = ellipse
pass
def __getitem__(self, key):
"""
Return the energetics for a tidal constituent
"""
t = self.tides.index(key.upper())
return {"conversion": self.integrals[t, 2],
"flux_u": self.energy[t, :, 0],
"flux_v": self.energy[t, :, 1],
"disp": self.energy[t, :, 2],
"hke": self.energy[t, :, 3],
"ape": self.energy[t, :, 4],
"total_flux_u": self.integrals[t, 0],
"total_flux_v": self.integrals[t, 1],
"total_hke": self.integrals[t, 3],
"total_ape": self.integrals[t, 4],
"ellipse": self.ellipse[key.upper()]}
pass
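# Illustrative usage (not in the original module): after nrg = tidal_energy(...),
# per-constituent results are reached through energetics.__getitem__ above, e.g.
#   nrg['M2']['total_hke'], nrg['M2']['conversion'], nrg['M2']['flux_u']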
def tidal_energy(time, hz, u, v, w, pressure, bvf=None, tides=None,
ubar=None, vbar=None, wbar=None):
"""
Calculate aspects of tidal energy from the given data: baroclinic energy flux,
HKE, APE, displacement, and conversion.
This only works for a single depth profile, and the arrays are to be 2D with
dimensions of [time, depth] with depth index 0 as the bottom and inded -1 as
the surface. Likewise, the hz field is oriented the same.
Parameters
----------
time : list of datetime,
times of data
hz : ndarray,
Thickness of the water column represented by 3D quantities [m]
u : ndarray,
u-component of 3D velocity [m s**-1]
v : ndarray,
v-component of 3D velocity [m s**-1]
w : ndarray,
w-component of 3D velocity [m s**-1]
pressure : ndarray,
pressure of the 3D field [dbar]
bvf : ndarray, optional
Brunt-Vaisala Frequency of the 3D field [s**-1]. If not specified
the APE will not be computed
tides: list of strings, optional
The names of the tides to use for analysis. If none
provided, use the defaults from seapy.tide
ubar : ndarray, optional
u-component of barotropic velocity [m s**-1]. If none
provided, compute from u
vbar : ndarray, optional
v-component of barotropic velocity [m s**-1]. If none
provided, compute from v
wbar : ndarray, optional
w-component of barotropic velocity [m s**-1]. If none
provided, compute from w
Returns
-------
energetics : class,
The energetics for each tidal consituent as well as the
vertically integrated properties. The energetics class
provides various methods for accessing the data
"""
# Ensure arrays as needed
u = np.ma.array(u)
v = np.ma.array(v)
w = np.ma.array(w)
pressure = np.ma.array(pressure)
# Setup the thicknesses in time
hz = np.ma.array(hz)
    if hz.ndim == 1:
        hz = np.tile(hz, (u.shape[0], 1))
total_h = np.sum(hz, axis=1)
ndep = hz.shape[1]
# If BVF not given, set to zero
    if bvf is not None:
bvf = np.ma.array(bvf).mean(axis=0)
else:
bvf = np.zeros(ndep)
# Setup the tides
tides = seapy.tide._set_tides(tides)
ntides = len(tides)
period = 3600 / seapy.tide.frequency(tides)
# Setup the barotropic velocities
    if ubar is not None and vbar is not None:
ubar = np.ma.array(ubar)
vbar = np.ma.array(vbar)
wbar = np.ma.array(wbar)
else:
ubar = np.sum(hz * u, axis=1) / total_h
vbar = np.sum(hz * v, axis=1) / total_h
wbar = np.sum(hz * w, axis=1) / total_h
# Calculate Pressure Anomalies
p_prime = pressure - pressure.mean(axis=0)
# Apply baroclinicity
    p_prime -= np.sum(p_prime * hz)
###############################################################################
# evolveddiskdf.py: module that builds a distribution function as a
# steady-state DF + subsequent evolution
#
# This module contains the following classes:
#
# evolveddiskdf - top-level class that represents a distribution function
###############################################################################
from __future__ import print_function
_NSIGMA= 4.
_NTS= 1000
_PROFILE= False
import sys
import math
import copy
import time as time_module
import warnings
import numpy as nu
from scipy import integrate
from galpy.util import galpyWarning
from galpy.orbit import Orbit
from galpy.potential import calcRotcurve
from galpy.df_src.df import df, _APY_LOADED
from galpy.potential_src.Potential import _check_c
from galpy.util.bovy_quadpack import dblquad
from galpy.util import bovy_plot
from galpy.util.bovy_conversion import physical_conversion, \
potential_physical_input, time_in_Gyr
if _APY_LOADED:
from astropy import units
_DEGTORAD= math.pi/180.
_RADTODEG= 180./math.pi
_NAN= nu.nan
class evolveddiskdf(df):
"""Class that represents a diskdf as initial DF + subsequent secular evolution"""
def __init__(self,initdf,pot,to=0.):
"""
NAME:
__init__
PURPOSE:
initialize
INPUT:
initdf - the df at the start of the evolution (at to) (units are transferred)
pot - potential to integrate orbits in
to= initial time (time at which initdf is evaluated; orbits are integrated from current t back to to) (can be Quantity)
OUTPUT:
instance
HISTORY:
2011-03-30 - Written - Bovy (NYU)
"""
if initdf._roSet: ro= initdf._ro
else: ro= None
if initdf._voSet: vo= initdf._vo
else: vo= None
df.__init__(self,ro=ro,vo=vo)
self._initdf= initdf
self._pot= pot
if _APY_LOADED and isinstance(to,units.Quantity):
to= to.to(units.Gyr).value/time_in_Gyr(self._vo,self._ro)
self._to= to
@physical_conversion('phasespacedensity2d',pop=True)
def __call__(self,*args,**kwargs):
"""
NAME:
__call__
PURPOSE:
evaluate the distribution function
INPUT:
Orbit instance:
a) Orbit instance alone: use initial state and t=0
b) Orbit instance + t: Orbit instance *NOT* called (i.e., Orbit's initial condition is used, call Orbit yourself), t can be Quantity
If t is a list of t, DF is returned for each t, times must be in descending order and equally spaced (does not work with marginalize...)
marginalizeVperp - marginalize over perpendicular velocity (only supported with 1a) above) + nsigma, +scipy.integrate.quad keywords
marginalizeVlos - marginalize over line-of-sight velocity (only supported with 1a) above) + nsigma, +scipy.integrate.quad keywords
log= if True, return the log (not for deriv, bc that can be negative)
integrate_method= method argument of orbit.integrate
deriv= None, 'R', or 'phi': calculates derivative of the moment wrt R or phi **not with the marginalize options**
OUTPUT:
DF(orbit,t)
HISTORY:
2011-03-30 - Written - Bovy (NYU)
2011-04-15 - Added list of times option - Bovy (NYU)
"""
integrate_method= kwargs.pop('integrate_method','dopr54_c')
# Must match Python fallback for non-C potentials here, bc odeint needs
# custom t list to avoid numerically instabilities
if '_c' in integrate_method and not _check_c(self._pot):
if ('leapfrog' in integrate_method \
or 'symplec' in integrate_method):
integrate_method= 'leapfrog'
else:
integrate_method= 'odeint'
deriv= kwargs.get('deriv',None)
if isinstance(args[0],Orbit):
if len(args) == 1:
t= 0.
else:
t= args[1]
else:
raise IOError("Input to __call__ not understood; this has to be an Orbit instance with optional time")
if isinstance(t,list):
t= nu.array(t)
tlist= True
elif isinstance(t,nu.ndarray) and \
not (hasattr(t,'isscalar') and t.isscalar):
tlist= True
else: tlist= False
if _APY_LOADED and isinstance(t,units.Quantity):
t= t.to(units.Gyr).value/time_in_Gyr(self._vo,self._ro)
if kwargs.pop('marginalizeVperp',False):
if tlist: raise IOError("Input times to __call__ is a list; this is not supported in conjunction with marginalizeVperp")
if kwargs.pop('log',False):
return nu.log(self._call_marginalizevperp(args[0],integrate_method=integrate_method,**kwargs))
else:
return self._call_marginalizevperp(args[0],integrate_method=integrate_method,**kwargs)
elif kwargs.pop('marginalizeVlos',False):
if tlist: raise IOError("Input times to __call__ is a list; this is not supported in conjunction with marginalizeVlos")
if kwargs.pop('log',False):
return nu.log(self._call_marginalizevlos(args[0],integrate_method=integrate_method,**kwargs))
else:
return self._call_marginalizevlos(args[0],integrate_method=integrate_method,**kwargs)
#Integrate back
if tlist:
if self._to == t[0]:
if kwargs.get('log',False):
return nu.log([self._initdf(args[0],use_physical=False)])
else:
return [self._initdf(args[0],use_physical=False)]
ts= self._create_ts_tlist(t,integrate_method)
o= args[0]
#integrate orbit
if _PROFILE: #pragma: no cover
start= time_module.time()
if not deriv is None:
#Also calculate the derivative of the initial df with respect to R, phi, vR, and vT, and the derivative of Ro wrt R/phi etc., to calculate the derivative; in this case we also integrate a small area of phase space
if deriv.lower() == 'r':
dderiv= 10.**-10.
tmp= o.R(use_physical=False)+dderiv
dderiv= tmp-o.R(use_physical=False)
msg= o._orb.integrate_dxdv([dderiv,0.,0.,0.],ts,self._pot,method=integrate_method)
elif deriv.lower() == 'phi':
dderiv= 10.**-10.
tmp= o.phi(use_physical=False)+dderiv
dderiv= tmp-o.phi(use_physical=False)
msg= o._orb.integrate_dxdv([0.,0.,0.,dderiv],ts,self._pot,method=integrate_method)
if msg > 0.: # pragma: no cover
print("Warning: dxdv integration inaccurate, returning zero everywhere ... result might not be correct ...")
if kwargs.get('log',False) and deriv is None: return nu.zeros(len(t))-nu.finfo(nu.dtype(nu.float64)).max
else: return nu.zeros(len(t))
o._orb.orbit= o._orb.orbit_dxdv[:,0:4]
else:
o.integrate(ts,self._pot,method=integrate_method)
if _PROFILE: #pragma: no cover
int_time= (time_module.time()-start)
#Now evaluate the DF
if _PROFILE: #pragma: no cover
start= time_module.time()
if integrate_method == 'odeint':
retval= []
os= [o(self._to+t[0]-ti,use_physical=False) for ti in t]
retval= nu.array(self._initdf(os,use_physical=False))
else:
if len(t) == 1:
orb_array= o.getOrbit().T
orb_array= orb_array[:,1]
else:
orb_array= o.getOrbit().T
retval= self._initdf(orb_array,use_physical=False)
if (isinstance(retval,float) or len(retval.shape) == 0) \
and nu.isnan(retval):
retval= 0.
elif not isinstance(retval,float) and len(retval.shape) > 0:
retval[(nu.isnan(retval))]= 0.
if len(t) > 1: retval= retval[::-1]
if _PROFILE: #pragma: no cover
df_time= (time_module.time()-start)
tot_time= int_time+df_time
print(int_time/tot_time, df_time/tot_time, tot_time)
if not deriv is None:
if integrate_method == 'odeint':
dlnfdRo= nu.array([self._initdf._dlnfdR(o.R(self._to+t[0]-ti,use_physical=False),
o.vR(self._to+t[0]-ti,use_physical=False),
o.vT(self._to+t[0]-ti,use_physical=False))
for ti in t])
dlnfdvRo= nu.array([self._initdf._dlnfdvR(o.R(self._to+t[0]-ti,use_physical=False),
o.vR(self._to+t[0]-ti,use_physical=False),
o.vT(self._to+t[0]-ti,use_physical=False))
for ti in t])
dlnfdvTo= nu.array([self._initdf._dlnfdvT(o.R(self._to+t[0]-ti,use_physical=False),
o.vR(self._to+t[0]-ti,use_physical=False),
o.vT(self._to+t[0]-ti,use_physical=False))
for ti in t])
dRo= nu.array([o._orb.orbit_dxdv[list(ts).index(self._to+t[0]-ti),4] for ti in t])/dderiv
dvRo= nu.array([o._orb.orbit_dxdv[list(ts).index(self._to+t[0]-ti),5] for ti in t])/dderiv
dvTo= nu.array([o._orb.orbit_dxdv[list(ts).index(self._to+t[0]-ti),6] for ti in t])/dderiv
#print(dRo, dvRo, dvTo)
dlnfderiv= dlnfdRo*dRo+dlnfdvRo*dvRo+dlnfdvTo*dvTo
retval*= dlnfderiv
else:
if len(t) == 1:
dlnfdRo= self._initdf._dlnfdR(orb_array[0],
orb_array[1],
orb_array[2])
dlnfdvRo= self._initdf._dlnfdvR(orb_array[0],
orb_array[1],
orb_array[2])
dlnfdvTo= self._initdf._dlnfdvT(orb_array[0],
orb_array[1],
orb_array[2])
else:
dlnfdRo= nu.array([self._initdf._dlnfdR(orb_array[0,ii],
orb_array[1,ii],
orb_array[2,ii])
for ii in range(len(t))])
dlnfdvRo= nu.array([self._initdf._dlnfdvR(orb_array[0,ii],
orb_array[1,ii],
orb_array[2,ii])
for ii in range(len(t))])
dlnfdvTo= nu.array([self._initdf._dlnfdvT(orb_array[0,ii],
orb_array[1,ii],
orb_array[2,ii])
for ii in range(len(t))])
dorb_array= o._orb.orbit_dxdv.T
if len(t) == 1: dorb_array= dorb_array[:,1]
dRo= dorb_array[4]/dderiv
dvRo= dorb_array[5]/dderiv
dvTo= dorb_array[6]/dderiv
#print(dRo, dvRo, dvTo)
dlnfderiv= dlnfdRo*dRo+dlnfdvRo*dvRo+dlnfdvTo*dvTo
if len(t) > 1: dlnfderiv= dlnfderiv[::-1]
retval*= dlnfderiv
else:
if self._to == t and deriv is None:
if kwargs.get('log',False):
return nu.log(self._initdf(args[0],use_physical=False))
else:
return self._initdf(args[0],use_physical=False)
elif self._to == t and not deriv is None:
if deriv.lower() == 'r':
return self._initdf(args[0])*self._initdf._dlnfdR(args[0]._orb.vxvv[0],
args[0]._orb.vxvv[1],
args[0]._orb.vxvv[2])
elif deriv.lower() == 'phi':
return 0.
if integrate_method == 'odeint':
ts= nu.linspace(t,self._to,_NTS)
else:
ts= nu.linspace(t,self._to,2)
o= args[0]
#integrate orbit
if not deriv is None:
ts= nu.linspace(t,self._to,_NTS)
#Also calculate the derivative of the initial df with respect to R, phi, vR, and vT, and the derivative of Ro wrt R/phi etc., to calculate the derivative; in this case we also integrate a small area of phase space
if deriv.lower() == 'r':
dderiv= 10.**-10.
tmp= o.R(use_physical=False)+dderiv
dderiv= tmp-o.R(use_physical=False)
o._orb.integrate_dxdv([dderiv,0.,0.,0.],ts,self._pot,method=integrate_method)
elif deriv.lower() == 'phi':
dderiv= 10.**-10.
tmp= o.phi(use_physical=False)+dderiv
dderiv= tmp-o.phi(use_physical=False)
o._orb.integrate_dxdv([0.,0.,0.,dderiv],ts,self._pot,method=integrate_method)
o._orb.orbit= o._orb.orbit_dxdv[:,0:4]
else:
o.integrate(ts,self._pot,method=integrate_method)
#int_time= (time.time()-start)
#Now evaluate the DF
if o.R(self._to-t,use_physical=False) <= 0.:
if kwargs.get('log',False):
return -nu.finfo(nu.dtype(nu.float64)).max
else:
return nu.finfo(nu.dtype(nu.float64)).eps
#start= time.time()
retval= self._initdf(o(self._to-t,use_physical=False),
use_physical=False)
#print( int_time/(time.time()-start))
if nu.isnan(retval): print(retval, o._orb.vxvv, o(self._to-t)._orb.vxvv)
if not deriv is None:
thisorbit= o(self._to-t)._orb.vxvv
dlnfdRo= self._initdf._dlnfdR(thisorbit[0],
thisorbit[1],
thisorbit[2])
dlnfdvRo= self._initdf._dlnfdvR(thisorbit[0],
thisorbit[1],
thisorbit[2])
dlnfdvTo= self._initdf._dlnfdvT(thisorbit[0],
thisorbit[1],
thisorbit[2])
indx= list(ts).index(self._to-t)
dRo= o._orb.orbit_dxdv[indx,4]/dderiv
dvRo= o._orb.orbit_dxdv[indx,5]/dderiv
dvTo= o._orb.orbit_dxdv[indx,6]/dderiv
dlnfderiv= dlnfdRo*dRo+dlnfdvRo*dvRo+dlnfdvTo*dvTo
retval*= dlnfderiv
if kwargs.get('log',False) and deriv is None:
if tlist:
out= nu.log(retval)
out[retval == 0.]= -nu.finfo(nu.dtype(nu.float64)).max
else:
if retval == 0.: out= -nu.finfo(nu.dtype(nu.float64)).max
else: out= nu.log(retval)
return out
else:
return retval
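    # Illustrative usage (not in the original file), assuming an instance edf and a
    # planar Orbit; times must be descending and equally spaced when passed as a list:
    #   edf = evolveddiskdf(initdf, pot, to=-10.)
    #   vals = edf(Orbit([1.0, 0.1, 1.1, 0.0]), [0., -5., -10.])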
def vmomentsurfacemass(self,R,n,m,t=0.,nsigma=None,deg=False,
epsrel=1.e-02,epsabs=1.e-05,phi=0.,
grid=None,gridpoints=101,returnGrid=False,
hierarchgrid=False,nlevels=2,
print_progress=False,
integrate_method='dopr54_c',
deriv=None):
"""
NAME:
vmomentsurfacemass
PURPOSE:
calculate the an arbitrary moment of the velocity distribution at (R,phi) times the surfacmass
INPUT:
R - radius at which to calculate the moment (in natural units)
phi= azimuth (rad unless deg=True)
n - vR^n
m - vT^m
t= time at which to evaluate the DF (can be a list or ndarray; if this is the case, list needs to be in descending order and equally spaced)
nsigma - number of sigma to integrate the velocities over (based on an estimate, so be generous, but not too generous)
deg= azimuth is in degree (default=False)
epsrel, epsabs - scipy.integrate keywords (the integration calculates the ratio of this vmoment to that of the initial DF)
grid= if set to True, build a grid and use that to evaluate integrals; if set to a grid-objects (such as returned by this procedure), use this grid; if this was created for a list of times, moments are calculated for each time
gridpoints= number of points to use for the grid in 1D (default=101)
returnGrid= if True, return the grid object (default=False)
hierarchgrid= if True, use a hierarchical grid (default=False)
nlevels= number of hierarchical levels for the hierarchical grid
print_progress= if True, print progress updates
integrate_method= orbit.integrate method argument
           deriv= None, 'R', or 'phi': calculates derivative of the moment wrt R or phi **only with grid options**
OUTPUT:
<vR^n vT^m x surface-mass> at R,phi (no support for units)
COMMENT:
grid-based calculation is the only one that is heavily tested (although the test suite also tests the direct calculation)
HISTORY:
2011-03-30 - Written - Bovy (NYU)
"""
#if we have already precalculated a grid, use that
if not grid is None and isinstance(grid,evolveddiskdfGrid):
if returnGrid:
return (self._vmomentsurfacemassGrid(n,m,grid),grid)
else:
return self._vmomentsurfacemassGrid(n,m,grid)
elif not grid is None \
and isinstance(grid,evolveddiskdfHierarchicalGrid):
if returnGrid:
return (self._vmomentsurfacemassHierarchicalGrid(n,m,grid),
grid)
else:
return self._vmomentsurfacemassHierarchicalGrid(n,m,grid)
#Otherwise we need to do some more work
if deg: az= phi*_DEGTORAD
else: az= phi
if nsigma is None: nsigma= _NSIGMA
if _PROFILE: #pragma: no cover
start= time_module.time()
if hasattr(self._initdf,'_estimatemeanvR') \
and hasattr(self._initdf,'_estimatemeanvT') \
and hasattr(self._initdf,'_estimateSigmaR2') \
and hasattr(self._initdf,'_estimateSigmaT2'):
sigmaR1= nu.sqrt(self._initdf._estimateSigmaR2(R,phi=az))
sigmaT1= nu.sqrt(self._initdf._estimateSigmaT2(R,phi=az))
meanvR= self._initdf._estimatemeanvR(R,phi=az)
meanvT= self._initdf._estimatemeanvT(R,phi=az)
else:
warnings.warn("No '_estimateSigmaR2' etc. functions found for initdf in evolveddf; thus using potentially slow sigmaR2 etc functions",
galpyWarning)
sigmaR1= nu.sqrt(self._initdf.sigmaR2(R,phi=az,use_physical=False))
sigmaT1= nu.sqrt(self._initdf.sigmaT2(R,phi=az,use_physical=False))
meanvR= self._initdf.meanvR(R,phi=az,use_physical=False)
meanvT= self._initdf.meanvT(R,phi=az,use_physical=False)
if _PROFILE: #pragma: no cover
setup_time= (time_module.time()-start)
if not grid is None and isinstance(grid,bool) and grid:
if not hierarchgrid:
if _PROFILE: #pragma: no cover
start= time_module.time()
grido= self._buildvgrid(R,az,nsigma,t,
sigmaR1,sigmaT1,meanvR,meanvT,
gridpoints,print_progress,
integrate_method,deriv)
if _PROFILE: #pragma: no cover
grid_time= (time_module.time()-start)
print(setup_time/(setup_time+grid_time), \
grid_time/(setup_time+grid_time), \
setup_time+grid_time)
if returnGrid:
return (self._vmomentsurfacemassGrid(n,m,grido),grido)
else:
return self._vmomentsurfacemassGrid(n,m,grido)
else: #hierarchical grid
grido= evolveddiskdfHierarchicalGrid(self,R,az,nsigma,t,
sigmaR1,sigmaT1,meanvR,
meanvT,
gridpoints,nlevels,deriv,
print_progress=print_progress)
if returnGrid:
return (self._vmomentsurfacemassHierarchicalGrid(n,m,
grido),
grido)
else:
return self._vmomentsurfacemassHierarchicalGrid(n,m,grido)
#Calculate the initdf moment and then calculate the ratio
initvmoment= self._initdf.vmomentsurfacemass(R,n,m,nsigma=nsigma,
phi=phi)
if initvmoment == 0.: initvmoment= 1.
norm= sigmaR1**(n+1)*sigmaT1**(m+1)*initvmoment
if isinstance(t,(list,nu.ndarray)):
raise IOError("list of times is only supported with grid-based calculation")
return dblquad(_vmomentsurfaceIntegrand,
meanvT/sigmaT1-nsigma,
meanvT/sigmaT1+nsigma,
lambda x: meanvR/sigmaR1
-nu.sqrt(nsigma**2.-(x-meanvT/sigmaT1)**2.),
lambda x: meanvR/sigmaR1
+nu.sqrt(nsigma**2.-(x-meanvT/sigmaT1)**2.),
(R,az,self,n,m,sigmaR1,sigmaT1,t,initvmoment),
epsrel=epsrel,epsabs=epsabs)[0]*norm
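    # Illustrative usage (not in the original file), assuming an instance edf: build
    # the velocity grid once and reuse it for higher moments, e.g.
    #   sm, grid = edf.vmomentsurfacemass(0.9, 0, 0, phi=0.2, grid=True,
    #                                     gridpoints=51, returnGrid=True)
    #   meanvR = edf.vmomentsurfacemass(0.9, 1, 0, phi=0.2, grid=grid) / sm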
@potential_physical_input
@physical_conversion('angle',pop=True)
def vertexdev(self,R,t=0.,nsigma=None,deg=False,
epsrel=1.e-02,epsabs=1.e-05,phi=0.,
grid=None,gridpoints=101,returnGrid=False,
sigmaR2=None,sigmaT2=None,sigmaRT=None,surfacemass=None,
hierarchgrid=False,nlevels=2,
integrate_method='dopr54_c'):
"""
NAME:
vertexdev
PURPOSE:
calculate the vertex deviation of the velocity distribution at (R,phi)
INPUT:
R - radius at which to calculate the moment (can be Quantity)
phi= azimuth (rad unless deg=True; can be Quantity)
t= time at which to evaluate the DF (can be a list or ndarray; if this is the case, list needs to be in descending order and equally spaced) (can be Quantity)
sigmaR2, sigmaT2, sigmaRT= if set the vertex deviation is simply calculated using these
nsigma - number of sigma to integrate the velocities over (based on an estimate, so be generous)
deg= azimuth is in degree (default=False); do not set this when giving phi as a Quantity
epsrel, epsabs - scipy.integrate keywords (the integration calculates the ratio of this vmoment to that of the initial DF)
grid= if set to True, build a grid and use that to evaluate integrals; if set to a grid-objects (such as returned by this procedure), use this grid
gridpoints= number of points to use for the grid in 1D (default=101)
returnGrid= if True, return the grid object (default=False)
hierarchgrid= if True, use a hierarchical grid (default=False)
nlevels= number of hierarchical levels for the hierarchical grid
integrate_method= orbit.integrate method argument
OUTPUT:
vertex deviation in rad
HISTORY:
2011-03-31 - Written - Bovy (NYU)
"""
#The following aren't actually the moments, but they are the moments
#times the surface-mass density; that drops out
if isinstance(grid,evolveddiskdfGrid) or \
isinstance(grid,evolveddiskdfHierarchicalGrid):
grido= grid
elif (sigmaR2 is None or sigmaT2 is None or sigmaRT is None) \
and isinstance(grid,bool) and grid:
#Precalculate the grid
(sigmaR2_tmp,grido)= self.vmomentsurfacemass(R,2,0,deg=deg,t=t,
nsigma=nsigma,phi=phi,
epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=True,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
else:
grido= False
if sigmaR2 is None:
sigmaR2= self.sigmaR2(R,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grido,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,
use_physical=False)
if sigmaT2 is None:
sigmaT2= self.sigmaT2(R,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grido,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,
use_physical=False)
if sigmaRT is None:
sigmaRT= self.sigmaRT(R,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grido,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,
use_physical=False)
warnings.warn("In versions >1.3, the output unit of evolveddiskdf.vertexdev has been changed to radian (from degree before)",galpyWarning)
if returnGrid and ((isinstance(grid,bool) and grid) or
isinstance(grid,evolveddiskdfGrid) or
isinstance(grid,evolveddiskdfHierarchicalGrid)):
return (-nu.arctan(2.*sigmaRT/(sigmaR2-sigmaT2))/2.,grido)
else:
return -nu.arctan(2.*sigmaRT/(sigmaR2-sigmaT2))/2.
@potential_physical_input
@physical_conversion('velocity',pop=True)
def meanvR(self,R,t=0.,nsigma=None,deg=False,phi=0.,
epsrel=1.e-02,epsabs=1.e-05,
grid=None,gridpoints=101,returnGrid=False,
surfacemass=None,
hierarchgrid=False,nlevels=2,integrate_method='dopr54_c'):
"""
NAME:
meanvR
PURPOSE:
calculate the mean vR of the velocity distribution at (R,phi)
INPUT:
R - radius at which to calculate the moment(/ro) (can be Quantity)
phi= azimuth (rad unless deg=True; can be Quantity)
t= time at which to evaluate the DF (can be a list or ndarray; if this is the case, list needs to be in descending order and equally spaced) (can be Quantity)
surfacemass= if set use this pre-calculated surfacemass
nsigma - number of sigma to integrate the velocities over (based on an estimate, so be generous)
deg= azimuth is in degree (default=False); do not set this when giving phi as a Quantity
epsrel, epsabs - scipy.integrate keywords (the integration calculates the ratio of this vmoment to that of the initial DF)
grid= if set to True, build a grid and use that to evaluate integrals; if set to a grid-objects (such as returned by this procedure), use this grid
gridpoints= number of points to use for the grid in 1D (default=101)
returnGrid= if True, return the grid object (default=False)
hierarchgrid= if True, use a hierarchical grid (default=False)
nlevels= number of hierarchical levels for the hierarchical grid
integrate_method= orbit.integrate method argument
OUTPUT:
mean vR
HISTORY:
2011-03-31 - Written - Bovy (NYU)
"""
if isinstance(grid,evolveddiskdfGrid) or \
isinstance(grid,evolveddiskdfHierarchicalGrid):
grido= grid
vmomentR= self.vmomentsurfacemass(R,1,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,
epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
elif isinstance(grid,bool) and grid:
#Precalculate the grid
(vmomentR,grido)= self.vmomentsurfacemass(R,1,0,deg=deg,t=t,
nsigma=nsigma,phi=phi,
epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=True,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
else:
grido= False
vmomentR= self.vmomentsurfacemass(R,1,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,
epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,integrate_method=integrate_method)
if surfacemass is None:
surfacemass= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grido,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,integrate_method=integrate_method)
out= vmomentR/surfacemass
if returnGrid and ((isinstance(grid,bool) and grid) or
isinstance(grid,evolveddiskdfGrid) or
isinstance(grid,evolveddiskdfHierarchicalGrid)):
return (out,grido)
else:
return out
@potential_physical_input
@physical_conversion('velocity',pop=True)
def meanvT(self,R,t=0.,nsigma=None,deg=False,phi=0.,
epsrel=1.e-02,epsabs=1.e-05,
grid=None,gridpoints=101,returnGrid=False,
surfacemass=None,
hierarchgrid=False,nlevels=2,integrate_method='dopr54_c'):
"""
NAME:
meanvT
PURPOSE:
calculate the mean vT of the velocity distribution at (R,phi)
INPUT:
R - radius at which to calculate the moment (can be Quantity)
phi= azimuth (rad unless deg=True; can be Quantity)
t= time at which to evaluate the DF (can be a list or ndarray; if this is the case, list needs to be in descending order and equally spaced) (can be Quantity)
surfacemass= if set use this pre-calculated surfacemass
nsigma - number of sigma to integrate the velocities over (based on an estimate, so be generous)
deg= azimuth is in degree (default=False); do not set this when giving phi as a Quantity
epsrel, epsabs - scipy.integrate keywords (the integration calculates the ratio of this vmoment to that of the initial DF)
grid= if set to True, build a grid and use that to evaluate integrals; if set to a grid-objects (such as returned by this procedure), use this grid
gridpoints= number of points to use for the grid in 1D (default=101)
returnGrid= if True, return the grid object (default=False)
hierarchgrid= if True, use a hierarchical grid (default=False)
nlevels= number of hierarchical levels for the hierarchical grid
integrate_method= orbit.integrate method argument
OUTPUT:
mean vT
HISTORY:
2011-03-31 - Written - Bovy (NYU)
"""
if isinstance(grid,evolveddiskdfGrid) or \
isinstance(grid,evolveddiskdfHierarchicalGrid):
grido= grid
vmomentT= self.vmomentsurfacemass(R,0,1,deg=deg,t=t,
nsigma=nsigma,phi=phi,
epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
elif isinstance(grid,bool) and grid:
#Precalculate the grid
(vmomentT,grido)= self.vmomentsurfacemass(R,0,1,deg=deg,t=t,
nsigma=nsigma,phi=phi,
epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=True,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
else:
grido= False
vmomentT= self.vmomentsurfacemass(R,0,1,deg=deg,t=t,
nsigma=nsigma,phi=phi,
epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,integrate_method=integrate_method)
if surfacemass is None:
surfacemass= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grido,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
out= vmomentT/surfacemass
if returnGrid and ((isinstance(grid,bool) and grid) or
isinstance(grid,evolveddiskdfGrid) or
isinstance(grid,evolveddiskdfHierarchicalGrid)):
return (out,grido)
else:
return out
@potential_physical_input
@physical_conversion('velocity2',pop=True)
def sigmaR2(self,R,t=0.,nsigma=None,deg=False,phi=0.,
epsrel=1.e-02,epsabs=1.e-05,
grid=None,gridpoints=101,returnGrid=False,
surfacemass=None,meanvR=None,
hierarchgrid=False,nlevels=2,
integrate_method='dopr54_c'):
"""
NAME:
sigmaR2
PURPOSE:
calculate the radial variance of the velocity distribution at (R,phi)
INPUT:
R - radius at which to calculate the moment (can be Quantity)
phi= azimuth (rad unless deg=True; can be Quantity)
t= time at which to evaluate the DF (can be a list or ndarray; if this is the case, list needs to be in descending order and equally spaced) (can be Quantity)
surfacemass, meanvR= if set use this pre-calculated surfacemass and mean vR
nsigma - number of sigma to integrate the velocities over (based on an estimate, so be generous)
deg= azimuth is in degree (default=False); do not set this when giving phi as a Quantity
epsrel, epsabs - scipy.integrate keywords (the integration calculates the ratio of this vmoment to that of the initial DF)
grid= if set to True, build a grid and use that to evaluate integrals; if set to a grid-objects (such as returned by this procedure), use this grid
gridpoints= number of points to use for the grid in 1D (default=101)
returnGrid= if True, return the grid object (default=False)
hierarchgrid= if True, use a hierarchical grid (default=False)
nlevels= number of hierarchical levels for the hierarchical grid
integrate_method= orbit.integrate method argument
OUTPUT:
variance of vR
HISTORY:
2011-03-31 - Written - Bovy (NYU)
"""
#The following aren't actually the moments, but they are the moments
#times the surface-mass density
if isinstance(grid,evolveddiskdfGrid) or \
isinstance(grid,evolveddiskdfHierarchicalGrid):
grido= grid
sigmaR2= self.vmomentsurfacemass(R,2,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,
epsrel=epsrel,
epsabs=epsabs,grid=grido,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
elif (meanvR is None or surfacemass is None ) \
and isinstance(grid,bool) and grid:
#Precalculate the grid
(sigmaR2,grido)= self.vmomentsurfacemass(R,2,0,deg=deg,t=t,
nsigma=nsigma,phi=phi,
epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=True,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
else:
grido= False
sigmaR2= self.vmomentsurfacemass(R,2,0,deg=deg,t=t,
nsigma=nsigma,phi=phi,
epsrel=epsrel,
epsabs=epsabs,grid=grido,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
if surfacemass is None:
surfacemass= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grido,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
if meanvR is None:
meanvR= self.vmomentsurfacemass(R,1,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grido,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)/surfacemass
out= sigmaR2/surfacemass-meanvR**2.
if returnGrid and ((isinstance(grid,bool) and grid) or
isinstance(grid,evolveddiskdfGrid) or
isinstance(grid,evolveddiskdfHierarchicalGrid)):
return (out,grido)
else:
return out
@potential_physical_input
@physical_conversion('velocity2',pop=True)
def sigmaT2(self,R,t=0.,nsigma=None,deg=False,phi=0.,
epsrel=1.e-02,epsabs=1.e-05,
grid=None,gridpoints=101,returnGrid=False,
surfacemass=None,meanvT=None,
hierarchgrid=False,nlevels=2,
integrate_method='dopr54_c'):
"""
NAME:
sigmaT2
PURPOSE:
calculate the tangential variance of the velocity distribution at (R,phi)
INPUT:
R - radius at which to calculate the moment (can be Quantity)
phi= azimuth (rad unless deg=True; can be Quantity)
t= time at which to evaluate the DF (can be a list or ndarray; if this is the case, list needs to be in descending order and equally spaced) (can be Quantity)
surfacemass, meanvT= if set use this pre-calculated surfacemass and mean tangential velocity
nsigma - number of sigma to integrate the velocities over (based on an estimate, so be generous)
deg= azimuth is in degree (default=False); do not set this when giving phi as a Quantity
epsrel, epsabs - scipy.integrate keywords (the integration calculates the ratio of this vmoment to that of the initial DF)
grid= if set to True, build a grid and use that to evaluate integrals; if set to a grid-objects (such as returned by this procedure), use this grid
gridpoints= number of points to use for the grid in 1D (default=101)
returnGrid= if True, return the grid object (default=False)
hierarchgrid= if True, use a hierarchical grid (default=False)
nlevels= number of hierarchical levels for the hierarchical grid
integrate_method= orbit.integrate method argument
OUTPUT:
variance of vT
HISTORY:
2011-03-31 - Written - Bovy (NYU)
"""
if isinstance(grid,evolveddiskdfGrid) or \
isinstance(grid,evolveddiskdfHierarchicalGrid):
grido= grid
sigmaT2= self.vmomentsurfacemass(R,0,2,deg=deg,t=t,phi=phi,
nsigma=nsigma,
epsrel=epsrel,
epsabs=epsabs,grid=grido,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
elif (meanvT is None or surfacemass is None ) \
and isinstance(grid,bool) and grid:
#Precalculate the grid
(sigmaT2,grido)= self.vmomentsurfacemass(R,0,2,deg=deg,t=t,
nsigma=nsigma,phi=phi,
epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=True,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
else:
grido= False
sigmaT2= self.vmomentsurfacemass(R,0,2,deg=deg,t=t,
nsigma=nsigma,phi=phi,
epsrel=epsrel,
epsabs=epsabs,grid=grido,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
if surfacemass is None:
surfacemass= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grido,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
if meanvT is None:
meanvT= self.vmomentsurfacemass(R,0,1,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grido,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)/surfacemass
out= sigmaT2/surfacemass-meanvT**2.
if returnGrid and ((isinstance(grid,bool) and grid) or
isinstance(grid,evolveddiskdfGrid) or
isinstance(grid,evolveddiskdfHierarchicalGrid)):
return (out,grido)
else:
return out
@potential_physical_input
@physical_conversion('velocity2',pop=True)
def sigmaRT(self,R,t=0.,nsigma=None,deg=False,
epsrel=1.e-02,epsabs=1.e-05,phi=0.,
grid=None,gridpoints=101,returnGrid=False,
surfacemass=None,meanvR=None,meanvT=None,
hierarchgrid=False,nlevels=2,
integrate_method='dopr54_c'):
"""
NAME:
sigmaRT
PURPOSE:
calculate the radial-tangential co-variance of the velocity distribution at (R,phi)
INPUT:
R - radius at which to calculate the moment (can be Quantity)
phi= azimuth (rad unless deg=True; can be Quantity)
t= time at which to evaluate the DF (can be a list or ndarray; if this is the case, list needs to be in descending order and equally spaced) (can be Quantity)
           surfacemass, meanvR, meanvT= if set use this pre-calculated surfacemass and mean vR and vT
nsigma - number of sigma to integrate the velocities over (based on an estimate, so be generous)
deg= azimuth is in degree (default=False); do not set this when giving phi as a Quantity
           epsrel, epsabs - scipy.integrate keywords (the integration calculates the ratio of this vmoment to that of the initial DF)
grid= if set to True, build a grid and use that to evaluate integrals; if set to a grid-objects (such as returned by this procedure), use this grid
gridpoints= number of points to use for the grid in 1D (default=101)
returnGrid= if True, return the grid object (default=False)
hierarchgrid= if True, use a hierarchical grid (default=False)
nlevels= number of hierarchical levels for the hierarchical grid
integrate_method= orbit.integrate method argument
OUTPUT:
covariance of vR and vT
HISTORY:
2011-03-31 - Written - Bovy (NYU)
"""
#The following aren't actually the moments, but they are the moments
#times the surface-mass density
if isinstance(grid,evolveddiskdfGrid) or \
isinstance(grid,evolveddiskdfHierarchicalGrid):
grido= grid
sigmaRT= self.vmomentsurfacemass(R,1,1,deg=deg,t=t,phi=phi,
nsigma=nsigma,
epsrel=epsrel,
epsabs=epsabs,grid=grido,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
elif (meanvR is None or surfacemass is None ) \
and isinstance(grid,bool) and grid:
#Precalculate the grid
(sigmaRT,grido)= self.vmomentsurfacemass(R,1,1,deg=deg,t=t,
nsigma=nsigma,phi=phi,
epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=True,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
else:
grido= False
sigmaRT= self.vmomentsurfacemass(R,1,1,deg=deg,t=t,
nsigma=nsigma,phi=phi,
epsrel=epsrel,
epsabs=epsabs,grid=grido,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
if surfacemass is None:
surfacemass= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grido,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
if meanvR is None:
meanvR= self.vmomentsurfacemass(R,1,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grido,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)/surfacemass
if meanvT is None:
meanvT= self.vmomentsurfacemass(R,0,1,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grido,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)/surfacemass
out= sigmaRT/surfacemass-meanvR*meanvT
if returnGrid and ((isinstance(grid,bool) and grid) or
isinstance(grid,evolveddiskdfGrid) or
isinstance(grid,evolveddiskdfHierarchicalGrid)):
return (out,grido)
else:
return out
@potential_physical_input
@physical_conversion('frequency-kmskpc',pop=True)
def oortA(self,R,t=0.,nsigma=None,deg=False,phi=0.,
epsrel=1.e-02,epsabs=1.e-05,
grid=None,gridpoints=101,returnGrids=False,
derivRGrid=None,derivphiGrid=None,derivGridpoints=101,
derivHierarchgrid=False,
hierarchgrid=False,nlevels=2,integrate_method='dopr54_c'):
"""
NAME:
oortA
PURPOSE:
calculate the Oort function A at (R,phi,t)
INPUT:
R - radius at which to calculate A (can be Quantity)
           phi= azimuth (rad unless deg=True; can be Quantity)
t= time at which to evaluate the DF (can be a list or ndarray; if this is the case, list needs to be in descending order and equally spaced) (can be Quantity)
nsigma - number of sigma to integrate the velocities over (based on an estimate, so be generous)
deg= azimuth is in degree (default=False); do not set this when giving phi as a Quantity
epsrel, epsabs - scipy.integrate keywords
           grid= if set to True, build a grid and use that to evaluate integrals; if set to a grid object (such as returned by this procedure), use this grid
           derivRGrid, derivphiGrid= if set to True, build a grid and use that to evaluate integrals of the derivatives of the DF; if set to a grid object (such as returned by this procedure), use this grid
gridpoints= number of points to use for the grid in 1D (default=101)
derivGridpoints= number of points to use for the grid in 1D (default=101)
           returnGrids= if True, return the grid objects (default=False)
hierarchgrid= if True, use a hierarchical grid (default=False)
derivHierarchgrid= if True, use a hierarchical grid (default=False)
nlevels= number of hierarchical levels for the hierarchical grid
integrate_method= orbit.integrate method argument
OUTPUT:
Oort A at R,phi,t
HISTORY:
2011-10-16 - Written - Bovy (NYU)
"""
#First calculate the grids if they are not given
if isinstance(grid,bool) and grid:
(surfacemass,grid)= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=True,
gridpoints=gridpoints,
returnGrid=True,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
elif isinstance(grid,evolveddiskdfGrid) or \
isinstance(grid,evolveddiskdfHierarchicalGrid):
surfacemass= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
if isinstance(derivRGrid,bool) and derivRGrid:
(dsurfacemassdR,derivRGrid)= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=True,
gridpoints=derivGridpoints,
returnGrid=True,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='R')
elif isinstance(derivRGrid,evolveddiskdfGrid) or \
isinstance(derivRGrid,evolveddiskdfHierarchicalGrid):
dsurfacemassdR= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=derivRGrid,
gridpoints=derivGridpoints,
returnGrid=False,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='R')
if isinstance(derivphiGrid,bool) and derivphiGrid:
(dsurfacemassdphi,derivphiGrid)= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=True,
gridpoints=derivGridpoints,
returnGrid=True,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='phi')
elif isinstance(derivphiGrid,evolveddiskdfGrid) or \
isinstance(derivphiGrid,evolveddiskdfHierarchicalGrid):
dsurfacemassdphi= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=derivphiGrid,
gridpoints=derivGridpoints,
returnGrid=False,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='phi')
        #2A= meanvT/R-dmeanvR/R/dphi-dmeanvT/dR
#meanvT
meanvT= self.meanvT(R,t=t,nsigma=nsigma,deg=deg,phi=phi,
epsrel=epsrel,epsabs=epsabs,
grid=grid,gridpoints=gridpoints,returnGrid=False,
surfacemass=surfacemass,
hierarchgrid=hierarchgrid,
nlevels=nlevels,integrate_method=integrate_method,
use_physical=False)
dmeanvRdphi= (self.vmomentsurfacemass(R,1,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=derivphiGrid,
gridpoints=derivGridpoints,
returnGrid=False,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='phi')
/surfacemass
-self.vmomentsurfacemass(R,1,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
/surfacemass**2.*dsurfacemassdphi)
dmeanvTdR= (self.vmomentsurfacemass(R,0,1,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=derivRGrid,
gridpoints=derivGridpoints,
returnGrid=False,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='R')
/surfacemass
-self.vmomentsurfacemass(R,0,1,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
/surfacemass**2.*dsurfacemassdR)
if returnGrids:
return (0.5*(meanvT/R-dmeanvRdphi/R-dmeanvTdR),grid,
derivRGrid,derivphiGrid)
else:
return 0.5*(meanvT/R-dmeanvRdphi/R-dmeanvTdR)
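    # Usage sketch for the Oort functions (assumes `edf` is an evolveddiskdf
    # instance set up elsewhere; the parameter values are illustrative only):
    #   A= edf.oortA(1.,t=0.,phi=0.,grid=True,gridpoints=51,
    #                derivRGrid=True,derivphiGrid=True,derivGridpoints=51)
    # oortB, oortC, and oortK below follow the same calling pattern.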
@potential_physical_input
@physical_conversion('frequency-kmskpc',pop=True)
def oortB(self,R,t=0.,nsigma=None,deg=False,phi=0.,
epsrel=1.e-02,epsabs=1.e-05,
grid=None,gridpoints=101,returnGrids=False,
derivRGrid=None,derivphiGrid=None,derivGridpoints=101,
derivHierarchgrid=False,
hierarchgrid=False,nlevels=2,integrate_method='dopr54_c'):
"""
NAME:
oortB
PURPOSE:
calculate the Oort function B at (R,phi,t)
INPUT:
R - radius at which to calculate B (can be Quantity)
phi= azimuth (rad unless deg=True; can be Quantity)
t= time at which to evaluate the DF (can be a list or ndarray; if this is the case, list needs to be in descending order and equally spaced) (can be Quantity)
nsigma - number of sigma to integrate the velocities over (based on an estimate, so be generous)
deg= azimuth is in degree (default=False); do not set this when giving phi as a Quantity
epsrel, epsabs - scipy.integrate keywords
           grid= if set to True, build a grid and use that to evaluate integrals; if set to a grid object (such as returned by this procedure), use this grid
           derivRGrid, derivphiGrid= if set to True, build a grid and use that to evaluate integrals of the derivatives of the DF; if set to a grid object (such as returned by this procedure), use this grid
gridpoints= number of points to use for the grid in 1D (default=101)
derivGridpoints= number of points to use for the grid in 1D (default=101)
           returnGrids= if True, return the grid objects (default=False)
hierarchgrid= if True, use a hierarchical grid (default=False)
derivHierarchgrid= if True, use a hierarchical grid (default=False)
nlevels= number of hierarchical levels for the hierarchical grid
integrate_method= orbit.integrate method argument
OUTPUT:
Oort B at R,phi,t
HISTORY:
2011-10-16 - Written - Bovy (NYU)
"""
#First calculate the grids if they are not given
if isinstance(grid,bool) and grid:
(surfacemass,grid)= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=True,
gridpoints=gridpoints,
returnGrid=True,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
elif isinstance(grid,evolveddiskdfGrid) or \
isinstance(grid,evolveddiskdfHierarchicalGrid):
surfacemass= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
if isinstance(derivRGrid,bool) and derivRGrid:
(dsurfacemassdR,derivRGrid)= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=True,
gridpoints=derivGridpoints,
returnGrid=True,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='R')
elif isinstance(derivRGrid,evolveddiskdfGrid) or \
isinstance(derivRGrid,evolveddiskdfHierarchicalGrid):
dsurfacemassdR= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=derivRGrid,
gridpoints=derivGridpoints,
returnGrid=False,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='R')
if isinstance(derivphiGrid,bool) and derivphiGrid:
(dsurfacemassdphi,derivphiGrid)= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=True,
gridpoints=derivGridpoints,
returnGrid=True,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='phi')
elif isinstance(derivphiGrid,evolveddiskdfGrid) or \
isinstance(derivphiGrid,evolveddiskdfHierarchicalGrid):
dsurfacemassdphi= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=derivphiGrid,
gridpoints=derivGridpoints,
returnGrid=False,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='phi')
        #2B= -meanvT/R+dmeanvR/R/dphi-dmeanvT/dR
#meanvT
meanvT= self.meanvT(R,t=t,nsigma=nsigma,deg=deg,phi=phi,
epsrel=epsrel,epsabs=epsabs,
grid=grid,gridpoints=gridpoints,returnGrid=False,
surfacemass=surfacemass,
hierarchgrid=hierarchgrid,
nlevels=nlevels,integrate_method=integrate_method,
use_physical=False)
dmeanvRdphi= (self.vmomentsurfacemass(R,1,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=derivphiGrid,
gridpoints=derivGridpoints,
returnGrid=False,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='phi')
/surfacemass
-self.vmomentsurfacemass(R,1,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
/surfacemass**2.*dsurfacemassdphi)
dmeanvTdR= (self.vmomentsurfacemass(R,0,1,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=derivRGrid,
gridpoints=derivGridpoints,
returnGrid=False,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='R')
/surfacemass
-self.vmomentsurfacemass(R,0,1,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
/surfacemass**2.*dsurfacemassdR)
if returnGrids:
return (0.5*(-meanvT/R+dmeanvRdphi/R-dmeanvTdR),grid,
derivRGrid,derivphiGrid)
else:
return 0.5*(-meanvT/R+dmeanvRdphi/R-dmeanvTdR)
@potential_physical_input
@physical_conversion('frequency-kmskpc',pop=True)
def oortC(self,R,t=0.,nsigma=None,deg=False,phi=0.,
epsrel=1.e-02,epsabs=1.e-05,
grid=None,gridpoints=101,returnGrids=False,
derivRGrid=None,derivphiGrid=None,derivGridpoints=101,
derivHierarchgrid=False,
hierarchgrid=False,nlevels=2,integrate_method='dopr54_c'):
"""
NAME:
oortC
PURPOSE:
calculate the Oort function C at (R,phi,t)
INPUT:
R - radius at which to calculate C (can be Quantity)
phi= azimuth (rad unless deg=True; can be Quantity)
t= time at which to evaluate the DF (can be a list or ndarray; if this is the case, list needs to be in descending order and equally spaced) (can be Quantity)
nsigma - number of sigma to integrate the velocities over (based on an estimate, so be generous)
deg= azimuth is in degree (default=False); do not set this when giving phi as a Quantity
epsrel, epsabs - scipy.integrate keywords
           grid= if set to True, build a grid and use that to evaluate integrals; if set to a grid object (such as returned by this procedure), use this grid
           derivRGrid, derivphiGrid= if set to True, build a grid and use that to evaluate integrals of the derivatives of the DF; if set to a grid object (such as returned by this procedure), use this grid
gridpoints= number of points to use for the grid in 1D (default=101)
derivGridpoints= number of points to use for the grid in 1D (default=101)
           returnGrids= if True, return the grid objects (default=False)
hierarchgrid= if True, use a hierarchical grid (default=False)
derivHierarchgrid= if True, use a hierarchical grid (default=False)
nlevels= number of hierarchical levels for the hierarchical grid
integrate_method= orbit.integrate method argument
OUTPUT:
Oort C at R,phi,t
HISTORY:
2011-10-16 - Written - Bovy (NYU)
"""
#First calculate the grids if they are not given
if isinstance(grid,bool) and grid:
(surfacemass,grid)= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=True,
gridpoints=gridpoints,
returnGrid=True,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
elif isinstance(grid,evolveddiskdfGrid) or \
isinstance(grid,evolveddiskdfHierarchicalGrid):
surfacemass= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
if isinstance(derivRGrid,bool) and derivRGrid:
(dsurfacemassdR,derivRGrid)= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=True,
gridpoints=derivGridpoints,
returnGrid=True,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='R')
elif isinstance(derivRGrid,evolveddiskdfGrid) or \
isinstance(derivRGrid,evolveddiskdfHierarchicalGrid):
dsurfacemassdR= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=derivRGrid,
gridpoints=derivGridpoints,
returnGrid=False,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='R')
if isinstance(derivphiGrid,bool) and derivphiGrid:
(dsurfacemassdphi,derivphiGrid)= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=True,
gridpoints=derivGridpoints,
returnGrid=True,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='phi')
elif isinstance(derivphiGrid,evolveddiskdfGrid) or \
isinstance(derivphiGrid,evolveddiskdfHierarchicalGrid):
dsurfacemassdphi= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=derivphiGrid,
gridpoints=derivGridpoints,
returnGrid=False,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='phi')
#2C= -meanvR/R-dmeanvT/R/dphi+dmeanvR/dR
#meanvR
meanvR= self.meanvR(R,t=t,nsigma=nsigma,deg=deg,phi=phi,
epsrel=epsrel,epsabs=epsabs,
grid=grid,gridpoints=gridpoints,returnGrid=False,
surfacemass=surfacemass,
hierarchgrid=hierarchgrid,
nlevels=nlevels,integrate_method=integrate_method,
use_physical=False)
dmeanvTdphi= (self.vmomentsurfacemass(R,0,1,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=derivphiGrid,
gridpoints=derivGridpoints,
returnGrid=False,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='phi')
/surfacemass
-self.vmomentsurfacemass(R,0,1,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
/surfacemass**2.*dsurfacemassdphi)
dmeanvRdR= (self.vmomentsurfacemass(R,1,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=derivRGrid,
gridpoints=derivGridpoints,
returnGrid=False,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='R')
/surfacemass
-self.vmomentsurfacemass(R,1,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
/surfacemass**2.*dsurfacemassdR)
if returnGrids:
return (0.5*(-meanvR/R-dmeanvTdphi/R+dmeanvRdR),grid,
derivRGrid,derivphiGrid)
else:
return 0.5*(-meanvR/R-dmeanvTdphi/R+dmeanvRdR)
@potential_physical_input
@physical_conversion('frequency-kmskpc',pop=True)
def oortK(self,R,t=0.,nsigma=None,deg=False,phi=0.,
epsrel=1.e-02,epsabs=1.e-05,
grid=None,gridpoints=101,returnGrids=False,
derivRGrid=None,derivphiGrid=None,derivGridpoints=101,
derivHierarchgrid=False,
hierarchgrid=False,nlevels=2,integrate_method='dopr54_c'):
"""
NAME:
oortK
PURPOSE:
calculate the Oort function K at (R,phi,t)
INPUT:
R - radius at which to calculate K (can be Quantity)
phi= azimuth (rad unless deg=True; can be Quantity)
t= time at which to evaluate the DF (can be a list or ndarray; if this is the case, list needs to be in descending order and equally spaced) (can be Quantity)
nsigma - number of sigma to integrate the velocities over (based on an estimate, so be generous)
deg= azimuth is in degree (default=False); do not set this when giving phi as a Quantity
epsrel, epsabs - scipy.integrate keywords
           grid= if set to True, build a grid and use that to evaluate integrals; if set to a grid object (such as returned by this procedure), use this grid
           derivRGrid, derivphiGrid= if set to True, build a grid and use that to evaluate integrals of the derivatives of the DF; if set to a grid object (such as returned by this procedure), use this grid
gridpoints= number of points to use for the grid in 1D (default=101)
derivGridpoints= number of points to use for the grid in 1D (default=101)
           returnGrids= if True, return the grid objects (default=False)
hierarchgrid= if True, use a hierarchical grid (default=False)
derivHierarchgrid= if True, use a hierarchical grid (default=False)
nlevels= number of hierarchical levels for the hierarchical grid
integrate_method= orbit.integrate method argument
OUTPUT:
Oort K at R,phi,t
HISTORY:
2011-10-16 - Written - Bovy (NYU)
"""
#First calculate the grids if they are not given
if isinstance(grid,bool) and grid:
(surfacemass,grid)= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=True,
gridpoints=gridpoints,
returnGrid=True,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
elif isinstance(grid,evolveddiskdfGrid) or \
isinstance(grid,evolveddiskdfHierarchicalGrid):
surfacemass= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
if isinstance(derivRGrid,bool) and derivRGrid:
(dsurfacemassdR,derivRGrid)= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=True,
gridpoints=derivGridpoints,
returnGrid=True,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='R')
elif isinstance(derivRGrid,evolveddiskdfGrid) or \
isinstance(derivRGrid,evolveddiskdfHierarchicalGrid):
dsurfacemassdR= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=derivRGrid,
gridpoints=derivGridpoints,
returnGrid=False,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='R')
if isinstance(derivphiGrid,bool) and derivphiGrid:
(dsurfacemassdphi,derivphiGrid)= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=True,
gridpoints=derivGridpoints,
returnGrid=True,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='phi')
elif isinstance(derivphiGrid,evolveddiskdfGrid) or \
isinstance(derivphiGrid,evolveddiskdfHierarchicalGrid):
dsurfacemassdphi= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=derivphiGrid,
gridpoints=derivGridpoints,
returnGrid=False,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='phi')
#2K= meanvR/R+dmeanvT/R/dphi+dmeanvR/dR
#meanvR
meanvR= self.meanvR(R,t=t,nsigma=nsigma,deg=deg,phi=phi,
epsrel=epsrel,epsabs=epsabs,
grid=grid,gridpoints=gridpoints,returnGrid=False,
surfacemass=surfacemass,
hierarchgrid=hierarchgrid,
nlevels=nlevels,integrate_method=integrate_method,
use_physical=False)
dmeanvTdphi= (self.vmomentsurfacemass(R,0,1,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=derivphiGrid,
gridpoints=derivGridpoints,
returnGrid=False,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='phi')
/surfacemass
-self.vmomentsurfacemass(R,0,1,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
/surfacemass**2.*dsurfacemassdphi)
dmeanvRdR= (self.vmomentsurfacemass(R,1,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=derivRGrid,
gridpoints=derivGridpoints,
returnGrid=False,
hierarchgrid=derivHierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method,deriv='R')
/surfacemass
-self.vmomentsurfacemass(R,1,0,deg=deg,t=t,phi=phi,
nsigma=nsigma,epsrel=epsrel,
epsabs=epsabs,grid=grid,
gridpoints=gridpoints,
returnGrid=False,
hierarchgrid=hierarchgrid,
nlevels=nlevels,
integrate_method=integrate_method)
/surfacemass**2.*dsurfacemassdR)
if returnGrids:
return (0.5*(meanvR/R+dmeanvTdphi/R+dmeanvRdR),grid,
derivRGrid,derivphiGrid)
else:
return 0.5*(meanvR/R+dmeanvTdphi/R+dmeanvRdR)
def _vmomentsurfacemassGrid(self,n,m,grid):
"""Internal function to evaluate vmomentsurfacemass using a grid
rather than direct integration"""
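        # The moment is approximated as a Riemann sum over the velocity grid:
        # sum_ij vR_i^n * vT_j^m * df_ij * dvR * dvT, evaluated as the
        # vector-matrix-vector product vRgrid^n . df . vTgrid^m times the
        # (uniform) grid-cell area; for a list of times this is done per slice.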
if len(grid.df.shape) == 3: tlist= True
else: tlist= False
if tlist:
nt= grid.df.shape[2]
out= []
for ii in range(nt):
out.append(nu.dot(grid.vRgrid**n,nu.dot(grid.df[:,:,ii],grid.vTgrid**m))*\
(grid.vRgrid[1]-grid.vRgrid[0])*(grid.vTgrid[1]-grid.vTgrid[0]))
return nu.array(out)
else:
return nu.dot(grid.vRgrid**n,nu.dot(grid.df,grid.vTgrid**m))*\
(grid.vRgrid[1]-grid.vRgrid[0])*(grid.vTgrid[1]-grid.vTgrid[0])
def _buildvgrid(self,R,phi,nsigma,t,sigmaR1,sigmaT1,meanvR,meanvT,
gridpoints,print_progress,integrate_method,deriv):
"""Internal function to grid the vDF at a given location"""
out= evolveddiskdfGrid()
out.sigmaR1= sigmaR1
out.sigmaT1= sigmaT1
out.meanvR= meanvR
out.meanvT= meanvT
out.vRgrid= nu.linspace(meanvR-nsigma*sigmaR1,meanvR+nsigma*sigmaR1,
gridpoints)
out.vTgrid= nu.linspace(meanvT-nsigma*sigmaT1,meanvT+nsigma*sigmaT1,
gridpoints)
if isinstance(t,(list,nu.ndarray)):
nt= len(t)
out.df= nu.zeros((gridpoints,gridpoints,nt))
for ii in range(gridpoints):
for jj in range(gridpoints-1,-1,-1):#Reverse, so we get the peak before we get to the extreme lags NOT NECESSARY
if print_progress: #pragma: no cover
sys.stdout.write('\r'+"Velocity gridpoint %i out of %i" % \
(jj+ii*gridpoints+1,gridpoints*gridpoints))
sys.stdout.flush()
thiso= Orbit([R,out.vRgrid[ii],out.vTgrid[jj],phi])
out.df[ii,jj,:]= self(thiso,nu.array(t).flatten(),
integrate_method=integrate_method,
deriv=deriv,use_physical=False)
out.df[ii,jj,nu.isnan(out.df[ii,jj,:])]= 0. #BOVY: for now
if print_progress: sys.stdout.write('\n') #pragma: no cover
else:
out.df= nu.zeros((gridpoints,gridpoints))
for ii in range(gridpoints):
for jj in range(gridpoints):
if print_progress: #pragma: no cover
sys.stdout.write('\r'+"Velocity gridpoint %i out of %i" % \
(jj+ii*gridpoints+1,gridpoints*gridpoints))
sys.stdout.flush()
thiso= Orbit([R,out.vRgrid[ii],out.vTgrid[jj],phi])
out.df[ii,jj]= self(thiso,t,
integrate_method=integrate_method,
deriv=deriv,use_physical=False)
if nu.isnan(out.df[ii,jj]): out.df[ii,jj]= 0. #BOVY: for now
if print_progress: sys.stdout.write('\n') #pragma: no cover
return out
def _create_ts_tlist(self,t,integrate_method):
#Check input
if not all(t == sorted(t,reverse=True)): raise IOError("List of times has to be sorted in descending order")
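        # Build the time array handed to the orbit integration: the requested
        # (descending) evaluation times are mapped onto a single array running
        # back towards the initial time self._to, so that one backward
        # integration covers all of them; 'odeint' additionally gets a dense
        # grid of intermediate times.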
#Initialize
if integrate_method == 'odeint':
_NTS= 1000
tmax= nu.amax(t)
ts= nu.linspace(tmax,self._to,_NTS)
#Add other t
ts= list(ts)
ts.extend([self._to+tmax-ti for ti in t if ti != tmax])
else:
if len(t) == 1: #Special case this because it is confusing
ts= nu.array([t[0],self._to])
else:
ts= -t+self._to+nu.amax(t)
#sort
ts= list(ts)
ts.sort(reverse=True)
return nu.array(ts)
def _call_marginalizevperp(self,o,integrate_method='dopr54_c',**kwargs):
"""Call the DF, marginalizing over perpendicular velocity"""
#Get d, l, vlos
l= o.ll(obs=[1.,0.,0.],ro=1.)*_DEGTORAD
vlos= o.vlos(ro=1.,vo=1.,obs=[1.,0.,0.,0.,0.,0.])
R= o.R(use_physical=False)
phi= o.phi(use_physical=False)
#Get local circular velocity, projected onto the los
if isinstance(self._pot,list):
vcirc= calcRotcurve([p for p in self._pot if not p.isNonAxi],R)[0]
else:
vcirc= calcRotcurve(self._pot,R)[0]
vcirclos= vcirc*math.sin(phi+l)
#Marginalize
alphalos= phi+l
if not 'nsigma' in kwargs or ('nsigma' in kwargs and \
kwargs['nsigma'] is None):
nsigma= _NSIGMA
else:
nsigma= kwargs['nsigma']
kwargs.pop('nsigma',None)
#BOVY: add asymmetric drift here?
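        # Integrate the DF over the velocity component perpendicular to the
        # line of sight; the two branches below parameterize the integral in
        # terms of whichever velocity component avoids dividing by a small
        # sin/cos of the line-of-sight angle, with the integration variable
        # expressed in units of a velocity dispersion so that the +/- nsigma
        # limits are meaningful.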
if math.fabs(math.sin(alphalos)) < math.sqrt(1./2.):
sigmaR1= nu.sqrt(self._initdf.sigmaT2(R,phi=phi,
use_physical=False)) #Slight abuse
cosalphalos= math.cos(alphalos)
tanalphalos= math.tan(alphalos)
return integrate.quad(_marginalizeVperpIntegrandSinAlphaSmall,
-nsigma,nsigma,
args=(self,R,cosalphalos,tanalphalos,
vlos-vcirclos,vcirc,
sigmaR1,phi),
**kwargs)[0]/math.fabs(cosalphalos)*sigmaR1
else:
sigmaR1= nu.sqrt(self._initdf.sigmaR2(R,phi=phi,
use_physical=False))
sinalphalos= math.sin(alphalos)
cotalphalos= 1./math.tan(alphalos)
return integrate.quad(_marginalizeVperpIntegrandSinAlphaLarge,
-nsigma,nsigma,
args=(self,R,sinalphalos,cotalphalos,
vlos-vcirclos,vcirc,sigmaR1,phi),
**kwargs)[0]/math.fabs(sinalphalos)*sigmaR1
def _call_marginalizevlos(self,o,integrate_method='dopr54_c',**kwargs):
"""Call the DF, marginalizing over line-of-sight velocity"""
#Get d, l, vperp
l= o.ll(obs=[1.,0.,0.],ro=1.)*_DEGTORAD
vperp= o.vll(ro=1.,vo=1.,obs=[1.,0.,0.,0.,0.,0.])
R= o.R(use_physical=False)
phi= o.phi(use_physical=False)
#Get local circular velocity, projected onto the perpendicular
#direction
if isinstance(self._pot,list):
vcirc= calcRotcurve([p for p in self._pot if not p.isNonAxi],R)[0]
else:
vcirc= calcRotcurve(self._pot,R)[0]
vcircperp= vcirc*math.cos(phi+l)
#Marginalize
alphaperp= math.pi/2.+phi+l
if not 'nsigma' in kwargs or ('nsigma' in kwargs and \
kwargs['nsigma'] is None):
nsigma= _NSIGMA
else:
nsigma= kwargs['nsigma']
kwargs.pop('nsigma',None)
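        # Same branching trick as in _call_marginalizevperp, but integrating
        # over the line-of-sight velocity instead; alphaperp is the angle of
        # the direction perpendicular to the line of sight, and the integrand
        # helpers are reused since only the angle changes.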
if math.fabs(math.sin(alphaperp)) < math.sqrt(1./2.):
sigmaR1= nu.sqrt(self._initdf.sigmaT2(R,phi=phi,
use_physical=False)) #slight abuse
va= vcirc-self._initdf.meanvT(R,phi=phi,use_physical=False)
cosalphaperp= math.cos(alphaperp)
tanalphaperp= math.tan(alphaperp)
#we can reuse the VperpIntegrand, since it is just another angle
return integrate.quad(_marginalizeVperpIntegrandSinAlphaSmall,
-va/sigmaR1-nsigma,
-va/sigmaR1+nsigma,
args=(self,R,cosalphaperp,tanalphaperp,
vperp-vcircperp,vcirc,
sigmaR1,phi),
**kwargs)[0]/math.fabs(cosalphaperp)*sigmaR1
else:
sigmaR1= nu.sqrt(self._initdf.sigmaR2(R,phi=phi,
use_physical=False))
sinalphaperp= math.sin(alphaperp)
cotalphaperp= 1./math.tan(alphaperp)
#we can reuse the VperpIntegrand, since it is just another angle
return integrate.quad(_marginalizeVperpIntegrandSinAlphaLarge,
-nsigma,nsigma,
args=(self,R,sinalphaperp,cotalphaperp,
vperp-vcircperp,vcirc,sigmaR1,phi),
**kwargs)[0]/math.fabs(sinalphaperp)*sigmaR1
def _vmomentsurfacemassHierarchicalGrid(self,n,m,grid):
"""Internal function to evaluate vmomentsurfacemass using a
        hierarchical grid rather than direct integration; a thin wrapper
        around the grid object's __call__"""
return grid(n,m)
class evolveddiskdfGrid(object):
"""(not quite) Empty class since it is only used to store some stuff"""
def __init__(self):
return None
def plot(self,tt=0):
"""
NAME:
plot
PURPOSE:
plot the velocity distribution
INPUT:
           tt= optional time index
OUTPUT:
plot of velocity distribution to output device
HISTORY:
2011-06-27 - Written - Bovy (NYU)
"""
xrange= [self.vRgrid[0],self.vRgrid[len(self.vRgrid)-1]]
yrange= [self.vTgrid[0],self.vTgrid[len(self.vTgrid)-1]]
if len(self.df.shape) == 3:
plotthis= self.df[:,:,tt]
else:
plotthis= self.df
bovy_plot.bovy_dens2d(plotthis.T,cmap='gist_yarg',origin='lower',
aspect=(xrange[1]-xrange[0])/\
(yrange[1]-yrange[0]),
extent=[xrange[0],xrange[1],
yrange[0],yrange[1]],
xlabel=r'$v_R / v_0$',
ylabel=r'$v_T / v_0$')
class evolveddiskdfHierarchicalGrid(object):
"""Class that holds a hierarchical velocity grid"""
def __init__(self,edf,R,phi,nsigma,t,sigmaR1,sigmaT1,meanvR,meanvT,
gridpoints,nlevels,deriv,upperdxdy=None,print_progress=False,
nlevelsTotal=None):
"""
NAME:
__init__
PURPOSE:
Initialize a hierarchical grid
INPUT:
edf - evolveddiskdf instance
R - Radius
phi- azimuth
nsigma - number of sigma to integrate over
t- time
sigmaR1 - radial dispersion
sigmaT1 - tangential dispersion
meanvR - mean of radial velocity
meanvT - mean of tangential velocity
gridpoints- number of gridpoints
nlevels- number of levels to build
deriv- None, 'R', or 'phi': calculates derivative of the moment wrt
R or phi
upperdxdy= area element of previous hierarchical level
print_progress= if True, print progress on building the grid
OUTPUT:
object
HISTORY:
2011-04-21 - Written - Bovy (NYU)
"""
self.sigmaR1= sigmaR1
self.sigmaT1= sigmaT1
self.meanvR= meanvR
self.meanvT= meanvT
self.gridpoints= gridpoints
self.vRgrid= nu.linspace(self.meanvR-nsigma*self.sigmaR1,
self.meanvR+nsigma*self.sigmaR1,
self.gridpoints)
self.vTgrid= nu.linspace(self.meanvT-nsigma*self.sigmaT1,
self.meanvT+nsigma*self.sigmaT1,
self.gridpoints)
self.t= t
if nlevelsTotal is None:
nlevelsTotal= nlevels
self.nlevels= nlevels
self.nlevelsTotal= nlevelsTotal
if isinstance(t,(list,nu.ndarray)):
nt= len(t)
self.df= nu.zeros((gridpoints,gridpoints,nt))
dxdy= (self.vRgrid[1]-self.vRgrid[0])\
*(self.vTgrid[1]-self.vTgrid[0])
if nlevels > 0:
xsubmin= int(gridpoints)//4
xsubmax= gridpoints-int(gridpoints)//4
else:
xsubmin= gridpoints
xsubmax= 0
ysubmin, ysubmax= xsubmin, xsubmax
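            # Grid nodes with both indices inside [xsubmin,xsubmax) form the
            # central block of velocity space that the next, finer hierarchical
            # level handles (see the subgrid set up at the end of __init__),
            # so they are skipped here whenever nlevels > 1.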
for ii in range(gridpoints):
for jj in range(gridpoints):
if print_progress: #pragma: no cover
sys.stdout.write('\r'+"Velocity gridpoint %i out of %i" % \
(jj+ii*gridpoints+1,gridpoints*gridpoints))
sys.stdout.flush()
#If this is part of a subgrid, ignore
if nlevels > 1 and ii >= xsubmin and ii < xsubmax \
and jj >= ysubmin and jj < ysubmax:
continue
thiso= Orbit([R,self.vRgrid[ii],self.vTgrid[jj],phi])
self.df[ii,jj,:]= edf(thiso,nu.array(t).flatten(),
deriv=deriv)
self.df[ii,jj,nu.isnan(self.df[ii,jj,:])]= 0.#BOVY: for now
#Multiply in area, somewhat tricky for edge objects
if upperdxdy is None or (ii != 0 and ii != gridpoints-1\
and jj != 0
and jj != gridpoints-1):
self.df[ii,jj,:]*= dxdy
elif ((ii == 0 or ii == gridpoints-1) and \
(jj != 0 and jj != gridpoints-1))\
or \
((jj == 0 or jj == gridpoints-1) and \
(ii != 0 and ii != gridpoints-1)): #edge
self.df[ii,jj,:]*= 1.5*dxdy/1.5 #turn this off for now
else: #corner
self.df[ii,jj,:]*= 2.25*dxdy/2.25 #turn this off for now
if print_progress: sys.stdout.write('\n') #pragma: no cover
else:
self.df= nu.zeros((gridpoints,gridpoints))
dxdy= (self.vRgrid[1]-self.vRgrid[0])\
*(self.vTgrid[1]-self.vTgrid[0])
if nlevels > 0:
xsubmin= int(gridpoints)//4
xsubmax= gridpoints-int(gridpoints)//4
else:
xsubmin= gridpoints
xsubmax= 0
ysubmin, ysubmax= xsubmin, xsubmax
for ii in range(gridpoints):
for jj in range(gridpoints):
if print_progress: #pragma: no cover
sys.stdout.write('\r'+"Velocity gridpoint %i out of %i" % \
(jj+ii*gridpoints+1,gridpoints*gridpoints))
sys.stdout.flush()
#If this is part of a subgrid, ignore
if nlevels > 1 and ii >= xsubmin and ii < xsubmax \
and jj >= ysubmin and jj < ysubmax:
continue
thiso= Orbit([R,self.vRgrid[ii],self.vTgrid[jj],phi])
self.df[ii,jj]= edf(thiso,t,deriv=deriv)
if nu.isnan(self.df[ii,jj]): self.df[ii,jj]= 0. #BOVY: for now
#Multiply in area, somewhat tricky for edge objects
if upperdxdy is None or (ii != 0 and ii != gridpoints-1\
and jj != 0
and jj != gridpoints-1):
self.df[ii,jj]*= dxdy
elif ((ii == 0 or ii == gridpoints-1) and \
(jj != 0 and jj != gridpoints-1))\
or \
((jj == 0 or jj == gridpoints-1) and \
(ii != 0 and ii != gridpoints-1)): #edge
self.df[ii,jj]*= 1.5*dxdy/1.5#turn this off for now
else: #corner
self.df[ii,jj]*= 2.25*dxdy/2.25#turn this off for now
if print_progress: sys.stdout.write('\n') #pragma: no cover
if nlevels > 1:
#Set up subgrid
subnsigma= (self.meanvR-self.vRgrid[xsubmin])/self.sigmaR1
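            # subnsigma is the half-width of the skipped central block in units
            # of sigmaR1, so the recursive subgrid below re-samples that region
            # of velocity space at full `gridpoints` resolution.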
self.subgrid= evolveddiskdfHierarchicalGrid(edf,R,phi,
subnsigma,t,
sigmaR1,
sigmaT1,
meanvR,
meanvT,
gridpoints,
nlevels-1,
deriv,
upperdxdy=dxdy,
print_progress=print_progress,
nlevelsTotal=nlevelsTotal)
else:
self.subgrid= None
return None
def __call__(self,n,m):
"""Call"""
if isinstance(self.t,(list,nu.ndarray)): tlist= True
else: tlist= False
if tlist:
nt= self.df.shape[2]
out= []
for ii in range(nt):
#We already multiplied in the area
out.append(nu.dot(self.vRgrid**n,nu.dot(self.df[:,:,ii],
self.vTgrid**m)))
if self.subgrid is None: return nu.array(out)
else: return nu.array(out)+ self.subgrid(n,m)
else:
#We already multiplied in the area
thislevel= nu.dot(self.vRgrid**n,nu.dot(self.df,self.vTgrid**m))
if self.subgrid is None: return thislevel
else: return thislevel+self.subgrid(n,m)
def plot(self,tt=0,vmax=None,aspect=None,extent=None):
"""
NAME:
plot
PURPOSE:
plot the velocity distribution
INPUT:
           tt= optional time index
OUTPUT:
plot of velocity distribution to output device
HISTORY:
2011-06-27 - Written - Bovy (NYU)
"""
if vmax is None:
vmax= self.max(tt=tt)*2.
#Figure out how big of a grid we need
dvR= (self.vRgrid[1]-self.vRgrid[0])
dvT= (self.vTgrid[1]-self.vTgrid[0])
nvR= len(self.vRgrid)
nvT= len(self.vTgrid)
nUpperLevels= self.nlevelsTotal-self.nlevels
nvRTot= nvR*2**nUpperLevels
nvTTot= nvT*2**nUpperLevels
plotthis= | nu.zeros((nvRTot,nvTTot)) | numpy.zeros |
import os
import sys
import itertools
import unittest
import numpy as np
from io import StringIO
from numpy.testing import assert_almost_equal
try:
from scipy.sparse import load_npz
except ImportError:
load_npz = None
import openmdao.api as om
from openmdao.utils.general_utils import set_pyoptsparse_opt
from openmdao.utils.coloring import _compute_coloring, array_viz, compute_total_coloring
from openmdao.utils.mpi import MPI
from openmdao.utils.testing_utils import use_tempdirs
from openmdao.test_suite.tot_jac_builder import TotJacBuilder
from openmdao.utils.general_utils import run_driver
import openmdao.test_suite
try:
from parameterized import parameterized
except ImportError:
from openmdao.utils.assert_utils import SkipParameterized as parameterized
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
# check that pyoptsparse is installed
OPT, OPTIMIZER = set_pyoptsparse_opt('SNOPT')
if OPTIMIZER:
from openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver
class CounterGroup(om.Group):
def __init__(self, *args, **kwargs):
self._solve_count = 0
self._solve_nl_count = 0
self._apply_nl_count = 0
super().__init__(*args, **kwargs)
def _solve_linear(self, *args, **kwargs):
super()._solve_linear(*args, **kwargs)
self._solve_count += 1
def _solve_nonlinear(self, *args, **kwargs):
super()._solve_nonlinear(*args, **kwargs)
self._solve_nl_count += 1
def _apply_nonlinear(self, *args, **kwargs):
super()._apply_nonlinear(*args, **kwargs)
self._apply_nl_count += 1
# note: size must be an even number
SIZE = 10
class DynPartialsComp(om.ExplicitComponent):
def __init__(self, size):
super().__init__()
self.size = size
self.num_computes = 0
def setup(self):
self.add_input('y', np.ones(self.size))
self.add_input('x', np.ones(self.size))
self.add_output('g', np.ones(self.size))
# turn on dynamic partial coloring
self.declare_coloring(wrt='*', method='cs', perturb_size=1e-5, num_full_jacs=2, tol=1e-20)
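        # g = arctan(y/x) is applied elementwise, so dg/dx and dg/dy are both
        # diagonal; the dynamic coloring declared above should recover that
        # sparsity from the two complex-step jacobians requested.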
def compute(self, inputs, outputs):
outputs['g'] = np.arctan(inputs['y'] / inputs['x'])
self.num_computes += 1
def run_opt(driver_class, mode, assemble_type=None, color_info=None, derivs=True,
recorder=None, has_lin_constraint=True, has_diag_partials=True, partial_coloring=False,
use_vois=True, auto_ivc=False, **options):
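    # Builds and runs the "points on a circle" benchmark used throughout these
    # coloring tests: the driver varies the point coordinates (x, y) and the
    # radius r to maximize the circle area (ref=-1 flips the sign for the
    # minimizer), subject to all points satisfying x**2 + y**2 == r, the
    # even-indexed polar angles matching prescribed values, consecutive pairs
    # of points sharing an angle, x[0] == 1, and y[0] == 0.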
p = om.Problem(model=CounterGroup())
if assemble_type is not None:
p.model.linear_solver = om.DirectSolver(assemble_jac=True)
p.model.options['assembled_jac_type'] = assemble_type
    # the following were randomly generated using np.random.random(10)*2-1 to
    # scatter the points within [-1, 1] in each coordinate.
x_init = np.array([ 0.55994437, -0.95923447, 0.21798656, -0.02158783, 0.62183717,
0.04007379, 0.46044942, -0.10129622, 0.27720413, -0.37107886])
y_init = np.array([ 0.52577864, 0.30894559, 0.8420792 , 0.35039912, -0.67290778,
-0.86236787, -0.97500023, 0.47739414, 0.51174103, 0.10052582])
r_init = .7
if auto_ivc:
p.model.set_input_defaults('x', x_init)
p.model.set_input_defaults('y', y_init)
p.model.set_input_defaults('r', r_init)
else:
indeps = p.model.add_subsystem('indeps', om.IndepVarComp(), promotes_outputs=['*'])
indeps.add_output('x', x_init)
indeps.add_output('y', y_init)
indeps.add_output('r', r_init)
if partial_coloring:
arctan_yox = om.ExecComp('g=arctan(y/x)', shape=SIZE)
arctan_yox.declare_coloring(wrt='*', method='cs', perturb_size=1e-5, num_full_jacs=2, tol=1e-20)
else:
arctan_yox = om.ExecComp('g=arctan(y/x)', shape=SIZE, has_diag_partials=has_diag_partials)
p.model.add_subsystem('arctan_yox', arctan_yox)
p.model.add_subsystem('circle', om.ExecComp('area=pi*r**2'))
p.model.add_subsystem('r_con', om.ExecComp('g=x**2 + y**2 - r', has_diag_partials=has_diag_partials,
g=np.ones(SIZE), x=np.ones(SIZE), y=np.ones(SIZE)))
thetas = np.linspace(0, np.pi/4, SIZE)
p.model.add_subsystem('theta_con', om.ExecComp('g = x - theta', has_diag_partials=has_diag_partials,
g=np.ones(SIZE), x=np.ones(SIZE),
theta=thetas))
p.model.add_subsystem('delta_theta_con', om.ExecComp('g = even - odd', has_diag_partials=has_diag_partials,
g=np.ones(SIZE//2), even=np.ones(SIZE//2),
odd=np.ones(SIZE//2)))
p.model.add_subsystem('l_conx', om.ExecComp('g=x-1', has_diag_partials=has_diag_partials, g=np.ones(SIZE), x=np.ones(SIZE)))
IND = np.arange(SIZE, dtype=int)
ODD_IND = IND[1::2] # all odd indices
EVEN_IND = IND[0::2] # all even indices
if auto_ivc:
p.model.promotes('circle', inputs=['r'])
p.model.promotes('r_con', inputs=['r', 'x', 'y'])
p.model.promotes('l_conx', inputs=['x'])
p.model.promotes('arctan_yox', inputs=['x', 'y'])
else:
p.model.connect('r', ('circle.r', 'r_con.r'))
p.model.connect('x', ['r_con.x', 'arctan_yox.x', 'l_conx.x'])
p.model.connect('y', ['r_con.y', 'arctan_yox.y'])
p.model.connect('arctan_yox.g', 'theta_con.x')
p.model.connect('arctan_yox.g', 'delta_theta_con.even', src_indices=EVEN_IND)
p.model.connect('arctan_yox.g', 'delta_theta_con.odd', src_indices=ODD_IND)
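    # Alternating (even/odd indexed) arctan outputs feed delta_theta_con, whose
    # constraint g = even - odd ~ 0 forces consecutive points to share the same
    # polar angle in pairs.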
p.driver = driver_class()
if 'method' in options:
p.model.approx_totals(method=options['method'])
del options['method']
if 'dynamic_total_coloring' in options:
p.driver.declare_coloring(tol=1e-15)
del options['dynamic_total_coloring']
p.driver.options.update(options)
if use_vois:
p.model.add_design_var('x')
p.model.add_design_var('y')
p.model.add_design_var('r', lower=.5, upper=10)
# nonlinear constraints
p.model.add_constraint('r_con.g', equals=0)
p.model.add_constraint('theta_con.g', lower=-1e-5, upper=1e-5, indices=EVEN_IND)
p.model.add_constraint('delta_theta_con.g', lower=-1e-5, upper=1e-5)
# this constrains x[0] to be 1 (see definition of l_conx)
p.model.add_constraint('l_conx.g', equals=0, linear=False, indices=[0,])
# linear constraint (if has_lin_constraint is set)
p.model.add_constraint('y', equals=0, indices=[0,], linear=has_lin_constraint)
p.model.add_objective('circle.area', ref=-1)
# setup coloring
if color_info is not None:
p.driver.use_fixed_coloring(color_info)
if recorder:
p.driver.add_recorder(recorder)
p.setup(mode=mode, derivatives=derivs)
if use_vois:
p.run_driver()
else:
p.run_model()
return p
@use_tempdirs
class SimulColoringPyoptSparseTestCase(unittest.TestCase):
@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
def test_dynamic_total_coloring_snopt_auto(self):
# first, run w/o coloring
p = run_opt(pyOptSparseDriver, 'auto', optimizer='SNOPT', print_results=False)
p_color = run_opt(pyOptSparseDriver, 'auto', optimizer='SNOPT', print_results=False,
dynamic_total_coloring=True)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# - coloring saves 16 solves per driver iter (5 vs 21)
# - initial solve for linear constraints takes 21 in both cases (only done once)
# - dynamic case does 3 full compute_totals to compute coloring, which adds 21 * 3 solves
# - (total_solves - N) / (solves_per_iter) should be equal between the two cases,
# - where N is 21 for the uncolored case and 21 * 4 for the dynamic colored case.
self.assertEqual((p.model._solve_count - 21) / 21,
(p_color.model._solve_count - 21 * 4) / 5)
@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
def test_dynamic_total_coloring_snopt_auto_autoivc(self):
# first, run w/o coloring
p = run_opt(pyOptSparseDriver, 'auto', optimizer='SNOPT', print_results=False,
auto_ivc=True)
p_color = run_opt(pyOptSparseDriver, 'auto', optimizer='SNOPT', print_results=False,
dynamic_total_coloring=True, auto_ivc=True)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# - coloring saves 16 solves per driver iter (5 vs 21)
# - initial solve for linear constraints takes 21 in both cases (only done once)
# - dynamic case does 3 full compute_totals to compute coloring, which adds 21 * 3 solves
# - (total_solves - N) / (solves_per_iter) should be equal between the two cases,
# - where N is 21 for the uncolored case and 21 * 4 for the dynamic colored case.
self.assertEqual((p.model._solve_count - 21) / 21,
(p_color.model._solve_count - 21 * 4) / 5)
@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
def test_dynamic_total_coloring_snopt_auto_dyn_partials(self):
# first, run w/o coloring
p = run_opt(pyOptSparseDriver, 'auto', optimizer='SNOPT', print_results=False)
p_color = run_opt(pyOptSparseDriver, 'auto', optimizer='SNOPT', print_results=False,
dynamic_total_coloring=True, partial_coloring=True)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# - coloring saves 16 solves per driver iter (5 vs 21)
# - initial solve for linear constraints takes 21 in both cases (only done once)
# - dynamic case does 3 full compute_totals to compute coloring, which adds 21 * 3 solves
# - (total_solves - N) / (solves_per_iter) should be equal between the two cases,
# - where N is 21 for the uncolored case and 21 * 4 for the dynamic colored case.
self.assertEqual((p.model._solve_count - 21) / 21,
(p_color.model._solve_count - 21 * 4) / 5)
partial_coloring = p_color.model._get_subsystem('arctan_yox')._coloring_info['coloring']
expected = [
"self.declare_partials(of='g', wrt='x', rows=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], cols=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])",
"self.declare_partials(of='g', wrt='y', rows=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], cols=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])",
]
decl_partials_calls = partial_coloring.get_declare_partials_calls().strip()
for i, d in enumerate(decl_partials_calls.split('\n')):
self.assertEqual(d.strip(), expected[i])
fwd_solves, rev_solves = p_color.driver._coloring_info['coloring'].get_row_var_coloring('delta_theta_con.g')
self.assertEqual(fwd_solves, 4)
self.assertEqual(rev_solves, 0)
@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
def test_dynamic_total_coloring_snopt_auto_dyn_partials_assembled_jac(self):
# first, run w/o coloring
p = run_opt(pyOptSparseDriver, 'auto', assemble_type='csc', optimizer='SNOPT', print_results=False)
p_color = run_opt(pyOptSparseDriver, 'auto', assemble_type='csc', optimizer='SNOPT', print_results=False,
dynamic_total_coloring=True, partial_coloring=True)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# - coloring saves 16 solves per driver iter (5 vs 21)
# - initial solve for linear constraints takes 21 in both cases (only done once)
# - dynamic case does 3 full compute_totals to compute coloring, which adds 21 * 3 solves
# - (total_solves - N) / (solves_per_iter) should be equal between the two cases,
# - where N is 21 for the uncolored case and 21 * 4 for the dynamic colored case.
# This has been changed to greater or equal to 28 iterations. This change arose when updating
# to pyOptSparse v2.1.0 and SNOPT 7.7
self.assertGreaterEqual((p.model._solve_count - 21) / 21,
(p_color.model._solve_count - 21 * 4) / 5)
@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
def test_dynamic_total_coloring_snopt_auto_assembled(self):
# first, run w/o coloring
p = run_opt(pyOptSparseDriver, 'auto', assemble_type='dense', optimizer='SNOPT', print_results=False)
p_color = run_opt(pyOptSparseDriver, 'auto', assemble_type='dense', optimizer='SNOPT', print_results=False,
dynamic_total_coloring=True)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# - coloring saves 16 solves per driver iter (5 vs 21)
# - initial solve for linear constraints takes 21 in both cases (only done once)
# - dynamic case does 3 full compute_totals to compute coloring, which adds 21 * 3 solves
# - (total_solves - N) / (solves_per_iter) should be equal between the two cases,
# - where N is 21 for the uncolored case and 21 * 4 for the dynamic colored case.
self.assertEqual((p.model._solve_count - 21) / 21,
(p_color.model._solve_count - 21 * 4) / 5)
@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
def test_dynamic_fwd_simul_coloring_snopt_approx_cs(self):
# first, run w/o coloring
p = run_opt(pyOptSparseDriver, 'fwd', optimizer='SNOPT', print_results=False, has_lin_constraint=False, method='cs')
p_color = run_opt(pyOptSparseDriver, 'fwd', optimizer='SNOPT', has_lin_constraint=False,
has_diag_partials=True, print_results=False,
dynamic_total_coloring=True, method='cs')
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# - fwd coloring saves 16 nonlinear solves per driver iter (6 vs 22).
# - dynamic coloring takes 66 nonlinear solves (22 each for 3 full jacs)
# - (total_solves - 2) / (solves_per_iter) should be equal to
# (total_color_solves - 2 - dyn_solves) / color_solves_per_iter
self.assertEqual((p.model._solve_nl_count - 2) / 22,
(p_color.model._solve_nl_count - 2 - 66) / 6)
@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
def test_dynamic_fwd_simul_coloring_snopt_approx_fd(self):
# first, run w/o coloring
p = run_opt(pyOptSparseDriver, 'fwd', optimizer='SNOPT', print_results=False, has_lin_constraint=False, method='cs')
p_color = run_opt(pyOptSparseDriver, 'fwd', optimizer='SNOPT', has_lin_constraint=False,
has_diag_partials=True, print_results=False,
dynamic_total_coloring=True, method='fd')
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# - fwd coloring saves 16 nonlinear solves per driver iter (6 vs 22).
# - dynamic coloring takes 66 nonlinear solves (22 each for 3 full jacs)
# - (total_solves - 2) / (solves_per_iter) should be equal to
# (total_color_solves - 2 - dyn_solves) / color_solves_per_iter
self.assertEqual((p.model._solve_nl_count - 2) / 22,
(p_color.model._solve_nl_count - 2 - 66) / 6)
def test_size_zero_array_in_component(self):
class DynamicPartialsComp(om.ExplicitComponent):
def __init__(self, size):
super().__init__()
self.size = size
self.num_computes = 0
def setup(self):
self.add_input('y', np.ones(self.size))
self.add_input('x', np.ones(self.size))
self.add_output('g', np.ones(self.size))
self.declare_partials('*', '*', method='cs')
# turn on dynamic partial coloring
self.declare_coloring(wrt='*', method='cs', perturb_size=1e-5, num_full_jacs=2, tol=1e-20,
orders=20, show_summary=True, show_sparsity=True)
def compute(self, inputs, outputs):
outputs['g'] = np.arctan(inputs['y'] / inputs['x'])
self.num_computes += 1
SIZE = 0
p = om.Problem()
arctan_yox = p.model.add_subsystem('arctan_yox', DynamicPartialsComp(SIZE))
p.driver = om.ScipyOptimizeDriver()
p.driver.options['optimizer'] = 'SLSQP'
p.driver.options['disp'] = False
p.driver.declare_coloring(show_summary=True, show_sparsity=True)
p.setup(mode='fwd')
with self.assertRaises(Exception) as context:
p.run_driver()
self.assertEqual(str(context.exception),
"'arctan_yox' <class DynamicPartialsComp>: 'arctan_yox.g' is an array of size 0")
def test_size_zero_array_declare_partials(self):
class DynamicPartialsComp(om.ExplicitComponent):
def __init__(self, size):
super().__init__()
self.size = size
self.num_computes = 0
def setup(self):
self.add_input('y', np.ones(self.size))
self.add_input('x', np.ones(self.size))
self.add_output('g', np.ones(self.size))
self.add_output('r', np.ones(1))
self.declare_partials('r', 'y', method='cs')
# turn on dynamic partial coloring
self.declare_coloring(wrt='*', method='cs', perturb_size=1e-5, num_full_jacs=2, tol=1e-20,
orders=20, show_summary=True, show_sparsity=True)
def compute(self, inputs, outputs):
outputs['g'] = np.arctan(inputs['y'] / inputs['x'])
self.num_computes += 1
SIZE = 0
p = om.Problem()
arctan_yox = p.model.add_subsystem('arctan_yox', DynamicPartialsComp(SIZE))
p.driver = om.ScipyOptimizeDriver()
p.driver.options['optimizer'] = 'SLSQP'
p.driver.options['disp'] = False
p.driver.declare_coloring(show_summary=True, show_sparsity=True)
p.setup(mode='fwd')
with self.assertRaises(Exception) as context:
p.run_driver()
self.assertEqual(str(context.exception),
"'arctan_yox' <class DynamicPartialsComp>: 'arctan_yox.y' is an array of size 0")
def test_dynamic_total_coloring_pyoptsparse_slsqp_auto(self):
try:
from pyoptsparse import OPT
except ImportError:
raise unittest.SkipTest("This test requires pyoptsparse.")
try:
OPT('SLSQP')
except:
raise unittest.SkipTest("This test requires pyoptsparse SLSQP.")
p_color = run_opt(pyOptSparseDriver, 'auto', optimizer='SLSQP', print_results=False,
dynamic_total_coloring=True)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# run w/o coloring
p = run_opt(pyOptSparseDriver, 'auto', optimizer='SLSQP', print_results=False)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
# - coloring saves 16 solves per driver iter (5 vs 21)
# - initial solve for linear constraints takes 21 in both cases (only done once)
# - dynamic case does 3 full compute_totals to compute coloring, which adds 21 * 3 solves
# - (total_solves - N) / (solves_per_iter) should be equal between the two cases,
# - where N is 21 for the uncolored case and 21 * 4 for the dynamic colored case.
self.assertEqual((p.model._solve_count - 21) / 21,
(p_color.model._solve_count - 21 * 4) / 5)
# test __repr__
rep = repr(p_color.driver._coloring_info['coloring'])
self.assertEqual(rep.replace('L', ''), 'Coloring (direction: fwd, ncolors: 5, shape: (22, 21), pct nonzero: 13.42, tol: 1e-15')
@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
def test_print_options_total_with_coloring_fwd(self):
# first, run w/o coloring
p = run_opt(pyOptSparseDriver, 'fwd', optimizer='SNOPT', print_results=False)
p_color = run_opt(pyOptSparseDriver, 'fwd', optimizer='SNOPT', print_results=False,
dynamic_total_coloring=True, debug_print=['totals'])
failed, output = run_driver(p_color)
self.assertFalse(failed, "Optimization failed.")
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
self.assertTrue('In mode: fwd, Solving variable(s) using simul coloring:' in output)
self.assertTrue("('indeps.y', [1, 3, 5, 7, 9])" in output)
self.assertTrue('Elapsed Time:' in output)
@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
def test_print_options_total_with_coloring_rev(self):
# first, run w/o coloring
p = run_opt(pyOptSparseDriver, 'rev', optimizer='SNOPT', print_results=False)
p_color = run_opt(pyOptSparseDriver, 'rev', optimizer='SNOPT', print_results=False,
dynamic_total_coloring=True, debug_print=['totals'])
failed, output = run_driver(p_color)
self.assertFalse(failed, "Optimization failed.")
self.assertTrue('In mode: rev, Solving variable(s) using simul coloring:' in output)
self.assertTrue("('r_con.g', [0])" in output)
self.assertTrue('Elapsed Time:' in output)
@use_tempdirs
@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
class SimulColoringRecordingTestCase(unittest.TestCase):
def test_recording(self):
# coloring involves an underlying call to run_model (and final_setup),
# this verifies that it is handled properly by the recording setup logic
recorder = om.SqliteRecorder('cases.sql')
p = run_opt(pyOptSparseDriver, 'auto', assemble_type='csc', optimizer='SNOPT',
dynamic_total_coloring=True, print_results=False, recorder=recorder)
cr = om.CaseReader('cases.sql')
self.assertEqual(cr.list_cases(out_stream=None), ['rank0:pyOptSparse_SNOPT|%d' % i for i in range(p.driver.iter_count)])
@use_tempdirs
class SimulColoringPyoptSparseRevTestCase(unittest.TestCase):
"""Reverse coloring tests for pyoptsparse."""
@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
def test_dynamic_rev_simul_coloring_snopt(self):
# first, run w/o coloring
p = run_opt(pyOptSparseDriver, 'rev', optimizer='SNOPT', print_results=False)
p_color = run_opt(pyOptSparseDriver, 'rev', optimizer='SNOPT', print_results=False,
dynamic_total_coloring=True)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# - rev coloring saves 11 solves per driver iter (11 vs 22)
# - initial solve for linear constraints takes 1 in both cases (only done once)
# - dynamic case does 3 full compute_totals to compute coloring, which adds 22 * 3 solves
# - (total_solves - N) / (solves_per_iter) should be equal between the two cases,
# - where N is 1 for the uncolored case and 22 * 3 + 1 for the dynamic colored case.
self.assertEqual((p.model._solve_count - 1) / 22,
(p_color.model._solve_count - 1 - 22 * 3) / 11)
# improve coverage of coloring.py
coloring = p_color.driver._coloring_info['coloring']
coloring.display_txt()
with open(os.devnull, 'w') as f:
array_viz(coloring.get_dense_sparsity(), prob=p_color, stream=f)
array_viz(coloring.get_dense_sparsity(), stream=f)
def test_dynamic_rev_simul_coloring_pyoptsparse_slsqp(self):
try:
from pyoptsparse import OPT
except ImportError:
raise unittest.SkipTest("This test requires pyoptsparse.")
try:
OPT('SLSQP')
except:
raise unittest.SkipTest("This test requires pyoptsparse SLSQP.")
p_color = run_opt(pyOptSparseDriver, 'rev', optimizer='SLSQP', print_results=False,
dynamic_total_coloring=True)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# Tests a bug where coloring ran the model when not needed.
self.assertEqual(p_color.model.iter_count, 9)
# run w/o coloring
p = run_opt(pyOptSparseDriver, 'rev', optimizer='SLSQP', print_results=False)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
# - coloring saves 11 solves per driver iter (11 vs 22)
# - initial solve for linear constraints takes 1 in both cases (only done once)
# - dynamic case does 3 full compute_totals to compute coloring, which adds 22 * 3 solves
# - (total_solves - N) / (solves_per_iter) should be equal between the two cases,
# - where N is 1 for the uncolored case and 22 * 3 + 1 for the dynamic colored case.
self.assertEqual((p.model._solve_count - 1) / 22,
(p_color.model._solve_count - 1 - 22 * 3) / 11)
@use_tempdirs
class SimulColoringScipyTestCase(unittest.TestCase):
def test_bad_mode(self):
p_color_fwd = run_opt(om.ScipyOptimizeDriver, 'fwd', optimizer='SLSQP', disp=False, dynamic_total_coloring=True)
coloring = p_color_fwd.driver._coloring_info['coloring']
with self.assertRaises(Exception) as context:
p_color = run_opt(om.ScipyOptimizeDriver, 'rev', color_info=coloring, optimizer='SLSQP', disp=False)
self.assertEqual(str(context.exception),
"Simultaneous coloring does forward solves but mode has been set to 'rev'")
def test_dynamic_total_coloring_auto(self):
# first, run w/o coloring
p = run_opt(om.ScipyOptimizeDriver, 'auto', optimizer='SLSQP', disp=False)
p_color = run_opt(om.ScipyOptimizeDriver, 'auto', optimizer='SLSQP', disp=False, dynamic_total_coloring=True)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# - bidirectional coloring saves 16 solves per driver iter (5 vs 21)
# - initial solve for linear constraints takes 21 in both cases (only done once)
# - dynamic case does 3 full compute_totals to compute coloring, which adds 21 * 3 solves
# - (total_solves - N) / (solves_per_iter) should be equal between the two cases,
# - where N is 21 for the uncolored case and 21 * 4 for the dynamic colored case.
self.assertEqual((p.model._solve_count - 21) / 21,
(p_color.model._solve_count - 21 * 4) / 5)
def test_problem_total_coloring_auto(self):
p = run_opt(om.ScipyOptimizeDriver, 'auto', optimizer='SLSQP', disp=False, use_vois=False)
coloring = compute_total_coloring(p,
of=['r_con.g', 'theta_con.g', 'delta_theta_con.g',
'l_conx.g', 'y', 'circle.area'],
wrt=['x', 'y', 'r'])
self.assertEqual(coloring.total_solves(), 5)
def test_problem_total_coloring_auto_mixed_vois(self):
p = run_opt(om.ScipyOptimizeDriver, 'auto', optimizer='SLSQP', disp=False,)
coloring = compute_total_coloring(p,
of=['r_con.g', 'theta_con.g', 'delta_theta_con.g',
'l_conx.g', 'y', 'circle.area'],
wrt=['x', 'y', 'r'])
self.assertEqual(coloring.total_solves(), 5)
coloring.display_txt() # leave this in because at one point it caused an exception
def test_simul_coloring_example(self):
SIZE = 10
p = om.Problem()
p.model.add_subsystem('arctan_yox', om.ExecComp('g=arctan(y/x)', has_diag_partials=True,
g=np.ones(SIZE), x=np.ones(SIZE), y=np.ones(SIZE)),
promotes_inputs=['x', 'y'])
p.model.add_subsystem('circle', om.ExecComp('area=pi*r**2'), promotes_inputs=['r'])
p.model.add_subsystem('r_con', om.ExecComp('g=x**2 + y**2 - r', has_diag_partials=True,
g=np.ones(SIZE), x=np.ones(SIZE), y=np.ones(SIZE)),
promotes_inputs=['r', 'x', 'y'])
thetas = np.linspace(0, np.pi/4, SIZE)
p.model.add_subsystem('theta_con', om.ExecComp('g = x - theta', has_diag_partials=True,
g=np.ones(SIZE), x=np.ones(SIZE),
theta=thetas))
p.model.add_subsystem('delta_theta_con', om.ExecComp('g = even - odd', has_diag_partials=True,
g=np.ones(SIZE//2), even=np.ones(SIZE//2),
odd=np.ones(SIZE//2)))
p.model.add_subsystem('l_conx', om.ExecComp('g=x-1', has_diag_partials=True, g= | np.ones(SIZE) | numpy.ones |
import numpy as np
k = np.arange(18)
s = 5.0/3.0
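# fat[i] = s*(s+1)*...*(s+i): rising-factorial products used in the series sums below (e.g. in sH2O)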
fat = [s]
for i in np.arange(1,18): fat = np.append(fat, fat[i-1]*(s+i))
def sN2(T):
c = [0.0, 0.88, 4.9, 48.0, 32.0]
return c[0] + c[1]*np.exp(-c[2]*300.0/T) + c[3]*np.exp(-c[4]*300.0/T)
def sO2(T):
c = [25.1]
return c[0]
def sNO(T):
c = [43.0]
return c[0]
def sO(T):
c = [32.0]
return c[0]
def sNO2(T):
c = [82.0, 9.0, 0.54]
return c[0] + c[1]*(300.0/T)**c[2]
def sNOBRE(T):
c = [0.0]
return c[0]
def sH2(T):
c = [0.0]
return c[0]
def sCO(T):
c = [5.9, 5.3, 7.0, 22.1, 14.0]
return c[0] + c[1]*np.exp(-c[2]*300.0/T) + c[3]*np.exp(-c[4]*300.0/T)
def sH2O(T):
c = [28.2, 3.39, 0.15, 2.95]
x = c[2]*(300.0/T) + c[3]*(300.0/T)**2.0
S = ((x**s)*np.exp(-x)*(x**k)/fat).sum()
return c[0]*( (1.0+x)*np.exp(-x) + c[1]*S*x**(0.333) )
def sOH(T):
c = [82.0]
return c[0]
def sH(T):
c = [12.0]
return c[0]
def sN2O(T):
c = [59.0, 0.99, 3.98, 0.16]
x = c[2]*(300.0/T) + c[3]*(300.0/T)**2.0
S = ((x**s)* | np.exp(-x) | numpy.exp |
import os
import numpy as np
import matplotlib.pyplot as plt
def compute_uncertainty_bounds(est: np.array, std: np.array):
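    """Return (lower, upper) bands at +/- 2 standard deviations, with the lower band clipped at 0."""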
return np.maximum(0, est - 2 * std), est + 2 * std
def plot_market_estimates(data: dict, est: np.array, std: np.array):
"""
It makes a market estimation plot with prices, trends, uncertainties and volumes.
Parameters
----------
data: dict
Downloaded data.
est: np.array
Price trend estimate at market-level.
std: np.array
Standard deviation estimate of price trend at market-level.
"""
print('\nPlotting market estimation...')
fig = plt.figure(figsize=(10, 3))
logp = np.log(data['price'])
t = logp.shape[1]
lb, ub = compute_uncertainty_bounds(est, std)
plt.grid(axis='both')
plt.title("Market", fontsize=15)
avg_price = np.exp(logp.mean(0))
l1 = plt.plot(data["dates"], avg_price, label="avg. price in {}".format(data['default_currency']), color="C0")
l2 = plt.plot(data["dates"], est[0], label="trend", color="C1")
l3 = plt.fill_between(data["dates"], lb[0], ub[0], alpha=0.2, label="+/- 2 st. dev.", color="C0")
plt.ylabel("avg. price in {}".format(data['default_currency']), fontsize=12)
plt.twinx()
l4 = plt.bar(data["dates"], data['volume'].mean(0), width=1, color='g', alpha=0.2, label='avg. volume')
l4[0].set_edgecolor('r')
for d in range(1, t):
if avg_price[d] - avg_price[d - 1] < 0:
l4[d].set_color('r')
plt.ylabel("avg. volume", fontsize=12)
ll = l1 + l2 + [l3] + [l4]
labels = [l.get_label() for l in ll]
plt.legend(ll, labels, loc="upper left")
fig_name = 'market_estimation.png'
fig.savefig(fig_name, dpi=fig.dpi)
print('Market estimation plot has been saved to {}/{}.'.format(os.getcwd(), fig_name))
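# Minimal usage sketch (commented out; the inputs below are synthetic and hypothetical, while in
# this project the real `data` dict comes from the download step):
#   T = 100
#   price = np.exp(3 + 0.01 * np.random.randn(5, T).cumsum(axis=1))
#   data = dict(price=price, volume=np.random.rand(5, T) * 1e6,
#               dates=np.arange(T), default_currency="USD")
#   est = np.exp(np.log(price).mean(0, keepdims=True))   # market-level trend, shape (1, T)
#   std = 0.05 * est
#   plot_market_estimates(data, est, std)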
def plot_sector_estimates(data: dict, info: dict, est: np.array, std: np.array):
"""
It makes a plot for each sector with prices, trends, uncertainties and volumes.
Parameters
----------
data: dict
Downloaded data.
info: dict
Model hierarchy information.
est: np.array
Price trend estimate at sector-level.
std: np.array
Standard deviation estimate of price trend at sector-level.
"""
print('\nPlotting sector estimation...')
num_columns = 3
logp = np.log(data['price'])
t = logp.shape[1]
lb, ub = compute_uncertainty_bounds(est, std)
NA_sectors = np.where(np.array([sec[:2] for sec in info['unique_sectors']]) == "NA")[0]
num_NA_sectors = len(NA_sectors)
fig = plt.figure(figsize=(20, max(info['num_sectors'] - num_NA_sectors, 5)))
j = 0
for i in range(info['num_sectors']):
if i not in NA_sectors:
j += 1
plt.subplot(int(np.ceil((info['num_sectors'] - num_NA_sectors) / num_columns)), num_columns, j)
plt.grid(axis='both')
plt.title(info['unique_sectors'][i], fontsize=15)
idx_sectors = np.where(np.array(info['sectors_id']) == i)[0]
avg_price = np.exp(logp[idx_sectors].reshape(-1, t).mean(0))
l1 = plt.plot(data["dates"], avg_price,
label="avg. price in {}".format(data['default_currency']), color="C0")
l2 = plt.plot(data["dates"], est[i], label="trend", color="C1")
l3 = plt.fill_between(data["dates"], lb[i], ub[i], alpha=0.2, label="+/- 2 st. dev.",
color="C0")
plt.ylabel("avg. price in {}".format(data['default_currency']), fontsize=12)
plt.xticks(rotation=45)
plt.twinx()
l4 = plt.bar(data["dates"],
data['volume'][np.where( | np.array(info['sectors_id']) | numpy.array |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Generator to yield resampled volume data for training and validation
"""
# %%
from keras.models import load_model, Model
from matplotlib import pyplot as plt
import numpy as np
import os
from os import path
import random
import SimpleITK as sitk
from stl import mesh
from utils import data_loading_funcs as dlf
from utils import mhd_utils as mu
from utils import reg_evaluator as regev
from utils import volume_resampler_3d as vr
import tensorflow as tf
from utils import registration_reader as rr
import scipy
#from augment_data import augment
# %%
class VolumeDataGenerator(object):
"""Generate volume image for training or validation
#Arguments
"""
def __init__(self,
data_folder,
case_num_range,
case_num_range_2=None,
max_registration_error = 20.0):
self.data_folder = data_folder
cases = []
# Go through all the case
for caseIdx in range(case_num_range[0], case_num_range[1]+1):
caseFolder = 'Case{:04d}'.format(caseIdx)
full_case = path.join(data_folder, caseFolder)
if not path.isdir(full_case):
continue
else:
cases.append(caseIdx)
if case_num_range_2 != None:
for caseIdx in range(case_num_range_2[0], case_num_range_2[1]+1):
caseFolder = 'Case{:04d}'.format(caseIdx)
full_case = path.join(data_folder, caseFolder)
if not path.isdir(full_case):
continue
else:
cases.append(caseIdx)
self.good_cases = np.asarray(cases, dtype=np.int32)
self.num_cases = self.good_cases.size
random.seed()
self.e_t = 0.5
self.e_rot = 1
self.isMultiGauss = False
self.max_error = max_registration_error
print('VolumeDataGenerator: max_registration_error = {}'.format(self.max_error))
#self.width, self.height, self.depth = 96, 96, 32
# ----- #
def get_sample_multi_gauss(self,mean,cov):
return np.random.multivariate_normal(mean,cov)
def get_num_cases(self):
return self.num_cases
# ----- #
def _get_random_value(self, r, center, hasSign):
randNumber = random.random() * r + center
if hasSign:
sign = random.random() > 0.5
if sign == False:
randNumber *= -1
return randNumber
# ----- #
def get_array_from_itk_matrix(self, itk_mat):
mat = np.reshape(np.asarray(itk_mat), (3,3))
return mat
# ----- #
def generate(self, shuffle=True, shape=(96,96,96)):
"""
"""
currentIdx = 0
np.random.seed()
(width, height, depth) = shape
print('Shuffle = {}'.format(shuffle))
while True:
idx = currentIdx % self.num_cases
currentIdx += 1
# Shuffle cases
if idx == 0:
if shuffle:
case_array = np.random.permutation(self.good_cases)
else:
case_array = self.good_cases
case_no = case_array[idx]
sampledFixed, sampledMoving, err, params = self.create_sample(case_no, shape)
#sampledFixed, sampledMoving, pos_neg, err, params = self.create_sample(450, shape)
            print('Sample generated from Case{:04d}'.format(case_no))
# Put into 4D array
sample4D = np.zeros((depth, height, width, 2), dtype=np.ubyte)
sample4D[:,:,:,0] = sitk.GetArrayFromImage(sampledFixed)
sample4D[:,:,:,1] = sitk.GetArrayFromImage(sampledMoving)
yield sample4D, err, params
# ----- #
def generate_batch(self, batch_size=32, shape=(96,96,32)):
"""Used for keras training and validation
"""
batch_index = 0
np.random.seed()
(width, height, depth) = shape
while True:
# Shuffle cases
if batch_index == 0:
case_array = np.random.permutation(self.good_cases)
#current_index = (batch_index * batch_size) % self.num_cases
current_index = batch_index * batch_size
if (current_index + batch_size) < self.num_cases:
current_batch_size = batch_size
batch_index += 1
else:
# handle special case where only 1 sample left for the batch
if (self.num_cases - current_index) > 1:
current_batch_size = self.num_cases - current_index
else:
current_batch_size = 2
current_index -= 1
batch_index = 0
batch_samples = np.zeros((current_batch_size, depth, height, width, 2), dtype=np.ubyte)
#batch_labels = []
batch_errors = []
batch_params = []
for k in range(current_batch_size):
case_no = case_array[k + current_index]
#print(case_no)
sampledFixed, sampledMoving, err, params = self.create_sample(case_no, shape)
# Put into 4D array
sample4D = np.zeros((depth, height, width, 2), dtype=np.ubyte)
sample4D[:,:,:,0] = sitk.GetArrayFromImage(sampledFixed)
sample4D[:,:,:,1] = sitk.GetArrayFromImage(sampledMoving)
batch_samples[k, :,:,:,:] = sample4D
#batch_labels.append(pos_neg)
batch_errors.append([err])
batch_params.append(params)
#yield (batch_samples, [np.asarray(batch_errors), np.asarray(batch_params)])
yield (batch_samples, np.asarray(batch_params))
#yield (batch_samples, np.asarray(batch_errors))
def generate_batch_classification(self, batch_size=32, shape=(96,96,32)):
"""Used for keras training and validation
"""
batch_index = 0
np.random.seed()
(width, height, depth) = shape
while True:
# Shuffle cases
if batch_index == 0:
case_array = | np.random.permutation(self.good_cases) | numpy.random.permutation |
# -*- coding: utf-8 -*-
"""
Regularization path OT solvers
"""
# Author: <NAME> <<EMAIL>>
# License: MIT License
import numpy as np
import scipy.sparse as sp
def recast_ot_as_lasso(a, b, C):
r"""This function recasts the l2-penalized UOT problem as a Lasso problem.
Recall the l2-penalized UOT problem defined in
:ref:`[41] <references-regpath>`
.. math::
\text{UOT}_{\lambda} = \min_T <C, T> + \lambda \|T 1_m -
\mathbf{a}\|_2^2 +
\lambda \|T^T 1_n - \mathbf{b}\|_2^2
s.t.
T \geq 0
where :
- :math:`C` is the cost matrix
- :math:`\lambda` is the l2-regularization parameter
- :math:`\mathbf{a}` and :math:`\mathbf{b}` are the source and target \
distributions
- :math:`T` is the transport plan to optimize
The problem above can be reformulated as a non-negative penalized
linear regression problem, particularly Lasso
.. math::
\text{UOT2}_{\lambda} = \min_{\mathbf{t}} \gamma \mathbf{c}^T
\mathbf{t} + 0.5 * \|H \mathbf{t} - \mathbf{y}\|_2^2
s.t.
\mathbf{t} \geq 0
where :
- :math:`\mathbf{c}` is the flattened version of the cost matrix :math:`C`
- :math:`\mathbf{y}` is the concatenation of vectors :math:`\mathbf{a}` \
and :math:`\mathbf{b}`
- :math:`H` is a metric matrix, see :ref:`[41] <references-regpath>` for \
the design of :math:`H`. The matrix product :math:`H\mathbf{t}` \
computes both the source marginal and the target marginals.
- :math:`\mathbf{t}` is the flattened version of the transport plan \
:math:`T`
Parameters
----------
a : np.ndarray (dim_a,)
Histogram of dimension dim_a
b : np.ndarray (dim_b,)
Histogram of dimension dim_b
C : np.ndarray, shape (dim_a, dim_b)
Cost matrix
Returns
-------
H : np.ndarray (dim_a+dim_b, dim_a*dim_b)
Design matrix that contains only 0 and 1
y : np.ndarray (ns + nt, )
Concatenation of histograms :math:`\mathbf{a}` and :math:`\mathbf{b}`
c : np.ndarray (ns * nt, )
Flattened array of the cost matrix
Examples
--------
>>> import ot
>>> a = np.array([0.2, 0.3, 0.5])
>>> b = np.array([0.1, 0.9])
>>> C = np.array([[16., 25.], [28., 16.], [40., 36.]])
>>> H, y, c = ot.regpath.recast_ot_as_lasso(a, b, C)
>>> H.toarray()
array([[1., 1., 0., 0., 0., 0.],
[0., 0., 1., 1., 0., 0.],
[0., 0., 0., 0., 1., 1.],
[1., 0., 1., 0., 1., 0.],
[0., 1., 0., 1., 0., 1.]])
>>> y
array([0.2, 0.3, 0.5, 0.1, 0.9])
>>> c
array([16., 25., 28., 16., 40., 36.])
References
----------
.. [41] <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. (2021).
Unbalanced optimal transport through non-negative penalized
linear regression. NeurIPS.
"""
dim_a = np.shape(a)[0]
dim_b = np.shape(b)[0]
y = np.concatenate((a, b))
c = C.flatten()
jHa = np.arange(dim_a * dim_b)
iHa = np.repeat(np.arange(dim_a), dim_b)
jHb = np.arange(dim_a * dim_b)
iHb = np.tile(np.arange(dim_b), dim_a) + dim_a
j = np.concatenate((jHa, jHb))
i = np.concatenate((iHa, iHb))
H = sp.csc_matrix((np.ones(dim_a * dim_b * 2), (i, j)),
shape=(dim_a + dim_b, dim_a * dim_b))
return H, y, c
def recast_semi_relaxed_as_lasso(a, b, C):
r"""This function recasts the semi-relaxed l2-UOT problem as Lasso problem.
.. math::
\text{semi-relaxed UOT} = \min_T <C, T>
+ \lambda \|T 1_m - \mathbf{a}\|_2^2
s.t.
T^T 1_n = \mathbf{b}
\mathbf{t} \geq 0
where :
- :math:`C` is the metric cost matrix
- :math:`\lambda` is the l2-regularization parameter
- :math:`\mathbf{a}` and :math:`\mathbf{b}` are the source and target \
distributions
- :math:`T` is the transport plan to optimize
The problem above can be reformulated as follows
.. math::
\text{semi-relaxed UOT2} = \min_t \gamma \mathbf{c}^T t
+ 0.5 * \|H_r \mathbf{t} - \mathbf{a}\|_2^2
s.t.
H_c \mathbf{t} = \mathbf{b}
\mathbf{t} \geq 0
where :
- :math:`\mathbf{c}` is flattened version of the cost matrix :math:`C`
- :math:`\gamma = 1/\lambda` is the l2-regularization parameter
- :math:`H_r` is a metric matrix which computes the sum along the \
rows of the transport plan :math:`T`
- :math:`H_c` is a metric matrix which computes the sum along the \
columns of the transport plan :math:`T`
- :math:`\mathbf{t}` is the flattened version of :math:`T`
Parameters
----------
a : np.ndarray (dim_a,)
Histogram of dimension dim_a
b : np.ndarray (dim_b,)
Histogram of dimension dim_b
C : np.ndarray, shape (dim_a, dim_b)
Cost matrix
Returns
-------
Hr : np.ndarray (dim_a, dim_a * dim_b)
Auxiliary matrix constituted by 0 and 1, which computes
the sum along the rows of transport plan :math:`T`
Hc : np.ndarray (dim_b, dim_a * dim_b)
Auxiliary matrix constituted by 0 and 1, which computes
the sum along the columns of transport plan :math:`T`
c : np.ndarray (ns * nt, )
Flattened array of the cost matrix
Examples
--------
>>> import ot
>>> a = np.array([0.2, 0.3, 0.5])
>>> b = np.array([0.1, 0.9])
>>> C = np.array([[16., 25.], [28., 16.], [40., 36.]])
>>> Hr,Hc,c = ot.regpath.recast_semi_relaxed_as_lasso(a, b, C)
>>> Hr.toarray()
array([[1., 1., 0., 0., 0., 0.],
[0., 0., 1., 1., 0., 0.],
[0., 0., 0., 0., 1., 1.]])
>>> Hc.toarray()
array([[1., 0., 1., 0., 1., 0.],
[0., 1., 0., 1., 0., 1.]])
>>> c
array([16., 25., 28., 16., 40., 36.])
"""
dim_a = np.shape(a)[0]
dim_b = np.shape(b)[0]
c = C.flatten()
jHr = np.arange(dim_a * dim_b)
iHr = np.repeat(np.arange(dim_a), dim_b)
jHc = np.arange(dim_a * dim_b)
iHc = np.tile(np.arange(dim_b), dim_a)
Hr = sp.csc_matrix((np.ones(dim_a * dim_b), (iHr, jHr)),
shape=(dim_a, dim_a * dim_b))
Hc = sp.csc_matrix((np.ones(dim_a * dim_b), (iHc, jHc)),
shape=(dim_b, dim_a * dim_b))
return Hr, Hc, c
def ot_next_gamma(phi, delta, HtH, Hty, c, active_index, current_gamma):
r""" This function computes the next value of gamma if a variable
is added in the next iteration of the regularization path.
We look for the largest value of gamma such that
the gradient of an inactive variable vanishes
.. math::
\max_{i \in \bar{A}} \frac{\mathbf{h}_i^T(H_A \phi - \mathbf{y})}
{\mathbf{h}_i^T H_A \delta - \mathbf{c}_i}
where :
- A is the current active set
- :math:`\mathbf{h}_i` is the :math:`i` th column of the design \
matrix :math:`{H}`
- :math:`{H}_A` is the sub-matrix constructed by the columns of \
:math:`{H}` whose indices belong to the active set A
- :math:`\mathbf{c}_i` is the :math:`i` th element of the cost vector \
:math:`\mathbf{c}`
- :math:`\mathbf{y}` is the concatenation of the source and target \
distributions
- :math:`\phi` is the intercept of the solutions at the current iteration
- :math:`\delta` is the slope of the solutions at the current iteration
Parameters
----------
phi : np.ndarray (size(A), )
Intercept of the solutions at the current iteration
delta : np.ndarray (size(A), )
Slope of the solutions at the current iteration
HtH : np.ndarray (dim_a * dim_b, dim_a * dim_b)
Matrix product of :math:`{H}^T {H}`
Hty : np.ndarray (dim_a + dim_b, )
Matrix product of :math:`{H}^T \mathbf{y}`
c: np.ndarray (dim_a * dim_b, )
Flattened array of the cost matrix :math:`{C}`
active_index : list
Indices of active variables
current_gamma : float
Value of the regularization parameter at the beginning of the current \
iteration
Returns
-------
next_gamma : float
Value of gamma if a variable is added to active set in next iteration
next_active_index : int
Index of variable to be activated
References
----------
.. [41] <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. (2021).
Unbalanced optimal transport through non-negative penalized
linear regression. NeurIPS.
"""
M = (HtH[:, active_index].dot(phi) - Hty) / \
(HtH[:, active_index].dot(delta) - c + 1e-16)
M[active_index] = 0
M[M > (current_gamma - 1e-10 * current_gamma)] = 0
return np.max(M), | np.argmax(M) | numpy.argmax |
#! /usr/bin/env python
# encoding: utf-8
# Copied from https://github.com/mauriciovander/silence-removal/blob/master/segment.py
import numpy
import scipy.io.wavfile as wf
import sys
from synth.config import config
class VoiceActivityDetectionYAM:
def __init__(self, sr, ms, channel):
self.__sr = sr
self.__channel = channel
self.__step = int(sr/50)
self.__buffer_size = int(sr/50)
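        # sr/50 samples per step and per buffer, i.e. 20 ms frames at any sample rate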
self.__buffer_back = numpy.array([],dtype=numpy.int16)
self.__buffer = numpy.array([],dtype=numpy.int16)
self.__out_buffer_back = numpy.array([],dtype=numpy.int16)
self.__out_buffer = numpy.array([],dtype=numpy.int16)
self.__n = 0
self.__VADthd = 0.
self.__VADn = 0.
self.__silence_counter = 0
self.__segment_count = 0
self.__voice_detected = False
self.__silence_thd_ms = ms
self.out_segments = []
self.out_segments_back = []
# Voice Activity Detection
# Adaptive threshold
def vad(self, _frame):
frame = | numpy.array(_frame) | numpy.array |
"""Script that calculates the parameters for a log normal distribution given the input
To use: python calculate_parameters file1.csv file2.csv ... fileN.csv [optional output_dir=output]
The details of the calculations in this script are in the appendix of the docs.
"""
import sys, csv
from scipy.optimize import minimize, Bounds, NonlinearConstraint
from scipy.stats import norm, lognorm
import numpy as np
def main(files, output_dir):
to_ignore = 0
for file in files:
company_sizes = read_file(file)
parameters = {}
options = []
for key, size_dist in company_sizes.items():
option_1 = max_likelihood(size_dist)
option_2 = match_expectation(size_dist)
options.append((option_1, option_2))
if option_1 is not None:
var = lognorm.var(option_1[1],scale=np.exp(option_1[0]))
elif option_2 is not None:
option_1 = option_2
var = lognorm.var(option_2[1],scale=np.exp(option_2[0]))
else:
continue
if option_1[0] == 0 and option_2[1] == 1:
for n in size_dist.values():
to_ignore += n
print('ignoring ' + key)
else:
parameters[key] = option_1
                #max_likelihood(size_dist)
with open(output_dir + file[:-4] + '_out.csv', 'w') as csvfile:
writer = csv.writer(csvfile)
for key, params in parameters.items():
writer.writerow([key, params[0], params[1]])
print(to_ignore)
""" size_dist parameter is a dictionary with form {'lower-upper': n, ... 'lower+': n}
like the ONS size distributions
return mean and standard deviation (not variance)
"""
def match_expectation(size_dist):
result = minimize(lambda x: expectation_difference(x, size_dist), (0, 1), bounds=Bounds([-np.inf, 0], [np.inf, np.inf]))
if result.success:
return result.x
else:
return None
def max_likelihood(size_dist, distribution_mean=None):
""" Returns the estimated mean, sd from size dist
Arguments
------------
size_dist: dict of the form {str: float or int} where the string is 'a_i-a_i+1' or 'a_n+' and the float or int is the proportion or number of companies in that bin.
(optional) distribution_mean: if the mean of the distribution is known then this is a constraint that can be used to improve the estimation.
"""
if distribution_mean is None:
result = minimize(lambda x: -likelihood(x, size_dist), (0.5, 1.5), jac=lambda x: -likelihood_jacobian(x, size_dist), bounds=Bounds([-np.inf, 0], [np.inf, np.inf]))
else:
result = minimize(lambda x: -likelihood(x, size_dist), (0.5, 1.5), jac=lambda x: -likelihood_jacobian(x, size_dist), bounds=Bounds([-np.inf, 0], [np.inf, np.inf]), constraints={'type': 'eq', 'fun': lambda x: np.exp(x[0] + x[1] ** 2 / 2) - distribution_mean})
#print(result)
if result.success:
return result.x
else:
return None
def likelihood(params, size_dist):
mean, sd = params
total = 0
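    # For each size band, x below is the probability that a lognormal(mean, sd) draw falls in
    # the band, computed via the normal CDF of the log-transformed bin edges.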
for size_band, n in size_dist.items():
if '-' in size_band:
lower = int(size_band.split('-')[0])
upper = int(size_band.split('-')[1]) + 1
else:
lower = int(size_band.split('+')[0])
upper = np.inf
if upper == np.inf:
x = 1 - norm.cdf((np.log(lower) - mean) / sd)
elif lower == 0:
x = norm.cdf((np.log(upper) - mean) / sd)
else:
x = norm.cdf(( | np.log(upper) | numpy.log |
"""Dynamic Imaging of Coherent Sources (DICS)."""
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
from copy import deepcopy
import numpy as np
from scipy import linalg
from ..utils import logger, verbose, warn
from ..forward import _subject_from_forward
from ..minimum_norm.inverse import combine_xyz, _check_reference
from ..source_estimate import _make_stc
from ..time_frequency import CrossSpectralDensity, csd_epochs
from ._lcmv import _prepare_beamformer_input, _setup_picks, _reg_pinv
from ..externals import six
@verbose
def _apply_dics(data, info, tmin, forward, noise_csd, data_csd, reg,
label=None, picks=None, pick_ori=None, real_filter=False,
verbose=None):
"""Dynamic Imaging of Coherent Sources (DICS)."""
is_free_ori, _, proj, vertno, G =\
_prepare_beamformer_input(info, forward, label, picks, pick_ori)
Cm = data_csd.data.copy()
# Take real part of Cm to compute real filters
if real_filter:
Cm = Cm.real
# Tikhonov regularization using reg parameter to control for
# trade-off between spatial resolution and noise sensitivity
# eq. 25 in Gross and Ioannides, 1999 Phys. Med. Biol. 44 2081
Cm_inv, _ = _reg_pinv(Cm, reg)
del Cm
# Compute spatial filters
W = np.dot(G.T, Cm_inv)
n_orient = 3 if is_free_ori else 1
n_sources = G.shape[1] // n_orient
for k in range(n_sources):
Wk = W[n_orient * k: n_orient * k + n_orient]
Gk = G[:, n_orient * k: n_orient * k + n_orient]
Ck = np.dot(Wk, Gk)
# TODO: max-power is not implemented yet, however DICS does employ
# orientation picking when one eigen value is much larger than the
# other
if is_free_ori:
# Free source orientation
Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk)
else:
# Fixed source orientation
Wk /= Ck
# Noise normalization
noise_norm = np.dot(np.dot(Wk.conj(), noise_csd.data), Wk.T)
noise_norm = np.abs(noise_norm).trace()
Wk /= np.sqrt(noise_norm)
# Pick source orientation normal to cortical surface
if pick_ori == 'normal':
W = W[2::3]
is_free_ori = False
if isinstance(data, np.ndarray) and data.ndim == 2:
data = [data]
return_single = True
else:
return_single = False
subject = _subject_from_forward(forward)
for i, M in enumerate(data):
if len(M) != len(picks):
raise ValueError('data and picks must have the same length')
if not return_single:
logger.info("Processing epoch : %d" % (i + 1))
# Apply SSPs
if info['projs']:
M = np.dot(proj, M)
# project to source space using beamformer weights
if is_free_ori:
sol = np.dot(W, M)
logger.info('combining the current components...')
sol = combine_xyz(sol)
else:
# Linear inverse: do not delay compuation due to non-linear abs
sol = np.dot(W, M)
tstep = 1.0 / info['sfreq']
if np.iscomplexobj(sol):
sol = np.abs(sol) # XXX : STC cannot contain (yet?) complex values
yield _make_stc(sol, vertices=vertno, tmin=tmin, tstep=tstep,
subject=subject)
logger.info('[done]')
@verbose
def dics(evoked, forward, noise_csd, data_csd, reg=0.05, label=None,
pick_ori=None, real_filter=False, verbose=None):
"""Dynamic Imaging of Coherent Sources (DICS).
Compute a Dynamic Imaging of Coherent Sources (DICS) [1]_ beamformer
on evoked data and return estimates of source time courses.
.. note:: Fixed orientation forward operators with ``real_filter=False``
will result in complex time courses, in which case absolute
values will be returned.
.. note:: This implementation has not been heavily tested so please
report any issues or suggestions.
Parameters
----------
evoked : Evoked
Evoked data.
forward : dict
Forward operator.
noise_csd : instance of CrossSpectralDensity
The noise cross-spectral density.
data_csd : instance of CrossSpectralDensity
The data cross-spectral density.
reg : float
The regularization for the cross-spectral density.
label : Label | None
Restricts the solution to a given label.
pick_ori : None | 'normal'
If 'normal', rather than pooling the orientations by taking the norm,
only the radial component is kept.
real_filter : bool
If True, take only the real part of the cross-spectral-density matrices
to compute real filters as in [2]_. Default is False.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
stc : SourceEstimate | VolSourceEstimate
Source time courses
See Also
--------
dics_epochs
Notes
-----
For more information about ``real_filter``, see the
`supplemental information <http://www.cell.com/cms/attachment/616681/4982593/mmc1.pdf>`_
from [2]_.
References
----------
.. [1] <NAME> al. Dynamic imaging of coherent sources: Studying neural
interactions in the human brain. PNAS (2001) vol. 98 (2) pp. 694-699
.. [2] <NAME>, <NAME>, <NAME> (2011) Oscillatory Synchronization
in Large-Scale Cortical Networks Predicts Perception.
Neuron 69:387-396.
""" # noqa: E501
_check_reference(evoked)
info = evoked.info
data = evoked.data
tmin = evoked.times[0]
picks = _setup_picks(picks=None, info=info, forward=forward)
data = data[picks]
stc = _apply_dics(data, info, tmin, forward, noise_csd, data_csd, reg=reg,
label=label, pick_ori=pick_ori, picks=picks,
real_filter=real_filter)
return six.advance_iterator(stc)
@verbose
def dics_epochs(epochs, forward, noise_csd, data_csd, reg=0.05, label=None,
pick_ori=None, return_generator=False, real_filter=False,
verbose=None):
"""Dynamic Imaging of Coherent Sources (DICS).
Compute a Dynamic Imaging of Coherent Sources (DICS) beamformer
on single trial data and return estimates of source time courses.
.. note:: Fixed orientation forward operators with ``real_filter=False``
will result in complex time courses, in which case absolute
values will be returned.
.. warning:: This implementation has not been heavily tested so please
report any issues or suggestions.
Parameters
----------
epochs : Epochs
Single trial epochs.
forward : dict
Forward operator.
noise_csd : instance of CrossSpectralDensity
The noise cross-spectral density.
data_csd : instance of CrossSpectralDensity
The data cross-spectral density.
reg : float
The regularization for the cross-spectral density.
label : Label | None
Restricts the solution to a given label.
pick_ori : None | 'normal'
If 'normal', rather than pooling the orientations by taking the norm,
only the radial component is kept.
return_generator : bool
Return a generator object instead of a list. This allows iterating
over the stcs without having to keep them all in memory.
real_filter : bool
If True, take only the real part of the cross-spectral-density matrices
to compute real filters as in [1]_. Default is False.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
stc: list | generator of SourceEstimate | VolSourceEstimate
The source estimates for all epochs
See Also
--------
dics
References
----------
.. [1] <NAME>, <NAME>, <NAME> (2011) Oscillatory Synchronization
in Large-Scale Cortical Networks Predicts Perception.
Neuron 69:387-396.
"""
_check_reference(epochs)
info = epochs.info
tmin = epochs.times[0]
picks = _setup_picks(picks=None, info=info, forward=forward)
data = epochs.get_data()[:, picks, :]
stcs = _apply_dics(data, info, tmin, forward, noise_csd, data_csd, reg=reg,
label=label, pick_ori=pick_ori, picks=picks,
real_filter=real_filter)
if not return_generator:
stcs = list(stcs)
return stcs
@verbose
def dics_source_power(info, forward, noise_csds, data_csds, reg=0.05,
label=None, pick_ori=None, real_filter=False,
verbose=None):
"""Dynamic Imaging of Coherent Sources (DICS).
Calculate source power in time and frequency windows specified in the
calculation of the data cross-spectral density matrix or matrices. Source
power is normalized by noise power.
NOTE : This implementation has not been heavily tested so please
report any issues or suggestions.
Parameters
----------
info : dict
Measurement info, e.g. epochs.info.
forward : dict
Forward operator.
noise_csds : instance or list of instances of CrossSpectralDensity
The noise cross-spectral density matrix for a single frequency or a
list of matrices for multiple frequencies.
data_csds : instance or list of instances of CrossSpectralDensity
The data cross-spectral density matrix for a single frequency or a list
of matrices for multiple frequencies.
reg : float
The regularization for the cross-spectral density.
label : Label | None
Restricts the solution to a given label.
pick_ori : None | 'normal'
If 'normal', rather than pooling the orientations by taking the norm,
only the radial component is kept.
real_filter : bool
If True, take only the real part of the cross-spectral-density matrices
to compute real filters.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
stc : SourceEstimate | VolSourceEstimate
Source power with frequency instead of time.
Notes
-----
The original reference is:
<NAME>. Dynamic imaging of coherent sources: Studying neural
interactions in the human brain. PNAS (2001) vol. 98 (2) pp. 694-699
"""
if isinstance(data_csds, CrossSpectralDensity):
data_csds = [data_csds]
if isinstance(noise_csds, CrossSpectralDensity):
noise_csds = [noise_csds]
def csd_shapes(x):
return tuple(c.data.shape for c in x)
if (csd_shapes(data_csds) != csd_shapes(noise_csds) or
any(len(set(csd_shapes(c))) > 1 for c in [data_csds, noise_csds])):
raise ValueError('One noise CSD matrix should be provided for each '
'data CSD matrix and vice versa. All CSD matrices '
'should have identical shape.')
frequencies = []
for data_csd, noise_csd in zip(data_csds, noise_csds):
if not np.allclose(data_csd.frequencies, noise_csd.frequencies):
raise ValueError('Data and noise CSDs should be calculated at '
'identical frequencies')
# If CSD is summed over multiple frequencies, take the average
# frequency
if(len(data_csd.frequencies) > 1):
frequencies.append(np.mean(data_csd.frequencies))
else:
frequencies.append(data_csd.frequencies[0])
fmin = frequencies[0]
if len(frequencies) > 2:
fstep = []
for i in range(len(frequencies) - 1):
fstep.append(frequencies[i + 1] - frequencies[i])
if not np.allclose(fstep, np.mean(fstep), 1e-5):
warn('Uneven frequency spacing in CSD object, frequencies in the '
'resulting stc file will be inaccurate.')
fstep = fstep[0]
elif len(frequencies) > 1:
fstep = frequencies[1] - frequencies[0]
else:
fstep = 1 # dummy value
picks = _setup_picks(picks=None, info=info, forward=forward)
is_free_ori, _, proj, vertno, G =\
_prepare_beamformer_input(info, forward, label, picks=picks,
pick_ori=pick_ori)
n_orient = 3 if is_free_ori else 1
n_sources = G.shape[1] // n_orient
source_power = np.zeros((n_sources, len(data_csds)))
n_csds = len(data_csds)
logger.info('Computing DICS source power...')
for i, (data_csd, noise_csd) in enumerate(zip(data_csds, noise_csds)):
if n_csds > 1:
logger.info(' computing DICS spatial filter %d out of %d' %
(i + 1, n_csds))
Cm = data_csd.data.copy()
# Take real part of Cm to compute real filters
if real_filter:
Cm = Cm.real
# Tikhonov regularization using reg parameter to control for
# trade-off between spatial resolution and noise sensitivity
# eq. 25 in Gross and Ioannides, 1999 Phys. Med. Biol. 44 2081
Cm_inv, _ = _reg_pinv(Cm, reg)
del Cm
# Compute spatial filters
W = np.dot(G.T, Cm_inv)
for k in range(n_sources):
Wk = W[n_orient * k: n_orient * k + n_orient]
Gk = G[:, n_orient * k: n_orient * k + n_orient]
Ck = np.dot(Wk, Gk)
if is_free_ori:
# Free source orientation
Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk)
else:
# Fixed source orientation
Wk /= Ck
# Noise normalization
noise_norm = np.dot(np.dot(Wk.conj(), noise_csd.data), Wk.T)
noise_norm = | np.abs(noise_norm) | numpy.abs |
"""Learn ideal points with the text-based ideal point model (TBIP).
Let y_{dv} denote the counts of word v in document d. Let x_d refer to the
ideal point of the author of document d. Then we model:
theta, beta ~ Gamma(alpha, alpha)
x, eta ~ N(0, 1)
y_{dv} ~ Pois(sum_k theta_dk beta_kv exp(x_d * eta_kv).
We perform variational inference to provide estimates for the posterior
distribution of each latent variable. We take reparameterization gradients,
using a lognormal variational family for the positive variables (theta, beta)
and a normal variational family for the real variables (x, eta).
The directory `data/{data_name}/clean/` should have the following four files:
1. `counts.npz`: a [num_documents, num_words] sparse matrix containing the
word counts for each document.
2. `author_indices.npy`: a [num_documents] vector where each entry is an
integer in the set {0, 1, ..., num_authors - 1}, indicating the author of
the corresponding document in `counts.npz`.
3. `vocabulary.txt`: a [num_words]-length file where each line is a string
denoting the corresponding word in the vocabulary.
4. `author_map.txt`: a [num_authors]-length file where each line is a string
denoting the name of an author in the corpus.
We provide more details in our paper [1].
#### References
[1]: <NAME>, <NAME>, <NAME>. Text-Based Ideal Points. In
_Conference of the Association for Computational Linguistics_, 2020.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import time
from absl import flags
import numpy as np
import scipy.sparse as sparse
import tensorflow as tf
import tensorflow_probability as tfp
flags.DEFINE_float("learning_rate",
default=0.01,
help="Adam learning rate.")
flags.DEFINE_integer("max_steps",
default=1000000,
help="Number of training steps to run.")
flags.DEFINE_integer("num_topics",
default=50,
help="Number of topics.")
flags.DEFINE_integer("batch_size",
default=1024,
help="Batch size.")
flags.DEFINE_integer("num_samples",
default=1,
help="Number of samples to use for ELBO approximation.")
flags.DEFINE_enum("counts_transformation",
default="nothing",
enum_values=["nothing", "binary", "sqrt", "log"],
help="Transformation used on counts data.")
flags.DEFINE_boolean("pre_initialize_parameters",
default=True,
help="Whether to use pre-initialized document and topic "
"intensities (with Poisson factorization).")
flags.DEFINE_string("data",
default="senate-speeches-114",
help="Data source being used.")
flags.DEFINE_integer("senate_session",
default=113,
help="Senate session (used only when data is "
"'senate-speech-comparisons'.")
flags.DEFINE_integer("print_steps",
default=500,
help="Number of steps to print and save results.")
flags.DEFINE_integer("seed",
default=123,
help="Random seed to be used.")
FLAGS = flags.FLAGS
def build_input_pipeline(data_dir,
batch_size,
random_state,
counts_transformation="nothing"):
"""Load data and build iterator for minibatches.
Args:
data_dir: The directory where the data is located. There must be four
files inside the rep: `counts.npz`, `author_indices.npy`,
`author_map.txt`, and `vocabulary.txt`.
batch_size: The batch size to use for training.
random_state: A NumPy `RandomState` object, used to shuffle the data.
counts_transformation: A string indicating how to transform the counts.
One of "nothing", "binary", "log", or "sqrt".
"""
counts = sparse.load_npz(os.path.join(data_dir, "counts.npz"))
num_documents, num_words = counts.shape
author_indices = np.load(
os.path.join(data_dir, "author_indices.npy")).astype(np.int32)
num_authors = | np.max(author_indices + 1) | numpy.max |
import numpy as np
import torch
import torch.multiprocessing as mp
from torch.multiprocessing import Pool
def init_nnf(source_size, target_size=None):
target_size = source_size if target_size is None else target_size
y, x = np.meshgrid(np.linspace(0, target_size[1]-1, source_size[1], dtype=np.int32),
np.linspace(0, target_size[0]-1, source_size[0], dtype=np.int32))
return np.stack((x, y), axis=2)
def upSample_nnf(nnf, target_size=None):
target_size = [x * 2 for x in nnf.shape] if target_size is None else target_size
ratio = np.array([target_size[0] / nnf.shape[0], target_size[1] / nnf.shape[1]])
coords = np.stack(np.meshgrid( | np.arange(target_size[1]) | numpy.arange |
import visr_bear
import numpy as np
import numpy.testing as npt
from pathlib import Path
import scipy.signal as sig
from utils import data_path
def do_render(renderer, period, objects=None, direct_speakers=None, hoa=None):
not_none = [x for x in [objects, direct_speakers, hoa] if x is not None][0]
length = not_none.shape[1]
dummy_samples = np.zeros((0, length), dtype=np.float32)
output = np.zeros((2, length), dtype=np.float32)
def convert(samples):
if samples is None:
return dummy_samples
return samples.astype(np.float32, order="C", copy=False)
objects = convert(objects)
direct_speakers = convert(direct_speakers)
hoa = convert(hoa)
for i in range(length // period):
s = np.s_[:, i * period : (i + 1) * period]
renderer.process(objects[s], direct_speakers[s], hoa[s], output[s])
return output
def correlate(a, b):
"""returns (delay, correlation), where correlation
is the full cross-correlation, and delay is a vector of
delays corresponding to the delay from a to b for each
sample in correlation."""
correlation = np.correlate(b, a, mode="full")
delay = np.arange(len(correlation)) - (len(a) - 1)
return delay, correlation
period = 512
def render_directspeakers_front(data_file, samples):
config = visr_bear.api.Config()
config.num_objects_channels = 0
config.num_direct_speakers_channels = 1
config.period_size = period
config.data_path = data_file
renderer = visr_bear.api.Renderer(config)
dsi = visr_bear.api.DirectSpeakersInput()
dsi.rtime = visr_bear.api.Time(0, 1)
dsi.duration = visr_bear.api.Time(1, 1)
renderer.add_direct_speakers_block(0, dsi)
return do_render(renderer, period, direct_speakers=samples)
def render_objects_front(data_file, samples):
config = visr_bear.api.Config()
config.num_objects_channels = 1
config.num_direct_speakers_channels = 0
config.period_size = period
config.data_path = data_file
renderer = visr_bear.api.Renderer(config)
oi = visr_bear.api.ObjectsInput()
oi.rtime = visr_bear.api.Time(0, 1)
oi.duration = visr_bear.api.Time(1, 1)
oi.type_metadata.position = visr_bear.api.PolarPosition(0, 0, 1)
renderer.add_objects_block(0, oi)
return do_render(renderer, period, objects=samples)
def render_diffuse_front(data_file, samples):
config = visr_bear.api.Config()
config.num_objects_channels = 1
config.num_direct_speakers_channels = 0
config.period_size = period
config.data_path = data_file
renderer = visr_bear.api.Renderer(config)
oi = visr_bear.api.ObjectsInput()
oi.rtime = visr_bear.api.Time(0, 1)
oi.duration = visr_bear.api.Time(1, 1)
oi.type_metadata.position = visr_bear.api.PolarPosition(0, 0, 1)
oi.type_metadata.diffuse = 1.0
renderer.add_objects_block(0, oi)
return do_render(renderer, period, objects=samples)
def render_hoa_omni(data_file, samples):
config = visr_bear.api.Config()
config.num_objects_channels = 0
config.num_direct_speakers_channels = 0
config.num_hoa_channels = 1
config.period_size = period
config.data_path = data_file
renderer = visr_bear.api.Renderer(config)
hi = visr_bear.api.HOAInput()
hi.rtime = visr_bear.api.Time(0, 1)
hi.duration = visr_bear.api.Time(1, 1)
hi.channels = [0]
hi.type_metadata.orders = [0]
hi.type_metadata.degrees = [0]
hi.type_metadata.normalization = "SN3D"
renderer.add_hoa_block(0, hi)
return do_render(renderer, period, hoa=samples)
def test_objects_direct_speakers_delays():
"""check that delays between direct/diffuse/directspeakers paths match.
These share the same IRs so can be tested exactly."""
files_dir = Path(__file__).parent / "files"
data_file = str(files_dir / "unity_brirs_decorrelators.tf")
input_samples = np.random.normal(size=(1, 48000)).astype(np.float32)
direct_speakers_samples = render_directspeakers_front(data_file, input_samples)
objects_samples = render_objects_front(data_file, input_samples)
diffuse_samples = render_diffuse_front(data_file, input_samples)
# skip 2 periods, because the gains settle during the first period, and
# some of this will still be coming through the delays in the second period
npt.assert_allclose(
direct_speakers_samples[:, 2 * period :],
objects_samples[:, 2 * period :],
atol=2e-4,
)
npt.assert_allclose(
direct_speakers_samples[:, 2 * period :],
diffuse_samples[:, 2 * period :],
atol=2e-4,
)
def test_objects_hoa_delays():
"""check that delays between objects and HOA paths match. These use
different IRs, so check with cross-correlation."""
input_samples = np.zeros(shape=(1, 10240)).astype(np.float32)
input_samples[:, 4800] = 1.0
objects_samples = render_objects_front(data_path, input_samples)
hoa_samples = render_hoa_omni(data_path, input_samples)
def check_delay(a, b):
osa = 4
a_osa = sig.resample(a, len(a) * osa)
b_osa = sig.resample(b, len(b) * osa)
delay, correlation = correlate(a_osa, b_osa)
# check that 0 delay is a peak comparable with the delay that has the
# highest correlation
assert correlation[ | np.where(delay == 0) | numpy.where |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.1.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Dimensionality Reduction in [Bayer and Luetticke (2018)](https://cepr.org/active/publications/discussion_papers/dp.php?dpno=13071)
#
# [](https://mybinder.org/v2/gh/econ-ark/HARK/BayerLuetticke?filepath=HARK%2FBayerLuetticke%2FDCT-Copula-Illustration.ipynb)
#
# This companion to the [main notebook](TwoAsset.ipynb) explains in more detail how the authors reduce the dimensionality of their problem
#
# - Based on original slides by <NAME> and <NAME>
# - Original Jupyter notebook by <NAME>
# - Further edits by <NAME>, <NAME>
#
# %% [markdown]
# ### Preliminaries
#
# In Steady-state Equilibrium (StE) in the model, in any given period, a consumer in state $s$ (which comprises liquid assets $m$, illiquid assets $k$, and human capital $\newcommand{hLev}{h}\hLev$) has two key choices:
# 1. To adjust ('a') or not adjust ('n') their holdings of illiquid assets $k$
# 1. Contingent on that choice, decide the level of consumption, yielding consumption functions:
# * $c_n(s)$ - nonadjusters
# * $c_a(s)$ - adjusters
#
# The usual envelope theorem applies here, so marginal value wrt the liquid asset equals marginal utility with respect to consumption:
# $[\frac{d v}{d m} = \frac{d u}{d c}]$.
# In practice, the authors solve their problem using the marginal value of money $\texttt{Vm} = dv/dm$, but because the marginal utility function is invertible it is trivial to recover $\texttt{c}$ from $(u^{\prime})^{-1}(\texttt{Vm} )$. The consumption function is therefore computed from the $\texttt{Vm}$ function
# %% {"code_folding": [0]}
# Setup stuff
# This is a jupytext paired notebook that autogenerates a corresponding .py file
# which can be executed from a terminal command line via "ipython [name].py"
# But a terminal does not permit inline figures, so we need to test jupyter vs terminal
# Google "how can I check if code is executed in the ipython notebook"
def in_ipynb():
try:
if str(type(get_ipython())) == "<class 'ipykernel.zmqshell.ZMQInteractiveShell'>":
return True
else:
return False
except NameError:
return False
# Determine whether to make the figures inline (for spyder or jupyter)
# vs whatever is the automatic setting that will apply if run from the terminal
if in_ipynb():
# %matplotlib inline generates a syntax error when run from the shell
# so do this instead
get_ipython().run_line_magic('matplotlib', 'inline')
else:
get_ipython().run_line_magic('matplotlib', 'auto')
# The tools for navigating the filesystem
import sys
import os
# Find pathname to this file:
my_file_path = os.path.dirname(os.path.abspath("DCT-Copula-Illustration.ipynb"))
# Relative directory for pickled code
code_dir = os.path.join(my_file_path, "../Assets/Two")
sys.path.insert(0, code_dir)
sys.path.insert(0, my_file_path)
# %% {"code_folding": []}
# Load precalculated Stationary Equilibrium (StE) object EX3SS
import pickle
os.chdir(code_dir) # Go to the directory with pickled code
## EX3SS_20.p is the information in the stationary equilibrium
## (20: the number of illiquid and liquid weath gridpoints)
### The comments above are original, but it seems that there are 30 not 20 points now
EX3SS=pickle.load(open("EX3SS_20.p", "rb"))
# %% [markdown]
# ### Dimensions
#
# The imported StE solution to the problem represents the functions at a set of gridpoints of
# * liquid assets ($n_m$ points), illiquid assets ($n_k$), and human capital ($n_h$)
# * In the code these are $\{\texttt{nm,nk,nh}\}$
#
# So even if the grids are fairly sparse for each state variable, the total number of combinations of the idiosyncratic state gridpoints is large: $n = n_m \times n_k \times n_h$. So, e.g., $\bar{c}$ is a set of size $n$ containing the level of consumption at each possible _combination_ of gridpoints.
#
# In the "real" micro problem, it would almost never happen that a continuous variable like $m$ would end up being exactly equal to one of the prespecified gridpoints. But the functions need to be evaluated at such non-grid points. This is addressed by linear interpolation. That is, if, say, the grid had $m_{8} = 40$ and $m_{9} = 50$ then and a consumer ended up with $m = 45$ then the approximation is that $\tilde{c}(45) = 0.5 \bar{c}_{8} + 0.5 \bar{c}_{9}$.
#
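# %% [markdown]
# A minimal sketch of that interpolation step, using the made-up numbers from the example above (the consumption values at the two gridpoints are hypothetical):

# %%
import numpy as np
m_gridpoints = np.array([40.0, 50.0])               # m_8 and m_9 from the example
cbar_at_grid = np.array([1.0, 2.0])                 # hypothetical consumption levels at those gridpoints
print(np.interp(45.0, m_gridpoints, cbar_at_grid))  # 1.5 = 0.5*cbar_8 + 0.5*cbar_9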
# %% {"code_folding": [0]}
# Show dimensions of the consumer's problem (state space)
print('c_n is of dimension: ' + str(EX3SS['mutil_c_n'].shape))
print('c_a is of dimension: ' + str(EX3SS['mutil_c_a'].shape))
print('Vk is of dimension:' + str(EX3SS['Vk'].shape))
print('Vm is of dimension:' + str(EX3SS['Vm'].shape))
print('For convenience, these are all constructed from the same exogenous grids:')
print(str(len(EX3SS['grid']['m']))+' gridpoints for liquid assets;')
print(str(len(EX3SS['grid']['k']))+' gridpoints for illiquid assets;')
print(str(len(EX3SS['grid']['h']))+' gridpoints for individual productivity.')
print('')
print('Therefore, the joint distribution is of size: ')
print(str(EX3SS['mpar']['nm'])+
' * '+str(EX3SS['mpar']['nk'])+
' * '+str(EX3SS['mpar']['nh'])+
' = '+ str(EX3SS['mpar']['nm']*EX3SS['mpar']['nk']*EX3SS['mpar']['nh']))
# %% [markdown]
# ### Dimension Reduction
#
# The authors use different dimensionality reduction methods for the consumer's problem and the distribution across idiosyncratic states
# %% [markdown]
# #### Representing the consumer's problem with Basis Functions
#
# The idea is to find an efficient "compressed" representation of our functions (e.g., the consumption function), which BL do using tools originally developed for image compression. The analogy to image compression is that nearby pixels are likely to have identical or very similar colors, so we need only to find an efficient way to represent how the colors _change_ from one pixel to nearby ones. Similarly, consumption at a given point $s_{i}$ is likely to be close to consumption point at another point $s_{j}$ that is "close" in the state space (similar wealth, income, etc), so a function that captures that similarity efficiently can preserve most of the information without keeping all of the points.
#
# Like linear interpolation, the [DCT transformation](https://en.wikipedia.org/wiki/Discrete_cosine_transform) is a method of representing a continuous function using a finite set of numbers. It uses a set of independent [basis functions](https://en.wikipedia.org/wiki/Basis_function) to do this.
#
# But it turns out that some of those basis functions are much more important than others in representing the steady-state functions. Dimension reduction is accomplished by basically ignoring all basis functions that make "small enough" contributions to the representation of the function.
#
# ##### When might this go wrong?
#
# Suppose the consumption function changes in a recession in ways that change behavior radically at some states. Like, suppose unemployment almost never happens in steady state, but it can happen in temporary recessions. Suppose further that, even for employed people, in a recession, _worries_ about unemployment cause many of them to prudently withdraw some of their illiquid assets -- behavior opposite of what people in the same state would be doing during expansions. In that case, the basis functions that represented the steady state function would have had no incentive to be able to represent well the part of the space that is never seen in steady state, so any functions that might help do so might well have been dropped in the dimension reduction stage.
#
# On the whole, it seems unlikely that this kind of thing is a major problem, because the vast majority of the variation that people experience is idiosyncratic. There is always unemployment, for example; it just moves up and down a bit with aggregate shocks, but since the experience of unemployment is in fact well represented in the steady state the method should have no trouble capturing it.
#
# Where the method might have more trouble is in representing economies in which there are multiple equilibria in which behavior is quite different.
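# %% [markdown]
# As a rough, standalone illustration of the compression idea (not the authors' code): take the DCT of a smooth, made-up function, keep only the largest coefficients needed to reach a 0.99999 share of the squared norm (mirroring the `accuracy` parameter set later in this notebook), and reconstruct.

# %%
import numpy as np
import scipy.fftpack as sf

grid = np.linspace(0, 10, 200)
f = np.log(1 + grid) + 0.1 * np.sqrt(grid)            # a smooth, made-up "policy function"
coeffs = sf.dct(f, norm='ortho')

order = np.argsort(np.abs(coeffs))[::-1]               # coefficients, most important first
cumshare = np.cumsum(coeffs[order] ** 2) / np.sum(coeffs ** 2)
keep = order[:np.searchsorted(cumshare, 0.99999) + 1]  # smallest set reaching the target share

compressed = np.zeros_like(coeffs)
compressed[keep] = coeffs[keep]
f_approx = sf.idct(compressed, norm='ortho')
print(len(keep), 'of', len(coeffs), 'coefficients kept; max abs error:', np.abs(f - f_approx).max())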
# %% [markdown]
# #### For the distribution of agents across states: Copula
#
# The other tool the authors use is the ["copula"](https://en.wikipedia.org/wiki/Copula_(probability_theory)), which allows us to represent the distribution of people across idiosyncratic states efficiently
#
# The copula is computed from the joint distribution of states in StE and will be used to transform the [marginal distributions](https://en.wikipedia.org/wiki/Marginal_distribution) back to joint distributions. (For an illustration of how the assumptions used when modeling asset price distributions using copulas can fail see [Salmon](https://www.wired.com/2009/02/wp-quant/))
#
# * A copula is a representation of the joint distribution expressed using a mapping between the uniform joint CDF and the marginal distributions of the variables
#
# * The crucial assumption is that what aggregate shocks do is to squeeze or distort the steady state distribution, but leave the rank structure of the distribution the same
# * An example of when this might not hold is the following. Suppose that in expansions, the people at the top of the distribution of illiquid assets (the top 1 percent, say) are also at the top 1 percent of liquid assets. But in recessions the bottom 99 percent get angry at the top 1 percent of illiquid asset holders and confiscate part of their liquid assets (the illiquid assets can't be confiscated quickly because they are illiquid). Now the people in the top 1 percent of illiquid assets might be in the _bottom_ 1 percent of liquid assets.
#
# - In this case we just need to represent how the mapping from ranks into levels of assets changes when an aggregate shock hits
#
# - This reduces the number of points for which we need to track transitions from $3600 = 30 \times 30 \times 4$ to $64 = 30+30+4$. Or the total number of points we need to contemplate goes from $3600^2 \approx 13 $million to $64^2=4096$.
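# %% [markdown]
# A toy, self-contained illustration of the rank assumption (unrelated to the EX3SS object): squeezing the marginal distribution of a variable, as an aggregate shock is assumed to do, leaves its rank ordering unchanged.

# %%
import numpy as np
from scipy.stats import rankdata

rng = np.random.RandomState(0)
m = rng.lognormal(size=1000)                        # made-up liquid-asset draws
k = m * rng.lognormal(0.1, 0.2, 1000)               # made-up illiquid assets, correlated with m

u = rankdata(m) / (len(m) + 1)                      # the "copula" information: ranks scaled into (0,1)
v = rankdata(k) / (len(k) + 1)

m_shocked = 0.5 * m                                 # squeeze the marginal of m
print(np.array_equal(rankdata(m), rankdata(m_shocked)))   # True: the rank structure is preserved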
# %% {"code_folding": [0]}
# Get some specs about the copula, which is precomputed in the EX3SS object
print('The copula consists of two parts: gridpoints and values at those gridpoints:'+ \
'\n gridpoints have dimensionality of '+str(EX3SS['Copula']['grid'].shape) + \
'\n where the first element is total number of gridpoints' + \
'\n and the second element is number of idiosyncratic state variables' + \
'\n whose values also are of dimension of '+str(EX3SS['Copula']['value'].shape[0]) + \
'\n each entry of which is the probability that all three of the'
'\n state variables are below the corresponding point.')
# %% {"code_folding": [0]}
## Import BL codes
import sys
# Relative directory for BL codes
sys.path.insert(0,'../../../..') # comment by TW: this is not the same as in TwoAsset.ipynb.
from HARK.BayerLuetticke.Assets.Two.FluctuationsTwoAsset import FluctuationsTwoAsset
# %% {"code_folding": [0]}
## Import other necessary libraries
import numpy as np
#from numpy.linalg import matrix_rank
import scipy as sc
import matplotlib.pyplot as plt
import time
import scipy.fftpack as sf # scipy discrete fourier transforms
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import cm
from matplotlib import lines
import seaborn as sns
import copy as cp
from scipy import linalg #linear algebra
# %% {"code_folding": [0]}
## Choose an aggregate shock to perturb(one of three shocks: MP, TFP, Uncertainty)
# EX3SS['par']['aggrshock'] = 'MP'
# EX3SS['par']['rhoS'] = 0.0 # Persistence of variance
# EX3SS['par']['sigmaS'] = 0.001 # STD of variance shocks
#EX3SS['par']['aggrshock'] = 'TFP'
#EX3SS['par']['rhoS'] = 0.95
#EX3SS['par']['sigmaS'] = 0.0075
EX3SS['par']['aggrshock'] = 'Uncertainty'
EX3SS['par']['rhoS'] = 0.84 # Persistence of variance
EX3SS['par']['sigmaS'] = 0.54 # STD of variance shocks
# %% {"code_folding": []}
## Choose an accuracy of approximation with DCT
### Determines number of basis functions chosen -- enough to match this accuracy
### EX3SS is precomputed steady-state pulled in above
EX3SS['par']['accuracy'] = 0.99999
# %% {"code_folding": []}
## Implement state reduction and DCT
### Do state reduction on steady state
EX3SR=FluctuationsTwoAsset(**EX3SS) # Takes StE result as input and get ready to invoke state reduction operation
SR=EX3SR.StateReduc() # StateReduc is operated
# %% {"code_folding": [0]}
# Measuring the effectiveness of the state reduction
print('What are the results from the state reduction?')
#print('Newly added attributes after the operation include \n'+str(set(SR.keys())-set(EX3SS.keys())))
print('\n')
print('To achieve an accuracy of '+str(EX3SS['par']['accuracy'])+'\n')
print('The dimension of the policy functions is reduced to '+str(SR['indexMUdct'].shape[0]) \
+' from '+str(EX3SS['mpar']['nm']*EX3SS['mpar']['nk']*EX3SS['mpar']['nh'])
)
print('The dimension of the marginal value functions is reduced to '+str(SR['indexVKdct'].shape[0]) \
+ ' from ' + str(EX3SS['Vk'].shape))
print('The total number of control variables is '+str(SR['Contr'].shape[0])+'='+str(SR['indexMUdct'].shape[0]) + \
'+'+str(SR['indexVKdct'].shape[0])+'+ # of other macro controls')
print('\n')
print('The copula represents the joint distribution with a vector of size '+str(SR['Gamma_state'].shape) )
print('The dimension of states, including the exogenous state, is '+str(SR['Xss'].shape[0]))
print('It simply stacks all grids of different\
\n state variables regardless of their joint distributions.\
\n This is due to the assumption that the rank order remains the same.')
print('The total number of state variables is '+str(SR['State'].shape[0]) + '='+\
str(SR['Gamma_state'].shape[1])+'+ the number of macro states (like the interest rate)')
# %% [markdown]
# ### Graphical Illustration
#
# #### Policy/value functions
#
# Taking the consumption function as an example, we plot consumption by adjusters and non-adjusters over a range of $k$ and $m$ that encompasses 100 as well as 90 percent of the mass of the distribution function, respectively.
#
# We plot the functions for each of the 4 values of the wage $h$.
#
# %% {"code_folding": [0]}
## Graphical illustration
xi = EX3SS['par']['xi']
invmutil = lambda x : (1./x)**(1./xi)
### convert marginal utilities back to consumption function
mut_StE = EX3SS['mutil_c']
mut_n_StE = EX3SS['mutil_c_n'] # marginal utility of non-adjusters
mut_a_StE = EX3SS['mutil_c_a'] # marginal utility of adjusters
c_StE = invmutil(mut_StE)
cn_StE = invmutil(mut_n_StE)
ca_StE = invmutil(mut_a_StE)
### grid values
dim_StE = mut_StE.shape
mgrid = EX3SS['grid']['m']
kgrid = EX3SS['grid']['k']
hgrid = EX3SS['grid']['h']
# %% {"code_folding": [0]}
## Define some functions to be used next
def dct3d(x):
x0=sf.dct(x.copy(),axis=0,norm='ortho')
x1=sf.dct(x0.copy(),axis=1,norm='ortho')
x2=sf.dct(x1.copy(),axis=2,norm='ortho')
return x2
def idct3d(x):
x2 = sf.idct(x.copy(),axis=2,norm='ortho')
x1 = sf.idct(x2.copy(),axis=1,norm='ortho')
x0 = sf.idct(x1.copy(),axis=0,norm='ortho')
return x0
def DCTApprox(fullgrids,dct_index):
dim=fullgrids.shape
dctcoefs = dct3d(fullgrids)
dctcoefs_rdc = np.zeros(dim)
dctcoefs_rdc[dct_index]=dctcoefs[dct_index]
approxgrids = idct3d(dctcoefs_rdc)
return approxgrids
# %% [markdown]
# Depending on the accuracy level, the DCT operation chooses the necessary number of basis functions used to approximate the consumption function at the full grids. This is illustrated on pages 31-34 of these [slides](https://www.dropbox.com/s/46fdxh0aphazm71/presentation_method.pdf?dl=0). We show this for both 1-dimensional (m or k) and 2-dimensional (m and k) grids in the following.
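# %% [markdown]
# A minimal 1-D sketch of that idea (added for illustration, not the BL implementation; the test curve and accuracy target below are made up): take the DCT of a smooth curve, keep only the largest coefficients needed to reach a target share of the squared norm, and invert.
# %%
import numpy as np
import scipy.fftpack as sf

f = np.log(1 + np.linspace(0, 10, 100))        # a smooth, policy-function-like curve
coefs = sf.dct(f, norm='ortho')
order = np.argsort(np.abs(coefs))[::-1]        # coefficients sorted by magnitude
energy = np.cumsum(coefs[order]**2) / np.sum(coefs**2)
n_keep = int(np.searchsorted(energy, 0.99999)) + 1   # basis functions needed for the target
coefs_rdc = np.zeros_like(coefs)
coefs_rdc[order[:n_keep]] = coefs[order[:n_keep]]
f_approx = sf.idct(coefs_rdc, norm='ortho')
print(n_keep, 'of', len(coefs), 'basis functions retained; max abs error =',
      np.abs(f - f_approx).max())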
# %% {"code_folding": []}
## 2D graph of consumption function: c(m) fixing k and h
## list of accuracy levels
Accuracy_BL = 0.99999 # From BL
Accuracy_Less0 = 0.999
Accuracy_Less1 = 0.99
Accuracy_Less2 = 0.95
acc_lst = np.array([Accuracy_BL,Accuracy_Less0,Accuracy_Less1,Accuracy_Less2])
## c(m) fixing k and h
fig = plt.figure(figsize=(8,8))
fig.suptitle('c at full grids and c approximated by DCT in different accuracy levels'
'\n adjusters, fixing k and h',
fontsize=(13))
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.3)
for idx in range(len(acc_lst)):
EX3SS_cp =cp.deepcopy(EX3SS)
EX3SS_cp['par']['accuracy'] = acc_lst[idx]
EX3SR_cp=FluctuationsTwoAsset(**EX3SS_cp) # Takes StE result as input and get ready to invoke state reduction operation
SR_cp=EX3SR_cp.StateReduc()
mut_rdc_idx_flt_cp = SR_cp['indexMUdct']
mut_rdc_idx_cp = np.unravel_index(mut_rdc_idx_flt_cp,dim_StE,order='F')
nb_bf_cp = len(mut_rdc_idx_cp[0])
print(str(nb_bf_cp) +" basis functions used.")
c_n_approx_cp = DCTApprox(cn_StE,mut_rdc_idx_cp)
c_a_approx_cp = DCTApprox(ca_StE,mut_rdc_idx_cp)
cn_diff_cp = c_n_approx_cp-cn_StE
# choose the fix grid of h and k
hgrid_fix=2 # fix level of h as an example
kgrid_fix=10 # fix level of k as an example
# get the corresponding c function approximated by dct
cVec = c_a_approx_cp[:,kgrid_fix,hgrid_fix]
## plots
ax = fig.add_subplot(2,2,idx+1)
ax.plot(mgrid,cVec,label='c approximated by DCT')
ax.plot(mgrid,ca_StE[:,kgrid_fix,hgrid_fix],'--',label='c at full grids')
ax.plot(mgrid,cVec,'r*')
ax.set_xlabel('m',fontsize=13)
ax.set_ylabel(r'$c(m)$',fontsize=13)
ax.set_title(r'accuracy=${}$'.format(acc_lst[idx]))
ax.legend(loc=0)
# %% {"code_folding": [0]}
## 2D graph of consumption function: c(k) fixing m and h
fig = plt.figure(figsize=(8,8))
fig.suptitle('c at full grids and c approximated by DCT in different accuracy levels'
'\n non-adjusters, fixing m and h',
fontsize=(13))
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.3)
for idx in range(len(acc_lst)):
EX3SS_cp =cp.deepcopy(EX3SS)
EX3SS_cp['par']['accuracy'] = acc_lst[idx]
EX3SR_cp=FluctuationsTwoAsset(**EX3SS_cp) # Takes StE result as input and get ready to invoke state reduction operation
SR_cp=EX3SR_cp.StateReduc()
mut_rdc_idx_flt_cp= SR_cp['indexMUdct']
mut_rdc_idx_cp = np.unravel_index(mut_rdc_idx_flt_cp,dim_StE,order='F')
nb_bf_cp = len(mut_rdc_idx_cp[0])
print(str(nb_bf_cp) +" basis functions used.")
c_n_approx_cp = DCTApprox(cn_StE,mut_rdc_idx_cp)
c_a_approx_cp = DCTApprox(ca_StE,mut_rdc_idx_cp)
cn_diff_cp = c_n_approx_cp-cn_StE
# choose the fix grid of h and m
hgrid_fix=2 # fix level of h as an example
mgrid_fix=10 # fix level of m as an example
# get the corresponding c function approximated by dct
cVec = c_n_approx_cp[mgrid_fix,:,hgrid_fix]
## plots
ax = fig.add_subplot(2,2,idx+1)
ax.plot(kgrid,cVec,label='c approximated by DCT')
ax.plot(kgrid,cn_StE[mgrid_fix,:,hgrid_fix],'--',label='c at full grids')
ax.plot(kgrid,cVec,'r*')
ax.set_xlabel('k',fontsize=13)
ax.set_ylabel(r'$c(k)$',fontsize=13)
ax.set_title(r'accuracy=${}$'.format(acc_lst[idx]))
ax.legend(loc=0)
# %% {"code_folding": [0]}
## Set the population density for plotting graphs
print('Input: plot the graph for bottom x (0-1) of the distribution.')
mass_pct = float(input())
print('Input: choose the accuracy level for DCT, e.g. 0.99999 in the baseline of Bayer and Luetticke')
Accuracy_BS = float(input()) ## baseline accuracy level
# %% {"code_folding": [0]}
# Restore the solution corresponding to the original BL accuracy
EX3SS['par']['accuracy'] = Accuracy_BS
EX3SR=FluctuationsTwoAsset(**EX3SS) # Takes StE result as input and get ready to invoke state reduction operation
SR=EX3SR.StateReduc() # StateReduc is operated
## meshgrids for plots
mmgrid,kkgrid = np.meshgrid(mgrid,kgrid)
## indexMUdct is one dimension, needs to be unraveled to 3 dimensions
mut_rdc_idx_flt = SR['indexMUdct']
mut_rdc_idx = np.unravel_index(mut_rdc_idx_flt,dim_StE,order='F')
## Note: the following chunk of code can be used to recover the indices of grids selected by DCT. It is not used here.
#nb_dct = len(mut_StE.flatten())
#mut_rdc_bool = np.zeros(nb_dct) # boolean array of 30 x 30 x 4
#for i in range(nb_dct):
# mut_rdc_bool[i]=i in list(SR['indexMUdct'])
#mut_rdc_bool_3d = (mut_rdc_bool==1).reshape(dim_StE)
#mut_rdc_mask_3d = (mut_rdc_bool).reshape(dim_StE)
## For BL accuracy level, get dct compressed c functions at all grids
c_n_approx = DCTApprox(cn_StE,mut_rdc_idx)
c_a_approx = DCTApprox(ca_StE,mut_rdc_idx)
# Get the joint distribution calculated elsewhere
joint_distr = EX3SS['joint_distr']
# %% {"code_folding": [0]}
## Functions used to plot consumption functions at the trimmed grids
def WhereToTrim2d(joint_distr,mass_pct):
"""
parameters
-----------
joint_distr: 2-d distribution (possibly itself a marginal of a 3-d distribution); its marginals over each dimension are computed internally
mass_pct: bottom percentile to keep
returns
----------
trim1_idx: idx for trimming in the 1st dimension
trim2_idx: idx for trimming in the 2nd dimension
"""
marginal1 = joint_distr.sum(axis=0)
marginal2 = joint_distr.sum(axis=1)
## this can handle cases where the joint_distr itself is a marginal distr from 3d,
## i.e. marginal.cumsum().max() may not equal 1
trim1_idx = (np.abs(marginal1.cumsum()-mass_pct*marginal1.cumsum().max())).argmin()
trim2_idx = (np.abs(marginal2.cumsum()-mass_pct*marginal2.cumsum().max())).argmin()
return trim1_idx,trim2_idx
def TrimMesh2d(grids1,grids2,trim1_idx,trim2_idx,drop=True):
if drop ==True:
grids_trim1 = grids1.copy()
grids_trim2 = grids2.copy()
grids_trim1=grids_trim1[:trim1_idx]
grids_trim2=grids_trim2[:trim2_idx]
grids1_trimmesh, grids2_trimmesh = np.meshgrid(grids_trim1,grids_trim2)
else:
grids_trim1 = grids1.copy()
grids_trim2 = grids2.copy()
grids_trim1[trim1_idx:]=np.nan
grids_trim2[trim2_idx:]=np.nan
grids1_trimmesh, grids2_trimmesh = np.meshgrid(grids_trim1,grids_trim2)
return grids1_trimmesh,grids2_trimmesh
# %% {"code_folding": [0]}
## Other configurations for plotting
distr_min = 0
distr_max = np.nanmax(joint_distr)
fontsize_lg = 13
## lower bound for grid
mmin = np.nanmin(mgrid)
kmin = np.nanmin(kgrid)
# %% {"code_folding": [0]}
# For non-adjusters: 3D surface plots of consumption function at full grids and approximated by DCT
## at all grids and grids after dct first for non-adjusters and then for adjusters
fig = plt.figure(figsize=(14,14))
fig.suptitle('Consumption of non-adjusters at grid points of m and k \n where ' +str(int(mass_pct*100))+ ' % of the agents are distributed \n (for each h)',
fontsize=(fontsize_lg))
for hgrid_id in range(EX3SS['mpar']['nh']):
## get the grids and distr for fixed h
hgrid_fix = hgrid_id
distr_fix = joint_distr[:,:,hgrid_fix]
c_n_approx_fix = c_n_approx[:,:,hgrid_fix]
c_n_StE_fix = cn_StE[:,:,hgrid_fix]
## additions to the above cell
## for each h grid, take the bottom mass_pct of the m and k distributions as the maximum of the m and k axes
mk_marginal = joint_distr[:,:,hgrid_fix]
mmax_idx, kmax_idx = WhereToTrim2d(mk_marginal,mass_pct)
mmax, kmax = mgrid[mmax_idx],kgrid[kmax_idx]
mmgrid_trim,kkgrid_trim = TrimMesh2d(mgrid,kgrid,mmax_idx,kmax_idx)
c_n_approx_trim = c_n_approx_fix.copy()
c_n_approx_trim = c_n_approx_trim[:kmax_idx, :mmax_idx] # the dimension is transposed for meshgrid.
distr_fix_trim = distr_fix.copy()
cn_StE_trim = c_n_StE_fix.copy()
cn_StE_trim = cn_StE_trim[:kmax_idx,:mmax_idx]
distr_fix_trim = distr_fix_trim[:kmax_idx,:mmax_idx]
## find the maximum z
zmax = np.nanmax(c_n_approx_trim)
"""
.. module:: PhotonPipe
:synopsis: A recreation / port of key functionality of the GALEX mission
pipeline to generate calibrated and sky-projected photon-level data from
raw spacecraft and detector telemetry. Generates time-tagged photon lists
given mission-produced -raw6, -scst, and -asprta data.
"""
from __future__ import absolute_import, division, print_function
# Core and Third Party imports.
from astropy.io import fits as pyfits
from builtins import str
from builtins import range
import csv
import numpy as np
import os
import time
# gPhoton imports.
import gPhoton.cal as cal
from gPhoton.CalUtils import (clk_cen_scl_slp, get_stim_coefs, find_fuv_offset,
post_csp_caldata, rtaph_yac, rtaph_yac2,
compute_stimstats, create_ssd)
from gPhoton.FileUtils import load_aspect, web_query_aspect, download_data
from gPhoton.gnomonic import gnomfwd_simple, gnomrev_simple
from gPhoton.MCUtils import print_inline
# ------------------------------------------------------------------------------
def photonpipe(outbase, band, raw6file=None, scstfile=None, aspfile=None,
ssdfile=None, nullfile=None, verbose=0, retries=20,
eclipse=None):
"""
Apply static and sky calibrations to -raw6 GALEX data, producing fully
aspect-corrected and time-tagged photon list files.
:param raw6file: Name of the raw6 file to use.
:type raw6file: str
:param scstfile: Spacecraft state file to use.
:type scstfile: str
:param band: Name of the band to use, either 'FUV' or 'NUV'.
:type band: str
:param outbase: Base of the output file names.
:type outbase: str
:param aspfile: Name of aspect file to use.
:type aspfile: str
:param ssdfile: Name of Stim Separation Data file to use.
:type ssdfile: str
:param nullfile: Name of output file to record NULL lines.
:type nullfile: str
:param verbose: Not used. Included for consistency with other tools.
:type verbose: int
:param retries: Number of query retries to attempt before giving up.
:type retries: int
"""
startt = time.time()
# Scale factor for the time column in the output csv so that it
# can be recorded as an int in the database.
dbscale = 1000
# This number determines the size of the chunks that gPhoton reads
# in from the raw6 for processing. Even if your machine has a lot
# of memory, making this number bigger is unlikely to improve the
# processing time much because so much is eaten up by the .csv write.
chunksz = 1000000
# These are just constants for the mission.
detsize = 1.25 # Detector size in degrees
pltscl = 68.754932 # Plate scale
aspum = pltscl/1000.0
arcsecperpixel = 1.5
xi_xsc, xi_ysc, eta_xsc, eta_ysc = 0., 1., 1., 0.
# Determine the eclipse number from the raw6 header.
if not raw6file:
if not eclipse:
raise ValueError('Must specify eclipse if no raw6file.')
else:
raw6file = download_data(
eclipse,band,'raw6',datadir=os.path.dirname(outbase))
if raw6file==None:
print('Unable to retrieve raw6 file for this eclipse.')
return
hdulist = pyfits.open(raw6file)
hdr = hdulist[0].header
hdulist.close()
if eclipse and (eclipse!=hdr['eclipse']): # just a consistency check
print("Warning: eclipse mismatch {e0} vs. {e1} (header)".format(
e0=eclipse,e1=hdr['eclipse']))
eclipse = hdr['eclipse']
print("Processing eclipse "+str(eclipse)+".")
# Returns detector constants.
print("Band is "+band+".")
(xclk, yclk, xcen, ycen, xscl, yscl, xslp,
yslp) = clk_cen_scl_slp(band, eclipse)
# This determines the values for the post-CSP detector stim scaling
# and detector constant corrections.
Mx, Bx, My, By, stimsep = 1, 0, 1, 0, 0
if eclipse > 37460:
(Mx, Bx, My, By, stimsep, yactbl) = compute_stimstats(raw6file, band,
eclipse)
wig2, wig2data, wlk2, wlk2data, clk2, clk2data = post_csp_caldata()
print("Loading wiggle files...")
wiggle_x, _ = cal.wiggle(band, 'x')
wiggle_y, _ = cal.wiggle(band, 'y')
print("Loading walk files...")
walk_x, _ = cal.walk(band, 'x')
walk_y, _ = cal.walk(band, 'y')
print("Loading linearity files...")
linearity_x, _ = cal.linearity(band, 'x')
linearity_y, _ = cal.linearity(band, 'y')
# This is for the post-CSP stim distortion corrections.
print("Loading distortion files...")
if eclipse > 37460:
print(" Using stim separation of :"+str(stimsep))
distortion_x, disthead = cal.distortion(band, 'x', eclipse, stimsep)
distortion_y, _ = cal.distortion(band, 'y', eclipse, stimsep)
(cube_x0, cube_dx, cube_y0, cube_dy, cube_d0, cube_dd, cube_nd, cube_nc,
cube_nr) = (disthead['DC_X0'], disthead['DC_DX'],
disthead['DC_Y0'], disthead['DC_DY'],
disthead['DC_D0'], disthead['DC_DD'],
disthead['NAXIS3'], disthead['NAXIS1'],
disthead['NAXIS2'])
if band == 'FUV':
if not scstfile:
if not eclipse:
raise ValueError('Must specify eclipse if no scstfile.')
else:
scstfile = download_data(
eclipse,band,'scst',datadir=os.path.dirname(outbase))
if scstfile==None:
print('Unable to retrieve SCST file for this eclipse.')
return
xoffset, yoffset = find_fuv_offset(scstfile)
else:
xoffset, yoffset = 0., 0.
if os.path.isfile(str(ssdfile)):
print("SSD file provided: "+str(ssdfile))
stim_coef0, stim_coef1 = get_stim_coefs(ssdfile)
elif ssdfile:
print("SSD file requested: "+str(ssdfile))
stim_coef0, stim_coef1 = create_ssd(raw6file, band, eclipse, ssdfile)
else:
print("No SSD file provided or requested.")
stim_coef0, stim_coef1 = create_ssd(raw6file, band, eclipse)
print(" stim_coef0, stim_coef1 = "+str(stim_coef0)+", "+str(stim_coef1))
print("Loading mask file...")
mask, maskinfo = cal.mask(band)
npixx = mask.shape[0]
npixy = mask.shape[1]
pixsz = maskinfo['CDELT2']
maskfill = detsize/(npixx*pixsz)
print("Loading aspect data...")
if aspfile:
(aspra, aspdec, asptwist, asptime, aspheader,
aspflags) = load_aspect(aspfile)
else:
(aspra, aspdec, asptwist, asptime, aspheader,
aspflags) = web_query_aspect(eclipse, retries=retries)
minasp, maxasp = min(asptime), max(asptime)
trange = [minasp, maxasp]
print(" trange= ( {t0} , {t1} )".format(t0=trange[0], t1=trange[1]))
ra0, dec0, roll0 = aspheader['RA'], aspheader['DEC'], aspheader['ROLL']
print(" [avgRA, avgDEC, avgROLL] = [{RA}, {DEC}, {ROLL}]".format(
RA=aspra.mean(), DEC=aspdec.mean(), ROLL=asptwist.mean()))
# This projects the aspect solutions onto the MPS field centers.
print("Computing aspect vectors...")
(xi_vec, eta_vec) = gnomfwd_simple(aspra, aspdec, ra0, dec0, -asptwist,
1.0/36000.0, 0.)
print("Loading raw6 file...")
raw6hdulist = pyfits.open(raw6file, memmap=1)
raw6htab = raw6hdulist[1].header
nphots = raw6htab['NAXIS2']
print(" "+str(nphots)+" events")
cnt = 0
outfile = outbase+'.csv'
print("Preparing output file "+outfile)
spreadsheet = csv.writer(open(outfile, 'w'), delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
# If specified, dump lines with NULLS into a separate csv file.
if nullfile:
nullfile = outbase+'_NULL.csv'
print("Preparing output file "+nullfile)
NULLspreadsheet = csv.writer(open(nullfile, 'w'), delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
print("")
for i in range(int(nphots/chunksz)+1):
a = time.time()
csvrows = []
chunkbeg, chunkend = i*chunksz, (i+1)*chunksz
if chunkend > nphots:
chunkend = nphots
chunkid = " "+str(i+1)+" of "+str(int(nphots/chunksz)+1)+": "
print_inline(chunkid+"Unpacking raw6 data...")
t = np.array(raw6hdulist[1].data.field('t')[chunkbeg:chunkend])
phb1 = np.array(raw6hdulist[1].data.field('phb1')[chunkbeg:chunkend],
dtype='int64')
phb2 = np.array(raw6hdulist[1].data.field('phb2')[chunkbeg:chunkend],
dtype='int64')
phb3 = np.array(raw6hdulist[1].data.field('phb3')[chunkbeg:chunkend],
dtype='int64')
phb4 = np.array(raw6hdulist[1].data.field('phb4')[chunkbeg:chunkend],
dtype='int64')
phb5 = np.array(raw6hdulist[1].data.field('phb5')[chunkbeg:chunkend],
dtype='int64')
# Bitwise "decoding" of the raw6 telemetry.
q = ((phb4 & 3) << 3) + ((phb5 & 224) >> 5)
xb = phb1 >> 5
xamc = (
np.array(((phb1 & 31) << 7), dtype='int16') +
np.array(((phb2 & 254) >> 1), dtype='int16') -
np.array(((phb1 & 16) << 8), dtype='int16'))
yb = ((phb2 & 1) << 2) + ((phb3 & 192) >> 6)
yamc = (
np.array(((phb3 & 63) << 6), dtype='int16') +
np.array(((phb4 & 252) >> 2), dtype='int16') -
np.array(((phb3 & 32) << 7), dtype='int16'))
xa = ((phb5 & 16) >> 4) + ((phb5 & 3) << 3) + ((phb5 & 12) >> 1)
xraw0 = xb*xclk + xamc
yraw0 = yb*yclk + yamc
ya = np.array(((((yraw0/(2*yclk) - xraw0/(2*xclk)) + 10)*32) + xa),
dtype='int64') % 32
xraw = xraw0 + np.array((((xa+7) % 32) - 16), dtype='int64') * xslp
yraw = yraw0 + np.array((((ya+7) % 32) - 16), dtype='int64') * yslp
# Centering and scaling.
x = (xraw - xcen)*xscl
y = (yraw - ycen)*yscl
# Post-CSP 'yac' corrections.
if eclipse > 37460:
x = Mx*x+Bx
y = My*y+By
yac = rtaph_yac(yactbl, ya, yb, yamc, eclipse)
y = y-yac
yac = rtaph_yac2(q, xb, yb, ya, y, aspum, wig2, wig2data, wlk2,
wlk2data, clk2, clk2data)
y = y + yac
# [Future] This and other ugly lines like it below are for the purpose
# of memory management. There is likely a more Pythonic way.
(phb1, phb2, phb3, phb4, phb5, xb, xamc, yb, yamc, xraw0, yraw0, xraw,
yraw) = ([], [], [], [], [], [], [], [], [], [], [], [], [])
flags = np.zeros(len(t))
print_inline(chunkid+"Applying wiggle correction...")
x_as = x*aspum
y_as = y*aspum
fptrx = x_as/10. + 240.
fptry = y_as/10. + 240.
x_as, y_as = [], []
# This and other lines like it below are to verify that the
# event is still on the detector.
cut = ((fptrx > 0.) & (fptrx < 479.) & (fptry > 0.) & (fptry < 479.) &
(flags == 0))
flags[np.where(cut == False)[0]] = 8
ix = np.where(cut == True)[0]
blt = fptrx-np.array(fptrx, dtype='int64')
blu = fptry-np.array(fptry, dtype='int64')
wigx, wigy = np.zeros(len(t)), np.zeros(len(t))
wigx[ix] = (
(1-blt[ix])*(wiggle_x[xa[ix], np.array(fptrx[ix], dtype='int64')]) +
(blt[ix])*(wiggle_x[xa[ix], np.array(fptrx[ix], dtype='int64')+1]))
wigy[ix] = (
(1-blu[ix])*(wiggle_y[ya[ix], np.array(fptry[ix], dtype='int64')]) +
(blu[ix])*(wiggle_y[ya[ix], np.array(fptry[ix], dtype='int64')+1]))
xdig = x + wigx/(10.*aspum)
ydig = y + wigy/(10.*aspum)
wigx, wigy = [], []
print_inline(chunkid+"Applying walk correction...")
xdig_as = xdig*aspum
ydig_as = ydig*aspum
fptrx = xdig_as/10. + 240.
fptry = ydig_as/10. + 240.
xdig_as, ydig_as = [], []
cut = ((fptrx > 0.) & (fptrx < 479.) & (fptry > 0.) & (fptry < 479.) &
(flags == 0))
flags[np.where(cut == False)[0]] = 9
ix = np.where(cut == True)[0]
cut[ix] = ((walk_x[q[ix], np.array(fptry[ix], dtype='int64'),
np.array(fptrx[ix], dtype='int64')] != -999) |
(walk_x[q[ix], np.array(fptry[ix], dtype='int64'),
np.array(fptrx[ix], dtype='int64')+1] != -999) |
(walk_x[q[ix], np.array(fptry[ix], dtype='int64')+1,
np.array(fptrx[ix], dtype='int64')] != -999) |
(walk_x[q[ix], np.array(fptry[ix], dtype='int64')+1,
np.array(fptrx[ix], dtype='int64')+1] != -999) |
(walk_y[q[ix], np.array(fptry[ix], dtype='int64'),
np.array(fptrx[ix], dtype='int64')] != -999) |
(walk_y[q[ix], np.array(fptry[ix], dtype='int64'),
np.array(fptrx[ix], dtype='int64')+1] != -999) |
(walk_y[q[ix], np.array(fptry[ix], dtype='int64')+1,
np.array(fptrx[ix], dtype='int64')] != -999) |
(walk_y[q[ix], np.array(fptry[ix], dtype='int64')+1,
np.array(fptrx[ix], dtype='int64')+1] != -999))
flags[np.where(cut == False)[0]] = 9
ix = np.where(cut == True)[0]
blt = fptrx-np.array(fptrx, dtype='int64')
blu = fptry-np.array(fptry, dtype='int64')
walkx, walky = np.zeros(len(t)), np.zeros(len(t))
walkx[ix] = (
(1-blt[ix])*(1-blu[ix])*(
walk_x[q[ix],
np.array(fptry[ix], dtype='int64'),
np.array(fptrx[ix], dtype='int64')])+
(blt[ix])*(1-blu[ix])*(
walk_x[q[ix],
np.array(fptry[ix], dtype='int64'),
np.array(fptrx[ix], dtype='int64')+1])+
(1-blt[ix])*(blu[ix])*(
walk_x[q[ix],
np.array(fptry[ix], dtype='int64')+1,
np.array(fptrx[ix], dtype='int64')]) +
(blt[ix])*(blu[ix])*(
walk_x[q[ix],
np.array(fptry[ix], dtype='int64')+1,
np.array(fptrx[ix], dtype='int64')+1]))
walky[ix] = (
(1-blt[ix])*(1-blu[ix])*(
walk_y[q[ix],
np.array(fptry[ix], dtype='int64'),
np.array(fptrx[ix], dtype='int64')]) +
(blt[ix])*(1-blu[ix])*(
walk_y[q[ix],
np.array(fptry[ix], dtype='int64'),
np.array(fptrx[ix], dtype='int64')+1]) +
(1-blt[ix])*(blu[ix])*(
walk_y[q[ix],
np.array(fptry[ix], dtype='int64')+1,
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Sawyer environment for pushing objects."""
import metaworld.envs.mujoco.cameras as camera_configs
from metaworld.google import glfw
import mujoco_py
import numpy as np
from collections import OrderedDict
from gym.spaces import Dict, Box
from metaworld.envs.env_util import get_stat_in_paths, \
create_stats_ordered_dict, get_asset_full_path
from metaworld.envs.mujoco.sawyer_xyz.base import SawyerXYZEnv
from metaworld.envs.mujoco.utils.rotation import euler2quat
from metaworld.envs.mujoco.sawyer_xyz.base import OBS_TYPE
sideview_cam = camera_configs.create_sawyer_camera_init(
lookat=(0.2, 0.75, 0.4),
distance=0.8,
elevation=-55,
azimuth=180,
trackbodyid=-1,
)
topview_cam = camera_configs.create_sawyer_camera_init(
lookat=(0., 1.0, 0.5),
distance=0.6,
elevation=-45,
azimuth=270,
trackbodyid=-1,
)
# list of changes
# object position has been changed to have lower variance
# the constant for pushing reward has been changed from 1000 -> 10
# added reset_goal function
# the observation "with_goal" has been changed
class SawyerReachPushPickPlaceEnv(SawyerXYZEnv):
def __init__(
self,
random_init=False,
task_types=['pick_place', 'reach', 'push'],
task_type='pick_place',
obs_type='plain',
goal_low=(-0.1, 0.8, 0.05),
goal_high=(0.1, 0.9, 0.3),
liftThresh=0.04,
sampleMode='equal',
rotMode='fixed', #'fixed',
**kwargs):
self.quick_init(locals())
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.02, 0.58, 0.02)
obj_high = (0.02, 0.62, 0.02)
SawyerXYZEnv.__init__(
self,
frame_skip=5,
action_scale=1. / 100,
hand_low=hand_low,
hand_high=hand_high,
model_name=self.model_name,
**kwargs)
self.task_type = task_type
self.init_config = {
'obj_init_angle': .3,
'obj_init_pos': np.array([0, 0.6, 0.02]),
'hand_init_pos': np.array([0, .6, .2]),
}
# we only do one task from [pick_place, reach, push]
# per instance of SawyerReachPushPickPlaceEnv.
# Please only set task_type from constructor.
if self.task_type == 'pick_place':
self.goal = np.array([0.1, 0.8, 0.2])
elif self.task_type == 'reach':
self.goal = np.array([-0.1, 0.8, 0.2])
elif self.task_type == 'push':
self.goal = np.array([0.1, 0.8, 0.02])
else:
raise NotImplementedError
self.obj_init_angle = self.init_config['obj_init_angle']
self.obj_init_pos = self.init_config['obj_init_pos']
self.hand_init_pos = self.init_config['hand_init_pos']
assert obs_type in OBS_TYPE
self.obs_type = obs_type
if goal_low is None:
goal_low = self.hand_low
if goal_high is None:
goal_high = self.hand_high
self.random_init = random_init
self.liftThresh = liftThresh
self.max_path_length = 150
self.rotMode = rotMode
self.sampleMode = sampleMode
self.task_types = task_types
if rotMode == 'fixed':
self.action_space = Box(
np.array([-1, -1, -1, -1]),
np.array([1, 1, 1, 1]),
)
elif rotMode == 'rotz':
self.action_rot_scale = 1. / 50
self.action_space = Box(
np.array([-1, -1, -1, -np.pi, -1]),
np.array([1, 1, 1, np.pi, 1]),
)
elif rotMode == 'quat':
self.action_space = Box(
np.array([-1, -1, -1, 0, -1, -1, -1, -1]),
np.array([1, 1, 1, 2 * np.pi, 1, 1, 1, 1]),
)
else:
self.action_space = Box(
np.array([-1, -1, -1, -np.pi / 2, -np.pi / 2, 0, -1]),
np.array([1, 1, 1, np.pi / 2, np.pi / 2, np.pi * 2, 1]),
)
self.obj_and_goal_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
)
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
# File: problem1.py
# Author: <NAME>
# Date: 11/06/2019
# Class: ECE 555 - Probability for Electrical Engineers
# Description:
# Write and run a program to simulate an M/E2/1 queue and obtain realizations of
# the four stochastic processes defined in Example 6.2. Plot these realizations. You
# may use a simulation language such as SIMULA or GPSS or you may use one
# of the standard high-level languages. You will have to generate random deviates
# of the interarrival time distribution (assume arrival rate λ = 1 per second) and
# the service time distribution (assume mean service time 0.8 s) using methods of
# Chapter 3.
import math
import queue
import numpy as np
import matplotlib.pyplot as plt
def getErlang2(u1, u2, l=1):
return (-np.log(u1)/(2*l)) + (-np.log(u2)/(2*l))
# u is a number drawn from a random uniform distribution (between 0 and 1)
# l is the rate of the exp distrbution
def getEXP(u,l=1):
return -np.log(u)/l
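# Sanity-check sketch for the two samplers above (not part of the original assignment):
# by inverse transform, getEXP(u, l) should have mean ~1/l, and getErlang2(u1, u2, l) is
# the sum of two exponential stages each with rate 2*l, so its mean is also ~1/l.
def check_samplers(n=100000, l=1.0):
    u1 = np.random.random_sample(n)
    u2 = np.random.random_sample(n)
    return np.mean(getEXP(u1, l)), np.mean(getErlang2(u1, u2, l))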
# Returns a list of all the values from a given list less than or equal to a specific value
def getSmaller(list2Check, value):
return [x for x in list2Check if x <= value]
def getBetween(list2Check, small_val, big_val):
return [x for x in list2Check if x >= small_val and x <= big_val]
def addToQueue(list2Check, que):
# add to queue
if que.empty():
for value in list2Check:
que.put(value)
return que
# When queue has elementsd
for value in list2Check:
# add to queue
if value > que.queue[-1]:
que.put(value)
return que
def graph(x, xlabel, ylabel, title, isDiscrete=True):
if isDiscrete:
y = [i for i in range(len(x))]
plt.scatter(x,y)
else:
plt.plot(x)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.show()
return 0
#
# Arrival time = exponentially distributed
# Service time = erlang distribution
# Iterations = number of seconds
def simulate_M_E2_1(total_jobs=1000, arrival_rate=1, mean_service_time=0.8):
k = total_jobs # Total number of jobs
rand_samples1 = np.random.random_sample((k,))
rand_samples2 = np.random.random_sample((k,))
rand_samples3 = np.random.random_sample((k,))
arrival_queue = queue.Queue(maxsize=k-1)
service_queue = queue.Queue(maxsize=1)
arrival_times = np.zeros(k)
continuous_arrivals = np.zeros(k)
service_times = np.zeros(k)
continuous_service = np.zeros(k)
N_k = np.zeros(k+1)
exit_times = np.copy(N_k) # related to X_t and Y_t
W_n = np.zeros(k+1)
# next is to add accel and see the difference
# add stiffness too
import numpy as np
from scipy import signal, stats
from matplotlib import pyplot as plt
from all_functions import *
import pickle
from warnings import simplefilter
def exp2_learning_curves_cal_fcn(errors_all):
average_curve_mean = errors_all.mean(0).mean(1)
q0_curve_mean = errors_all[0].mean(1)
q1_curve_mean = errors_all[1].mean(1)
average_curve_std = errors_all.mean(0).std(1)
q0_curve_std = errors_all[0].std(1)
q1_curve_std = errors_all[1].std(1)
return average_curve_mean, q0_curve_mean, q1_curve_mean, average_curve_std, q0_curve_std, q1_curve_std
simplefilter(action='ignore', category=FutureWarning)
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
experiment_ID = "experiment_2_2way"
number_of_refinements = 5
errors_all_cyc_A_A = np.load("./results/{}/errors_all_cyc_A_A.npy".format(experiment_ID))
errors_all_cyc_A_B = np.load("./results/{}/errors_all_cyc_A_B.npy".format(experiment_ID))
errors_all_cyc_B_B = np.load("./results/{}/errors_all_cyc_B_B.npy".format(experiment_ID))
errors_all_cyc_B_A = np.load("./results/{}/errors_all_cyc_B_A.npy".format(experiment_ID))
errors_all_p2p_A_A = np.load("./results/{}/errors_all_p2p_A_A.npy".format(experiment_ID))
errors_all_p2p_A_B = np.load("./results/{}/errors_all_p2p_A_B.npy".format(experiment_ID))
errors_all_p2p_B_B = np.load("./results/{}/errors_all_p2p_B_B.npy".format(experiment_ID))
errors_all_p2p_B_A = np.load("./results/{}/errors_all_p2p_B_A.npy".format(experiment_ID))
number_of_mods = 8
errors_all = np.zeros((number_of_mods,)+errors_all_cyc_A_A.shape)
average_curve_mean_all = np.zeros([number_of_mods,number_of_refinements+1])
q0_curve_mean_all = np.zeros([number_of_mods,number_of_refinements+1])
q1_curve_mean_all= np.zeros([number_of_mods,number_of_refinements+1])
average_curve_std_all = np.zeros([number_of_mods,number_of_refinements+1])
q0_curve_std_all = np.zeros([number_of_mods,number_of_refinements+1])
q1_curve_std_all= np.zeros([number_of_mods,number_of_refinements+1])
errors_all = \
np.array([errors_all_cyc_A_A,
errors_all_cyc_B_A,
errors_all_cyc_B_B,
errors_all_cyc_A_B,
errors_all_p2p_A_A,
errors_all_p2p_B_A,
errors_all_p2p_B_B,
errors_all_p2p_A_B])
from numpy import zeros, ones, ndarray, average
from networkx import adjacency_matrix, Graph
from scipy.sparse import diags, lil_matrix, spmatrix
from scipy.sparse.linalg import lsqr
def forward_hierarchical_levels(graph, weight=None):
"""Returns the forward hierarchical levels of the nodes of a network as an array.
Parameters
----------
graph : Graph, array
A NetworkX graph or numpy/sparse array
weight : string or None
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
forward hierarchical levels : array
A Nx1 dimensional array indexed by the nodes, in the same order as graph.nodes if a graph object is given, otherwise indexed in the same order as the numpy/sparse array, holding the value of their forward hierarchical levels.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
if isinstance(graph, ndarray):
A = graph.transpose()
k_in = A.sum(axis=1)
elif isinstance(graph, spmatrix):
A = graph.transpose()
k_in = A.sum(axis=1).A1
elif isinstance(graph, Graph):
A = adjacency_matrix(graph, weight=weight).transpose()
k_in = A.sum(axis=1).A1
D_in = diags(k_in, 0)
L_in = D_in - A
return lsqr(L_in, k_in)[0]
def backward_hierarchical_levels(graph, weight=None):
"""Returns the backward hierarchical levels of the nodes of a network as an array. This is the transpose of the original graph, so out-edges now become in-edges.
Parameters
----------
graph : Graph, array
A NetworkX graph or numpy/sparse array
weight : string or None
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
backward hierarchical levels : array
A Nx1 dimensional array indexed by the nodes, in the same order as graph.nodes if a graph object is given, otherwise indexed in the same order as the numpy/sparse array, holding the value of their backward hierarchical levels.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
if isinstance(graph, ndarray):
A = graph
k_in = A.sum(axis=1)
elif isinstance(graph, spmatrix):
A = graph
k_in = A.sum(axis=1).A1
elif isinstance(graph, Graph):
A = adjacency_matrix(graph, weight=weight)
k_in = A.sum(axis=1).A1
D_in = diags(k_in, 0)
L_in = D_in - A
return lsqr(L_in, k_in)[0]
def hierarchical_levels(graph, weight=None):
"""Returns the hierarchical levels of the nodes of a network as an array which aids visualisation of the hierarchical structure in the network.
Parameters
----------
graph : Graph
A NetworkX graph or numpy/sparse array
weight : string or None
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
hierarchical levels : array
A Nx1 dimensional array indexed by the nodes, in the same order as graph.nodes, holding the value of their hierarchical levels.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
return 0.5*(forward_hierarchical_levels(graph, weight=weight) - backward_hierarchical_levels(graph, weight=weight))
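# A minimal usage sketch (not part of the original module; the toy graph below is made up
# purely for illustration, and this assumes a networkx/scipy version where adjacency_matrix
# returns a sparse matrix, as the functions above expect):
if __name__ == "__main__":
    import networkx as nx
    _toy = nx.DiGraph([(0, 1), (0, 2), (1, 3), (2, 3)])   # small feed-forward DAG
    print("forward hierarchical levels: ", forward_hierarchical_levels(_toy))
    print("backward hierarchical levels:", backward_hierarchical_levels(_toy))
    print("hierarchical levels:         ", hierarchical_levels(_toy))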
def sparse_forward_hierarchical_differences(graph, weight=None):
''' Just a copy of the forward hierarchical differences function that returns the sparse matrix, instead of the dense representation, in lil format'''
if isinstance(graph, (ndarray, spmatrix)):
A = graph.transpose()
elif isinstance(graph, Graph):
A = adjacency_matrix(graph, weight=weight).transpose()
s = forward_hierarchical_levels(graph, weight=weight)
TD = lil_matrix(A.shape, dtype=float)
for i, j in zip(A.nonzero()[0], A.nonzero()[1]):
TD[i,j] = s[i] - s[j]
return TD
def forward_hierarchical_differences(graph, weight=None):
"""Returns the forward hierarchical differences over the edges of a network in the form of a weighted adjacency matrix
Parameters
----------
graph : Graph, array
A NetworkX graph or numpy/sparse array
weight : string
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
forward hierarchical differences : array
A NxN dimensional array representing a weighted adjacency matrix, with the edge weights corresponding to the forward hierarchical differences.
The column index represents the source node of the edge and the row index represents the destination node of the edge.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
TD = sparse_forward_hierarchical_differences(graph, weight=weight)
return TD.toarray()
def sparse_backward_hierarchical_differences(graph, weight=None):
''' Just a copy of the backward hierarchical differences function that returns the sparse matrix, instead of the dense representation, in lil format'''
if isinstance(graph, (ndarray, spmatrix)):
A = graph
elif isinstance(graph, Graph):
A = adjacency_matrix(graph, weight=weight)
s = backward_hierarchical_levels(graph, weight=weight)
TD = lil_matrix(A.shape, dtype=float)
for i, j in zip(A.nonzero()[0], A.nonzero()[1]):
TD[i,j] = s[i] - s[j]
return TD
def backward_hierarchical_differences(graph, weight=None):
"""Returns the backward hierarchical differences over the edges of a network in the form of a weighted adjacency matrix
Parameters
----------
graph : Graph, array
A NetworkX graph or numpy/sparse array
weight : string or None
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
backward hierarchical differences : array
A NxN dimensional array representing a weighted adjacency matrix, with the edge weights corresponding to the backward hierarchical differences.
The column index represents the source node of the edge and the row index represents the destination node of the edge.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
TD = sparse_backward_hierarchical_differences(graph, weight=weight)
return TD.toarray()
def forward_hierarchical_incoherence(graph, weight=None):
"""Returns the forward hierarchical differences over the edges of a network in the form of a weighted adjacency matrix,
mean of the distribution of differences and standard deviation of this distribution.
Parameters
----------
graph : Graph, array
A NetworkX graph or numpy/sparse array
weight : string or None
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
forward hierarchical differences : sparse array
A NxN dimensional sparse array representing a weighted adjacency matrix, with the edge weights corresponding to the forward hierarchical differences.
The column index represents the source node of the edge and the row index represents the destination node of the edge.
mean hierarchical difference : float
The mean of the distribution of forward hierarchical differences.
forward hierarchical incoherence : float
The standard deviation of the distribution of forward hierarchical differences.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
if isinstance(graph, ndarray):
A = graph.transpose()
TD = forward_hierarchical_differences(graph, weight=weight)
m = average(TD, weights=A)
m2 = average(TD**2, weights=A)
elif isinstance(graph, spmatrix):
A = graph.transpose()
TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
m = (A.multiply(TD)).sum() / A.sum()
m2 = (A.multiply(TD.power(2))).sum() / A.sum()
elif isinstance(graph, Graph):
A = adjacency_matrix(graph, weight=weight).transpose()
TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
m = (A.multiply(TD)).sum() / A.sum()
m2 = (A.multiply(TD.power(2))).sum() / A.sum()
std = (m2 - m**2)**0.5
return TD, m, std
def backward_hierarchical_incoherence(graph, weight=None):
"""Returns the backward hierarchical differences over the edges of a network in the form of a weighted adjacency matrix,
mean of the distribution of differences and standard deviation of this distribution.
Parameters
----------
graph : Graph, array
A NetworkX graph or numpy/sparse array
weight : string
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
backward hierarchical differences : sparse array
A NxN dimensional sparse array representing a weighted adjacency matrix, with the edge weights corresponding to the backward hierarchical differences.
The column index represents the source node of the edge and the row index represents the destination node of the edge.
mean hierarchical difference : float
The mean of the distribution of backward hierarchical differences.
backward hierarchical incoherence : float
The standard deviation of the distribution of backward hierarchical differences.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
if isinstance(graph, ndarray):
A = graph
TD = backward_hierarchical_differences(graph, weight=weight)
m = average(TD, weights=A)
m2 = average(TD**2, weights=A)
elif isinstance(graph, spmatrix):
A = graph
TD = sparse_backward_hierarchical_differences(graph, weight=weight).tocsr()
m = (A.multiply(TD)).sum() / A.sum()
m2 = (A.multiply(TD.power(2))).sum() / A.sum()
elif isinstance(graph, Graph):
A = adjacency_matrix(graph, weight=weight)
TD = sparse_backward_hierarchical_differences(graph, weight=weight).tocsr()
m = (A.multiply(TD)).sum() / A.sum()
m2 = (A.multiply(TD.power(2))).sum() / A.sum()
std = (m2 - m**2)**0.5
return TD, m, std
# Returns a measure of equitable controllability over the full graph/network
def forward_democracy_coefficient(graph, weight=None):
"""Returns the forward democracy coeffcient of a graph, a topological network metric.
Parameters
----------
graph : Graph, array
A NetworkX graph or numpy/sparse array
weight : string
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
forward democracy coefficient : float
forward democracy coefficient of a graph
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
if isinstance(graph, ndarray):
A = graph.transpose()
TD = forward_hierarchical_differences(graph, weight=weight)
m = average(TD, weights=A)
elif isinstance(graph, spmatrix):
A = graph.transpose()
TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
m = (A.multiply(TD)).sum() / A.sum()
elif isinstance(graph, Graph):
A = adjacency_matrix(graph, weight=weight).transpose()
TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
m = (A.multiply(TD)).sum() / A.sum()
return 1 - m
# Returns a measure of equitable controllability over the full graph/network
def backward_democracy_coefficient(graph, weight=None):
"""Returns the backward democracy coeffcient of a graph, a topological network metric.
Parameters
----------
graph : Graph, array
A NetworkX graph or numpy/sparse array
weight : string or None
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
backward democracy coefficient : float
backward democracy coefficient of a graph
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
if isinstance(graph, ndarray):
A = graph
TD = backward_hierarchical_differences(graph, weight=weight)
m = average(TD, weights=A)
elif isinstance(graph, spmatrix):
A = graph
TD = sparse_backward_hierarchical_differences(graph, weight=weight).tocsr()
m = (A.multiply(TD)).sum() / A.sum()
elif isinstance(graph, Graph):
A = adjacency_matrix(graph, weight=weight)
TD = sparse_backward_hierarchical_differences(graph, weight=weight).tocsr()
m = (A.multiply(TD)).sum() / A.sum()
return 1 - m
def node_forward_influence_centrality(graph, node, weight=None):
"""Returns the forward influence centrality of the given node in the network.
Parameters
----------
graph : Graph, array
A NetworkX graph or numpy/sparse array
node : number
Label of the node as determined by the indexing of the graph.nodes() call or the index of the numpy/sparse array.
weight : string
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
forward influence centrality : float
A node's forward influence centrality.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
if isinstance(graph, ndarray):
A = graph.transpose()
index = node
TD = forward_hierarchical_differences(graph, weight=weight)
if A[index].sum() == 0:
m = 0
else:
m = average(TD[index], weights=A[index])
elif isinstance(graph, spmatrix):
A = graph.transpose()
index = node
TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
if A[index].sum() == 0:
m = 0
else:
m = (A[index].multiply(TD[index])).sum() / A[index].sum()
elif isinstance(graph, Graph):
A = adjacency_matrix(graph, weight=weight).transpose()
index = list(graph.nodes).index(node)
TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
if A[index].sum() == 0:
m = 0
else:
m = (A[index].multiply(TD[index])).sum() / A[index].sum()
return 1 - m
def node_backward_influence_centrality(graph, node, weight=None):
"""Returns the backward influence centrality of the given node in the network.
Parameters
----------
graph : Graph array
A NetworkX graph or numpy/sparse array
node : number
Label of the node as determined by the indexing of the graph.nodes() call or the index of the numpy/sparse array.
weight : string or None
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance, otherwise the default is None.
Returns
-------
backward influence centrality : float
A node's backward influence centrality.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
if isinstance(graph, ndarray):
A = graph
index = node
TD = backward_hierarchical_differences(graph, weight=weight)
if A[index].sum() == 0:
m = 0
else:
m = average(TD[index], weights=A[index])
elif isinstance(graph, spmatrix):
A = graph
index = node
TD = sparse_backward_hierarchical_differences(graph, weight=weight).tocsr()
if A[index].sum() == 0:
m = 0
else:
m = (A[index].multiply(TD[index])).sum() / A[index].sum()
elif isinstance(graph, Graph):
A = adjacency_matrix(graph, weight=weight)
index = list(graph.nodes).index(node)
TD = sparse_backward_hierarchical_differences(graph, weight=weight).tocsr()
if A[index].sum() == 0:
m = 0
else:
m = (A[index].multiply(TD[index])).sum() / A[index].sum()
return 1 - m
def forward_influence_centrality(graph, weight=None):
"""Returns the forward influence centrality of the nodes in a network as an array.
Parameters
----------
graph : Graph, array
A NetworkX graph or numpy/sparse array
weight : string or None
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance, otherwise the default is None.
Returns
-------
forward influence centrality : array
A Nx1 dimensional array indexed by the nodes, in the same order as graph.nodes, holding the value of their forward influence centralities.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
if isinstance(graph, ndarray):
A = graph.transpose()
TD = forward_hierarchical_differences(graph, weight=weight)
m = zeros((TD.shape[0], 1))
for i in range(m.shape[0]):
if A[i].sum() == 0:
m[i] = 0
else:
m[i] = average(TD[i], weights=A[i])
elif isinstance(graph, spmatrix):
A = graph.transpose()
TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
m = zeros((TD.shape[0], 1))
for i in range(m.shape[0]):
if A[i].sum() == 0:
m[i] = 0
else:
m[i] = (A[i].multiply(TD[i])).sum() / A[i].sum()
elif isinstance(graph, Graph):
A = adjacency_matrix(graph, weight=weight).transpose()
TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
m = zeros((TD.shape[0], 1))
#!/usr/bin/env python
"""
Test projection and grid routines. Generally test all available pyproj
projection presets but not basemap since they are slower.
"""
import tracpy
import tracpy.calcs
import os
import numpy as np
import matplotlib.tri as mtri
# List projection setup for use in tests
# pyproj-based setups
projpyproj = ['galveston', 'nwgom-pyproj']
projbasemap = ['nwgom'] # don't usually test with this since slow
def test_proj_init():
"""Test initialization of preset pyproj projections."""
# loop through projection presets
for projsetup in projpyproj:
# Get projection object
proj = tracpy.tools.make_proj(setup=projsetup)
assert proj
def test_grid_init():
"""Test initialization of grid."""
# loop through projection presets
for projsetup in projpyproj:
# Get projection object
proj = tracpy.tools.make_proj(setup=projsetup)
grid_filename = os.path.join('input', 'grid.nc')
# Read in grid
grid = tracpy.inout.readgrid(grid_filename, proj, usespherical=True)
assert grid
def test_proj_variant():
"""Test creating a projection with different than built-in variables."""
pass
def test_proj_iteration():
"""Test projection conversion back and forth between spaces.
Set up a projection, then convert between spaces and check that the
result is close to the starting values.
"""
# loop through projection presets
for projsetup in projpyproj:
# Get projection object. Can use either 'galveston' or 'nwgom-pyproj'
# built in projection setups to test quickly ('nwgom' is for use with
# usebasemap=True and thus is slow for testing).
proj = tracpy.tools.make_proj(setup=projsetup)
grid_filename = os.path.join('input', 'grid.nc')
# Read in grid
grid = tracpy.inout.readgrid(grid_filename, proj, usespherical=True)
# convert back and forth
lon_rho2, lat_rho2 = grid.proj(grid.x_rho, grid.y_rho, inverse=True)
print(grid.lat_rho[0, :])
print(lat_rho2[0, :])
print(grid.lon_rho[0, :])
print(lon_rho2[0, :])
assert np.allclose(grid.lat_rho, lat_rho2)
assert np.allclose(grid.lon_rho, lon_rho2)
def test_grid_triangulation_spherical():
"""Test that the grid triangulations are valid: spherical test cases."""
# loop through projection presets
for projsetup in projpyproj:
# Get projection object
proj = tracpy.tools.make_proj(setup=projsetup)
grid_filename = os.path.join('input', 'grid.nc')
# Read in grid
grid = tracpy.inout.readgrid(grid_filename, proj, usespherical=True)
assert mtri.LinearTriInterpolator(grid.trir, grid.x_rho.flatten())
def test_grid_triangulation_projected():
"""Test that the grid triangulations are valid: projected test cases."""
# loop through projection presets
for projsetup in projpyproj:
# Get projection object
proj = tracpy.tools.make_proj(setup=projsetup)
grid_filename = os.path.join('input', 'gridxy.nc')
# Read in grid
grid = tracpy.inout.readgrid(grid_filename, proj, usespherical=False)
assert mtri.LinearTriInterpolator(grid.trir, grid.x_rho.flatten())
def test_interpolation():
"""Test interpolation with grid space and projected grid the same.
Create a test case with the 'projected' grid in grid space coordinates.
When interpolating between them, there should be a shift because the
rho points in projected space are not in the same setup as grid coords.
"""
# Get projection object
proj = tracpy.tools.make_proj(setup='nwgom-pyproj')
grid_filename = os.path.join('input', 'gridij.nc')
# Read in grid
grid = tracpy.inout.readgrid(grid_filename, proj, usespherical=False)
# Do some interpolating
# projected grid to grid space, delaunay
X, Y, _ = tracpy.tools.interpolate2d(grid.x_rho[2, 3], grid.y_rho[2, 3],
grid, 'd_xy2ij')
# There is a shift between the rho grid and the grid space grid because
# of the staggered layout. Grid space counts from the u/v grid and
# therefore is a little different from the rho grid.
assert np.allclose(X, grid.x_rho[2, 3] + 0.5)
assert np.allclose(Y, grid.y_rho[2, 3] + 0.5)
# coding: utf-8
# In[1]:
import numpy as np
import random
from scipy.spatial import distance
from sklearn.preprocessing import StandardScaler
#Store Data Variables
import json
with open('feature_data.json', 'r') as f:
features = json.load(f)
from scipy.io import loadmat
train_idxs = loadmat('cuhk03_new_protocol_config_labeled.mat')['train_idx'].flatten()
query_idxs = loadmat('cuhk03_new_protocol_config_labeled.mat')['query_idx'].flatten()
labels = loadmat('cuhk03_new_protocol_config_labeled.mat')['labels'].flatten()
gallery_idxs = loadmat('cuhk03_new_protocol_config_labeled.mat')['gallery_idx'].flatten()
filelist = loadmat('cuhk03_new_protocol_config_labeled.mat')['filelist'].flatten()
camId = loadmat('cuhk03_new_protocol_config_labeled.mat')['camId'].flatten()
# In[9]:
#scaler = StandardScaler()
#features = scaler.fit_transform(features)
X = np.array(features)
print(X)
y = np.array(labels)
filelist = np.array(filelist)
camId = np.array(camId)
# In[10]:
mask_train = np.array(train_idxs).ravel()
mask_query = np.array(query_idxs).ravel()
mask_gallery = np.array(gallery_idxs).ravel()
mask_train = np.subtract(mask_train, 1)
mask_query = np.subtract(mask_query, 1)
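# Illustrative split using the 0-indexed masks built above (a sketch, not part of the
# original script; the gallery mask would need the same 1-to-0 shift before similar use):
X_train, y_train = X[mask_train], y[mask_train]
X_query, y_query = X[mask_query], y[mask_query]
print(X_train.shape, X_query.shape)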
# Copyright (c) 2019 ipychord3 authors
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module contains some of the old function with sf_ prefix
"""
import logging
from copy import deepcopy
from collections import namedtuple
import matplotlib.pyplot as plt
from matplotlib.figure import figaspect
from matplotlib.font_manager import FontProperties
from matplotlib.patches import Wedge
from matplotlib.path import Path
import skimage.draw as sidraw
import skimage.transform as sitransform
import numpy as np
from scipy import ndimage
from scipy.ndimage.filters import median_filter
from . import cmaps
# setup logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# we can't use TK backend, it will crash with python 3.4 on windows:
# https://github.com/ipython/ipython/issues/8921#issuecomment-151046708
# matplotlib.use('TkAgg')
# matplotlib.use('Qt4Agg')
# ----------------------------------------------------------------------------
# Miscellaneous
# ----------------------------------------------------------------------------
Circle = namedtuple('Circle', ['center', 'radius'])
Rectangle = namedtuple('Rectangle', ['corner', 'width', 'height'])
Polygon = namedtuple('Polygon', ['vertices'])
RingSector = namedtuple('RingSector', ['theta1', 'theta2', 'radius',
'width', 'center'])
def handle_close(event):
"""Handle the closing of a figure,
it stops the event loop so the program can continue
"""
fig = event.canvas.figure
logger.debug("stopping blocking event loop")
fig.canvas.stop_event_loop()
def prepare_patter_for_show(img, ulev=None, dlev=None, log=0, med=None,
over=None, neg=False, mag=False, clip=True):
"""prepare pattern for show
Scales the pattern and computes ``ulev`` and ``dlev``
A copy of the pattern is returned.
:param img: the image dictionary
:type img: dict
:param ulev: show the image in certain interval, ulev defines the upper level.
:type ulev: float
:param dlev: defines the down level
:type dlev: float
:param log: show the image in log scale coordinate
:log==0: show it in linear coordinate
:log==1: show it in log() coordinate
:log==2: show it in log(log()) coordinate
:type log: int
:param med: use median filter to estimate ulev, ``3 < med < 15`` otherwise ``med=5``
:type med: float
:param over: overestimation of the scales
:type over: float
:param neg: show the negative side of the image
:type neg: bool
:param mag: show magnitude of image
:type mag: bool
:param clip: clip negative values
:type clip: bool
:return: scaled pattern, ulev, dlev
:rtype: pattern dict, float, float
"""
img = deepcopy(img)
if mag:
img['map'] = np.absolute(img['map'])
if neg:
mask = img['map'] <= 0.0
img['map'] *= mask
img['map'] *= -1.0
if clip:
img['map'] = img['map'] * (img['map'] >= 0.0)
if log == 0:
logger.info("The image is shown in the linear coordinates")
if ulev is None:
if med is not None:
if not 3 < med < 15:
med = 5
ulev = ndimage.median_filter(img['map'], med).max()
logger.debug("ulev is None: estimated with median %g linear as %g" %(med, ulev))
else:
ulev = img['map'].max()
logger.debug("ulev is None: estimated directly as %g" %ulev)
logger.debug("linear ulev = %g" %ulev)
else:
logger.debug("ulev set by user as %g" %ulev)
if dlev is None:
dlev = img['map'].min()
logger.debug("dlev is None: estimated as %g" %dlev)
else:
logger.debug("dlev set as: %g" %dlev)
elif log == 1:
img['map'] = np.log(img['map']+1.0)
if ulev is None:
logger.debug("estimating ulev")
ulev = (img['map']+1.0).max()
dlev = (img['map']+1.0).min()
logger.debug("log scale used: dlev = %g, ulev = %g" % (dlev, ulev))
elif log == 2:
img['map'] = np.log(np.log(img['map']+1.0)+1.0)
if ulev is None:
ulev = (img['map']+1.0).max()
dlev = (img['map']+1.0).min()
logger.debug("double log scale used: dlev = %g, ulev = %g" % (dlev, ulev))
if over is not None:
ulev /= over
logger.info("overestimated ulev corrected to %g" % ulev)
return img, ulev, dlev
# ----------------------------------------------------------------------------
# Interactive functions
# ----------------------------------------------------------------------------
def show(img, ulev=None, dlev=None, log=0, med=None, win=None, block=False, show=True,
cmap='viridis', over=None, neg=False, mag=False, clip=True, scalefig=1.2):
""" show the image under different conditions
.. note:: This function can not show the positive and negative value
in one image except we use the absolute value to show all the
values in one image.
:param img: the image dictionary
:type img: dict
:param ulev: show the image in certain interval, ulev defines the upper level.
:type ulev: float
:param dlev: defines the down level
:type dlev: float
:param log: show the image in log scale coordinate
:log==0: show it in linear coordinate
:log==1: show it in log() coordinate
:log==2: show it in log(log()) coordinate
:type log: int
:param med: use median filter to estimate ulev, ``3 < med < 15`` otherwise ``med=5``
:type med: float
:param win:
:type win: matplotlib window, int
:param block: show the image in the interactive way and block the command line
:type block: bool
:param show: show the figure
:type show: bool
:param cmap: colormap to be passed to ``imshow``, delauft ``ipychord3.cmaps.lut05()``
:type cmap: matplotlib.colors.Colormap or string
:param over: overestimation of the scales
:type over: float
:param neg: show the negative side of the image
:type neg: bool
:param mag: show magnitude of image
:type mag: bool
:param clip: clip negative values
:type clip: bool
:param float scalefig: scale the figure by factor
:return: figure
:rtype: matplotlib figure object
"""
# protect the virgin img
kwargs_for_prepare = {'ulev': ulev, 'dlev': dlev, 'log': log, 'med': med,
'over': over, 'neg': neg, 'mag': mag, 'clip': clip}
img, ulev, dlev = prepare_patter_for_show(img, **kwargs_for_prepare)
# create figure
w, h = figaspect(img['map'])
if win is None:
fig = plt.figure(figsize=(scalefig*w, scalefig*h))
else:
fig = plt.figure(win, figsize=(scalefig*w, scalefig*h))
logger.info("dlev = %g ulev = %g" % (dlev, ulev))
# create the axis and show the image
ax = plt.Axes(fig, [0., 0., 1., 1.])
fig.add_axes(ax)
ax.imshow(img['map'], interpolation='nearest', vmin=dlev, vmax=ulev, cmap=cmap, origin='upper')
ax.set_aspect('equal')
ax.set_axis_off()
if 'filename' in img:
fig.canvas.set_window_title(img['filename']+'_sf_show')
elif 'title' in img:
fig.canvas.set_window_title(img['title'])
else:
fig.canvas.set_window_title('sf.show pattern')
fig._sf_kwargs_for_prepare = kwargs_for_prepare
if not show:
return fig
elif block:
# now we start an extra event loop for this figure
# it will block the program until fig.canvas.stop_event_loop() is called
fig.canvas.mpl_connect('close_event', handle_close)
fig.show()
logger.debug("starting blocking event loop")
fig.canvas.start_event_loop(timeout=-1)
else:
fig.show()
logger.debug("show non-blocking figure: starting event loop to force drawing of figure")
fig.canvas.start_event_loop(timeout=.01) # start extra event loop for a short time to
# force drawing of the figure
logger.debug("show non-blocking figure: event loop exited")
return fig
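# Example (illustrative sketch; the pattern dict and parameter values below are
# assumptions, any dict with a 'map' entry works):
#
#     demo_img = {'map': np.random.rand(256, 256), 'title': 'random test pattern'}
#     fig = show(demo_img, log=1, block=False, cmap='viridis')
#
# log=1 displays log(map+1); block=False returns immediately, so the figure can be
# passed on to the interactive helpers below (killcircle, killbox, ...).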
def killcircle(img, fig={}):
"""Select circles in figure and mask them in the pattern
:param img: pattern dict
:param fig: a dictionary with keyword arguments for ``sf.show``
or a matplotlib figure
.. note:: If you use ``sf.show`` the figure must be created using ``block=False``.
If a dict is passed ``block`` will be set to ``False``.
:returns: masked pattern, mask, circles
:raises RuntimeError: if no circles have been drawn
.. hint::
:Draw circle: left mouse button to set center, set radius by clicking left button again
:Modify circle:
use +/- keys to increase/decrease radius by 1 px
use arrow-keys to move center
:Delete circle: backspace
:Select circle: use "ctrl+[0..6]" to select one of the first 7 circles
"""
if isinstance(fig, dict):
logger.debug('creating figure')
fig['block'] = False
fig = show(img, **fig)
selector = SelectCircles(fig)
circles = selector.circles
imgmask, mask = mask_circles(img, circles)
return imgmask, mask, circles
def killbox(img, fig={}):
"""Select rectangles in figure and mask the pattern
:param img: pattern dict
:param fig: a dictionary with keyword arguments for ``sf.show``
or a matplotlib figure
.. note:: If you use ``sf.show`` the figure must be created using ``block=False``.
If a dict is passed ``block`` will be set to ``False``.
:returns: masked pattern, mask, rectangles
:raises RuntimeError: if no rectangles have been drawn
.. hint::
:Draw rectangle: left mouse button to set corner, set other corner by clicking left button again
:Modify rectangle:
use +/-/*/_ keys to increase/decrease x/y by 1 px
use arrow-keys to move the first corner
:Delete rectangle: backspace
"""
if isinstance(fig, dict):
logger.debug('creating figure')
fig['block'] = False
fig = show(img, **fig)
selector = SelectRectangles(fig)
rectangles = selector.rectangles
imgmask, mask = mask_rectangles(img, rectangles)
return imgmask, mask, rectangles
def killpoly(img, fig={}):
"""Select polygons in figure and mask them in the pattern
:param img: pattern dict
:param fig: a dictionary with keyword arguments for ``sf.show``
or a matplotlib figure
.. note:: If you use ``sf.show`` the figure must be created using ``block=False``.
If a dict is passed ``block`` will be set to ``False``.
:returns: masked pattern, mask, polygons
:raises RuntimeError: if no polygons have been drawn
.. hint::
:Draw polygon: left mouse button to set vertices, set last vertex with right mouse button
:Modify polygon:
use `shift-backspace` to delete a vertex
:Delete polygon: backspace
"""
if isinstance(fig, dict):
logger.debug('creating figure')
fig['block'] = False
fig = show(img, **fig)
selector = SelectPolygons(fig)
polygons = selector.polygons
imgmask, mask = mask_polygons(img, polygons)
return imgmask, mask, polygons
def kill_ringsector(img, fig={}, center=None):
"""Select ring sectors in figure and mask them in the pattern
:param img: pattern dict
:param fig: a dictionary with keyword arguments for ``sf.show``
or a matplotlib figure
.. note:: If you use ``sf.show`` the figure must be created using ``block=False``.
If a dict is passed ``block`` will be set to ``False``.
:returns: masked pattern, mask, masks, sectors
:raises RuntimeError: if no sectors have been drawn
.. hint::
:Draw sector: left mouse button to set vertices, adjust position with keyboard (see ctrl-h), press space to draw new sector
:Delete sector: backspace
"""
if isinstance(fig, dict):
logger.debug('creating figure')
fig['block'] = False
fig = show(img, **fig)
selector = SelectRingSectors(fig, center=center)
sectors = selector.sectors
imgmask, mask, masks = mask_ring_sectors(img, sectors)
return imgmask, mask, masks, sectors
def create_peak_masks(img, fig={}, center=None):
"""Select ring sectors in figure to select peaks and a corresponding reference.
This function returns a stacked list of masks [peak_mask, reference_mask]
:param img: pattern dict
:param fig: a dictionary with keyword arguments for ``sf.show``
or a matplotlib figure
.. note:: If you use ``sf.show`` the figure must be created using ``block=False``.
If a dict is passed ``block`` will be set to ``False``.
:returns: masks, sectors
:raises RuntimeError: if no sectors have been drawn
.. hint::
:Draw sector: left mouse button to set vertices, adjust position with keyboard (see ctrl-h), press space to draw new sector.
:Delete sector: backspace
"""
if isinstance(fig, dict):
logger.debug('creating figure')
fig['block'] = False
fig = show(img, **fig)
selector = SelectTwoRingSectors(fig, center=center)
sectors = selector.sectors
mask = make_peak_mask(img, sectors)
return mask, sectors
def rotate_pattern(img, fig={}, angle=None):
""" Rotates the pattern by interactively by 0.3° / 1° or non-interactive by ``angle``
:param img: pattern dict
:param fig: a dictionary with keyword arguments for ``sf.show``
or a matplotlib figure
.. note:: If you use ``sf.show`` the figure must be created using ``block=False``.
If a dict is passed ``block`` will be set to ``False``.
:param angle: if not ``None`` rotate pattern by ``angle`` without opening a figure window
:returns: rotated pattern, angle
.. hint::
:rotate clockwise: ``r``: 0.3° ``R``: 1°
:rotate anticlockwise: ``a``: 0.3° ``A``: 1°
"""
img = deepcopy(img)
if angle is not None:
img['map'] = ndimage.rotate(img['map'], angle, mode='constant', cval=0.0)
img['beam_position'] = midpnt(img)
else:
if isinstance(fig, dict):
logger.debug('creating figure')
fig['block'] = False
fig = show(img, **fig)
rot = RotatePattern(fig, img)
img = rot.img
angle = rot.angle
return img, angle
def debye(img, fig, center=None):
"""Draw the Debye-Scherrer rings, calculates diameter and center of each
and returns the mean center for mirroring
:param fig: a dictionary with keyword arguments for ``sf.show``
or a matplotlib figure
.. note:: If you use ``sf.show`` the figure must be created using ``block=False``.
If a dict is passed ``block`` will be set to ``False``.
:param center: set the beam position (x, y)
:returns: ``[center x, center y, circles]``
:raises RuntimeError: if no circles have been drawn
"""
if isinstance(fig, dict):
logger.debug('creating figure')
fig['block'] = False
fig = show(img, **fig)
print('Draw circles in Debye-Scherrer rings to calculate their diameter \
and center.')
selector = SelectCirclesSameCenter(fig, center)
centers = []
circles = selector.circles
if not circles:
raise RuntimeError("You have to create at least one circle")
logger.debug("length of circles = %d" % len(circles))
for i, circle in enumerate(circles):
d = 2 * circle.radius
center = circle.center
centers.append(center)
print("Circle %d: (%.4f, %.4f)@%.4f" % (i + 1, center[0], center[1], d))
return circles[0].center[0], circles[0].center[1], circles
# ----------------------------------------------------------------------------
# Modifier functions
# ----------------------------------------------------------------------------
def mask_circles(img, circles):
"""mask ``circle`` in ``img``
:param img: pattern dict
:param circles: list of ``sf.Circle``'s
:returns: masked image, mask
"""
imgmask = deepcopy(img)
mask = np.ones_like(imgmask['map'], dtype=np.uint8)
import tensorflow as tf
import numpy as np
import pickle, argparse, os
from os.path import join, exists
from SCFNet import Network
from semantic3d_test import ModelTester
from helper_ply import read_ply
from helper_dp import DataProcessing as DP
class cfg:
k_n = 16 # KNN
num_layers = 5 # Number of layers
num_points = 65536 # Number of input points
num_classes = 8 # Number of valid classes
sub_grid_size = 0.06 # preprocess_parameter
batch_size = 3 # batch_size during training
val_batch_size = 16 # batch_size during validation and test
train_steps = 1000 # Number of steps per epochs
val_steps = 100 # Number of validation steps per epoch
sub_sampling_ratio = [4, 4, 4, 4, 2] # sampling ratio of random sampling at each layer
d_out = [16, 64, 128, 256, 512] # feature dimension
noise_init = 3.5 # noise initial parameter
max_epoch = 100 # maximum epoch during training
learning_rate = 1e-2 # initial learning rate
lr_decays = {i: 0.95 for i in range(0, 500)} # decay rate of learning rate
train_sum_dir = 'train_log'
saving = True
saving_path = None
augment_scale_anisotropic = True
augment_symmetries = [True, False, False]
augment_rotation = 'vertical'
augment_scale_min = 0.8
augment_scale_max = 1.2
augment_noise = 0.001
augment_occlusion = 'none'
augment_color = 0.8
class Semantic3D:
def __init__(self):
self.name = 'Semantic3D'
self.path = './data/semantic3d'
self.label_to_names = {0: 'unlabeled',
1: 'man-made terrain',
2: 'natural terrain',
3: 'high vegetation',
4: 'low vegetation',
5: 'buildings',
6: 'hard scape',
7: 'scanning artefacts',
8: 'cars'}
self.num_classes = len(self.label_to_names)
self.label_values = np.sort([k for k, v in self.label_to_names.items()])
self.label_to_idx = {l: i for i, l in enumerate(self.label_values)}
self.ignored_labels = np.sort([0])
self.original_folder = join(self.path, 'original_data')
self.full_pc_folder = join(self.path, 'original_ply')
self.sub_pc_folder = join(self.path, 'input_{:.3f}'.format(cfg.sub_grid_size))
# Following KPConv to do the train-validation split
self.all_splits = [0, 1, 4, 5, 3, 4, 3, 0, 1, 2, 3, 4, 2, 0, 5]
self.val_split = 1
# Initial training-validation-testing files
self.train_files = []
self.val_files = []
self.test_files = []
cloud_names = [file_name[:-4] for file_name in os.listdir(self.original_folder) if file_name[-4:] == '.txt']
for pc_name in cloud_names:
if exists(join(self.original_folder, pc_name + '.labels')):
self.train_files.append(join(self.sub_pc_folder, pc_name + '.ply'))
else:
self.test_files.append(join(self.full_pc_folder, pc_name + '.ply'))
# elif '-reduced' in pc_name:
# self.test_files.append(join(self.full_pc_folder, pc_name + '.ply'))
self.train_files = np.sort(self.train_files)
self.test_files = np.sort(self.test_files)
for i, file_path in enumerate(self.train_files):
if self.all_splits[i] == self.val_split:
self.val_files.append(file_path)
self.train_files = np.sort([x for x in self.train_files if x not in self.val_files])
# Initiate containers
self.val_proj = []
self.val_labels = []
self.test_proj = []
self.test_labels = []
self.possibility = {}
self.min_possibility = {}
self.class_weight = {}
self.input_trees = {'training': [], 'validation': [], 'test': []}
self.input_colors = {'training': [], 'validation': [], 'test': []}
self.input_labels = {'training': [], 'validation': []}
# Ascii files dict for testing
self.ascii_files = {
'MarketplaceFeldkirch_Station4_rgb_intensity-reduced.ply': 'marketsquarefeldkirch4-reduced.labels',
'sg27_station10_rgb_intensity-reduced.ply': 'sg27_10-reduced.labels',
'sg28_Station2_rgb_intensity-reduced.ply': 'sg28_2-reduced.labels',
'StGallenCathedral_station6_rgb_intensity-reduced.ply': 'stgallencathedral6-reduced.labels',
'birdfountain_station1_xyz_intensity_rgb.ply': 'birdfountain1.labels',
'castleblatten_station1_intensity_rgb.ply': 'castleblatten1.labels',
'castleblatten_station5_xyz_intensity_rgb.ply': 'castleblatten5.labels',
'marketplacefeldkirch_station1_intensity_rgb.ply': 'marketsquarefeldkirch1.labels',
'marketplacefeldkirch_station4_intensity_rgb.ply': 'marketsquarefeldkirch4.labels',
'marketplacefeldkirch_station7_intensity_rgb.ply': 'marketsquarefeldkirch7.labels',
'sg27_station10_intensity_rgb.ply': 'sg27_10.labels',
'sg27_station3_intensity_rgb.ply': 'sg27_3.labels',
'sg27_station6_intensity_rgb.ply': 'sg27_6.labels',
'sg27_station8_intensity_rgb.ply': 'sg27_8.labels',
'sg28_station2_intensity_rgb.ply': 'sg28_2.labels',
'sg28_station5_xyz_intensity_rgb.ply': 'sg28_5.labels',
'stgallencathedral_station1_intensity_rgb.ply': 'stgallencathedral1.labels',
'stgallencathedral_station3_intensity_rgb.ply': 'stgallencathedral3.labels',
'stgallencathedral_station6_intensity_rgb.ply': 'stgallencathedral6.labels'}
self.load_sub_sampled_clouds(cfg.sub_grid_size)
def load_sub_sampled_clouds(self, sub_grid_size):
tree_path = join(self.path, 'input_{:.3f}'.format(sub_grid_size))
files = np.hstack((self.train_files, self.val_files, self.test_files))
for i, file_path in enumerate(files):
cloud_name = file_path.split('/')[-1][:-4]
print('Load_pc_' + str(i) + ': ' + cloud_name)
if file_path in self.val_files:
cloud_split = 'validation'
elif file_path in self.train_files:
cloud_split = 'training'
else:
cloud_split = 'test'
# elif file_path in self.test_files:
# cloud_split = 'test'
# Name of the input files
kd_tree_file = join(tree_path, '{:s}_KDTree.pkl'.format(cloud_name))
sub_ply_file = join(tree_path, '{:s}.ply'.format(cloud_name))
# read ply with data
data = read_ply(sub_ply_file)
sub_colors = np.vstack((data['red'], data['green'], data['blue'])).T
if cloud_split == 'test':
sub_labels = None
else:
sub_labels = data['class']
# Read pkl with search tree
with open(kd_tree_file, 'rb') as f:
search_tree = pickle.load(f)
self.input_trees[cloud_split] += [search_tree]
self.input_colors[cloud_split] += [sub_colors]
if cloud_split in ['training', 'validation']:
self.input_labels[cloud_split] += [sub_labels]
# Get validation and test re_projection indices
print('\nPreparing reprojection indices for validation and test')
for i, file_path in enumerate(files):
# get cloud name and split
cloud_name = file_path.split('/')[-1][:-4]
# Validation projection and labels
if file_path in self.val_files:
proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name))
with open(proj_file, 'rb') as f:
proj_idx, labels = pickle.load(f)
self.val_proj += [proj_idx]
self.val_labels += [labels]
# Test projection
if file_path in self.test_files:
proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name))
with open(proj_file, 'rb') as f:
proj_idx, labels = pickle.load(f)
self.test_proj += [proj_idx]
self.test_labels += [labels]
print('finished')
return
# Generate the input data flow
def get_batch_gen(self, split):
if split == 'training':
num_per_epoch = cfg.train_steps * cfg.batch_size
elif split == 'validation':
num_per_epoch = cfg.val_steps * cfg.val_batch_size
elif split == 'test':
num_per_epoch = cfg.val_steps * cfg.val_batch_size
# Reset possibility
self.possibility[split] = []
self.min_possibility[split] = []
self.class_weight[split] = []
# Random initialize
for i, tree in enumerate(self.input_trees[split]):
self.possibility[split] += [np.random.rand(tree.data.shape[0]) * 1e-3]
self.min_possibility[split] += [float(np.min(self.possibility[split][-1]))]
if split != 'test':
_, num_class_total = np.unique(np.hstack(self.input_labels[split]), return_counts=True)
self.class_weight[split] += [np.squeeze([num_class_total / np.sum(num_class_total)], axis=0)]
def spatially_regular_gen():
# Generator loop
for i in range(num_per_epoch): # num_per_epoch
# Choose the cloud with the lowest probability
cloud_idx = int(np.argmin(self.min_possibility[split]))
# choose the point with the minimum of possibility in the cloud as query point
point_ind = np.argmin(self.possibility[split][cloud_idx])
# Get all points within the cloud from tree structure
points = np.array(self.input_trees[split][cloud_idx].data, copy=False)
# Center point of input region
center_point = points[point_ind, :].reshape(1, -1)
# Add noise to the center point
noise = np.random.normal(scale=cfg.noise_init / 10, size=center_point.shape)
pick_point = center_point + noise.astype(center_point.dtype)
query_idx = self.input_trees[split][cloud_idx].query(pick_point, k=cfg.num_points)[1][0]
# Shuffle index
query_idx = DP.shuffle_idx(query_idx)
# Get corresponding points and colors based on the index
queried_pc_xyz = points[query_idx]
queried_pc_xyz[:, 0:2] = queried_pc_xyz[:, 0:2] - pick_point[:, 0:2]
queried_pc_colors = self.input_colors[split][cloud_idx][query_idx]
if split == 'test':
queried_pc_labels = np.zeros(queried_pc_xyz.shape[0])
queried_pt_weight = 1
else:
queried_pc_labels = self.input_labels[split][cloud_idx][query_idx]
queried_pc_labels = np.array([self.label_to_idx[l] for l in queried_pc_labels])
queried_pt_weight = np.array([self.class_weight[split][0][n] for n in queried_pc_labels])
# Update the possibility of the selected points
dists = np.sum(np.square((points[query_idx] - pick_point).astype(np.float32)), axis=1)
delta = np.square(1 - dists / np.max(dists)) * queried_pt_weight
self.possibility[split][cloud_idx][query_idx] += delta
self.min_possibility[split][cloud_idx] = float(np.min(self.possibility[split][cloud_idx]))
if True:
yield (queried_pc_xyz,
queried_pc_colors.astype(np.float32),
queried_pc_labels,
query_idx.astype(np.int32),
np.array([cloud_idx], dtype=np.int32))
gen_func = spatially_regular_gen
gen_types = (tf.float32, tf.float32, tf.int32, tf.int32, tf.int32)
gen_shapes = ([None, 3], [None, 3], [None], [None], [None])
return gen_func, gen_types, gen_shapes
def get_tf_mapping(self):
# Collect flat inputs
def tf_map(batch_xyz, batch_features, batch_labels, batch_pc_idx, batch_cloud_idx):
batch_features = tf.map_fn(self.tf_augment_input, [batch_xyz, batch_features], dtype=tf.float32)
input_points = []
input_neighbors = []
input_pools = []
input_up_samples = []
for i in range(cfg.num_layers):
neigh_idx = tf.py_func(DP.knn_search, [batch_xyz, batch_xyz, cfg.k_n], tf.int32)
sub_points = batch_xyz[:, :tf.shape(batch_xyz)[1] // cfg.sub_sampling_ratio[i], :]
pool_i = neigh_idx[:, :tf.shape(batch_xyz)[1] // cfg.sub_sampling_ratio[i], :]
up_i = tf.py_func(DP.knn_search, [sub_points, batch_xyz, 1], tf.int32)
input_points.append(batch_xyz)
input_neighbors.append(neigh_idx)
input_pools.append(pool_i)
input_up_samples.append(up_i)
batch_xyz = sub_points
input_list = input_points + input_neighbors + input_pools + input_up_samples
input_list += [batch_features, batch_labels, batch_pc_idx, batch_cloud_idx]
return input_list
return tf_map
# data augmentation
@staticmethod
def tf_augment_input(inputs):
xyz = inputs[0]
features = inputs[1]
theta = tf.random_uniform((1,), minval=0, maxval=2 * np.pi)
# Rotation matrices
c, s = tf.cos(theta), tf.sin(theta)
cs0 = tf.zeros_like(c)
cs1 = tf.ones_like(c)
R = tf.stack([c, -s, cs0, s, c, cs0, cs0, cs0, cs1], axis=1)
stacked_rots = tf.reshape(R, (3, 3))
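# R is the rotation about the vertical (z) axis:
#     [[cos t, -sin t, 0],
#      [sin t,  cos t, 0],
#      [    0,      0, 1]]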
# Apply rotations
transformed_xyz = tf.reshape(tf.matmul(xyz, stacked_rots), [-1, 3])
# Choose random scales for each example
min_s = cfg.augment_scale_min
max_s = cfg.augment_scale_max
if cfg.augment_scale_anisotropic:
s = tf.random_uniform((1, 3), minval=min_s, maxval=max_s)
else:
s = tf.random_uniform((1, 1), minval=min_s, maxval=max_s)
symmetries = []
for i in range(3):
if cfg.augment_symmetries[i]:
symmetries.append(tf.round(tf.random_uniform((1, 1))) * 2 - 1)
else:
symmetries.append(tf.ones([1, 1], dtype=tf.float32))
s *= tf.concat(symmetries, 1)
# Create N x 3 vector of scales to multiply with stacked_points
stacked_scales = tf.tile(s, [tf.shape(transformed_xyz)[0], 1])
# Apply scales
transformed_xyz = transformed_xyz * stacked_scales
noise = tf.random_normal(tf.shape(transformed_xyz), stddev=cfg.augment_noise)
transformed_xyz = transformed_xyz + noise
rgb = features[:, :3]
stacked_features = tf.concat([transformed_xyz, rgb], axis=-1)
return stacked_features
def init_input_pipeline(self):
print('Initiating input pipelines')
cfg.ignored_label_inds = [self.label_to_idx[ign_label] for ign_label in self.ignored_labels]
gen_function, gen_types, gen_shapes = self.get_batch_gen('training')
gen_function_val, _, _ = self.get_batch_gen('validation')
gen_function_test, _, _ = self.get_batch_gen('test')
self.train_data = tf.data.Dataset.from_generator(gen_function, gen_types, gen_shapes)
self.val_data = tf.data.Dataset.from_generator(gen_function_val, gen_types, gen_shapes)
self.test_data = tf.data.Dataset.from_generator(gen_function_test, gen_types, gen_shapes)
self.batch_train_data = self.train_data.batch(cfg.batch_size)
self.batch_val_data = self.val_data.batch(cfg.val_batch_size)
self.batch_test_data = self.test_data.batch(cfg.val_batch_size)
map_func = self.get_tf_mapping()
self.batch_train_data = self.batch_train_data.map(map_func=map_func)
self.batch_val_data = self.batch_val_data.map(map_func=map_func)
self.batch_test_data = self.batch_test_data.map(map_func=map_func)
self.batch_train_data = self.batch_train_data.prefetch(cfg.batch_size)
self.batch_val_data = self.batch_val_data.prefetch(cfg.val_batch_size)
self.batch_test_data = self.batch_test_data.prefetch(cfg.val_batch_size)
iter = tf.data.Iterator.from_structure(self.batch_train_data.output_types, self.batch_train_data.output_shapes)
self.flat_inputs = iter.get_next()
self.train_init_op = iter.make_initializer(self.batch_train_data)
self.val_init_op = iter.make_initializer(self.batch_val_data)
self.test_init_op = iter.make_initializer(self.batch_test_data)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='the number of GPUs to use [default: 0]')
parser.add_argument('--mode', type=str, default='train', help='options: train, test, vis')
parser.add_argument('--model_path', type=str, default='None', help='pretrained model path')
FLAGS = parser.parse_args()
GPU_ID = FLAGS.gpu
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = str(GPU_ID)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
Mode = FLAGS.mode
dataset = Semantic3D()
dataset.init_input_pipeline()
if Mode == 'train':
model = Network(dataset, cfg)
model.train(dataset)
elif Mode == 'test':
cfg.saving = False
model = Network(dataset, cfg)
if FLAGS.model_path != 'None':
chosen_snap = FLAGS.model_path
else:
chosen_snapshot = -1
logs = np.sort([os.path.join('results', f) for f in os.listdir('results') if f.startswith('Log')])
chosen_folder = logs[-1]
snap_path = join(chosen_folder, 'snapshots')
snap_steps = [int(f[:-5].split('-')[-1]) for f in os.listdir(snap_path) if f[-5:] == '.meta']
chosen_step = np.sort(snap_steps)
# coding=UTF-8
import os
import os.path
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import pickle
from PIL import Image
from load.load_mnist import load
from source.k_means import k_means, mask_one_hot, draw_heatmap, entropy_score
from sklearn.datasets import load_iris
def gaussian(x, mu, sigma):
d = len(mu)
dev = np.linalg.det(sigma) ** .5
coefficient = 1./(2. * np.pi) ** (d * .5) / dev
return coefficient * np.exp(-.5 * (x - mu).T @ np.linalg.inv(sigma) @ (x - mu))
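# gaussian() evaluates the multivariate normal density
#     N(x | mu, sigma) = exp( -(x-mu)^T sigma^-1 (x-mu) / 2 ) / ( (2 pi)^(d/2) |sigma|^(1/2) )
# Quick illustrative check (assumed usage): a single 2-D point at the mean of a
# unit-covariance Gaussian has density 1/(2 pi) ~ 0.159
#     x0, mu0, sigma0 = np.zeros((2, 1)), np.zeros((2, 1)), np.eye(2)
#     gaussian(x0, mu0, sigma0)  # -> array([[0.1591...]])
# For a whole data matrix (as in e_step below) the per-sample densities sit on the
# diagonal of the returned matrix, hence the np.diag(...) there.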
def e_step(data, pi, mu, sigma, K):
burden_rate = np.array([pi[k] * np.diag(gaussian(data, mu[:, k, np.newaxis], sigma[k])) for k in range(K)])
# N = n(data, mu, sigma) # 60000 * 60000
# burden_rate = pi * N / np.sum(pi * N) # 10 * 60000
return burden_rate.T
def m_step(data, burden_rate, K):
s = np.sum(burden_rate, axis=0)
from torch.utils.data import Dataset
import numpy as np
from utils import generate_file_name_from_labels
from constants import META_PATH, DATA_PATH, label_dict, folder_labels
from obspy import read
import os
import warnings
from kymatio.numpy import Scattering1D
import matplotlib.pyplot as plt
from PIL import Image
from pathlib import Path
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
class QuakeDataSet(Dataset):
def __init__(self, ld_files, ld_folders, ld_unlabeled, excerpt_len, transforms=[],
mode='train'):
"""
Args:
ld_files (list): A list of dictionaries where the dictionary contains parameters for
_load_data func
ld_folders (list): List of dictionaries where each dictionary contains parameters
for _load_data_from_folder func
ld_unlabeled (list) List of dictionaries where each dict contains params for
_load_unlabeled func
excerpt_len (int): Length of each trace in final training data. Each signal will be
padded/trimmed to this len.
transforms (list): A list of strings listing transforms to apply.
Supported transforms:
'wavelet' - Applies wavelet scattering transform
mode (str): mode can be 'train' or any other str. For train, random offset sampling
is performed.
"""
self.X, self.Y, self.X_names, self.X_users, self.X_ids, self.X_imgs = [], [], [], [], [], []
self.excerpt_len = excerpt_len
self.mode = mode
self.transforms = transforms
self.avg = False
if ld_files is not None:
for ld_file in ld_files:
x, y, x_names, x_ids, x_users, x_imgs = self._load_data(**ld_file)
self.X.extend(x)
self.Y.extend(y)
self.X_names.extend(x_names)
self.X_ids.extend(x_ids)
self.X_users.extend(x_users)
if not x_imgs:
self.X_imgs.extend(len(y)*[-1])
else:
self.X_imgs.extend(x_imgs)
if ld_folders is not None:
for ld_folder in ld_folders:
x, y, x_names, x_imgs = self._load_data_from_folder(**ld_folder)
self.X.extend(x)
self.Y.extend(y)
self.X_names.extend(x_names)
# In case of folders, we don't keep track of subject ids / users
# As folders are different data sources (not from Zooniverse)
self.X_ids.extend(len(y)*[-1])
self.X_users.extend(len(y)*[-1])
if not x_imgs:
self.X_imgs.extend(len(y)*[-1])
else:
self.X_imgs.extend(x_imgs)
if ld_unlabeled is not None:
for func_params in ld_unlabeled:
x, y, x_names, x_imgs = self._load_unlabeled(**func_params)
self.X.extend(x)
self.Y.extend(y)
self.X_names.extend(x_names)
self.X_ids.extend(len(y) * [-1])
self.X_users.extend(len(y) * [-1])
if not x_imgs:
self.X_imgs.extend(len(y) * [-1])
else:
self.X_imgs.extend(x_imgs)
# Convert to numpy
self.Y = np.array(self.Y, dtype='int64')
self.X_ids = np.array(self.X_ids, dtype='int64')
self.X_users = np.array(self.X_users, dtype='int64')
# Print data distribution
self.get_distribution()
# Initialize the transforms
# Note: This is too slow to perform dynamically; for now don't use inside dataset class.
for transform in self.transforms:
if transform == 'wavelet':
self.scattering = Scattering1D(J=6, Q=16, shape=self.excerpt_len)
def __getitem__(self, item):
# Pad the data to fixed excerpt length
cur_item = self.X[item]
t_item = []
for feature in cur_item:
if self.avg:
feature = self._avg_data(feature)
transformed_data = self._pad_data(feature)
t_item.append(transformed_data)
t_item = np.array(t_item, dtype='float32')
# Apply any specified transforms - Currently only supports wavelet transform
# Note: Too slow, so avoid using inside dataset class
for transform in self.transforms:
if transform == 'wavelet':
t_item = self.scattering(t_item)
return {'data': t_item, 'label': self.Y[item], 'sub_id': self.X_ids[item],
'user': self.X_users[item], 'img': np.array(self.X_imgs[item], dtype='float32')}
import numpy as np
from .representations import *
########### I/O UTILITIES ##############
def fix_pyscf_l1(fock, frame, orbs):
""" pyscf stores l=1 terms in a xyz order, corresponding to (m=1, 0, -1).
this converts into a canonical form where m is sorted as (-1, 0,1) """
idx = []
iorb = 0;
atoms = list(frame.symbols)
for atype in atoms:
cur=()
for ia, a in enumerate(orbs[atype]):
n,l,m = a
if (n,l) != cur:
if l == 1:
idx += [iorb+1, iorb+2, iorb]
else:
idx += range(iorb, iorb+2*l+1)
iorb += 2*l+1
cur = (n,l)
return fock[idx][:,idx]
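# For a single p shell the permutation built above is [1, 2, 0], i.e. the pyscf
# columns (x, y, z) are reordered to (y, z, x) = canonical m = (-1, 0, +1).
# Illustrative sketch on a dummy 3x3 block (standalone example, not used below):
#     perm = [1, 2, 0]
#     block = np.arange(9).reshape(3, 3)
#     canonical = block[perm][:, perm]   # rows and columns reordered consistently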
########### HAMILTONIAN MANIPULATION ###########
def lowdin_orthogonalize(fock, s):
"""
lowdin orthogonalization of a fock matrix computing the square root of the overlap matrix
"""
eva, eve = np.linalg.eigh(s)
sm12 = eve @ np.diag(1.0/np.sqrt(eva)) @ eve.T
return sm12 @ fock @ sm12
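# This is F' = S^(-1/2) F S^(-1/2); the eigenvalues of F' equal the generalized
# eigenvalues of (F, S). Illustrative check with random symmetric matrices
# (standalone sketch, the test matrices are assumptions):
#     rng = np.random.default_rng(0)
#     A = rng.normal(size=(4, 4))
#     s_test = A @ A.T + 4*np.eye(4)   # symmetric positive-definite "overlap"
#     f_test = (A + A.T) / 2           # symmetric dummy "fock"
#     f_orth = lowdin_orthogonalize(f_test, s_test)
#     # np.linalg.eigvalsh(f_orth) matches the generalized spectrum of (f_test, s_test)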
########## ORBITAL INDEXING ##############
def orbs_base(orbs):
# converts list of orbitals into an index to access different sub-blocks
norbs = 0
io_base = {}
el_dict = {}
for el in orbs.keys():
io_base[el] = norbs
cur_a = ()
for na, la, ma in orbs[el]:
if cur_a == (na,la): continue
cur_a = (na, la)
el_dict[(na+io_base[el], la)] = el
norbs+=1
return io_base, el_dict
############ matrix/block manipulations ###############
def matrix_to_blocks(fock, frame, orbs):
""" splits an atomic orbital matrix to (uncoupled momentum) orbital blocks. """
# maps atom types to different n indices
io_base, _ = orbs_base(orbs)
# prepares storage
diaglist = {}
offdlist_p = {}
offdlist_m = {}
heterolist = {}
# creates storage. these are the blocks of the matrix we'll have to fill up later
lorbs = []
for el_a in orbs.keys():
for ia, a in enumerate(orbs[el_a]):
na, la, ma = a
na += io_base[el_a] # adds element offset
for el_b in orbs.keys():
for ib, b in enumerate(orbs[el_b]):
nb, lb, mb = b
nb += io_base[el_b] # adds element offset
if ( (nb>na or (nb==na and lb>=la)) and
not (na,la,nb,lb) in lorbs ):
orb = (na,la,nb,lb)
lorbs.append(orb)
if el_a == el_b:
diaglist[orb] = []
offdlist_p[orb] = []
offdlist_m[orb] = []
else:
heterolist[orb] = []
# reads in and partitions into blocks
ki = 0
nat = len(frame.numbers)
for i in range(nat):
el_a = frame.symbols[i]
cur_a = ()
for ia, oa in enumerate(orbs[el_a]):
na, la, ma = oa
na += io_base[el_a]
# we read the Hamiltonian in blocks
if (cur_a == (na,la)): continue
cur_a = (na,la)
kj = 0
for j in range(nat):
el_b = frame.symbols[j]
cur_b = ()
for ib, ob in enumerate(orbs[el_b]):
nb, lb, mb = ob
nb += io_base[el_b] # adds element offset
if (cur_b == (nb,lb)): continue # only read at the beginning of each m block
cur_b = (nb,lb)
if (nb<na or (nb==na and lb<la)): continue
orb = (na,la,nb,lb)
blockij = fock[ki+ia:ki+ia+2*la+1, kj+ib:kj+ib+2*lb+1]
if (i==j):
diaglist[orb].append(blockij)
elif (i<j and el_a == el_b):
blockji= fock[kj+ia:kj+ia+2*la+1, ki+ib:ki+ib+2*lb+1]
offdlist_p[orb].append((blockij+blockji)/np.sqrt(2))
offdlist_m[orb].append((blockij-blockji)/np.sqrt(2))
elif(el_a != el_b):
heterolist[orb].append(blockij)
kj += len(orbs[el_b])
ki += len(orbs[el_a])
# stores as ndarray for more flexible indexing
for orb in lorbs:
for d in [diaglist, offdlist_p, offdlist_m, heterolist]:
if orb in d:
d[orb] = np.asarray(d[orb])
return dict( diag=diaglist, offd_p=offdlist_p, offd_m=offdlist_m, hete=heterolist)
def matrix_to_ij_indices(fock, frame, orbs):
""" Creates indices to the atoms involved in each block """
# maps atom types to different n indices
io_base, _ = orbs_base(orbs)
# prepares storage
diaglist = {}
offdlist_p = {}
offdlist_m = {}
heterolist = {}
# creates storage. these are the blocks of the matrix we'll have to fill up later
lorbs = []
for el_a in orbs.keys():
for ia, a in enumerate(orbs[el_a]):
na, la, ma = a
na += io_base[el_a] # adds element offset
for el_b in orbs.keys():
for ib, b in enumerate(orbs[el_b]):
nb, lb, mb = b
nb += io_base[el_b] # adds element offset
if ( (nb>na or (nb==na and lb>=la)) and
not (na,la,nb,lb) in lorbs ):
orb = (na,la,nb,lb)
lorbs.append(orb)
if el_a == el_b:
diaglist[orb] = []
offdlist_p[orb] = []
offdlist_m[orb] = []
else:
heterolist[orb] = []
# reads in and partitions into blocks
ki = 0
nat = len(frame.numbers)
for i in range(nat):
el_a = frame.symbols[i]
cur_a = ()
for ia, oa in enumerate(orbs[el_a]):
na, la, ma = oa
na += io_base[el_a]
# we read the Hamiltonian in blocks
if (cur_a == (na,la)): continue
cur_a = (na,la)
kj = 0
for j in range(nat):
el_b = frame.symbols[j]
cur_b = ()
for ib, ob in enumerate(orbs[el_b]):
nb, lb, mb = ob
nb += io_base[el_b] # adds element offset
if (cur_b == (nb,lb)): continue # only read at the beginning of each m block
cur_b = (nb,lb)
if (nb<na or (nb==na and lb<la)): continue
orb = (na,la,nb,lb)
blockij = (i,j)
if (i==j):
diaglist[orb].append(blockij)
elif (i<j and el_a == el_b):
offdlist_p[orb].append(blockij)
offdlist_m[orb].append(blockij)
elif(el_a != el_b):
heterolist[orb].append(blockij)
kj += len(orbs[el_b])
ki += len(orbs[el_a])
# stores as ndarray for more flexible indexing
for orb in lorbs:
for d in [diaglist, offdlist_p, offdlist_m, heterolist]:
if orb in d:
d[orb] = np.asarray(d[orb])
return dict( diag=diaglist, offd_p=offdlist_p, offd_m=offdlist_m, hete=heterolist)
def blocks_to_matrix(blocks, frame, orbs):
""" assembles (uncoupled momentum) orbital blocks into a matrix form
NB - the l terms are stored in canonical order, m=-l..l """
io_base, _ = orbs_base(orbs)
norbs = 0
for el in list(frame.symbols):
norbs+= len(orbs[el])
nat = len(list(frame.symbols))
unfock = np.zeros((norbs, norbs))
bidx = {}
for k in blocks.keys():
bidx[k] = {}
for bk in blocks[k].keys():
bidx[k][bk] = 0
cur_a = ()
ki = 0
nat = len(frame.numbers)
for i in range(nat):
el_a = frame.symbols[i]
cur_a = ()
for ia, oa in enumerate(orbs[el_a]):
na, la, ma = oa
na += io_base[el_a]
# we read the Hamiltonian in blocks
if (cur_a == (na,la)): continue
cur_a = (na,la)
kj = 0
for j in range(nat):
el_b = frame.symbols[j]
cur_b = ()
for ib, ob in enumerate(orbs[el_b]):
nb, lb, mb = ob
nb += io_base[el_b] # adds element offset
if (cur_b == (nb,lb)): continue # only read at the beginning of each m block
cur_b = (nb,lb)
if (nb<na or (nb==na and lb<la)): continue
orb = (na,la,nb,lb)
if (i==j):
blockij = blocks['diag'][orb][bidx['diag'][orb]]
unfock[ki+ia:ki+ia+2*la+1, kj+ib:kj+ib+2*lb+1] = blockij
unfock[ki+ib:ki+ib+2*lb+1, kj+ia:kj+ia+2*la+1] = blockij.T
bidx['diag'][orb] += 1
elif (el_a == el_b and i<j):
blockij = ( ( blocks['offd_p'][orb][bidx['offd_p'][orb]] if orb in blocks['offd_p'] else 0)
+ ( blocks['offd_m'][orb][bidx['offd_m'][orb]] if orb in blocks['offd_m'] else 0)
)/ np.sqrt(2)
from __future__ import print_function
import os, sys
import pickle
import time
import glob
import numpy as np
import torch
from model import PVSE
from loss import cosine_sim, order_sim
from vocab import Vocabulary
from data import get_test_loader
from logger import AverageMeter
from option import parser, verify_input_args
ORDER_BATCH_SIZE = 100
def encode_data(model, data_loader, use_gpu=False):
"""Encode all images and sentences loadable by data_loader"""
# switch to evaluate mode
model.eval()
use_mil = model.module.mil if hasattr(model, 'module') else model.mil
# numpy array to keep all the embeddings
img_embs, txt_embs = None, None
for i, data in enumerate(data_loader):
img, txt, txt_len, ids = data
if torch.cuda.is_available():
img, txt, txt_len = img.cuda(), txt.cuda(), txt_len.cuda()
# compute the embeddings
img_emb, txt_emb, _, _, _, _ = model.forward(img, txt, txt_len)
del img, txt, txt_len
# initialize the output embeddings
if img_embs is None:
if use_gpu:
emb_sz = [len(data_loader.dataset), img_emb.size(1), img_emb.size(2)] \
if use_mil else [len(data_loader.dataset), img_emb.size(1)]
img_embs = torch.zeros(emb_sz, dtype=img_emb.dtype, requires_grad=False).cuda()
txt_embs = torch.zeros(emb_sz, dtype=txt_emb.dtype, requires_grad=False).cuda()
else:
emb_sz = (len(data_loader.dataset), img_emb.size(1), img_emb.size(2)) \
if use_mil else (len(data_loader.dataset), img_emb.size(1))
img_embs = np.zeros(emb_sz)
txt_embs = np.zeros(emb_sz)
# preserve the embeddings by copying from gpu and converting to numpy
img_embs[ids] = img_emb if use_gpu else img_emb.data.cpu().numpy().copy()
txt_embs[ids] = txt_emb if use_gpu else txt_emb.data.cpu().numpy().copy()
return img_embs, txt_embs
def i2t(images, sentences, nreps=1, npts=None, return_ranks=False, order=False, use_gpu=False):
"""
Images->Text (Image Annotation)
Images: (nreps*N, K) matrix of images
Captions: (nreps*N, K) matrix of sentences
"""
if use_gpu:
assert not order, 'Order embedding not supported in GPU mode'
if npts is None:
npts = int(images.shape[0] / nreps)
index_list = []
ranks, top1 = np.zeros(npts), np.zeros(npts)
for index in range(npts):
# Get query image
im = images[nreps * index]
im = im.reshape((1,) + im.shape)
# Compute scores
if use_gpu:
if len(sentences.shape) == 2:
sim = im.mm(sentences.t()).view(-1)
else:
_, K, D = im.shape
sim_kk = im.view(-1, D).mm(sentences.view(-1, D).t())
sim_kk = sim_kk.view(im.size(0), K, sentences.size(0), K)
sim_kk = sim_kk.permute(0,1,3,2).contiguous()
sim_kk = sim_kk.view(im.size(0), -1, sentences.size(0))
sim, _ = sim_kk.max(dim=1)
sim = sim.flatten()
else:
if order:
if index % ORDER_BATCH_SIZE == 0:
mx = min(images.shape[0], nreps * (index + ORDER_BATCH_SIZE))
im2 = images[nreps * index:mx:nreps]
sim_batch = order_sim(torch.Tensor(im2).cuda(), torch.Tensor(sentences).cuda())
sim_batch = sim_batch.cpu().numpy()
sim = sim_batch[index % ORDER_BATCH_SIZE]
else:
sim = np.tensordot(im, sentences, axes=[2, 2]).max(axis=(0,1,3)).flatten() \
if len(sentences.shape) == 3 else np.dot(im, sentences.T).flatten()
if use_gpu:
_, inds_gpu = sim.sort()
inds = inds_gpu.cpu().numpy().copy()[::-1]
else:
inds = np.argsort(sim)[::-1]
index_list.append(inds[0])
# Score
rank = 1e20
for i in range(nreps * index, nreps * (index + 1), 1):
tmp = np.where(inds == i)[0][0]
if tmp < rank:
rank = tmp
ranks[index] = rank
top1[index] = inds[0]
# Compute metrics
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
if return_ranks:
return (r1, r5, r10, medr, meanr), (ranks, top1)
else:
return (r1, r5, r10, medr, meanr)
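# Illustrative call pattern (shapes and sizes are assumptions; random embeddings
# are used purely as a smoke test, real embeddings come from encode_data()):
#     img_embs = np.random.rand(5*10, 1024)   # nreps=5 captions per image, 10 images
#     txt_embs = np.random.rand(5*10, 1024)
#     r1, r5, r10, medr, meanr = i2t(img_embs, txt_embs, nreps=5)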
def t2i(images, sentences, nreps=1, npts=None, return_ranks=False, order=False, use_gpu=False):
"""
Text->Images (Image Search)
Images: (nreps*N, K) matrix of images
Captions: (nreps*N, K) matrix of sentences
"""
if use_gpu:
assert not order, 'Order embedding not supported in GPU mode'
if npts is None:
npts = int(images.shape[0] / nreps)
if use_gpu:
ims = torch.stack([images[i] for i in range(0, len(images), nreps)])
else:
ims = np.array([images[i] for i in range(0, len(images), nreps)])
ranks, top1 = np.zeros(nreps * npts), np.zeros(nreps * npts)
import numpy as np
from multiprocessing import Pool
from functools import partial
from tqdm import trange
from sys import exit
from PreFRBLE.convenience import *
from PreFRBLE.parameter import *
from PreFRBLE.physics import *
from PreFRBLE.LikelihoodFunction import LikelihoodFunction
from PreFRBLE.Scenario import Scenario
############################################################################
############### MATHEMATICAL LIKELIHOOD STANDARD OPERATIONS ################
############################################################################
### !!! deprecated, remove
def Likelihood( data=np.arange(1,3), bins=10, range=None, density=True, log=False, weights=None, **kwargs ):
""" wrapper for numpy.histogram that allows for log-scaled probability density function, used to compute likelihood function """
if log:
if range is not None:
range = np.log10(range)
h, x = np.histogram( np.log10(np.abs(data)), bins=bins, range=range, weights=weights )
x = 10.**x
h = h.astype('float64')
if density:
h = h / ( np.sum( h )*np.diff(x) )
else:
if range is None:
range = ( np.min(data), np.max(data) )
h, x = np.histogram( data, bins=bins, range=range, density=density, weights=weights )
L = LikelihoodFunction( P=h, x=x, **kwargs )
return L
# return h, x
Histogram = Likelihood ## old name, replace everywhere
### !!! deprecated, remove
def LikelihoodSmooth( P=[], x=[], dev=[], mode='MovingAverage' ):
"""
Smooth likelihood function P(x)
modes available:
MovingAverage : smooth using moving average over 5 neighbouring boxes
"""
norm = LikelihoodNorm( P=P, x=x, dev=dev )
if mode == 'MovingAverage':
box_pts = 5
P = np.convolve( P, np.ones(box_pts)/box_pts, mode='same' )
## smoothing doesn't conserve normalization
P *= norm/LikelihoodNorm( P=P, x=x, dev=dev )
res = [P, x]
if len(dev)>0:
res.append(dev)
return res
### !!! deprecated, remove
def LikelihoodNorm( P=[], x=[], dev=[] ):
""" Compute norm of likelihood function P """
return np.sum(P*np.diff(x))
### !!! deprecated, remove
def LikelihoodDeviation( P=[], x=[], N=1 ):
""" compute relative deviation (Poisson noise) of likelihood function of individual model obtained from sample of N events """
res = ( P*np.diff(x)*N )**-0.5
res[ np.isinf(res) + np.isnan(res)] = 0
return res
### !!! deprecated, remove
def Likelihoods( measurements=[], P=[], x=[], dev=[], minimal_likelihood=0., density=False ):
"""
returns likelihoods for given measurements according to likelihood function given by P and x
Parameters
---------
measurements : array_like
measurements for which the likelihood shall be returned
P : array_like, shape(N)
likelihood function
x : array_like, shape(N+1)
range of bins in likelihood function
dev : array_like, shape(N), optional
deviation of likelihood function, if given, return deviation of returned likelihoods
minimal_likelihood : float
value returned in case that measurement is outside x
density : boolean
if True, return probability density ( P ) instead of probability ( P*dx )
Returns
-------
likelihoods: numpy array, shape( len(measurements) )
likelihood of measurements = value of P*dx for bin, where measurement is found
"""
likelihoods = np.zeros( len( measurements ) ) ## collector for likelihoods of measurements
deviations = likelihoods.copy()
prob = P if density else P*np.diff(x) ## probability for obtaining measure from within bin
isort = np.argsort( measurements ) ## sorted order of measurements
i = 0 ## marker for current bin
## for each measurement (in ascending order)
for m, i_s in zip( np.array(measurements)[isort], isort ):
## check bins >= previous results
for xi in x[i:]:
## whether measure is inside
if m >= xi: ## measure is bigger than current bin range
## set marker and continue with next bin
i += 1
continue
else: ## otherwise, measure is in the bin
## put result in correct place and stop checking bins
likelihoods[i_s] = prob[i-1] if i > 0 else minimal_likelihood ## if that was the lowest bound, probability is ->zero if measurement is outside the range of P, i. e. P~0
if len(dev):
deviations[i_s] = dev[i-1] if i > 0 else 1
break ## continue with the next measurement
else:
## if measure is bigger than the last bin
likelihoods[i_s] = minimal_likelihood ## probability is zero if measurement is outside the range of P, i. e. P~0
if len(dev):
deviations[i_s] = 1
# likelihoods = np.array( likelihoods )
if len(dev):
return likelihoods, deviations
else:
return likelihoods
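# Illustrative sketch (assumed standalone example): a flat density over [1, 100]
# with 99 unit-width bins is normalized by P = 1/99, so
#     x_demo = np.linspace(1., 100., 100)
#     P_demo = np.full(99, 1./99.)
#     Likelihoods(measurements=[0.5, 50., 200.], P=P_demo, x=x_demo)
#     # -> [0., ~0.0101, 0.]   (out-of-range measurements get minimal_likelihood)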
### !!! deprecated, remove
def RandomSample( N=1, P=np.array(0), x=np.array(0), log=True ):
"""
returns sample of size N according to likelihood function P(x)
Parameter
---------
P, x : array-like
renormalized probability density function, i. e. sum(P*np.diff(x))=1
log: indicates whether x is log-scaled
Output
------
res : list of N values, distributed according to P(x)
"""
Pd = P*np.diff(x)
if np.round( np.sum(Pd), 4) != 1:
exit( "function is not normalized, %f != 1" % (np.sum(Pd)) )
f = Pd.max()
lo, hi = x[0], x[-1]
if log:
lo, hi = np.log10( [lo,hi] )
res = []
while len(res) < N:
## create random uniform sample in the desired range
r = np.random.uniform( high=hi, low=lo, size=N )
if log:
r = 10.**r
## randomly reject candidates with chance = 1 - P to recreate P
z = np.random.uniform( size=N )
## obtain probability for bins where measures are found
p = Likelihoods( r, P/f, x, density=False ) ### renormalize pdf to maximum value of probability, such that values at maximum probability are never rejected. This minimizes the number of rejected random draws
res.extend( r[ np.where( z < p )[0] ] )
return res[:N]
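# Illustrative sketch (assumed standalone example) of the rejection sampler on the
# same flat density; log=False because x is linearly spaced:
#     x_demo = np.linspace(1., 100., 100)
#     P_demo = np.full(99, 1./99.)
#     sample = RandomSample(N=1000, P=P_demo, x=x_demo, log=False)
#     # np.histogram(sample, bins=x_demo, density=True)[0] scatters around 1/99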
### !!! deprecated, remove
def LikelihoodShift( P=[], x=[], dev=[], shift=1. ):
""" Shift x-values of likelihood function and renormalize accordingly: P'(x|shift) = shift * P(shift*x|1) """
# x' = shift*x, thus P' = P dx/dx' = P / shift
res = [ P/shift, x*shift ]
if len(dev):
res.append(dev)
return res
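# Quick check (assumed standalone example): shifting x by a factor leaves the norm
# sum(P*dx) unchanged:
#     x_demo = np.linspace(1., 100., 100)
#     P_demo = np.full(99, 1./99.)
#     P_s, x_s = LikelihoodShift(P=P_demo, x=x_demo, shift=2.)
#     # np.sum(P_s*np.diff(x_s)) == np.sum(P_demo*np.diff(x_demo)) == 1.0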
def LikelihoodsAdd( *Ls, shrink=False, weights=None, dev_weights=None, renormalize=1, min=None, max=None, smooth=True, scenario=False ):
"""
add together several likelihood functions
Parameters
----------
Ls : array-like
list of LikelihoodFunction objects to be added
shrink : integer
determine number of bins in result, otherwise use size of Ps[0]
weights : array-like, len=Ls.shape[0], optional
provide weights for the likelihood functions, assumed =1 if not given
dev_weights : array-like, len=Ps.shape[0], optional
provide deviation for the weights, used to compute error of result
renormalize : float
renormalization of result
min, max : float
indicate minimum and/or maximum value of summed function
smooth : boolean
if True, return smoothed L ( LikelihoodSmooth )
scenario : Scenario-object
scenario described by summed LikelihoodFunction
Returns
-------
LikelihoodFunction : summed likelihood
"""
if len(Ls) == 1:
## if only one function is given, return the original
L = Ls[0]
### !!! deprecated
if renormalize: ## maybe renormalized to new value
L.Renormalize( renormalize ) ### !!! deprecated
if smooth: ### !!! deprecated
L.Smooth()
return L
## new function support
l = len(Ls[0].P)
if shrink:
l = shrink
x_min = min if min else np.min( [L.x.min() for L in Ls] )
x_max = max if max else np.max( [L.x.max() for L in Ls] )
if Ls[0].log:
x = 10.**np.linspace( np.log10(x_min), np.log10(x_max), l+1 )
else:
x = np.linspace( x_min, x_max, l+1 )
if weights is None:
weights = np.ones( len(Ls) )
if dev_weights is None:
dev_weights = np.zeros( len(Ls) )
P = np.zeros( l )
dev = P.copy()
## for each function
for i_L, (L, w) in enumerate( zip(Ls, weights) ):
## loop through target bins
for ib, (b0, b1) in enumerate( zip( x, x[1:] ) ):
## start where bins are not too low
if b1 < L.x[0]:
continue
## stop when bins become too high
if b0 > L.x[-1]:
break
## identify contributing bins
ix, = np.where( ( L.x[:-1] < b1 ) * ( L.x[1:] > b0 ) )
if len(ix) == 0:
continue ## skip if none
elif len(ix) == 1:
P[ib] += w * L.P[ix] ## add if one
if len(L.dev)>0:
dev[ib] += (w * L.P[ix])**2 * ( L.dev[ix]**2 + dev_weights[i_L]**2 )
else: ## compute average of contributing bins
## get corresponding ranges
x_ = L.x[np.append(ix,ix[-1]+1)].copy()
## restrict range to within target bin
x_[0], x_[-1] = b0, b1
## add weighed average to target likelihood
add = w * np.sum( L.P[ix]*np.diff(x_) ) / (b1-b0)
P[ib] += add
if len(L.dev)>0:
dev[ib] += add**2 * ( np.sum( ( L.dev[ix]*L.P[ix]*np.diff(x_) )**2 ) /np.sum( ( L.P[ix]*np.diff(x_) )**2 ) + dev_weights[i_L]**2 )
dev = np.sqrt(dev)/P
dev[ np.isnan(dev) ] = 0
L = LikelihoodFunction( P=P, x=x, dev=dev, typ=Ls[0].typ, measure=Ls[0].measure ) ### this L is still missing scenario
L.Renormalize( renormalize )
if smooth:
L.Smooth()
return L
### !!! old version, remove
def LikelihoodsAdd_old( Ps=[], xs=[], devs=[], log=True, shrink=False, weights=None, dev_weights=None, renormalize=False, min=None, max=None, smooth=True ):
"""
add together several likelihood functions
Parameters
----------
Ps : array-like
list of likelihood functions
xs : array-like
list of bin ranges of likelihood functions
devs : array-like, optional
list of deviations of likelihood functions, used to compute deviation of result
log : boolean
indicate whether xs are log-scaled
shrink : integer
determine number of bins in result, otherwise use size of Ps[0]
weights : array-like, len=Ps.shape[0], optional
provide weights for the likelihood functions
dev_weights : array-like, len=Ps.shape[0], optional
provide deviation for the weights
renormalize : float, optional
renormalization of result
min, max : float
indicate minimum and/or maximum value of added function
smooth : boolean
if True, return smoothed P ( LikelihoodSmooth )
Returns
-------
P, x, (dev) : summed likelihood function values, range, (deviation)
"""
if len(Ps) == 1:
## if only one function is given, return the original
P, x = Ps[0], xs[0]
norm = 1
if renormalize: ## maybe renormalized to new value
norm = renormalize/np.sum( P*np.diff(x) )
P *= norm
if smooth:
P, x = LikelihoodSmooth( P=P, x=x )
res = [P, x]
if len(devs) > 0:
res.append( devs[0] )
return res
## new function support
l = len(Ps[0])
if shrink:
l = shrink
x_min = min if min else np.min(xs)
x_max = max if max else np.max(xs)
if log:
x = 10.**np.linspace( np.log10(x_min), np.log10(x_max), l+1 )
else:
x = np.linspace( x_min, x_max, l+1 )
if weights is None:
weights = np.ones( len(Ps) )
if dev_weights is None:
dev_weights = np.zeros( len(Ps) )
P = np.zeros( l )
dev = P.copy()
## for each function
for i_f, (f, x_f, w) in enumerate( zip(Ps, xs, weights) ):
## loop through target bins
for ib, (b0, b1) in enumerate( zip( x, x[1:] ) ):
## stop when bins become too high
if b0 > x_f[-1]:
break
## identify contributing bins
ix, = np.where( ( x_f[:-1] < b1 ) * ( x_f[1:] > b0 ) )
if len(ix) == 0:
continue ## skip if none
elif len(ix) == 1:
P[ib] += w * f[ix] ## add if one
if len(devs)>0:
dev[ib] += (w * f[ix])**2 * ( devs[i_f][ix]**2 + dev_weights[i_f]**2 )
else: ## compute average of contributing bins
## get corresponding ranges
x_ = x_f[np.append(ix,ix[-1]+1)]
## restrict range to within target bin
x_[0], x_[-1] = b0, b1
## add weighed average to target likelihood
add = w * np.sum( f[ix]*np.diff(x_) ) / (b1-b0)
P[ib] += add
if len(devs)>0:
dev[ib] += add**2 * ( np.sum( ( devs[i_f][ix]*f[ix]*np.diff(x_) )**2 ) /np.sum( ( f[ix]*np.diff(x_) )**2 ) + dev_weights[i_f]**2 )
if len(devs)>0:
dev = np.sqrt(dev)/P
dev[ np.isnan(dev) ] = 0
if renormalize:
P *= renormalize/np.sum( P*np.diff(x) )
if smooth:
P, x, dev = LikelihoodSmooth( P=P, x=x, dev=dev )
res = [P,x]
if len(devs)>0:
res.append( dev )
return res
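# Illustrative sketch for the legacy array-based interface (assumed standalone
# example; the object-based LikelihoodsAdd above is called analogously with
# LikelihoodFunction instances):
#     x_demo = 10.**np.linspace(0., 2., 51)
#     P_demo = 1. / (np.diff(x_demo) * 50.)   # flat probability of 1/50 per bin
#     P_sum, x_sum = LikelihoodsAdd_old(Ps=[P_demo, P_demo], xs=[x_demo, x_demo],
#                                       weights=[0.5, 0.5], renormalize=1.)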
### !!! deprecated, remove
def LikelihoodShrink( P=np.array(0), x=np.array(0), dev=[], bins=100, log=True, renormalize=False, **kwargs_LikelihoodsAdd ):
""" reduce number of bins in likelihood function, contains normalization """
### Actual work is done by LikelihoodsAdd_old, which adds up several P to new range with limited number of bins
### to shrink function, add P=0 with identical range
devs = [dev,np.zeros(len(dev))] if len(dev) > 0 else []
renorm = renormalize if renormalize else np.sum( P*np.diff(x) )
return LikelihoodsAdd_old( Ps=[P, np.zeros(len(P))], xs=[x,x], devs=devs, shrink=bins, log=log, renormalize=renorm, **kwargs_LikelihoodsAdd )
def LikelihoodsConvolve( *Ls, dev=True, N=50000, absolute=False, renormalize=False, smooth=True, shrink=False ):
"""
compute convolution of likelihood functions P in brute force method, i. e. add samples of size N of each P
Parameter
---------
Ls : list of LikelihoodFunction objects
the LikelihoodFunction objects to be convolved
N : integer
size of sample to compute convolution and corresponding deviation
dev : boolean
indicate whether to return the relative deviation based on shot noise of sample with size N
shrink : boolean (deprecated)
if True, reduce number of bins of result to standard number of bins
absolute : boolean
indicate whether likelihood describes absolute value (possibly negative)
if True, allow values to cancel out by assuming same likelihood for positive and negative values
smooth : boolean
if True, return smoothed P ( LikelihoodSmooth )
renormalize : float (deprecated)
renormalization factor of final result. False to keep normalization after convolution
Returns
-------
P, x, (dev) : convolve likelihood function values, range (and relative deviation, if dev=True)
"""
samples = []
for L in Ls:
norm = L.Norm()
## obtain sample
sample = L.RandomSample( N=N )
# sample = np.array( RandomSample( N=N, P=P[0]/norm, x=P[1], log=log ) ) ### requires norm = 1. other cases are cared for later
## account for norm < 1, i. e. distribution only contributes to amount norm of values
if norm != 1: ### random number of 1-norm events put to 0
sample[np.random.rand(N) > norm] = 0
## account for values to potentially cancel each other
if absolute: ### random half of sample with negative sign
sample[np.random.rand(N) > 0.5] *= -1 ### assume same likelihood for positive and negative values
samples.append( sample )
## compute likelihood
L = LikelihoodFunction( measure=Ls[0].measure, typ=Ls[0].typ )
L.Likelihood( np.abs( np.sum( samples, axis=0 ) ), log=log, bins=Ls[0].P.size )
if smooth:
L.Smooth()
L.ShotNoise( N=N )
return L
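## Hedged usage sketch (added for illustration; L_host and L_igm stand for any two
## LikelihoodFunction objects defined on the same measure):
## L_total = LikelihoodsConvolve( L_host, L_igm, N=100000 )
## returns a LikelihoodFunction of the summed quantity, smoothed and carrying shot-noise deviations.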
### !!! deprecated, remove
def LikelihoodsConvolve_old( *Ps, dev=True, log=True, N=50000, absolute=False, renormalize=False, smooth=True, shrink=False ):
"""
compute convolution of likelihood functions P in brute force method, i. e. add samples of size N of each P
Parameter
---------
Ps : likelihood functions
insert likelihood functions as lists [P,x]
N : integer
size of sample to compute convolution and corresponding deviation
dev : boolean
indicate whether to return the relative deviation based on shot noise of sample with size N
shrink : boolean (deprecated)
if True, reduce number of bins of result to standard number of bins
log : boolean
indicates whether x_f and x_g are log-scaled
absolute : boolean
indicate whether likelihood describes absolute value (possibly negative)
if True, allow values to cancel out by assuming the same likelihood for positive and negative values
smooth : boolean
if True, return smoothed P ( LikelihoodSmooth )
renormalize : float (deprecated)
renormalization factor of final result. False to keep normalization after convolution
Returns
-------
P, x, (dev) : convolved likelihood function values, range (and relative deviation, if dev=True)
"""
samples = []
for P in Ps:
norm = LikelihoodNorm( *P )
## obtain sample
sample = np.array( RandomSample( N=N, P=P[0]/norm, x=P[1], log=log ) ) ### requires norm = 1. other cases are cared for later
## account for norm < 1, i. e. distribution only contributes to amount norm of values
if norm != 1:
sample[np.random.rand(N) > norm] = 0
## account for values to potentially cancel each other
if absolute:
sample[np.random.rand(N) > 0.5] *= -1 ### assume same likelihood for positive and negative values
samples.append( sample )
## compute likelihood
P, x = Likelihood( np.abs( np.sum( samples, axis=0 ) ), log=log, bins=len(Ps[0][0]) )
if smooth:
P, x = LikelihoodSmooth( P=P, x=x )
res = [ P, x ]
if dev:
res.append( LikelihoodDeviation( P=P, x=x, N=N ) )
return res
### !!! deprecated, remove
def Likelihood2Expectation( P=np.array(0), x=np.array(0), log=True, density=True, sigma=1, std_nan=np.nan ):
"""
computes the expectation value and deviation from the likelihood function P (must be normalized to 1)
Parameters
--------
P : array_like, shape(N)
likelihood function
x : array_like, shape(N+1)
range of bins in likelihood function
log : boolean
indicates, whether x is log-scaled
density : boolean
indicates whether P is probability density, should always be true
sigma : integer
indicates the sigma range to be returned. must be contained in sigma_probability in physics.py
std_nan
value returned in case that P=0 everywhere. if not NaN, should reflect upper limit
Returns
-------
expect: float
expectation value of likelihood function
deviation: numpy_array, shape(1,2)
lower and upper bound of the sigma standard deviation width
is given such to easily work with plt.errorbar( 1, expect, deviation )
"""
if log:
x_log = np.log10(x)
x_ = x_log[:-1] + np.diff(x_log)/2
else:
x_ = x[:-1] + np.diff(x)/2
## need probability function, i. e. sum(P)=1
if density:
P_ = P*np.diff(x)
else:
P_ = P
if np.round( np.sum( P_ ), 2) != 1:
if np.all(P_ == 0):
return std_nan #, [std_nan,std_nan]
sys.exit( 'P is not normalized' )
## mean is probabilty weighted sum of possible values
expect = np.sum( x_*P_ )
if log:
expect = 10.**expect
## exactly compute sigma range
P_cum = np.cumsum( P_ )
## find where half of remaining probability 1-P(sigma) is entailed in x <= x_lo
lo = expect - first( zip(x, P_cum), condition= lambda x: x[1] > 0.5*(1-sigma_probability[sigma]) )[0]
## find where half of remaining probability 1-P(sigma) is entailed in x >= x_hi
hi = - expect + first( zip(x[1:], P_cum), condition= lambda x: x[1] > 1- 0.5*(1-sigma_probability[sigma]) )[0]
## if z is clearly within one bin, hi drops negative value
#hi = np.abs(hi)
# x_std = np.sqrt( np.sum( P_ * ( x_ - expect)**2 ) ) ### only works for gaussian, so never
deviation = np.array([lo,hi]).reshape([2,1])
return expect, deviation
### keep
def WeighBayesFactor( bayes=1, weight=1 ):
""" Weigh the significance of Bayes factor bayes with weight w"""
w_log = np.log10(weight)
return 10.**( np.log10(bayes) * (1+np.abs(w_log))**(1 - 2*(w_log<0) - (w_log==0) ) )
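## Hedged usage sketch (added for illustration, not part of the original module):
## a weight of 1 leaves the Bayes factor unchanged, weights > 1 amplify its significance and
## weights < 1 damp it, e.g.
## WeighBayesFactor( 100., 1. ) ## -> 100.0
## WeighBayesFactor( 100., 10. ) ## -> 10000.0 (log10 doubled)
## WeighBayesFactor( 100., 0.1 ) ## -> 10.0 (log10 halved)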
### keep
def BayesTotalLog( bayes, axis=None ):
""" return log10 of total bayes factor along axis """
return np.nansum( np.log10(bayes), axis=axis )
"""
BYNIS
- (BY)te is encoded in the sum of (N)ode (I)Ds of a (S)ynthetic edge.
- This method synthesizes the edges of a network according to the message.
- To mimic real-world networks, it uses a reference degree distribution.
"""
import struct
import numpy as np
from networkx.generators.random_graphs import powerlaw_cluster_graph
import pandas as pd
from tqdm import tqdm
from sgcn.utils import get_bytewidth
from sgcn.algorithms.base import Base
from sgcn.logging import write_log
# Byte-width to unsigned format in struct standard package
bw2fmt = {1: "B", 2: "H", 4: "I", 8: "Q"}
class BYNIS(Base):
def __init__(self, engine):
super().__init__(engine)
def estimate_number_of_nodes(self, n_bytes):
return int(np.ceil(10**np.round(np.log10(n_bytes))))
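# Illustrative note (added, not part of the original class): the node count is the power of ten
# nearest to the message length in log-space, e.g.
# estimate_number_of_nodes(120) -> 100, estimate_number_of_nodes(500) -> 1000.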
def encode(self,
msg_bytes,
pw=None,
g_ref=None,
directed=False,
max_try_rename=20):
"""Encode the message bytes into the node IDs of a synthetic edge.
"""
stats = {}
if pw:
pw = 1
np.random.seed(pw)
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
IMG_DIM = 28
def tomat(vec):
img_dim = int(np.sqrt(len(vec)))
return vec.reshape((img_dim,img_dim))
def plot_1(img_vec):
"""img_mat must be single vector-representation of image to plot"""
if len(img_vec.shape) <= 1:
img_mat = tomat(img_vec)
else:
img_mat = img_vec # It was a matrix already
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.matshow(img_mat, cmap=mpl.cm.binary)
ax.axis('off')
plt.show()
def plot_100(img_vec_array):
first_100 = img_vec_array[:100]
image_mat_10x10 = np.zeros((IMG_DIM*10, IMG_DIM*10))
for x in range(10):
for y in range(10):
# Replace sub-matrix with appropriate values
image_mat_10x10[IMG_DIM*y : IMG_DIM*y+IMG_DIM,
IMG_DIM*x : IMG_DIM*x+IMG_DIM] = tomat(first_100[10*y + x])
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.matshow(image_mat_10x10, cmap=mpl.cm.binary)
ax.axis('off')
plt.show()
return fig
def sig(x):
return 1 / (1 + np.exp(-x))
def w_scale(fan_in=100, fan_out=100):
return 4*np.sqrt(6/(fan_in+fan_out))
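# Illustrative sketch (added, not in the original file): w_scale gives the half-width of the
# uniform range commonly used to initialise sigmoid-layer weights (Glorot-style, scaled by 4).
# For a 784 -> 100 layer one might draw
# s = w_scale(784, 100) # ~0.33
# W = np.random.uniform(-s, s, size=(784, 100))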
#!/GPFS/zhangli_lab_permanent/zhuqingjie/env/py3_tf2/bin/python
'''
@Time : 20/07/21 下午 03:25
@Author : zhuqingjie
@User : zhu
@FileName: analysis.py
@Software: PyCharm
'''
import pickle
import warnings
from pathlib import Path
import cv2
import math
import numpy as np
import pandas as pd
from easyFlyTracker.src_code.Camera_Calibration import Undistortion
from easyFlyTracker.src_code.utils import NumpyArrayHasNanValuesExceptin
from easyFlyTracker.src_code.utils import Pbar, equalizeHist_use_mask
from easyFlyTracker.src_code.kernel_density_estimation import get_KernelDensity
warnings.filterwarnings("ignore")
class Analysis():
'''
Analyze the experiment results.
All length-related quantities computed here are in pixel units; the scale factor is applied in later analysis steps and is deliberately not used here.
'''
__doc__ = 'ana'
def __init__(
self,
video_path, # path to the video file
output_dir, # output directory
roi_flys_flag,
area_th=0.5, # fraction of the dish area taken up by the inner region
roi_flys_ids=None,
ana_time_duration=10., # time span covered by each value when analysing moved distance
sleep_time_duration=10., # time span covered by each value when computing sleep statistics
angle_time_duration=10, # time span covered by each value when computing angle-change statistics
sleep_dist_th_per_second=5,
sleep_time_th=300, # how long the per-second sleep state must last to count as real sleep
Undistortion_model_path=None, # path to the distortion-correction parameters
):
# Initialize the various directories and file paths
self.video_path = Path(video_path)
self.res_dir = Path(output_dir) # results the user needs
self.cache_dir = Path(self.res_dir, '.cache') # intermediate results computed by the program
self.saved_dir = Path(self.cache_dir, 'analysis_result') # results computed by this Analysis class
self.npy_file_path = Path(self.cache_dir, f'track.npy')
self.npy_file_path_cor = Path(self.cache_dir, f'track_cor.npy')
self.move_direction_pre_frame_path = Path(self.saved_dir, 'move_direction_pre_frame.npy')
self.fly_angles_cor_path = Path(self.saved_dir, 'fly_angles_cor.npy')
self.speeds_npy = Path(self.saved_dir, 'all_fly_speeds_per_frame.npy')
self.dist_npy = Path(self.saved_dir, 'all_fly_dist_per_frame.npy')
# self.angle_changes_path = Path(self.saved_dir, 'angle_changes.npy')
config_pkl_path = Path(self.res_dir, 'config.pkl')
self.cache_dir.mkdir(exist_ok=True)
self.saved_dir.mkdir(exist_ok=True)
self.Undistortion_model_path = Undistortion_model_path
# load cps and radius
config_pk = np.array(pickle.load(open(config_pkl_path, 'rb'))[0])
self.cps = config_pk[:, :2]
self.dish_radius = int(round(float(np.mean(config_pk[:, -1]))))
self.mask_imgs = np.load(Path(self.cache_dir, 'mask_imgs.npy'))
self.mask_imgs = self.mask_imgs.astype(np.bool)
self.roi_flys_flag = roi_flys_flag
self.ana_time_duration = ana_time_duration
self.sleep_time_duration = sleep_time_duration
self.angle_time_duration = angle_time_duration
self.sleep_dist_th_per_second = sleep_dist_th_per_second
self.sleep_time_th = sleep_time_th
self.region_radius = int(round(math.sqrt(area_th) * self.dish_radius))
cap = cv2.VideoCapture(str(video_path))
self.fps = round(cap.get(cv2.CAP_PROP_FPS))
self.video_frames_num = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
cap.release()
# Total number of flies; this value already combines roi_flys_mask_arry and Dish_exclude
if roi_flys_ids == None:
self.roi_flys_list = np.array([True] * len(self.cps))
else:
self.roi_flys_list = np.array([False] * len(self.cps))
self.roi_flys_list[roi_flys_ids] = True
self.roi_flys_id = [i for i, r in enumerate(self.roi_flys_list) if r]
self.roi_flys_nubs = self.roi_flys_list.sum()
# Load some data during initialization
self._get_all_res()
self._get_speed_perframe_dist_perframe()
# load heatmap
heatmap_path = Path(self.cache_dir, 'heatmap.npy')
self.heatmap = np.load(heatmap_path)
def _get_all_res(self):
if self.npy_file_path_cor.exists():
self.all_datas = np.load(self.npy_file_path_cor)
else:
res = np.load(self.npy_file_path)
self.all_datas = np.transpose(res, [1, 0, 2])
self._cor()
np.save(self.npy_file_path_cor, self.all_datas)
def _cor(self):
def _correction(l):
'''
Correct a vector using the following rules:
1. if it contains no -1, return it unchanged;
2. if it contains -1, fill the -1 entries by linear interpolation.
:param l:
:return:
'''
# l: a list of int or float values, e.g. [-1, -1, 88, 90, -1, -1, -1, 100], where -1 marks an invalid point
if not (np.array(l) == -1).any(): # no -1 present, return directly
return l
# pandas interpolation cannot fill leading gaps, so fill any leading gap first
if l[0] < 0:
for i in range(len(l)):
if l[i] > 0:
l[:i] = l[i]
break
l = np.where(l < 0, np.nan, l)
df = pd.DataFrame(data=l)
df.interpolate(method="linear", inplace=True)
return df.values[:, 0]
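# Illustrative example (added, not in the original source), using the input from the comment above:
# _correction(np.array([-1., -1., 88., 90., -1., -1., -1., 100.]))
# -> array([88., 88., 88., 90., 92.5, 95., 97.5, 100.])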
def correction2D(ps):
return list(zip(_correction(ps[:, 0]), _correction(ps[:, 1])))
res = []
for ps in self.all_datas:
# Check whether the dish is empty (all values are (-1,-1)); if so, keep it as-is
if ps.min() == -1 and ps.max() == -1:
res.append(ps)
else:
res.append(correction2D(ps))
res = np.array(res)
if np.isnan(res).sum() != 0:
raise NumpyArrayHasNanValuesExceptin(res)
self.all_datas = res
def _get_speed_perframe_dist_perframe(self, redo=False):
if self.speeds_npy.exists() and self.dist_npy.exists() and not redo:
self.all_fly_speeds_per_frame = np.load(self.speeds_npy)
self.all_fly_dist_per_frame = np.load(self.dist_npy)
return
fn = lambda x, y: math.sqrt(pow(x, 2) + pow(y, 2))
fn2 = lambda ps, k: fn(ps[k][0] - ps[k + 1][0], # distance between two consecutive points
ps[k][1] - ps[k + 1][1])
all_fly_speeds = [] # length equals the number of frames
all_fly_displacement = [] # length equals the number of frames minus one
mperframe = 1 / self.fps
for fly in self.all_datas:
# if not exc:
# all_fly_displacement.append([0] * (self.all_datas.shape[1] - 1))
# all_fly_speeds.append([0] * self.all_datas.shape[1])
# continue
ds = [fn2(fly, i) for i in range(len(fly) - 1)]
all_fly_displacement.append(ds)
ds = [ds[0]] + ds + [ds[-1]]
speed = [(ds[i] + ds[i + 1]) / (2 * mperframe) for i in range(len(ds) - 1)]
all_fly_speeds.append(speed)
if np.isnan(all_fly_speeds).sum() != 0:
raise NumpyArrayHasNanValuesExceptin(all_fly_speeds)
if np.isnan(all_fly_displacement).sum() != 0:
raise NumpyArrayHasNanValuesExceptin(all_fly_displacement)
np.save(self.speeds_npy, all_fly_speeds)
np.save(self.dist_npy, all_fly_displacement)
self.all_fly_speeds_per_frame = np.array(all_fly_speeds)
self.all_fly_dist_per_frame = np.array(all_fly_displacement)
def PARAM_speed_displacement(self, redo=False):
'''
Compute two quantities:
time_duration_stat_speed: total speed over each 10-minute window / number of frames / number of flies;
time_duration_stat_displacement: total displacement over each 10-minute window / number of flies
:return: paths of the saved npy files
'''
speed_npy = Path(self.saved_dir, f'speed_per_duration_{self.roi_flys_flag}.npy')
disp_npy = Path(self.saved_dir, f'displacement_per_duration_{self.roi_flys_flag}.npy')
if speed_npy.exists() and disp_npy.exists() and not redo:
return speed_npy, disp_npy
duration_frames = int(round(self.ana_time_duration * 60 * self.fps))
frame_start_ind = list(range(0, self.all_datas.shape[1], duration_frames))
all_fly_speeds = self.all_fly_speeds_per_frame * \
np.tile(self.roi_flys_list[:, np.newaxis],
(1, self.all_fly_speeds_per_frame.shape[1]))
all_fly_displacement = self.all_fly_dist_per_frame * \
np.tile(self.roi_flys_list[:, np.newaxis],
(1, self.all_fly_dist_per_frame.shape[1]))
# res = []
# for ind in frame_start_ind:
# x = np.sum(self.all_fly_dist_per_frame[:, ind:ind + duration_frames], axis=1)
# res.append(x)
# res = np.stack(res, axis=-1)
# res = res * 0.26876426270157516
# np.save(r'Z:\dataset\qususu\ceshishipin\v080\output_72hole_0330_v080\plot_images\qudashen.npy', res)
# df = pd.DataFrame(data=res)
# df.to_excel(r'Z:\dataset\qususu\ceshishipin\v080\output_72hole_0330_v080\plot_images\qudashen.xlsx')
# exit()
time_duration_stat_speed = [] # 10-minute total speed / number of frames / number of flies
time_duration_stat_displacement = [] # 10-minute total displacement / number of flies
for ind in frame_start_ind:
time_duration_stat_speed.append(
all_fly_speeds[:, ind:ind + duration_frames].sum() / duration_frames / self.roi_flys_nubs)
time_duration_stat_displacement.append(
all_fly_displacement[:, ind:ind + duration_frames].sum() / self.roi_flys_nubs)
np.save(speed_npy, time_duration_stat_speed)
np.save(disp_npy, time_duration_stat_displacement)
return speed_npy, disp_npy
def PARAM_dist_per_h(self):
'''
Total distance moved per hour / number of flies
:return: list
'''
self._get_speed_perframe_dist_perframe()
fps = self.fps
dist_per_h = []
da = self.all_fly_dist_per_frame * \
np.tile(self.roi_flys_list[:, np.newaxis],
(1, self.all_fly_dist_per_frame.shape[1]))
duration_frames = int(round(fps * 60 * 60))
for i in range(0, da.shape[1], duration_frames):
dist_per_h.append(np.sum(da[:, i:i + duration_frames]) / self.roi_flys_nubs)
return dist_per_h
def PARAM_sleep_status(self, redo=False):
'''
First compute the per-second sleep state, then compute the total sleep time of the flies within each
statistics window (sleep_time_duration), calculated as:
total sleep time of all flies in the window / number of flies.
Returns the path of the saved npy file.
:param redo:
:return:
'''
npy_path = Path(self.saved_dir, f'sleep_time_per_duration_{self.roi_flys_flag}.npy')
if npy_path.exists() and not redo:
return str(npy_path)
cache_all_sleep_status_path = Path(self.cache_dir, 'all_sleep_status.npy')
def get_all_sleep_status(self):
if cache_all_sleep_status_path.exists():
return np.load(cache_all_sleep_status_path)
self._get_speed_perframe_dist_perframe()
fps = self.fps
all_dist_per_s = []
for i in range(self.all_fly_dist_per_frame.shape[0]):
dist_per_s = []
for j in range(0, self.all_fly_dist_per_frame.shape[1], fps):
dist_per_s.append(np.sum(self.all_fly_dist_per_frame[i, j:j + fps]))
all_dist_per_s.append(dist_per_s)
sleep_dist_th = self.sleep_dist_th_per_second
all_sleep_status_per_s = np.array(all_dist_per_s) < sleep_dist_th
self.all_sleep_status_per_s = all_sleep_status_per_s
# all_sleep_status_per_s = np.delete(all_sleep_status_per_s, exclude_ids, axis=0)
sleep_time_th = self.sleep_time_th
all_sleep_status = []
for k, sleep_status_per_s in enumerate(all_sleep_status_per_s):
sleep_time = 0 # how long the fly has been asleep up to the current second (in seconds)
sleep_status_per_s = np.concatenate(
[sleep_status_per_s, np.array([False])]) # append a False so a trailing run of True is still evaluated when the loop ends
sleep_status = np.zeros([len(sleep_status_per_s) - 1, ], np.bool) # newly created array that stores the sleep status
for i, ss in enumerate(sleep_status_per_s):
if ss:
sleep_time += 1
else:
# whenever the fly is awake, check whether the stretch that just ended qualifies as sleep
if sleep_time >= sleep_time_th:
sleep_status[i - sleep_time:i] = True
sleep_time = 0
all_sleep_status.append(sleep_status)
# per-second sleep status of every fly
all_sleep_status = np.array(all_sleep_status)
np.save(cache_all_sleep_status_path, all_sleep_status)
return all_sleep_status
all_sleep_status = get_all_sleep_status(self)
all_sleep_status = all_sleep_status[self.roi_flys_id]
dt = int(round(self.sleep_time_duration * 60)) # window length in seconds
start_ind = list(range(0, all_sleep_status.shape[1], dt))
# the last window may be shorter than the configured duration, so the value and the actual window length are returned together
values_durations = []
flys_num = self.roi_flys_nubs
for i in range(len(start_ind) - 1):
all_sleep_status_duration = all_sleep_status[:, start_ind[i]:start_ind[i + 1]]
value = all_sleep_status_duration.sum() / flys_num
value = value / 60 # convert to minutes
sleep_flys_nubs = np.sum(all_sleep_status_duration, axis=-1).astype(np.bool).sum()
proportion_of_sleep_flys = sleep_flys_nubs / flys_num # fraction of flies that slept during this window
values_durations.append([value, dt, proportion_of_sleep_flys])
last_da = all_sleep_status[:, start_ind[-1]:]
value = last_da.sum() / flys_num
value = value / 60 # convert to minutes
sleep_flys_nubs = np.sum(last_da, axis=-1).astype(np.bool).sum()
proportion_of_sleep_flys = sleep_flys_nubs / flys_num # fraction of flies that slept during this window
values_durations.append([value, last_da.shape[1], proportion_of_sleep_flys])
values_durations = np.array(values_durations)
np.save(str(npy_path), values_durations)
return str(npy_path)
def PARAM_region_status(self):
'''
For every frame, record whether the fly is inside the inner region: True if inside, False otherwise.
Note that excluded dishes are also set to False.
:return: path of the saved npy file, shape (number of flies, number of frames)
'''
region_status_npy = Path(self.saved_dir, f'region_status.npy')
if Path(region_status_npy).exists():
self.all_region_status = np.load(region_status_npy)
return str(region_status_npy)
cps = self.cps
all_datas = self.all_datas.astype(np.float64)
all_region_status = []
print('get_region_status:')
pbar = Pbar(total=len(cps))
for i, (cp, da) in enumerate(zip(cps, all_datas)):
dist_to_cp = lambda x: math.sqrt(math.pow(x[0] - cp[0], 2) + math.pow(x[1] - cp[1], 2))
region_status = np.array([dist_to_cp(p) < self.region_radius for p in da])
all_region_status.append(region_status)
pbar.update()
pbar.close()
self.all_region_status = np.array(all_region_status)
np.save(region_status_npy, self.all_region_status)
return str(region_status_npy)
def heatmap_to_pcolor(self, heat, mask):
"""
Convert the heatmap to a pseudo-color image
:return:
"""
# Tried generating a 16-bit pseudo-color image, but cv2.applyColorMap does not support it
max_v, datatype = 255, np.uint8
heat = equalizeHist_use_mask(heat, mask, notuint8=True)
heat = heat / heat.max() * max_v
heat = np.round(heat).astype(datatype)
heat = cv2.applyColorMap(heat, cv2.COLORMAP_JET)
return heat
def PARAM_heatmap(self, p):
'''
Independent of the ROI selection; computes the heatmap of all flies.
:param p:
:return:
'''
heatmap = self.heatmap.copy()
heatmaps = []
for mask, cp in zip(self.mask_imgs, self.cps):
hm = heatmap * mask
pcolor = self.heatmap_to_pcolor(hm, mask)
pcolor *= np.tile(mask[:, :, None], (1, 1, 3)) # mask here so the per-dish images can be summed later
heatmaps.append(pcolor)
heatmap_img = np.array(heatmaps).sum(axis=0).astype(np.uint8) # the summed image has a black background
mask_all = np.array(self.mask_imgs).sum(axis=0)
mask_all = (mask_all == 0).astype(np.uint8) * 128 # blue background, BGR (128, 0, 0)
heatmap_img[:, :, 0] += mask_all
# cv2.imshow('', heatmap_img)
# cv2.waitKeyEx()
cv2.imwrite(str(p), heatmap_img)
def PARAM_heatmap_barycenter(self, p, p_heatmap):
'''
Compute the barycenter (center of mass) of each dish's heatmap and visualize it
:return:
'''
def get_barycenter_of_mat(m): # barycenter of a matrix
def get_barycenter_of_line(l): # barycenter of a 1-D profile
i = np.arange(len(l))
return np.sum(l * i) / np.sum(l)
lx = np.sum(m, axis=0)
ly = np.sum(m, axis=1)
return (get_barycenter_of_line(lx),
get_barycenter_of_line(ly))
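# Illustrative note (added, not in the original source): the barycenter is the intensity-weighted
# mean index, e.g. get_barycenter_of_line(np.array([0, 1, 3])) = (0*0 + 1*1 + 2*3) / 4 = 1.75,
# and get_barycenter_of_mat applies this to the column and row sums of the matrix.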
barycps = []
heatmap = self.heatmap
r = self.dish_radius
for cp in self.cps:
p0 = (cp[0] - r, cp[1] - r)
m = heatmap[
p0[1]:p0[1] + 2 * r + 1,
p0[0]:p0[0] + 2 * r + 1]
barycp = get_barycenter_of_mat(m)
barycps.append((barycp[0] + p0[0],
barycp[1] + p0[1]))
self.barycps = barycps
img = cv2.imread(str(p_heatmap))
img = np.zeros_like(img)
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for cartesian_4d_velocity_effector.py."""
import copy
from absl.testing import absltest
from absl.testing import parameterized
from dm_control import mjcf
from dm_robotics.geometry import geometry
from dm_robotics.geometry import mujoco_physics
from dm_robotics.moma.effectors import cartesian_4d_velocity_effector
from dm_robotics.moma.effectors import test_utils
from dm_robotics.moma.models.robots.robot_arms import sawyer
import numpy as np
class Cartesian4DVelocityEffectorTest(parameterized.TestCase):
def test_zero_xy_rot_vel_pointing_down(self):
# When the arm is pointing straight down, the default effector shouldn't
# apply any X or Y rotations.
arm = sawyer.Sawyer(with_pedestal=False)
physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)
effector_6d = test_utils.Spy6dEffector(arm.wrist_site)
effector_4d = cartesian_4d_velocity_effector.Cartesian4dVelocityEffector(
effector_6d, element=arm.wrist_site, effector_prefix='sawyer_4d')
arm.set_joint_angles(
physics, joint_angles=test_utils.SAFE_SAWYER_JOINTS_POS)
physics.step() # propagate the changes to the rest of the physics.
# Send an XYZ + Z rot command. We shouldn't see any XY rotation components.
effector_4d.set_control(physics, command=np.ones(4) * 0.1)
np.testing.assert_allclose(effector_6d.previous_action,
[0.1, 0.1, 0.1, 0.0, 0.0, 0.1],
atol=1e-3, rtol=0.0)
def test_nonzero_xy_rot_vel_not_pointing_down(self):
# When the arm is NOT pointing straight down, the default effector should
# apply X and Y rotations to push it back to the desired quat.
arm = sawyer.Sawyer(with_pedestal=False)
physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)
effector_6d = test_utils.Spy6dEffector(arm.wrist_site)
effector_4d = cartesian_4d_velocity_effector.Cartesian4dVelocityEffector(
effector_6d, element=arm.wrist_site, effector_prefix='sawyer_4d')
# random offset to all joints.
joint_angles = test_utils.SAFE_SAWYER_JOINTS_POS + 0.1
arm.set_joint_angles(physics, joint_angles=joint_angles)
physics.step() # propagate the changes to the rest of the physics.
# Send an XYZ + Z rot command. We SHOULD see XY rotation components.
effector_4d.set_control(physics, command=np.ones(4) * 0.1)
import os
import subprocess
from platform import system as operating_system
import threading
from numpy.lib.function_base import average
def clear_static():
for root, dirs, files in os.walk('src/static'):
for file in files:
#append the file name to the list
print(os.path.join(root,file))
os.remove(os.path.join(root,file))
for root, dirs, files in os.walk('static'):
for file in files:
#append the file name to the list
print(os.path.join(root,file))
os.remove(os.path.join(root,file))
import datetime as dt
import pandas_datareader as pdr
import pandas as pd
# https://cnvrg.io/pytorch-lstm/
def get_stock_data(tag: str, start_date: dt.datetime, end_date: dt.datetime) -> pd.core.frame.DataFrame:
for i in ['data', 'img']:
os.system(f'mkdir {i}')
if operating_system() == 'Windows':
os.system('cls')
else:
os.system('clear')
# forces tag into a list
tag = [tag]
# attempts to pull the data
try:
# get it from yahoo
data = pdr.get_data_yahoo(tag, start=start_date, end=end_date)
# generate a index
"""
Date,Adj Close,Close,High,Low,Open,Volume
df = df.reindex(columns=column_names)
___
df = df[['favorite_color','grade','name','age']]
___
df1 = pd.DataFrame(df1,columns=['Name','Gender','Score','Rounded_score'])
"""
# write it out in the og format
data.to_csv(f'data/{tag[0]}.csv')
# so that it can be read in
with open(f'data/{tag[0]}.csv', 'r') as in_file:
lines = in_file.readlines()
# and manipulated before being exported
with open(f'data/{tag[0]}.csv', 'w+') as out_file:
lines[0] = lines[0].replace('Attributes', 'Date')
del lines[1:3]
for i in lines:
out_file.write(i)
data = pd.read_csv(f'data/{tag[0]}.csv', index_col=0, parse_dates=True)
data = data[['Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume']]
# data["str_date"] = data["Date"]
# data["Date"] = data["Date"].apply(
# lambda x: dt.datetime(
# *list(
# map(
# int, x.split('-')
# )
# )
# ).toordinal()
# )
# and loaded in as a dataframe and exported out as the function
return data
except Exception as e:
# if the data is wrong, return a blank one
print(e)
return pd.DataFrame()
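# Hedged usage sketch (added for illustration; the ticker and dates are arbitrary examples):
# df = get_stock_data('AAPL', dt.datetime(2020, 1, 1), dt.datetime(2021, 1, 1))
# df.head() # Date-indexed OHLCV frame, also written to data/AAPL.csv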
from flask import url_for
# Mathematical functions
import math
# Fundamental package for scientific computing with Python
import numpy as np
# Additional functions for analysing and manipulating data
import pandas as pd
# Date Functions
from datetime import date, timedelta, datetime
# This function adds plotting functions for calender dates
from pandas.plotting import register_matplotlib_converters
# Important package for visualization - we use this to plot the market data
import matplotlib.pyplot as plt
# Formatting dates
import matplotlib.dates as mdates
# Packages for measuring model performance / errors
from sklearn.metrics import mean_absolute_error, mean_squared_error
# Deep learning library, used for neural networks
from keras.models import Sequential
# Deep learning classes for recurrent and regular densely-connected layers
from keras.layers import LSTM, Dense, Dropout
# EarlyStopping during model training
from keras.callbacks import EarlyStopping
# This Scaler removes the median and scales the data according to the quantile range to normalize the price data
from sklearn.preprocessing import RobustScaler
def get_predictive_model(tag:str, start_date = pd.to_datetime('2020-01-01'), end_date = dt.datetime.today()):
# pull in data from
df = get_stock_data(tag, start_date, end_date)
train_dfs = df.copy()
# Indexing Batches
train_df = train_dfs.sort_values(by=['Date']).copy()
# We safe a copy of the dates index, before we need to reset it to numbers
date_index = train_df.index
# Adding Month and Year in separate columns
d = pd.to_datetime(train_df.index)
train_df['Month'] = d.strftime("%m")
train_df['Year'] = d.strftime("%Y")
# We reset the index, so we can convert the date-index to a number-index
train_df = train_df.reset_index(drop=True).copy()
FEATURES = ['High', 'Low', 'Open', 'Close', 'Volume', 'Month']
data = pd.DataFrame(train_df)
data_filtered = data[FEATURES]
# We add a prediction column and set dummy values to prepare the data for scaling
data_filtered_ext = data_filtered.copy()
data_filtered_ext['Prediction'] = data_filtered_ext['Close']
# Calculate the number of rows in the data
nrows = data_filtered.shape[0]
np_data_unscaled = np.array(data_filtered)
np_data_unscaled = np.reshape(np_data_unscaled, (nrows, -1))
"""Definitions of ground truth NSRTs for all environments."""
import itertools
from typing import List, Sequence, Set, cast
import numpy as np
from predicators.src.envs import get_or_create_env
from predicators.src.envs.behavior import BehaviorEnv
from predicators.src.envs.behavior_options import grasp_obj_param_sampler, \
navigate_to_param_sampler, place_ontop_obj_pos_sampler
from predicators.src.envs.doors import DoorsEnv
from predicators.src.envs.painting import PaintingEnv
from predicators.src.envs.pddl_env import _PDDLEnv
from predicators.src.envs.playroom import PlayroomEnv
from predicators.src.envs.repeated_nextto_painting import \
RepeatedNextToPaintingEnv
from predicators.src.envs.satellites import SatellitesEnv
from predicators.src.envs.tools import ToolsEnv
from predicators.src.settings import CFG
from predicators.src.structs import NSRT, Array, GroundAtom, LiftedAtom, \
Object, ParameterizedOption, Predicate, State, Type, Variable
from predicators.src.utils import null_sampler
def get_gt_nsrts(predicates: Set[Predicate],
options: Set[ParameterizedOption]) -> Set[NSRT]:
"""Create ground truth NSRTs for an env."""
if CFG.env in ("cover", "cover_hierarchical_types", "cover_typed_options",
"cover_regrasp", "cover_multistep_options",
"pybullet_cover"):
nsrts = _get_cover_gt_nsrts()
elif CFG.env == "cluttered_table":
nsrts = _get_cluttered_table_gt_nsrts()
elif CFG.env == "cluttered_table_place":
nsrts = _get_cluttered_table_gt_nsrts(with_place=True)
elif CFG.env in ("blocks", "pybullet_blocks"):
nsrts = _get_blocks_gt_nsrts()
elif CFG.env == "behavior":
nsrts = _get_behavior_gt_nsrts() # pragma: no cover
elif CFG.env in ("painting", "repeated_nextto_painting"):
nsrts = _get_painting_gt_nsrts()
elif CFG.env == "tools":
nsrts = _get_tools_gt_nsrts()
elif CFG.env == "playroom":
nsrts = _get_playroom_gt_nsrts()
elif CFG.env == "repeated_nextto":
nsrts = _get_repeated_nextto_gt_nsrts(CFG.env)
elif CFG.env == "repeated_nextto_single_option":
nsrts = _get_repeated_nextto_single_option_gt_nsrts()
elif CFG.env == "screws":
nsrts = _get_screws_gt_nsrts()
elif CFG.env.startswith("pddl_"):
nsrts = _get_pddl_env_gt_nsrts(CFG.env)
elif CFG.env == "touch_point":
nsrts = _get_touch_point_gt_nsrts()
elif CFG.env == "stick_button":
nsrts = _get_stick_button_gt_nsrts()
elif CFG.env == "doors":
nsrts = _get_doors_gt_nsrts()
elif CFG.env == "coffee":
nsrts = _get_coffee_gt_nsrts()
elif CFG.env in ("satellites", "satellites_simple"):
nsrts = _get_satellites_gt_nsrts()
else:
raise NotImplementedError("Ground truth NSRTs not implemented")
# Filter out excluded predicates from NSRTs, and filter out NSRTs whose
# options are excluded.
final_nsrts = set()
for nsrt in nsrts:
if nsrt.option not in options:
continue
nsrt = nsrt.filter_predicates(predicates)
final_nsrts.add(nsrt)
return final_nsrts
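# Hedged usage sketch (added for illustration; assumes CFG.env has already been configured):
# env = get_or_create_env(CFG.env)
# nsrts = get_gt_nsrts(env.predicates, env.options)
# returns the ground-truth NSRTs with excluded predicates filtered out and NSRTs whose
# options are excluded dropped entirely.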
def _get_from_env_by_names(env_name: str, names: Sequence[str],
env_attr: str) -> List:
"""Helper for loading types, predicates, and options by name."""
env = get_or_create_env(env_name)
name_to_env_obj = {}
for o in getattr(env, env_attr):
name_to_env_obj[o.name] = o
assert set(name_to_env_obj).issuperset(set(names))
return [name_to_env_obj[name] for name in names]
def _get_types_by_names(env_name: str, names: Sequence[str]) -> List[Type]:
"""Load types from an env given their names."""
return _get_from_env_by_names(env_name, names, "types")
def _get_predicates_by_names(env_name: str,
names: Sequence[str]) -> List[Predicate]:
"""Load predicates from an env given their names."""
return _get_from_env_by_names(env_name, names, "predicates")
def _get_options_by_names(env_name: str,
names: Sequence[str]) -> List[ParameterizedOption]:
"""Load parameterized options from an env given their names."""
return _get_from_env_by_names(env_name, names, "options")
def _get_cover_gt_nsrts() -> Set[NSRT]:
"""Create ground truth NSRTs for CoverEnv or environments that inherit from
CoverEnv."""
# Types
block_type, target_type, robot_type = _get_types_by_names(
CFG.env, ["block", "target", "robot"])
# Objects
block = Variable("?block", block_type)
robot = Variable("?robot", robot_type)
target = Variable("?target", target_type)
# Predicates
IsBlock, IsTarget, Covers, HandEmpty, Holding = \
_get_predicates_by_names(CFG.env, ["IsBlock", "IsTarget", "Covers",
"HandEmpty", "Holding"])
# Options
if CFG.env in ("cover", "pybullet_cover", "cover_hierarchical_types",
"cover_regrasp"):
PickPlace, = _get_options_by_names(CFG.env, ["PickPlace"])
elif CFG.env in ("cover_typed_options", "cover_multistep_options"):
Pick, Place = _get_options_by_names(CFG.env, ["Pick", "Place"])
nsrts = set()
# Pick
parameters = [block]
holding_predicate_args = [block]
if CFG.env == "cover_multistep_options":
parameters.append(robot)
holding_predicate_args.append(robot)
preconditions = {LiftedAtom(IsBlock, [block]), LiftedAtom(HandEmpty, [])}
add_effects = {LiftedAtom(Holding, holding_predicate_args)}
delete_effects = {LiftedAtom(HandEmpty, [])}
if CFG.env in ("cover", "pybullet_cover", "cover_hierarchical_types",
"cover_regrasp"):
option = PickPlace
option_vars = []
elif CFG.env == "cover_typed_options":
option = Pick
option_vars = [block]
elif CFG.env == "cover_multistep_options":
option = Pick
option_vars = [block, robot]
if CFG.env == "cover_multistep_options":
def pick_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
# The only things that change are the block's grasp, and the
# robot's grip, holding, x, and y.
assert len(objs) == 2
block, robot = objs
assert block.is_instance(block_type)
assert robot.is_instance(robot_type)
bx, by = state.get(block, "x"), state.get(block, "y")
rx, ry = state.get(robot, "x"), state.get(robot, "y")
bw = state.get(block, "width")
if CFG.cover_multistep_goal_conditioned_sampling:
# Goal conditioned sampling currently assumes one goal.
assert len(goal) == 1
goal_atom = next(iter(goal))
t = goal_atom.objects[1]
tx, tw = state.get(t, "x"), state.get(t, "width")
thr_found = False # target hand region
# Loop over objects in state to find target hand region,
# whose center should overlap with the target.
for obj in state.data:
if obj.type.name == "target_hand_region":
tlb = state.get(obj, "lb")
tub = state.get(obj, "ub")
tm = (tlb + tub) / 2 # midpoint of hand region
if tx - tw / 2 < tm < tx + tw / 2:
thr_found = True
break
assert thr_found
if CFG.cover_multistep_degenerate_oracle_samplers:
desired_x = float(bx)
elif CFG.cover_multistep_goal_conditioned_sampling:
# Block position adjusted by target/ thr offset
desired_x = bx + (tm - tx)
else:
desired_x = rng.uniform(bx - bw / 2, bx + bw / 2)
# This option changes the grasp for the block from -1.0 to 1.0, so
# the delta is 1.0 - (-1.0) = 2.0
block_param = [2.0]
# The grip changes from -1.0 to 1.0.
# The holding changes from -1.0 to 1.0.
# x, y, grip, holding
robot_param = [desired_x - rx, by - ry, 2.0, 2.0]
param = block_param + robot_param
return np.array(param, dtype=np.float32)
else:
def pick_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
del goal # unused
assert len(objs) == 1
b = objs[0]
assert b.is_instance(block_type)
if CFG.env == "cover_typed_options":
lb = float(-state.get(b, "width") / 2)
ub = float(state.get(b, "width") / 2)
elif CFG.env in ("cover", "pybullet_cover",
"cover_hierarchical_types", "cover_regrasp"):
lb = float(state.get(b, "pose") - state.get(b, "width") / 2)
lb = max(lb, 0.0)
ub = float(state.get(b, "pose") + state.get(b, "width") / 2)
ub = min(ub, 1.0)
return np.array(rng.uniform(lb, ub, size=(1, )), dtype=np.float32)
pick_nsrt = NSRT("Pick", parameters, preconditions, add_effects,
delete_effects, set(), option, option_vars, pick_sampler)
nsrts.add(pick_nsrt)
# Place (to Cover)
parameters = [block, target]
holding_predicate_args = [block]
if CFG.env == "cover_multistep_options":
parameters = [block, robot, target]
holding_predicate_args.append(robot)
preconditions = {
LiftedAtom(IsBlock, [block]),
LiftedAtom(IsTarget, [target]),
LiftedAtom(Holding, holding_predicate_args)
}
add_effects = {
LiftedAtom(HandEmpty, []),
LiftedAtom(Covers, [block, target])
}
delete_effects = {LiftedAtom(Holding, holding_predicate_args)}
if CFG.env == "cover_regrasp":
Clear, = _get_predicates_by_names("cover_regrasp", ["Clear"])
preconditions.add(LiftedAtom(Clear, [target]))
delete_effects.add(LiftedAtom(Clear, [target]))
if CFG.env in ("cover", "pybullet_cover", "cover_hierarchical_types",
"cover_regrasp"):
option = PickPlace
option_vars = []
elif CFG.env == "cover_typed_options":
option = Place
option_vars = [target]
elif CFG.env == "cover_multistep_options":
option = Place
option_vars = [block, robot, target]
if CFG.env == "cover_multistep_options":
def place_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
if CFG.cover_multistep_goal_conditioned_sampling:
# Goal conditioned sampling currently assumes one goal.
assert len(goal) == 1
goal_atom = next(iter(goal))
t = goal_atom.objects[1]
tx, tw = state.get(t, "x"), state.get(t, "width")
thr_found = False # target hand region
# Loop over objects in state to find target hand region,
# whose center should overlap with the target.
for obj in state.data:
if obj.type.name == "target_hand_region":
lb = state.get(obj, "lb")
ub = state.get(obj, "ub")
m = (lb + ub) / 2 # midpoint of hand region
if tx - tw / 2 < m < tx + tw / 2:
thr_found = True
break
assert thr_found
assert len(objs) == 3
block, robot, target = objs
assert block.is_instance(block_type)
assert robot.is_instance(robot_type)
assert target.is_instance(target_type)
rx = state.get(robot, "x")
tx, tw = state.get(target, "x"), state.get(target, "width")
if CFG.cover_multistep_degenerate_oracle_samplers:
desired_x = float(tx)
elif CFG.cover_multistep_goal_conditioned_sampling:
desired_x = m # midpoint of hand region
else:
desired_x = rng.uniform(tx - tw / 2, tx + tw / 2)
delta_x = desired_x - rx
# This option changes the grasp for the block from 1.0 to -1.0, so
# the delta is -1.0 - 1.0 = -2.0.
# x, grasp
block_param = [delta_x, -2.0]
# The grip changes from 1.0 to -1.0.
# The holding changes from 1.0 to -1.0.
# x, grip, holding
robot_param = [delta_x, -2.0, -2.0]
param = block_param + robot_param
return np.array(param, dtype=np.float32)
else:
def place_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
del goal # unused
assert len(objs) == 2
t = objs[-1]
assert t.is_instance(target_type)
lb = float(state.get(t, "pose") - state.get(t, "width") / 10)
lb = max(lb, 0.0)
ub = float(state.get(t, "pose") + state.get(t, "width") / 10)
ub = min(ub, 1.0)
return np.array(rng.uniform(lb, ub, size=(1, )), dtype=np.float32)
place_nsrt = NSRT("Place",
parameters, preconditions, add_effects, delete_effects,
set(), option, option_vars, place_sampler)
nsrts.add(place_nsrt)
# Place (not on any target)
if CFG.env == "cover_regrasp":
parameters = [block]
preconditions = {
LiftedAtom(IsBlock, [block]),
LiftedAtom(Holding, [block])
}
add_effects = {
LiftedAtom(HandEmpty, []),
}
delete_effects = {LiftedAtom(Holding, [block])}
option = PickPlace
option_vars = []
def place_on_table_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
# Always at the current location.
del goal, rng # this sampler is deterministic
assert len(objs) == 1
held_obj = objs[0]
x = state.get(held_obj, "pose") + state.get(held_obj, "grasp")
return np.array([x], dtype=np.float32)
place_on_table_nsrt = NSRT("PlaceOnTable", parameters,
preconditions, add_effects, delete_effects,
set(), option, option_vars,
place_on_table_sampler)
nsrts.add(place_on_table_nsrt)
return nsrts
def _get_cluttered_table_gt_nsrts(with_place: bool = False) -> Set[NSRT]:
"""Create ground truth NSRTs for ClutteredTableEnv."""
can_type, = _get_types_by_names("cluttered_table", ["can"])
HandEmpty, Holding, Untrashed = _get_predicates_by_names(
"cluttered_table", ["HandEmpty", "Holding", "Untrashed"])
if with_place:
Grasp, Place = _get_options_by_names("cluttered_table_place",
["Grasp", "Place"])
else:
Grasp, Dump = _get_options_by_names("cluttered_table",
["Grasp", "Dump"])
nsrts = set()
# Grasp
can = Variable("?can", can_type)
parameters = [can]
option_vars = [can]
option = Grasp
preconditions = {LiftedAtom(HandEmpty, []), LiftedAtom(Untrashed, [can])}
add_effects = {LiftedAtom(Holding, [can])}
delete_effects = {LiftedAtom(HandEmpty, [])}
def grasp_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
del goal # unused
assert len(objs) == 1
can = objs[0]
# Need a max here in case the can is trashed already, in which case
# both pose_x and pose_y will be -999.
end_x = max(0.0, state.get(can, "pose_x"))
end_y = max(0.0, state.get(can, "pose_y"))
if with_place:
start_x, start_y = 0.2, 0.1
else:
start_x, start_y = rng.uniform(0.0, 1.0,
size=2) # start from anywhere
return np.array([start_x, start_y, end_x, end_y], dtype=np.float32)
import json
import logging
import os
from glob import glob
from multiprocessing.pool import Pool
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import tifffile as tf
from matplotlib.widgets import Button, RectangleSelector
from scipy.interpolate import interp1d
# from skimage.transform import resize
from .util import resize_axis
from .filter import selectiveMedianFilter
tifffile_logger = logging.getLogger("tifffile")
tifffile_logger.setLevel(logging.ERROR)
POSITIONS = {x: i for i, x in enumerate("xyz")}
__JOBSDIR__ = "_jobs"
__DECONDIR__ = "_decon"
__TMXDIR__ = "_tmx"
class Selection3D:
def __init__(self, coords=None):
# keys are axis, values are array of (selector, axis_number) tuples
self.selectors = []
self.coords = {}
if coords is not None:
self.coords = {"x": coords[0], "y": coords[1], "z": coords[2]}
self.linked_axes = []
self.data_lim = [None, None, None] # x, y, z Data Size
def width(self, axis):
if axis not in self.coords:
raise ValueError("Unknown axis: %s" % axis)
return round(np.diff(self.coords[axis])[0])
""" Document Localization using Recursive CNN
Maintainer : <NAME>
Email : <EMAIL> """
import imgaug.augmenters as iaa
import csv
import logging
import os
import xml.etree.ElementTree as ET
import numpy as np
from torchvision import transforms
import utils.utils as utils
# To include a new Dataset, inherit from Dataset and add all the Dataset-specific parameters here.
# Goal : Remove any data specific parameters from the rest of the code
logger = logging.getLogger("iCARL")
class Dataset:
"""
Base class to represent a Dataset
"""
def __init__(self, name):
self.name = name
self.data = []
self.labels = []
def getTransformsByImgaug():
return iaa.Sequential(
[
iaa.Resize(32),
# Add blur
iaa.Sometimes(
0.05,
iaa.OneOf(
[
iaa.GaussianBlur(
(0, 3.0)
), # blur images with a sigma between 0 and 3.0
iaa.AverageBlur(
k=(2, 11)
), # blur image using local means with kernel sizes between 2 and 7
iaa.MedianBlur(
k=(3, 11)
), # blur image using local medians with kernel sizes between 2 and 7
iaa.MotionBlur(k=15, angle=[-45, 45]),
]
),
),
# Add color
iaa.Sometimes(
0.05,
iaa.OneOf(
[
iaa.WithHueAndSaturation(iaa.WithChannels(0, iaa.Add((0, 50)))),
iaa.AddToBrightness((-30, 30)),
iaa.MultiplyBrightness((0.5, 1.5)),
iaa.AddToHueAndSaturation((-50, 50), per_channel=True),
iaa.Grayscale(alpha=(0.0, 1.0)),
iaa.ChangeColorTemperature((1100, 10000)),
iaa.KMeansColorQuantization(),
]
),
),
# Add weather
iaa.Sometimes(
0.05,
iaa.OneOf(
[
iaa.Clouds(),
iaa.Fog(),
iaa.Snowflakes(flake_size=(0.1, 0.4), speed=(0.01, 0.05)),
iaa.Rain(speed=(0.1, 0.3)),
]
),
),
# Add contrast
iaa.Sometimes(
0.05,
iaa.OneOf(
[
iaa.GammaContrast((0.5, 2.0)),
iaa.GammaContrast((0.5, 2.0), per_channel=True),
iaa.SigmoidContrast(gain=(3, 10), cutoff=(0.4, 0.6)),
iaa.SigmoidContrast(
gain=(3, 10), cutoff=(0.4, 0.6), per_channel=True
),
iaa.LogContrast(gain=(0.6, 1.4)),
iaa.LogContrast(gain=(0.6, 1.4), per_channel=True),
iaa.LinearContrast((0.4, 1.6)),
iaa.LinearContrast((0.4, 1.6), per_channel=True),
iaa.AllChannelsCLAHE(),
iaa.AllChannelsCLAHE(clip_limit=(1, 10)),
iaa.AllChannelsCLAHE(clip_limit=(1, 10), per_channel=True),
iaa.Alpha((0.0, 1.0), iaa.HistogramEqualization()),
iaa.Alpha((0.0, 1.0), iaa.AllChannelsHistogramEqualization()),
iaa.AllChannelsHistogramEqualization(),
]
),
)
]
).augment_image
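# Illustrative usage sketch (added; the zero image below is just a stand-in for a real photo):
# the returned callable maps an HxWx3 uint8 numpy image to its augmented 32x32 counterpart,
# which is why it can be chained with torchvision transforms, e.g.
# aug = getTransformsByImgaug()
# out = aug(np.zeros((480, 640, 3), dtype=np.uint8)) # -> (32, 32, 3) uint8 array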
class SmartDoc(Dataset):
"""
Class holding the SmartDoc dataset specific details
"""
def __init__(self, directory="data"):
super().__init__("smartdoc")
self.data = []
self.labels = []
for d in directory:
self.directory = d
self.train_transform = transforms.Compose(
[
getTransformsByImgaug(),
# transforms.Resize([32, 32]),
# transforms.ColorJitter(1.5, 1.5, 0.9, 0.5),
transforms.ToTensor(),
]
)
self.test_transform = transforms.Compose(
[
iaa.Sequential(
[
iaa.Resize(32),
]
).augment_image,
transforms.ToTensor(),
]
)
logger.info("Pass train/test data paths here")
self.classes_list = {}
file_names = []
print(self.directory, "gt.csv")
with open(os.path.join(self.directory, "gt.csv"), "r") as csvfile:
spamreader = csv.reader(
csvfile, delimiter=",", quotechar="|", quoting=csv.QUOTE_MINIMAL
)
import ast
for row in spamreader:
file_names.append(row[0])
self.data.append(os.path.join(self.directory, row[0]))
test = row[1].replace("array", "")
self.labels.append((ast.literal_eval(test)))
self.labels = np.array(self.labels)
self.labels = np.reshape(self.labels, (-1, 8))
logger.debug("Ground Truth Shape: %s", str(self.labels.shape))
logger.debug("Data shape %s", str(len(self.data)))
self.myData = [self.data, self.labels]
class SmartDocDirectories(Dataset):
"""
Class holding the SmartDoc directory-based dataset details
"""
def __init__(self, directory="data"):
super().__init__("smartdoc")
self.data = []
self.labels = []
for folder in os.listdir(directory):
if os.path.isdir(directory + "/" + folder):
for file in os.listdir(directory + "/" + folder):
images_dir = directory + "/" + folder + "/" + file
if os.path.isdir(images_dir):
list_gt = []
tree = ET.parse(images_dir + "/" + file + ".gt")
root = tree.getroot()
for a in root.iter("frame"):
list_gt.append(a)
im_no = 0
for image in os.listdir(images_dir):
if image.endswith(".jpg"):
# print(im_no)
im_no += 1
# Now we have opened the file and GT. Write code to create multiple files and scale gt
list_of_points = {}
# img = cv2.imread(images_dir + "/" + image)
self.data.append(os.path.join(images_dir, image))
for point in list_gt[int(float(image[0:-4])) - 1].iter(
"point"
):
myDict = point.attrib
list_of_points[myDict["name"]] = (
int(float(myDict["x"])),
int(float(myDict["y"])),
)
ground_truth = np.asarray(
(
list_of_points["tl"],
list_of_points["tr"],
list_of_points["br"],
list_of_points["bl"],
)
)
ground_truth = utils.sort_gt(ground_truth)
self.labels.append(ground_truth)
self.labels = np.array(self.labels)
self.labels = np.reshape(self.labels, (-1, 8))
logger.debug("Ground Truth Shape: %s", str(self.labels.shape))
logger.debug("Data shape %s", str(len(self.data)))
self.myData = []
for a in range(len(self.data)):
self.myData.append([self.data[a], self.labels[a]])
class SelfCollectedDataset(Dataset):
"""
Class holding the self-collected dataset details
"""
def __init__(self, directory="data"):
super().__init__("smartdoc")
self.data = []
self.labels = []
for image in os.listdir(directory):
# print (image)
if image.endswith("jpg") or image.endswith("JPG"):
if os.path.isfile(os.path.join(directory, image + ".csv")):
with open(os.path.join(directory, image + ".csv"), "r") as csvfile:
spamwriter = csv.reader(
csvfile,
delimiter=" ",
quotechar="|",
quoting=csv.QUOTE_MINIMAL,
)
img_path = os.path.join(directory, image)
gt = []
for row in spamwriter:
gt.append(row)
gt = np.array(gt).astype(np.float32)
ground_truth = utils.sort_gt(gt)
self.labels.append(ground_truth)
self.data.append(img_path)
self.labels = np.array(self.labels)
from lumopt.geometries.geometry import Geometry
from lumopt.utilities.materials import Material
from lumopt.lumerical_methods.lumerical_scripts import set_spatial_interp, get_eps_from_sim
import lumapi
import numpy as np
import scipy as sp
from scipy.interpolate import RegularGridInterpolator
from scipy.signal import convolve2d
import matplotlib.pyplot as plt
eps0 = sp.constants.epsilon_0
class TopologyOptimization2DParameters(Geometry):
def __init__(self, params, eps_min, eps_max, x, y, z, filter_R, eta, beta):
self.last_params=params
self.eps_min=eps_min
self.eps_max=eps_max
self.eps = None
self.x=x
self.y=y
self.z=z
self.bounds=[(0,1)]*(len(x)*len(y))
self.filter_R = filter_R
self.eta = eta
self.beta = beta
self.dx = x[1]-x[0]
self.dy = y[1]-y[0]
self.dz = z[1]-z[0] if (hasattr(z, "__len__") and len(z)>1) else 0
self.depth = z[-1]-z[0] if (hasattr(z, "__len__") and len(z)>1) else 220e-9
self.beta_factor = 1.2
self.discreteness = 0
self.unfold_symmetry = False #< We do not want monitors to unfold symmetry
def use_interpolation(self):
return True
def calc_discreteness(self):
''' Computes a measure of discreteness. Is 1 when the structure is completely discrete and less when it is not. '''
rho = self.calc_params_from_eps(self.eps).flatten()
return 1 - np.sum(4*rho*(1-rho)) / len(rho)
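# Illustrative note (added): each parameter contributes 4*rho*(1-rho), which is 0 for rho in {0, 1}
# and 1 at rho = 0.5, so a fully binary design scores 1.0 and a uniformly grey design scores 0.0.
# For example, rho = [0, 1, 0.5, 0.5] gives 1 - (0 + 0 + 1 + 1) / 4 = 0.5.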
def progress_continuation(self):
self.discreteness = self.calc_discreteness()
print("Discreteness: {}".format(self.discreteness))
# If it is sufficiently discrete (99%), we terminate
if self.discreteness > 0.99:
return False
## Otherwise, we increase beta and keep going
self.beta *= self.beta_factor
print('Beta is {}'.format(self.beta))
return True
def to_file(self, filename):
np.savez(filename, params=self.last_params, eps_min=self.eps_min, eps_max=self.eps_max, x=self.x, y=self.y, z=self.z, depth=self.depth, beta=self.beta)
def calc_params_from_eps(self,eps):
# Use the permittivity in z-direction. Does not really matter since this is just used for the initial guess and is (usually) heavily smoothed
return (eps - self.eps_min) / (self.eps_max-self.eps_min)
def set_params_from_eps(self,eps):
# Use the permittivity in z-direction. Does not really matter since this is just used for the initial guess and is (usually) heavily smoothed
self.last_params = self.calc_params_from_eps(eps)
def extract_parameters_from_simulation(self, sim):
sim.fdtd.selectpartial('import')
sim.fdtd.eval('set("enabled",0);')
sim.fdtd.selectpartial('initial_guess')
sim.fdtd.eval('set("enabled",1);')
eps = get_eps_from_sim(sim.fdtd, unfold_symmetry=False)
sim.fdtd.selectpartial('initial_guess')
sim.fdtd.eval('set("enabled",0);')
sim.fdtd.selectpartial('import')
sim.fdtd.eval('set("enabled",1);')
reduced_eps = np.real(eps[0])
self.set_params_from_eps(reduced_eps)
def get_eps_from_params(self, sim, params):
rho = np.reshape(params, (len(self.x),len(self.y)))
self.last_params = rho
## Use script function to convert the raw parameters to a permittivity distribution and get the result
sim.fdtd.putv("topo_rho", rho)
sim.fdtd.eval(('params = struct;'
'params.eps_levels=[{0},{1}];'
'params.filter_radius = {2};'
'params.beta = {3};'
'params.eta = {4};'
'params.dx = {5};'
'params.dy = {6};'
'params.dz = 0.0;'
'eps_geo = topoparamstoindex(params,topo_rho);').format(self.eps_min,self.eps_max,self.filter_R,self.beta,self.eta,self.dx,self.dy) )
eps = sim.fdtd.getv("eps_geo")
return eps
def initialize(self, wavelengths, opt):
self.opt=opt
pass
def update_geometry(self, params, sim):
self.eps = self.get_eps_from_params(sim, params)
self.discreteness = self.calc_discreteness()
def get_current_params_inshape(self):
return self.last_params
def get_current_params(self):
params = self.get_current_params_inshape()
return np.reshape(params,(-1)) if params is not None else None
def plot(self,ax_eps):
ax_eps.clear()
x = self.x
y = self.y
eps = self.eps
ax_eps.imshow(np.real(np.transpose(eps)), vmin=self.eps_min, vmax=self.eps_max, extent=[min(x)*1e6,max(x)*1e6,min(y)*1e6,max(y)*1e6], origin='lower')
ax_eps.set_title('Eps')
ax_eps.set_xlabel('x(um)')
ax_eps.set_ylabel('y(um)')
return True
def write_status(self, f):
f.write(', {:.4f}, {:.4f}'.format(self.beta, self.discreteness))
class TopologyOptimization2D(TopologyOptimization2DParameters):
'''
'''
self_update = False
def __init__(self, params, eps_min, eps_max, x, y, z=0, filter_R=200e-9, eta=0.5, beta=1):
super().__init__(params, eps_min, eps_max, x, y, z, filter_R, eta, beta)
@classmethod
def from_file(cls, filename, z=0, filter_R=200e-9, eta=0.5, beta = None):
data = np.load(filename)
if beta is None:
beta = data["beta"]
return cls(data["params"], data["eps_min"], data["eps_max"], data["x"], data["y"], z = z, filter_R = filter_R, eta=eta, beta=beta)
def set_params_from_eps(self,eps):
# Use the permittivity in z-direction. Does not really matter since this is just used for the initial guess and is (usually) heavily smoothed
super().set_params_from_eps(eps[:,:,0,0,2])
def calculate_gradients_on_cad(self, sim, forward_fields, adjoint_fields, wl_scaling_factor):
lumapi.putMatrix(sim.fdtd.handle, "wl_scaling_factor", wl_scaling_factor)
sim.fdtd.eval("V_cell = {};".format(self.dx*self.dy) +
"dF_dEps = pinch(sum(2.0 * V_cell * eps0 * {0}.E.E * {1}.E.E,5),3);".format(forward_fields, adjoint_fields) +
"num_wl_pts = length({0}.E.lambda);".format(forward_fields) +
"for(wl_idx = [1:num_wl_pts]){" +
" dF_dEps(:,:,wl_idx) = dF_dEps(:,:,wl_idx) * wl_scaling_factor(wl_idx);" +
"}" +
"dF_dEps = real(dF_dEps);")
rho = self.get_current_params_inshape()
sim.fdtd.putv("topo_rho", rho)
sim.fdtd.eval(('params = struct;'
'params.eps_levels=[{0},{1}];'
'params.filter_radius = {2};'
'params.beta = {3};'
'params.eta = {4};'
'params.dx = {5};'
'params.dy = {6};'
'params.dz = 0.0;'
'topo_grad = topoparamstogradient(params,topo_rho,dF_dEps);').format(self.eps_min,self.eps_max,self.filter_R,self.beta,self.eta,self.dx,self.dy) )
topo_grad = sim.fdtd.getv("topo_grad")
return topo_grad.reshape(-1, topo_grad.shape[-1])
def calculate_gradients(self, gradient_fields, sim):
rho = self.get_current_params_inshape()
# If we have frequency data (3rd dim), we need to adjust the dimensions of epsilon for broadcasting to work
E_forward_dot_E_adjoint = np.atleast_3d(np.real(np.squeeze(np.sum(gradient_fields.get_field_product_E_forward_adjoint(),axis=-1))))
dF_dEps = 2*self.dx*self.dy*eps0*E_forward_dot_E_adjoint
sim.fdtd.putv("topo_rho", rho)
sim.fdtd.putv("dF_dEps", dF_dEps)
sim.fdtd.eval(('params = struct;'
'params.eps_levels=[{0},{1}];'
'params.filter_radius = {2};'
'params.beta = {3};'
'params.eta = {4};'
'params.dx = {5};'
'params.dy = {6};'
'params.dz = 0.0;'
'topo_grad = topoparamstogradient(params,topo_rho,dF_dEps);').format(self.eps_min,self.eps_max,self.filter_R,self.beta,self.eta,self.dx,self.dy) )
topo_grad = sim.fdtd.getv("topo_grad")
return topo_grad.reshape(-1, topo_grad.shape[-1])
def add_geo(self, sim, params=None, only_update = False):
fdtd=sim.fdtd
eps = self.eps if params is None else self.get_eps_from_params(sim, params.reshape(-1))
fdtd.putv('x_geo',self.x)
fdtd.putv('y_geo',self.y)
fdtd.putv('z_geo',np.array([self.z-self.depth/2,self.z+self.depth/2]))
if not only_update:
set_spatial_interp(sim.fdtd,'opt_fields','specified position')
set_spatial_interp(sim.fdtd,'opt_fields_index','specified position')
script=('select("opt_fields");'
'set("x min",{});'
'set("x max",{});'
'set("y min",{});'
'set("y max",{});').format( np.amin(self.x), np.amax(self.x), np.amin(self.y), np.amax(self.y) )
import numpy as np
import scipy as sp
from scipy.sparse import linalg
import copy
import moments.Spectrum_mod
from . import Numerics
import Jackknife as jk
import LinearSystem_1D as ls1
import LinearSystem_2D as ls2
from . import Reversible
#------------------------------------------------------------------------------
# Functions for the computation of the Phi-moments for multidimensional models:
# we integrate the ode system on the Phi_n(i) to compute their evolution
# we write it (and solve it) as an approximated linear system:
# Phi_n' = Bn(N) + (1/(4N)Dn + S1n + S2n)Phi_n
# where :
# N is the total population size
# Bn(N) is the mutation source term
# 1/(4N)Dn is the drift effect matrix
# S1n is the selection matrix for h = 0.5
# S2n is the effect of h != 0.5
#------------------------------------------------------------------------------
#-----------------------------------
# functions to compute the matrices-
#-----------------------------------
# Mutations
def _calcB(dims, u):
# u is a list of mutation rates in each population
# allows for different mutation rates in different pops
B = np.zeros(dims)
for k in range(len(dims)):
ind = np.zeros(len(dims), dtype='int')
ind[k] = int(1)
tp = tuple(ind)
B[tp] = (dims[k] - 1) * u[k]
return B
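# Illustrative sketch (added, not part of the original module): for a single population with
# dims = [11] (sample size n = 10) and u = [1e-8], the only non-zero entry is
# B[(1,)] = (11 - 1) * 1e-8, i.e. new mutations enter the spectrum as singletons.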
# Finite genome mutation model
def _calcB_FB(dims, theta_fd, theta_bd):
"""
dims : List containing the pop sizes
u: scalar forward mutation rate
v: scalar backward mutation rate
Returns mutation matrix for finite genome model
"""
if len(dims) == 1:
return ls1.calcB_FB(dims[0], theta_fd, theta_bd)
elif len(dims) == 2: # return list of mutation matrices
return [ls2.calcB_FB1(dims, theta_fd, theta_bd), ls2.calcB_FB2(dims, theta_fd, theta_bd)]
elif len(dims) == 3:
return Reversible.calc_FB_3pop(dims, theta_fd, theta_bd)
elif len(dims) == 4:
return Reversible.calc_FB_4pop(dims, theta_fd, theta_bd)
elif len(dims) == 5:
return Reversible.calc_FB_5pop(dims, theta_fd, theta_bd)
# Drift
def _calcD(dims):
"""
dims : List containing the pop sizes
Returns a list of drift matrices for each pair of pops
"""
res = []
for i in range(len(dims)):
for j in range(i + 1, len(dims)):
res.append([ls2.calcD1(np.array([dims[i], dims[j]])),
ls2.calcD2(np.array([dims[i], dims[j]]))])
return res
def _buildD(vd, dims, N):
"""
Builds the effective drift matrices by multiplying by the 1/4N coeff
vd : List containing the drift matrices
dims : List containing the pop sizes
N : List containing the effective pop sizes for each pop
Returns a list of effective drift matrices for each pair of pops
"""
if (len(dims) == 1): return [1.0 / 4 / N[0] * vd[0][0]]
res = []
ctr = 0
for i in range(len(dims)):
for j in range(i + 1, len(dims)):
res.append(1.0/(4*N[i])*vd[ctr][0] + 1.0/(4*N[j])*vd[ctr][1])
ctr += 1
return res
# Selection 1
def _calcS(dims, ljk):
"""
dims : List containing the pop sizes
ljk : List containing the 1 jump jackknife matrices for each pair of pop
Returns a list of selection matrices for each pair of pops
"""
res = []
for i in range(len(dims)):
for j in range(i + 1, len(dims)):
res.append([ls2.calcS_1(np.array([dims[i], dims[j]]), ljk[i]),
ls2.calcS_2(np.array([dims[i], dims[j]]), ljk[j])])
return res
def _buildS(vs, dims, s, h):
"""
Builds the effective selection matrices by multiplying by the correct coeff
vs : List containing the selection matrices
dims : List containing the pop sizes
s : List containing the selection coefficients
h : List containing the dominance coefficients
Returns a list of effective selection matrices for each pair of pops
"""
if (len(dims) == 1): return [vs[0][0]]
res = []
ctr = 0
for i in range(len(dims)):
for j in range(i + 1, len(dims)):
res.append(s[i]*h[i]*vs[ctr][0] + s[j]*h[j]*vs[ctr][1])
ctr += 1
return res
# Selection 2
def _calcS2(dims, ljk):
"""
dims : List containing the pop sizes
ljk : List containing the 2 jumps jackknife matrices for each pair of pop
Returns a list of selection matrices for each pair of pops
"""
res = []
for i in range(len(dims)):
for j in range(i + 1, len(dims)):
res.append([ls2.calcS2_1(np.array([dims[i], dims[j]]), ljk[i]),
ls2.calcS2_2(np.array([dims[i], dims[j]]), ljk[j])])
return res
def _buildS2(vs, dims, s, h):
"""
Builds the effective selection matrices (part due to dominance)
by multiplying by the correct coeff
vs : List containing the selection matrices
dims : List containing the pop sizes
s : List containing the selection coefficients
h : List containing the dominance coefficients
Returns a list of effective selection matrices for each pair of pops
"""
if (len(dims) == 1): return [vs[0][0]]
res = []
ctr = 0
for i in range(len(dims)):
for j in range(i + 1, len(dims)):
res.append(s[i]*(1-2.0*h[i])*vs[ctr][0] + s[j]*(1-2.0*h[j])*vs[ctr][1])
ctr += 1
return res
# Migrations
def _calcM(dims, ljk):
"""
dims : List containing the pop sizes
ljk : List containing the 1 jump jackknife matrices for each pair of pop
Returns a list of migration matrices for each pair of pops
"""
res = []
for i in range(len(dims)):
for j in range(i + 1, len(dims)):
res.append([ls2.calcM_1(np.array([dims[i], dims[j]]), ljk[j]),
ls2.calcM_2(np.array([dims[i], dims[j]]), ljk[i])])
return res
def _buildM(vm, dims, m):
"""
Builds the effective migration matrices by multiplying by the migration coeff
vm : List containing the migration matrices
dims : List containing the pop sizes
m : matrix containing the migration coefficients
Returns a list of effective migration matrices for each pair of pops
"""
res = []
ctr = 0
for i in range(len(dims)):
for j in range(i + 1, len(dims)):
res.append(m[i, j]*vm[ctr][0] + m[j, i]*vm[ctr][1])
ctr += 1
return res
#----------------------------------
# updates for the time integration-
#----------------------------------
# we solve a system like PX = QY
# step 1 functions correspond to the QY computation
# and step 2 to the resolution of PX = Y'
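# Minimal sketch of how the two steps combine for a single 2-population slice, assuming
# a Crank-Nicolson factorization (the names A, Q, P, slv below are illustrative only):
#   A   = D + S1 + S2 + M                          # pairwise transition matrix
#   Q   = sp.sparse.identity(d) + dt / 2.0 * A     # explicit half step
#   slv = linalg.factorized((sp.sparse.identity(d) - dt / 2.0 * A).tocsc())
#   sfs = _ud1_2pop_1(sfs, [Q], dims)              # step 1: right-hand side Q.Y
#   sfs = _ud2_2pop_1(sfs, [slv], dims)            # step 2: solve P.X = Q.Y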
# 2D
#step 1
def _ud1_2pop_1(sfs, Q, dims):
sfs = Q[0].dot(sfs.reshape(dims[0] * dims[1])).reshape(dims)
return sfs
# step 2
def _ud2_2pop_1(sfs, slv, dims):
sfs = (slv[0](sfs.reshape(dims[0] * dims[1]))).reshape(dims)
return sfs
# for the 3D, 4D and 5D cases, each pair of directions is coded separately to simplify the permutations...
#------------------------------
# 3D
# step 1
def _ud1_3pop_1(sfs, Q, dims):
for i in range(int(dims[2])):
sfs[:, :, i] = Q[0].dot(sfs[:, :, i].reshape(dims[0] * dims[1])).reshape(dims[0], dims[1])
return sfs
def _ud1_3pop_2(sfs, Q, dims):
for i in range(int(dims[1])):
sfs[:, i, :] = Q[1].dot(sfs[:, i, :].reshape(dims[0] * dims[2])).reshape(dims[0], dims[2])
return sfs
def _ud1_3pop_3(sfs, Q, dims):
for i in range(int(dims[0])):
sfs[i, :, :] = Q[2].dot(sfs[i, :, :].reshape(dims[1] * dims[2])).reshape(dims[1], dims[2])
return sfs
# step 2
def _ud2_3pop_1(sfs, slv, dims):
for i in range(int(dims[2])):
sfs[:, :, i] = slv[0](sfs[:, :, i].reshape(dims[0] * dims[1])).reshape(dims[0], dims[1])
return sfs
def _ud2_3pop_2(sfs, slv, dims):
for i in range(int(dims[1])):
sfs[:, i, :] = slv[1](sfs[:, i, :].reshape(dims[0] * dims[2])).reshape(dims[0], dims[2])
return sfs
def _ud2_3pop_3(sfs, slv, dims):
for i in range(int(dims[0])):
sfs[i, :, :] = slv[2](sfs[i, :, :].reshape(dims[1] * dims[2])).reshape(dims[1], dims[2])
return sfs
#------------------------------
# 4D
# step 1
def _ud1_4pop_1(sfs, Q, dims):
for i in range(int(dims[2])):
for j in range(int(dims[3])):
sfs[:, :, i, j] = Q[0].dot(sfs[:, :, i, j].reshape(dims[0] * dims[1])).reshape(dims[0], dims[1])
return sfs
def _ud1_4pop_2(sfs, Q, dims):
for i in range(int(dims[1])):
for j in range(int(dims[3])):
sfs[:, i, :, j] = Q[1].dot(sfs[:, i, :, j].reshape(dims[0] * dims[2])).reshape(dims[0], dims[2])
return sfs
def _ud1_4pop_3(sfs, Q, dims):
for i in range(int(dims[1])):
for j in range(int(dims[2])):
sfs[:, i, j, :] = Q[2].dot(sfs[:, i, j, :].reshape(dims[0] * dims[3])).reshape(dims[0], dims[3])
return sfs
def _ud1_4pop_4(sfs, Q, dims):
for i in range(int(dims[0])):
for j in range(int(dims[3])):
sfs[i, :, :, j] = Q[3].dot(sfs[i, :, :, j].reshape(dims[1] * dims[2])).reshape(dims[1], dims[2])
return sfs
def _ud1_4pop_5(sfs, Q, dims):
for i in range(int(dims[0])):
for j in range(int(dims[2])):
sfs[i, :, j, :] = Q[4].dot(sfs[i, :, j, :].reshape(dims[1] * dims[3])).reshape(dims[1], dims[3])
return sfs
def _ud1_4pop_6(sfs, Q, dims):
for i in range(int(dims[0])):
for j in range(int(dims[1])):
sfs[i, j, :, :] = Q[5].dot(sfs[i, j, :, :].reshape(dims[2] * dims[3])).reshape(dims[2], dims[3])
return sfs
# step 2
def _ud2_4pop_1(sfs, slv, dims):
for i in range(int(dims[2])):
for j in range(int(dims[3])):
sfs[:, :, i, j] = slv[0](sfs[:, :, i, j].reshape(dims[0] * dims[1])).reshape(dims[0], dims[1])
return sfs
def _ud2_4pop_2(sfs, slv, dims):
for i in range(int(dims[1])):
for j in range(int(dims[3])):
sfs[:, i, :, j] = slv[1](sfs[:, i, :, j].reshape(dims[0] * dims[2])).reshape(dims[0], dims[2])
return sfs
def _ud2_4pop_3(sfs, slv, dims):
for i in range(int(dims[1])):
for j in range(int(dims[2])):
sfs[:, i, j, :] = slv[2](sfs[:, i, j, :].reshape(dims[0] * dims[3])).reshape(dims[0], dims[3])
return sfs
def _ud2_4pop_4(sfs, slv, dims):
for i in range(int(dims[0])):
for j in range(int(dims[3])):
sfs[i, :, :, j] = slv[3](sfs[i, :, :, j].reshape(dims[1] * dims[2])).reshape(dims[1], dims[2])
return sfs
def _ud2_4pop_5(sfs, slv, dims):
for i in range(int(dims[0])):
for j in range(int(dims[2])):
sfs[i, :, j, :] = slv[4](sfs[i, :, j, :].reshape(dims[1] * dims[3])).reshape(dims[1], dims[3])
return sfs
def _ud2_4pop_6(sfs, slv, dims):
for i in range(int(dims[0])):
for j in range(int(dims[1])):
sfs[i, j, :, :] = slv[5](sfs[i, j, :, :].reshape(dims[2] * dims[3])).reshape(dims[2], dims[3])
return sfs
#------------------------------
# 5D
# step 1
def _ud1_5pop_1(sfs, Q, dims):
for i in range(int(dims[0])):
for j in range(int(dims[1])):
for k in range(int(dims[2])):
sfs[i, j, k, :, :] = Q[9].dot(sfs[i, j, k, :, :].reshape(dims[3] * dims[4])).reshape(dims[3], dims[4])
return sfs
def _ud1_5pop_2(sfs, Q, dims):
for i in range(int(dims[0])):
for j in range(int(dims[1])):
for k in range(int(dims[3])):
sfs[i, j, :, k, :] = Q[8].dot(sfs[i, j, :, k, :].reshape(dims[2] * dims[4])).reshape(dims[2], dims[4])
return sfs
def _ud1_5pop_3(sfs, Q, dims):
for i in range(int(dims[0])):
for j in range(int(dims[1])):
for k in range(int(dims[4])):
sfs[i, j, :, :, k] = Q[7].dot(sfs[i, j, :, :, k].reshape(dims[2] * dims[3])).reshape(dims[2], dims[3])
return sfs
def _ud1_5pop_4(sfs, Q, dims):
for i in range(int(dims[0])):
for j in range(int(dims[2])):
for k in range(int(dims[3])):
sfs[i, :, j, k, :] = Q[6].dot(sfs[i, :, j, k, :].reshape(dims[1] * dims[4])).reshape(dims[1], dims[4])
return sfs
def _ud1_5pop_5(sfs, Q, dims):
for i in range(int(dims[0])):
for j in range(int(dims[2])):
for k in range(int(dims[4])):
sfs[i, :, j, :, k] = Q[5].dot(sfs[i, :, j, :, k].reshape(dims[1] * dims[3])).reshape(dims[1], dims[3])
return sfs
def _ud1_5pop_6(sfs, Q, dims):
for i in range(int(dims[0])):
for j in range(int(dims[3])):
for k in range(int(dims[4])):
sfs[i, :, :, j, k] = Q[4].dot(sfs[i, :, :, j, k].reshape(dims[1] * dims[2])).reshape(dims[1], dims[2])
return sfs
def _ud1_5pop_7(sfs, Q, dims):
for i in range(int(dims[1])):
for j in range(int(dims[2])):
for k in range(int(dims[3])):
sfs[:, i, j, k, :] = Q[3].dot(sfs[:, i, j, k, :].reshape(dims[0] * dims[4])).reshape(dims[0], dims[4])
return sfs
def _ud1_5pop_8(sfs, Q, dims):
for i in range(int(dims[1])):
for j in range(int(dims[2])):
for k in range(int(dims[4])):
sfs[:, i, j, :, k] = Q[2].dot(sfs[:, i, j, :, k].reshape(dims[0] * dims[3])).reshape(dims[0], dims[3])
return sfs
def _ud1_5pop_9(sfs, Q, dims):
for i in range(int(dims[1])):
for j in range(int(dims[3])):
for k in range(int(dims[4])):
sfs[:, i, :, j, k] = Q[1].dot(sfs[:, i, :, j, k].reshape(dims[0] * dims[2])).reshape(dims[0], dims[2])
return sfs
def _ud1_5pop_10(sfs, Q, dims):
for i in range(int(dims[2])):
for j in range(int(dims[3])):
for k in range(int(dims[4])):
sfs[:, :, i, j, k] = Q[0].dot(sfs[:, :, i, j, k].reshape(dims[0] * dims[1])).reshape(dims[0], dims[1])
return sfs
# step 2
def _ud2_5pop_1(sfs, slv, dims):
for i in range(int(dims[0])):
for j in range(int(dims[1])):
for k in range(int(dims[2])):
sfs[i, j, k, :, :] = slv[9](sfs[i, j, k, :, :].reshape(dims[3] * dims[4])).reshape(dims[3], dims[4])
return sfs
def _ud2_5pop_2(sfs, slv, dims):
for i in range(int(dims[0])):
for j in range(int(dims[1])):
for k in range(int(dims[3])):
sfs[i, j, :, k, :] = slv[8](sfs[i, j, :, k, :].reshape(dims[2] * dims[4])).reshape(dims[2], dims[4])
return sfs
def _ud2_5pop_3(sfs, slv, dims):
for i in range(int(dims[0])):
for j in range(int(dims[1])):
for k in range(int(dims[4])):
sfs[i, j, :, :, k] = slv[7](sfs[i, j, :, :, k].reshape(dims[2] * dims[3])).reshape(dims[2], dims[3])
return sfs
def _ud2_5pop_4(sfs, slv, dims):
for i in range(int(dims[0])):
for j in range(int(dims[2])):
for k in range(int(dims[3])):
sfs[i, :, j, k, :] = slv[6](sfs[i, :, j, k, :].reshape(dims[1] * dims[4])).reshape(dims[1], dims[4])
return sfs
def _ud2_5pop_5(sfs, slv, dims):
for i in range(int(dims[0])):
for j in range(int(dims[2])):
for k in range(int(dims[4])):
sfs[i, :, j, :, k] = slv[5](sfs[i, :, j, :, k].reshape(dims[1] * dims[3])).reshape(dims[1], dims[3])
return sfs
def _ud2_5pop_6(sfs, slv, dims):
for i in range(int(dims[0])):
for j in range(int(dims[3])):
for k in range(int(dims[4])):
sfs[i, :, :, j, k] = slv[4](sfs[i, :, :, j, k].reshape(dims[1] * dims[2])).reshape(dims[1], dims[2])
return sfs
def _ud2_5pop_7(sfs, slv, dims):
for i in range(int(dims[1])):
for j in range(int(dims[2])):
for k in range(int(dims[3])):
sfs[:, i, j, k, :] = slv[3](sfs[:, i, j, k, :].reshape(dims[0] * dims[4])).reshape(dims[0], dims[4])
return sfs
def _ud2_5pop_8(sfs, slv, dims):
for i in range(int(dims[1])):
for j in range(int(dims[2])):
for k in range(int(dims[4])):
sfs[:, i, j, :, k] = slv[2](sfs[:, i, j, :, k].reshape(dims[0] * dims[3])).reshape(dims[0], dims[3])
return sfs
def _ud2_5pop_9(sfs, slv, dims):
for i in range(int(dims[1])):
for j in range(int(dims[3])):
for k in range(int(dims[4])):
sfs[:, i, :, j, k] = slv[1](sfs[:, i, :, j, k].reshape(dims[0] * dims[2])).reshape(dims[0], dims[2])
return sfs
def _ud2_5pop_10(sfs, slv, dims):
for i in range(int(dims[2])):
for j in range(int(dims[3])):
for k in range(int(dims[4])):
sfs[:, :, i, j, k] = slv[0](sfs[:, :, i, j, k].reshape(dims[0] * dims[1])).reshape(dims[0], dims[1])
return sfs
# update nD with permutations
def _update_step1(sfs, Q, dims, order):
assert(len(sfs.shape) == len(dims))
assert(len(Q) == len(dims) * (len(dims)-1) / 2)
for i in order:
sfs = eval('_ud1_' + str(len(dims)) + 'pop_' + str(i + 1) + '(sfs, Q, dims)')
return sfs
def _update_step2(sfs, slv, dims, order):
assert(len(sfs.shape) == len(dims))
assert(len(slv) == len(dims) * (len(dims)-1) / 2)
for i in order:
sfs = eval('_ud2_' + str(len(dims)) + 'pop_' + str(i + 1) + '(sfs, slv, dims)')
return sfs
def _permute(tab):
res = tab[1:]
res.append(tab[0])
return res
# timestep computation
def compute_dt(sample_sizes, N, s, h, m, dt_default, factor=10.0):
sample_sizes = np.array(sample_sizes, dtype=np.float64)
N = np.amin(np.array(N)/sample_sizes)
sel1 = np.amax(abs(np.array(s) * np.array(h) * sample_sizes))
sel2 = np.amax(abs(np.array(s) * (1-2.0*np.array(h)) * sample_sizes))
mig = np.amax(m) * np.amax(sample_sizes)
eps = 10e-16 # to avoid division by zero
return min(dt_default, factor * min(2*N, 1.0 / (mig+eps),
1.0 / (sel1+eps), 1.0 / (sel2+eps)))
def compute_dt_bis(N, T, drift, selmig, dims):
if callable(N):
Nmin = N(0)#np.amin(N(0), N(T))
else:
Nmin = N
nbp = int(len(dims) * (len(dims)-1) / 2)
D = _buildD(drift, dims, Nmin)
Mat = [(D[i] + selmig[i]).todense() for i in range(nbp)]
ev = [np.linalg.eigvals(Mat[i]) for i in range(nbp)]
return 0
def _compute_dt_1pop(N, m, s, h, timescale_factor=0.15):
maxVM = max(0.25/N, max(m),\
abs(s) * 2*max(np.abs(h + (1-2*h)*0.5) * 0.5*(1-0.5),
np.abs(h + (1-2*h)*0.25) * 0.25*(1-0.25)))
if maxVM > 0:
dt = timescale_factor / maxVM
else:
dt = np.inf
if dt == 0:
raise ValueError('Timestep is zero. Values passed in are N=%f, m=%s,'
's=%f, h=%f.' % (N, str(m), s, h))
return dt
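# Note: the definition below reuses the name compute_dt, so it shadows the earlier
# compute_dt(sample_sizes, N, s, h, m, dt_default, ...) defined above; only this
# version remains visible at module level.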
def compute_dt(N, m=None, s=None, h=None, timescale_factor=0.1):
#def compute_dt(N, m=None, s=None, h=None, timescale_factor=0.05):
if m is None:
m = np.zeros([len(N), len(N)])
if s is None:
s = np.zeros(len(N))
if h is None:
h = 0.5*np.ones(len(N))
timesteps = [_compute_dt_1pop(N[i], m[i, :], s[i], h[i], timescale_factor) for i in range(len(N))]
return min(timesteps)
def integrate_nD(sfs0, Npop, tf, dt_fac=0.1, gamma=None, h=None, m=None, theta=1.0, adapt_dt=False,
finite_genome=False, theta_fd=None, theta_bd=None, frozen=[False]):
"""
Npop : total population size (vector N = (N1,...,Np)), or a function of time returning that vector (see below)
tf : final simulation time (/2N1 generations)
gamma : selection coefficients (vector gamma = (gamma1,...,gammap))
theta : mutation rate
h : allele dominance (vector h = (h1,...,hp))
m : migration rates matrix (2D array, m[i,j] is the migration rate from pop j to pop i, normalized by 1/4N1)
finite_genome : whether to integrate under the finite genome (reversible mutation) model;
                if so, theta_fd and theta_bd must be given
Npop may also be given as a lambda function of the time t (in generations, with t = 0 at the
start of the integration) returning the vector N = (N1,...,Np), or directly as that vector
if N does not change over time; the integration uses a backward Euler scheme
"""
sfs0 = np.array(sfs0)
n = np.array(sfs0.shape)-1
# neutral case if the parameters are not provided
if gamma is None: gamma = np.zeros(len(n))
if h is None: h = 0.5 * np.ones(len(n))
if m is None: m = np.zeros([len(n), len(n)])
s = np.array(gamma)
h = np.array(h)
Tmax = tf * 2.0
dt = Tmax * dt_fac
# dimensions of the sfs
dims = np.array(n + np.ones(len(n)), dtype=int)
d = int(np.prod(dims))
# if theta is single value, mutation rate is same in each population
if finite_genome == False:
if hasattr(theta, "__len__"):
u = np.array(theta) / 4.0
else:
u = np.array([theta / 4.0] * len(dims))
else:
if hasattr(theta_fd, "__len__"):
u = np.array(theta_fd)
"""
Utility functions.
Author: <NAME>, agilescientific.com
Licence: Apache 2.0
Copyright 2022 Agile Scientific
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from .feature import is_standardized
def is_numeric(a):
"""
Decide if a sequence is numeric.
Args:
a (array): A sequence.
Returns:
bool: True if a is numeric.
Example:
>>> is_numeric([1, 2, 3])
True
>>> is_numeric(['a', 'b', 'c'])
False
"""
a = np.asarray(a)
return np.issubdtype(a.dtype, np.number)
def generate_data(counts):
"""
Generate data from a list of counts.
Args:
counts (array): A sequence of class counts.
Returns:
array: A sequence of classes matching the counts.
Example:
>>> generate_data([3, 5])
[0, 0, 0, 1, 1, 1, 1, 1]
"""
data = [c * [i] for i, c in enumerate(counts)]
return [item for sublist in data for item in sublist]
def sorted_unique(a):
"""
Unique items in appearance order.
`np.unique` is sorted, `set()` is unordered, `pd.unique()` is fast, but we
don't have to rely on it. This does the job, and is not too slow.
Args:
a (array): A sequence.
Returns:
array: The unique items, in order of first appearance.
Example:
>>> sorted_unique([3, 0, 0, 1, 3, 2, 3])
array([3, 0, 1, 2])
"""
a = np.asarray(a)
_, idx = np.unique(a, return_index=True)
return a[np.sort(idx)]
import numpy as np
import tensorflow as tf
from functools import partial
import cv2 as cv
import random
def gaussian_noise(img_set, mean=0, var=0.001):
ret = np.empty(img_set.shape)