# Copyright 2021-2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import cunumeric as num
def test():
# np.random.seed(42)
# anp = np.array([1, 54, 4 , 4, 0, 45, 5, 58, 0, 9, 0, 4, 0, 0, 0, 5, 0])
# a = num.array(anp)
# assert(num.array_equal(np.where(anp), num.where(a)))
# cnp = np.array([1, 54, 4 , 4, 0, 45, 5, 58, 0, 9, 0, 4, 0, 0, 0, 5, 0, 1]).reshape((6,3)) # noqa E501
# c = num.array(cnp)
# bnp = np.random.randn(6,3)
# b = num.array(bnp)
# assert(num.array_equal(num.extract(c, b), np.extract(cnp, bnp)))
anp = np.array([[True, False], [True, True]])
xnp = np.array([[1, 2], [3, 4]])
ynp = np.array([[9, 8], [7, 6]])
a = num.array(anp)
x = num.array(xnp)
y = num.array(ynp)
assert np.array_equal(np.where(anp, xnp, ynp), num.where(a, x, y))
anp = np.array([True, False])
xnp = np.array([[1, 2], [3, 4]])
ynp = np.array([[9, 8], [7, 6]])
a = num.array(anp)
x = num.array(xnp)
y = num.array(ynp)
assert np.array_equal(np.where(anp, xnp, ynp), num.where(a, x, y))
    anp = np.array([[True, False]])
import logging
log = logging.getLogger(__name__)
import itertools
import numpy as np
from copy import deepcopy
import pycqed.measurement.waveform_control.sequence as sequence
from pycqed.utilities.general import add_suffix_to_dict_keys
import pycqed.measurement.randomized_benchmarking.randomized_benchmarking as rb
import pycqed.measurement.randomized_benchmarking.two_qubit_clifford_group as tqc
from pycqed.measurement.pulse_sequences.single_qubit_tek_seq_elts import \
get_pulse_dict_from_pars, add_preparation_pulses, pulse_list_list_seq, \
prepend_pulses, add_suffix, sweep_pulse_params
from pycqed.measurement.gate_set_tomography.gate_set_tomography import \
create_experiment_list_pyGSTi_qudev as get_exp_list
from pycqed.measurement.waveform_control import pulsar as ps
import pycqed.measurement.waveform_control.segment as segment
from pycqed.analysis_v2 import tomography_qudev as tomo
station = None
kernel_dir = 'kernels/'
# You need to explicitly set this before running any functions from this module
# I guess there are cleaner solutions :)
cached_kernels = {}
def n_qubit_off_on(pulse_pars_list, RO_pars_list, return_seq=False,
parallel_pulses=False, preselection=False, upload=True,
RO_spacing=2000e-9):
n = len(pulse_pars_list)
seq_name = '{}_qubit_OffOn_sequence'.format(n)
seq = sequence.Sequence(seq_name)
seg_list = []
RO_pars_list_presel = deepcopy(RO_pars_list)
for i, RO_pars in enumerate(RO_pars_list):
RO_pars['name'] = 'RO_{}'.format(i)
RO_pars['element_name'] = 'RO'
if i != 0:
RO_pars['ref_point'] = 'start'
for i, RO_pars_presel in enumerate(RO_pars_list_presel):
RO_pars_presel['ref_pulse'] = RO_pars_list[-1]['name']
RO_pars_presel['ref_point'] = 'start'
RO_pars_presel['element_name'] = 'RO_presel'
RO_pars_presel['pulse_delay'] = -RO_spacing
# Create a dict with the parameters for all the pulses
pulse_dict = dict()
for i, pulse_pars in enumerate(pulse_pars_list):
pars = pulse_pars.copy()
if i == 0 and parallel_pulses:
pars['ref_pulse'] = 'segment_start'
if i != 0 and parallel_pulses:
pars['ref_point'] = 'start'
pulses = add_suffix_to_dict_keys(
get_pulse_dict_from_pars(pars), ' {}'.format(i))
pulse_dict.update(pulses)
# Create a list of required pulses
pulse_combinations = []
for pulse_list in itertools.product(*(n*[['I', 'X180']])):
pulse_comb = (n)*['']
for i, pulse in enumerate(pulse_list):
pulse_comb[i] = pulse + ' {}'.format(i)
pulse_combinations.append(pulse_comb)
for i, pulse_comb in enumerate(pulse_combinations):
pulses = []
for j, p in enumerate(pulse_comb):
pulses += [pulse_dict[p]]
pulses += RO_pars_list
if preselection:
pulses = pulses + RO_pars_list_presel
seg = segment.Segment('segment_{}'.format(i), pulses)
seg_list.append(seg)
seq.add(seg)
repeat_dict = {}
repeat_pattern = ((1.0 + int(preselection))*len(pulse_combinations),1)
for i, RO_pars in enumerate(RO_pars_list):
repeat_dict = seq.repeat(RO_pars, None, repeat_pattern)
if upload:
ps.Pulsar.get_instance().program_awgs(seq)
if return_seq:
return seq, seg_list
else:
return seq_name
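# Illustrative sketch (an addition, not part of the original module): the
# pulse_combinations loop above enumerates all 2**n off/on patterns and labels
# each pulse name with its qubit index. The helper below reproduces only that
# enumeration so the naming convention ('I 0', 'X180 1', ...) is easy to inspect.
def _example_off_on_combinations(n=2):
    """For n=2: [['I 0', 'I 1'], ['I 0', 'X180 1'],
    ['X180 0', 'I 1'], ['X180 0', 'X180 1']]"""
    return [['{} {}'.format(p, i) for i, p in enumerate(pulse_list)]
            for pulse_list in itertools.product(*(n * [['I', 'X180']]))]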
def two_qubit_randomized_benchmarking_seqs(
qb1n, qb2n, operation_dict, cliffords, nr_seeds=None,
max_clifford_idx=11520, cz_pulse_name=None, cal_points=None,
net_clifford=0, clifford_decomposition_name='HZ',
cl_sequence=None, sampling_seeds=None,
interleaved_gate=None, upload=True, prep_params=dict()):
"""
Args
qb1n (str): name of qb1
qb2n (str): name of qb2
operation_dict (dict): dict with all operations from both qubits and
with the multiplexed RO pulse pars
cliffords (array): array of ints specifying the number of random
Cliffords to generate in each sequence
nr_seeds (array): array of the form np.arange(nr_seeds_value)
max_clifford_idx (int): specifies up to which index of the elements in
the two-qubit Clifford group to include in the random generation.
See measurement/randomized_benchmarking/two_qubit_clifford_group.py.
        cz_pulse_name (str): pycqed name of the CZ pulse
        cal_points (CalibrationPoints): instance of CalibrationPoints
        net_clifford (int): 0 or 1; whether the recovery Clifford returns
            the qubits to the ground state (0) or puts them in the excited
            states (1)
        clifford_decomposition_name (str): the decomposition of Clifford gates
            into primitives; can be "XY", "HZ", or "5Primitives"
cl_sequence (list): the Clifford sequence to use for all seeds. Can
also be lists of lists in which case the user must ensure that
            len(nr_seeds) % len(cl_sequence) == 0.
sampling_seeds (array of ints): ints that will be used as seeds for
the random generation of Cliffords. Should have the same length
as nr_seeds.
interleaved_gate (str): pycqed name for a gate
upload (bool): whether to upload sequence to AWGs
prep_params (dict): qubit preparation_params dict
"""
# This is used for checking that the recovery is correct
import qutip as qtp
standard_pulses = {
'I': qtp.qeye(2),
'Z0': qtp.qeye(2),
'X180': qtp.sigmax(),
'mX180': qtp.sigmax(),
'Y180': qtp.sigmay(),
'mY180': qtp.sigmay(),
'X90': qtp.rotation(qtp.sigmax(), np.pi / 2),
'mX90': qtp.rotation(qtp.sigmax(), -np.pi / 2),
'Y90': qtp.rotation(qtp.sigmay(), np.pi / 2),
'mY90': qtp.rotation(qtp.sigmay(), -np.pi / 2),
'Z90': qtp.rotation(qtp.sigmaz(), np.pi / 2),
'mZ90': qtp.rotation(qtp.sigmaz(), -np.pi / 2),
'Z180': qtp.sigmaz(),
'mZ180': qtp.sigmaz(),
'CZ': qtp.cphase(np.pi)
}
seq_name = '2Qb_RB_sequence'
if sampling_seeds is None:
if nr_seeds is None:
raise ValueError('Please provide either "sampling_seeds" or '
'"nr_seeds."')
sampling_seeds = [None] * len(nr_seeds)
else:
nr_seeds = np.arange(len(sampling_seeds))
# Set Clifford decomposition
tqc.gate_decomposition = rb.get_clifford_decomposition(
clifford_decomposition_name)
if cl_sequence is not None:
if isinstance(cl_sequence[0], list):
# if cl_sequence is a list of lists such that
# len(nr_seeds) != len(cl_sequence) but
# len(nr_seeds) % len(cl_sequence) == 0,
# then create as many copies of the lists in cl_sequence until
# len(cl_sequence) == len(nr_seeds).
assert len(nr_seeds) % len(cl_sequence) == 0
k = len(nr_seeds) // len(cl_sequence)
cl_seq_temp = k * cl_sequence
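            # Illustrative example (an addition): with len(nr_seeds) == 6 and
            # 3 Clifford lists in cl_sequence, k == 2 and cl_seq_temp repeats
            # the 3 lists twice, so seeds 3-5 reuse the same three sequences
            # as seeds 0-2.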
sequences = []
for nCl in cliffords:
pulse_list_list_all = []
for s in nr_seeds:
if cl_sequence is None:
cl_seq = rb.randomized_benchmarking_sequence_new(
nCl,
number_of_qubits=2,
max_clifford_idx=max_clifford_idx,
interleaving_cl=interleaved_gate,
desired_net_cl=net_clifford,
seed=sampling_seeds[s])
elif isinstance(cl_sequence[0], list):
cl_seq = cl_seq_temp[s]
else:
cl_seq = cl_sequence
pulse_list = []
pulsed_qubits = {qb1n, qb2n}
pulse_tuples_list_all = []
for idx in cl_seq:
pulse_tuples_list = tqc.TwoQubitClifford(idx).gate_decomposition
pulse_tuples_list_all += pulse_tuples_list
for j, pulse_tuple in enumerate(pulse_tuples_list):
if isinstance(pulse_tuple[1], list):
pulse_list += [operation_dict[cz_pulse_name]]
pulsed_qubits = {qb1n, qb2n}
else:
qb_name = qb1n if '0' in pulse_tuple[1] else qb2n
pulse_name = pulse_tuple[0]
if 'Z' not in pulse_name:
if qb_name not in pulsed_qubits:
pulse_name += 's'
else:
pulsed_qubits = set()
pulsed_qubits |= {qb_name}
pulse_list += [
operation_dict[pulse_name + ' ' + qb_name]]
# check recovery
gproduct = qtp.tensor(qtp.identity(2), qtp.identity(2))
for i, cl_tup in enumerate(pulse_tuples_list_all):
if cl_tup[0] == 'CZ':
gproduct = standard_pulses[cl_tup[0]] * gproduct
else:
eye_2qb = [qtp.identity(2), qtp.identity(2)]
eye_2qb[int(cl_tup[1][-1])] = standard_pulses[cl_tup[0]]
gproduct = qtp.tensor(eye_2qb) * gproduct
x = gproduct.full() / gproduct.full()[0][0]
            assert (np.all((np.allclose(np.real(x), np.eye(4)))))
import numpy as np
from scipy import optimize
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
class Constants:
"""
Physical Constants
"""
R = 8.314472 # [J/mol.K]
T0K = 273.15 # [K]
@staticmethod
def TK(TC):
"""
        Temperature Conversion from Celsius to Kelvin
"""
return TC + Constants.T0K
@staticmethod
def TC(TK):
"""
        Temperature Conversion from Kelvin to Celsius
"""
return TK - Constants.T0K
class Conditions:
"""
Standard Conditions
"""
p0 = 1.01325e5 # [Pa]
T0 = 0 + Constants.T0K # [K]
class Water:
"""
Water Physical/Chemical Description
"""
M = 18.0153e-3 # [Kg/mol]
Tvap = 99.94 + Constants.T0K # [K]
cp = 1.826e3 # [J/kg.K]
Hv = 40.662e3 # [J/mol]
lv = Hv/M # [J/Kg]
class Air:
"""
Dry Air Physical/Chemical Description
"""
M = 28.6953e-3 # [Kg/mol]
cp = 1.006e3 # [J/Kg.K]
class Mix:
"""
Mix of Gas and Liquid
All quantities are expressed in Standard Units System
"""
C = Constants
CSTP = Conditions
gas = Air
liquid = Water
Mr = liquid.M/gas.M
@staticmethod
def psat(T):
"""
Saturation Pressure p_sat(T) [Pa]
as a Temperature T [K] function
"""
return Mix.CSTP.p0*np.exp(-Mix.liquid.Hv/Mix.C.R*(1/T - 1/Mix.liquid.Tvap))
@staticmethod
def xpw(pw):
"""
Vapour Mass Ratio x(p_w) [Kg Liquid/Kg Gas]
as a function of Liquid Partial Pressure p_w [Pa]
"""
return Mix.Mr*pw/(Mix.CSTP.p0-pw)
@staticmethod
def xw(T, phi):
"""
        Vapour Mass Ratio x(T, phi) [Kg Liquid/Kg Gas]
        as a function of Temperature T [K] and Relative Humidity phi [-]
        """
        return Mix.xpw(Mix.pisow(T, phi=phi))
@staticmethod
def pwx(x):
"""
Liquid Partial Pressure p_w(x) [Pa]
as a function of Vapour Mass Ratio x [Kg Liquid/Kg Gas]
"""
return Mix.CSTP.p0*x/(x + Mix.Mr)
@staticmethod
def pisow(T, phi=1.):
"""
Isopleth: Iso Relative Humidity (phi) Curve w(T)=k [-]
as a function of Temperature T [K]
Relative Humidity is defined as the ratio of Liquid Partial Pressure p_w [Pa]
and Saturation Pressure p_sat(T) [Pa]: w = p_w/p_sat(T)
"""
return phi*Mix.psat(T)
@staticmethod
def pisov(T, v):
"""
Isopleth (Isochoric): Iso Specific Volume Curve v(T)=k [m^3 Mix/Kg Gas]
as a function of Temperature T [K]
"""
return Mix.CSTP.p0 - (Mix.C.R*T)/(Mix.gas.M*v)
@staticmethod
def pisoh(T, h):
"""
Isopleth (Isenthalpic): Iso Specific Enthalpy Curve h(T)=k [J/Kg Gas]
as a function of Temperature T [K]
"""
dT = (T - Mix.CSTP.T0)
return Mix.CSTP.p0*(h - Mix.gas.cp*dT)/((h + Mix.Mr*Mix.liquid.lv) + (Mix.Mr*Mix.liquid.cp - Mix.gas.cp)*dT)
@staticmethod
def Tmin_score(f, k):
"""
        Score function for the intersection of the k-isopleth of kind f and the Saturation Curve p_sat(T)
as a function of Temperature T [K]
Score function is designed to determine Tmin [K] for Psychrometric Chart Display
"""
def inner(T):
return Mix.psat(T) - f(T, k)
return inner
@staticmethod
def Tmin(f, k, tol=5e-3):
"""
Solve score function to determine Tmin [K] for Psychrometric Chart Display
"""
return optimize.root(Mix.Tmin_score(f, k), 0.1, tol=tol)
@staticmethod
def Tmax(f, k, tol=5e-3):
"""
Find root of the k-isopleth of kind f to get Tmax [K] for Psychrometric Chart Display
"""
return optimize.root(lambda T: f(T, k), 0.1, tol=tol)
@staticmethod
def get_limits(f, konsts, Tmin, Tmax):
"""
        Compute Temperature Boundaries for isopleths of kind f at each level k in konsts
"""
n = konsts.size
Ts = np.full((n, 2), np.nan)
for i, k in enumerate(konsts):
rmin = Mix.Tmin(f, k)
if rmin.success:
Ts[i, 0] = max(rmin.x[0], Tmin)
rmax = Mix.Tmax(f, k)
if rmax.success:
Ts[i, 1] = min(rmax.x[0], Tmax)
return Ts
@staticmethod
def domestic_ranges():
"""
Basic Ranges for Domestic Use
"""
return {
'Tmin': +0. + Constants.T0K, # [K]
'Tmax': +35. + Constants.T0K, # [K]
'isow': np.arange(0.1, 0.91, 0.1), # [-]
            'isov': np.arange(0.76, 0.95, 0.01),  # [m^3 Mix/Kg Gas]
import os
import numpy as np
import matplotlib.pyplot as plt
def scatterPlot(X_f,figHeight,figWidth,filename):
plt.figure(figsize=(figWidth,figHeight))
plt.scatter(X_f[:,0], X_f[:,1],s=0.5)
plt.xlabel('$x$',fontweight='bold',fontsize=14)
plt.ylabel('$y$',fontweight='bold',fontsize=14)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.tight_layout()
plt.savefig(filename +'.pdf',dpi=700, facecolor='w', edgecolor='w',
transparent = 'true', bbox_inches = 'tight')
plt.show()
plt.close()
def genGrid(nPred,L,secBound):
xCrackDown = np.linspace(0.0, L, nPred[0,0], dtype = np.float32)
yCrackDown = np.linspace(secBound[0,0], secBound[0,1], nPred[0,1], dtype = np.float32)
xCD, yCD = np.meshgrid(xCrackDown, yCrackDown)
xCD = np.array([xCD.flatten()])
yCD = np.array([yCD.flatten()])
X_CD = np.concatenate((xCD.T, yCD.T), axis=1)
xCrack = np.linspace(0.0, L, nPred[1,0], dtype = np.float32)
yCrack = np.linspace(secBound[1,0], secBound[1,1], nPred[1,1], dtype = np.float32)
xC, yC = np.meshgrid(xCrack, yCrack)
xC = np.array([xC.flatten()])
yC = np.array([yC.flatten()])
X_C = np.concatenate((xC.T, yC.T), axis=1)
xCrackUp = np.linspace(0.0, L, nPred[2,0], dtype = np.float32)
yCrackUp = np.linspace(secBound[2,0], secBound[2,1], nPred[2,1], dtype = np.float32)
xCU, yCU = np.meshgrid(xCrackUp, yCrackUp)
xCU = np.array([xCU.flatten()])
yCU = np.array([yCU.flatten()])
X_CU = np.concatenate((xCU.T, yCU.T), axis=1)
Grid = np.concatenate((X_CD,X_C),axis=0)
Grid = np.concatenate((Grid,X_CU),axis=0)
xGrid = np.transpose(np.array([Grid[:,0]]))
yGrid = np.transpose(np.array([Grid[:,1]]))
totalPts = np.sum(nPred[:,0]*nPred[:,1])
hist = np.zeros((totalPts,1), dtype = np.float32)
return Grid, xGrid, yGrid, hist
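# Illustrative usage sketch (an addition, not part of the original script): genGrid
# expects one [nx, ny] row and one [ymin, ymax] row per region (below / on / above
# the crack). The numbers below are made-up placeholders chosen only to show shapes.
def _example_genGrid():
    nPred = np.array([[10, 5], [10, 2], [10, 5]])                  # points per region
    secBound = np.array([[0.0, 0.45], [0.45, 0.55], [0.55, 1.0]])  # y-extent per region
    Grid, xGrid, yGrid, hist = genGrid(nPred, 1.0, secBound)
    # Grid stacks sum(nx*ny) = 50 + 20 + 50 = 120 points as (x, y) rows;
    # xGrid and yGrid are the (120, 1) column views and hist is a zero (120, 1) array.
    return Grid.shape, xGrid.shape, yGrid.shape, hist.shape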
def plotPhiStrainEnerg(nPred,xGrid,yGrid,phi_pred,frac_energy_pred,iStep,figHeight,figWidth):
    # Clip phi values to the physical range [0, 1]
index = np.where(phi_pred[:,0] < 0.0)
np.put(phi_pred, index[0], [0.0])
index = np.where(phi_pred[:,0] > 1.0)
np.put(phi_pred, index[0], [1.0])
phi_min = min(phi_pred)
phi_max = max(phi_pred)
frac_energy_min = min(frac_energy_pred)
frac_energy_max = max(frac_energy_pred)
# Plot results
oShapeX_CD = np.resize(xGrid[0 : nPred[0,0] * nPred[0,1], 0], [nPred[0,1], nPred[0,0]])
oShapeY_CD = np.resize(yGrid[0 : nPred[0,0] * nPred[0,1], 0], [nPred[0,1], nPred[0,0]])
phi_CD = np.resize(phi_pred[0 : nPred[0,0] * nPred[0,1], 0], [nPred[0,1], nPred[0,0]])
frac_energy_CD = np.resize(frac_energy_pred[0 : nPred[0,0] * nPred[0,1], 0], [nPred[0,1], nPred[0,0]])
oShapeX_C = np.resize(xGrid[nPred[0,0] * nPred[0,1] : (nPred[0,0]*nPred[0,1]) + (nPred[1,0]*nPred[1,1]), 0], [nPred[1,1], nPred[1,0]])
oShapeY_C = np.resize(yGrid[nPred[0,0] * nPred[0,1] : (nPred[0,0]*nPred[0,1]) + (nPred[1,0]*nPred[1,1]), 0], [nPred[1,1], nPred[1,0]])
phi_C = np.resize(phi_pred[nPred[0,0] * nPred[0,1] : (nPred[0,0]*nPred[0,1]) + (nPred[1,0]*nPred[1,1]), 0], [nPred[1,1], nPred[1,0]])
frac_energy_C = np.resize(frac_energy_pred[nPred[0,0] * nPred[0,1] : (nPred[0,0]*nPred[0,1]) + (nPred[1,0]*nPred[1,1]), 0], [nPred[1,1], nPred[1,0]])
oShapeX_CU = np.resize(xGrid[(nPred[0,0]*nPred[0,1]) + (nPred[1,0]*nPred[1,1]):, 0], [nPred[2,1], nPred[2,0]])
oShapeY_CU = np.resize(yGrid[(nPred[0,0]*nPred[0,1]) + (nPred[1,0]*nPred[1,1]):, 0], [nPred[2,1], nPred[2,0]])
phi_CU = np.resize(phi_pred[(nPred[0,0]*nPred[0,1]) + (nPred[1,0]*nPred[1,1]):, 0], [nPred[2,1], nPred[2,0]])
frac_energy_CU = np.resize(frac_energy_pred[(nPred[0,0]*nPred[0,1]) + (nPred[1,0]*nPred[1,1]):, 0], [nPred[2,1], nPred[2,0]])
# Plotting phi
filename = "Phi"
plt.figure(figsize=(figWidth, figHeight))
cbarlabels = np.linspace(0.0, 1.0, 255, endpoint=True)
cbarticks = np.linspace(0.0, 1.0, 15, endpoint=True)
plt.contourf(oShapeX_CD, oShapeY_CD, phi_CD, cbarlabels, vmin = phi_min, vmax = phi_max, cmap=plt.cm.jet)
plt.contourf(oShapeX_C, oShapeY_C, phi_C, cbarlabels, vmin = phi_min, vmax = phi_max, cmap=plt.cm.jet)
plt.contourf(oShapeX_CU, oShapeY_CU, phi_CU, cbarlabels, vmin = phi_min, vmax = phi_max, cmap=plt.cm.jet)
cbar = plt.colorbar(ticks = cbarticks)
cbar.ax.tick_params(labelsize=14)
plt.xlabel('$x$',fontweight='bold',fontsize=14)
plt.ylabel('$y$',fontweight='bold',fontsize=14)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.tight_layout()
plt.savefig(filename + str(iStep)+".png",dpi=700, facecolor='w', edgecolor='w',
transparent = 'true', bbox_inches = 'tight')
plt.show()
plt.close()
# Plotting the strain energy densities
filename = "fracEnergy"
plt.figure(figsize=(figWidth, figHeight))
plt.contourf(oShapeX_CD, oShapeY_CD, frac_energy_CD, 255, vmin = frac_energy_min, vmax = frac_energy_max, cmap=plt.cm.jet)
plt.contourf(oShapeX_C, oShapeY_C, frac_energy_C, 255, vmin = frac_energy_min, vmax = frac_energy_max, cmap=plt.cm.jet)
plt.contourf(oShapeX_CU, oShapeY_CU, frac_energy_CU, 255, vmin = frac_energy_min, vmax = frac_energy_max, cmap=plt.cm.jet)
cbar = plt.colorbar()
cbar.ax.tick_params(labelsize=14)
    #plt.title("Fracture Energy Density for "+str(iStep)+" and with convergence step " +str(nIter))
plt.xlabel('$x$',fontweight='bold',fontsize=14)
plt.ylabel('$y$',fontweight='bold',fontsize=14)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.tight_layout()
plt.savefig(filename + str(iStep)+".png",dpi=700, facecolor='w', edgecolor='w',
transparent = 'true', bbox_inches = 'tight')
plt.show()
plt.close()
def plotDispStrainEnerg(nPred,xGrid,yGrid,u_pred,v_pred,elas_energy_pred,iStep,figHeight,figWidth):
# Magnification factors for plotting the deformed shape
x_fac = 50
y_fac = 50
v_min = min(v_pred)
v_max = max(v_pred)
elas_energy_min = min(elas_energy_pred)
elas_energy_max = max(elas_energy_pred)
# Compute the approximate displacements at plot points
oShapeX_CD = np.resize(xGrid[0 : nPred[0,0] * nPred[0,1], 0], [nPred[0,1], nPred[0,0]])
oShapeY_CD = np.resize(yGrid[0 : nPred[0,0] * nPred[0,1], 0], [nPred[0,1], nPred[0,0]])
surfaceUx_CD = np.resize(u_pred[0 : nPred[0,0] * nPred[0,1], 0], [nPred[0,1], nPred[0,0]])
surfaceUy_CD = np.resize(v_pred[0 : nPred[0,0] * nPred[0,1], 0], [nPred[0,1], nPred[0,0]])
defShapeX_CD = oShapeX_CD + surfaceUx_CD * x_fac
defShapeY_CD = oShapeY_CD + surfaceUy_CD * y_fac
elas_energy_CD = np.resize(elas_energy_pred[0 : nPred[0,0] * nPred[0,1], 0], [nPred[0,1], nPred[0,0]])
oShapeX_C = np.resize(xGrid[nPred[0,0] * nPred[0,1] : (nPred[0,0]*nPred[0,1]) + (nPred[1,0]*nPred[1,1]), 0], [nPred[1,1], nPred[1,0]])
oShapeY_C = np.resize(yGrid[nPred[0,0] * nPred[0,1] : (nPred[0,0]*nPred[0,1]) + (nPred[1,0]*nPred[1,1]), 0], [nPred[1,1], nPred[1,0]])
surfaceUx_C = np.resize(u_pred[nPred[0,0] * nPred[0,1] : (nPred[0,0]*nPred[0,1]) + (nPred[1,0]*nPred[1,1]), 0], [nPred[1,1], nPred[1,0]])
surfaceUy_C = np.resize(v_pred[nPred[0,0] * nPred[0,1] : (nPred[0,0]*nPred[0,1]) + (nPred[1,0]*nPred[1,1]), 0], [nPred[1,1], nPred[1,0]])
defShapeX_C = oShapeX_C + surfaceUx_C * x_fac
defShapeY_C = oShapeY_C + surfaceUy_C * y_fac
    elas_energy_C = np.resize(elas_energy_pred[nPred[0,0] * nPred[0,1] : (nPred[0,0]*nPred[0,1]) + (nPred[1,0]*nPred[1,1]), 0], [nPred[1,1], nPred[1,0]])
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
from numpy import cos, sin
from scipy.optimize import minimize
from scipy.optimize import Bounds
from scipy.optimize import LinearConstraint
class Rover():
def __init__(self,l1, l2, l3, l4, alpha, beta, gamma, wheel_rad = 0.4, body_len = None, body_wid = None):
self.l1 = l1
self.l2 = l2
self.l3 = l3
self.l4 = l4
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.wheel_rad = wheel_rad
self.body_len = body_len
self.body_wid = body_wid
def set_terrain(self, terr):
self.terrain = terr
def set_inertias(self, mass, g):
self.mass = mass
self.g = g
def z_center(self, x):
if not hasattr(self, 'terrain'):
print("No terrain specified")
z_gnd = 0.0
grad = 0.0
else:
z_gnd = self.terrain.heightAt(x)
grad = self.terrain.gradient(x)
z_center = z_gnd + self.wheel_rad * np.cos(np.arctan(grad))
return z_center
def func_th2(self, th2, x2, z2):
l3 = self.l3
l4 = self.l4
beta = self.beta
z_center = self.z_center
x3 = x2 + l3*np.cos(th2) + l4*np.cos(np.pi - beta - th2)
z3_gnd = z_center(x3)
z3_kin = z2 + l3*np.sin(th2) - l4*np.sin(np.pi - beta - th2)
return z3_gnd - z3_kin
def func_th1(self, th1, xb, zb):
l1 = self.l1
l2 = self.l2
alpha = self.alpha
z_center = self.z_center
x1 = xb - l2*np.cos(np.pi - alpha - th1) - l1*np.cos(th1)
z1_gnd = z_center(x1)
z1_kin = zb + l2*np.sin(np.pi - alpha - th1) - l1*np.sin(th1)
return z1_gnd - z1_kin
def find_angles(self, x2):
z2 = self.z_center(x2)
th2_guess = np.deg2rad(50) # guess
th2 = fsolve(self.func_th2, th2_guess, args=(x2, z2))[0]
xb = x2 + self.l3*np.cos(th2)
zb = z2 + self.l3*np.sin(th2)
th1_guess = np.deg2rad(50) # guess
th1 = fsolve(self.func_th1, th1_guess, args=(xb, zb))[0]
return th1, th2
def find_geom(self, x2):
l1 = self.l1
l2 = self.l2
l3 = self.l3
l4 = self.l4
alpha = self.alpha
beta = self.beta
th1, th2 = self.find_angles(x2)
z2 = self.z_center(x2)
xb = x2 + l3*np.cos(th2)
zb = z2 + l3*np.sin(th2)
x3 = x2 + l3*np.cos(th2) + l4*np.cos(np.pi - beta - th2)
z3 = z2 + l3*np.sin(th2) - l4*np.sin(np.pi - beta - th2)
z3_gnd = self.z_center(x3)
x0 = xb - l2*np.cos(np.pi - alpha - th1)
z0 = zb + l2*np.sin(np.pi - alpha - th1)
x1 = xb - l2*np.cos(np.pi - alpha - th1) - l1*np.cos(th1)
z1 = zb + l2*np.sin(np.pi - alpha - th1) - l1*np.sin(th1)
z1_gnd = self.z_center(x1)
r0 = (x0,z0)
r1 = (x1,z1)
r2 = (x2,z2)
r3 = (x3,z3)
rb = (xb,zb)
return r0, r1, rb, r2, r3
def find_slope_alphas(self, r1, r2, r3):
alpha1 = np.arctan(self.terrain.gradient(r1[0]))
alpha2 = np.arctan(self.terrain.gradient(r2[0]))
alpha3 = np.arctan(self.terrain.gradient(r3[0]))
return alpha1, alpha2, alpha3
def find_torques(self, x2, Fxnet, Fznet, Mynet, mu, vel = 0.0, crr = 0.0):
l1 = self.l1
l2 = self.l2
l3 = self.l3
l4 = self.l4
rad = self.wheel_rad
alpha = self.alpha
beta = self.beta
mass = self.mass
g = self.g
if not self.mass>0:
print("Error. Mass not specified.")
if vel==0.0 and Fxnet<=0.0:
# No rolling resistance
crr = 0.0
else:
# Account for rolling resistance, if specified
crr = crr
r0, r1, rb, r2, r3 = self.find_geom(x2)
alpha1, alpha2, alpha3 = self.find_slope_alphas(r1, r2, r3)
th1, th2 = self.find_angles(x2)
ux = -rad*sin(alpha1) + l1*cos(th1) - l2*cos(th1+self.alpha)
uy = rad*cos(alpha1) + l1*sin(th1) - l2*sin(th1+self.alpha)
vx = -rad*sin(alpha2) + l3*cos(th2)
        vy = -rad*cos(alpha2) + l3*sin(th2)
wx = -rad*sin(alpha3) + l4*cos(th2+beta)
wy = rad*cos(alpha3) + l4*sin(th2+beta)
zx = -l2*cos(th1+alpha)
zy = -l2*sin(th1+alpha)
A = np.array([[cos(alpha1), cos(alpha2), cos(alpha3), -sin(alpha1)-crr*cos(alpha1), -sin(alpha2)-crr*cos(alpha2), -sin(alpha3)-crr*cos(alpha3)],
[sin(alpha1), sin(alpha2), sin(alpha3), cos(alpha1)-crr*sin(alpha1), cos(alpha2)-crr*sin(alpha2), cos(alpha3)-crr*sin(alpha3)],
[cos(alpha1)*uy - sin(alpha1)*ux, 0, 0, -sin(alpha1)*uy -cos(alpha1)*ux - crr*(cos(alpha1)*uy - sin(alpha1)*ux), 0, 0],
[0, cos(alpha2)*vy - sin(alpha2)*vx, cos(alpha3)*wy - sin(alpha3)*wx, 0, -cos(alpha2)*vx - sin(alpha2)*vy -crr*(cos(alpha2)*vy - sin(alpha2)*vx), -cos(alpha3)*wx - sin(alpha3)*wy -crr*(cos(alpha3)*wy - sin(alpha3)*wx)]])
E = [[Fxnet],[Fznet + mass*g],[Fxnet*zy - Fznet*zx + Mynet - mass*g*zx],[0]]
# min P = T1^2 + T2^2 + T3^2
# Constraints:
# Ax = E
# N1>=0 N2 >= 0 N3>= 0
# T1 >= - mu*N1, T1<=mu*N1
def power(x):
# x is of shape 6,1
return x[0]**2 + x[1]**2 + x[2]**2
# N1>=0, N2 >= 0, N3>= 0
bounds = Bounds([-np.inf, -np.inf, -np.inf, 0, 0, 0], [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf])
# Ax = E
linear_constraint_force_bal = LinearConstraint(A, np.squeeze(E), np.squeeze(E))
# T1 >= - mu*N1, T1<=mu*N1
lb = [0, -np.inf, 0, -np.inf, 0, -np.inf]
ub = [np.inf, 0, np.inf, 0, np.inf, 0]
mat = np.array([[1,0,0,mu,0,0],
[1,0,0,-mu,0,0],
[0,1,0,0,mu,0],
[0,1,0,0,-mu,0],
[0,0,1,0,0,mu],
[0,0,1,0,0,-mu]])
linear_constraint_fric = LinearConstraint(mat, lb, ub)
x0 = np.matmul(np.linalg.pinv(A), E)
# print("Psuedo inverse soln:")
# print("torques and normal forces:",x0)
# print("power consumption:",power(x0))
res = minimize(power, x0, bounds= bounds, constraints=[linear_constraint_force_bal, linear_constraint_fric])
# print("Optimizer soln:")
# print("torques and normal forces:",res.x)
# print("power consumption:",res.fun)
return res.x, res.fun
def apply_torques(self, x2, tau1, tau2, tau3, Fznet, Mynet, mu, vel = 0.0, crr = 0.0):
l1 = self.l1
l2 = self.l2
l3 = self.l3
l4 = self.l4
rad = self.wheel_rad
alpha = self.alpha
beta = self.beta
r0, r1, rb, r2, r3 = self.find_geom(x2)
alpha1, alpha2, alpha3 = self.find_slope_alphas(r1, r2, r3)
th1, th2 = self.find_angles(x2)
mass = self.mass
g = self.g
if not self.mass>0:
print("Error. Mass not specified.")
T1 = tau1/rad
T2 = tau2/rad
T3 = tau3/rad
ux = -rad*sin(alpha1) + l1*cos(th1) - l2*cos(th1+self.alpha)
uy = rad*cos(alpha1) + l1*sin(th1) - l2*sin(th1+self.alpha)
        vx = -rad*sin(alpha2) + l3*cos(th2)
from __future__ import division
import numpy as np
from collections import namedtuple
import bilby
from bilby.gw import conversion
import os
import gwpopulation
MassContainer = namedtuple('MassContainer', ['primary_masses', 'secondary_masses',
'mass_ratios', 'total_masses', 'chirp_masses'])
SpinContainer = namedtuple('SpinContainer', ['s13', 's23'])
ExtrinsicParameterContainer = namedtuple('ExtrinsicParameterContainer', ['inc', 'ra', 'dec',
'phase', 'psi', 'geocent_time',
'luminosity_distance'])
AllParameterContainer = namedtuple('AllParameterContainer',
['primary_masses', 'secondary_masses', 'mass_ratios', 'total_masses',
'chirp_masses', 's13', 's23', 'inc', 'ra', 'dec',
'phase', 'psi', 'geocent_time', 'luminosity_distance'])
def generate_mass_parameters(size=10000, clean=False, alpha=1.5, mmin=8, mmax=45, beta=3, plot=False):
m1s = np.linspace(4, 45, size)
qs = np.linspace(0.01, 1, size)
q_mesh, m_mesh = np.meshgrid(qs, m1s)
outfile = 'pop_masses_{}.txt'.format(size)
if clean or not os.path.isfile(outfile):
primary_masses, mass_ratios = \
_generate_masses(m_mesh, q_mesh, size, alpha=alpha, m_min=mmin, m_max=mmax, beta=beta)
        save = np.array((primary_masses, mass_ratios))
# -----------------------------------------------------------------------------
# From Numpy to Python
# Copyright (2017) <NAME> - BSD license
# More information at https://github.com/rougier/numpy-book
# -----------------------------------------------------------------------------
import numpy as np
import numba as nb
@nb.jit(nopython=True, parallel=False, fastmath=True)
def mgrid(xn, yn):
Xi = np.empty((xn, yn), dtype=np.int64)
Yi = np.empty((xn, yn), dtype=np.int64)
for i in range(xn):
Xi[i, :] = i
for j in range(yn):
Yi[:, j] = j
return Xi, Yi
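# Quick sanity check (an addition, not in the original): the hand-rolled mgrid above
# should agree with NumPy's own np.mgrid for small shapes.
def _check_mgrid(xn=3, yn=4):
    Xi, Yi = mgrid(xn, yn)
    Xi_ref, Yi_ref = np.mgrid[0:xn, 0:yn]
    return np.array_equal(Xi, Xi_ref) and np.array_equal(Yi, Yi_ref)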
@nb.jit(nopython=True, parallel=False, fastmath=True)
def linspace(start, stop, num, dtype):
X = np.empty((num, ), dtype=dtype)
dist = (stop - start) / (num - 1)
for i in range(num):
X[i] = start + i * dist
return X
@nb.jit(nopython=True, parallel=False, fastmath=True)
def mandelbrot(xmin, xmax, ymin, ymax, xn, yn, itermax, horizon=2.0):
# Adapted from
# https://thesamovar.wordpress.com/2009/03/22/fast-fractals-with-python-and-numpy/
# Xi, Yi = np.mgrid[0:xn, 0:yn]
# X = np.linspace(xmin, xmax, xn, dtype=np.float64)[Xi]
# Y = np.linspace(ymin, ymax, yn, dtype=np.float64)[Yi]
Xi, Yi = mgrid(xn, yn)
X = linspace(xmin, xmax, xn, dtype=np.float64)
Y = linspace(ymin, ymax, yn, dtype=np.float64)
# C = X + Y*1j
C = np.reshape(X, (xn, 1)) + Y * 1j
N_ = np.zeros(C.shape, dtype=np.int64)
Z_ = np.zeros(C.shape, dtype=np.complex128)
# Xi.shape = Yi.shape = C.shape = xn*yn
Xi = np.reshape(Xi, (xn * yn))
Yi = np.reshape(Yi, (xn * yn))
C = np.reshape(C, (xn * yn))
    Z = np.zeros(C.shape, np.complex128)
"""
Auto-tuning a convolutional network for ARM CPU
====================================================
**Author**: `<NAME> <https://github.com/merrymercy>`_
Auto-tuning for a specific ARM device is critical for getting the best
performance. This is a tutorial about how to tune a whole convolutional
network.
The operator implementation for ARM CPU in TVM is written in template form.
It has many tunable knobs (tile factor, vectorization, unrolling, etc).
We will do tuning for all convolution and depthwise convolution operators
in the neural network. After the tuning, we can get a log file which stores
the best knob values for all required operators. When the tvm compiler compiles
these operators, it will query this log file to get the best knob values.
We also released pre-tuned parameters for some arm devices. You can go to
`ARM CPU Benchmark <https://github.com/dmlc/tvm/wiki/Benchmark#arm-cpu>`_
to see the results.
"""
######################################################################
# Install dependencies
# ----------------------------------------
# To use autotvm package in tvm, we need to install some extra dependencies.
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user psutil xgboost tornado
#
# To make tvm run faster in tuning, it is recommended to use cython
# as FFI of tvm. In the root directory of tvm, execute
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user cython
# sudo make cython3
#
# Now return to python code. Import packages.
import os
import numpy as np
import nnvm.testing
import nnvm.compiler
import tvm
from tvm import autotvm
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.contrib.util import tempdir
import tvm.contrib.graph_runtime as runtime
#################################################################
# Define network
# --------------
# First we need to define the network in nnvm symbol API.
# We can load some pre-defined network from :code:`nnvm.testing`.
# We can also load models from MXNet, ONNX and TensorFlow (see NNVM
# tutorials :ref:`tutorial-nnvm` for more details).
def get_network(name, batch_size):
"""Get the symbol definition and random weight of a network"""
input_shape = (batch_size, 3, 224, 224)
output_shape = (batch_size, 1000)
if "resnet" in name:
n_layer = int(name.split('-')[1])
net, params = nnvm.testing.resnet.get_workload(num_layers=n_layer, batch_size=batch_size)
elif "vgg" in name:
n_layer = int(name.split('-')[1])
net, params = nnvm.testing.vgg.get_workload(num_layers=n_layer, batch_size=batch_size)
elif name == 'mobilenet':
net, params = nnvm.testing.mobilenet.get_workload(batch_size=batch_size)
elif name == 'squeezenet_v1.1':
net, params = nnvm.testing.squeezenet.get_workload(batch_size=batch_size, version='1.1')
elif name == 'inception_v3':
input_shape = (1, 3, 299, 299)
net, params = nnvm.testing.inception_v3.get_workload(batch_size=batch_size)
elif name == 'custom':
# an example for custom network
from nnvm.testing import utils
net = nnvm.sym.Variable('data')
net = nnvm.sym.conv2d(net, channels=4, kernel_size=(3,3), padding=(1,1))
net = nnvm.sym.flatten(net)
net = nnvm.sym.dense(net, units=1000)
net, params = utils.create_workload(net, batch_size, (3, 224, 224))
elif name == 'mxnet':
# an example for mxnet model
from mxnet.gluon.model_zoo.vision import get_model
block = get_model('resnet18_v1', pretrained=True)
net, params = nnvm.frontend.from_mxnet(block)
net = nnvm.sym.softmax(net)
else:
raise ValueError("Unsupported network: " + name)
return net, params, input_shape, output_shape
#################################################################
# Start RPC Tracker
# -----------------
# TVM uses RPC session to communicate with ARM boards.
# During tuning, the tuner will send the generated code to the board and
# measure the speed of code on the board.
#
# To scale up the tuning, TVM uses RPC Tracker to manage distributed devices.
# The RPC Tracker is a centralized master node. We can register all devices to
# the tracker. For example, if we have 10 phones, we can register all of them
# to the tracker, then we can run 10 measurements in parallel, which accelerates
# the tuning process.
#
# To start an RPC tracker, run this command in the host machine. The tracker is
# required during the whole tuning process, so we need to open a new terminal for
# this command:
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190
#
# The expected output is
#
# .. code-block:: bash
#
# INFO:RPCTracker:bind to 0.0.0.0:9190
#################################################################
# Register devices to RPC Tracker
# -----------------------------------
# Now we can register our devices to the tracker. The first step is to
# build tvm runtime for the ARM devices.
#
# * For Linux:
# Follow this section :ref:`build-tvm-runtime-on-device` to build
# tvm runtime on the device. Then register the device to tracker by
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_server --tracker=[HOST_IP]:9190 --key=rk3399
#
# (replace :code:`[HOST_IP]` with the IP address of your host machine)
#
# * For Android:
# Follow this `readme page <https://github.com/dmlc/tvm/tree/master/apps/android_rpc>`_ to
# install tvm rpc apk on the android device. Make sure you can pass the android rpc test.
#
# After registering devices, we can confirm it by querying rpc_tracker
#
# .. code-block:: bash
#
# python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190
#
# For example, if we have 2 Huawei mate10 pro, 11 Raspberry Pi 3B and 2 rk3399,
# the output can be
#
# .. code-block:: bash
#
# Queue Status
# ----------------------------------
# key total free pending
# ----------------------------------
# mate10pro 2 2 0
# rk3399 2 2 0
# rpi3b 11 11 0
# ----------------------------------
#
# You can register multiple devices to the tracker to accelerate the measurement in tuning.
###########################################
# Set Tuning Options
# ------------------
# Before tuning, we should do some configurations. Here I use an RK3399 board
# as example. In your setting, you should modify the target and device_key accordingly.
# set :code:`use_android` to True if you use android phone.
#### DEVICE CONFIG ####
# Replace "aarch64-linux-gnu" with the correct target of your board.
# This target is used for cross compilation. You can query it by :code:`gcc -v` on your device.
target = tvm.target.create('llvm -device=arm_cpu -target=aarch64-linux-gnu')
# Also replace this with the device key in your tracker
device_key = 'rk3399'
# Set this to True if you use android phone
use_android = False
#### TUNING OPTION ####
network = 'resnet-18'
log_file = "%s.%s.log" % (device_key, network)
dtype = 'float32'
tuning_option = {
'log_filename': log_file,
'tuner': 'xgb',
'n_trial': 1000,
'early_stopping': 400,
'measure_option': autotvm.measure_option(
builder=autotvm.LocalBuilder(
build_func='ndk' if use_android else 'default'),
runner=autotvm.RPCRunner(
device_key, host='localhost', port=9190,
number=5,
timeout=4,
),
),
}
####################################################################
#
# .. note:: How to set tuning options
#
# In general, the default value provided here works well.
# If you have large time budget, you can set :code:`n_trial`, :code:`early_stopping` larger,
# which makes the tuning run longer.
#
###################################################################
# Begin Tuning
# ------------
# Now we can extract tuning tasks from the network and begin tuning.
# Here we provide a simple utility function to tune a list of tasks.
# This function is just an initial implementation which tunes them in sequential order.
# Later we will introduce a more sophisticated tuner scheduler.
# You can skip the implementation of this function for this tutorial.
def tune_tasks(tasks,
measure_option,
tuner='xgb',
n_trial=1000,
early_stopping=None,
log_filename='tuning.log',
use_transfer_learning=True,
try_winograd=True):
if try_winograd:
for i in range(len(tasks)):
try: # try winograd template
tsk = autotvm.task.create(tasks[i].name, tasks[i].args,
tasks[i].target, tasks[i].target_host, 'winograd')
input_channel = tsk.workload[1][1]
if input_channel >= 64:
tasks[i] = tsk
except Exception:
pass
# create tmp log file
tmp_log_file = log_filename + ".tmp"
if os.path.exists(tmp_log_file):
os.remove(tmp_log_file)
for i, tsk in enumerate(reversed(tasks)):
prefix = "[Task %2d/%2d] " % (i+1, len(tasks))
# create tuner
if tuner == 'xgb' or tuner == 'xgb-rank':
tuner_obj = XGBTuner(tsk, loss_type='rank')
elif tuner == 'ga':
tuner_obj = GATuner(tsk, pop_size=50)
elif tuner == 'random':
tuner_obj = RandomTuner(tsk)
elif tuner == 'gridsearch':
tuner_obj = GridSearchTuner(tsk)
else:
raise ValueError("Invalid tuner: " + tuner)
if use_transfer_learning:
if os.path.isfile(tmp_log_file):
tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
# do tuning
tuner_obj.tune(n_trial=min(n_trial, len(tsk.config_space)),
early_stopping=early_stopping,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(n_trial, prefix=prefix),
autotvm.callback.log_to_file(tmp_log_file)])
# pick best records to a cache file
autotvm.record.pick_best(tmp_log_file, log_filename)
os.remove(tmp_log_file)
########################################################################
# Finally we launch tuning jobs and evaluate the end-to-end performance.
def tune_and_evaluate(tuning_opt):
# extract workloads from nnvm graph
print("Extract tasks...")
net, params, input_shape, out_shape = get_network(network, batch_size=1)
tasks = autotvm.task.extract_from_graph(net, target=target,
shape={'data': input_shape}, dtype=dtype,
symbols=(nnvm.sym.conv2d,))
# run tuning tasks
print("Tuning...")
tune_tasks(tasks, **tuning_opt)
# compile kernels with history best records
with autotvm.apply_history_best(log_file):
print("Compile...")
with nnvm.compiler.build_config(opt_level=2, add_pass=['AlterOpLayout']):
graph, lib, params = nnvm.compiler.build(
net, target=target, shape={'data': input_shape}, params=params, dtype=dtype)
# export library
tmp = tempdir()
if use_android:
from tvm.contrib import ndk
filename = "net.so"
lib.export_library(tmp.relpath(filename), ndk.create_shared)
else:
filename = "net.tar"
lib.export_library(tmp.relpath(filename))
# upload module to device
print("Upload...")
remote = autotvm.measure.request_remote(device_key, 'localhost', 9190,
timeout=10000)
remote.upload(tmp.relpath(filename))
rlib = remote.load_module(filename)
# upload parameters to device
ctx = remote.context(str(target), 0)
rparams = {k: tvm.nd.array(v, ctx) for k, v in params.items()}
data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
module = runtime.create(graph, rlib, ctx)
module.set_input('data', data_tvm)
module.set_input(**rparams)
# evaluate
print("Evaluate inference time cost...")
ftimer = module.module.time_evaluator("run", ctx, number=8, repeat=3)
prof_res = np.array(ftimer().results) * 1000 # convert to millisecond
print("Mean inference time (std dev): %.2f ms (%.2f ms)" %
              (np.mean(prof_res), np.std(prof_res)))
"""
This code optimizes the offsets on top of SMPL.
This code requires that SMPL be already aligned with the scans.
Author: Bharat
Code taken from: Combining Implicit Function Learning and Parametric Models for 3D Human Reconstruction, ECCV'20
Cite: LoopReg: Self-supervised Learning of Implicit Surface Correspondences, Pose and Shape for 3D Human Mesh Registration, NeurIPS' 20.
"""
import os
from os.path import split, join, exists
from glob import glob
import torch
from kaolin.rep import TriangleMesh as tm
from kaolin.metrics.mesh import point_to_surface, laplacian_loss
from tqdm import tqdm
import pickle as pkl
import numpy as np
from lib.smpl_paths import SmplPaths
from lib.th_SMPL import th_batch_SMPL
from fit_SMPL import fit_SMPL, save_meshes, batch_point_to_surface, backward_step
def get_loss_weights():
"""Set loss weights"""
loss_weight = {'s2m': lambda cst, it: 10. ** 2 * cst * (1 + it),
'm2s': lambda cst, it: 10. ** 2 * cst, #/ (1 + it),
'lap': lambda cst, it: 10. ** 4 * cst / (1 + it),
'offsets': lambda cst, it: 10. ** 1 * cst / (1 + it)}
return loss_weight
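# Illustrative sketch (an addition, not part of the original script): evaluating the
# annealing lambdas above for a unit cost shows that the data terms grow (s2m) or stay
# flat (m2s) across iterations while the regularizers (lap, offsets) decay.
def _example_loss_weight_schedule(iterations=3, cst=1.0):
    weights = get_loss_weights()
    return {it: {name: fn(cst, it) for name, fn in weights.items()}
            for it in range(iterations)}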
def forward_step(th_scan_meshes, smpl, init_smpl_meshes):
"""
Performs a forward step, given smpl and scan meshes.
Then computes the losses.
"""
# forward
verts, _, _, _ = smpl()
th_smpl_meshes = [tm.from_tensors(vertices=v,
faces=smpl.faces) for v in verts]
# losses
loss = dict()
loss['s2m'] = batch_point_to_surface([sm.vertices for sm in th_scan_meshes], th_smpl_meshes)
loss['m2s'] = batch_point_to_surface([sm.vertices for sm in th_smpl_meshes], th_scan_meshes)
loss['lap'] = torch.stack([laplacian_loss(sc, sm) for sc, sm in zip(init_smpl_meshes, th_smpl_meshes)])
loss['offsets'] = torch.mean(torch.mean(smpl.offsets**2, axis=1), axis=1)
return loss
def optimize_offsets(th_scan_meshes, smpl, init_smpl_meshes, iterations, steps_per_iter):
# Optimizer
optimizer = torch.optim.Adam([smpl.offsets, smpl.pose, smpl.trans, smpl.betas], 0.005, betas=(0.9, 0.999))
# Get loss_weights
weight_dict = get_loss_weights()
for it in range(iterations):
loop = tqdm(range(steps_per_iter))
loop.set_description('Optimizing SMPL+D')
for i in loop:
optimizer.zero_grad()
# Get losses for a forward pass
loss_dict = forward_step(th_scan_meshes, smpl, init_smpl_meshes)
# Get total loss for backward pass
tot_loss = backward_step(loss_dict, weight_dict, it)
tot_loss.backward()
optimizer.step()
l_str = 'Lx100. Iter: {}'.format(i)
for k in loss_dict:
l_str += ', {}: {:0.4f}'.format(k, loss_dict[k].mean().item()*100)
loop.set_description(l_str)
def fit_SMPLD(scans, smpl_pkl=None, gender='male', save_path=None, display=False):
# Get SMPL faces
sp = SmplPaths(gender=gender)
smpl_faces = sp.get_faces()
th_faces = torch.tensor(smpl_faces.astype('float32'), dtype=torch.long).cuda()
# Batch size
batch_sz = len(scans)
# Init SMPL
if smpl_pkl is None or smpl_pkl[0] is None:
print('SMPL not specified, fitting SMPL now')
pose, betas, trans = fit_SMPL(scans, None, gender, save_path, display)
else:
pose, betas, trans = [], [], []
for spkl in smpl_pkl:
smpl_dict = pkl.load(open(spkl, 'rb'), encoding='latin-1')
p, b, t = smpl_dict['pose'], smpl_dict['betas'], smpl_dict['trans']
pose.append(p)
if len(b) == 10:
                temp = np.zeros((300,))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-07-24 18:29:48
# @Author : <NAME> (<EMAIL>)
# @Link : http://iridescent.ink
# @Version : $1.0$
import numpy as np
from pyailib.utils.const import EPS
from pyailib.base.arrayops import sl
from pyailib.base.mathops import nextpow2, complex2real, real2complex
def standardization(X, mean=None, std=None, axis=None, extra=False):
r"""standardization
.. math::
\bar{X} = \frac{X-\mu}{\sigma}
Args:
X (ndarray): data to be normalized,
mean (list or None, optional): mean value (the default is None, which means auto computed)
std (list or None, optional): standard deviation (the default is None, which means auto computed)
axis (list or int, optional): specify the axis for computing mean and standard deviation (the default is None, which means all elements)
extra (bool, optional): if True, also return the mean and std (the default is False, which means just return the standardized data)
Returns:
(ndarray): Standardized/Normalized ndarray.
"""
if type(X) is not np.ndarray:
X = np.array(X)
if mean is None:
if axis is None:
mean = np.mean(X)
else:
mean = np.mean(X, axis, keepdims=True)
if std is None:
if axis is None:
std = np.std(X)
else:
std = np.std(X, axis, keepdims=True)
if extra is True:
return (X - mean) / (std + EPS), mean, std
else:
return (X - mean) / (std + EPS)
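# Illustrative usage sketch (an addition, not part of the original module): after
# standardization each row should have (approximately) zero mean and unit standard
# deviation when axis=1 is used.
def _example_standardization():
    X = np.random.randn(4, 8) * 3.0 + 5.0
    Xn, mean, std = standardization(X, axis=1, extra=True)
    return np.allclose(Xn.mean(axis=1), 0.0, atol=1e-6), np.allclose(Xn.std(axis=1), 1.0, atol=1e-3)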
def scale(X, st=[0, 1], sf=None, istrunc=True, extra=False):
r"""
Scale data.
.. math::
x \in [a, b] \rightarrow y \in [c, d]
.. math::
y = (d-c)*(x-a) / (b-a) + c.
Args:
X (ndarray): The data to be scaled.
st (tuple, list, optional): Specifies the range of data after beening scaled. Default [0, 1].
sf (tuple, list, optional): Specifies the range of data. Default [min(X), max(X)].
        istrunc (bool): Specifies whether to truncate the data to [a, b]. For example,
If sf == [a, b] and 'istrunc' is true,
then X[X < a] == a and X[X > b] == b.
extra (bool): If ``True``, also return :attr:`st` and :attr:`sf`.
Returns:
out (ndarray): Scaled data ndarray.
st, sf (list or tuple): If :attr:`extra` is true, also be returned
Raises:
Exception: Description
"""
if type(X) is not np.ndarray:
        X = np.array(X)
import h5py
import numpy as np
import os
import torch
from mod_shift.implemented_functions import w_clipped_linear
# loads datasets in the format Batch * spatial dims * embedding dims
# individual functions for loading dataset
def load_cremi(dir_path, downsample_factor, slices, set, gt=False):
file = h5py.File(os.path.join(dir_path, "CREMI", "data", "CREMI.h5"), "r") # E B H W
if gt:
if slices == "all":
return file[set]["gt_seg"][:, ::downsample_factor, ::downsample_factor] # B H W
elif isinstance(slices, int):
return file[set]["gt_seg"][slices, ::downsample_factor, ::downsample_factor][None, ...] # 1 H W
else:
print("Invalid slice parameter {}".format(slice))
else:
if slices == "all":
return file[set]["pred"][:, :, ::downsample_factor, ::downsample_factor].transpose(1, 2, 3, 0) # B H W E
elif isinstance(slices, int):
return file[set]["pred"][:, slices, ::downsample_factor, ::downsample_factor].transpose(1,2,0)[None, ...] # 1 H W E
else: print("Invalid slice parameter {}".format(slices))
def load_isbi(dir_path, downsample_factor, slices, gt=False):
if gt:
file = h5py.File(os.path.join(dir_path,"ISBI", "data", "ISBI.h5"), "r")
if slices == "all":
return file["gt_seg"][:, ::downsample_factor, ::downsample_factor] # B H W
        elif isinstance(slices, int):
return file["gt_seg"][slices, :: downsample_factor, ::downsample_factor][None, ...] # 1 H W
else:
print("Invalid slice parameter {}".format(slices))
else:
data = np.load(os.path.join(dir_path, "ISBI", "data", "ISBI_embeddings_PCA_8.npy")) # B E H W
if slices == "all":
return data[:, :, :: downsample_factor, ::downsample_factor].transpose((0, 2, 3, 1)) # B H W E
        elif isinstance(slices, int):
return data[slices, :, :: downsample_factor, ::downsample_factor].transpose((1, 2, 0))[None, ...] # 1 H W E
else:
print("Invalid slice parameter {}".format(slices))
# abstracted dataset loading function
def load_data(data_config, gt=False):
dataset = data_config["dataset"]
if dataset == "CREMI":
data = load_cremi(data_config["root_path"],
data_config["downsample_factor"],
data_config["slices"],
data_config["set"],
gt=gt)
elif dataset == "ISBI":
data = load_isbi(data_config["root_path"],
data_config["downsample_factor"],
data_config["slices"],
gt=gt)
else: print("Please specify an implemented dataset.")
return data
def get_offsets():
return np.array([[-1, 0], [0, -1],
# indirect 3d nhood for dam edges
[-9, 0], [0, -9],
# long range direct hood
[-9, -9], [9, -9], [-9, -4], [-4, -9], [4, -9], [9, -4],
# inplane diagonal dam edges
[-27, 0], [0, -27]])
def compute_offset_weights_per_slice(points, get_weights, offsets):
weights_list = []
shape = points.shape[:2]
for offset in offsets:
assert offset[1] <= 0, "Offsets have incorrect signs"
if np.abs(offset[1]) > shape[1] or np.abs(offset[0]) > shape[0]:
print(f"Offset {offset} exceeded image dimensions, setting dummy weights of 0")
weights = torch.zeros(shape)
else:
if offset[0] <= 0:
dists = torch.norm(points[:shape[0] + offset[0], :shape[1] + offset[1]]
- points[-offset[0]:, -offset[1]:],
dim=-1,
p=2)
weights = get_weights(dists)
weights = torch.nn.functional.pad(weights,
mode="constant",
value=0,
pad=(np.abs(offset[1]), 0, np.abs(offset[0]), 0))
else:
dists = torch.norm(points[:shape[0] - offset[0], -offset[1]:]
- points[offset[0]:, :shape[1] + offset[1]],
dim=-1,
p=2)
weights = get_weights(dists)
weights = torch.nn.functional.pad(weights,
mode="constant",
value=0,
                                                  pad=(np.abs(offset[1]), 0, 0, np.abs(offset[0])))
import numpy as np
from ..generic.test_tools import construct_phase_values,\
cross_spectrum_to_coordinate_list
from ..generic.filtering_statistical import mad_filtering
from .matching_tools_frequency_filters import normalize_power_spectrum
def phase_fitness(Q, di, dj, norm=2):
assert type(Q) == np.ndarray, ('please provide an array')
# admin
data = cross_spectrum_to_coordinate_list(Q)
C = construct_phase_values(data[:,0:2], di, dj)
# calculation
QC = (data[:,-1] - C) ** norm
dXY = np.abs(QC)
fitness = (1-np.divide(np.sum(dXY) , 2*norm*dXY.size))**norm
return fitness
def phase_support(Q, di, dj, thres=1.4826):
assert type(Q) == np.ndarray, ('please provide an array')
data = cross_spectrum_to_coordinate_list(Q)
C = construct_phase_values(data[:,0:2], di, dj)
# calculation
QC = (data[:,-1] - C) ** 2
dXY = np.abs(QC)
IN = mad_filtering(dXY, thres=thres)
support = np.divide(np.sum(IN), np.prod(IN.shape))
return support
def signal_to_noise(Q, C, norm=2):
""" calculate the signal to noise from a theoretical and an experimental
cross-spectrum
Parameters
----------
Q : numpy.array, size=(m,n), dtype=complex
cross-spectrum
C : numpy.array, size=(m,n), dtype=complex
phase plane
norm : integer
norm for the difference
Returns
-------
snr : float, range=0...1
signal to noise ratio
See Also
--------
phase_fitness
"""
assert type(Q) == np.ndarray, ('please provide an array')
assert type(C) == np.ndarray, ('please provide an array')
Qn = normalize_power_spectrum(Q)
Q_diff = np.abs(Qn-C)**norm
snr = 1 - (np.sum(Q_diff) / (2*norm*np.prod(C.shape)))
return snr
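# Illustrative sketch (an addition, not part of the original module): when the
# experimental cross-spectrum equals the theoretical phase plane the ratio should be
# (close to) one; normalize_power_spectrum is assumed to leave a unit-magnitude
# spectrum unchanged.
def _example_signal_to_noise():
    rng = np.random.default_rng(0)
    C = np.exp(1j * rng.uniform(-np.pi, np.pi, size=(16, 16)))  # pure phase plane
    snr_perfect = signal_to_noise(C.copy(), C)                  # identical spectra -> ~1.0
    snr_noisy = signal_to_noise(C * np.exp(1j * rng.normal(0., 1., C.shape)), C)
    return snr_perfect, snr_noisy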
def local_coherence(Q, ds=1):
""" estimate the local coherence of a spectrum
Parameters
----------
Q : numpy.array, size=(m,n), dtype=complex
array with cross-spectrum, with centered coordinate frame
ds : integer, default=1
kernel radius to describe the neighborhood
Returns
-------
M : numpy.array, size=(m,n), dtype=float
vector coherence from no to ideal, i.e.: 0...1
See Also
--------
thresh_masking
Example
-------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from ..generic.test_tools import create_sample_image_pair
>>> # create cross-spectrum with random displacement
>>> im1,im2,_,_,_ = create_sample_image_pair(d=2**4, max_range=1)
>>> spec1,spec2 = np.fft.fft2(im1), np.fft.fft2(im2)
>>> Q = spec1 * np.conjugate(spec2)
>>> Q = normalize_spectrum(Q)
>>> Q = np.fft.fftshift(Q) # transform to centered grid
>>> C = local_coherence(Q)
>>> plt.imshow(C), cmap='OrRd'), plt.colorbar(), plt.show()
>>> plt.imshow(Q), cmap='twilight'), plt.colorbar(), plt.show()
"""
assert type(Q) == np.ndarray, ("please provide an array")
diam = 2 * ds + 1
C = np.zeros_like(Q)
(isteps, jsteps) = np.meshgrid(np.linspace(-ds, +ds, 2 * ds + 1, dtype=int), \
np.linspace(-ds, +ds, 2 * ds + 1, dtype=int))
    IN = np.ones(diam ** 2, dtype=bool)
"""
Module for neural analysis
"""
import numpy as np
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple
def get_isi(spk_ts_list: list):
"""
    Get inter-spike intervals of spikes
    Parameters
    ----------
    spk_ts_list : list
        List of spike timestamp arrays (one per trial)
Returns
-------
isi : class object
class object for inter-spike intervals
"""
isi = np.array([], dtype=np.float64)
for spk in spk_ts_list:
isi = np.append(isi, np.diff(spk))
isi = ISI(isi) # return the class object
return isi
def get_peth(evt_ts_list: list, spk_ts_list: list,
pre_evt_buffer=None, duration=None,
bin_size=None,
nb_bins=None
):
"""
Get peri-event histogram & firing rates
Parameters
----------
evt_ts_list : list
Timestamps for behavioral events (e.g., syllable onset/offsets)
spk_ts_list : list
Spike timestamps
pre_evt_buffer : int, default=None
Size of buffer window prior to the first event (in ms)
duration : int, optional
        Duration of the peth (in ms). The peth is truncated to this duration if specified.
bin_size : int, default=None
Time bin size
nb_bins : int, default=None
Number of bins
Returns
-------
peth : np.ndarray
Peri-event time histograms
time_bin : np.ndarray
Time bin vector
parameter : dict
Parameters for draw peth
Notes
-----
    If pre_evt_buffer, bin_size, or nb_bins are not specified,
    default values are taken from ..analysis.parameters
"""
from ..analysis.parameters import peth_parm
import copy
import math
parameter = peth_parm.copy()
if pre_evt_buffer is None:
pre_evt_buffer = parameter['buffer']
if bin_size is None:
bin_size = parameter['bin_size']
if nb_bins is None:
nb_bins = parameter['nb_bins']
time_bin = np.arange(0, nb_bins, bin_size) - pre_evt_buffer
peth = np.zeros((len(evt_ts_list), nb_bins)) # nb of trials x nb of time bins
for trial_ind, (evt_ts, spk_ts) in enumerate(zip(evt_ts_list, spk_ts_list)):
spk_ts_new = copy.deepcopy(spk_ts)
if not isinstance(evt_ts, np.float64):
# evt_ts = np.asarray(list(map(float, evt_ts))) + pre_evt_buffer
# spk_ts_new -= evt_ts[0]
evt_ts = np.asarray(list(map(float, evt_ts)))
spk_ts_new -= evt_ts[0]
spk_ts_new += pre_evt_buffer
else:
spk_ts_new -= evt_ts
spk_ts_new += pre_evt_buffer
for spk in spk_ts_new:
ind = math.ceil(spk / bin_size)
# print("spk = {}, bin index = {}".format(spk, ind)) # for debugging
if ind < 0: raise Exception("Index out of bound")
peth[trial_ind, ind] += 1
# Truncate the array leaving out only the portion of our interest
if duration:
ind = np.where(((0 - pre_evt_buffer) <= time_bin) & (time_bin < duration))[0]
peth = peth[:, ind[0]:ind[-1]+1]
time_bin = time_bin[ind[0]:ind[-1]+1]
return peth, time_bin, parameter
def get_pcc(fr_array: np.ndarray) -> dict:
"""
Get pairwise cross-correlation
Parameters
----------
fr_array : np.ndarray
(trial x time_bin)
Returns
-------
pcc_dict : dict
"""
pcc_dict = {}
pcc_arr = np.array([])
for ind1, fr1 in enumerate(fr_array):
for ind2, fr2 in enumerate(fr_array):
if ind2 > ind1:
if np.linalg.norm((fr1 - fr1.mean()), ord=1) * np.linalg.norm((fr2 - fr2.mean()), ord=1):
if not np.isnan(np.corrcoef(fr1, fr2)[0, 1]):
pcc_arr = np.append(pcc_arr, np.corrcoef(fr1, fr2)[0, 1]) # get correlation coefficient
pcc_dict['array'] = pcc_arr
pcc_dict['mean'] = round(pcc_arr.mean(), 3)
return pcc_dict
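# Illustrative sketch (an addition, not part of the original module): get_pcc averages
# the Pearson correlation over all trial pairs, so linearly scaled firing-rate patterns
# give r = 1 and reversed patterns give r = -1.
def _example_get_pcc():
    fr_array = np.array([[0., 1., 2., 3.],
                         [0., 2., 4., 6.],   # scaled copy of trial 1 -> r = 1
                         [3., 2., 1., 0.]])  # reversed -> r = -1 with both others
    return get_pcc(fr_array)  # {'array': array([ 1., -1., -1.]), 'mean': -0.333}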
def jitter_spk_ts(spk_ts_list, shuffle_limit, reproducible=True):
"""
Add a random temporal jitter to the spike
Parameters
----------
reproducible : bool
Make the results reproducible by setting the seed as equal to index
"""
spk_ts_jittered_list = []
for ind, spk_ts in enumerate(spk_ts_list):
np.random.seed()
if reproducible: # randomization seed
seed = ind
np.random.seed(seed) # make random jitter reproducible
else:
seed = np.random.randint(len(spk_ts_list), size=1)
np.random.seed(seed) # make random jitter reproducible
nb_spk = spk_ts.shape[0]
jitter = np.random.uniform(-shuffle_limit, shuffle_limit, nb_spk)
spk_ts_jittered_list.append(spk_ts + jitter)
return spk_ts_jittered_list
def pcc_shuffle_test(ClassObject, PethInfo, plot_hist=False, alpha=0.05):
"""
    Run a statistical test to see if the observed pairwise cross-correlation is significantly above the baseline obtained by spike-time shuffling
Parameters
----------
ClassObject : class object (e.g., NoteInfo, MotifInfo)
PethInfo : peth info class object
plot_hist : bool
Plot histogram of bootstrapped pcc values (False by default)
Returns
-------
p_sig : dict
True if the pcc is significantly above the baseline
"""
from ..analysis.parameters import peth_shuffle
from collections import defaultdict
from functools import partial
import scipy.stats as stats
import matplotlib.pyplot as plt
pcc_shuffle = defaultdict(partial(np.ndarray, 0))
for i in range(peth_shuffle['shuffle_iter']):
ClassObject.jitter_spk_ts(peth_shuffle['shuffle_limit'])
pi_shuffle = ClassObject.get_note_peth(shuffle=True) # peth object
pi_shuffle.get_fr() # get firing rates
pi_shuffle.get_pcc() # get pcc
for context, pcc in pi_shuffle.pcc.items():
            pcc_shuffle[context] = np.append(pcc_shuffle[context], pcc['mean'])
# custom utilies for displaying animation, collecting rollouts and more
import numpy as np
import pong_utils
import torch
import gym
import time
import matplotlib
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from parallelEnv import parallelEnv
import progressbar as pb
# check which device is being used.
# I recommend disabling gpu until you've made sure that the code runs
# device = torch.device("cpu")
device = pong_utils.device
print("using device: ", device)
# render ai gym environment
# PongDeterministic does not contain random frameskip
# so is faster to train than the vanilla Pong-v4 environment
env = gym.make('PongDeterministic-v4')
print("List of available actions: ", env.unwrapped.get_action_meanings())
# we will only use the actions 'RIGHTFIRE' = 4 and 'LEFTFIRE' = 5
# the 'FIRE' part ensures that the game starts again after losing a life
# the actions are hard-coded in pong_utils.py
# PREPROCESSING
# show what a preprocessed image looks like
env.reset()
_, _, _, _ = env.step(0)
# get a frame after 20 steps
for _ in range(20):
frame, _, _, _ = env.step(1)
plt.subplot(1, 2, 1)
plt.imshow(frame)
plt.title('original image')
plt.subplot(1, 2, 2)
plt.title('preprocessed image')
# 80 x 80 black and white image
plt.imshow(pong_utils.preprocess_single(frame), cmap='Greys')
plt.show()
# ########
# POLICY #
# ########
# set up a convolutional neural net
# the output is the probability of moving right
# P(left) = 1-P(right)
class Policy(nn.Module):
def __init__(self):
super(Policy, self).__init__()
# 80x80 to outputsize x outputsize
# outputsize = (inputsize - kernel_size + stride)/stride
# (round down if not an integer)
# 80x80x2 to 38x38x4: (80-6+2)/2 = 38
self.conv1 = nn.Conv2d(2, 4, kernel_size=6, stride=2, bias=False)
# 38x38x4 to 9x9x16: (38-6+4)/4 = 9
self.conv2 = nn.Conv2d(4, 16, kernel_size=6, stride=4)
# output = 9x9x16 here
self.size = 9*9*16
# 2 fully connected layers
self.fc1 = nn.Linear(self.size, 256)
self.fc2 = nn.Linear(256, 1)
# Sigmoid prob output
self.sig = nn.Sigmoid()
def forward(self, x):
# conv layers
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
# flatten the tensor
x = x.view(-1, self.size)
# fully connected layers
x = F.relu(self.fc1(x))
x = self.fc2(x)  # no ReLU before the sigmoid output, otherwise P(right) >= 0.5 always
return self.sig(x)
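# Added helper (editor's sketch, not in the original notebook): the conv output
# sizes used above follow out = (in - kernel) // stride + 1.
def _conv_out(size, kernel, stride):
    # e.g. _conv_out(80, 6, 2) == 38 and _conv_out(38, 6, 4) == 9,
    # so the flattened feature size is 9 * 9 * 16 as set in Policy.__init__.
    return (size - kernel) // stride + 1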
# Policy
policy = Policy().to(device)
# PreCoded Policy
print("using device: ", device)
# policy = pong_utils.Policy().to(device)
# we use the adam optimizer with learning rate 1e-4
# optim.SGD is also possible
optimizer = optim.Adam(policy.parameters(), lr=1e-4)
# Visualize
# pong_utils.play(env, policy, time=100)
# Rollout
envs = pong_utils.parallelEnv('PongDeterministic-v4', n=4, seed=12345)
prob, state, action, reward = pong_utils.collect_trajectories(envs, policy, tmax=100)
print(reward)
# Training
def surrogate(policy, old_probs, states, actions, rewards,
discount=0.995, beta=0.01):
discount = discount**np.arange(len(rewards))
rewards = np.asarray(rewards) * discount[:, np.newaxis]
# convert rewards to future rewards
rewards_future = rewards[::-1].cumsum(axis=0)[::-1]
mean = np.mean(rewards_future, axis=1)
std = np.std(rewards_future, axis=1) + 1.0e-10
rewards_normalized = (rewards_future - mean[:, np.newaxis])/std[:, np.newaxis]
# convert everything into pytorch tensors and move to gpu if available
actions = torch.tensor(actions, dtype=torch.int8, device=device)
old_probs = torch.tensor(old_probs, dtype=torch.float, device=device)
rewards = torch.tensor(rewards_normalized, dtype=torch.float, device=device)
# convert states to policy (or probability)
new_probs = pong_utils.states_to_prob(policy, states)
new_probs = torch.where(actions == pong_utils.RIGHT, new_probs, 1.0-new_probs)
ratio = new_probs/old_probs
# include a regularization term
# this steers new_policy towards 0.5
# which prevents policy to become exactly 0 or 1
# this helps with exploration
# add in 1.e-10 to avoid log(0) which gives nan
entropy = -(new_probs * torch.log(old_probs + 1.e-10) +
(1.0 - new_probs) * torch.log(1.0 - old_probs + 1.e-10))
return torch.mean(ratio*rewards + beta*entropy)
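# Added worked example (editor's note): with rewards = [[1.], [0.], [2.]] and
# discount = 0.5, the code above scales the rewards to [[1.], [0.], [0.5]] and the
# reversed cumulative sum gives rewards_future = [[1.5], [0.5], [0.5]], i.e. each
# step holds the discounted sum of its own and all later rewards.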
Lsur = surrogate(policy, prob, state, action, reward)
print(Lsur)
# #########
# TRAINING
# #########
# WARNING: running through all 800 episodes will take 30-45 minutes
# training loop max iterations
episode = 500
# episode = 800
# widget bar to display progress
widget = ['training loop: ', pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA()]
timer = pb.ProgressBar(widgets=widget, maxval=episode).start()
# initialize environment
envs = parallelEnv('PongDeterministic-v4', n=4, seed=1234)
discount_rate = .99
beta = .01
tmax = 320
# keep track of progress
mean_rewards = []
for e in range(episode):
# collect trajectories
old_probs, states, actions, rewards = \
pong_utils.collect_trajectories(envs, policy, tmax=tmax)
total_rewards = np.sum(rewards, axis=0)
# this is the SOLUTION!
# use your own surrogate function
L = -surrogate(policy, old_probs, states, actions, rewards, beta=beta)
# L = -pong_utils.surrogate(policy, old_probs, states, actions, rewards, beta=beta)
optimizer.zero_grad()
L.backward()
optimizer.step()
del L
# the regularization term also decays
# this reduces exploration in later runs
beta *= 0.995
# get the average reward of the parallel environments
mean_rewards.append(np.mean(total_rewards))
# display some progress every 20 iterations
if (e+1) % 20 == 0:
print("Episode: {0:d}, score: {1:f}".format(e+1, np.mean(total_rewards)))
print(total_rewards)
# update progress widget bar
timer.update(e+1)
timer.finish()
# play game after training!
pong_utils.play(env, policy, time=2000)
#
plt.plot(mean_rewards)
plt.show()
# save your policy!
torch.save(policy, 'REINFORCE.policy')
# load your policy if needed
# policy = torch.load('REINFORCE.policy')
# try and test out the solution!
# policy = torch.load('PPO_solution.policy')
# #####################
# PPO
# ######################
# clipped surrogate function
# similar as -policy_loss for REINFORCE, but for PPO
def clipped_surrogate(policy, old_probs, states, actions, rewards,
discount=0.995,
epsilon=0.1, beta=0.01):
discount = discount**np.arange(len(rewards))
rewards = np.asarray(rewards)*discount[:, np.newaxis]
# convert rewards to future rewards
rewards_future = rewards[::-1].cumsum(axis=0)[::-1]
mean = np.mean(rewards_future, axis=1)
std = np.std(rewards_future, axis=1) + 1.0e-10
rewards_normalized = (rewards_future - mean[:, np.newaxis])/std[:, np.newaxis]
# convert everything into pytorch tensors and move to gpu if available
actions = torch.tensor(actions, dtype=torch.int8, device=device)
old_probs = torch.tensor(old_probs, dtype=torch.float, device=device)
rewards = torch.tensor(rewards_normalized, dtype=torch.float, device=device)
# convert states to policy (or probability)
new_probs = pong_utils.states_to_prob(policy, states)
new_probs = torch.where(actions == pong_utils.RIGHT, new_probs, 1.0-new_probs)
# ratio for clipping
ratio = new_probs/old_probs
# clipped function
clip = torch.clamp(ratio, 1-epsilon, 1+epsilon)
clipped_surrogate = torch.min(ratio*rewards, clip*rewards)
# include a regularization term
# this steers new_policy towards 0.5
# add in 1.e-10 to avoid log(0) which gives nan
entropy = -(new_probs * torch.log(old_probs + 1.e-10) + (1.0 - new_probs) * torch.log(1.0 - old_probs + 1.e-10))
# this returns an average of all the entries of the tensor
# effectively computing L_sur^clip / T
# averaged over time-step and number of trajectories
# this is desirable because we have normalized our rewards
return torch.mean(clipped_surrogate + beta*entropy)
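# Added worked example (editor's note): with epsilon = 0.1 a probability ratio of
# 1.3 is clamped to 1.1, so a positive (normalized) reward contributes at most
# 1.1 * reward; for a negative reward torch.min keeps the unclipped 1.3 * reward,
# which is the pessimistic lower bound used by PPO.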
# ##############
# TRAINING PPO #
# ##############
# training loop max iterations
episode = 500
# widget bar to display progress
widget = ['training loop: ', pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA()]
timer = pb.ProgressBar(widgets=widget, maxval=episode).start()
# Policy
policy = Policy().to(device)
envs = parallelEnv('PongDeterministic-v4', n=8, seed=1234)
discount_rate = .99
epsilon = 0.1
beta = .01
tmax = 320
SGD_epoch = 4  # number of gradient steps per batch of collected trajectories
# keep track of progress
mean_rewards = []
for e in range(episode):
# collect trajectories
old_probs, states, actions, rewards = \
pong_utils.collect_trajectories(envs, policy, tmax=tmax)
total_rewards = np.sum(rewards, axis=0)
# gradient ascent step
for _ in range(SGD_epoch):
# uncomment to utilize your own clipped function!
L = -clipped_surrogate(policy, old_probs, states, actions, rewards, epsilon=epsilon, beta=beta)
# L = -pong_utils.clipped_surrogate(policy, old_probs, states, actions, rewards, epsilon=epsilon, beta=beta)
optimizer.zero_grad()
L.backward()
optimizer.step()
del L
# the clipping parameter reduces as time goes on
epsilon *= .999
# the regularization term also decays
# this reduces exploration in later runs
beta *= .995
# get the average reward of the parallel environments
mean_rewards.append(np.mean(total_rewards))
# display some progress every 20 iterations
if (e+1) % 20 == 0:
print("Episode: {0:d}, score: {1:f}".format(e+1, | np.mean(total_rewards) | numpy.mean |
import numpy as np
import numpy.testing as npt
import nitime.timeseries as ts
import pytest
def test_get_time_unit():
number = 4
npt.assert_equal(ts.get_time_unit(number), None)
list_of_numbers = [4, 5, 6]
npt.assert_equal(ts.get_time_unit(list_of_numbers), None)
for tu in ['ps', 's', 'D']:
time_point = ts.TimeArray([4], time_unit=tu)
npt.assert_equal(ts.get_time_unit(time_point), tu)
list_of_time = [ts.TimeArray(4, time_unit=tu), ts.TimeArray(5, time_unit=tu)]
npt.assert_equal(ts.get_time_unit(list_of_time), tu)
# Go crazy, we don't mind:
list_of_lists = [[ts.TimeArray(4, time_unit=tu),
ts.TimeArray(5, time_unit=tu)],
[ts.TimeArray(4, time_unit=tu),
ts.TimeArray(5, time_unit=tu)]]
npt.assert_equal(ts.get_time_unit(list_of_lists), tu)
time_arr = ts.TimeArray([4, 5], time_unit=tu)
npt.assert_equal(ts.get_time_unit(time_arr), tu)
def test_TimeArray():
time1 = ts.TimeArray(list(range(100)), time_unit='ms')
time2 = time1 + time1
npt.assert_equal(time2.time_unit, 'ms')
time1 = ts.TimeArray(10 ** 6)
npt.assert_equal(time1.__repr__(), '1000000.0 s')
#TimeArray can't be more than 1-d:
with pytest.raises(ValueError) as e_info:
ts.TimeArray(np.zeros((2, 2)))
dt = ts.TimeArray(0.001, time_unit='s')
tt = ts.TimeArray([dt])
npt.assert_equal(dt, tt)
t1 = ts.TimeArray([0, 1, 2, 3])
t2 = ts.TimeArray([ts.TimeArray(0),
ts.TimeArray(1),
ts.TimeArray(2),
ts.TimeArray(3)])
npt.assert_equal(t1, t2)
def test_TimeArray_math():
"Addition and subtraction should convert to TimeArray units"
time1 = ts.TimeArray(list(range(10)), time_unit='ms')
time2 = ts.TimeArray(list(range(1,11)), time_unit='ms')
# units should be converted to whatever units the array has
time3 = time1 + 1
npt.assert_equal(time2,time3)
time4 = time2 - 1
npt.assert_equal(time1,time4)
# floats should also work
time3 = time1 + 1.0
npt.assert_equal(time2,time3)
time4 = time2 - 1.0
npt.assert_equal(time1,time4)
# test the r* versions
time3 = 1 + time1
npt.assert_equal(time2,time3)
time4 = 1 - time2
npt.assert_equal(-time1,time4)
# floats should also work
time3 = 1.0 + time1
npt.assert_equal(time2,time3)
time4 = 1.0 - time2
npt.assert_equal(-time1,time4)
timeunits = ts.TimeArray(list(range(10)), time_unit='s')
timeunits.convert_unit('ms')
# now, math with non-TimeArrays should be based on the new time_unit
# here the range() list gets converted to a TimeArray with the same units
# as timeunits (which is now 'ms')
tnew = timeunits + list(range(10))
npt.assert_equal(tnew, timeunits+time1) # recall that time1 was 0-10ms
def test_TimeArray_comparison():
"Comparison with unitless quantities should convert to TimeArray units"
time = ts.TimeArray(list(range(10)), time_unit='ms')
npt.assert_equal(time < 5 , [True]*5+[False]*5)
npt.assert_equal(time > 5 , [False]*6+[True]*4)
npt.assert_equal(time <= 5, [True]*6+[False]*4)
npt.assert_equal(time >= 5, [False]*5+[True]*5)
npt.assert_equal(time == 5, [False]*5+[True] + [False]*4)
time.convert_unit('s')
# now all of time is < 1 in the new time_unit
npt.assert_equal(time < 5 , [True]*10)
npt.assert_equal(time > 5 , [False]*10)
npt.assert_equal(time <= 5, [True]*10)
npt.assert_equal(time >= 5, [False]*10)
npt.assert_equal(time == 5, [False]*10)
def test_TimeArray_init_int64():
"""Make sure that we can initialize TimeArray with an array of ints"""
time = ts.TimeArray(np.int64(1))
npt.assert_equal(time.__repr__(), '1.0 s')
pass
def test_TimeArray_init_list():
"""Initializing with a list that contains TimeArray should work.
"""
for t in [0.001, ts.TimeArray(0.001, time_unit='s')]:
tl = [t]
ta = ts.TimeArray(t, time_unit='s')
tla = ts.TimeArray(tl, time_unit='s')
npt.assert_(ta, tla)
def test_TimeArray_repr():
"""
>>> a = ts.TimeArray([1.1,2,3])
>>> a
TimeArray([ 1.1, 2. , 3. ], time_unit='s')
>>> t = ts.TimeArray(a,time_unit='ms')
>>> t
TimeArray([ 1100., 2000., 3000.], time_unit='ms')
>>> t[0]
1100.0 ms
"""
def test_TimeArray_copyflag():
"""Testing the setting of the copy-flag, where that makes sense"""
#These two should both generate a TimeArray, with one picosecond.
#This one holds time_unit='s'
t1 = ts.TimeArray(np.array([1], dtype=np.int64), copy=False)
#This one holds time_unit='ps':
t2 = ts.TimeArray(1, time_unit='ps')
t3 = ts.TimeArray(t2, copy=False)
npt.assert_equal(t1, t2)
npt.assert_equal(t2.ctypes.data, t3.ctypes.data)
"""
This is a set of unit tests in the presence of correlated noise.
That noise was generated by convolving uncorrelated noise with the
dirty beam of a certain VLA observation. 3969 identical extended
sources on a regular 63*63 grid were convolved with the clean beam.
The accuracy of the deconvolution algorithm, i.e., the deconvolution
of the fitted parameters from the clean beam. is tested.
It also tests the accuracy of the peak flux measurements.
Bias up to 5 sigma is allowed.
Remember that oversampling of the synthesized beam will likely reduce bias.
Accuracy tests for integrated fluxes and positions will be added
later, as well as tests for the kappa*sigma clipper and the
deblending algorithm.
"""
import os
import numpy as np
import unittest
import tkp.accessors
from tkp.sourcefinder import image
from tkp.testutil.data import DATAPATH
from tkp.testutil.decorators import requires_data, duration
MAX_BIAS = 5.0
NUMBER_INSERTED = 3969
TRUE_PEAK_FLUX = 1063.67945065
TRUE_DECONV_SMAJ = 2.*5.5956/2.
TRUE_DECONV_SMIN = 0.5*4.6794/2.
TRUE_DECONV_BPA = -0.5*(-49.8)
# These are measured from the file CORRELATED_NOISE.FITS.
# BG_MEAN = numpy.mean(sourcefinder_image_from_accessor(FitsFile("CORRELATED_NOISE.FITS")).data)
BG_MEAN = -0.0072340798975137829
# BG_STD = numpy.std(sourcefinder_image_from_accessor(FitsFile("CORRELATED_NOISE.FITS")).data)
BG_STD = 5.3480336747739079
class SourceParameters(unittest.TestCase):
def setUp(self):
fitsfile = tkp.accessors.open(os.path.join(DATAPATH,
'sourcefinder/simulations/deconvolved.fits'))
img = image.ImageData(fitsfile.data, fitsfile.beam,
fitsfile.wcs)
# This is quite subtle. We bypass any possible flaws in the
# kappa, sigma clipping algorithm by supplying a background
# level and noise map. In this way we make sure that any
# possible biases in the measured source parameters cannot
# come from biases in the background level. The peak fluxes,
# in particular, can be biased low if the background levels
# are biased high. The background and noise levels supplied
# here are the true values.
extraction_results = img.extract(
det=10.0, anl=6.0,
noisemap=np.ma.array(BG_STD*np.ones((2048, 2048))),
bgmap=np.ma.array(BG_MEAN*np.ones((2048, 2048))))
self.number_sources = len(extraction_results)
peak_fluxes = []
deconv_smajaxes = []
deconv_sminaxes = []
deconv_bpas = []
for sources in extraction_results:
peak_fluxes.append([sources.peak.value, sources.peak.error])
deconv_smajaxes.append([sources.smaj_dc.value,
sources.smaj_dc.error])
deconv_sminaxes.append([sources.smin_dc.value,
sources.smin_dc.error])
deconv_bpas.append([sources.theta_dc.value,
sources.theta_dc.error])
self.peak_fluxes = np.array(peak_fluxes)
self.deconv_smajaxes = np.array(deconv_smajaxes)
self.deconv_sminaxes = np.array(deconv_sminaxes)
self.deconv_bpas = np.array(deconv_bpas)
@duration(100)
@requires_data(os.path.join(DATAPATH,
'sourcefinder/simulations/deconvolved.fits'))
def testAllParameters(self):
# Test all deconvolved
self.assertEqual(
np.where(np.isnan(self.deconv_smajaxes), 1, 0).sum(), 0)
self.assertEqual(
np.where(np.isnan(self.deconv_sminaxes), 1, 0).sum(), 0)
self.assertEqual(
np.where(np.isnan(self.deconv_bpas), 1, 0).sum(), 0)
# Test number of sources
self.assertEqual(self.number_sources, NUMBER_INSERTED)
# Test peak fluxes
peak_weights = 1./self.peak_fluxes[:,1]**2
sum_peak_weights = np.sum(peak_weights)
import itertools
import os
from copy import deepcopy, copy
from multiprocessing.pool import Pool
from pickle import PicklingError
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import src.cosmo_misc
from src import gzsl_search_configurations as configurations
from src.cosmo_misc import crossval_and_plot_sweep_results, get_predictions_of_best_cfg
from sklearn.metrics import auc
from src.utils import ml_utils
from src.utils.ml_utils import pfloor, pceil
from src.metrics import ZSL_Metrics
def GZSL_experiment(data, expert_preds, gating_model, mixture_type='adaptive_smoothing',
hard_gating=False, num_resample_points=100, num_pool_workers=12,
fs_results=None, dirname=None, show_plot=True):
# gating module predictions
# Note: Set to None to allow models that don't use a gating mechanism (e.g. CalibratedStacking)
pred_gating_GZSLval, pred_gating_GZSLtest, g_name = None, None, None
if gating_model is not None:
pred_gating_GZSLval = gating_model.pred_GZSLval
pred_gating_GZSLtest = gating_model.pred_GZSLtest
g_name = gating_model.name
cfg = configurations.get(data['dataset_name'], mixture_type, g_name)
mixture_model = mixture_factory[mixture_type]
fig_list = []
### Val set
pred_ZS = expert_preds['ZS__GZSLval']
pred_S = expert_preds['S__GZSLval']
pred_gating = pred_gating_GZSLval
Y_GZSLval = data['Y_GZSLval']
current_model_GZSLval = mixture_model(data['seen_classes_val'],
data['unseen_classes_val'],
pred_ZS=pred_ZS,
pred_S=pred_S,
pred_gating=pred_gating,
gating_model_name=g_name,
hard_gating=hard_gating,
mixture_type=mixture_type)
print('Experiment name: ', current_model_GZSLval.name)
### Test set
current_model_GZSLtest = mixture_model(data['seen_classes_test'],
data['unseen_classes_test'],
pred_ZS=expert_preds['ZS__GZSLtest'],
pred_S=expert_preds['S__GZSLtest'],
pred_gating=pred_gating_GZSLtest,
gating_model_name=g_name,
hard_gating=hard_gating,
mixture_type=mixture_type)
threshold_name = list(cfg['anomaly_detector_threshold'].keys())[0]
# If searching over 2 or more hyper-params, start with a coarse sweep over all of them
# and use it to set the range for a fine search over the threshold hyper-param.
if len(list(itertools.product(*cfg['hyper_params'].values()))) >= 2:
print('Starting with coarse hyper param search')
# print('cfg = ', cfg)
complete_cfg = cfg['hyper_params'].copy()
complete_cfg.update(cfg['anomaly_detector_threshold'])
_ = current_model_GZSLval.sweep_variables(Y=Y_GZSLval,
num_pool_workers=num_pool_workers,
**complete_cfg)
best_cfg = current_model_GZSLval.df_sweep_results.loc[
current_model_GZSLval.df_sweep_results.Acc_H.idxmax(), :]
print(f"Best (coarse) hyper-param configuration is:\n"
f"{best_cfg.loc[complete_cfg.keys()]}")
print(f"Acc_H = {best_cfg.loc['Acc_H']}")
# Setting the params for finer threshold search
best_cfg_params = best_cfg.loc[cfg['hyper_params'].keys()].to_dict()
df_GZSLval_sweep_best = current_model_GZSLval.df_sweep_results.query(
' and '.join([f'{k}=={v}' for k,v in best_cfg_params.items()]))
th_range_resampled = src.cosmo_misc.resample_sweepkey_by_curve(
df_GZSLval_sweep_best, threshold_name, num_resample_points,num_resample_points)
best_complete_cfg = deepcopy(best_cfg_params)
best_complete_cfg[threshold_name] = th_range_resampled
else:
# only 1 hyper param
best_cfg_params = deepcopy(cfg['hyper_params'])
best_complete_cfg = deepcopy(best_cfg_params)
best_complete_cfg.update(cfg['anomaly_detector_threshold'])
best_best_complete_cfg_as_lists = configurations.cast_to_lists(best_complete_cfg)
print('Fine search over the threshold parameter:')
_ = current_model_GZSLval.sweep_variables(Y_GZSLval, num_pool_workers=num_pool_workers,
**best_best_complete_cfg_as_lists)
# Sweep the threshold over all test models, in order to eval AUSUC and generate figures.
# Note: For computing Acc_H, we will use the best model selected on GZSLval (above).
# The selection is performed in the process_and_plot_sweep_results() method
_ = current_model_GZSLtest.sweep_variables(data['Y_GZSLtest'],
num_pool_workers=num_pool_workers,
**best_best_complete_cfg_as_lists)
df_res_test, df_res_val = \
crossval_and_plot_sweep_results(threshold_name, data, expert_preds,
current_model_GZSLval, current_model_GZSLtest, fig_list,
fs_results, best_complete_cfg, dirname)
if show_plot:
plt.draw(); plt.pause(0.001) # source https://stackoverflow.com/a/33050617
else:
plt.close()
print('Test performance:', df_res_test.iloc[0, :])
activations_best = {}
if not mixture_type in ['calibrated_stacking']:
activations_val, activations_test, _, _ = get_predictions_of_best_cfg(best_complete_cfg, Y_GZSLval,
data['Y_GZSLtest'], current_model_GZSLval, current_model_GZSLtest)
activations_best[current_model_GZSLtest.name] = dict(val=activations_val,
test=activations_test)
return df_res_test, df_res_val, activations_best
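# Added usage sketch (editor's note, not from the original repository): an
# illustrative call, assuming `data`, `expert_preds` and `gating_model` were built
# upstream with the keys accessed above (e.g. data['Y_GZSLval'], expert_preds['ZS__GZSLval']).
#   df_test, df_val, acts = GZSL_experiment(data, expert_preds, gating_model,
#                                           mixture_type='adaptive_smoothing',
#                                           num_pool_workers=4, dirname='results')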
class CombinerGZSL(object): # a.k.a. mixture
"""
This is a parent class for the different approaches to combining S (seen) & ZS (unseen) expert decisions
using a gating model.
"""
def __init__(self, seen_classes, unseen_classes, pred_ZS, pred_S,
pred_gating, gating_model_name,
mixture_type, hard_gating=False):
self._seen_classes = seen_classes
self._unseen_classes = unseen_classes
self.__pred_ZS = pred_ZS
self.__pred_S = pred_S
self._pred_gating = pred_gating
self._gating_model_name = gating_model_name
self._hard_gating = hard_gating
self._combiner = mixture_type
self.set_name()
self.save_activations = False
self.activations = {}
self.df_sweep_results = None
def set_name(self):
gating_type = 'Soft-Gating'
if self._hard_gating:
gating_type = 'Hard-Gating'
self.name = f'combiner={self._combiner}|' \
f'gater={self._gating_model_name}|{gating_type}'
@property
def pred_ZS(self):
""" To make sure experiments are independent, access to pred_ZS is only through a copy """
return copy(self.__pred_ZS)
@property
def pred_S(self):
""" To make sure experiments are independent, access to pred_S is only through a copy """
return copy(self.__pred_S)
####################################################
""" API to implement by child class """
def _predict(self, X):
""" Needs to update self.pred_ZS and self.pred_S (if applicable)
"""
raise NotImplementedError()
def combine(self, **kwargs):
raise NotImplementedError()
####################################################
@staticmethod
def single_iter_of_sweep_for_parallel_pool(params):
# (ugly) adaptation of the parallel pool API for multiple variables
self, current_params_values, hp_keys, Y, zs_metrics = params
current_params = dict(zip(hp_keys, current_params_values))
# Combine expert predictions, using current iteration hyper-params, to generate new combined prediction
pred = self.combine(**current_params)
# Evaluate GZSL metrics of combined model
metric_results = {}
metric_results['Acc_ts'], metric_results['Acc_tr'], metric_results[
'Acc_H'] = zs_metrics.generlized_scores(Y, pred)
metric_results.update(current_params)
return metric_results
def reset_df_results(self, hp_keys):
self.df_sweep_results = pd.DataFrame(
columns=list(hp_keys) + 'Acc_ts,Acc_tr,Acc_H'.split(','))
def sweep_variables(self, Y, num_pool_workers=4,
**hyper_params_ranges): # Dict[str, Union[List, np.array]]
# num_pool_workers allows parallel execution
# Sweep over an outer-product (grid search) of hyper-params ranges
hp_keys, hp_ranges = zip(*hyper_params_ranges.items())
self.reset_df_results(hp_keys)
all_params = list(itertools.product(*hp_ranges))
new_params_list = all_params.copy()
zs_metrics = ZSL_Metrics(self._seen_classes, self._unseen_classes)
results_list = []
if new_params_list:
if num_pool_workers>1:
""" Parallel execution of model evaluations with different hyper-params """
try:
with Pool(num_pool_workers) as po: # This ensures that the processes get closed once they are done
pool_results = po.map(self.single_iter_of_sweep_for_parallel_pool,
((self, current_params_values, hp_keys, Y,
zs_metrics#, progress_bar
) for current_params_values in new_params_list))
results_list = pool_results
except PicklingError:
print('Warning: Can\'t execute in parallel due to PicklingError. '
'A common solution is to rerun the call that initializes this '
'class instance.')
num_pool_workers=1
if num_pool_workers == 1:
""" model evaluations with different hyper-params using a serial for loop """
for current_params_values in new_params_list:
res = self.single_iter_of_sweep_for_parallel_pool((
self, current_params_values, hp_keys, Y, zs_metrics))
results_list.append(res)
# aggregate results to a DataFrame
for k, current_params_values in enumerate(all_params):
currect_results = results_list[k]
self.df_sweep_results = self.df_sweep_results.append(currect_results,
ignore_index=True)
return self.df_sweep_results
def plot_tstr_curve_cvpr(self, df_sweep_results=None,
x_name='Acc_tr', y_name='Acc_ts',
xlim=None, ylim=None, ax=None, is_first=False, color=None):
if df_sweep_results is None:
df_sweep_results = self.df_sweep_results
if ax is None:
ax = plt.gca()
X = 100*df_sweep_results.loc[:, x_name]
Y = 100*df_sweep_results.loc[:, y_name]
if xlim is None:
xlim = (pfloor(X.min(), 1), pceil(X.max(), 1))
if ylim is None:
ylim = (pfloor(Y.min(), 1), pceil(Y.max(), 1))
ax.plot(X, Y, 'o', linewidth=5, color=color)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
if is_first:
plt.xlabel(x_name)
plt.ylabel(y_name)
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
def AUSUC(self, df_sweep_results=None,
x_name='Acc_tr', y_name='Acc_ts'):
""" Calculate Area-Under-Seen-Unseen-Curve (AUSUC) metric given sweep results. """
if df_sweep_results is None:
df_sweep_results = self.df_sweep_results
X = df_sweep_results.loc[:, x_name]
Y = df_sweep_results.loc[:, y_name]
# Calc area under curve
X_sorted_arg = np.argsort(X)
sorted_X = np.array(X)[X_sorted_arg]
sorted_Y = np.array(Y)[X_sorted_arg]
leftmost_X, leftmost_Y = 0, sorted_Y[0]
rightmost_X, rightmost_Y = sorted_X[-1], 0
sorted_X = np.block([np.array([leftmost_X]), sorted_X, np.array([rightmost_X])])
sorted_Y = np.block([np.array([leftmost_Y]), sorted_Y, np.array([rightmost_Y])])
# License: MIT
import os
import abc
import numpy as np
from openbox.utils.util_funcs import check_random_state
from openbox.utils.logging_utils import get_logger
from openbox.utils.history_container import HistoryContainer, MOHistoryContainer, \
MultiStartHistoryContainer
from openbox.utils.constants import MAXINT, SUCCESS
from openbox.utils.samplers import SobolSampler, LatinHypercubeSampler
from openbox.utils.multi_objective import get_chebyshev_scalarization, NondominatedPartitioning
from openbox.utils.config_space.util import convert_configurations_to_array
from openbox.core.base import build_acq_func, build_optimizer, build_surrogate
from openbox.core.base import Observation
class Advisor(object, metaclass=abc.ABCMeta):
"""
Basic Advisor Class, which adopts a policy to sample a configuration.
"""
def __init__(self, config_space,
num_objs=1,
num_constraints=0,
initial_trials=3,
initial_configurations=None,
init_strategy='random_explore_first',
history_bo_data=None,
rand_prob=0.1,
optimization_strategy='bo',
surrogate_type='auto',
acq_type='auto',
acq_optimizer_type='auto',
ref_point=None,
output_dir='logs',
task_id='default_task_id',
random_state=None,
**kwargs):
# Create output (logging) directory.
# Init logging module.
# Random seed generator.
self.num_objs = num_objs
self.num_constraints = num_constraints
self.init_strategy = init_strategy
self.output_dir = output_dir
self.task_id = task_id
self.rng = check_random_state(random_state)
self.logger = get_logger(self.__class__.__name__)
# Basic components in Advisor.
self.rand_prob = rand_prob
self.optimization_strategy = optimization_strategy
# Init the basic ingredients in Bayesian optimization.
self.history_bo_data = history_bo_data
self.surrogate_type = surrogate_type
self.constraint_surrogate_type = None
self.acq_type = acq_type
self.acq_optimizer_type = acq_optimizer_type
self.init_num = initial_trials
self.config_space = config_space
self.config_space_seed = self.rng.randint(MAXINT)
self.config_space.seed(self.config_space_seed)
self.ref_point = ref_point
# init history container
if self.num_objs == 1:
self.history_container = HistoryContainer(task_id, self.num_constraints, config_space=self.config_space)
else: # multi-objectives
self.history_container = MOHistoryContainer(task_id, self.num_objs, self.num_constraints, ref_point)
# initial design
if initial_configurations is not None and len(initial_configurations) > 0:
self.initial_configurations = initial_configurations
self.init_num = len(initial_configurations)
else:
self.initial_configurations = self.create_initial_design(self.init_strategy)
self.init_num = len(self.initial_configurations)
self.surrogate_model = None
self.constraint_models = None
self.acquisition_function = None
self.optimizer = None
self.auto_alter_model = False
self.algo_auto_selection()
self.check_setup()
self.setup_bo_basics()
def algo_auto_selection(self):
from ConfigSpace import UniformFloatHyperparameter, UniformIntegerHyperparameter, \
CategoricalHyperparameter, OrdinalHyperparameter
# analyze config space
cont_types = (UniformFloatHyperparameter, UniformIntegerHyperparameter)
cat_types = (CategoricalHyperparameter, OrdinalHyperparameter)
n_cont_hp, n_cat_hp, n_other_hp = 0, 0, 0
for hp in self.config_space.get_hyperparameters():
if isinstance(hp, cont_types):
n_cont_hp += 1
elif isinstance(hp, cat_types):
n_cat_hp += 1
else:
n_other_hp += 1
n_total_hp = n_cont_hp + n_cat_hp + n_other_hp
info_str = ''
if self.surrogate_type == 'auto':
self.auto_alter_model = True
if n_total_hp >= 100:
self.optimization_strategy = 'random'
self.surrogate_type = 'prf' # for setup procedure
elif n_total_hp >= 10:
self.surrogate_type = 'prf'
elif n_cat_hp > n_cont_hp:
self.surrogate_type = 'prf'
else:
self.surrogate_type = 'gp'
info_str += ' surrogate_type: %s.' % self.surrogate_type
if self.acq_type == 'auto':
if self.num_objs == 1: # single objective
if self.num_constraints == 0:
self.acq_type = 'ei'
else: # with constraints
self.acq_type = 'eic'
elif self.num_objs <= 4: # multi objective (<=4)
if self.num_constraints == 0:
self.acq_type = 'ehvi'
else: # with constraints
self.acq_type = 'ehvic'
else: # multi objective (>4)
if self.num_constraints == 0:
self.acq_type = 'mesmo'
else: # with constraints
self.acq_type = 'mesmoc'
self.surrogate_type = 'gp_rbf'
info_str = ' surrogate_type: %s.' % self.surrogate_type
info_str += ' acq_type: %s.' % self.acq_type
if self.acq_optimizer_type == 'auto':
if n_cat_hp + n_other_hp == 0: # todo: support constant hp in scipy optimizer
self.acq_optimizer_type = 'random_scipy'
else:
self.acq_optimizer_type = 'local_random'
info_str += ' acq_optimizer_type: %s.' % self.acq_optimizer_type
if info_str != '':
info_str = '=== [BO auto selection] ===' + info_str
self.logger.info(info_str)
def alter_model(self, history_container):
if not self.auto_alter_model:
return
num_config_evaluated = len(history_container.configurations)
num_config_successful = len(history_container.successful_perfs)
if num_config_evaluated == 300:
if self.surrogate_type == 'gp':
self.surrogate_type = 'prf'
self.logger.info('n_observations=300, change surrogate model from GP to PRF!')
if self.acq_optimizer_type == 'random_scipy':
self.acq_optimizer_type = 'local_random'
self.logger.info('n_observations=300, change acq optimizer from random_scipy to local_random!')
self.setup_bo_basics()
def check_setup(self):
"""
Check optimization_strategy, num_objs, num_constraints, acq_type, surrogate_type.
Returns
-------
None
"""
assert self.optimization_strategy in ['bo', 'random']
assert isinstance(self.num_objs, int) and self.num_objs >= 1
assert isinstance(self.num_constraints, int) and self.num_constraints >= 0
# single objective
if self.num_objs == 1:
if self.num_constraints == 0:
assert self.acq_type in ['ei', 'eips', 'logei', 'pi', 'lcb', 'lpei', ]
else: # with constraints
assert self.acq_type in ['eic', ]
if self.constraint_surrogate_type is None:
self.constraint_surrogate_type = 'gp'
# multi-objective
else:
if self.num_constraints == 0:
assert self.acq_type in ['ehvi', 'mesmo', 'usemo', 'parego']
if self.acq_type == 'mesmo' and self.surrogate_type != 'gp_rbf':
self.surrogate_type = 'gp_rbf'
self.logger.warning('Surrogate model has changed to Gaussian Process with RBF kernel '
'since MESMO is used. Surrogate_type should be set to \'gp_rbf\'.')
else: # with constraints
assert self.acq_type in ['ehvic', 'mesmoc', 'mesmoc2']
if self.constraint_surrogate_type is None:
if self.acq_type == 'mesmoc':
self.constraint_surrogate_type = 'gp_rbf'
else:
self.constraint_surrogate_type = 'gp'
if self.acq_type == 'mesmoc' and self.surrogate_type != 'gp_rbf':
self.surrogate_type = 'gp_rbf'
self.logger.warning('Surrogate model has changed to Gaussian Process with RBF kernel '
'since MESMOC is used. Surrogate_type should be set to \'gp_rbf\'.')
if self.acq_type == 'mesmoc' and self.constraint_surrogate_type != 'gp_rbf':
self.surrogate_type = 'gp_rbf'
self.logger.warning('Constraint surrogate model has changed to Gaussian Process with RBF kernel '
'since MESMOC is used. Surrogate_type should be set to \'gp_rbf\'.')
# Check reference point is provided for EHVI methods
if 'ehvi' in self.acq_type and self.ref_point is None:
raise ValueError('Must provide reference point to use EHVI method!')
def setup_bo_basics(self):
"""
Prepare the basic BO components.
Returns
-------
None. The surrogate model(s), acquisition function and acquisition optimizer are stored on the advisor.
"""
if self.num_objs == 1 or self.acq_type == 'parego':
self.surrogate_model = build_surrogate(func_str=self.surrogate_type,
config_space=self.config_space,
rng=self.rng,
history_hpo_data=self.history_bo_data)
else: # multi-objectives
self.surrogate_model = [build_surrogate(func_str=self.surrogate_type,
config_space=self.config_space,
rng=self.rng,
history_hpo_data=self.history_bo_data)
for _ in range(self.num_objs)]
if self.num_constraints > 0:
self.constraint_models = [build_surrogate(func_str=self.constraint_surrogate_type,
config_space=self.config_space,
rng=self.rng) for _ in range(self.num_constraints)]
if self.acq_type in ['mesmo', 'mesmoc', 'mesmoc2', 'usemo']:
self.acquisition_function = build_acq_func(func_str=self.acq_type,
model=self.surrogate_model,
constraint_models=self.constraint_models,
config_space=self.config_space)
else:
self.acquisition_function = build_acq_func(func_str=self.acq_type,
model=self.surrogate_model,
constraint_models=self.constraint_models,
ref_point=self.ref_point)
if self.acq_type == 'usemo':
self.acq_optimizer_type = 'usemo_optimizer'
self.optimizer = build_optimizer(func_str=self.acq_optimizer_type,
acq_func=self.acquisition_function,
config_space=self.config_space,
rng=self.rng)
def create_initial_design(self, init_strategy='default'):
"""
Create several configurations as initial design.
Parameters
----------
init_strategy: str
Returns
-------
Initial configurations.
"""
default_config = self.config_space.get_default_configuration()
num_random_config = self.init_num - 1
if init_strategy == 'random':
initial_configs = self.sample_random_configs(self.init_num)
return initial_configs
elif init_strategy == 'default':
initial_configs = [default_config] + self.sample_random_configs(num_random_config)
return initial_configs
elif init_strategy == 'random_explore_first':
candidate_configs = self.sample_random_configs(100)
return self.max_min_distance(default_config, candidate_configs, num_random_config)
elif init_strategy == 'sobol':
sobol = SobolSampler(self.config_space, num_random_config, random_state=self.rng)
initial_configs = [default_config] + sobol.generate(return_config=True)
return initial_configs
elif init_strategy == 'latin_hypercube':
lhs = LatinHypercubeSampler(self.config_space, num_random_config, criterion='maximin')
initial_configs = [default_config] + lhs.generate(return_config=True)
return initial_configs
else:
raise ValueError('Unknown initial design strategy: %s.' % init_strategy)
def max_min_distance(self, default_config, src_configs, num):
min_dis = list()
initial_configs = list()
initial_configs.append(default_config)
for config in src_configs:
dis = np.linalg.norm(config.get_array() - default_config.get_array())
min_dis.append(dis)
min_dis = np.array(min_dis)
for i in range(num):
furthest_config = src_configs[np.argmax(min_dis)]
initial_configs.append(furthest_config)
min_dis[np.argmax(min_dis)] = -1
for j in range(len(src_configs)):
if src_configs[j] in initial_configs:
continue
updated_dis = np.linalg.norm(src_configs[j].get_array() - furthest_config.get_array())
min_dis[j] = min(updated_dis, min_dis[j])
return initial_configs
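# Added note (editor's): max_min_distance is a greedy farthest-point selection.
# Starting from the default configuration, it repeatedly adds the candidate whose
# minimum distance to the already selected configurations is largest, which spreads
# the initial design across the search space.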
def get_suggestion(self, history_container=None, return_list=False):
"""
Generate a configuration (suggestion) for this query.
Returns
-------
A configuration.
"""
if history_container is None:
history_container = self.history_container
self.alter_model(history_container)
num_config_evaluated = len(history_container.configurations)
num_config_successful = len(history_container.successful_perfs)
if num_config_evaluated < self.init_num:
return self.initial_configurations[num_config_evaluated]
if self.optimization_strategy == 'random':
return self.sample_random_configs(1, history_container)[0]
if (not return_list) and self.rng.random() < self.rand_prob:
self.logger.info('Sample random config. rand_prob=%f.' % self.rand_prob)
return self.sample_random_configs(1, history_container)[0]
X = convert_configurations_to_array(history_container.configurations)
Y = history_container.get_transformed_perfs(transform=None)
cY = history_container.get_transformed_constraint_perfs(transform='bilog')
if self.optimization_strategy == 'bo':
if num_config_successful < max(self.init_num, 1):
self.logger.warning('No enough successful initial trials! Sample random configuration.')
return self.sample_random_configs(1, history_container)[0]
# train surrogate model
if self.num_objs == 1:
self.surrogate_model.train(X, Y)
elif self.acq_type == 'parego':
weights = self.rng.random_sample(self.num_objs)
weights = weights / np.sum(weights)
scalarized_obj = get_chebyshev_scalarization(weights, Y)
self.surrogate_model.train(X, scalarized_obj(Y))
else: # multi-objectives
for i in range(self.num_objs):
self.surrogate_model[i].train(X, Y[:, i])
# train constraint model
for i in range(self.num_constraints):
self.constraint_models[i].train(X, cY[:, i])
# update acquisition function
if self.num_objs == 1:
incumbent_value = history_container.get_incumbents()[0][1]
self.acquisition_function.update(model=self.surrogate_model,
constraint_models=self.constraint_models,
eta=incumbent_value,
num_data=num_config_evaluated)
else: # multi-objectives
mo_incumbent_value = history_container.get_mo_incumbent_value()
if self.acq_type == 'parego':
self.acquisition_function.update(model=self.surrogate_model,
constraint_models=self.constraint_models,
eta=scalarized_obj(np.atleast_2d(mo_incumbent_value)), num_data=num_config_evaluated)
"""
@author: <NAME> <<EMAIL>>
"""
import os
import glob
import argparse
import pickle
import cv2
import numpy as np
from src.utils import *
from src.yolo_net import Yolo
CLASSES = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor']
def get_args():
parser = argparse.ArgumentParser("You Only Look Once: Unified, Real-Time Object Detection")
parser.add_argument("--image_size", type=int, default=448, help="The common width and height for all images")
parser.add_argument("--conf_threshold", type=float, default=0.35)
parser.add_argument("--nms_threshold", type=float, default=0.5)
parser.add_argument("--pre_trained_model_type", type=str, choices=["model", "params"], default="params")
parser.add_argument("--pre_trained_model_path", type=str, default="trained_models/only_params_trained_yolo_voc")
parser.add_argument("--input", type=str, default="test_images")
parser.add_argument("--output", type=str, default="test_images")
args = parser.parse_args()
return args
def test(opt):
if torch.cuda.is_available():
if opt.pre_trained_model_type == "model":
model = torch.load(opt.pre_trained_model_path)
else:
model = Yolo(20)
model.load_state_dict(torch.load(opt.pre_trained_model_path))
else:
if opt.pre_trained_model_type == "model":
model = torch.load(opt.pre_trained_model_path, map_location=lambda storage, loc: storage)
else:
model = Yolo(20)
model.load_state_dict(torch.load(opt.pre_trained_model_path, map_location=lambda storage, loc: storage))
model.eval()
if torch.cuda.is_available():
model.cuda()
colors = pickle.load(open("src/pallete", "rb"))
for image_path in glob.iglob(opt.input + os.sep + '*.jpg'):
if "prediction" in image_path:
continue
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
height, width = image.shape[:2]
image = cv2.resize(image, (opt.image_size, opt.image_size))
image = np.transpose(np.array(image, dtype=np.float32), (2, 0, 1))  # HWC -> CHW
from __future__ import division, print_function
import glob
import numpy as np
from scipy import interpolate as interp
from scipy.ndimage import filters as filter
try:
from enterprise.pulsar import Pulsar
ent_present = True
except ImportError:
ent_present = False
fyr = 1./31536000.
# from Kristina
def getMax2d(samples1, samples2, weights=None, smooth=True, bins=[40, 40],
x_range=None, y_range=None, logx=False, logy=False, logz=False):
""" Function to return the maximum likelihood values by interpolating over
a two dimensional histogram made of two sets of samples.
Parameters
----------
samples1, samples2 : array or list
Arrays or lists from which to find two dimensional maximum likelihood
values.
weights : array of floats
Weights to use in histogram.
bins : list of ints
List of 2 integers which dictates number of bins for samples1 and
samples2.
x_range : tuple, optional
Range of samples1
y_range : tuple, optional
Range of samples2
logx : bool, optional
A value of True use log10 scale for samples1.
logy : bool, optional
A value of True use log10 scale for samples2.
logz : bool, optional
A value of True indicates that the z axis is in log10.
"""
if x_range is None:
xmin = np.amin(samples1)
xmax = np.amax(samples1)
else:
xmin = x_range[0]
xmax = x_range[1]
if y_range is None:
ymin = np.amin(samples2)
"""demo: train a DND LSTM on a contextual choice task
"""
import time
import torch
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from task import ContextualChoice
from model import DNDLSTM as Agent
from utils import compute_stats, to_sqnp
from model.DND import compute_similarities
from model.utils import get_reward, compute_returns, compute_a2c_loss
sns.set(style='white', context='talk', palette='colorblind')
seed_val = 0
torch.manual_seed(seed_val)
np.random.seed(seed_val)
'''init task'''
n_unique_example = 50
n_trials = 2 * n_unique_example
# n time steps of a trial
trial_length = 10
# after `t_noise_off`, turn off the noise
t_noise_off = 5
# input/output/hidden/memory dim
obs_dim = 32
task = ContextualChoice(
obs_dim=obs_dim, trial_length=trial_length,
t_noise_off=t_noise_off
)
'''init model'''
# set params
dim_hidden = 32
dim_output = 2
dict_len = 100
learning_rate = 5e-4
n_epochs = 20
# init agent / optimizer
agent = Agent(task.x_dim, dim_hidden, dim_output, dict_len)
optimizer = torch.optim.Adam(agent.parameters(), lr=learning_rate)
'''train'''
log_return = np.zeros(n_epochs,)
log_loss_value = np.zeros(n_epochs,)
log_loss_policy = np.zeros(n_epochs,)
log_Y = np.zeros((n_epochs, n_trials, trial_length))
log_Y_hat = np.zeros((n_epochs, n_trials, trial_length))
# loop over epoch
for i in range(n_epochs):
time_start = time.time()
# get data for this epoch
X, Y = task.sample(n_unique_example)
# flush hippocampus
agent.reset_memory()
agent.turn_on_retrieval()
# loop over the training set
for m in range(n_trials):
# prealloc
cumulative_reward = 0
probs, rewards, values = [], [], []
h_t, c_t = agent.get_init_states()
# loop over time, for one training example
for t in range(trial_length):
# only save memory at the last time point
agent.turn_off_encoding()
if t == trial_length-1 and m < n_unique_example:
agent.turn_on_encoding()
# recurrent computation at time t
output_t, _ = agent(X[m][t].view(1, 1, -1), h_t, c_t)
a_t, prob_a_t, v_t, h_t, c_t = output_t
# compute immediate reward
r_t = get_reward(a_t, Y[m][t])
# log
probs.append(prob_a_t)
rewards.append(r_t)
values.append(v_t)
# log
cumulative_reward += r_t
log_Y_hat[i, m, t] = a_t.item()
returns = compute_returns(rewards)
loss_policy, loss_value = compute_a2c_loss(probs, values, returns)
loss = loss_policy + loss_value
optimizer.zero_grad()
loss.backward()
optimizer.step()
# log
log_Y[i] = np.squeeze(Y.numpy())
log_return[i] += cumulative_reward / n_trials
log_loss_value[i] += loss_value.item() / n_trials
log_loss_policy[i] += loss_policy.item() / n_trials
# print out some stuff
time_end = time.time()
run_time = time_end - time_start
print(
'Epoch %3d | return = %.2f | loss: val = %.2f, pol = %.2f | time = %.2f' %
(i, log_return[i], log_loss_value[i], log_loss_policy[i], run_time)
)
'''learning curve'''
f, axes = plt.subplots(1, 2, figsize=(8, 3))
axes[0].plot(log_return)
axes[0].set_ylabel('Return')
axes[0].set_xlabel('Epoch')
axes[1].plot(log_loss_value)
axes[1].set_ylabel('Value loss')
axes[1].set_xlabel('Epoch')
sns.despine()
f.tight_layout()
'''show behavior'''
corrects = log_Y_hat[-1] == log_Y[-1]
acc_mu_no_memory, acc_se_no_memory = compute_stats(
corrects[:n_unique_example])
acc_mu_has_memory, acc_se_has_memory = compute_stats(
corrects[n_unique_example:])
n_se = 2
f, ax = plt.subplots(1, 1, figsize=(7, 4))
ax.errorbar(range(trial_length), y=acc_mu_no_memory,
yerr=acc_se_no_memory * n_se, label='w/o memory')
ax.errorbar(range(trial_length), y=acc_mu_has_memory,
yerr=acc_se_has_memory * n_se, label='w/ memory')
ax.axvline(t_noise_off, label='turn off noise', color='grey', linestyle='--')
ax.set_xlabel('Time')
ax.set_ylabel('Correct rate')
ax.set_title('Choice accuracy by condition')
f.legend(frameon=False, bbox_to_anchor=(1, .6))
sns.despine()
f.tight_layout()
# f.savefig('../figs/correct-rate.png', dpi=100, bbox_inches='tight')
'''visualize keys and values'''
keys, vals = agent.get_all_mems()
n_mems = len(agent.dnd.keys)
dmat_kk, dmat_vv = np.zeros((n_mems, n_mems)), np.zeros((n_mems, n_mems))
for i in range(n_mems):
dmat_kk[i, :] = to_sqnp(compute_similarities(
keys[i], keys, agent.dnd.kernel))
dmat_vv[i, :] = to_sqnp(compute_similarities(
vals[i], vals, agent.dnd.kernel))
# plot
dmats = {'key': dmat_kk, 'value': dmat_vv}
f, axes = plt.subplots(1, 2, figsize=(12, 5))
for i, (label_i, dmat_i) in enumerate(dmats.items()):
sns.heatmap(dmat_i, cmap='viridis', square=True, ax=axes[i])
axes[i].set_xlabel(f'id, {label_i} i')
axes[i].set_ylabel(f'id, {label_i} j')
axes[i].set_title(
f'{label_i}-{label_i} similarity, metric = {agent.dnd.kernel}'
)
f.tight_layout()
'''project memory content to low dim space'''
# convert the values to a np array, #memories x mem_dim
vals_np = np.vstack([to_sqnp(vals[i]) for i in range(n_mems)])
# project to PC space
vals_centered = (vals_np - np.mean(vals_np, axis=0, keepdims=True))
U, S, _ = np.linalg.svd(vals_centered, full_matrices=False)
vals_pc = np.dot(U, np.diag(S))
# Copyright 2021 <NAME>. All Rights Reserved.
#
# Licensed under the MIT License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
""" A collection of functions which are useful for getting the necessary
information from the volume in order to compute nephrometry metrics """
from pathlib import Path
import numpy as np
import pydicom
from scipy.signal import convolve2d
from scipy.ndimage.measurements import label
from scipy.stats import mode
from scipy.spatial.distance import pdist, squareform
from pyfastnns import NNS
import time
import cv2
def get_centroid(volume):
coordinates = np.transpose(np.array(np.nonzero(volume)))
centroid = np.mean(coordinates, axis=0)
return centroid
def _blur_thresh(vol):
kernel = np.ones((3,3))/9.0
ret = np.zeros(np.shape(vol), dtype=np.float32)
for i in range(vol.shape[0]):
ret[i] = convolve2d(
vol[i], kernel, mode="same", boundary="fill", fillvalue=0
)
return ret
def _get_distance(c1, c2, x_width=1, y_width=1, z_width=1):
return np.linalg.norm(
np.multiply(c1 - c2, np.array((x_width, y_width, z_width))), ord=2
)
def distance_between_regions(first_coordinates, second_coordinates):
nns = NNS(first_coordinates)
_, distance = nns.search(second_coordinates)
min_distance = np.min(distance)
return min_distance
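# Added usage sketch (editor's note, illustrative values only): with two small
# voxel-coordinate arrays
#   a = np.array([[0., 0., 0.], [1., 0., 0.]])
#   b = np.array([[3., 0., 0.]])
# distance_between_regions(a, b) should return 2.0, the smallest distance between
# any point of `a` and any point of `b`.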
def nearest_pair(first_coordinates, second_coordinates):
nns = NNS(first_coordinates)
pts, distances = nns.search(second_coordinates)
min_distance_idx = np.argmin(distances)
sp = second_coordinates[min_distance_idx]
fp = first_coordinates[pts[min_distance_idx]]
return fp, sp
def furthest_pair_distance(coordinates):
coordinates = np.array(coordinates).T
D = pdist(coordinates)
return np.nanmax(D)
def get_nearest_rim_point(region_boundaries, pixel_width, slice_thickness):
# Get coordinates of collecting system voxels
rim_bin = np.equal(region_boundaries, 5).astype(np.int32)
rim_coordinates = np.transpose(np.array(np.nonzero(rim_bin)))
if rim_coordinates.shape[0] == 0:
raise ValueError("Renal rim could not be identified")
# Get coordinates of tumor voxels
tumor_bin = np.equal(region_boundaries, 2).astype(np.int32)
tumor_coordinates = np.transpose(np.array(np.nonzero(tumor_bin)))
# Scale coordinates such that they correspond to the real world (mm)
multiplier = np.array(
[[slice_thickness, pixel_width, pixel_width]]
).astype(np.float32)
rim_coordinates = np.multiply(rim_coordinates, multiplier)
tumor_coordinates = np.multiply(tumor_coordinates, multiplier)
nearest_pt, _ = nearest_pair(rim_coordinates, tumor_coordinates)
return np.divide(nearest_pt, multiplier[0])
def get_distance_to_collecting_system(region_boundaries, pixel_width,
slice_thickness):
# Get coordinates of collecting system voxels
ucs_bin = np.equal(region_boundaries, 4).astype(np.int32)
ucs_coordinates = np.transpose(np.array(np.nonzero(ucs_bin)))
if ucs_coordinates.shape[0] == 0:
return get_distance_to_sinus(
region_boundaries, pixel_width, slice_thickness
)
# raise ValueError("Collecting system could not be identified")
# Get coordinates of tumor voxels
tumor_bin = np.equal(region_boundaries, 2).astype(np.int32)
tumor_coordinates = np.transpose(np.array(np.nonzero(tumor_bin)))
"""
Different classifiers in decoding the Haxby dataset
=====================================================
Here we compare different classifiers on a visual object recognition
decoding task.
"""
import time
### Fetch data using nilearn dataset fetcher ################################
from nilearn import datasets
haxby_dataset = datasets.fetch_haxby(n_subjects=1)
# print basic information on the dataset
print('First subject anatomical nifti image (3D) is located at: %s' %
haxby_dataset.anat[0])
print('First subject functional nifti image (4D) is located at: %s' %
haxby_dataset.func[0])
# load labels
import numpy as np
labels = np.recfromcsv(haxby_dataset.session_target[0], delimiter=" ")
stimuli = labels['labels']
# identify resting state labels in order to be able to remove them
resting_state = stimuli == b'rest'
# find names of remaining active labels
categories = np.unique(stimuli[np.logical_not(resting_state)])
# extract tags indicating to which acquisition run a tag belongs
session_labels = labels["chunks"][np.logical_not(resting_state)]
from re import error
import re
from uos_statphys.isingModel import Observable
import numpy as np
import matplotlib.pyplot as plt
from numpy.core.fromnumeric import var
import tqdm
__all__ = []
class Container(object):
pass
class Observable:
pass
class SingleDataSet:
"""A class for analysis of monte Carlo simulation result."""
def __init__(self, **parameters):
self.parameters = parameters
self.__analyzed = False
self.ensemble = None
self.simulation_time = None
@classmethod
def from_RAW(cls, files, obs_names, delimiter = ",", **parameters):
"""
We assume that each file is a single ensemble of the simulation and that every ensemble has the same simulation time.
"""
pass
@classmethod
def from_npy(cls, arrays, obs_names, ensemble_axis = None, **parameters):
"""
We assume that every ensemble has the same simulation time.
"""
pass
def set_parameters(self, **parameters):
for p in parameters:
self.parameters[p] = parameters[p]
def set_order_parameter(self, var_name):
self.order_parameter = var_name
def analyze(self, reduced = False):
if self.__analyzed: return
self.average = Container()
self.var = Container()
self.second = Container()
self.forth = Container()
for key in vars(self).copy():
if isinstance(vars(self)[key], np.ndarray) and len(vars(self)[key].shape)==3:
vars(self.average)[key] = np.average(vars(self)[key], axis =2)
vars(self.var)[key] = np.var(vars(self)[key], axis =2)
vars(self.second)[key] = np.average(vars(self)[key].astype(np.float64)**2, axis =2)
vars(self.forth)[key] = np.average(vars(self)[key].astype(np.float64)**4, axis =2)
self.__analyzed = True
def save(self, file, format="npy"):
pass
class SingleAnalyzer:
"""A class for single control parameter analysis of monte Carlo simulation result."""
def __init__(self):
self.__analyzed = False
def set_parameters(self, **parameters):
var = vars(self)
for p in parameters:
var[p] = parameters[p]
def set_order_parameter(self, var_name):
self.order_parameter = var_name
def set_control_parameter(self, var_name):
self.control_parameter = var_name
def append(self, data, **parameters):
pass
def analyze(self):
pass
@classmethod
def from_RAW(cls, files, var_names, delimiter = ",", **parameters):
"""
We assume that each file is a single ensemble of the simulation and that every ensemble has the same simulation time.
"""
pass
@classmethod
def from_npy(cls, arrays, var_names, parameter_axis, ensemble_axis = None, **parameters):
"""
We assume that every ensemble has the same simulation time.
"""
pass
def reduced_T(self, t_c):
return (self.T - t_c)/t_c
def observable(func):
def plots(self, return_errors = False, return_argmax = False):
if not self.__analyzed:
self.analyze()
raw = func(self) #calculation
ret = [np.mean(raw, axis = 1)]
if return_errors:
ret.append(np.std(raw, axis = 1))
if return_argmax:
ret.append(np.argmax(raw, axis = 0))
return ret
return plots
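# Added note (editor's): methods wrapped with @observable compute the raw
# per-ensemble quantity and return its mean over the ensemble axis, optionally with
# the standard deviation (return_errors=True) and the argmax along the
# control-parameter axis (return_argmax=True), e.g. susceptibility(return_errors=True).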
def __getattr__(self, value):
return self.parameters[value]
def new_variable_with(self, *arg): # define new variable as property
param = map(arg, self.parameters)
def define_new(func): #decorator
def variable():
doc = """Variable difined by user."""
def fget():
return func()
def fset():
raise AttributeError("can't set attribute")
def fdel():
return
return locals()
vars(self)[func.__name__] = property(**locals())
return
vars(self)[func.__name__] = define_new
return define_new
def new_observable_with(self, *arg): # define new observable
def define_new(func): #decorator
return
return define_new()
def plot():
pass
@observable
def susceptibility(self):
return self.var.M/self.L/self.L/self.T
@observable
def heat_capacity(self):
return self.var.E/self.L/self.L/self.T/self.T
@observable
def binder_cumulant(self):
forth = self.forth.M
second = self.second.M
return 1 - forth/3/second**2
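# Added note (editor's): this is the standard fourth-order Binder cumulant,
# U_4 = 1 - <m^4> / (3 <m^2>^2); curves for different system sizes cross near the
# critical temperature, which is why it is commonly used to locate T_c.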
class MultiAnalyzer(SingleAnalyzer):
"""A class for single control parameter analysis of monte Carlo simulation result."""
def __init__(self):
self.__analyzed = False
def set_parameters(self, **parameters):
var = vars(self)
for p in parameters:
var[p] = parameters[p]
def set_order_parameter(self, var_name):
self.order_parameter = var_name
def set_control_parameter(self, var_name):
self.control_parameter = var_name
def append(self, data, **parameters):
pass
def analyze(self):
pass
@classmethod
def from_RAW(cls, files, var_names, delimiter = ",", **parameters):
"""
We assume that each file is a single ensemble of the simulation and that every ensemble has the same simulation time.
"""
pass
@classmethod
def from_npy(cls, arrays, var_names,parameter_axis, ensemble_axis = None, **parameters):
"""
We assume that every ensemble has the same simulation time.
"""
pass
def reduced_T(self, t_c):
return (self.T - t_c)/t_c
def observable(func):
def plots(self, return_errors = False, return_argmax = False):
if not self.__analyzed:
self.analyze()
raw = func(self) #calculation
ret = [np.mean(raw, axis = 1)]
if return_errors:
ret.append(np.std(raw, axis = 1))
if return_argmax:
ret.append(np.argmax(raw, axis = 0))
return ret
return plots
    def __getattr__(self, value):
        if "parameters" not in vars(self) or value not in self.parameters:
            raise AttributeError(value)
        return self.parameters[value]
    def new_variable_with(self, *arg):  # define a new derived variable from the named parameters
        def define_new(func):  # decorator: evaluate func on those parameters and store the result
            params = [getattr(self, a) for a in arg]
            vars(self)[func.__name__] = func(*params)
            return func
        return define_new
    def new_observable_with(self, *arg):  # define a new observable
        def define_new(func):  # decorator: register the user-supplied observable on the instance
            vars(self)[func.__name__] = func
            return func
        return define_new
    def plot(self):
        pass
@observable
def susceptibility(self):
return self.var.M/self.L/self.L/self.T
@observable
def heat_capacity(self):
return self.var.E/self.L/self.L/self.T/self.T
@observable
def binder_cumulant(self):
forth = self.forth.M
second = self.second.M
return 1 - forth/3/second**2
class IsingMultiAnalyzer:
def __init__(self,L,T,E,M, title = ""):
self.entry = []
self.L = L
for l,t,e,m in zip(L,T,E,M):
vars(self)[f"_{l}"] = IsingSingleAnalyzer(l,t,e,m)
self.entry.append(vars(self)[f"_{l}"])
self.title = title
self.__analyzed = False
    @staticmethod
    def new(isa=None, title=""):
temp = IsingMultiAnalyzer([],[],[],[],title)
if isa is not None:
temp.append(isa)
return temp
def append(self, value):
if isinstance(value, IsingSingleAnalyzer):
self.L.append(value.L)
self.entry.append(value)
vars(self)[f"_{l}"] = value
else:
raise ValueError
def analyze(self):
for isa in self.entry:
isa.analyze()
self.__analyzed = True
@property
def average(self):
if not self.__analyzed:
self.analyze()
for l, isa in zip(self.L,self.entry):
yield l, isa.T, isa.average.E, isa.average.M
@property
def variance(self):
if not self.__analyzed:
self.analyze()
for l, isa in zip(self.L,self.entry):
yield l, isa.T, isa.var.E, isa.var.M
@property
def second(self):
if not self.__analyzed:
self.analyze()
for l, isa in zip(self.L,self.entry):
yield l, isa.T, isa.second.E, isa.second.M
@property
def forth(self):
if not self.__analyzed:
self.analyze()
for l, isa in zip(self.L,self.entry):
yield l, isa.T, isa.forth.E, isa.forth.M
    def line_fitting(self, x, y, y_err, line_range=None, logscale=False, label=""):
        # fit a straight line a*x + b to the data as passed in; curve_fit is imported
        # locally so the rest of the module does not require scipy
        from scipy.optimize import curve_fit
        popt, pcov = curve_fit(lambda xhat, a, b: a*xhat + b, x, y, sigma=y_err)
        perr = np.sqrt(np.diag(pcov))
        if line_range is not None:
            pred_x = np.array(line_range)
            if logscale:
                pred_x = np.power(10, pred_x)
            # for a log-log fit the fitted line corresponds to the power law 10**b * x**a
            predict = 10**popt[1]*np.power(pred_x, popt[0])
import tensorflow as tf
import numpy as np
import time
import pandas as pd
import keras
from keras import Sequential
from keras.models import Model
from keras.layers import *
from keras.optimizers import RMSprop
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
import sys
tr_data = pd.read_csv(sys.argv[1], header=None)
test_data = pd.read_csv(sys.argv[2], header=None)
Y_train = tr_data[0].values
del tr_data[0]
X_train = tr_data.values
del test_data[0]
X_test = test_data.values
# Reshape each flat 1024-pixel row into a 32x32 single-channel image; this is
# equivalent to the element-wise copy X_new[i][a][b][0] = X[i][32*a + b].
X_train_new = X_train.reshape(-1, 32, 32, 1).astype('float64')
X_test_new = X_test.reshape(-1, 32, 32, 1).astype('float64')
X_train_new /= 255
X_test_new /= 255
num_category = 46
y_train = keras.utils.to_categorical(Y_train, num_category)
input_shape = (32, 32, 1)
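# Model sketch: two convolutional layers (5x5 then 3x3 kernels), 2x2 max-pooling with
# dropout, a 512-unit dense layer, and a softmax over the 46 output classes.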
model = Sequential()
model.add(Conv2D(32, kernel_size=(5, 5),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(num_category, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)
zoom_range = 0.1, # Randomly zoom image
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=False, # randomly flip images
vertical_flip=False) # randomly flip images
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001)
datagen.fit(X_train_new)
batch_size=86
history = model.fit_generator(datagen.flow(X_train_new,y_train, batch_size=batch_size),
epochs = 1, validation_data=(X_train_new, y_train),
verbose = 2, steps_per_epoch=X_train_new.shape[0] // batch_size
, callbacks=[learning_rate_reduction])
pred= model.predict(X_test_new)
predictions=np.argmax(pred,axis=1)
# write the predicted class index for each test sample, one value per line
np.savetxt(sys.argv[3], predictions)
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import unittest
import pytest
import pickle
from sklearn.linear_model import LinearRegression, Lasso, LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, FunctionTransformer, PolynomialFeatures
from sklearn.model_selection import KFold
from econml.ortho_iv import (DMLATEIV, ProjectedDMLATEIV, DMLIV, NonParamDMLIV,
IntentToTreatDRIV, LinearIntentToTreatDRIV)
import numpy as np
from econml.utilities import shape, hstack, vstack, reshape, cross_product, StatsModelsLinearRegression
from econml.inference import BootstrapInference
from contextlib import ExitStack
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, GradientBoostingClassifier
import itertools
from econml.sklearn_extensions.linear_model import WeightedLasso
from econml.tests.test_statsmodels import _summarize
import econml.tests.utilities # bugfix for assertWarns
class TestOrthoIV(unittest.TestCase):
def test_cate_api(self):
"""Test that we correctly implement the CATE API."""
n = 30
def size(n, d):
return (n, d) if d >= 0 else (n,)
def make_random(is_discrete, d):
if d is None:
return None
sz = size(n, d)
if is_discrete:
while True:
arr = np.random.choice(['a', 'b', 'c'], size=sz)
# ensure that we've got at least two of every row
_, counts = np.unique(arr, return_counts=True, axis=0)
if len(counts) == 3**(d if d > 0 else 1) and counts.min() > 1:
return arr
else:
return np.random.normal(size=sz)
def eff_shape(n, d_y):
return (n,) + ((d_y,) if d_y > 0 else())
def marg_eff_shape(n, d_y, d_t_final):
return ((n,) +
((d_y,) if d_y > 0 else ()) +
((d_t_final,) if d_t_final > 0 else()))
# since T isn't passed to const_marginal_effect, defaults to one row if X is None
def const_marg_eff_shape(n, d_x, d_y, d_t_final):
return ((n if d_x else 1,) +
((d_y,) if d_y > 0 else ()) +
((d_t_final,) if d_t_final > 0 else()))
for d_t in [2, 1, -1]:
n_t = d_t if d_t > 0 else 1
for discrete_t in [True, False] if n_t == 1 else [False]:
for d_y in [3, 1, -1]:
for d_q in [2, None]:
for d_z in [2, 1]:
if d_z < n_t:
continue
for discrete_z in [True, False] if d_z == 1 else[False]:
Z1, Q, Y, T1 = [make_random(is_discrete, d)
for is_discrete, d in [(discrete_z, d_z),
(False, d_q),
(False, d_y),
(discrete_t, d_t)]]
if discrete_t and discrete_z:
# need to make sure we get all *joint* combinations
arr = make_random(True, 2)
Z1 = arr[:, 0].reshape(size(n, d_z))
                                    T1 = arr[:, 1].reshape(size(n, d_t))
d_t_final1 = 2 if discrete_t else d_t
if discrete_t:
# IntentToTreat only supports binary treatments/instruments
T2 = T1.copy()
T2[T1 == 'c'] = np.random.choice(['a', 'b'], size=np.count_nonzero(T1 == 'c'))
d_t_final2 = 1
if discrete_z:
# IntentToTreat only supports binary treatments/instruments
Z2 = Z1.copy()
Z2[Z1 == 'c'] = np.random.choice(['a', 'b'], size=np.count_nonzero(Z1 == 'c'))
effect_shape = eff_shape(n, d_y)
model_t = LogisticRegression() if discrete_t else Lasso()
model_z = LogisticRegression() if discrete_z else Lasso()
# TODO: add stratification to bootstrap so that we can use it
# even with discrete treatments
all_infs = [None]
if not (discrete_t or discrete_z):
all_infs.append(BootstrapInference(1))
estimators = [(DMLATEIV(model_Y_W=Lasso(),
model_T_W=model_t,
model_Z_W=model_z,
discrete_treatment=discrete_t,
discrete_instrument=discrete_z),
True,
all_infs),
(ProjectedDMLATEIV(model_Y_W=Lasso(),
model_T_W=model_t,
model_T_WZ=model_t,
discrete_treatment=discrete_t,
discrete_instrument=discrete_z),
False,
all_infs),
(DMLIV(model_Y_X=Lasso(), model_T_X=model_t, model_T_XZ=model_t,
model_final=Lasso(),
discrete_treatment=discrete_t, discrete_instrument=discrete_z),
False,
all_infs)]
if d_q and discrete_t and discrete_z:
# IntentToTreat requires X
estimators.append((
LinearIntentToTreatDRIV(model_Y_X=Lasso(), model_T_XZ=model_t,
flexible_model_effect=WeightedLasso(),
n_splits=2),
False,
all_infs + ['statsmodels']))
for est, multi, infs in estimators:
if not(multi) and d_y > 1 or d_t > 1 or d_z > 1:
continue
# ensure we can serialize unfit estimator
pickle.dumps(est)
d_ws = [None]
if isinstance(est, LinearIntentToTreatDRIV):
d_ws.append(2)
for d_w in d_ws:
W = make_random(False, d_w)
for inf in infs:
with self.subTest(d_z=d_z, d_x=d_q, d_y=d_y, d_t=d_t,
discrete_t=discrete_t, discrete_z=discrete_z,
est=est, inf=inf):
Z = Z1
T = T1
d_t_final = d_t_final1
X = Q
d_x = d_q
if isinstance(est, (DMLATEIV, ProjectedDMLATEIV)):
# these support only W but not X
W = Q
X = None
d_x = None
def fit():
return est.fit(Y, T, Z=Z, W=W, inference=inf)
def score():
return est.score(Y, T, Z=Z, W=W)
else:
# these support only binary, not general discrete T and Z
if discrete_t:
T = T2
d_t_final = d_t_final2
if discrete_z:
Z = Z2
if isinstance(est, LinearIntentToTreatDRIV):
def fit():
return est.fit(Y, T, Z=Z, X=X, W=W, inference=inf)
def score():
return est.score(Y, T, Z=Z, X=X, W=W)
else:
def fit():
return est.fit(Y, T, Z=Z, X=X, inference=inf)
def score():
return est.score(Y, T, Z=Z, X=X)
marginal_effect_shape = marg_eff_shape(n, d_y, d_t_final)
const_marginal_effect_shape = const_marg_eff_shape(
n, d_x, d_y, d_t_final)
fit()
# ensure we can serialize fit estimator
pickle.dumps(est)
# make sure we can call the marginal_effect and effect methods
const_marg_eff = est.const_marginal_effect(X)
marg_eff = est.marginal_effect(T, X)
self.assertEqual(shape(marg_eff), marginal_effect_shape)
self.assertEqual(shape(const_marg_eff), const_marginal_effect_shape)
np.testing.assert_array_equal(
marg_eff if d_x else marg_eff[0:1], const_marg_eff)
T0 = np.full_like(T, 'a') if discrete_t else np.zeros_like(T)
eff = est.effect(X, T0=T0, T1=T)
self.assertEqual(shape(eff), effect_shape)
# TODO: add tests for extra properties like coef_ where they exist
if inf is not None:
const_marg_eff_int = est.const_marginal_effect_interval(X)
marg_eff_int = est.marginal_effect_interval(T, X)
self.assertEqual(shape(marg_eff_int),
(2,) + marginal_effect_shape)
self.assertEqual(shape(const_marg_eff_int),
(2,) + const_marginal_effect_shape)
self.assertEqual(shape(est.effect_interval(X, T0=T0, T1=T)),
(2,) + effect_shape)
# TODO: add tests for extra properties like coef_ where they exist
score()
# make sure we can call effect with implied scalar treatments,
# no matter the dimensions of T, and also that we warn when there
# are multiple treatments
if d_t > 1:
cm = self.assertWarns(Warning)
else:
# ExitStack can be used as a "do nothing" ContextManager
cm = ExitStack()
with cm:
effect_shape2 = (n if d_x else 1,) + ((d_y,) if d_y > 0 else())
eff = est.effect(X) if not discrete_t else est.effect(
X, T0='a', T1='b')
self.assertEqual(shape(eff), effect_shape2)
def test_bad_splits_discrete(self):
"""
Tests that when some training splits in a crossfit fold don't contain all treatments then an error
is raised.
"""
Y = np.array([2, 3, 1, 3, 2, 1, 1, 1])
bad = np.array([2, 2, 1, 2, 1, 1, 1, 1])
W = np.ones((8, 1))
ok = np.array([1, 2, 3, 1, 2, 3, 1, 2])
models = [Lasso(), Lasso(), Lasso()]
        est = DMLATEIV(*models, n_splits=[(np.arange(4, 8), np.arange(4)
from __future__ import division, absolute_import, print_function
import os
import re
import sys
import imp
import copy
import glob
import atexit
import tempfile
import subprocess
import shutil
import distutils
from distutils.errors import DistutilsError
try:
from threading import local as tlocal
except ImportError:
from dummy_threading import local as tlocal
# stores temporary directory of each thread to only create one per thread
_tdata = tlocal()
# store all created temporary directories so they can be deleted on exit
_tmpdirs = []
def clean_up_temporary_directory():
for d in _tmpdirs:
try:
shutil.rmtree(d)
except OSError:
pass
atexit.register(clean_up_temporary_directory)
try:
set
except NameError:
from sets import Set as set
from numpy.distutils.compat import get_exception
from numpy.compat import basestring
__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'dict_append', 'appendpath', 'generate_config_py',
'get_cmd', 'allpath', 'get_mathlibs',
'terminal_has_colors', 'red_text', 'green_text', 'yellow_text',
'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings',
'has_f_sources', 'has_cxx_sources', 'filter_sources',
'get_dependencies', 'is_local_src_dir', 'get_ext_source_files',
'get_script_files', 'get_lib_source_files', 'get_data_files',
'dot_join', 'get_frame', 'minrelpath', 'njoin',
'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info',
'get_num_build_jobs']
class InstallableLib(object):
"""
Container to hold information on an installable library.
Parameters
----------
name : str
Name of the installed library.
build_info : dict
Dictionary holding build information.
target_dir : str
Absolute path specifying where to install the library.
See Also
--------
Configuration.add_installed_library
Notes
-----
The three parameters are stored as attributes with the same names.
"""
def __init__(self, name, build_info, target_dir):
self.name = name
self.build_info = build_info
self.target_dir = target_dir
def get_num_build_jobs():
"""
Get number of parallel build jobs set by the --parallel command line
argument of setup.py
    If the command did not receive a setting, the environment variable
    NPY_NUM_BUILD_JOBS is checked; if that is unset, 1 is returned.
Returns
-------
out : int
number of parallel jobs that can be run
"""
from numpy.distutils.core import get_distribution
envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", 1))
dist = get_distribution()
# may be None during configuration
if dist is None:
return envjobs
# any of these three may have the job set, take the largest
cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None),
getattr(dist.get_command_obj('build_ext'), 'parallel', None),
getattr(dist.get_command_obj('build_clib'), 'parallel', None))
if all(x is None for x in cmdattr):
return envjobs
else:
return max(x for x in cmdattr if x is not None)
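# Typical ways to request parallel builds (illustrative):
#   NPY_NUM_BUILD_JOBS=4 python setup.py build
#   python setup.py build --parallel 4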
def quote_args(args):
    # don't use _nt_quote_args as it does not check whether
    # args items already have quotes or not.
args = list(args)
for i in range(len(args)):
a = args[i]
if ' ' in a and a[0] not in '"\'':
args[i] = '"%s"' % (a)
return args
def allpath(name):
"Convert a /-separated pathname to one using the OS's path separator."
splitted = name.split('/')
return os.path.join(*splitted)
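# e.g. allpath('a/b/c') -> os.path.join('a', 'b', 'c'), i.e. 'a\\b\\c' on Windows.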
def rel_path(path, parent_path):
"""Return path relative to parent_path."""
# Use realpath to avoid issues with symlinked dirs (see gh-7707)
pd = os.path.realpath(os.path.abspath(parent_path))
apath = os.path.realpath(os.path.abspath(path))
if len(apath) < len(pd):
return path
if apath == pd:
return ''
if pd == apath[:len(pd)]:
assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))
path = apath[len(pd)+1:]
return path
def get_path_from_frame(frame, parent_path=None):
"""Return path of the module given a frame object from the call stack.
Returned path is relative to parent_path when given,
otherwise it is absolute path.
"""
# First, try to find if the file name is in the frame.
try:
caller_file = eval('__file__', frame.f_globals, frame.f_locals)
d = os.path.dirname(os.path.abspath(caller_file))
except NameError:
# __file__ is not defined, so let's try __name__. We try this second
# because setuptools spoofs __name__ to be '__main__' even though
# sys.modules['__main__'] might be something else, like easy_install(1).
caller_name = eval('__name__', frame.f_globals, frame.f_locals)
__import__(caller_name)
mod = sys.modules[caller_name]
if hasattr(mod, '__file__'):
d = os.path.dirname(os.path.abspath(mod.__file__))
else:
# we're probably running setup.py as execfile("setup.py")
# (likely we're building an egg)
d = os.path.abspath('.')
# hmm, should we use sys.argv[0] like in __builtin__ case?
if parent_path is not None:
d = rel_path(d, parent_path)
return d or '.'
def njoin(*path):
"""Join two or more pathname components +
- convert a /-separated pathname to one using the OS's path separator.
- resolve `..` and `.` from path.
Either passing n arguments as in njoin('a','b'), or a sequence
of n names as in njoin(['a','b']) is handled, or a mixture of such arguments.
"""
paths = []
for p in path:
if is_sequence(p):
# njoin(['a', 'b'], 'c')
paths.append(njoin(*p))
else:
assert is_string(p)
paths.append(p)
path = paths
if not path:
# njoin()
joined = ''
else:
# njoin('a', 'b')
joined = os.path.join(*path)
if os.path.sep != '/':
joined = joined.replace('/', os.path.sep)
return minrelpath(joined)
def get_mathlibs(path=None):
"""Return the MATHLIB line from numpyconfig.h
"""
if path is not None:
config_file = os.path.join(path, '_numpyconfig.h')
else:
# Look for the file in each of the numpy include directories.
dirs = get_numpy_include_dirs()
for path in dirs:
fn = os.path.join(path, '_numpyconfig.h')
if os.path.exists(fn):
config_file = fn
break
else:
raise DistutilsError('_numpyconfig.h not found in numpy include '
'dirs %r' % (dirs,))
fid = open(config_file)
mathlibs = []
s = '#define MATHLIB'
for line in fid:
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
fid.close()
return mathlibs
def minrelpath(path):
"""Resolve `..` and '.' from path.
"""
if not is_string(path):
return path
if '.' not in path:
return path
l = path.split(os.sep)
while l:
try:
i = l.index('.', 1)
except ValueError:
break
del l[i]
j = 1
while l:
try:
i = l.index('..', j)
except ValueError:
break
if l[i-1]=='..':
j += 1
else:
del l[i], l[i-1]
j = 1
if not l:
return ''
return os.sep.join(l)
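# e.g. minrelpath('a/b/../c/./d') -> 'a/c/d' (illustrative, assuming '/' is os.sep).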
def _fix_paths(paths, local_path, include_non_existing):
assert is_sequence(paths), repr(type(paths))
new_paths = []
assert not is_string(paths), repr(paths)
for n in paths:
if is_string(n):
if '*' in n or '?' in n:
p = glob.glob(n)
p2 = glob.glob(njoin(local_path, n))
if p2:
new_paths.extend(p2)
elif p:
new_paths.extend(p)
else:
if include_non_existing:
new_paths.append(n)
print('could not resolve pattern in %r: %r' %
(local_path, n))
else:
n2 = njoin(local_path, n)
if os.path.exists(n2):
new_paths.append(n2)
else:
if os.path.exists(n):
new_paths.append(n)
elif include_non_existing:
new_paths.append(n)
if not os.path.exists(n):
print('non-existing path in %r: %r' %
(local_path, n))
elif is_sequence(n):
new_paths.extend(_fix_paths(n, local_path, include_non_existing))
else:
new_paths.append(n)
return [minrelpath(p) for p in new_paths]
def gpaths(paths, local_path='', include_non_existing=True):
"""Apply glob to paths and prepend local_path if needed.
"""
if is_string(paths):
paths = (paths,)
return _fix_paths(paths, local_path, include_non_existing)
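# e.g. gpaths('src/*.c', local_path='/pkg') expands the glob both as given and as
# '/pkg/src/*.c', preferring matches found under local_path (results depend on the
# filesystem, so this is only a sketch of the behaviour).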
def make_temp_file(suffix='', prefix='', text=True):
if not hasattr(_tdata, 'tempdir'):
_tdata.tempdir = tempfile.mkdtemp()
_tmpdirs.append(_tdata.tempdir)
fid, name = tempfile.mkstemp(suffix=suffix,
prefix=prefix,
dir=_tdata.tempdir,
text=text)
fo = os.fdopen(fid, 'w')
return fo, name
# Hooks for colored terminal output.
# See also http://www.livinglogic.de/Python/ansistyle
def terminal_has_colors():
if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:
# Avoid importing curses that causes illegal operation
# with a message:
# PYTHON2 caused an invalid page fault in
# module CYGNURSES7.DLL as 015f:18bbfc28
# Details: Python 2.3.3 [GCC 3.3.1 (cygming special)]
# ssh to Win32 machine from debian
# curses.version is 2.2
# CYGWIN_98-4.10, release 1.5.7(0.109/3/2))
return 0
if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
try:
import curses
curses.setupterm()
if (curses.tigetnum("colors") >= 0
and curses.tigetnum("pairs") >= 0
and ((curses.tigetstr("setf") is not None
and curses.tigetstr("setb") is not None)
or (curses.tigetstr("setaf") is not None
and curses.tigetstr("setab") is not None)
or curses.tigetstr("scp") is not None)):
return 1
except Exception:
pass
return 0
if terminal_has_colors():
_colour_codes = dict(black=0, red=1, green=2, yellow=3,
blue=4, magenta=5, cyan=6, white=7, default=9)
def colour_text(s, fg=None, bg=None, bold=False):
seq = []
if bold:
seq.append('1')
if fg:
fgcode = 30 + _colour_codes.get(fg.lower(), 0)
seq.append(str(fgcode))
if bg:
            bgcode = 40 + _colour_codes.get(bg.lower(), 7)
seq.append(str(bgcode))
if seq:
return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s)
else:
return s
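    # e.g. colour_text('build OK', 'green', bold=True) -> '\x1b[1;32mbuild OK\x1b[0m'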
else:
def colour_text(s, fg=None, bg=None):
return s
def default_text(s):
return colour_text(s, 'default')
def red_text(s):
return colour_text(s, 'red')
def green_text(s):
return colour_text(s, 'green')
def yellow_text(s):
return colour_text(s, 'yellow')
def cyan_text(s):
return colour_text(s, 'cyan')
def blue_text(s):
return colour_text(s, 'blue')
#########################
def cyg2win32(path):
if sys.platform=='cygwin' and path.startswith('/cygdrive'):
path = path[10] + ':' + os.path.normcase(path[11:])
return path
def mingw32():
"""Return true when using mingw32 environment.
"""
if sys.platform=='win32':
if os.environ.get('OSTYPE', '')=='msys':
return True
if os.environ.get('MSYSTEM', '')=='MINGW32':
return True
return False
def msvc_runtime_library():
"Return name of MSVC runtime library if Python was built with MSVC >= 7"
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = sys.version[msc_pos+6:msc_pos+10]
lib = {'1300': 'msvcr70', # MSVC 7.0
'1310': 'msvcr71', # MSVC 7.1
'1400': 'msvcr80', # MSVC 8
'1500': 'msvcr90', # MSVC 9 (VS 2008)
'1600': 'msvcr100', # MSVC 10 (aka 2010)
}.get(msc_ver, None)
else:
lib = None
return lib
#########################
#XXX need support for .C that is also C++
cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z', re.I).match
f90_ext_match = re.compile(r'.*[.](f90|f95)\Z', re.I).match
f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match
def _get_f90_modules(source):
"""Return a list of Fortran f90 module names that
given source file defines.
"""
if not f90_ext_match(source):
return []
modules = []
f = open(source, 'r')
for line in f:
m = f90_module_name_match(line)
if m:
name = m.group('name')
modules.append(name)
# break # XXX can we assume that there is one module per file?
f.close()
return modules
def is_string(s):
return isinstance(s, basestring)
def all_strings(lst):
"""Return True if all items in lst are string objects. """
for item in lst:
if not is_string(item):
return False
return True
def is_sequence(seq):
if is_string(seq):
return False
try:
len(seq)
except:
return False
return True
def is_glob_pattern(s):
    return is_string(s) and ('*' in s or '?' in s)
def as_list(seq):
if is_sequence(seq):
return list(seq)
else:
return [seq]
def get_language(sources):
# not used in numpy/scipy packages, use build_ext.detect_language instead
"""Determine language value (c,f77,f90) from sources """
language = None
for source in sources:
if isinstance(source, str):
if f90_ext_match(source):
language = 'f90'
break
elif fortran_ext_match(source):
language = 'f77'
return language
def has_f_sources(sources):
"""Return True if sources contains Fortran files """
for source in sources:
if fortran_ext_match(source):
return True
return False
def has_cxx_sources(sources):
"""Return True if sources contains C++ files """
for source in sources:
if cxx_ext_match(source):
return True
return False
def filter_sources(sources):
"""Return four lists of filenames containing
C, C++, Fortran, and Fortran 90 module sources,
respectively.
"""
c_sources = []
cxx_sources = []
f_sources = []
fmodule_sources = []
for source in sources:
if fortran_ext_match(source):
modules = _get_f90_modules(source)
if modules:
fmodule_sources.append(source)
else:
f_sources.append(source)
elif cxx_ext_match(source):
cxx_sources.append(source)
else:
c_sources.append(source)
return c_sources, cxx_sources, f_sources, fmodule_sources
def _get_headers(directory_list):
# get *.h files from list of directories
headers = []
for d in directory_list:
head = glob.glob(os.path.join(d, "*.h")) #XXX: *.hpp files??
headers.extend(head)
return headers
def _get_directories(list_of_sources):
# get unique directories from list of sources.
direcs = []
for f in list_of_sources:
d = os.path.split(f)
if d[0] != '' and not d[0] in direcs:
direcs.append(d[0])
return direcs
def get_dependencies(sources):
#XXX scan sources for include statements
return _get_headers(_get_directories(sources))
def is_local_src_dir(directory):
"""Return true if directory is local directory.
"""
if not is_string(directory):
return False
abs_dir = os.path.abspath(directory)
c = os.path.commonprefix([os.getcwd(), abs_dir])
new_dir = abs_dir[len(c):].split(os.sep)
if new_dir and not new_dir[0]:
new_dir = new_dir[1:]
if new_dir and new_dir[0]=='build':
return False
new_dir = os.sep.join(new_dir)
return os.path.isdir(new_dir)
def general_source_files(top_path):
pruned_directories = {'CVS':1, '.svn':1, 'build':1}
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for f in filenames:
if not prune_file_pat.search(f):
yield os.path.join(dirpath, f)
def general_source_directories_files(top_path):
"""Return a directory name relative to top_path and
files contained.
"""
pruned_directories = ['CVS', '.svn', 'build']
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for d in dirnames:
dpath = os.path.join(dirpath, d)
rpath = rel_path(dpath, top_path)
files = []
for f in os.listdir(dpath):
fn = os.path.join(dpath, f)
if os.path.isfile(fn) and not prune_file_pat.search(fn):
files.append(fn)
yield rpath, files
dpath = top_path
rpath = rel_path(dpath, top_path)
filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \
if not prune_file_pat.search(f)]
files = [f for f in filenames if os.path.isfile(f)]
yield rpath, files
def get_ext_source_files(ext):
# Get sources and any include files in the same directory.
filenames = []
sources = [_m for _m in ext.sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
for d in ext.depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_script_files(scripts):
scripts = [_m for _m in scripts if is_string(_m)]
return scripts
def get_lib_source_files(lib):
filenames = []
sources = lib[1].get('sources', [])
sources = [_m for _m in sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
depends = lib[1].get('depends', [])
for d in depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_shared_lib_extension(is_python_ext=False):
"""Return the correct file extension for shared libraries.
Parameters
----------
is_python_ext : bool, optional
Whether the shared library is a Python extension. Default is False.
Returns
-------
so_ext : str
The shared library extension.
Notes
-----
For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X,
and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on
POSIX systems according to PEP 3149. For Python 3.2 this is implemented on
Linux, but not on OS X.
"""
confvars = distutils.sysconfig.get_config_vars()
# SO is deprecated in 3.3.1, use EXT_SUFFIX instead
so_ext = confvars.get('EXT_SUFFIX', None)
if so_ext is None:
so_ext = confvars.get('SO', '')
if not is_python_ext:
# hardcode known values, config vars (including SHLIB_SUFFIX) are
# unreliable (see #3182)
# darwin, windows and debug linux are wrong in 3.3.1 and older
if (sys.platform.startswith('linux') or
sys.platform.startswith('gnukfreebsd')):
so_ext = '.so'
elif sys.platform.startswith('darwin'):
so_ext = '.dylib'
elif sys.platform.startswith('win'):
so_ext = '.dll'
else:
# fall back to config vars for unknown platforms
# fix long extension for Python >=3.2, see PEP 3149.
if 'SOABI' in confvars:
# Does nothing unless SOABI config var exists
so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1)
return so_ext
def get_data_files(data):
if is_string(data):
return [data]
sources = data[1]
filenames = []
for s in sources:
if hasattr(s, '__call__'):
continue
if is_local_src_dir(s):
filenames.extend(list(general_source_files(s)))
elif is_string(s):
if os.path.isfile(s):
filenames.append(s)
else:
print('Not existing data file:', s)
else:
raise TypeError(repr(s))
return filenames
def dot_join(*args):
return '.'.join([a for a in args if a])
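# e.g. dot_join('numpy', '', 'distutils') -> 'numpy.distutils' (empty parts are skipped).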
def get_frame(level=0):
"""Return frame object from call stack with given level.
"""
try:
return sys._getframe(level+1)
except AttributeError:
frame = sys.exc_info()[2].tb_frame
for _ in range(level+1):
frame = frame.f_back
return frame
######################
class Configuration(object):
_list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
'libraries', 'headers', 'scripts', 'py_modules',
'installed_libraries', 'define_macros']
_dict_keys = ['package_dir', 'installed_pkg_config']
_extra_keys = ['name', 'version']
numpy_include_dirs = []
def __init__(self,
package_name=None,
parent_name=None,
top_path=None,
package_path=None,
caller_level=1,
setup_name='setup.py',
**attrs):
"""Construct configuration instance of a package.
package_name -- name of the package
Ex.: 'distutils'
parent_name -- name of the parent package
Ex.: 'numpy'
top_path -- directory of the toplevel package
Ex.: the directory where the numpy package source sits
package_path -- directory of package. Will be computed by magic from the
directory of the caller module if not specified
Ex.: the directory where numpy.distutils is
caller_level -- frame level to caller namespace, internal parameter.
"""
self.name = dot_join(parent_name, package_name)
self.version = None
caller_frame = get_frame(caller_level)
self.local_path = get_path_from_frame(caller_frame, top_path)
        # local_path -- directory of a file (usually setup.py) that
        #               defines a configuration() function.
if top_path is None:
top_path = self.local_path
self.local_path = ''
if package_path is None:
package_path = self.local_path
elif os.path.isdir(njoin(self.local_path, package_path)):
package_path = njoin(self.local_path, package_path)
if not os.path.isdir(package_path or '.'):
raise ValueError("%r is not a directory" % (package_path,))
self.top_path = top_path
self.package_path = package_path
# this is the relative path in the installed package
self.path_in_package = os.path.join(*self.name.split('.'))
self.list_keys = self._list_keys[:]
self.dict_keys = self._dict_keys[:]
for n in self.list_keys:
v = copy.copy(attrs.get(n, []))
setattr(self, n, as_list(v))
for n in self.dict_keys:
v = copy.copy(attrs.get(n, {}))
setattr(self, n, v)
known_keys = self.list_keys + self.dict_keys
self.extra_keys = self._extra_keys[:]
for n in attrs.keys():
if n in known_keys:
continue
a = attrs[n]
setattr(self, n, a)
if isinstance(a, list):
self.list_keys.append(n)
elif isinstance(a, dict):
self.dict_keys.append(n)
else:
self.extra_keys.append(n)
if os.path.exists(njoin(package_path, '__init__.py')):
self.packages.append(self.name)
self.package_dir[self.name] = package_path
self.options = dict(
ignore_setup_xxx_py = False,
assume_default_configuration = False,
delegate_options_to_subpackages = False,
quiet = False,
)
caller_instance = None
for i in range(1, 3):
try:
f = get_frame(i)
except ValueError:
break
try:
caller_instance = eval('self', f.f_globals, f.f_locals)
break
except NameError:
pass
if isinstance(caller_instance, self.__class__):
if caller_instance.options['delegate_options_to_subpackages']:
self.set_options(**caller_instance.options)
self.setup_name = setup_name
def todict(self):
"""
Return a dictionary compatible with the keyword arguments of distutils
setup function.
Examples
--------
>>> setup(**config.todict()) #doctest: +SKIP
"""
self._optimize_data_files()
d = {}
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for n in known_keys:
a = getattr(self, n)
if a:
d[n] = a
return d
def info(self, message):
if not self.options['quiet']:
print(message)
def warn(self, message):
sys.stderr.write('Warning: %s' % (message,))
def set_options(self, **options):
"""
Configure Configuration instance.
The following options are available:
- ignore_setup_xxx_py
- assume_default_configuration
- delegate_options_to_subpackages
- quiet
"""
for key, value in options.items():
if key in self.options:
self.options[key] = value
else:
raise ValueError('Unknown option: '+key)
def get_distribution(self):
"""Return the distutils distribution object for self."""
from numpy.distutils.core import get_distribution
return get_distribution()
def _wildcard_get_subpackage(self, subpackage_name,
parent_name,
caller_level = 1):
l = subpackage_name.split('.')
subpackage_path = njoin([self.local_path]+l)
dirs = [_m for _m in glob.glob(subpackage_path) if os.path.isdir(_m)]
config_list = []
for d in dirs:
if not os.path.isfile(njoin(d, '__init__.py')):
continue
if 'build' in d.split(os.sep):
continue
n = '.'.join(d.split(os.sep)[-len(l):])
c = self.get_subpackage(n,
parent_name = parent_name,
caller_level = caller_level+1)
config_list.extend(c)
return config_list
def _get_configuration_from_setup_py(self, setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = 1):
# In case setup_py imports local modules:
sys.path.insert(0, os.path.dirname(setup_py))
try:
fo_setup_py = open(setup_py, 'U')
setup_name = os.path.splitext(os.path.basename(setup_py))[0]
n = dot_join(self.name, subpackage_name, setup_name)
setup_module = imp.load_module('_'.join(n.split('.')),
fo_setup_py,
setup_py,
('.py', 'U', 1))
fo_setup_py.close()
if not hasattr(setup_module, 'configuration'):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s does not define configuration())'\
% (setup_module))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level + 1)
else:
pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))
                args = (pn,)
                # configuration() may optionally take the top path as a second argument
                if setup_module.configuration.__code__.co_argcount > 1:
                    args = args + (self.top_path,)
config = setup_module.configuration(*args)
if config.name!=dot_join(parent_name, subpackage_name):
self.warn('Subpackage %r configuration returned as %r' % \
(dot_join(parent_name, subpackage_name), config.name))
finally:
del sys.path[0]
return config
def get_subpackage(self,subpackage_name,
subpackage_path=None,
parent_name=None,
caller_level = 1):
"""Return list of subpackage configurations.
Parameters
----------
subpackage_name : str or None
Name of the subpackage to get the configuration. '*' in
subpackage_name is handled as a wildcard.
subpackage_path : str
If None, then the path is assumed to be the local path plus the
subpackage_name. If a setup.py file is not found in the
subpackage_path, then a default configuration is used.
parent_name : str
Parent name.
"""
if subpackage_name is None:
if subpackage_path is None:
raise ValueError(
"either subpackage_name or subpackage_path must be specified")
subpackage_name = os.path.basename(subpackage_path)
# handle wildcards
l = subpackage_name.split('.')
if subpackage_path is None and '*' in subpackage_name:
return self._wildcard_get_subpackage(subpackage_name,
parent_name,
caller_level = caller_level+1)
assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))
if subpackage_path is None:
subpackage_path = njoin([self.local_path] + l)
else:
subpackage_path = njoin([subpackage_path] + l[:-1])
subpackage_path = self.paths([subpackage_path])[0]
setup_py = njoin(subpackage_path, self.setup_name)
if not self.options['ignore_setup_xxx_py']:
if not os.path.isfile(setup_py):
setup_py = njoin(subpackage_path,
'setup_%s.py' % (subpackage_name))
if not os.path.isfile(setup_py):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s/{setup_%s,setup}.py was not found)' \
% (os.path.dirname(setup_py), subpackage_name))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level+1)
else:
config = self._get_configuration_from_setup_py(
setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = caller_level + 1)
if config:
return [config]
else:
return []
def add_subpackage(self,subpackage_name,
subpackage_path=None,
standalone = False):
"""Add a sub-package to the current Configuration instance.
This is useful in a setup.py script for adding sub-packages to a
package.
Parameters
----------
subpackage_name : str
name of the subpackage
subpackage_path : str
if given, the subpackage path such as the subpackage is in
subpackage_path / subpackage_name. If None,the subpackage is
assumed to be located in the local path / subpackage_name.
standalone : bool
"""
if standalone:
parent_name = None
else:
parent_name = self.name
config_list = self.get_subpackage(subpackage_name, subpackage_path,
parent_name = parent_name,
caller_level = 2)
if not config_list:
self.warn('No configuration returned, assuming unavailable.')
for config in config_list:
d = config
if isinstance(config, Configuration):
d = config.todict()
assert isinstance(d, dict), repr(type(d))
self.info('Appending %s configuration to %s' \
% (d.get('name'), self.name))
self.dict_append(**d)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a subpackage '+ subpackage_name)
def add_data_dir(self, data_path):
"""Recursively add files under data_path to data_files list.
Recursively add files under data_path to the list of data_files to be
installed (and distributed). The data_path can be either a relative
path-name, or an absolute path-name, or a 2-tuple where the first
argument shows where in the install directory the data directory
should be installed to.
Parameters
----------
data_path : seq or str
Argument can be either
* 2-sequence (<datadir suffix>, <path to data directory>)
* path to data directory where python datadir suffix defaults
to package dir.
Notes
-----
Rules for installation paths:
foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
(gun, foo/bar) -> parent/gun
foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
(gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
(gun/*, foo/*) -> parent/gun/a, parent/gun/b
/foo/bar -> (bar, /foo/bar) -> parent/bar
(gun, /foo/bar) -> parent/gun
(fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
Examples
--------
For example suppose the source directory contains fun/foo.dat and
fun/bar/car.dat::
>>> self.add_data_dir('fun') #doctest: +SKIP
>>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP
>>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
Will install data-files to the locations::
<package install directory>/
fun/
foo.dat
bar/
car.dat
sun/
foo.dat
bar/
car.dat
gun/
foo.dat
car.dat
"""
if is_sequence(data_path):
d, data_path = data_path
else:
d = None
if is_sequence(data_path):
[self.add_data_dir((d, p)) for p in data_path]
return
if not is_string(data_path):
raise TypeError("not a string: %r" % (data_path,))
if d is None:
if os.path.isabs(data_path):
return self.add_data_dir((os.path.basename(data_path), data_path))
return self.add_data_dir((data_path, data_path))
paths = self.paths(data_path, include_non_existing=False)
if is_glob_pattern(data_path):
if is_glob_pattern(d):
pattern_list = allpath(d).split(os.sep)
pattern_list.reverse()
# /a/*//b/ -> /a/*/b
rl = list(range(len(pattern_list)-1)); rl.reverse()
for i in rl:
if not pattern_list[i]:
del pattern_list[i]
#
for path in paths:
if not os.path.isdir(path):
print('Not a directory, skipping', path)
continue
rpath = rel_path(path, self.local_path)
path_list = rpath.split(os.sep)
path_list.reverse()
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
if i>=len(path_list):
raise ValueError('cannot fill pattern %r with %r' \
% (d, path))
target_list.append(path_list[i])
else:
assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
target_list.append(s)
i += 1
if path_list[i:]:
self.warn('mismatch of pattern_list=%s and path_list=%s'\
% (pattern_list, path_list))
target_list.reverse()
self.add_data_dir((os.sep.join(target_list), path))
else:
for path in paths:
self.add_data_dir((d, path))
return
assert not is_glob_pattern(d), repr(d)
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
for path in paths:
for d1, f in list(general_source_directories_files(path)):
target_path = os.path.join(self.path_in_package, d, d1)
data_files.append((target_path, f))
def _optimize_data_files(self):
data_dict = {}
for p, files in self.data_files:
if p not in data_dict:
data_dict[p] = set()
for f in files:
data_dict[p].add(f)
self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]
def add_data_files(self,*files):
"""Add data files to configuration data_files.
Parameters
----------
files : sequence
Argument(s) can be either
* 2-sequence (<datadir prefix>,<path to data file(s)>)
* paths to data files where python datadir prefix defaults
to package dir.
Notes
-----
The form of each element of the files sequence is very flexible
allowing many combinations of where to get the files from the package
and where they should ultimately be installed on the system. The most
basic usage is for an element of the files argument sequence to be a
simple filename. This will cause that file from the local path to be
installed to the installation path of the self.name package (package
path). The file argument can also be a relative path in which case the
entire relative path will be installed into the package directory.
Finally, the file can be an absolute path name in which case the file
will be found at the absolute path name but installed to the package
path.
This basic behavior can be augmented by passing a 2-tuple in as the
file argument. The first element of the tuple should specify the
relative path (under the package install directory) where the
remaining sequence of files should be installed to (it has nothing to
do with the file-names in the source distribution). The second element
of the tuple is the sequence of files that should be installed. The
files in this sequence can be filenames, relative paths, or absolute
paths. For absolute paths the file will be installed in the top-level
package installation directory (regardless of the first argument).
Filenames and relative path names will be installed in the package
install directory under the path name given as the first element of
the tuple.
Rules for installation paths:
#. file.txt -> (., file.txt)-> parent/file.txt
#. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
#. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
#. *.txt -> parent/a.txt, parent/b.txt
#. foo/*.txt -> parent/foo/a.txt, parent/foo/b.txt
#. */*.txt -> (*, */*.txt) -> parent/c/a.txt, parent/d/b.txt
#. (sun, file.txt) -> parent/sun/file.txt
#. (sun, bar/file.txt) -> parent/sun/file.txt
#. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
#. (sun, *.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun, bar/*.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun/*, */*.txt) -> parent/sun/c/a.txt, parent/d/b.txt
An additional feature is that the path to a data-file can actually be
a function that takes no arguments and returns the actual path(s) to
the data-files. This is useful when the data files are generated while
building the package.
Examples
--------
Add files to the list of data_files to be included with the package.
>>> self.add_data_files('foo.dat',
... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
... 'bar/cat.dat',
... '/full/path/to/can.dat') #doctest: +SKIP
will install these data files to::
<package install directory>/
foo.dat
fun/
gun.dat
nun/
pun.dat
sun.dat
bar/
                    cat.dat
can.dat
where <package install directory> is the package (or sub-package)
directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C:
\\Python2.4 \\Lib \\site-packages \\mypackage') or
        '/usr/lib/python2.4/site-packages/mypackage/mysubpackage' ('C:
\\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage').
"""
if len(files)>1:
for f in files:
self.add_data_files(f)
return
assert len(files)==1
if is_sequence(files[0]):
d, files = files[0]
else:
d = None
if is_string(files):
filepat = files
elif is_sequence(files):
if len(files)==1:
filepat = files[0]
else:
for f in files:
self.add_data_files((d, f))
return
else:
raise TypeError(repr(type(files)))
if d is None:
if hasattr(filepat, '__call__'):
d = ''
elif os.path.isabs(filepat):
d = ''
else:
d = os.path.dirname(filepat)
self.add_data_files((d, files))
return
paths = self.paths(filepat, include_non_existing=False)
if is_glob_pattern(filepat):
if is_glob_pattern(d):
pattern_list = d.split(os.sep)
pattern_list.reverse()
for path in paths:
path_list = path.split(os.sep)
path_list.reverse()
path_list.pop() # filename
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
target_list.append(path_list[i])
i += 1
else:
target_list.append(s)
target_list.reverse()
self.add_data_files((os.sep.join(target_list), path))
else:
self.add_data_files((d, paths))
return
assert not is_glob_pattern(d), repr((d, filepat))
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
data_files.append((os.path.join(self.path_in_package, d), paths))
### XXX Implement add_py_modules
def add_define_macros(self, macros):
"""Add define macros to configuration
Add the given sequence of macro name and value duples to the beginning
of the define_macros list This list will be visible to all extension
modules of the current package.
"""
dist = self.get_distribution()
if dist is not None:
if not hasattr(dist, 'define_macros'):
dist.define_macros = []
dist.define_macros.extend(macros)
else:
self.define_macros.extend(macros)
def add_include_dirs(self,*paths):
"""Add paths to configuration include directories.
Add the given sequence of paths to the beginning of the include_dirs
list. This list will be visible to all extension modules of the
current package.
"""
include_dirs = self.paths(paths)
dist = self.get_distribution()
if dist is not None:
if dist.include_dirs is None:
dist.include_dirs = []
dist.include_dirs.extend(include_dirs)
else:
self.include_dirs.extend(include_dirs)
def add_headers(self,*files):
"""Add installable headers to configuration.
Add the given sequence of files to the beginning of the headers list.
By default, headers will be installed under <python-
include>/<self.name.replace('.','/')>/ directory. If an item of files
is a tuple, then its first argument specifies the actual installation
location relative to the <python-include> path.
Parameters
----------
files : str or seq
Argument(s) can be either:
* 2-sequence (<includedir suffix>,<path to header file(s)>)
* path(s) to header file(s) where python includedir suffix will
default to package name.
"""
headers = []
for path in files:
if is_string(path):
[headers.append((self.name, p)) for p in self.paths(path)]
else:
if not isinstance(path, (tuple, list)) or len(path) != 2:
raise TypeError(repr(path))
[headers.append((path[0], p)) for p in self.paths(path[1])]
dist = self.get_distribution()
if dist is not None:
if dist.headers is None:
dist.headers = []
dist.headers.extend(headers)
else:
self.headers.extend(headers)
def paths(self,*paths,**kws):
"""Apply glob to paths and prepend local_path if needed.
Applies glob.glob(...) to each path in the sequence (if needed) and
pre-pends the local_path if needed. Because this is called on all
source lists, this allows wildcard characters to be specified in lists
of sources for extension modules and libraries and scripts and allows
path-names be relative to the source directory.
"""
include_non_existing = kws.get('include_non_existing', True)
return gpaths(paths,
local_path = self.local_path,
include_non_existing=include_non_existing)
def _fix_paths_dict(self, kw):
for k in kw.keys():
v = kw[k]
if k in ['sources', 'depends', 'include_dirs', 'library_dirs',
'module_dirs', 'extra_objects']:
new_v = self.paths(v)
kw[k] = new_v
def add_extension(self,name,sources,**kw):
"""Add extension to configuration.
Create and add an Extension instance to the ext_modules list. This
method also takes the following optional keyword arguments that are
passed on to the Extension constructor.
Parameters
----------
name : str
name of the extension
sources : seq
list of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
include_dirs :
define_macros :
undef_macros :
library_dirs :
libraries :
runtime_library_dirs :
extra_objects :
extra_compile_args :
extra_link_args :
extra_f77_compile_args :
extra_f90_compile_args :
export_symbols :
swig_opts :
depends :
The depends list contains paths to files or directories that the
sources of the extension module depend on. If any path in the
depends list is newer than the extension module, then the module
will be rebuilt.
language :
f2py_options :
module_dirs :
extra_info : dict or list
dict or list of dict of keywords to be appended to keywords.
Notes
-----
The self.paths(...) method is applied to all lists that may contain
paths.
"""
ext_args = copy.copy(kw)
ext_args['name'] = dot_join(self.name, name)
ext_args['sources'] = sources
if 'extra_info' in ext_args:
extra_info = ext_args['extra_info']
del ext_args['extra_info']
if isinstance(extra_info, dict):
extra_info = [extra_info]
for info in extra_info:
assert isinstance(info, dict), repr(info)
dict_append(ext_args,**info)
self._fix_paths_dict(ext_args)
# Resolve out-of-tree dependencies
libraries = ext_args.get('libraries', [])
libnames = []
ext_args['libraries'] = []
for libname in libraries:
if isinstance(libname, tuple):
self._fix_paths_dict(libname[1])
# Handle library names of the form libname@relative/path/to/library
if '@' in libname:
lname, lpath = libname.split('@', 1)
lpath = os.path.abspath(njoin(self.local_path, lpath))
if os.path.isdir(lpath):
c = self.get_subpackage(None, lpath,
caller_level = 2)
if isinstance(c, Configuration):
c = c.todict()
for l in [l[0] for l in c.get('libraries', [])]:
llname = l.split('__OF__', 1)[0]
if llname == lname:
c.pop('name', None)
dict_append(ext_args,**c)
break
continue
libnames.append(libname)
ext_args['libraries'] = libnames + ext_args['libraries']
ext_args['define_macros'] = \
self.define_macros + ext_args.get('define_macros', [])
from numpy.distutils.core import Extension
ext = Extension(**ext_args)
self.ext_modules.append(ext)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add an extension '+name)
return ext
def add_library(self,name,sources,**build_info):
"""
Add library to configuration.
Parameters
----------
name : str
Name of the extension.
sources : sequence
List of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compiler_args
* extra_f90_compiler_args
* f2py_options
* language
"""
self._add_library(name, sources, None, build_info)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a library '+ name)
def _add_library(self, name, sources, install_dir, build_info):
"""Common implementation for add_library and add_installed_library. Do
not use directly"""
build_info = copy.copy(build_info)
name = name #+ '__OF__' + self.name
build_info['sources'] = sources
# Sometimes, depends is not set up to an empty list by default, and if
# depends is not given to add_library, distutils barfs (#1134)
if not 'depends' in build_info:
build_info['depends'] = []
self._fix_paths_dict(build_info)
# Add to libraries list so that it is build with build_clib
self.libraries.append((name, build_info))
def add_installed_library(self, name, sources, install_dir, build_info=None):
"""
Similar to add_library, but the specified library is installed.
Most C libraries used with `distutils` are only used to build python
extensions, but libraries built through this method will be installed
so that they can be reused by third-party packages.
Parameters
----------
name : str
Name of the installed library.
sources : sequence
List of the library's source files. See `add_library` for details.
install_dir : str
Path to install the library, relative to the current sub-package.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compiler_args
* extra_f90_compiler_args
* f2py_options
* language
Returns
-------
None
See Also
--------
add_library, add_npy_pkg_config, get_info
Notes
-----
The best way to encode the options required to link against the specified
C libraries is to use a "libname.ini" file, and use `get_info` to
retrieve the required options (see `add_npy_pkg_config` for more
information).
"""
if not build_info:
build_info = {}
install_dir = os.path.join(self.package_path, install_dir)
self._add_library(name, sources, install_dir, build_info)
self.installed_libraries.append(InstallableLib(name, build_info, install_dir))
def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
"""
Generate and install a npy-pkg config file from a template.
The config file generated from `template` is installed in the
given install directory, using `subst_dict` for variable substitution.
Parameters
----------
template : str
The path of the template, relatively to the current package path.
install_dir : str
Where to install the npy-pkg config file, relatively to the current
package path.
subst_dict : dict, optional
If given, any string of the form ``@key@`` will be replaced by
``subst_dict[key]`` in the template file when installed. The install
prefix is always available through the variable ``@prefix@``, since the
install prefix is not easy to get reliably from setup.py.
See also
--------
add_installed_library, get_info
Notes
-----
This works for both standard installs and in-place builds, i.e. the
        ``@prefix@`` refers to the source directory for in-place builds.
Examples
--------
::
config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar})
Assuming the foo.ini.in file has the following content::
[meta]
Name=@foo@
Version=1.0
Description=dummy description
[default]
Cflags=-I@prefix@/include
Libs=
The generated file will have the following content::
[meta]
Name=bar
Version=1.0
Description=dummy description
[default]
Cflags=-Iprefix_dir/include
Libs=
and will be installed as foo.ini in the 'lib' subpath.
"""
if subst_dict is None:
subst_dict = {}
basename = os.path.splitext(template)[0]
template = os.path.join(self.package_path, template)
if self.name in self.installed_pkg_config:
self.installed_pkg_config[self.name].append((template, install_dir,
subst_dict))
else:
self.installed_pkg_config[self.name] = [(template, install_dir,
subst_dict)]
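    # A hedged usage sketch combining add_installed_library and
    # add_npy_pkg_config as described above; the package name, source file and
    # template name are illustrative assumptions, only 'foo.ini.in'/'lib'
    # mirror the docstring example.
    #
    #     config = Configuration('mypkg', parent_package, top_path)
    #     config.add_installed_library('fancy_ops',
    #                                  sources=['src/fancy_ops.c'],
    #                                  install_dir='lib')
    #     config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': 'fancy_ops'})
    #     # third-party packages can later recover the link options with
    #     # get_info('foo') once this package is installed.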
def add_scripts(self,*files):
"""Add scripts to configuration.
Add the sequence of files to the beginning of the scripts list.
Scripts will be installed under the <prefix>/bin/ directory.
"""
scripts = self.paths(files)
dist = self.get_distribution()
if dist is not None:
if dist.scripts is None:
dist.scripts = []
dist.scripts.extend(scripts)
else:
self.scripts.extend(scripts)
def dict_append(self,**dict):
for key in self.list_keys:
a = getattr(self, key)
a.extend(dict.get(key, []))
for key in self.dict_keys:
a = getattr(self, key)
a.update(dict.get(key, {}))
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for key in dict.keys():
if key not in known_keys:
a = getattr(self, key, None)
if a and a==dict[key]: continue
self.warn('Inheriting attribute %r=%r from %r' \
% (key, dict[key], dict.get('name', '?')))
setattr(self, key, dict[key])
self.extra_keys.append(key)
elif key in self.extra_keys:
self.info('Ignoring attempt to set %r (from %r to %r)' \
% (key, getattr(self, key), dict[key]))
elif key in known_keys:
# key is already processed above
pass
else:
raise ValueError("Don't know about key=%r" % (key))
def __str__(self):
from pprint import pformat
known_keys = self.list_keys + self.dict_keys + self.extra_keys
s = '<'+5*'-' + '\n'
s += 'Configuration of '+self.name+':\n'
known_keys.sort()
for k in known_keys:
a = getattr(self, k, None)
if a:
s += '%s = %s\n' % (k, pformat(a))
s += 5*'-' + '>'
return s
def get_config_cmd(self):
"""
Returns the numpy.distutils config command instance.
"""
cmd = get_cmd('config')
cmd.ensure_finalized()
cmd.dump_source = 0
cmd.noisy = 0
old_path = os.environ.get('PATH')
if old_path:
path = os.pathsep.join(['.', old_path])
os.environ['PATH'] = path
return cmd
def get_build_temp_dir(self):
"""
Return a path to a temporary directory where temporary files should be
placed.
"""
cmd = get_cmd('build')
cmd.ensure_finalized()
return cmd.build_temp
def have_f77c(self):
"""Check for availability of Fortran 77 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 77 compiler is available (because a simple Fortran 77
code was able to be compiled successfully).
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77')
return flag
def have_f90c(self):
"""Check for availability of Fortran 90 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 90 compiler is available (because a simple Fortran
90 code was able to be compiled successfully)
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
return flag
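    # A hedged sketch of how have_f77c/have_f90c are meant to be used inside a
    # source-generating function (see add_extension); the generator name and
    # file paths are illustrative assumptions.
    #
    #     def generate_sources(ext, build_dir):
    #         if config.have_f77c():
    #             return ['src/impl_f77.f']
    #         return ['src/impl_c.c']
    #
    #     config.add_extension('impl', sources=[generate_sources])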
def append_to(self, extlib):
"""Append libraries, include_dirs to extension or library item.
"""
if is_sequence(extlib):
lib_name, build_info = extlib
dict_append(build_info,
libraries=self.libraries,
include_dirs=self.include_dirs)
else:
from numpy.distutils.core import Extension
assert isinstance(extlib, Extension), repr(extlib)
extlib.libraries.extend(self.libraries)
extlib.include_dirs.extend(self.include_dirs)
def _get_svn_revision(self, path):
"""Return path's SVN revision number.
"""
revision = None
m = None
cwd = os.getcwd()
try:
os.chdir(path or '.')
p = subprocess.Popen(['svnversion'], shell=True,
stdout=subprocess.PIPE, stderr=None,
close_fds=True)
sout = p.stdout
m = re.match(r'(?P<revision>\d+)', sout.read())
except:
pass
os.chdir(cwd)
if m:
revision = int(m.group('revision'))
return revision
if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
entries = njoin(path, '_svn', 'entries')
else:
entries = njoin(path, '.svn', 'entries')
if os.path.isfile(entries):
f = open(entries)
fstr = f.read()
f.close()
if fstr[:5] == '<?xml': # pre 1.4
m = re.search(r'revision="(?P<revision>\d+)"', fstr)
if m:
revision = int(m.group('revision'))
else: # non-xml entries file --- check to be sure that
m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
if m:
revision = int(m.group('revision'))
return revision
def _get_hg_revision(self, path):
"""Return path's Mercurial revision number.
"""
revision = None
m = None
cwd = os.getcwd()
try:
os.chdir(path or '.')
p = subprocess.Popen(['hg identify --num'], shell=True,
stdout=subprocess.PIPE, stderr=None,
close_fds=True)
sout = p.stdout
m = re.match(r'(?P<revision>\d+)', sout.read())
except:
pass
os.chdir(cwd)
if m:
revision = int(m.group('revision'))
return revision
branch_fn = njoin(path, '.hg', 'branch')
branch_cache_fn = njoin(path, '.hg', 'branch.cache')
if os.path.isfile(branch_fn):
branch0 = None
f = open(branch_fn)
revision0 = f.read().strip()
f.close()
branch_map = {}
for line in file(branch_cache_fn, 'r'):
branch1, revision1 = line.split()[:2]
if revision1==revision0:
branch0 = branch1
try:
revision1 = int(revision1)
except ValueError:
continue
branch_map[branch1] = revision1
revision = branch_map.get(branch0)
return revision
def get_version(self, version_file=None, version_variable=None):
"""Try to get version string of a package.
Return a version string of the current package or None if the version
information could not be detected.
Notes
-----
        This method scans files named
        __version__.py, <packagename>_version.py, version.py,
        __svn_version__.py, and __hg_version__.py for string variables
        version, __version__, and <packagename>_version, until a version
        number is found.
"""
version = getattr(self, 'version', None)
if version is not None:
return version
# Get version from version file.
if version_file is None:
files = ['__version__.py',
self.name.split('.')[-1]+'_version.py',
'version.py',
'__svn_version__.py',
'__hg_version__.py']
else:
files = [version_file]
if version_variable is None:
version_vars = ['version',
'__version__',
self.name.split('.')[-1]+'_version']
else:
version_vars = [version_variable]
for f in files:
fn = njoin(self.local_path, f)
if os.path.isfile(fn):
info = (open(fn), fn, ('.py', 'U', 1))
name = os.path.splitext(os.path.basename(fn))[0]
n = dot_join(self.name, name)
try:
version_module = imp.load_module('_'.join(n.split('.')),*info)
except ImportError:
msg = get_exception()
self.warn(str(msg))
version_module = None
if version_module is None:
continue
for a in version_vars:
version = getattr(version_module, a, None)
if version is not None:
break
if version is not None:
break
if version is not None:
self.version = version
return version
# Get version as SVN or Mercurial revision number
revision = self._get_svn_revision(self.local_path)
if revision is None:
revision = self._get_hg_revision(self.local_path)
if revision is not None:
version = str(revision)
self.version = version
return version
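    # A hedged usage sketch: a subpackage setup.py would typically call
    # get_version() once and pass the result on to setup(); the package name
    # 'mypkg' is illustrative only.
    #
    #     config = Configuration('mypkg', parent_package, top_path)
    #     version = config.get_version()  # version files first, then SVN/HG revision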
def make_svn_version_py(self, delete=True):
"""Appends a data function to the data_files list that will generate
__svn_version__.py file to the current package directory.
Generate package __svn_version__.py file from SVN revision number,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __svn_version__.py existed before, nothing is done.
This is
intended for working with source directories that are in an SVN
repository.
"""
target = njoin(self.local_path, '__svn_version__.py')
revision = self._get_svn_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_svn_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
f = open(target, 'w')
f.write('version = %r\n' % (version))
f.close()
import atexit
def rm_file(f=target,p=self.info):
if delete:
try: os.remove(f); p('removed '+f)
except OSError: pass
try: os.remove(f+'c'); p('removed '+f+'c')
except OSError: pass
atexit.register(rm_file)
return target
self.add_data_files(('', generate_svn_version_py()))
def make_hg_version_py(self, delete=True):
"""Appends a data function to the data_files list that will generate
__hg_version__.py file to the current package directory.
Generate package __hg_version__.py file from Mercurial revision,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __hg_version__.py existed before, nothing is done.
This is intended for working with source directories that are
in an Mercurial repository.
"""
target = njoin(self.local_path, '__hg_version__.py')
revision = self._get_hg_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_hg_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
f = open(target, 'w')
f.write('version = %r\n' % (version))
f.close()
import atexit
def rm_file(f=target,p=self.info):
if delete:
try: os.remove(f); p('removed '+f)
except OSError: pass
try: os.remove(f+'c'); p('removed '+f+'c')
except OSError: pass
atexit.register(rm_file)
return target
self.add_data_files(('', generate_hg_version_py()))
def make_config_py(self,name='__config__'):
"""Generate package __config__.py file containing system_info
information used during building the package.
This file is installed to the
package installation directory.
"""
self.py_modules.append((self.name, name, generate_config_py))
def get_info(self,*names):
"""Get resources information.
Return information (from system_info.get_info) for all of the names in
the argument list in a single dictionary.
"""
from .system_info import get_info, dict_append
info_dict = {}
for a in names:
dict_append(info_dict,**get_info(a))
return info_dict
def get_cmd(cmdname, _cache={}):
if cmdname not in _cache:
import distutils.core
dist = distutils.core._setup_distribution
if dist is None:
from distutils.errors import DistutilsInternalError
raise DistutilsInternalError(
'setup distribution instance not initialized')
cmd = dist.get_command_obj(cmdname)
_cache[cmdname] = cmd
return _cache[cmdname]
def get_numpy_include_dirs():
# numpy_include_dirs are set by numpy/core/setup.py, otherwise []
include_dirs = Configuration.numpy_include_dirs[:]
if not include_dirs:
import numpy
include_dirs = [ numpy.get_include() ]
# else running numpy/core/setup.py
return include_dirs
def get_npy_pkg_dir():
"""Return the path where to find the npy-pkg-config directory."""
# XXX: import here for bootstrapping reasons
import numpy
d = os.path.join(os.path.dirname(numpy.__file__),
'core', 'lib', 'npy-pkg-config')
return d
def get_pkg_info(pkgname, dirs=None):
"""
Return library info for the given package.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of additional directories where to look
for npy-pkg-config files. Those directories are searched prior to the
NumPy directory.
Returns
-------
pkginfo : class instance
The `LibraryInfo` instance containing the build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_npy_pkg_config, Configuration.add_installed_library,
get_info
"""
from numpy.distutils.npy_pkg_config import read_config
if dirs:
dirs.append(get_npy_pkg_dir())
else:
dirs = [get_npy_pkg_dir()]
return read_config(pkgname, dirs)
def get_info(pkgname, dirs=None):
"""
Return an info dict for a given C library.
The info dict contains the necessary options to use the C library.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of additional directories where to look
for npy-pkg-config files. Those directories are searched prior to the
NumPy directory.
Returns
-------
info : dict
The dictionary with build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_npy_pkg_config, Configuration.add_installed_library,
get_pkg_info
Examples
--------
To get the necessary information for the npymath library from NumPy:
>>> npymath_info = np.distutils.misc_util.get_info('npymath')
>>> npymath_info #doctest: +SKIP
{'define_macros': [], 'libraries': ['npymath'], 'library_dirs':
['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']}
This info dict can then be used as input to a `Configuration` instance::
config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)
"""
from numpy.distutils.npy_pkg_config import parse_flags
pkg_info = get_pkg_info(pkgname, dirs)
# Translate LibraryInfo instance into a build_info dict
info = parse_flags(pkg_info.cflags())
for k, v in parse_flags(pkg_info.libs()).items():
info[k].extend(v)
# add_extension extra_info argument is ANAL
info['define_macros'] = info['macros']
del info['macros']
del info['ignored']
return info
def is_bootstrapping():
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
try:
builtins.__NUMPY_SETUP__
return True
except AttributeError:
return False
__NUMPY_SETUP__ = False
#########################
def default_config_dict(name = None, parent_name = None, local_path=None):
"""Return a configuration dictionary for usage in
configuration() function defined in file setup_<name>.py.
"""
import warnings
warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\
'deprecated default_config_dict(%r,%r,%r)'
% (name, parent_name, local_path,
name, parent_name, local_path,
))
c = Configuration(name, parent_name, local_path)
return c.todict()
def dict_append(d, **kws):
for k, v in kws.items():
if k in d:
ov = d[k]
if isinstance(ov, str):
d[k] = v
else:
d[k].extend(v)
else:
d[k] = v
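# A small illustration of dict_append's merge semantics (values are arbitrary):
#
#     d = {'libraries': ['m'], 'name': 'old'}
#     dict_append(d, libraries=['npymath'], name='new')
#     # d['libraries'] -> ['m', 'npymath']   (existing sequences are extended)
#     # d['name']      -> 'new'              (existing strings are replaced)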
def appendpath(prefix, path):
if os.path.sep != '/':
prefix = prefix.replace('/', os.path.sep)
path = path.replace('/', os.path.sep)
drive = ''
if os.path.isabs(path):
drive = os.path.splitdrive(prefix)[0]
absprefix = os.path.splitdrive(os.path.abspath(prefix))[1]
pathdrive, path = os.path.splitdrive(path)
d = os.path.commonprefix([absprefix, path])
if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \
or os.path.join(path[:len(d)], path[len(d):]) != path:
# Handle invalid paths
d = os.path.dirname(d)
subpath = path[len(d):]
if os.path.isabs(subpath):
subpath = subpath[1:]
else:
subpath = path
return os.path.normpath(njoin(drive + prefix, subpath))
def generate_config_py(target):
"""Generate config.py file containing system_info information
used during building the package.
Usage:
config['py_modules'].append((packagename, '__config__',generate_config_py))
"""
from numpy.distutils.system_info import system_info
from distutils.dir_util import mkpath
mkpath(os.path.dirname(target))
f = open(target, 'w')
f.write('# This file is generated by %s\n' % (os.path.abspath(sys.argv[0])))
f.write('# It contains system_info results at the time of building this package.\n')
f.write('__all__ = ["get_info","show"]\n\n')
    for k, i in system_info.saved_results.items():
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 1 09:52:46 2018
@author: Jackie
"""
import numpy as np
from skimage.morphology import label, binary_dilation,binary_erosion,remove_small_holes
from scipy.ndimage import generate_binary_structure
from load import mask_12m_no, mask_12m
from lib import delta
def postprocess(preds, config):
assert preds.shape[2]==5
ldelta = delta(preds[:,:,1:])
#ldelta = delta0(preds[:,:,5:])
connected = np.all(ldelta>config.GRADIENT_THRES, 2)
base = connected * (preds[:,:,0]>config.MASK_THRES)
wall = np.sum(np.abs(preds[:,:,1:]),axis = -1)
base_label = label(base)
vals, counts = np.unique(base_label[base_label>0], return_counts=True)
for val in vals[(counts<config.CLIP_AREA_LOW)]:
base_label[base_label==val]=0
vals = vals[(counts>=config.CLIP_AREA_LOW)]
for val in vals:
label_mask = base_label == val
if np.sum(label_mask)==0:
continue
label_mask = remove_small_holes(label_mask)
label_mask = basin(label_mask, wall)
label_mask = remove_small_holes(label_mask)
'''
label_bdr = label_mask^binary_erosion(label_mask)
min_wall = np.min(wall[label_mask])
ave_bdr_wall = np.mean(wall[label_bdr])
if ave_bdr_wall < min_wall + config.WALL_DEPTH:
label_mask = 0
'''
base_label[label_mask] = val
vals, counts = np.unique(base_label[base_label>0], return_counts=True)
for val in vals[(counts<config.CLIP_AREA_LOW)]:
base_label[base_label==val]=0
return base_label
def modify_w_unet(base_label, preds, thres=0.25):
base_label = base_label.copy()
vals = np.unique(base_label[base_label>0])
struct = generate_binary_structure(2,2)
for nb_dilation in range(3):
for val in vals:
label_mask = base_label==val
base_label[binary_dilation(label_mask,struct)&(preds[:,:,0]>thres)&(base_label==0)]=val
return base_label
def compute_overlaps_masks(masks1, masks2):
'''Computes IoU overlaps between two sets of masks.
masks1, masks2: [Height, Width, instances]
'''
# flatten masks
masks1 = np.reshape(masks1 > .5, (-1, masks1.shape[-1])).astype(np.float32)
masks2 = np.reshape(masks2 > .5, (-1, masks2.shape[-1])).astype(np.float32)
area1 = np.sum(masks1, axis=0)
area2 = np.sum(masks2, axis=0)
# intersections and union
    intersections = np.dot(masks1.T, masks2)
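    # Assumed completion following the standard mask-IoU definition sketched in
    # the docstring above: union via inclusion-exclusion, then the overlap matrix.
    union = area1[:, None] + area2[None, :] - intersections
    overlaps = intersections / union
    return overlaps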
import os,sys
import numpy as np
import cv2
import matplotlib.pyplot as plt
from collections import deque
import pprint
from numpy.polynomial import Polynomial
pp = pprint.PrettyPrinter(indent=2, width=100)
print(' Loading line.py - cwd:', os.getcwd())
# Define a class to receive the characteristics of each line detection
class Line():
def __init__(self, history, height, y_src_top, y_src_bot, **kwargs):
self.history = history
self.compute_history = kwargs.get('compute_history',2)
self.name = kwargs.get('name', '')
self.POLY_DEGREE = kwargs.get('poly_degree', 2)
self.MIN_POLY_DEGREE = kwargs.get('min_poly_degree', self.POLY_DEGREE)
self.RSE_THRESHOLD = kwargs.get('rse_threshold', 120)
self.MIN_X_SPREAD = kwargs.get('min_x_spread', 90)
self.MIN_Y_SPREAD = kwargs.get('min_y_spread', 350)
# height of current image
self.set_height(height)
print(' poly degree = ', self.POLY_DEGREE)
self.units = 'm'
__MX_nom = 3.7
__MY_nom = 30
__MX_denom = 700
__MY_denom = height
self.set_MY(__MY_nom, __MY_denom, debug = False) # meters per pixel in y dimension
self.set_MX(__MX_nom, __MX_denom, debug = False) # meters per pixel in x dimension
# was the line detected in the last iteration?
self.detected = False
self.ttlAccepted = 0
self.ttlRejected = 0
self.ttlRejectedFramesSinceDetected = 0
self.ttlAcceptedFramesSinceRejected = 0
self.y_src_bot = y_src_bot
self.y_src_top = y_src_top
self.fitPolynomial = None
        # y values corresponding to the current fit / image height
self.set_ploty()
self.y_checkpoints = np.concatenate((np.arange(self.y_src_bot,-1,-100), [self.y_src_top]))
self.y_checkpoints = np.flip(np.sort(self.y_checkpoints))
#-----------------------------------------------------------------------
# polynomial coefficients for the most recent fit / fit history
# proposed_curve: x values of the most recent fitting of the line
# fitted_history: x values of the last n fits of the line
#-----------------------------------------------------------------------
self.proposed_fit = None ## np.array([0,0,0], dtype='float')
        self.proposed_fit_history = [np.array([0,0,0], dtype='float')]
#!/usr/bin/env python3
#
# Calculates the mean and std of temperatures used in midpoint reports
#
import base
import numpy as np
import matplotlib.pyplot as pl
DEBUG = False
def tasks():
"""
Returns a list of the tasks in this file.
"""
return [
MidpointTemperatures(),
]
class MidpointTemperatures(base.Task):
def __init__(self):
super(MidpointTemperatures, self).__init__('midpoint_temperatures')
self._set_data_subdir('midpointswt')
def gather(self, q):
# Query db
tmin, tmax, tmid = [], [], []
with base.connect() as con:
c = con.cursor()
for row in c.execute(q):
tmin.append(row['tmin'])
tmax.append(row['tmax'])
tmid.append(0.5 * (row['tmin'] + row['tmax']))
# Create list of tmin, tmax, tmid
tmin = np.array(tmin)
tmax = np.array(tmax)
tmid = np.array(tmid)
        print('TMid, mean: ' + str(np.mean(tmid)))
print('TMid, std : ' + str(np.std(tmid)))
print('2Sigma range: ' + str(4*np.std(tmid)))
        print('Min, max: ' + str(np.min(tmin)) + ', ' + str(np.max(tmax)))
import numpy as np
from scipy.stats import lognorm
from scipy.optimize import curve_fit
from abc import ABC, abstractmethod
from icecube_tools.detector.effective_area import (
R2015AeffReader,
R2015_AEFF_FILENAME,
)
from icecube_tools.utils.data import IceCubeData, find_files, data_directory
"""
Module for handling the energy resolution
of IceCube using publicly available information.
"""
GIVEN_ETRUE = 0
GIVEN_ERECO = 1
_supported_dataset_ids = ["20150820"]
class EnergyResolutionBase(ABC):
"""
Abstract base class for energy resolution.
Stores information on how the reconstructed
energy in the detector relates to the true
neutrino energy.
"""
@property
def values(self):
"""
A 2D histogram of probabilities normalised
over reconstructed energy.
x-axis <=> true_energy
y-axis <=> reco_energy
"""
return self._values
@values.setter
def values(self, values):
if len(np.shape(values)) > 2:
raise ValueError(str(values) + " is not a 2D array.")
else:
self._values = values
@property
def true_energy_bins(self):
return self._true_energy_bins
@true_energy_bins.setter
def true_energy_bins(self, value):
self._true_energy_bins = value
@property
def reco_energy_bins(self):
return self._reco_energy_bins
@reco_energy_bins.setter
def reco_energy_bins(self, value):
self._reco_energy_bins = value
@abstractmethod
def sample(self):
pass
class EnergyResolution(EnergyResolutionBase):
"""
Muon neutrino energy resolution using public data.
Makes use of the 2015 effective area release and its
corresponding reader class.
Based on implementation by <NAME> (@chrhck).
"""
supported_datasets = _supported_dataset_ids
def __init__(self, filename, conditional=GIVEN_ETRUE, **kwargs):
"""
Muon neutrino energy resolution using public data.
Makes use of the 2015 effective area release and its
corresponding reader class.
Based on implementation by <NAME> (@chrhck).
:param filename: Name of file to be read in.
:param kwargs: year and/or nu_type can be specified.
See release for more info.
Link: https://icecube.wisc.edu/science/data/HE_NuMu_diffuse.
"""
super().__init__()
self._conditional = conditional
self._reader = R2015AeffReader(filename, **kwargs)
self.true_energy_bins = self._reader.true_energy_bins
self.reco_energy_bins = self._reader.reco_energy_bins
self.values = self._integrate_out_cos_zenith()
self.values = self._get_conditional()
self.values = self._normalise()
self._fit_lognormal()
self._fit_polynomial()
@classmethod
def from_dataset(cls, dataset_id, fetch=True, **kwargs):
"""
Load energy resolution from publicly
available data.
:param dataset_id: Date identifying the dataset
e.g. "20181018"
:param fetch: If true, download dataset if missing
"""
if dataset_id not in _supported_dataset_ids:
raise NotImplementedError("This dataset is not currently supported")
if fetch:
data_interface = IceCubeData()
dataset = data_interface.find(dataset_id)
data_interface.fetch(dataset)
dataset_dir = data_interface.get_path_to(dataset[0])
else:
dataset_dir = data_directory
if dataset_id == "20150820":
files = find_files(dataset_dir, R2015_AEFF_FILENAME)
eres_file_name = files[0]
return cls(eres_file_name, **kwargs)
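    # A hedged usage sketch of the loader above; "20150820" is the supported
    # dataset id listed in _supported_dataset_ids, and the plotting lines are
    # illustrative only.
    #
    #     eres = EnergyResolution.from_dataset("20150820")
    #     import matplotlib.pyplot as plt
    #     plt.pcolormesh(eres.true_energy_bins, eres.reco_energy_bins,
    #                    eres.values.T)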
def _integrate_out_cos_zenith(self):
"""
We are only interested in the energy
dependence.
"""
dim_to_int = self._reader._label_order["cos_zenith"]
return np.sum(self._reader.effective_area_values, axis=dim_to_int)
def _get_conditional(self):
"""
From the joint distribution of Etrue and Ereco
we want the conditional of Ereco | Etrue OR Etrue | Ereco.
"""
if self._conditional == GIVEN_ETRUE:
true_energy_dist = self.values.T.sum(axis=0)
# To avoid zero division
true_energy_dist[true_energy_dist == 0] = 1e-10
conditional = np.nan_to_num(self.values.T / true_energy_dist).T
elif self._conditional == GIVEN_ERECO:
reco_energy_dist = self.values.sum(axis=0)
conditional = np.nan_to_num(self.values / reco_energy_dist)
else:
raise ValueError("conditional must be GIVEN_ETRUE or GIVEN_ERECO")
return conditional
def _normalise(self):
"""
        Normalise over the reconstructed energy so
        that at each Etrue bin there is a probability
        distribution over Ereco.
"""
if self._conditional == GIVEN_ETRUE:
normalised = np.zeros(
(len(self.true_energy_bins[:-1]), len(self.reco_energy_bins[:-1]))
)
for i, Etrue in enumerate(self.true_energy_bins[:-1]):
norm = 0
for j, Ereco in enumerate(self.reco_energy_bins[:-1]):
delta_Ereco = self.reco_energy_bins[j + 1] - Ereco
norm += self.values[i][j] * delta_Ereco
# Avoid zero division
if norm == 0:
norm = 1e-10
normalised[i] = self.values[i] / norm
elif self._conditional == GIVEN_ERECO:
normalised = np.zeros(
(len(self.true_energy_bins[:-1]), len(self.reco_energy_bins[:-1]))
).T
for i, Ereco in enumerate(self.reco_energy_bins[:-1]):
norm = 0
for j, Etrue in enumerate(self.true_energy_bins[:-1]):
delta_Etrue = self.true_energy_bins[j + 1] - Etrue
norm += self.values.T[i][j] * delta_Etrue
normalised[i] = self.values.T[i] / norm
normalised = normalised.T
return normalised
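    # A quick sanity-check sketch (GIVEN_ETRUE case, an assumption): after
    # normalisation each Etrue row should integrate to roughly 1 over the
    # reco-energy bin widths, except rows that were empty before the
    # zero-division guard above.
    #
    #     widths = np.diff(self.reco_energy_bins)
    #     row_integrals = (normalised * widths).sum(axis=1)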
def _fit_lognormal(self):
"""
Fit a lognormal distribution for each Etrue
and store its parameters.
"""
def _lognorm_wrapper(E, mu, sigma):
return lognorm.pdf(E, sigma, loc=0, scale=mu)
self._mu = []
self._sigma = []
if self._conditional == GIVEN_ETRUE:
self.reco_energy_bin_cen = (
self.reco_energy_bins[:-1] + self.reco_energy_bins[1:]
) / 2
for i, Etrue in enumerate(self.true_energy_bins[:-1]):
try:
fit_vals, _ = curve_fit(
_lognorm_wrapper,
self.reco_energy_bin_cen,
np.nan_to_num(self.values[i]),
p0=(Etrue, 0.5),
)
self._mu.append(fit_vals[0])
self._sigma.append(fit_vals[1])
except:
self._mu.append(np.nan)
self._sigma.append(np.nan)
elif self._conditional == GIVEN_ERECO:
self.true_energy_bin_cen = (
self.true_energy_bins[:-1] + self.true_energy_bins[1:]
) / 2
for i, Ereco in enumerate(self.reco_energy_bins[:-1]):
try:
fit_vals, _ = curve_fit(
_lognorm_wrapper,
self.true_energy_bin_cen,
np.nan_to_num(self.values.T[i]),
p0=(Ereco, 0.5),
)
self._mu.append(fit_vals[0])
self._sigma.append(fit_vals[1])
except:
self._mu.append(np.nan)
self._sigma.append(np.nan)
def _fit_polynomial(self):
"""
        Fit a polynomial to approximate the lognormal
        parameters at extreme energies where statistics
        are low.
"""
# polynomial degree
degree = 5
mu_sel = np.where(np.isfinite(self._mu))
mu = np.array(self._mu)[mu_sel]
sigma_sel = np.where(np.isfinite(self._sigma))
sigma = np.array(self._sigma)[sigma_sel]
if self._conditional == GIVEN_ETRUE:
# hard coded values for excluding low statistics
imin = 5
imax = 210
true_energy_bin_cen = (
self.true_energy_bins[:-1] + self.true_energy_bins[1:]
) / 2
Etrue_cen_mu = true_energy_bin_cen[mu_sel]
Etrue_cen_sigma = true_energy_bin_cen[sigma_sel]
mu_pars = np.polyfit(
np.log10(Etrue_cen_mu[imin:imax]), np.log10(mu[imin:imax]), degree
)
sigma_pars = np.polyfit(
np.log10(Etrue_cen_sigma[imin:imax]), np.log10(sigma[imin:imax]), degree
)
elif self._conditional == GIVEN_ERECO:
            # hard coded values for excluding low statistics
imin = 5
imax = 45
reco_energy_bin_cen = (
self.reco_energy_bins[:-1] + self.reco_energy_bins[1:]
) / 2
Ereco_cen_mu = reco_energy_bin_cen[mu_sel]
Ereco_cen_sigma = reco_energy_bin_cen[sigma_sel]
mu_pars = np.polyfit(
np.log10(Ereco_cen_mu[imin:imax]), np.log10(mu[imin:imax]), degree
)
            sigma_pars = np.polyfit(
                np.log10(Ereco_cen_sigma[imin:imax]), np.log10(sigma[imin:imax]), degree
            )
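        # Sketch of how the fitted log-log polynomials could be evaluated to
        # recover mu and sigma at an energy E; only mu_pars/sigma_pars above
        # come from the fit, the names below are assumptions for illustration.
        #
        #     mu_at_E = 10 ** np.polyval(mu_pars, np.log10(E))
        #     sigma_at_E = 10 ** np.polyval(sigma_pars, np.log10(E))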
import sys
from os import path
import re
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
"""
Class for creating dot plot for a set of 2 given sequences
"""
class DotPlot:
sequence1 = ''
sequence2 = ''
window_size = 3
threshold = 2
regex = "^[ACDEFGHIKLMNPQRSTVWY\s]+$"
"""
Constructor method
Creates DotPlot object and initializes needed variables.
input:
argv: string [n] - vector of n input arguments
"""
def __init__(self, argv):
len_argv = len( argv )
if ( len_argv != 4 and len_argv != 3 ) or \
argv[0] == "-h" or argv[0] == "--h" or \
"-help" in argv or "--help" in argv or \
not argv[-1].isnumeric() or not argv[-2].isnumeric():
self.display_help()
sys.exit()
self.parse_input( argv )
"""
normalize_sequence method
Normalizes sequence string to expected format.
input:
sequence: string - string with sequence to normalize.
output:
Normalized sequence
"""
def normalize_sequence(self, sequence):
return sequence.upper().replace(" ", "").replace("\n", "")
"""
    parse_input method
Parses input arguments (argv) to expected format
input:
argv: string [n] - vector of n input arguments
"""
def parse_input(self, argv):
len_argv = len(argv)
if len_argv == 3:
sequences = self.fasta_read(argv[0])
if len(sequences) < 2:
self.display_help()
sys.exit()
self.sequence1 = sequences[0]
self.sequence2 = sequences[1]
else:
if re.search(self.regex, argv[0], re.IGNORECASE):
self.sequence1 = self.normalize_sequence(argv[0])
else:
self.sequence1 = self.normalize_sequence(self.fasta_read(argv[0])[0])
if re.search(self.regex, argv[1], re.IGNORECASE):
self.sequence2 = self.normalize_sequence(argv[1])
else:
self.sequence2 = self.normalize_sequence(self.fasta_read(argv[1])[0])
self.window_size = int(argv[-2])
self.threshold = int(argv[-1])
"""
fasta_read method
Reads .fasta file and returns vector of sequences from file.
input:
directory: string - path to .fasta file.
output:
string [n]: vector of n sequences from file.
"""
def fasta_read(self, directory):
if not ( directory.endswith(".fasta") or directory.endswith(".FASTA") ):
directory += ".fasta"
if not path.isfile(directory):
print("File: " + directory + " does not exist.")
self.display_help()
sys.exit()
sequences = []
seq = ""
with open(directory, "r") as file_handle:
for line in file_handle.readlines():
if line[0] == ">":
if len(seq) != 0:
sequences.append(seq.upper())
seq = ""
else:
seq += line
if len(seq) != 0:
sequences.append(seq.upper())
if len(sequences) == 0:
print("File: " + directory + " does not contain any sequence or is not in right format (.fasta).")
self.display_help()
sys.exit()
return sequences
"""
dot_plot method
Calculates dot plot matrix
output:
int [n, 2] - vector of n 2d coordinates (x, y) for dot plot
"""
def dot_plot(self):
l1 = len( self.sequence1 )
l2 = len( self.sequence2 )
padding = self.window_size - 1
points = [[l1, l2]]
grid = np.zeros( [l2, l1] )
for i in range( l1 ):
for j in range( l2 ):
if self.sequence1[i] == self.sequence2[j]:
grid[j, i] = 1
kernel = np.zeros( [self.window_size, self.window_size] )
np.fill_diagonal( kernel, 1 )
result = signal.convolve2d( grid, kernel, mode = 'valid' )
result[result < self.threshold] = 0
result = np.pad( result, (0, padding) )
result *= grid
for i in range(l1):
for j in range(l2):
if result[j, i] > 0:
points.append( [i, j] )
        return np.array(points)
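    # A hedged usage sketch of the class above; the peptide sequence, window
    # size 3 and threshold 2 are illustrative arguments (points[0] holds the
    # two sequence lengths, so it is skipped when plotting).
    #
    #     dp = DotPlot(['MKTAYIAKQR', 'MKTAYIAKQR', '3', '2'])
    #     points = dp.dot_plot()
    #     plt.scatter(points[1:, 0], points[1:, 1], s=4)
    #     plt.gca().invert_yaxis()
    #     plt.show()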
# ■ Chapter 3 contents
#
# 1. Activation functions
#    - step function
#    - sigmoid function
#    - relu function
# 2. Matrix dot product
#
# ■ 3.1 Activation functions
# * Difference between a perceptron and a neural network:
#   - Perceptron: a person has to set appropriate weight values by hand so that the desired result is output.
#   - Neural network: the machine learns the appropriate weight parameters automatically from the data.
#
# Single-layer perceptron: step function
# Multi-layer perceptron: sigmoid, relu, ... must be used so that the 0-or-1 output of the multiple layers is meaningful.
print('====================================================================================================')
print('== Problem 35. Implement the step function in Python.')
print('====================================================================================================\n')
def step_function(x):
if x > 0:
return 1
else:
return 0
import numpy as np
def step_function(x):
y = x > 0
    return y.astype(int)  # astype converts True to 1 and False to 0
x_data = np.array([-1, 0, 1])
print(step_function(x_data))
print('====================================================================================================')
print('== Problem 36. Plot the step function using the step_function defined above.')
print('====================================================================================================\n')
import numpy as np
import matplotlib.pylab as plt
def step_function(x):
y = x > 0
    return y.astype(int)  # astype converts True to 1 and False to 0
x_data = np.arange(-5, 5, 0.1)
y = step_function(x_data)
plt.plot(x_data, y)
plt.ylim(-0.1, 1.1)
plt.show()
print(step_function(x_data))
print('====================================================================================================')
print('== Problem 37. Produce the graph output shown below.')
print('====================================================================================================\n')
import numpy as np
import matplotlib.pylab as plt
def step_function(x):
y = x < 0
    return y.astype(int)  # astype converts True to 1 and False to 0
x_data = np.arange(-5, 5, 0.1)
y = step_function(x_data)
plt.plot(x_data, y)
plt.ylim(-0.1, 1.1)
plt.show()
print(step_function(x_data))
print('====================================================================================================')
print('== Problem 38. (lunchtime problem)')
print('====================================================================================================\n')
import numpy as np
def step_function(x):
y = x > 0
    return y.astype(int)  # astype converts True to 1 and False to 0
x = np.array([-1, 0, 0])
w = np.array([0.3, 0.4, 0.1])
print(step_function(sum(x * w)))
print('====================================================================================================')
print('== Problem 39. Implement the sigmoid function in Python.')
print('====================================================================================================\n')
import numpy as np
def sigmoid(a):
return 1/(1+np.exp(-a) + 0.00001)
print(sigmoid(2.0))
print('====================================================================================================')
print('== Problem 40. Plot the sigmoid function.')
print('====================================================================================================\n')
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(a):
return 1/(1+np.exp(-a) + 0.00001)
x = np.arange(-5.0, 5.0, 0.1)
y = sigmoid(x)
plt.plot(x, y)
plt.ylim(-0.1, 1.1)
plt.show()
print('====================================================================================================')
print('== Problem 41. Produce the graph output shown below.')
print('====================================================================================================\n')
def sigmoid(a):
return 1/(1+np.exp(a) + 0.00001)
x = np.arange(-5.0, 5.0, 0.1)
y = sigmoid(x)
plt.plot(x, y)
plt.ylim(-0.1, 1.1)
plt.show()
print('====================================================================================================')
print('== Problem 42. Plot the step function and the sigmoid function together, as on page 74 of the book.')
print('====================================================================================================\n')
def sigmoid(a):
return 1/(1+np.exp(-a) + 0.00001)
def step_function(x):
y = x > 0
    return y.astype(int)
x = np.arange(-5.0, 5.0, 0.1)
y1 = sigmoid(x)
y2 = step_function(x)
plt.plot(x, y1)
plt.plot(x, y2)
plt.ylim(-0.1, 1.1)
plt.show()
print('====================================================================================================')
print('== Problem 43. Create the ReLU function.')
print('====================================================================================================\n')
def relu(a):
    return np.maximum(a, 0)  # returns the larger of the two (element-wise max with 0)
print(relu(-1))
print(relu(0.3))
print('====================================================================================================')
print('== Problem 44. Plot the ReLU function.')
print('====================================================================================================\n')
x = np.arange(-5.0, 5.0, 0.1)
y = relu(x)
plt.plot(x, y)
plt.show()
print('====================================================================================================')
print('== Problem 45. Implement the matrix product (matrix dot product) below in Python.')
print('====================================================================================================\n')
a = np.array([[1, 2, 3], [4, 5, 6]])
b = np.array([[5, 6], [7, 8], [9, 10]])
print(np.dot(a, b))
print('====================================================================================================')
print('== Problem 46. Implement the matrix product (matrix dot product) below in Python.')
print('====================================================================================================\n')
a = np.array([[5, 6], [7, 8], [9, 10]])
b = np.array([[1], [2]])
print(np.dot(a, b))
print('====================================================================================================')
print('== Problem 47. Implement the figure below with numpy.')
print('====================================================================================================\n')
x = np.array([1, 2])
w = np.array([[1, 3, 5], [2, 4, 6]])
b = np.array([7, 8, 9])
print(np.dot(x, w) + b)
print('====================================================================================================')
print('== Problem 48. Check what value z is output when y, the weighted sum of the input signals ')
print('== obtained in the problem above, is passed through the sigmoid activation function. ')
print('====================================================================================================\n')
x = np.array([1, 2])
w = np.array([[1, 3, 5], [2, 4, 6]])
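# An assumed completion that mirrors Problem 47: the bias b, the weighted sum y,
# and the sigmoid output z are reconstructed by analogy with the code above.
b = np.array([7, 8, 9])
y = np.dot(x, w) + b
z = sigmoid(y)
print(z)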
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Deep Recurrent Reinforcement Learning: 1 LSTM layer and 4 Dense layers, tanh activation function, 12 episodes, 50 iterations
drnnLSTMtanhMakespan0=[799, 798, 799, 799, 805, 806, 799, 805, 805, 800, 798, 798]
drnnLSTMtanhMakespan1=[800, 798, 796, 800, 796, 794, 795, 798, 800, 798, 805, 798]
drnnLSTMtanhMakespan2=[796, 800, 798, 804, 800, 798, 798, 798, 800, 800, 802, 797]
drnnLSTMtanhMakespan3=[805, 800, 800, 803, 794, 802, 800, 798, 799, 804, 799, 806]
drnnLSTMtanhMakespan4=[796, 798, 795, 798, 796, 799, 800, 796, 796, 798, 806, 800]
drnnLSTMtanhMakespan5=[798, 798, 799, 800, 800, 808, 798, 798, 801, 796, 799, 798]
drnnLSTMtanhMakespan6=[800, 796, 805, 798, 798, 796, 799, 800, 803, 800, 798, 800]
drnnLSTMtanhMakespan7=[799, 805, 802, 805, 800, 799, 800, 799, 805, 800, 794, 796]
drnnLSTMtanhMakespan8=[799, 798, 800, 798, 798, 800, 800, 800, 804, 799, 800, 804]
drnnLSTMtanhMakespan9=[795, 800, 795, 796, 798, 796, 797, 800, 797, 798, 796, 795]
drnnLSTMtanhMakespan10=[804, 799, 805, 798, 798, 798, 805, 800, 796, 804, 796, 799]
drnnLSTMtanhMakespan11=[795, 803, 805, 798, 795, 801, 798, 798, 804, 803, 799, 804]
drnnLSTMtanhMakespan12=[798, 798, 799, 800, 798, 798, 799, 799, 801, 796, 799, 798]
drnnLSTMtanhMakespan13=[798, 798, 799, 797, 796, 796, 800, 797, 805, 800, 800, 794]
drnnLSTMtanhMakespan14=[800, 798, 798, 796, 800, 800, 798, 798, 802, 798, 802, 798]
drnnLSTMtanhMakespan15=[796, 796, 800, 801, 800, 800, 796, 794, 796, 800, 796, 798]
drnnLSTMtanhMakespan16=[798, 798, 795, 797, 795, 799, 800, 796, 795, 796, 800, 800]
drnnLSTMtanhMakespan17=[794, 795, 800, 798, 795, 796, 798, 796, 795, 794, 798, 796]
drnnLSTMtanhMakespan18=[797, 795, 794, 794, 800, 796, 796, 795, 798, 795, 798, 794]
drnnLSTMtanhMakespan19=[797, 795, 795, 796, 798, 799, 795, 799, 795, 794, 795, 795]
drnnLSTMtanhMakespan20=[796, 794, 798, 797, 798, 799, 795, 795, 797, 795, 795, 792]
drnnLSTMtanhMakespan21=[797, 795, 797, 793, 794, 794, 800, 794, 798, 795, 797, 795]
drnnLSTMtanhMakespan22=[794, 800, 798, 795, 795, 796, 796, 799, 795, 794, 795, 795]
drnnLSTMtanhMakespan23=[795, 795, 794, 795, 794, 794, 797, 799, 796, 794, 794, 795]
drnnLSTMtanhMakespan24=[798, 795, 795, 795, 792, 794, 795, 794, 794, 795, 795, 795]
drnnLSTMtanhMakespan25=[794, 792, 794, 795, 795, 794, 794, 794, 794, 795, 794, 793]
drnnLSTMtanhMakespan26=[794, 794, 795, 796, 798, 795, 794, 794, 794, 794, 795, 794]
drnnLSTMtanhMakespan27=[795, 794, 795, 795, 795, 794, 794, 794, 794, 794, 795, 795]
drnnLSTMtanhMakespan28=[795, 794, 794, 795, 794, 795, 795, 795, 795, 794, 795, 794]
drnnLSTMtanhMakespan29=[792, 794, 795, 794, 794, 795, 794, 793, 795, 794, 795, 792]
drnnLSTMtanhMakespan30=[795, 794, 795, 795, 794, 794, 794, 795, 794, 794, 794, 794]
drnnLSTMtanhMakespan31=[794, 794, 795, 794, 795, 793, 795, 795, 795, 792, 794, 794]
drnnLSTMtanhMakespan32=[795, 795, 794, 793, 795, 795, 795, 795, 794, 794, 795, 794]
drnnLSTMtanhMakespan33=[793, 794, 795, 793, 792, 795, 794, 794, 794, 794, 794, 795]
drnnLSTMtanhMakespan34=[794, 795, 795, 794, 794, 794, 794, 793, 794, 794, 794, 794]
drnnLSTMtanhMakespan35=[794, 794, 797, 793, 792, 794, 793, 794, 795, 794, 795, 792]
drnnLSTMtanhMakespan36=[794, 794, 793, 794, 795, 797, 795, 795, 794, 795, 793, 794]
drnnLSTMtanhMakespan37=[795, 793, 795, 794, 795, 798, 795, 794, 795, 793, 795, 794]
drnnLSTMtanhMakespan38=[794, 795, 793, 795, 794, 794, 794, 794, 794, 794, 797, 795]
drnnLSTMtanhMakespan39=[794, 794, 795, 794, 795, 795, 794, 795, 794, 795, 798, 797]
drnnLSTMtanhMakespan40=[795, 795, 794, 795, 794, 795, 795, 794, 794, 794, 795, 795]
drnnLSTMtanhMakespan41=[794, 795, 792, 794, 794, 798, 795, 794, 794, 794, 793, 795]
drnnLSTMtanhMakespan42=[793, 795, 794, 793, 794, 794, 792, 794, 795, 794, 794, 793]
drnnLSTMtanhMakespan43=[793, 792, 793, 794, 794, 795, 792, 794, 795, 794, 795, 794]
drnnLSTMtanhMakespan44=[793, 794, 795, 795, 794, 794, 795, 798, 794, 792, 795, 794]
drnnLSTMtanhMakespan45=[795, 794, 794, 794, 794, 792, 794, 795, 794, 796, 795, 794]
drnnLSTMtanhMakespan46=[794, 793, 793, 795, 795, 794, 794, 794, 794, 796, 794, 794]
drnnLSTMtanhMakespan47=[794, 794, 795, 794, 794, 795, 792, 795, 794, 795, 795, 794]
drnnLSTMtanhMakespan48=[794, 795, 794, 794, 794, 792, 794, 795, 796, 794, 794, 795]
drnnLSTMtanhMakespan49=[794, 794, 794, 794, 794, 794, 792, 794, 793, 794, 795, 794]
drnnLSTMtanhRewards0=[-0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17725973169122497, -0.1759911894273128, -0.177078750549934, -0.177078750549934, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnLSTMtanhRewards1=[-0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765]
drnnLSTMtanhRewards2=[-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.1768976897689769, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17653532907770195, -0.17562802996914942]
drnnLSTMtanhRewards3=[-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17671654929577466, -0.17508269018743108, -0.17653532907770195, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.1768976897689769, -0.1759911894273128, -0.17725973169122497]
drnnLSTMtanhRewards4=[-0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17725973169122497, -0.17617264919621228]
drnnLSTMtanhRewards5=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.1776214552648934, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drnnLSTMtanhRewards6=[-0.17617264919621228, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17671654929577466, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228]
drnnLSTMtanhRewards7=[-0.1759911894273128, -0.177078750549934, -0.17653532907770195, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387]
drnnLSTMtanhRewards8=[-0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1759911894273128, -0.17617264919621228, -0.1768976897689769]
drnnLSTMtanhRewards9=[-0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026]
drnnLSTMtanhRewards10=[-0.1768976897689769, -0.1759911894273128, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17544633017412387, -0.1759911894273128]
drnnLSTMtanhRewards11=[-0.17526455026455026, -0.17671654929577466, -0.177078750549934, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17580964970257765, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.1759911894273128, -0.1768976897689769]
drnnLSTMtanhRewards12=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1763540290620872, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drnnLSTMtanhRewards13=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108]
drnnLSTMtanhRewards14=[-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765]
drnnLSTMtanhRewards15=[-0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.1763540290620872, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drnnLSTMtanhRewards16=[-0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17617264919621228]
drnnLSTMtanhRewards17=[-0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387]
drnnLSTMtanhRewards18=[-0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108]
drnnLSTMtanhRewards19=[-0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards20=[-0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards21=[-0.17562802996914942, -0.17526455026455026, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026]
drnnLSTMtanhRewards22=[-0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards23=[-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.1759911894273128, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards24=[-0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards25=[-0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221]
drnnLSTMtanhRewards26=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards27=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards28=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards29=[-0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards30=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards31=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards32=[-0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards33=[-0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards34=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards35=[-0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards36=[-0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108]
drnnLSTMtanhRewards37=[-0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards38=[-0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drnnLSTMtanhRewards39=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942]
drnnLSTMtanhRewards40=[-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards41=[-0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026]
drnnLSTMtanhRewards42=[-0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221]
drnnLSTMtanhRewards43=[-0.1749007498897221, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards44=[-0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards45=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards46=[-0.17508269018743108, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards47=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards48=[-0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards49=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
# Deep Recurrent Reinforcement Learning: 1 LSTM layer and 4 Dense layers, relu activation function, 12 episodes, 50 iterations
drnnLSTMreluMakespan0=[805, 800, 800, 800, 794, 800, 798, 809, 795, 800, 798, 798]
drnnLSTMreluMakespan1=[798, 798, 796, 799, 800, 796, 796, 798, 798, 794, 798, 800]
drnnLSTMreluMakespan2=[805, 805, 798, 799, 806, 799, 806, 799, 800, 798, 805, 795]
drnnLSTMreluMakespan3=[800, 800, 800, 796, 800, 800, 799, 806, 808, 798, 797, 798]
drnnLSTMreluMakespan4=[805, 805, 795, 796, 799, 804, 798, 794, 798, 794, 796, 810]
drnnLSTMreluMakespan5=[798, 798, 798, 795, 800, 798, 796, 802, 800, 800, 805, 801]
drnnLSTMreluMakespan6=[800, 798, 798, 795, 800, 796, 800, 798, 799, 796, 805, 800]
drnnLSTMreluMakespan7=[800, 800, 800, 799, 798, 798, 800, 805, 800, 799, 800, 801]
drnnLSTMreluMakespan8=[799, 800, 800, 799, 795, 795, 805, 795, 798, 800, 798, 800]
drnnLSTMreluMakespan9=[800, 796, 805, 798, 798, 795, 805, 800, 799, 795, 800, 805]
drnnLSTMreluMakespan10=[805, 798, 805, 800, 801, 805, 799, 805, 798, 800, 800, 798]
drnnLSTMreluMakespan11=[798, 803, 800, 797, 795, 796, 794, 799, 800, 800, 800, 796]
drnnLSTMreluMakespan12=[799, 798, 799, 795, 798, 795, 798, 798, 798, 795, 798, 798]
drnnLSTMreluMakespan13=[798, 798, 799, 796, 798, 796, 800, 799, 796, 794, 796, 795]
drnnLSTMreluMakespan14=[796, 798, 806, 799, 804, 798, 805, 798, 800, 805, 794, 800]
drnnLSTMreluMakespan15=[806, 795, 800, 796, 798, 796, 810, 798, 799, 798, 800, 800]
drnnLSTMreluMakespan16=[799, 796, 798, 798, 798, 800, 798, 810, 796, 805, 800, 795]
drnnLSTMreluMakespan17=[798, 798, 798, 794, 798, 805, 801, 798, 800, 799, 798, 798]
drnnLSTMreluMakespan18=[795, 800, 794, 798, 797, 798, 794, 800, 797, 796, 794, 794]
drnnLSTMreluMakespan19=[798, 802, 794, 798, 799, 795, 797, 795, 800, 796, 797, 796]
drnnLSTMreluMakespan20=[794, 797, 795, 794, 799, 795, 795, 795, 800, 797, 794, 798]
drnnLSTMreluMakespan21=[799, 798, 796, 795, 794, 798, 795, 795, 798, 798, 795, 794]
drnnLSTMreluMakespan22=[794, 794, 795, 797, 795, 795, 795, 792, 794, 795, 794, 794]
drnnLSTMreluMakespan23=[794, 794, 794, 794, 795, 796, 793, 794, 795, 794, 797, 795]
drnnLSTMreluMakespan24=[794, 792, 792, 794, 796, 792, 794, 795, 794, 792, 796, 795]
drnnLSTMreluMakespan25=[794, 795, 795, 794, 794, 792, 795, 792, 795, 794, 794, 794]
drnnLSTMreluMakespan26=[795, 794, 794, 795, 794, 794, 793, 794, 797, 795, 794, 795]
drnnLSTMreluMakespan27=[794, 794, 795, 796, 795, 797, 794, 794, 795, 801, 794, 795]
drnnLSTMreluMakespan28=[795, 795, 795, 795, 794, 792, 794, 797, 794, 795, 795, 795]
drnnLSTMreluMakespan29=[794, 792, 798, 794, 797, 795, 793, 795, 795, 794, 795, 795]
drnnLSTMreluMakespan30=[795, 794, 798, 794, 794, 795, 792, 796, 794, 796, 794, 794]
drnnLSTMreluMakespan31=[794, 795, 795, 794, 795, 794, 795, 795, 794, 794, 795, 795]
drnnLSTMreluMakespan32=[798, 794, 794, 794, 798, 792, 795, 795, 795, 796, 794, 795]
drnnLSTMreluMakespan33=[794, 796, 794, 794, 794, 795, 794, 794, 797, 793, 793, 795]
drnnLSTMreluMakespan34=[794, 794, 795, 794, 794, 793, 794, 795, 793, 795, 795, 794]
drnnLSTMreluMakespan35=[798, 796, 795, 794, 795, 795, 795, 795, 794, 795, 797, 795]
drnnLSTMreluMakespan36=[794, 796, 794, 794, 794, 794, 795, 795, 797, 796, 795, 795]
drnnLSTMreluMakespan37=[795, 794, 796, 795, 795, 795, 795, 794, 792, 797, 794, 793]
drnnLSTMreluMakespan38=[794, 798, 794, 792, 794, 792, 795, 797, 793, 794, 794, 797]
drnnLSTMreluMakespan39=[792, 794, 794, 794, 792, 795, 795, 795, 794, 794, 795, 794]
drnnLSTMreluMakespan40=[792, 795, 795, 792, 795, 795, 794, 795, 794, 795, 794, 795]
drnnLSTMreluMakespan41=[794, 797, 795, 794, 795, 795, 798, 794, 795, 796, 796, 794]
drnnLSTMreluMakespan42=[794, 795, 795, 795, 794, 795, 795, 794, 794, 795, 793, 795]
drnnLSTMreluMakespan43=[795, 794, 795, 794, 795, 795, 792, 794, 794, 795, 794, 795]
drnnLSTMreluMakespan44=[795, 794, 792, 795, 794, 794, 795, 794, 796, 795, 796, 794]
drnnLSTMreluMakespan45=[795, 794, 793, 794, 793, 795, 794, 794, 795, 794, 795, 794]
drnnLSTMreluMakespan46=[794, 796, 793, 794, 794, 795, 799, 795, 794, 794, 794, 794]
drnnLSTMreluMakespan47=[794, 794, 794, 794, 795, 793, 795, 795, 794, 795, 795, 795]
drnnLSTMreluMakespan48=[794, 794, 795, 794, 795, 795, 795, 794, 794, 795, 795, 794]
drnnLSTMreluMakespan49=[795, 795, 795, 794, 795, 795, 794, 795, 793, 793, 792, 792]
drnnLSTMreluRewards0=[-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.1778021978021978, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards1=[-0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17617264919621228]
drnnLSTMreluRewards2=[-0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17725973169122497, -0.1759911894273128, -0.17725973169122497, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.177078750549934, -0.17526455026455026]
drnnLSTMreluRewards3=[-0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17725973169122497, -0.1776214552648934, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765]
drnnLSTMreluRewards4=[-0.177078750549934, -0.177078750549934, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.1768976897689769, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.17798286090969018]
drnnLSTMreluRewards5=[-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17653532907770195, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.1763540290620872]
drnnLSTMreluRewards6=[-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.177078750549934, -0.17617264919621228]
drnnLSTMreluRewards7=[-0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.1763540290620872]
drnnLSTMreluRewards8=[-0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.177078750549934, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228]
drnnLSTMreluRewards9=[-0.17617264919621228, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17526455026455026, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17617264919621228, -0.177078750549934]
drnnLSTMreluRewards10=[-0.177078750549934, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drnnLSTMreluRewards11=[-0.17580964970257765, -0.17671654929577466, -0.17617264919621228, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387]
drnnLSTMreluRewards12=[-0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards13=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.1759911894273128, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnLSTMreluRewards14=[-0.17544633017412387, -0.17580964970257765, -0.17725973169122497, -0.1759911894273128, -0.1768976897689769, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17508269018743108, -0.17617264919621228]
drnnLSTMreluRewards15=[-0.17725973169122497, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17798286090969018, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnLSTMreluRewards16=[-0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17798286090969018, -0.17544633017412387, -0.177078750549934, -0.17617264919621228, -0.17526455026455026]
drnnLSTMreluRewards17=[-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.177078750549934, -0.1763540290620872, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards18=[-0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17562802996914942, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards19=[-0.17580964970257765, -0.17653532907770195, -0.17508269018743108, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387]
drnnLSTMreluRewards20=[-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17562802996914942, -0.17508269018743108, -0.17580964970257765]
drnnLSTMreluRewards21=[-0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards22=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards23=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drnnLSTMreluRewards24=[-0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17508269018743108, -0.17544633017412387, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17544633017412387, -0.17526455026455026]
drnnLSTMreluRewards25=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards26=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards27=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1763540290620872, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards28=[-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards29=[-0.17508269018743108, -0.17471872931833224, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards30=[-0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards31=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards32=[-0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards33=[-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026]
drnnLSTMreluRewards34=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards35=[-0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026]
drnnLSTMreluRewards36=[-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards37=[-0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17562802996914942, -0.17508269018743108, -0.1749007498897221]
drnnLSTMreluRewards38=[-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942]
drnnLSTMreluRewards39=[-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards40=[-0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards41=[-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17508269018743108]
drnnLSTMreluRewards42=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026]
drnnLSTMreluRewards43=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards44=[-0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drnnLSTMreluRewards45=[-0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards46=[-0.17508269018743108, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards47=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards48=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards49=[-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.1749007498897221, -0.17471872931833224, -0.17471872931833224]
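# Observation (added): element by element, the reward lists in this file appear to be a
# deterministic function of the corresponding makespan lists, namely
# reward = -makespan / (makespan + 3741); e.g. drnnLSTMreluMakespan0[0] == 805 maps to
# drnnLSTMreluRewards0[0] == -805/4546 == -0.177078750549934. The constant 3741 is only
# inferred from the stored values and is not documented in this file, so the helper below
# is a hedged consistency check, not part of the original experiment pipeline.
def _assumed_reward_from_makespan(makespan, offset=3741):
    """Reward that the logged values appear to follow (relation inferred from the data)."""
    return -makespan / (makespan + offset)

# Quick check against one logged iteration (assumed relation):
# all(abs(_assumed_reward_from_makespan(m) - r) < 1e-12
#     for m, r in zip(drnnLSTMreluMakespan0, drnnLSTMreluRewards0))  # -> True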
# Deep Recurrent Reinforcement Learning: 1 GRU layer and 4 Dense layers, tanh activation function, 12 episodes, 50 iterations
drnnGRUtanhMakespan0 = [798, 799, 798, 804, 805, 799, 801, 801, 801, 799, 798, 796]
drnnGRUtanhMakespan1 = [800, 798, 798, 798, 798, 798, 801, 798, 795, 796, 800, 796]
drnnGRUtanhMakespan2 = [795, 804, 805, 800, 800, 796, 804, 800, 795, 798, 798, 801]
drnnGRUtanhMakespan3 = [806, 796, 794, 797, 798, 800, 800, 808, 805, 798, 800, 809]
drnnGRUtanhMakespan4 = [805, 801, 795, 798, 798, 800, 796, 796, 805, 798, 799, 798]
drnnGRUtanhMakespan5 = [804, 799, 798, 804, 796, 799, 798, 805, 796, 805, 798, 800]
drnnGRUtanhMakespan6 = [800, 799, 794, 801, 799, 796, 800, 804, 797, 796, 800, 798]
drnnGRUtanhMakespan7 = [798, 800, 810, 810, 805, 800, 795, 798, 800, 805, 799, 800]
drnnGRUtanhMakespan8 = [798, 797, 800, 800, 804, 805, 798, 798, 801, 795, 798, 809]
drnnGRUtanhMakespan9 = [803, 800, 800, 805, 805, 798, 804, 803, 805, 801, 810, 801]
drnnGRUtanhMakespan10 = [798, 799, 798, 798, 805, 804, 805, 798, 799, 798, 800, 800]
drnnGRUtanhMakespan11 = [796, 795, 805, 800, 800, 798, 795, 804, 805, 798, 800, 800]
drnnGRUtanhMakespan12 = [799, 799, 809, 800, 799, 799, 797, 805, 799, 800, 798, 795]
drnnGRUtanhMakespan13 = [805, 800, 800, 805, 800, 799, 798, 801, 798, 797, 805, 800]
drnnGRUtanhMakespan14 = [800, 798, 800, 800, 800, 804, 804, 799, 799, 800, 798, 798]
drnnGRUtanhMakespan15 = [805, 800, 795, 800, 804, 795, 800, 798, 799, 798, 800, 796]
drnnGRUtanhMakespan16 = [806, 795, 801, 799, 799, 796, 796, 794, 802, 796, 800, 802]
drnnGRUtanhMakespan17 = [796, 800, 798, 800, 794, 800, 804, 805, 798, 810, 800, 798]
drnnGRUtanhMakespan18 = [798, 800, 794, 794, 797, 798, 800, 805, 798, 798, 804, 798]
drnnGRUtanhMakespan19 = [796, 800, 806, 799, 796, 800, 798, 805, 798, 799, 797, 805]
drnnGRUtanhMakespan20 = [805, 800, 799, 796, 805, 805, 805, 794, 809, 796, 800, 797]
drnnGRUtanhMakespan21 = [798, 800, 800, 800, 798, 801, 796, 801, 801, 801, 795, 799]
drnnGRUtanhMakespan22 = [798, 801, 797, 800, 799, 795, 799, 799, 800, 801, 800, 799]
drnnGRUtanhMakespan23 = [800, 798, 799, 805, 794, 800, 798, 796, 796, 804, 800, 794]
drnnGRUtanhMakespan24 = [800, 800, 798, 805, 804, 799, 798, 801, 800, 798, 798, 798]
drnnGRUtanhMakespan25 = [798, 798, 798, 795, 800, 803, 798, 798, 800, 799, 796, 798]
drnnGRUtanhMakespan26 = [796, 798, 798, 798, 805, 796, 798, 798, 805, 795, 801, 796]
drnnGRUtanhMakespan27 = [794, 796, 796, 800, 800, 798, 800, 798, 802, 798, 797, 798]
drnnGRUtanhMakespan28 = [799, 799, 800, 800, 798, 802, 799, 798, 795, 795, 794, 798]
drnnGRUtanhMakespan29 = [798, 796, 796, 797, 796, 798, 800, 800, 796, 798, 800, 795]
drnnGRUtanhMakespan30 = [799, 798, 795, 795, 800, 795, 798, 798, 799, 798, 805, 799]
drnnGRUtanhMakespan31 = [795, 799, 794, 794, 796, 795, 795, 794, 798, 797, 798, 795]
drnnGRUtanhMakespan32 = [797, 798, 795, 796, 798, 795, 797, 798, 795, 794, 795, 796]
drnnGRUtanhMakespan33 = [799, 795, 794, 794, 798, 795, 798, 797, 800, 796, 795, 794]
drnnGRUtanhMakespan34 = [798, 795, 798, 796, 798, 794, 796, 798, 798, 798, 796, 797]
drnnGRUtanhMakespan35 = [795, 798, 796, 798, 794, 801, 795, 800, 795, 800, 794, 800]
drnnGRUtanhMakespan36 = [798, 799, 796, 797, 795, 794, 800, 795, 795, 794, 795, 795]
drnnGRUtanhMakespan37 = [799, 798, 795, 795, 794, 795, 795, 796, 805, 795, 798, 796]
drnnGRUtanhMakespan38 = [798, 794, 795, 795, 795, 796, 795, 796, 800, 798, 797, 796]
drnnGRUtanhMakespan39 = [794, 795, 795, 797, 795, 795, 794, 794, 798, 795, 794, 798]
drnnGRUtanhMakespan40 = [795, 795, 795, 795, 795, 795, 794, 794, 793, 797, 794, 795]
drnnGRUtanhMakespan41 = [794, 794, 795, 793, 795, 795, 792, 794, 795, 794, 794, 794]
drnnGRUtanhMakespan42 = [795, 795, 795, 796, 794, 797, 795, 795, 792, 795, 796, 793]
drnnGRUtanhMakespan43 = [794, 795, 795, 794, 795, 794, 798, 794, 797, 795, 794, 794]
drnnGRUtanhMakespan44 = [795, 795, 793, 794, 795, 794, 795, 795, 794, 794, 795, 794]
drnnGRUtanhMakespan45 = [794, 794, 794, 794, 794, 794, 795, 794, 794, 794, 796, 795]
drnnGRUtanhMakespan46 = [795, 794, 795, 794, 794, 794, 793, 794, 795, 795, 794, 797]
drnnGRUtanhMakespan47 = [794, 794, 794, 794, 795, 794, 795, 792, 794, 795, 794, 794]
drnnGRUtanhMakespan48 = [795, 794, 794, 794, 795, 798, 794, 794, 794, 795, 794, 794]
drnnGRUtanhMakespan49 = [795, 795, 794, 795, 793, 795, 796, 794, 795, 794, 794, 797]
drnnGRUtanhRewards0 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.1768976897689769, -0.177078750549934, -0.1759911894273128, -0.1763540290620872, -0.1763540290620872, -0.1763540290620872, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387]
drnnGRUtanhRewards1 = [-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387]
drnnGRUtanhRewards2 = [-0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872]
drnnGRUtanhRewards3 = [-0.17725973169122497, -0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.1776214552648934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.1778021978021978]
drnnGRUtanhRewards4 = [-0.177078750549934, -0.1763540290620872, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUtanhRewards5 = [-0.1768976897689769, -0.1759911894273128, -0.17580964970257765, -0.1768976897689769, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17617264919621228]
drnnGRUtanhRewards6 = [-0.17617264919621228, -0.1759911894273128, -0.17508269018743108, -0.1763540290620872, -0.1759911894273128, -0.17544633017412387, -0.17617264919621228, -0.1768976897689769, -0.17562802996914942, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765]
drnnGRUtanhRewards7 = [-0.17580964970257765, -0.17617264919621228, -0.17798286090969018, -0.177078750549934, -0.17798286090969018, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17617264919621228]
drnnGRUtanhRewards8 = [-0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17580964970257765, -0.1778021978021978]
drnnGRUtanhRewards9 = [-0.17671654929577466, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.177078750549934, -0.1763540290620872, -0.17798286090969018, -0.1763540290620872]
drnnGRUtanhRewards10 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnGRUtanhRewards11 = [-0.17544633017412387, -0.17526455026455026, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnGRUtanhRewards12 = [-0.1759911894273128, -0.1759911894273128, -0.1778021978021978, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17562802996914942, -0.177078750549934, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026]
drnnGRUtanhRewards13 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17562802996914942, -0.177078750549934, -0.17617264919621228]
drnnGRUtanhRewards14 = [-0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1768976897689769, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnGRUtanhRewards15 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17526455026455026, -0.1768976897689769, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387]
drnnGRUtanhRewards16 = [-0.17725973169122497, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17508269018743108, -0.17653532907770195, -0.17544633017412387, -0.17617264919621228, -0.17653532907770195]
drnnGRUtanhRewards17 = [-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17798286090969018, -0.17617264919621228, -0.17580964970257765]
drnnGRUtanhRewards18 = [-0.17580964970257765, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.1768976897689769, -0.17580964970257765]
drnnGRUtanhRewards19 = [-0.17544633017412387, -0.17617264919621228, -0.17725973169122497, -0.1759911894273128, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17562802996914942, -0.1759911894273128, -0.177078750549934]
drnnGRUtanhRewards20 = [-0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17544633017412387, -0.177078750549934, -0.177078750549934, -0.177078750549934, -0.17508269018743108, -0.1778021978021978, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942]
drnnGRUtanhRewards21 = [-0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.17544633017412387, -0.1763540290620872, -0.1763540290620872, -0.1763540290620872, -0.17526455026455026, -0.1759911894273128]
drnnGRUtanhRewards22 = [-0.17580964970257765, -0.1763540290620872, -0.17562802996914942, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.1763540290620872, -0.17617264919621228, -0.1759911894273128]
drnnGRUtanhRewards23 = [-0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17508269018743108]
drnnGRUtanhRewards24 = [-0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.177078750549934, -0.1768976897689769, -0.17580964970257765, -0.1763540290620872, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765]
drnnGRUtanhRewards25 = [-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17671654929577466, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765]
drnnGRUtanhRewards26 = [-0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17526455026455026, -0.1763540290620872, -0.17544633017412387]
drnnGRUtanhRewards27 = [-0.17508269018743108, -0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765]
drnnGRUtanhRewards28 = [-0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drnnGRUtanhRewards29 = [-0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026]
drnnGRUtanhRewards30 = [-0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.1759911894273128]
drnnGRUtanhRewards31 = [-0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026]
drnnGRUtanhRewards32 = [-0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnGRUtanhRewards33 = [-0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drnnGRUtanhRewards34 = [-0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942]
drnnGRUtanhRewards35 = [-0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.1763540290620872, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228]
drnnGRUtanhRewards36 = [-0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnGRUtanhRewards37 = [-0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.177078750549934, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drnnGRUtanhRewards38 = [-0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17562802996914942, -0.17544633017412387]
drnnGRUtanhRewards39 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765]
drnnGRUtanhRewards40 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026]
drnnGRUtanhRewards41 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards42 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17544633017412387, -0.1749007498897221]
drnnGRUtanhRewards43 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards44 = [-0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnGRUtanhRewards45 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnGRUtanhRewards46 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942]
drnnGRUtanhRewards47 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards48 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards49 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942]
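# Illustrative sketch (added): the configuration comments in this file describe networks
# with one recurrent layer (LSTM or GRU) followed by 4 Dense layers using tanh or relu
# activations. The original model code is not included here, so the snippet below is only
# an assumed reconstruction in Keras; the layer widths, input shape and output size are
# placeholders, not values taken from the experiments.
def build_assumed_drnn(recurrent="gru", activation="tanh",
                       timesteps=12, features=8, n_actions=4):
    """Assumed reconstruction of the '1 recurrent layer + 4 Dense layers' network;
    all sizes here are placeholders, not values from the original experiments."""
    from tensorflow import keras  # imported lazily so this data module stays importable without TF
    Recurrent = keras.layers.GRU if recurrent == "gru" else keras.layers.LSTM
    return keras.Sequential([
        Recurrent(64, input_shape=(timesteps, features)),  # single recurrent layer
        keras.layers.Dense(64, activation=activation),     # 4 Dense layers follow
        keras.layers.Dense(64, activation=activation),
        keras.layers.Dense(32, activation=activation),
        keras.layers.Dense(n_actions),                     # linear output over actions
    ])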
# Deep Recurrent Reinforcement Learning: 1 GRU layer and 4 Dense layers, relu activation function, 12 episodes, 50 iterations
drnnGRUreluMakespan0 = [800, 799, 798, 797, 798, 800, 800, 796, 800, 794, 800, 800]
drnnGRUreluMakespan1 = [798, 800, 805, 795, 799, 808, 795, 800, 796, 798, 799, 798]
drnnGRUreluMakespan2 = [799, 800, 806, 800, 800, 805, 805, 798, 799, 807, 800, 800]
drnnGRUreluMakespan3 = [798, 795, 799, 800, 800, 796, 798, 800, 800, 804, 805, 800]
drnnGRUreluMakespan4 = [811, 800, 799, 800, 805, 798, 798, 799, 796, 804, 805, 804]
drnnGRUreluMakespan5 = [799, 795, 797, 800, 798, 800, 800, 798, 800, 797, 800, 798]
drnnGRUreluMakespan6 = [798, 800, 798, 799, 797, 798, 800, 796, 801, 799, 795, 798]
drnnGRUreluMakespan7 = [800, 804, 795, 801, 796, 806, 805, 798, 800, 799, 799, 804]
drnnGRUreluMakespan8 = [800, 799, 799, 800, 805, 796, 800, 800, 810, 796, 800, 798]
drnnGRUreluMakespan9 = [794, 800, 799, 805, 800, 800, 798, 798, 796, 795, 798, 796]
drnnGRUreluMakespan10 = [798, 800, 798, 801, 795, 802, 796, 809, 800, 800, 798, 795]
drnnGRUreluMakespan11 = [804, 800, 799, 799, 798, 803, 798, 798, 805, 803, 800, 796]
drnnGRUreluMakespan12 = [800, 799, 805, 797, 798, 796, 799, 794, 799, 805, 799, 800]
drnnGRUreluMakespan13 = [796, 800, 798, 800, 795, 799, 800, 804, 800, 794, 805, 805]
drnnGRUreluMakespan14 = [800, 795, 796, 798, 798, 801, 805, 794, 800, 801, 801, 796]
drnnGRUreluMakespan15 = [798, 800, 796, 796, 798, 794, 797, 800, 796, 801, 795, 799]
drnnGRUreluMakespan16 = [800, 805, 794, 800, 799, 800, 805, 801, 798, 800, 801, 799]
drnnGRUreluMakespan17 = [797, 803, 801, 808, 794, 799, 799, 800, 805, 796, 801, 796]
drnnGRUreluMakespan18 = [805, 800, 800, 804, 799, 798, 800, 799, 804, 796, 800, 804]
drnnGRUreluMakespan19 = [804, 798, 800, 799, 799, 799, 805, 795, 801, 799, 799, 805]
drnnGRUreluMakespan20 = [799, 804, 796, 798, 796, 798, 800, 805, 799, 810, 800, 800]
drnnGRUreluMakespan21 = [798, 799, 799, 805, 798, 798, 805, 798, 794, 799, 798, 798]
drnnGRUreluMakespan22 = [799, 798, 798, 796, 798, 805, 799, 798, 798, 799, 796, 798]
drnnGRUreluMakespan23 = [798, 805, 808, 798, 798, 805, 810, 796, 804, 799, 800, 799]
drnnGRUreluMakespan24 = [798, 796, 798, 795, 800, 798, 799, 798, 797, 805, 798, 800]
drnnGRUreluMakespan25 = [799, 796, 799, 798, 805, 798, 798, 800, 796, 794, 810, 798]
drnnGRUreluMakespan26 = [799, 798, 805, 800, 802, 798, 799, 799, 799, 794, 802, 797]
drnnGRUreluMakespan27 = [798, 800, 805, 796, 798, 795, 802, 796, 798, 800, 798, 794]
drnnGRUreluMakespan28 = [796, 805, 798, 800, 800, 798, 810, 798, 798, 798, 796, 796]
drnnGRUreluMakespan29 = [800, 798, 798, 802, 794, 798, 796, 808, 800, 800, 798, 799]
drnnGRUreluMakespan30 = [798, 796, 798, 798, 794, 798, 794, 800, 796, 794, 800, 800]
drnnGRUreluMakespan31 = [794, 802, 797, 799, 798, 800, 799, 799, 796, 796, 798, 798]
drnnGRUreluMakespan32 = [799, 798, 794, 795, 798, 805, 804, 797, 795, 800, 796, 798]
drnnGRUreluMakespan33 = [803, 799, 805, 796, 794, 798, 797, 798, 798, 794, 794, 798]
drnnGRUreluMakespan34 = [810, 796, 795, 798, 799, 798, 796, 795, 795, 797, 798, 798]
drnnGRUreluMakespan35 = [799, 799, 799, 799, 795, 798, 795, 800, 796, 795, 795, 796]
drnnGRUreluMakespan36 = [795, 797, 798, 799, 799, 799, 800, 794, 796, 795, 798, 800]
drnnGRUreluMakespan37 = [800, 798, 799, 794, 800, 796, 798, 798, 797, 800, 794, 798]
drnnGRUreluMakespan38 = [800, 799, 794, 796, 795, 800, 796, 804, 800, 795, 800, 798]
drnnGRUreluMakespan39 = [794, 798, 795, 804, 805, 799, 798, 800, 796, 798, 795, 794]
drnnGRUreluMakespan40 = [799, 798, 796, 798, 798, 799, 800, 796, 798, 798, 799, 798]
drnnGRUreluMakespan41 = [796, 798, 800, 797, 799, 796, 797, 796, 799, 804, 805, 798]
drnnGRUreluMakespan42 = [798, 794, 795, 799, 799, 798, 797, 798, 798, 798, 798, 795]
drnnGRUreluMakespan43 = [799, 798, 794, 794, 795, 794, 795, 799, 799, 800, 799, 794]
drnnGRUreluMakespan44 = [795, 796, 795, 799, 794, 795, 794, 796, 795, 794, 795, 796]
drnnGRUreluMakespan45 = [794, 797, 794, 795, 796, 795, 794, 799, 795, 794, 798, 798]
drnnGRUreluMakespan46 = [795, 795, 794, 795, 794, 794, 792, 794, 795, 797, 794, 794]
drnnGRUreluMakespan47 = [798, 796, 797, 798, 794, 798, 794, 797, 794, 803, 798, 798]
drnnGRUreluMakespan48 = [795, 794, 796, 798, 795, 794, 796, 795, 796, 794, 796, 796]
drnnGRUreluMakespan49 = [798, 798, 796, 798, 798, 796, 796, 798, 798, 798, 796, 798]
drnnGRUreluRewards0 = [-0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards1 = [-0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17526455026455026, -0.1759911894273128, -0.1776214552648934, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUreluRewards2 = [-0.1759911894273128, -0.17617264919621228, -0.17725973169122497, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.1774406332453826, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards3 = [-0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.177078750549934, -0.17617264919621228]
drnnGRUreluRewards4 = [-0.1781634446397188, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.1768976897689769, -0.177078750549934, -0.1768976897689769]
drnnGRUreluRewards5 = [-0.1759911894273128, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards6 = [-0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765]
drnnGRUreluRewards7 = [-0.17617264919621228, -0.1768976897689769, -0.17526455026455026, -0.1763540290620872, -0.17544633017412387, -0.17725973169122497, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.1768976897689769]
drnnGRUreluRewards8 = [-0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17798286090969018, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards9 = [-0.17508269018743108, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drnnGRUreluRewards10 = [-0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.17526455026455026, -0.17653532907770195, -0.17544633017412387, -0.1778021978021978, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026]
drnnGRUreluRewards11 = [-0.1768976897689769, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17580964970257765, -0.17671654929577466, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17671654929577466, -0.17617264919621228, -0.17544633017412387]
drnnGRUreluRewards12 = [-0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17508269018743108, -0.1759911894273128, -0.177078750549934, -0.1759911894273128, -0.17617264919621228]
drnnGRUreluRewards13 = [-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.1768976897689769, -0.17617264919621228, -0.17508269018743108, -0.177078750549934, -0.177078750549934]
drnnGRUreluRewards14 = [-0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.1763540290620872, -0.1763540290620872, -0.17544633017412387]
drnnGRUreluRewards15 = [-0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.1763540290620872, -0.17526455026455026, -0.1759911894273128]
drnnGRUreluRewards16 = [-0.17617264919621228, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.1763540290620872, -0.17580964970257765, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128]
drnnGRUreluRewards17 = [-0.17562802996914942, -0.17671654929577466, -0.1763540290620872, -0.1776214552648934, -0.17508269018743108, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17544633017412387, -0.1763540290620872, -0.17544633017412387]
drnnGRUreluRewards18 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1768976897689769, -0.17544633017412387, -0.17617264919621228, -0.1768976897689769]
drnnGRUreluRewards19 = [-0.1768976897689769, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128, -0.1759911894273128, -0.177078750549934]
drnnGRUreluRewards20 = [-0.1759911894273128, -0.1768976897689769, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17798286090969018, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards21 = [-0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17508269018743108, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards22 = [-0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.177078750549934, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765]
drnnGRUreluRewards23 = [-0.17580964970257765, -0.177078750549934, -0.1776214552648934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17798286090969018, -0.17544633017412387, -0.1768976897689769, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128]
drnnGRUreluRewards24 = [-0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.177078750549934, -0.17580964970257765, -0.17617264919621228]
drnnGRUreluRewards25 = [-0.1759911894273128, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17798286090969018, -0.17580964970257765]
drnnGRUreluRewards26 = [-0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.17653532907770195, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17508269018743108, -0.17653532907770195, -0.17562802996914942]
drnnGRUreluRewards27 = [-0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17653532907770195, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17508269018743108]
drnnGRUreluRewards28 = [-0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17798286090969018, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387]
drnnGRUreluRewards29 = [-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17653532907770195, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.1776214552648934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128]
drnnGRUreluRewards30 = [-0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards31 = [-0.17508269018743108, -0.17653532907770195, -0.17562802996914942, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards32 = [-0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.1768976897689769, -0.177078750549934, -0.17562802996914942, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drnnGRUreluRewards33 = [-0.17671654929577466, -0.1759911894273128, -0.177078750549934, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765]
drnnGRUreluRewards34 = [-0.17798286090969018, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards35 = [-0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387]
drnnGRUreluRewards36 = [-0.17526455026455026, -0.17562802996914942, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228]
drnnGRUreluRewards37 = [-0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765]
drnnGRUreluRewards38 = [-0.17617264919621228, -0.1759911894273128, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards39 = [-0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108]
drnnGRUreluRewards40 = [-0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUreluRewards41 = [-0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.1759911894273128, -0.1768976897689769, -0.177078750549934, -0.17580964970257765]
drnnGRUreluRewards42 = [-0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026]
drnnGRUreluRewards43 = [-0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.17508269018743108]
drnnGRUreluRewards44 = [-0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387]
drnnGRUreluRewards45 = [-0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards46 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108]
drnnGRUreluRewards47 = [-0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17671654929577466, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards48 = [-0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387]
drnnGRUreluRewards49 = [-0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765]
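# Illustrative sketch (added): each *MakespanN list above holds the 12 episode makespans
# logged at iteration N of a 50-iteration run. A minimal way to compare configurations is
# to reduce each list to its per-iteration mean and best value; the helper below is only
# an example of that reduction and is not part of the original experiment code.
def _assumed_iteration_summary(makespan_lists):
    """Return (mean, best) makespan per iteration for a sequence of 12-episode lists."""
    return [(sum(ep) / len(ep), min(ep)) for ep in makespan_lists]

# Example usage with two of the iteration logs defined above:
# _assumed_iteration_summary([drnnGRUreluMakespan0, drnnGRUreluMakespan49])
# -> [(798.5, 794), (797.33..., 796)]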
# Deep Reinforcement Learning: 5 Dense layers, tanh activation function, 12 episodes, 50 iterations
drlTanhMakespan0 = [794, 794, 805, 799, 810, 800, 794, 810, 804, 806, 812, 808]
drlTanhMakespan1 = [796, 795, 795, 798, 799, 800, 800, 795, 797, 796, 797, 799]
drlTanhMakespan2 = [800, 797, 798, 801, 799, 800, 796, 795, 797, 796, 794, 798]
drlTanhMakespan3 = [800, 795, 799, 796, 799, 798, 795, 799, 795, 799, 798, 796]
drlTanhMakespan4 = [809, 795, 795, 800, 797, 795, 798, 798, 799, 799, 798, 798]
drlTanhMakespan5 = [795, 795, 795, 799, 795, 798, 795, 800, 795, 796, 795, 805]
drlTanhMakespan6 = [794, 800, 795, 793, 798, 795, 794, 798, 795, 799, 795, 796]
drlTanhMakespan7 = [795, 795, 795, 795, 798, 795, 797, 797, 795, 795, 798, 797]
drlTanhMakespan8 = [795, 795, 795, 794, 800, 800, 794, 795, 794, 794, 797, 795]
drlTanhMakespan9 = [793, 794, 796, 795, 796, 800, 794, 797, 793, 795, 798, 795]
drlTanhMakespan10 = [795, 795, 797, 794, 795, 798, 797, 795, 798, 794, 794, 794]
drlTanhMakespan11 = [795, 795, 795, 795, 797, 795, 795, 794, 795, 795, 795, 794]
drlTanhMakespan12 = [794, 798, 795, 794, 795, 795, 795, 797, 799, 795, 795, 795]
drlTanhMakespan13 = [795, 797, 795, 800, 796, 795, 796, 795, 795, 795, 798, 794]
drlTanhMakespan14 = [795, 795, 796, 794, 794, 794, 797, 795, 798, 795, 795, 793]
drlTanhMakespan15 = [799, 794, 795, 795, 795, 796, 801, 797, 795, 794, 795, 799]
drlTanhMakespan16 = [795, 795, 796, 798, 795, 795, 795, 795, 795, 798, 798, 796]
drlTanhMakespan17 = [800, 798, 795, 795, 798, 794, 795, 795, 797, 795, 796, 794]
drlTanhMakespan18 = [797, 800, 798, 797, 796, 794, 799, 797, 795, 796, 799, 798]
drlTanhMakespan19 = [797, 800, 795, 794, 794, 796, 795, 798, 796, 798, 797, 795]
drlTanhMakespan20 = [794, 795, 795, 799, 798, 797, 795, 795, 798, 795, 798, 795]
drlTanhMakespan21 = [796, 795, 795, 795, 795, 797, 798, 794, 797, 795, 796, 794]
drlTanhMakespan22 = [799, 796, 795, 795, 795, 795, 796, 795, 796, 798, 796, 795]
drlTanhMakespan23 = [799, 799, 795, 796, 796, 799, 796, 797, 794, 794, 798, 796]
drlTanhMakespan24 = [795, 795, 797, 800, 797, 795, 795, 796, 795, 795, 798, 799]
drlTanhMakespan25 = [795, 797, 795, 795, 795, 795, 800, 796, 795, 797, 795, 795]
drlTanhMakespan26 = [795, 795, 799, 794, 797, 794, 794, 798, 794, 796, 795, 798]
drlTanhMakespan27 = [796, 796, 795, 796, 798, 797, 794, 795, 794, 794, 794, 798]
drlTanhMakespan28 = [795, 795, 794, 798, 796, 796, 800, 797, 797, 796, 795, 794]
drlTanhMakespan29 = [795, 795, 798, 800, 797, 794, 796, 794, 792, 794, 794, 795]
drlTanhMakespan30 = [798, 797, 795, 799, 797, 800, 798, 799, 797, 800, 794, 796]
drlTanhMakespan31 = [794, 795, 800, 798, 800, 794, 800, 798, 799, 798, 798, 798]
drlTanhMakespan32 = [795, 795, 795, 794, 794, 794, 793, 795, 794, 793, 794, 795]
drlTanhMakespan33 = [794, 797, 792, 794, 795, 795, 797, 795, 795, 794, 792, 795]
drlTanhMakespan34 = [795, 794, 795, 798, 795, 796, 794, 795, 794, 794, 795, 794]
drlTanhMakespan35 = [796, 794, 797, 793, 794, 798, 795, 794, 793, 793, 795, 794]
drlTanhMakespan36 = [795, 795, 794, 795, 795, 795, 794, 795, 795, 793, 795, 794]
drlTanhMakespan37 = [794, 794, 798, 794, 794, 796, 795, 794, 793, 795, 795, 792]
drlTanhMakespan38 = [794, 796, 795, 794, 798, 798, 795, 795, 794, 794, 795, 794]
drlTanhMakespan39 = [794, 795, 795, 796, 792, 794, 795, 794, 795, 794, 794, 795]
drlTanhMakespan40 = [798, 795, 794, 795, 794, 794, 793, 795, 794, 794, 797, 794]
drlTanhMakespan41 = [795, 792, 795, 794, 794, 795, 794, 795, 792, 797, 795, 795]
drlTanhMakespan42 = [792, 794, 794, 795, 794, 794, 795, 794, 792, 794, 794, 794]
drlTanhMakespan43 = [794, 796, 794, 793, 795, 795, 793, 798, 794, 794, 798, 794]
drlTanhMakespan44 = [794, 794, 794, 794, 795, 794, 793, 794, 794, 795, 795, 794]
drlTanhMakespan45 = [790, 794, 793, 794, 793, 794, 795, 794, 791, 795, 795, 794]
drlTanhMakespan46 = [792, 794, 794, 794, 794, 794, 794, 793, 794, 794, 794, 794]
drlTanhMakespan47 = [794, 794, 794, 794, 794, 794, 794, 794, 792, 795, 793, 795]
drlTanhMakespan48 = [794, 794, 792, 792, 797, 794, 792, 794, 794, 795, 794, 795]
drlTanhMakespan49 = [795, 794, 794, 796, 794, 797, 794, 794, 794, 794, 794, 794]
drlTanhMakespan50 = [794, 792, 795, 794, 794, 794, 794, 794, 795, 794, 795, 794]
drlTanhMakespan51 = [794, 792, 796, 795, 794, 794, 795, 794, 795, 795, 795, 794]
drlTanhMakespan52 = [794, 794, 795, 792, 795, 795, 795, 792, 794, 793, 795, 794]
drlTanhMakespan53 = [794, 792, 794, 792, 794, 794, 794, 795, 795, 794, 794, 792]
drlTanhMakespan54 = [795, 793, 794, 794, 794, 792, 795, 794, 794, 792, 794, 796]
drlTanhMakespan55 = [795, 794, 794, 795, 795, 793, 794, 795, 794, 797, 795, 792]
drlTanhMakespan56 = [795, 795, 792, 795, 794, 795, 794, 794, 794, 795, 795, 795]
drlTanhMakespan57 = [795, 792, 795, 794, 795, 795, 792, 795, 794, 797, 792, 792]
drlTanhMakespan58 = [795, 795, 794, 795, 792, 794, 794, 794, 792, 792, 792, 793]
drlTanhMakespan59 = [795, 794, 792, 794, 794, 794, 792, 794, 794, 794, 793, 795]
drlTanhMakespan60 = [794, 795, 795, 795, 798, 794, 794, 794, 794, 794, 794, 792]
drlTanhMakespan61 = [792, 795, 794, 794, 795, 794, 792, 795, 795, 794, 794, 795]
drlTanhMakespan62 = [795, 794, 794, 794, 799, 794, 792, 794, 795, 795, 794, 793]
drlTanhMakespan63 = [791, 795, 792, 796, 794, 794, 792, 795, 793, 794, 792, 794]
drlTanhRewards0 = [-0.17508269018743108, -0.17508269018743108, -0.177078750549934, -0.1759911894273128, -0.17798286090969018, -0.17617264919621228, -0.17508269018743108, -0.17798286090969018, -0.1768976897689769, -0.17725973169122497, -0.17834394904458598, -0.1776214552648934]
drlTanhRewards1 = [-0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.17526455026455026, -0.17562802996914942, -0.17544633017412387, -0.17562802996914942, -0.1759911894273128]
drlTanhRewards2 = [-0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.1763540290620872, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17544633017412387, -0.17580964970257765]
drlTanhRewards3 = [-0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards4 = [-0.1778021978021978, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drlTanhRewards5 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.177078750549934]
drlTanhRewards6 = [-0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17544633017412387]
drlTanhRewards7 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942]
drlTanhRewards8 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drlTanhRewards9 = [-0.1749007498897221, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026]
drlTanhRewards10 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards11 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards12 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards13 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards14 = [-0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.1749007498897221]
drlTanhRewards15 = [-0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17562802996914942, -0.1763540290620872, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128]
drlTanhRewards16 = [-0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards17 = [-0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drlTanhRewards18 = [-0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.17562802996914942, -0.17544633017412387, -0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drlTanhRewards19 = [-0.17562802996914942, -0.17617264919621228, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026]
drlTanhRewards20 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026]
drlTanhRewards21 = [-0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drlTanhRewards22 = [-0.1759911894273128, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026]
drlTanhRewards23 = [-0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards24 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128]
drlTanhRewards25 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards26 = [-0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765]
drlTanhRewards27 = [-0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards28 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards29 = [-0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards30 = [-0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.1759911894273128, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387]
drlTanhRewards31 = [-0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drlTanhRewards32 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards33 = [-0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026]
drlTanhRewards34 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards35 = [-0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards36 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards37 = [-0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224]
drlTanhRewards38 = [-0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards39 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards40 = [-0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108]
drlTanhRewards41 = [-0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards42 = [-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards43 = [-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards44 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards45 = [-0.1749007498897221, -0.17435444714191128, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17453662842012357, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards46 = [-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards47 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026]
drlTanhRewards48 = [-0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards49 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards50 = [-0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards51 = [-0.17508269018743108, -0.17471872931833224, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards52 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards53 = [-0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224]
drlTanhRewards54 = [-0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17544633017412387]
drlTanhRewards55 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17471872931833224]
drlTanhRewards56 = [-0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards57 = [-0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17471872931833224]
drlTanhRewards58 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17471872931833224, -0.1749007498897221]
drlTanhRewards59 = [-0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026]
drlTanhRewards60 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224]
drlTanhRewards61 = [-0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards62 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1759911894273128, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221]
drlTanhRewards63 = [-0.17453662842012357, -0.17471872931833224, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108]
# Deep Reinforcement Learning: 5 Dense layers, relu activation function, 12 episodes, 50 iterations
drlReluMakespan0 = [796, 798, 809, 798, 796, 800, 798, 799, 800, 794, 800, 798]
drlReluMakespan1 = [800, 800, 801, 806, 804, 806, 808, 798, 796, 796, 798, 800]
drlReluMakespan2 = [805, 805, 798, 800, 800, 798, 801, 799, 800, 806, 800, 800]
drlReluMakespan3 = [798, 799, 798, 795, 798, 808, 803, 800, 798, 795, 799, 800]
drlReluMakespan4 = [805, 805, 799, 796, 798, 803, 799, 800, 800, 800, 795, 794]
drlReluMakespan5 = [799, 796, 795, 800, 801, 796, 800, 795, 803, 800, 800, 805]
drlReluMakespan6 = [799, 795, 798, 794, 805, 796, 795, 799, 798, 795, 804, 796]
drlReluMakespan7 = [795, 798, 799, 798, 798, 799, 795, 794, 796, 794, 795, 805]
drlReluMakespan8 = [805, 794, 794, 795, 798, 795, 798, 795, 799, 800, 796, 798]
drlReluMakespan9 = [797, 797, 797, 794, 795, 794, 794, 797, 796, 795, 801, 799]
drlReluMakespan10 = [799, 794, 797, 795, 794, 794, 795, 795, 795, 796, 797, 799]
drlReluMakespan11 = [796, 798, 800, 795, 805, 794, 798, 796, 795, 794, 798, 795]
drlReluMakespan12 = [800, 795, 794, 798, 800, 805, 800, 798, 804, 799, 794, 803]
drlReluMakespan13 = [796, 799, 798, 794, 800, 794, 795, 796, 798, 795, 794, 799]
drlReluMakespan14 = [795, 798, 798, 798, 805, 798, 798, 798, 795, 794, 800, 796]
drlReluMakespan15 = [795, 798, 795, 805, 798, 794, 795, 798, 796, 794, 795, 796]
drlReluMakespan16 = [798, 795, 796, 799, 796, 798, 798, 795, 795, 795, 795, 799]
drlReluMakespan17 = [794, 798, 796, 798, 795, 801, 794, 798, 797, 795, 796, 801]
drlReluMakespan18 = [798, 795, 798, 798, 801, 798, 795, 795, 797, 800, 794, 800]
drlReluMakespan19 = [795, 798, 794, 800, 796, 795, 798, 797, 795, 794, 796, 796]
drlReluMakespan20 = [794, 794, 795, 795, 795, 795, 796, 798, 799, 799, 799, 795]
drlReluMakespan21 = [802, 796, 794, 797, 797, 800, 794, 794, 804, 803, 798, 797]
drlReluMakespan22 = [794, 795, 795, 795, 798, 795, 794, 799, 794, 803, 795, 794]
drlReluMakespan23 = [794, 798, 799, 794, 795, 795, 799, 795, 796, 795, 797, 799]
drlReluMakespan24 = [795, 794, 797, 800, 794, 795, 795, 795, 795, 800, 800, 798]
drlReluMakespan25 = [795, 794, 797, 796, 798, 795, 795, 794, 799, 795, 794, 798]
drlReluMakespan26 = [801, 795, 800, 794, 794, 796, 800, 798, 798, 799, 794, 796]
drlReluMakespan27 = [796, 795, 796, 795, 796, 795, 795, 800, 794, 794, 794, 796]
drlReluMakespan28 = [794, 794, 795, 796, 794, 795, 795, 797, 794, 794, 796, 795]
drlReluMakespan29 = [793, 794, 795, 800, 795, 795, 794, 798, 798, 796, 795, 794]
drlReluMakespan30 = [802, 794, 794, 798, 794, 796, 805, 794, 800, 794, 796, 794]
drlReluMakespan31 = [797, 794, 794, 794, 800, 800, 794, 794, 798, 795, 794, 798]
drlReluMakespan32 = [794, 798, 794, 795, 794, 795, 798, 794, 794, 795, 794, 798]
drlReluMakespan33 = [798, 794, 798, 795, 794, 793, 797, 798, 794, 794, 801, 793]
drlReluMakespan34 = [794, 798, 794, 795, 794, 793, 798, 795, 794, 800, 794, 795]
drlReluMakespan35 = [794, 796, 794, 796, 806, 795, 795, 795, 796, 795, 795, 799]
drlReluMakespan36 = [795, 794, 794, 796, 796, 798, 794, 796, 794, 795, 794, 795]
drlReluMakespan37 = [795, 794, 795, 798, 794, 794, 794, 794, 794, 794, 795, 797]
drlReluMakespan38 = [794, 798, 794, 798, 797, 794, 794, 795, 795, 794, 795, 795]
drlReluMakespan39 = [797, 794, 795, 796, 796, 796, 798, 794, 794, 795, 794, 798]
drlReluMakespan40 = [798, 795, 795, 798, 792, 795, 795, 794, 795, 794, 798, 794]
drlReluMakespan41 = [795, 794, 794, 794, 794, 794, 798, 793, 794, 794, 794, 793]
drlReluMakespan42 = [794, 794, 794, 794, 799, 794, 795, 794, 796, 794, 794, 794]
drlReluMakespan43 = [794, 797, 795, 794, 795, 794, 794, 795, 794, 794, 793, 794]
drlReluMakespan44 = [794, 792, 793, 794, 794, 796, 794, 798, 795, 794, 794, 796]
drlReluMakespan45 = [795, 794, 799, 794, 794, 793, 794, 795, 795, 793, 796, 794]
drlReluMakespan46 = [794, 796, 794, 794, 794, 794, 794, 793, 799, 792, 794, 794]
drlReluMakespan47 = [795, 794, 793, 794, 796, 797, 794, 794, 795, 794, 794, 794]
drlReluMakespan48 = [794, 794, 794, 792, 794, 794, 795, 794, 794, 794, 794, 794]
drlReluMakespan49 = [794, 794, 795, 792, 797, 797, 794, 794, 792, 800, 795, 795]
drlReluRewards0 = [-0.17544633017412387, -0.17580964970257765, -0.1778021978021978, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drlReluRewards1 = [-0.17617264919621228, -0.17617264919621228, -0.1763540290620872, -0.17725973169122497, -0.1768976897689769, -0.17725973169122497, -0.1776214552648934, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228]
drlReluRewards2 = [-0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.1759911894273128, -0.17617264919621228, -0.17725973169122497, -0.17617264919621228, -0.17617264919621228]
drlReluRewards3 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.1776214552648934, -0.17671654929577466, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228]
drlReluRewards4 = [-0.177078750549934, -0.177078750549934, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17671654929577466, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17526455026455026, -0.17508269018743108]
drlReluRewards5 = [-0.1759911894273128, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.1763540290620872, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17671654929577466, -0.17617264919621228, -0.17617264919621228, -0.177078750549934]
drlReluRewards6 = [-0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.177078750549934, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.17544633017412387]
drlReluRewards7 = [-0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.177078750549934]
drlReluRewards8 = [-0.177078750549934, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drlReluRewards9 = [-0.17562802996914942, -0.17562802996914942, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128]
drlReluRewards10 = [-0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17562802996914942, -0.1759911894273128]
drlReluRewards11 = [-0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.177078750549934, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026]
drlReluRewards12 = [-0.17617264919621228, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.17580964970257765, -0.1768976897689769, -0.1759911894273128, -0.17508269018743108, -0.17671654929577466]
drlReluRewards13 = [-0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128]
drlReluRewards14 = [-0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387]
drlReluRewards15 = [-0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.177078750549934, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387]
drlReluRewards16 = [-0.17580964970257765, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128]
drlReluRewards17 = [-0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.1763540290620872]
drlReluRewards18 = [-0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228]
drlReluRewards19 = [-0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387]
drlReluRewards20 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17526455026455026]
drlReluRewards21 = [-0.17653532907770195, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.17562802996914942]
drlReluRewards22 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17671654929577466, -0.17526455026455026, -0.17508269018743108]
drlReluRewards23 = [-0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.1759911894273128]
drlReluRewards24 = [-0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drlReluRewards25 = [-0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards26 = [-0.1763540290620872, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17544633017412387]
drlReluRewards27 = [-0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387]
drlReluRewards28 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drlReluRewards29 = [-0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drlReluRewards30 = [-0.17653532907770195, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108]
drlReluRewards31 = [-0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765]
drlReluRewards32 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards33 = [-0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.1763540290620872, -0.1749007498897221]
drlReluRewards34 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026]
drlReluRewards35 = [-0.17508269018743108, -0.17544633017412387, -0.17725973169122497, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128]
drlReluRewards36 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlReluRewards37 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942]
drlReluRewards38 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drlReluRewards39 = [-0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards40 = [-0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlReluRewards41 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221]
drlReluRewards42 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards43 = [-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108]
drlReluRewards44 = [-0.17508269018743108, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387]
drlReluRewards45 = [-0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17544633017412387, -0.17508269018743108]
drlReluRewards46 = [-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.1759911894273128, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108]
drlReluRewards47 = [-0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards48 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards49 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17562802996914942, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026]
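# The agents whose results are listed above are trained in separate scripts; this file only
# stores their outcomes. Purely as an illustration, the two network shapes named in the
# comments ("5 Dense layers" with tanh/relu, and "1 GRU layer and 4 Dense layers" used in the
# aggregation section below) could be sketched with tf.keras as follows. The helper names,
# layer widths, optimizer and loss are assumptions made for the sketch, not values taken from
# the actual experiments.
def build_dense_policy_sketch(state_size, action_size, activation="tanh"):
    # Hypothetical helper: 4 hidden Dense layers plus a Dense output layer (5 Dense layers total).
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense
    model = Sequential()
    model.add(Dense(64, activation=activation, input_shape=(state_size,)))
    for _ in range(3):
        model.add(Dense(64, activation=activation))
    model.add(Dense(action_size, activation="linear"))
    model.compile(optimizer="adam", loss="mse")
    return model

def build_gru_policy_sketch(seq_len, state_size, action_size, activation="tanh"):
    # Hypothetical helper for the recurrent variant: 1 GRU layer followed by 4 Dense layers.
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import GRU, Dense
    model = Sequential()
    model.add(GRU(64, input_shape=(seq_len, state_size)))
    for _ in range(3):
        model.add(Dense(64, activation=activation))
    model.add(Dense(action_size, activation="linear"))
    model.compile(optimizer="adam", loss="mse")
    return model

# e.g. build_dense_policy_sketch(state_size=..., action_size=..., activation="relu") would give
# the relu variant whose makespans/rewards are stored in the drlRelu* lists above.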
if __name__ == "__main__":
##############################################
##############################################
##############################################
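# The section below condenses each configuration's 50 runs (12 values per run) into
# per-run mean makespans and mean rewards, and also keeps the raw per-run lists.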
# Deep Recurrent Reinforcement Learning with 1 GRU layer and 4 Dense layers
drnnGRUtanhMakespan = []
drnnGRUtanhRewards = []
drnnGRUtanhMakespanList = []
drnnGRUtanhRewardsList = []
drnnGRUtanhMakespanValues = []
drnnGRUtanhRewardsValues = []
# Aggregate the 50 GRU-tanh runs (drnnGRUtanhMakespan0..49 and drnnGRUtanhRewards0..49,
# defined above): per-run mean makespan/reward plus the raw per-run lists.
for i in range(50):
    tanh_makespan_run = globals()[f"drnnGRUtanhMakespan{i}"]
    tanh_rewards_run = globals()[f"drnnGRUtanhRewards{i}"]
    drnnGRUtanhMakespan.append(np.mean(tanh_makespan_run))
    drnnGRUtanhRewards.append(np.mean(tanh_rewards_run))
    drnnGRUtanhMakespanList.append(tanh_makespan_run)
    drnnGRUtanhRewardsList.append(tanh_rewards_run)
drnnGRUreluMakespan = []
drnnGRUreluRewards = []
drnnGRUreluMakespanList = []
drnnGRUreluRewardsList = []
drnnGRUreluMakespanValues = []
drnnGRUreluRewardsValues = []
# Same aggregation for the 50 GRU-relu runs; drnnGRUreluRewardsList is still filled by the
# individual appends that follow.
for i in range(50):
    relu_makespan_run = globals()[f"drnnGRUreluMakespan{i}"]
    drnnGRUreluMakespan.append(np.mean(relu_makespan_run))
    drnnGRUreluRewards.append(np.mean(globals()[f"drnnGRUreluRewards{i}"]))
    drnnGRUreluMakespanList.append(relu_makespan_run)
drnnGRUreluRewardsList.append(drnnGRUreluRewards0)
drnnGRUreluRewardsList.append(drnnGRUreluRewards1)
drnnGRUreluRewardsList.append(drnnGRUreluRewards2)
drnnGRUreluRewardsList.append(drnnGRUreluRewards3)
drnnGRUreluRewardsList.append(drnnGRUreluRewards4)
drnnGRUreluRewardsList.append(drnnGRUreluRewards5)
drnnGRUreluRewardsList.append(drnnGRUreluRewards6)
drnnGRUreluRewardsList.append(drnnGRUreluRewards7)
drnnGRUreluRewardsList.append(drnnGRUreluRewards8)
drnnGRUreluRewardsList.append(drnnGRUreluRewards9)
drnnGRUreluRewardsList.append(drnnGRUreluRewards10)
drnnGRUreluRewardsList.append(drnnGRUreluRewards11)
drnnGRUreluRewardsList.append(drnnGRUreluRewards12)
drnnGRUreluRewardsList.append(drnnGRUreluRewards13)
drnnGRUreluRewardsList.append(drnnGRUreluRewards14)
drnnGRUreluRewardsList.append(drnnGRUreluRewards15)
drnnGRUreluRewardsList.append(drnnGRUreluRewards16)
drnnGRUreluRewardsList.append(drnnGRUreluRewards17)
drnnGRUreluRewardsList.append(drnnGRUreluRewards18)
drnnGRUreluRewardsList.append(drnnGRUreluRewards19)
drnnGRUreluRewardsList.append(drnnGRUreluRewards20)
drnnGRUreluRewardsList.append(drnnGRUreluRewards21)
drnnGRUreluRewardsList.append(drnnGRUreluRewards22)
drnnGRUreluRewardsList.append(drnnGRUreluRewards23)
drnnGRUreluRewardsList.append(drnnGRUreluRewards24)
drnnGRUreluRewardsList.append(drnnGRUreluRewards25)
drnnGRUreluRewardsList.append(drnnGRUreluRewards26)
drnnGRUreluRewardsList.append(drnnGRUreluRewards27)
drnnGRUreluRewardsList.append(drnnGRUreluRewards28)
drnnGRUreluRewardsList.append(drnnGRUreluRewards29)
drnnGRUreluRewardsList.append(drnnGRUreluRewards30)
drnnGRUreluRewardsList.append(drnnGRUreluRewards31)
drnnGRUreluRewardsList.append(drnnGRUreluRewards32)
drnnGRUreluRewardsList.append(drnnGRUreluRewards33)
drnnGRUreluRewardsList.append(drnnGRUreluRewards34)
drnnGRUreluRewardsList.append(drnnGRUreluRewards35)
drnnGRUreluRewardsList.append(drnnGRUreluRewards36)
drnnGRUreluRewardsList.append(drnnGRUreluRewards37)
drnnGRUreluRewardsList.append(drnnGRUreluRewards38)
drnnGRUreluRewardsList.append(drnnGRUreluRewards39)
drnnGRUreluRewardsList.append(drnnGRUreluRewards40)
drnnGRUreluRewardsList.append(drnnGRUreluRewards41)
drnnGRUreluRewardsList.append(drnnGRUreluRewards42)
drnnGRUreluRewardsList.append(drnnGRUreluRewards43)
drnnGRUreluRewardsList.append(drnnGRUreluRewards44)
drnnGRUreluRewardsList.append(drnnGRUreluRewards45)
drnnGRUreluRewardsList.append(drnnGRUreluRewards46)
drnnGRUreluRewardsList.append(drnnGRUreluRewards47)
drnnGRUreluRewardsList.append(drnnGRUreluRewards48)
drnnGRUreluRewardsList.append(drnnGRUreluRewards49)
for vector in drnnGRUtanhMakespanList:
for element in vector:
drnnGRUtanhMakespanValues.append(element)
for vector in drnnGRUtanhRewardsList:
for element in vector:
drnnGRUtanhRewardsValues.append(element)
##################
for vector in drnnGRUreluMakespanList:
for element in vector:
drnnGRUreluMakespanValues.append(element)
for vector in drnnGRUreluRewardsList:
for element in vector:
drnnGRUreluRewardsValues.append(element)
#####################
smoothGRUtanhMakespanValues = pd.Series(drnnGRUtanhMakespanValues).rolling(12).mean()
plt.plot(smoothGRUtanhMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' con red neuronal profunda que incluye 1 capa GRU")
plt.show()
smoothGRUtanhRewardsValues = pd.Series(drnnGRUtanhRewardsValues).rolling(12).mean()
plt.plot(smoothGRUtanhRewardsValues)
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' con red neuronal profunda que incluye 1 capa GRU")
plt.show()
#####################
smoothGRUreluMakespanValues = pd.Series(drnnGRUreluMakespanValues).rolling(12).mean()
plt.plot(smoothGRUreluMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' con red neuronal profunda que incluye 1 capa GRU y ReLU")
plt.show()
smoothGRUreluRewardsValues = pd.Series(drnnGRUreluRewardsValues).rolling(12).mean()
plt.plot(smoothGRUreluRewardsValues)
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' con red neuronal profunda que incluye 1 capa GRU y ReLU")
plt.show()
###################
plt.plot(smoothGRUtanhMakespanValues, color='blue', label='tanh')
plt.plot(smoothGRUreluMakespanValues, color='orange', label='relu')
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' con red neuronal profunda que incluye 1 capa GRU")
plt.legend()
plt.show()
###################
plt.plot(smoothGRUtanhRewardsValues, color='blue', label='tanh')
plt.plot(smoothGRUreluRewardsValues, color='orange', label='relu')
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' con red neuronal profunda que incluye 1 capa GRU")
plt.legend()
plt.show()
###################
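# Deep recurrent network with one LSTM layer (tanh activation):
# aggregate per-run mean makespan/reward and the raw per-episode series.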
drnnLSTMtanhMakespan = []
drnnLSTMtanhRewards = []
drnnLSTMtanhMakespanList = []
drnnLSTMtanhRewardsList = []
drnnLSTMtanhMakespanValues = []
drnnLSTMtanhRewardsValues = []
for i in range(50):
    drnnLSTMtanhMakespan.append(np.mean(globals()['drnnLSTMtanhMakespan{}'.format(i)]))
    drnnLSTMtanhRewards.append(np.mean(globals()['drnnLSTMtanhRewards{}'.format(i)]))
    drnnLSTMtanhMakespanList.append(globals()['drnnLSTMtanhMakespan{}'.format(i)])
    drnnLSTMtanhRewardsList.append(globals()['drnnLSTMtanhRewards{}'.format(i)])
for vector in drnnLSTMtanhMakespanList:
for element in vector:
drnnLSTMtanhMakespanValues.append(element)
for vector in drnnLSTMtanhRewardsList:
for element in vector:
drnnLSTMtanhRewardsValues.append(element)
smoothLSTMtanhMakespanValues = pd.Series(drnnLSTMtanhMakespanValues).rolling(12).mean()
plt.plot(smoothLSTMtanhMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' utilizando LSTM con tanh")
plt.show()
smoothLSTMtanhRewardsValues = pd.Series(drnnLSTMtanhRewardsValues).rolling(12).mean()
plt.plot(smoothLSTMtanhRewardsValues)
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' utilizando LSTM con tanh")
plt.show()
####################
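# Deep recurrent network with one LSTM layer (ReLU activation):
# aggregate per-run mean makespan/reward and the raw per-episode series.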
drnnLSTMreluMakespan = []
drnnLSTMreluRewards = []
drnnLSTMreluMakespanList = []
drnnLSTMreluRewardsList = []
drnnLSTMreluMakespanValues = []
drnnLSTMreluRewardsValues = []
for i in range(50):
    drnnLSTMreluMakespan.append(np.mean(globals()['drnnLSTMreluMakespan{}'.format(i)]))
    drnnLSTMreluRewards.append(np.mean(globals()['drnnLSTMreluRewards{}'.format(i)]))
    drnnLSTMreluMakespanList.append(globals()['drnnLSTMreluMakespan{}'.format(i)])
    drnnLSTMreluRewardsList.append(globals()['drnnLSTMreluRewards{}'.format(i)])
for vector in drnnLSTMreluMakespanList:
for element in vector:
drnnLSTMreluMakespanValues.append(element)
for vector in drnnLSTMreluRewardsList:
for element in vector:
drnnLSTMreluRewardsValues.append(element)
smoothLSTMreluMakespanValues = pd.Series(drnnLSTMreluMakespanValues).rolling(12).mean()
plt.plot(smoothLSTMreluMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' utilizando LSTM con relu")
plt.show()
smoothLSTMreluRewardsValues = pd.Series(drnnLSTMreluRewardsValues).rolling(12).mean()
plt.plot(smoothLSTMreluRewardsValues)
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' utilizando LSTM con relu")
plt.show()
##################
plt.plot(smoothLSTMtanhMakespanValues, color='blue', label='tanh')
plt.plot(smoothLSTMreluMakespanValues, color='orange', label='relu')
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' con red neuronal profunda que incluye 1 capa LSTM")
plt.legend()
plt.show()
##################
plt.plot(smoothLSTMtanhRewardsValues, color='blue', label='tanh')
plt.plot(smoothLSTMreluRewardsValues, color='orange', label='relu')
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' con red neuronal profunda que incluye 1 capa LSTM")
plt.legend()
plt.show()
##################
##################
##################
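# Feedforward deep-RL agent (tanh activation):
# aggregate per-run mean makespan/reward and the raw per-episode series.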
drlTanhMakespan = []
drlTanhRewards = []
drlTanhMakespanList = []
drlTanhRewardsList = []
drlTanhMakespanValues = []
drlTanhRewardsValues = []
for i in range(50):
    drlTanhMakespan.append(np.mean(globals()['drlTanhMakespan{}'.format(i)]))
    drlTanhRewards.append(np.mean(globals()['drlTanhRewards{}'.format(i)]))
    drlTanhMakespanList.append(globals()['drlTanhMakespan{}'.format(i)])
    drlTanhRewardsList.append(globals()['drlTanhRewards{}'.format(i)])
for vector in drlTanhMakespanList:
for element in vector:
drlTanhMakespanValues.append(element)
for vector in drlTanhRewardsList:
for element in vector:
drlTanhRewardsValues.append(element)
smoothdrlTanhMakespanValues = pd.Series(drlTanhMakespanValues).rolling(12).mean()
plt.plot(smoothdrlTanhMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' utilizando feedforward con tanh")
plt.show()
smoothdrlTanhRewardsValues = pd.Series(drlTanhRewardsValues).rolling(12).mean()
plt.plot(smoothdrlTanhRewardsValues)
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' utilizando feedforward con tanh")
plt.show()
####################
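# Feedforward deep-RL agent (ReLU activation):
# aggregate per-run mean makespan/reward and the raw per-episode series.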
drlReluMakespan = []
drlReluRewards = []
drlReluMakespanList = []
drlReluRewardsList = []
drlReluMakespanValues = []
drlReluRewardsValues = []
for i in range(50):
    drlReluMakespan.append(np.mean(globals()['drlReluMakespan{}'.format(i)]))
drlReluRewards.append(np.mean(drlReluRewards0))
drlReluRewards.append(np.mean(drlReluRewards1))
drlReluRewards.append(np.mean(drlReluRewards2))
drlReluRewards.append(np.mean(drlReluRewards3))
drlReluRewards.append(np.mean(drlReluRewards4))
drlReluRewards.append(np.mean(drlReluRewards5))
drlReluRewards.append(np.mean(drlReluRewards6))
drlReluRewards.append(np.mean(drlReluRewards7))
drlReluRewards.append(np.mean(drlReluRewards8))
drlReluRewards.append(np.mean(drlReluRewards9))
drlReluRewards.append(np.mean(drlReluRewards10))
drlReluRewards.append(np.mean(drlReluRewards11))
drlReluRewards.append(np.mean(drlReluRewards12))
drlReluRewards.append(np.mean(drlReluRewards13))
drlReluRewards.append(np.mean(drlReluRewards14))
import os
import logging
import numpy as np
from PIL import Image
from PIL import ImageOps
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
tf.get_logger().setLevel(logging.ERROR)
from tensorflow.keras.utils import Sequence, to_categorical
from augmentation import augmentations
##########################################################################
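# Keras Sequence that serves image batches; the `jsd` flag and the imported
# `augmentations` suggest AugMix-style training with a Jensen-Shannon
# consistency loss (a clean batch plus independently augmented views).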
class DataGenerator(Sequence):
def __init__(self,
data,
labels,
img_dim=(32, 32,3),
batch_size=32,
num_classes=10,
shuffle=True,
jsd=True
):
self.data = data
self.labels = labels
self.img_dim = img_dim
self.batch_size = batch_size
self.num_classes = num_classes
self.shuffle = shuffle
self.jsd = jsd
self.augmentations = augmentations
self.on_epoch_end()
def on_epoch_end(self):
self.indices = np.arange(len(self.data))
if self.shuffle:
            np.random.shuffle(self.indices)
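    # --- Sketch: the remaining Sequence methods are not shown in this file. ---
    # The methods below are a minimal, assumed implementation (not the original):
    # `__getitem__` follows the JSD convention suggested by the `jsd` flag,
    # returning a clean batch plus two augmented views. It assumes each entry in
    # the imported `augmentations` list is a callable PIL.Image -> PIL.Image;
    # adapt the call if the operations also take a severity argument.
    def __len__(self):
        # number of batches per epoch
        return int(np.ceil(len(self.data) / self.batch_size))

    def __getitem__(self, idx):
        batch_idx = self.indices[idx * self.batch_size:(idx + 1) * self.batch_size]
        batch_x = self.data[batch_idx].astype("float32") / 255.0
        batch_y = to_categorical(self.labels[batch_idx], self.num_classes)
        if self.jsd:
            # clean batch plus two independently augmented views for the JSD loss
            aug1 = np.stack([self._augment(img) for img in batch_x])
            aug2 = np.stack([self._augment(img) for img in batch_x])
            return (batch_x, aug1, aug2), batch_y
        return batch_x, batch_y

    def _augment(self, img):
        # apply one randomly chosen augmentation (assumed signature: op(pil_img))
        op = np.random.choice(self.augmentations)
        pil_img = Image.fromarray(np.uint8(img * 255))
        return np.asarray(op(pil_img), dtype="float32") / 255.0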
import numpy as np
import numpy.ma as ma
from numpy import linalg as LA
import tsstats
#from numba import jit
from wormtracker import Logger
class WormTrajectoryPostProcessor:
## static properties!
# bad frame settings
filterByWidth = True
filterByLength = True
widthThreshold = (0.7, 1.3)
lengthThreshold = (0.9, 1.1)
filterBySpeed = True
maxSpeed = 1000
# segment settings
max_n_missing = 3
max_d_um = 10.
max_segment_frames = 10000
min_segment_size = 150
# head assignment settings (centoid only method)
headMinSpeed = 40. # Min <s> for head assignment by leading end
headMinLeading = 1.3 # Min relative time leading for head
headMinRelSpeed = 1.1 # Min relative end speed for head
headMinRelBrightness = 0.2 # Min relative brightness for head
# head assignment settings (posture method)
headVarianceMinRatio = 1.2 # Min ratio of head to tail posture variation
# head assignment settings (posture dynamics method)
headDeltaCorrelation = 0.05
bodyWaveDelay = 0.08
# smoothing
useSmoothingFilterDerivatives = True
filterWindow = 1. # smoothing filter window size
def __init__(self, h5obj, strain, name):
self.h5obj = h5obj
self.strain = strain
self.name = name
self.h5ref = h5obj['worms'][strain][name]
self.lengths = self.h5ref['length'][...]
self.widths = self.h5ref['width'][...]
self.frameRate = h5obj['/video/frameRate'][0]
self.pixelsPerMicron = h5obj['/video/pixelsPerMicron'][0]
self.maxFrameNumber = self.h5ref['time'].shape[0]
self.nAngles = self.h5ref['posture'].shape[1]
self.badFrames = np.zeros((self.maxFrameNumber,), dtype='bool')
self.haveSkeleton = np.zeros((self.maxFrameNumber,), dtype='bool')
self.skeleton = None
self.posture = None
self.length = None
self.width = None
self.t = self.h5ref['time'][...]
self.X = ma.array(self.h5ref['centroid'][...] / self.pixelsPerMicron)
self.Xhead = ma.zeros(self.X.shape)
self.Xtail = ma.zeros(self.X.shape)
self.v = ma.zeros(self.X.shape)
self.s = ma.zeros((self.maxFrameNumber,))
self.phi = ma.zeros((self.maxFrameNumber,))
self.psi = ma.zeros((self.maxFrameNumber,))
self.dpsi = ma.zeros((self.maxFrameNumber,))
self.Ctheta = None
self.ltheta = None
self.vtheta = None
self.usePosturalHeadAssignment = True
def postProcess(self):
Logger.logPrint('Identifying bad frames...')
self.identifyBadFrames()
Logger.logPrint('Extracting postural data...')
self.extractPosturalData()
Logger.logPrint('Fixing order of postural data...')
self.fixPosturalOrdering()
Logger.logPrint('Segmenting trajectory...')
self.segment()
Logger.logPrint('Assigning head...')
if self.usePosturalHeadAssignment:
self.assignHeadTail()
else:
self.assignHeadTailCentroidOnly()
Logger.logPrint('Ordering postural data head to tail...')
self.orderHeadTail()
Logger.logPrint('Calculating centroid motion variables...')
self.calculateCentroidMeasurements()
Logger.logPrint('Calculating postural measurements...')
self.calculatePosturalMeasurements()
def identifyBadFrames(self):
badFrames = np.logical_or(self.lengths == 0,
self.widths == 0)
self.length = np.median(self.lengths[np.logical_not(badFrames)])
self.width = np.median(self.widths[np.logical_not(badFrames)])
if self.filterByWidth:
badFrames = np.logical_or(badFrames,
np.logical_or(self.widths < self.widthThreshold[0]*self.width,
self.widths > self.widthThreshold[1]*self.width))
if self.filterByLength:
badFrames = np.logical_or(badFrames,
np.logical_or(self.lengths <
self.lengthThreshold[0]*self.length,
self.lengths >
self.lengthThreshold[1]*self.length))
if self.filterBySpeed:
v = ma.zeros(self.X.shape)
v[1:-1] = (self.X[2:, :] - self.X[0:-2])/(2.0/self.frameRate)
instSpeed = np.sqrt(np.sum(np.power(v, 2), axis=1))
badFrames = np.logical_or(badFrames,
instSpeed > self.maxSpeed)
self.badFrames = badFrames
def extractPosturalData(self):
# import skeleton splines
self.skeleton = self.h5ref['skeletonSpline'][...]
self.posture = self.h5ref['posture'][...]
self.haveSkeleton = np.array([np.any(skeleton > 0) and ~badFrame
for skeleton, badFrame in zip(self.skeleton, self.badFrames)])
# @jit
@staticmethod
def skeletonDist(skeleton1, skeleton2):
distEachPoint = np.sqrt(np.sum(np.power(skeleton1 -
skeleton2, 2),
axis=1))
# return average distance per spline point
return np.sum(distEachPoint)/skeleton1.shape[0]
def fixPosturalOrdering(self):
# compare possible skeleton orientations
interframe_d = np.empty((self.maxFrameNumber, 2)) * np.NaN
flipped = np.zeros((self.maxFrameNumber,), dtype=bool)
nFromLastGood = np.empty((self.maxFrameNumber,)) * np.NaN
for i in xrange(1, self.maxFrameNumber):
# check whether there is a previous skeleton to compare
if not self.haveSkeleton[i] or not np.any(self.haveSkeleton[:i]):
continue
ip = np.where(self.haveSkeleton[:i])[0][-1] # last skeleton
nFromLastGood[i] = i - ip
interframe_d[i, 0] = self.skeletonDist(
np.squeeze(self.skeleton[i, :, :]),
np.squeeze(self.skeleton[ip, :, :]))
# flipped orientation
interframe_d[i, 1] = self.skeletonDist(
np.flipud(np.squeeze(self.skeleton[i, :, :])),
np.squeeze(self.skeleton[ip]))
if interframe_d[i, 1] < interframe_d[i, 0]:
# if the flipped orientation is better, flip the data
flipped[i] = not flipped[ip]
else:
flipped[i] = flipped[ip]
self.interframe_d = interframe_d
# flip data appropriately
sel = flipped
self.skeleton[sel, :, :] = self.skeleton[sel, ::-1, :]
self.posture[sel, :] = self.posture[sel, ::-1]
def segment(self):
# break video into segments with matched skeletons
max_d = self.max_d_um/self.pixelsPerMicron
ii = 0
segments = []
while ii < self.maxFrameNumber:
begin = ii
ii += 1
# Continue segment until >max_n_missing consecutive bad frames
# are found, or >max_segment_frames are collected
n_missing = 0
last_missing = False
while (ii < self.maxFrameNumber and
ii - begin < self.max_segment_frames and
(np.isnan(self.interframe_d[ii, 0]) or
np.min(self.interframe_d[ii, :]) < max_d)):
if not self.haveSkeleton[ii]:
n_missing += 1
last_missing = True
if n_missing > self.max_n_missing:
ii += 1
break
else:
n_missing = 0
last_missing = False
ii += 1
segments.append([begin, ii])
self.segments = [segment for segment in segments
if segment[1] - segment[0] >
self.min_segment_size]
def assignHeadTail(self):
flipSegment = np.zeros((len(self.segments),), dtype='bool')
segmentAssignMethod = -np.ones((len(self.segments),), dtype='int8')
npoints = round(self.posture.shape[1]*0.1)
A = ma.array(self.posture)
A[self.badFrames] = ma.masked
A[~self.haveSkeleton] = ma.masked
for i, segment in enumerate(self.segments):
b = segment[0]
e = segment[1]
# posture variance method
v = A.std(axis=0)
npoints=5
vh = v[:npoints].sum()
vt = v[-npoints:].sum()
# calculate dynamics measures
hm = _headMoveMeasure(A[b:e,:])
bw = _bodyWaveMeasure(A[b:e,:])
# head has more variance
# head has oscillatory head movement unlike tail (negative delta correlation measure)
# body wave delay is positive
if vh/vt > self.headVarianceMinRatio:
# not flipped
segmentAssignMethod[i] = 1
elif vt/vh > self.headVarianceMinRatio:
# flipped
flipSegment[i] = True
segmentAssignMethod[i] = 1
elif np.abs(bw) > self.bodyWaveDelay:
segmentAssignMethod[i] = 3
if bw < -self.bodyWaveDelay:
flipSegment[i] = True
elif np.abs(hm) > self.headDeltaCorrelation:
segmentAssignMethod[i] = 2
if hm < -self.headDeltaCorrelation:
flipSegment[i] = True
else:
segmentAssignMethod[i] = 0 # can't assign
self.flipSegment = flipSegment
self.segmentAssignMethod = segmentAssignMethod
def assignHeadTailPostureVariance(self):
flipSegment = np.zeros((len(self.segments),), dtype='bool')
segmentAssignMethod = -np.ones((len(self.segments),), dtype='int8')
npoints = round(self.posture.shape[1]*0.1)
for i, segment in enumerate(self.segments):
b = segment[0]
e = segment[1]
# calculate std at each posture position over segment
v = self.posture[b:e, :].std(axis=0)
# calculate total std for 10% from each end
vh = v[:npoints].sum()
vt = v[-npoints:].sum()
# head has higher variance
if vh/vt > self.headVarianceMinRatio:
# not flipped
segmentAssignMethod[i] = 3
elif vt/vh > self.headVarianceMinRatio:
# flipped
flipSegment[i] = True
segmentAssignMethod[i] = 3
else:
segmentAssignMethod[i] = 0 # can't assign
self.flipSegment = flipSegment
self.segmentAssignMethod = segmentAssignMethod
def assignHeadTailCentroidOnly(self):
flipSegment = np.zeros((len(self.segments),), dtype='bool')
segmentAssignMethod = -np.ones((len(self.segments),), dtype='int8')
X = ma.array(self.h5ref['centroid'][...] / self.pixelsPerMicron)
X[self.badFrames, :] = ma.masked
dt = 1/self.frameRate
(v, s, phi) = _getMotionVariables(X, dt)
        Xhead = np.squeeze(self.skeleton[:, 0, :] - self.h5ref['midpoint'][...])
import numpy as np
import scipy.io as sio
import torch.utils.data
from torch.utils.data import DataLoader
import pdb
class NeuralData(torch.utils.data.Dataset):
def __init__(self, data, data2, num_trials_per_class=91):
self.data = data
self.data2 = data2
self.num_trials_per_class = num_trials_per_class
self.size = data.shape[0]
def __getitem__(self, index):
input1_data = self.data[index]
input2_data = self.data2[index]
target = index // self.num_trials_per_class
return input1_data, input2_data, target
def __len__(self):
return self.size
def break_correlations(data):
    # data is a TxN matrix (trials x neurons); each neuron's values are
    # permuted across trials independently to break single-trial correlations.
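    # Example: if column j holds neuron j's rate on each trial, shuffling each
    # column with its own permutation preserves every neuron's marginal
    # distribution while destroying trial-by-trial (noise) correlations.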
permuted_data = np.zeros_like(data)
for i in range(data.shape[1]):
permuted_data[:, i] = np.random.permutation(data[:, i])
return permuted_data
def get_neural_nocorr_loader(workers=0, batch_size=10, time1=None, time2=None, deltat=None):
data = sio.loadmat('data/ps4_realdata.mat') # load the .mat file.
NumTrainData = data['train_trial'].shape[0]
NumClass = data['train_trial'].shape[1]
NumTestData = data['test_trial'].shape[0]
trainDataArr = np.zeros((NumClass, NumTrainData, 97)) # contains the firing rates for all neurons on all 8 x 91 trials in the training set
testDataArr = np.zeros((NumClass, NumTestData, 97)) # for the testing set.
for classIX in range(NumClass):
for trainDataIX in range(NumTrainData):
trainDataArr[classIX, trainDataIX, :] = np.sum(data['train_trial'][trainDataIX, classIX][1][:, 350:550], 1)
for testDataIX in range(NumTestData):
testDataArr[classIX, testDataIX, :] = np.sum(data['test_trial'][testDataIX, classIX][1][:, 350:550], 1)
# permute the data to break the single trial correlations
trainDataArrNoCorr = np.zeros((NumClass, NumTrainData, 97))
for classIX in range(NumClass):
trainDataArrNoCorr[classIX, :, :] = break_correlations(trainDataArr[classIX, :, :])
trainData = trainDataArr.reshape(-1, 97)
trainDataNoCorr = trainDataArrNoCorr.reshape(-1, 97)
testData = testDataArr.reshape(-1, 97)
trainset = NeuralData(data=trainData, data2=trainDataNoCorr)
testset = NeuralData(data=testData, data2=testData)
trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=workers)
testloader = DataLoader(testset, batch_size=100, shuffle=False, num_workers=workers)
return trainloader, testloader
# get different time windows
def get_neural_time_loader(workers=0, batch_size=10, time1=150, time2=350, deltat=100):
data = sio.loadmat('data/ps4_realdata.mat') # load the .mat file.
NumTrainData = data['train_trial'].shape[0]
NumClass = data['train_trial'].shape[1]
NumTestData = data['test_trial'].shape[0]
trainDataArr = np.zeros((NumClass, NumTrainData, 97)) # contains the firing rates for all neurons on all 8 x 91 trials in the training set
trainDataArr2 = np.zeros((NumClass, NumTrainData, 97))
testDataArr = np.zeros((NumClass, NumTestData, 97)) # for the testing set.
testDataArr2 = np.zeros((NumClass, NumTestData, 97)) # for the testing set.
for classIX in range(NumClass):
for trainDataIX in range(NumTrainData):
trainDataArr[classIX, trainDataIX, :] = np.sum(data['train_trial'][trainDataIX, classIX][1][:, time1:time1 + deltat], 1)
trainDataArr2[classIX, trainDataIX, :] = np.sum(data['train_trial'][trainDataIX, classIX][1][:, time2:time2 + deltat], 1)
for testDataIX in range(NumTestData):
testDataArr[classIX, testDataIX, :] = np.sum(data['test_trial'][testDataIX, classIX][1][:, time1:time1 + deltat], 1)
testDataArr2[classIX, testDataIX, :] = np.sum(data['test_trial'][testDataIX, classIX][1][:, time2:time2 + deltat], 1)
trainData = trainDataArr.reshape(-1, 97)
trainData2 = trainDataArr2.reshape(-1, 97)
testData = testDataArr.reshape(-1, 97)
testData2 = testDataArr2.reshape(-1, 97)
trainset = NeuralData(data=trainData, data2=trainData2)
testset = NeuralData(data=testData, data2=testData2)
trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=workers)
testloader = DataLoader(testset, batch_size=100, shuffle=False, num_workers=workers)
return trainloader, testloader
# CENTER OUT
class NeuralDataCenter(torch.utils.data.Dataset):
def __init__(self, data, data2, targets):
self.data = data
self.data2 = data2
self.targets = targets
self.size = data.shape[0]
def __getitem__(self, index):
input_data = self.data[index]
input_data2 = self.data2[index]
target = self.targets[index]
return input_data, input_data2, target
def __len__(self):
return self.size
# some helper functions
def get_target_class(point, U):
target_class = -1
for i, e in enumerate(U):
if (point == e).all():
target_class = i
return target_class
def get_out_indices(data):
return ~np.all(data == 0, axis=1)
def remove_zeros(data):
return data[get_out_indices(data)]
# basically, conditioned on the target class, sample data (but in a convenient manner for the dataloader)
def align_data(targets1, targets2):
target_idx1 = []
target_idx2 = []
for i in range(np.max(targets1) + 1):
idx1 = [idx for idx, val in enumerate(targets1) if val == i]
idx2 = [idx for idx, val in enumerate(targets2) if val == i]
min_overlap = min(len(idx1), len(idx2))
target_idx1.append(idx1[:min_overlap])
target_idx2.append(idx2[:min_overlap])
return target_idx1, target_idx2
def test_align_data():
targets1 = [0, 0, 0, 1, 1]
targets2 = [0, 0, 1]
t1, t2 = align_data(targets1, targets2)
print(t1)
print(t2)
# TODO: add in time_avg, slightly clean up code
def load_neural_data(path, delay=False, raster='spikeRaster2'):
data = sio.loadmat(path)
R = data['R'][0, :]
# a bit messy code, but this loads the targets and removes the center targets
t = R[0:]['posTargets1']
targets = np.zeros((len(t), 2))
for i in range(len(t)):
for j in range(2):
targets[i][j] = t[i][j]
U = remove_zeros(np.unique(targets, axis=0))
features = []
classes = []
for i, e in enumerate(get_out_indices(targets)):
if e:
if delay:
# For the delay data, spikeRaster2 works a lot better than spikeRaster, 2 is from PMd
time_end = R[i]['timeTargetOn'].item() # this is bad naming
time_start = time_end - R[i]['delayTime'].item()
features.append(100 * np.mean(R[i][raster][:, time_start:time_end], axis=1))
else:
features.append(np.sum(R[i]['spikeRaster'], axis=1))
classes.append(get_target_class(targets[i], U))
return features, classes
def load_neural_data_time(path, delay=False, time=0, deltat=100, raster='spikeRaster2'):
data = sio.loadmat(path)
R = data['R'][0, :]
# a bit messy code, but this loads the targets and removes the center targets
t = R[0:]['posTargets1']
targets = np.zeros((len(t), 2))
for i in range(len(t)):
for j in range(2):
targets[i][j] = t[i][j]
U = remove_zeros(np.unique(targets, axis=0))
features = []
classes = []
for i, e in enumerate(get_out_indices(targets)):
if e:
if delay:
# For the delay data, spikeRaster2 works a lot better than spikeRaster, 2 is from PMd
time_end = R[i]['timeTargetOn'].item() # this is bad naming
time_start = time_end - R[i]['delayTime'].item() + time
features.append(100 * np.mean(R[i][raster][:, time_start:time_start + deltat], axis=1))
else:
features.append(np.sum(R[i]['spikeRaster'], axis=1))
classes.append(get_target_class(targets[i], U))
return features, classes
def get_overlapped_data(features, classes, idxs):
features = | np.array(features) | numpy.array |
import numpy as np
import pyart
import scipy.ndimage.filters
def J_function(winds, parameters):
"""
Calculates the total cost function. This typically does not need to be
called directly as get_dd_wind_field is a wrapper around this function and
:py:func:`pydda.cost_functions.grad_J`.
In order to add more terms to the cost function, modify this
function and :py:func:`pydda.cost_functions.grad_J`.
Parameters
----------
winds: 1-D float array
The wind field, flattened to 1-D for f_min. The total size of the
array will be a 1D array of 3*nx*ny*nz elements.
parameters: DDParameters
The parameters for the cost function evaluation as specified by the
:py:func:`pydda.retrieval.DDParameters` class.
Returns
-------
J: float
The value of the cost function
"""
winds = np.reshape(winds,
(3, parameters.grid_shape[0], parameters.grid_shape[1],
parameters.grid_shape[2]))
Jvel = calculate_radial_vel_cost_function(
parameters.vrs, parameters.azs, parameters.els,
winds[0], winds[1], winds[2], parameters.wts, rmsVr=parameters.rmsVr,
weights=parameters.weights, coeff=parameters.Co)
if(parameters.Cm > 0):
Jmass = calculate_mass_continuity(
winds[0], winds[1], winds[2], parameters.z,
parameters.dx, parameters.dy, parameters.dz,
coeff=parameters.Cm)
else:
Jmass = 0
if(parameters.Cx > 0 or parameters.Cy > 0 or parameters.Cz > 0):
Jsmooth = calculate_smoothness_cost(
winds[0], winds[1], winds[2], Cx=parameters.Cx,
Cy=parameters.Cy, Cz=parameters.Cz)
else:
Jsmooth = 0
if(parameters.Cb > 0):
Jbackground = calculate_background_cost(
winds[0], winds[1], winds[2], parameters.bg_weights,
parameters.u_back, parameters.v_back, parameters.Cb)
else:
Jbackground = 0
if(parameters.Cv > 0):
Jvorticity = calculate_vertical_vorticity_cost(
winds[0], winds[1], winds[2], parameters.dx,
parameters.dy, parameters.dz, parameters.Ut,
parameters.Vt, coeff=parameters.Cv)
else:
Jvorticity = 0
if(parameters.Cmod > 0):
Jmod = calculate_model_cost(
winds[0], winds[1], winds[2],
parameters.model_weights, parameters.u_model,
parameters.v_model,
parameters.w_model, coeff=parameters.Cmod)
else:
Jmod = 0
if parameters.Cpoint > 0:
Jpoint = calculate_point_cost(
winds[0], winds[1], parameters.x, parameters.y, parameters.z,
parameters.point_list, Cp=parameters.Cpoint, roi=parameters.roi)
else:
Jpoint = 0
if(parameters.print_out is True):
print(('| Jvel | Jmass | Jsmooth | Jbg | Jvort | Jmodel | Jpoint |' +
' Max w '))
print(('|' + "{:9.4f}".format(Jvel) + '|' +
"{:9.4f}".format(Jmass) + '|' +
"{:9.4f}".format(Jsmooth) + '|' +
"{:9.4f}".format(Jbackground) + '|' +
"{:9.4f}".format(Jvorticity) + '|' +
"{:9.4f}".format(Jmod) + '|' +
"{:9.4f}".format(Jpoint)) + '|' +
"{:9.4f}".format(np.ma.max(np.ma.abs(winds[2]))))
return Jvel + Jmass + Jsmooth + Jbackground + Jvorticity + Jmod + Jpoint
def grad_J(winds, parameters):
"""
Calculates the gradient of the cost function. This typically does not need
to be called directly as get_dd_wind_field is a wrapper around this
function and :py:func:`pydda.cost_functions.J_function`.
In order to add more terms to the cost function,
    modify this function and :py:func:`pydda.cost_functions.J_function`.
Parameters
----------
winds: 1-D float array
The wind field, flattened to 1-D for f_min
parameters: DDParameters
The parameters for the cost function evaluation as specified by the
        :py:func:`pydda.retrieval.DDParameters` class.
Returns
-------
grad: 1D float array
Gradient vector of cost function
"""
winds = np.reshape(winds,
(3, parameters.grid_shape[0],
parameters.grid_shape[1], parameters.grid_shape[2]))
grad = calculate_grad_radial_vel(
parameters.vrs, parameters.els, parameters.azs,
winds[0], winds[1], winds[2], parameters.wts, parameters.weights,
parameters.rmsVr, coeff=parameters.Co, upper_bc=parameters.upper_bc)
if(parameters.Cm > 0):
grad += calculate_mass_continuity_gradient(
winds[0], winds[1], winds[2], parameters.z,
parameters.dx, parameters.dy, parameters.dz,
coeff=parameters.Cm, upper_bc=parameters.upper_bc)
if(parameters.Cx > 0 or parameters.Cy > 0 or parameters.Cz > 0):
grad += calculate_smoothness_gradient(
winds[0], winds[1], winds[2], Cx=parameters.Cx,
Cy=parameters.Cy, Cz=parameters.Cz, upper_bc=parameters.upper_bc)
if(parameters.Cb > 0):
grad += calculate_background_gradient(
winds[0], winds[1], winds[2], parameters.bg_weights,
parameters.u_back, parameters.v_back, parameters.Cb,
upper_bc=parameters.upper_bc)
if(parameters.Cv > 0):
grad += calculate_vertical_vorticity_gradient(
winds[0], winds[1], winds[2], parameters.dx,
parameters.dy, parameters.dz, parameters.Ut,
parameters.Vt, coeff=parameters.Cv)
if(parameters.Cmod > 0):
grad += calculate_model_gradient(
winds[0], winds[1], winds[2],
parameters.model_weights, parameters.u_model, parameters.v_model,
parameters.w_model, coeff=parameters.Cmod)
if parameters.Cpoint > 0:
grad += calculate_point_gradient(
winds[0], winds[1], parameters.x, parameters.y, parameters.z,
parameters.point_list, Cp=parameters.Cpoint, roi=parameters.roi)
if(parameters.print_out is True):
print('Norm of gradient: ' + str(np.linalg.norm(grad, np.inf)))
return grad
def calculate_radial_vel_cost_function(vrs, azs, els, u, v,
w, wts, rmsVr, weights, coeff=1.0):
"""
Calculates the cost function due to difference of the wind field from
radar radial velocities. For more information on this cost function, see
Potvin et al. (2012) and Shapiro et al. (2009).
All arrays in the given lists must have the same dimensions and represent
the same spatial coordinates.
Parameters
----------
vrs: List of float arrays
List of radial velocities from each radar
els: List of float arrays
List of elevations from each radar
azs: List of float arrays
List of azimuths from each radar
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
wts: List of float arrays
Float array containing fall speed from radar.
rmsVr: float
The sum of squares of velocity/num_points. Use for normalization
of data weighting coefficient
weights: n_radars x_bins x y_bins float array
Data weights for each pair of radars
coeff: float
Constant for cost function
Returns
-------
J_o: float
Observational cost function
References
-----------
    Potvin, C.K., A. Shapiro, and M. Xue, 2012: Impact of a Vertical Vorticity
Constraint in Variational Dual-Doppler Wind Analysis: Tests with Real and
Simulated Supercell Data. J. Atmos. Oceanic Technol., 29, 32–49,
https://doi.org/10.1175/JTECH-D-11-00019.1
    Shapiro, A., C.K. Potvin, and J. Gao, 2009: Use of a Vertical Vorticity
Equation in Variational Dual-Doppler Wind Analysis. J. Atmos. Oceanic
Technol., 26, 2089–2106, https://doi.org/10.1175/2009JTECHA1256.1
"""
J_o = 0
lambda_o = coeff / (rmsVr * rmsVr)
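    # Scale the data term by 1/rmsVr^2 so the penalty is normalized by the
    # magnitude of the observed radial velocities.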
for i in range(len(vrs)):
v_ar = (np.cos(els[i])*np.sin(azs[i])*u +
np.cos(els[i])*np.cos(azs[i])*v +
np.sin(els[i])*(w - np.abs(wts[i])))
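        # v_ar is the wind vector (u, v, w - w_t) projected onto the radar
        # beam direction given by the elevation and azimuth angles.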
the_weight = weights[i]
the_weight[els[i].mask] = 0
the_weight[azs[i].mask] = 0
the_weight[vrs[i].mask] = 0
the_weight[wts[i].mask] = 0
J_o += lambda_o*np.sum(np.square(vrs[i] - v_ar)*the_weight)
return J_o
def calculate_grad_radial_vel(vrs, els, azs, u, v, w,
wts, weights, rmsVr, coeff=1.0, upper_bc=True):
"""
Calculates the gradient of the cost function due to difference of wind
field from radar radial velocities.
All arrays in the given lists must have the same dimensions and represent
the same spatial coordinates.
Parameters
----------
vrs: List of float arrays
List of radial velocities from each radar
els: List of float arrays
List of elevations from each radar
azs: List of azimuths
List of azimuths from each radar
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
    wts: List of float arrays
        List of fall speeds from each radar
    weights: n_radars x_bins x y_bins float array
        Data weights for each pair of radars
    rmsVr: float
        The sum of squares of velocity/num_points. Used for normalization
        of the data weighting coefficient
    coeff: float
        Constant for cost function
Returns
-------
y: 1-D float array
Gradient vector of observational cost function.
More information
----------------
The gradient is calculated by taking the functional derivative of the
cost function. For more information on functional derivatives, see the
Euler-Lagrange Equation:
https://en.wikipedia.org/wiki/Euler%E2%80%93Lagrange_equation
"""
# Use zero for all masked values since we don't want to add them into
# the cost function
p_x1 = np.zeros(vrs[0].shape)
p_y1 = np.zeros(vrs[0].shape)
p_z1 = np.zeros(vrs[0].shape)
lambda_o = coeff / (rmsVr * rmsVr)
for i in range(len(vrs)):
v_ar = (np.cos(els[i])*np.sin(azs[i])*u +
np.cos(els[i])*np.cos(azs[i])*v +
np.sin(els[i])*(w - np.abs(wts[i])))
x_grad = (2*(v_ar - vrs[i]) * np.cos(els[i]) *
np.sin(azs[i]) * weights[i]) * lambda_o
y_grad = (2*(v_ar - vrs[i]) * np.cos(els[i]) *
np.cos(azs[i]) * weights[i]) * lambda_o
z_grad = (2*(v_ar - vrs[i]) * np.sin(els[i]) * weights[i]) * lambda_o
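        # Chain rule: the derivative of (v_ar - vr)^2 with respect to each wind
        # component is 2*(v_ar - vr) times that component's beam-projection factor.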
        # Zero the gradient wherever any of the input fields are masked
        x_grad[els[i].mask] = 0
        y_grad[els[i].mask] = 0
        z_grad[els[i].mask] = 0
        x_grad[azs[i].mask] = 0
        y_grad[azs[i].mask] = 0
        z_grad[azs[i].mask] = 0
        x_grad[vrs[i].mask] = 0
        y_grad[vrs[i].mask] = 0
        z_grad[vrs[i].mask] = 0
        x_grad[wts[i].mask] = 0
        y_grad[wts[i].mask] = 0
        z_grad[wts[i].mask] = 0
p_x1 += x_grad
p_y1 += y_grad
p_z1 += z_grad
# Impermeability condition
p_z1[0, :, :] = 0
if(upper_bc is True):
p_z1[-1, :, :] = 0
y = np.stack((p_x1, p_y1, p_z1), axis=0)
return y.flatten()
def calculate_smoothness_cost(u, v, w, Cx=1e-5, Cy=1e-5, Cz=1e-5):
"""
Calculates the smoothness cost function by taking the Laplacian of the
wind field.
All arrays in the given lists must have the same dimensions and represent
the same spatial coordinates.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
Cx: float
Constant controlling smoothness in x-direction
Cy: float
Constant controlling smoothness in y-direction
Cz: float
Constant controlling smoothness in z-direction
Returns
-------
Js: float
value of smoothness cost function
"""
du = np.zeros(w.shape)
dv = np.zeros(w.shape)
dw = np.zeros(w.shape)
scipy.ndimage.filters.laplace(u, du, mode='wrap')
scipy.ndimage.filters.laplace(v, dv, mode='wrap')
scipy.ndimage.filters.laplace(w, dw, mode='wrap')
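    # The returned cost is the weighted sum of squared Laplacians of each wind
    # component, which penalizes small-scale structure in the analysis.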
return np.sum(Cx*du**2 + Cy*dv**2 + Cz*dw**2)
def calculate_smoothness_gradient(u, v, w, Cx=1e-5, Cy=1e-5, Cz=1e-5,
upper_bc=True):
"""
Calculates the gradient of the smoothness cost function
by taking the Laplacian of the Laplacian of the wind field.
All arrays in the given lists must have the same dimensions and represent
the same spatial coordinates.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
Cx: float
Constant controlling smoothness in x-direction
Cy: float
Constant controlling smoothness in y-direction
Cz: float
Constant controlling smoothness in z-direction
Returns
-------
y: float array
value of gradient of smoothness cost function
"""
du = np.zeros(w.shape)
dv = np.zeros(w.shape)
dw = np.zeros(w.shape)
grad_u = np.zeros(w.shape)
grad_v = np.zeros(w.shape)
grad_w = np.zeros(w.shape)
scipy.ndimage.filters.laplace(u, du, mode='wrap')
scipy.ndimage.filters.laplace(v, dv, mode='wrap')
scipy.ndimage.filters.laplace(w, dw, mode='wrap')
scipy.ndimage.filters.laplace(du, grad_u, mode='wrap')
scipy.ndimage.filters.laplace(dv, grad_v, mode='wrap')
scipy.ndimage.filters.laplace(dw, grad_w, mode='wrap')
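    # Applying the Laplacian twice implements the functional derivative of
    # sum(C * laplacian(f)**2), which is 2 * C * laplacian(laplacian(f)).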
# Impermeability condition
grad_w[0, :, :] = 0
if(upper_bc is True):
grad_w[-1, :, :] = 0
y = np.stack([grad_u*Cx*2, grad_v*Cy*2, grad_w*Cz*2], axis=0)
return y.flatten()
def calculate_point_cost(u, v, x, y, z, point_list, Cp=1e-3, roi=500.0):
"""
Calculates the cost function related to point observations. A mean square error cost
function term is applied to points that are within the sphere of influence
whose radius is determined by *roi*.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
x: Float array
X coordinates of grid centers
y: Float array
Y coordinates of grid centers
z: Float array
Z coordinated of grid centers
point_list: list of dicts
List of point constraints.
Each member is a dict with keys of "u", "v", to correspond
to each component of the wind field and "x", "y", "z"
to correspond to the location of the point observation.
In addition, "site_id" gives the METAR code (or name) to the station.
Cp: float
The weighting coefficient of the point cost function.
roi: float
Radius of influence of observations
Returns
-------
J: float
The cost function related to the difference between wind field and points.
"""
J = 0.0
for the_point in point_list:
# Instead of worrying about whole domain, just find points in radius of influence
# Since we know that the weight will be zero outside the sphere of influence anyways
the_box = np.where(np.logical_and.reduce(
(np.abs(x - the_point["x"]) < roi, np.abs(y - the_point["y"]) < roi,
np.abs(z - the_point["z"]) < roi)))
J += np.sum(((u[the_box] - the_point["u"])**2 + (v[the_box] - the_point["v"])**2))
return J * Cp
def calculate_point_gradient(u, v, x, y, z, point_list, Cp=1e-3, roi=500.0):
"""
Calculates the gradient of the cost function related to point observations.
A mean square error cost function term is applied to points that are within the sphere of influence
whose radius is determined by *roi*.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
x: Float array
X coordinates of grid centers
y: Float array
Y coordinates of grid centers
z: Float array
        Z coordinates of grid centers
point_list: list of dicts
List of point constraints. Each member is a dict with keys of "u", "v",
to correspond to each component of the wind field and "x", "y", "z"
to correspond to the location of the point observation.
In addition, "site_id" gives the METAR code (or name) to the station.
Cp: float
The weighting coefficient of the point cost function.
roi: float
Radius of influence of observations
Returns
-------
gradJ: float array
The gradient of the cost function related to the difference between wind field and points.
"""
gradJ_u = np.zeros_like(u)
gradJ_v = np.zeros_like(v)
gradJ_w = np.zeros_like(u)
for the_point in point_list:
the_box = np.where(np.logical_and.reduce(
(np.abs(x - the_point["x"]) < roi, np.abs(y - the_point["y"]) < roi,
np.abs(z - the_point["z"]) < roi)))
gradJ_u[the_box] += 2 * (u[the_box] - the_point["u"])
gradJ_v[the_box] += 2 * (v[the_box] - the_point["v"])
gradJ = np.stack([gradJ_u, gradJ_v, gradJ_w], axis=0).flatten()
return gradJ * Cp
def calculate_mass_continuity(u, v, w, z, dx, dy, dz, coeff=1500.0, anel=1):
"""
Calculates the mass continuity cost function by taking the divergence
of the wind field.
All arrays in the given lists must have the same dimensions and represent
the same spatial coordinates.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
dx: float
Grid spacing in x direction.
dy: float
Grid spacing in y direction.
dz: float
Grid spacing in z direction.
z: Float array (1D)
1D Float array with heights of grid
coeff: float
Constant controlling contribution of mass continuity to cost function
anel: int
= 1 use anelastic approximation, 0=don't
Returns
-------
J: float
value of mass continuity cost function
"""
dudx = np.gradient(u, dx, axis=2)
dvdy = np.gradient(v, dy, axis=1)
dwdz = np.gradient(w, dz, axis=0)
if(anel == 1):
rho = np.exp(-z/10000.0)
drho_dz = np.gradient(rho, dz, axis=0)
anel_term = w/rho*drho_dz
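        # w/rho * drho/dz equals w * d(ln rho)/dz, the anelastic correction to
        # the incompressible divergence.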
else:
anel_term = np.zeros(w.shape)
return coeff*np.sum(np.square(dudx + dvdy + dwdz + anel_term))/2.0
def calculate_mass_continuity_gradient(u, v, w, z, dx,
dy, dz, coeff=1500.0, anel=1,
upper_bc=True):
"""
Calculates the gradient of mass continuity cost function. This is done by
taking the negative gradient of the divergence of the wind field.
All grids must have the same grid specification.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
z: Float array (1D)
1D Float array with heights of grid
dx: float
Grid spacing in x direction.
dy: float
Grid spacing in y direction.
dz: float
Grid spacing in z direction.
coeff: float
Constant controlling contribution of mass continuity to cost function
anel: int
= 1 use anelastic approximation, 0=don't
Returns
-------
y: float array
value of gradient of mass continuity cost function
"""
dudx = np.gradient(u, dx, axis=2)
dvdy = np.gradient(v, dy, axis=1)
dwdz = np.gradient(w, dz, axis=0)
if(anel == 1):
rho = np.exp(-z/10000.0)
drho_dz = np.gradient(rho, dz, axis=0)
anel_term = w/rho*drho_dz
else:
anel_term = 0
div2 = dudx + dvdy + dwdz + anel_term
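    # Each component's gradient is minus the spatial derivative of the total
    # divergence along that axis, scaled by the mass-continuity coefficient.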
grad_u = -np.gradient(div2, dx, axis=2)*coeff
grad_v = -np.gradient(div2, dy, axis=1)*coeff
grad_w = -np.gradient(div2, dz, axis=0)*coeff
# Impermeability condition
grad_w[0, :, :] = 0
if(upper_bc is True):
grad_w[-1, :, :] = 0
y = np.stack([grad_u, grad_v, grad_w], axis=0)
return y.flatten()
def calculate_fall_speed(grid, refl_field=None, frz=4500.0):
"""
Estimates fall speed based on reflectivity.
Uses methodology of <NAME> and <NAME>
Parameters
----------
Grid: Py-ART Grid
Py-ART Grid containing reflectivity to calculate fall speed from
refl_field: str
String containing name of reflectivity field. None will automatically
determine the name.
frz: float
Height of freezing level in m
Returns
-------
3D float array:
Float array of terminal velocities
"""
# Parse names of velocity field
if refl_field is None:
refl_field = pyart.config.get_field_name('reflectivity')
refl = grid.fields[refl_field]['data']
grid_z = grid.point_z['data']
term_vel = np.zeros(refl.shape)
A = np.zeros(refl.shape)
B = np.zeros(refl.shape)
rho = np.exp(-grid_z/10000.0)
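    # Piecewise power law: fall speed = A * 10**(B * refl) * (1.2 / rho)**0.4,
    # with A and B selected by reflectivity and height relative to the freezing level.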
A[np.logical_and(grid_z < frz, refl < 55)] = -2.6
B[np.logical_and(grid_z < frz, refl < 55)] = 0.0107
A[np.logical_and(grid_z < frz,
np.logical_and(refl >= 55, refl < 60))] = -2.5
B[np.logical_and(grid_z < frz,
np.logical_and(refl >= 55, refl < 60))] = 0.013
A[np.logical_and(grid_z < frz, refl > 60)] = -3.95
B[np.logical_and(grid_z < frz, refl > 60)] = 0.0148
A[np.logical_and(grid_z >= frz, refl < 33)] = -0.817
B[np.logical_and(grid_z >= frz, refl < 33)] = 0.0063
A[np.logical_and(grid_z >= frz,
np.logical_and(refl >= 33, refl < 49))] = -2.5
B[np.logical_and(grid_z >= frz,
np.logical_and(refl >= 33, refl < 49))] = 0.013
A[np.logical_and(grid_z >= frz, refl > 49)] = -3.95
B[np.logical_and(grid_z >= frz, refl > 49)] = 0.0148
fallspeed = A*np.power(10, refl*B)*np.power(1.2/rho, 0.4)
del A, B, rho
return fallspeed
def calculate_background_cost(u, v, w, weights, u_back, v_back, Cb=0.01):
"""
Calculates the background cost function. The background cost function is
simply the sum of the squared differences between the wind field and the
background wind field multiplied by the weighting coefficient.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
weights: Float array
Weights for each point to consider into cost function
u_back: 1D float array
Zonal winds vs height from sounding
    v_back: 1D float array
Meridional winds vs height from sounding
Cb: float
Weight of background constraint to total cost function
Returns
-------
cost: float
value of background cost function
"""
the_shape = u.shape
cost = 0
for i in range(the_shape[0]):
cost += (Cb*np.sum(np.square(u[i]-u_back[i])*(weights[i]) +
np.square(v[i]-v_back[i])*(weights[i])))
return cost
def calculate_background_gradient(u, v, w, weights, u_back, v_back, Cb=0.01):
"""
Calculates the gradient of the background cost function. For each u, v
this is given as 2*coefficent*(analysis wind - background wind).
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
weights: Float array
Weights for each point to consider into cost function
u_back: 1D float array
Zonal winds vs height from sounding
    v_back: 1D float array
Meridional winds vs height from sounding
Cb: float
Weight of background constraint to total cost function
Returns
-------
y: float array
value of gradient of background cost function
"""
the_shape = u.shape
u_grad = np.zeros(the_shape)
v_grad = np.zeros(the_shape)
w_grad = np.zeros(the_shape)
for i in range(the_shape[0]):
u_grad[i] = Cb*2*(u[i]-u_back[i])*(weights[i])
v_grad[i] = Cb*2*(v[i]-v_back[i])*(weights[i])
y = np.stack([u_grad, v_grad, w_grad], axis=0)
return y.flatten()
def calculate_vertical_vorticity_cost(u, v, w, dx, dy, dz, Ut, Vt,
coeff=1e-5):
"""
Calculates the cost function due to deviance from vertical vorticity
equation. For more information of the vertical vorticity cost function,
see Potvin et al. (2012) and Shapiro et al. (2009).
Parameters
----------
u: 3D array
Float array with u component of wind field
v: 3D array
Float array with v component of wind field
w: 3D array
Float array with w component of wind field
dx: float array
Spacing in x grid
dy: float array
Spacing in y grid
dz: float array
Spacing in z grid
coeff: float
Weighting coefficient
Ut: float
U component of storm motion
Vt: float
V component of storm motion
Returns
-------
Jv: float
Value of vertical vorticity cost function.
References
----------
    Potvin, C.K., A. Shapiro, and M. Xue, 2012: Impact of a Vertical Vorticity
Constraint in Variational Dual-Doppler Wind Analysis: Tests with Real and
Simulated Supercell Data. J. Atmos. Oceanic Technol., 29, 32–49,
https://doi.org/10.1175/JTECH-D-11-00019.1
    Shapiro, A., C.K. Potvin, and J. Gao, 2009: Use of a Vertical Vorticity
Equation in Variational Dual-Doppler Wind Analysis. J. Atmos. Oceanic
Technol., 26, 2089–2106, https://doi.org/10.1175/2009JTECHA1256.1
"""
dvdz = np.gradient(v, dz, axis=0)
dudz = np.gradient(u, dz, axis=0)
    dwdz = np.gradient(w, dz, axis=0)
dvdx = np.gradient(v, dx, axis=2)
dwdy = np.gradient(w, dy, axis=1)
dwdx = np.gradient(w, dx, axis=2)
dudx = | np.gradient(u, dx, axis=2) | numpy.gradient |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# GUI module generated by PAGE version 4.13
# In conjunction with Tcl version 8.6
# May 24, 2018 10:33:11 PM
import sys
import FCSoptionWindow #user-defined
import FPGAserial #user-defined
from threading import Thread
import io
import os
import queue
import time
import numpy as np
import matplotlib
import fileFormatter
import myCorr #user defined
import FCS_Analysis as fcs #user-defined
#import LaserController
import pandas as pd
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg#, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
try:
    # Python 2: file dialogs live in the tkFileDialog module
    from Tkinter import *
    from tkFileDialog import asksaveasfilename
    from tkFileDialog import askopenfilename
    from tkFileDialog import askopenfilenames
except ImportError:
    # Python 3
    from tkinter import *
    from tkinter.filedialog import asksaveasfilename
    from tkinter.filedialog import askopenfilename
    from tkinter.filedialog import askopenfilenames
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
import FCS_GUI_support
#from tkinter import*
def vp_start_gui():
'''Starting point when module is the main routine.'''
global val, w, root, expParams
root = Tk()
FCS_GUI_support.set_Tk_var()
top = New_Toplevel (root)
FCS_GUI_support.init(root, top)
root.mainloop()
w = None
def create_New_Toplevel(root, *args, **kwargs):
'''Starting point when module is imported by another program.'''
global w, w_win, rt
rt = root
w = Toplevel (root)
FCS_GUI_support.set_Tk_var()
top = New_Toplevel (w)
FCS_GUI_support.init(w, top, *args, **kwargs)
return (w, top)
def destroy_New_Toplevel():
global w
w.destroy()
w = None
class New_Toplevel:
def __init__(self, top=None):
'''This class configures and populates the toplevel window.
top is the toplevel containing window.'''
_bgcolor = '#d9d9d9' # X11 color: 'gray85'
_fgcolor = '#000000' # X11 color: 'black'
_compcolor = '#d9d9d9' # X11 color: 'gray85'
_ana1color = '#d9d9d9' # X11 color: 'gray85'
_ana2color = '#d9d9d9' # X11 color: 'gray85'
font10 = "-family {Segoe UI} -size 12 -weight normal -slant " \
"roman -underline 0 -overstrike 0"
font9 = "-family {Courier New} -size 12 -weight normal -slant " \
"roman -underline 0 -overstrike 0"
top.geometry("1300x650+25+25")
top.title("FalCorr FCS")
top.configure(background="#d9d9d9")
top.configure(highlightbackground="#d9d9d9")
top.configure(highlightcolor="black")
##List of useful variables##
self.dataType = np.uint16
self.timeScale = 0.5e-6
self.minTime = 1e-6
self.maxTau = 1
self.acqStatus = 0
self.maxTrialNum = 1
self.trialNum = 1
self.acqTime = 300
self.computeCFs = IntVar()
self.computeCFs.set(1)
self.displayResults = IntVar()
self.displayResults.set(1)
self.PCH1 = np.zeros(0)
self.PCH2 = np.zeros(0)
self.bins = np.zeros(0)
self.CF = np.zeros(0)
self.loadNPZfile = ''
self.correlations = [1,1,1,1] #default to all possible cross-correlations and count rate
try:
self.fpga = FPGAserial.openFPGA(mode = 'FCS')
acqState = 'normal'
except:
self.fpga = FPGAserial.openFPGA(mode= 'None')
acqState = 'disabled'
print('No FPGA Connected.\nRunning in analysis only mode.')
self.Label1 = Label(top)
self.Label1.place(relx=0.01, rely=0.07, height=31, width=50)
self.Label1.configure(activebackground="#f9f9f9")
self.Label1.configure(activeforeground="black")
self.Label1.configure(background="#d9d9d9")
self.Label1.configure(disabledforeground="#a3a3a3")
self.Label1.configure(font=font10)
self.Label1.configure(foreground="#000000")
self.Label1.configure(highlightbackground="#d9d9d9")
self.Label1.configure(highlightcolor="black")
self.Label1.configure(text='''Trial #''')
self.fig = Figure(figsize=(12,5), dpi=100,facecolor="#d9d9d9")
self.myPlot = self.fig.add_subplot(122)
self.myPlotPCH = self.fig.add_subplot(121)
self.plotAxes = FigureCanvasTkAgg(self.fig,root)
self.plotAxes.get_tk_widget().pack(anchor='se',fill = "none",in_ = top)
self.currentTrialStr = StringVar()
self.currentTrial = Entry(top)
self.currentTrial.place(relx=0.01, rely=0.13,height=41, relwidth=0.11)
self.currentTrial.configure(background="white")
self.currentTrial.configure(disabledforeground="#a3a3a3")
self.currentTrial.configure(font=font10, justify =CENTER)
self.currentTrial.configure(foreground="#000000")
self.currentTrial.configure(highlightbackground="#d9d9d9")
self.currentTrial.configure(highlightcolor="black")
self.currentTrial.configure(insertbackground="black")
self.currentTrial.configure(selectbackground="#c4c4c4")
self.currentTrial.configure(selectforeground="black")
self.currentTrial.configure(textvariable=self.currentTrialStr)
self.currentTrial.configure(state="readonly")
self.currentTrialStr.set(str(self.trialNum))
self.runningIndVar = IntVar()
self.runningInd = Checkbutton(top,command=self.setRunInd)
self.runningInd.place(relx=0.3, rely=0.85, relheight=0.06
, relwidth=0.12)
self.runningInd.configure(activebackground="#d9d9d9")
self.runningInd.configure(activeforeground="#000000")
self.runningInd.configure(background="#d9d9d9")
self.runningInd.configure(disabledforeground="#a3a3a3")
self.runningInd.configure(font=font10)
self.runningInd.configure(foreground="#000000")
self.runningInd.configure(highlightbackground="#d9d9d9")
self.runningInd.configure(highlightcolor="black")
self.runningInd.configure(justify=LEFT)
self.runningInd.configure(state=ACTIVE)
self.runningInd.configure(text='''Running''')
self.runningInd.configure(variable=self.runningIndVar)
self.runningInd.invoke()
self.PCH1_IndVar = IntVar()
self.PCH1_IndVar.set(1)
self.PCH1_Ind = Checkbutton(top,command=self.graphPCH)
self.PCH1_Ind.place(relx=0.25, rely=0.8, relheight=0.06
, relwidth=0.12)
self.PCH1_Ind.configure(activebackground="#d9d9d9")
self.PCH1_Ind.configure(activeforeground="#000000")
self.PCH1_Ind.configure(background="#d9d9d9")
self.PCH1_Ind.configure(disabledforeground="#a3a3a3")
self.PCH1_Ind.configure(font=font10)
self.PCH1_Ind.configure(foreground="#000000")
self.PCH1_Ind.configure(highlightbackground="#d9d9d9")
self.PCH1_Ind.configure(highlightcolor="black")
self.PCH1_Ind.configure(justify=LEFT)
self.PCH1_Ind.configure(state=ACTIVE)
self.PCH1_Ind.configure(text='''Ch1 PCH''')
self.PCH1_Ind.configure(variable=self.PCH1_IndVar)
self.PCH2_IndVar = IntVar()
self.PCH2_IndVar.set(1)
self.PCH2_Ind = Checkbutton(top,command=self.graphPCH)
self.PCH2_Ind.place(relx=0.35, rely=0.8, relheight=0.06
, relwidth=0.12)
self.PCH2_Ind.configure(activebackground="#d9d9d9")
self.PCH2_Ind.configure(activeforeground="#000000")
self.PCH2_Ind.configure(background="#d9d9d9")
self.PCH2_Ind.configure(disabledforeground="#a3a3a3")
self.PCH2_Ind.configure(font=font10)
self.PCH2_Ind.configure(foreground="#000000")
self.PCH2_Ind.configure(highlightbackground="#d9d9d9")
self.PCH2_Ind.configure(highlightcolor="black")
self.PCH2_Ind.configure(justify=LEFT)
self.PCH2_Ind.configure(state=ACTIVE)
self.PCH2_Ind.configure(text='''Ch2 PCH''')
self.PCH2_Ind.configure(variable=self.PCH2_IndVar)
self.LabelDh1 = Label(top)
self.LabelDh1.place(relx=0.04, rely=0.72, height=41, width=70)
self.LabelDh1.configure(activebackground="#f9f9f9")
self.LabelDh1.configure(activeforeground="black")
self.LabelDh1.configure(background="#d9d9d9")
self.LabelDh1.configure(disabledforeground="#a3a3a3")
self.LabelDh1.configure(font=font10)
self.LabelDh1.configure(foreground="#000000")
self.LabelDh1.configure(highlightbackground="#d9d9d9")
self.LabelDh1.configure(highlightcolor="black")
self.LabelDh1.configure(text='''D \u2095 1(nm)''')
self.LabelDh2 = Label(top)
self.LabelDh2.place(relx=0.04, rely=0.85, height=41, width=70)
self.LabelDh2.configure(activebackground="#f9f9f9")
self.LabelDh2.configure(activeforeground="black")
self.LabelDh2.configure(background="#d9d9d9")
self.LabelDh2.configure(disabledforeground="#a3a3a3")
self.LabelDh2.configure(font=font10)
self.LabelDh2.configure(foreground="#000000")
self.LabelDh2.configure(highlightbackground="#d9d9d9")
self.LabelDh2.configure(highlightcolor="black")
self.LabelDh2.configure(text='''D \u2095 2(nm)''')
self.hydroDiamStr1 = StringVar()
self.hydroDiam1 = Entry(top)
self.hydroDiam1.place(relx=0.01, rely=0.77,height=41, relwidth=0.11)
self.hydroDiam1.configure(background="white")
self.hydroDiam1.configure(disabledforeground="#a3a3a3")
self.hydroDiam1.configure(font=font10, justify =CENTER)
self.hydroDiam1.configure(foreground="#000000")
self.hydroDiam1.configure(highlightbackground="#d9d9d9")
self.hydroDiam1.configure(highlightcolor="black")
self.hydroDiam1.configure(insertbackground="black")
self.hydroDiam1.configure(selectbackground="#c4c4c4")
self.hydroDiam1.configure(selectforeground="black")
self.hydroDiam1.configure(textvariable=self.hydroDiamStr1)
self.hydroDiam1.configure(state="readonly")
self.hydroDiamStr1.set('-')
self.hydroDiamStr2 = StringVar()
self.hydroDiam2 = Entry(top)
self.hydroDiam2.place(relx=0.01, rely=0.90,height=41, relwidth=0.11)
self.hydroDiam2.configure(background="white")
self.hydroDiam2.configure(disabledforeground="#a3a3a3")
self.hydroDiam2.configure(font=font10, justify =CENTER)
self.hydroDiam2.configure(foreground="#000000")
self.hydroDiam2.configure(highlightbackground="#d9d9d9")
self.hydroDiam2.configure(highlightcolor="black")
self.hydroDiam2.configure(insertbackground="black")
self.hydroDiam2.configure(selectbackground="#c4c4c4")
self.hydroDiam2.configure(selectforeground="black")
self.hydroDiam2.configure(textvariable=self.hydroDiamStr2)
self.hydroDiam2.configure(state="readonly")
self.hydroDiamStr2.set('-')
self.LabelAlpha = Label(top)
self.LabelAlpha.place(relx=0.175, rely=0.85, height=41, width=60)
self.LabelAlpha.configure(activebackground="#f9f9f9")
self.LabelAlpha.configure(activeforeground="black")
self.LabelAlpha.configure(background="#d9d9d9")
self.LabelAlpha.configure(disabledforeground="#a3a3a3")
self.LabelAlpha.configure(font=font10)
self.LabelAlpha.configure(foreground="#000000")
self.LabelAlpha.configure(highlightbackground="#d9d9d9")
self.LabelAlpha.configure(highlightcolor="black")
self.LabelAlpha.configure(text='''\u03B1''')
self.alphaStr = StringVar()
self.alpha = Entry(top)
self.alpha.place(relx=0.15, rely=0.90,height=41, relwidth=0.10)
self.alpha.configure(background="white")
self.alpha.configure(disabledforeground="#a3a3a3")
self.alpha.configure(font=font10, justify =CENTER)
self.alpha.configure(foreground="#000000")
self.alpha.configure(highlightbackground="#d9d9d9")
self.alpha.configure(highlightcolor="black")
self.alpha.configure(insertbackground="black")
self.alpha.configure(selectbackground="#c4c4c4")
self.alpha.configure(selectforeground="black")
self.alpha.configure(textvariable=self.alphaStr)
self.alpha.configure(state="readonly")
self.alphaStr.set('-')
self.LabelN = Label(top)
self.LabelN.place(relx=0.175, rely=0.72, height=41, width=85)
self.LabelN.configure(activebackground="#f9f9f9")
self.LabelN.configure(activeforeground="black")
self.LabelN.configure(background="#d9d9d9")
self.LabelN.configure(disabledforeground="#a3a3a3")
self.LabelN.configure(font=font10)
self.LabelN.configure(foreground="#000000")
self.LabelN.configure(highlightbackground="#d9d9d9")
self.LabelN.configure(highlightcolor="black")
self.LabelN.configure(text='''<N>/C (nM)''')
self.NStr = StringVar()
self.N = Entry(top)
self.N.place(relx=0.15, rely=0.77,height=41, relwidth=0.11)
self.N.configure(background="white")
self.N.configure(disabledforeground="#a3a3a3")
self.N.configure(font=font10, justify =CENTER)
self.N.configure(foreground="#000000")
self.N.configure(highlightbackground="#d9d9d9")
self.N.configure(highlightcolor="black")
self.N.configure(insertbackground="black")
self.N.configure(selectbackground="#c4c4c4")
self.N.configure(selectforeground="black")
self.N.configure(textvariable=self.NStr)
self.N.configure(state="readonly")
self.NStr.set('-')
self.Label2 = Label(top)
self.Label2.place(relx=0.01, rely=0.22, height=27, width=20)
self.Label2.configure(activebackground="#f9f9f9")
self.Label2.configure(activeforeground="black")
self.Label2.configure(background="#d9d9d9")
self.Label2.configure(disabledforeground="#a3a3a3")
self.Label2.configure(font=font10)
self.Label2.configure(foreground="#000000")
self.Label2.configure(highlightbackground="#d9d9d9")
self.Label2.configure(highlightcolor="black")
self.Label2.configure(text='''of''')
self.maxTrialStr = StringVar()
self.maxTrials = Entry(top)
self.maxTrials.place(relx=0.01, rely=0.29,height=40, relwidth=0.11)
self.maxTrials.configure(background="white")
self.maxTrials.configure(disabledforeground="#a3a3a3")
self.maxTrials.configure(font=font10, justify = CENTER)
self.maxTrials.configure(foreground="#000000")
self.maxTrials.configure(highlightbackground="#d9d9d9")
self.maxTrials.configure(highlightcolor="black")
self.maxTrials.configure(insertbackground="black")
self.maxTrials.configure(selectbackground="#c4c4c4")
self.maxTrials.configure(selectforeground="black")
self.maxTrials.configure(textvariable=self.maxTrialStr)
self.maxTrials.configure(state="readonly")
self.maxTrialStr.set(str(self.maxTrialNum))
self.menubar = Menu(top,font="TkMenuFont",bg=_bgcolor,fg=_fgcolor)
top.configure(menu = self.menubar)
self.file = Menu(top,tearoff=0)
self.menubar.add_cascade(menu=self.file,
activebackground="#d9d9d9",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
label="File")
self.file.add_command(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
label="Set Save Path",
accelerator = 'ctrl+s',
state = acqState,
command = self.setSavePath)
self.file.add_command(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
label="Start Acq.",
accelerator = 'ctrl+b',
state = acqState,
command=self.acquireData)
self.file.add_command(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
accelerator = 'ctrl+e',
label="StopAcq",
state = acqState,
command = self.stopAcq)
self.file.add_command(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
accelerator = 'ctrl+o',
label="Load CF for Analysis",
command = self.loadNPZ)
self.file.add_command(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
accelerator = 'ctrl+l',
label="Load bin file(s)",
command = self.loadBinFile)
self.file.add_command(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
accelerator = 'ctrl+m',
label="Overlay Multiple CFs",
command = self.loadMultNPZ)
self.file.add_command(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
label="Overlay and Average CFs",
command = self.overlayAndAverage)
self.file.add_command(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
label="Export NPZ to CSV",
command = self.exportToCSV)
self.file.add_command(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
accelerator = 'ctrl+s',
label="Save Figure",
command = self.saveAxes)
self.file.add_command(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
label="Quit",
accelerator = 'ctrl+q',
command=self.quitProg,
)
##establish hot keys##
root.bind_all('<Control-Key-q>', self.quitProg)
root.bind_all('<Control-Key-b>', self.acquireData)
root.bind_all('<Control-Key-e>', self.stopAcq)
root.bind_all('<Control-Key-s>', self.setSavePath)
#Mode control variables
self.modeVarDL = IntVar()
self.modeVarCR = IntVar()
self.modeVarCR.set(1)
self.mode = Menu(top,tearoff=0)
self.menubar.add_cascade(menu=self.mode,
activebackground="#d9d9d9",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
state = acqState,
label="Mode")
self.mode.add_checkbutton(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
label="Data Logging",
variable = self.modeVarDL,
command = self.setModeDL)
self.mode.add_checkbutton(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
label="Count Rate",
variable = self.modeVarCR,
command = self.setModeCR)
self.options = Menu(top,tearoff=0)
self.menubar.add_cascade(menu=self.options,
activebackground="#d9d9d9",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
state = acqState,
label="Options")
self.options.add_checkbutton(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
variable = self.computeCFs,
label="Save CFs")
self.options.add_checkbutton(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
variable = self.displayResults,
label="Display Results")
self.options.add_command(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
label="Set Parameters...",
command = self.callOptionWindow)
self.analysis = Menu(top,tearoff=0)
self.noAnalysisVar = IntVar()
self.noAnalysisVar.set(1)
self.simpleMonoAnalysisVar = IntVar()
self.simpleMonoAnalysisVar.set(0)
self.simpleBiAnalysisVar = IntVar()
self.simpleBiAnalysisVar.set(0)
self.tripletMonoAnalysisVar = IntVar()
self.tripletMonoAnalysisVar.set(0)
self.tripletBiAnalysisVar = IntVar()
self.tripletBiAnalysisVar.set(0)
self.simpleAnomalousAnalysisVar = IntVar()
self.simpleAnomalousAnalysisVar.set(0)
self.tripletAnomalousAnalysisVar = IntVar()
self.tripletAnomalousAnalysisVar.set(0)
self.maxEntropyAnalysisVar = IntVar()
self.maxEntropyAnalysisVar.set(0)
self.menubar.add_cascade(menu=self.analysis,
activebackground="#d9d9d9",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
label="Analysis")
self.analysis.add_checkbutton(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
label="None",
variable = self.noAnalysisVar,
command = self.clearAnalysis)
self.analysis.add_checkbutton(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
label="Simple Monodisperse",
command=self.clearNone,
variable = self.simpleMonoAnalysisVar)
self.analysis.add_checkbutton(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
command = self.clearNone,
label="Simple Bimodal",
variable = self.simpleBiAnalysisVar)
self.analysis.add_checkbutton(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
command = self.clearNone,
label="Triplet Monomodal",
variable = self.tripletMonoAnalysisVar)
self.analysis.add_checkbutton(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
command = self.clearNone,
label="Triplet Bimodal",
variable = self.tripletBiAnalysisVar)
self.analysis.add_checkbutton(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
command = self.clearNone,
label="Simple Anomalous",
variable = self.simpleAnomalousAnalysisVar)
self.analysis.add_checkbutton(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
command = self.clearNone,
label="Triplet Anomalous",
variable = self.tripletAnomalousAnalysisVar)
self.analysis.add_command(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
accelerator = 'ctrl+c',
label="Analzye Current Data Set(s)",
command = self.analyzeData)
#Laser Excitation Menu
self.excitation = Menu(top,tearoff=0)
self.argon = IntVar()
self.argon.set(1)
self.hene = IntVar()
self.hene.set(0)
self.menubar.add_cascade(menu=self.excitation,
activebackground="#d9d9d9",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
label="Excitation")
self.excitation.add_checkbutton(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
label="488 nm",
variable = self.argon)
self.excitation.add_checkbutton(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
label="543 nm",
variable = self.hene)
#Help Menu
self.helpMenu = Menu(top,tearoff = 0)
self.menubar.add_cascade(menu=self.helpMenu,
activebackground="#d9d9d9",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
label="Help")
self.helpMenu.add_command(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
label="About",
command = self.aboutGUI)
self.helpMenu.add_command(
activebackground="#d8d8d8",
activeforeground="#000000",
background="#d9d9d9",
font="TkMenuFont",
foreground="#000000",
accelerator = 'ctrl+c',
label="Calibrate...",
command = self.calibrate)
# self.excitation.add_command(
# activebackground="#d8d8d8",
# activeforeground="#000000",
# background="#d9d9d9",
# font="TkMenuFont",
# foreground="#000000",
# label="Adjust Argon Laser Power",
# state = acqState,
# command = self.laserPowerCntrl)
self.Label4 = Label(top)
self.Label4.place(relx=0.01, rely=0.39, height=41, width=135)
self.Label4.configure(activebackground="#f9f9f9")
self.Label4.configure(activeforeground="black")
self.Label4.configure(background="#d9d9d9")
self.Label4.configure(disabledforeground="#a3a3a3")
self.Label4.configure(font=font10)
self.Label4.configure(foreground="#000000")
self.Label4.configure(highlightbackground="#d9d9d9")
self.Label4.configure(highlightcolor="black")
self.Label4.configure(text='''Ch1 Count (kHz)''')
self.Ch1countRateStr = StringVar()
self.Ch1countRate = Entry(top)
self.Ch1countRate.place(relx=0.01, rely=0.44,height=40, relwidth=0.11)
self.Ch1countRate.configure(background="white")
self.Ch1countRate.configure(disabledforeground="#a3a3a3")
self.Ch1countRate.configure(font=font10,justify = CENTER)
self.Ch1countRate.configure(foreground="#000000")
self.Ch1countRate.configure(highlightbackground="#d9d9d9")
self.Ch1countRate.configure(highlightcolor="black")
self.Ch1countRate.configure(insertbackground="black")
self.Ch1countRate.configure(selectbackground="#c4c4c4")
self.Ch1countRate.configure(selectforeground="black")
self.Ch1countRate.configure(textvariable=self.Ch1countRateStr)
self.Ch1countRate.configure(state = "readonly")
self.Ch1countRateStr.set("-")
self.Label7 = Label(top)
self.Label7.place(relx=0.01, rely=0.57, height=41, width=135)
self.Label7.configure(activebackground="#f9f9f9")
self.Label7.configure(activeforeground="black")
self.Label7.configure(background="#d9d9d9")
self.Label7.configure(disabledforeground="#a3a3a3")
self.Label7.configure(font=font10)
self.Label7.configure(foreground="#000000")
self.Label7.configure(highlightbackground="#d9d9d9")
self.Label7.configure(highlightcolor="black")
self.Label7.configure(text='''Ch2 Count (kHz)''')
self.Ch2countRateStr = StringVar()
self.Ch2countRate = Entry(top)
self.Ch2countRate.place(relx=0.01, rely=0.62,height=40, relwidth=0.11)
self.Ch2countRate.configure(background="white")
self.Ch2countRate.configure(disabledforeground="#a3a3a3")
self.Ch2countRate.configure(font=font10,justify = CENTER)
self.Ch2countRate.configure(foreground="#000000")
self.Ch2countRate.configure(highlightbackground="#d9d9d9")
self.Ch2countRate.configure(highlightcolor="black")
self.Ch2countRate.configure(insertbackground="black")
self.Ch2countRate.configure(selectbackground="#c4c4c4")
self.Ch2countRate.configure(selectforeground="black")
self.Ch2countRate.configure(textvariable=self.Ch2countRateStr)
self.Ch2countRate.configure(state = "readonly")
self.Ch2countRateStr.set("-")
self.Label8 = Label(top)
self.Label8.place(relx=0.7, rely=0.8, height=30, width=135)
self.Label8.configure(activebackground="#f9f9f9")
self.Label8.configure(activeforeground="black")
self.Label8.configure(background="#d9d9d9")
self.Label8.configure(disabledforeground="#a3a3a3")
self.Label8.configure(font=font10)
self.Label8.configure(foreground="#000000")
self.Label8.configure(highlightbackground="#d9d9d9")
self.Label8.configure(highlightcolor="black")
self.Label8.configure(text='\u03c4 min')
self.tauMinStr = StringVar()
self.tauMin = Entry(top)
self.tauMin.place(relx=0.8, rely=0.8,height=30, relwidth=0.11)
self.tauMin.configure(background="white")
self.tauMin.configure(disabledforeground="#a3a3a3")
self.tauMin.configure(font=font10,justify = CENTER)
self.tauMin.configure(foreground="#000000")
self.tauMin.configure(highlightbackground="#d9d9d9")
self.tauMin.configure(highlightcolor="black")
self.tauMin.configure(insertbackground="black")
self.tauMin.configure(selectbackground="#c4c4c4")
self.tauMin.configure(selectforeground="black")
self.tauMin.configure(textvariable=self.tauMinStr)
self.tauMinStr.set("1e-6")
self.Label9 = Label(top)
self.Label9.place(relx=0.7, rely=0.9, height=30, width=135)
self.Label9.configure(activebackground="#f9f9f9")
self.Label9.configure(activeforeground="black")
self.Label9.configure(background="#d9d9d9")
self.Label9.configure(disabledforeground="#a3a3a3")
self.Label9.configure(font=font10)
self.Label9.configure(foreground="#000000")
self.Label9.configure(highlightbackground="#d9d9d9")
self.Label9.configure(highlightcolor="black")
self.Label9.configure(text='\u03c4 max')
self.tauMaxStr = StringVar()
self.tauMax = Entry(top)
self.tauMax.place(relx=0.8, rely=0.9,height=30, relwidth=0.11)
self.tauMax.configure(background="white")
self.tauMax.configure(disabledforeground="#a3a3a3")
self.tauMax.configure(font=font10,justify = CENTER)
self.tauMax.configure(foreground="#000000")
self.tauMax.configure(highlightbackground="#d9d9d9")
self.tauMax.configure(highlightcolor="black")
self.tauMax.configure(insertbackground="black")
self.tauMax.configure(selectbackground="#c4c4c4")
self.tauMax.configure(selectforeground="black")
self.tauMax.configure(textvariable=self.tauMaxStr)
self.tauMaxStr.set("1")
self.refresh = Button(top)
self.refresh.place(relx=0.65, rely=0.85,height=30, relwidth=0.05)
self.refresh.configure(background="#f9f9f9")
self.refresh.configure(disabledforeground="#a3a3a3")
self.refresh.configure(font=font10,justify = CENTER)
self.refresh.configure(foreground="#000000")
self.refresh.configure(highlightbackground="#d9d9d9")
self.refresh.configure(highlightcolor="black")
self.refresh.configure(text = 'Refresh')
self.refresh.configure(command = self.refreshAxes)
        self.maxTime = 1
        self.minTime = 1e-6
self.analysisMode = "None"
self.savePath = ""
self.mode = "Count Rate"
self.pathChanged = 0 #indicate if file save path updated or not
self.dataQ = queue.Queue(0)
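        # Unbounded queue used to pass raw serial reads from logData() to
        # queueToFile(), which writes them to disk.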
def setRunInd(self):
if self.acqStatus:
self.runningIndVar.set(1)
else:
self.runningIndVar.set(0)
def quitProg(self,*args): #exit program
self.fpga.closeFPGA()
root.destroy()
sys.exit(1)
def acquireData(self,*args):
if self.acqStatus == 0: #don't allow repeats for button presses.
if self.mode == "Count Rate":
self.acqStatus = 1
self.runningInd.invoke()
root.update()
t = Thread(target=self.streamCountRate)#, args=self)
self.fpga.startAcq()
t.start()
elif self.mode == "Data Logging":
self.fpga.setAcqTime(self.acqTime)
bytesToRead = 4000 #Read length
self.acqStatus = 1
self.runningInd.invoke()
#make sure you have a new file to save
if not self.pathChanged:
self.setSavePath()
#If valid path
if self.pathChanged:
#start loop
for j in range(0,self.maxTrialNum):
self.myPlot.cla() #clear axes
outfile = self.savePath + "_Trial_" + str(j+1) + ".bin" #file to save to
self.loadBinFile = outfile #update latest load file to current file
#Update GUI
self.trialNum = j+1
self.currentTrialStr.set(str(self.trialNum))
root.update()
#Log Data set
fid = io.open(outfile,mode='wb')
self.logData(bytesToRead)
self.queueToFile(fid)
#compute CFs
if self.computeCFs.get():
self.computeCorrFun(outfile) #compute desired correlations
#display results
if self.displayResults.get():
mask = np.less_equal(self.bins,self.maxTime) & np.greater_equal(self.bins,self.minTime)
self.myPlot.semilogx(self.bins[mask],self.CF[mask],**{'linestyle':'none','marker':'o','label':'raw'})
self.myPlot.grid(b=True,which = 'minor',linewidth=0.5)
self.myPlot.set_xlabel("\u03C4 (s)")
self.myPlot.set_ylabel("G(\u03C4)")
self.myPlot.autoscale(enable =True,axis = 'y')
self.plotAxes.draw()
#Perform fit and analysis
if not(self.noAnalysisVar.get()):
self.analyzeData()
#Exit for loop
self.trialNum = 1 #reset trial number counter
self.runningInd.invoke() #update running indicator
self.pathChanged = 0 #protects against accidental overwrite
else:
print("No such mode")
def streamCountRate(self):
refreshRate = 0.25
self.fpga.setAcqTime(5) #set to arbitrary acquisition
countRateCh1 = 0
countRateCh2 = 0
while self.acqStatus:
countRates = self.fpga.getCountRateDualChannel(self.dataType,self.timeScale)
countRateCh1 = countRates[0]
countRateCh2 = countRates[1]
self.Ch1countRateStr.set('%.3f' % countRateCh1)
self.Ch2countRateStr.set('%.3f' % countRateCh2)
time.sleep(refreshRate)
self.fpga.ser.reset_input_buffer() #Clear extraneous data from the serial input buffer
self.fpga.setAcqTime(self.acqTime)#set back to original
self.fpga.stopAcq()
def logData(self,bytesToRead):
self.acqStatus =1
#initialize start time
startTime = time.time()
self.fpga.startAcq() #write start to board
while self.acqStatus:
if (time.time()-startTime < self.acqTime+1 or self.fpga.dataWaiting()>0):
b = self.fpga.read(bytesToRead) #read from port
self.dataQ.put(b) #Write to queue
else:
break
self.fpga.stopAcq()
self.acqStatus = 0
def stopAcq(self,*args):
self.acqStatus = 0
self.fpga.stopAcq()
self.runningInd.invoke()
def callOptionWindow(self,mode= 'normal'):
params = FCSoptionWindow.create_New_OptionWindow(root,mode)
if params[1].newData == "Yes":
self.acqTime = params[1].acqDur
self.maxTrialNum = params[1].maxNumTrials
self.maxTrialStr.set(str(self.maxTrialNum))
self.correlations = params[1].correlations
def setSavePath(self,*args):
self.savePath = asksaveasfilename()
if len(self.savePath):
self.pathChanged = 1
def queueToFile(self,fid): #writes data in queue to binary file
while (self.acqStatus or (not self.dataQ.empty())):
if not self.dataQ.empty():
b = self.dataQ.get()
fid.write(b)
self.dataQ.task_done()
fid.close()
#updates mode menu items to be self-consistent
def setModeDL(self):
self.stopAcq()
self.acqStatus = 0
self.modeVarDL.set(1)
self.modeVarCR.set(0)
self.mode = "Data Logging"
#updates mode menu items to be self-consistent
def setModeCR(self):
self.stopAcq()
self.modeVarDL.set(0)
self.modeVarCR.set(1)
self.mode = "Count Rate"
#updates analysis menu items to be self-consistent
def clearAnalysis(self):
self.noAnalysisVar.set(1)
self.simpleMonoAnalysisVar.set(0)
self.simpleBiAnalysisVar.set(0)
self.tripletMonoAnalysisVar.set(0)
self.tripletBiAnalysisVar.set(0)
self.simpleAnomalousAnalysisVar.set(0)
self.tripletAnomalousAnalysisVar.set(0)
self.maxEntropyAnalysisVar.set(0)
#updates analysis menu items to be self-consistent
def clearNone(self):
self.noAnalysisVar.set(0)
#Load and display previous calculation results
def loadNPZ(self,fileName = ''):
#Prompt for file name if none provided
if not fileName:
self.loadNPZfile = askopenfilename(title = "Select NPZ File",filetypes = (("NPZ Files","*.npz"),("all files","*.*")))
else:
self.loadNPZfile = fileName
if self.loadNPZfile:
self.myPlot.cla() #clear axes
self.myPlotPCH.cla() #clear axes
#Load and Display Correlation Function
data = np.load(self.loadNPZfile)
self.bins = data['bins']
self.CF = data['CF']
self.graphCF(lastPlot = True)
#Load and display count rates
CR = data['countRates']
countRateCh1 = CR[0]
countRateCh2 = CR[1]
self.Ch1countRateStr.set('%.3f' % countRateCh1)
self.Ch2countRateStr.set('%.3f' % countRateCh2)
#Load and Display PCHs
PCH = data['PCH']
self.PCH1 = PCH[0]
self.PCH2 = PCH[1]
self.graphPCH(lastPlot = True)
def graphCF(self,lastPlot = False, color = None):
#Load and Display Correlation Function
mask = np.less_equal(self.bins,self.maxTime) & np.greater_equal(self.bins,self.minTime)#only show results in specified window
if color == None:
self.myPlot.semilogx(self.bins[mask],self.CF[mask],**{'linestyle':'none','marker':'o'})
else:
self.myPlot.semilogx(self.bins[mask],self.CF[mask],**{'linestyle':'-','linewidth':2,'color':color})
self.myPlot.grid(b=True,which = 'minor',linewidth=0.5)
self.myPlot.set_xlabel("\u03C4 (s)")
self.myPlot.set_ylabel("G(\u03C4)")
self.myPlot.autoscale(enable =True,axis = 'y')
if lastPlot:
self.plotAxes.draw()
#Graph PCH
def graphPCH(self,lastPlot = False, color = [None,None]):
if len(self.PCH1) or len(self.PCH2): #If initialized
if self.PCH1_IndVar.get() and (not np.isnan(self.PCH1[0][0])):
histLen = max(len(self.PCH1[0]),len(self.PCH1[1]))
if color[0] ==None:
self.myPlotPCH.semilogy(self.PCH1[1][0:histLen-1],self.PCH1[0][0:histLen-1],**{'marker':'o'})
else:
self.myPlotPCH.semilogy(self.PCH1[1][0:histLen-1],self.PCH1[0][0:histLen-1],**{'marker':'o','color':color[0],'linestyle':'--'})
if self.PCH2_IndVar.get() and (not np.isnan(self.PCH2[0][0])):
histLen = max(len(self.PCH2[0]),len(self.PCH2[1]))
if color[1] ==None:
self.myPlotPCH.semilogy(self.PCH2[1][0:histLen-1],self.PCH2[0][0:histLen-1],**{'marker':'o'})
else:
self.myPlotPCH.semilogy(self.PCH2[1][0:histLen-1],self.PCH2[0][0:histLen-1],**{'marker':'o','color':color[1],'linestyle':'--'})
self.myPlotPCH.set_xlabel("Counts Per Interval")
self.myPlotPCH.set_ylabel("Probability")
self.myPlotPCH.autoscale(enable =True,axis = 'y')
if lastPlot:
self.plotAxes.draw()
def loadBinFile(self): #Load bin files and compute selected CFs. Results saved to final
binFileList = askopenfilenames(title = "Select .bin file(s)",filetypes = (("bin files","*.bin"),("all files","*.*")))
params = self.callOptionWindow(mode = 'disabled') #Select CFs to compute
#set to save and display
temp1 = self.computeCFs.get()
temp2 = self.displayResults.get()
self.computeCFs.set(1)
self.displayResults.set(1)
#Indicate computations taking place
self.acqStatus = 1
self.runningInd.invoke()
root.update()
for j in range(0,len(binFileList)): #Load files and compute selected CFs
self.computeCorrFun(binFileList[j])
if len(binFileList)==1:
self.loadNPZ(self.loadNPZfile)
#Indicate computations completed
self.acqStatus = 0
self.runningInd.invoke()
#set save and display preferences back to normal
self.computeCFs.set(temp1)
self.displayResults.set(temp2)
def loadMultNPZ(self,NPZFileList=''):
#Prompt for file names if none provided
if not NPZFileList:
self.loadNPZfile = askopenfilenames(title = "Select NPZ Files",filetypes = (("NPZ Files","*.npz"),("all files","*.*")))
else:
self.loadNPZfile = NPZFileList
if self.loadNPZfile:
self.myPlot.cla() #clear axes
self.myPlotPCH.cla()
#Load Data
for j in range(0,len(self.loadNPZfile)):
data = np.load(self.loadNPZfile[j])
self.bins = data['bins']
self.CF = data['CF']
self.PCH1,self.PCH2 = data['PCH']
self.graphCF(lastPlot = (j==len(self.loadNPZfile)-1))
self.graphPCH(lastPlot = (j==len(self.loadNPZfile)-1))
def computeCorrFun(self,file):
CF_label = {
0: 'Ch1ACF.npz',
1: 'Ch1xCh2.npz',
2: 'Ch2,Ch1.npz',
3: 'Ch2ACF.npz'
}
for j in range(4):
#Update Count Rates
countRateCh1,countRateCh2 = myCorr.getCountRate(file,self.dataType)
countRateCh1 = countRateCh1/self.timeScale/1000
countRateCh2 = countRateCh2/self.timeScale/1000
self.Ch1countRateStr.set('%.3f' % countRateCh1)
self.Ch2countRateStr.set('%.3f' % countRateCh2)
#Generate PCH
self.PCH1,self.PCH2 = myCorr.getPCHs(file,self.dataType,intBins=200)
if self.correlations[j]:
results = myCorr.multiTauCFfromDeltas(file,self.dataType,j)
self.bins = results[1]*self.timeScale
self.CF = results[0]
#save CF to file
if self.computeCFs.get():
outfile = file[0:len(file)-4]+CF_label.get(j)
self.loadNPZfile = outfile #update latest load file to current file
np.savez(outfile,bins=self.bins,CF=self.CF,countRates = [countRateCh1,countRateCh2],PCH = [self.PCH1,self.PCH2]) #save to file
def analyzeData(self):
if self.loadNPZfile and not(self.noAnalysisVar.get()): #check file list present to begin with and that analysis is checked, if not, do nothing
#Identify wavelength
if self.argon.get() and not self.hene.get(): #488-nm excitation
wavelength = 488
elif self.hene.get() and not self.argon.get(): #543-nm excitation
wavelength = 543
else:
wavelength = np.NAN
myFitter = fcs.scopeFit(wavelength) #fitting object
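#Note (added for clarity): the fit models invoked below via fcs.scopeFit are
#assumed to follow the standard 3D-diffusion FCS form; for the simple
#monodisperse case this is
#   G(tau) = GInf + G0 / ((1 + tau/tauD) * sqrt(1 + tau/(a**2 * tauD)))
#where tauD is the diffusion time, a the axial ratio of the focal volume, and
#1/G0 approximates the mean number of molecules N. The triplet variants are
#assumed to multiply this by (1 + F/(1-F) * exp(-tau/tauF)).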
if isinstance(self.loadNPZfile,str):
self.loadNPZfile = [self.loadNPZfile] #Ensure the file reference is a list, not a single string
for j in range(0,len(self.loadNPZfile)):
data = np.load(self.loadNPZfile[j])
#Load CF data
self.bins = data['bins']
self.CF = data['CF']
#Load and display count rates
CR = data['countRates']
countRateCh1 = CR[0]
countRateCh2 = CR[1]
self.Ch1countRateStr.set('%.3f' % countRateCh1)
self.Ch2countRateStr.set('%.3f' % countRateCh2)
#Load PCHs (for consistency)
PCH = data['PCH']
self.PCH1 = PCH[0]
self.PCH2 = PCH[1]
mask = np.less_equal(self.bins,self.maxTime) & np.greater_equal(self.bins,self.minTime)
#Perform simple monomodal fit
if self.simpleMonoAnalysisVar.get():
params,Dh1,Dh2,alpha,wxy,a = myFitter.getHydroDiam("simpleMonodisperse",x=self.bins[mask],y=self.CF[mask])
outfile = self.loadNPZfile[j][0:len(self.loadNPZfile[j])-4]+ "_simpleMonomodal.txt"
fid = io.open(outfile,mode = 'w')
fid.write("Hydrodynamic Radius (m): " + str(Dh1) + "\n")
fid.write("G0: " + str(params[0]) +"\n")
fid.write("GInf:" + str(params[1]) + "\n")
fid.write("tauD: " + str(params[2])+ '\n')
fid.write("Min Time: " + str(self.minTime) +"\n")
fid.write("Max Time: " + str(self.maxTime) + "\n")
fid.write("wxy (um):" + str(wxy) + "\n")
fid.write("axial ratio, a:" + str(a) + "\n")
fid.write("Wavelength: " + str(wavelength) + "\n")
fid.close()
#Show average number of molecules
conc = fcs.measureConc(params[0],wavelength)
self.NStr.set('%.2f' % float(1/params[0]) + ' \ %.2f' % conc)
if self.displayResults.get(): #add to plot
self.myPlot.semilogx(self.bins[mask],myFitter.simpleMonodisperse(self.bins[mask],params[0],params[1],params[2]),**{'linewidth':1,'label':'Simple Mono.'})
#Perform triplet-corrected monomodal fit
if self.tripletMonoAnalysisVar.get():
params,Dh1,Dh2,alpha,wxy,a = myFitter.getHydroDiam("tripletMonodisperse",self.bins[mask],self.CF[mask])
#write fit results to file
outfile = self.loadNPZfile[j][0:len(self.loadNPZfile[j])-4]+ "_tripletMonomodal.txt"
fid = io.open(outfile,mode = 'w')
fid.write("Hydrodynamic Radius: " + str(Dh1) + "\n")
fid.write("G0: " + str(params[0]) +"\n")
fid.write("GInf: " + str(params[1]) + "\n")
fid.write("F: " + str(params[2]) + "\n")
fid.write("tauD: " + str(params[3])+ "\n")
fid.write("tauF: " + str(params[4])+ "\n")
fid.write("Min Time: " + str(self.minTime) +"\n")
fid.write("Max Time: " + str(self.maxTime) + "\n")
fid.write("wxy (um):" + str(wxy) + "\n")
fid.write("axial ratio, a:" + str(a) + "\n")
fid.write("Wavelength: " + str(wavelength) + "\n")
fid.close()
#Show average number of molecules
conc = fcs.measureConc(params[0],wavelength)
self.NStr.set('%.1f' % float(1/params[0])+ ' \ %.2f' % conc)
if self.displayResults.get(): #add to plot
self.myPlot.semilogx(self.bins[mask],myFitter.tripletMonodisperse(self.bins[mask],params[0],params[1],params[2],params[3],params[4]),**{'linewidth':1,'label':'Triplet Mono'})
#Perform simple bimodal fit
if self.simpleBiAnalysisVar.get():
params,Dh1,Dh2,alpha,wxy,a = myFitter.getHydroDiam("simpleBimodal",self.bins[mask],self.CF[mask])
#write fit results to file
outfile = self.loadNPZfile[j][0:len(self.loadNPZfile[j])-4]+ "_simpleBimodal.txt"
fid = io.open(outfile,mode = 'w')
fid.write("Hydrodynamic Radius 1: " + str(Dh1) + "\n")
fid.write("Hydrodynamic Radius 2: " + str(Dh2) + "\n")
fid.write("G1: " + str(params[0]) +"\n")
fid.write("G2: " + str(params[1]) +"\n")
fid.write("GInf: " + str(params[2]) + "\n")
fid.write("tauD1: " + str(params[3])+ "\n")
fid.write("tauD2: " + str(params[4])+ "\n")
fid.write("Min Time: " + str(self.minTime) +"\n")
fid.write("Max Time: " + str(self.maxTime) + "\n")
fid.write("wxy (um):" + str(wxy) + "\n")
fid.write("axial ratio, a:" + str(a) + "\n")
fid.write("Wavelength: " + str(wavelength) + "\n")
fid.close()
#Hide average number of molecules
self.NStr.set('-')
if self.displayResults.get(): #add to plot
self.myPlot.semilogx(self.bins[mask],myFitter.simpleBimodal(self.bins[mask],params[0],params[1],params[2],params[3],params[4]),**{'linewidth':1,'label':'Simple Bi'})
#Perform triplet-corrected bimodal fit
if self.tripletBiAnalysisVar.get():
params,Dh1,Dh2,alpha,wxy,a = myFitter.getHydroDiam("tripletBimodal",self.bins[mask],self.CF[mask])
#write fit results to file
outfile = self.loadNPZfile[j][0:len(self.loadNPZfile[j])-4]+ "_tripletBimodal.txt"
fid = io.open(outfile,mode = 'w')
fid.write("Hydrodynamic Radius 1: " + str(Dh1) + "\n")
fid.write("Hydrodynamic Radius 2: " + str(Dh2) + "\n")
fid.write("G1: " + str(params[0]) +"\n")
fid.write("G2: " + str(params[1]) +"\n")
fid.write("GInf: " + str(params[2]) + "\n")
fid.write("F: " + str(params[3])+ "\n")
fid.write("tauD1: " + str(params[4])+ "\n")
fid.write("tauD2: " + str(params[5])+ "\n")
fid.write("tauDF: " + str(params[6])+ "\n")
fid.write("Min Time: " + str(self.minTime) +"\n")
fid.write("Max Time: " + str(self.maxTime) + "\n")
fid.write("wxy (um):" + str(wxy) + "\n")
fid.write("axial ratio, a:" + str(a) + "\n")
fid.write("Wavelength: " + str(wavelength) + "\n")
fid.close()
#Hide average number of molecules
self.NStr.set('-')
if self.displayResults.get(): #add to plot
self.myPlot.semilogx(self.bins[mask],myFitter.tripletBimodal(self.bins[mask],params[0],params[1],params[2],params[3],params[4],params[5],params[6]),**{'linewidth':1,'label':'Triplet Bi'})
#Simple anomalous diffusion option
if self.simpleAnomalousAnalysisVar.get():
params,Dh1,Dh2,alpha,wxy,a = myFitter.getHydroDiam("simpleAnomalous",x=self.bins[mask],y=self.CF[mask])
outfile = self.loadNPZfile[j][0:len(self.loadNPZfile[j])-4]+ "_simpleAnomalous.txt"
fid = io.open(outfile,mode = 'w')
fid.write("Hydrodynamic Radius: " + str(Dh1) + "\n")
fid.write("G0: " + str(params[0]) +"\n")
fid.write("GInf:" + str(params[1]) + "\n")
fid.write("tauD: " + str(params[2])+ '\n')
fid.write("alpha: " + str(params[3]) + '\n')
fid.write("Min Time: " + str(self.minTime) +"\n")
fid.write("Max Time: " + str(self.maxTime) + "\n")
fid.write("wxy (um): " + str(wxy) + "\n")
fid.write("axial ratio, a: " + str(a) + "\n")
fid.write("Wavelength: " + str(wavelength) + "\n")
fid.close()
#Show average number of molecules
conc = fcs.measureConc(params[0],wavelength)
self.NStr.set('%.1f' % float(1/params[0])+ ' \ %.2f' % conc)
if self.displayResults.get(): #add to plot
self.myPlot.semilogx(self.bins[mask],myFitter.simpleAnomalous(self.bins[mask],params[0],params[1],params[2],params[3]),**{'linewidth':1,'label':'Simple Anom.'})
#Triplet-corrected anomalous diffusion option
if self.tripletAnomalousAnalysisVar.get():
params,Dh1,Dh2,alpha,wxy,a = myFitter.getHydroDiam("tripletAnomalous",x=self.bins[mask],y=self.CF[mask])
outfile = self.loadNPZfile[j][0:len(self.loadNPZfile[j])-4]+ "_tripletAnomalous.txt"
fid = io.open(outfile,mode = 'w')
fid.write("Hydrodynamic Radius: " + str(Dh1) + "\n")
fid.write("G0: " + str(params[0]) +"\n")
fid.write("GInf:" + str(params[1]) + "\n")
fid.write("F: " + str(params[2]) + "\n")
fid.write("tauD: " + str(params[3])+ '\n')
fid.write("alpha: " + str(params[4])+ '\n')
fid.write("tauF: " + str(params[5])+ '\n')
fid.write("Min Time: " + str(self.minTime) +"\n")
fid.write("Max Time: " + str(self.maxTime) + "\n")
fid.write("wxy (um):" + str(wxy) + "\n")
fid.write("axial ratio, a:" + str(a) + "\n")
fid.write("Wavelength: " + str(wavelength) + "\n")
fid.close()
#Show average number of molecules
conc = fcs.measureConc(params[0],wavelength)
self.NStr.set('%.1f' % float(1/params[0])+ ' \ %.2f' % conc)
if self.displayResults.get(): #add to plot
self.myPlot.semilogx(self.bins[mask],myFitter.tripletAnomalous(self.bins[mask],params[0],params[1],params[2],params[3],params[4],params[5]),**{'linewidth':1,'label':'Triplet Anom.'})
#update gui strings
self.hydroDiamStr1.set('%.2f' % float(Dh1/1e-9))
self.hydroDiamStr2.set('%.2f' % float(Dh2/1e-9))
self.alphaStr.set('%.2f' % float(alpha))
#Draw graphs
self.plotAxes.draw()
def refreshAxes(self):
#Validate min/max delay entries
try:
self.minTime = float(self.tauMinStr.get())
except:
self.tauMinStr.set(str(self.minTime))
try:
self.maxTime = float(self.tauMaxStr.get())
except:
self.tauMaxStr.set(str(self.maxTime))
#Ensure minTime is less than maxTime
if self.maxTime<self.minTime:
self.minTime = 1e-6;
self.maxTime = 1.0;
self.tauMaxStr.set(str(self.maxTime))
self.tauMinStr.set(str(self.minTime))
#Reload and redraw
if isinstance(self.loadNPZfile,str):
self.loadNPZ(fileName = self.loadNPZfile)
else:
self.loadMultNPZ(NPZFileList=self.loadNPZfile)
#Update fits accordingly
if not(self.noAnalysisVar.get()):
self.analyzeData()
def saveAxes(self):
if isinstance(self.loadNPZfile,str):
initialfile = os.path.split(self.loadNPZfile)
initialfile = initialfile[len(initialfile)-1]
initialfile = initialfile[0:len(initialfile)-4]
outFile = asksaveasfilename(title = 'Save Figure As...',defaultextension = 'pdf',initialfile = initialfile)
else:
outFile = asksaveasfilename(title = 'Save Figure As...',defaultextension = 'pdf',initialfile = 'MultipleFileGraph')
self.fig.savefig(outFile,bbox_inches = 'tight')
def exportToCSV(self):
if not(self.loadNPZfile):
self.loadMultNPZ()
if isinstance(self.loadNPZfile,str):
fileFormatter.npzToFormat([self.loadNPZfile])
else:
fileFormatter.npzToFormat(self.loadNPZfile)
def overlayAndAverage(self,NPZFileList=''):
#Prompt for file names if none provided
if not NPZFileList:
self.loadNPZfile = askopenfilenames(title = "Select NPZ Files",filetypes = (("NPZ Files","*.npz"),("all files","*.*")))
else:
self.loadNPZfile = NPZFileList
if self.loadNPZfile:
self.myPlot.cla() #clear axes
#Initialize arrays to first element
data = np.load(self.loadNPZfile[0])
CFData = data['CF']
self.CF = data['CF']
self.bins = data['bins']
countRateData = data['countRates']
self.PCH1,self.PCH2 = data['PCH']
PCH1Data = self.PCH1[0]
PCH2Data = self.PCH2[0]
self.graphCF(lastPlot = False)
self.graphPCH(lastPlot = False)
#run through list
for j in range(1,len(self.loadNPZfile)):
data = np.load(self.loadNPZfile[j])
self.bins = data['bins']
self.CF = data['CF']
countRate = data['countRates']
self.PCH1,self.PCH2 = data['PCH']
self.graphCF(lastPlot = False)
self.graphPCH(lastPlot = False)
CFData = np.vstack((CFData,np.array(self.CF)))
countRateData = np.vstack((countRateData,np.array(countRate)))
PCH1Data = np.vstack((PCH1Data,np.array(self.PCH1[0])))
PCH2Data = np.vstack((PCH2Data,np.array(self.PCH2[0])))
#Find Averaged Values
dataAverage = np.mean(CFData,0)
self.CF = dataAverage
countAverage = np.mean(countRateData,0)
self.Ch1countRateStr.set('%.3f' % countAverage[0])
self.Ch2countRateStr.set('%.3f' % countAverage[1])
PCH1Average = np.mean(PCH1Data,0)
PCH2Average = np.mean(PCH2Data,0)
self.PCH1[0] = PCH1Average
self.PCH2[0] = PCH2Average
self.graphCF(lastPlot = True,color ='black')
self.graphPCH(lastPlot = True,color = ['green','red'])
#Opt to save averaged data
outFile = asksaveasfilename(title = 'Save Averaged Data As...',defaultextension = 'npz',initialfile = self.loadNPZfile[j])
if len(outFile):
np.savez(outFile,bins=self.bins,CF=dataAverage,countRates = countAverage,PCH = [self.PCH1,self.PCH2]) #save to file
def calibrate(self):
answer = messagebox.askyesno("Recalibration Request","Are you sure you want to recalibrate? All previous data will be overwritten")
if answer:
answer = simpledialog.askstring("Authorization Required", "Enter Your Password")
pwd = '<PASSWORD>'
if answer == pwd:
#Physical Constants
kb = 1.38064852e-23 #Boltzmann constant
Temp = 295 #Temperature
eta=8.90e-4 #viscosity
#Open config file
#path = os.getenv('ProgramFiles(x86)')
path = os.path.dirname(os.path.abspath(__file__))
if not os.path.isdir(os.path.join(path,'FalCorr')):
os.mkdir(os.path.join(path,'FalCorr'))
path = os.path.join(path,'FalCorr')
configFile = os.path.join(path,'config.txt')
fid = io.open(configFile,'w')
#Calibrate each wavelength
wavelength = [488,543]
myFit = fcs.scopeFit(0) #Fitter class
for j in range(0,2):
titleString = "Select Files For " + str(wavelength[j]) + "-nm Fitting Focal Parameters"
self.loadNPZfile = askopenfilenames(title = titleString,filetypes = (("NPZ Files","*.npz"),("all files","*.*")))
self.loadMultNPZ(NPZFileList=self.loadNPZfile) #load and display items
#ensure list format
if isinstance(self.loadNPZfile,str):
self.loadNPZfile = [self.loadNPZfile]
#Fit calibration curves
a = np.zeros([len(self.loadNPZfile),1]) #initialize fit array
tauD = np.zeros([len(self.loadNPZfile),1]) #initialize fit array
fileName = 'Calib'+str(wavelength[j]) + 'List.txt'
calibFile = os.path.join(path,fileName)
calib = io.open(calibFile,'w')
for k in range(0,len(self.loadNPZfile)):
data = np.load(self.loadNPZfile[k])
self.bins = data['bins']
self.CF = data['CF']
mask = np.less_equal(self.bins,self.maxTime) & np.greater_equal(self.bins,self.minTime)
params = myFit.calFit(self.bins[mask],self.CF[mask])
a[k] = params[3]
tauD[k] = params[2]
writeStr = self.loadNPZfile[k] + '\ta: ' + str(a[k]) + '\ttauD: ' + str(tauD[k]) + '\n'
calib.write(writeStr)
calib.close()
#Average Data Sets
A = np.mean(a)
TauD = np.mean(tauD)
#Back-calculate beam parameters
Dh = simpledialog.askfloat('Calibration','Enter known hydrodynamic diameter in nm')
Dh = Dh*1e-9 #convert to meters
Dt = kb*Temp/(3*np.pi*eta*Dh) #Calculate diffusion constant
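#The beam waist follows from the fitted diffusion time: for 2D diffusion
#through a Gaussian focus, tauD = wxy**2 / (4*Dt), hence wxy = sqrt(4*Dt*TauD).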
wxy = | np.sqrt(4*Dt*TauD) | numpy.sqrt |
# Course: EE551 Python for Engineer
# Author: <NAME>
# Date: 2021/05/09
# Version: 1.0
# Performs Gaussian filter, Sobel filter and non-max suppression.
import cv2 as cv
import numpy as np
import math
import os
from image_processor import app
# reads in an image with the provided file path.
# converts the image from color to gray scale if needed.
# Inputs:
# img_file_name: file name of the image to be read in.
# Outputs:
# current_img: uploaded image in gray scale.
def readImg(img_file_name):
current_img = cv.imread(img_file_name)
# converts the image to gray scale if it is color
if current_img.shape[2] > 1:
current_img = cv.cvtColor(current_img, cv.COLOR_BGR2GRAY)
return current_img
# Applies the Gaussian filter onto the specified image with
# the given sigma value. Saves the processed image
# into a local file directory.
# Inputs:
# img: the name of the image file to be processed (including
# path).
# sigma: standard deviation used to calculate the Gaussian kernel size.
# Outputs:
# gaussianImg: image that has been filtered by Gaussian filter.
def gaussian(img, sigma):
current_img = readImg(img)
rows = current_img.shape[0]
columns = current_img.shape[1]
# initializes kernel size
kernel_size = 6 * sigma + 1
kernel = np.zeros([kernel_size, kernel_size], dtype = float)
# calculates center element's coordinates
middleIndex = math.ceil(kernel_size / 2)
# calculates kernel matrix
for i in range(kernel_size):
for j in range(kernel_size):
center_dist = pow((i + 1 - middleIndex), 2) + \
pow((j + 1 - middleIndex), 2)
k_row = i + 1
k_col = j + 1
kernel[i, j] = math.exp(-(center_dist) / (2 * (sigma**2)))
kernel = (1 / (2 * math.pi * (sigma ** 2))) * kernel
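# kernel now samples the 2D Gaussian
#   G(x, y) = 1/(2*pi*sigma**2) * exp(-((x - xc)**2 + (y - yc)**2) / (2*sigma**2))
# on a (6*sigma + 1) x (6*sigma + 1) grid, i.e. roughly +/- 3 sigma around the
# center element at (middleIndex, middleIndex).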
# applies symmetric (mirror) padding to the original image
pad_size = math.floor(kernel_size / 2)
paddedImg = np.lib.pad(current_img, pad_size, 'symmetric')
gaussianImg = np.zeros([rows, columns], dtype = float)
k_rows = kernel.shape[0]
k_cols = kernel.shape[1]
# applies the Gaussian filter
for i in range(rows):
for j in range(columns):
temp_matrix = paddedImg[i : i + k_rows, j : j + k_cols]
temp_matrix = temp_matrix.astype(float)
temp_matrix = kernel * temp_matrix
gaussianImg[i, j] = | np.sum(temp_matrix) | numpy.sum |
from numpy import array,sum, sqrt
from scipy.misc import derivative
def selector(mixing_rule):
if(mixing_rule == 'van_der_waals'):
return van_der_waals
else:
return 'Mixing rule does not exist'
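# For reference, the van der Waals one-fluid mixing rules implemented below are
#   B = sum_i x_i * B_i
#   A = sum_i sum_j x_i * x_j * sqrt(A_i * A_j) * (1 - k_ij)
# with the per-component term A_i' = 2 * sum_j x_j * A_ij, as used e.g. in
# fugacity-coefficient expressions.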
def van_der_waals(compositions,tc,acentric,kij,Ai,Bi,alfa,alfa_fun,T):
shape = kij.shape
B = sum(compositions*Bi)
Aij=array([sqrt(Ai[i]*Ai[j])*(1-kij[i,j]) for i in range(0,len(Ai)) for j in range(0,len(Ai))]).reshape(shape)
A = sum([sum(compositions[i]*compositions[j]*Aij[i,j]) for i in range(0,len(Ai)) for j in range(0,len(Ai))])
A_i = array([(compositions[j]*Aij[:,j]) for j in range(0,len(Ai))])
A_i =array([2* | sum(A_i[:,i]) | numpy.sum |
'''See the shared Google Drive documentation for an inheritance diagram that
shows the relationships between the classes defined in this file.
'''
import numpy as np
import socket
import time
from riglib import source
from ismore import settings, udp_feedback_client
import ismore_bmi_lib
from utils.constants import *
#import armassist
#import rehand
from riglib.filter import Filter
from riglib.plants import Plant
import os
class BasePlantUDP(Plant):
'''
Common UDP interface for the ArmAssist/ReHand
'''
debug = 0
sensor_data_timeout = 1 # seconds. if this number of seconds has passed since sensor data was received, velocity commands will not be sent
lpf_vel = 0
# define in subclasses!
ssm_cls = None
addr = None
feedback_data_cls = None
data_source_name = None
n_dof = None
blocking_joints = None
safety_grid = None
feedback_str = ''
def __init__(self, *args, **kwargs):
self.source = source.DataSource(self.feedback_data_cls, bufferlen=5, name=self.data_source_name)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # used only for sending
ssm = self.ssm_cls()
self.pos_state_names = [s.name for s in ssm.states if s.order == 0]
self.vel_state_names = [s.name for s in ssm.states if s.order == 1]
self.aa_xy_ix = [i for i, j in enumerate(ssm.states) if j.name in ['aa_px', 'aa_py']]
self.aa_psi_ix = [i for i, j in enumerate(ssm.states) if j.name == 'aa_ppsi']
self.rh_pron_ix = [i for i, j in enumerate(ssm.states) if j.name == 'rh_pprono']
self.rh_pfings = [(i, j.name) for i, j in enumerate(ssm.states) if j.name in ['rh_pthumb', 'rh_pindex', 'rh_pfing3']]
self.drive_velocity_raw = np.zeros((len(self.vel_state_names),))
self.drive_velocity_raw_fb_gain = np.zeros((len(self.vel_state_names),))
self.drive_velocity_sent = np.zeros((len(self.vel_state_names),))
self.drive_velocity_sent_pre_safety = np.zeros((len(self.vel_state_names),))
self.pre_drive_state = np.zeros((len(self.vel_state_names), ))
# low-pass filters to smooth out command velocities
# from scipy.signal import butter
# b, a = butter(5, 0.1) # fifth order, 2 Hz bandpass (assuming 10 Hz update rate)
#omega, H = signal.freqz(b, a)
#plt.figure()
#plt.plot(omega/np.pi, np.abs(H))
# self.vel_command_lpfs = [None] * self.n_dof
# for k in range(self.n_dof):
# self.vel_command_lpfs[k] = Filter(b=b, a=a)
# self.last_sent_vel = np.ones(self.n_dof) * np.nan
# calculate coefficients for a 4th-order Butterworth LPF at 1.5 Hz for kinematic data received from the exo
# fs_synch = 20 #Frequency at which emg and kin data are synchronized
# nyq = 0.5 * fs_synch
# cuttoff_freq = 1.5 / nyq
# bpf_kin_coeffs = butter(4, cuttoff_freq, btype='low')
# self.pos_filt = [None] * self.n_dof
# for k in range(self.n_dof):
# self.pos_filt[k] = Filter(bpf_kin_coeffs[0], bpf_kin_coeffs[1])
def init(self):
from riglib import sink
sink.sinks.register(self.source)
def start(self):
# only start this DataSource after it has been registered with
# the SinkManager singleton (sink.sinks) in the call to init()
self.source.start()
self.ts_start_data = time.time()
def stop(self):
# send a zero-velocity command
self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(np.zeros(self.n_dof))))
self.source.stop()
self.feedback_file.close()
def last_data_ts_arrival(self):
return self.source.read(n_pts=1)['ts_arrival'][0]
def _send_command(self, command):
self.sock.sendto(command, self.addr)
def pack_vel(self, vel):
format_str = "%f " * self.n_dof
return format_str % tuple(vel)
def send_vel(self, vel):
assert len(vel) == self.n_dof
vel = vel.copy()
vel *= self.vel_gain # change the units of the velocity, if necessary
self.last_sent_vel = vel
#command_vel is already fitlered at the task level, no need to filter it again.
#self.last_sent_vel = filt_vel = np.array([self.vel_command_lpfs[k](vel[k]) for k in range(self.n_dof)]).ravel()
if all(v <= 0.00000001 for v in abs(self.last_sent_vel)):
print ('last sent vel')
print (self.last_sent_vel)
if (self.last_data_ts_arrival() == 0) or ((time.time() - self.last_data_ts_arrival()) > self.sensor_data_timeout):
print ("sensor data not received for %s recently enough, not sending velocity command!" % self.plant_type)
return
# squash any velocities which would take joints outside of the rectangular bounding box
current_pos = self.get_pos() * self.vel_gain
projected_pos = current_pos + vel * 0.1
max_reached, = np.nonzero((projected_pos > self.max_pos_vals) * (vel > 0))
min_reached, = np.nonzero((projected_pos < self.min_pos_vals) * (vel < 0))
vel[max_reached] = 0
vel[min_reached] = 0
# set max speed limits before sending the command
faster_than_max_speed, = np.nonzero(np.abs(vel) > self.max_speed)
vel[faster_than_max_speed] = self.max_speed[faster_than_max_speed] * np.sign(vel[faster_than_max_speed])
#if we wanna define some limit values for the rehand use the filt_vel. Otherwise use vel
#self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(filt_vel)))
self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(vel)))
if self.debug:
print ("input vel")
print (vel)
print ("vel sent to %s" % self.plant_type)
print (vel)
print ("current_pos")
print (current_pos)
print ("projected_pos")
print (projected_pos)
print ("actual velocity")
print (self.get_vel())
if self.lpf_vel:
# squash any velocities which would take joints outside of the rectangular bounding box
current_pos = self.get_pos() * self.vel_gain
projected_pos = current_pos + vel * (1.0/20)
max_reached, = np.nonzero((projected_pos > self.max_pos_vals) * (vel > 0))
min_reached, = np.nonzero((projected_pos < self.min_pos_vals) * (vel < 0))
vel[max_reached] = 0
vel[min_reached] = 0
# set max speed limits
faster_than_max_speed, = np.nonzero(np.abs(vel) > self.max_speed)
vel[faster_than_max_speed] = self.max_speed[faster_than_max_speed] * np.sign(vel[faster_than_max_speed])
if faster_than_max_speed.size > 0:
print ('faster_than_max_speed')
print (faster_than_max_speed)
if self.debug:
print ("input vel")
print (vel)
print ("vel sent to %s" % self.plant_type)
print (vel)
#print "current_pos"
#print current_pos
#print "projected_pos"
#print projected_pos
#print "actual velocity"
#print self.get_vel()
self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(vel)))
else:
self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(vel)))
# def get_pos(self):
# # udp_feedback_client takes care of converting sensor data to cm or rad, as appropriate for the DOF
# return np.array(tuple(self.source.read(n_pts=1)['data'][self.pos_state_names][0]))
def drive(self, decoder):
vel = decoder['qdot']
vel_bl = vel.copy()
feedback_str = ''
if self.blocking_joints is not None:
vel_bl[self.blocking_joints] = 0
if self.safety_grid is not None:
#If the next position is outside of safety then damp velocity to only go to limit:
pos_pred = decoder['q'] + 0.1*vel_bl
#Make sure predicted AA PX, AA PY within bounds:
xy_change = True
if len(self.aa_xy_ix) > 0:
if self.safety_grid.is_valid_pos(pos_pred[self.aa_xy_ix]) is False:
#If not, make their velocity zero:
vel_bl[self.aa_xy_ix] = 0
xy_change = False
feedback_str = feedback_str+ ' stopping xy from moving'
else:
xy_change = False
# Make sure AA Psi within bounds:
if len(self.aa_psi_ix) > 0:
# If X/Y ok
if xy_change:
mn, mx = self.safety_grid.get_minmax_psi(pos_pred[self.aa_xy_ix])
# If x/y not ok:
else:
mn, mx = self.safety_grid.get_minmax_psi(decoder['q'][self.aa_xy_ix])
# Set psi velocity :
if np.logical_and(pos_pred[self.aa_psi_ix] >= mn, pos_pred[self.aa_psi_ix] <= mx):
pass
else:
vel_bl[self.aa_psi_ix] = 0
feedback_str = feedback_str+ 'stopping psi'
# Make sure RH Prono within bounds (if SSM is only RH, use settings.starting_pos for AAPX, AAPY)
if len(self.rh_pron_ix) > 0:
# If X/Y ok
if xy_change:
mn, mx = self.safety_grid.get_minmax_prono(pos_pred[self.aa_xy_ix])
# If x/y not ok, or not moving because not part of the state space:
else:
if len(self.aa_xy_ix) > 0:
mn, mx = self.safety_grid.get_minmax_prono(decoder['q'][self.aa_xy_ix])
else:
mn, mx = self.safety_grid.get_minmax_prono(settings.starting_pos['aa_px'], settings.starting_pos['aa_py'])
# Set prono velocity :
if np.logical_and(pos_pred[self.rh_pron_ix] >= mn, pos_pred[self.rh_pron_ix] <= mx):
pass
else:
vel_bl[self.rh_pron_ix] = 0
feedback_str = feedback_str+ 'stopping prono'
# Assure RH fingers are within range:
if len(self.rh_pfings) > 0:
for i, (ix, nm) in enumerate(self.rh_pfings):
mn, mx = self.safety_grid.get_rh_minmax(nm)
if np.logical_and(pos_pred[ix] >= mn, pos_pred[ix] <= mx):
pass
else:
vel_bl[ix] = 0
feedback_str = feedback_str+ 'stopping rh fings'
self.feedback_str = feedback_str
self.drive_velocity = vel_bl
self.send_vel(vel_bl)
decoder['q'] = self.get_pos()
def write_feedback(self):
pos_vel = [str(i) for i in np.hstack(( self.get_pos(), self.get_vel() )) ]
#self.feedback_file.write(','.join(pos_vel)+'\n')
if self.feedback_str != '':
self.feedback_file.write(self.feedback_str+ time.ctime() + '\n')
class ArmAssistPlantUDP(BasePlantUDP):
'''Sends velocity commands and receives feedback over UDP. Can be used
with either the real or simulated ArmAssist.
'''
ssm_cls = ismore_bmi_lib.StateSpaceArmAssist
addr = settings.ARMASSIST_UDP_SERVER_ADDR
feedback_data_cls = udp_feedback_client.ArmAssistData
data_source_name = 'armassist'
n_dof = 3
plant_type = 'ArmAssist'
vel_gain = np.array([cm_to_mm, cm_to_mm, rad_to_deg]) # convert units to: [mm/s, mm/s, deg/s]
max_pos_vals = np.array([np.inf, np.inf, np.inf])
min_pos_vals = np.array([-np.inf, -np.inf, -np.inf])
max_speed = np.array([np.inf, np.inf, np.inf])
feedback_file = open(os.path.expandvars('$HOME/code/bmi3d/log/armassist.txt'), 'w')
#max_speed = np.array([40, 60, 20]) # in mm/s and deg/s
#max_speed = np.array([60, 80, 50]) # in mm/s and deg/s
#parameters for kinematics low-pass filtering
from scipy.signal import butter, lfilter
from ismore.filter import Filter
fs_synch = 25 #Frequency at which emg and kin data are synchronized
nyq = 0.5 * fs_synch
cuttoff_freq = 1.5 / nyq
bpf_kin_coeffs = butter(2, cuttoff_freq, btype='low')
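# Worked out: with fs_synch = 25 Hz, nyq = 12.5 Hz, so cuttoff_freq = 1.5/12.5
# = 0.12 in normalized units, i.e. a 2nd-order Butterworth low-pass at 1.5 Hz
# used to smooth the velocities derived from position differences.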
n_dof = 3
vel_filter = [None] * n_dof
for k in range(n_dof):
vel_filter[k] = Filter(bpf_kin_coeffs[0], bpf_kin_coeffs[1])
n_getpos_iter= 0
def __init__(self, *args, **kwargs):
super(ArmAssistPlantUDP, self).__init__(*args, **kwargs)
def set_pos_control(self): # position control with global reference system
self._send_command('SetControlMode ArmAssist Position')
def set_global_control(self): #velocity control with global reference system
self._send_command('SetControlMode ArmAssist Global')
def set_trajectory_control(self): #trajectory control with global reference system
self._send_command('SetControlMode ArmAssist Trajectory')
def send_vel(self, vel):
vel = vel.copy()
# units of vel should be: [cm/s, cm/s, rad/s]
assert len(vel) == self.n_dof
# convert units to: [mm/s, mm/s, deg/s] to send them through UDP to the ArmAssist application
vel[0] *= cm_to_mm
vel[1] *= cm_to_mm
vel[2] *= rad_to_deg
# set max speed limts
faster_than_max_speed, = np.nonzero(np.abs(vel) > self.max_speed)
vel[faster_than_max_speed] = self.max_speed[faster_than_max_speed] * np.sign(vel[faster_than_max_speed])
self.debug = True
if self.debug:
# print "vel sent to armassist"
# print vel
if faster_than_max_speed.size > 0:
print ('faster_than_max_speed')
print (faster_than_max_speed)
print ("speed set to: ")
print (vel)
self._send_command('SetSpeed ArmAssist %f %f %f\r' % tuple(vel))
# get raw position
def get_pos_raw(self):
# udp_feedback_client takes care of converting sensor data to cm or rad, as appropriate for the DOF
#get the last poitns of data of the armassist and low-pass filter
return np.array(tuple(self.source.read(n_pts=1)['data'][self.pos_state_names][0]))
# get filtered position
def get_pos(self):
return np.array(tuple(self.source.read(n_pts=1)['data_filt'][self.pos_state_names][0]))
# calculate vel from raw position
def get_vel_raw(self):
recent_pos_data = self.source.read(n_pts=2)
pos = recent_pos_data['data'][self.pos_state_names]
ts = recent_pos_data['ts']
delta_pos = np.array(tuple(pos[1])) - np.array(tuple(pos[0]))
delta_ts = ts[1] - ts[0]
vel = delta_pos / delta_ts
#filt_vel = np.array([self.vel_command_lpfs[k](vel[k]) for k in range(self.n_dof)]).ravel() #nerea --> to test!
if ts[0] != 0 and any(np.isnan(v) for v in vel):
print ("WARNING -- delta_ts = 0 in AA vel calculation:", vel)
for i in range(3):
if np.isnan(vel[i]):
vel[i] = 0
return vel
#calculate vel from raw position and filter
def get_vel(self):
recent_pos_data = self.source.read(n_pts=2)
pos = recent_pos_data['data'][self.pos_state_names]
ts = recent_pos_data['ts']
delta_pos = np.array(tuple(pos[1])) - np.array(tuple(pos[0]))
delta_ts = ts[1] - ts[0]
vel = delta_pos / delta_ts
if ts[0] != 0 and any(np.isnan(v) for v in vel):
print ("WARNING -- delta_ts = 0 in AA vel calculation:", vel)
for i in range(3):
if np.isnan(vel[i]):
vel[i] = 0
# skip filtering while the velocity still contains NaNs (e.g. the first sample is always NaN): once a NaN enters the filter, all subsequent filtered values would also be NaN
if np.any(np.isnan(vel)):
self.n_getpos_iter = self.n_getpos_iter +1
vel_filt = vel
else:
vel_filt = np.array([self.vel_filter[k](vel[k]) for k in range(self.n_dof)]).ravel()
return vel_filt
def send_pos(self, pos, time):
pos = pos.copy()
# units of vel should be: [cm/s, cm/s, rad/s]
assert len(pos) == 3
# convert units to: [mm/s, mm/s, deg/s]
pos[0] *= cm_to_mm
pos[1] *= cm_to_mm
pos[2] *= rad_to_deg
# mode 1: the forearm angle (psi) stays the same as it is. mode 2: psi will move according to the determined value
mode = 2
pos_command = np.zeros(5)
pos_command[0] = pos[0]
pos_command[1] = pos[1]
pos_command[2] = pos[2]
pos_command[3] = time
pos_command[4] = mode
print ("pos")
print (pos)
print ("time")
print (time)
self._send_command('SetPosition ArmAssist %f %f %f %f %f\r' % tuple(pos_command))
def enable(self):
self._send_command('SetControlMode ArmAssist Global\r')
def disable(self):
self._send_command('SetControlMode ArmAssist Disable\r')
def enable_watchdog(self, timeout_ms):
print ('ArmAssist watchdog not enabled, doing nothing')
def send_traj(self, pos_vel):
pos_vel = pos_vel.copy()
# units of vel should be: [cm/s, cm/s, rad/s]
assert len(pos_vel) == 6
# units are already in [mm/s, mm/s, rad/s]
# convert values to integers to reduce noise
#pos_vel_int = np.rint(pos_vel)
pos_vel_int = pos_vel
print ("trajectory sent to AA")
print ("x y psi vx vy vpsi")
print (pos_vel_int)
traj_command = np.zeros(6)
traj_command[0] = pos_vel_int[0]
traj_command[1] = pos_vel_int[1]
traj_command[2] = pos_vel_int[2]
traj_command[3] = pos_vel_int[3]
traj_command[4] = pos_vel_int[4]
traj_command[5] = pos_vel_int[5]
self._send_command('SetTrajectory ArmAssist %d %d %d %d %d %d\r' % tuple(traj_command))
class DummyPlantUDP(object):
drive_velocity_raw = np.array([0,0,0])
drive_velocity_sent = np.array([0,0,0])
drive_velocity_sent_pre_safety = np.array([0,0,0])
pre_drive_state = np.array([0, 0, 0])
def init(self):
pass
def enable(self):
pass
def start(self):
pass
def stop(self):
pass
def write_feedback(self):
pass
def get_pos_raw(self):
return np.array([0,0,0])
def get_pos(self):
return np.array([0,0,0])
def get_vel_raw(self):
return np.array([0,0,0])
def get_vel(self):
return np.array([0,0,0])
class ReHandPlantUDP(BasePlantUDP):
'''Sends velocity commands and receives feedback over UDP. Can be used
with either the real or simulated ReHand.
'''
ssm_cls = ismore_bmi_lib.StateSpaceReHand
addr = settings.REHAND_UDP_SERVER_ADDR
feedback_data_cls = udp_feedback_client.ReHandData
data_source_name = 'rehand'
n_dof = 4
plant_type = 'ReHand'
vel_gain = np.array([rad_to_deg, rad_to_deg, rad_to_deg, rad_to_deg])
max_pos_vals = np.array([60, 60, 60, 90], dtype=np.float64) # degrees
min_pos_vals = np.array([25, 25, 25, 25], dtype=np.float64) # degrees
max_speed = | np.array([np.inf, np.inf, np.inf, np.inf], dtype=np.float64) | numpy.array |
import os
import numpy
import pandas
import logging
import seaborn
import warnings
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.layers import LSTM, Dense, Flatten, Dropout
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import TimeSeriesSplit
warnings.filterwarnings("ignore")
# logging config
logging.basicConfig(
filename="logs.log",
level=logging.INFO,
format="%(asctime)s:%(levelname)s:%(message)s"
)
class TSLSTM(object):
""" Bayes By Backprop """
def __init__(self, path_to_csv, inputs:list, neurons:int, m:int, n:int, approach:str):
"""
Arguments:
---------
path_to_csv : str
absolute path to the csv file
inputs : list
list of column indices that are needed as input
it should include 0 as the "Date" column
it should include index for "Output" column
neurons : int
number of hidden units/neurons per LSTM layer
m : int
moving window of ‘m’ number of time step inputs
n : int
‘n’ day output multistep forecasting
approach : string
if "lstm" then a model built using LSTM is returned
if "lstmd" the a model built using LSTM followed by dropout is
returned
"""
self._path_to_csv = path_to_csv
self._inputs = inputs
self._neurons = neurons
self._m = m
self._n = n
self._approach = approach
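# Illustrative construction (sketch only; the file name and column indices are
# placeholders, not taken from this project):
#   model = TSLSTM("prices.csv", inputs=[0, 1, 2, 3], neurons=50, m=30, n=7,
#                  approach="lstm")
#   df = model.load_dataset(get_overview=True)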
def load_dataset(self, get_overview:bool):
"""
To load a csv file that should to have a 'Date' column at the 0th index.
Date values are sorted in ascending order (oldest first) with dayfirst.
Arguments:
----------
get_overview : boolean
if overview of the dataset is required
Returns:
--------
data : dataframe
dataframe with 'Date' column converted to date-time and set as index
"""
self.data = pandas.read_csv(
self._path_to_csv,
usecols = self._inputs
)
self.data['Date'] = pandas.to_datetime(self.data['Date'], dayfirst=True)
self.data.sort_values(by=['Date'], inplace=True, ascending=True)
if get_overview:
self._data_overview()
return self.data
def _data_overview(self):
"""
To get general information about the dataset, which is stored in the log file
"""
# general description of data features
logging.info(self.data.describe())
# count null values present
if self.data.isnull().all().sum() == 0:
logging.info("No null values present")
else:
logging.info("{} null values present".format(
self.data.isnull().all().sum())
)
# total number of data points and features
logging.info("{} data points with {} features were found".format(
self.data.shape[0],
self.data.shape[1]
))
# total unique date values
logging.info("{} unique date values were found".format(
self.data.index.nunique()
))
# oldest and latest date value
logging.info("The oldest date in dataset is {}".format(
self.data.index.min()
))
logging.info("The latest date in dataset is {}".format(
self.data.index.max()
))
# visualize the data
if not os.path.isdir("./images"):
os.makedirs("./images")
fig = self.data.plot(
x='Date',
y=list(self.data)[1:],
style="-",
alpha=0.7,
figsize=(20, 10),
xlabel = 'Date',
ylabel='Feature Values'
).get_figure()
fig.savefig("images/initial-plot.pdf")
def _split_sequences(self, sequences):
"""
Split a multivariate sequence (dataset) into samples X and y
Arguments:
----------
sequences : Dataframe
train and test dataframe with the features and target column
Returns:
-------
X, y : numpy arrays
sequence split into features and target
"""
X, y = list(), list()
for i in range(len(sequences)):
# find the end of this pattern
end_ix = i + self._m
out_end_ix = end_ix + self._n-1
# check if we are beyond the dataset
if out_end_ix > len(sequences):
break
# gather input and output parts of the pattern
seq_x = sequences[i:end_ix, :-1]
seq_y = sequences[end_ix-1:out_end_ix, -1]
X.append(seq_x)
y.append(seq_y)
return numpy.array(X), numpy.array(y)
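# Worked example: with m = 3, n = 2 and a 6-row sequence r0..r5 (last column is
# the target), the split yields
#   X[0] = features of r0..r2, y[0] = targets of r2..r3
#   X[1] = features of r1..r3, y[1] = targets of r3..r4
#   X[2] = features of r2..r4, y[2] = targets of r4..r5
# i.e. X has shape (samples, m, n_features) and y has shape (samples, n).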
def _standardscaler_transform(self, train, test):
"""
Standard scale the train and test data passed
Removes the date column from consideration
Arguments:
---------
train, test : pandas dataframe
Train and test split of the dataset
Returns:
-------
train_data, test_date : numpy arrays
Standard scaled train and test dataframes
"""
X_train = numpy.array(train.iloc[:,1:-1])
y_train = numpy.array(train.iloc[:, -1])
X_test = | numpy.array(test.iloc[:,1:-1]) | numpy.array |
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../..")
from blackscholes.fft.Conv import ConvEuro
import numpy as np
from scipy.stats.mstats import gmean
class Euro(ConvEuro):
def __init__(self, strike, S0_vec, T, ir, vol, dividend, corr, cp_type):
dim = len(S0_vec)
vol_vec = np.full(dim, vol)
dividend_vec = | np.full(dim, dividend) | numpy.full |
from matplotlib import pyplot as plt
import numpy as np
import random
class ImageStitcher(object):
"""Stitches together two single digit images into a double digit image with
possible overlap"""
def __init__(self,
img_width,
images,
labels,
overlap_range=(-25, 0),
repeated_digits=True,
singles=False,
doubles=True,
singles_amount=.1,
testing=False):
self.singles = singles
self.doubles = doubles
self.singles_amount = singles_amount
self.original_imgs = images
self.testing = testing
if img_width >= images[0].shape[0] * 2 + overlap_range[1]:
self.img_width = img_width
else:
self.img_width = images[0].shape[0] * 2 + overlap_range[1]
self.overlap_range = overlap_range
self.original_labels = labels
self.stitched_imgs = []
self.stitched_labels = []
if repeated_digits:
self.repeated_digits = True
else:
self.repeated_digits = False
def view_image(self, image):
plt.matshow(image, aspect='auto', cmap='gray')
plt.show()
# overlap_range should be a tuple of values i.e. (-25, 0)
def set_overlap(self, overlap_range):
self.overlap_range = overlap_range
def get_overlap(self):
return self.overlap_range
def resize_all(self):
pass
def stitch(self, image1, image2, num_pixels):
if num_pixels == 0:
new_image = | np.concatenate((image1, image2), axis=1) | numpy.concatenate |
import csv
import os
from datetime import datetime
import numpy as np
import pandas as pd
from gym import Wrapper
from utils.additional import write_results_csv, check_path, storage_saver, arr2str
LOG_PATH = './paper_logs/'
FILE_NAME = 'log_'
FILE_EXCITON = '.csv'
def file_is_empty(path):
return os.stat(path).st_size == 0
def save_to_file(path, dict_saver):
header = list(dict_saver.keys())
values = list(dict_saver.values())
write_results_csv(path, header, values)
class RewardSummarizer:
""" Summarizes rewards received from environment. """
def __init__(self, nenvs, prefix, running_mean_size=100, step_var=None):
self.prefix = prefix
self.step_var = step_var
self.had_ended_episodes = np.zeros(nenvs, dtype=np.bool)
self.rewards = np.zeros(nenvs)
self.episode_lengths = np.zeros(nenvs)
self.reward_queues = [[] # deque([], maxlen=running_mean_size)
for _ in range(nenvs)]
# self.reward_df = None
self.row_list = []
# self.dict1 = None
self.drop_count = 0
check_path(LOG_PATH)
self.time_str = 'last_exp_' + datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')
self.file_to_save_path = ''.join([LOG_PATH, FILE_NAME, self.time_str, FILE_EXCITON])
def should_add_summaries(self):
""" Returns `True` if it is time to write summaries. """
return np.all(list(self.had_ended_episodes))
def add_summaries(self):
""" Writes summaries. """
dict_saver = {}
dict_saver.update({'cand': arr2str(storage_saver.get_architecture())})
dict_saver.update({"total_reward": np.mean(np.stack([q[-1] for q in self.reward_queues]))})
dict_saver.update({"reward_mean": np.mean(np.stack([np.mean(q) for q in self.reward_queues]))})
dict_saver.update({"reward_std": np.mean(np.stack([np.std(q) for q in self.reward_queues]))})
dict_saver.update({"episode_length": np.mean(self.episode_lengths)})
dict_saver.update({"min_reward": None})
dict_saver.update({"max_reward": None})
if self.had_ended_episodes.size > 1:
dict_saver.update({"min_reward": np.max( | np.stack([q[-1] for q in self.reward_queues]) | numpy.stack |
"""
A set of functions for automatically applying simple preprocessing steps for
removing a certain class and/or ensure that a pair of PSG and HYP files match
each other in length
OBS: It is always assumed that the PSG and HYP files start at the same real
time. That is, they are aligned with respect to their first entries. Any
discrepancy between PSG and HYP lengths is assumed to follow from either of the
two extending longer in time at the end of the study. The data that extends
beyond the other file will normally be discarded (see strip functions below)
"""
import numpy as np
from utime.hypnogram import SparseHypnogram
from utime.errors import NotLoadedError, StripError
_STRIP_ERR = StripError("Unexpected difference between PSG and HYP lengths.")
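# Illustrative call order (sketch; the 128 Hz sample rate and the class integer
# 5 for 'UNKNOWN'-type stages are assumptions, not taken from this module):
#   psg, hyp = strip_class(psg, hyp, class_int=5, sample_rate=128, check_lengths=True)
#   psg, hyp = drop_class(psg, hyp, class_int=5, sample_rate=128, check_lengths=True)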
def _strip(hyp, mask, inits, durs, stages, pop_from_start):
"""
Helper function for 'strip_class_leading' and 'strip_class_trailing'
Removes elements from beginning of lists 'inits', 'durs', 'stages'
according to 'mask' if pop_from_start=True, otherwise from the end of those
lists.
"""
for m in mask:
if not m:
break
if pop_from_start:
inits.pop(0), durs.pop(0), stages.pop(0)
else:
inits.pop(), durs.pop(), stages.pop()
hyp.inits = np.array(inits, hyp.inits.dtype)
hyp.durations = np.array(durs, hyp.durations.dtype)
hyp.stages = np.array(stages, hyp.stages.dtype)
def strip_class_leading(psg, hyp, class_int, sample_rate, check_lengths=False):
"""
Remove stage 'class_int' events from start and/or end of hypnogram
Typically applied in 'strip_class_leading_and_trailing'
See drop_class function for argument description.
"""
remove_mask = np.asarray(hyp.stages) == class_int
i, d, s = list(hyp.inits), list(hyp.durations), list(hyp.stages)
_strip(hyp, remove_mask, i, d, s, pop_from_start=True)
if check_lengths and not assert_equal_length(psg, hyp, sample_rate):
raise _STRIP_ERR
return psg, hyp
def strip_class_trailing(psg, hyp, class_int, sample_rate, check_lengths=False):
"""
Remove stage 'class_int' events from the end of hypnogram
Typically applied in 'strip_class_leading_and_trailing'
See drop_class function for argument description.
"""
remove_mask = np.asarray(hyp.stages) == class_int
i, d, s = list(hyp.inits), list(hyp.durations), list(hyp.stages)
_strip(hyp, reversed(remove_mask), i, d, s, pop_from_start=False)
if check_lengths and not assert_equal_length(psg, hyp, sample_rate):
raise _STRIP_ERR
return psg, hyp
def strip_class_leading_and_trailing(psg, hyp, class_int, sample_rate,
check_lengths=False):
"""
Drops a class 'class_int' from the head and tail of a hypnogram file.
Does not strip the PSG or HYP further. If this function is applied alone,
the PSG and HYP lengths should precisely match after dropping the class
See drop_class function for argument description.
"""
strip_class_leading(psg, hyp, class_int, sample_rate)
strip_class_trailing(psg, hyp, class_int, sample_rate)
if check_lengths and not assert_equal_length(psg, hyp, sample_rate):
raise _STRIP_ERR
return psg, hyp
def strip_psg_to_match_hyp_len(psg, hyp, sample_rate, check_lengths=False):
"""
Trims the tail of a PSG to match the length of a hypnogram.
See drop_class function for argument description.
"""
psg_len_sec = psg.shape[0] / sample_rate
diff_sec = psg_len_sec - hyp.total_duration
if diff_sec < 0:
raise StripError("HYP length is larger than PSG length, "
"should not strip PSG. Consider the "
"'strip_hyp_match_psg_len' or 'strip_to_match' "
"functions")
elif diff_sec == 0:
return psg
idx_to_strip = int(sample_rate * diff_sec)
if check_lengths and not assert_equal_length(psg, hyp, sample_rate):
raise _STRIP_ERR
return psg[:-idx_to_strip]
def strip_hyp_to_match_psg_len(psg, hyp, sample_rate, check_lengths=False):
"""
Strips a (longer) hypnogram to match the length of a (shorter) PSG
See the SparseHypnogram.set_new_end_time method
See drop_class function for argument description.
"""
psg_len_sec = psg.shape[0] / sample_rate
diff_sec = hyp.end_time - psg_len_sec
if diff_sec % hyp.period_length_sec:
raise StripError("Time difference between PSG and HYP ({} sec) not"
" evenly divisible by the period length "
"({} sec)".format(diff_sec, hyp.period_length_sec))
if diff_sec < 0:
raise StripError("PSG length is larger than HYP length, "
"should not strip HYP. Consider the "
"'strip_psg_to_match_hyp_len' or 'strip_to_match' "
"functions")
elif diff_sec == 0:
return hyp
hyp.set_new_end_time(hyp.end_time - diff_sec)
if check_lengths and not assert_equal_length(psg, hyp, sample_rate):
raise _STRIP_ERR
return hyp
def strip_to_match(psg, hyp, sample_rate, class_int=None, check_lengths=False):
"""
Strips to match the PSG and HYP lengths using the following ordered steps:
1) If a class_int is passed and if the hypnogram is longest, attempt
to match by removing the class_int stages from the end of the
hypnogram
2) If the hypnogram is longest, reduce the length of the hypnogram
3) If the PSG is longest, strip the PSG from the tail to match
See drop_class function for argument description.
"""
psg_length_sec = psg.shape[0] / sample_rate
if class_int and hyp.total_duration > psg_length_sec:
# Remove trailing class integer
strip_class_trailing(None, hyp, class_int, None)
if hyp.total_duration > psg_length_sec:
strip_hyp_to_match_psg_len(psg, hyp, sample_rate)
if psg_length_sec > hyp.total_duration: # Note total_dur. is a property
psg = strip_psg_to_match_hyp_len(psg, hyp, sample_rate)
if check_lengths and not assert_equal_length(psg, hyp, sample_rate):
raise _STRIP_ERR
return psg, hyp
def strip_class(psg, hyp, class_int, sample_rate, check_lengths=False):
"""
Remove class 'class_int' if leading or trailing, then strip to match
See drop_class function for argument description.
"""
strip_class_leading_and_trailing(psg, hyp, class_int, sample_rate)
strip_to_match(psg, hyp, class_int=class_int, sample_rate=sample_rate)
if check_lengths and not assert_equal_length(psg, hyp, sample_rate):
raise _STRIP_ERR
return psg, hyp
def drop_class(psg, hyp, class_int, sample_rate, check_lengths=False):
"""
Drops a sleep stage / class with integer value 'class_int' entirely. That
is, all 'class_int' stages in SparseHypnogram 'hyp' will be dropped and
init times will be recomputed. The corresponding PSG signal will likewise
be removed entirely.
This function is used mostly to remove 'UNKNOWN', 'OTHERS', 'NOT SCORED'
type sleep stage classes, often collectively assigned class integer 5.
Note that due to the re-computing of the hypnogram init times, one should
no longer look up sleep stages in the new, stripped hypnogram using real
time stamps from the study (second '100' in the old and new hypnogram may
no longer correspond to the same data).
Also note that dropping a class this way will cause flanking PSG segments
to transition sharply/non smoothly if the dropped class was not in the
head or tail of the study. This is, however, in our experiance, not an
issue as these stages - in our applications - mostly occur near the
beginning or end of the study and rarely in general.
Args:
psg: A ndarray, PSG data, of shape [N, C]
hyp: A SparseHypnogram
class_int: Integer value corresponding to the class that should be
dropped.
sample_rate: The sample rate (Hz) of the passed PSG
check_lengths: Assert that the PSG and HYP have equal length after the
stripping function has been applied. This is usually
wanted, but the default parameter is set to False for
all strip functions, as they may be used inside other
strip functions. The high-level 'apply_strip_func'
function always sets check_lengths=True on the
'top-level' strip function.
Returns:
psg, hyp
"""
# Get all stages of class 'class_int'
mask = hyp.stages == class_int
# Get init and duration drop masks
inits_to_drop = hyp.inits[mask]
durs_to_drop = hyp.durations[mask]
# Find all PSG indices that should be removed
inds_to_remove = []
for i, (start_sec, dur) in enumerate(zip(inits_to_drop, durs_to_drop)):
end_sec = start_sec + dur
# Convert to indices
start_idx = start_sec * sample_rate
end_idx = end_sec * sample_rate
inds_to_remove.extend(range(start_idx, min(len(psg), end_idx)))
# Drop PSG on inds
    psg = np.delete(psg, inds_to_remove, axis=0)
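    # Worked example (added for clarity; the numbers are hypothetical): with
    # sample_rate=100 Hz, a dropped stage with init=30 s and duration=30 s contributes
    # PSG row indices range(30*100, 60*100) = range(3000, 6000) to inds_to_remove,
    # which np.delete then removes above; the hypnogram init times are recomputed
    # afterwards, as described in the docstring.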
import os
import time
import sys
import math
import gzip
import pickle
import glob
import numpy as np
#
from multiprocessing import Process
from joblib import Parallel, delayed
import multiprocessing
#
from multiprocessing.dummy import Pool as ThreadPool
#
from collections import defaultdict
# rdkit cheminformania
from rdkit import DataStructs
from rdkit import rdBase
from rdkit import Chem
from rdkit.Chem import Crippen
from rdkit.Chem import QED
from rdkit.Chem import rdMolDescriptors
#
from rdkit.Chem.Draw import SimilarityMaps
#Machine learning modules
import sklearn
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
print ("\n")
print (" Python:", sys.version )
print (" Numpy :", np.__version__ )
print (" Rdkit :", rdBase.rdkitVersion ,"\n" )
_fscores = None
def genFP(mol,Dummy=-1):
# Helper function to convert to Morgan type fingerprint in Numpy Array
fp = SimilarityMaps.GetMorganFingerprint(mol)
fp_vect = np.zeros((1,))
DataStructs.ConvertToNumpyArray(fp, fp_vect)
return fp_vect
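# Example (illustrative; the SMILES string is arbitrary): building the fingerprint
# vector consumed by the random forest further below.
#
#   mol = Chem.MolFromSmiles("CCO")
#   fp_vect = genFP(mol)                      # 1-D numpy array
#   # forest.predict_proba(fp_vect.reshape(1, -1)) would then give class probabilities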
def readFragmentScores(name='fpscores'):
#import cPickle,gzip
global _fscores
_fscores = pickle.load(gzip.open('%s.pkl.gz'%name))
outDict = {}
for i in _fscores:
for j in range(1,len(i)):
outDict[i[j]] = float(i[0])
_fscores = outDict
def numBridgeheadsAndSpiro(mol,ri=None):
if ri is None:
ri=mol.GetRingInfo()
arings = [set(x) for x in ri.AtomRings()]
spiros=set()
for i,ari in enumerate(arings):
for j in range(i+1,len(arings)):
            shared=ari&arings[j]
if len(shared)==1:
spiros.update(shared)
nSpiro=len(spiros)
# find bonds that are shared between rings that share at least 2 bonds:
nBridge=0
brings = [set(x) for x in ri.BondRings()]
bridges=set()
for i,bri in enumerate(brings):
for j in range(i+1,len(brings)):
shared=bri&brings[j]
if len(shared)>1:
atomCounts=defaultdict(int)
for bi in shared:
bond = mol.GetBondWithIdx(bi)
atomCounts[bond.GetBeginAtomIdx()]+=1
atomCounts[bond.GetEndAtomIdx()]+=1
tmp=0
for ai,cnt in atomCounts.items():
if cnt==1:
tmp+=1
bridges.add(ai)
#if tmp!=2: # no need to stress the users
#print 'huh:',tmp
return len(bridges),nSpiro
def calculateScore(m):
if _fscores is None: readFragmentScores()
##<NAME>. and <NAME>. “Estimation of Synthetic Accessibility
##Score of Drug-like Molecules based on Molecular Complexity and Fragment
##Contributions” Journal of Cheminformatics 1:8 (2009)
#
# fragment score
#<- 2 is the *radius* of the circular fingerprint
fp = rdMolDescriptors.GetMorganFingerprint(m,2)
fps = fp.GetNonzeroElements()
score1 = 0.
nf = 0
for bitId,v in fps.items():
nf += v
sfp = bitId
score1 += _fscores.get(sfp,-4)*v
score1 /= nf
# features score
nAtoms = m.GetNumAtoms()
nChiralCenters = len(Chem.FindMolChiralCenters(m,includeUnassigned=True))
ri = m.GetRingInfo()
nBridgeheads,nSpiro=numBridgeheadsAndSpiro(m,ri)
nMacrocycles=0
for x in ri.AtomRings():
if len(x)>8: nMacrocycles+=1
sizePenalty = nAtoms**1.005 - nAtoms
stereoPenalty = math.log10(nChiralCenters+1)
spiroPenalty = math.log10(nSpiro+1)
bridgePenalty = math.log10(nBridgeheads+1)
macrocyclePenalty = 0.
# ---------------------------------------
# This differs from the paper, which defines:
# macrocyclePenalty = math.log10(nMacrocycles+1)
# This form generates better results when 2 or more macrocycles are present
if nMacrocycles > 0: macrocyclePenalty = math.log10(2)
score2 = 0. -sizePenalty -stereoPenalty -spiroPenalty -bridgePenalty -macrocyclePenalty
# correction for the fingerprint density
# not in the original publication, added in version 1.1
# to make highly symmetrical molecules easier to synthetise
score3 = 0.
if nAtoms > len(fps):
score3 = math.log(float(nAtoms) / len(fps)) * .5
sascore = score1 + score2 + score3
# need to transform "raw" value into scale between 1 and 10
min = -4.0
max = 2.5
sascore = 11. - (sascore - min + 1) / (max - min) * 9.
# smooth the 10-end
if sascore > 8.: sascore = 8. + math.log(sascore+1.-9.)
if sascore > 10.: sascore = 10.0
elif sascore < 1.: sascore = 1.0
return sascore
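# Example (illustrative): scoring a single molecule once the fragment-contribution
# table has been loaded; a simple molecule such as phenol should land near the
# "easy to synthesise" end of the 1-10 scale.
#
#   readFragmentScores("fpscores")
#   sa = calculateScore(Chem.MolFromSmiles("c1ccccc1O"))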
def pepLinealtoSMILE(seq):
    # Convert a linear peptide (FASTA one-letter sequence) to a SMILES string via HELM
tmpSeq = seq[0:1]+"."+seq[1:2]+"."+seq[2:3]+"."+seq[3:4]+"."+seq[4:5]+"."+seq[5:6]+"."+seq[6:7]
helmLineal="PEPTIDE1{"+tmpSeq +"}$$$$V2.0"
SeqFasta = Chem.MolFromHELM(str(helmLineal))
SeqSmiles=Chem.MolToSmiles(SeqFasta)
#
#print (SeqSmiles)
return SeqSmiles
def QSArproperties_test(array,forest, num, namefile):
##<NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>. (2012)
##Quantifying the chemical beauty of drugs,
##Nature Chemistry, 4, 90-98
##[https://doi.org/10.1038/nchem.1243]
#
##<NAME>.; <NAME>. (1999)
##Prediction of Physicochemical Parameters by Atomic Contributions,
##Journal of Chemical Information and Computer Sciences, 39, 868-873
##[https://doi.org/10.1021/ci990307l]
#
fw = open( 'QSAR-2D' + str(num) + str(namefile) + '.csv', 'w')
#
for line in array:
parameter= line.split(sep="\t",maxsplit=9)
peptide_seq = parameter[0]
peptide = parameter[1]
#
molpeptideLi = Chem.MolFromSmiles(peptide)
# subprocess
scoreSA_Li = calculateScore(molpeptideLi)
#
# Make Prediction Random-Forest
fp_vect_Li = genFP(molpeptideLi)
# Get probabilities
predictionsLi = forest.predict_proba(fp_vect_Li.reshape(1,-1))
#print ("Probability %s mutagenic %0.6f " % (sequence,predictionsLi[0][1]))
# See http://cdb.ics.uci.edu/cgibin/Smi2DepictWeb.py
propQSAR = QED.properties(molpeptideLi)
MolWeight = propQSAR.MW
MolLogP = propQSAR.ALOGP
HbondA = propQSAR.HBA
HbondD = propQSAR.HBD
PolarSA = propQSAR.PSA
Rbonds = propQSAR.ROTB
Aromatic = propQSAR.AROM
#
MolarRefractivity = Crippen.MolMR(molpeptideLi)
nAtoms = molpeptideLi.GetNumAtoms()
#
SynthAcces = scoreSA_Li
AmesMutagenic = predictionsLi[0][1]
#
result = ( str(MolWeight) + "\t" + str(MolLogP) + "\t" + str(HbondA) + "\t" + str(HbondD) + "\t" + str(PolarSA) + "\t" + str(Rbonds) + "\t" + str(MolarRefractivity) + "\t" + str(nAtoms) + "\t" + str(SynthAcces) + "\t" + str(AmesMutagenic) + "\t" + str (peptide_seq) + "\t" + str(peptide) + "\n")
#print (result)
fw.write(result)
fw.close()
if __name__=='__main__':
#
# Time
t1=time.time()
# Data Base Synthetic Accessibility
readFragmentScores("fpscores")
##<NAME>., <NAME>., <NAME>., <NAME>., ter <NAME>.,
##<NAME>., <NAME>., and <NAME>. (2009)
##Benchmark Data Set for in Silico Prediction of Ames Mutagenicity.
##J. Chem. Inf. Model. 49, 2077−2081.
data = np.genfromtxt('smiles_cas_N6512.smi',
delimiter='\t',
names=['Smiles','CAS','Mutagen'],
encoding=None,
dtype=None,
comments='##')
#
# Convert smiles to RDkit molecules and calculate fingerprints
mols = []
X = []
y = []
for record in data:
try:
mol = Chem.MolFromSmiles(record[0])
if type(mol) != type(None):
fp_vect = genFP(mol)
mols.append([mol, record[1],record[2]])
X.append(fp_vect)
y.append(record[2])
except:
print ("Failed for CAS: %s" % record[1])
    # See how successful the conversions were
print ("Imported smiles %s" % len(data))
print ("Converted smiles %s" % len(mols))
# Prepare the data for modelling
X=np.array(X)
    y = np.array(y)
import numpy as np
import matplotlib.pyplot as plt
import cv2
import time
# world size, 300 for single class room view, 500/1000 for more building space
world_size = 300
world = np.zeros((world_size, world_size, 3))
world_map = np.zeros((world_size, world_size))
# world borders:
# left border
world_map[0:world_size+1, 0:2] = 20
# right border
world_map[0:world_size+1, world_size-1:world_size+1] = 20
# top border
world_map[0:2, 0:world_size+1] = 20
# bottom border
world_map[world_size-1:world_size+1, 0:world_size+1] = 20
D3_world_map = np.repeat(world_map[:, :, np.newaxis], 3, axis=2)
nr_of_agents = 100
agent_list_infected = np.random.rand(nr_of_agents) > 1
agent_list_infected[0] = 1  # seed one infected agent ("patient zero") so infections can spread
agent_list_susceptible = np.ones(nr_of_agents)
positions = np.random.rand(2, nr_of_agents)*world_size
infected_positions = np.where(agent_list_infected == 1)
velocity = (np.random.rand(2, nr_of_agents)-0.5)*2
velocity_length = np.linalg.norm(velocity, ord=2, axis=0)
world[positions[0].astype(np.int32), positions[1].astype(np.int32), :] = 10
world_map[positions[0].astype(np.int32), positions[1].astype(np.int32)] = 10
infection_range = 5
velocity_range = 10
attraction_range = 20
dispersion_range = 5
max_speed = 2
nr_of_infected = []
plt.title("Current positions")
plt.xlabel("x positions")
plt.ylabel("y positions")
while True:
p_1 = np.repeat(positions[:, :, np.newaxis], positions.shape[1], axis=2)
p_2 = np.rot90(p_1, axes=(1, 2))
p_1 -= p_2
distances = np.linalg.norm(p_1, axis=0)
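    # (explanatory note) The three lines above build the all-pairs distance matrix
    # without an explicit double loop: p_1 repeats every position along a new axis,
    # np.rot90 over the last two axes swaps the two agent indices (valid because p_1
    # is constant along its last axis), so after the subtraction
    # distances[i, j] = ||position_i - position_j||.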
distances[np.arange(nr_of_agents), np.arange(nr_of_agents)] = infection_range +20
infection_cases = np.array(np.where(distances < infection_range))
velocity_cases = np.array(np.where(distances < velocity_range))
attraction_cases = np.array(np.where(distances < attraction_range))
dispersion_cases = np.array(np.where(distances < dispersion_range))
# print()
# print(infection_cases.shape)
if infection_cases.shape[1] >= 1:
infections = agent_list_infected[infection_cases[0, :]] == agent_list_susceptible[infection_cases[1, :]]
infections_where = np.array(np.where(infections == 1))
agent_list_infected[infection_cases[1, infections_where]] = 1
agent_list_susceptible[infection_cases[1, infections_where]] = 0
if dispersion_cases.shape[1] >= 1:
# This would be where you implement social distancing as a force moving agent apart.
# This is done in BOID simulations so look into that.
pass
if attraction_cases.shape[1] >= 1:
# make agents cluster - again BOIDS
pass
if velocity_cases.shape[1] >= 1:
# make agents align their movement - BOIDS
pass
# wall interaction and agent collision avoidance
for i0 in range(nr_of_agents):
wall_perception = world_map[
(positions[0, i0] - max_speed).astype(np.int32):(positions[0, i0] + max_speed + 1).astype(
np.int32),
(positions[1, i0] - max_speed).astype(np.int32):(positions[1, i0] + max_speed + 1).astype(
np.int32)]
agent_percetion = world_map[
(positions[0, i0] - dispersion_range).astype(np.int32):(positions[0, i0] + dispersion_range + 1).astype(
np.int32),
(positions[1, i0] - dispersion_range).astype(np.int32):(positions[1, i0] + dispersion_range + 1).astype(
np.int32)]
#Looking for values of walls and agents
wall_location = np.array(np.where(wall_perception == 20))
agent_location= np.array(np.where(agent_percetion == 10))
        # subtract max speed and dispersion range to make the values relative to the center
wall_location -= max_speed
agent_location -=dispersion_range
#Subtracting the sum of distances from velocity of agent i0
velocity[:, i0] -= np.sum(wall_location, 1) *10 #This is where a force multiplier can be added
        velocity[:, i0] -= np.sum(agent_location, 1)
import random
import os
import sys
import numpy as np
import subprocess as sub
from functools import partial
from base import Sim, Env, ObjectiveDict
from networks import CPPN, DirectEncoding
from softbot import Genotype, Phenotype, Population
from tools.algorithms import ParetoOptimization
from tools.checkpointing import continue_from_checkpoint
from tools.utils import make_material_tree, count_occurrences
# sub.call("cp ../_voxcad/voxelyzeMain/voxelyze .", shell=True)
sub.call("cp ~/tmp/research_code/evosoro/_voxcad/voxelyzeMain/voxelyze .", shell=True)
sub.call("chmod 755 voxelyze", shell=True)
SEED = int(sys.argv[1])
MAX_TIME = float(sys.argv[2])
IND_SIZE = (10, 10, 9)
FITNESS_TAG = "<normAbsoluteDisplacement>"
# STOP_IF_BLOCK_TOUCHES_GROUND = True # check for ground penetration
MIN_PERCENT_FULL = 0.5
POP_SIZE = 50
MAX_GENS = 1001
NUM_RANDOM_INDS = 1
INIT_TIME = 1
# diff from main
SIM_TIME = 10.0 + INIT_TIME # was 10+init # includes init time
FREQ = 2
TEMP_AMP = 39.4714242553 # 50% volumetric change with temp_base=25: (1+0.01*(39.4714242553-25))**3-1=0.5
DT_FRAC = 0.9 # 0.3
STIFFNESS = 5e6
GRAV_ACC = -0.1
VOXEL_SIZE = 0.05
# DRAW_SHADOW = True # todo
FLUID_ENV = 1 # if 1 drag forces are added
RHO_FLUID = 1000.0 # water density
C_DRAG = 1.5 # fluid drag associated to a triangular facet
AGGREGATE_DRAG_COEF = 0.5 * C_DRAG * RHO_FLUID # aggregate drag coefficient
TIME_TO_TRY_AGAIN = 25
MAX_EVAL_TIME = 61
SAVE_VXA_EVERY = MAX_GENS + 1
SAVE_LINEAGES = False
CHECKPOINT_EVERY = 1
EXTRA_GENS = 0
RUN_DIR = "run_{}".format(SEED)
RUN_NAME = "AquaticBlockPushers"
def embedded_pill(this_softbot, *args, **kwargs):
mat = make_material_tree(this_softbot, *args, **kwargs)
mat[2:8, 2:8, 2:8] = 3
mat[3:7, 3:7, 3:7] = 0
mat[4:6, 4:6, 4:6] = 8
return mat
class MyGenotype(Genotype):
def __init__(self):
Genotype.__init__(self, orig_size_xyz=IND_SIZE)
self.add_network(DirectEncoding(output_node_name="phase_offset", orig_size_xyz=IND_SIZE, symmetric=False),
freeze=True)
self.to_phenotype_mapping.add_map(name="phase_offset", tag="<PhaseOffset>", logging_stats=None)
self.add_network(CPPN(output_node_names=["shape", "muscleOrTissue"]))
self.to_phenotype_mapping.add_map(name="material", tag="<Data>", func=embedded_pill, output_type=int,
dependency_order=["shape", "muscleOrTissue"], logging_stats=None)
self.to_phenotype_mapping.add_output_dependency(name="shape", dependency_name=None, requirement=None,
material_if_true=None, material_if_false="0")
self.to_phenotype_mapping.add_output_dependency(name="muscleOrTissue", dependency_name="shape",
requirement=True, material_if_true="3", material_if_false="1")
class MyPhenotype(Phenotype):
def is_valid(self, min_percent_full=MIN_PERCENT_FULL):
for name, details in self.genotype.to_phenotype_mapping.items():
if np.isnan(details["state"]).any():
return False
if name == "material":
state = details["state"]
num_vox = np.sum(state > 0)
if num_vox < np.product(self.genotype.orig_size_xyz) * min_percent_full:
return False
                if np.sum(state == 3)
import os
import cv2
import sys
import time
import platform
import logging
import numpy as np
from multiprocessing import Queue as pQueue
from threading import Thread
from queue import Queue, LifoQueue
logger = logging.getLogger('debug')
class DetectionLoader:
def __init__(self, model, streams):
self.model = model
self.streams = streams
self.detectors = []
def loadDetectors(self):
for stream in self.streams.getStreams():
ref_detectors = Detector(self.model, stream)
self.detectors.append(ref_detectors)
return self.detectors
def getFrames(self):
frames = []
for detector in self.detectors:
frame = detector.getFrame()
if frame is not None:
frames.append(frame)
return frames
class Detector:
def __init__(self, model, stream):
self.model = model
self.stream = stream
self.w = self.model.getw()
self.h = self.model.geth()
self.keypointsMapping = ['Nose', 'Neck', 'R-Sho', 'R-Elb', 'R-Wr', 'L-Sho', 'L-Elb', 'L-Wr', 'R-Hip', 'R-Knee', 'R-Ank', 'L-Hip', 'L-Knee', 'L-Ank', 'R-Eye', 'L-Eye', 'R-Ear', 'L-Ear']
self.POSE_PAIRS = [[1,2], [1,5], [2,3], [3,4], [5,6], [6,7], [1,8], [8,9], [9,10], [1,11], [11,12], [12,13], [1,0], [0,14], [14,16], [0,15], [15,17], [2,17], [5,16]]
self.mapIdx = [[31,32], [39,40], [33,34], [35,36], [41,42], [43,44], [19,20], [21,22], [23,24], [25,26], [27,28], [29,30], [47,48], [49,50], [53,54], [51,52], [55,56], [37,38], [45,46]]
self.colors = [[0,100,255], [0,100,255], [0,255,255], [0,100,255], [0,255,255], [0,100,255], [0,255,0], [255,200,100], [255,0,255], [0,255,0], [255,200,100], [255,0,255], [0,0,255], [255,0,0], [200,200,0], [255,0,0], [200,200,0], [0,0,0]]
self.threshold = 0.2
self.nPoints = 18
self.old_neck = -1*np.ones(20, dtype=int)
self.new_neck = -1*np.ones(20, dtype=int)
self.subject_height = -1*np.ones(20, dtype=int)
self.fall_ratio = 0.5
self.fallcount = 0
self.totalframecount = 0
self.frameClone = None
self.outframes = Queue(maxsize=0)
self.infer()
def getKeypoints(self):
mapSmooth = cv2.GaussianBlur(self.probMap, (3, 3), 0, 0)
mapMask = np.uint8(mapSmooth>self.threshold)
keypoints = []
contours = None
try:
#OpenCV4.x
contours, _ = cv2.findContours(mapMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
except:
#OpenCV3.x
_, contours, _ = cv2.findContours(mapMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
blobMask = np.zeros(mapMask.shape)
blobMask = cv2.fillConvexPoly(blobMask, cnt, 1)
maskedProbMap = mapSmooth * blobMask
_, maxVal, _, maxLoc = cv2.minMaxLoc(maskedProbMap)
keypoints.append(maxLoc + (self.probMap[maxLoc[1], maxLoc[0]],))
return keypoints
def getValidPairs(self):
valid_pairs = []
invalid_pairs = []
n_interp_samples = 10
paf_score_th = 0.1
conf_th = 0.5
for k in range(len(self.mapIdx)):
pafA = self.outputs[0, self.mapIdx[k][0], :, :]
pafB = self.outputs[0, self.mapIdx[k][1], :, :]
pafA = cv2.resize(pafA, (self.w, self.h))
pafB = cv2.resize(pafB, (self.w, self.h))
candA = self.detected_keypoints[self.POSE_PAIRS[k][0]]
candB = self.detected_keypoints[self.POSE_PAIRS[k][1]]
nA = len(candA)
nB = len(candB)
if( nA != 0 and nB != 0):
valid_pair = np.zeros((0,3))
for i in range(nA):
max_j=-1
maxScore = -1
found = 0
for j in range(nB):
d_ij = np.subtract(candB[j][:2], candA[i][:2])
norm = np.linalg.norm(d_ij)
if norm:
d_ij = d_ij / norm
else:
continue
interp_coord = list(zip(np.linspace(candA[i][0], candB[j][0], num=n_interp_samples),
np.linspace(candA[i][1], candB[j][1], num=n_interp_samples)))
paf_interp = []
for k in range(len(interp_coord)):
paf_interp.append([pafA[int(round(interp_coord[k][1])), int(round(interp_coord[k][0]))],
pafB[int(round(interp_coord[k][1])), int(round(interp_coord[k][0]))] ])
paf_scores = np.dot(paf_interp, d_ij)
avg_paf_score = sum(paf_scores)/len(paf_scores)
                        if (len(np.where(paf_scores > paf_score_th)
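                        # Illustrative continuation (an assumption, following the standard
                        # part-affinity-field pairing criterion): the fraction of sampled
                        # points whose PAF score exceeds paf_score_th is compared to conf_th,
                        # e.g.
                        #   if (len(np.where(paf_scores > paf_score_th)[0]) / n_interp_samples) > conf_th:
                        #       if avg_paf_score > maxScore:
                        #           max_j, maxScore, found = j, avg_paf_score, 1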
import inspect
import numpy as np
from primitives_ubc.regCCFS.src.utils.commonUtils import sVT
from primitives_ubc.regCCFS.src.utils.commonUtils import is_numeric
from primitives_ubc.regCCFS.src.utils.commonUtils import fastUnique
from primitives_ubc.regCCFS.src.utils.commonUtils import queryIfColumnsVary
from primitives_ubc.regCCFS.src.utils.commonUtils import queryIfOnlyTwoUniqueRows
from primitives_ubc.regCCFS.src.utils.ccfUtils import regCCA_alt
from primitives_ubc.regCCFS.src.utils.ccfUtils import random_feature_expansion
from primitives_ubc.regCCFS.src.utils.ccfUtils import genFeatureExpansionParameters
from primitives_ubc.regCCFS.src.training_utils.component_analysis import componentAnalysis
from primitives_ubc.regCCFS.src.training_utils.twopoint_max_marginsplit import twoPointMaxMarginSplit
import warnings
warnings.filterwarnings('ignore')
import logging
logger = logging.getLogger(__name__)
#-----------------------------------------------------------------------------#
def setupLeaf(YTrain, bReg, options):
"""
Update tree struct to make node a leaf
"""
tree = {}
tree["bLeaf"] = True
tree["Npoints"] = YTrain.shape[0]
tree["mean"] = np.mean(YTrain, axis=0)
if bReg:
tree["std_dev"] = np.std(YTrain, axis=0, ddof=1)
# If a mapping has been applied, invert it
if not (options["org_stdY"].size == 0):
tree["mean"] = tree["mean"] * options["org_stdY"]
tree["std_dev"] = tree["std_dev"] * options["org_stdY"]
if not (options["org_muY"].size == 0):
tree["mean"] = tree["mean"] + options["org_muY"]
return tree
#-----------------------------------------------------------------------------#
def makeExpansionFunc(wZ, bZ, bIncOrig):
if bIncOrig:
f = lambda x: np.concatenate((x, random_feature_expansion(x, wZ, bZ)), axis=1)
else:
f = lambda x: random_feature_expansion(x, wZ, bZ)
return f
#-----------------------------------------------------------------------------#
def calc_mse(cumtotal, cumsq, YTrainSort):
value = np.divide(cumsq, sVT(np.arange(1, YTrainSort.shape[0]+1))) -\
np.divide(((cumtotal[0:-1, :])**2 + YTrainSort**2 + np.multiply(2 * cumtotal[0:-1, :], YTrainSort)),\
sVT(np.arange(1, YTrainSort.shape[0]+1)**2))
return value
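# (explanatory note) calc_mse returns, for every prefix length n of the sorted targets,
# the running variance E[y^2] - (E[y])^2 expressed through cumulative sums:
#   cumsq[n] / n                      -> running E[y^2]
#   (cumtotal[n-1] + y_n)^2 / n^2     -> running (E[y])^2,
# the second term being expanded as a^2 + b^2 + 2ab with a = cumtotal[0:-1], b = YTrainSort.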
#-------------------------------------------------------------------------------
def growCCT(XTrain, YTrain, bReg, options, iFeatureNum, depth):
"""
This function applies greedy splitting according to the CCT algorithm and the
provided options structure. Algorithm either returns a leaf or forms an
internal splitting node in which case the function recursively calls itself
for each of the children, eventually returning the corresponding subtree.
Parameters
----------
XTrain = Array giving training features. Data should be
processed using processInputData before being passed to
CCT
YTrain = Output data after formatting carried out by genCCF
bReg = Whether to perform regression instead of classification.
Default = false (i.e. classification).
options = Options class of type optionsClassCCF. Some fields are
updated during recursion
iFeatureNum = Grouping of features as per processInputData. During
recursion if a feature is found to be identical across
data points, the corresponding values in iFeatureNum are
replaced with NaNs.
depth = Current tree depth (zero based)
Returns
-------
tree = Structure containing learnt tree
"""
# Standard variables
eps = 2.2204e-16
# Set any missing required variables
if (options["mseTotal"]).size == 0:
options["mseTotal"] = YTrain.var(axis=0)
#---------------------------------------------------------------------------
# First do checks for whether we should immediately terminate
#---------------------------------------------------------------------------
N = XTrain.shape[0]
# Return if one training point, pure node or if options for returning
# fulfilled. A little case to deal with a binary YTrain is required.
bStop = (N < (np.amax([2, options["minPointsForSplit"], 2 * options["minPointsLeaf"]]))) or\
(is_numeric(options["maxDepthSplit"]) and depth > options["maxDepthSplit"])
if depth > 490 and (options["maxDepthSplit"] == 'stack'):
bStop = True
logging.warning('Reached maximum depth imposed by stack limitations!')
if bStop:
tree = setupLeaf(YTrain, bReg, options)
return tree
else:
# Check if variance in Y is less than the cut off amount
varY = YTrain.var(axis=0)
if np.all(varY < (options["mseTotal"] * options["mseErrorTolerance"])):
tree = setupLeaf(YTrain, bReg, options)
return tree
#---------------------------------------------------------------------------
# Subsample features as required for hyperplane sampling
#---------------------------------------------------------------------------
iCanBeSelected = fastUnique(X=iFeatureNum)
iCanBeSelected = iCanBeSelected[~np.isnan(iCanBeSelected)]
lambda_ = np.min((iCanBeSelected.size, options["lambda"]))
indFeatIn = np.random.choice(int(iCanBeSelected.size), int(lambda_), replace=False)
iFeatIn = iCanBeSelected[indFeatIn]
bInMat = np.equal((iFeatureNum.flatten(order='F')[np.newaxis]), (np.sort(iFeatIn.flatten(order='F'))[np.newaxis]).T) # 1xk == nx1
iIn = (np.any(bInMat, axis=0)).ravel().nonzero()[0]
# Check for variation along selected dimensions and
# resample features that have no variation
bXVaries = queryIfColumnsVary(X=XTrain[:, iIn], tol=options["XVariationTol"])
if (not np.all(bXVaries)):
iInNew = iIn
nSelected = 0
iIn = iIn[bXVaries]
while (not np.all(bXVaries)) and lambda_ > 0:
iFeatureNum[iInNew[~bXVaries]] = np.nan
bInMat[:, iInNew[~bXVaries]] = False
bRemainsSelected = np.any(bInMat, axis=1)
nSelected = nSelected + bRemainsSelected.sum(axis=0)
iCanBeSelected = np.delete(iCanBeSelected, indFeatIn)
lambda_ = np.min((iCanBeSelected.size, options["lambda"]-nSelected))
if lambda_ < 1:
break
indFeatIn = np.random.choice(iCanBeSelected.size, size=int(lambda_), replace=False)
iFeatIn = iCanBeSelected[indFeatIn]
bInMat = np.equal((iFeatureNum.flatten(order='F')[np.newaxis]), (iFeatIn.flatten(order='F')[np.newaxis].T))
iInNew = (np.any(bInMat, axis=0)).ravel().nonzero()[0]
bXVaries = queryIfColumnsVary(X=XTrain[:, iInNew], tol=options["XVariationTol"])
iIn = np.sort(np.concatenate((iIn, iInNew[bXVaries])))
if iIn.size == 0:
# This means that there was no variation along any feature, therefore exit.
tree = setupLeaf(YTrain, bReg, options)
return tree
#---------------------------------------------------------------------------
# Projection bootstrap if required
#---------------------------------------------------------------------------
if options["bProjBoot"]:
iTrainThis = np.random.randint(N, size=(N, 1))
XTrainBag = XTrain[iTrainThis, iIn]
YTrainBag = YTrain[iTrainThis, :]
if len(YTrainBag.shape) > 2:
YTrainBag = np.squeeze(YTrainBag)
else:
XTrainBag = XTrain[:, iIn]
YTrainBag = YTrain
bXBagVaries = queryIfColumnsVary(X=XTrainBag, tol=options["XVariationTol"])
if (not np.any(bXBagVaries)) or\
(not bReg and YTrainBag.shape[1] > 1 and (np.sum(np.absolute(np.sum(YTrainBag, axis=0)) > 1e-12) < 2)) or\
(not bReg and YTrainBag.shape[1] == 1 and (np.any(np.sum(YTrainBag, axis=0) == np.array([0, YTrainBag.shape[0]])))) or\
(bReg and np.all(np.var(YTrainBag, axis=0) < (options["mseTotal"] * options["mseErrorTolerance"]))):
if (not options["bContinueProjBootDegenerate"]):
tree = setupLeaf(YTrain, bReg, options)
return tree
else:
XTrainBag = XTrain[:, iIn]
YTrainBag = YTrain
#---------------------------------------------------------------------------
# Check for only having two points
#---------------------------------------------------------------------------
if (not (len(options["projections"]) == 0)) and ((XTrainBag.shape[0] == 2) or queryIfOnlyTwoUniqueRows(X=XTrainBag)):
bSplit, projMat, partitionPoint = twoPointMaxMarginSplit(XTrainBag, YTrainBag, options["XVariationTol"])
if (not bSplit):
tree = setupLeaf(YTrain, bReg, options)
return tree
else:
bLessThanTrain = np.dot(XTrain[:, iIn], projMat) <= partitionPoint
iDir = 0
else:
# Generate the new features as required
if options["bRCCA"]:
wZ, bZ = genFeatureExpansionParameters(XTrainBag, options["rccaNFeatures"], options["rccaLengthScale"])
fExp = makeExpansionFunc(wZ, bZ, options["rccaIncludeOriginal"])
XTrainBag = fExp(XTrainBag)
projMat, _, _ = regCCA_alt(XTrainBag, YTrainBag, options["rccaRegLambda"], options["rccaRegLambda"], 1e-8)
if projMat.size == 0:
projMat = np.ones((XTrainBag.shape[1], 1))
UTrain = np.dot(fExp(XTrain[:, iIn]), projMat)
else:
projMat, yprojMat, _, _, _ = componentAnalysis(XTrainBag, YTrainBag, options["projections"], options["epsilonCCA"])
UTrain = np.dot(XTrain[:, iIn], projMat)
#-----------------------------------------------------------------------
# Choose the features to use
#-----------------------------------------------------------------------
# This step catches splits based on no significant variation
bUTrainVaries = queryIfColumnsVary(UTrain, options["XVariationTol"])
if (not np.any(bUTrainVaries)):
tree = setupLeaf(YTrain, bReg, options)
return tree
UTrain = UTrain[:, bUTrainVaries]
projMat = projMat[:, bUTrainVaries]
if options["bUseOutputComponentsMSE"] and bReg and (YTrain.shape[1] > 1) and\
(not (yprojMat.size == 0)) and (options["splitCriterion"] == 'mse'):
VTrain = np.dot(YTrain, yprojMat)
#-----------------------------------------------------------------------
# Search over splits using provided method
#-----------------------------------------------------------------------
nProjDirs = UTrain.shape[1]
splitGains = np.empty((nProjDirs,1))
splitGains.fill(np.nan)
iSplits = np.empty((nProjDirs,1))
iSplits.fill(np.nan)
for nVarAtt in range(nProjDirs):
# Calculate the probabilities of being at each class in each of child
# nodes based on proportion of training data for each of possible
# splits using current projection
sort_UTrain = UTrain[:, nVarAtt].ravel()
UTrainSort = np.sort(sort_UTrain)
iUTrainSort = np.argsort(sort_UTrain)
bUniquePoints_ = np.diff(UTrainSort, n=1, axis=0)
bUniquePoints = np.concatenate((bUniquePoints_ > options["XVariationTol"], np.array([False])))
if options["bUseOutputComponentsMSE"] and bReg and YTrain.shape[1] > 1 and (not (yprojMat.size == 0)) and (options["splitCriterion"] == 'mse'):
VTrainSort = VTrain[iUTrainSort, :]
else:
VTrainSort = YTrain[iUTrainSort, :]
leftCum = np.cumsum(VTrainSort, axis=0)
if (YTrain.shape[1] ==1 or options["bSepPred"]) and (not bReg):
# Convert to [class_doesnt_exist,class_exists]
leftCum = np.concatenate((np.subtract(sVT(X=np.arange(0,N)), leftCum), leftCum))
rightCum = np.subtract(leftCum[-1, :], leftCum)
# Calculate the metric values of the current node and two child nodes
if options["splitCriterion"] == 'mse':
cumSqLeft = np.cumsum(VTrainSort**2)
cumSqLeft = np.expand_dims(cumSqLeft, axis=1)
varData = np.subtract((cumSqLeft[-1]/N), (leftCum[-1, :]/N)**2)
if np.all(varData < (options["mseTotal"] * options["mseErrorTolerance"])):
# Total variation is less then the allowed tolerance so
# terminate and construct a leaf
tree = setupLeaf(YTrain, bReg, options)
return tree
cumtotal_l = np.concatenate((np.zeros((1, VTrainSort.shape[1])), leftCum))
metricLeft = calc_mse(cumtotal=cumtotal_l, cumsq=cumSqLeft, YTrainSort=VTrainSort)
# For calculating the right need to go in additive order again
# so go from other end and then flip
end = cumSqLeft.shape[0] - 1
vend = VTrainSort.shape[0] - 1
metricRight = np.concatenate((np.zeros((1, VTrainSort.shape[1])),\
calc_mse(rightCum[::-1, :],\
np.subtract((cumSqLeft[-1, :][np.newaxis]), cumSqLeft[(end-1)::-1, :]),\
VTrainSort[vend:0:-1, :])))
metricRight = metricRight[::-1, :]
# No need to do the grouping for regression as each must be
            # a separate output anyway.
else:
assert (False), 'Invalid split criterion!'
metricCurrent = np.copy(metricLeft[-1, :])
metricLeft[~bUniquePoints, :] = np.inf
metricRight[~bUniquePoints, :] = np.inf
# Calculate gain in metric for each of possible splits based on current
# metric value minus metric value of child weighted by number of terms
# in each child
metricGain = np.subtract(metricCurrent,\
(np.multiply(sVT(np.arange(1,N+1, 1)), metricLeft)\
+np.multiply(sVT(np.arange(N-1, -1, -1)), metricRight))/N)
metricGain = np.round(metricGain, decimals=4)
# Combine gains if there are mulitple outputs. Note that for gini,
# info and mse, the joint gain is equal to the mean gain, hence
# taking the mean here rather than explicitly calculating joints before.
if metricGain.shape[1] > 1:
if is_numeric(options["taskWeights"]):
# If weights provided, weight task appropriately in terms of importance.
metricGain = np.multiply(metricGain, (options["taskWeights"].flatten(order='F')[np.newaxis])) # (nxk) .* (1*k)
multiTGC = options["multiTaskGainCombination"]
if multiTGC == 'mean':
                metricGain = np.mean(metricGain, axis=1, keepdims=True)
import os
from typing import Union
import intake_io
import numpy as np
import pandas as pd
from am_utils.parallel import run_parallel
from am_utils.utils import walk_dir
from tqdm import tqdm
def compute_histogram(dataset):
"""
Compute intensity histogram for a give image.
Parameters
----------
    dataset : xr.Dataset
        Input image dataset
Returns
-------
pd.DataFrame:
Histogram as pandas DataFrame
"""
imghist = pd.DataFrame()
for i in range(dataset.dims['c']):
img = dataset.loc[dict(c=dataset.coords['c'][i])]['image'].data
hist, bins = np.histogram(img, bins=np.max(img) + 1, range=(0, np.max(img) + 1))
chist = pd.DataFrame({
'values': bins[:-1],
'counts': hist
})
chist = chist[chist['counts'] > 0]
chist['channel'] = dataset.coords['c'][i].data
imghist = pd.concat([imghist, chist], ignore_index=True)
return imghist
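# Example (illustrative; the file name is hypothetical): per-channel histogram for a
# single image loaded with intake_io.
#
#   dataset = intake_io.imload("sample_image.tif")
#   hist_df = compute_histogram(dataset)   # columns: values, counts, channel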
def compute_histogram_batch(input_dir: str, output_dir: str):
"""
Compute intensity histograms for all images in a folder and save as csv.
Parameters
----------
input_dir : str
Input directory
output_dir : str
Output directory
"""
samples = walk_dir(input_dir)
all_hist = pd.DataFrame()
for sample in tqdm(samples):
dataset = intake_io.imload(sample)
imghist = compute_histogram(dataset)
imghist['Image name'] = sample
fn_out = sample.replace(input_dir, output_dir).replace(os.path.splitext(sample)[-1], '.csv')
os.makedirs(os.path.dirname(fn_out), exist_ok=True)
imghist.to_csv(fn_out, index=False)
all_hist = pd.concat([all_hist, imghist], ignore_index=True)
all_hist.to_csv(output_dir.rstrip('/') + '.csv', index=False)
def subtract_background(dataset, bg_value):
    bg_value = np.array([bg_value])
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img = cv2.imread("test4.jpg")
def mix_color_grad_thresh(img, grad_thresh=(20, 90), s_thresh=(170, 255), dir_thresh=(0.7, 1.3), sobel_size=9):
# Convert to HLS color space and separate the S channel
# Note: img is the undistorted image
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
s_channel = hls[:, :, 2]
# Grayscale image
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Sobel x
abs_sobelx = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_size)) # Take the derivative in x
    abs_sobely = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_size)) # Take the derivative in y
scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
# Threshold magnitude gradient
gradient_magnitude = np.sqrt(abs_sobelx ** 2 + abs_sobely ** 2)
scale_factor = np.max(gradient_magnitude) / 255
gradient_magnitude = np.uint8(gradient_magnitude / scale_factor)
mag_binary_output = np.zeros_like(gradient_magnitude)
mag_binary_output[(gradient_magnitude >= grad_thresh[0]) & (gradient_magnitude <= grad_thresh[1])] = 1
# Threshold direction gradient
grad_direction = np.arctan2(abs_sobely, abs_sobelx)
dir_binary_output = np.zeros_like(grad_direction)
dir_binary_output[(grad_direction >= dir_thresh[0]) & (grad_direction <= dir_thresh[1])] = 1
# Threshold x gradient
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= grad_thresh[0]) & (scaled_sobel <= grad_thresh[1])] = 1
# Threshold color channel
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
# Combine the two binary thresholds
    combined_binary = np.zeros_like(sxbinary)
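    # Illustrative continuation (an assumption about the intended final step): the
    # gradient and colour masks are typically OR-ed before returning, e.g.
    #   combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1
    #   return combined_binary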
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import torch.distributed as dist
import time
import os
import sys
import io
from RNN_model import RNN_model
# Parse hyperparameters.
vocab_size = 8000
batch_size = 200
no_of_epochs = 10
# Load testing data
x_test = []
with io.open('/u/eot/syf1219/scratch/preprocessed_data/imdb_test.txt','r',encoding='utf-8') as f:
lines = f.readlines()
for line in lines:
line = line.strip()
line = line.split(' ')
line = np.asarray(line,dtype=np.int)
line[line>vocab_size] = 0
x_test.append(line)
y_test = np.zeros((25000,))
y_test[0:12500] = 1
vocab_size += 1
model = torch.load('rnn.model')
model.cuda()
L_Y_test = len(y_test)
test_accu = []
for epoch in range(no_of_epochs):
# Test
model.eval()
epoch_acc = 0.0
epoch_loss = 0.0
epoch_counter = 0
time1 = time.time()
I_permutation = np.random.permutation(L_Y_test)
for i in range(0, L_Y_test, batch_size):
x_input2 = [x_test[j] for j in I_permutation[i:i+batch_size]]
#sequence_length = 100
# sequence_length = sequence_lengths[1]
sequence_length = (epoch + 1) * 50
x_input = np.zeros((batch_size, sequence_length), dtype=np.int)
for j in range(batch_size):
x = np.asarray(x_input2[j])
sl = x.shape[0]
if(sl < sequence_length):
x_input[j,0:sl] = x
else:
                start_index = np.random.randint(sl-sequence_length+1)
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: bio_time_series
# language: python
# name: bio_time_series
# ---
# %%
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# %config InlineBackend.print_figure_kwargs = {'bbox_inches': None}
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import time
import pandas as pd
from tqdm.notebook import tqdm
from bioslds.arma import Arma
from bioslds.dataset import RandomArmaDataset
from bioslds.plotting import FigureManager, show_latent
from bioslds.cluster_quality import unordered_accuracy_score
from bioslds.batch import hyper_score_ar
from bioslds.regressors import (
BioWTARegressor,
CrosscorrelationRegressor,
CepstralRegressor,
)
from draft_helpers import (
paper_style,
calculate_ar_identification_progress,
make_multi_trajectory_plot,
make_accuracy_plot,
predict_plain_score,
make_accuracy_comparison_diagram,
get_accuracy_metrics,
calculate_smooth_weight_errors,
)
fig_path = os.path.join("..", "figs", "draft")
# %% [markdown]
# # Run BioWTA, autocorrelation, and cepstral oracle algorithms on signals based on pairs of AR(3) processes
# %% [markdown]
# ## Define the problem and the parameters for the learning algorithms
# %% [markdown]
# Using best parameters obtained from hyperoptimization runs.
# %%
n_signals = 100
n_samples = 200_000
orders = [(3, 0), (3, 0)]
dwell_times = 100
min_dwell = 50
max_pole_radius = 0.95
normalize = True
fix_scale = None
seed = 153
n_models = 2
n_features = 3
rate_nsm = 0.005028
streak_nsm = 9.527731
rate_cepstral = 0.071844
order_cepstral = 2
metric = unordered_accuracy_score
good_score = 0.85
threshold_steps = 10_000
dataset = RandomArmaDataset(
n_signals,
n_samples,
orders,
dwell_times=dwell_times,
min_dwell=min_dwell,
fix_scale=fix_scale,
normalize=normalize,
rng=seed,
arma_kws={"max_pole_radius": max_pole_radius},
)
# %% [markdown]
# ## Run BioWTA with all combinations of enhancements
# %%
biowta_configurations = {
(1, 1, 0): {
"rate": 0.001992,
"trans_mat": 1 - 1 / 7.794633,
"temperature": 1.036228,
"error_timescale": 1.000000,
},
(0, 0, 1): {
"rate": 0.004718,
"trans_mat": 1 - 1 / 2.000000,
"temperature": 0.000000,
"error_timescale": 4.216198,
},
(1, 1, 1): {
"rate": 0.004130,
"trans_mat": 1 - 1 / 5.769690,
"temperature": 0.808615,
"error_timescale": 1.470822,
},
(0, 1, 1): {
"rate": 0.004826,
"trans_mat": 1 - 1 / 2.154856,
"temperature": 0.000000,
"error_timescale": 4.566321,
},
(1, 0, 1): {
"rate": 0.006080,
"trans_mat": 1 - 1 / 2.000000,
"temperature": 0.117712,
"error_timescale": 4.438448,
},
(0, 1, 0): {
"rate": 0.001476,
"trans_mat": 1 - 1 / 2.984215,
"temperature": 0.000000,
"error_timescale": 1.000000,
},
(0, 0, 0): {
"rate": 0.001199,
"trans_mat": 1 - 1 / 2.000000,
"temperature": 0.000000,
"error_timescale": 1.000000,
},
(1, 0, 0): {
"rate": 0.005084,
"trans_mat": 1 - 1 / 2.000000,
"temperature": 0.011821,
"error_timescale": 1.000000,
},
}
biowta_configurations_human = {
(0, 0, 0): "plain",
(0, 0, 1): "avg_error",
(0, 1, 0): "persistent",
(1, 0, 0): "soft",
(0, 1, 1): "persistent+avg_error",
(1, 1, 0): "soft+persistent",
(1, 0, 1): "soft+avg_error",
(1, 1, 1): "full",
}
# %%
result_biowta_mods = {}
for key in tqdm(biowta_configurations, desc="biowta cfg"):
result_biowta_mods[key] = hyper_score_ar(
BioWTARegressor,
dataset,
metric,
n_models=n_models,
n_features=n_features,
progress=tqdm,
monitor=["r", "weights_", "prediction_"],
**biowta_configurations[key],
)
crt_scores = result_biowta_mods[key][1].trial_scores
crt_median = np.median(crt_scores)
crt_quantile = np.quantile(crt_scores, 0.05)
crt_good = np.mean(crt_scores > good_score)
print(
f"{''.join(str(_) for _ in key)}: median={crt_median:.4f}, "
f"5%={crt_quantile:.4f}, "
f"fraction>{int(100 * good_score)}%={crt_good:.4f}"
)
# %%
for key in tqdm(biowta_configurations, desc="biowta cfg, reconstruction progress"):
calculate_ar_identification_progress(result_biowta_mods[key][1].history, dataset)
# %% [markdown]
# Find some "good" indices in the dataset: one that obtains an accuracy score close to a chosen threshold for "good-enough" (which we set to 85%); and one that has a similar score but also has small reconstruction error for the weights.
# %%
result_biowta_chosen = result_biowta_mods[1, 1, 0]
crt_mask = (result_biowta_chosen[1].trial_scores > 0.98 * good_score) & (
result_biowta_chosen[1].trial_scores < 1.02 * good_score
)
crt_idxs = crt_mask.nonzero()[0]
crt_errors_norm = np.asarray(
[np.mean(_.weight_errors_normalized_[-1]) for _ in result_biowta_chosen[1].history]
)
good_biowta_idx = crt_idxs[np.argmax(crt_errors_norm[crt_mask])]
good_biowta_ident_idx = crt_idxs[np.argmin(crt_errors_norm[crt_mask])]
good_idxs = [good_biowta_ident_idx, good_biowta_idx]
# %%
result_biowta_chosen[1].trial_scores[good_idxs]
# %%
crt_errors_norm[good_idxs]
# %%
for key in biowta_configurations:
make_multi_trajectory_plot(
result_biowta_mods[key][1],
dataset,
n_traces=25,
highlight_idx=good_idxs,
sliding_kws={"window_size": 5000, "overlap_fraction": 0.8},
trace_kws={"alpha": 0.85, "lw": 0.75, "color": "gray"},
rug_kws={"alpha": 0.3},
)
# %% [markdown]
# ## Run learning and inference for autocorrelation and cepstral methods
# %%
t0 = time.time()
result_xcorr = hyper_score_ar(
CrosscorrelationRegressor,
dataset,
metric,
n_models=n_models,
n_features=n_features,
nsm_rate=rate_nsm,
xcorr_rate=1 / streak_nsm,
progress=tqdm,
monitor=["r", "nsm.weights_", "xcorr.coef_"],
)
t1 = time.time()
print(
f"Median accuracy score xcorr: {result_xcorr[0]:.2}. "
f"(Took {t1 - t0:.2f} seconds.)"
)
# %%
t0 = time.time()
result_cepstral = hyper_score_ar(
CepstralRegressor,
dataset,
metric,
cepstral_order=order_cepstral,
cepstral_kws={"rate": rate_cepstral},
initial_weights="oracle_ar",
progress=tqdm,
monitor=["r"],
)
t1 = time.time()
print(
f"Median accuracy score cepstral: {result_cepstral[0]:.2}. "
f"(Took {t1 - t0:.2f} seconds.)"
)
# %% [markdown]
# ## Run BioWTA with weights fixed at ground-truth values
# %%
t0 = time.time()
oracle_biowta = hyper_score_ar(
BioWTARegressor,
dataset,
metric,
n_models=n_models,
n_features=n_features,
rate=0,
trans_mat=biowta_configurations[1, 1, 0]["trans_mat"],
temperature=biowta_configurations[1, 1, 0]["temperature"],
error_timescale=biowta_configurations[1, 1, 0]["error_timescale"],
initial_weights="oracle_ar",
progress=tqdm,
monitor=["r", "prediction_"],
)
t1 = time.time()
print(
f"Median accuracy score oracle BioWTA: {oracle_biowta[0]:.2}. "
f"(Took {t1 - t0:.2f} seconds.)"
)
# %% [markdown]
# ## Make plots
# %%
fig, axs = make_accuracy_plot(
result_biowta_chosen[1], oracle_biowta[1], dataset, good_idxs
)
axs[0, 2].set_xlabel("enh. BioWTA oracle")
axs[0, 2].set_ylabel("enh. BioWTA")
fig.savefig(
os.path.join(fig_path, "rolling_accuracy_2x_ar3_100trials_biowta.png"), dpi=600
)
# %%
crt_frac_good = np.mean(result_biowta_chosen[1].trial_scores > good_score)
print(
f"Percentage of runs with BioWTA accuracies over {int(good_score * 100)}%: "
f"{int(crt_frac_good * 100)}%."
)
crt_frac_fast = np.mean(
np.asarray(result_biowta_chosen[1].convergence_times) <= threshold_steps
)
print(
f"Percentage of runs with BioWTA convergence times under {threshold_steps}: "
f"{int(crt_frac_fast * 100)}%."
)
# %%
fig, axs = make_accuracy_plot(result_xcorr[1], oracle_biowta[1], dataset, good_idxs)
axs[0, 2].set_xlabel("enh. BioWTA oracle")
axs[0, 2].set_ylabel("autocorrelation")
fig.savefig(
os.path.join(fig_path, "rolling_accuracy_2x_ar3_100trials_xcorr.png"), dpi=600
)
# %%
print(
f"Percentage of runs with xcorr accuracies over {int(good_score * 100)}%: "
f"{int(np.mean(result_xcorr[1].trial_scores > good_score) * 100)}%."
)
threshold_steps = 10_000
print(
f"Percentage of runs with xcorr convergence times under {threshold_steps}: "
f"{int(np.mean(np.asarray(result_xcorr[1].convergence_times) <= threshold_steps) * 100)}%."
)
threshold_steps_small = 1000
print(
f"Percentage of runs with xcorr convergence times under {threshold_steps_small}: "
f"{int(np.mean(np.asarray(result_xcorr[1].convergence_times) <= threshold_steps_small) * 100)}%."
)
# %%
fig, axs = make_accuracy_plot(result_cepstral[1], oracle_biowta[1], dataset, good_idxs)
axs[0, 2].set_xlabel("enh. BioWTA oracle")
axs[0, 2].set_ylabel("cepstral oracle")
fig.savefig(
os.path.join(fig_path, "rolling_accuracy_2x_ar3_100trials_cepstral.png"), dpi=600
)
# %%
print(
f"Percentage of runs with cepstral accuracies over {int(good_score * 100)}%: "
f"{int(np.mean(result_cepstral[1].trial_scores > good_score) * 100)}%."
)
threshold_steps = 10_000
print(
f"Percentage of runs with cepstral convergence times under {threshold_steps}: "
f"{int(np.mean(np.asarray(result_cepstral[1].convergence_times) <= threshold_steps) * 100)}%."
)
threshold_steps_small = 1000
print(
f"Percentage of runs with cepstral convergence times under {threshold_steps_small}: "
f"{int(np.mean(np.asarray(result_cepstral[1].convergence_times) <= threshold_steps_small) * 100)}%."
)
# %% [markdown]
# # Explain variability in BioWTA accuracy scores, show effect of algorithm improvements
# %%
predicted_plain_scores = [
predict_plain_score(crt_sig.armas, sigma_ratio=1.0 / crt_sig.scale)
for crt_sig in tqdm(dataset)
]
# %%
with plt.style.context(paper_style):
with FigureManager(
1,
2,
gridspec_kw={"width_ratios": (12, 2)},
despine_kws={"offset": 5},
figsize=(2.8, 1.5),
constrained_layout=True,
) as (fig, axs):
crt_sigma = 0.5
crt_pred1 = -crt_sigma
crt_pred2 = crt_sigma
crt_thresh = 0.5 * (crt_pred1 + crt_pred2)
crt_samples = [-0.3, 1.0, -0.7, 0.4, -1.3, -0.6, 0.3, -0.2, -0.5]
crt_n = len(crt_samples)
crt_usage = np.zeros(crt_n + 1, dtype=int)
axs[0].plot(crt_samples, ".-", c="gray")
# axs[0].axhline(0, ls=":", c="gray")
crt_box = [[crt_n - 0.4, crt_n + 0.4], [-1.4, 1.4]]
axs[0].plot(
crt_box[0] + crt_box[0][::-1] + [crt_box[0][0]],
[crt_box[1][0]] + crt_box[1] + crt_box[1][::-1],
"k-",
)
crt_p_range = (-1.5, 1.5)
axs[0].set_ylim(*crt_p_range)
axs[0].set_xlabel("time step")
axs[0].set_ylabel("signal $y(t)$")
axs[0].set_xticks([0, len(crt_samples)])
axs[0].set_xticklabels([0, "$\\tau$"])
show_latent(crt_usage, ax=axs[0])
axs[0].annotate(
"ground truth: model 1",
(0.5, axs[0].get_ylim()[1] - 0.03),
color="w",
verticalalignment="top",
fontsize=6,
fontweight="bold",
)
crt_ps = np.linspace(*crt_p_range, 100)
crt_dist = (
1
            / np.sqrt(2 * np.pi * crt_sigma ** 2)
"""
Copyright (c) 2014 NavPy Developers. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in
LICENSE.txt
"""
import numpy as np
from . import wgs84
from ..utils import input_check_Nx3 as _input_check_Nx3
from ..utils import input_check_Nx3x3 as _input_check_Nx3x3
from ..utils import input_check_Nx1 as _input_check_Nx1
def angle2dcm(rotAngle1, rotAngle2, rotAngle3, input_unit='rad',
rotation_sequence='ZYX', output_type='ndarray'):
"""
This function converts Euler Angle into Direction Cosine Matrix (DCM).
The DCM is described by three sucessive rotation rotAngle1, rotAngle2, and
rotAngle3 about the axes described by the rotation_sequence.
The default rotation_sequence='ZYX' is the aerospace sequence and rotAngle1
is the yaw angle, rotAngle2 is the pitch angle, and rotAngle3 is the roll
angle. In this case DCM transforms a vector from the locally level
coordinate frame (i.e. the NED frame) to the body frame.
This function can batch process a series of rotations (e.g., time series
of Euler angles).
Parameters
----------
rotAngle1, rotAngle2, rotAngle3 : angles {(N,), (N,1), or (1,N)}
They are a sequence of angles about successive axes described by
rotation_sequence.
input_unit : {'rad', 'deg'}, optional
Rotation angles. Default is 'rad'.
rotation_sequence : {'ZYX'}, optional
Rotation sequences. Default is 'ZYX'.
output_type : {'ndarray','matrix'}, optional
Output type. Default is 'ndarray'.
Returns
--------
C : {3x3} Direction Cosine Matrix
Notes
-----
Programmer: <NAME>
Created: May 03, 2011
Last Modified: January 12, 2016
"""
rotAngle1, N1 = _input_check_Nx1(rotAngle1)
rotAngle2, N2 = _input_check_Nx1(rotAngle2)
rotAngle3, N3 = _input_check_Nx1(rotAngle3)
if(N1 != N2 or N1 != N3):
raise ValueError('Inputs are not of same dimensions')
if(N1 > 1 and output_type != 'ndarray'):
raise ValueError('Matrix output requires scalar inputs')
R3 = np.zeros((N1, 3, 3))
R2 = np.zeros((N1, 3, 3))
R1 = np.zeros((N1, 3, 3))
if(input_unit == 'deg'):
rotAngle1 = np.deg2rad(rotAngle1)
        rotAngle2 = np.deg2rad(rotAngle2)
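    # Illustrative sketch (an assumption about the remainder of the 'ZYX' branch): the
    # three axis rotations are filled in and composed as C = R1 @ R2 @ R3 with
    # R3 = Rz(rotAngle1), R2 = Ry(rotAngle2), R1 = Rx(rotAngle3), so that e.g.
    #   C_nav2body = angle2dcm(yaw, pitch, roll, input_unit='deg')
    # transforms locally-level (NED) vectors into the body frame.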
#!/usr/bin/env python
#import standard libraries
import obspy.imaging.beachball
import datetime
import os
import csv
import pandas as pd
import numpy as np
import fnmatch
from geopy.distance import geodesic
from math import *
#from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from matplotlib import path
class NewFile:
'''Creates a file object with associated uncertainty and event type'''
def __init__(self, filename, unc, event_type, source):
self.filename = filename
self.event_type = event_type
self.unc = unc
self.name = source
def maketime(timestring):
'''Used in argument parser below. Makes a datetime object from a timestring.'''
TIMEFMT = '%Y-%m-%dT%H:%M:%S'
DATEFMT = '%Y-%m-%d'
TIMEFMT2 = '%m-%d-%YT%H:%M:%S.%f'
outtime = None
try:
        outtime = datetime.datetime.strptime(timestring, TIMEFMT)
except:
try:
            outtime = datetime.datetime.strptime(timestring, DATEFMT)
except:
try:
                outtime = datetime.datetime.strptime(timestring, TIMEFMT2)
except:
print('Could not parse time or date from %s' % timestring)
print (outtime)
return outtime
def infile(s):
'''Stores filename, event type, and uncertainty where provided from comma separated string.'''
default_uncertainty = 15
try:
infile,unc,etype = s.split(',')
unc = float(unc)
return (infile, unc, etype)
except:
try:
s = s.split(',')
infile, unc, etype = s[0], default_uncertainty, s[1]
return (infile, unc, etype)
except:
raise argparse.ArgumentTypeError('Input file information must be \
given as infile,unc,etype or as infile,etype')
def datelinecross(x):
''' Arguments: x - longitude value (positive or negative)
Returns: x - a positive longitude. Stays the same if the input was positive,
is changed to positive if the input was negative '''
if x<0:
return x+360
else:
return x
###############################################
### 9 ###
###############################################
## Written GLM
def meridiancross(x):
''' Arguments: x - longitude value (positive or negative)
Returns: x - a longitude in the -180/180 domain '''
if x>180:
return x-360
else:
return x
def northcross(x):
''' Arguments: x - longitude value (positive or negative)
Returns: x - a longitude in the -180/180 domain '''
if x<90:
return x+360
else:
return x
def unnorthcross(x):
''' Arguments: x - longitude value (positive or negative)
Returns: x - a longitude in the -180/180 domain '''
if x>360:
return x-360
else:
return x
def zerothreesixty(data):
data['lon']=data.apply(lambda row: datelinecross(row['lon']),axis=1)
return data
def oneeighty(data):
data['lon']=data.apply(lambda row: meridiancross(row['lon']),axis=1)
return data
def northernaz(data):
data['az']=data.apply(lambda row: northcross(row['az']),axis=1)
return data
def notnorthanymore(data):
data['az']=data.apply(lambda row: unnorthcross(row['az']),axis=1)
return data
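# Example (illustrative): the wrappers above normalise the 'lon'/'az' columns of a
# pandas DataFrame, e.g. moving all longitudes into the 0-360 domain before distance
# calculations and back again afterwards:
#
#   data = zerothreesixty(data)   # 'lon' now in [0, 360)
#   data = oneeighty(data)        # 'lon' back in the -180/180 domain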
def writetofile(input_file, output_file, event_type, uncertainty, args, catalogs, file_no, seismo_thick, slabname, name):
''' Writes an input file object to the given output file.
Acquires the necessary columns from the file, calculates moment tensor information.
Eliminates rows of data that do not fall within the specified bounds
(date, magnitude, & location).
If the event type is an earthquake, the catalog is compared to all previously
entered catalogs. Duplicate events are removed from the subsequent entries
(prioritization is determined by the order in which catalogs are entered).
Writes filtered dataframe to output file and prints progress to console.
Arguments: input_file - input file from input or slab2database
output_file - file where new dataset will be written
event_type - two letter ID that indicates the type of data (AS, EQ, BA, etc)
uncertainty - the default uncertainty associated with this file or event type
args - arguments provided from command line (bounds, magnitude limits, etc)
catalogs - a list of EQ catalogs that are being written to this file
file_no - file number, used for making event IDs '''
in_file = open(input_file)
fcsv = (input_file[:-4]+'.csv')
# Reading .csv file into dataframe - all files must be in .csv format
try:
if input_file.endswith('.csv'):
data = pd.read_csv(input_file, low_memory=False)
else:
print ('Input file %s was not written to file. MUST BE IN .CSV FORMAT' % input_file)
pass
except:
print ('Could not read file %s. A header line of column labels \
        followed by a delimited dataset is expected. Check file format to ensure this \
is such. All files must be in .csv format.' % input_file)
if 'ID' in data.columns:
pass
elif 'id_no' in data.columns:
data['ID'] = data['id_no'].values
else:
start_ID = file_no*100000
stop_ID = start_ID + len(data)
ID = np.arange(start_ID, stop_ID, 1)
data['ID'] = ID
data = makeframe(data, fcsv, event_type, uncertainty, args, seismo_thick,slabname)
data = inbounds(args, data, slabname)
#If option is chosen at command line, removes duplicate entries for the same event
#alternate preference for global or regional catalogues depending upon input arguments
try:
regional_pref
except NameError:
pass
else:
try:
tup = (data, fcsv)
if len(catalogs) > 0:
for idx, row in enumerate(catalogs):
if fnmatch.fnmatch(row, '*global*'):
position = idx
name_of_file = row
if regional_pref == 0 and position != 0:
first_file = catalogs[0]
catalogs[position] = first_file
catalogs[0] = name_of_file
elif regional_pref == 1 and position != (len(catalogs)-1):
last_file = catalogs[(len(catalogs)-1)]
                    catalogs[position] = last_file
catalogs[(len(catalogs)-1)] = name_of_file
else:
pass
for cat in catalogs:
data = rid_matches(cat[0], data, cat[1], fcsv)
elif len(catalogs) == 0:
catalogs.append(tup)
except:
print ('If file contains earthquake information (event-type = EQ), \
required columns include: lat,lon,depth,mag,time. The columns of the current \
file: %s. Check file format to ensure these columns are present and properly \
labeled.' % data.columns)
#MF 8.9.16 add source to output file
try:
listints = data['ID'].values.astype(int)
except:
start_ID = file_no*100000
stop_ID = start_ID + len(data)
ID = np.arange(start_ID, stop_ID, 1)
data['id_no'] = data['ID'].values
data['ID'] = ID
data['src'] = name
write_data(data, output_file)
print ('The file: %s was written to %s' % (input_file, output_file))
print ('---------------------------------------------------------------------------------')
def castfloats(data):
'''Casts all numerical and nan values to floats to avoid error in calculations'''
data[['lat']] = data[['lat']].astype(float)
data[['lon']] = data[['lon']].astype(float)
data[['depth']] = data[['depth']].astype(float)
data[['unc']] = data[['unc']].astype(float)
if 'mag' in data.columns:
data[['mag']] = data[['mag']].astype(float)
if 'mrr' in data.columns:
data[['mrr']] = data[['mrr']].astype(float)
data[['mtt']] = data[['mtt']].astype(float)
data[['mpp']] = data[['mpp']].astype(float)
data[['mrt']] = data[['mrt']].astype(float)
data[['mrp']] = data[['mrp']].astype(float)
data[['mtp']] = data[['mtp']].astype(float)
if 'Paz' in data.columns and 'Ppl' in data.columns:
data[['Paz']] = data[['Paz']].astype(float)
data[['Ppl']] = data[['Ppl']].astype(float)
data[['Taz']] = data[['Taz']].astype(float)
data[['Tpl']] = data[['Tpl']].astype(float)
data[['S1']] = data[['S1']].astype(float)
data[['D1']] = data[['D1']].astype(float)
data[['R1']] = data[['R1']].astype(float)
data[['S2']] = data[['S2']].astype(float)
data[['D2']] = data[['D2']].astype(float)
data[['R2']] = data[['R2']].astype(float)
return data
def rid_nans(df):
'''Removes points where lat,lon,depth, or uncertainty values are not provided.'''
df = df[np.isfinite(df['lat'])]
df = df[np.isfinite(df['lon'])]
    df = df[np.isfinite(df['depth'])]
# Written by <NAME>, 2018
import numpy as np
import bisect
class SDR_Classifier:
"""Maximum Likelyhood classifier for SDRs."""
def __init__(self, alpha, input_sdr, num_labels):
"""
Argument alpha is the small constant used by the exponential moving
        average which tracks input-output co-occurrences.
"""
self.alpha = alpha
self.input_sdr = input_sdr
self.num_labels = num_labels
# Don't initialize to zero, touch every input+output pair.
self.stats = np.random.uniform(
0.1 * self.alpha,
0.2 * self.alpha,
size=(self.input_sdr.size, self.num_labels))
def train(self, labels, input_sdr=None):
"""
Argument labels is array of float, PDF.
"""
        labels = np.array(labels)
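        # Illustrative sketch (an assumption; 'active' and the rest of train() are not
        # defined in this excerpt): a typical exponential-moving-average update of the
        # co-occurrence statistics for the currently active input bits would be
        #   self.stats[active] += self.alpha * (labels - self.stats[active])
        # where 'active' holds the indices of the on-bits of self.input_sdr.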
import numpy as np
import sys
import math
from matplotlib import pyplot as plt
import time
np.random.seed(3141592)
# d_f(z) = dz/dx in terms of z = f(x)
def relu(z):
return np.maximum(z, 0.0)
def d_relu(z):
return np.where(z > 0, 1.0, 0.0)
def sigmoid(z):
return 1 / (1 + np.exp(-z))
def d_sigmoid(z):
return z * (1 - z)
class NeuralNetwork:
"""
Parameters
----------
batch_size: batch size for gradient descent
features: number of features in the data, also the size of input layer
architecture: list of hidden layer sizes
target_classes: number of target classes, also the size of output layer
due to one-hot encoding
activation: list of activation functions for each hidden, output layer
"""
def __init__(self,
batch_size,
features,
architecture,
target_classes,
activation,
learning_rate,
eps=1e-4,
adaptive=False,
max_iter=1000):
# indexing:
# 0: input layer,
# 1 - num_hidden_layers: hidden layers,
# num_hidden_layers + 1: output
# input validation
assert len(activation) == len(architecture) + 1
assert eps > 0
assert batch_size > 0
assert features > 0
assert target_classes > 0
assert learning_rate > 0
assert max_iter > 0
# architecture structure
self.num_hidden_layers = len(architecture)
self.features = features
self.architecture = [features] + architecture + [target_classes] # changed
self.target_classes = target_classes
# activation functions and derivatives
self.activation = [None for i in range(self.num_hidden_layers + 2)]
self.d_activation = [None for i in range(self.num_hidden_layers + 2)]
for i in range(len(activation)):
if activation[i] == 'relu':
self.activation[i + 1] = relu
self.d_activation[i + 1] = d_relu
elif activation[i] == 'sigmoid':
self.activation[i + 1] = sigmoid
self.d_activation[i + 1] = d_sigmoid
else:
                raise ValueError('Unsupported activation function, '
                                 'choose one of relu or sigmoid')
# He initialization (variance of 2/(number of units in the previous layer))
self.theta = [None] + [np.random.uniform(-1, 1, (n+1, k)) * math.sqrt(6/n) for (n, k) in zip(self.architecture[:-1], self.architecture[1:])]
# SGD parameters
self.batch_size = batch_size
self.learning_rate = learning_rate
self.eps = eps
self.adaptive = adaptive
self.max_iter = max_iter
def train(self, _x_train, _y_train):
# reformatting data
m = _x_train.shape[0]
X_train = np.concatenate((np.ones((m, 1)), _x_train), axis=1)
y_train = np.concatenate((np.ones((m, 1)), _y_train), axis=1)
# variables to keep track of SGD
prev_error = math.inf
epoch = 1
# for each layer, keep track of outputs of that layer
# as well as the computed deltas
layer_outputs = [None for _ in range(len(self.architecture))]
delta = [None for _ in range(len(self.architecture))]
while True:
# max number of epochs - to prevent infinite loop
# however this is never triggered in any of the runs
if epoch == self.max_iter:
break
# choosing the learning rate
learning_rate = self.learning_rate
if self.adaptive:
learning_rate /= math.sqrt(epoch)
# shuffle X_train and y_train first
p = np.random.permutation(m)
X_train, y_train = X_train[p], y_train[p]
# initialize variables related to SGD
average_error = 0
M = self.batch_size
B = m // M
for i in range(B):
# extract mini-batch from the data
input_batch_X = X_train[i * M : (i + 1) * M]
input_batch_y = y_train[i * M : (i + 1) * M][:, 1:]
# forward propagate and keep track of outputs of each unit
layer_outputs[0] = input_batch_X
for layer in range(1, len(self.architecture)):
layer_outputs[layer] = np.concatenate((np.ones((M, 1)), self.activation[layer](layer_outputs[layer - 1] @ self.theta[layer])), axis=1)
last_output = layer_outputs[-1][:, 1:]
last_d_activation = self.d_activation[-1]
# compute loss
average_error += np.sum((input_batch_y - last_output) ** 2) / (2 * M)
# compute deltas using backpropagation
delta[-1] = (input_batch_y - last_output).T * last_d_activation(last_output.T) / M
for layer in range(len(self.architecture) - 2, 0, -1): # theta, layer_outputs
delta[layer] = (self.theta[layer + 1][1:, :] @ delta[layer + 1]) * self.d_activation[layer](layer_outputs[layer][:, 1:].T)
# using deltas find gradient for each theta[layer] and
# do batch update on theta
for layer in range(1, len(self.architecture)):
self.theta[layer] += learning_rate * (delta[layer] @ layer_outputs[layer - 1]).T
# average loss over this epoch
average_error /= B
#print('Iteration:', epoch, 'loss:', average_error)
# main convergence criteria
if abs(average_error - prev_error) < self.eps:
return epoch, average_error
prev_error = average_error
epoch += 1
return epoch, prev_error
def predict(self, x_test):
# reformatting for matching the data
m = x_test.shape[0]
layer_output = np.concatenate((np.array([np.ones(m)]).T, x_test), axis=1)
# feedforwarding
for layer in range(1, len(self.architecture)):
layer_output = self.activation[layer](layer_output @ self.theta[layer])
layer_output = np.concatenate((np.array([np.ones(m)]).T, layer_output), axis=1)
# returning predictions as class labels (not one-hot encoding)
return np.argmax(layer_output[:, 1:], axis=1)
def one_hot_encoder(y, num_classes):
b = np.zeros((y.shape[0], num_classes))
b[np.arange(y.shape[0]), y] = 1
return b
def compressor(x):
return np.reshape(x, (x.shape[0], x.shape[1] * x.shape[2]))
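# --- Illustrative shapes (added; not part of the original script) ---
# one_hot_encoder turns integer class labels into one-hot rows, and compressor
# flattens image tensors of shape (m, H, W) into an (m, H*W) design matrix.
if __name__ == '__main__':
    print(one_hot_encoder(np.array([0, 2, 1]), 3))   # one-hot rows for classes 0, 2, 1
    print(compressor(np.zeros((5, 28, 28))).shape)   # (5, 784)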
def mainB():
# extracting data
X_train, y_train = compressor(np.load(sys.argv[1])), np.load(sys.argv[2])
X_test, y_test = compressor(np.load(sys.argv[3])), np.load(sys.argv[4])
X_train = X_train.astype('float32') / 255
X_test = X_test.astype('float32') / 255
# statistics
units = []
test_accuracies = []
train_accuracies = []
elapsed_time = []
# possible values for hidden layer units
experimental_values = [1, 10, 50, 100, 500]
# iterating over all choices for hidden layer units
for hidden_layer_units in experimental_values:
# parameters for the neural network
num_hidden_layers = 1
features = 784
batch_size = 100
activation = ['sigmoid' for i in range(num_hidden_layers + 1)]
architecture = [hidden_layer_units] * num_hidden_layers
target_classes = 10
learning_rate = 0.1 # or 0.001
eps = 1e-4
# initializing the neural network
nn = NeuralNetwork(batch_size=batch_size,
features=features,
architecture=architecture,
target_classes=target_classes,
activation=activation,
learning_rate=learning_rate,
eps=eps)
# training the data
t = time.time()
epoch, average_error = nn.train(X_train, one_hot_encoder(y_train, target_classes))
# prediction on test and train data
y_pred_test = nn.predict(X_test)
y_pred_train = nn.predict(X_train)
# statistics
elapsed_time.append(time.time() - t)
units.append(hidden_layer_units)
test_accuracies.append(100 * y_pred_test[y_pred_test == y_test].shape[0] / y_pred_test.shape[0])
train_accuracies.append(100 * y_pred_train[y_pred_train == y_train].shape[0] / y_pred_train.shape[0])
# printing stats
print('hidden layer units:', hidden_layer_units)
print('test accuracy:', test_accuracies[-1], '%')
print('train accuracy:', train_accuracies[-1], '%')
print('time taken:', elapsed_time[-1])
print('number of epochs:', epoch)
print('average error:', average_error)
# plotting the graphs
plt.xscale('log')
plt.title('Accuracy plot')
plt.xlabel('Hidden layer units')
plt.ylabel('Accuracy (in %)')
plt.plot(units, test_accuracies, label='Test accuracies')
plt.plot(units, train_accuracies, label='Train accuracies')
plt.savefig('nn_accuracy_plot_nonadaptive.png')
plt.close()
plt.xscale('log')
plt.title('Time taken')
plt.xlabel('Hidden layer units')
plt.ylabel('Time taken (in s)')
plt.plot(units, elapsed_time)
plt.savefig('nn_time_plot_nonadaptive.png')
def mainC():
# extracting data
    X_train, y_train = compressor(np.load(sys.argv[1])), np.load(sys.argv[2])
import pickle as pkl
import numpy as np
import numpy.linalg as linalg
# import scipy.linalg as linalg
import scipy.stats as stats
import pandas as pd
import copy as cp
def getPeaksAndBWs(strf,dt=5,df=1/6, discard_thresh=0.05):
original_strf= strf
strf=np.maximum(original_strf,0)
l2_norm_pos = np.sum(strf[:]**2)
[u,s,v] = linalg.svd(strf)
f1 = u[:,0]
t1 = v.T[:,0]
abs_max_f1_val = np.max(np.abs(f1))
abs_max_f1_ix = np.argmax(np.abs(f1))
abs_max_t1_val = np.max(np.abs(t1))
abs_max_t1_ix = np.argmax(np.abs(t1))
pos_peaks_ix = np.argwhere(np.abs(t1)>0.1*abs_max_t1_val)
if len(pos_peaks_ix)>1:
pos_first_peak_ix = pos_peaks_ix[-1]
else:
pos_first_peak_ix = pos_peaks_ix
f_pos_peak = (abs_max_f1_ix)*df
f_pos_bw = np.sum(np.abs(f1)>0.5*abs_max_f1_val)*df
t_pos_peak = (len(t1) - abs_max_t1_ix)*dt*-1
t_pos_bw = np.sum(np.abs(t1)>0.5*abs_max_t1_val)*dt
#Inhibition:
strf=np.minimum(original_strf,0)
l2_norm_neg = np.sum(strf[:]**2)
[u,s,v] = linalg.svd(strf)
f1 = u[:,0]
t1 = v.T[:,0]
abs_max_f1_val = np.max(np.abs(f1))
abs_max_f1_ix = np.argmax(np.abs(f1))
abs_max_t1_val = np.max(np.abs(t1))
abs_max_t1_ix = np.argmax(np.abs(t1))
neg_peaks_ix = np.argwhere(np.abs(t1)>0.1*abs_max_t1_val)
if len(neg_peaks_ix)>1:
neg_first_peak_ix = neg_peaks_ix[-1]
else:
neg_first_peak_ix = neg_peaks_ix
f_neg_peak = (abs_max_f1_ix)*df
f_neg_bw = np.sum(np.abs(f1)>0.5*abs_max_f1_val)*df
t_neg_peak = (len(t1) - abs_max_t1_ix)*dt*-1
t_neg_bw = np.sum(np.abs(t1)>0.5*abs_max_t1_val)*dt
discard_pos = False
discard_neg = False
flip_pos_neg = False
if l2_norm_neg<discard_thresh*l2_norm_pos:
discard_neg = True
f_neg_bw = 0
t_neg_bw = 0
elif l2_norm_pos<discard_thresh*l2_norm_neg:
discard_pos = True
f_pos_bw = 0
t_pos_bw = 0
if (neg_first_peak_ix>pos_first_peak_ix and not discard_neg) or discard_pos:
# print('flip_pos_neg = True')
flip_pos_neg = True
discard_neg = discard_pos
f_peak = [f_neg_peak, f_pos_peak]
f_bw = [f_neg_bw, f_pos_bw]
t_peak = [t_neg_peak, t_pos_peak]
t_bw = [t_neg_bw, t_pos_bw]
else:
f_peak = [f_pos_peak,f_neg_peak]
f_bw = [f_pos_bw,f_neg_bw]
t_peak = [t_pos_peak,t_neg_peak]
t_bw = [t_pos_bw,t_neg_bw]
# flags = [flip_pos_neg, discard_neg]
return [f_peak,f_bw, t_peak,t_bw, flip_pos_neg, discard_neg]
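# --- Illustrative example (added): a synthetic rank-1 STRF ---
# getPeaksAndBWs factorises the positive and negative parts of the STRF with an
# SVD and reads peak positions and half-maximum bandwidths off the leading
# spectral (f1) and temporal (t1) singular vectors. The toy STRF below is the
# outer product of a spectral bump and a temporal bump, so it is exactly rank 1.
if __name__ == '__main__':
    _f = np.exp(-0.5 * ((np.arange(30) - 12) / 2.0) ** 2)   # spectral profile
    _t = np.exp(-0.5 * ((np.arange(40) - 30) / 3.0) ** 2)   # temporal profile
    print(getPeaksAndBWs(np.outer(_f, _t)))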
def flip_neg_weights(weights,n_h = 40, dt = 5,dF = 1/6):
numweights = weights.shape[0]
mf_peak = np.empty([numweights,2])
mf_bw = np.empty([numweights,2])
mt_bw = np.empty([numweights,2])
mt_peak = np.empty([numweights,2])
m_pow = np.empty([numweights, n_h])
    flip_pos_neg = np.empty([numweights])
# -*- coding: utf-8 -*-
# Citation: <NAME>., <NAME>., <NAME>., <NAME>., 2021. An s-shaped three-parameter (S3) traffic stream model with consistent car following relationship. Under review.
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from fundamental_diagram_model import fundamental_diagram as fd
from fundamental_diagram_model import estimated_value, theoretical_value
from scipy.optimize import minimize, Bounds
plt.rcParams.update({'figure.max_open_warning': 0})
plt.rc('font',family='Times New Roman')
plt.rcParams['mathtext.fontset']='stix'
class solve:
def __init__(self, data):
self.speed = np.array(data.Speed)
        self.density = np.array(data.Density)
"""
Link: https://github.com/honglianghe/CDNet/blob/f436555539e140ff8bafa3c9f54cbc2550b7cebd/my_transforms.py
Author: <NAME>
"""
import torch
import random
from PIL import Image, ImageOps, ImageEnhance, ImageFilter
import numpy as np
import numbers
import collections
from skimage import morphology
import SimpleITK as sitk
import time
import copy
from skimage import io
import albumentations as albu
import warnings
warnings.filterwarnings("ignore")
class Compose(object):
""" Composes several transforms together.
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
"""
def __init__(self, transforms):
self.transforms = transforms
# self.selectorNameList = selectorNameList
def __call__(self, imgs):
# number = 0
for t in self.transforms:
#selectorName = str(self.selectorNameList[number])
#start_time = time.time()
imgs = t(imgs)
# number = number + 1
return imgs
class Scale(object):
"""Rescale the input PIL images to the given size. """
def __init__(self, size, interpolation=Image.BILINEAR):
assert isinstance(size, int) or (isinstance(size, collections.Iterable) and len(size) == 2)
self.size = size
self.interpolation = interpolation
def __call__(self, imgs):
pics = []
for img in imgs:
if isinstance(self.size, int):
w, h = img.size
if (w <= h and w == self.size) or (h <= w and h == self.size):
pics.append(img)
continue
if w < h:
ow = self.size
oh = int(self.size * h / w)
pics.append(img.resize((ow, oh), self.interpolation))
continue
else:
oh = self.size
ow = int(self.size * w / h)
pics.append(img.resize((ow, oh), self.interpolation))
else:
pics.append(img.resize(self.size, self.interpolation))
return tuple(pics)
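# --- Illustrative usage (added): composing transforms ---
# Every transform in this module takes and returns a *tuple* of PIL images
# (typically image, weight map, label), so Compose threads the whole tuple
# through each step. A minimal sketch with the Scale transform:
if __name__ == '__main__':
    _img = Image.new('RGB', (120, 80))
    _label = Image.new('L', (120, 80))
    _img_s, _label_s = Compose([Scale(64)])((_img, _label))
    print(_img_s.size, _label_s.size)  # shorter side rescaled to 64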
import cv2
class RandomResize(object):
"""Randomly Resize the input PIL Image using a scale of lb~ub.
Args:
lb (float): lower bound of the scale
ub (float): upper bound of the scale
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
"""
def __init__(self, lb=0.8, ub=1.3, interpolation=Image.BILINEAR):
self.lb = lb
self.ub = ub
self.interpolation = interpolation
def __call__(self, imgs):
"""
Args:
imgs (PIL Images): Images to be scaled.
Returns:
PIL Images: Rescaled images.
"""
for img in imgs:
if not isinstance(img, Image.Image):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
scale = random.uniform(self.lb, self.ub)
# print scale
w, h = imgs[0].size
ow = int(w * scale)
oh = int(h * scale)
do_albu = 0 # TODO
if(do_albu == 1):
transf = albu.Resize(always_apply=False, p=1.0, height=oh, width=ow, interpolation=0)
image = np.array(imgs[0])
weightmap = np.expand_dims(imgs[1], axis=2)
label = np.array(imgs[2]) #np.expand_dims(imgs[2], axis=2)
if (len(label.shape) == 2):
label = label.reshape(label.shape[0], label.shape[1], 1)
if(len(image.shape)==2):
image = image.reshape(image.shape[0], image.shape[1], 1)
concat_map = np.concatenate((image, weightmap, label), axis=2)
concat_map_transf = transf(image=np.array(concat_map))['image']
image_channel = image.shape[-1]
image_transf = concat_map_transf[:, :, :image_channel]
image_transf = np.squeeze(image_transf)
weightmap_transf = concat_map_transf[:, :, image_channel]
if (label.shape[2] == 1):
#label = label.reshape(label.shape[0], label.shape[1], 1)
label_transf = concat_map_transf[:, :, -1:]
label_transf = label_transf.reshape(label_transf.shape[0], label_transf.shape[1])
else:
label_transf = concat_map_transf[:, :, -3:]
image_PIL = Image.fromarray(image_transf.astype(np.uint8))
weightmap_PIL = Image.fromarray(weightmap_transf.astype(np.uint8))
label_PIL = Image.fromarray(label_transf.astype(np.uint8))
pics = []
pics.append(image_PIL)
pics.append(weightmap_PIL)
pics.append(label_PIL)
else:
if scale < 1:
padding_l = (w - ow)//2
padding_t = (h - oh)//2
padding_r = w - ow - padding_l
padding_b = h - oh - padding_t
padding = (padding_l, padding_t, padding_r, padding_b)
pics = []
for i in range(len(imgs)):
img = imgs[i]
img = img.resize((ow, oh), self.interpolation)
if scale < 1:
# img = np.array(img)
img = cv2.copyMakeBorder(np.array(img),padding_t,padding_b,padding_l,padding_r,cv2.BORDER_REFLECT)
# print(img.shape)
img = Image.fromarray(img)
# print("img: ",img.size)
# img = ImageOps.expand(img, border=padding , fill=0)
pics.append(img)
# print(pics[0].size)
return tuple(pics)
class RandomColor(object):
def __init__(self, randomMin = 1, randomMax = 2):
self.randomMin = randomMin
self.randomMax = randomMax
def __call__(self, imgs):
out_imgs = list(imgs)
img = imgs[0]
random_factor = 1 + (np.random.rand()-0.5)
color_image = ImageEnhance.Color(img).enhance(random_factor)
random_factor = 1 + (np.random.rand()-0.5)
brightness_image = ImageEnhance.Brightness(color_image).enhance(random_factor)
random_factor = 1 + (np.random.rand()-0.5)
contrast_image = ImageEnhance.Contrast(brightness_image).enhance(random_factor)
random_factor = 1 + (np.random.rand()-0.5)
img_output = ImageEnhance.Sharpness(contrast_image).enhance(random_factor)
out_imgs[0] = img_output
return tuple(out_imgs)
class RandomAffine(object):
""" Transform the input PIL Image using a random affine transformation
The parameters of an affine transformation [a, b, c=0
d, e, f=0]
are generated randomly according to the bound, and there is no translation
(c=f=0)
Args:
bound: the largest possible deviation of random parameters
"""
def __init__(self, bound):
if bound < 0 or bound > 0.5:
raise ValueError("Bound is invalid, should be in range [0, 0.5)")
self.bound = bound
def __call__(self, imgs):
img = imgs[0]
x, y = img.size
a = 1 + 2 * self.bound * (random.random() - 0.5)
b = 2 * self.bound * (random.random() - 0.5)
d = 2 * self.bound * (random.random() - 0.5)
e = 1 + 2 * self.bound * (random.random() - 0.5)
# correct the transformation center to image center
c = -a * x / 2 - b * y / 2 + x / 2
f = -d * x / 2 - e * y / 2 + y / 2
trans_matrix = [a, b, c, d, e, f]
pics = []
for img in imgs:
pics.append(img.transform((x, y), Image.AFFINE, trans_matrix))
return tuple(pics)
class RandomHorizontalFlip(object):
"""Horizontally flip the given PIL.Image randomly with a probability of 0.5."""
def __call__(self, imgs):
"""
Args:
img (PIL.Image): Image to be flipped.
Returns:
PIL.Image: Randomly flipped image.
"""
pics = []
if random.random() < 0.5:
for img in imgs:#imgs
pics.append(img.transpose(Image.FLIP_LEFT_RIGHT))
return tuple(pics)
else:
return imgs
class RandomVerticalFlip(object):
"""Horizontally flip the given PIL.Image randomly with a probability of 0.5."""
def __call__(self, imgs):
"""
Args:
img (PIL.Image): Image to be flipped.
Returns:
PIL.Image: Randomly flipped image.
"""
pics = []
if random.random() < 0.5:
for img in imgs:
pics.append(img.transpose(Image.FLIP_TOP_BOTTOM))
return tuple(pics)
else:
return imgs
class RandomElasticDeform(object):
""" Elastic deformation of the input PIL Image using random displacement vectors
drawm from a gaussian distribution
Args:
sigma: the largest possible deviation of random parameters
"""
def __init__(self, num_pts=4, sigma=20):
self.num_pts = num_pts
self.sigma = sigma
def __call__(self, imgs):
pics = []
do_albu = 1
if (do_albu == 1):
image = np.array(imgs[0])
weightmap = np.expand_dims(imgs[1], axis=2)
label = np.array(imgs[2]) # np.expand_dims(imgs[2], axis=2)
if(len(label.shape)==2):
label = label.reshape(label.shape[0], label.shape[1], 1)
if(len(image.shape)==2):
image = image.reshape(image.shape[0], image.shape[1], 1)
concat_map = np.concatenate((image, weightmap, label), axis=2)
transf = albu.ElasticTransform(always_apply=False, p=1.0, alpha=1.0, sigma=50, alpha_affine=50,
interpolation=0, border_mode=0,
value=(0, 0, 0),
                                           mask_value=None, approximate=False)  # border_mode is used to specify the interpolation algorithm
concat_map_transf = transf(image=concat_map)['image']
image_channel = image.shape[-1]
image_transf = concat_map_transf[:, :, :image_channel]
image_transf = np.squeeze(image_transf)
weightmap_transf = concat_map_transf[:, :, image_channel]
if (label.shape[2] == 1):
label_transf = concat_map_transf[:, :, -1:]
label_transf = label_transf.reshape(label_transf.shape[0], label_transf.shape[1])
else:
label_transf = concat_map_transf[:, :, -3:]
image_PIL = Image.fromarray(image_transf.astype(np.uint8))
weightmap_PIL = Image.fromarray(weightmap_transf.astype(np.uint8))
label_PIL = Image.fromarray(label_transf.astype(np.uint8))
pics.append(image_PIL)
pics.append(weightmap_PIL)
pics.append(label_PIL)
else:
img = np.array(imgs[0])
if len(img.shape) == 3:
img = img[:,:,0]
sitkImage = sitk.GetImageFromArray(img, isVector=False)
mesh_size = [self.num_pts]*sitkImage.GetDimension()
tx = sitk.BSplineTransformInitializer(sitkImage, mesh_size)
params = tx.GetParameters()
paramsNp = np.asarray(params, dtype=float)
paramsNp = paramsNp + np.random.randn(paramsNp.shape[0]) * self.sigma
paramsNp[0:int(len(params)/3)] = 0 # remove z deformations! The resolution in z is too bad
params = tuple(paramsNp)
tx.SetParameters(params)
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(sitkImage)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetDefaultPixelValue(0)
resampler.SetTransform(tx)
resampler.SetDefaultPixelValue(0)
for img in imgs:
is_expand = False
if not isinstance(img, np.ndarray):
img = np.array(img)
if len(img.shape) == 2:
img = np.expand_dims(img, axis=2)
is_expand = True
img_deformed = np.zeros(img.shape, dtype=img.dtype)
for i in range(img.shape[2]):
sitkImage = sitk.GetImageFromArray(img[:,:,i], isVector=False)
outimgsitk = resampler.Execute(sitkImage)
img_deformed[:,:,i] = sitk.GetArrayFromImage(outimgsitk)
if is_expand:
img_deformed = img_deformed[:,:,0]
# print img_deformed.dtype
pics.append(Image.fromarray(img_deformed))
return tuple(pics)
class RandomRotation(object):
"""Rotate the image by angle.
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees).
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
"""
def __init__(self, degrees, resample=Image.BILINEAR, expand=False, center=None):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it must be positive.")
self.degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError("If degrees is a sequence, it must be of len 2.")
self.degrees = degrees
self.resample = resample
self.expand = expand
self.center = center
@staticmethod
def get_params(degrees):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
angle = random.uniform(degrees[0], degrees[1])
return angle
def __call__(self, imgs):
"""
imgs (PIL Image): Images to be rotated.
Returns:
PIL Image: Rotated image.
"""
angle = self.get_params(self.degrees)
pics = []
do_albu = 1
if (do_albu == 1):
            image = np.array(imgs[0])
from __future__ import division
import unittest
import shutil
import os
import time
import warnings
import copy
import pytest
import netCDF4
import numpy as np
from numpy.testing import assert_allclose
from salem.tests import (requires_travis, requires_geopandas, requires_dask,
requires_matplotlib, requires_cartopy)
from salem import utils, transform_geopandas, GeoTiff, read_shapefile, sio
from salem import read_shapefile_to_grid
from salem.utils import get_demo_file
current_dir = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.join(current_dir, 'tmp')
def is_cartopy_rotated_working():
from salem.gis import proj_to_cartopy
from cartopy.crs import PlateCarree
import pyproj
cp = pyproj.Proj('+ellps=WGS84 +proj=ob_tran +o_proj=latlon '
'+to_meter=0.0174532925199433 +o_lon_p=0.0 +o_lat_p=80.5 '
'+lon_0=357.5 +no_defs')
cp = proj_to_cartopy(cp)
out = PlateCarree().transform_points(cp, np.array([-20]), np.array([-9]))
if not (np.allclose(out[0, 0], -22.243473889042903, atol=1e-5) and
np.allclose(out[0, 1], -0.06328365194179102, atol=1e-5)):
# Cartopy also had issues
return False
return True
@requires_geopandas
def create_dummy_shp(fname):
import shapely.geometry as shpg
import geopandas as gpd
e_line = shpg.LinearRing([(1.5, 1), (2., 1.5), (1.5, 2.), (1, 1.5)])
i_line = shpg.LinearRing([(1.4, 1.4), (1.6, 1.4), (1.6, 1.6), (1.4, 1.6)])
p1 = shpg.Polygon(e_line, [i_line])
p2 = shpg.Polygon([(2.5, 1.3), (3., 1.8), (2.5, 2.3), (2, 1.8)])
p3 = shpg.Point(0.5, 0.5)
p4 = shpg.Point(1, 1)
df = gpd.GeoDataFrame()
df['name'] = ['Polygon', 'Line']
df['geometry'] = gpd.GeoSeries([p1, p2])
of = os.path.join(testdir, fname)
df.to_file(of)
return of
def delete_test_dir():
if os.path.exists(testdir):
shutil.rmtree(testdir)
class TestUtils(unittest.TestCase):
def setUp(self):
if not os.path.exists(testdir):
os.makedirs(testdir)
def tearDown(self):
delete_test_dir()
@requires_travis
def test_empty_cache(self):
utils.empty_cache()
def test_hash_cache_dir(self):
h1 = utils._hash_cache_dir()
h2 = utils._hash_cache_dir()
self.assertEqual(h1, h2)
def test_demofiles(self):
self.assertTrue(os.path.exists(utils.get_demo_file('dem_wgs84.nc')))
self.assertTrue(utils.get_demo_file('dummy') is None)
def test_read_colormap(self):
cl = utils.read_colormap('topo') * 256
assert_allclose(cl[4, :], (177, 242, 196))
assert_allclose(cl[-1, :], (235, 233, 235))
cl = utils.read_colormap('dem') * 256
assert_allclose(cl[4, :], (153,100, 43))
assert_allclose(cl[-1, :], (255,255,255))
def test_reduce(self):
arr = [[1, 1, 2, 2], [1, 1, 2, 2]]
assert_allclose(utils.reduce(arr, 1), arr)
assert_allclose(utils.reduce(arr, 2), [[1, 2]])
assert_allclose(utils.reduce(arr, 2, how=np.sum), [[4, 8]])
arr = np.stack([arr, arr, arr])
assert_allclose(arr.shape, (3, 2, 4))
assert_allclose(utils.reduce(arr, 1), arr)
assert_allclose(utils.reduce(arr, 2), [[[1, 2]], [[1, 2]], [[1, 2]]])
assert_allclose(utils.reduce(arr, 2, how=np.sum),
[[[4, 8]], [[4, 8]], [[4, 8]]])
arr[0, ...] = 0
assert_allclose(utils.reduce(arr, 2, how=np.sum),
[[[0, 0]], [[4, 8]], [[4, 8]]])
arr[1, ...] = 1
assert_allclose(utils.reduce(arr, 2, how=np.sum),
[[[0, 0]], [[4, 4]], [[4, 8]]])
class TestIO(unittest.TestCase):
def setUp(self):
if not os.path.exists(testdir):
os.makedirs(testdir)
def tearDown(self):
delete_test_dir()
@requires_geopandas
def test_cache_working(self):
f1 = 'f1.shp'
f1 = create_dummy_shp(f1)
cf1 = utils.cached_shapefile_path(f1)
self.assertFalse(os.path.exists(cf1))
_ = read_shapefile(f1)
self.assertFalse(os.path.exists(cf1))
_ = read_shapefile(f1, cached=True)
self.assertTrue(os.path.exists(cf1))
# nested calls
self.assertTrue(cf1 == utils.cached_shapefile_path(cf1))
# wait a bit
time.sleep(0.1)
f1 = create_dummy_shp(f1)
cf2 = utils.cached_shapefile_path(f1)
self.assertFalse(os.path.exists(cf1))
_ = read_shapefile(f1, cached=True)
self.assertFalse(os.path.exists(cf1))
self.assertTrue(os.path.exists(cf2))
df = read_shapefile(f1, cached=True)
np.testing.assert_allclose(df.min_x, [1., 2.])
np.testing.assert_allclose(df.max_x, [2., 3.])
np.testing.assert_allclose(df.min_y, [1., 1.3])
np.testing.assert_allclose(df.max_y, [2., 2.3])
self.assertRaises(ValueError, read_shapefile, 'f1.sph')
self.assertRaises(ValueError, utils.cached_shapefile_path, 'f1.splash')
@requires_geopandas
def test_read_to_grid(self):
g = GeoTiff(utils.get_demo_file('hef_srtm.tif'))
sf = utils.get_demo_file('Hintereisferner_UTM.shp')
df1 = read_shapefile_to_grid(sf, g.grid)
df2 = transform_geopandas(read_shapefile(sf), to_crs=g.grid)
assert_allclose(df1.geometry[0].exterior.coords,
df2.geometry[0].exterior.coords)
# test for caching
d = g.grid.to_dict()
# change key ordering by chance
d2 = dict((k, v) for k, v in d.items())
from salem.sio import _memory_shapefile_to_grid, cached_shapefile_path
shape_cpath = cached_shapefile_path(sf)
res = _memory_shapefile_to_grid.call_and_shelve(shape_cpath,
grid=g.grid,
**d)
try:
h1 = res.timestamp
except AttributeError:
h1 = res.argument_hash
res = _memory_shapefile_to_grid.call_and_shelve(shape_cpath,
grid=g.grid,
**d2)
try:
h2 = res.timestamp
except AttributeError:
h2 = res.argument_hash
self.assertEqual(h1, h2)
def test_notimevar(self):
import xarray as xr
da = xr.DataArray(np.arange(12).reshape(3, 4), dims=['lat', 'lon'])
ds = da.to_dataset(name='var')
t = sio.netcdf_time(ds)
assert t is None
class TestSkyIsFalling(unittest.TestCase):
@requires_matplotlib
def test_projplot(self):
# this caused many problems on fabien's laptop.
# this is just to be sure that on your system, everything is fine
import pyproj
import matplotlib.pyplot as plt
from salem.gis import transform_proj, check_crs
wgs84 = pyproj.Proj(proj='latlong', datum='WGS84')
fig = plt.figure()
plt.close()
srs = '+units=m +proj=lcc +lat_1=29.0 +lat_2=29.0 +lat_0=29.0 +lon_0=89.8'
proj_out = check_crs('EPSG:4326')
proj_in = pyproj.Proj(srs, preserve_units=True)
lon, lat = transform_proj(proj_in, proj_out, -2235000, -2235000)
np.testing.assert_allclose(lon, 70.75731, atol=1e-5)
def test_gh_152(self):
# https://github.com/fmaussion/salem/issues/152
import xarray as xr
da = xr.DataArray(np.arange(20).reshape(4, 5), dims=['lat', 'lon'],
coords={'lat': np.linspace(0, 30, 4),
'lon': np.linspace(-20, 20, 5)})
da.salem.roi()
class TestXarray(unittest.TestCase):
def setUp(self):
if not os.path.exists(testdir):
os.makedirs(testdir)
def tearDown(self):
delete_test_dir()
@requires_dask
def test_era(self):
ds = sio.open_xr_dataset(get_demo_file('era_interim_tibet.nc')).chunk()
self.assertEqual(ds.salem.x_dim, 'longitude')
self.assertEqual(ds.salem.y_dim, 'latitude')
dss = ds.salem.subset(ds=ds)
self.assertEqual(dss.salem.grid, ds.salem.grid)
lon = 91.1
lat = 31.1
dss = ds.salem.subset(corners=((lon, lat), (lon, lat)), margin=1)
self.assertEqual(len(dss.latitude), 3)
self.assertEqual(len(dss.longitude), 3)
np.testing.assert_almost_equal(dss.longitude, [90.0, 90.75, 91.5])
def test_roi(self):
import xarray as xr
# Check that all attrs are preserved
with sio.open_xr_dataset(get_demo_file('era_interim_tibet.nc')) as ds:
ds.encoding = {'_FillValue': np.NaN}
ds['t2m'].encoding = {'_FillValue': np.NaN}
ds_ = ds.salem.roi(roi=np.ones_like(ds.t2m.values[0, ...]))
xr.testing.assert_identical(ds, ds_)
assert ds.encoding == ds_.encoding
assert ds.t2m.encoding == ds_.t2m.encoding
@requires_geopandas # because of the grid tests, more robust with GDAL
def test_basic_wrf(self):
import xarray as xr
ds = sio.open_xr_dataset(get_demo_file('wrf_tip_d1.nc')).chunk()
# this is because read_dataset changes some stuff, let's see if
# georef still ok
dsxr = xr.open_dataset(get_demo_file('wrf_tip_d1.nc'))
assert ds.salem.grid == dsxr.salem.grid
lon, lat = ds.salem.grid.ll_coordinates
assert_allclose(lon, ds['XLONG'], atol=1e-4)
assert_allclose(lat, ds['XLAT'], atol=1e-4)
# then something strange happened
assert ds.isel(Time=0).salem.grid == ds.salem.grid
assert ds.isel(Time=0).T2.salem.grid == ds.salem.grid
nlon, nlat = ds.isel(Time=0).T2.salem.grid.ll_coordinates
assert_allclose(nlon, ds['XLONG'], atol=1e-4)
assert_allclose(nlat, ds['XLAT'], atol=1e-4)
        # the grid should not be misunderstood as lonlat
t2 = ds.T2.isel(Time=0) - 273.15
with pytest.raises(RuntimeError):
g = t2.salem.grid
@requires_dask
def test_geo_em(self):
for i in [1, 2, 3]:
fg = get_demo_file('geo_em_d0{}_lambert.nc'.format(i))
ds = sio.open_wrf_dataset(fg).chunk()
self.assertFalse('Time' in ds.dims)
self.assertTrue('time' in ds.dims)
self.assertTrue('south_north' in ds.dims)
self.assertTrue('south_north' in ds.coords)
@requires_geopandas # because of the grid tests, more robust with GDAL
def test_wrf(self):
import xarray as xr
ds = sio.open_wrf_dataset(get_demo_file('wrf_tip_d1.nc')).chunk()
# this is because read_dataset changes some stuff, let's see if
# georef still ok
dsxr = xr.open_dataset(get_demo_file('wrf_tip_d1.nc'))
assert ds.salem.grid == dsxr.salem.grid
lon, lat = ds.salem.grid.ll_coordinates
assert_allclose(lon, ds['lon'], atol=1e-4)
assert_allclose(lat, ds['lat'], atol=1e-4)
# then something strange happened
assert ds.isel(time=0).salem.grid == ds.salem.grid
assert ds.isel(time=0).T2.salem.grid == ds.salem.grid
nlon, nlat = ds.isel(time=0).T2.salem.grid.ll_coordinates
assert_allclose(nlon, ds['lon'], atol=1e-4)
assert_allclose(nlat, ds['lat'], atol=1e-4)
        # the grid should not be misunderstood as lonlat
t2 = ds.T2.isel(time=0) - 273.15
with pytest.raises(RuntimeError):
g = t2.salem.grid
@requires_dask
def test_ncl_diagvars(self):
import xarray as xr
wf = get_demo_file('wrf_cropped.nc')
ncl_out = get_demo_file('wrf_cropped_ncl.nc')
w = sio.open_wrf_dataset(wf).chunk()
nc = xr.open_dataset(ncl_out)
ref = nc['TK']
tot = w['TK']
assert_allclose(ref, tot, rtol=1e-6)
ref = nc['SLP']
tot = w['SLP']
tot = tot.values
assert_allclose(ref, tot, rtol=1e-6)
w = w.isel(time=1, south_north=slice(12, 16), west_east=slice(9, 16))
nc = nc.isel(Time=1, south_north=slice(12, 16), west_east=slice(9, 16))
ref = nc['TK']
tot = w['TK']
assert_allclose(ref, tot, rtol=1e-6)
ref = nc['SLP']
tot = w['SLP']
tot = tot.values
assert_allclose(ref, tot, rtol=1e-6)
w = w.isel(bottom_top=slice(3, 5))
nc = nc.isel(bottom_top=slice(3, 5))
ref = nc['TK']
tot = w['TK']
assert_allclose(ref, tot, rtol=1e-6)
ref = nc['SLP']
tot = w['SLP']
tot = tot.values
assert_allclose(ref, tot, rtol=1e-6)
@requires_dask
def test_ncl_diagvars_compressed(self):
rtol = 2e-5
import xarray as xr
wf = get_demo_file('wrf_cropped_compressed.nc')
ncl_out = get_demo_file('wrf_cropped_ncl.nc')
w = sio.open_wrf_dataset(wf).chunk()
nc = xr.open_dataset(ncl_out)
ref = nc['TK']
tot = w['TK']
assert_allclose(ref, tot, rtol=rtol)
ref = nc['SLP']
tot = w['SLP'].data
assert_allclose(ref, tot, rtol=rtol)
w = w.isel(time=1, south_north=slice(12, 16), west_east=slice(9, 16))
nc = nc.isel(Time=1, south_north=slice(12, 16), west_east=slice(9, 16))
ref = nc['TK']
tot = w['TK']
assert_allclose(ref, tot, rtol=rtol)
ref = nc['SLP']
tot = w['SLP']
assert_allclose(ref, tot, rtol=rtol)
w = w.isel(bottom_top=slice(3, 5))
nc = nc.isel(bottom_top=slice(3, 5))
ref = nc['TK']
tot = w['TK']
assert_allclose(ref, tot, rtol=rtol)
ref = nc['SLP']
tot = w['SLP']
assert_allclose(ref, tot, rtol=rtol)
@requires_dask
def test_unstagger(self):
wf = get_demo_file('wrf_cropped.nc')
w = sio.open_wrf_dataset(wf).chunk()
nc = sio.open_xr_dataset(wf).chunk()
nc['PH_UNSTAGG'] = nc['P']*0.
uns = nc['PH'].isel(bottom_top_stag=slice(0, -1)).values + \
nc['PH'].isel(bottom_top_stag=slice(1, len(nc.bottom_top_stag))).values
nc['PH_UNSTAGG'].values = uns * 0.5
assert_allclose(w['PH'], nc['PH_UNSTAGG'])
# chunk
v = w['PH'].chunk((1, 6, 13, 13))
assert_allclose(v.mean(), nc['PH_UNSTAGG'].mean(), atol=1e-2)
wn = w.isel(west_east=slice(4, 8))
ncn = nc.isel(west_east=slice(4, 8))
assert_allclose(wn['PH'], ncn['PH_UNSTAGG'])
wn = w.isel(south_north=slice(4, 8), time=1)
ncn = nc.isel(south_north=slice(4, 8), Time=1)
assert_allclose(wn['PH'], ncn['PH_UNSTAGG'])
wn = w.isel(west_east=4)
ncn = nc.isel(west_east=4)
assert_allclose(wn['PH'], ncn['PH_UNSTAGG'])
wn = w.isel(bottom_top=4)
ncn = nc.isel(bottom_top=4)
assert_allclose(wn['PH'], ncn['PH_UNSTAGG'])
wn = w.isel(bottom_top=0)
ncn = nc.isel(bottom_top=0)
assert_allclose(wn['PH'], ncn['PH_UNSTAGG'])
wn = w.isel(bottom_top=-1)
ncn = nc.isel(bottom_top=-1)
assert_allclose(wn['PH'], ncn['PH_UNSTAGG'])
w['PH'].chunk()
@requires_dask
def test_unstagger_compressed(self):
wf = get_demo_file('wrf_cropped.nc')
wfc = get_demo_file('wrf_cropped_compressed.nc')
w = sio.open_wrf_dataset(wf).chunk()
wc = sio.open_wrf_dataset(wfc).chunk()
assert_allclose(w['PH'], wc['PH'], rtol=0.003)
@requires_dask
def test_diagvars(self):
wf = get_demo_file('wrf_d01_allvars_cropped.nc')
w = sio.open_wrf_dataset(wf).chunk()
# ws
w['ws_ref'] = np.sqrt(w['U']**2 + w['V']**2)
assert_allclose(w['ws_ref'], w['WS'])
wcrop = w.isel(west_east=slice(4, 8), bottom_top=4)
assert_allclose(wcrop['ws_ref'], wcrop['WS'])
@requires_dask
def test_diagvars_compressed(self):
wf = get_demo_file('wrf_d01_allvars_cropped_compressed.nc')
w = sio.open_wrf_dataset(wf).chunk()
# ws
w['ws_ref'] = np.sqrt(w['U']**2 + w['V']**2)
assert_allclose(w['ws_ref'], w['WS'])
wcrop = w.isel(west_east=slice(4, 8), bottom_top=4)
assert_allclose(wcrop['ws_ref'], wcrop['WS'])
@requires_dask
def test_prcp(self):
wf = get_demo_file('wrfout_d01.nc')
w = sio.open_wrf_dataset(wf).chunk()
nc = sio.open_xr_dataset(wf)
nc['REF_PRCP_NC'] = nc['RAINNC']*0.
uns = nc['RAINNC'].isel(Time=slice(1, len(nc.bottom_top_stag))).values - \
nc['RAINNC'].isel(Time=slice(0, -1)).values
nc['REF_PRCP_NC'].values[1:, ...] = uns * 60 / 180. # for three hours
nc['REF_PRCP_NC'].values[0, ...] = np.NaN
nc['REF_PRCP_C'] = nc['RAINC']*0.
uns = nc['RAINC'].isel(Time=slice(1, len(nc.bottom_top_stag))).values - \
nc['RAINC'].isel(Time=slice(0, -1)).values
nc['REF_PRCP_C'].values[1:, ...] = uns * 60 / 180. # for three hours
nc['REF_PRCP_C'].values[0, ...] = np.NaN
nc['REF_PRCP'] = nc['REF_PRCP_C'] + nc['REF_PRCP_NC']
for suf in ['_NC', '_C', '']:
assert_allclose(w['PRCP' + suf], nc['REF_PRCP' + suf], rtol=1e-5)
wn = w.isel(time=slice(1, 3))
ncn = nc.isel(Time=slice(1, 3))
assert_allclose(wn['PRCP' + suf], ncn['REF_PRCP' + suf], rtol=1e-5)
wn = w.isel(time=2)
ncn = nc.isel(Time=2)
assert_allclose(wn['PRCP' + suf], ncn['REF_PRCP' + suf], rtol=1e-5)
wn = w.isel(time=1)
ncn = nc.isel(Time=1)
assert_allclose(wn['PRCP' + suf], ncn['REF_PRCP' + suf], rtol=1e-5)
wn = w.isel(time=0)
self.assertTrue(~np.any(np.isfinite(wn['PRCP' + suf].values)))
wn = w.isel(time=slice(1, 3), south_north=slice(50, -1))
ncn = nc.isel(Time=slice(1, 3), south_north=slice(50, -1))
assert_allclose(wn['PRCP' + suf], ncn['REF_PRCP' + suf], rtol=1e-5)
wn = w.isel(time=2, south_north=slice(50, -1))
ncn = nc.isel(Time=2, south_north=slice(50, -1))
assert_allclose(wn['PRCP' + suf], ncn['REF_PRCP' + suf], rtol=1e-5)
wn = w.isel(time=1, south_north=slice(50, -1))
ncn = nc.isel(Time=1, south_north=slice(50, -1))
assert_allclose(wn['PRCP' + suf], ncn['REF_PRCP' + suf], rtol=1e-5)
wn = w.isel(time=0, south_north=slice(50, -1))
            self.assertTrue(~np.any(np.isfinite(wn['PRCP' + suf].values)))
"""
Neuroimaging non-cartesian reconstruction
=========================================
Author: <NAME>
In this tutorial we will reconstruct an MR Image directly with density
compensation and SENSE from gpuNUFFT
Import neuroimaging data
------------------------
We use the toy datasets available in pysap, more specifically a 3D orange data
and the radial acquisition scheme (non-cartesian).
"""
# Package import
from mri.operators import NonCartesianFFT, WaveletUD2
from mri.operators.utils import convert_locations_to_mask, \
gridded_inverse_fourier_transform_nd
from mri.operators.fourier.utils import estimate_density_compensation
from mri.reconstructors import SingleChannelReconstructor
from mri.reconstructors.utils.extract_sensitivity_maps import get_Smaps
import pysap
from pysap.data import get_sample_data
# Third party import
from modopt.math.metrics import ssim
from modopt.opt.linear import Identity
from modopt.opt.proximity import SparseThreshold
import numpy as np
# Loading input data
image = get_sample_data('3d-pmri')
cartesian = np.linalg.norm(image, axis=0)
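# Note (added, illustrative): the L2 norm over axis 0 combines the per-coil
# images into a single magnitude volume, i.e. a root-sum-of-squares over the
# coil dimension. The same operation written out explicitly on made-up data:
_coils = np.random.randn(4, 8, 8) + 1j * np.random.randn(4, 8, 8)
_rss = np.sqrt(np.sum(np.abs(_coils) ** 2, axis=0))
assert np.allclose(_rss, np.linalg.norm(_coils, axis=0))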
import inspect
import numpy as np
from numba import cfunc
from numba.types import intc, CPointer, float64
from scipy import LowLevelCallable
from scipy import special
from scipy.integrate import quad
from autolens import decorator_util
from autolens.model.profiles import geometry_profiles
from autolens.model.profiles import light_profiles
def jit_integrand(integrand_function):
jitted_function = decorator_util.jit(nopython=True, cache=True)(integrand_function)
no_args = len(inspect.getfullargspec(integrand_function).args)
wrapped = None
if no_args == 4:
# noinspection PyUnusedLocal
def wrapped(n, xx):
return jitted_function(xx[0], xx[1], xx[2], xx[3])
elif no_args == 5:
# noinspection PyUnusedLocal
def wrapped(n, xx):
return jitted_function(xx[0], xx[1], xx[2], xx[3], xx[4])
elif no_args == 6:
# noinspection PyUnusedLocal
def wrapped(n, xx):
return jitted_function(xx[0], xx[1], xx[2], xx[3], xx[4], xx[5])
elif no_args == 7:
# noinspection PyUnusedLocal
def wrapped(n, xx):
return jitted_function(xx[0], xx[1], xx[2], xx[3], xx[4], xx[5], xx[6])
elif no_args == 8:
# noinspection PyUnusedLocal
def wrapped(n, xx):
return jitted_function(xx[0], xx[1], xx[2], xx[3], xx[4], xx[5], xx[6], xx[7])
elif no_args == 9:
# noinspection PyUnusedLocal
def wrapped(n, xx):
return jitted_function(xx[0], xx[1], xx[2], xx[3], xx[4], xx[5], xx[6], xx[7], xx[8])
elif no_args == 10:
# noinspection PyUnusedLocal
def wrapped(n, xx):
return jitted_function(xx[0], xx[1], xx[2], xx[3], xx[4], xx[5], xx[6], xx[7], xx[8], xx[9])
elif no_args == 11:
# noinspection PyUnusedLocal
def wrapped(n, xx):
return jitted_function(xx[0], xx[1], xx[2], xx[3], xx[4], xx[5], xx[6], xx[7], xx[8], xx[9], xx[10])
cf = cfunc(float64(intc, CPointer(float64)))
return LowLevelCallable(cf(wrapped).ctypes)
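# --- Illustrative sketch (added): the calling pattern jit_integrand targets ---
# scipy.integrate.quad can call a compiled function through LowLevelCallable if
# it exposes the C signature double f(int n, double *xx), where xx[0] is the
# integration variable and xx[1:] are the extra arguments. A standalone toy
# example, independent of the decorator above:
if __name__ == '__main__':
    @cfunc(float64(intc, CPointer(float64)))
    def _toy_integrand(n, xx):
        return xx[0] * xx[0] + xx[1]      # integrate x**2 + a over x
    _val, _err = quad(LowLevelCallable(_toy_integrand.ctypes), 0.0, 1.0, args=(2.0,))
    print(_val)  # ~ 1/3 + 2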
class MassProfile(object):
def surface_density_func(self, eta):
raise NotImplementedError("surface_density_func should be overridden")
def surface_density_from_grid(self, grid):
pass
# raise NotImplementedError("surface_density_from_grid should be overridden")
def potential_from_grid(self, grid):
pass
# raise NotImplementedError("potential_from_grid should be overridden")
def deflections_from_grid(self, grid):
raise NotImplementedError("deflections_from_grid should be overridden")
def mass_within_circle(self, radius, conversion_factor):
raise NotImplementedError()
def mass_within_ellipse(self, major_axis, conversion_factor):
raise NotImplementedError()
# noinspection PyAbstractClass
class EllipticalMassProfile(geometry_profiles.EllipticalProfile, MassProfile):
def __init__(self, centre=(0.0, 0.0), axis_ratio=1.0, phi=0.0):
"""
Abstract class for elliptical mass profiles.
Parameters
----------
centre: (float, float)
The (y,x) arc-second coordinates of the profile centre.
axis_ratio : float
Ellipse's minor-to-major axis ratio (b/a)
phi : float
Rotation angle of profile's ellipse counter-clockwise from positive x-axis
"""
super(EllipticalMassProfile, self).__init__(centre, axis_ratio, phi)
self.axis_ratio = axis_ratio
self.phi = phi
def mass_within_circle(self, radius, conversion_factor=1.0):
""" Compute the mass profiles's total mass within a circle of specified radius. This is performed via \
integration of the surface density profiles and is centred on the mass profile.
The value returned by this integral is dimensionless, and a conversion factor can be specified to convert it \
to a physical value (e.g. the critical surface mass density).
Parameters
----------
radius : float
The radius of the circle to compute the dimensionless mass within.
conversion_factor : float
Factor the dimensionless mass is multiplied by to convert it to a physical mass (e.g. the critical surface \
mass density).
"""
return conversion_factor * quad(self.mass_integral, a=0.0, b=radius, args=(1.0,))[0]
def mass_within_ellipse(self, major_axis, conversion_factor=1.0):
""" Compute the mass profiles's total dimensionless mass within an ellipse of specified radius. This is \
performed via integration of the surface density profiles and is centred and rotationally aligned with the \
mass profile.
The value returned by this integral is dimensionless, and a conversion factor can be specified to convert it \
to a physical value (e.g. the critical surface mass density).
Parameters
----------
major_axis : float
The major-axis radius of the ellipse.
conversion_factor : float
Factor the dimensionless mass is multiplied by to convert it to a physical mass (e.g. the critical surface \
mass density).
"""
return conversion_factor * quad(self.mass_integral, a=0.0, b=major_axis, args=(self.axis_ratio,))[0]
def mass_integral(self, x, axis_ratio):
"""Routine to integrate an elliptical light profiles - set axis ratio to 1 to compute the luminosity within a \
circle"""
r = x * axis_ratio
return 2 * np.pi * r * self.surface_density_func(x)
def density_between_circular_annuli(self, inner_annuli_radius, outer_annuli_radius, conversion_factor=1.0):
"""Calculate the mass between two circular annuli and compute the density by dividing by the annuli surface
area.
The value returned by the mass integral is dimensionless, therefore the density between annuli is returned in \
units of inverse radius squared. A conversion factor can be specified to convert this to a physical value \
(e.g. the critical surface mass density).
Parameters
-----------
inner_annuli_radius : float
The radius of the inner annulus outside of which the density are estimated.
outer_annuli_radius : float
The radius of the outer annulus inside of which the density is estimated.
"""
annuli_area = (np.pi * outer_annuli_radius ** 2.0) - (np.pi * inner_annuli_radius ** 2.0)
return (self.mass_within_circle(radius=outer_annuli_radius, conversion_factor=conversion_factor) -
self.mass_within_circle(radius=inner_annuli_radius, conversion_factor=conversion_factor)) \
/ annuli_area
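# --- Worked check (added, illustrative): the 2*pi*r integrand ---
# mass_within_circle integrates 2*pi*r*Sigma(r) from 0 to R, so for a constant
# surface density Sigma = 1 the result must equal the circle area pi*R**2. This
# is a cheap sanity check of the integral convention used by mass_integral.
if __name__ == '__main__':
    _R = 2.0
    _area, _ = quad(lambda r: 2 * np.pi * r, 0.0, _R)
    assert abs(_area - np.pi * _R ** 2) < 1e-8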
class EllipticalCoredPowerLaw(EllipticalMassProfile, MassProfile):
def __init__(self, centre=(0.0, 0.0), axis_ratio=1.0, phi=0.0, einstein_radius=1.0, slope=2.0, core_radius=0.01):
"""
Represents a cored elliptical power-law density distribution
Parameters
----------
centre: (float, float)
The (y,x) arc-second coordinates of the profile centre.
axis_ratio : float
The elliptical mass profile's minor-to-major axis ratio (b/a).
phi : float
Rotation angle of mass profile's ellipse counter-clockwise from positive x-axis.
einstein_radius : float
The arc-second Einstein radius.
slope : float
The density slope of the power-law (lower value -> shallower profile, higher value -> steeper profile).
core_radius : float
The arc-second radius of the inner core.
"""
super(EllipticalCoredPowerLaw, self).__init__(centre, axis_ratio, phi)
self.einstein_radius = einstein_radius
self.slope = slope
self.core_radius = core_radius
@property
def einstein_radius_rescaled(self):
"""Rescale the einstein radius by slope and axis_ratio, to reduce its degeneracy with other mass-profiles
parameters"""
return ((3 - self.slope) / (1 + self.axis_ratio)) * self.einstein_radius ** (self.slope - 1)
@geometry_profiles.transform_grid
def surface_density_from_grid(self, grid):
""" Calculate the projected surface density in dimensionless units at a given set of gridded coordinates.
Parameters
----------
grid : grids.RegularGrid
The grid of (y,x) arc-second coordinates the surface density is computed on.
"""
surface_density_grid = np.zeros(grid.shape[0])
grid_eta = self.grid_to_elliptical_radii(grid)
for i in range(grid.shape[0]):
surface_density_grid[i] = self.surface_density_func(grid_eta[i])
return surface_density_grid
@geometry_profiles.transform_grid
def potential_from_grid(self, grid):
"""
Calculate the potential at a given set of gridded coordinates.
Parameters
----------
grid : grids.RegularGrid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
potential_grid = np.zeros(grid.shape[0])
for i in range(grid.shape[0]):
potential_grid[i] = quad(self.potential_func, a=0.0, b=1.0,
args=(grid[i, 0], grid[i, 1], self.axis_ratio, self.slope, self.core_radius))[0]
return self.einstein_radius_rescaled * self.axis_ratio * potential_grid
@geometry_profiles.transform_grid
def deflections_from_grid(self, grid):
"""
Calculate the deflection angles at a given set of gridded coordinates.
Parameters
----------
grid : grids.RegularGrid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
def calculate_deflection_component(npow, index):
deflection_grid = np.zeros(grid.shape[0])
einstein_radius_rescaled = self.einstein_radius_rescaled
for i in range(grid.shape[0]):
deflection_grid[i] = self.axis_ratio * grid[i, index] * quad(self.deflection_func, a=0.0, b=1.0,
args=(grid[i, 0], grid[i, 1], npow,
self.axis_ratio,
einstein_radius_rescaled, self.slope,
self.core_radius))[0]
return deflection_grid
deflection_y = calculate_deflection_component(1.0, 0)
deflection_x = calculate_deflection_component(0.0, 1)
return self.rotate_grid_from_profile(np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T))
def surface_density_func(self, radius):
return self.einstein_radius_rescaled * (self.core_radius ** 2 + radius ** 2) ** (-(self.slope - 1) / 2.0)
@staticmethod
@jit_integrand
def potential_func(u, y, x, axis_ratio, slope, core_radius):
eta = np.sqrt((u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))))
return (eta / u) * ((3.0 - slope) * eta) ** -1.0 * \
((core_radius ** 2.0 + eta ** 2.0) ** ((3.0 - slope) / 2.0) -
core_radius ** (3 - slope)) / ((1 - (1 - axis_ratio ** 2) * u) ** 0.5)
@staticmethod
@jit_integrand
def deflection_func(u, y, x, npow, axis_ratio, einstein_radius_rescaled, slope, core_radius):
eta_u = np.sqrt((u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))))
return einstein_radius_rescaled * (core_radius ** 2 + eta_u ** 2) ** (-(slope - 1) / 2.0) / (
(1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5))
class SphericalCoredPowerLaw(EllipticalCoredPowerLaw):
def __init__(self, centre=(0.0, 0.0), einstein_radius=1.0, slope=2.0, core_radius=0.0):
"""
Represents a cored spherical power-law density distribution
Parameters
----------
centre: (float, float)
The (y,x) arc-second coordinates of the profile centre.
einstein_radius : float
The arc-second Einstein radius.
slope : float
The density slope of the power-law (lower value -> shallower profile, higher value -> steeper profile).
core_radius : float
The arc-second radius of the inner core.
"""
super(SphericalCoredPowerLaw, self).__init__(centre, 1.0, 0.0, einstein_radius, slope, core_radius)
@geometry_profiles.transform_grid
def deflections_from_grid(self, grid):
"""
Calculate the deflection angles at a given set of gridded coordinates.
Parameters
----------
grid : grids.RegularGrid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
eta = self.grid_to_radius(grid)
deflection = np.multiply(2. * self.einstein_radius_rescaled, np.divide(
np.add(np.power(np.add(self.core_radius ** 2, np.square(eta)), (3. - self.slope) / 2.),
-self.core_radius ** (3 - self.slope)), np.multiply((3. - self.slope), eta)))
return self.grid_radius_to_cartesian(grid, deflection)
class EllipticalPowerLaw(EllipticalCoredPowerLaw):
def __init__(self, centre=(0.0, 0.0), axis_ratio=1.0, phi=0.0, einstein_radius=1.0, slope=2.0):
"""
Represents an elliptical power-law density distribution.
Parameters
----------
centre: (float, float)
The (y,x) arc-second coordinates of the profile centre.
axis_ratio : float
The elliptical mass profile's minor-to-major axis ratio (b/a).
phi : float
Rotation angle of mass profile's ellipse counter-clockwise from positive x-axis.
einstein_radius : float
The arc-second Einstein radius.
slope : float
The density slope of the power-law (lower value -> shallower profile, higher value -> steeper profile).
"""
super(EllipticalPowerLaw, self).__init__(centre, axis_ratio, phi, einstein_radius, slope, 0.0)
def surface_density_func(self, radius):
if radius > 0.0:
return self.einstein_radius_rescaled * radius ** (-(self.slope - 1))
else:
return np.inf
@staticmethod
@jit_integrand
def potential_func(u, y, x, axis_ratio, slope, core_radius):
eta_u = np.sqrt((u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))))
return (eta_u / u) * ((3.0 - slope) * eta_u) ** -1.0 * eta_u ** (3.0 - slope) / \
((1 - (1 - axis_ratio ** 2) * u) ** 0.5)
@staticmethod
@jit_integrand
def deflection_func(u, y, x, npow, axis_ratio, einstein_radius_rescaled, slope, core_radius):
eta_u = np.sqrt((u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))))
return einstein_radius_rescaled * eta_u ** (-(slope - 1)) / ((1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5))
class SphericalPowerLaw(EllipticalPowerLaw):
def __init__(self, centre=(0.0, 0.0), einstein_radius=1.0, slope=2.0):
"""
Represents a spherical power-law density distribution.
Parameters
----------
centre: (float, float)
The (y,x) arc-second coordinates of the profile centre.
einstein_radius : float
The arc-second Einstein radius.
slope : float
The density slope of the power-law (lower value -> shallower profile, higher value -> steeper profile).
"""
super(SphericalPowerLaw, self).__init__(centre, 1.0, 0.0, einstein_radius, slope)
@geometry_profiles.transform_grid
def deflections_from_grid(self, grid):
eta = self.grid_to_radius(grid)
deflection_r = 2.0 * self.einstein_radius_rescaled * np.divide(np.power(eta, (3.0 - self.slope)),
np.multiply((3.0 - self.slope), eta))
return self.grid_radius_to_cartesian(grid, deflection_r)
class EllipticalCoredIsothermal(EllipticalCoredPowerLaw):
def __init__(self, centre=(0.0, 0.0), axis_ratio=1.0, phi=0.0, einstein_radius=1.0, core_radius=0.05):
"""
Represents a cored elliptical isothermal density distribution, which is equivalent to the elliptical power-law
density distribution for the value slope=2.0
Parameters
----------
centre: (float, float)
The (y,x) arc-second coordinates of the profile centre.
axis_ratio : float
The elliptical mass profile's minor-to-major axis ratio (b/a).
phi : float
Rotation angle of mass profile's ellipse counter-clockwise from positive x-axis.
einstein_radius : float
The arc-second Einstein radius.
core_radius : float
The arc-second radius of the inner core.
"""
super(EllipticalCoredIsothermal, self).__init__(centre, axis_ratio, phi, einstein_radius, 2.0,
core_radius)
class SphericalCoredIsothermal(SphericalCoredPowerLaw):
def __init__(self, centre=(0.0, 0.0), einstein_radius=1.0, core_radius=0.05):
"""
Represents a cored spherical isothermal density distribution, which is equivalent to the elliptical power-law
density distribution for the value slope=2.0
Parameters
----------
centre: (float, float)
The (y,x) arc-second coordinates of the profile centre.
einstein_radius : float
The arc-second Einstein radius.
core_radius : float
The arc-second radius of the inner core.
"""
super(SphericalCoredIsothermal, self).__init__(centre, einstein_radius, 2.0, core_radius)
class EllipticalIsothermal(EllipticalPowerLaw):
def __init__(self, centre=(0.0, 0.0), axis_ratio=0.9, phi=0.0, einstein_radius=1.0):
"""
Represents an elliptical isothermal density distribution, which is equivalent to the elliptical power-law
density distribution for the value slope=2.0
Parameters
----------
centre: (float, float)
The (y,x) arc-second coordinates of the profile centre.
axis_ratio : float
The elliptical mass profile's minor-to-major axis ratio (b/a).
phi : float
Rotation angle of mass profile's ellipse counter-clockwise from positive x-axis.
einstein_radius : float
The arc-second Einstein radius.
"""
super(EllipticalIsothermal, self).__init__(centre, axis_ratio, phi, einstein_radius, 2.0)
@geometry_profiles.transform_grid
def deflections_from_grid(self, grid):
"""
Calculate the deflection angles at a given set of gridded coordinates.
For coordinates (0.0, 0.0) the analytic calculation of the deflection angle gives a NaN. Therefore, \
coordinates at (0.0, 0.0) are shifted slightly to (1.0e-8, 1.0e-8).
Parameters
----------
grid : grids.RegularGrid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
grid[(grid[:, 0] == 0.0) & (grid[:, 1] == 0.0)] = np.array([1.0e-8, 1.0e-8])
try:
factor = 2.0 * self.einstein_radius_rescaled * self.axis_ratio / np.sqrt(1 - self.axis_ratio ** 2)
psi = np.sqrt(np.add(np.multiply(self.axis_ratio ** 2, np.square(grid[:, 1])), np.square(grid[:, 0])))
deflection_y = np.arctanh(np.divide(np.multiply(np.sqrt(1 - self.axis_ratio ** 2), grid[:, 0]), psi))
deflection_x = np.arctan(np.divide(np.multiply(np.sqrt(1 - self.axis_ratio ** 2), grid[:, 1]), psi))
return self.rotate_grid_from_profile(np.multiply(factor, np.vstack((deflection_y, deflection_x)).T))
except ZeroDivisionError:
return self.grid_radius_to_cartesian(grid, np.full(grid.shape[0], 2.0 * self.einstein_radius_rescaled))
class SphericalIsothermal(EllipticalIsothermal):
def __init__(self, centre=(0.0, 0.0), einstein_radius=1.0):
"""
Represents a spherical isothermal density distribution, which is equivalent to the spherical power-law
density distribution for the value slope=2.0
Parameters
----------
centre: (float, float)
The (y,x) arc-second coordinates of the profile centre.
einstein_radius : float
The arc-second Einstein radius.
"""
super(SphericalIsothermal, self).__init__(centre, 1.0, 0.0, einstein_radius)
@geometry_profiles.transform_grid
def potential_from_grid(self, grid):
"""
Calculate the potential at a given set of gridded coordinates.
Parameters
----------
grid : grids.RegularGrid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
eta = self.grid_to_elliptical_radii(grid)
return 2.0 * self.einstein_radius_rescaled * eta
@geometry_profiles.transform_grid
def deflections_from_grid(self, grid):
"""
Calculate the deflection angles at a given set of gridded coordinates.
Parameters
----------
grid : grids.RegularGrid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
return self.grid_radius_to_cartesian(grid, np.full(grid.shape[0], 2.0 * self.einstein_radius_rescaled))
# noinspection PyAbstractClass
class AbstractEllipticalGeneralizedNFW(EllipticalMassProfile, MassProfile):
epsrel = 1.49e-5
def __init__(self, centre=(0.0, 0.0), axis_ratio=1.0, phi=0.0, kappa_s=0.05, inner_slope=1.0, scale_radius=5.0):
"""
The elliptical NFW profiles, used to fit the dark matter halo of the lens.
Parameters
----------
centre: (float, float)
The (y,x) arc-second coordinates of the profile centre.
axis_ratio : float
Ratio of profiles ellipse's minor and major axes (b/a).
phi : float
Rotational angle of profiles ellipse counter-clockwise from positive x-axis.
kappa_s : float
The overall normalization of the dark matter halo \
(kappa_s = (rho_s * scale_radius)/lensing_critical_density)
inner_slope : float
The inner slope of the dark matter halo
scale_radius : float
The arc-second radius where the average density within this radius is 200 times the critical density of \
            the Universe.
"""
super(AbstractEllipticalGeneralizedNFW, self).__init__(centre, axis_ratio, phi)
super(MassProfile, self).__init__()
self.kappa_s = kappa_s
self.scale_radius = scale_radius
self.inner_slope = inner_slope
def tabulate_integral(self, grid, tabulate_bins):
"""Tabulate an integral over the surface density of deflection potential of a mass profile. This is used in \
the GeneralizedNFW profile classes to speed up the integration procedure.
Parameters
-----------
grid : grids.RegularGrid
The grid of (y,x) arc-second coordinates the potential / deflection_stacks are computed on.
tabulate_bins : int
The number of bins to tabulate the inner integral of this profile.
"""
eta_min = 1.0e-4
eta_max = 1.05 * np.max(self.grid_to_elliptical_radii(grid))
minimum_log_eta = np.log10(eta_min)
maximum_log_eta = np.log10(eta_max)
bin_size = (maximum_log_eta - minimum_log_eta) / (tabulate_bins - 1)
return eta_min, eta_max, minimum_log_eta, maximum_log_eta, bin_size
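    # Descriptive note: the tabulation is uniform in log10(eta), with
    # bin_size = (log10(eta_max) - log10(eta_min)) / (tabulate_bins - 1), so the bins cover the
    # grid's elliptical radii geometrically from eta_min = 1e-4 up to 1.05 * max(eta).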
@geometry_profiles.transform_grid
def surface_density_from_grid(self, grid):
""" Calculate the projected surface density in dimensionless units at a given set of gridded coordinates.
Parameters
----------
grid : grids.RegularGrid
The grid of (y,x) arc-second coordinates the surface density is computed on.
"""
surface_density_grid = np.zeros(grid.shape[0])
grid_eta = self.grid_to_elliptical_radii(grid)
for i in range(grid.shape[0]):
surface_density_grid[i] = self.surface_density_func(grid_eta[i])
return surface_density_grid
class EllipticalGeneralizedNFW(AbstractEllipticalGeneralizedNFW):
@geometry_profiles.transform_grid
def potential_from_grid(self, grid, tabulate_bins=1000):
"""
Calculate the potential at a given set of gridded coordinates.
Parameters
----------
grid : grids.RegularGrid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
tabulate_bins : int
The number of bins to tabulate the inner integral of this profile.
"""
@jit_integrand
def deflection_integrand(x, kappa_radius, scale_radius, inner_slope):
return (x + kappa_radius / scale_radius) ** (inner_slope - 3) * ((1 - np.sqrt(1 - x ** 2)) / x)
eta_min, eta_max, minimum_log_eta, maximum_log_eta, bin_size = self.tabulate_integral(grid, tabulate_bins)
potential_grid = np.zeros(grid.shape[0])
deflection_integral = np.zeros((tabulate_bins,))
for i in range(tabulate_bins):
eta = 10. ** (minimum_log_eta + (i - 1) * bin_size)
integral = \
quad(deflection_integrand, a=0.0, b=1.0, args=(eta, self.scale_radius, self.inner_slope),
epsrel=EllipticalGeneralizedNFW.epsrel)[0]
deflection_integral[i] = ((eta / self.scale_radius) ** (2 - self.inner_slope)) * (
(1.0 / (3 - self.inner_slope)) *
special.hyp2f1(3 - self.inner_slope, 3 - self.inner_slope, 4 - self.inner_slope,
- (eta / self.scale_radius)) + integral)
for i in range(grid.shape[0]):
potential_grid[i] = (2.0 * self.kappa_s * self.axis_ratio) * \
quad(self.potential_func, a=0.0, b=1.0, args=(grid[i, 0], grid[i, 1],
self.axis_ratio, minimum_log_eta,
maximum_log_eta, tabulate_bins,
deflection_integral),
epsrel=EllipticalGeneralizedNFW.epsrel)[0]
return potential_grid
@geometry_profiles.transform_grid
def deflections_from_grid(self, grid, tabulate_bins=1000):
"""
Calculate the deflection angles at a given set of gridded coordinates.
Parameters
----------
grid : grids.RegularGrid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
tabulate_bins : int
The number of bins to tabulate the inner integral of this profile.
"""
@jit_integrand
def surface_density_integrand(x, kappa_radius, scale_radius, inner_slope):
return (3 - inner_slope) * (x + kappa_radius / scale_radius) ** (inner_slope - 4) * (1 - np.sqrt(1 - x * x))
def calculate_deflection_component(npow, index):
deflection_grid = np.zeros(grid.shape[0])
for j in range(grid.shape[0]):
coeff = 2.0 * self.kappa_s * self.axis_ratio * grid[j, index]
deflection_grid[j] = coeff * quad(self.deflection_func, a=0.0, b=1.0, args=(
grid[j, 0],
grid[j, 1],
npow,
self.axis_ratio,
minimum_log_eta,
maximum_log_eta,
tabulate_bins,
surface_density_integral), epsrel=EllipticalGeneralizedNFW.epsrel)[0]
return deflection_grid
eta_min, eta_max, minimum_log_eta, maximum_log_eta, bin_size = self.tabulate_integral(grid, tabulate_bins)
        surface_density_integral = np.zeros((tabulate_bins,))
import sys, os, cv2
import time
import numpy as np
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import logging
tf.get_logger().setLevel(logging.ERROR)
import common
from mgail import MGAIL
from tensorboardX import SummaryWriter
import moviepy.editor as mpy
class Driver(object):
def __init__(self, environment):
self.env = environment
self.algorithm = MGAIL(environment=self.env)
self.init_graph = tf.global_variables_initializer()
if self.env.alg == 'mairlTransfer':
variables_to_restore = [var for var in tf.global_variables()
if var.name.startswith('discriminator')]
# print('variables_to_restore: ', variables_to_restore)
self.restore_disc = tf.train.Saver(variables_to_restore)
self.saver = tf.train.Saver(max_to_keep=None)
tf_config = tf.ConfigProto(allow_soft_placement=True)
# Prevent tensorflow from taking all the gpu memory
tf_config.gpu_options.allow_growth = True
self.sess = tf.Session(config=tf_config)
if self.env.trained_model:
if self.env.train_mode and self.env.alg == 'mairlTransfer':
# initialize other parameters
self.sess.run(self.init_graph)
self.restore_disc.restore(self.sess, self.env.trained_model)
print('(mairlTransfer) Restore {} successfully.'.format(self.env.trained_model))
else:
self.saver.restore(self.sess, self.env.trained_model)
print('(Eval) Restore {} successfully.'.format(self.env.trained_model))
else:
self.sess.run(self.init_graph)
self.sess.graph.finalize()
self.run_dir = self.env.run_dir
self.loss = 999. * np.ones(3)
self.reward_mean = 0
self.reward_std = 0
self.run_avg = 0.001
self.discriminator_policy_switch = 0
self.policy_loop_time = 0
self.disc_acc = 0
self.er_count = 0
self.itr = 0
self.best_reward = 0
self.mode = 'Prep'
self.writer = SummaryWriter(log_dir=self.env.config_dir)
np.set_printoptions(precision=2)
np.set_printoptions(linewidth=220)
self.video_index = 0
def update_stats(self, module, attr, value):
v = {'forward_model': 0, 'discriminator': 1, 'policy': 2}
module_ind = v[module]
if attr == 'loss':
self.loss[module_ind] = self.run_avg * self.loss[module_ind] + (1 - self.run_avg) * np.asarray(value)
elif attr == 'accuracy':
self.disc_acc = self.run_avg * self.disc_acc + (1 - self.run_avg) * np.asarray(value)
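    # Descriptive note: update_stats keeps tracked = run_avg * tracked + (1 - run_avg) * sample.
    # With run_avg = 0.001 the tracked loss/accuracy therefore follows the newest sample almost
    # exactly (99.9 % of the weight is on the latest value).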
def train_forward_model(self):
alg = self.algorithm
states_, actions, _, states = self.algorithm.er_agent.sample()[:4]
fetches = [alg.forward_model.minimize, alg.forward_model.loss]
feed_dict = {alg.states_: states_, alg.states: states, alg.actions: actions,
alg.do_keep_prob: self.env.do_keep_prob}
run_vals = self.sess.run(fetches, feed_dict)
self.update_stats('forward_model', 'loss', run_vals[1])
if self.itr % self.env.discr_policy_itrvl == 0:
self.writer.add_scalar('train/forward_model/loss', run_vals[1], self.itr)
def train_discriminator(self):
alg = self.algorithm
# get states and actions
state_a, action_a, rewards_a, state_a_, terminals_a = self.algorithm.er_agent.sample()[:5]
state_e, action_e, rewards_e, state_e_, terminals_e = self.algorithm.er_expert.sample()[:5]
states = np.concatenate([state_a, state_e])
        dones = np.concatenate([terminals_a, terminals_e])
import os
import librosa
import matplotlib.pyplot as plt
import numpy as np
import cv2
from easydict import EasyDict
import argparse
common_config = {'sr': 44100, 'hop_length': 512, 'mono': True, 'fmin': 27.5}
split_audio_config = {'top_db': 20, 'frame_length': 1024, 'merge_thrd': 0.2}
cqt_config = {'bins_per_octave':24, 'n_bins':178}
mel_config = {'fmax': 8000, 'n_mels': 178, 'mel_n_fft': 2048}
img_config = {'max_ratio': 0.65, 'best_ratio':0.2, 'img_width_factor': 20, 'img_height_factor': 40}
configs = dict(**common_config, **cqt_config, **mel_config,
**img_config, **split_audio_config)
configs = EasyDict(configs)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--audiodir', type=str)
parser.add_argument('--savedir', type=str)
parser.add_argument('--ext', type=str, default='.flac')
parser.add_argument('--prefix', action='store_true', default=False)
# only for our paper experiment, not needed for inference
parser.add_argument('--refaudiodir', type=str, default=None)
parser.add_argument('--cutline', action='store_true', default=False)
parser.add_argument('--type', type=str, default='cqt')
args = parser.parse_args()
return args
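# Example invocation (the script name and paths are placeholders, not taken from this project):
#   python prepare_spectrograms.py --audiodir ./wav --savedir ./spec_imgs --ext .flac --type cqt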
def plot_specs(specs, plt):
plt.matshow(specs)
ax = plt.gca()
ax.xaxis.set_ticks_position('bottom')
ax.invert_yaxis()
def set_img_size(width, height, plt):
fig = plt.gcf()
fig.set_size_inches(width / 20, height / 40)
plt.margins(0, 0)
plt.axis('off')
def get_spectrogram(audiopath=None, audio=None, type='cqt'):
    if audio is None:
audio, _ = librosa.load(audiopath, sr=configs.sr, mono=True)
if type=='mel':
specs = librosa.feature.melspectrogram(audio, sr=configs.sr, n_fft=configs.mel_n_fft,
hop_length=configs.hop_length, fmin=configs.fmin, fmax=configs.fmax, htk=True, n_mels=configs.n_mels)
specs = librosa.power_to_db(specs)
elif type=='cqt':
specs = librosa.cqt(audio, sr=configs.sr, hop_length=configs.hop_length, fmin=configs.fmin, bins_per_octave=
configs.bins_per_octave, n_bins=configs.n_bins)
specs = librosa.amplitude_to_db(np.abs(specs))
else:
raise ValueError("error spectrogram type!")
return audio, specs
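# Illustrative call (the path is a placeholder): returns the raw waveform and a dB-scaled
# spectrogram with configs.n_bins rows for 'cqt' (configs.n_mels rows for 'mel').
#   y, S = get_spectrogram(audiopath='example.flac', type='cqt')   # S.shape == (178, n_frames)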
def get_audio_specs(audiopath=None, audio=None, sr_=None):
if audiopath:
        assert audio is None and sr_ is None, 'audio and sr_ must be None when audiopath is given'
audio, sr_ = librosa.load(audiopath, sr=configs.sr, mono=configs.mono)
specs = librosa.cqt(audio, sr=configs.sr, hop_length=configs.hop_length, fmin=configs.fmin,
n_bins=configs.n_bins, bins_per_octave=configs.bins_per_octave)
specs = np.abs(specs)
specs = librosa.amplitude_to_db(specs)
return audio, specs
def get_figname(audiopath, prefix=''):
figname = os.path.splitext(os.path.basename(audiopath))[0]
return prefix+figname+'.png'
def get_img_scale(specs, figname, savedir, delete_fig=False, save_dir=None):
height, width = specs.shape
plt.matshow(specs)
ax = plt.gca()
ax.xaxis.set_ticks_position('bottom')
ax.invert_yaxis()
# set_img_size(height, width, plt)
fig = plt.gcf()
# raise ValueError(fig.get_dpi())
fig.set_size_inches(width / 20, height / 40)
plt.margins(0, 0)
plt.axis('off')
figpath = os.path.join(savedir, figname)
plt.savefig(figpath, bbox_inches='tight', pad_inches=0, dpi=100.0)
plt.close()
img = cv2.imread(figpath)
img_height, img_width, channel = img.shape
scale_height = img_height/height
scale_width = img_width/width
if delete_fig:
os.remove(figpath)
return [scale_height, scale_width], [img_height, img_width]
def get_split_line(audio):
'''
:param audio:
:return new_splits: the idx is the raw point index
'''
splits = librosa.effects.split(audio, top_db=20, frame_length=1024, hop_length=512)
# audio onset, end merge
merge_thrd = 0.2
new_splits = [splits[0]]
for idx in range(1, len(splits)):
if (splits[idx][1] - splits[idx][0]) / configs.sr < merge_thrd:
new_splits[-1][1] = splits[idx][1]
else:
new_splits.append(splits[idx])
return new_splits
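# get_split_line returns a list of [start, end] sample-index pairs of non-silent regions,
# where any region shorter than merge_thrd seconds is merged into the preceding one.
# Sketch of the output shape (made-up indices into the waveform sampled at configs.sr):
#   [[1024, 52736], [61440, 120832], ...]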
def split_long_silence(start, end, new_splits, scale_width, scale_height):
height = np.round(scale_height*configs.n_bins)
width = np.round((end-start)*scale_width)
ratio = width/height-1
best_ratio = configs.best_ratio
if ratio<0:
raise ValueError('cannot use this function')
if ratio<=best_ratio:
new_splits.append(end)
return new_splits
best_hop_len = int(np.round(height*(1+best_ratio) / scale_width))
split = start + best_hop_len
new_splits.append(split)
width = np.round((end-split)*scale_width)
ratio = width/height - 1
while ratio>=best_ratio:
split = split+best_hop_len
new_splits.append(split)
width = np.round((end-split)*scale_width)
ratio = width/height - 1
if ratio>=0:
new_splits.append(end)
return new_splits
def get_s_ratio(new_splits, splits, seg_idx, height, scale_width):
hop_length = configs.hop_length
split_idx = max(new_splits[-1], int(np.round(splits[seg_idx + 1][0] - 2*hop_length)))
width = np.round((split_idx - new_splits[-1]) * scale_width)
s_ratio = width / height - 1
return split_idx, s_ratio
def get_next_ratio(new_splits, splits, seg_idx, height, scale_width):
hop_length = configs.hop_length
begin_split_next = max(new_splits[-1], np.round(splits[seg_idx + 1][0] - 2*hop_length))
end_split_next = np.round(splits[seg_idx + 1][1] + 2*hop_length)
width = np.round((end_split_next - begin_split_next) * scale_width)
next_ratio = width / height - 1
return next_ratio
def get_best_splits(splits, scale_height, scale_width, audio_length):
'''
:param splits: (start, end) tuples
:return new_splits: tuple, each two successive complete a segment
the long silence at the begin or the inner can't be ignored, but it can be
ignored if it is at the end of the audio.
'''
hop_length = configs.hop_length
best_ratio = configs.best_ratio
max_ratio = configs.max_ratio
new_splits=[0]
# the half spec frame
split_idx = int(max(0, np.round(splits[0][0]-2*hop_length)))
width = np.round(scale_width*split_idx)
height = np.round(scale_height*configs.n_bins)
ratio = width/height - 1
if abs(ratio)<=best_ratio:
new_splits.append(split_idx)
# the silence is to long
elif ratio>=best_ratio:
new_splits=split_long_silence(0, split_idx, new_splits, scale_width, scale_height)
    # too short (ratio < -best_ratio): don't do anything
seg_idx = 0
while seg_idx<len(splits)-1:
split_idx = np.round(min(splits[seg_idx][1] + 2*hop_length,splits[seg_idx+1][0] - 2*hop_length))
width = (split_idx - new_splits[-1])*scale_width
ratio = width / height - 1
old_split_idx = split_idx
if ratio>max_ratio:
new_splits.append(split_idx)
elif -best_ratio<=ratio<=0:
split_idx, s_ratio=get_s_ratio(new_splits, splits, seg_idx, height, scale_width)
if s_ratio>max_ratio:
new_splits = split_long_silence(new_splits[-1], split_idx, new_splits, scale_width, scale_height)
else:
next_ratio = get_next_ratio(new_splits, splits, seg_idx, height, scale_width)
if next_ratio > max_ratio:
new_splits.append(split_idx)
elif s_ratio>best_ratio:
new_splits = split_long_silence(new_splits[-1], split_idx, new_splits, scale_width, scale_height)
elif s_ratio>=-best_ratio:
new_splits.append(split_idx)
seg_idx+=1
elif 0<ratio<=best_ratio:
split_idx, s_ratio=get_s_ratio(new_splits, splits, seg_idx, height, scale_width)
if s_ratio>max_ratio:
new_splits = split_long_silence(new_splits[-1], split_idx, new_splits, scale_width, scale_height)
else:
next_ratio = get_next_ratio(new_splits, splits, seg_idx, height, scale_width)
if next_ratio > max_ratio:
new_splits.append(split_idx)
else:
new_splits.append(old_split_idx)
seg_idx+=1
elif best_ratio<ratio<=max_ratio:
split_idx, s_ratio=get_s_ratio(new_splits, splits, seg_idx, height, scale_width)
if s_ratio<=max_ratio:
next_ratio = get_next_ratio(new_splits, splits, seg_idx, height, scale_width)
if next_ratio > max_ratio:
new_splits.append(split_idx)
else:
new_splits.append(old_split_idx)
elif s_ratio<1+max_ratio:
length = np.round(height*(1+max_ratio)/scale_width)
split_idx = int(new_splits[-1]+length)
new_splits.append(split_idx)
else:
new_splits.append(old_split_idx)
new_splits = split_long_silence(new_splits[-1], split_idx, new_splits, scale_width, scale_height)
seg_idx+=1
elif -max_ratio<=ratio<-best_ratio:
split_idx, s_ratio = get_s_ratio(new_splits, splits, seg_idx, height, scale_width)
if s_ratio>max_ratio:
new_splits = split_long_silence(new_splits[-1], split_idx, new_splits, scale_width, scale_height)
elif s_ratio>=0:
next_ratio = get_next_ratio(new_splits, splits, seg_idx, height, scale_width)
if next_ratio > max_ratio:
new_splits.append(split_idx)
elif s_ratio<=best_ratio:
new_splits.append(split_idx)
else:
length = np.round(height * (1 + best_ratio) / scale_width)
split_idx = int(new_splits[-1] + length)
new_splits.append(split_idx)
elif s_ratio>=-best_ratio:
new_splits.append(split_idx)
seg_idx+=1
else:
seg_idx+=1
# process the last note
split_idx = min(int(np.round(splits[seg_idx][1] + 2*hop_length)), audio_length-1)
    width = np.round((split_idx - new_splits[-1]) * scale_width)
"""
That's where the default shortcuts for imas2tofu are defined
Shortcuts allow you (via the class MulTiIDSLoader) to define short str
for some important data contained in IMAS ids
They are stored as a large dict, with:
{ids: {
shortcut1: long_version1,
...
shortcutN: long_version1}
}
There is a default copy of this file in the tofu install, but each user can
(re)define his/her own shortcuts using a local copy that will take precedence
at import time.
To customize your tofu install with user-specific parameters, run in terminal:
tofu-custom
This will create a .tofu/ in your home (~) with a local copy that you can edit
To see the shortcuts available from your running ipython console do:
> import tofu as tf
> tf.imas2tofu.MultiIDSLoader.get_shortcuts()
Available since tofu 1.4.3
"""
import numpy as np
# ############################################################################
#
# General imas2tofu parameters
#
# ############################################################################
# public imas user (used for checking if can be saved)
_IMAS_USER_PUBLIC = 'imas_public'
# generic imas parameters dict
_IMAS_DIDD = {
'shot': 0,
'run': 0,
'refshot': -1,
'refrun': -1,
'user': _IMAS_USER_PUBLIC,
'tokamak': 'west',
'version': '3',
}
_T0 = False
# ############################################################################
#
# shortcuts for imas2tofu interface (MultiIDSLoader class)
#
# ############################################################################
_dshort = {
'wall': {
'wallR': {'str': 'description_2d[0].limiter.unit[0].outline.r',
'units': 'm'},
'wallZ': {'str': 'description_2d[0].limiter.unit[0].outline.z',
'units': 'm'}},
'pulse_schedule': {
'events_times': {'str': 'event[].time_stamp',
'units': 's'},
'events_names': {'str': 'event[].identifier'}},
'equilibrium': {
't': {'str': 'time', 'units': 's'},
'ip': {'str': 'time_slice[time].global_quantities.ip',
'dim': 'current', 'quant': 'Ip', 'units': 'A'},
'q0': {'str': 'time_slice[time].global_quantities.q_axis',
'units': '-'},
'qmin': {'str': 'time_slice[time].global_quantities.q_min.value',
'units': '-'},
'q95': {'str': 'time_slice[time].global_quantities.q_95',
'units': '-'},
'volume': {'str': 'time_slice[time].global_quantities.volume',
'dim': 'volume', 'quant': 'pvol', 'units': 'm^3'},
'psiaxis': {'str': 'time_slice[time].global_quantities.psi_axis',
'dim': 'B flux', 'quant': 'psi', 'units': 'Wb'},
'psisep': {'str': 'time_slice[time].global_quantities.psi_boundary',
'dim': 'B flux', 'quant': 'psi', 'units': 'Wb'},
'BT0': {'str': ('time_slice[time].global_quantities'
+ '.magnetic_axis.b_field_tor'),
'dim': 'B', 'quant': 'BT', 'units': 'T'},
'axR': {'str': 'time_slice[time].global_quantities.magnetic_axis.r',
'dim': 'distance', 'quant': 'R', 'units': 'm'},
'axZ': {'str': 'time_slice[time].global_quantities.magnetic_axis.z',
'dim': 'distance', 'quant': 'Z', 'units': 'm'},
'x0R': {'str': 'time_slice[time].boundary.x_point[0].r', 'units': 'm'},
'x0Z': {'str': 'time_slice[time].boundary.x_point[0].z', 'units': 'm'},
'x1R': {'str': 'time_slice[time].boundary.x_point[1].r', 'units': 'm'},
'x1Z': {'str': 'time_slice[time].boundary.x_point[1].z', 'units': 'm'},
'strike0R': {'str': 'time_slice[time].boundary.strike_point[0].r',
'units': 'm'},
'strike0Z': {'str': 'time_slice[time].boundary.strike_point[0].z',
'units': 'm'},
'strike1R': {'str': 'time_slice[time].boundary.strike_point[1].r',
'units': 'm'},
'strike1Z': {'str': 'time_slice[time].boundary.strike_point[1].z',
'units': 'm'},
'sepR': {'str': 'time_slice[time].boundary_separatrix.outline.r',
'units': 'm'},
'sepZ': {'str': 'time_slice[time].boundary_separatrix.outline.z',
'units': 'm'},
'1drhotn': {'str': 'time_slice[time].profiles_1d.rho_tor_norm',
'dim': 'rho', 'quant': 'rhotn', 'units': '-'},
'1dphi': {'str': 'time_slice[time].profiles_1d.phi',
'dim': 'B flux', 'quant': 'phi', 'units': 'Wb'},
'1dpsi': {'str': 'time_slice[time].profiles_1d.psi',
'dim': 'B flux', 'quant': 'psi', 'units': 'Wb'},
'1dq': {'str': 'time_slice[time].profiles_1d.q',
'dim': 'safety factor', 'quant': 'q', 'units': '-'},
'1dpe': {'str': 'time_slice[time].profiles_1d.pressure',
'dim': 'pressure', 'quant': 'pe', 'units': 'Pa'},
'1djT': {'str': 'time_slice[time].profiles_1d.j_tor',
'dim': 'vol. current dens.', 'quant': 'jT',
'units': 'A.m^-2'},
'2dphi': {'str': 'time_slice[time].ggd[0].phi[0].values',
'dim': 'B flux', 'quant': 'phi', 'units': 'Wb'},
'2dpsi': {'str': 'time_slice[time].ggd[0].psi[0].values',
'dim': 'B flux', 'quant': 'psi', 'units': 'Wb'},
'2djT': {'str': 'time_slice[time].ggd[0].j_tor[0].values',
'dim': 'vol. current dens.', 'quant': 'jT',
'units': 'A.m^-2'},
'2dBR': {'str': 'time_slice[time].ggd[0].b_field_r[0].values',
'dim': 'B', 'quant': 'BR', 'units': 'T'},
'2dBT': {'str': 'time_slice[time].ggd[0].b_field_tor[0].values',
'dim': 'B', 'quant': 'BT', 'units': 'T'},
'2dBZ': {'str': 'time_slice[time].ggd[0].b_field_z[0].values',
'dim': 'B', 'quant': 'BZ', 'units': 'T'},
'2dmeshNodes': {'str': ('grids_ggd[0].grid[0].space[0]'
+ '.objects_per_dimension[0]'
+ '.object[].geometry'),
'units': 'mixed'},
'2dmeshFaces': {'str': ('grids_ggd[0].grid[0].space[0]'
+ '.objects_per_dimension[2]'
+ '.object[].nodes')},
'2dmeshR': {'str': 'time_slice[0].profiles_2d[0].r', 'units': 'm'},
'2dmeshZ': {'str': 'time_slice[0].profiles_2d[0].z', 'units': 'm'}},
'core_profiles': {
't': {'str': 'time', 'units': 's'},
'ip': {'str': 'global_quantities.ip',
'dim': 'current', 'quant': 'Ip', 'units': 'A'},
'vloop': {'str': 'global_quantities.v_loop',
'dim': 'voltage', 'quant': 'Vloop', 'units': 'V'},
'1dTe': {'str': 'profiles_1d[time].electrons.temperature',
'dim': 'temperature', 'quant': 'Te', 'units': 'eV'},
'1dne': {'str': 'profiles_1d[time].electrons.density',
'dim': 'density', 'quant': 'ne', 'units': 'm^-3'},
'1dzeff': {'str': 'profiles_1d[time].zeff',
'dim': 'charge', 'quant': 'zeff', 'units': '-'},
'1dpsi': {'str': 'profiles_1d[time].grid.psi',
'dim': 'B flux', 'quant': 'psi', 'units': 'Wb'},
'1drhotn': {'str': 'profiles_1d[time].grid.rho_tor_norm',
'dim': 'rho', 'quant': 'rhotn', 'units': '-'},
'1drhopn': {'str': 'profiles_1d[time].grid.rho_pol_norm',
'dim': 'rho', 'quant': 'rhopn', 'units': '-'},
'1dnW': {'str': 'profiles_1d[time].ion[identifier.label=W].density',
'dim': 'density', 'quant': 'nI', 'units': 'm^-3'}},
'edge_profiles': {
't': {'str': 'time', 'units': 's'}},
'core_sources': {
't': {'str': 'time', 'units': 's'},
'1dpsi': {'str': ('source[identifier.name=lineradiation]'
+ '.profiles_1d[time].grid.psi'),
'dim': 'B flux', 'quant': 'psi', 'units': 'Wb'},
'1drhotn': {'str': ('source[identifier.name=lineradiation]'
+ '.profiles_1d[time].grid.rho_tor_norm'),
'dim': 'rho', 'quant': 'rhotn', 'units': '-'},
'1dbrem': {'str': ('source[identifier.name=bremsstrahlung]'
+ '.profiles_1d[time].electrons.energy'),
'dim': 'vol.emis.', 'quant': 'brem.', 'units': 'W.m^-3'},
'1dline': {'str': ('source[identifier.name=lineradiation]'
+ '.profiles_1d[time].electrons.energy'),
'dim': 'vol. emis.', 'quant': 'lines', 'units': 'W.m^-3'}},
'edge_sources': {
't': {'str': 'time', 'units': 's'},
'2dmeshNodes': {'str': ('grid_ggd[0].space[0].objects_per_dimension[0]'
+ '.object[].geometry'),
'units': 'mixed'},
'2dmeshFaces': {'str': ('grid_ggd[0].space[0].objects_per_dimension[2]'
+ '.object[].nodes')},
'2dradiation': {'str': 'source[13].ggd[0].electrons.energy[0].values',
'dim': 'vol. emis.', 'quant': 'vol.emis.',
'name': 'tot. vol. emis.', 'units': 'W.m^-3'}},
'lh_antennas': {
't': {'str': 'antenna[chan].power_launched.time', 'units': 's'},
'power0': {'str': 'antenna[0].power_launched.data',
'dim': 'power', 'quant': 'lh power', 'units': 'W',
'pos': True},
'power1': {'str': 'antenna[1].power_launched.data',
'dim': 'power', 'quant': 'lh power', 'units': 'W',
'pos': True},
'power': {'str': 'antenna[chan].power_launched.data',
'dim': 'power', 'quant': 'lh power', 'units': 'W',
'pos': True},
'R': {'str': 'antenna[chan].position.r.data',
'dim': 'distance', 'quant': 'R', 'units': 'm'}},
'ic_antennas': {
't': {'str': 'antenna[chan].module[0].power_forward.time',
'units': 's'},
'power0mod_fwd': {'str': 'antenna[0].module[].power_forward.data',
'dim': 'power', 'quant': 'ic power', 'units': 'W'},
'power0mod_reflect': {'str': ('antenna[0].module[]'
+ '.power_reflected.data'),
'dim': 'power', 'quant': 'ic power',
'units': 'W'},
'power1mod_fwd': {'str': 'antenna[1].module[].power_forward.data',
'dim': 'power', 'quant': 'ic power', 'units': 'W'},
'power1mod_reflect': {'str': ('antenna[1].module[]'
+ '.power_reflected.data'),
'dim': 'power', 'quant': 'ic power',
'units': 'W'},
'power2mod_fwd': {'str': 'antenna[2].module[].power_forward.data',
'dim': 'power', 'quant': 'ic power', 'units': 'W'},
'power2mod_reflect': {'str': ('antenna[2].module[]'
+ '.power_reflected.data'),
'dim': 'power', 'quant': 'ic power',
'units': 'W'}},
'magnetics': {
't': {'str': 'time', 'units': 's'},
'ip': {'str': 'method[0].ip.data', 'units': 'A'},
'diamagflux': {'str': 'method[0].diamagnetic_flux.data',
'units': 'Wb'},
'bpol_B': {'str': 'bpol_probe[chan].field.data',
'dim': 'B', 'quant': 'Bpol', 'units': 'T'},
'bpol_name': {'str': 'bpol_probe[chan].name'},
'bpol_R': {'str': 'bpol_probe[chan].position.r',
'dim': 'distance', 'quant': 'R', 'units': 'm'},
'bpol_Z': {'str': 'bpol_probe[chan].position.z',
'dim': 'distance', 'quant': 'Z', 'units': 'm'},
'bpol_angpol': {'str': 'bpol_probe[chan].poloidal_angle',
'dim': 'angle', 'quant': 'angle_pol', 'units': 'rad'},
'bpol_angtor': {'str': 'bpol_probe[chan].toroidal_angle',
'dim': 'angle', 'quant': 'angle_tor', 'units': 'rad'},
'floop_flux': {'str': 'flux_loop[chan].flux.data',
'dim': 'B flux', 'quant': 'B flux', 'units': 'Wb'},
'floop_name': {'str': 'flux_loop[chan].name'},
'floop_R': {'str': 'flux_loop[chan].position.r',
'dim': 'distance', 'quant': 'R', 'units': 'm'},
'floop_Z': {'str': 'flux_loop[chan].position.z',
'dim': 'distance', 'quant': 'Z', 'units': 'm'}},
'barometry': {
't': {'str': 'gauge[chan].pressure.time', 'units': 's'},
'names': {'str': 'gauge[chan].name'},
'p': {'str': 'gauge[chan].pressure.data',
'dim': 'pressure', 'quant': 'p', 'units': 'Pa'}},
'calorimetry': {
't': {'str': 'group[chan].component[0].power.time', 'units': 's'},
'names': {'str': 'group[chan].name'},
'power': {'str': 'group[chan].component[0].power.data',
'dim': 'power', 'quant': 'extracted power',
'units': 'W'}},
'neutron_diagnostic': {
't': {'str': 'time', 'units': 's'},
'flux_total': {'str': 'synthetic_signals.total_neutron_flux',
'dim': 'particle flux', 'quant': 'particle flux',
'units': 's^-1'}},
'ece': {
't': {'str': 'time',
'quant': 't', 'units': 's'},
'freq': {'str': 'channel[chan].frequency.data',
'dim': 'freq', 'quant': 'freq', 'units': 'Hz'},
'Te': {'str': 'channel[chan].t_e.data',
'dim': 'temperature', 'quant': 'Te', 'units': 'eV'},
'R': {'str': 'channel[chan].position.r',
'dim': 'distance', 'quant': 'R', 'units': 'm'},
'rhotn': {'str': 'channel[chan].position.rho_tor_norm',
'dim': 'rho', 'quant': 'rhotn', 'units': '-'},
'theta': {'str': 'channel[chan].position.theta',
'dim': 'angle', 'quant': 'theta', 'units': 'rad'},
'tau1keV': {'str': 'channel[chan].optical_depth.data',
'dim': 'optical_depth', 'quant': 'tau', 'units': '-'},
'validity_timed': {'str': 'channel[chan].t_e.validity_timed'},
'names': {'str': 'channel[chan].name'},
'Te0': {'str': 't_e_central.data',
'dim': 'temperature', 'quant': 'Te', 'units': 'eV'}},
'reflectometer_profile': {
't': {'str': 'time', 'units': 's'},
'ne': {'str': 'channel[chan].n_e.data',
'dim': 'density', 'quant': 'ne', 'units': 'm^-3'},
'R': {'str': 'channel[chan].position.r',
'dim': 'distance', 'quant': 'R', 'units': 'm'},
'Z': {'str': 'channel[chan].position.z',
'dim': 'distance', 'quant': 'Z', 'units': 'm'},
'phi': {'str': 'channel[chan].position.phi',
'dim': 'angle', 'quant': 'phi', 'units': 'rad'},
'names': {'str': 'channel[chan].name'},
'mode': {'str': 'mode'},
'sweep': {'str': 'sweep_time'}},
'interferometer': {
't': {'str': 'time',
'quant': 't', 'units': 's'},
'names': {'str': 'channel[chan].name'},
'ne_integ': {'str': 'channel[chan].n_e_line.data',
'dim': 'ne_integ', 'quant': 'ne_integ',
'units': 'm^-2', 'Brightness': True}},
'polarimeter': {
't': {'str': 'time',
'quant': 't', 'units': 's'},
'lamb': {'str': 'channel[chan].wavelength',
'dim': 'distance', 'quant': 'wavelength',
'units': 'm'},
'fangle': {'str': 'channel[chan].faraday_angle.data',
'dim': 'angle', 'quant': 'faraday angle',
'units': 'rad', 'Brightness': True},
'names': {'str': 'channel[chan].name'}},
'bolometer': {
't': {'str': 'channel[chan].power.time',
'quant': 't', 'units': 's'},
'power': {'str': 'channel[chan].power.data',
'dim': 'power', 'quant': 'power radiative',
'units': 'W', 'Brightness': False},
'etendue': {'str': 'channel[chan].etendue',
'dim': 'etendue', 'quant': 'etendue',
'units': 'm^2.sr'},
'names': {'str': 'channel[chan].name'},
'tpower': {'str': 'time', 'quant': 't', 'units': 's'},
'prad': {'str': 'power_radiated_total',
'dim': 'power', 'quant': 'power radiative',
'units': 'W'},
'pradbulk': {'str': 'power_radiated_inside_lcfs',
'dim': 'power', 'quant': 'power radiative',
'units': 'W'}},
'soft_x_rays': {
't': {'str': 'time',
'quant': 't', 'units': 's'},
'power': {'str': 'channel[chan].power.data',
'dim': 'power', 'quant': 'power radiative',
'units': 'W', 'Brightness': False},
'brightness': {'str': 'channel[chan].brightness.data',
'dim': 'brightness', 'quant': 'brightness',
'units': 'W.m^-2.sr^-1', 'Brightness': True},
'names': {'str': 'channel[chan].name'},
'etendue': {'str': 'channel[chan].etendue',
'dim': 'etendue', 'quant': 'etendue',
'units': 'm^2.sr'}},
'spectrometer_visible': {
't': {'str': ('channel[chan].grating_spectrometer'
+ '.radiance_spectral.time'),
'quant': 't', 'units': 's'},
'spectra': {'str': ('channel[chan].grating_spectrometer'
+ '.radiance_spectral.data'),
'dim': 'radiance_spectral',
'quant': 'radiance_spectral',
'units': '(photons).m^-2.s^-1.sr^-1.m^-1',
'Brightness': True},
'names': {'str': 'channel[chan].name'},
'lamb': {'str': 'channel[chan].grating_spectrometer.wavelengths',
'dim': 'wavelength', 'quant': 'wavelength', 'units': 'm'}},
'bremsstrahlung_visible': {
't': {'str': 'time',
'quant': 't', 'units': 's'},
'radiance': {'str': 'channel[chan].radiance_spectral.data',
'dim': 'radiance_spectral',
'quant': 'radiance_spectral',
'units': '(photons).m^-2.s^-1.sr^-1.m^-1',
'Brightness': True},
'names': {'str': 'channel[chan].name'},
'lamb_up': {'str': 'channel[chan].filter.wavelength_upper',
'units': 'm'},
'lamb_lo': {'str': 'channel[chan].filter.wavelength_lower',
'units': 'm'}}
}
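# Example of what a shortcut resolves to (taken from the dict above, shown here for reference):
#   _dshort['equilibrium']['ip']['str']
#   # -> 'time_slice[time].global_quantities.ip' (plasma current signal in the equilibrium ids)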
# ############################################################################
#
# default data for each ids (not used yet)
#
# ############################################################################
_didsdiag = {
'lh_antennas': {'datacls': 'DataCam1D',
'geomcls': False,
'sig': {'data': 'power',
't': 't'}},
'ic_antennas': {'datacls': 'DataCam1D',
'geomcls': False,
'sig': {'data': 'power',
't': 't'}},
'magnetics': {'datacls': 'DataCam1D',
'geomcls': False,
'sig': {'data': 'bpol_B',
't': 't'}},
'barometry': {'datacls': 'DataCam1D',
'geomcls': False,
'sig': {'data': 'p',
't': 't'}},
'calorimetry': {'datacls': 'DataCam1D',
'geomcls': False,
'sig': {'data': 'power',
't': 't'}},
'ece': {'datacls': 'DataCam1D',
'geomcls': False,
'sig': {'t': 't',
'X': 'rhotn_sign',
'data': 'Te'},
'stack': True},
'neutron_diagnostic': {'datacls': 'DataCam1D',
'geomcls': False,
'sig': {'t': 't',
'data': 'flux_total'}},
'reflectometer_profile': {'datacls': 'DataCam1D',
'geomcls': False,
'sig': {'t': 't',
'X': 'R',
'data': 'ne'}},
'interferometer': {'datacls': 'DataCam1D',
'geomcls': 'CamLOS1D',
'sig': {'t': 't',
'data': 'ne_integ'},
'synth': {'dsynth': {
'quant': 'core_profiles.1dne',
'ref1d': 'core_profiles.1drhotn',
'ref2d': 'equilibrium.2drhotn'},
'dsig': {'core_profiles': ['t'],
'equilibrium': ['t']},
'Brightness': True},
'stack': True},
'polarimeter': {'datacls': 'DataCam1D',
'geomcls': 'CamLOS1D',
'sig': {'t': 't',
'data': 'fangle'},
'synth': {'dsynth': {
'fargs': ['core_profiles.1dne',
'equilibrium.2dBR',
'equilibrium.2dBT',
'equilibrium.2dBZ',
'core_profiles.1drhotn',
'equilibrium.2drhotn']},
'dsig': {'core_profiles': ['t'],
'equilibrium': ['t']},
'Brightness': True},
'stack': True},
'bolometer': {'datacls': 'DataCam1D',
'geomcls': 'CamLOS1D',
'sig': {'t': 't',
'data': 'power'},
'synth': {'dsynth': {
'quant': 'core_sources.1dprad',
'ref1d': 'core_sources.1drhotn',
'ref2d': 'equilibrium.2drhotn'},
'dsig': {'core_sources': ['t'],
'equilibrium': ['t']},
'Brightness': False},
'stack': True},
'soft_x_rays': {'datacls': 'DataCam1D',
'geomcls': 'CamLOS1D',
'sig': {'t': 't',
'data': 'power'},
'stack': True},
'spectrometer_visible': {'datacls': 'DataCam1DSpectral',
'geomcls': 'CamLOS1D',
'sig': {'data': 'spectra',
't': 't',
'lamb': 'lamb'}},
'bremsstrahlung_visible': {
'datacls': 'DataCam1D',
'geomcls': 'CamLOS1D',
'sig': {'t': 't',
'data': 'radiance'},
'synth': {
'dsynth': {
'quant': ['core_profiles.1dTe',
'core_profiles.1dne',
'core_profiles.1dzeff'],
'ref1d': 'core_profiles.1drhotn',
'ref2d': 'equilibrium.2drhotn'},
'dsig': {'core_profiles': ['t'],
'equilibrium': ['t']},
'Brightness': True},
'stack': True}
}
# ############################################################################
#
# Complete dshort and didsdiag
#
# ############################################################################
_lidsdiag = sorted([kk for kk, vv in _didsdiag.items() if 'sig' in vv.keys()])
_lidslos = list(_lidsdiag)
for ids_ in _lidsdiag:
if _didsdiag[ids_]['geomcls'] not in ['CamLOS1D']:
_lidslos.remove(ids_)
for ids in _lidslos:
dlos = {}
strlos = 'line_of_sight'
if ids == 'reflectometer_profile':
strlos += '_detection'
dlos['los_pt1R'] = {
'str': 'channel[chan].{}.first_point.r'.format(strlos),
'units': 'm'}
dlos['los_pt1Z'] = {
'str': 'channel[chan].{}.first_point.z'.format(strlos),
'units': 'm'}
dlos['los_pt1Phi'] = {
'str': 'channel[chan].{}.first_point.phi'.format(strlos),
'units': 'rad'}
dlos['los_pt2R'] = {
'str': 'channel[chan].{}.second_point.r'.format(strlos),
'units': 'm'}
dlos['los_pt2Z'] = {
'str': 'channel[chan].{}.second_point.z'.format(strlos),
'units': 'm'}
dlos['los_pt2Phi'] = {
'str': 'channel[chan].{}.second_point.phi'.format(strlos),
'units': 'rad'}
_dshort[ids].update(dlos)
_lidssynth = sorted([kk for kk, vv in _didsdiag.items()
if 'synth' in vv.keys()])
for ids_ in _lidssynth:
for kk, vv in _didsdiag[ids_]['synth']['dsynth'].items():
if type(vv) is str:
vv = [vv]
for ii in range(0, len(vv)):
v0, v1 = vv[ii].split('.')
if v0 not in _didsdiag[ids_]['synth']['dsig'].keys():
_didsdiag[ids_]['synth']['dsig'][v0] = [v1]
elif v1 not in _didsdiag[ids_]['synth']['dsig'][v0]:
_didsdiag[ids_]['synth']['dsig'][v0].append(v1)
_didsdiag[ids_]['synth']['dsynth'][kk] = vv
# ############################################################################
#
# Dict for computing signals from loaded signals
#
# ############################################################################
# -------------
# Functions
def _events(names, t):
ustr = 'U{}'.format(np.nanmax(np.char.str_len(np.char.strip(names))))
return np.array([(nn, tt)
for nn, tt in zip(*[np.char.strip(names), t])],
                    dtype=[('name', ustr), ('t', float)])
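# Example output of _events (illustrative values):
#   _events(np.array(['Ignitron', 'VDE ']), np.array([32.0, 38.5]))
#   # -> array([('Ignitron', 32. ), ('VDE', 38.5)], dtype=[('name', '<U8'), ('t', '<f8')])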
def _RZ2array(ptsR, ptsZ):
out = np.array([ptsR, ptsZ]).T
if out.ndim == 1:
out = out[None, :]
return out
def _losptsRZP(*pt12RZP):
return np.swapaxes([pt12RZP[:3], pt12RZP[3:]], 0, 1).T
def _add(a0, a1):
return np.abs(a0 + a1)
def _eqB(BT, BR, BZ):
return np.sqrt(BT**2 + BR**2 + BZ**2)
def _icmod(al, ar, axis=0):
return np.sum(al - ar, axis=axis)
def _icmodadd(al0, ar0, al1, ar1, al2, ar2, axis=0):
return (np.sum(al0 - ar0, axis=axis)
+ np.sum(al1 - ar1, axis=axis)
+ np.sum(al2 - ar2, axis=axis))
def _rhopn1d(psi):
return np.sqrt((psi - psi[:, 0:1]) / (psi[:, -1] - psi[:, 0])[:, None])
def _rhopn2d(psi, psi0, psisep):
return np.sqrt(
(psi - psi0[:, None]) / (psisep[:, None] - psi0[:, None]))
def _rhotn2d(phi):
return np.sqrt(np.abs(phi) / np.nanmax(np.abs(phi), axis=1)[:, None])
def _eqSep(sepR, sepZ, npts=100):
nt = len(sepR)
assert len(sepZ) == nt
sep = np.full((nt, npts, 2), np.nan)
pts = np.linspace(0, 100, npts)
for ii in range(0, nt):
ptsii = np.linspace(0, 100, sepR[ii].size)
sep[ii, :, 0] = np.interp(pts, ptsii, sepR[ii])
sep[ii, :, 1] = np.interp(pts, ptsii, sepZ[ii])
return sep
def _eqtheta(axR, axZ, nodes, cocos=11):
theta = np.arctan2(nodes[:, 0][None, :] - axZ[:, None],
nodes[:, 1][None, :] - axR[:, None])
if cocos == 1:
theta = -theta
return theta
def _rhosign(rho, theta):
if isinstance(theta, np.ndarray):
rhotns = np.array(rho)
        ind = ~np.isnan(theta)
import torch
import numpy as np
import os
from datetime import datetime
import data
import metrics
import plots
from pytorch_spa_nn import SpaNn
torch.set_printoptions(linewidth=180, precision=8, edgeitems=12)
np.set_printoptions(linewidth=180)
def continue_training_from_checkpoint(checkpoint, max_epochs, new_checkpoint_path, device):
spa_nn_continued = SpaNn(checkpoint["network_architecture"])
spa_nn_continued.load_state_dict(checkpoint['model_state_dict'])
spa_nn_continued_on_device = spa_nn_continued.to(device)
optimizer_continued = torch.optim.RMSprop(
params=spa_nn_continued_on_device.parameters(),
lr=0.001,
alpha=0.99,
eps=1e-08,
weight_decay=0,
momentum=0,
centered=False)
optimizer_continued.load_state_dict(checkpoint['optimizer_state_dict'])
train(spa_nn_continued_on_device,
optimizer_continued,
new_checkpoint_path,
device=device,
start_epoch=checkpoint["epoch"] + 1,
max_epochs=max_epochs,
codewords_in_dataset_train=checkpoint["codewords_in_dataset_train"],
batch_size_train=checkpoint["batch_size_train"],
snr_range_train=checkpoint["snr_range_train"],
snr_range_validation=checkpoint["snr_range_validation"],
codewords_per_snr_validation=checkpoint["codewords_per_snr_validation"],
dataset_state_train=checkpoint["dataset_state_train"],
interval_for_creating_checkpoint=checkpoint["interval_for_creating_checkpoint"],
continue_from_checkpoint=True,
checkpoint=checkpoint,
use_all_zero_codeword_only_train=checkpoint["use_all_zero_codeword_only_train"],
use_all_zero_codeword_only_validation=checkpoint["use_all_zero_codeword_only_validation"])
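# Sketch of resuming a run (the checkpoint path is a placeholder, not a file from this repo):
#   checkpoint = torch.load('checkpoints/spa_nn_epoch_200.pt', map_location='cpu')
#   continue_training_from_checkpoint(checkpoint, max_epochs=400,
#                                     new_checkpoint_path='checkpoints/spa_nn_resumed.pt',
#                                     device='cpu')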
def train(spa_nn,
optimizer,
checkpoint_path,
device="cpu",
start_epoch=0,
max_epochs=200,
codewords_in_dataset_train=4,
batch_size_train=2,
snr_range_train=np.array([2]),
snr_range_validation=np.array([2]),
codewords_per_snr_validation=500,
dataset_state_train="fixed",
interval_for_creating_checkpoint=20,
continue_from_checkpoint=False,
checkpoint={},
use_all_zero_codeword_only_train=True,
use_all_zero_codeword_only_validation=True):
"""
Trains the given initialized spa_nn.
:param spa_nn: an initialized neuronal network from the SpaNn class, can be either fnn, rnn or untrainable spa
:param optimizer: a torch.optim optimizer usually RMSProp
:param checkpoint_path: path where regular checkpoints of the neural network training status are stored,
the checkpoint will also be used for plots and evaluation after training, checkpoints
can also be used to resume training from
:param device: either cpu or gpu
    :param start_epoch: set to 0 by default; it will be changed if training is resumed from a checkpoint
:param max_epochs: last training epoch, normally in the range of [300, ..., 800] for the example used in the thesis
:param codewords_in_dataset_train: number of codewords that the neural network will train on each epoch
:param batch_size_train: number of codewords after that an optimizer.step() is performed
:param snr_range_train: a np.array with signal-to-noise ratio values in dB between [-2, ..., 4]
:param snr_range_validation: should be set to the same range as the snr_range_train to check for overfitting, can
also be set to np.arange(-5, 8.5, 0.5) to match the realistic setup used in the test
dataset.
:param codewords_per_snr_validation: using 500 codewords per snr in validation dataset provided robust results, be
aware that your validation dataset size will be
snr_range_validation.shape[0] * codewords_per_snr_validation,
picking a wide snr range in the validation set will slow down training
:param dataset_state_train: choose between "fixed" and "otf" (on the fly generated), otf will generate a new
training dataset each epoch, this will slow down training but produce a well trained
network after 100-200 epochs. The training loss will fluctuate a lot, which is expected
behaviour, if you want to check if the network is converging you need to take the
validation dataset loss as reference.
:param interval_for_creating_checkpoint: determine after how many epoch you want to create a backup for the state
of your network
:param continue_from_checkpoint: set to True if training is to be continued, set False if you are starting anew
:param checkpoint: this parameter is initialized with {}, it is only relevant if training is continued from a
checkpoint
:param use_all_zero_codeword_only_train: set to True for using the all zero codeword + noise, set to false if you
want to train on randomly generated codewords + noise
:param use_all_zero_codeword_only_validation: set to True for using the all zero codeword + noise, set to false
for randomly generated codewords + noise
"""
# generate datasets
if dataset_state_train == "fixed":
dataset_train = data.DataSet(batch_size=batch_size_train,
number_of_codewords=codewords_in_dataset_train,
use_all_zero_codeword_only=use_all_zero_codeword_only_train,
snr=snr_range_train,
noise_seed=11)
input_llr_train, target_train = dataset_train.generate_data_set(
codewords_per_snr_in_batch=dataset_train.codewords_per_snr_in_batch)
x_train = torch.from_numpy(input_llr_train)
x_train = x_train.to(device)
y_train = torch.from_numpy(target_train).type(torch.int64)
y_train = y_train.to(device)
spa_BLER_per_snr_train = None # todo
spa_BER_per_snr_train = metrics.bers_per_snr_classic_spa(
input_llr=np.transpose(input_llr_train),
target=np.transpose(target_train),
codewords_per_snr_in_batch=dataset_train.codewords_per_snr_in_batch,
batch_size=batch_size_train)
# validation dataset
codewords_in_dataset_validation = snr_range_validation.size * codewords_per_snr_validation
dataset_validation = data.DataSet(
number_of_codewords=codewords_in_dataset_validation,
batch_size=codewords_in_dataset_validation,
use_all_zero_codeword_only=use_all_zero_codeword_only_validation,
snr=snr_range_validation,
codeword_seed=4,
noise_seed=5,
)
input_llr_validation, target_validation = dataset_validation.generate_data_set(
codewords_per_snr_in_batch=dataset_validation.codewords_per_snr_in_batch)
x_validation = torch.from_numpy(input_llr_validation)
x_validation = x_validation.to(device)
y_validation = torch.from_numpy(target_validation).type(torch.int64)
y_validation = y_validation.to(device)
spa_BLER_per_snr_validation = None # todo
spa_BER_per_snr_validation = metrics.bers_per_snr_classic_spa(
        input_llr=np.transpose(input_llr_validation),
        # (assumed) the remaining arguments mirror the training-set call above
        target=np.transpose(target_validation),
        codewords_per_snr_in_batch=dataset_validation.codewords_per_snr_in_batch,
        batch_size=codewords_in_dataset_validation)
# Authors: <NAME> <<EMAIL>>
# License: BSD 3 clause
from numpy import array, ones_like, arange
from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_, assert_equal
from commpy.channelcoding.gfields import GF
class TestGaloisFields(object):
def test_closure(self):
for m in arange(1, 9):
x = GF(arange(2**m), m)
for a in x.elements:
for b in x.elements:
                    # (assumed completion of the truncated line) closure under + and * in GF(2^m)
                    assert_((GF(array([a]), m) + GF(array([b]), m)).elements[0] in x.elements)
                    assert_((GF(array([a]), m) * GF(array([b]), m)).elements[0] in x.elements)
"""
Load simulated data from the Heston-SLV model described in the paper.
"""
# Copyright 2021 <NAME>.
# Affiliation: Mathematical Institute, University of Oxford
# Email: <EMAIL>
import numpy as np
import pickle
from arbitragerepair import constraints
class DataHestonSlv(object):
"""
Data object for data generated from Heston-SLV models.
"""
def __init__(self, list_exp, list_mny, St, vt, r,
v0, theta, kappa, sigma, rho,
leverage_range_exp, leverage_range_stk, leverage_value,
Cs_hestonSLV, ivs_hestonSLV, Cs_heston_SLV_ts):
# simulated trajectory
self.list_exp = list_exp
self.list_mny = list_mny
self.Cs_heston_SLV_ts = Cs_heston_SLV_ts
self.St = St
self.vt = vt
self.r = r
# calibrated Heston model and data
self.heston_v0 = v0
self.heston_theta = theta
self.heston_kappa = kappa
self.heston_sigma = sigma
self.heston_rho = rho
# calibrated leverage function
self.leverage_range_exp = leverage_range_exp
self.leverage_range_stk = leverage_range_stk
self.leverage_value = leverage_value
# Heston SLV initial data
self.hestonSLV_Cs = Cs_hestonSLV # call price surface
self.hestonSLV_ivs = ivs_hestonSLV # implied vol surface
# Heston SLV simulated data
self.Cs_heston_SLV_ts = Cs_heston_SLV_ts
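# These objects are serialized with pickle; a typical round-trip looks like the sketch below
# (the file name is a placeholder, and the unpacking order follows the docstring of
# load_hestonslv_data defined next):
#   with open('hestonslv_sim.pkl', 'wb') as f:
#       pickle.dump(data_obj, f)
#   St, vt, list_exp, list_mny, cs_ts_raw, cs_ts, mask_quality_value, Ts, ks, mat_A, vec_b = \
#       load_hestonslv_data('hestonslv_sim.pkl')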
def load_hestonslv_data(fname):
"""
Load data objects for data generated from Heston-SLV models.
Parameters
----------
fname: string
Path of the pickled data object.
Returns
-------
St: numpy.array, 1D, shape = (L+1, )
Time series of underlying stock price. L is a positive integer.
vt: numpy.array, 1D, shape = (L+1, )
Time series of Heston-SLV instantaneous variance.
list_exp: numpy.array, 1D, shape = (n_expiry, )
List of time-to-expiries (number of days).
list_mny: numpy.array, 1D, shape = (n_mny, )
List of moneynesses (relative moneyness).
cs_ts_raw: numpy.array, 2D, shape = (L+1, N)
Time series of normalised call price surfaces.
N is the number of options and N = n_mny x n_exp
cs_ts: numpy.array, 2D, shape = (L+1, n_opt)
Time series of normalised call price surfaces, where small values
(<= 1e-5) are truncated. N is the number of options.
mask_quality_value: numpy.array, 1D, shape = (N, )
Boolean logical mask of qualified values.
Ts: numpy.array, 1D, shape = (n_opt, )
List of time-to-expiries corresponding to the n_opt options.
ks: numpy.array, 1D, shape = (n_opt, )
List of moneynesses (relative moneyness) corresponding to the n_opt
options.
mat_A: numpy.array, 2D, shape = (R, n_opt)
Coefficient matrix. R is the number of constraints.
vec_b: numpy.array, 1D, shape = (R, )
Vector of constant terms. R is the number of constraints.
"""
# load
infile = open(fname, 'rb')
data_cache = pickle.load(infile)
infile.close()
# retrieve useful info
Cs_heston_SLV_ts = data_cache.Cs_heston_SLV_ts
list_exp = np.array(data_cache.list_exp)
list_mny = np.array(data_cache.list_mny)
ks, Ts = np.meshgrid(list_mny, list_exp)
ks = ks.flatten()
Ts = Ts.flatten() / 365.
St = np.array(data_cache.St) # underlying stock price
vt = np.array(data_cache.vt) # instantaneous variance
# normalise call option prices
    cs_ts_raw = np.array(Cs_heston_SLV_ts) / np.array(St)[:, None]  # (assumed) broadcast the spot price over the option axis
""" Utilities for isgm
Best to keep these separate from the Class modules
"""
from __future__ import print_function, absolute_import, division, unicode_literals
# Python 2 & 3 compatibility
try:
basestring
except NameError:
basestring = str
import pdb
import numpy as np
import warnings
from astropy import constants as const
from astropy import units as u
from astropy.table import Table, QTable
from astropy.units import Quantity
from astropy.coordinates import SkyCoord
from linetools.analysis import absline as ltaa
from linetools.isgm.abscomponent import AbsComponent
from linetools.spectralline import init_analy
from linetools.abund.ions import name_to_ion, ion_to_name
from linetools import utils as ltu
from linetools.lists.linelist import LineList
def chk_components(components, chk_match=False, chk_A_none=False, tol=0.2*u.arcsec):
""" Performs checks on a list of components
Parameters
----------
components : list
list of AbsComponent objects
chk_match : bool, optional
if True, require that the components match in RA/DEC, Zion, Ej, A but not velocity
chk_A_none : bool, optional
if True, require that A *not* be set
tol : Quantity, optional
Tolerance on matching SkyCoord. Default is 0.2*u.arcsec
"""
tests = True
# List
if not isinstance(components, list):
tests = False
raise IOError('Need a list of AbsComponent objects')
# Object
if not all(isinstance(x, AbsComponent) for x in components):
tests = False
raise IOError('List needs to contain only AbsComponent objects')
# A None
if chk_A_none:
if not all(x.A is None for x in components):
tests = False
raise IOError('Not ready for components with A set')
# Matching?
if chk_match:
match = True
comp0 = components[0]
for comp in components[1:]:
# RA/DEC
match = match & bool(comp0.coord.separation(comp.coord) < tol)
# Zion
match = match & (comp0.Zion == comp.Zion)
# Ej
match = match & np.allclose(comp0.Ej.to('1/cm').value,comp.Ej.to('1/cm').value)
# A
match = match & (comp0.A == comp.A)
tests = tests & match
# Return
return tests
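# Minimal usage sketch (comp1/comp2 stand for AbsComponent instances built elsewhere):
#   ok = chk_components([comp1, comp2], chk_match=True, tol=0.5 * u.arcsec)
#   # True only if both components share coordinates (within tol), Zion, Ej and A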
def build_components_from_abslines(iabslines, clmdict=None, coord=None,
**kwargs):
""" Generate a list of AbsComponent from a list of abslines
Groups lines with like Zion, Ej, (and A; future)
Parameters
----------
abslines : list
List of AbsLine objects
May be ignored if clmdict is passed in
clmdict : dict, optional
If present, build the abslines list from this dict
coord : SkyCoord, optional
Required if clmdict is used
Returns
-------
components :
list of AbsComponent objects
"""
if clmdict is None:
abslines = iabslines
else:
raise DeprecationWarning("Gone")
abslines = []
# Test
if not isinstance(abslines,list):
raise IOError('Need a list of AbsLine objects')
# Identify unique Zion, Ej combinations in the lines
uZiE = np.array([iline.data['Z']*1000000+iline.data['ion']*10000+
iline.data['Ej'].to('1/cm').value for iline in abslines])
uniZi, auidx = np.unique(uZiE, return_index=True)
# Loop to build components
components = []
for uidx in auidx:
# Synthesize lines with like Zion, Ej
mtZiE = np.where(uZiE == uZiE[uidx])[0]
lines = [abslines[ii] for ii in mtZiE] # Need a list
# Generate component
if lines[0].data['Ej'].value > 0.:
# Grab stars from transition name
nstars = lines[0].name.count('*')
if nstars == 0:
raise ValueError("Should have at least one *")
stars = '*'*nstars
else:
stars = None
component = AbsComponent.from_abslines(lines, stars=stars, **kwargs)
# Reset vmin, vmax
vmin,vmax = 9999., -9999.
for iline in lines:
vmin = min(vmin, iline.limits.vlim[0].value)
vmax = max(vmax, iline.limits.vlim[1].value)
component.vlim = [vmin,vmax]*u.km/u.s
# Append
components.append(component)
# Return
return components
def build_components_from_dict(idict, coord=None, **kwargs):
""" Generate a list of components from an input dict
Parameters
----------
idict : dict
Must contain either components or lines as a key
coord : SkyCoord, optional
Returns
-------
components :
list of AbsComponent objects
Sorted by zcomp
"""
from linetools.spectralline import AbsLine
components = []
if 'components' in idict.keys():
# Components
for key in idict['components']:
components.append(AbsComponent.from_dict(idict['components'][key], coord=coord, **kwargs))
elif 'lines' in idict.keys(): # to be deprecated
lines = []
for key in idict['lines']:
if isinstance(idict['lines'][key], AbsLine):
line = idict['lines'][key]
elif isinstance(idict['lines'][key], dict):
line = AbsLine.from_dict(idict['lines'][key], coord=coord)
else:
raise IOError("Need those lines")
if coord is not None:
line.attrib['coord'] = coord
lines.append(line)
components = build_components_from_abslines(lines, **kwargs)
else:
warnings.warn("No components in this dict")
# Sort by z -- Deals with dict keys being random
z = [comp.zcomp for comp in components]
isrt = np.argsort(np.array(z))
srt_comps = []
for idx in isrt:
srt_comps.append(components[idx])
# Return
return srt_comps
def build_systems_from_components(comps, systype=None, vsys=None, **kwargs):
""" Build a list of AbsSystems from a list of AbsComponents
Current default implementation allows for overlapping components, i.e.
    overlap_only=True in add_component
Parameters
----------
comps : list
List of AbsComponents
systype : AbsSystem, optional
Defaults to GenericAbsSystem
vsys : Quantity, optional
'Velocity width' of a system, used when adding components
Passed as vtoler to add_component
The first component will define the system redshift and all others will
need to lie within vsys of it
Returns
-------
abs_systems : list
"""
if systype is None:
from linetools.isgm.abssystem import GenericAbsSystem
systype = GenericAbsSystem
if vsys is None:
if 'overlap_only' not in kwargs.keys():
kwargs['overlap_only'] = True
else:
kwargs['vtoler'] = vsys.to('km/s').value
# Add
abs_systems = []
cpy_comps = [comp.copy() for comp in comps]
# Loop until all components assigned
while len(cpy_comps) > 0:
# Use the first one
comp = cpy_comps.pop(0)
abssys = systype.from_components([comp])
# Try the rest
comps_left = []
for icomp in cpy_comps:
if abssys.add_component(icomp, **kwargs):
pass
else:
comps_left.append(icomp)
# Update vlim
abssys.update_vlim()
# Append
abs_systems.append(abssys)
# Save
cpy_comps = comps_left
# Return
return abs_systems
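# Usage sketch (the component list is hypothetical): group components into systems whose
# redshifts lie within 500 km/s of the first component assigned to each system.
#   systems = build_systems_from_components(components, vsys=500. * u.km / u.s)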
def xhtbl_from_components(components, ztbl=None, NHI_obj=None):
""" Generate a Table of XH values from a list of components
Parameters
----------
components
ztbl
NHI_obj
Returns
-------
"""
# Get started
tbl = Table()
#
def complist_from_table(table):
"""
Returns a list of AbsComponents from an input astropy.Table.
Parameters
----------
table : Table
Table with component information (each row must correspond
to a component). Each column is expecting a unit when
appropriate.
Returns
-------
complist : list
List of AbsComponents defined from the input table.
Notes
-----
Mandatory column names: 'RA', 'DEC', 'ion_name', 'z_comp', 'vmin', 'vmax'
These column are required.
Special column names: 'name', 'comment', 'logN', 'sig_logN', 'flag_logN'
These columns will fill internal attributes when corresponding.
In order to fill in the Ntuple attribute all three 'logN', 'sig_logN', 'flag_logN'
must be present. For convenience 'logN' and 'sig_logN' are expected to be floats
corresponding to their values in np.log10(1/cm^2).
Other columns: 'any_column_name'
These will be added as attributes within the AbsComponent.attrib dictionary,
with their respective units if given.
"""
# Convert to QTable to handle units in individual entries more easily
table = QTable(table)
# mandatory and optional columns
min_columns = ['RA', 'DEC', 'ion_name', 'z_comp', 'vmin', 'vmax']
special_columns = ['name', 'comment', 'logN', 'sig_logN', 'flag_logN']
for colname in min_columns:
if colname not in table.keys():
raise IOError('{} is a mandatory column. Please make sure your input table has it.'.format(colname))
#loop over rows
complist = []
for row in table:
# mandatory
coord = SkyCoord(row['RA'].to('deg').value, row['DEC'].to('deg').value, unit='deg') # RA y DEC must both come with units
Zion = name_to_ion(row['ion_name'])
zcomp = row['z_comp']
vlim =[row['vmin'].to('km/s').value, row['vmax'].to('km/s').value] * u.km / u.s # units are expected here too
# special columns
try:
Ntuple = (row['flag_logN'], row['logN'], row['sig_logN']) # no units expected
except KeyError:
Ntuple = None
try:
comment = row['comment']
except KeyError:
comment = ''
try:
name = row['name']
except KeyError:
name = None
# define the component
comp = AbsComponent(coord, Zion, zcomp, vlim, Ntup=Ntuple, comment=comment, name=name)
# other columns will be filled in comp.attrib dict
for colname in table.keys():
if (colname not in special_columns) and (colname not in min_columns):
kms_cols = ['b', 'sig_b']
if colname in kms_cols: # check units for parameters expected in velocity units
try:
val_aux = row[colname].to('km/s').value * u.km / u.s
except u.UnitConversionError:
raise IOError('If `{}` column is present, it must have velocity units.'.format(colname))
comp.attrib[colname] = val_aux
# parameters we do not care about units much
else:
comp.attrib[colname] = row[colname]
# append
complist += [comp]
return complist
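# Minimal input-table sketch (values are illustrative only):
#   tab = Table()
#   tab['RA'] = [10.0] * u.deg
#   tab['DEC'] = [-5.0] * u.deg
#   tab['ion_name'] = ['CIV']
#   tab['z_comp'] = [2.1]
#   tab['vmin'] = [-100.] * u.km / u.s
#   tab['vmax'] = [100.] * u.km / u.s
#   comps = complist_from_table(tab)   # -> list with a single CIV AbsComponent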
def table_from_complist(complist):
"""
Returns a astropy.Table from an input list of AbsComponents. It only
fills in mandatory and special attributes (see notes below).
Information stored in dictionary AbsComp.attrib is ignored.
Parameters
----------
complist : list of AbsComponents
The initial list of AbsComponents to create the QTable from.
Returns
-------
table : Table
Table from the information contained in each component.
Notes
-----
Mandatory columns: 'RA', 'DEC', 'ion_name', 'z_comp', 'vmin', 'vmax'
Special columns: 'name', 'comment', 'logN', 'sig_logN', 'flag_logN'
See also complist_from_table()
"""
tab = Table()
# mandatory columns
tab['RA'] = [comp.coord.ra.to('deg').value for comp in complist] * u.deg
tab['DEC'] = [comp.coord.dec.to('deg').value for comp in complist] * u.deg
ion_names = [] # ion_names
for comp in complist:
if comp.Zion == (-1,-1):
ion_names += ["Molecule"]
else:
ion_names += [ion_to_name(comp.Zion)]
tab['ion_name'] = ion_names
tab['z_comp'] = [comp.zcomp for comp in complist]
tab['vmin'] = [comp.vlim[0].value for comp in complist] * comp.vlim.unit
tab['vmax'] = [comp.vlim[1].value for comp in complist] * comp.vlim.unit
# Special columns
tab['logN'] = [comp.logN for comp in complist]
tab['sig_logN'] = [comp.sig_logN for comp in complist]
tab['flag_logN'] = [comp.flag_N for comp in complist]
tab['comment'] = [comp.comment for comp in complist]
tab['name'] = [comp.name for comp in complist]
tab['reliability'] = [comp.reliability for comp in complist]
return tab
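# Hedged round-trip sketch (illustrative only): the summary table produced by
# table_from_complist() can be inspected column-wise; this assumes every
# component carries the special attributes (logN, sig_logN, flag_N, comment, name).
def _example_table_roundtrip(complist):
    tab = table_from_complist(complist)
    # e.g. look at the column densities of all components at once
    return tab['ion_name', 'z_comp', 'logN', 'sig_logN']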
def iontable_from_components(components, ztbl=None, NHI_obj=None):
"""Generate a Table from a list of components
Method does *not* perform logic on redshifts or vlim.
Includes rules for adding components of like ion
Not ready for varying atomic mass (e.g. Deuterium)
Parameters
----------
components : list
list of AbsComponent objects
ztbl : float, optional
Redshift for the table
NHI_obj : object, optional (with NHI, sig_NHI, flag_NHI attributes)
If provided, fill HI with NHI, sig_NHI, flag_NHI
Returns
-------
iontbl : Table
"""
from collections import OrderedDict
# Checks
assert chk_components(components,chk_A_none=True)
# Set z from mean
if ztbl is None:
ztbl = np.mean([comp.zcomp for comp in components])
# Construct the Table
cols = OrderedDict() # Keeps columns in order
cols['Z']=int
cols['ion']=int
cols['A']=int
cols['Ej']=float
cols['z']=float
cols['vmin']=float
cols['vmax']=float
cols['flag_N']=int
cols['logN']=float
if isinstance(components[0].sig_logN, float):
cols['sig_logN'] = float
elif components[0].sig_logN.size == 2:
cols['sig_logN'] = np.ndarray
else:
raise IOError("Not prepared for this type of sig_logN")
names = cols.keys()
dtypes = [cols[key] for key in names]
iontbl = Table(names=names,dtype=dtypes)
iontbl['Ej'].unit=1./u.cm
iontbl['vmin'].unit=u.km/u.s
iontbl['vmax'].unit=u.km/u.s
# Identify unique Zion, Ej (not ready for A)
uZiE = np.array([comp.Zion[0]*1000000+comp.Zion[1]*10000+
comp.Ej.to('1/cm').value for comp in components])
uniZi, auidx = np.unique(uZiE, return_index=True)
# Loop
for uidx in auidx:
# Synthesize components with like Zion, Ej
mtZiE = np.where(uZiE == uZiE[uidx])[0]
comps = [components[ii] for ii in mtZiE] # Need a list
synth_comp = synthesize_components(comps, zcomp=ztbl)
# Add a row to QTable
row = dict(Z=synth_comp.Zion[0],ion=synth_comp.Zion[1],
z=ztbl,
Ej=synth_comp.Ej,vmin=synth_comp.vlim[0],
vmax=synth_comp.vlim[1],logN=synth_comp.logN,
flag_N=synth_comp.flag_N,sig_logN=synth_comp.sig_logN)
iontbl.add_row(row)
# NHI
if NHI_obj is not None:
# Existing row in Table?
mt = np.where((iontbl['Z'] == 1) & (iontbl['ion']==1))[0]
if len(mt) == 1:
iontbl[mt[0]]['logN'] = NHI_obj.NHI
iontbl[mt[0]]['sig_logN'] = np.mean(NHI_obj.sig_NHI) # Allow for two values
iontbl[mt[0]]['flag_N'] = NHI_obj.flag_NHI
else:
if len(components) > 0:
vmin=synth_comp.vlim[0]
vmax=synth_comp.vlim[1]
else:
vmin = -300*u.km/u.s
vmax = 300*u.km/u.s
#
row = dict(Z=1,ion=1, z=ztbl,
Ej=0./u.cm,vmin=vmin, vmax=vmax, logN=NHI_obj.NHI,
flag_N=NHI_obj.flag_NHI,sig_logN=np.mean(NHI_obj.sig_NHI))
iontbl.add_row(row)
# Return
return iontbl
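# Hedged usage sketch (illustrative): components of like ion are merged into a
# single row per unique (Z, ion, Ej); the HI selection below only shows how one
# ion could be pulled out of the resulting table.
def _example_ion_table(components):
    iontbl = iontable_from_components(components)
    hi_rows = iontbl[(iontbl['Z'] == 1) & (iontbl['ion'] == 1)]
    return iontbl, hi_rows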
def synthesize_components(components, zcomp=None, vbuff=0*u.km/u.s):
"""Synthesize a list of components into one
Requires consistent RA/DEC, Zion, Ej, (A; future)
Is agnostic about z+vlim
Melds column densities
Melds velocity limits, optionally padded by a buffer (vbuff; default 0 km/s)
Note: Could make this a way to instantiate AbsComponent
Parameters
----------
components : list
list of AbsComponent objects
zcomp : float, optional
Input z to reference the synthesized component
If not input, the mean of the input components is used
vbuff : Quantity, optional
Buffer for synthesizing velocities. Deals with round off, c, etc.
"""
# Checks
assert chk_components(components, chk_A_none=True, chk_match=True)
# Init final component
synth_comp = AbsComponent.from_component(components[0], Ntup=(components[0].flag_N, components[0].logN, components[0].sig_logN))
# Meld column densities
for comp in components[1:]:
if comp.flag_N != 0:
synth_comp.flag_N, synth_comp.logN, synth_comp.sig_logN = ltaa.sum_logN(synth_comp, comp)
# Meld z, vlim
# zcomp
if zcomp is None:
zcomp = np.mean([comp.zcomp for comp in components])
synth_comp.zcomp = zcomp
# Set vlim by min/max [Using non-relativistic + buffer]
vmin = u.Quantity([(comp.zcomp-zcomp)/(1+zcomp)*const.c.to('km/s')+comp.vlim[0] for comp in components])
vmax = u.Quantity([(comp.zcomp-zcomp)/(1+zcomp)*const.c.to('km/s')+comp.vlim[1] for comp in components])
synth_comp.vlim = u.Quantity([np.min(vmin)-vbuff, np.max(vmax)+vbuff])
# Return
return synth_comp
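# Hedged sketch (illustrative): synthesizing two components of the same ion.
# The 20 km/s buffer is an arbitrary example value, not a default of the API.
def _example_synthesize(comp_a, comp_b):
    synth = synthesize_components([comp_a, comp_b], vbuff=20 * u.km / u.s)
    # synth.logN holds the summed column density; synth.vlim spans both inputs
    return synth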
def get_components_at_z(complist, z, dvlims):
"""In a given list of AbsComponents, it finds
the ones that are within dvlims from a given redshift
and returns a list of those.
Parameters
----------
complist : list
List of AbsComponents
z : float
Redshift to search for components
dvlims : Quantity array
Rest-frame velocity limits around z
to look for components
Returns
-------
components_at_z : list
List of AbsComponents in complist within dvlims from z
"""
# check input
if not isinstance(complist[0], AbsComponent):
raise IOError('complist must be a list of AbsComponents.')
if len(dvlims) != 2:
raise IOError('dvlims must be a Quantity array of velocity limits (vmin, vmax).')
else:
try:
dvlims_kms = dvlims.to('km/s')
except u.UnitConversionError:
raise IOError('dvlims must have velocity units.')
good_complist = []
for comp in complist:
dv_comp = ltu.dv_from_z(comp.zcomp, z)
if (dv_comp >= dvlims[0]) and (dv_comp <= dvlims[1]):
good_complist += [comp]
return good_complist
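# Hedged usage sketch (illustrative): select components within +/- 200 km/s of
# z = 0.5; both the redshift and the velocity window are example values.
def _example_components_at_z(complist):
    dvlims = [-200.0, 200.0] * u.km / u.s
    return get_components_at_z(complist, 0.5, dvlims)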
def get_wvobs_chunks(comp):
"""For a given component, it gets a list of tuples with the
min/max observed wavelengths for each absorption line in the
component. An error is raised if an absorption line within the
component does not have its limits defined.
Parameters
----------
comp : AbsComponent
The input AbsComponent object
Returns
-------
wvobs_chunks : list of Quantity arrays
A list with the wvmin, wvmax values for each absorption
line within the component.
"""
if not isinstance(comp, AbsComponent):
raise ValueError('`comp` must be AbsComponent object.')
wvobs_chunks = []
for absline in comp._abslines:
# Check whether the absline has already defined 'wvlim'
if absline.limits.is_set():
wvlim_aux = absline.limits.wvlim
wvobs_chunks += [wvlim_aux]
else:
raise ValueError('{} must have its limits defined.'.format(absline))
return wvobs_chunks
def coincident_components(comp1, comp2, tol=0.2*u.arcsec):
"""Whether two components overlap in wavelength (observed)
space and (ra,dec) sky position. This is useful to identify
components that may need to be fit together in a given spectrum.
Parameters
----------
comp1 : AbsComponent
A given AbsComponent object
comp2 : AbsComponent
A given AbsComponent object
tol : Quantity, optional
Tolerance for checking whether the two components are
in the same sky region. Default is 0.2*u.arcsec
Returns
-------
answer : bool
True if there is overlapping wavelength range and
radec coordinates, otherwise False.
"""
if not isinstance(comp1, AbsComponent):
raise ValueError('comp1 must be AbsComponent object.')
if not isinstance(comp2, AbsComponent):
raise ValueError('comp2 must be AbsComponent object.')
# Check whether they are in the same sky region
if comp1.coord.separation(comp2.coord) > tol:
return False
# loop over abslines
for line1 in comp1._abslines:
for line2 in comp2._abslines:
overlap = line1.coincident_line(line2)
if overlap is True:
return True
return False
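# Hedged usage sketch (illustrative): collect the components of a list that are
# coincident with its first element, using the default 0.2 arcsec tolerance.
def _example_coincident(complist):
    ref = complist[0]
    return [comp for comp in complist[1:] if coincident_components(ref, comp)]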
def group_coincident_components(comp_list, output_type='list'):
"""For a given input list of components, this function
groups together components that are coincident to each other
(including by transitivity), and returns them as a list (default)
or dictionary of component lists.
Parameters
----------
comp_list : list of AbsComponent
Input list of components to group
output_type : str, optional
Type of the output, choose either
'list' for list or 'dict' for dictionary.
Returns
-------
output : list (or dictionary) of lists of AbsComponent
The grouped components as individual lists
in the output.
"""
if output_type not in ['list', 'dict', 'dictionary']:
raise ValueError("`output_type` must be either 'list' or 'dict'.")
### We first want to identify and group all blended lines
### Sort them by observed wavelength to do this
lst=[]
compnos=[]
for ii,comp in enumerate(comp_list):
lst.extend(comp._abslines)
compnos.extend([ii]*len(comp._abslines))
lst=np.array(lst)
compnos=np.array(compnos)
wv1s = np.array([line.limits.wvlim[0].value for line in lst])
from coordinate_tools import Transformation
from coordinate_tools import RotationMatrix
from kinematics import Kinematics
import numpy as np
import math
import random
import unittest
from test import support
class DifferentialTest(unittest.TestCase):
"""Performs random differential tests on the forward() and reverse function
of the 'Kinematics' class.
"""
def setUp(self):
random.seed(1)
np.random.seed(1)
def test_RC_lockJ4(self):
"""Tests forward and inverse kinematics in RC coordinates with joint 4
being locked to zero degrees. Test without end-effector.
"""
k = Kinematics.from_origin() # test robot coordinates only
k.set_joint2_offset(30)
k.set_joint2_height(30)
k.set_joint4_offset(10)
k.set_arm23_length(150)
k.set_arm35_length(150)
k.set_wrist_length(10)
k.set_endeffector(Transformation.from_identity())
rot_mat = RotationMatrix.from_axis('z') # implicitly locks joint 4
for i in range(1000):
orientation_mat = rot_mat.matrix_at_angle(random.random())
target_location = np.ones(3)
import sys
import time
import yaml
import math
import signal
import datetime
import threading
import traceback
import numpy as np
from cvxopt import matrix, solvers
#from scipy.spatial import ConvexHull
import matplotlib.patches as ptc
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# from actions import *
COLORS = [(0.0, 0.0, 0.0), (0.99, 0.0, 0.0), (0.0, 0.99, 0.0), (0.0, 0.0, 0.99), (0.99, 0.99, 0.0), (0.99, 0.0, 0.99), (0.0, 0.99, 0.99)]
global_boundary = []
xlim = []
ylim = []
test_type = 0
world = None
def is_in_space(p, tol):
global xlim, ylim
return xlim[0] - tol <= p[0] <= xlim[1] + tol and ylim[0] - tol <= p[1] <= ylim[1] + tol
def is_in_bounding_polygon(p, tol):
global global_boundary
pass
def angle_in_2pi(v):
angle = np.arctan2(v[1], v[0])
#if angle <= 0:
# angle += 2 * np.pi
return angle
def to_grid(x, y, x_off, y_off):
return (x - x_off, y - y_off)
#def get_convex_hull(V):
# hull = ConvexHull(V)
# return [V[vertex] for vertex in hull.vertices]
def appendGlobalBoundaries(B):
bottom_left = globals()['global_boundary'][0]
top_right = globals()['global_boundary'][3]
B.append((np.array([1., 0.], dtype=float), np.array(bottom_left, dtype=float)))
B.append((np.array([0., 1.], dtype=float), np.array(bottom_left, dtype=float)))
B.append((np.array([1., 0.], dtype=float), np.array(top_right, dtype=float)))
B.append((np.array([0., 1.], dtype=float), np.array(top_right, dtype=float)))
def angularSort(reference, vertices):
vectors = [p - reference for p in vertices]
indexed_angles = [(angle_in_2pi(vectors[i]), i) for i in range(len(vectors))]
#if self.name == "uav1":
# print("------")
# for i in range(len(vectors)):
# print(vectors[i], indexed_angles[i][0])
# print("------")
indexed_angles.sort()
return [vertices[i] for _, i in indexed_angles]
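# Hedged worked example (illustrative): sorting the corners of the unit square
# counter-clockwise around its centre. Because angle_in_2pi() currently returns
# np.arctan2 angles in (-pi, pi], the ordering starts from the lower-left corner.
def _example_angular_sort():
    reference = np.array([0.5, 0.5])
    corners = [np.array(p, dtype=float) for p in [(1, 1), (0, 1), (1, 0), (0, 0)]]
    return angularSort(reference, corners)
    # -> [(0, 0), (1, 0), (1, 1), (0, 1)]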
class StateBuffer:
def __init__(self):
self.buffers = dict()
def getState(self, name):
return self.buffers[name]
def getAllStates(self):
return dict(self.buffers)
def updateState(self, name, s):
self.buffers[name] = s
class Agent:
def __init__(self, name, init, goal, vmax):
self.name = name
self.move_thread = threading.Thread(name="{}_move".format(self.name), target=self.move)
self.sim_log = open('LOG_{}.txt'.format(self.name), 'w+')
self.terminate = False
self.phys_radius = 2.0
self.safe_radius = 3.0
self.comm_radius = 10.0
self.dt = 0.1
self.vmax = vmax
self.vmin = 0.5
self.velocity = np.zeros(2)
self.position = np.array(init, dtype=float)
self.voronoi_graph = []
#self.color = tuple(np.random.rand(3))
self.color = globals()['COLORS'][int(self.name[3:])]
self.inter_sort_type = [('angle', float), ('vector', np.ndarray)]
self.world = None
self.world_offset = (globals()['xlim'][0], globals()['ylim'][0])
self.frontier = set()
self._B = np.array([[1., 0.], [0., 1.], [1., 0.], [0., 1.]], dtype=float)
self.neighbours = dict()
# self.path = []
# self.curves = []
self.xhistory = []
self.yhistory = []
self.goal = np.array(goal, dtype=float)
self.goal_change = 10.
self.converged = False
self.H = matrix([[2., 0.], [0., 2.]], tc='d')
# STATE:
self.state = {'pos': self.position, 'vel': self.velocity, 'end': False}
self.advertiseState()
def initialize_world(self):
#global xlim, ylim
#W = xlim[1] - xlim[0]
#H = ylim[1] - ylim[0]
#self.world = np.zeros((H, W))
#grid_node = to_grid(self.position[0], self.position[1], xlim[1], ylim[1])
#v_act = valid_actions(self.world, grid_node)
#for act in v_act:
# applied_coord = apply_action_to_node(grid_node, act)
# pass
pass
def initialize(self):
#print("Initializing agent {}".format(self.name))
#print("Agent {} --> {}".format(self.name, self.goal))
self.move_thread.start()
def setGoal(self, g):
self.goal_change = np.linalg.norm(g - self.goal)
self.converged = self.goal_change <= 0.1
self.goal = np.array(g, dtype=float)
def hasReachedGoal(self):
return np.linalg.norm(self.goal - self.state['pos']) <= 0.1 and self.converged
def getCentroid(self):
### SOURCE: https://en.wikipedia.org/wiki/Centroid
# Calculate area with Shoelace Formula
area = 0
for i in range(len(self.voronoi_graph) - 1):
x_i, y_i = self.voronoi_graph[i]
x_j, y_j = self.voronoi_graph[i + 1]
area += x_i * y_j - x_j * y_i
area *= 0.5
# Calculate centroid of voronoi cell
Cx, Cy = 0, 0
for i in range(len(self.voronoi_graph) - 1):
x_i, y_i = self.voronoi_graph[i]
x_j, y_j = self.voronoi_graph[i + 1]
product = (x_i * y_j - x_j * y_i)
Cx += (x_i + x_j) * product
Cy += (y_i + y_j) * product
return np.array([Cx, Cy], dtype=float) / (6. * area)
def computeBisectors(self):
bisectors = [] # (normal, point)
cons, vals = [], []
tol = 0.1
for a, st in self.neighbours.items():
if st is None:
continue
if np.any(np.isnan(st['pos'])):
print(f'Agent {self.name} neighbour {a} has NaN!')
normal = (st['pos'] - self.state['pos']).round(4)
m = ((st['pos'] + self.state['pos']) * 0.5).round(4)
bisectors.append((normal, m))
cons.append(normal)
#vals.append(m.dot(normal) - self.safe_radius)
vals.append((m.dot(normal)).round(4))
# bottom_left = globals()['global_boundary'][0]
# top_right = globals()['global_boundary'][3]
# bisectors.append((np.array([1., 0.], dtype=float), np.array(bottom_left, dtype=float)))
# bisectors.append((np.array([0., 1.], dtype=float), np.array(bottom_left, dtype=float)))
# bisectors.append((np.array([1., 0.], dtype=float), np.array(top_right, dtype=float)))
# bisectors.append((np.array([0., 1.], dtype=float), np.array(top_right, dtype=float)))
appendGlobalBoundaries(bisectors)
A = np.array(cons, dtype=float)
b = np.array(vals, dtype=float)
self.voronoi_graph = []
for i in range(len(bisectors)):
n_i, m_i = bisectors[i]
d_i = m_i.dot(n_i)
for j in range(i + 1, len(bisectors)):
n_j, m_j = bisectors[j]
d_j = m_j.dot(n_j)
try:
A_ = np.array([n_i.round(4), n_j.round(4)], dtype=float)
b_ = np.array([d_i.round(4), d_j.round(4)], dtype=float)
p = (np.linalg.solve(A_, b_)).round(4)
except np.linalg.LinAlgError:
continue
except:
print(traceback.format_exc())
continue
if is_in_space(p, tol) and np.all(A.dot(p) <= b + 0.1):
self.voronoi_graph.append(p)
A_iq = matrix(np.array(cons), tc='d')
b_iq = matrix(np.array(vals), tc='d')
self.voronoi_graph = angularSort(self.position, self.voronoi_graph)
#self.voronoi_graph = get_convex_hull(self.voronoi_graph)
return A_iq, b_iq
def solveStep(self, A_iq, b_iq, _t=0):
v_next = self.state['vel']
if _t == 0:
## Buffered Voronoi Cell
if A_iq and b_iq:
solvers.options['show_progress'] = False
sol = solvers.qp(self.H, matrix(-2. * self.goal, tc='d'), A_iq, b_iq)
#print("Agent {} SOLN: {}".format(self.name, sol['x']))
v_next = (np.array(sol['x'][0]) - self.state['pos']) / self.dt
_norm = np.linalg.norm(v_next)
if _norm > self.vmax:
v_next = self.vmax * v_next / _norm
return v_next
elif _t == 1:
## Lloyd's algorithm: move toward the centroid of the Voronoi cell
if len(self.voronoi_graph):
self.voronoi_graph.append(self.voronoi_graph[0])
self.setGoal(self.getCentroid())
v_next = self.goal - self.state['pos']
_norm = np.linalg.norm(v_next)
if _norm > self.vmax:
v_next *= self.vmax / np.linalg.norm(v_next)
return v_next
print(f'Agent {self.name} stopped momentarily.')
return np.zeros(2)
def doStep(self, v_next):
x_, y_ = self.state['pos'][0], self.state['pos'][1]
self.xhistory.append(x_)
self.yhistory.append(y_)
self.state['pos'] = self.state['pos'] + self.dt * v_next
self.state['vel'] = v_next
def stepLog(self, _t=0):
if _t == 0:
self.sim_log.write('{} - pos: {} - vel: {} - at: {}\n'.format(self.name, self.position, self.velocity, datetime.datetime.now()))
elif _t == 1:
# Agent name; current position; next goal
#self.sim_log.write('{};{};{}\n'.format(self.name, self.position, self.goal))
#self.sim_log.write(f'{self.name};{self.voronoi_graph.dfs_traversal()}\n')
#self.sim_log.write(f'{self.name};{self.voronoi_graph}\n')
pass
def updateNeighbours(self):
for uav, st in globals()['buf'].buffers.items():
if uav == self.name or st is None:
continue
self.neighbours[uav] = dict(st)
def advertiseState(self):
globals()['buf'].updateState(self.name, self.state)
def stop(self):
self.terminate = True
def move(self):
test = globals()['test_type']
pre_flight_count = 20
#while not self.terminate and not self.hasReachedGoal():
while not self.terminate:
_start = time.time()
self.advertiseState()
self.updateNeighbours()
if pre_flight_count < 1:
A, b = self.computeBisectors()
v_next = self.solveStep(A, b, test)
self.doStep(v_next)
self.stepLog(test)
else:
pre_flight_count -= 1
_elapsed = time.time() - _start
fail_hard = _elapsed >= self.dt
if fail_hard:
#print('Agent {} failed hard real-time constraint at {}'.format(self.name, datetime.datetime.now()))
pass
else:
time.sleep(self.dt - _elapsed)
self.state['end'] = True
if self.hasReachedGoal():
print("Agent {} has reached goal at {}".format(self.name, datetime.datetime.now()))
self.sim_log.close()
class Simulator:
def __init__(self, pfile):
self.xlim = [-20, 80]
self.ylim = [-20, 80]
self.count = 0
self.agents = dict()
self.vmax = 0
self.iteration = 0
self.loadParams(pfile)
#self.logfile = open('SimulatorLog.txt', 'w+')
self.terminate = False
self.distance_thread = threading.Thread(name='distance_thread', target=self.checkCollision)
self.fig = plt.figure()
self.ax = self.fig.add_subplot(1, 1, 1)
#self.fig, self.axs = plt.subplots(2)
self.ani = None
def loadParams(self, pfile):
params = None
with open(pfile) as P:
params = yaml.load(P, Loader=yaml.FullLoader)
self.xlim = np.array(params['xlim'], dtype=float)
self.ylim = np.array(params['ylim'], dtype=float)
self.count = params['count']
self.vmax = params['vmax']
globals()['test_type'] = params['test_type']
globals()['world'] = np.zeros((int(self.ylim[1] - self.ylim[0]), int(self.xlim[1] - self.xlim[0])), dtype=int)
globals()['xlim'] = np.array(self.xlim, dtype=float)
import numpy as np
import os
import sys
import pandas as pd
import zipfile
import argparse
from tqdm import tqdm
from utils import *
from sklearn.model_selection import train_test_split
import h5py
np.random.seed(0)
"""
Routines for the analysis of proton radiographs. These routines can be broadly
classified as either creating synthetic radiographs from prescribed fields or
methods of 'inverting' experimentally created radiographs to reconstruct the
original fields (under some set of assumptions).
"""
__all__ = [
"SyntheticProtonRadiograph",
]
import astropy.constants as const
import astropy.units as u
import numpy as np
import sys
import warnings
from tqdm import tqdm
from plasmapy import particles
from plasmapy.formulary.mathematics import rot_a_to_b
from plasmapy.particles import Particle
from plasmapy.plasma.grids import AbstractGrid
from plasmapy.simulation.particle_integrators import boris_push
def _coerce_to_cartesian_si(pos):
"""
Takes a tuple of `astropy.unit.Quantity` values representing a position
in space in either Cartesian, cylindrical, or spherical coordinates, and
returns a numpy array representing the same point in Cartesian
coordinates and units of meters.
"""
# Auto-detect geometry based on units
geo_units = [x.unit for x in pos]
if geo_units[2].is_equivalent(u.rad):
geometry = "spherical"
elif geo_units[1].is_equivalent(u.rad):
geometry = "cylindrical"
else:
geometry = "cartesian"
# Convert geometrical inputs between coordinates systems
pos_out = np.zeros(3)
if geometry == "cartesian":
x, y, z = pos
pos_out[0] = x.to(u.m).value
pos_out[1] = y.to(u.m).value
pos_out[2] = z.to(u.m).value
elif geometry == "cylindrical":
r, t, z = pos
r = r.to(u.m)
t = t.to(u.rad).value
z = z.to(u.m)
pos_out[0] = (r * np.cos(t)).to(u.m).value
pos_out[1] = (r * np.sin(t)).to(u.m).value
pos_out[2] = z.to(u.m).value
elif geometry == "spherical":
r, t, p = pos
r = r.to(u.m)
t = t.to(u.rad).value
p = p.to(u.rad).value
pos_out[0] = (r * np.sin(t) * np.cos(p)).to(u.m).value
pos_out[1] = (r * np.sin(t) * np.sin(p)).to(u.m).value
pos_out[2] = (r * np.cos(t)).to(u.m).value
return pos_out
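# Hedged worked example (illustrative): a cylindrical position
# (r, theta, z) = (1 cm, 90 deg, 2 cm) is auto-detected from its units and maps
# to approximately (0, 0.01, 0.02) m in Cartesian SI coordinates.
def _example_coerce_position():
    pos = (1 * u.cm, 90 * u.deg, 2 * u.cm)
    return _coerce_to_cartesian_si(pos)  # ~ array([0.0, 0.01, 0.02])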
class SyntheticProtonRadiograph:
r"""
Represents a charged particle radiography experiment with simulated or
calculated E and B fields given at positions defined by a grid of spatial
coordinates. The particle source and detector plane are defined by vectors
from the origin of the grid.
Parameters
----------
grid : `~plasmapy.plasma.grids.AbstractGrid` or subclass thereof
A Grid object containing the required quantities [E_x, E_y, E_z, B_x, B_y, B_z].
If any of these quantities are missing, a warning will be given and that
quantity will be assumed to be zero everywhere.
source : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the location
of the particle source. This vector will be interpreted as
being in either cartesian, cylindrical, or spherical coordinates
based on its units. Valid geometries are:
* Cartesian (x,y,z) : (meters, meters, meters)
* cylindrical (r, theta, z) : (meters, radians, meters)
* spherical (r, theta, phi) : (meters, radians, radians)
In spherical coordinates theta is the polar angle.
detector : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the center
of the detector plane. The vector from the source point to this
point defines the normal vector of the detector plane. This vector
can also be specified in cartesian, cylindrical, or spherical
coordinates (see the `source` keyword).
detector_hdir : `numpy.ndarray`, shape (3), optional
A unit vector (in Cartesian coordinates) defining the horizontal
direction on the detector plane. By default, the horizontal axis in the
detector plane is defined to be perpendicular to both the
source-to-detector vector and the z-axis (unless the source-to-detector axis
is parallel to the z axis, in which case the horizontal axis is the x-axis).
The detector vertical axis is then defined
to be orthogonal to both the source-to-detector vector and the
detector horizontal axis.
verbose : bool, optional
If true, updates on the status of the program will be printed
into the standard output while running.
"""
def __init__(
self,
grid: AbstractGrid,
source: u.m,
detector: u.m,
detector_hdir=None,
verbose=True,
):
# self.grid is the grid object
self.grid = grid
# self.grid_arr is the grid positions in si units. This is created here
# so that it isn't continously called later
self.grid_arr = grid.grid.to(u.m).value
self.verbose = verbose
# A list of wire meshes added to the grid with add_wire_mesh
# Particles that would hit these meshes will be removed at runtime
# by _apply_wire_mesh
self.mesh_list = []
# ************************************************************************
# Setup the source and detector geometries
# ************************************************************************
self.source = _coerce_to_cartesian_si(source)
self.detector = _coerce_to_cartesian_si(detector)
self._log(f"Source: {self.source} m")
self._log(f"Detector: {self.detector} m")
# Calculate normal vectors (facing towards the grid origin) for both
# the source and detector planes
self.src_n = -self.source / np.linalg.norm(self.source)
self.det_n = -self.detector / np.linalg.norm(self.detector)
# Vector directly from source to detector
self.src_det = self.detector - self.source
# Magnification
self.mag = 1 + np.linalg.norm(self.detector) / np.linalg.norm(self.source)
self._log(f"Magnification: {self.mag}")
# Check that source-detector vector actually passes through the grid
if not self.grid.vector_intersects(self.source * u.m, self.detector * u.m):
raise ValueError(
"The vector between the source and the detector "
"does not intersect the grid provided!"
)
# Determine the angle above which particles will not hit the grid
# these particles can be ignored until the end of the simulation,
# then immediately advanced to the detector grid with their original
# velocities
self.max_theta_hit_grid = self._max_theta_hit_grid()
# ************************************************************************
# Define the detector plane
# ************************************************************************
# Load or calculate the detector hdir
if detector_hdir is not None:
self.det_hdir = detector_hdir / np.linalg.norm(detector_hdir)
else:
self.det_hdir = self._default_detector_hdir()
# Calculate the detector vdir
ny = np.cross(self.det_hdir, self.det_n)
self.det_vdir = -ny / np.linalg.norm(ny)
# ************************************************************************
# Validate the E and B fields
# ************************************************************************
req_quantities = ["E_x", "E_y", "E_z", "B_x", "B_y", "B_z"]
self.grid.require_quantities(req_quantities, replace_with_zeros=True)
for rq in req_quantities:
# Check that there are no infinite values
if not np.isfinite(self.grid[rq].value).all():
raise ValueError(
f"Input arrays must be finite: {rq} contains "
"either NaN or infinite values."
)
# Check that the max values on the edges of the arrays are
# small relative to the maximum values on that grid
#
# Array must be dimensionless to re-assemble it into an array
# of max values like this
arr = np.abs(self.grid[rq]).value
edge_max = np.max(
np.array(
[
np.max(arr[0, :, :]),
np.max(arr[-1, :, :]),
np.max(arr[:, 0, :]),
np.max(arr[:, -1, :]),
np.max(arr[:, :, 0]),
np.max(arr[:, :, -1]),
]
)
)
if edge_max > 1e-3 * np.max(arr):
unit = grid.recognized_quantities[rq].unit
warnings.warn(
"Fields should go to zero at edges of grid to avoid "
f"non-physical effects, but a value of {edge_max:.2E} {unit} was "
f"found on the edge of the {rq} array. Consider applying a "
"envelope function to force the fields at the edge to go to "
"zero.",
RuntimeWarning,
)
def _default_detector_hdir(self):
"""
Calculates the default horizontal unit vector for the detector plane
(see __init__ description for details)
"""
# Create unit vectors that define the detector plane
# Define plane horizontal axis
if np.allclose(np.abs(self.det_n), np.array([0, 0, 1])):
nx = np.array([1, 0, 0])
else:
nx = np.cross(np.array([0, 0, 1]), self.det_n)
nx = nx / np.linalg.norm(nx)
return nx
def _max_theta_hit_grid(self):
r"""
Using the grid and the source position, compute the maximum particle
theta that will impact the grid. This value can be used to determine
which particles are worth tracking.
"""
ind = 0
theta = np.zeros([8])
for x in [0, -1]:
for y in [0, -1]:
for z in [0, -1]:
# Source to grid corner vector
vec = self.grid_arr[x, y, z, :] - self.source
# Calculate angle between vec and the source-to-detector
# axis, which is the central axis of the particle beam
theta[ind] = np.arccos(
np.dot(vec, self.src_det)
/ np.linalg.norm(vec)
/ np.linalg.norm(self.src_det)
)
ind += 1
return np.max(theta)
def _log(self, msg):
if self.verbose:
print(msg)
# Define some constants so they don't get constantly re-evaluated
_c = const.c.si.value
# *************************************************************************
# Create mesh
# *************************************************************************
def add_wire_mesh(
self, location, extent, nwires, wire_diameter, mesh_hdir=None, mesh_vdir=None
):
"""
Add a wire mesh grid between the particle source and the object grid
that blocks particles whose paths intersect the wires.
Parameters
----------
location : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the center of the
mesh grid. This location must be between the source and the
object grid.
This vector will be interpreted as
being in either cartesian, cylindrical, or spherical coordinates
based on its units. Valid geometries are:
* Cartesian (x,y,z) : (meters, meters, meters)
* cylindrical (r, theta, z) : (meters, radians, meters)
* spherical (r, theta, phi) : (meters, radians, radians)
In spherical coordinates theta is the polar angle.
extent : Tuple of 1 or 2 `~astropy.units.Quantity`
The size of the mesh grid (in the mesh plane). If one value
is provided, the mesh is circular and the value provided is
interpreted as the diameter. If two values are provided, the
mesh is rectangular and they the values are interpreted as the
width and height respectively.
nwires : Tuple of 1 or 2 ints, or a single int
The number of wires in the horizontal and vertical directions. If
only one value is provided, the number in the two directions is
assumed to be equal. Note that a wire will cross the center of the
mesh only when nwires is odd.
wire_diameter : `~astropy.units.Quantity`
The diameter of the wires.
mesh_hdir : `numpy.ndarray`, shape (3), optional
A unit vector (in Cartesian coordinates) defining the horizontal
direction on the mesh plane. Modifying this vector can rotate the
mesh in the plane or tilt the mesh plane relative to the
source-detector axis. By default, `mesh_hdir` is set equal to
`detector_hdir` (see `detector_hdir` keyword in `__init__`).
mesh_vdir : `numpy.ndarray`, shape (3), optional
A unit vector (in Cartesian coordinates) defining the vertical
direction on the mesh plane. Modifying this vector can tilt the
mesh relative to the source-detector axis. By default, `mesh_vdir`
is defined to be perpendicular to `mesh_hdir` and the detector
plane normal (such that the mesh is parallel to the detector plane).
Raises
------
ValueError
Raises a ValueError if the provided mesh location is not
between the source and the object grid.
"""
location = _coerce_to_cartesian_si(location)
wire_radius = wire_diameter.si.value / 2
if not isinstance(extent, tuple):
extent = (extent,)
if len(extent) == 1:
radius = 0.5 * extent[0].si.value
width = extent[0].si.value
height = extent[0].si.value
elif len(extent) == 2:
radius = None
width = extent[0].si.value
height = extent[1].si.value
else:
raise ValueError(
"extent must be a tuple of 1 or 2 elements, but "
f"{len(extent)} elements were provided."
)
if not isinstance(nwires, tuple):
nwires = (nwires,)
if len(nwires) != 2:
nwires = (nwires[0], nwires[0])
# If no hdir/vdir is specified, calculate a default value
# If one is specified, make sure it is normalized
if mesh_hdir is None:
# Re-calculate the default here, in case the user
# specified a different det_hdir
mesh_hdir = self._default_detector_hdir()
else:
mesh_hdir = mesh_hdir / np.linalg.norm(mesh_hdir)
if mesh_vdir is None:
mesh_vdir = np.cross(mesh_hdir, self.det_n)
mesh_vdir = -mesh_vdir / np.linalg.norm(mesh_vdir)
else:
mesh_vdir = mesh_vdir / np.linalg.norm(mesh_vdir)
# Raise exception if mesh is AFTER the field grid
if np.linalg.norm(location - self.source) > np.linalg.norm(self.source):
raise ValueError(
f"The specified mesh location, {location},"
"is not between the source and the origin."
)
mesh_entry = {
"location": location,
"wire_radius": wire_radius,
"radius": radius,
"width": width,
"height": height,
"nwires": nwires,
"mesh_hdir": mesh_hdir,
"mesh_vdir": mesh_vdir,
}
self.mesh_list.append(mesh_entry)
def _apply_wire_mesh(
self,
location=None,
wire_radius=None,
radius=None,
width=None,
height=None,
nwires=None,
mesh_hdir=None,
mesh_vdir=None,
):
"""
Apply wire meshes that were added to self.mesh_list
"""
x = self._coast_to_plane(location, mesh_hdir, mesh_vdir)
# Particle positions in 2D on the mesh plane
xloc = np.dot(x - location, mesh_hdir)
yloc = np.dot(x - location, mesh_vdir)
# Create an array in which True indicates that a particle has hit a wire
# and False indicates that it has not
hit = np.zeros(self.nparticles, dtype=bool)
# Mark particles that overlap vertical or horizontal position with a wire
h_centers = np.linspace(-width / 2, width / 2, num=nwires[0])
for c in h_centers:
hit |= np.isclose(xloc, c, atol=wire_radius)
v_centers = np.linspace(-height / 2, height / 2, num=nwires[1])
for c in v_centers:
hit |= np.isclose(yloc, c, atol=wire_radius)
# Put back any particles that are outside the mesh boundaries
# First handle the case where the mesh is rectangular
if radius is None:
# Replace particles outside the x-boundary
hit[
np.logical_or(
xloc > np.max(h_centers) + wire_radius,
xloc < np.min(h_centers) - wire_radius,
)
] = False
# Replace particles outside the y-boundary
hit[
np.logical_or(
yloc > np.max(v_centers) + wire_radius,
yloc < np.min(v_centers) - wire_radius,
)
] = False
# Handle the case where the mesh is circular
else:
loc_rad = np.sqrt(xloc ** 2 + yloc ** 2)
hit[loc_rad > radius] = False
# In the case of a circular mesh, also create a round wire along the
# outside edge
hit[np.isclose(loc_rad, radius, atol=wire_radius)] = True
# Identify the particles that have hit something, then remove them from
# all of the arrays
keep_these_particles = ~hit
number_kept_particles = keep_these_particles.sum()
nremoved = self.nparticles - number_kept_particles
if self.nparticles - nremoved <= 0:
raise ValueError(
"The specified mesh is blocking all of the particles. "
f"The wire diameter ({2*wire_radius}) may be too large."
)
self.x = self.x[keep_these_particles, :]
self.v = self.v[keep_these_particles, :]
self.theta = self.theta[
keep_these_particles
] # Important to apply here to get correct grid_ind
self.nparticles = number_kept_particles
# *************************************************************************
# Particle creation methods
# *************************************************************************
def _angles_monte_carlo(self):
"""
Generates angles for each particle randomly such that the flux
per solid angle is uniform.
"""
# Create a probability vector for the theta distribution
# Theta must follow a sine distribution in order for the particle
# flux per solid angle to be uniform.
arg = np.linspace(0, self.max_theta, num=int(1e5))
prob = np.sin(arg)
prob *= 1 / np.sum(prob)
# Randomly choose theta's weighted with the sine probabilities
theta = np.random.choice(arg, size=self.nparticles, replace=True, p=prob)
# Also generate a uniform phi distribution
phi = np.random.uniform(high=2 * np.pi, size=self.nparticles)
return theta, phi
def _angles_uniform(self):
"""
Generates angles for each particle such that their velocities are
uniformly distributed on a grid in theta and phi. This method
requires that `nparticles` be a perfect square. If it is not,
`nparticles` will be set as the largest perfect square smaller
than the provided `nparticles`.
"""
# Calculate the approximate square root
n_per = np.floor(np.sqrt(self.nparticles)).astype(np.int32)
# Set new nparticles to be a perfect square
self.nparticles = n_per ** 2
# Create an imaginary grid positioned 1 unit from the source
# and spanning max_theta at the corners
extent = np.sin(self.max_theta) / np.sqrt(2)
arr = np.linspace(-extent, extent, num=n_per)
harr, varr = np.meshgrid(arr, arr, indexing="ij")
# calculate the angles from the source for each point in
# the grid.
theta = np.arctan(np.sqrt(harr ** 2 + varr ** 2))
phi = np.arctan2(varr, harr)
return theta.flatten(), phi.flatten()
@particles.particle_input
def create_particles(
self,
nparticles,
particle_energy,
max_theta=None,
particle: Particle = Particle("p+"),
distribution="monte-carlo",
):
r"""
Generates the angular distributions about the Z-axis, then
rotates those distributions to align with the source-to-detector axis.
By default, particles are generated over almost the entire pi/2. However,
if the detector is far from the source, many of these particles will
never be observed. The max_theta keyword allows these extraneous
particles to be neglected to focus computational resources on the
particles that will actually hit the detector.
nparticles : integer
The number of particles to include in the simulation. The default
is 1e5.
particle_energy : `~astropy.units.Quantity`
The energy of the particle, in units convertible to eV.
All particles are given the same energy.
max_theta : `~astropy.units.Quantity`, optional
The largest velocity vector angle (measured from the
source-to-detector axis) for which particles should be generated.
Decreasing this angle can eliminate particles that would never
reach the detector region of interest. If no value is given, a
guess will be made based on the size of the grid.
Units must be convertible to radians.
particle : ~plasmapy.particles.Particle or string representation of same, optional
Representation of the particle species as either a `Particle` object
or a string representation. The default particle is protons.
distribution: str
A keyword which determines how particles will be distributed
in velocity space. Options are:
- 'monte-carlo': velocities will be chosen randomly,
such that the flux per solid angle is uniform.
- 'uniform': velocities will be distributed such that,
left unperturbed, they will form a uniform pattern
on the detection plane. This method
requires that `nparticles` be a perfect square. If it is not,
`nparticles` will be set as the largest perfect square smaller
than the provided `nparticles`.
Simulations run in the `uniform` mode will imprint a grid pattern
on the image, but will well-sample the field grid with a
smaller number of particles. The default is `monte-carlo`
"""
self._log("Creating Particles")
# Load inputs
self.nparticles = int(nparticles)
self.particle_energy = particle_energy.to(u.eV).value
self.q = particle.charge.to(u.C).value
self.m = particle.mass.to(u.kg).value
# If max_theta is not specified, make a guess based on the grid size
if max_theta is None:
self.max_theta = np.clip(
1.5 * self.max_theta_hit_grid, 0.01, 0.99 * np.pi / 2
)
else:
self.max_theta = max_theta.to(u.rad).value
# Calculate the velocity corresponding to the particle energy
ER = self.particle_energy * 1.6e-19 / (self.m * self._c ** 2)
v0 = self._c * np.sqrt(1 - 1 / (ER + 1) ** 2)
if distribution == "monte-carlo":
theta, phi = self._angles_monte_carlo()
elif distribution == "uniform":
theta, phi = self._angles_uniform()
# Temporarily save theta to later determine which particles
# should be tracked
self.theta = theta
# Construct the velocity distribution around the z-axis
self.v = np.zeros([self.nparticles, 3])
self.v[:, 0] = v0 * np.sin(theta) * np.cos(phi)
self.v[:, 1] = v0 * np.sin(theta) * np.sin(phi)
self.v[:, 2] = v0 * np.cos(theta)
# Calculate the rotation matrix that rotates the z-axis
# onto the source-detector axis
a = np.array([0, 0, 1])
b = self.detector - self.source
rot = rot_a_to_b(a, b)
# Apply rotation matrix to calculated velocity distribution
self.v = np.matmul(self.v, rot)
# Place particles at the source
self.x = np.tile(self.source, (self.nparticles, 1))
@particles.particle_input
def load_particles(
self, x, v, particle: Particle = Particle("p+"),
):
r"""
Load arrays of particle positions and velocities
x : `~astropy.units.Quantity`, shape (N,3)
Positions for N particles
v: `~astropy.units.Quantity`, shape (N,3)
Velocities for N particles
particle : ~plasmapy.particles.Particle or string representation of same, optional
Representation of the particle species as either a `Particle` object
or a string representation. The default particle is protons.
distribution: str
A keyword which determines how particles will be distributed
in velocity space. Options are:
- 'monte-carlo': velocities will be chosen randomly,
such that the flux per solid angle is uniform.
- 'uniform': velocities will be distributed such that,
left unperturbed, they will form a uniform pattern
on the detection plane.
Simulations run in the `uniform` mode will imprint a grid pattern
on the image, but will well-sample the field grid with a
smaller number of particles. The default is `monte-carlo`
"""
self.q = particle.charge.to(u.C).value
self.m = particle.mass.to(u.kg).value
if x.shape[0] != v.shape[0]:
raise ValueError(
"Provided x and v arrays have inconsistent numbers "
" of particles "
f"({x.shape[0]} and {v.shape[0]} respectively)."
)
else:
self.nparticles = x.shape[0]
self.x = x.to(u.m).value
self.v = v.to(u.m / u.s).value
self.theta = np.arccos(
np.inner(self.v, self.src_n) / np.linalg.norm(self.v, axis=-1)
)
n_wrong_way = np.sum(np.where(self.theta > np.pi / 2, 1, 0))
if n_wrong_way > 1:
warnings.warn(
f"{100*n_wrong_way/self.nparticles:.2f}% of particles "
"initialized are heading away from the grid. Check the orientation "
" of the provided velocity vectors.",
RuntimeWarning,
)
# *************************************************************************
# Run/push loop methods
# *************************************************************************
def _adaptive_dt(self, Ex, Ey, Ez, Bx, By, Bz):
r"""
Calculate the appropriate dt based on a number of considerations
including the local grid resolution (ds) and the gyroperiod of the
particles in the current fields.
"""
# If dt was explicitly set, skip the rest of this function
if self.dt.size == 1:
return self.dt
# Compute the timestep indicated by the grid resolution
ds = self.grid.grid_resolution.to(u.m).value
gridstep = 0.5 * (np.min(ds) / self.vmax)
# If not, compute a number of possible timesteps
# Compute the cyclotron gyroperiod
Bmag = np.max(np.sqrt(Bx ** 2 + By ** 2 + Bz ** 2)).to(u.T).value
# Compute the gyroperiod
if Bmag == 0:
gyroperiod = np.inf
else:
gyroperiod = 2 * np.pi * self.m / (self.q * np.max(Bmag))
# TODO: introduce a minimum timestep based on electric fields too!
# Create an array of all the possible time steps we computed
candidates = np.array([gyroperiod / 12, gridstep])
# Enforce limits on dt
candidates = np.clip(candidates, self.dt[0], self.dt[1])
# dt is the min of the remaining candidates
return np.min(candidates)
import gym
import copy
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
class JapanMaze(object):
def __init__(self,radius=0.5,seed=0):
np.random.seed(seed=seed)
self.action_limit = 0.1
self.ini_posi = np.array([-0.9,-0.9])
self.ini_cov = np.array([[0.005,0.],[0.,0.005]])
self.whereami = copy.deepcopy(self.ini_posi)
self.goal = np.array([0.9,0.9])
self.reward_f = lambda y:np.exp(-(np.linalg.norm(y-self.goal)**2)/2)
self.center = np.array([0.0,0.0])
self.radius = radius
self.timelimit =40
self.N = 30 # Collision determination resolution
high = np.ones(2)*1
self.observation_space = gym.spaces.Box(low=-np.ones(2)*1, high=np.ones(2)*1,dtype=np.float32)
self.action_space = gym.spaces.Box(low=-np.ones(2)*self.action_limit, high=np.ones(2)*self.action_limit, dtype=np.float32)
""" Test functions for linalg module
"""
import os
import sys
import itertools
import traceback
import textwrap
import subprocess
import pytest
import numpy as np
from numpy import array, single, double, csingle, cdouble, dot, identity, matmul
from numpy import multiply, atleast_2d, inf, asarray
from numpy import linalg
from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError
from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_allclose, suppress_warnings,
assert_raises_regex, HAS_LAPACK64,
)
from numpy.testing._private.utils import requires_memory
def consistent_subclass(out, in_):
# For ndarray subclass input, our output should have the same subclass
# (non-ndarray input gets converted to ndarray).
return type(out) is (type(in_) if isinstance(in_, np.ndarray)
else np.ndarray)
old_assert_almost_equal = assert_almost_equal
def assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw):
if asarray(a).dtype.type in (single, csingle):
decimal = single_decimal
else:
decimal = double_decimal
old_assert_almost_equal(a, b, decimal=decimal, **kw)
def get_real_dtype(dtype):
return {single: single, double: double,
csingle: single, cdouble: double}[dtype]
def get_complex_dtype(dtype):
return {single: csingle, double: cdouble,
csingle: csingle, cdouble: cdouble}[dtype]
def get_rtol(dtype):
# Choose a safe rtol
if dtype in (single, csingle):
return 1e-5
else:
return 1e-11
# used to categorize tests
all_tags = {
'square', 'nonsquare', 'hermitian', # mutually exclusive
'generalized', 'size-0', 'strided' # optional additions
}
class LinalgCase:
def __init__(self, name, a, b, tags=set()):
"""
A bundle of arguments to be passed to a test case, with an identifying
name, the operands a and b, and a set of tags to filter the tests
"""
assert_(isinstance(name, str))
self.name = name
self.a = a
self.b = b
self.tags = frozenset(tags) # prevent shared tags
def check(self, do):
"""
Run the function `do` on this test case, expanding arguments
"""
do(self.a, self.b, tags=self.tags)
def __repr__(self):
return f'<LinalgCase: {self.name}>'
def apply_tag(tag, cases):
"""
Add the given tag (a string) to each of the cases (a list of LinalgCase
objects)
"""
assert tag in all_tags, "Invalid tag"
for case in cases:
case.tags = case.tags | {tag}
return cases
#
# Base test cases
#
np.random.seed(1234)
CASES = []
# square test cases
CASES += apply_tag('square', [
LinalgCase("single",
array([[1., 2.], [3., 4.]], dtype=single),
from pynwb import TimeSeries
import numpy as np
from bisect import bisect, bisect_left
def get_timeseries_tt(node: TimeSeries, istart=0, istop=None) -> np.ndarray:
"""
For any TimeSeries, return timestamps. If the TimeSeries uses starting_time and rate, the timestamps will be
generated.
Parameters
----------
node: pynwb.TimeSeries
istart: int, optional
Optionally sub-select the returned times - lower bound
istop: int, optional
Optionally sub-select the returned times - upper bound
Returns
-------
numpy.ndarray
"""
if node.timestamps is not None:
return node.timestamps[istart:istop]
else:
if not np.isfinite(node.starting_time):
starting_time = 0
else:
starting_time = node.starting_time
if istop is None:
return np.arange(istart, len(node.data)) / node.rate + starting_time
elif istop > 0:
return np.arange(istart, istop) / node.rate + starting_time
else:
return (
np.arange(istart, len(node.data) + istop - 1) / node.rate
+ starting_time
)
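# Hedged usage sketch (illustrative): for a regularly sampled TimeSeries with
# rate=10 Hz and starting_time=0, the generated timestamps are 0.0, 0.1, 0.2, ...
# The TimeSeries constructed here is an invented example.
def _example_timeseries_tt():
    ts = TimeSeries(name="example", data=np.arange(100), unit="a.u.",
                    rate=10.0, starting_time=0.0)
    return get_timeseries_tt(ts, istart=0, istop=5)  # -> [0., 0.1, 0.2, 0.3, 0.4]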
def get_timeseries_maxt(node: TimeSeries) -> float:
"""
Returns the maximum time of any TimeSeries
Parameters
----------
node: pynwb.TimeSeries
Returns
-------
float
"""
if node.timestamps is not None:
return node.timestamps[-1]
elif np.isnan(node.starting_time):
return (len(node.data) - 1) / node.rate
else:
return (len(node.data) - 1) / node.rate + node.starting_time
def get_timeseries_mint(node: TimeSeries) -> float:
"""
Returns the minimum time of any TimeSeries
Parameters
----------
node: pynwb.TimeSeries
Returns
-------
float
"""
if node.timestamps is not None:
return node.timestamps[0]
elif np.isnan(node.starting_time):
import numpy
import argparse
import matplotlib
from matplotlib import colors
from generaltools import from_eta_to_k_par
from analytic_covariance import gain_error_covariance
from analytic_covariance import blackman_harris_taper
from analytic_covariance import compute_ps_variance
from analytic_covariance import dft_matrix
from analytic_covariance import compute_weights
from radiotelescope import RadioTelescope
from generaltools import from_u_to_k_perp
def main(ssh=False, labelfontsize=13, tickfontsize=11, plot_name="Baseline_Distribution_MWA.pdf"):
plot_path = "../../Plots/Analytic_Covariance/"
u_range = numpy.logspace(0, numpy.log10(500), 100)
frequency_range = numpy.linspace(135, 165, 251) * 1e6
k_perp = from_u_to_k_perp(u_range, frequency_range[int(len(frequency_range) / 2)])
x_label = r"$k_{\perp}$ [Mpc$^{-1}$]"
mwa_position_path = "./Data/MWA_Compact_Coordinates.txt"
mwa_telescope = RadioTelescope(load=True, path=mwa_position_path)
log_steps = numpy.diff(numpy.log10(u_range))
u_bin_edges = numpy.zeros(len(u_range) + 1)
u_bin_edges[1:] = 10**(numpy.log10(u_range) + 0.5*log_steps[0])
u_bin_edges[0] = 10**(numpy.log10(u_range[0] - 0.5*log_steps[0]))
baseline_lengths = numpy.sqrt(mwa_telescope.baseline_table.u_coordinates ** 2 +
mwa_telescope.baseline_table.v_coordinates ** 2)
counts, edges = numpy.histogram(baseline_lengths, bins=u_bin_edges)
"""
Env used in PAD. Code adapted from its github repo
paper: Self-Supervised Policy Adaptation during Deployment
(https://arxiv.org/abs/2007.04309)
github: https://github.com/nicklashansen/policy-adaptation-during-deployment
"""
import numpy as np
from numpy.random import randint
import gym
import torch
import torch.nn.functional as F
import torchvision.transforms.functional as TF
import cv2
import importlib.resources
from dm_control.suite import common
from secant.wrappers import FrameStack
from secant.envs.dm_control.adapter import DMControlAdapter
__all__ = ["ColorWrapper", "GreenScreen"]
def _resource_file_path(fname) -> str:
with importlib.resources.path("secant.envs.dm_control.pad_data", fname) as p:
return str(p)
class ColorWrapper(gym.Wrapper):
"""Wrapper for the color experiments"""
def __init__(self, env, mode):
assert isinstance(env, FrameStack), "color/video env must be FrameStack first"
gym.Wrapper.__init__(self, env)
self._max_episode_steps = env._max_episode_steps
self._mode = mode
self.time_step = 0
if "color" in self._mode:
self._load_colors()
def reset(self):
self.time_step = 0
if "color" in self._mode:
self.randomize()
if "video" in self._mode:
# apply greenscreen
self.reload_physics(
{
"skybox_rgb": [0.2, 0.8, 0.2],
"skybox_rgb2": [0.2, 0.8, 0.2],
"skybox_markrgb": [0.2, 0.8, 0.2],
}
)
return self.env.reset()
def step(self, action):
self.time_step += 1
return self.env.step(action)
def randomize(self):
assert (
"color" in self._mode
), f"can only randomize in color mode, received {self._mode}"
self.reload_physics(self.get_random_color())
def _load_colors(self):
assert self._mode in {"color_easy", "color_hard"}
self._colors = torch.load(_resource_file_path(f"{self._mode}.pt"))
def get_random_color(self):
assert len(self._colors) >= 100, "env must include at least 100 colors"
return self._colors[randint(len(self._colors))]
def reload_physics(self, setting_kwargs=None, state=None):
domain_name = self._get_dmc_wrapper()._domain_name
if setting_kwargs is None:
setting_kwargs = {}
if state is None:
state = self._get_state()
self._reload_physics(
*common.settings.get_model_and_assets_from_setting_kwargs(
domain_name + ".xml", setting_kwargs
)
)
self._set_state(state)
def get_state(self):
return self._get_state()
def set_state(self, state):
self._set_state(state)
def _get_dmc_wrapper(self):
_env = self.env
while not isinstance(_env, DMControlAdapter) and hasattr(_env, "env"):
_env = _env.env
assert isinstance(_env, DMControlAdapter), "environment is not dmc2gym-wrapped"
return _env
def _reload_physics(self, xml_string, assets=None):
_env = self.env
while not hasattr(_env, "_physics") and hasattr(_env, "env"):
_env = _env.env
assert hasattr(_env, "_physics"), "environment does not have physics attribute"
_env.physics.reload_from_xml_string(xml_string, assets=assets)
def _get_physics(self):
_env = self.env
while not hasattr(_env, "_physics") and hasattr(_env, "env"):
_env = _env.env
assert hasattr(_env, "_physics"), "environment does not have physics attribute"
return _env._physics
def _get_state(self):
return self._get_physics().get_state()
def _set_state(self, state):
self._get_physics().set_state(state)
def rgb_to_hsv(r, g, b):
"""Convert RGB color to HSV color"""
maxc = max(r, g, b)
minc = min(r, g, b)
v = maxc
if minc == maxc:
return 0.0, 0.0, v
s = (maxc - minc) / maxc
rc = (maxc - r) / (maxc - minc)
gc = (maxc - g) / (maxc - minc)
bc = (maxc - b) / (maxc - minc)
if r == maxc:
h = bc - gc
elif g == maxc:
h = 2.0 + rc - bc
else:
h = 4.0 + gc - rc
h = (h / 6.0) % 1.0
return h, s, v
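# Hedged worked example (illustrative): pure green (0, 1, 0) in normalized RGB
# maps to hue 1/3 (120 degrees), full saturation and full value. This is the
# colour range the green-screen masking below keys on.
def _example_rgb_to_hsv():
    return rgb_to_hsv(0.0, 1.0, 0.0)  # -> (0.333..., 1.0, 1.0)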
def do_green_screen(x, bg):
"""Removes green background from observation and replaces with bg; not optimized for speed"""
assert isinstance(x, np.ndarray) and isinstance(
bg, np.ndarray
), "inputs must be numpy arrays"
assert x.dtype == np.uint8 and bg.dtype == np.uint8, "inputs must be uint8 arrays"
# Get image sizes
x_h, x_w = x.shape[1:]
# Convert to RGBA images
im = TF.to_pil_image(torch.ByteTensor(x))
im = im.convert("RGBA")
pix = im.load()
bg = TF.to_pil_image(torch.ByteTensor(bg))
bg = bg.convert("RGBA")
bg = bg.load()
# Replace pixels
for x in range(x_w):
for y in range(x_h):
r, g, b, a = pix[x, y]
h_ratio, s_ratio, v_ratio = rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)
h, s, v = (h_ratio * 360, s_ratio * 255, v_ratio * 255)
min_h, min_s, min_v = (100, 80, 70)
max_h, max_s, max_v = (185, 255, 255)
if min_h <= h <= max_h and min_s <= s <= max_s and min_v <= v <= max_v:
pix[x, y] = bg[x, y]
x = np.moveaxis(np.array(im).astype(np.uint8), -1, 0)[:3]
return x
class GreenScreen(gym.Wrapper):
"""Green screen for video experiments"""
def __init__(self, env, mode):
gym.Wrapper.__init__(self, env)
self._mode = mode
if "video" in mode:
self._video = mode
if not self._video.endswith(".mp4"):
self._video += ".mp4"
self._video = _resource_file_path(self._video)
self._data = self._load_video(self._video)
else:
self._video = None
self._max_episode_steps = env._max_episode_steps
def _load_video(self, video):
"""Load video from provided filepath and return as numpy array"""
cap = cv2.VideoCapture(video)
assert (
cap.get(cv2.CAP_PROP_FRAME_WIDTH) >= 100
), "width must be at least 100 pixels"
assert (
cap.get(cv2.CAP_PROP_FRAME_HEIGHT) >= 100
), "height must be at least 100 pixels"
n = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
buf = np.empty(
(
n,
int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
3,
),
np.dtype("uint8"),
)
i, ret = 0, True
while i < n and ret:
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
buf[i] = frame
i += 1
cap.release()
return np.moveaxis(buf, -1, 1)
# Licensed under an MIT open source license - see LICENSE
import numpy as np
from .colors import *
import time
from astropy import log
import warnings
import matplotlib.pyplot as plt
class Decomposer(object):
"""
A class containing various methods for decomposing individual spectra
Parameters
----------
spectral_axis : array
An array of the spectral axis
spectrum : array
The spectrum
rms : number
An estimate of the rms
Attributes
----------
fittype : string
A string describing the pyspeckit fitter
guesses : list
A list containing the initial guesses to the fit
guesses_updated : list
Used if the best-fitting solution is to be compared with a parent
spectrum (as in scouse)
psktemplate : instance of pyspeckit's Spectrum class
A template spectrum generated using pyspeckit
pskspectrum : instance of pyspeckit's Spectrum class
This is the spectrum that will be fit
modeldict : dictionary
A dictionary describing the best-fitting solution
validfit : bool
Whether or not the fit is valid. Used if the best-fitting solution is to
be compared with a parent spectrum (as in scouse)
tol : list
list of tolerance values used to compare the best-fitting solution to
that of its parent spectrum
res : number
the channel spacing
method : string
The fitting method used. Current options:
parent: the fitting method predominantly used by scouse, where a
spectrum has been fit using initial guesses from a parent
spectrum
dspec: Where a spectrum has been fit using input guesses from
derivative spectroscopy
manual: Where a spectrum has been fit manually using pyspeckit's
interactive fitter
"""
def __init__(self,spectral_axis,spectrum,rms):
self.spectral_axis=spectral_axis
self.spectrum=spectrum
self.rms=rms
self.fittype=None
self.guesses=None
self.guesses_from_parent=None
self.guesses_updated=None
self.psktemplate=None
self.pskspectrum=None
self.modeldict=None
self.validfit=False
self.tol=None
self.res=None
self.method=None
self.fit_updated=False
self.residuals_shown=False
self.guesses=None
self.happy=False
self.conditions=None
def fit_spectrum_with_guesses(self, guesses, fittype='gaussian', method='dspec'):
"""
Fitting method used when using scouse as a standalone fitter. It takes
guesses supplied by dspec and calls on pyspeckit to fit the spectrum
Parameters
----------
guesses : list
a list containing the initial guesses for the fit parameters
fittype : string
A string describing the pyspeckit fitter
"""
self.method=method
self.fittype=fittype
self.guesses=guesses
self.fit_a_spectrum()
self.get_model_information()
def fit_spectrum_from_parent(self,guesses,guesses_parent,tol,res,fittype='gaussian',method='parent'):
"""
The fitting method most commonly used by scouse. This method will fit
a spectrum and compare the result against another model. Most commonly
a model describing a lower resolution parent spectrum
Parameters
----------
guesses : list
a list containing the initial guesses for the fit parameters
guesses_parent : list
a list containing the model parameters of the parent
tol : list
list of tolerance values used to compare the best-fitting solution to
that of its parent spectrum
res : number
the channel spacing
fittype : string
A string describing the pyspeckit fitter
"""
self.method=method
self.fittype=fittype
self.guesses=guesses
self.guesses_parent=guesses_parent
self.tol=tol
self.res=res
if self.psktemplate is not None:
self.update_template()
else:
self.create_a_spectrum()
self.fit_a_spectrum()
errors=np.copy(self.pskspectrum.specfit.modelerrs)
errors=[np.nan if error is None else error for error in errors ]
errors=np.asarray([np.nan if np.invert(np.isfinite(error)) else error for error in errors ])
if np.any(np.invert(np.isfinite(errors))):
guesses = np.copy(self.pskspectrum.specfit.modelpars)
rounding = np.asarray([np.abs(np.floor(np.log10(np.abs(guess)))) if np.floor(np.log10(np.abs(guess)))<0.0 and guess != 0.0 else 1.0 for guess in guesses])
self.guesses = np.asarray([np.around(guess,decimals=int(rounding[i])) for i, guess in enumerate(guesses)])
# first get the number of parameters and components
nparams=np.size(self.pskspectrum.specfit.fitter.parnames)
ncomponents=np.size(self.guesses)/nparams
# remove any instances of nans
for i in range(int(ncomponents)):
component = guesses[int((i * nparams)) : int((i * nparams) + nparams)]
if np.any(~np.isfinite(np.asarray(component))):
guesses[int((i * nparams)) : int((i * nparams) + nparams)] = 0.0
# remove any instances of negative intensity
for i in range(int(ncomponents)):
component = self.guesses[int((i*nparams)):int((i*nparams)+nparams)]
if np.sum([1 for number in component if number < 0.0]) >= 1:
self.guesses[int((i*nparams)):int((i*nparams)+nparams)] = 0.0
# for spectra with more than one component we want to set the component
# with the lowest amplitude to zero as well (this could be the same
# component)
if ncomponents > 1:
# identify where amplitude is in paranames
namelist = ['tex', 'amp', 'amplitude', 'peak', 'tant', 'tmb']
foundname = [pname in namelist for pname in self.pskspectrum.specfit.fitter.parnames]
foundname = np.array(foundname)
idx=np.where(foundname==True)[0]
                idx = int(idx[0])  # np.asscalar was removed in recent NumPy versions
# Now get the amplitudes
amplist=np.asarray([self.guesses[int(i*nparams)+idx] for i in range(int(ncomponents))])
# identify the lowest amplitude
idx = np.where(amplist==np.min(amplist))[0]
                idx = int(idx[0])
self.guesses[int((idx*nparams)):int((idx*nparams)+nparams)] = 0.0
self.guesses = self.guesses[(self.guesses != 0.0)]
if np.size(self.guesses !=0):
#self.psktemplate=None
#self.pskspectrum=None
if self.psktemplate is not None:
self.update_template()
else:
self.create_a_spectrum()
self.fit_a_spectrum()
self.get_model_information()
self.check_against_parent()
if not self.validfit:
self.modeldict={}
self.psktemplate=None
self.pskspectrum=None
def fit_spectrum_manually(self, fittype='gaussian'):
"""
Method used to manually fit a spectrum
Parameters
----------
fittype : string
A string describing the pyspeckit fitter
"""
plt.ion()
self.method='manual'
self.fittype=fittype
self.interactive_fitter()
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=DeprecationWarning)
while not self.happy:
try:
# using just a few little bits of plt.pause below
plt.gcf().canvas.draw()
plt.gcf().canvas.start_event_loop(0.1)
time.sleep(0.1)
except KeyboardInterrupt:
break
plt.ioff()
self.get_model_information()
def interactive_fitter(self):
"""
Interactive fitter - the interactive fitting process controlled by
fit_spectrum_manually. Starts with the interactive fitter with
fit_updated= false. The user can fit the spectrum. Pressing enter will
initialise the fit (fit_updated=True). Pressing enter again will
accept the fit.
"""
old_log = log.level
log.setLevel('ERROR')
if not self.fit_updated:
self.fit_updated=False
# Interactive fitting with pyspeckit
self.pskspectrum.plotter(xmin=np.min(self.spectral_axis),
xmax=np.max(self.spectral_axis),)
self.pskspectrum.plotter.figure.canvas.callbacks.disconnect(3)
self.pskspectrum.specfit.clear_all_connections()
assert self.pskspectrum.plotter._active_gui is None
# interactive fitting
self.fit_a_spectrum_interactively()
assert self.pskspectrum.plotter._active_gui is not None
self.residuals_shown=False
else:
self.fit_updated=True
self.pskspectrum.plotter(xmin=np.min(self.spectral_axis),
xmax=np.max(self.spectral_axis),)
# disable mpl key commands (especially 'q')
self.pskspectrum.plotter.figure.canvas.callbacks.disconnect(3)
self.pskspectrum.specfit.clear_all_connections()
assert self.pskspectrum.plotter._active_gui is None
if None in self.guesses:
raise ValueError(colors.fg._red_+"Encountered a 'None' value in"+
" guesses"+colors._endc_)
# non interactive - display the fit
self.fit_a_spectrum()
self.pskspectrum.specfit.plot_fit(show_components=True)
self.pskspectrum.specfit.plotresiduals(axis=self.pskspectrum.plotter.axis,
clear=False,
color='g',
label=False)
assert self.pskspectrum.plotter._active_gui is None
self.residuals_shown=True
self.printable_format()
print("Options:"
"\n"
"1) If you are happy with this fit, press Enter."
"\n"
"2) If not, press 'f' to re-enter the interactive fitter.")
log.setLevel(old_log)
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=DeprecationWarning)
if plt.matplotlib.rcParams['interactive']:
self.happy = None
self.pskspectrum.plotter.axis.figure.canvas.mpl_connect('key_press_event',self.interactive_callback)
else:
plt.show()
self.happy = self.interactive_callback('noninteractive')
#
if not hasattr(self.pskspectrum.specfit, 'fitter'):
raise ValueError("No fitter available for the spectrum."
" This can occur if you have plt.ion() set"
" or if you did not fit the spectrum."
)
return
def interactive_callback(self, event):
"""
A 'callback function' to be triggered when the user selects a fit.
Parameters
----------
event : interactive event
"""
if plt.matplotlib.rcParams['interactive']:
if hasattr(event, 'key'):
# Enter to continue
                if event.key in ('enter',):  # tuple, so only the Enter key matches (not substrings of 'enter')
if self.residuals_shown:
print("")
print("'enter' key acknowledged."+
colors.fg._lightgreen_+" Solution accepted"+colors._endc_+".")
self.happy = True
self.pskspectrum.specfit.clear_all_connections()
self.pskspectrum.plotter.disconnect()
plt.close(self.pskspectrum.plotter.figure.number)
assert self.pskspectrum.plotter._active_gui is None
else:
print("")
print("'enter' key acknowledged."+
colors.fg._cyan_+" Showing fit and residuals"+colors._endc_+".")
self.fit_updated=True
self.guesses = self.pskspectrum.specfit.parinfo.values
self.interactive_fitter()
# To re-enter the fitter
elif event.key in ('f', 'F'):
print("")
print("'f' key acknowledged."+
colors.fg._lightred_+" Re-entering interactive fitter"+colors._endc_+".")
self.residuals_shown = False
# to indicate that all components have been selected
elif event.key in ('d','D','3',3):
# The fit has been performed interactively, but we also
# want to print out the nicely-formatted additional
# information
self.pskspectrum.specfit.button3action(event)
print("'d' key acknowledged."+
colors.fg._cyan_+" Guess initialized"+colors._endc_+".")
print('')
print("Options:"
"\n"
"1) To lock the fit and display residuals, press Enter."
"\n"
"2) Press 'f' to re-enter the interactive fitter.")
self.happy = None
else:
self.happy = None
elif hasattr(event, 'button') and event.button in ('d','D','3',3):
# The fit has been performed interactively, but we also
# want to print out the nicely-formatted additional
# information
print("'d' key acknowledged."+
colors.fg._cyan_+" Guess initialized"+colors._endc_+".")
print('')
print("Options:"
"\n"
"1) To lock the fit and display residuals, press Enter."
"\n"
"2) Press 'f' to re-enter the interactive fitter.")
self.happy = None
else:
self.happy = None
else:
# this should only happen if not triggered by a callback
assert event == 'noninteractive'
self.printable_format()
h = input("Are you happy with the fit? (y/n): ")
self.happy = h in ['True', 'T', 'true', '1', 't', 'y', 'yes', 'Y', 'Yes']
print("")
self.fit_updated=True
return self.happy
def fit_a_spectrum(self):
"""
Fits a spectrum
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore')
old_log = log.level
log.setLevel('ERROR')
self.pskspectrum.specfit(interactive=False,
clear_all_connections=True,
xmin=np.min(self.spectral_axis),
xmax=np.max(self.spectral_axis),
fittype = self.fittype,
guesses = self.guesses,
verbose=False,
use_lmfit=True)
log.setLevel(old_log)
def fit_a_spectrum_interactively(self):
"""
Fits a spectrum interactively
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore')
old_log = log.level
log.setLevel('ERROR')
self.pskspectrum.specfit(interactive=True,
print_message=True,
xmin=np.min(self.spectral_axis),
xmax=np.max(self.spectral_axis),
fittype = self.fittype,
verbose=False,
use_lmfit=True,
show_components=True)
log.setLevel(old_log)
def create_a_template(self,unit='',xarrkwargs={}):
"""
generates an instance of pyspeckit's Spectrum class
Parameters
----------
x : array
spectral axis
y : array
the spectrum
rms : number
estimate of the rms
unit : str
unit of the spectral axis
xarrkwargs : dictionary
key word arguments describing the spectral axis
"""
from pyspeckit import Spectrum
spectrum=np.zeros_like(self.spectral_axis,dtype='float')
error_spectrum=np.ones_like(self.spectral_axis,dtype='float')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
old_log = log.level
log.setLevel('ERROR')
self.psktemplate = Spectrum(data=spectrum,
error=error_spectrum,
xarr=self.spectral_axis,
doplot=False,
unit=unit,
xarrkwargs=xarrkwargs,
verbose=False,
)
log.setLevel(old_log)
def create_a_spectrum(self,unit='',xarrkwargs={}):
"""
generates an instance of pyspeckit's Spectrum class
Parameters
----------
x : array
spectral axis
y : array
the spectrum
rms : number
estimate of the rms
unit : str
unit of the spectral axis
xarrkwargs : dictionary
key word arguments describing the spectral axis
"""
from pyspeckit import Spectrum
import astropy.units as u
with warnings.catch_warnings():
warnings.simplefilter('ignore')
old_log = log.level
log.setLevel('ERROR')
self.pskspectrum = Spectrum(data=np.ma.masked_where(np.isnan(u.Quantity(self.spectrum).value) + np.isinf(u.Quantity(self.spectrum).value), u.Quantity(self.spectrum).value),
                                        # remaining arguments reconstructed to mirror create_a_template (the source is truncated here)
                                        error=np.ma.masked_where(np.isnan(u.Quantity(np.ones_like(self.spectrum)*self.rms).value) + np.isinf(u.Quantity(np.ones_like(self.spectrum)*self.rms).value), u.Quantity(np.ones_like(self.spectrum)*self.rms).value),
                                        xarr=self.spectral_axis,
                                        doplot=False,
                                        unit=unit,
                                        xarrkwargs=xarrkwargs,
                                        verbose=False,
                                        )
            log.setLevel(old_log)
# coding: utf-8
# Copyright (c) <NAME>
# Distributed under the terms of "New BSD License", see the LICENSE file.
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from . descriptors import Positive, TwoDee, PILArray, Alignment
from types import MethodType
from textwrap import wrap as textwrap
from copy import deepcopy
"""
Base classes for allowing simple graphic design on top of PIL Images.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, <NAME>"
__version__ = "0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "development"
__date__ = "May 11, 2020"
class Graphic:
"""
A 2d graphic, possibly holding other sub-graphics.
Attributes:
size (Positive): A 2-tuple of integers for the x and y sizes.
color (str): The color (html or hex) of the graphic background. (Default is #0000, transparent.)
        position (TwoDee): Pixels between the `anchor` of this graphic and the reference point in the parent graphic
(which depends on the `coordinates` setting). Meaningless if this is not a sub-graphic of a larger graphic.
(Default is None.)
anchor ('upper left'/'center'): Whether position indicates the location of the upper-left corner of this graphic
or the center of it. (Default is 'upper left'.)
coordinate_frame ('upper left'/'center'): Whether position is measured from the upper left corner of the parent
graphic (x>0 is right, y>0 is down), or the center of the parent graphic (x>0 is still right, but y>0 is
up). (Default is 'upper left'.)
layer (int): The relative rendering order inside the parent graphic; higher layers are rendered on top. (Default
is 0.)
angle (float): How much to rotate the graphic by before pasting it onto the parent canvas.
resample: cf. PIL documentation for pasting.
parent (Graphic): The graphic onto which this graphic will be pasted. (Default is None.)
name (str): The name by which this graphic is known to its parent. (Automatically filled on assignment to a
parent graphic's `children`.)
children (Children): Holds sub-graphics to be pasted onto this graphic.
image (PIL.Image): The actual visual being rendered.
depth (int): How many layers of parent graphics exist above this one.
"""
size = Positive('size')
position = TwoDee('position')
coordinate_frame = Alignment('coordinate_frame')
anchor = Alignment('anchor')
def __init__(self, size, **kwargs):
self.color = None
self.position = None
self.anchor = 'upper left'
self.coordinate_frame = 'upper left'
self.layer = 0
self.angle = 0
self.resample = 0
self._update_attributes_from_dict(kwargs)
self.size = size
self.children = Children(self)
self._image = None
self.parent = None
self._name = 'parent'
@property
def image(self):
"""The image is constructed by rendering all children on top of the graphic's own base image."""
if self._image is None:
self.render()
return self._image
@property
def name(self):
"""The graphic name is set during assignment as a child of a parent graphic, or is just 'parent'."""
return self._name
@property
def depth(self):
"""Depth measures how many graphics there are in the tree above this graphic."""
try:
return self.parent.depth + 1
except AttributeError:
return 0
def _prepare_image(self):
self._image = Image.new("RGBA", self.size.inttuple, self.color or '#0000')
def save(self, fp, format=None, **params):
self.image.save(fp, format=format, **params)
def copy(self):
return deepcopy(self)
def render(self):
if self.position is not None and self.parent is None:
raise ValueError("Position is not None, but this graphic has no parent.")
elif self.position is None and self.parent is not None:
raise ValueError("Position is None, but this graphic has a parent.")
self._prepare_image()
self.children.render()
if self.angle != 0:
self._image = self.image.rotate(self.angle, resample=self.resample, expand=True)
@staticmethod
def clamp_to_size_tuple(values, size):
return tuple(np.clip(values, (0, 0), size).astype(int))
@staticmethod
def to_pilarray(x):
"""
Converts an `numpy.ndarray`-like object to a `PILArray` (which is the same, but has a method for converting
itself to a tuple of integers).
"""
return np.array(x).view(PILArray)
def _update_attributes_from_dict(self, kwargs):
for k, v in kwargs.items():
if hasattr(self, k):
setattr(self, k, v)
else:
raise AttributeError("{} has no attribute '{}'".format(self.name, k))
@property
def _numeric_position(self):
if self.coordinate_frame.is_upper_left:
position = self.position
elif self.coordinate_frame.is_center:
position = 0.5 * self.parent.size + (1, -1) * self.position
return position
@property
def _numeric_anchor(self):
if self.anchor.is_upper_left:
anchor_shift = self.to_pilarray((0, 0))
elif self.anchor.is_center:
anchor_shift = 0.5 * self.to_pilarray(self.image.size)
return anchor_shift
def crop_and_box(self):
corner1 = (self._numeric_position - self._numeric_anchor).inttuple
corner2 = (corner1 + self.to_pilarray(self.image.size)).inttuple
free_box = corner1 + corner2
max_size = self.parent.size.inttuple
clamped_box = self.clamp_to_size_tuple(corner1, max_size) + self.clamp_to_size_tuple(corner2, max_size)
cropping_offset = np.array(clamped_box) - np.array(free_box)
image = self.image
if np.any(cropping_offset != 0):
            # Reconstructed from context (the source is truncated here): crop away the part of
            # the image that falls outside the parent canvas, then return it with the paste box.
            cropping_box = tuple((np.array(cropping_offset) + (0, 0) + image.size).astype(int))
            image = image.crop(cropping_box)
        return image, clamped_box
import numpy as np
import matplotlib.pyplot as plt
from lassolver.utils.func import *
class ISTA:
def __init__(self, A, x, noise):
self.A = A
self.M, self.N = A.shape
self.x = x
Ax = A @ x
if type(noise) is int:
SNRdB = 10**(0.1*noise) / self.P
self.sigma = np.linalg.norm(Ax)**2 / SNRdB
self.n = np.random.normal(0, self.sigma**0.5, (self.M, 1))
elif type(noise).__module__ == 'numpy':
self.sigma = np.var(noise)
self.n = noise.copy()
else :
raise ValueError
self.y = Ax + self.n
        self.s = np.zeros((self.N, 1))
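        # The iterative solver itself is truncated in the source. For reference, a textbook
        # ISTA update (an assumption about what this class implements, not original code) is:
        #
        #     r = self.y - self.A @ self.s                                  # residual
        #     w = self.s + self.A.T @ r / L                                 # gradient step (L: Lipschitz constant)
        #     self.s = np.sign(w) * np.maximum(np.abs(w) - lam / L, 0.0)    # soft thresholding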
import itertools as itt
import numpy as np
def ndarray_dprime(array0, array1, axis, flip=None, keepdims=False):
"""
general function to calculate the d prime between two arrays with the same shape. the d prime is calculated in
a dimension wise manner but for the specified axis, which is treated as observations/repetitions
:param array0: ndarray
:param array1: ndarray
:param axis: int. observation axis
:param flip: str, None (default). 'absolute' returns the absolute value of d primes,
'max' flips the values so the max absolute value is positive,
'first' flips the values so the first time time value is positive
:return: ndarray with one less dimension as the input arrays
"""
# main dprime calculation
dprime = ((np.mean(array0, axis=axis, keepdims=keepdims) - np.mean(array1, axis=axis, keepdims=keepdims)) /
np.sqrt(0.5 * (np.var(array0, axis=axis, keepdims=keepdims) + np.var(array1, axis=axis, keepdims=keepdims))))
# check for edge cases
if np.any(np.isnan(dprime)):
dprime[np.where(np.isnan(dprime))] = 0
if np.any(np.isinf(dprime)):
dprime[np.where(np.isinf(dprime))] = (array0.mean(axis=axis, keepdims=keepdims)
- array1.mean(axis=axis, keepdims=keepdims))[np.isinf(dprime)]
# due to floating point error, variances that should be zero are really small numbers, which lead to really big
# dprimes, this happens most of the time due zero spikes counted
dprime[dprime > 100000] = 0
# multiple options to flip the dprime
if flip == 'absolute':
dprime = np.abs(dprime)
elif flip == 'max':
# flip value signs so the highest absolute dprime value is positive
toflip = (np.abs(np.min(dprime, axis=-1)) > np.max(dprime, axis=-1))[..., None] # assume last dimension is time
        dprime = np.negative(dprime, where=toflip, out=dprime)
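# Usage sketch (made-up data): per-feature d-prime between two condition stacks, treating
# axis 0 as repetitions and flipping signs so the largest absolute value ends up positive.
#
#     a = np.random.normal(1.0, 1.0, size=(20, 5, 30))   # (reps, cells, time)
#     b = np.random.normal(0.0, 1.0, size=(20, 5, 30))
#     d = ndarray_dprime(a, b, axis=0, flip='max')        # -> shape (5, 30)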
# Author: <EMAIL>
import time
import numpy as np
import tensorflow as tf
import cv2
from efficientnet.tfkeras import EfficientNetB5
from efficientnet.tfkeras import center_crop_and_resize, preprocess_input
def center_crop_and_resize(frame, size):
"""change shape of a frame with shape (h, w, 3) into shape (size, size, 3)
"""
# prepare_frame
assert len(frame.shape) == 3 and frame.shape[-1] == 3
if frame.dtype != np.uint8:
frame = frame.astype(np.uint8)
# center crop process
y, x = frame.shape[0:2]
if x != y:
min_dim = min(y, x)
start_x = (x // 2) - (min_dim // 2)
start_y = (y // 2) - (min_dim // 2)
frame = frame[start_y:start_y+min_dim,start_x:start_x+min_dim]
# resize process
h, w = frame.shape[:2]
if h * w < size ** 2:
frame = cv2.resize(frame, (size, size), interpolation=cv2.INTER_CUBIC)
elif not (h == w == size):
frame = cv2.resize(frame, (size, size), interpolation=cv2.INTER_AREA)
return np.expand_dims(frame, 0).astype(np.float32)
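# Usage sketch (hypothetical image file): prepare a single RGB frame for EfficientNet-B5,
# whose native input resolution is 456x456.
#
#     frame = cv2.imread("frame.jpg")[..., ::-1]     # BGR -> RGB, shape (H, W, 3)
#     batch = center_crop_and_resize(frame, 456)     # -> (1, 456, 456, 3), float32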
class EfficientNetExtractor(object):
"""Extracts EfficientNet features for RGB frames.
"""
def __init__(self, img_size=456, max_pooling=True):
self.index = 0
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 1.0
self.session = tf.compat.v1.Session(config=config)
self.graph = tf.compat.v1.get_default_graph()
tf.compat.v1.keras.backend.set_session(self.session)
self.model = EfficientNetB5(
weights='pretrained/efficientnet/efficientnet-b5_noisy-student_notop.h5',
include_top=False,
pooling='avg')
self.img_size = img_size
self.block7 = self.model.output
self.block6 = self.model.layers[-48].output
def extract_rgb_frame_features(self, frame_rgb):
assert len(frame_rgb.shape) == 4
assert frame_rgb.shape[3] == 3 # 3 channels (R, G, B)
with self.graph.as_default():
tf.keras.backend.set_session(self.session)
block7, block6 = self.session.run([self.block7, self.block6], feed_dict={self.model.input: frame_rgb})
        # The original spatial pooling of block6 is truncated in the source; taking the
        # spatial mean here is an assumption so the result is shape-compatible with block7.
        block6_pooled = np.reshape(block6, [block6.shape[0], -1, block6.shape[-1]]).mean(axis=1)
        return np.hstack([block7, block6_pooled])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@version: 1.4.0
@file: GSP_main.py
@time: 2021/1/26 10:50
@functions: graph signal processing main script
@update: support Yeo-ICN definition
@update: support ICN-level brain activity and connecitivty strength saving
"""
import numpy as np
import glob
import os
import time
import matplotlib.pyplot as plt
from pygsp import graphs, filters, plotting
from GSP_utilities import surrogate_BOLD_create, save_variable, load_variable
import pandas as pd
from dppd import dppd
dp, X = dppd()
# 1. path locations and parameters
start = time.time()
deriv_path = '/home/amax/data/cye/MScohort_BIDS_clean/derivatives'
connectome_path = os.path.join(deriv_path, 'mrtrix')
xcpengine_path = os.path.join(deriv_path, 'xcpengine')
network_assign_path = 'CAB-NP_v1.1_Labels-ReorderedbyNetworks_Yeo.csv'
num_BOLD_timepoints = 180
num_rand = 100 # number of surrogates
functional_type = 'BOLD'
tract_type = 'meanlength' # one of the following: invlength, invnodevol, level-participant_connectome, meanlength
ICN_type = 'Yeo' # one of the following: 'Yeo', 'Cole'
normalize_type = 'both' # 'W': normalize W; 'L': normalize Laplacian (Preti method); 'both': normalize both W and Laplacian
# 2. read network assignment for hcpmmp
network_assign_csv = pd.read_csv(network_assign_path)
network_assign_csv = dp(network_assign_csv).mutate(NETWORK=X.Yeo_NETWORK).pd
network_assign_csv = dp(network_assign_csv).mutate(NETWORKKEY=X.Yeo_NETWORKKEY).pd
num_network_df = dp(network_assign_csv).summarise((X.NETWORKKEY, np.max, 'hp_max')).pd
num_network = num_network_df.iloc[0,0]
network_rowindex_ls = []
for network_i in range(1,num_network+1):
df_network = dp(network_assign_csv).filter_by(X.NETWORKKEY == network_i).pd
network_rowindex_ls.append(df_network.index.values)
network_unique_df = dp(network_assign_csv).distinct('NETWORKKEY').pd
network_unique_df = network_unique_df.sort_values(by='NETWORKKEY',ascending = True)
network_unique_df = dp(network_unique_df).filter_by(-X.NETWORK.isin(['Undefine'])).pd # remove undefined ICN
network_unique_df = network_unique_df.reset_index()
# 3. define group of interests
cohort1 = 'ms'
cohort2 = 'nc'
cohort3 = 'nmo'
cohort4 = 'cis'
cohort1_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort1 + '*'))
cohort2_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort2 + '*'))
cohort3_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort3 + '*'))
cohort4_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort4 + '*'))
cohort_connectome_ls = cohort1_connectome_ls + cohort2_connectome_ls + cohort3_connectome_ls + cohort4_connectome_ls
cohort_connectome_ls.sort()
cohort1_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort1 + '*'))
cohort2_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort2 + '*'))
cohort3_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort3 + '*'))
cohort4_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort4 + '*'))
cohort_fmri_ls = cohort1_fmri_ls + cohort2_fmri_ls + cohort3_fmri_ls + cohort4_fmri_ls
cohort_name_ls = [os.path.basename(item) for item in cohort_connectome_ls]
remove_name_ls = ['sub-nc011','sub-nc039', 'sub-nmo002', 'sub-nmo019', 'sub-cis002','sub-cis015', 'sub-ms015'] # problematic cases
cohort_name_ls = list(set(cohort_name_ls) - set(remove_name_ls)) # remove problematic cases
for i in remove_name_ls: # remove problematic cases
cohort_connectome_ls = [x for x in cohort_connectome_ls if i not in x]
cohort_fmri_ls = [x for x in cohort_fmri_ls if i not in x]
cohort_name_ls.sort()
cohort_connectome_ls.sort()
cohort_fmri_ls.sort()
if len(cohort_connectome_ls) != len(cohort_fmri_ls):
print('Number of connectome and xcpengine results not matched')
# 4. create a dataframe to store individual filepath
path_dict = {'subname':cohort_name_ls, 'mrtrix_path': cohort_connectome_ls, 'xcp_path':cohort_fmri_ls}
path_df = pd.DataFrame(path_dict, columns=['subname','mrtrix_path','xcp_path'])
path_df = dp(path_df).mutate(connectome_path=X.mrtrix_path + '/connectome/' + X.subname +'_parc-hcpmmp1_' + tract_type + '.csv').pd
path_df = dp(path_df).mutate(BOLD_series_path=X.xcp_path + '/fcon/hcpmmp/hcpmmp.1D').pd
path_df = dp(path_df).mutate(fmri_map_path=X.xcp_path + '/roiquant/hcpmmp/' + X.subname +'_hcpmmp_mean.csv').pd
print('finished step 4')
# 5. load individual connectome as ndarray
num_parcels = len(network_assign_csv)
num_sub = len(path_df)
path_df_nc = dp(path_df).filter_by(X.subname.str.contains('nc')).pd
num_nc = len(path_df_nc)
nc_idx = path_df_nc.index
connectome_array = np.zeros(shape=(num_parcels, num_parcels, num_sub))
for sub_idx in range(len(path_df)):
indiviudal_connectome = np.genfromtxt(path_df.loc[sub_idx, 'connectome_path'], delimiter=',')
connectome_array[:,:,sub_idx] = indiviudal_connectome
# 6. load individual BOLD series and fill missing part according to /fcon/hcpmmp/missing.txt
BOLD_series_3D = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
for sub_idx in range(len(path_df)):
BOLD_series = np.genfromtxt(path_df.loc[sub_idx, 'BOLD_series_path'])
BOLD_series = BOLD_series.T
missing_path = os.path.join(path_df.loc[sub_idx, 'xcp_path'], 'fcon', 'hcpmmp', 'hcpmmp_missing.txt')
if os.path.exists(missing_path):
missing_parcel_id = np.genfromtxt(missing_path, dtype=int)
if missing_parcel_id.size == 1: # only one parcel missing
if BOLD_series[missing_parcel_id-1,:].sum() != 0:
print("missing parcel not match for subject {}".format(sub_idx))
network_key = network_assign_csv.loc[missing_parcel_id-1,'NETWORKKEY']
network_parcel_idx = network_rowindex_ls[network_key-1]
BOLD_series[missing_parcel_id-1,:] = np.mean(BOLD_series[network_parcel_idx,:])
else: # multiple parcels missing
for missing_idx in missing_parcel_id:
network_key = network_assign_csv.loc[missing_idx-1,'NETWORKKEY']
network_parcel_idx = network_rowindex_ls[network_key-1]
BOLD_series[missing_idx-1,:] = np.mean(BOLD_series[network_parcel_idx,:])
BOLD_series_3D[:,:,sub_idx] = BOLD_series
print('finished loading individual BOLD series and filling missing part')
# 7. load fmri parametric map and fill missing part according to /fcon/hcpmmp/missing.txt
fmri_paramap = np.zeros(shape=(num_parcels, num_sub))
paramap_str = 'mean_alffZ'
for sub_idx in range(len(path_df)):
fmri_map = pd.read_csv(path_df.loc[sub_idx, 'fmri_map_path'],index_col=0)
fmri_map = fmri_map.loc[:,paramap_str]
missing_path = os.path.join(path_df.loc[sub_idx, 'xcp_path'], 'fcon', 'hcpmmp', 'hcpmmp_missing.txt')
if os.path.exists(missing_path):
missing_parcel_id = np.genfromtxt(missing_path, dtype=int)
if missing_parcel_id.size == 1: # only one parcel missing
if not np.isnan(fmri_map[missing_parcel_id]):
print("missing parcel not match for subject {}".format(sub_idx))
network_key = network_assign_csv.loc[missing_parcel_id-1,'NETWORKKEY']
network_parcel_idx = network_rowindex_ls[network_key-1]
fmri_map[int(missing_parcel_id)] = np.mean(fmri_map[network_parcel_idx])
fmri_map = fmri_map.to_numpy()
else: # multiple parcels missing
network_key = network_assign_csv.loc[missing_parcel_id-1,'NETWORKKEY']
network_rowindex_ls = np.array(network_rowindex_ls, dtype=object)
network_parcel_idx = network_rowindex_ls[network_key-1]
for parcel_i in range(missing_parcel_id.size):
fmri_map[int(missing_parcel_id[parcel_i])] = np.mean(fmri_map[network_parcel_idx[parcel_i]])
fmri_map = fmri_map.to_numpy()
fmri_paramap[:,sub_idx] = fmri_map
print('finished loading fmri parametric map and fill missing part')
# 8. load connectome and functional signal and do GSP
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
func_sig = BOLD_series_3D
s_head_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
s_rand_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub, num_rand))
else:
raise ValueError('undefined functional signal')
G_U_cohort = np.zeros(shape=(num_parcels, num_parcels, num_sub))
for sub_idx in range(len(path_df)):
W = np.genfromtxt(path_df.loc[sub_idx, 'connectome_path'], delimiter=',')
# Symmetric Normalization of adjacency matrix
D = np.diag(np.sum(W,1)) #degree
D_power = np.power(D, (-1/2))
D_power[np.isinf(D_power)] = 0
Wsymm = D_power @ W @ D_power
#The eigenvector matrix G.U is used to define the Graph Fourier Transform of the graph signal S
if normalize_type == 'W':
G = graphs.Graph(Wsymm)
G.compute_fourier_basis()
G_U_cohort[:,:,sub_idx] = G.U
U = G.U
elif normalize_type == 'L':
G = graphs.Graph(W, lap_type = 'normalized')
G.compute_fourier_basis()
G_U_cohort[:,:,sub_idx] = G.U
U = G.U
elif normalize_type == 'both':
Wsymm = np.triu(Wsymm) + np.triu(Wsymm).T - np.diag(np.triu(Wsymm).diagonal()) # force symmetric
G = graphs.Graph(Wsymm, lap_type = 'normalized')
G.compute_fourier_basis()
G_U_cohort[:,:,sub_idx] = G.U
U = G.U
# L = np.eye(len(Wsymm)) - Wsymm
# lamda, U = np.linalg.eig(L)
# U = U[:, np.argsort(lamda)]
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
s_head = U.T @ func_sig[:,:,sub_idx]
s_head_cohort[:,:,sub_idx] = s_head
# calcualte surrogate for individual
s_rand_cohort[:,:,sub_idx,:] = surrogate_BOLD_create(U, func_sig[:,:,sub_idx], num_rand)
print('finished Graph Fourier Transform')
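# Sanity-check sketch of the graph Fourier transform used above (toy signal, not project data):
# U is orthonormal, so transforming and inverse-transforming recovers the original signal.
#
#     x = np.random.randn(num_parcels, 1)
#     x_hat = U.T @ x          # forward GFT (as for s_head above)
#     x_rec = U @ x_hat        # inverse GFT
#     assert np.allclose(x, x_rec)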
# save_variable(G_U_cohort, 'G_U_cohort.pkl')
# save_variable(s_head_cohort, 's_head_cohort.pkl')
# save_variable(s_rand_cohort, 's_rand_cohort.pkl')
# G_U_cohort = load_variable('G_U_cohort.pkl')
# s_head_cohort = load_variable('s_head_cohort.pkl')
# s_rand_cohort = load_variable('s_rand_cohort.pkl')
# 8.5(optional). plot Sihag2020 plot
# take nc001 as example
nc001_idx = path_df.subname[path_df.subname == 'sub-nc001'].index.tolist()[0]
s_low = G_U_cohort[:,0:4, nc001_idx] @ s_head_cohort[0:4,:,nc001_idx]
s_high = G_U_cohort[:,-55:-51, nc001_idx] @ s_head_cohort[-55:-51,:,nc001_idx]
np.savetxt("nc001_s_low_both.csv", s_low, delimiter=",")
np.savetxt("nc001_s_high_both.csv", s_high, delimiter=",")
# 9. calculate the median-split threshold
NC_index = [cohort_name_ls.index(x) for x in cohort_name_ls if 'nc' in x]
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
s_head_NC = s_head_cohort[:,:,NC_index]
s_head_NC_square = np.power(s_head_NC, 2)
#s_head_NC_square = np.power(s_head_NC_square, 1/2)
s_head_NC_square_mean = np.mean(s_head_NC_square, (1,2)) # average for each timepoint and each subject
s_head_NC_AUCTOT = np.trapz(s_head_NC_square_mean)
i=0
AUC=0
while AUC < s_head_NC_AUCTOT/2:
AUC = np.trapz(s_head_NC_square_mean[:i])
i = i + 1
cutoff = i-1
print('finished calculating the median-split threshold')
print('cutoff = {}'.format(cutoff))
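# Illustration of the median-split rule above: the cutoff is the smallest index whose
# cumulative spectral energy reaches half of the total area under the NC-group mean
# squared GFT coefficients, i.e. approximately
#
#     np.trapz(s_head_NC_square_mean[:cutoff]) ~= 0.5 * np.trapz(s_head_NC_square_mean)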
# 10. calculate decoupling index for empirical data
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
s_aligned_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
s_liberal_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
for sub_idx in range(len(path_df)):
s_aligned_cohort[:,:,sub_idx] = G_U_cohort[:,0:cutoff, sub_idx] @ s_head_cohort[0:cutoff,:,sub_idx]
s_liberal_cohort[:,:,sub_idx] = G_U_cohort[:,cutoff-1:-1, sub_idx] @ s_head_cohort[cutoff-1:-1,:,sub_idx]
s_aligned_individual = np.linalg.norm(s_aligned_cohort, ord=2, axis=1)
s_liberal_individual = np.linalg.norm(s_liberal_cohort, ord=2, axis=1)
s_deCoupIdx_individual = s_liberal_individual / s_aligned_individual
s_aligned = np.mean(s_aligned_individual[:,nc_idx], axis=1)
s_liberal = np.mean(s_liberal_individual[:,nc_idx], axis=1)
s_deCoupIdx_node = s_liberal/s_aligned # only for NC
print('finished calculating decoupling index for empirical data')
# 11. calculate decoupling index for surrogate data only for NC
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
s_aligned_cohort_rand = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_nc, num_rand))
s_liberal_cohort_rand = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_nc, num_rand))
for i, sub_idx in enumerate(nc_idx):
for rand_idx in range(num_rand):
s_aligned_cohort_rand[:,:,i,rand_idx] = G_U_cohort[:,0:cutoff, sub_idx] @ s_rand_cohort[0:cutoff,:,sub_idx,rand_idx]
s_liberal_cohort_rand[:,:,i,rand_idx] = G_U_cohort[:,cutoff-1:-1, sub_idx] @ s_rand_cohort[cutoff-1:-1,:,sub_idx,rand_idx]
# norm for BOLD timepoints
s_aligned_norm_rand = np.linalg.norm(s_aligned_cohort_rand, ord=2, axis=1)
s_liberal_norm_rand = np.linalg.norm(s_liberal_cohort_rand, ord=2, axis=1)
# average for cohorts
s_aligned_rand = np.mean(s_aligned_norm_rand, axis=1)
s_liberal_rand = np.mean(s_liberal_norm_rand, axis=1)
# decoupling index
s_deCoupIdx_node_rand = s_liberal_rand/s_aligned_rand
print('finished calculating decoupling index for surrogate data')
# 12. network-level harmonics for emperical and surrogate data
s_aligned_network = np.zeros(shape=(num_network))
s_liberal_network = np.zeros(shape=(num_network))
s_aligned_network_individual = np.zeros(shape=(num_network, num_sub))
s_liberal_network_individual = np.zeros(shape=(num_network, num_sub))
s_aligned_network_rand = np.zeros(shape=(num_network, num_rand))
s_liberal_network_rand = np.zeros(shape=(num_network, num_rand))
for i in range(num_network):
s_aligned_network[i] = np.mean(s_aligned[network_rowindex_ls[i]])
s_liberal_network[i] = np.mean(s_liberal[network_rowindex_ls[i]])
s_aligned_network_individual[i,:] = np.mean(s_aligned_individual[network_rowindex_ls[i],:], axis=0)
s_liberal_network_individual[i,:] = np.mean(s_liberal_individual[network_rowindex_ls[i],:], axis=0)
s_aligned_network_rand[i,:] = np.mean(s_aligned_rand[network_rowindex_ls[i],:], axis=0)
s_liberal_network_rand[i,:] = np.mean(s_liberal_rand[network_rowindex_ls[i],:], axis=0)
s_deCoupIdx_network = s_liberal_network/s_aligned_network
s_deCoupIdx_network_individual = s_liberal_network_individual/s_aligned_network_individual
s_deCoupIdx_network_rand = s_liberal_network_rand/s_aligned_network_rand
# 13. brain-level harmonics for emperical and surrogate data
s_aligned_brain = np.mean(s_aligned)
s_liberal_brain = np.mean(s_liberal)
s_deCoupIdx_brain = s_liberal_brain/s_aligned_brain
s_aligned_brain_individual = np.mean(s_aligned_individual, axis=0)
s_liberal_brain_individual = np.mean(s_liberal_individual, axis=0)
s_deCoupIdx_brain_individual = s_liberal_brain_individual/s_aligned_brain_individual
s_aligned_brain_rand = np.mean(s_aligned_rand, axis=0)
s_liberal_brain_rand = np.mean(s_liberal_rand, axis=0)
s_deCoupIdx_brain_rand = s_liberal_brain_rand/s_aligned_brain_rand
print('s_deCoupIdx_brain = {}'.format(s_deCoupIdx_brain))
# 14. significance of surrogate for plot
# node-level
s_deCoupIdx_node_significance = np.logical_or((np.percentile(s_deCoupIdx_node_rand, 5, axis=1) >= s_deCoupIdx_node), (np.percentile(s_deCoupIdx_node_rand, 95, axis=1) <= s_deCoupIdx_node))
s_deCoupIdx_node_significance = s_deCoupIdx_node_significance.astype(int)  # np.int is removed in NumPy >= 1.24
# network-level
s_deCoupIdx_network_significance = np.logical_or((np.percentile(s_deCoupIdx_network_rand, 5, axis=1) >= s_deCoupIdx_network), (np.percentile(s_deCoupIdx_network_rand, 95, axis=1) <= s_deCoupIdx_network))
s_deCoupIdx_network_significance = s_deCoupIdx_network_significance.astype(int)
# brain-level
s_deCoupIdx_brain_significance = np.logical_or((np.percentile(s_deCoupIdx_brain_rand, 5, axis=0) >= s_deCoupIdx_brain), (np.percentile(s_deCoupIdx_brain_rand, 95, axis=0) <= s_deCoupIdx_brain))
# 15. save results to csv
if normalize_type == 'W':
normalize_str = '_W'
elif normalize_type == 'L':
normalize_str = '_L'
elif normalize_type == 'both':
normalize_str = '_both'
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
csv_folder = 'BOLD_4D_' + tract_type + '_' + normalize_str
if not os.path.exists(os.path.abspath(csv_folder)):
os.mkdir(os.path.abspath(csv_folder))
# save surrogate (ndarray with num_rand × num_region)
s_deCoupIdx_node_rand_df = pd.DataFrame(data = s_deCoupIdx_node_rand.T, columns = network_assign_csv.loc[:,'LABEL'])
s_deCoupIdx_network_rand_df = pd.DataFrame(data = s_deCoupIdx_network_rand.T, columns = network_unique_df.loc[:,'NETWORK'])
s_deCoupIdx_brain_rand_df = pd.DataFrame(data = s_deCoupIdx_brain_rand)
s_deCoupIdx_node_rand_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_node_rand_df.csv'))
s_deCoupIdx_network_rand_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_' + '-network_rand_df.csv'))
s_deCoupIdx_brain_rand_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_brain_rand_df.csv'))
# save surrogate significance (ndarray with 1 × num_region)
s_deCoupIdx_node_significance_df = pd.DataFrame(data = np.expand_dims(s_deCoupIdx_node_significance, axis=0), columns = network_assign_csv.loc[:,'LABEL'])
s_deCoupIdx_network_significance_df = pd.DataFrame(data = np.expand_dims(s_deCoupIdx_network_significance, axis=0), columns = network_unique_df.loc[:,'NETWORK'])
s_deCoupIdx_node_significance_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_node_significance_df.csv'))
s_deCoupIdx_network_significance_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_' + '-network_significance_df.csv'))
with open(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_brain_significance.txt'), 'w') as output_file:
output_file.write(str(s_deCoupIdx_brain_significance))
# save empirical harmonics for NC cohort (for plot usage, ndarray with 1 × num_region)
s_deCoupIdx_node_empirical_df = pd.DataFrame(data = np.expand_dims(s_deCoupIdx_node, axis=0), columns = network_assign_csv.loc[:,'LABEL'])
s_deCoupIdx_network_empirical_df = pd.DataFrame(data = np.expand_dims(s_deCoupIdx_network, axis=0), columns = network_unique_df.loc[:,'NETWORK'])
s_deCoupIdx_node_empirical_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_node_empirical_df.csv'))
s_deCoupIdx_network_empirical_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_' +'-network_empirical_df.csv'))
with open(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_brain_empirical.txt'), 'w') as output_file:
output_file.write(str(s_deCoupIdx_brain))
# save subject-level harmonics (ndarray with num_sub × num_region)
s_deCoupIdx_node_individual_df = pd.DataFrame(data = s_deCoupIdx_individual.T, columns = network_assign_csv.loc[:,'LABEL'])
s_deCoupIdx_network_individual_df = pd.DataFrame(data = s_deCoupIdx_network_individual.T, columns = network_unique_df.loc[:,'NETWORK'])
s_deCoupIdx_brain_individual_df = pd.DataFrame(data = s_deCoupIdx_brain_individual)
s_deCoupIdx_node_individual_df = pd.concat([path_df.loc[:,'subname'],s_deCoupIdx_node_individual_df],axis=1)
s_deCoupIdx_network_individual_df = pd.concat([path_df.loc[:,'subname'],s_deCoupIdx_network_individual_df],axis=1)
s_deCoupIdx_brain_individual_df = pd.concat([path_df.loc[:,'subname'],s_deCoupIdx_brain_individual_df],axis=1)
s_deCoupIdx_node_individual_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_node_individual_df.csv'))
s_deCoupIdx_network_individual_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_' + '-network_individual_df.csv'))
s_deCoupIdx_brain_individual_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_brain_individual_df.csv'))
# 16.(optional) save connectivity strength
# parcel-level
connectome_parcel_individual = np.zeros(shape=(num_sub, num_parcels))
# mean of nonzero
def non_zero_mean(np_arr):
exist = (np_arr != 0)
num = np_arr.sum(axis=1)
den = exist.sum(axis=1)
return num/den
for sub_idx in range(num_sub):
connectome_parcel_individual[sub_idx,:] = non_zero_mean(connectome_array[:,:,sub_idx])
connectome_parcel_individual_df = pd.DataFrame(data = connectome_parcel_individual, columns = network_assign_csv.loc[:,'LABEL'])
connectome_parcel_individual_df = pd.concat([path_df.loc[:,'subname'], connectome_parcel_individual_df],axis=1)
connectome_parcel_individual_df.to_csv(os.path.join(os.path.abspath(csv_folder), 'connectome_' + '-parcel_individual_df.csv'))
# ICN-level
connectome_network_individual = np.zeros(shape=(num_network, num_sub))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The Phantom class is instantiated with a ground-truth phantom and corresponding material properties data. The get_projections method simulates data acquisition and returns radiographs for the specified theta values.
"""
import sys
import os
import numpy as np
import pandas as pd
from scipy import misc
import h5py
import time
from scipy.integrate import simps
import matplotlib.pyplot as plt
import cv2
from tomopy import project
from scipy.ndimage.filters import gaussian_filter
from tomo_twin.pg_filter import add_phase_contrast
model_data_path = '../model_data'
class Phantom:
def __init__(self, vol, materials, res, energy_pts, bits = 16, data_path = model_data_path):
'''
Parameters
----------
vol : np.array
labeled (segmented / ground-truth) volume. voxel values are in finite range [0,...n_materials-1].
materials : dict
dict of material names and their respective density g/cc, e.g. {"Fe" : 7.87, "Al": 2.7}
res : float
voxel size in microns
energy_pts : float or np.array
list of energies
bits : int
16 for 16 bit camera
data_path : str
path to exported XOP data
'''
# deal with materials
self.bits = bits
self.res = res
self.data_path = data_path
self.energy_pts = np.asarray(energy_pts) if type(energy_pts) is float else energy_pts
self.materials = [Material(key, value, \
self.res, \
self.energy_pts, \
data_path = self.data_path) for key, value in materials.items()]
self.sigma_mat = np.concatenate([material.sigma for material in self.materials], axis = 1)
# some numbers
self.n_mat = len(self.materials)
self.n_energies = np.size(self.energy_pts)
# deal with labeled volume
self.vol = vol
self.vol_shape = self.vol.shape
if self.vol.max() != (len(self.materials)-1):
raise ValueError("Number of materials does not match voxel value range.")
if len(self.vol_shape) not in (2,3): raise ValueError("vol must have either 2 or 3 dimensions.")
self.ray_axis = 1 if len(self.vol_shape) == 3 else 0
if len(self.vol_shape) == 3:
self.proj_shape = (self.vol_shape[0], self.vol_shape[-1])
else:
self.proj_shape = (self.vol_shape[-1],)
self.make_volume() # blows up volume into individual energies
def make_volume(self):
'''
Converts the labeled GT volume provided into a volume of sigma values (attenutation coefficient, density and pixel size as pathlength). The resulting shape is (nz, ny, nx) or (n_energies, nz, ny, nx). The "energy" channel is added if multiple energies are requested.
'''
voxel_vals = np.arange(self.n_mat)
self.vol = np.asarray([self.vol]*self.n_energies, dtype = np.float32)
for ie in range(self.n_energies):
for voxel in voxel_vals:
self.vol[ie, self.vol[ie] == voxel] = self.sigma_mat[ie,voxel]
if self.n_energies == 1:
self.vol = self.vol[0]
return
else:
return
def get_projections(self, theta = (0,180,180), beam = None, noise = 0.01, blur_size = 5, detector_dist = 0.0):
'''
Acquire projections on the phantom.
Returns
-------
np.array
output shape is a stack of radiographs (nthetas, nrows, ncols)
Parameters
----------
theta : tuple
The tuple must be defined as (starting_theta, ending_theta, number_projections). The angle is intepreted as degrees.
beam : np.array
The flat-field (beam array) must be provided with shape (1, nrows, ncols) or (n_energies, nrows, ncols).
noise : float
The noise parameter is interpreted as a fraction (0,1). The noise transforms the pixel map I(y,x) in the projection space as I(y,x) --> I(y,x)*(1 + N(mu=0, sigma=noise)).
'''
# make theta array in radians
theta = np.linspace(*theta, endpoint = True)
theta = np.radians(theta)
# make beam array (if not passed)
if beam is None:
beam = np.ones(self.proj_shape, dtype = np.float32)
beam = beam*(2**self.bits-1)
# if monochromatic beam
if self.n_energies == 1:
projs = project(self.vol, theta, pad = False, emission = False)
projs = projs*beam
# scintillator / detector blurring
if blur_size > 0:
projs = [proj for proj in projs]
projs = Parallelize(projs, gaussian_filter, \
procs = 12, \
sigma = 0.3*(0.5*(blur_size - 1) - 1) + 0.8, \
order = 0)
projs = np.asarray(projs)
# in-line phase contrast based on detector-sample distance (cm)
if detector_dist > 0.0:
pad_h = int(projs.shape[1]*0.4)
projs = np.pad(projs, ((0,0), (pad_h,pad_h), (0,0)), mode = 'reflect')
projs = add_phase_contrast(projs, \
pixel_size = self.res*1e-04, \
energy = float(self.energy_pts), \
dist = detector_dist)
projs = projs[:,pad_h:-pad_h,:]
# Poisson noise model (approximated as normal distribution)
projs = np.random.normal(projs, noise*np.sqrt(projs))
# projs = np.random.poisson(projs)
# This actually worked fine
# projs = projs*beam*(1 + np.random.normal(0, noise, projs.shape))
# if polychromatic beam
else:
projs = Parallelize(theta.tolist(), \
_project_at_theta, \
vol = self.vol, \
n_energies = self.n_energies, \
beam = beam, \
noise = noise, procs = 12)
projs = np.asarray(projs)
# saturated pixels
projs = np.clip(projs, 0, 2**self.bits-1)
return projs.astype(np.uint16)
class Material:
# Ideas borrowed from <NAME>'s code for BeamHardeningCorrections (7-BM github)
def __init__(self, name, density, path_len, energy_pts, scintillator_flag = False, data_path = None):
"""
Parameters
----------
name : str
string describing material name. Typically, use chemical formula, e.g. Fe, Cu, etc.
density : float
g/cm3 units
path_len : float
thickness for components (filters, scintillators, etc.) and pixel size for materials in phantom
energy_pts : np array
listing the energy_pts requested. shape is (n,)
scintillator_flag : bool
return absorption data instead of attenuation, if material is scintillator
sigma : np.array
sigma array with dimensions (n_energies, 1)
att_coeff : np.array
mass attenuation coefficient array (n_energies, 1)
data_path : str
path to exported XOP data
"""
self.name = name
self.data_path = data_path
self.density = density # g/cc
self.scintillator_flag = scintillator_flag
self.path_len = path_len # um
self.energy_pts = energy_pts
self.calc_sigma()
def read_attcoeff(self):
"""
# att_coeff : cm2/g units, array dimensions of (n_energies,)
"""
df = pd.read_csv(os.path.join(self.data_path, 'materials', self.name + "_properties_xCrossSec.dat"), sep = '\t', delimiter = " ", header = None)
old_energy_pts = np.asarray(df[0])/1000.0
if self.scintillator_flag:
att_coeff = np.asarray(df[3])
else:
att_coeff = np.asarray(df[6])
self.att_coeff = np.interp(self.energy_pts, old_energy_pts, att_coeff).reshape(-1,1)
def calc_sigma(self):
self.read_attcoeff()
self.sigma = np.multiply(self.att_coeff, self.density)*(self.path_len*1e-4) # att_coeff in cm2/g, rho in g/cm3, res in cm
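    # Example sketch (illustrative numbers; requires the exported XOP data under data_path):
    # sigma is the dimensionless per-voxel attenuation, so the transmitted fraction per voxel
    # follows Beer-Lambert:
    #
    #     fe = Material("Fe", density=7.87, path_len=1.17, energy_pts=np.array([20.0]), data_path=model_data_path)
    #     transmission = np.exp(-fe.sigma)   # fraction of intensity left after one voxel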
def read_source(file_path, energy_pts, res = 1.17, img_shape = (1200,1920), bits = 16, exp_fac = 0.92):
"""
Reads data from a source hdf5 file, in a format specific to this code. The original data is adapted from DABAX in XOP.
returns b : beam array shape (n_energies, V, H) or (n_energies, 1)
Two choices:
1. enter 2D shape to incorporate vertically varying fan beam profile and spectral variation. If 2D, crops the source within the FOV of Camera defined by (res, shape). Assumes FOV is in vertical center of fan beam.
2. enter 1D shape to ignore and get only spectral variation.
Parameters
----------
file_path : str
filepath for reading beam source, e.g. bending magnet, undulator or monochromatic source, etc.
energy_pts : np.array
energy points in keV, array with dimensions (n_energies,)
res : float
pixel resolution of camera in micrometers
shape : np.array
pixel array size V, H
"""
if type(energy_pts) is float:
        energy_pts = np.asarray([energy_pts])
import numpy as np
import datetime
from bayes_opt import BayesianOptimization, UtilityFunction
from scipy import optimize
from pyemittance.emit_eval_example import eval_emit_machine
class Opt:
def __init__(self, init_scan=[-6, -4, -2, 0]):
self.energy = 0.135
self.varscan = init_scan
self.num_points_adapt = 7
self.pbounds = ((0.46, 0.485), (-0.01, 0.01), (-0.01, 0.01))
self.plot = False
self.save_runs = False
self.online = False
self.uncertainty_lim = 0.25
self.timestamp = None
self.total_num_points = 0
self.seed = 12
def evaluate(self, varx, vary, varz):
# fixed initial varscan
quad_init = self.varscan
config = [varx, vary, varz]
out_dict, self.total_num_points = eval_emit_machine(config,
quad_init=list(quad_init),
online=self.online,
name='LCLS',
meas_type='OTRS',
adapt_ranges=True,
num_points=self.num_points_adapt,
check_sym=True,
infl_check=True,
add_pnts=True,
show_plots=self.plot,
use_prev_meas=True,
quad_tol=0.02,
save_runs=self.save_runs,
calc_bmag=True)
return out_dict
def evaluate_bo(self, varx, vary, varz):
out_dict = self.evaluate(varx, vary, varz)
emit = out_dict['nemit']
emit_err = out_dict['nemit_err']
if np.isnan(emit):
print("NaN emit")
return np.nan, np.nan
if emit_err / emit < self.uncertainty_lim:
# save total number of points added
timestamp = (datetime.datetime.now()).strftime("%Y-%m-%d_%H-%M-%S")
f = open(f"bo_points_meas_iter.txt", "a+")
f.write(f'{varx},{vary},{varz},{emit},{emit_err},{self.total_num_points},{timestamp}\n')
f.close()
return -emit, -emit_err
def run_bo_opt_w_reject(self, rnd_state=11, init_pnts=3, n_iter=120):
np.random.seed(self.seed)
# Set domain
bounds = {'varx': self.pbounds[0], 'vary': self.pbounds[1], 'varz': self.pbounds[2]}
# Run BO
optimizer = BayesianOptimization(
f=None,
pbounds=bounds,
random_state=rnd_state,
verbose=2
)
# utility = UtilityFunction(kind="ucb", kappa=0.1, xi=0.0)
utility = UtilityFunction(kind="ucb", kappa=2.5, xi=0.0)
target_list = []
# init random points
x = []
emit_list = []
emit_err_list = []
emit_res = (np.nan, np.nan)
while len(emit_list) < init_pnts:
x_i = [np.random.uniform(self.pbounds[0][0], self.pbounds[0][1]),
np.random.uniform(self.pbounds[1][0], self.pbounds[1][1]),
np.random.uniform(self.pbounds[2][0], self.pbounds[2][1])]
            emit_res = self.evaluate_bo(x_i[0], x_i[1], x_i[2])  # evaluate_bo returns (-emit, -emit_err) as unpacked below
if not np.isnan(emit_res[0]) and not np.isnan(emit_res[1]):# and abs(emit_res[0]) > 58e-8:
# take large init emittances
x.append(x_i)
emit_list.append(emit_res[0])
emit_err_list.append(emit_res[1])
print("Init configs: ", x)
print("Init emit: ", emit_list)
# get init points
for i in range(len(x)):
# target, error = np.nan, np.nan
# hile np.isnan(target) or np.isnan(error) or error/target > self.uncertainty_lim:
next_point = {'varx': x[i][0],
'vary': x[i][1],
'varz': x[i][2]
}
# # evaluate next point
target = emit_list[i]
optimizer.register(params=next_point, target=target)
if target_list and target > np.max(target_list):
color = '\033[95m', '\033[0m'
else:
color = '\u001b[30m', '\033[0m'
print(
f"{color[0]}iter {i} | target {-1 * target/1e-6:.3f} | config {next_point['varx']:.6f} "
f"{next_point['vary']:.6f} {next_point['varz']:.6f}{color[1]}")
target_list.append(target)
# BO iters
for i in range(n_iter):
target, error = np.nan, np.nan
while np.isnan(target) or np.isnan(error) or error / target > self.uncertainty_lim:
next_point = optimizer.suggest(utility)
                target, error = self.evaluate_bo(**next_point)
optimizer.register(params=next_point, target=target)
if target_list and target > np.max(target_list):
color = '\033[95m', '\033[0m'
else:
color = '\u001b[30m', '\033[0m'
print(
f"{color[0]}iter {i} | target {-1 * target/1e-6:.3f} | config {next_point['varx']:.6f}"
f" {next_point['vary']:.6f} {next_point['varz']:.6f}{color[1]}")
emit_list.append(target)
emit_err_list.append(error)
target_list.append(target)
timestamp = (datetime.datetime.now()).strftime("%Y-%m-%d_%H-%M-%S")
np.save(f'bo_opt_res_emit_list_{rnd_state}_{init_pnts}_{n_iter}_{timestamp}.npy', emit_list,
allow_pickle=True)
np.save(f'bo_opt_res_emit_err_list_{rnd_state}_{init_pnts}_{n_iter}_{timestamp}.npy', emit_err_list,
allow_pickle=True)
np.save(f'bo_opt_res_{rnd_state}_{init_pnts}_{n_iter}_{timestamp}.npy', optimizer.res,
allow_pickle=True)
return optimizer
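    # Minimal sketch of the suggest/register pattern used above (standalone toy objective,
    # not part of the emittance pipeline):
    #
    #     opt = BayesianOptimization(f=None, pbounds={'x': (-2, 2)}, random_state=1)
    #     util = UtilityFunction(kind="ucb", kappa=2.5, xi=0.0)
    #     for _ in range(10):
    #         p = opt.suggest(util)
    #         opt.register(params=p, target=-(p['x'] - 0.5) ** 2)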
def eval_simplex(self, x):
out_dict = self.evaluate(x[0], x[1], x[2])
timestamp = (datetime.datetime.now()).strftime("%Y-%m-%d_%H-%M-%S")
emit = out_dict['nemit']
err = out_dict['nemit_err']
if np.isnan(emit) or (err / emit > self.uncertainty_lim):
print("NaN or high uncertainty emittance, returning 100.")
f = open(f"simplex_run.txt", "a+")
f.write(f'{x[0]},{x[1]},{x[2]},{np.nan},{np.nan},{self.total_num_points},{timestamp}\n')
f.close()
return 100
f = open(f"simplex_run.txt", "a+")
f.write(f'{x[0]},{x[1]},{x[2]},{emit},{err},{self.total_num_points},{timestamp}\n')
f.close()
return emit
def run_simplex_opt(self, max_iter):
np.random.seed(self.seed)
initial_guess = np.array(
[np.random.uniform(self.pbounds[0][0], self.pbounds[0][1]),
np.random.uniform(self.pbounds[1][0], self.pbounds[1][1]),
np.random.uniform(self.pbounds[2][0], self.pbounds[2][1])
])
# initial_guess1 = self.pbounds[0][0]+ np.random.rand(1) * (self.pbounds[0][1] - self.pbounds[0][0])
# initial_guess2 = self.pbounds[1][0]+ np.random.rand(1) * (self.pbounds[1][1] - self.pbounds[1][0])
# initial_guess3 = self.pbounds[2][0]+ np.random.rand(1) * (self.pbounds[2][1] - self.pbounds[2][0])
# initial_guess = np.array([initial_guess1, initial_guess2, initial_guess3])
min = optimize.minimize(self.eval_simplex, initial_guess,
method='Nelder-Mead', options={'maxiter': max_iter,
'return_all': True,
'adaptive': True,
'fatol': 0.1 * 0.75,
'xatol': 0.00001
},
)
timestamp = (datetime.datetime.now()).strftime("%Y-%m-%d_%H-%M-%S")
np.save(f'simplex_allvecs_{timestamp}.npy', min["allvecs"], allow_pickle=True)
f = open(f"simplex_allres_{timestamp}.txt", "a+")
f.write(min)
f.close()
return min
def run_bo_opt(self, rnd_state=11, init_pnts=3, n_iter=200):
# Set domain
bounds = {'varx': self.pbounds[0], 'vary': self.pbounds[1], 'varz': self.pbounds[2]}
# Run BO
optimizer = BayesianOptimization(
f=self.evaluate,
pbounds=bounds,
random_state=rnd_state,
)
# optimizer.maximize(init_points=init_pnts, n_iter=n_iter)
optimizer.maximize(init_points=init_pnts,
n_iter=n_iter,
kappa=0.01
# kappa_decay = 0.8,
# kappa_decay_delay = 25
)
return optimizer
def run_simplex_opt_norm(self, max_iter):
np.random.seed(self.seed)
# below code based on Badger implementation of simplex for the ACR
# vars init values
        x0 = [np.random.uniform(self.pbounds[0][0], self.pbounds[0][1]),
              np.random.uniform(self.pbounds[1][0], self.pbounds[1][1]),
              np.random.uniform(self.pbounds[2][0], self.pbounds[2][1])]
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import networkx as nx
import random
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
from copy import deepcopy
sys.path.append('%s/../common' % os.path.dirname(os.path.realpath(__file__)))
from cmd_args import cmd_args
from graph_embedding import S2VGraph
from rl_common import local_args, load_graphs, load_base_model, attackable
class GeneticAgent(object):
def __init__(self, classifier, s2v_g, n_edges_attack):
self.s2v_g = s2v_g
self.n_edges_attack = n_edges_attack
self.classifier = classifier
g = s2v_g.to_networkx()
comps = [c for c in nx.connected_component_subgraphs(g)]
self.comps = comps
self.set_id = {}
self.solution = None
for i in range(len(comps)):
for j in comps[i].nodes():
self.set_id[j] = i
self.population = []
for k in range(cmd_args.population_size):
added = []
for k in range(n_edges_attack):
while True:
i = np.random.randint(len(g))
j = np.random.randint(len(g))
if self.set_id[i] != self.set_id[j] or i == j or (i, j) in added:
continue
break
added.append((i, j))
self.population.append(added)
def rand_action(self, i):
region = self.comps[self.set_id[i]].nodes()
assert len(region) > 1
while True:
j = region[np.random.randint(len(region))]
if j == i:
continue
assert self.set_id[i] == self.set_id[j]
break
return j
def get_fitness(self):
g_list = []
g = self.s2v_g.to_networkx()
for edges in self.population:
g2 = g.copy()
g2.add_edge(edges[0][0], edges[0][1])
# g2.add_edges_from(edges)
assert nx.number_connected_components(g2) == self.s2v_g.label
g_list.append(S2VGraph(g2, self.s2v_g.label))
log_ll, _, acc = self.classifier(g_list)
acc = acc.cpu().double().numpy()
if self.solution is None:
for i in range(len(self.population)):
if acc[i] < 1.0:
self.solution = self.population[i]
break
nll = -log_ll[:, self.classifier.label_map[self.s2v_g.label]]
return nll
def select(self, fitness):
scores = torch.exp(fitness).cpu().data.numpy()
max_args = np.argsort(-scores)
result = []
for i in range(cmd_args.population_size - cmd_args.population_size // 2):
result.append(deepcopy(self.population[max_args[i]]))
idx = np.random.choice(np.arange(cmd_args.population_size),
size=cmd_args.population_size // 2,
replace=True,
p=scores/scores.sum())
for i in idx:
result.append(deepcopy(self.population[i]))
return result
def crossover(self, parent, pop):
if np.random.rand() < cmd_args.cross_rate:
another = pop[ np.random.randint(len(pop)) ]
if len(parent) != self.n_edges_attack:
return another[:]
if len(another) != self.n_edges_attack:
return parent[:]
t = []
for i in range(self.n_edges_attack):
if np.random.rand() < 0.5:
t.append(parent[i])
else:
t.append(another[i])
return t
else:
return parent[:]
def mutate(self, child):
if len(child) != self.n_edges_attack:
return child
for i in range(self.n_edges_attack):
if np.random.rand()
import glob
import random
import json
import os
import six
import cv2
import numpy as np
from tqdm import tqdm
from time import time
from .train import find_latest_checkpoint
from .data_utils.data_loader import get_image_array, get_segmentation_array,\
DATA_LOADER_SEED, class_colors, get_pairs_from_paths
from .models.config import IMAGE_ORDERING
random.seed(DATA_LOADER_SEED)
def model_from_checkpoint_path(checkpoints_path):
from .models.all_models import model_from_name
assert (os.path.isfile(checkpoints_path+"_config.json")
), "Checkpoint not found."
model_config = json.loads(
open(checkpoints_path+"_config.json", "r").read())
latest_weights = find_latest_checkpoint(checkpoints_path)
assert (latest_weights is not None), "Checkpoint not found."
model = model_from_name[model_config['model_class']](
model_config['n_classes'], input_height=model_config['input_height'],
input_width=model_config['input_width'])
print("loaded weights ", latest_weights)
model.load_weights(latest_weights)
return model
def get_colored_segmentation_image(seg_arr, n_classes, colors=class_colors):
output_height = seg_arr.shape[0]
output_width = seg_arr.shape[1]
seg_img = np.zeros((output_height, output_width, 3))
for c in range(n_classes):
seg_arr_c = seg_arr[:, :] == c
seg_img[:, :, 0] += ((seg_arr_c)*(colors[c][0])).astype('uint8')
seg_img[:, :, 1] += ((seg_arr_c)*(colors[c][1])).astype('uint8')
seg_img[:, :, 2] += ((seg_arr_c)*(colors[c][2])).astype('uint8')
return seg_img
def get_legends(class_names, colors=class_colors):
n_classes = len(class_names)
legend = np.zeros(((len(class_names) * 25) + 25, 125, 3),
dtype="uint8") + 255
class_names_colors = enumerate(zip(class_names[:n_classes],
colors[:n_classes]))
for (i, (class_name, color)) in class_names_colors:
color = [int(c) for c in color]
cv2.putText(legend, class_name, (5, (i * 25) + 17),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0), 1)
cv2.rectangle(legend, (100, (i * 25)), (125, (i * 25) + 25),
tuple(color), -1)
return legend
def overlay_seg_image(inp_img, seg_img):
original_h = inp_img.shape[0]
original_w = inp_img.shape[1]
seg_img = cv2.resize(seg_img, (original_w, original_h), interpolation=cv2.INTER_NEAREST)
fused_img = (inp_img/2 + seg_img/2).astype('uint8')
return fused_img
def concat_lenends(seg_img, legend_img):
new_h = np.maximum(seg_img.shape[0], legend_img.shape[0])
new_w = seg_img.shape[1] + legend_img.shape[1]
out_img = np.zeros((new_h, new_w, 3)).astype('uint8') + legend_img[0, 0, 0]
out_img[:legend_img.shape[0], : legend_img.shape[1]] = np.copy(legend_img)
out_img[:seg_img.shape[0], legend_img.shape[1]:] = np.copy(seg_img)
return out_img
def visualize_segmentation(seg_arr, inp_img=None, n_classes=None,
colors=class_colors, class_names=None,
overlay_img=False, show_legends=False,
prediction_width=None, prediction_height=None):
if n_classes is None:
n_classes = np.max(seg_arr)
seg_img = get_colored_segmentation_image(seg_arr, n_classes, colors=colors)
if inp_img is not None:
original_h = inp_img.shape[0]
original_w = inp_img.shape[1]
seg_img = cv2.resize(seg_img, (original_w, original_h), interpolation=cv2.INTER_NEAREST)
if (prediction_height is not None) and (prediction_width is not None):
seg_img = cv2.resize(seg_img, (prediction_width, prediction_height), interpolation=cv2.INTER_NEAREST)
if inp_img is not None:
inp_img = cv2.resize(inp_img,
(prediction_width, prediction_height))
if overlay_img:
assert inp_img is not None
seg_img = overlay_seg_image(inp_img, seg_img)
if show_legends:
assert class_names is not None
legend_img = get_legends(class_names, colors=colors)
seg_img = concat_lenends(seg_img, legend_img)
return seg_img
def predict(model=None, inp=None, out_fname=None,
checkpoints_path=None, overlay_img=False,
class_names=None, show_legends=False, colors=class_colors,
prediction_width=None, prediction_height=None):
if model is None and (checkpoints_path is not None):
model = model_from_checkpoint_path(checkpoints_path)
assert (inp is not None)
assert ((type(inp) is np.ndarray) or isinstance(inp, six.string_types)),\
"Input should be the CV image or the input file name"
if isinstance(inp, six.string_types):
inp = cv2.imread(inp)
assert len(inp.shape) == 3, "Image should be h,w,3 "
output_width = model.output_width
output_height = model.output_height
input_width = model.input_width
input_height = model.input_height
n_classes = model.n_classes
x = get_image_array(inp, input_width, input_height,
ordering=IMAGE_ORDERING)
pr = model.predict(np.array([x]))[0]
pr = pr.reshape((output_height, output_width, n_classes)).argmax(axis=2)
seg_img = visualize_segmentation(pr, inp, n_classes=n_classes,
colors=colors, overlay_img=overlay_img,
show_legends=show_legends,
class_names=class_names,
prediction_width=prediction_width,
prediction_height=prediction_height)
if out_fname is not None:
cv2.imwrite(out_fname, seg_img)
return pr
def predict_multiple(model=None, inps=None, inp_dir=None, out_dir=None,
checkpoints_path=None, overlay_img=False,
class_names=None, show_legends=False, colors=class_colors,
prediction_width=None, prediction_height=None):
if model is None and (checkpoints_path is not None):
model = model_from_checkpoint_path(checkpoints_path)
if inps is None and (inp_dir is not None):
inps = glob.glob(os.path.join(inp_dir, "*.jpg")) + glob.glob(
os.path.join(inp_dir, "*.png")) + \
glob.glob(os.path.join(inp_dir, "*.jpeg"))
inps = sorted(inps)
assert type(inps) is list
all_prs = []
for i, inp in enumerate(tqdm(inps)):
if out_dir is None:
out_fname = None
else:
if isinstance(inp, six.string_types):
out_fname = os.path.join(out_dir, os.path.basename(inp))
else:
out_fname = os.path.join(out_dir, str(i) + ".jpg")
pr = predict(model, inp, out_fname,
overlay_img=overlay_img, class_names=class_names,
show_legends=show_legends, colors=colors,
prediction_width=prediction_width,
prediction_height=prediction_height)
all_prs.append(pr)
return all_prs
def set_video(inp, video_name):
cap = cv2.VideoCapture(inp)
fps = int(cap.get(cv2.CAP_PROP_FPS))
video_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
video_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
size = (video_width, video_height)
fourcc = cv2.VideoWriter_fourcc(*"XVID")
video = cv2.VideoWriter(video_name, fourcc, fps, size)
return cap, video, fps
def predict_video(model=None, inp=None, output=None,
checkpoints_path=None, display=False, overlay_img=True,
class_names=None, show_legends=False, colors=class_colors,
prediction_width=None, prediction_height=None):
if model is None and (checkpoints_path is not None):
model = model_from_checkpoint_path(checkpoints_path)
n_classes = model.n_classes
cap, video, fps = set_video(inp, output)
while(cap.isOpened()):
prev_time = time()
ret, frame = cap.read()
if frame is not None:
pr = predict(model=model, inp=frame)
fused_img = visualize_segmentation(
pr, frame, n_classes=n_classes,
colors=colors,
overlay_img=overlay_img,
show_legends=show_legends,
class_names=class_names,
prediction_width=prediction_width,
prediction_height=prediction_height
)
else:
break
print("FPS: {}".format(1/(time() - prev_time)))
if output is not None:
video.write(fused_img)
if display:
cv2.imshow('Frame masked', fused_img)
if cv2.waitKey(fps) & 0xFF == ord('q'):
break
cap.release()
if output is not None:
video.release()
cv2.destroyAllWindows()
def evaluate(model=None, inp_images=None, annotations=None,
inp_images_dir=None, annotations_dir=None, checkpoints_path=None):
if model is None:
assert (checkpoints_path is not None),\
"Please provide the model or the checkpoints_path"
model = model_from_checkpoint_path(checkpoints_path)
if inp_images is None:
assert (inp_images_dir is not None),\
"Please provide inp_images or inp_images_dir"
assert (annotations_dir is not None),\
"Please provide inp_images or inp_images_dir"
paths = get_pairs_from_paths(inp_images_dir, annotations_dir)
paths = list(zip(*paths))
inp_images = list(paths[0])
annotations = list(paths[1])
assert type(inp_images) is list
assert type(annotations) is list
tp = np.zeros(model.n_classes)
fp = np.zeros(model.n_classes)
fn = np.zeros(model.n_classes)
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy import (atleast_2d, ComplexWarning, arange, zeros_like, imag, diag,
iscomplexobj, tril, triu, argsort, empty_like)
from .decomp import _asarray_validated
from .lapack import get_lapack_funcs, _compute_lwork
__all__ = ['ldl']
def ldl(A, lower=True, hermitian=True, overwrite_a=False, check_finite=True):
""" Computes the LDLt or Bunch-Kaufman factorization of a symmetric/
hermitian matrix.
This function returns a block diagonal matrix D consisting blocks of size
at most 2x2 and also a possibly permuted unit lower triangular matrix
``L`` such that the factorization ``A = L D L^H`` or ``A = L D L^T``
holds. If ``lower`` is False then (again possibly permuted) upper
triangular matrices are returned as outer factors.
The permutation array can be used to triangularize the outer factors
simply by a row shuffle, i.e., ``lu[perm, :]`` is an upper/lower
triangular matrix. This is also equivalent to multiplication with a
permutation matrix ``P.dot(lu)``, where ``P`` is a column-permuted
identity matrix ``I[:, perm]``.
Depending on the value of the boolean ``lower``, only upper or lower
triangular part of the input array is referenced. Hence, a triangular
matrix on entry would give the same result as if the full matrix is
supplied.
Parameters
----------
a : array_like
Square input array
lower : bool, optional
This switches between the lower and upper triangular outer factors of
the factorization. Lower triangular (``lower=True``) is the default.
hermitian : bool, optional
For complex-valued arrays, this defines whether ``a = a.conj().T`` or
``a = a.T`` is assumed. For real-valued arrays, this switch has no
effect.
overwrite_a : bool, optional
Allow overwriting data in ``a`` (may enhance performance). The default
is False.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
lu : ndarray
The (possibly) permuted upper/lower triangular outer factor of the
factorization.
d : ndarray
The block diagonal multiplier of the factorization.
perm : ndarray
The row-permutation index array that brings lu into triangular form.
Raises
------
ValueError
If input array is not square.
ComplexWarning
If a complex-valued array with nonzero imaginary parts on the
diagonal is given and hermitian is set to True.
Examples
--------
Given an upper triangular array `a` that represents the full symmetric
array with its entries, obtain `l`, 'd' and the permutation vector `perm`:
>>> import numpy as np
>>> from scipy.linalg import ldl
>>> a = np.array([[2, -1, 3], [0, 2, 0], [0, 0, 1]])
>>> lu, d, perm = ldl(a, lower=0) # Use the upper part
>>> lu
array([[ 0. , 0. , 1. ],
[ 0. , 1. , -0.5],
[ 1. , 1. , 1.5]])
>>> d
array([[-5. , 0. , 0. ],
[ 0. , 1.5, 0. ],
[ 0. , 0. , 2. ]])
>>> perm
array([2, 1, 0])
>>> lu[perm, :]
array([[ 1. , 1. , 1.5],
[ 0. , 1. , -0.5],
[ 0. , 0. , 1. ]])
>>> lu.dot(d).dot(lu.T)
array([[ 2., -1., 3.],
[-1., 2., 0.],
[ 3., 0., 1.]])
Notes
-----
This function uses ``?SYTRF`` routines for symmetric matrices and
``?HETRF`` routines for Hermitian matrices from LAPACK. See [1]_ for
the algorithm details.
Depending on the ``lower`` keyword value, only lower or upper triangular
part of the input array is referenced. Moreover, this keyword also defines
the structure of the outer factors of the factorization.
.. versionadded:: 1.1.0
See also
--------
cholesky, lu
References
----------
.. [1] <NAME>, <NAME>, Some stable methods for calculating
inertia and solving symmetric linear systems, Math. Comput. Vol.31,
1977. DOI: 10.2307/2005787
"""
a = atleast_2d(_asarray_validated(A, check_finite=check_finite))
if a.shape[0] != a.shape[1]:
raise ValueError('The input array "a" should be square.')
# Return empty arrays for empty square input
if a.size == 0:
return empty_like(a), empty_like(a)
import os
import yaml
import numpy as np
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import plotly.graph_objs as go
from pypsa import Network
tech_colors = {"All": "rgba(138,43,226,0.5)", # purple
"nuclear": "rgba(255,140,0,0.5)", # orange
"wind_onshore": "rgba(51,100,255,0.5)", # middle-dark blue
"wind_offshore": "rgba(51,51,255,0.5)", # dark blue
"wind_floating": "rgba(51,164,255,0.5)", # middle blue
"pv_utility": "rgba(220,20,60,0.5)", # red
"pv_residential": "rgba(220,20,20,0.5)", # dark red
"ror": "rgba(255,153,255,0.5)", # pink
"ccgt": "rgba(47,79,79,0.5)", # grey
"ocgt": "rgba(105,105,105,0.5)", # other grey
"Li-ion P": "rgba(102,255,178,0.5)", # light green
"Li-ion E": "rgba(102,255,178,0.5)", # light green
"phs": "rgba(0,153,76,0.5)", # dark green
"sto": "rgba(51,51,255,0.5)", # dark blue
"load": "rgba(20,20,20,0.5)", # dark grey
"imports": "rgba(255,215,0,0.5)",
"storage": "rgba(34,139,34,0.5)"}
# TODO: Maybe I should put each 'figure' into objects where I can just update some parts of
# it so that the uploading is faster
class SizingDash:
def __init__(self, output_dir, test_number=None):
# Load css
css_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets/")
self.app = dash.Dash(__name__, assets_url_path=css_folder)
# Load net
# If no test_number is specified, take the last run
self.current_test_number = test_number
if self.current_test_number is None:
self.current_test_number = sorted(os.listdir(output_dir))[-1]
self.output_dir = output_dir
self.net = Network()
self.net.import_from_csv_folder(f"{self.output_dir}{self.current_test_number}/")
if len(self.net.lines) != 0:
self.current_line_id = self.net.lines.index[0]
if len(self.net.links) != 0:
self.current_link_id = self.net.links.index[0]
self.current_bus_id = self.net.buses.index[0]
self.selected_types = sorted(list(set(self.net.generators.type.values)))
def built_app(self):
def get_map():
map_coords = [min(self.net.buses["x"].values) - 5,
max(self.net.buses["x"].values) + 5,
min(self.net.buses["y"].values) - 2,
max(self.net.buses["y"].values) + 2]
fig = go.Figure(layout=go.Layout(
showlegend=False,
geo=dict(
showcountries=True,
scope='world',
lonaxis=dict(
showgrid=True,
gridwidth=1,
range=[map_coords[0], map_coords[1]],
dtick=5
),
lataxis=dict(
showgrid=True,
gridwidth=1,
range=[map_coords[2], map_coords[3]],
dtick=5
)
)
))
# Adding lines to map
if len(self.net.lines) != 0:
# Get minimum s_nom_opt
s_nom_opt_min = min(self.net.lines.s_nom_opt[self.net.lines.s_nom_opt > 0].values)
for i, idx in enumerate(self.net.lines.index):
bus0_id = self.net.lines.loc[idx, ("bus0",)]
bus1_id = self.net.lines.loc[idx, ("bus1",)]
bus0_x = self.net.buses.loc[bus0_id, ("x",)]
bus0_y = self.net.buses.loc[bus0_id, ("y",)]
bus1_x = self.net.buses.loc[bus1_id, ("x",)]
bus1_y = self.net.buses.loc[bus1_id, ("y",)]
color = 'rgba(0,0,255,0.8)'
name = 'AC'
s_nom_mul = self.net.lines.loc[idx, ('s_nom_opt',)] / s_nom_opt_min
if self.net.lines.loc[idx, ("carrier",)] == "DC":
color = 'rgba(255,0,0,0.8)'
name = 'DC'
fig.add_trace(go.Scattergeo(
mode='lines',
lon=[bus0_x, (bus0_x + bus1_x) / 2, bus1_x],
lat=[bus0_y, (bus0_y + bus1_y) / 2, bus1_y],
line=dict(
width=np.log(1 + s_nom_mul),
color=color),
text=[idx, idx, idx],
hoverinfo='text',
name=name
))
# Adding links to map
if len(self.net.links) != 0:
# Get minimum p_nom_opt
p_nom_opt_min = min(self.net.links.p_nom_opt[self.net.links.p_nom_opt > 0].values)
for i, idx in enumerate(self.net.links.index):
bus0_id = self.net.links.loc[idx, ("bus0", )]
bus1_id = self.net.links.loc[idx, ("bus1", )]
bus0_x = self.net.buses.loc[bus0_id, ("x", )]
bus0_y = self.net.buses.loc[bus0_id, ("y", )]
bus1_x = self.net.buses.loc[bus1_id, ("x", )]
bus1_y = self.net.buses.loc[bus1_id, ("y", )]
color = 'rgba(0,0,255,0.8)'
name = 'AC'
p_nom_mul = self.net.links.loc[idx, 'p_nom_opt']/p_nom_opt_min
if self.net.links.loc[idx, ("carrier", )] == "DC":
color = 'rgba(255,0,0,0.8)'
name = 'DC'
fig.add_trace(go.Scattergeo(
mode='lines',
lon=[bus0_x, (bus0_x+bus1_x)/2, bus1_x],
lat=[bus0_y, (bus0_y+bus1_y)/2, bus1_y],
line=dict(
width=np.log(1+p_nom_mul)/4,
color=color),
text=["", f"Init Capacity: {self.net.links.loc[idx, 'p_nom']}<br>"
f"Opt Capacity: {self.net.links.loc[idx, 'p_nom_opt']}", ""],
hoverinfo='text',
name=name
))
# Add points to map
p_noms = np.zeros((len(self.net.buses.index, )))
color = tech_colors['All']
if len(self.selected_types) == 1:
color = tech_colors[self.selected_types[0]]
colors = [color]*len(self.net.buses.index)
for i, bus_id in enumerate(self.net.buses.index):
total_gens = 0
generators = self.net.generators[self.net.generators.bus == bus_id]
# Keep only the reactors of the type we want to display
for t in self.selected_types:
generators_filter = generators[generators.type == t]
p_noms[i] += np.sum(generators_filter["p_nom_opt"].values)
total_gens += len(generators_filter["p_nom"].values)
if total_gens == 0:
# No allowed generation building
colors[i] = 'grey'
elif p_noms[i] == 0:
colors[i] = 'black'
p_nom_max = np.max(p_noms)
if p_nom_max == 0:
p_nom_max = 1 # Prevents cases where there is no installed capacity at all
fig.add_trace(go.Scattergeo(
mode="markers",
lat=self.net.buses['y'].values,
lon=self.net.buses['x'].values,
text=self.net.buses.index,
hoverinfo='text',
marker=dict(
size=10+40*np.log(1+p_noms/p_nom_max)
"""Analyze vote ideal points."""
import os
import numpy as np
import analysis_utils as utils
project_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir))
vote_source_dir = os.path.join(project_dir, "data/senate-votes/114")
# Load voting data.
vote_data_dir = os.path.join(vote_source_dir, "clean")
(votes, senator_indices, bill_indices, voter_map, bill_descriptions,
bill_names, vote_ideal_points_dw_nominate) = utils.load_vote_data(
vote_data_dir)
# Load fitted vote ideal points.
vote_param_dir = os.path.join(vote_source_dir, "fits/params")
(polarity_loc, polarity_scale, popularity_loc, popularity_scale,
ideal_point_loc, ideal_point_scale) = utils.load_vote_ideal_point_parameters(
vote_param_dir)
polarity_mean = polarity_loc
popularity_mean = popularity_loc
ideal_point_mean = ideal_point_loc
# Find how extreme Bernie Sanders' ideal point is.
sanders_index = np.where(voter_map == "<NAME> (I)")
from __future__ import print_function
import itertools
import math
import os
import random
import shutil
import tempfile
import unittest
import uuid
import numpy as np
import pytest
import tensorflow as tf
import coremltools
import coremltools.models.datatypes as datatypes
from coremltools.models import _MLMODEL_FULL_PRECISION, _MLMODEL_HALF_PRECISION
from coremltools.models import neural_network as neural_network
from coremltools.models.neural_network import flexible_shape_utils
from coremltools.models.utils import macos_version, is_macos
np.random.seed(10)
MIN_MACOS_VERSION_REQUIRED = (10, 13)
LAYERS_10_15_MACOS_VERSION = (10, 15)
def _get_unary_model_spec(x, mode, alpha=1.0):
input_dim = x.shape
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_unary(name='unary', input_name='data',
output_name='output', mode=mode, alpha=alpha)
return builder.spec
class CorrectnessTest(unittest.TestCase):
def runTest(self):
pass
def _compare_shapes(self, np_preds, coreml_preds):
return np.squeeze(np_preds).shape == np.squeeze(coreml_preds).shape
def _compare_nd_shapes(self, np_preds, coreml_preds, shape=()):
if shape:
return coreml_preds.shape == shape
else:
# check if shape has 0 valued dimension
if np.prod(np_preds.shape) == 0 and np.prod(coreml_preds.shape) == 0:
return True
return coreml_preds.shape == np_preds.shape
def _compare_predictions(self, np_preds, coreml_preds, delta=.01):
np_preds = np_preds.flatten()
coreml_preds = coreml_preds.flatten()
for i in range(len(np_preds)):
max_den = max(1.0, np_preds[i], coreml_preds[i])
if np.abs(
np_preds[i] / max_den - coreml_preds[i] / max_den) > delta:
return False
return True
@staticmethod
def _compare_moments(model, inputs, expected, use_cpu_only=True, num_moments=10):
"""
This utility function is used to validate random distribution layers.
It validates the first 10 moments of prediction and expected values.
"""
def get_moment(data, k):
return np.mean(np.power(data - np.mean(data), k))
if isinstance(model, str):
model = coremltools.models.MLModel(model)
model = coremltools.models.MLModel(model, useCPUOnly=use_cpu_only)
prediction = model.predict(inputs, useCPUOnly=use_cpu_only)
for output_name in expected:
np_preds = expected[output_name]
coreml_preds = prediction[output_name]
np_moments = [get_moment(np_preds.flatten(), k) for k in range(num_moments)]
coreml_moments = [get_moment(coreml_preds.flatten(), k) for k in range(num_moments)]
np.testing.assert_almost_equal(np_moments, coreml_moments, decimal=2)
# override expected values to allow element-wise compares
for output_name in expected:
expected[output_name] = prediction[output_name]
def _test_model(self,
model,
input,
expected,
model_precision=_MLMODEL_FULL_PRECISION,
useCPUOnly=False,
output_name_shape_dict={},
validate_shapes_only=False):
model_dir = None
# if we're given a path to a model
if isinstance(model, str):
model = coremltools.models.MLModel(model)
# If we're passed in a specification, save out the model
# and then load it back up
elif isinstance(model, coremltools.proto.Model_pb2.Model):
model_dir = tempfile.mkdtemp()
model_name = str(uuid.uuid4()) + '.mlmodel'
model_path = os.path.join(model_dir, model_name)
coremltools.utils.save_spec(model, model_path)
model = coremltools.models.MLModel(model, useCPUOnly=useCPUOnly)
# If we want to test the half precision case
if model_precision == _MLMODEL_HALF_PRECISION:
model = coremltools.utils.convert_neural_network_weights_to_fp16(
model)
try:
prediction = model.predict(input, useCPUOnly=useCPUOnly)
for output_name in expected:
if self.__class__.__name__ == "SimpleTest":
assert (self._compare_shapes(expected[output_name],
prediction[output_name]))
else:
if output_name in output_name_shape_dict:
output_shape = output_name_shape_dict[output_name]
else:
output_shape = []
if len(output_shape) == 0 and len(expected[output_name].shape) == 0:
output_shape = (1,)
assert (self._compare_nd_shapes(expected[output_name],
prediction[output_name],
output_shape))
if not validate_shapes_only:
assert (self._compare_predictions(expected[output_name],
prediction[output_name]))
finally:
# Remove the temporary directory if we created one
if model_dir and os.path.exists(model_dir):
shutil.rmtree(model_dir)
@unittest.skipIf(not is_macos() or macos_version() < MIN_MACOS_VERSION_REQUIRED,
'macOS 10.13+ is required. Skipping tests.')
class SimpleTest(CorrectnessTest):
def test_tiny_upsample_linear_mode(self):
input_dim = (1, 1, 3) # (C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_upsample(name='upsample',
scaling_factor_h=2, scaling_factor_w=3,
input_name='data', output_name='output',
mode='BILINEAR')
input = {
'data': np.reshape(np.array([1.0, 2.0, 3.0]), (1, 1, 3))
}
expected = {
'output': np.array(
[[1, 1.333, 1.666, 2, 2.333, 2.666, 3, 3, 3],
[1, 1.333, 1.6666, 2, 2.33333, 2.6666, 3, 3, 3]
])
}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_LRN(self):
input_dim = (1, 3, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_lrn(name='lrn', input_name='data', output_name='output',
alpha=2, beta=3, local_size=1, k=8)
input = {
'data': np.ones((1, 3, 3))
}
expected = {
'output': 1e-3 * np.ones((1, 3, 3))
}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_MVN(self):
input_dim = (2, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_mvn(name='mvn', input_name='data', output_name='output',
across_channels=False, normalize_variance=False)
input = {
'data': np.reshape(np.arange(8, dtype=np.float32), (2, 2, 2))
}
expected = {
'output': np.reshape(np.arange(8) - np.array(
[1.5, 1.5, 1.5, 1.5, 5.5, 5.5, 5.5, 5.5]), (2, 2, 2))
}
self._test_model(builder.spec, input, expected)
def test_L2_normalize(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_l2_normalize(name='mvn', input_name='data',
output_name='output')
input = {
'data': np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
}
expected = {
'output': np.reshape(np.arange(4, dtype=np.float32),
(1, 2, 2)) / np.sqrt(14)
}
self._test_model(builder.spec, input, expected)
def test_unary_sqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.sqrt(x)}
spec = _get_unary_model_spec(x, 'sqrt')
self._test_model(spec, input, expected)
def test_unary_rsqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / np.sqrt(x)}
spec = _get_unary_model_spec(x, 'rsqrt')
self._test_model(spec, input, expected)
def test_unary_inverse(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / x}
spec = _get_unary_model_spec(x, 'inverse')
self._test_model(spec, input, expected)
def test_unary_power(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x ** 3}
spec = _get_unary_model_spec(x, 'power', 3)
self._test_model(spec, input, expected)
def test_unary_exp(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.exp(x)}
spec = _get_unary_model_spec(x, 'exp')
self._test_model(spec, input, expected)
def test_unary_log(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.log(x)}
spec = _get_unary_model_spec(x, 'log')
self._test_model(spec, input, expected)
def test_unary_abs(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.abs(x)}
spec = _get_unary_model_spec(x, 'abs')
self._test_model(spec, input, expected)
def test_unary_threshold(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.maximum(x, 2)}
spec = _get_unary_model_spec(x, 'threshold', 2)
self._test_model(spec, input, expected)
def test_split(self):
input_dim = (9, 2, 2)
x = np.random.rand(*input_dim)
input_features = [('data', datatypes.Array(*input_dim))]
output_names = []
output_features = []
for i in range(3):
out = 'out_' + str(i)
output_names.append(out)
output_features.append((out, None))
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_split(name='split', input_name='data',
output_names=output_names)
input = {'data': x}
expected = {
'out_0': x[0: 3, :, :],
'out_1': x[3: 6, :, :],
'out_2': x[6: 9, :, :]
}
self._test_model(builder.spec, input, expected)
for output_ in output_names:
self.assertEqual(len(input_dim), builder._get_rank(output_))
def test_scale_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_scale(name='scale', W=5, b=45, has_bias=True,
input_name='data', output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 5 * x + 45}
self._test_model(builder.spec, input, expected)
def test_scale_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_scale(name='scale', W=W, b=None, has_bias=False,
input_name='data', output_name='output',
shape_scale=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': W * x}
self._test_model(builder.spec, input, expected)
def test_bias_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_bias(name='bias', b=45, input_name='data',
output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + 45}
self._test_model(builder.spec, input, expected)
def test_bias_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_bias(name='bias', b=b, input_name='data',
output_name='output',
shape_bias=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected)
def test_load_constant(self, model_precision=_MLMODEL_FULL_PRECISION):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_load_constant(name='load_constant', output_name='bias',
constant_value=b, shape=[1, 2, 2])
builder.add_elementwise(name='add', input_names=['data', 'bias'],
output_name='output', mode='ADD')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected, model_precision)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_load_constant_half_precision(self):
self.test_load_constant(model_precision=_MLMODEL_HALF_PRECISION)
def test_min(self):
input_dim = (1, 2, 2)
input_features = [('data_0', datatypes.Array(*input_dim)),
('data_1', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_elementwise(name='min', input_names=['data_0', 'data_1'],
output_name='output', mode='MIN')
x1 = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
x2 = np.reshape(np.arange(2, 6, dtype=np.float32), (1, 2, 2))
input = {'data_0': x1, 'data_1': x2}
expected = {'output': np.minimum(x1, x2)}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_conv_same_padding(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.random.rand(3, 3, 10, 20)
builder.add_convolution(name='conv', kernel_channels=10,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='same', groups=1,
W=W, b=None, has_bias=False,
input_name='data', output_name='output',
same_padding_asymmetry_mode='TOP_LEFT_HEAVY')
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 8, 8)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_deconv_valid_padding(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.random.rand(3, 3, 10, 20)
builder.add_convolution(name='deconv', kernel_channels=10,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='valid', groups=1,
W=W, b=None, has_bias=False,
is_deconv=True,
input_name='data', output_name='output',
padding_top=2, padding_bottom=3,
padding_left=2, padding_right=3)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 26, 26)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
def test_deconv_non_unit_groups(self):
input_dim = (16, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features)
W = np.random.rand(3, 3, 16, 5)
builder.add_convolution(name='deconv', kernel_channels=16,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='valid', groups=4,
W=W, b=None, has_bias=False,
is_deconv=True,
input_name='data', output_name='output',
padding_top=2, padding_bottom=3,
padding_left=2, padding_right=3)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 26, 26)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
def test_linear_activation(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_activation(name='activation',
non_linearity='LINEAR',
input_name='data',
output_name='output', params=[34.0, 67.0])
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': 34.0 * x + 67.0}
self._test_model(builder.spec, input, expected)
def test_padding_constant(self):
input_dim = (1, 2, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features)
builder.add_padding(name='pad',
left=1, right=0, top=2, bottom=0,
value=-1,
input_name='data',
output_name='output')
x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(
np.float32)
input = {'data': x}
y = np.reshape(
np.array([[-1, -1, -1, -1], [-1, -1, -1, -1], [-1, 1, 2, 3],
[-1, 4, 5, 6]]), (1, 4, 4)).astype(np.float32)
expected = {'output': y}
self._test_model(builder.spec, input, expected)
def test_padding_replication(self):
input_dim = (1, 2, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_padding(name='pad',
left=1, top=2,
input_name='data',
output_name='output', padding_type='replication')
x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(
np.float32)
input = {'data': x}
y = np.reshape(np.array([[1, 1, 2, 3], [1, 1, 2, 3], [1, 1, 2, 3],
[4, 4, 5, 6]]), (1, 4, 4)).astype(np.float32)
expected = {'output': y}
self._test_model(builder.spec, input, expected)
def test_reshape_target_shape_3(self):
input_dim = (1, 2, 5) # (C,H,W)
target_dim = (10, 1, 1)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_reshape(name='reshape', input_name='data',
output_name='output', target_shape=target_dim,
mode=0)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.reshape(x, (10, 1, 1))}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(target_dim), builder._get_rank('output'))
def test_reshape_target_shape_4(self):
input_dim = (1, 2, 5) # (C,H,W)
target_dim = (1, 10, 1, 1)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_reshape(name='reshape', input_name='data',
output_name='output', target_shape=target_dim,
mode=0)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.reshape(x, (1, 10, 1, 1))}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(target_dim), builder._get_rank('output'))
def test_bias_matrix_cpu(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_bias(name='bias', b=b, input_name='data',
output_name='output',
shape_bias=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_linear_activation_cpu(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_activation(name='activation',
non_linearity='LINEAR',
input_name='data',
output_name='output', params=[34.0, 67.0])
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': 34.0 * x + 67.0}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
@unittest.skipIf(not is_macos() or macos_version() < LAYERS_10_15_MACOS_VERSION,
'macOS 10.15+ required. Skipping tests.')
class NewLayersSimpleTest(CorrectnessTest):
def test_shape_flexibility_range(self):
input_features = [('data', datatypes.Array(*(3,4)))]
builder = neural_network.NeuralNetworkBuilder(input_features,
[('output', None)], disable_rank5_shape_mapping=True)
builder.add_sin(name='sin', input_name='data', output_name='output')
spec = builder.spec
flexible_shape_utils.set_multiarray_ndshape_range(spec, feature_name='data',
lower_bounds=[1,1], upper_bounds=[-1,5])
shapes = [(3,4), (1,5), (60,5), (22,4), (5,3)]
for s in shapes:
x = np.random.rand(*s)
expected = {'output': np.sin(x)}
self._test_model(spec, {'data': x}, expected, useCPUOnly=True)
def test_shape_flexibility_enumeration(self, rank=4):
default_shape = tuple(np.random.randint(1, 15, size=rank))
input_features = [('data', datatypes.Array(*default_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features=input_features,
output_features=[('output', None)],
disable_rank5_shape_mapping=True)
builder.add_sin(name='sin', input_name='data', output_name='output')
spec = builder.spec
shapes = [tuple(np.random.randint(1, 15, size=rank)),
tuple(np.random.randint(1, 15, size=rank))]
flexible_shape_utils.add_multiarray_ndshape_enumeration(
spec, feature_name='data', enumerated_shapes=shapes)
shapes.append(default_shape)
for s in shapes:
x = np.random.rand(*s)
expected = {'output': np.sin(x)}
self._test_model(spec, {'data': x}, expected, useCPUOnly=True)
def test_shape_flexibility_enumeration_rank3(self):
self.test_shape_flexibility_enumeration(rank=3)
def test_shape_flexibility_enumeration_rank2(self):
self.test_shape_flexibility_enumeration(rank=2)
def test_transpose_cpu(self):
for rank in range(1, 6):
axes = np.random.permutation(rank)
axes = [axis - rank if np.random.choice([True, False]) else axis for axis in axes]
input_shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_transpose(name='TransposeND',
axes=axes,
input_name='data',
output_name='output')
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.transpose(x, axes)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_dynamic_weight_conv(self):
input_dim = (1, 3, 16, 16)
# weight layout: (output_channels, kernel_channels, height, width)
weight_dim = (4, 3, 3, 3)
output_dim = (1, 4, 14, 14)
kernel_channels = input_dim[0]
output_channels, kernel_channels, height, width = weight_dim
input_features = [
('input', datatypes.Array(*input_dim)),
('weight', datatypes.Array(*weight_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features,
output_features,
disable_rank5_shape_mapping=True)
builder.add_convolution(
name='two_input_conv_layer',
kernel_channels=kernel_channels,
output_channels=output_channels,
height=height,
width=width,
stride_height=1,
stride_width=1,
border_mode='valid',
groups=1,
W=None,
b=None,
has_bias=False,
input_name=['input', 'weight'],
output_name='output')
# Assigning everything to ones should cover the execution path
# and engine failures, but is not a complete check on numerics.
input_val = np.ones(input_dim)
weight_val = np.ones(weight_dim)
expected = np.ones(output_dim) * 27
feed_dict = {'input': input_val, 'weight': weight_val}
expected = {'output': expected}
self._test_model(builder.spec, feed_dict, expected, useCPUOnly=True)
self._test_model(builder.spec, feed_dict, expected, useCPUOnly=False)
@pytest.mark.xfail
def test_dynamic_weight_deconv(self):
# Expect to fail in Core ML 3
input_dim = (1, 1, 16, 16)
# weight layout: (output_channels, kernel_channels, height, width)
weight_dim = (1, 1, 3, 3)
output_dim = (1, 1, 18, 18)
output_channels, kernel_channels, height, width = weight_dim
input_features = [
('data', datatypes.Array(*input_dim)),
('weight', datatypes.Array(*weight_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features,
output_features,
disable_rank5_shape_mapping=True)
builder.add_convolution(
name='deconv',
kernel_channels=kernel_channels,
output_channels=output_channels,
height=height,
width=width,
stride_height=1,
stride_width=1,
border_mode='valid',
groups=1,
W=None,
b=None,
has_bias=False,
is_deconv=True,
input_name=['data', 'weight'],
output_name='output')
input_val = np.ones(input_dim)
weight_val = np.ones(weight_dim)
import re
import numpy
import utils
import math
def read_input(filename):
pattern = re.compile(r'Tile (?P<ID>\d+):')
tiles = [tile for tile in utils.read(filename, 'string').split('\n\n') if len(tile) != 0]
def parse_tile(tile_info):
lines = tile_info.splitlines()
tile_id = int(pattern.fullmatch(lines[0]).group('ID'))
pixels = lines[1:]
return tile_id, pixels
tile_list = [parse_tile(tile) for tile in tiles]
return {tile_id: pixels for (tile_id, pixels) in tile_list}
class Stack:
def __init__(self, tiles_count):
self.n = int(math.sqrt(tiles_count))
self.stack = numpy.empty((self.n, self.n), dtype=dict)
self.next_int = 0
@property
def next(self):
return divmod(self.next_int, self.n)
def contains(self, tile_id):
return any(grid['id'] == tile_id for grid in self.stack.flatten() if isinstance(grid, dict))
def can_add(self, grid):
if self.contains(grid['id']):
return False
i, j = self.next
if i > 0 and self.stack[i-1][j]['bottom_border'] != grid['top_border']:
return False
if j > 0 and self.stack[i][j-1]['right_border'] != grid['left_border']:
return False
return True
def push(self, grid):
self.stack[self.next] = grid
self.next_int += 1
def pop(self):
self.next_int -= 1
self.stack[self.next] = None
def is_full(self):
return self.next_int == self.n * self.n
def fill(self, grids_per_id):
for tile_id, grids in grids_per_id.items():
if self.contains(tile_id):
continue
for grid in grids:
if not self.can_add(grid):
continue
self.push(grid)
self.fill(grids_per_id)
if self.is_full():
return self
self.pop()
def __repr__(self):
return '\n'.join(['Stack ({})'.format(self.next)] + [
' '.join(['({} {} {})'.format(tile['id'], tile['rotation'], tile['flip'])
if isinstance(tile, dict) else '(XXXX X XXXX)' for tile in row])
for row in self.stack])
def get_grid(pixels, tile_id, rotation, flip):
return {
'id': tile_id,
'rotation': rotation,
'pixels': pixels,
'flip': flip,
'top_border': ''.join(pixels[0]),
'bottom_border': ''.join(pixels[-1]),
'right_border': ''.join(line[-1] for line in pixels),
'left_border': ''.join([line[0] for line in pixels])
}
def make_grids(tile_id, pixels):
pixels = list(map(list, pixels))
return [get_grid(numpy.rot90(pixels, rot)
##########################################################
# @author: pkc/Vincent
# --------------------------------------------------------
# Based on the MATLAB code by <NAME>
# modification of python code by sajid
#
import numpy as np
import numpy.fft as fft
import matplotlib.pyplot as plt
import itertools
import sys
def diagonal_split(x):
''' pre-processing steps interms of
cropping to enable the diagonal
splitting of the input image
'''
h, w = x.shape
cp_x = x
''' cropping the rows '''
if (np.mod(h, 4)==1):
cp_x = cp_x[:-1]
elif(np.mod(h, 4)==2):
cp_x = cp_x[1:-1]
elif(np.mod(h, 4)==3):
cp_x = cp_x[1:-2]
''' cropping the columns'''
if (np.mod(w, 4)==1):
cp_x = cp_x[:, :-1]
elif(np.mod(w, 4)==2):
cp_x = cp_x[:,1:-1]
elif(np.mod(w, 4)==3):
cp_x = cp_x[:, 1:-2]
x = cp_x
h, w = x.shape
if((np.mod(h, 4)!=0) or (np.mod(w, 4)!=0)):
print('[!] diagonal splitting not possible due to cropping issue')
print('[!] re-check the cropping portion')
sys.exit(1)
row_indices = np.arange(0, h)
col_indices = np.arange(0, w)
row_split_u = row_indices[::2]
row_split_d = np.asanyarray(list(set(row_indices)-set(row_split_u)))
col_split_l = col_indices[::2]
col_split_r = np.asanyarray(list(set(col_indices)-set(col_split_l)))
''' ordered pair of pre-processing
of the diagonal elements
and sub-sequent splits of the image
'''
op1 = list(itertools.product(row_split_u, col_split_l))
ind = [np.asanyarray([fo for fo, _ in op1]), np.asanyarray([so for _, so in op1])]
s_a1 = x[ind]
s_a1 = s_a1.reshape((len(row_split_u), len(col_split_l)))
op2 = list(itertools.product(row_split_d, col_split_r))
ind = [np.asanyarray([fo for fo, _ in op2]), np.asanyarray([so for _, so in op2])]
s_a2 = x[ind]
s_a2 = s_a2.reshape((len(row_split_d), len(col_split_r)))
op3 = list(itertools.product(row_split_d, col_split_l))
ind = [np.asanyarray([fo for fo, _ in op3]), np.asanyarray([so for _, so in op3])]
s_b1 = x[ind]
s_b1 = s_b1.reshape((len(row_split_d), len(col_split_l)))
op4 = list(itertools.product(row_split_u, col_split_r))
ind = [np.asanyarray([fo for fo, _ in op4]), np.asanyarray([so for _, so in op4])]
s_b2 = x[ind]
s_b2 = s_b2.reshape((len(row_split_u), len(col_split_r)))
return(s_a1, s_a2, s_b1, s_b2)
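# Hypothetical illustration (names and input are illustrative only, not used elsewhere):
# for an input whose sides are already multiples of 4, the four sub-images built above
# are equivalent to taking alternating rows and columns, which plain slicing shows compactly.
def _diagonal_split_demo():
    demo = np.arange(64, dtype=float).reshape(8, 8)  # hypothetical 8x8 test image
    s_a1 = demo[0::2, 0::2]  # even rows, even columns
    s_a2 = demo[1::2, 1::2]  # odd rows, odd columns
    s_b1 = demo[1::2, 0::2]  # odd rows, even columns
    s_b2 = demo[0::2, 1::2]  # even rows, odd columns
    assert all(s.shape == (4, 4) for s in (s_a1, s_a2, s_b1, s_b2))
    return s_a1, s_a2, s_b1, s_b2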
def get_frc_img(img, frc_img_lx, center=None):
''' Returns a cropped image version of input image "img"
img: input image
center: cropping is performed with center as a reference
point for calculating the length in the x and y directions.
Unless otherwise stated, the center is the center
of the input image "img"
frc_img_lx: length of the cropped image in x as well as y. The
cropped image is made square for
the FRC calculation
'''
h, w = img.shape
cy = round(min(h, w)/2)
if center is None:
cy = cy
else:
cy = cy + center
ep = cy + round(frc_img_lx/2)
sp = ep - frc_img_lx
frc_img = img[sp:ep, sp:ep]
return frc_img
def ring_indices(x, inscribed_rings=True, plot=False):
print("ring plots is:", plot)
#read the shape and dimensions of the input image
shape = np.shape(x)
dim = np.size(shape)
'''Depending on the dimension of the image 2D/3D,
create an array of integers which increase with
distance from the center of the array
'''
if dim == 2 :
nr,nc = shape
nrdc = np.floor(nr/2)
ncdc = np.floor(nc/2)
r = np.arange(nr)-nrdc
c = np.arange(nc)-ncdc
[R,C] = np.meshgrid(r,c)
index = np.round(np.sqrt(R**2+C**2))
elif dim == 3 :
nr,nc,nz = shape
nrdc = np.floor(nr/2)+1
ncdc = np.floor(nc/2)+1
nzdc = np.floor(nz/2)+1
r = np.arange(nr)-nrdc + 1
c = np.arange(nc)-ncdc + 1
z = np.arange(nc)-nzdc + 1
[R,C,Z] = np.meshgrid(r,c,z)
index = np.round(np.sqrt(R**2+C**2+Z**2))+1
else :
print('input is neither a 2d or 3d array')
''' if inscribed_rings is True then the outermost
ring used to evaluate the FRC will be the circle
inscribed in the square input image of size L.
(i.e. FRC_r <= L/2). Else the arcs of the rings
beyond the inscribed circle will also be
considered while determining FRC
(i.e. FRC_r<=sqrt((L/2)^2 + (L/2)^2))
'''
if (inscribed_rings == True):
maxindex = nr/2
else:
maxindex = np.max(index)
#output = np.zeros(int(maxindex),dtype = complex)
''' In the next step the output is generated. The output is an array of length
maxindex. The elements in this array correspond to the sum of all the elements
in the original array corresponding to the integer position of the output array
divided by the number of elements in the index array with the same value as the
integer position.
Depending on the size of the input array, use either the pixel or index method.
By-pixel method for large arrays and by-index method for smaller ones.
'''
print('performed by index method')
indices = []
for i in np.arange(int(maxindex)):
indices.append(np.where(index == i))
if plot is True:
img_plane = np.zeros((nr, nc))
for i in range(int(maxindex)):
if ((i%20)==0):
img_plane[indices[i]]=1.0
plt.imshow(img_plane,cmap="summer")
if inscribed_rings is True:
plt.title(' FRC rings with the max radius as that\
\n of the inscribed circle in the image (spacing of 20 [px] between rings)')
else:
plt.title(' FRC rings extending beyond the radius of\
\n the inscribed circle in the image (spacing of 20 [px] between rings)')
return(indices)
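# Note (hedged): with inscribed_rings=True the ring radii stop at nr/2 pixels, so e.g. a
# 64x64 input yields 32 index groups (radii 0-31 px); with inscribed_rings=False the arcs
# extend out to the image corners (maximum radius about nr/sqrt(2), i.e. ~45 px for 64x64).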
def spinavej(x, inscribed_rings=True):
''' Modification of code by sajid,
based on the MATLAB code by <NAME>
'''
shape = np.shape(x)
dim = np.size(shape)
''' Depending on the dimension of the image 2D/3D, create an array of integers
which increase with distance from the center of the array
'''
if dim == 2 :
nr,nc = shape
nrdc = np.floor(nr/2)
ncdc = | np.floor(nc/2) | numpy.floor |
"""
Method of Equivalent Sources for Removing VRM Responses
=======================================================
Here, we use an equivalent source inversion to remove the VRM response from TEM
data collected by a small coincident loop system. The data being inverted are
the same as in the forward modeling example. To remove the VRM signal we:
1. invert the late time data to recover an equivalent source surface layer of cells.
2. use the recovered model to predict the VRM response at all times
3. subtract the predicted VRM response from the observed data
"""
#########################################################################
# Import modules
# --------------
#
from SimPEG.electromagnetics import viscous_remanent_magnetization as VRM
import numpy as np
import discretize
from SimPEG import (
utils,
maps,
data_misfit,
directives,
optimization,
regularization,
inverse_problem,
inversion,
data,
)
import matplotlib.pyplot as plt
import matplotlib as mpl
##########################################################################
# Defining the mesh
# -----------------
#
cs, ncx, ncy, ncz, npad = 2.0, 35, 35, 20, 5
hx = [(cs, npad, -1.3), (cs, ncx), (cs, npad, 1.3)]
hy = [(cs, npad, -1.3), (cs, ncy), (cs, npad, 1.3)]
hz = [(cs, npad, -1.3), (cs, ncz), (cs, npad, 1.3)]
mesh = discretize.TensorMesh([hx, hy, hz], "CCC")
##########################################################################
# Defining the true model
# -----------------------
#
# Create xi model (amalgamated magnetic property). Here the model is made by
# summing a set of 3D Gaussian distributions. And only active cells have a
# model value.
#
topoCells = mesh.gridCC[:, 2] < 0.0 # define topography
xyzc = mesh.gridCC[topoCells, :]
c = 2 * np.pi * 8**2
pc = np.r_[4e-4, 4e-4, 4e-4, 6e-4, 8e-4, 6e-4, 8e-4, 8e-4]
x_0 = np.r_[50.0, -50.0, -40.0, -20.0, -15.0, 20.0, -10.0, 25.0]
y_0 = np.r_[0.0, 0.0, 40.0, 10.0, -20.0, 15.0, 0.0, 0.0]
z_0 = np.r_[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
var_x = c * np.r_[3.0, 3.0, 3.0, 1.0, 3.0, 0.5, 0.1, 0.1]
var_y = c * np.r_[20.0, 20.0, 1.0, 1.0, 0.4, 0.5, 0.1, 0.4]
var_z = c * np.r_[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
xi_true = np.zeros(np.shape(xyzc[:, 0]))
for ii in range(0, 8):
xi_true += (
pc[ii]
* np.exp(-((xyzc[:, 0] - x_0[ii]) ** 2) / var_x[ii])
* np.exp(-((xyzc[:, 1] - y_0[ii]) ** 2) / var_y[ii])
* np.exp(-((xyzc[:, 2] - z_0[ii]) ** 2) / var_z[ii])
)
xi_true += 1e-5
##########################################################################
# Survey
# ------
#
# Here we must set the transmitter waveform, which defines the off-time decay
# of the VRM response. Next we define the sources, receivers and time channels
# for the survey. Our example is similar to an EM-63 survey.
#
waveform = VRM.waveforms.StepOff()
times = np.logspace(-5, -2, 31) # Observation times
x, y = np.meshgrid(np.linspace(-30, 30, 21), np.linspace(-30, 30, 21))
z = 0.5 * np.ones(x.shape)
loc = np.c_[utils.mkvc(x), utils.mkvc(y), utils.mkvc(z)] # Src and Rx Locations
source_listVRM = []
for pp in range(0, loc.shape[0]):
loc_pp = np.reshape(loc[pp, :], (1, 3))
receiver_listVRM = [
VRM.Rx.Point(loc_pp, times=times, fieldType="dbdt", orientation="z")
]
source_listVRM.append(
VRM.Src.MagDipole(
receiver_listVRM, utils.mkvc(loc[pp, :]), [0.0, 0.0, 0.01], waveform
)
)
survey_vrm = VRM.Survey(source_listVRM)
##########################################################################
# Forward Simulation
# ------------------
#
# Here we predict data by solving the forward problem. For the VRM problem,
# we use a sensitivity refinement strategy for cells # that are proximal to
# transmitters. This is controlled through the *refinement_factor* and *refinement_distance*
# properties.
#
# Defining the problem
problem_vrm = VRM.Simulation3DLinear(
mesh,
survey=survey_vrm,
indActive=topoCells,
refinement_factor=3,
refinement_distance=[1.25, 2.5, 3.75],
)
# Predict VRM response
fields_vrm = problem_vrm.dpred(xi_true)
# Add an artificial TEM response. An analytic solution for the response near
# the surface of a conductive half-space (Nabighian, 1979) is scaled at each
# location to provide lateral variability in the TEM response.
n_times = len(times)
n_loc = loc.shape[0]
sig = 1e-1
mu0 = 4 * np.pi * 1e-7
fields_tem = -(sig**1.5) * mu0**2.5 * times**-2.5 / (20 * np.pi**1.5)
fields_tem = np.kron(np.ones(n_loc)
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
import os
from fluids import *
import numpy as np
from math import pi, log10, log
from random import uniform
from numpy.testing import assert_allclose
from scipy.constants import *
from scipy.optimize import *
from scipy.interpolate import *
from fluids import fluids_data_dir
from fluids.core import Engauge_2d_parser
from fluids.optional.pychebfun import *
import pytest
def log_uniform(low, high):
return 10**uniform(log10(low), log10(high))
def test_fittings():
K = entrance_beveled_orifice(Di=0.1, do=.07, l=0.003, angle=45)
assert_allclose(K, 1.2987552913818574)
### Exits
assert_allclose(exit_normal(), 1.0)
K_helix = helix(Di=0.01, rs=0.1, pitch=.03, N=10, fd=.0185)
assert_allclose(K_helix, 14.525134924495514)
K_spiral = spiral(Di=0.01, rmax=.1, rmin=.02, pitch=.01, fd=0.0185)
assert_allclose(K_spiral, 7.950918552775473)
### Contractions
K_sharp = contraction_sharp(Di1=1, Di2=0.4)
assert_allclose(K_sharp, 0.5301269161591805)
K_beveled = contraction_beveled(Di1=0.5, Di2=0.1, l=.7*.1, angle=120)
assert_allclose(K_beveled, 0.40946469413070485)
### Expansions (diffusers)
K_sharp = diffuser_sharp(Di1=.5, Di2=1)
assert_allclose(K_sharp, 0.5625)
K = diffuser_curved(Di1=.25**0.5, Di2=1., l=2.)
assert_allclose(K, 0.2299781250000002)
K = diffuser_pipe_reducer(Di1=.5, Di2=.75, l=1.5, fd1=0.07)
assert_allclose(K, 0.06873244301714816)
K = diffuser_pipe_reducer(Di1=.5, Di2=.75, l=1.5, fd1=0.07, fd2=.08)
assert_allclose(K, 0.06952256647393829)
# Misc
K1 = Darby3K(NPS=2., Re=10000., name='Valve, Angle valve, 45°, full line size, β = 1')
K2 = Darby3K(NPS=12., Re=10000., name='Valve, Angle valve, 45°, full line size, β = 1')
K3 = Darby3K(NPS=12., Re=10000., K1=950, Ki=0.25, Kd=4)
Ks = [1.1572523963562353, 0.819510280626355, 0.819510280626355]
assert_allclose([K1, K2, K3], Ks)
with pytest.raises(Exception):
Darby3K(NPS=12., Re=10000)
with pytest.raises(Exception):
Darby3K(NPS=12., Re=10000, name='fail')
tot = sum([Darby3K(NPS=2., Re=1000, name=i) for i in Darby.keys()])
assert_allclose(tot, 67.96442287975898)
K1 = Hooper2K(Di=2., Re=10000., name='Valve, Globe, Standard')
K2 = Hooper2K(Di=2., Re=10000., K1=900, Kinfty=4)
assert_allclose([K1, K2], [6.15, 6.09])
tot = sum([Hooper2K(Di=2., Re=10000., name=i) for i in Hooper.keys()])
assert_allclose(tot, 46.18)
with pytest.raises(Exception):
Hooper2K(Di=2, Re=10000)
with pytest.raises(Exception):
Hooper2K(Di=2., Re=10000, name='fail')
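    # change_K_basis rescales a loss coefficient to a new reference diameter,
    # K2 = K1*(D2/D1)**4; consistent with the values below, 32.689*16 ~= 523.02.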
K2 = change_K_basis(K1=32.68875692997804, D1=.01, D2=.02)
assert_allclose(K2, 523.0201108796487)
### Entrances
def test_entrance_distance_45_Miller():
from fluids.fittings import entrance_distance_45_Miller
K = entrance_distance_45_Miller(Di=0.1, Di0=0.14)
assert_allclose(K, 0.24407641818143339)
def test_entrance_distance():
K1 = entrance_distance(0.1, t=0.0005)
assert_allclose(K1, 1.0154100000000004)
assert_allclose(entrance_distance(Di=0.1, t=0.05), 0.57)
K = entrance_distance(Di=0.1, t=0.0005, method='Miller')
assert_allclose(K, 1.0280427936730414)
K = entrance_distance(Di=0.1, t=0.0005, method='Idelchik')
assert_allclose(K, 0.9249999999999999)
K = entrance_distance(Di=0.1, t=0.0005, l=.02, method='Idelchik')
assert_allclose(K, 0.8475000000000001)
K = entrance_distance(Di=0.1, t=0.0005, method='Harris')
assert_allclose(K, 0.8705806231290558, 3e-3)
K = entrance_distance(Di=0.1, method='Crane')
assert_allclose(K, 0.78)
with pytest.raises(Exception):
entrance_distance(Di=0.1, t=0.01, method='BADMETHOD')
def test_entrance_rounded():
K = entrance_rounded(Di=0.1, rc=0.0235)
assert_allclose(K, 0.09839534618360923)
assert_allclose(entrance_rounded(Di=0.1, rc=0.2), 0.03)
K = entrance_rounded(Di=0.1, rc=0.0235, method='Miller')
assert_allclose(K, 0.057734448458542094)
K = entrance_rounded(Di=0.1, rc=0.0235, method='Swamee')
assert_allclose(K, 0.06818838227156554)
K = entrance_rounded(Di=0.1, rc=0.01, method='Crane')
assert_allclose(K, .09)
K = entrance_rounded(Di=0.1, rc=0.01, method='Harris')
assert_allclose(K, 0.04864878230217168)
# Limiting condition
K = entrance_rounded(Di=0.1, rc=0.0235, method='Harris')
assert_allclose(K, 0.0)
K = entrance_rounded(Di=0.1, rc=0.01, method='Idelchik')
assert_allclose(K, 0.11328005177738182)
# Limiting condition
K = entrance_rounded(Di=0.1, rc=0.0235, method='Idelchik')
assert_allclose(K, 0.03)
with pytest.raises(Exception):
entrance_rounded(Di=0.1, rc=0.01, method='BADMETHOD')
def test_entrance_beveled():
K = entrance_beveled(Di=0.1, l=0.003, angle=45)
assert_allclose(K, 0.45086864221916984)
K = entrance_beveled(Di=0.1, l=0.003, angle=45, method='Idelchik')
assert_allclose(K, 0.3995000000000001)
def test_entrance_sharp():
assert_allclose(entrance_sharp(), 0.57)
with pytest.raises(Exception):
entrance_sharp(method='BADMETHOD')
for method in ['Swamee', 'Blevins', 'Idelchik', 'Crane']:
assert_allclose(0.5, entrance_sharp(method=method))
entrance_sharp(method='Miller') # Don't bother checking a value for the Miller method
def test_entrance_angled():
K_30_Idelchik = 0.9798076211353316
assert_allclose(entrance_angled(30), K_30_Idelchik)
assert_allclose(entrance_angled(30, method='Idelchik'), K_30_Idelchik)
with pytest.raises(Exception):
entrance_angled(30, method='BADMETHOD')
### Bends
def test_bend_rounded_Crane():
K = bend_rounded_Crane(Di=.4020, rc=.4*5, angle=30)
assert_allclose(K, 0.09321910015613409)
K_max = bend_rounded_Crane(Di=.400, rc=.4*25, angle=30)
K_limit = bend_rounded_Crane(Di=.400, rc=.4*20, angle=30)
assert_allclose(K_max, K_limit)
def test_bend_rounded_Miller():
# Miller examples - 9.12
D = .6
Re = Reynolds(V=4, D=D, nu=1.14E-6)
kwargs = dict(Di=D, bend_diameters=2, angle=90, Re=Re, roughness=.02E-3)
K = bend_rounded_Miller(L_unimpeded=30*D, **kwargs)
    assert_allclose(K, 0.1513266131915296, rtol=1e-4)  # 0.150 in Miller - 1% difference due to fd
K = bend_rounded_Miller(L_unimpeded=0*D, **kwargs)
assert_allclose(K, 0.1414607344374372, rtol=1e-4) # 0.135 in Miller - Difference mainly from Co interpolation method, OK with that
K = bend_rounded_Miller(L_unimpeded=2*D, **kwargs)
assert_allclose(K, 0.09343184457353562, rtol=1e-4) # 0.093 in miller
def test_bend_rounded():
### Bends
K_5_rc = [bend_rounded(Di=4.020, rc=4.0*5, angle=i, fd=0.0163) for i in [15, 30, 45, 60, 75, 90]]
K_5_rc_values = [0.07038212630028828, 0.10680196344492195, 0.13858204974134541, 0.16977191374717754, 0.20114941557508642, 0.23248382866658507]
assert_allclose(K_5_rc, K_5_rc_values)
K_10_rc = [bend_rounded(Di=34.500, rc=36*10, angle=i, fd=0.0106) for i in [15, 30, 45, 60, 75, 90]]
K_10_rc_values = [0.061075866683922314, 0.10162621862720357, 0.14158887563243763, 0.18225270014527103, 0.22309967045081655, 0.26343782210280947]
assert_allclose(K_10_rc, K_10_rc_values)
K = bend_rounded(Di=4.020, bend_diameters=5, angle=30, fd=0.0163)
assert_allclose(K, 0.106920213333191)
K = bend_rounded(Di=4.020, bend_diameters=5, angle=30, Re=1E5)
assert_allclose(K, 0.11532121658742862)
K = bend_rounded(Di=4.020, bend_diameters=5, angle=30, Re=1E5, method='Miller')
assert_allclose(K, 0.10276501180879682)
K = bend_rounded(Di=.5, bend_diameters=5, angle=30, Re=1E5, method='Crane')
assert_allclose(K, 0.08959057097762159)
K = bend_rounded(Di=.5, bend_diameters=5, angle=30, Re=1E5, method='Ito')
assert_allclose(K, 0.10457946464978755)
K = bend_rounded(Di=.5, bend_diameters=5, angle=30, Re=1E5, method='Swamee')
assert_allclose(K, 0.055429466248839564)
def test_bend_miter():
K_miters = [bend_miter(i) for i in [150, 120, 90, 75, 60, 45, 30, 15]]
K_miter_values = [2.7128147734758103, 2.0264994448555864, 1.2020815280171306, 0.8332188430731828, 0.5299999999999998, 0.30419633092708653, 0.15308822558050816, 0.06051389308126326]
assert_allclose(K_miters, K_miter_values)
K = bend_miter(Di=.6, angle=45, Re=1e6, roughness=1e-5, L_unimpeded=20, method='Miller')
assert_allclose(K, 0.2944060416245167)
K = bend_miter(Di=.05, angle=45, Re=1e6, roughness=1e-5, method='Crane')
assert_allclose(K, 0.28597953150073047)
K = bend_miter(angle=45, Re=1e6, method='Rennels')
assert_allclose(K, 0.30419633092708653)
with pytest.raises(Exception):
bend_miter(angle=45, Re=1e6, method='BADMETHOD')
def test_bend_miter_Miller():
K = bend_miter_Miller(Di=.6, angle=45, Re=1e6, roughness=1e-5, L_unimpeded=20)
assert_allclose(K, 0.2944060416245167)
K_default_L_unimpeded = bend_miter_Miller(Di=.6, angle=45, Re=1e6, roughness=1e-5)
assert_allclose(K, K_default_L_unimpeded)
K_high_angle = bend_miter_Miller(Di=.6, angle=120, Re=1e6, roughness=1e-5, L_unimpeded=20)
K_higher_angle = bend_miter_Miller(Di=.6, angle=150, Re=1e6, roughness=1e-5, L_unimpeded=20)
assert_allclose(K_high_angle, K_higher_angle)
@pytest.mark.slow
@pytest.mark.fuzz
def test_bend_rounded_Miller_fuzz():
# Tested for quite a while without problems
answers = []
for i in range(500):
Di = log_uniform(1e-5, 100)
rc = uniform(0, 100)
angle = uniform(0, 180)
Re = log_uniform(1e-5, 1E15)
roughness = uniform(1e-10, Di*.95)
L_unimpeded = log_uniform(1e-10, Di*1000)
ans = bend_rounded_Miller(Di=Di, rc=rc, angle=angle, Re=Re, roughness=roughness, L_unimpeded=L_unimpeded)
if np.isnan(ans) or np.isinf(ans):
raise Exception
answers.append(ans)
assert min(answers) >= 0
assert max(answers) < 1E10
@pytest.mark.slow
@pytest.mark.fuzz
def test_bend_miter_Miller_fuzz():
# Tested for quite a while without problems
answers = []
for i in range(10**3):
Di = log_uniform(1e-5, 100)
angle = uniform(0, 120)
Re = log_uniform(1e-5, 1E15)
roughness = uniform(1e-10, Di*.95)
L_unimpeded = log_uniform(1e-10, Di*1000)
ans = bend_miter_Miller(Di=Di, angle=angle, Re=Re, roughness=roughness, L_unimpeded=L_unimpeded)
if np.isnan(ans) or np.isinf(ans):
raise Exception
answers.append(ans)
assert min(answers) >= 0
assert max(answers) < 1E10
### Diffusers
def test_diffuser_conical():
K1 = diffuser_conical(Di1=.1**0.5, Di2=1, angle=10., fd=0.020)
K2 = diffuser_conical(Di1=1/3., Di2=1, angle=50, fd=0.03) # 2
K3 = diffuser_conical(Di1=2/3., Di2=1, angle=40, fd=0.03) # 3
    K4 = diffuser_conical(Di1=1/3., Di2=1, angle=120, fd=0.0185)  # 4
K5 = diffuser_conical(Di1=2/3., Di2=1, angle=120, fd=0.0185) # Last
K6 = diffuser_conical(Di1=.1**0.5, Di2=1, l=3.908, fd=0.020)
Ks = [0.12301652230915454, 0.8081340270019336, 0.32533470783539786, 0.812308728765127, 0.3282650135070033, 0.12300865396254032]
assert_allclose([K1, K2, K3, K4, K5, K6], Ks)
with pytest.raises(Exception):
diffuser_conical(Di1=.1, Di2=0.1, angle=1800., fd=0.020)
with pytest.raises(Exception):
diffuser_conical(Di1=.1, Di2=0.1, fd=0.020)
K1 = diffuser_conical_staged(Di1=1., Di2=10., DEs=[2,3,4,5,6,7,8,9], ls=[1,1,1,1,1,1,1,1,1], fd=0.01)
K2 = diffuser_conical(Di1=1., Di2=10.,l=9, fd=0.01)
Ks = [1.7681854713484308, 0.973137914861591]
    assert_allclose([K1, K2], Ks)