| prompt | completion | api |
|---|---|---|
| stringlengths 19 to 879k | stringlengths 3 to 53.8k | stringlengths 8 to 59 |
# AUTOGENERATED! DO NOT EDIT! File to edit: 01_utils.ipynb (unless otherwise specified).
__all__ = ['extend_sync_timepoints', 'align_sync_timepoints', 'resample_to_timepoints', 'link_sync_timepoints',
'flip_stimulus', 'flip_gratings', 'stim_to_dataChunk', 'phy_results_dict', 'spike_to_dataChunk',
'get_calcium_stack_lenghts', 'twoP_dataChunks', 'img_2d_fit', 'fill_nan', 'stim_inten_norm',
'group_direction_response', 'group_chirp_bumps', 'get_repeat_corrected', 'removeSlowDrift',
'time_shift_test_corr', 'cross_corr_with_lag', 'get_inception_generator', 'group_omitted_epochs',
'get_shank_channels', 'format_pval', 'stim_recap_df']
# Cell
import numpy as np
import pandas as pd
import os
import glob
import re
from typing import Dict, Tuple, Sequence, Union, Callable
import scipy.interpolate as interpolate
from scipy.ndimage import convolve1d
from scipy.signal import savgol_filter
import scipy.stats
from scipy.ndimage import gaussian_filter
import matplotlib.pyplot as plt
import math
from cmath import *
from PIL import Image
from .core import *
# Cell
def extend_sync_timepoints(timepoints:np.ndarray, signals:np.ndarray,
up_bound, low_bound=0) -> Tuple[DataChunk, DataChunk]:
"""
Extend arrays of timepoints and signals (of identical shape) from low_bound up to up_bound.
For example, if the first timepoint is 2000 and low_bound is 0, the timepoints 0, 500, 1000
and 1500 are prepended when the timepoint spacing is 500 (obtained by averaging the distances
between consecutive timepoints).
params:
- timepoints: Timepoints to extend
- signals: Signals to extend
- up_bound: Up bound to which to extend both timepoints and signals
- low_bound: Low bound to which to extend both timepoints and signals
returns:
- timepoint: Extended timepoints
- signals: The datachunk array is not modified, but the idx attribute is increased by the number
of frames added with the low_bound.
"""
assert len(timepoints) == len(signals)
timepoints = np.array(timepoints)
signals = np.array(signals)
spb = np.mean(timepoints[1:]-timepoints[:-1]) #spb: sample_per_bin
#Left and right side are just prolongation of the sample_times up
# from (0-sample_per_fr) to (len+sample_per_fr) so it covers all timepoints
left_side = np.arange(timepoints[0]-spb , low_bound - spb, -spb)[::-1].astype(int)
right_side = np.arange(timepoints[-1]+spb, up_bound + spb, spb).astype(int)
new_timepoints = np.concatenate((left_side,
timepoints,
right_side))
timepoint_chunk = DataChunk(data=new_timepoints, idx=0, group="sync")
signal_chunk = DataChunk(data=signals, idx=len(left_side), group="sync")
return (timepoint_chunk, signal_chunk)
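# --- Hedged usage sketch (added for illustration; not part of the original notebook) ---
# Assumes DataChunk from .core simply wraps an array together with an `idx` offset and a
# `group` label, as the docstring above suggests. The numbers mirror the docstring example.
def _example_extend_sync_timepoints():
    example_timepoints = np.array([2000, 2500, 3000, 3500])
    example_signals = np.array([1, 0, 1, 0])
    tp_chunk, sig_chunk = extend_sync_timepoints(example_timepoints, example_signals,
                                                 up_bound=5000, low_bound=0)
    # tp_chunk now also covers 0, 500, 1000, 1500 on the left and extends up to up_bound on
    # the right; sig_chunk.idx equals the number of prepended timepoints (4 here).
    return tp_chunk, sig_chunk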
# Cell
def align_sync_timepoints(timepoints:DataChunk, signals:DataChunk,
ref_timepoints:DataChunk, ref_signals:DataChunk) -> Tuple[DataChunk, DataChunk, DataChunk]:
"""
Align the signals of a timepoints time series to a reference ref_timepoints with the corresponding
ref_signals. timepoints is extended to match the length of ref_timepoints.
params:
- timepoints: timepoints to align
- signals: signals to align
- ref_timepoints: reference timepoints
- ref_signals: reference signals
return:
- Aligned timepoints (DataChunk)
- Aligned signals (DataChunk)
"""
shift_left = ((np.where(ref_signals)[0][0] + ref_signals.idx)
- (np.where(signals)[0][0] + signals.idx))
shift_right = len(ref_timepoints) - (len(timepoints) + shift_left)
spb = np.mean(timepoints[1:]-timepoints[:-1]) #spb: sample_per_bin
spb_ref = np.mean(ref_timepoints[1:]-ref_timepoints[:-1]) #spb_ref: sample_per_bin of the reference
left_timepoints = np.zeros(0)
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 18 10:18:52 2019
@author: <NAME>
"""
from __future__ import print_function
import os, sys
import cv2
import numpy as np
import imutils
from copy import copy, deepcopy
import math
from skimage.feature import peak_local_max
import rotular
# Returns the rectangularity value of the object
def retangularidade(objeto):
l1 = objeto["retangulo_largura"]
l2 = objeto["retangulo_altura"]
area = len(objeto["area"])
if l1*l2 == 0:
return 0
else:
return area/(l1*l2)
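# Hedged example (added for illustration): the dict layout below mirrors the keys used above
# ("retangulo_largura", "retangulo_altura", "area"); all values are made up.
def _exemplo_retangularidade():
    # A fully filled 10 x 5 bounding box gives 50 / (10 * 5) = 1.0; sparser objects approach 0.
    objeto_exemplo = {
        "retangulo_largura": 10,
        "retangulo_altura": 5,
        "area": [[y, x] for y in range(5) for x in range(10)],  # 50 pixels
    }
    return retangularidade(objeto_exemplo)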
# Returns the border pixels and a vector of distances from the central pixel to all border pixels
def assinatura(imagemCinza, objeto):
inferiorD = objeto["retangulo"][0] # = [y,x]
inferiorE = objeto["retangulo"][1] # = [y,x]
superiorD = objeto["retangulo"][2] # = [y,x]
superiorE = objeto["retangulo"][3] # = [y,x]
centroide = objeto["centroide"]
dist_pontos_borda = []
dY = []
dX = []
angulos = []
pixels_borda = []
for y in np.arange(superiorE[0], inferiorE[0] + 1):
for x in np.arange(superiorE[1], superiorD[1] + 1):
if imagemCinza[y][x] != 0:
if y < (imagemCinza.shape[0] - 1) and x < (imagemCinza.shape[1] - 1) and y > 0 and x > 0:
if imagemCinza[y - 1][x] == 0 or imagemCinza[y][x - 1] == 0 or imagemCinza[y][x + 1] == 0 or imagemCinza[y + 1][x] == 0:
distancia = math.pow(y - centroide[0], 2) + math.pow(x - centroide[1], 2) # squared Euclidean distance from the centroid to the border (sqrt taken below)
dist_pontos_borda.append(math.sqrt(distancia))
dY.append(y - centroide[0])
dX.append(x - centroide[1])
pixels_borda.append([y,x])
angulos.append(math.atan2(y - centroide[0],x - centroide[1]))
#print('Quantidade de Angulos:',len(angulos), 'Formato Angulo:', angulos[0], 'Formato distancia:', distancias[0])
# print('Soma das distancias/total de distancias:',sum(dist_pontos_borda)/len(dist_pontos_borda))
# print('Quantidade de Pixels da borda:',len(pixels_borda))
# print('Assinatura:', dist_pontos_borda)
# print('Somatório das Distâncias dividido pelo total de distâncias (Assinatura):', somatorio)
# print('Min:', min(dist_pontos_borda),'Min:', max(dist_pontos_borda))
# print('Variância das distâncias:', np.var(dist_pontos_borda))
# pintarBorda(mascara,pixels_borda)
# compacidadeQuadrado(pixels_borda)
return pixels_borda, dist_pontos_borda
# Returns the number of pixels in each row and in each column, as row and column vectors
def calcularProjecoes(objeto, imagemLimiarizada):
inferiorD = objeto["retangulo"][0] # = [y,x]
inferiorE = objeto["retangulo"][1] # = [y,x]
superiorD = objeto["retangulo"][2] # = [y,x]
superiorE = objeto["retangulo"][3] # = [y,x]
max_x = superiorD[1]
min_x = superiorE[1]
max_y = inferiorD[0]
min_y = superiorE[0]
projecoesLinhas = []
projecoesColunas = []
for y in np.arange(min_y, max_y + 1):
qtd_pixels = 0
for x in np.arange(min_x, max_x + 1):
if imagemLimiarizada[y][x] != 0:
qtd_pixels += 1
projecoesLinhas.append(qtd_pixels)
for x in np.arange(min_x, max_x + 1):
qtd_pixels = 0
for y in np.arange(min_y, max_y + 1):
if imagemLimiarizada[y][x] != 0:
qtd_pixels += 1
projecoesColunas.append(qtd_pixels)
#print('Projecoes Linhas:', projecoesLinhas) #'Quantidade de linhas:', len(projecoesLinhas))
#print('Projecoes Colunas:', projecoesColunas) #'Quantidade de colunas:', len(projecoesColunas))
return projecoesLinhas, projecoesColunas
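# Hedged example (added for illustration): for a solid 3-row by 2-column object the row
# projection would be [2, 2, 2] and the column projection [3, 3] -- i.e. simple pixel-count
# histograms over the bounding box (values illustrative only).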
# Returns the edge image with the bounding rectangle
def pintarRetanguloClassificado(imagemClassificada, classificacoes, objetos):
imagemClassificadaReal = copy(imagemClassificada)
for objeto in objetos:
menorX = objeto["retangulo"][1][1] - 2
maiorX = objeto["retangulo"][0][1] + 2
menorY = objeto["retangulo"][2][0] - 2
maiorY = objeto["retangulo"][0][0] + 2
if 'GALAXY' in objeto["classificacao"]: # PAINT OBJECTS CLASSIFIED AS '1'
if menorX < 0:
menorX = 0
if menorY < 0:
menorY = 0
if maiorY >= imagemClassificadaReal.shape[0]:
maiorY = imagemClassificadaReal.shape[0] -1
if maiorX >= imagemClassificadaReal.shape[1]:
maiorX = imagemClassificadaReal.shape[1] -1
menorx_aux = copy(menorX)
imagemClassificadaReal[maiorY][maiorX] = 255
imagemClassificadaReal[maiorY][menorX] = 255
imagemClassificadaReal[menorY][maiorX] = 255
imagemClassificadaReal[menorY][menorX] = 255
# Paint horizontal lines (rows or X axes)
while (maiorX != menorX):
imagemClassificadaReal[maiorY][menorX] = 255
imagemClassificadaReal[menorY][menorX] = 255
menorX += 1
menorX = menorx_aux
# Paint vertical lines (columns or Y axes)
while (maiorY != menorY):
imagemClassificadaReal[menorY][maiorX] = 255
imagemClassificadaReal[menorY][menorX] = 255
menorY += 1
return imagemClassificadaReal
def removerEstrelas(imagem, classificacoes, objetos, classeEstrela):
for i in range(len(objetos)):
if classificacoes[i] == classeEstrela: # REMOVE OBJECTS CLASSIFIED AS 'classeEstrela'
for posicao in objetos[i]["area"]:
imagem[posicao[0]][posicao[1]] = 0
# Returns the edge image with the bounding rectangle
def pintarRetanguloClassificadoKmeans(imagemClassificada, classificacoes, objetos, classeGalaxia):
for i in range(len(objetos)):
objeto = objetos[i]
menorX = objeto["retangulo"][1][1] - 2
maiorX = objeto["retangulo"][0][1] + 2
menorY = objeto["retangulo"][2][0] - 2
maiorY = objeto["retangulo"][0][0] + 2
if classificacoes[i] == classeGalaxia: # PAINT OBJECTS CLASSIFIED AS 'classeGalaxia'
if menorX < 0:
menorX = 0
if menorY < 0:
menorY = 0
if maiorY >= imagemClassificada.shape[0]:
maiorY = imagemClassificada.shape[0] -1
if maiorX >= imagemClassificada.shape[1]:
maiorX = imagemClassificada.shape[1] -1
menorx_aux = copy(menorX)
imagemClassificada[maiorY][maiorX] = 255
imagemClassificada[maiorY][menorX] = 255
imagemClassificada[menorY][maiorX] = 255
imagemClassificada[menorY][menorX] = 255
# Paint horizontal lines (rows or X axes)
while (maiorX != menorX):
imagemClassificada[maiorY][menorX] = 255
imagemClassificada[menorY][menorX] = 255
menorX += 1
menorX = menorx_aux
# Paint vertical lines (columns or Y axes)
while (maiorY != menorY):
imagemClassificada[menorY][maiorX] = 255
imagemClassificada[menorY][menorX] = 255
menorY += 1
# Returns the edge image with the bounding rectangle
def pintarRetangulo(maiorY, menorY, maiorX, menorX, objeto, imagemCaracteristicas):
#if objeto["compacidade"] >= 12 and objeto["compacidade"] <= 14:
#if (objeto["compacidade"] < 12 and len(objeto["area"]) > 7) or (objeto["compacidade"] >= 14 and len(objeto["area"]) > 7): #NON-CIRCULAR OBJECTS WITH AREA GREATER THAN 7
if menorX < 0:
menorX = 0
if menorY < 0:
menorY = 0
if maiorY >= imagemCaracteristicas.shape[0]:
maiorY = imagemCaracteristicas.shape[0] -1
if maiorX >= imagemCaracteristicas.shape[1]:
maiorX = imagemCaracteristicas.shape[1] -1
menorx_aux = copy(menorX)
imagemCaracteristicas[maiorY][maiorX] = 255
imagemCaracteristicas[maiorY][menorX] = 255
imagemCaracteristicas[menorY][maiorX] = 255
imagemCaracteristicas[menorY][menorX] = 255
# Paint horizontal lines (rows or X axes)
while (maiorX != menorX):
imagemCaracteristicas[maiorY][menorX] = 255
imagemCaracteristicas[menorY][menorX] = 255
menorX += 1
menorX = menorx_aux
# Paint vertical lines (columns or Y axes)
while (maiorY != menorY):
imagemCaracteristicas[menorY][maiorX] = 255
imagemCaracteristicas[menorY][menorX] = 255
menorY += 1
# Returns the eccentricity value of the object
def calcularExcentricidade(eixoY, eixoX):
excentricidade = 0
if len(eixoY) > len(eixoX):
excentricidade = len(eixoY) / len(eixoX)
else:
excentricidade = len(eixoX) / len(eixoY)
return excentricidade
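# Hedged example (added for illustration): with a 20-pixel long axis and a 10-pixel short
# axis the ratio above is 2.0; a perfectly symmetric object gives 1.0 (values made up).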
# Returns the compactness value of a circular object
def calcularCompacidadeCirculo(objeto):
raio1 = objeto["retangulo_altura"] #/ 2
raio2 = objeto["retangulo_largura"] #/ 2
compacidade = ((2 * math.pi * raio1)**2) / (math.pi * (raio2**2))
#print('Raio1:',raio1,'Raio2:',raio2,'Compacidade:', compacidade)
return compacidade
# Returns the compactness value of the object in question
def calcularCompacidade(codigo_cadeia, objeto):
n_p = []
n_i = []
for i in codigo_cadeia:
if i % 2 == 0:
n_p.append(i)
else:
n_i.append(i)
perimetro = len(n_p) + (math.sqrt(2)*len(n_i))
compacidade = (perimetro**2) / len(objeto["area"])
#print('Compacidade:', compacidade)
return compacidade
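# Hedged worked example (added for illustration): for an ideal circle the perimeter-based
# compactness is (2*pi*r)**2 / (pi*r**2) = 4*pi ~= 12.57, which matches the 12-14 "circular"
# range used in the commented-out filter of pintarRetangulo. The chain code and object below
# are made up.
def _exemplo_compacidade():
    codigo_exemplo = [0, 0, 2, 2, 4, 4, 6, 6]    # only even moves -> perimeter = 8
    objeto_exemplo = {"area": [[0, 0], [0, 1], [1, 0], [1, 1]]}  # pretend area of 4 pixels
    return calcularCompacidade(codigo_exemplo, objeto_exemplo)   # (8 ** 2) / 4 = 16.0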
# Returns the normalized chain code (invariant under rotation)
def normalizarCodigoCadeia(objeto):
def shift(codigo, n):
return codigo[n:] + codigo[:n]
codigo_cadeia = objeto["codigo_cadeia"]
codigo_normalizado = []
codigos = []
novo_codigo_cadeia = []
for i in range(len(codigo_cadeia)-1):
if codigo_cadeia[i] > codigo_cadeia[i + 1]:
diferenca = codigo_cadeia[i + 1] - codigo_cadeia[i]
codigo_normalizado.append(diferenca + 8)
else:
codigo_normalizado.append(codigo_cadeia[i + 1] - codigo_cadeia[i])
#print('Normalizado: ',codigo_normalizado)
for k in range(len(codigo_normalizado)):
codigo_normalizado = shift(codigo_normalizado, 1)
codigo = ''
for j in range(len(codigo_normalizado)):
codigo = codigo + str(codigo_normalizado[j])
#print('Codigos:', codigo)
codigos.append(int(codigo))
diferenca = len(codigo_normalizado) - len(str(min(codigos)))
for i in range(diferenca):
novo_codigo_cadeia.append(0)
for i in str(min(codigos)):
novo_codigo_cadeia.append(int(i))
#print('Novo codigo de cadeia:', novo_codigo_cadeia)
return novo_codigo_cadeia
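# Hedged worked example (added for illustration): a small square traced as [0, 2, 4, 6] and
# the same square rotated by 45 degrees, [1, 3, 5, 7], both reduce to the difference code
# [2, 2, 2] above, which is what makes the normalized chain code rotation invariant
# (codes made up for illustration).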
# Returns the chain code of the object in question
def gerarCodigoCadeia(objeto, mascara, pixels_borda):
inferiorD = objeto["retangulo"][0] # = [y,x]
inferiorE = objeto["retangulo"][1] # = [y,x]
superiorD = objeto["retangulo"][2] # = [y,x]
superiorE = objeto["retangulo"][3] # = [y,x]
centroide = objeto["centroide"]
y_aux = 0
x_aux = 0
bordas_percorridas = []
codigo_cadeia = []
encontrou = False
# print('\nBorda:', borda)
#print('Pixels da borda:',pixels_borda)
for y in np.arange(superiorE[0], inferiorE[0] + 1):
for x in np.arange(superiorE[1], superiorD[1] + 1):
if mascara[y][x] != 0 and not encontrou:
if y < (mascara.shape[0] - 1) and x < (mascara.shape[1] - 1) and y > 0 and x > 0:
y_aux = y
x_aux = x
encontrou = True
for i in range(len(pixels_borda)):
if mascara[y_aux][x_aux - 1] != 0 and [y_aux, x_aux - 1] in pixels_borda and [y_aux,
x_aux - 1] not in bordas_percorridas:
x_aux -= 1
bordas_percorridas.append([y_aux, x_aux])
codigo_cadeia.append(4)
#print('4', y_aux, x_aux)
elif mascara[y_aux + 1][x_aux - 1] != 0 and [y_aux + 1, x_aux - 1] in pixels_borda and [y_aux + 1,
x_aux - 1] not in bordas_percorridas:
y_aux += 1
x_aux -= 1
bordas_percorridas.append([y_aux, x_aux])
codigo_cadeia.append(5)
#print('5', y_aux, x_aux)
elif mascara[y_aux + 1][x_aux] != 0 and [y_aux + 1, x_aux] in pixels_borda and [y_aux + 1,
x_aux] not in bordas_percorridas:
y_aux += 1
bordas_percorridas.append([y_aux, x_aux])
codigo_cadeia.append(6)
#print('6', y_aux, x_aux)
elif mascara[y_aux + 1][x_aux + 1] != 0 and [y_aux + 1, x_aux + 1] in pixels_borda and [y_aux + 1,
x_aux + 1] not in bordas_percorridas:
y_aux += 1
x_aux += 1
bordas_percorridas.append([y_aux, x_aux])
codigo_cadeia.append(7)
#print('7', y_aux, x_aux)
elif mascara[y_aux][x_aux + 1] != 0 and [y_aux, x_aux + 1] in pixels_borda and [y_aux,
x_aux + 1] not in bordas_percorridas:
x_aux += 1
bordas_percorridas.append([y_aux, x_aux])
codigo_cadeia.append(0)
#print('0', y_aux, x_aux)
elif mascara[y_aux - 1][x_aux + 1] != 0 and [y_aux - 1, x_aux + 1] in pixels_borda and [y_aux - 1,
x_aux + 1] not in bordas_percorridas:
y_aux -= 1
x_aux += 1
bordas_percorridas.append([y_aux, x_aux])
codigo_cadeia.append(1)
#print('1', y_aux, x_aux)
elif mascara[y_aux - 1][x_aux] != 0 and [y_aux - 1, x_aux] in pixels_borda and [y_aux - 1,
x_aux] not in bordas_percorridas:
y_aux -= 1
bordas_percorridas.append([y_aux, x_aux])
codigo_cadeia.append(2)
#print('2', y_aux, x_aux)
elif mascara[y_aux - 1][x_aux - 1] != 0 and [y_aux - 1, x_aux - 1] in pixels_borda and [y_aux - 1,x_aux - 1] not in bordas_percorridas:
y_aux -= 1
x_aux -= 1
bordas_percorridas.append([y_aux, x_aux])
codigo_cadeia.append(3)
#print('3', y_aux, x_aux)
#print('\nCodigo de cadeia TRUE:', codigo_cadeia)
return codigo_cadeia
# Returns the variance of the pixel values within the object
def variancia(imagemCinza, objeto):
inferiorD = objeto["retangulo"][0] # = [y,x]
inferiorE = objeto["retangulo"][1] # = [y,x]
superiorD = objeto["retangulo"][2] # = [y,x]
superiorE = objeto["retangulo"][3] # = [y,x]
pixels = []
for i in objeto['area']:
pixels.append(imagemCinza[i[0]][i[1]])
return np.var(pixels)
def calcularCentroide(imagemPontos, altura, largura, indice, centroides):
for y in range(altura):
for x in range(largura):
if imagemPontos[y][x] == indice and [y,x] not in centroides:
return [y,x]
# Returns the central position of an object in Y and X matrix coordinates (8-bit image), as well as the object area as a pixel count
def calcularCentroideArea(altura, largura, imagemRotulada, labels, indice):
#cv2.imwrite('imagemArea.png', imagemRotulada)
#cv2.imshow('imagemArea',imagemRotulada)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#print('\nIndice:', indice)
#print('Labels[indice]:', labels[indice])
area = []
somatorioX = 0
somatorioY = 0
for y in np.arange(altura):
for x in np.arange(largura):
if imagemRotulada[y][x] == labels[indice]:
area.append([y,x])
somatorioX += x
somatorioY += y
# Why did I add the condition "if area == 0 then area = 1"?
#if area == 0:
# area = 1
posX = int(somatorioX / len(area))
posY = int(somatorioY / len(area))
return posY, posX, area
# Computes the largest horizontal axis and the largest vertical axis of an object (based on the color of the pixels belonging to the object)
def calcularEixos(altura, largura, imagemRotulada, labels, indice):
maiorEixo_X = [] # Vetor com todas as coordenadas do maior Eixo X
maiorEixo_Y = [] # Vetor com todas as coordenadas do maior Eixo Y
menorX = 0
menorY = 0
maiorX = 0
maiorY = 0
# Largest and smallest X | Find the largest X axis
for i in np.arange(altura):
vetX = []
for j in np.arange(largura):
if imagemRotulada[i][j] == labels[indice]:
vetX.insert(0, [i, j])
if menorX == 0 or j < menorX:
menorX = j
if maiorX == 0 or j > maiorX:
maiorX = j
if len(vetX) > len(maiorEixo_X):
maiorEixo_X = vetX
# Largest and smallest Y | Find the largest Y axis
for j in np.arange(largura):
import numpy as np
import awkward
from awkward import JaggedArray
#for later
#func = numbaize(formula,['p%i'%i for i in range(nParms)]+[varnames[i] for i in range(nEvalVars)])
def convert_jec_txt_file(jecFilePath):
jec_f = open(jecFilePath,'r')
layoutstr = jec_f.readline().strip().strip('{}')
jec_f.close()
name = jecFilePath.split('/')[-1].split('.')[0]
layout = layoutstr.split()
if not layout[0].isdigit():
raise Exception('First column of JEC descriptor must be a digit!')
#setup the file format
nBinnedVars = int(layout[0])
nBinColumns = 2*nBinnedVars
nEvalVars = int(layout[nBinnedVars+1])
formula = layout[nBinnedVars+nEvalVars+2]
nParms = 0
while( formula.count('[%i]'%nParms) ):
formula = formula.replace('[%i]'%nParms,'p%i'%nParms)
nParms += 1
#protect function names with vars in them
funcs_to_cap = ['max','exp']
for f in funcs_to_cap:
formula = formula.replace(f,f.upper())
templatevars = ['x','y','z','w','t','s']
varnames = [layout[i+nBinnedVars+2] for i in range(nEvalVars)]
for find,replace in zip(templatevars,varnames):
formula = formula.replace(find,replace)
#restore max
for f in funcs_to_cap:
formula = formula.replace(f.upper(),f)
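#illustrative note (added): with a made-up layout line such as
#  "1 JetEta 3 JetPt JetEta Rho max(0.0001,[0]+[1]*log10(x)) Correction L2Relative"
#the block above rewrites the formula to "max(0.0001,p0+p1*log10(JetPt))": parameters
#[0],[1] become p0,p1 and the template variable x becomes the first eval variable name,
#while 'max'/'exp' are temporarily upper-cased so their letters are not clobbered.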
nFuncColumns = 2*nEvalVars + nParms
nTotColumns = nFuncColumns + 1
#parse the columns
minMax = ['Min','Max']
columns = []
dtypes = []
offset = 1
for i in range(nBinnedVars):
columns.extend(['%s%s'%(layout[i+offset],mm) for mm in minMax])
dtypes.extend(['<f8','<f8'])
columns.append('NVars')
dtypes.append('<i8')
offset += nBinnedVars + 1
for i in range(nEvalVars):
columns.extend(['%s%s'%(layout[i+offset],mm) for mm in minMax])
dtypes.extend(['<f8','<f8'])
for i in range(nParms):
columns.append('p%i'%i)
dtypes.append('<f8')
pars = np.genfromtxt(jecFilePath,
dtype=tuple(dtypes),
names=tuple(columns),
skip_header=1,
unpack=True,
encoding='ascii'
)
#the first bin is always usual for JECs
#the next bins may vary in number, so they're jagged arrays... yay
bins = {}
offset_col = 0
offset_name = 1
bin_order = []
for i in range(nBinnedVars):
binMins = None
binMaxs = None
if i == 0:
binMins = np.unique(pars[columns[0]])
binMaxs = np.unique(pars[columns[1]])
bins[layout[i+offset_name]] = np.union1d(binMins,binMaxs)
else:
counts = np.zeros(0,dtype=np.int)
allBins = np.zeros(0,dtype=np.double)
for binMin in bins[bin_order[0]][:-1]:
binMins = np.unique(pars[np.where(pars[columns[0]] == binMin)][columns[i+offset_col]])
binMaxs = np.unique(pars[np.where(pars[columns[0]] == binMin)][columns[i+offset_col+1]])
theBins = np.union1d(binMins,binMaxs)
allBins = np.append(allBins,theBins)
counts = np.append(counts,theBins.size)
bins[layout[i+offset_name]] = JaggedArray.fromcounts(counts,allBins)
bin_order.append(layout[i+offset_name])
offset_col += 1
#skip nvars to the variable columns
#the columns here define clamps for the variables defined in columns[]
# ----> clamps can be different from bins
# ----> if there is more than one binning variable this array is jagged
# ----> just make it jagged all the time
binshapes = tuple([bins[thebin].size-1 for thebin in bin_order])
clamp_mins = {}
clamp_maxs = {}
var_order = []
offset_col = 2*nBinnedVars+1
offset_name = nBinnedVars + 2
jagged_counts = np.ones(bins[bin_order[0]].size-1,dtype=np.int)
if len(bin_order) > 1:
jagged_counts = np.maximum(bins[bin_order[1]].counts - 1, 0)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cv2
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.tri as mtri
color_hand_joints = [[1.0, 0.0, 0.0],
[0.0, 0.4, 0.0], [0.0, 0.6, 0.0], [0.0, 0.8, 0.0], [0.0, 1.0, 0.0], # thumb
[0.0, 0.0, 0.6], [0.0, 0.0, 1.0], [0.2, 0.2, 1.0], [0.4, 0.4, 1.0], # index
[0.0, 0.4, 0.4], [0.0, 0.6, 0.6], [0.0, 0.8, 0.8], [0.0, 1.0, 1.0], # middle
[0.4, 0.4, 0.0], [0.6, 0.6, 0.0], [0.8, 0.8, 0.0], [1.0, 1.0, 0.0], # ring
[0.4, 0.0, 0.4], [0.6, 0.0, 0.6], [0.8, 0.0, 0.8], [1.0, 0.0, 1.0]] # little
camera_shape = [[[-0.05, 0.05, 0.05, -0.05, -0.05], [-0.05, -0.05, 0.05, 0.05, -0.05], [0, 0, 0, 0, 0]],
[[0.05, 0], [0.05, 0], [0, -0.1]],
[[0.05, 0], [-0.05, 0], [0, -0.1]],
[[-0.05, 0], [-0.05, 0], [0, -0.1]],
[[-0.05, 0], [0.05, 0], [0, -0.1]]
]
camera_color = (0, 0, 200/255)
def fig2data(fig):
"""
@brief Convert a Matplotlib figure to a 3D numpy array with RGBA channels and return it
@param fig a matplotlib figure
@return a numpy 3D array of RGBA values
"""
# draw the renderer
fig.canvas.draw()
# Get the RGBA buffer from the figure
w, h = fig.canvas.get_width_height()
buf = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8)  # np.fromstring is deprecated for binary data
buf.shape = (w, h, 4)
# canvas.tostring_argb gives the pixmap in ARGB order; roll the alpha channel to get RGBA
buf = np.roll(buf, 3, axis=2)
return buf
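# Hedged usage sketch (added for illustration): fig2data is typically called right after the
# figure is fully drawn, e.g.
#   fig, ax = plt.subplots(); ax.plot([0, 1], [0, 1]); rgba = fig2data(fig); plt.close(fig)
# rgba is then an array of uint8 RGBA values that cv2.imwrite or similar consumers can use
# (after dropping or reordering the alpha channel as needed).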
def draw_silhouette(image, mask=None, poly=None):
"""
:param image: H x W x 3
:param mask: H x W
:param poly: 1 x N x 2 (np.array)
:return:
"""
img_mask = image.copy()
if mask is not None:
mask = np.concatenate([np.zeros(list(mask.shape) + [2]), mask[:, :, None]], 2).astype(np.uint8) * 255
img_mask = cv2.addWeighted(img_mask, 1, mask, 0.5, 0)
if poly is not None:
cv2.polylines(img_mask, poly, isClosed=True, thickness=2, color=(0, 0, 255))
return img_mask
def draw_mesh(image, cam_param, mesh_xyz, face):
"""
:param image: H x W x 3
:param cam_param: 1 x 3 x 3
:param mesh_xyz: 778 x 3
:param face: 1538 x 3 x 2
:return:
"""
vertex2uv = np.matmul(cam_param, mesh_xyz.T).T
vertex2uv = (vertex2uv / vertex2uv[:, 2:3])[:, :2].astype(np.int)
fig = plt.figure()
fig.set_size_inches(float(image.shape[0]) / fig.dpi, float(image.shape[1]) / fig.dpi, forward=True)
plt.imshow(image)
plt.axis('off')
if face is None:
plt.plot(vertex2uv[:, 0], vertex2uv[:, 1], 'o', color='green', markersize=1)
else:
plt.triplot(vertex2uv[:, 0], vertex2uv[:, 1], face, lw=0.5, color='orange')
plt.subplots_adjust(left=0., right=1., top=1., bottom=0, wspace=0, hspace=0)
ret = fig2data(fig)
plt.close(fig)
return ret
def draw_2d_skeleton(image, pose_uv):
"""
:param image: H x W x 3
:param pose_uv: 21 x 2
wrist,
thumb_mcp, thumb_pip, thumb_dip, thumb_tip
index_mcp, index_pip, index_dip, index_tip,
middle_mcp, middle_pip, middle_dip, middle_tip,
ring_mcp, ring_pip, ring_dip, ring_tip,
little_mcp, little_pip, little_dip, little_tip
:return:
"""
assert pose_uv.shape[0] == 21
skeleton_overlay = image.copy()
marker_sz = 6
line_wd = 3
root_ind = 0
for joint_ind in range(pose_uv.shape[0]):
joint = pose_uv[joint_ind, 0].astype('int32'), pose_uv[joint_ind, 1].astype('int32')
cv2.circle(
skeleton_overlay, joint,
radius=marker_sz, color=color_hand_joints[joint_ind] * np.array(255), thickness=-1,
lineType=cv2.CV_AA if cv2.__version__.startswith('2') else cv2.LINE_AA)
if joint_ind == 0:
continue
elif joint_ind % 4 == 1:
root_joint = pose_uv[root_ind, 0].astype('int32'), pose_uv[root_ind, 1].astype('int32')
cv2.line(
skeleton_overlay, root_joint, joint,
color=color_hand_joints[joint_ind] * np.array(255), thickness=int(line_wd),
lineType=cv2.CV_AA if cv2.__version__.startswith('2') else cv2.LINE_AA)
else:
joint_2 = pose_uv[joint_ind - 1, 0].astype('int32'), pose_uv[joint_ind - 1, 1].astype('int32')
cv2.line(
skeleton_overlay, joint_2, joint,
color=color_hand_joints[joint_ind] * np.array(255), thickness=int(line_wd),
lineType=cv2.CV_AA if cv2.__version__.startswith('2') else cv2.LINE_AA)
return skeleton_overlay
def draw_3d_skeleton(pose_cam_xyz, image_size):
"""
:param pose_cam_xyz: 21 x 3
:param image_size: H, W
:return:
"""
assert pose_cam_xyz.shape[0] == 21
fig = plt.figure()
fig.set_size_inches(float(image_size[0]) / fig.dpi, float(image_size[1]) / fig.dpi, forward=True)
ax = plt.subplot(111, projection='3d')
marker_sz = 10
line_wd = 2
for i, shape in enumerate(camera_shape):
ax.plot(shape[0], shape[1], shape[2], color=camera_color, linestyle=(':', '-')[i==0])
for joint_ind in range(pose_cam_xyz.shape[0]):
ax.plot(pose_cam_xyz[joint_ind:joint_ind + 1, 0], pose_cam_xyz[joint_ind:joint_ind + 1, 1],
pose_cam_xyz[joint_ind:joint_ind + 1, 2], '.', c=color_hand_joints[joint_ind], markersize=marker_sz)
if joint_ind == 0:
continue
elif joint_ind % 4 == 1:
ax.plot(pose_cam_xyz[[0, joint_ind], 0], pose_cam_xyz[[0, joint_ind], 1], pose_cam_xyz[[0, joint_ind], 2],
color=color_hand_joints[joint_ind], linewidth=line_wd)
else:
ax.plot(pose_cam_xyz[[joint_ind - 1, joint_ind], 0], pose_cam_xyz[[joint_ind - 1, joint_ind], 1],
pose_cam_xyz[[joint_ind - 1, joint_ind], 2], color=color_hand_joints[joint_ind],
linewidth=line_wd)
ax.axis('auto')
x_lim = [-0.1, 0.1, 0.02]
y_lim = [-0.1, 0.12, 0.02]
z_lim = [0.0, 0.8, 0.1]
x_ticks = np.arange(x_lim[0], x_lim[1], step=x_lim[2])
y_ticks = np.arange(y_lim[0], y_lim[1], step=y_lim[2])
z_ticks = np.arange(z_lim[0], z_lim[1], step=z_lim[2])
plt.xticks(x_ticks, [x_lim[0], '', '', '', '', 0, '', '', '', x_lim[1]], fontsize=14)
plt.yticks(y_ticks, [y_lim[0], '', '', '', '', 0, '', '', '', -y_lim[0], ''], fontsize=14)
ax.set_zticks(z_ticks)
z_ticks = [''] * (z_ticks.shape[0])
z_ticks[4] = 0.4
ax.set_zticklabels(z_ticks, fontsize=14)
ax.view_init(elev=140, azim=80)
plt.subplots_adjust(left=-0.06, right=0.98, top=0.93, bottom=-0.07, wspace=0, hspace=0)
ret = fig2data(fig)
plt.close(fig)
return ret
def draw_3d_mesh(mesh_xyz, image_size, face):
"""
:param mesh_xyz: 778 x 3
:param image_size: H, W
:param face: 1538 x 3
:return:
"""
fig = plt.figure()
fig.set_size_inches(float(image_size[0]) / fig.dpi, float(image_size[1]) / fig.dpi, forward=True)
ax = plt.subplot(111, projection='3d')
for i, shape in enumerate(camera_shape):
ax.plot(shape[0], shape[1], shape[2], color=camera_color, linestyle=(':', '-')[i==0])
triang = mtri.Triangulation(mesh_xyz[:, 0], mesh_xyz[:, 1], triangles=face)
ax.plot_trisurf(triang, mesh_xyz[:, 2], color=(145/255, 181/255, 255/255))
ax.axis('auto')
x_lim = [-0.1, 0.1, 0.02]
y_lim = [-0.1, 0.12, 0.02]
z_lim = [0.0, 0.8, 0.1]
x_ticks = np.arange(x_lim[0], x_lim[1], step=x_lim[2])
from __future__ import division
import numpy as np
from numpy import linalg
from numba import njit
from beam_telescope_analysis.tools import geometry_utils
@njit(cache=True)
def _filter_predict_f(track_jacobian, local_scatter_gain_matrix, transition_covariance, current_filtered_state, current_filtered_state_covariance):
"""Calculates the (forward) predicted state and its covariance matrix. Prediction is done on whole track chunk with size chunk_size.
Parameters
----------
transition_matrix : [chunk_size, n_dim_state, n_dim_state] array
state transition matrix from time t to t+1.
transition_covariance : [chunk_size, n_dim_state, n_dim_state] array
covariance matrix for state transition from time t to t+1.
current_filtered_state: [chunk_size, n_dim_state] array
filtered state at time t.
current_filtered_state_covariance: [chunk_size, n_dim_state, n_dim_state] array
covariance of filtered state at time t.
Returns
-------
predicted_state : [chunk_size, n_dim_state] array
predicted state at time t+1.
predicted_state_covariance : [chunk_size, n_dim_state, n_dim_state] array
covariance matrix of predicted state at time t+1.
"""
# Extrapolate current filtered state (plane k -> plane k+1)
predicted_state = _vec_mul(track_jacobian, current_filtered_state)
# Extrapolate current filtered covariance (plane k -> plane k+1). Neglect air gap between detectors
predicted_state_covariance = _mat_mul(track_jacobian,
_mat_mul(current_filtered_state_covariance,
_mat_trans(track_jacobian)))
# Add process noise to covariance matrix
general_scatter_gain_matrix = _mat_mul(track_jacobian, local_scatter_gain_matrix)
predicted_state_covariance += _mat_mul(general_scatter_gain_matrix,
_mat_mul(transition_covariance,
_mat_trans(general_scatter_gain_matrix)))
return predicted_state, predicted_state_covariance
@njit(cache=True)
def _filter_predict_b(track_jacobian, local_scatter_gain_matrix, transition_covariance, current_filtered_state, current_filtered_state_covariance):
"""Calculates the (backward) predicted state and its covariance matrix. Prediction is done on whole track chunk with size chunk_size.
Parameters
----------
transition_matrix : [chunk_size, n_dim_state, n_dim_state] array
state transition matrix from time t to t+1.
transition_covariance : [chunk_size, n_dim_state, n_dim_state] array
covariance matrix for state transition from time t to t+1.
current_filtered_state: [chunk_size, n_dim_state] array
filtered state at time t.
current_filtered_state_covariance: [chunk_size, n_dim_state, n_dim_state] array
covariance of filtered state at time t.
Returns
-------
predicted_state : [chunk_size, n_dim_state] array
predicted state at time t+1.
predicted_state_covariance : [chunk_size, n_dim_state, n_dim_state] array
covariance matrix of predicted state at time t+1.
"""
# Extrapolate current filtered state (plane k -> plane k+1)
predicted_state = _vec_mul(track_jacobian, current_filtered_state)
# Extrapolate current filtered covariance (plane k -> plane k+1). Neglect air gap between detectors
predicted_state_covariance = _mat_mul(track_jacobian,
_mat_mul(current_filtered_state_covariance,
_mat_trans(track_jacobian)))
# Add process noise to covariance matrix
general_scatter_gain_matrix = local_scatter_gain_matrix
predicted_state_covariance += _mat_mul(general_scatter_gain_matrix,
_mat_mul(transition_covariance,
_mat_trans(general_scatter_gain_matrix)))
return predicted_state, predicted_state_covariance
@njit(cache=True)
def _filter_correct(reference_state, observation_matrix, observation_covariance, predicted_state, predicted_state_covariance, observation):
r"""Filters a predicted state with the Kalman Filter. Filtering
is done on whole track chunk with size chunk_size.
Parameters
----------
observation_matrix : [chunk_size, n_dim_obs, n_dim_obs] array
observation matrix for time t.
observation_covariance : [chunk_size, n_dim_obs, n_dim_obs] array
covariance matrix for observation at time t.
predicted_state : [chunk_size, n_dim_state] array
predicted state at time t.
predicted_state_covariance : [n_dim_state, n_dim_state] array
covariance matrix of predicted state at time t.
observation : [chunk_size, n_dim_obs] array
observation at time t. If observation is a masked array and any of
its values are masked, the observation will be not included in filtering.
Returns
-------
kalman_gain : [chunk_size, n_dim_state, n_dim_obs] array
Kalman gain matrix for time t.
filtered_state : [chunk_size, n_dim_state] array
filtered state at time t.
filtered_state_covariance : [chunk_size, n_dim_state, n_dim_state] array
covariance matrix of filtered state at time t.
"""
predicted_observation = _vec_mul(observation_matrix, predicted_state + reference_state)
predicted_observation_covariance = _mat_mul(observation_matrix,
_mat_mul(predicted_state_covariance, _mat_trans(observation_matrix))) + observation_covariance
kalman_gain = _mat_mul(predicted_state_covariance,
_mat_mul(_mat_trans(observation_matrix),
_mat_inverse(predicted_observation_covariance)))
filtered_state = predicted_state + _vec_mul(kalman_gain, observation - predicted_observation)
filtered_state_covariance = predicted_state_covariance - _mat_mul(kalman_gain,
_mat_mul(observation_matrix,
predicted_state_covariance))
# Set filtered state to predicted state where no observation is available.
valid_hit_selection = ~np.isnan(observation[:, 0])
kalman_gain[~valid_hit_selection, :, :] = 0.0 # Zero kalman gain
filtered_state[~valid_hit_selection, :] = predicted_state[~valid_hit_selection, :]
filtered_state_covariance[~valid_hit_selection, :, :] = predicted_state_covariance[~valid_hit_selection, :, :]
# Calculate chi2 (only if observation available)
filtered_residuals = observation[valid_hit_selection] - _vec_mul(observation_matrix[valid_hit_selection], filtered_state[valid_hit_selection] + reference_state[valid_hit_selection])
filtered_residuals_covariance = observation_covariance[valid_hit_selection] - _mat_mul(observation_matrix[valid_hit_selection], _mat_mul(filtered_state_covariance[valid_hit_selection], _mat_trans(observation_matrix[valid_hit_selection])))
chi2 = _vec_vec_mul(filtered_residuals, _vec_mul(_mat_inverse(filtered_residuals_covariance), filtered_residuals))
return kalman_gain, filtered_state, filtered_state_covariance, chi2
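# Note (added for reference): the update above follows the standard Kalman filter equations,
#   S = H P- H^T + R,  K = P- H^T S^-1,  x+ = x- + K (z - H (x- + x_ref)),  P+ = (I - K H) P-,
# applied per track in the chunk; the chi2 term is built from the filtered residual and its
# covariance R - H P+ H^T, and tracks without a hit fall back to the prediction.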
@profile
def _filter_f(dut_planes, reference_states, z_sorted_dut_indices, select_fit_duts, observations, observation_matrices, transition_covariances, observation_covariances, initial_state, initial_state_covariance):
"""Apply the Kalman Filter. First a prediction of the state is done, then a filtering is
done which includes the observations.
Parameters
----------
dut_planes : list
List of DUT parameters (material_budget, translation_x, translation_y, translation_z, rotation_alpha, rotation_beta, rotation_gamma).
z_sorted_dut_indices : list
List of DUT indices in the order reflecting their z position.
select_fit_duts : iterable
List of DUTs which should be included in the Kalman Filter. DUTs which are not in the list
are treated as missing measurements and will not be included in the filtering step.
observations : [chunk_size, n_timesteps, n_dim_obs] array
observations (measurements) from times [0...n_timesteps-1]. If any of observations is masked,
then observations[:, t] will be treated as a missing observation
and will not be included in the filtering step.
observation_matrices : [chunk_size, n_timesteps, n_dim_obs, n_dim_state] array-like
observation matrices.
transition_covariances : [chunk_size, n_timesteps-1, n_dim_state,n_dim_state] array-like
covariance matrices of transition matrices.
observation_covariances : [chunk_size, n_timesteps, n_dim_obs, n_dim_obs] array-like
covariance matrices of observation matrices.
initial_state : [chunk_size, n_dim_state] array-like
initial value of state.
initial_state_covariance : [chunk_size, n_dim_state, n_dim_state] array-like
initial value for observation covariance matrices.
Returns
-------
predicted_states : [chunk_size, n_timesteps, n_dim_state] array
predicted states of times [0...t].
predicted_state_covariances : [chunk_size, n_timesteps, n_dim_state, n_dim_state] array
covariance matrices of predicted states of times [0...t].
kalman_gains : [chunk_size, n_timesteps, n_dim_state] array
Kalman gain matrices of times [0...t].
filtered_states : [chunk_size, n_timesteps, n_dim_state] array
filtered states of times [0...t].
filtered_state_covariances : [chunk_size, n_timesteps, n_dim_state] array
covariance matrices of filtered states of times [0...t].
"""
chunk_size, n_timesteps, n_dim_obs = observations.shape
n_dim_state = initial_state_covariance.shape[2]
predicted_states = np.zeros((chunk_size, n_timesteps, n_dim_state))
predicted_state_covariances = np.zeros((chunk_size, n_timesteps, n_dim_state, n_dim_state))
kalman_gains = np.zeros((chunk_size, n_timesteps, n_dim_state, n_dim_obs))
filtered_states = np.zeros((chunk_size, n_timesteps, n_dim_state))
filtered_state_covariances = np.zeros((chunk_size, n_timesteps, n_dim_state, n_dim_state))
chi2 = np.full((chunk_size, n_timesteps), fill_value=np.nan)
Js = np.zeros((chunk_size, n_timesteps, n_dim_state, n_dim_state))
for i, dut_index in enumerate(z_sorted_dut_indices):
# Get actual reference state
reference_state = reference_states[:, dut_index, :]
if i == 0: # first DUT: Set predicted state to initial state
predicted_states[:, dut_index] = initial_state
predicted_state_covariances[:, dut_index] = initial_state_covariance
else:
previuos_dut_index = z_sorted_dut_indices[i - 1] # index of previous DUT
previous_dut = dut_planes[previuos_dut_index] # we use filter from previous plane and extrapolate/predict onto actual plane
actual_dut = dut_planes[dut_index] # we want to get prediction onto actual plane [dut_index]
# Local to global transformation
rotation_matrix_previous_dut = geometry_utils.rotation_matrix(
alpha=previous_dut.rotation_alpha,
beta=previous_dut.rotation_beta,
gamma=previous_dut.rotation_gamma)
rotation_matrix_actual_dut = geometry_utils.rotation_matrix(
alpha=actual_dut.rotation_alpha,
beta=actual_dut.rotation_beta,
gamma=actual_dut.rotation_gamma)
previous_dut_position = np.array([previous_dut.translation_x, previous_dut.translation_y, previous_dut.translation_z])
actual_dut_position = np.array([actual_dut.translation_x, actual_dut.translation_y, actual_dut.translation_z])
# Transition matrix: 0: not defined/needed, 1: 0->1, 2: 1->2 (k: k-1 --> k)
Js[:, dut_index, :, :] = _calculate_track_jacobian(
reference_state=reference_states[:, previuos_dut_index, :], # use reference state from before
dut_position=np.tile(previous_dut_position, reps=(reference_state.shape[0], 1)),
target_dut_position=np.tile(actual_dut_position, reps=(reference_state.shape[0], 1)), # extrapolates to this position
rotation_matrix=np.tile(rotation_matrix_previous_dut.T, reps=(reference_state.shape[0], 1, 1)),
rotation_matrix_target_dut=np.tile(rotation_matrix_actual_dut.T, reps=(reference_state.shape[0], 1, 1)))
# According to Wolin et al. paper
Gl_det = _calculate_scatter_gain_matrix(reference_state=reference_states[:, previuos_dut_index, :]) # use reference state from before
# Calculate prediction from filter
predicted_states[:, dut_index], predicted_state_covariances[:, dut_index] = _filter_predict_f(
track_jacobian=Js[:, dut_index, :, :],
local_scatter_gain_matrix=Gl_det,
transition_covariance=transition_covariances[:, previuos_dut_index],
current_filtered_state=filtered_states[:, previuos_dut_index],
current_filtered_state_covariance=filtered_state_covariances[:, previuos_dut_index])
check_covariance_matrix(predicted_state_covariances[:, dut_index]) # Sanity check for covariance matrix
valid_hit_selection = ~np.isnan(observations[:, dut_index, 0])
if dut_index in select_fit_duts:
# DUT is a fit dut: set filter to prediction where no hit is available, otherwise calculate filtered state.
kalman_gains[:, dut_index], filtered_states[:, dut_index], filtered_state_covariances[:, dut_index], chi2[valid_hit_selection, dut_index] = _filter_correct(
reference_state=reference_state, # use reference state from actual plane for filtering
observation_matrix=observation_matrices[:, dut_index],
observation_covariance=observation_covariances[:, dut_index],
predicted_state=predicted_states[:, dut_index],
predicted_state_covariance=predicted_state_covariances[:, dut_index],
observation=observations[:, dut_index])
chi2[~valid_hit_selection, dut_index] = np.nan # No hit, thus no chi2
check_covariance_matrix(filtered_state_covariances[:, dut_index]) # Sanity check for covariance matrix
else:
# DUT is not a fit dut: set filter to prediction.
kalman_gains[:, dut_index] = np.zeros((chunk_size, n_dim_state, n_dim_obs), dtype=np.float64)
filtered_states[:, dut_index] = predicted_states[:, dut_index]
filtered_state_covariances[:, dut_index] = predicted_state_covariances[:, dut_index]
check_covariance_matrix(filtered_state_covariances[:, dut_index]) # Sanity check for covariance matrix
# Calculate chi2 (only if observation available).
filtered_residuals = observations[valid_hit_selection, dut_index] - _vec_mul(observation_matrices[valid_hit_selection, dut_index], filtered_states[valid_hit_selection, dut_index] + reference_state[valid_hit_selection])
# Note: need to add here covariance matrices, since in this case (filter equals to prediction) need to use the formula for predicted residual covariance
filtered_residuals_covariance = observation_covariances[valid_hit_selection, dut_index] + _mat_mul(observation_matrices[valid_hit_selection, dut_index], _mat_mul(filtered_state_covariances[valid_hit_selection, dut_index], _mat_trans(observation_matrices[valid_hit_selection, dut_index])))
check_covariance_matrix(filtered_residuals_covariance) # Sanity check for covariance matrix
chi2[valid_hit_selection, dut_index] = _vec_vec_mul(filtered_residuals, _vec_mul(_mat_inverse(filtered_residuals_covariance), filtered_residuals))
chi2[~valid_hit_selection, dut_index] = np.nan # No hit, thus no chi2
# Final check for valid chi2
if np.any(chi2[~np.isnan(chi2)] < 0.0):
raise RuntimeError('Some chi-square values are negative (during filter step)!')
return predicted_states, predicted_state_covariances, kalman_gains, filtered_states, filtered_state_covariances, chi2, Js
def _filter_b(dut_planes, reference_states, z_sorted_dut_indices, select_fit_duts, observations, observation_matrices, transition_covariances, observation_covariances, initial_state, initial_state_covariance):
"""Apply the Kalman Filter. First a prediction of the state is done, then a filtering is
done which includes the observations.
Parameters
----------
dut_planes : list
List of DUT parameters (material_budget, translation_x, translation_y, translation_z, rotation_alpha, rotation_beta, rotation_gamma).
z_sorted_dut_indices : list
List of DUT indices in the order reflecting their z position.
select_fit_duts : iterable
List of DUTs which should be included in the Kalman Filter. DUTs which are not in the list
are treated as missing measurements and will not be included in the filtering step.
observations : [chunk_size, n_timesteps, n_dim_obs] array
observations (measurements) from times [0...n_timesteps-1]. If any of observations is masked,
then observations[:, t] will be treated as a missing observation
and will not be included in the filtering step.
observation_matrices : [chunk_size, n_timesteps, n_dim_obs, n_dim_state] array-like
observation matrices.
transition_covariances : [chunk_size, n_timesteps-1, n_dim_state,n_dim_state] array-like
covariance matrices of transition matrices.
observation_covariances : [chunk_size, n_timesteps, n_dim_obs, n_dim_obs] array-like
covariance matrices of observation matrices.
initial_state : [chunk_size, n_dim_state] array-like
initial value of state.
initial_state_covariance : [chunk_size, n_dim_state, n_dim_state] array-like
initial value for observation covariance matrices.
Returns
-------
predicted_states : [chunk_size, n_timesteps, n_dim_state] array
predicted states of times [0...t].
predicted_state_covariances : [chunk_size, n_timesteps, n_dim_state, n_dim_state] array
covariance matrices of predicted states of times [0...t].
kalman_gains : [chunk_size, n_timesteps, n_dim_state] array
Kalman gain matrices of times [0...t].
filtered_states : [chunk_size, n_timesteps, n_dim_state] array
filtered states of times [0...t].
filtered_state_covariances : [chunk_size, n_timesteps, n_dim_state] array
covariance matrices of filtered states of times [0...t].
"""
chunk_size, n_timesteps, n_dim_obs = observations.shape
n_dim_state = initial_state_covariance.shape[2]
predicted_states = np.zeros((chunk_size, n_timesteps, n_dim_state))
predicted_state_covariances = np.zeros((chunk_size, n_timesteps, n_dim_state, n_dim_state))
kalman_gains = np.zeros((chunk_size, n_timesteps, n_dim_state, n_dim_obs))
filtered_states = np.zeros((chunk_size, n_timesteps, n_dim_state))
filtered_state_covariances = np.zeros((chunk_size, n_timesteps, n_dim_state, n_dim_state))
chi2 = np.full((chunk_size, n_timesteps), fill_value=np.nan)
Js = np.zeros((chunk_size, n_timesteps, n_dim_state, n_dim_state))
for i, dut_index in enumerate(z_sorted_dut_indices[::-1]):
# Get actual reference state
reference_state = reference_states[:, dut_index, :]
if i == 0: # first DUT / last DUT.
predicted_states[:, dut_index] = initial_state
predicted_state_covariances[:, dut_index] = initial_state_covariance
else:
previuos_dut_index = z_sorted_dut_indices[::-1][i - 1] # index of previous DUT
previous_dut = dut_planes[previuos_dut_index] # we use filter from previous plane and extrapolate/predict onto actual plane
actual_dut = dut_planes[dut_index] # we want to get prediction onto actual plane [dut_index]
check_covariance_matrix(transition_covariances[:, previuos_dut_index]) # Sanity check for covariance matrix
# Local to global transformation
rotation_matrix_previous_dut = geometry_utils.rotation_matrix(
alpha=previous_dut.rotation_alpha,
beta=previous_dut.rotation_beta,
gamma=previous_dut.rotation_gamma)
rotation_matrix_actual_dut = geometry_utils.rotation_matrix(
alpha=actual_dut.rotation_alpha,
beta=actual_dut.rotation_beta,
gamma=actual_dut.rotation_gamma)
previous_dut_position = np.array([previous_dut.translation_x, previous_dut.translation_y, previous_dut.translation_z])
actual_dut_position = np.array([actual_dut.translation_x, actual_dut.translation_y, actual_dut.translation_z])
# Transition matrix: 7: not defined/needed, 6: 7->6, 5: 6->5 (k: k + 1 --> k)
Js[:, dut_index, :, :] = _calculate_track_jacobian(
reference_state=reference_states[:, previuos_dut_index, :], # use reference state from before (backward)
dut_position=np.tile(previous_dut_position, reps=(reference_state.shape[0], 1)),
target_dut_position=np.tile(actual_dut_position, reps=(reference_state.shape[0], 1)),
rotation_matrix=np.tile(rotation_matrix_previous_dut.T, reps=(reference_state.shape[0], 1, 1)),
rotation_matrix_target_dut=np.tile(rotation_matrix_actual_dut.T, reps=(reference_state.shape[0], 1, 1)))
# According to Wolin et al. paper
# x_k depends only on the scatterings w_k at plane k and not(!!) on the scatterings at plane k+1
Gl_det = _calculate_scatter_gain_matrix(reference_state=reference_states[:, dut_index, :])
# Calculate prediction from filter
predicted_states[:, dut_index], predicted_state_covariances[:, dut_index] = _filter_predict_b(
track_jacobian=Js[:, dut_index, :, :],
local_scatter_gain_matrix=Gl_det,
transition_covariance=transition_covariances[:, dut_index], # x_k depends only on the scatterings w_k at plane k and not(!!) on the scatterings at plane k+1
current_filtered_state=filtered_states[:, previuos_dut_index],
current_filtered_state_covariance=filtered_state_covariances[:, previuos_dut_index])
valid_hit_selection = ~np.isnan(observations[:, dut_index, 0])
if dut_index in select_fit_duts:
# DUT is a fit dut: set filter to prediction where no hit is available, otherwise calculate filtered state.
kalman_gains[:, dut_index], filtered_states[:, dut_index], filtered_state_covariances[:, dut_index], chi2[valid_hit_selection, dut_index] = _filter_correct(
reference_state=reference_state, # use reference state from actual plane for filtering
observation_matrix=observation_matrices[:, dut_index],
observation_covariance=observation_covariances[:, dut_index],
predicted_state=predicted_states[:, dut_index],
predicted_state_covariance=predicted_state_covariances[:, dut_index],
observation=observations[:, dut_index])
chi2[~valid_hit_selection, dut_index] = np.nan # No hit, thus no chi2
check_covariance_matrix(filtered_state_covariances[:, dut_index]) # Sanity check for covariance matrix
else:
# DUT is not a fit dut: set filter to prediction.
kalman_gains[:, dut_index] = np.zeros((chunk_size, n_dim_state, n_dim_obs), dtype=np.float64)
filtered_states[:, dut_index] = predicted_states[:, dut_index]
filtered_state_covariances[:, dut_index] = predicted_state_covariances[:, dut_index]
check_covariance_matrix(filtered_state_covariances[:, dut_index]) # Sanity check for covariance matrix
# Calculate chi2 (only if observation available).
filtered_residuals = observations[valid_hit_selection, dut_index] - _vec_mul(observation_matrices[valid_hit_selection, dut_index], filtered_states[valid_hit_selection, dut_index] + reference_state[valid_hit_selection])
# Note: need to add here covariance matrices, since in this case (filter equals to prediction) need to use the formula for predicted residual covariance
filtered_residuals_covariance = observation_covariances[valid_hit_selection, dut_index] + _mat_mul(observation_matrices[valid_hit_selection, dut_index], _mat_mul(filtered_state_covariances[valid_hit_selection, dut_index], _mat_trans(observation_matrices[valid_hit_selection, dut_index])))
check_covariance_matrix(filtered_residuals_covariance) # Sanity check for covariance matrix
chi2[valid_hit_selection, dut_index] = _vec_vec_mul(filtered_residuals, _vec_mul(_mat_inverse(filtered_residuals_covariance), filtered_residuals))
chi2[~valid_hit_selection, dut_index] = np.nan # No hit, thus no chi2
# Final check for valid chi2
if np.any(chi2[~np.isnan(chi2)] < 0.0):
raise RuntimeError('Some chi-square values are negative (during filter step)!')
return predicted_states, predicted_state_covariances, kalman_gains, filtered_states, filtered_state_covariances, chi2, Js
@njit(cache=True)
def _vec_vec_mul(X, Y):
'''Helper function to multiply 3D vector with 3D vector. Multiplication is done on last two axes.
'''
result = np.zeros((X.shape[0]))
for l in range(X.shape[0]):
# iterate through rows of X
for i in range(X.shape[1]):
result[l] += X[l][i] * Y[l][i]
return result
@njit(cache=True)
def _mat_mul(X, Y):
'''Helper function to multiply two 3D matrices. Multiplication is done on last two axes.
'''
result = np.zeros((X.shape[0], X.shape[1], Y.shape[2]))
if not X.shape[2] == Y.shape[1]:
raise RuntimeError('Matrix multiplication failed due to incorrect shape!')
for l in range(X.shape[0]):
# iterate through rows of X
for i in range(X.shape[1]):
# iterate through columns of Y
for j in range(Y.shape[2]):
# iterate through rows of Y
for k in range(Y.shape[1]):
result[l][i][j] += X[l][i][k] * Y[l][k][j]
return result
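# Note (added): these jitted helpers implement batched linear algebra over the leading (chunk)
# axis; _mat_mul(X, Y), for instance, is equivalent to np.einsum('nij,njk->nik', X, Y), and is
# presumably written out explicitly so numba can compile it without object-mode fallbacks.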
@njit(cache=True)
def _vec_mul(X, Y):
'''Helper function to multiply 3D matrix with 3D vector. Multiplication is done on last two axes.
'''
result = np.zeros((X.shape[0], X.shape[1]))
for l in range(X.shape[0]):
# iterate through rows of X
for i in range(X.shape[1]):
# iterate through columns of Y
for k in range(X.shape[2]):
result[l][i] += X[l][i][k] * Y[l][k]
return result
@njit(cache=True)
def _mat_trans(X):
'''Helper function to calculate transpose of 3D matrix. Transposition is done on last two axes.
'''
result = np.zeros((X.shape[0], X.shape[2], X.shape[1]))
for l in range(X.shape[0]):
for i in range(X.shape[2]):
for j in range(X.shape[1]):
result[l][i][j] = X[l][j][i]
return result
@njit(cache=True)
def _mat_inverse(X, atol=1e-4, rtol=1e-6):
'''Helper function to calculate the inverse of a 3D matrix. Inversion is done on the last two axes.
'''
X = np.ascontiguousarray(X) # make array contiguous (avoid NumbaPerformance warning)
inv = np.zeros((X.shape), dtype=np.float64)
for i in range(X.shape[0]):
if linalg.det(X[i]) == 0.0: # Check if matrix is not singular
print('Singular Matrix')
inv[i] = X[i]
else:
inv[i] = linalg.inv(X[i])
# Check if inverse was successful
X_c = np.dot(X[i], np.dot(inv[i], X[i]))
inv_c = np.dot(inv[i], np.dot(X[i], inv[i]))
tol_X = atol + rtol * np.absolute(X_c)
tol_inv = atol + rtol * np.absolute(inv_c)
if np.any(np.absolute(X[i] - X_c) > tol_X) or np.any(np.absolute(inv[i] - inv_c) > tol_inv):
print('RuntimeError: Matrix inversion failed!')
return inv
def check_covariance_matrix(cov):
''' This function checks if the input covariance matrix is positive semi-definite (psd).
In case it is not, it will try to make the matrix psd, with the condition that the psd-corrected matrix does not
differ too much from the original one (works only if the matrix has very small negative eigenvalues, e.g. due to numerical precision, ...)
Cannot be jitted since jitted np.linalg.eigvalsh only supports 2D arrays.
'''
# Check for positive semi-definite covariance matrix. In case they are not psd, make them psd.
if not np.all(np.linalg.eigvalsh(cov) >= 0.0):
non_psd_selection = np.any(np.linalg.eigvalsh(cov) < 0.0, axis=1)
cov[non_psd_selection] = _make_matrix_psd(cov[non_psd_selection])
@njit(cache=True)
def _make_matrix_psd(A, atol=1e-5, rtol=1e-8):
"""Find the nearest positive-definite matrix to input
A Python/Numpy port of <NAME>'s `nearestSPD` MATLAB code [1], which
credits [2].
[1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
[2] <NAME>, "Computing a nearest symmetric positive semidefinite
matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
"""
A3 = np.zeros((A.shape))
for i in range(A.shape[0]):
B = (A[i] + A[i].T) / 2
_, s, V = np.linalg.svd(B)
H = np.dot(V.T, np.dot(np.diag(s), V))
A2 = (B + H) / 2
A3[i] = (A2 + A2.T) / 2
if np.all(np.linalg.eigvalsh(A3[i]) >= 0.0):
tol_A3 = atol + rtol * np.absolute(A3[i])
if np.any(np.absolute(A[i] - A3[i]) > tol_A3): # Check if corrected (psd) matrix did not change too much.
raise RuntimeError('Output matrix differs too much from input matrix during nearest PSD')
continue
else:
spacing = np.spacing(np.linalg.norm(A[i]))
I_d = np.eye(A[i].shape[0])
k = 1
while not np.all(np.linalg.eigvalsh(A3[i]) >= 0.0):
mineig = np.min(np.real(np.linalg.eigvalsh(A3[i])))
A3[i] += I_d * (-mineig * k**2 + spacing)
k += 1
tol_A3 = atol + rtol * np.absolute(A3[i])
if np.any(np.absolute(A[i] - A3[i]) > tol_A3): # Check if corrected (psd) matrix did not change too much.
raise RuntimeError('Output matrix differs too much from input matrix during nearest PSD')
return A3
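# Hedged usage sketch (added for illustration): check_covariance_matrix above is the intended
# entry point, e.g.
#   cov = np.array([[[1.0, 0.0], [0.0, -1e-12]]])   # tiny negative eigenvalue
#   check_covariance_matrix(cov)                     # replaced in place by a nearby PSD matrix
# (values made up; only matrices with near-zero negative eigenvalues can be repaired this way).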
@njit(cache=True)
def _extrapolate_state(track_state, dut_position, target_dut_position, rotation_matrix, rotation_matrix_target_dut):
''' Extrapolate track state. Track state is calculated in local system of destination plane.
'''
# Intersection point on reference surface
x_point = np.column_stack((track_state[:, 0], track_state[:, 1], np.zeros_like(track_state[:, 0])))
# Track direction on reference surface
direc = np.column_stack((track_state[:, 2], track_state[:, 3], np.ones_like(track_state[:, 2])))
# Coordinate trafo from local system of plane k to local system of plane k+1
R = _mat_mul(_mat_trans(rotation_matrix_target_dut), rotation_matrix)
x0 = _vec_mul(_mat_trans(rotation_matrix), (target_dut_position - dut_position))
# Track direction on final surface
target_direc = _vec_mul(R, direc)
# Surface normal vector in beam direction
w = np.zeros(shape=(direc.shape[0], 3), dtype=np.float64)
w[:, 2] = 1.0
# Step length
s = _vec_vec_mul(_vec_mul(R, (x0 - x_point)), w) / target_direc[:, 2]
s = np.column_stack((s, s, s)) # Proper shape
# Intersection point with target dut. Basically Eq(6,7) in http://cds.cern.ch/record/687146/files/note99_041.pdf.
target_point = _vec_mul(R, (x_point + s * direc - x0))
return np.column_stack((target_point[:, 0], target_point[:, 1], target_direc[:, 0] / target_direc[:, 2], target_direc[:, 1] / target_direc[:, 2]))
@njit(cache=True)
def _calculate_scatter_gain_matrix(reference_state):
""" Reference: <NAME> Ho (NIM A329 (1993) 493-500)
"""
p3 = reference_state[:, 2]
p4 = reference_state[:, 3]
n_trk = np.zeros(shape=(reference_state.shape[0], 3), dtype=np.float64)
n_trk[:, 0] = p3
n_trk[:, 1] = p4
n_trk[:, 2] = 1.0
n_trk_mag = np.sqrt(_vec_vec_mul(n_trk, n_trk))
n_trk[:, 0] /= n_trk_mag # normalize to 1
n_trk[:, 1] /= n_trk_mag # normalize to 1
n_trk[:, 2] /= n_trk_mag # normalize to 1
u_hat = np.zeros(shape=(reference_state.shape[0], 3), dtype=np.float64)
u_hat[:, 0] = 1.0
v_trk = np.cross(n_trk, u_hat)
v_trk_mag = np.sqrt(_vec_vec_mul(v_trk, v_trk))
v_trk[:, 0] /= v_trk_mag # normalize to 1
v_trk[:, 1] /= v_trk_mag # normalize to 1
v_trk[:, 2] /= v_trk_mag # normalize to 1
u_trk = np.cross(v_trk, n_trk)
u_trk_mag = np.sqrt(_vec_vec_mul(u_trk, u_trk))
u_trk[:, 0] /= u_trk_mag # normalize to 1
u_trk[:, 1] /= u_trk_mag # normalize to 1
u_trk[:, 2] /= u_trk_mag # normalize to 1
# Direction cosines
a1 = u_trk[:, 0]
a2 = v_trk[:, 0]
a3 = n_trk[:, 0]
b1 = u_trk[:, 1]
b2 = v_trk[:, 1]
b3 = n_trk[:, 1]
g1 = u_trk[:, 2]
g2 = v_trk[:, 2]
g3 = n_trk[:, 2]
# Scatter Gain Matrix
G = np.zeros(shape=(reference_state.shape[0], 4, 2), dtype=np.float64)
G[:, 2, 0] = (a1 * g3 - a3 * g1) / (g3 * g3) # Eq (10)
G[:, 2, 1] = (a2 * g3 - a3 * g2) / (g3 * g3) # Eq (11)
G[:, 3, 0] = (b1 * g3 - b3 * g1) / (g3 * g3) # Eq (12)
G[:, 3, 1] = (b2 * g3 - b3 * g2) / (g3 * g3) # Eq (13)
    # Scattering angles change the track direction but do not affect the impact point
G[:, 0, 0] = 0.0
G[:, 0, 1] = 0.0
G[:, 1, 0] = 0.0
G[:, 1, 1] = 0.0
return G
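# Note (added sketch): (u_trk, v_trk, n_trk) above form an orthonormal frame aligned with the
# reference track, and G maps small scattering angles defined in that frame onto changes of the
# local slopes u' and v' (rows 2 and 3).  Rows 0 and 1 stay zero because a thin scatterer kinks
# the direction without moving the impact point.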
@njit(cache=True)
def _calculate_track_jacobian(reference_state, dut_position, target_dut_position, rotation_matrix, rotation_matrix_target_dut):
''' Reference: <NAME> "Straight Line Fit for Pixel and Strip Detectors with Arbitrary Plane Orientations", CMS Note. (http://cds.cern.ch/record/687146/files/note99_041.pdf)
    Calculates change of local coordinates (u, v, u', v') from one DUT (u, v, w) to the next DUT (U, V, W) (w.r.t. the reference state).
Assumes that rotation is given from local into global coordinates.
'''
# Coordinate transformation into local system of next dut
R = _mat_mul((rotation_matrix_target_dut), _mat_trans(rotation_matrix))
x0 = _vec_mul((rotation_matrix), target_dut_position - dut_position)
x_point = np.zeros(shape=(reference_state.shape[0], 3), dtype=np.float64)
x_point[:, 0] = reference_state[:, 0]
x_point[:, 1] = reference_state[:, 1]
x_point[:, 2] = 0.0
direc = np.zeros(shape=(reference_state.shape[0], 3), dtype=np.float64)
direc[:, 0] = reference_state[:, 2]
direc[:, 1] = reference_state[:, 3]
direc[:, 2] = 1.0
target_direc = _vec_mul(R, direc)
w = np.zeros(shape=(reference_state.shape[0], 3), dtype=np.float64)
w[:, 0] = 0.0
w[:, 1] = 0.0
w[:, 2] = 1.0
s = _vec_vec_mul(_vec_mul(R, (x0 - x_point)), w) / target_direc[:, 2]
up = target_direc[:, 0] / target_direc[:, 2]
vp = target_direc[:, 1] / target_direc[:, 2]
J = | np.zeros(shape=(reference_state.shape[0], 4, 4), dtype=np.float64) | numpy.zeros |
import random
import json
import gym
from gym import spaces
import pandas as pd
import numpy as np
MAX_ACCOUNT_BALANCE = 2147483647
MAX_NUM_SHARES = 2147483647
MAX_SHARE_PRICE = 5000
MAX_OPEN_POSITIONS = 5
MAX_STEPS = 20000
COMMISSION_FEE = 0.008
INITIAL_ACCOUNT_BALANCE = 10000
class StockTradingEnv(gym.Env):
# metadata = {'render.modes': ['human']}
def __init__(self, df_list, isTraining=True):
super(StockTradingEnv, self).__init__()
self.training = isTraining
self.window_size = 6
self.df_list = []
df_list[0].dropna(inplace = True)
self.intersect_dates = df_list[0]['Date']
for df in df_list[1:]:
df.dropna(inplace = True)
self.intersect_dates = np.intersect1d(self.intersect_dates, df['Date'])
# Remove all NAN in the df
self.start_date = np.min(self.intersect_dates)
self.end_date = np.max(self.intersect_dates)
for df in df_list:
self.df_list.append(df[df['Date'].isin(self.intersect_dates)].reset_index(drop=True))
# For Multiple Markets: Adding the CASH to the action
self.market_number = len(df_list)+1
lower_bond = [[0.0]*self.market_number]*3
lower_bond = np.array(lower_bond)
lower_bond = np.reshape(lower_bond, (1,-1))
upper_bond = [[1.0]*self.market_number]*3
upper_bond = np.array(upper_bond)
upper_bond = np.reshape(upper_bond, (1,-1))
self.action_space = spaces.Box(
low=lower_bond[0], high=upper_bond[0], dtype=np.float16)
        # Lower bound (example with 3 markets + cash): [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], flattened
        # Upper bound (example with 3 markets + cash): [[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], flattened
        # Each per-market observation block holds the OHLC/volume values for the last six time steps plus one row of account info
self.observation_space = spaces.Box(
low=0, high=1, shape=(self.market_number, 6, 6), dtype=np.float16)
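        # Illustrative note (added): each of the `market_number` observation blocks is a 6 x 6
        # array. Rows 0-4 hold the last `window_size` Open/High/Low/Price/Vol values (scaled),
        # and row 5 holds account info: cash, total net worth, per-market net worth, cost basis,
        # total sales value and a padding zero.  See _next_observation below.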
def _next_observation(self):
'''
        The _next_observation method compiles the stock data for the last `window_size` (six) time steps,
        appends the agent’s account information, and scales all the values to between 0 and 1.
'''
# self.current_step is defined in reset method,
        # We assume the current_step is TODAY (BEFORE FINAL), which means we only know information up to where YESTERDAY ENDS.
obs_list = []
for i, df in enumerate(self.df_list):
frame = np.array([
df.loc[self.current_step-self.window_size: self.current_step - 1,
'Open'].values / MAX_SHARE_PRICE,
df.loc[self.current_step-self.window_size: self.current_step - 1,
'High'].values / MAX_SHARE_PRICE,
df.loc[self.current_step-self.window_size: self.current_step - 1,
'Low'].values / MAX_SHARE_PRICE,
df.loc[self.current_step-self.window_size: self.current_step - 1,
'Price'].values / MAX_SHARE_PRICE,
df.loc[self.current_step-self.window_size: self.current_step - 1,
'Vol'].values / MAX_NUM_SHARES,
], dtype=np.float64)
# Append additional data and scale each value to between 0-1
obs = np.append(frame, [[
self.cash / INITIAL_ACCOUNT_BALANCE,
self.total_net_worth / INITIAL_ACCOUNT_BALANCE,
self.net_worth[i] / INITIAL_ACCOUNT_BALANCE,
self.cost_basis[i] / INITIAL_ACCOUNT_BALANCE,
self.total_sales_value[i] / MAX_NUM_SHARES,
0.0
]], axis=0)
obs_list.append(obs)
cash_obs = np.array([
np.array([1.0]*self.window_size) / MAX_SHARE_PRICE,
np.array([1.0]*self.window_size) / MAX_SHARE_PRICE,
np.array([1.0]*self.window_size) / MAX_SHARE_PRICE,
np.array([1.0]*self.window_size) / MAX_SHARE_PRICE,
np.array([1.0]*self.window_size)
], dtype=np.float64)
cash_obs = np.stack(cash_obs)
cash_obs = np.append(cash_obs, [[
self.cash / INITIAL_ACCOUNT_BALANCE,
self.total_net_worth / INITIAL_ACCOUNT_BALANCE,
self.cash / INITIAL_ACCOUNT_BALANCE,
1 / INITIAL_ACCOUNT_BALANCE,
self.cash / MAX_NUM_SHARES,
0.0
]], axis=0)
obs_list.append(cash_obs)
obs_array = np.array(obs_list)
obs_array[pd.isna(obs_array)] = 0
obs_array[np.isinf(obs_array)] = 1
self.backup_obs = np.array(obs_list, dtype=np.float64)
return self.backup_obs
def _take_action(self, action):
# Set the current price to a random price within the time step
        # dim(self.actual_price) = [n] (becomes [n+1] after appending cash below), dim(action) = [1, n+1]
self.actual_price = np.array([random.uniform(df.loc[self.current_step, "Low"],
df.loc[self.current_step, "High"]) for df in self.df_list], dtype=np.float64)
self.actual_price = np.append(self.actual_price, 1)
# Add CASH price = 1, now dim=n+1
self.actual_price[pd.isna(self.actual_price)] = self.prev_buyNhold_price[pd.isna(self.actual_price)]
tradable_asset = (pd.isna(self.actual_price).astype(int))*(-1)+1
action = np.reshape(action, (3,-1))
action_type = | np.floor(action[0]*2.99) | numpy.floor |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @file gd_algorithm.py
# @brief
# @author QRS
# @blog qrsforever.github.io
# @version 1.0
# @date 2019-09-23 11:25:05
################################ jupyter-vim #######################################
# https://github.com/qrsforever/vim/blob/master/bundle/.configs/jupyter-vim_conf.vim
# %pylab --no-import-all # noqa
#####################################################################################
import numpy as np
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
np.random.seed(678)
#####################################################################################
# <codecell> activation functions and their derivatives
#####################################################################################
def sigmoid(x):
return 1 / (1 + np.exp(-1 * x))
def d_sigmoid(x):
return sigmoid(x) * (1 - sigmoid(x))
##
def tanh(x):
return np.tanh(x)
def d_tanh(x):
return 1 - np.tanh(x) ** 2
##
def relu(x):
mask = (x > 0.0) * 1.0
return x * mask
def d_relu(x):
mask = (x > 0.0) * 1.0
return mask
##
def elu(matrix):
mask = (matrix <= 0) * 1.0
less_zero = matrix * mask
safe = (matrix > 0) * 1.0
greater_zero = matrix * safe
    final = 3.0 * (np.exp(less_zero) - 1) * mask  # ELU branch for x <= 0, consistent with d_elu below
return greater_zero + final
def d_elu(matrix):
safe = (matrix > 0) * 1.0
mask2 = (matrix <= 0) * 1.0
temp = matrix * mask2
final = (3.0 * np.exp(temp))*mask2
return (matrix * safe) + final
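# Added sanity-check sketch (kept as a comment so the script flow is unchanged): a central
# finite difference should match the analytic derivatives defined above, e.g.
#
#   x = np.linspace(-2.0, 2.0, 9)
#   eps = 1e-6
#   assert np.allclose(d_sigmoid(x), (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps), atol=1e-5)
#   assert np.allclose(d_tanh(x), (tanh(x + eps) - tanh(x - eps)) / (2 * eps), atol=1e-5)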
#####################################################################################
# <codecell> train data
#####################################################################################
mnist = input_data.read_data_sets("/home/lidong/Datasets/ML/mnist", one_hot=False)
train = mnist.test
images, labels = train.images, train.labels
images.shape, labels.shape, labels[0:5]
## select 0,1 labels and images
zero_index, one_index = np.where(labels == 0)[0], np.where(labels == 1)[0]
zero_image, one_image = images[zero_index], images[one_index]
zero_label, one_label = np.expand_dims(labels[zero_index], axis=1), np.expand_dims(labels[one_index], axis=1)
zero_image.shape, one_image.shape, zero_label.shape, one_label.shape
## meld 0, 1 labels and images
images_org = np.vstack((zero_image, one_image))
labels_org = np.vstack((zero_label, one_label))
images_org.shape, labels_org.shape, labels_org[2:5], labels[2:5]
## shuffle method 1: sklearn.utils.shuffle
images, labels = shuffle(images_org, labels_org)
images.shape, labels.shape
## shuffle method 2: np.random.shuffle
# images_labels = np.hstack((images_org, labels_org))
# np.random.shuffle(images_labels)
# images, labels = images_labels[:, 0:-1], np.expand_dims(images_labels[:, -1], axis=1)
# images.shape, labels.shape
## train / test data
train_num, test_num = 50, 20
train_images, train_labels = images[0:train_num, :], labels[0:train_num, :]
test_images, test_labels = images[-test_num-1:-1, :], labels[-test_num-1:-1, :]
train_images.shape, test_images.shape
#####################################################################################
# <codecell> Graph
#####################################################################################
#
# *****
# * x * elu
# ***** ***** l1A
# * * tanh
# ***** ***** l2A
# ***** * *
# * * ***** ***** sigmoid
# ***** * * ***** l3A
# --------> ***** --------> --------> * *
# . . *****
# w1:784x256 . w2:256x128 . w3:128x1
# . . .
# .
# . *****
# ***** * *
# * * *****
# ***** *****
# * *
# *****
# 1x784 1x256 1x128 1x1
# input layer-1 layer-2 layer-3
#
# Loss function:
#
# (sigmoid(w3 * tanh(w2 * elu(w1 * x))) - label)^2 * 0.2
# | | | | | ------ x d(w1)
# | | | | | l1
# | | | | +-------- d_elu(l1) d(l1)
# | | | | l1A
# | | | +-------------- l1A d(w2)
# | | | l2
# | | +------------------ d_tanh(l2) d(l2)
# | | l2A
# | +------------------------ l2A d_sigmoid(l3) (l3A - label) d(w3) |
# | l3 |
# +------------------------------ d_sigmoid(l3) (l3A - label) d(l3) |w3
# l3A |
# --------------------------------------------------- (l3A - label) d(l3A)|
# cost
#
#
# Only digits 0 and 1 are kept from the 0-9 images, so the model is simplified: fully connected layers with a final sigmoid instead of a softmax.
# Matrix calculus is the tricky part; without a basic grasp of it the code is hard to follow (when to transpose, when to take dot products, etc.).
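# Shape bookkeeping for one sample (added sketch of the chain rule that every optimizer below
# implements; x is a (1, 784) image):
#   l1 = x.dot(w1)                             # (1, 256),  l1A = elu(l1)
#   l2 = l1A.dot(w2)                           # (1, 128),  l2A = tanh(l2)
#   l3 = l2A.dot(w3)                           # (1, 1),    l3A = sigmoid(l3)
#   delta3 = (l3A - label) * d_sigmoid(l3)     # (1, 1)    -> grad_w3 = l2A.T.dot(delta3)  (128, 1)
#   delta2 = delta3.dot(w3.T) * d_tanh(l2)     # (1, 128)  -> grad_w2 = l1A.T.dot(delta2)  (256, 128)
#   delta1 = delta2.dot(w2.T) * d_elu(l1)      # (1, 256)  -> grad_w1 = x.T.dot(delta1)    (784, 256)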
#####################################################################################
# <codecell> Global param
#####################################################################################
## weight
_w1 = np.random.randn(784, 256) * 0.2
_w2 = np.random.randn(256, 128) * 0.2
_w3 = np.random.randn(128, 1) * 0.2
## hyper parameters
learn_rate = 0.0003
num_epoch = 100
cost_array = {}
#####################################################################################
# <codecell> SGD
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
# layer1
l1 = image.dot(w1)
l1A = elu(l1)
# layer2
l2 = l1A.dot(w2)
l2A = tanh(l2)
# layer3
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
# loss
total_cost += np.square(l3A - label).sum() * 0.5
# eval gradient
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3 = g33.T.dot(g31 * g32) # 128x1
g21 = (g31 * g32).dot(w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2 = g23.T.dot(g21 * g22) # 256x128
g11 = (g21 * g22).dot(w2.T)
g12 = d_elu(l1)
g13 = image
g1 = g13.T.dot(g11 * g12) # 784x256
# update weight
w3 = w3 - learn_rate * g3
w2 = w2 - learn_rate * g2
w1 = w1 - learn_rate * g1
if iter % 10 == 0:
print("SGD current Iter: ", iter, " Total Cost: ", total_cost)
cost_temp_array.append(total_cost)
cost_array['sgd'] = cost_temp_array
#####################################################################################
# <codecell> Momentum
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
v1, v2, v3 = 0, 0, 0
alpha = 0.001
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
l1 = image.dot(w1)
l1A = elu(l1)
l2 = l1A.dot(w2)
l2A = tanh(l2)
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
total_cost += np.square(l3A - label).sum() * 0.5
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3 = g33.T.dot(g31 * g32)
g21 = (g31 * g32).dot(w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2 = g23.T.dot(g21 * g22)
g11 = (g21 * g22).dot(w2.T)
g12 = d_elu(l1)
g13 = image
g1 = g13.T.dot(g11 * g12)
v3 = v3 * alpha + learn_rate * g3
v2 = v2 * alpha + learn_rate * g2
v1 = v1 * alpha + learn_rate * g1
w3 = w3 - v3
w2 = w2 - v2
w1 = w1 - v1
if iter % 10 == 0:
print("Momentum current Iter: ", iter, " Total Cost: ", total_cost)
cost_temp_array.append(total_cost)
cost_array['Momentum'] = cost_temp_array
#####################################################################################
# <codecell> NAG: Nesterov accelerated gradient
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
v1, v2, v3 = 0, 0, 0
alpha = 0.001
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
l1 = image.dot(w1)
l1A = elu(l1)
l2 = l1A.dot(w2)
l2A = tanh(l2)
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
total_cost += np.square(l3A - label).sum() * 0.5
        # Look-ahead: apply the momentum step first, then evaluate the gradient at the predicted next position
fake_w3 = w3 - alpha * v3
fake_w2 = w2 - alpha * v2
fake_w1 = w1 - alpha * v1
l1 = image.dot(fake_w1)
l1A = elu(l1)
l2 = l1A.dot(fake_w2)
l2A = tanh(l2)
l3 = l2A.dot(fake_w3)
l3A = sigmoid(l3)
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3_fake = g33.T.dot(g31 * g32)
g21 = (g31 * g32).dot(fake_w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2_fake = g23.T.dot(g21 * g22)
g11 = (g21 * g22).dot(fake_w2.T)
g12 = d_elu(l1)
g13 = image
g1_fake = g13.T.dot(g11 * g12)
v3 = v3 * alpha + learn_rate * g3_fake
v2 = v2 * alpha + learn_rate * g2_fake
v1 = v1 * alpha + learn_rate * g1_fake
w3 = w3 - v3
w2 = w2 - v2
w1 = w1 - v1
if iter % 10 == 0:
print("Nesterov accelerated gradient current Iter: ", iter, " Total Cost: ", total_cost)
cost_temp_array.append(total_cost)
cost_array['NAG'] = cost_temp_array
#####################################################################################
# <codecell> Adagrad
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
vlr_1, vlr_2, vlr_3 = 0, 0, 0
epsilon = 0.00000001
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
l1 = image.dot(w1)
l1A = elu(l1)
l2 = l1A.dot(w2)
l2A = tanh(l2)
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
total_cost += np.square(l3A - label).sum() * 0.5
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3 = g33.T.dot(g31 * g32)
g21 = (g31 * g32).dot(w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2 = g23.T.dot(g21 * g22)
g11 = (g21 * g22).dot(w2.T)
g12 = d_elu(l1)
g13 = image
g1 = g13.T.dot(g11 * g12)
        # Accumulate squared gradients for the adaptive per-parameter learning rate
vlr_3 = vlr_3 + g3 ** 2 # 128x1
vlr_2 = vlr_2 + g2 ** 2 # 256x128
vlr_1 = vlr_1 + g1 ** 2 # 784x256
w3 = w3 - (learn_rate / np.sqrt(vlr_3 + epsilon)) * g3
w2 = w2 - (learn_rate / np.sqrt(vlr_2 + epsilon)) * g2
w1 = w1 - (learn_rate / np.sqrt(vlr_1 + epsilon)) * g1
if iter % 10 == 0:
print("Adagrad current Iter: ", iter, " Total Cost: ", total_cost)
cost_temp_array.append(total_cost)
cost_array['Adagrad'] = cost_temp_array
#####################################################################################
# <codecell> Adadelta
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
epsilon, gamma = 0.000001, 0.001
vlr_1, vlr_2, vlr_3 = 0, 0, 0
wlr_1, wlr_2, wlr_3 = 0, 0, 0
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
l1 = image.dot(w1)
l1A = elu(l1)
l2 = l1A.dot(w2)
l2A = tanh(l2)
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
total_cost += np.square(l3A - label).sum() * 0.5
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3 = g33.T.dot(g31 * g32)
g21 = (g31 * g32).dot(w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2 = g23.T.dot(g21 * g22)
g11 = (g21 * g22).dot(w2.T)
g12 = d_elu(l1)
g13 = image
g1 = g13.T.dot(g11 * g12)
        # Decaying average of squared gradients
vlr_3 = gamma * vlr_3 + (1-gamma) * g3 ** 2
vlr_2 = gamma * vlr_2 + (1-gamma) * g2 ** 2
vlr_1 = gamma * vlr_1 + (1-gamma) * g1 ** 2
delta_3 = - (np.sqrt(wlr_3 + epsilon) / np.sqrt(vlr_3 + epsilon)) * g3
delta_2 = - (np.sqrt(wlr_2 + epsilon) / np.sqrt(vlr_2 + epsilon)) * g2
delta_1 = - (np.sqrt(wlr_1 + epsilon) / np.sqrt(vlr_1 + epsilon)) * g1
        # Decaying average of squared parameter updates (deltas)
wlr_3 = gamma * wlr_3 + (1-gamma) * delta_3 ** 2
wlr_2 = gamma * wlr_2 + (1-gamma) * delta_2 ** 2
wlr_1 = gamma * wlr_1 + (1-gamma) * delta_1 ** 2
w3 = w3 + delta_3
w2 = w2 + delta_2
w1 = w1 + delta_1
if iter % 10 == 0:
print("Adadelta current Iter: ", iter, " Total Cost: ", total_cost)
cost_temp_array.append(total_cost)
cost_array['Adadelta'] = cost_temp_array
#####################################################################################
# <codecell> RMSprop: a special case of Adadelta
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
epsilon, gamma = 0.00000001, 0.9
vlr_1, vlr_2, vlr_3 = 0, 0, 0
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
l1 = image.dot(w1)
l1A = elu(l1)
l2 = l1A.dot(w2)
l2A = tanh(l2)
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
total_cost += np.square(l3A - label).sum() * 0.5
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3 = g33.T.dot(g31 * g32)
g21 = (g31 * g32).dot(w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2 = g23.T.dot(g21 * g22)
g11 = (g21 * g22).dot(w2.T)
g12 = d_elu(l1)
g13 = image
g1 = g13.T.dot(g11 * g12)
vlr_3 = gamma * vlr_3 + (1 - gamma) * g3 ** 2
vlr_2 = gamma * vlr_2 + (1 - gamma) * g2 ** 2
vlr_1 = gamma * vlr_1 + (1 - gamma) * g1 ** 2
w3 = w3 - (learn_rate/np.sqrt(vlr_3 + epsilon)) * g3
w2 = w2 - (learn_rate/np.sqrt(vlr_2 + epsilon)) * g2
w1 = w1 - (learn_rate/np.sqrt(vlr_1 + epsilon)) * g1
if iter % 10 == 0:
print("RMSprop current Iter: ", iter, " Total Cost: ", total_cost)
cost_temp_array.append(total_cost)
cost_array['RMSprop'] = cost_temp_array
#####################################################################################
# <codecell> Adam (adaptive moment estimation: first moment = mean, second moment = uncentered variance)
#####################################################################################
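# Added note: per parameter theta the loop below implements the Adam update
#   m_t = beta_1 * m_{t-1} + (1 - beta_1) * g_t
#   v_t = beta_2 * v_{t-1} + (1 - beta_2) * g_t ** 2
#   m_hat = m_t / (1 - beta_1),  v_hat = v_t / (1 - beta_2)   # fixed denominators here,
#                                                             # not the usual 1 - beta ** t
#   theta <- theta - learn_rate * m_hat / (np.sqrt(v_hat) + epsilon)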
w1, w2, w3 = _w1, _w2, _w3
epsilon, beta_1, beta_2 = 0.00000001, 0.9, 0.999
mlr_1, mlr_2, mlr_3 = 0, 0, 0
vlr_1, vlr_2, vlr_3 = 0, 0, 0
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
l1 = image.dot(w1)
l1A = elu(l1)
l2 = l1A.dot(w2)
l2A = tanh(l2)
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
total_cost += np.square(l3A - label).sum() * 0.5
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3 = g33.T.dot(g31 * g32)
g21 = (g31 * g32).dot(w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2 = g23.T.dot(g21 * g22)
g11 = (g21 * g22).dot(w2.T)
g12 = d_elu(l1)
g13 = image
g1 = g13.T.dot(g11 * g12)
        # First moment (mean): decaying average of the gradients
mlr_3 = beta_1 * mlr_3 + (1 - beta_1) * g3
mlr_2 = beta_1 * mlr_2 + (1 - beta_1) * g2
mlr_1 = beta_1 * mlr_1 + (1 - beta_1) * g1
        # Second moment (uncentered variance): decaying average of the squared gradients
vlr_3 = beta_2 * vlr_3 + (1 - beta_2) * g3 ** 2
vlr_2 = beta_2 * vlr_2 + (1 - beta_2) * g2 ** 2
vlr_1 = beta_2 * vlr_1 + (1 - beta_2) * g1 ** 2
        # Bias correction (note: fixed 1 - beta denominators rather than 1 - beta**t)
mlr_3_hat = mlr_3 / (1 - beta_1)
mlr_2_hat = mlr_2 / (1 - beta_1)
mlr_1_hat = mlr_1 / (1 - beta_1)
vlr_3_hat = vlr_3 / (1 - beta_2)
vlr_2_hat = vlr_2 / (1 - beta_2)
vlr_1_hat = vlr_1 / (1 - beta_2)
w3 = w3 - (learn_rate / (np.sqrt(vlr_3_hat) + epsilon)) * mlr_3_hat
w2 = w2 - (learn_rate / (np.sqrt(vlr_2_hat) + epsilon)) * mlr_2_hat
w1 = w1 - (learn_rate / (np.sqrt(vlr_1_hat) + epsilon)) * mlr_1_hat
if iter % 10 == 0:
print("Adam current Iter: ", iter, " Total Cost: ", total_cost)
cost_temp_array.append(total_cost)
cost_array['Adam'] = cost_temp_array
#####################################################################################
# <codecell> Nadam (incorporate NAG into Adam)
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
epsilon, beta_1, beta_2 = 0.00000001, 0.9, 0.999
mlr_1, mlr_2, mlr_3 = 0, 0, 0
vlr_1, vlr_2, vlr_3 = 0, 0, 0
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
l1 = image.dot(w1)
l1A = elu(l1)
l2 = l1A.dot(w2)
l2A = tanh(l2)
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
total_cost += np.square(l3A - label).sum() * 0.5
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3 = g33.T.dot(g31 * g32)
g21 = (g31 * g32).dot(w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2 = g23.T.dot(g21 * g22)
g11 = (g21 * g22).dot(w2.T)
g12 = d_elu(l1)
g13 = image
g1 = g13.T.dot(g11 * g12)
mlr_3 = beta_1 * mlr_3 + (1 - beta_1) * g3
mlr_2 = beta_1 * mlr_2 + (1 - beta_1) * g2
mlr_1 = beta_1 * mlr_1 + (1 - beta_1) * g1
vlr_3 = beta_2 * vlr_3 + (1 - beta_2) * g3 ** 2
vlr_2 = beta_2 * vlr_2 + (1 - beta_2) * g2 ** 2
vlr_1 = beta_2 * vlr_1 + (1 - beta_2) * g1 ** 2
mlr_3_hat = mlr_3 / (1 - beta_1)
mlr_2_hat = mlr_2 / (1 - beta_1)
mlr_1_hat = mlr_1 / (1 - beta_1)
vlr_3_hat = vlr_3 / (1 - beta_2)
vlr_2_hat = vlr_2 / (1 - beta_2)
vlr_1_hat = vlr_1 / (1 - beta_2)
w3 = w3 - (learn_rate/(np.sqrt(vlr_3_hat) + epsilon)) * (beta_1 * mlr_3_hat + (((1 - beta_1) * g3) / (1 - beta_1)))
w2 = w2 - (learn_rate/( | np.sqrt(vlr_2_hat) | numpy.sqrt |
from lasagne.layers import InputLayer
from lasagne.layers import DenseLayer
from lasagne.layers import ConcatLayer
from lasagne.layers import NonlinearityLayer
from lasagne.layers import GlobalPoolLayer
from lasagne.layers import Conv2DLayer as ConvLayer
from lasagne.layers import MaxPool2DLayer as PoolLayerDNN
from lasagne.layers import MaxPool2DLayer as PoolLayer
from lasagne.layers import LocalResponseNormalization2DLayer as LRNLayer
from lasagne.nonlinearities import softmax, linear
def build_inception_module(name, input_layer, nfilters):
# nfilters: (pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5)
net = {}
net['pool'] = PoolLayerDNN(input_layer, pool_size=3, stride=1, pad=1)
net['pool_proj'] = ConvLayer(net['pool'], nfilters[0], 1)
net['1x1'] = ConvLayer(input_layer, nfilters[1], 1)
net['3x3_reduce'] = ConvLayer(input_layer, nfilters[2], 1)
net['3x3'] = ConvLayer(net['3x3_reduce'], nfilters[3], 3, pad=1)
net['5x5_reduce'] = ConvLayer(input_layer, nfilters[4], 1)
net['5x5'] = ConvLayer(net['5x5_reduce'], nfilters[5], 5, pad=2)
net['output'] = ConcatLayer([
net['1x1'],
net['3x3'],
net['5x5'],
net['pool_proj'],
])
return {'{}/{}'.format(name, k): v for k, v in net.items()}
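# Added example (illustrative): for 'inception_3a' below, nfilters = [32, 64, 96, 128, 16, 32],
# so the concatenated output has 64 (1x1) + 128 (3x3) + 32 (5x5) + 32 (pool_proj) = 256 channels;
# the spatial size is preserved because every branch uses stride 1 with matching padding.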
def build_model():
net = {}
net['input'] = InputLayer((None, 3, None, None))
net['conv1/7x7_s2'] = ConvLayer(net['input'], 64, 7, stride=2, pad=3)
net['pool1/3x3_s2'] = PoolLayer(net['conv1/7x7_s2'],
pool_size=3,
stride=2,
ignore_border=False)
net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
net['conv2/3x3_reduce'] = ConvLayer(net['pool1/norm1'], 64, 1)
net['conv2/3x3'] = ConvLayer(net['conv2/3x3_reduce'], 192, 3, pad=1)
net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
net['pool2/3x3_s2'] = PoolLayer(net['conv2/norm2'], pool_size=3, stride=2)
net.update(build_inception_module('inception_3a',
net['pool2/3x3_s2'],
[32, 64, 96, 128, 16, 32]))
net.update(build_inception_module('inception_3b',
net['inception_3a/output'],
[64, 128, 128, 192, 32, 96]))
net['pool3/3x3_s2'] = PoolLayer(net['inception_3b/output'],
pool_size=3, stride=2)
net.update(build_inception_module('inception_4a',
net['pool3/3x3_s2'],
[64, 192, 96, 208, 16, 48]))
net.update(build_inception_module('inception_4b',
net['inception_4a/output'],
[64, 160, 112, 224, 24, 64]))
net.update(build_inception_module('inception_4c',
net['inception_4b/output'],
[64, 128, 128, 256, 24, 64]))
net.update(build_inception_module('inception_4d',
net['inception_4c/output'],
[64, 112, 144, 288, 32, 64]))
net.update(build_inception_module('inception_4e',
net['inception_4d/output'],
[128, 256, 160, 320, 32, 128]))
net['pool4/3x3_s2'] = PoolLayer(net['inception_4e/output'],
pool_size=3, stride=2)
net.update(build_inception_module('inception_5a',
net['pool4/3x3_s2'],
[128, 256, 160, 320, 32, 128]))
net.update(build_inception_module('inception_5b',
net['inception_5a/output'],
[128, 384, 192, 384, 48, 128]))
net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
net['loss3/classifier'] = DenseLayer(net['pool5/7x7_s1'],
num_units=1000,
nonlinearity=linear)
net['prob'] = NonlinearityLayer(net['loss3/classifier'],
nonlinearity=softmax)
return net
import skimage.transform
import numpy as np
MEAN_VALUES = | np.array([104, 117, 123]) | numpy.array |
from scipy import optimize
import numpy as np
from matplotlib import pyplot as plt
import scipy.integrate as integrate
def curve(x, t):
period = 2 * np.pi / x[1]
if isinstance(t, float):
t = np.array((t,))
y = | np.ndarray((t.shape[0],)) | numpy.ndarray |
import pyopencl as cl
import numpy
from pyPaSWAS.Core.SmithWaterman import SmithWaterman
from pyPaSWAS.Core import STOP_DIRECTION, LEFT_DIRECTION, NO_DIRECTION, UPPER_DIRECTION, UPPER_LEFT_DIRECTION
from pyPaSWAS.Core.PaSWAS import CPUcode
from pyPaSWAS.Core.PaSWAS import GPUcode
from pyPaSWAS.Core.StartingPoint import StartingPoint
class SmithWatermanOcl(SmithWaterman):
'''
classdocs
'''
def __init__(self, logger, score, settings):
'''
Constructor
'''
SmithWaterman.__init__(self, logger, score, settings)
#self.oclcode = OCLcode(self.logger)
# platforms: A single ICD on a computer
self.platform = None
# device: device which will perform computation (for example a CPU or GPU)
self.device = None
# context: manages a command-queue, memory, program and kernel objects
self.ctx = None
# queue: stores instructions for the device
self.queue = None
# program: the compiled kernel program
self.program = None
# device_type: type of device to run computations on
self.device_type = 0
self._set_device_type(self.settings.device_type)
self._set_platform(self.settings.platform_name)
self._initialize_device(int(self.settings.device_number))
self.always_reallocate_memory = False
def _init_oclcode(self):
# Compiling part of the OpenCL code in advance
self.oclcode.set_shared_xy_code(self.shared_x, self.shared_y)
self.oclcode.set_direction_code(NO_DIRECTION, UPPER_LEFT_DIRECTION,
UPPER_DIRECTION, LEFT_DIRECTION,
STOP_DIRECTION)
def _execute_calculate_score_kernel(self, number_of_blocks, idx, idy):
''' Executes a single run of the calculate score kernel'''
pass
def _execute_traceback_kernel(self, number_of_blocks, idx, idy):
''' Executes a single run of the traceback kernel'''
pass
def _get_direction_byte_array(self):
'''
Get the resulting directions
@return gives the resulting direction array as byte array
'''
pass
def __del__(self):
'''Destructor. Removes the current running context'''
del self.program
del self.queue
del self.ctx
del self.device
del self.platform
self.device_type = 0
def _set_device_type(self, device_type):
'''Sets the device type'''
if device_type.upper() == 'ACCELERATOR':
self.device_type = cl.device_type.ACCELERATOR
elif device_type.upper() == 'GPU':
self.device_type = cl.device_type.GPU
elif device_type.upper() == 'CPU':
self.device_type = cl.device_type.CPU
else:
self.logger.warning("Warning: device type is set to default: GPU")
self.device_type = cl.device_type.GPU
def _set_platform(self, platform_name):
found_platform = False
for platform in cl.get_platforms():
for device in platform.get_devices():
if (platform_name.upper() in str(platform).upper()
and device.get_info(cl.device_info.TYPE) == self.device_type):
self.platform = platform
found_platform = True
break
if(found_platform):
self.logger.debug("Found platform {}".format(str(self.platform)))
break
if not (self.platform):
for platform in cl.get_platforms():
for device in platform.get_devices():
if (device.get_info(cl.device_info.TYPE) == self.device_type):
self.platform = platform
found_platform = True
break
if(found_platform):
self.logger.debug('Found platform {}, however this is not the platform indicated by the user'.format(str(self.platform)))
break
if not (self.platform):
raise RuntimeError('Failed to find platform')
def _initialize_device(self, device_number):
'''
Initalizes a device and verifies its computational abilities.
@param device_number: int value representing the device to use
'''
self.logger.debug('Initializing device {0}'.format(device_number))
self.device = self.platform.get_devices(device_type=self.device_type)[device_number]
if int(self.settings.number_of_compute_units) > 0:
self.device = self.device.create_sub_devices([cl.device_partition_property.EQUALLY,int(self.settings.number_of_compute_units)])[int(self.settings.sub_device)]
self.ctx = cl.Context(devices=[self.device])
self.queue = cl.CommandQueue(self.ctx)
#self.logger.debug("context:{}".format(self.ctx) )
def _device_global_mem_size(self):
#return clCharacterize.usable_local_mem_size(self.device)
# GLOBAL_MEM_SIZE
return self.device.get_info(cl.device_info.MAX_MEM_ALLOC_SIZE)
def _clear_memory(self):
'''Clears the claimed memory on the device.'''
if not self.always_reallocate_memory:
return
self.logger.debug('Clearing device memory.')
self._clear_normal_memory()
self._clear_zero_copy_memory()
try:
self.queue.finish()
except:
pass
def _clear_normal_memory(self):
self.logger.debug('Clearing normal device memory.')
if (self.d_sequences is not None):
try:
self.d_sequences.finish()
except:
pass
self.d_sequences.release()
if (self.d_targets is not None):
try:
self.d_targets.finish()
except:
pass
self.d_targets.release()
if (self.d_matrix is not None):
try:
self.d_matrix.finish()
except:
pass
self.d_matrix.release()
if (self.gap_extension and self.d_matrix_i is not None):
try:
self.d_matrix_i.finish()
except:
pass
self.d_matrix_i.release()
if (self.gap_extension and self.d_matrix_j is not None):
try:
self.d_matrix_j.finish()
except:
pass
self.d_matrix_j.release()
if (self.d_global_maxima is not None):
try:
self.d_global_maxima.finish()
except:
pass
self.d_global_maxima.release()
if (self.d_index_increment is not None):
try:
self.d_index_increment.finish()
except:
pass
self.d_index_increment.release()
def _clear_zero_copy_memory(self):
self.logger.debug('Clearing zero-copy device memory.')
if (self.d_starting_points_zero_copy is not None):
try:
self.d_starting_points_zero_copy.finish()
except:
pass
self.d_starting_points_zero_copy.release()
if (self.d_max_possible_score_zero_copy is not None):
try:
self.d_max_possible_score_zero_copy.finish()
except:
pass
self.d_max_possible_score_zero_copy.release()
def _need_reallocation(self, buffer, size):
if self.always_reallocate_memory:
return True
if buffer is None:
return True
if buffer.get_info(cl.mem_info.SIZE) < size:
try:
buffer.finish()
except:
pass
buffer.release()
return True
return False
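    # Added note: _need_reallocation implements a simple grow-only buffer cache -- an existing
    # buffer is reused unless it is missing, too small, or self.always_reallocate_memory is set.
    # Typical usage (as in _init_normal_memory below):
    #   if self._need_reallocation(self.d_sequences, nbytes):
    #       self.d_sequences = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY, size=nbytes)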
def _init_normal_memory(self):
'''
        _init_memory will initialize all required memory on the device based on the current settings.
Make sure to initialize these values!
'''
# Sequence device memory
self.logger.debug('Initializing normal device memory.')
memory = self.length_of_x_sequences * self.number_of_sequences
if self._need_reallocation(self.d_sequences, memory):
self.d_sequences = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY, size=memory)
mem_size = memory
# Target device memory
memory = self.length_of_y_sequences * self.number_targets
if self._need_reallocation(self.d_targets, memory):
self.d_targets = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY, size=memory)
mem_size += memory
if self._need_reallocation(self.d_index_increment, SmithWaterman.int_size):
self.d_index_increment = cl.Buffer(self.ctx, cl.mem_flags.WRITE_ONLY, size=SmithWaterman.int_size)
return mem_size
def _init_zero_copy_memory(self):
self.logger.debug('Initializing zero-copy memory.')
# Starting points host memory allocation and device copy
memory = (self.size_of_startingpoint * self.maximum_number_starting_points * self.number_of_sequences *
self.number_targets)
if self._need_reallocation(self.d_starting_points_zero_copy, memory):
self.d_starting_points_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.WRITE_ONLY | cl.mem_flags.ALLOC_HOST_PTR, size=memory)
mem_size = memory
# Maximum zero copy memory allocation and device copy
memory = (self.number_of_sequences * self.number_of_targets * SmithWaterman.float_size)
#self.d_max_possible_score_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.ALLOC_HOST_PTR, size=memory)
mem_size += memory
return mem_size
def _init_memory(self):
mem_size = self._init_normal_memory()
mem_size += self._init_zero_copy_memory()
self.logger.debug('Allocated: {}MB of memory'.format(str(mem_size / 1024.0 / 1024.00)))
def _init_zero_copy(self):
''' Initializes the index used for the 'zero copy' of the found starting points '''
index = numpy.zeros((1), dtype=numpy.int32)
cl.enqueue_copy(self.queue, self.d_index_increment, index)
def _compile_code(self):
"""Compile the device code with current settings"""
self.logger.debug('Compiling OpenCL code.')
code = self.oclcode.get_code(self.score, self.number_of_sequences, self.number_targets, self.length_of_x_sequences, self.length_of_y_sequences)
#self.logger.debug('Code: \n{}'.format(code))
self.program = cl.Program(self.ctx, code).build()
self.calculateScoreAffineGap_kernel = self.program.calculateScoreAffineGap
self.calculateScore_kernel = self.program.calculateScore
self.tracebackAffineGap_kernel = self.program.tracebackAffineGap
self.traceback_kernel = self.program.traceback
def copy_sequences(self, h_sequences, h_targets):
'''
Copy the sequences and targets to the device
@param h_sequences: the sequences to be copied. Should be a single string containing all sequences
@param h_targets: the targets to be copied. Should be a single string containing all sequences
'''
cl.enqueue_copy(self.queue, self.d_sequences, h_sequences, is_blocking=False)
cl.enqueue_copy(self.queue, self.d_targets, h_targets, is_blocking=False)
def _get_number_of_starting_points(self):
''' Returns the number of startingpoints. '''
self.logger.debug('Getting number of starting points.')
self.index = numpy.zeros((1), dtype=numpy.int32)
cl.enqueue_copy(self.queue, self.index, self.d_index_increment)
return self.index[0]
def _fill_max_possible_score(self, target_index, targets, i, index, records_seqs):
for tI in range(self.number_of_targets):
if tI+target_index < len(targets) and i+index < len(records_seqs):
                shorter_length = min(len(records_seqs[i + index]), len(targets[tI + target_index]))
                self.set_minimum_score(tI * self.max_sequences + i,
                                       float(self.score.highest_score) * shorter_length * float(self.filter_factor))
def _copy_min_score(self):
if self._need_reallocation(self.d_max_possible_score_zero_copy, self.min_score_np.nbytes):
self.d_max_possible_score_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.ALLOC_HOST_PTR, size=self.min_score_np.nbytes)
cl.enqueue_copy(self.queue, self.d_max_possible_score_zero_copy, self.min_score_np, is_blocking=False)
def _set_max_possible_score(self, target_index, targets, i, index, records_seqs):
'''fills the max_possible_score datastructure on the host'''
# self.h_max_possible_score_zero_copy = cl.enqueue_map_buffer(self.queue, self.d_max_possible_score_zero_copy,
# cl.map_flags.WRITE, 0,
# self.number_of_sequences * self.number_targets ,
# dtype=numpy.float32)[0]
self._fill_max_possible_score(target_index, targets, i, index, records_seqs)
#Unmap memory object
# del self.h_max_possible_score_zero_copy
def _get_starting_point_byte_array(self, number_of_starting_points):
'''
Get the resulting starting points
@return gives the resulting starting point array as byte array
'''
if self.h_starting_points_zero_copy is not None and len(self.h_starting_points_zero_copy) > 0 :
self.h_starting_points_zero_copy.base.release()
self.h_starting_points_zero_copy = cl.enqueue_map_buffer(self.queue, self.d_starting_points_zero_copy, cl.map_flags.READ, 0,
(self.size_of_startingpoint *
number_of_starting_points, 1), dtype=numpy.byte)[0]
return self.h_starting_points_zero_copy
class SmithWatermanCPU(SmithWatermanOcl):
'''
classdocs
'''
def __init__(self, logger, score, settings):
'''
Constructor
'''
SmithWatermanOcl.__init__(self, logger, score, settings)
self.oclcode = CPUcode(self.logger)
self.workload_x = 4
self.workload_y = 4
self.workgroup_x = self.shared_x // self.workload_x
self.workgroup_y = self.shared_y // self.workload_y
self.d_semaphores = None
self._init_oclcode()
def _init_normal_memory(self):
mem_size = SmithWatermanOcl._init_normal_memory(self)
# Input matrix device memory
memory = (SmithWaterman.float_size * (self.length_of_x_sequences + 1) * self.number_of_sequences *
(self.length_of_y_sequences + 1) * self.number_targets)
if self._need_reallocation(self.d_matrix, memory):
self.d_matrix = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, size=memory)
mem_size += memory
pattern = numpy.zeros((1),dtype=numpy.float32)
cl.enqueue_fill_buffer(self.queue, self.d_matrix, pattern, 0, size = memory)
if self.gap_extension:
if self._need_reallocation(self.d_matrix_i, memory):
self.d_matrix_i = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, size=memory)
mem_size += memory
if self._need_reallocation(self.d_matrix_j, memory):
self.d_matrix_j = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, size=memory)
mem_size += memory
pattern = numpy.array([-1E10],dtype=numpy.float32)
cl.enqueue_fill_buffer(self.queue, self.d_matrix_i, pattern, 0, size = memory)
cl.enqueue_fill_buffer(self.queue, self.d_matrix_j, pattern, 0, size = memory)
# Maximum global device memory
memory = (SmithWaterman.float_size * self.x_div_shared_x * self.number_of_sequences *
self.y_div_shared_y * self.number_targets * self.workload_x * self.workload_y)
if self._need_reallocation(self.d_global_maxima, memory):
self.d_global_maxima = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, size=memory)
mem_size += memory
memory = (SmithWaterman.int_size *
self.length_of_x_sequences *
self.number_of_sequences *
self.length_of_y_sequences *
self.number_targets)
if self._need_reallocation(self.d_semaphores, memory):
self.d_semaphores = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, size=memory)
pattern = numpy.zeros((1),dtype=numpy.int32)
cl.enqueue_fill_buffer(self.queue, self.d_semaphores, pattern, 0, size=memory)
mem_size += memory
return mem_size
def _init_zero_copy_memory(self):
mem_size = SmithWatermanOcl._init_zero_copy_memory(self)
# Global directions host memory allocation and device copy
memory = (self.length_of_x_sequences * self.number_of_sequences * self.length_of_y_sequences * self.number_targets)
if self._need_reallocation(self.d_global_direction_zero_copy, memory):
self.d_global_direction_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE | cl.mem_flags.ALLOC_HOST_PTR, size=memory)
mem_size += memory
return mem_size
def _clear_normal_memory(self):
SmithWatermanOcl._clear_normal_memory(self)
if (self.d_semaphores is not None):
try:
self.d_semaphores.finish()
except:
pass
self.d_semaphores.release()
def _clear_zero_copy_memory(self):
SmithWatermanOcl._clear_zero_copy_memory(self)
if (self.d_global_direction_zero_copy is not None):
try:
self.d_global_direction_zero_copy.finish()
except:
pass
self.d_global_direction_zero_copy.release()
def _get_direction_byte_array(self):
'''
Get the resulting directions
@return gives the resulting direction array as byte array
'''
h_global_direction_zero_copy = cl.enqueue_map_buffer(self.queue, self.d_global_direction_zero_copy, cl.map_flags.READ, 0,
(self.number_of_sequences,
self.number_targets,
self.length_of_x_sequences,
self.length_of_y_sequences), dtype=numpy.byte)[0]
return h_global_direction_zero_copy
def _get_direction(self, direction_array, sequence, target, block_x, block_y, value_x, value_y):
return direction_array[sequence][target][block_x*self.shared_x + value_x][block_y*self.shared_y + value_y]
def _set_direction(self, direction, direction_array, sequence, target, block_x, block_y, value_x, value_y):
direction_array[sequence][target][block_x*self.shared_x + value_x][block_y*self.shared_y + value_y] = direction
def _execute_calculate_score_kernel(self, number_of_blocks, idx, idy):
''' Executes a single run of the calculate score kernel'''
dim_block = (self.workgroup_x, self.workgroup_y)
dim_grid_sw = (self.number_of_sequences * self.workgroup_x, self.number_targets * number_of_blocks * self.workgroup_y)
if self.gap_extension:
self.calculateScoreAffineGap_kernel(self.queue,
dim_grid_sw,
dim_block,
self.d_matrix,
self.d_matrix_i,
self.d_matrix_j,
numpy.int32(idx),
numpy.int32(idy),
numpy.int32(number_of_blocks),
self.d_sequences,
self.d_targets,
self.d_global_maxima,
self.d_global_direction_zero_copy)
else:
self.calculateScore_kernel(self.queue,
dim_grid_sw,
dim_block,
self.d_matrix,
numpy.int32(idx),
numpy.int32(idy),
numpy.int32(number_of_blocks),
self.d_sequences,
self.d_targets,
self.d_global_maxima,
self.d_global_direction_zero_copy)
def _execute_traceback_kernel(self, number_of_blocks, idx, idy):
''' Executes a single run of the traceback kernel'''
dim_block = (self.workgroup_x, self.workgroup_y)
dim_grid_sw = (self.number_of_sequences * self.workgroup_x, self.number_targets * number_of_blocks * self.workgroup_y)
if self.gap_extension:
self.tracebackAffineGap_kernel(self.queue, dim_grid_sw, dim_block,
self.d_matrix,
self.d_matrix_i,
self.d_matrix_j,
numpy.int32(idx),
numpy.int32(idy),
numpy.int32(number_of_blocks),
self.d_global_maxima,
self.d_global_direction_zero_copy,
self.d_index_increment,
self.d_starting_points_zero_copy,
self.d_max_possible_score_zero_copy,
self.d_semaphores)
else:
self.traceback_kernel(self.queue, dim_grid_sw, dim_block,
self.d_matrix,
numpy.int32(idx),
numpy.int32(idy),
numpy.int32(number_of_blocks),
self.d_global_maxima,
self.d_global_direction_zero_copy,
self.d_index_increment,
self.d_starting_points_zero_copy,
self.d_max_possible_score_zero_copy,
self.d_semaphores)
class SmithWatermanGPU(SmithWatermanOcl):
'''
classdocs
'''
def __init__(self, logger, score, settings):
'''
Constructor
'''
SmithWatermanOcl.__init__(self, logger, score, settings)
self.oclcode = GPUcode(self.logger)
self.d_global_direction = None
self.d_is_traceback_required = None
self._init_oclcode()
def _init_normal_memory(self):
mem_size = SmithWatermanOcl._init_normal_memory(self)
# Input matrix device memory
memory = (SmithWaterman.float_size * self.length_of_x_sequences * self.number_of_sequences *
self.length_of_y_sequences * self.number_targets)
if self._need_reallocation(self.d_matrix, memory):
self.d_matrix = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, size=memory)
mem_size += memory
if self.gap_extension:
if self._need_reallocation(self.d_matrix_i, memory):
self.d_matrix_i = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, size=memory)
mem_size += memory
if self._need_reallocation(self.d_matrix_j, memory):
self.d_matrix_j = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, size=memory)
mem_size += memory
# Maximum global device memory
memory = (SmithWaterman.float_size * self.x_div_shared_x * self.number_of_sequences *
self.y_div_shared_y * self.number_targets)
if self._need_reallocation(self.d_global_maxima, memory):
self.d_global_maxima = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, size=memory)
mem_size += memory
memory = (self.length_of_x_sequences * self.number_of_sequences * self.length_of_y_sequences * self.number_targets)
if self._need_reallocation(self.d_global_direction, memory):
self.d_global_direction = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, size=memory)
mem_size += memory
memory = SmithWaterman.int_size
if self._need_reallocation(self.d_is_traceback_required, memory):
self.d_is_traceback_required = cl.Buffer(self.ctx, cl.mem_flags.WRITE_ONLY, size=memory)
flag = numpy.zeros((1), dtype=numpy.uint32)
cl.enqueue_fill_buffer(self.queue, self.d_is_traceback_required, flag, 0, size=memory)
return mem_size
def _clear_normal_memory(self):
SmithWatermanOcl._clear_normal_memory(self)
if (self.d_global_direction is not None):
try:
self.d_global_direction.finish()
except:
pass
self.d_global_direction.release()
if (self.d_is_traceback_required is not None):
try:
self.d_is_traceback_required.finish()
except:
pass
self.d_is_traceback_required.release()
def _compile_code(self):
"""Compile the device code with current settings"""
if self.program is None:
self.logger.debug('Compiling OpenCL code.')
code = self.oclcode.get_code(self.score, self.number_of_sequences, self.number_targets, self.length_of_x_sequences, self.length_of_y_sequences)
self.program = cl.Program(self.ctx, code).build(options=['-cl-fast-relaxed-math'])
self.calculateScoreAffineGap_kernel = self.program.calculateScoreAffineGap
self.calculateScore_kernel = self.program.calculateScore
self.tracebackAffineGap_kernel = self.program.tracebackAffineGap
self.traceback_kernel = self.program.traceback
def _get_direction_byte_array(self):
'''
Get the resulting directions
@return gives the resulting direction array as byte array
'''
h_global_direction = cl.enqueue_map_buffer(self.queue, self.d_global_direction, cl.map_flags.READ, 0,
(self.number_of_sequences,
self.number_targets,
self.x_div_shared_x,
self.y_div_shared_y,
self.shared_x,
self.shared_y), dtype=numpy.byte)[0]
return h_global_direction
def _execute_calculate_score_kernel(self, number_of_blocks, idx, idy):
''' Executes a single run of the calculate score kernel'''
dim_block = (self.shared_x, self.shared_y, 1)
dim_grid_sw = (number_of_blocks * self.shared_x, self.number_of_sequences * self.shared_y, self.number_targets)
if self.gap_extension:
self.calculateScoreAffineGap_kernel(self.queue, dim_grid_sw, dim_block,
numpy.uint32(self.number_of_sequences),
numpy.uint32(self.number_targets),
numpy.uint32(self.x_div_shared_x),
numpy.uint32(self.y_div_shared_y),
self.d_matrix,
self.d_matrix_i,
self.d_matrix_j,
numpy.uint32(idx),
numpy.uint32(idy),
self.d_sequences,
self.d_targets,
self.d_global_maxima,
self.d_global_direction,
self.d_max_possible_score_zero_copy,
self.d_is_traceback_required)
else:
self.calculateScore_kernel(self.queue, dim_grid_sw, dim_block,
numpy.uint32(self.number_of_sequences),
numpy.uint32(self.number_targets),
numpy.uint32(self.x_div_shared_x),
numpy.uint32(self.y_div_shared_y),
self.d_matrix,
numpy.uint32(idx),
numpy.uint32(idy),
self.d_sequences,
self.d_targets,
self.d_global_maxima,
self.d_global_direction,
self.d_max_possible_score_zero_copy,
self.d_is_traceback_required)
def _is_traceback_required(self):
'''Returns False if it is known after calculating scores that there are no possible
starting points, hence no need to run traceback.
'''
flag = numpy.zeros((1), dtype=numpy.uint32)
cl.enqueue_copy(self.queue, flag, self.d_is_traceback_required)
if flag[0]:
# Clear the flag
flag[0] = 0
cl.enqueue_fill_buffer(self.queue, self.d_is_traceback_required, flag, 0, size=SmithWaterman.int_size)
return True
else:
return False
def _execute_traceback_kernel(self, number_of_blocks, idx, idy):
''' Executes a single run of the traceback kernel'''
dim_block = (self.shared_x, self.shared_y, 1)
dim_grid_sw = (number_of_blocks * self.shared_x, self.number_of_sequences * self.shared_y, self.number_targets)
if self.gap_extension:
self.tracebackAffineGap_kernel(self.queue, dim_grid_sw, dim_block,
numpy.uint32(self.number_of_sequences),
numpy.uint32(self.number_targets),
numpy.uint32(self.x_div_shared_x),
numpy.uint32(self.y_div_shared_y),
self.d_matrix,
self.d_matrix_i,
self.d_matrix_j,
| numpy.uint32(idx) | numpy.uint32 |
# -*- coding: utf-8 -*-
"""Mappings between the ordering of PyFR nodes, and those of external formats
"""
import numpy as np
class GmshNodeMaps(object):
"""Mappings between the node ordering of PyFR and that of Gmsh
Node mappings are contained within two dictionaries; one maps from
Gmsh node ordering to PyFR, and the other provides the inverse.
Dictionary items are keyed by a tuple of element type (string) and
number of solution points per element (integer).
Each dictionary value is a list of integers that provide mappings
    via their list index. When lists in the "to_pyfr" dictionary
    are indexed using the Gmsh node number, they return the equivalent
    PyFR node number. The reverse is true for the inverse
    dictionary.
    :Example: Convert Gmsh node number 4, in a 64-point hexahedron, to
    the equivalent node number in PyFR:
    >>> from pyfr.readers.nodemaps import GmshNodeMaps
    >>> GmshNodeMaps.to_pyfr['hex', 64][4]
    48
"""
to_pyfr = {
('hex', 8): | np.array([0, 1, 3, 2, 4, 5, 7, 6]) | numpy.array |
import _pickle, numpy as np, itertools as it
from time import perf_counter
# from cppimport import import_hook
#
# # import cppimport
#
# # cppimport.set_quiet(False)
#
import rpxdock as rp
from rpxdock.bvh import bvh_test
from rpxdock.bvh import BVH, bvh
import rpxdock.homog as hm
def test_bvh_isect_cpp():
assert bvh_test.TEST_bvh_test_isect()
def test_bvh_isect_fixed():
# print()
mindist = 0.01
totbvh, totnaive = 0, 0
for i in range(10):
xyz1 = np.random.rand(1000, 3) + [0.9, 0.9, 0]
xyz2 = np.random.rand(1000, 3)
tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
tcre = perf_counter() - tcre
assert len(bvh1) == 1000
pos1 = hm.htrans([0.9, 0.9, 0.9])
pos2 = np.eye(4)
tbvh = perf_counter()
clash1 = bvh.bvh_isect_fixed(bvh1, bvh2, mindist)
tbvh = perf_counter() - tbvh
tn = perf_counter()
clash2 = bvh.naive_isect_fixed(bvh1, bvh2, mindist)
tn = perf_counter() - tn
assert clash1 == clash2
# print(f"{i:3} clash {clash1:1} {tn / tbvh:8.2f}, {tn:1.6f}, {tbvh:1.6f}")
totbvh += tbvh
totnaive += tn
print("total times", totbvh, totnaive / totbvh, totnaive)
def test_bvh_isect():
t = rp.Timer().start()
N1, N2 = 10, 10
N = N1 * N2
mindist = 0.04
nclash = 0
for outer in range(N1):
xyz1 = np.random.rand(1250, 3) - [0.5, 0.5, 0.5]
xyz2 = np.random.rand(1250, 3) - [0.5, 0.5, 0.5]
pos1 = hm.rand_xform(N2, cart_sd=0.8)
pos2 = hm.rand_xform(N2, cart_sd=0.8)
t.checkpoint('init')
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
t.checkpoint('BVH')
clash = list()
for inner in range(N2):
clash1 = bvh.bvh_isect(bvh1=bvh1, bvh2=bvh2, pos1=pos1[inner], pos2=pos2[inner],
mindist=mindist)
t.checkpoint('bvh_isect')
clash2 = bvh.naive_isect(bvh1, bvh2, pos1[inner], pos2[inner], mindist)
t.checkpoint('naive_isect')
assert clash1 == clash2
clash.append(clash1)
clashvec = bvh.bvh_isect_vec(bvh1, bvh2, pos1, pos2, mindist)
t.checkpoint('bvh_isect_vec')
assert np.all(clashvec == clash)
nclash += sum(clash)
assert clashvec[1] == bvh.bvh_isect_vec(bvh1, bvh2, pos1[1], pos2[1], mindist)
bvh.bvh_isect_vec(bvh1, bvh2, pos1, pos2[1], mindist) # ?? make sure api works?
bvh.bvh_isect_vec(bvh1, bvh2, pos1[1], pos2, mindist)
print(
f"Ngeom {N1:,} Npos {N2:,} isect {nclash/N:4.2f} bvh: {int(N/t.sum.bvh_isect):,}/s",
f"bvh_vec {int(N/t.sum.bvh_isect_vec):,} fastnaive {int(N/t.sum.naive_isect):,}/s",
f"ratio {int(t.sum.naive_isect/t.sum.bvh_isect_vec):,}x",
)
def test_bvh_isect_fixed_range():
N1, N2 = 10, 10
N = N1 * N2
mindist = 0.04
nclash = 0
for outer in range(N1):
xyz1 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
xyz2 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
bvh1_half = BVH(xyz1[250:750])
bvh2_half = BVH(xyz2[250:750])
pos1 = hm.rand_xform(N2, cart_sd=0.5)
pos2 = hm.rand_xform(N2, cart_sd=0.5)
isect1 = bvh.bvh_isect_vec(bvh1, bvh2, pos1, pos2, mindist)
isect2, clash = bvh.bvh_isect_fixed_range_vec(bvh1, bvh2, pos1, pos2, mindist)
assert np.all(isect1 == isect2)
bounds = [250], [749], [250], [749]
isect1 = bvh.bvh_isect_vec(bvh1_half, bvh2_half, pos1, pos2, mindist)
isect2, clash = bvh.bvh_isect_fixed_range_vec(bvh1, bvh2, pos1, pos2, mindist, *bounds)
assert np.all(isect1 == isect2)
def test_bvh_min_cpp():
assert bvh_test.TEST_bvh_test_min()
def test_bvh_min_dist_fixed():
xyz1 = np.random.rand(5000, 3) + [0.9, 0.9, 0.0]
xyz2 = np.random.rand(5000, 3)
tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
tcre = perf_counter() - tcre
tbvh = perf_counter()
d, i1, i2 = bvh.bvh_min_dist_fixed(bvh1, bvh2)
tbvh = perf_counter() - tbvh
dtest = np.linalg.norm(xyz1[i1] - xyz2[i2])
assert np.allclose(d, dtest, atol=1e-6)
# tnp = perf_counter()
# dnp = np.min(np.linalg.norm(xyz1[:, None] - xyz2[None], axis=2))
# tnp = perf_counter() - tnp
tn = perf_counter()
dn = bvh.naive_min_dist_fixed(bvh1, bvh2)
tn = perf_counter() - tn
print()
print("from bvh: ", d)
print("from naive:", dn)
assert np.allclose(dn, d, atol=1e-6)
print(f"tnaivecpp {tn:5f} tbvh {tbvh:5f} tbvhcreate {tcre:5f}")
print("bvh acceleration vs naive", tn / tbvh)
# assert tn / tbvh > 100
def test_bvh_min_dist():
xyz1 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
xyz2 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
tcre = perf_counter() - tcre
# print()
totbvh, totnaive = 0, 0
N = 10
pos1 = hm.rand_xform(N, cart_sd=1)
pos2 = hm.rand_xform(N, cart_sd=1)
dis = list()
for i in range(N):
tbvh = perf_counter()
d, i1, i2 = bvh.bvh_min_dist(bvh1, bvh2, pos1[i], pos2[i])
tbvh = perf_counter() - tbvh
dtest = np.linalg.norm(pos1[i] @ hm.hpoint(xyz1[i1]) - pos2[i] @ hm.hpoint(xyz2[i2]))
assert np.allclose(d, dtest, atol=1e-6)
tn = perf_counter()
dn = bvh.naive_min_dist(bvh1, bvh2, pos1[i], pos2[i])
tn = perf_counter() - tn
assert np.allclose(dn, d, atol=1e-6)
dis.append((d, i1, i2))
# print(
# f"tnaivecpp {tn:1.6f} tbvh {tbvh:1.6f} tcpp/tbvh {tn/tbvh:8.1f}",
# np.linalg.norm(pos1[:3, 3]),
# dtest - d,
# )
totnaive += tn
totbvh += tbvh
d, i1, i2 = bvh.bvh_min_dist_vec(bvh1, bvh2, pos1, pos2)
for a, b, c, x in zip(d, i1, i2, dis):
assert a == x[0]
assert b == x[1]
assert c == x[2]
print(
"total times",
totbvh / N * 1000,
"ms",
totnaive / totbvh,
totnaive,
f"tcre {tcre:2.4f}",
)
def test_bvh_min_dist_floormin():
xyz1 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
xyz2 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
tcre = perf_counter() - tcre
# print()
totbvh, totnaive = 0, 0
N = 10
for i in range(N):
pos1 = hm.rand_xform(cart_sd=1)
pos2 = hm.rand_xform(cart_sd=1)
tbvh = perf_counter()
d, i1, i2 = bvh.bvh_min_dist(bvh1, bvh2, pos1, pos2)
tbvh = perf_counter() - tbvh
dtest = np.linalg.norm(pos1 @ hm.hpoint(xyz1[i1]) - pos2 @ hm.hpoint(xyz2[i2]))
assert np.allclose(d, dtest, atol=1e-6)
tn = perf_counter()
dn = bvh.naive_min_dist(bvh1, bvh2, pos1, pos2)
tn = perf_counter() - tn
assert np.allclose(dn, d, atol=1e-6)
# print(
# f"tnaivecpp {tn:1.6f} tbvh {tbvh:1.6f} tcpp/tbvh {tn/tbvh:8.1f}",
# np.linalg.norm(pos1[:3, 3]),
# dtest - d,
# )
totnaive += tn
totbvh += tbvh
print(
"total times",
totbvh / N * 1000,
"ms",
totnaive / totbvh,
totnaive,
f"tcre {tcre:2.4f}",
)
def test_bvh_slide_single_inline():
bvh1 = BVH([[-10, 0, 0]])
bvh2 = BVH([[0, 0, 0]])
d = bvh.bvh_slide(bvh1, bvh2, np.eye(4), np.eye(4), rad=1.0, dirn=[1, 0, 0])
assert d == 8
# moves xyz1 to -2,0,0
# should always come in from "infinity" from -direction
bvh1 = BVH([[10, 0, 0]])
bvh2 = BVH([[0, 0, 0]])
d = bvh.bvh_slide(bvh1, bvh2, np.eye(4), np.eye(4), rad=1.0, dirn=[1, 0, 0])
assert d == -12
# also moves xyz1 to -2,0,0
for i in range(100):
np.random.seed(i)
dirn = np.array([np.random.randn(), 0, 0])
dirn /= np.linalg.norm(dirn)
rad = np.abs(np.random.randn() / 10)
xyz1 = np.array([[np.random.randn(), 0, 0]])
xyz2 = np.array([[np.random.randn(), 0, 0]])
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
d = bvh.bvh_slide(bvh1, bvh2, np.eye(4), np.eye(4), rad=rad, dirn=dirn)
xyz1 += d * dirn
assert np.allclose(np.linalg.norm(xyz1 - xyz2), 2 * rad, atol=1e-4)
def test_bvh_slide_single():
nmiss = 0
for i in range(100):
# np.random.seed(i)
dirn = np.random.randn(3)
dirn /= np.linalg.norm(dirn)
rad = np.abs(np.random.randn())
xyz1 = np.random.randn(1, 3)
xyz2 = np.random.randn(1, 3)
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
d = bvh.bvh_slide(bvh1, bvh2, np.eye(4), np.eye(4), rad=rad, dirn=dirn)
if d < 9e8:
xyz1 += d * dirn
assert np.allclose(np.linalg.norm(xyz1 - xyz2), 2 * rad, atol=1e-4)
else:
nmiss += 1
delta = xyz2 - xyz1
d0 = delta.dot(dirn)
dperp2 = np.sum(delta * delta) - d0 * d0
target_d2 = 4 * rad**2
assert target_d2 < dperp2
print("nmiss", nmiss, nmiss / 1000)
def test_bvh_slide_single_xform():
nmiss = 0
for i in range(1000):
dirn = np.random.randn(3)
dirn /= np.linalg.norm(dirn)
rad = np.abs(np.random.randn() * 2.0)
xyz1 = np.random.randn(1, 3)
xyz2 = np.random.randn(1, 3)
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
pos1 = hm.rand_xform()
pos2 = hm.rand_xform()
d = bvh.bvh_slide(bvh1, bvh2, pos1, pos2, rad=rad, dirn=dirn)
if d < 9e8:
p1 = (pos1 @ hm.hpoint(xyz1[0]))[:3] + d * dirn
p2 = (pos2 @ hm.hpoint(xyz2[0]))[:3]
assert np.allclose(np.linalg.norm(p1 - p2), 2 * rad, atol=1e-4)
else:
nmiss += 1
p2 = pos2 @ hm.hpoint(xyz2[0])
p1 = pos1 @ hm.hpoint(xyz1[0])
delta = p2 - p1
d0 = delta[:3].dot(dirn)
dperp2 = np.sum(delta * delta) - d0 * d0
target_d2 = 4 * rad**2
assert target_d2 < dperp2
print("nmiss", nmiss, nmiss / 1000)
def test_bvh_slide_whole():
    # timings with -Ofast
# slide test 10,000 iter bvhslide float: 16,934/s double: 16,491/s bvhmin 17,968/s fracmiss: 0.0834
# np.random.seed(0)
N1, N2 = 2, 10
totbvh, totbvhf, totmin = 0, 0, 0
nmiss = 0
for j in range(N1):
xyz1 = np.random.rand(5000, 3) - [0.5, 0.5, 0.5]
xyz2 = np.random.rand(5000, 3) - [0.5, 0.5, 0.5]
# tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
# bvh1f = BVH_32bit(xyz1)
# bvh2f = BVH_32bit(xyz2)
# tcre = perf_counter() - tcre
pos1 = hm.rand_xform(N2, cart_sd=0.5)
pos2 = hm.rand_xform(N2, cart_sd=0.5)
dirn = np.random.randn(3)
dirn /= np.linalg.norm(dirn)
radius = 0.001 + np.random.rand() / 10
slides = list()
for i in range(N2):
tbvh = perf_counter()
dslide = bvh.bvh_slide(bvh1, bvh2, pos1[i], pos2[i], radius, dirn)
tbvh = perf_counter() - tbvh
tbvhf = perf_counter()
# dslide = bvh.bvh_slide_32bit(bvh1f, bvh2f, pos1[i], pos2[i], radius, dirn)
tbvhf = perf_counter() - tbvhf
slides.append(dslide)
if dslide > 9e8:
tn = perf_counter()
                dn, i1, i2 = bvh.bvh_min_dist(bvh1, bvh2, pos1[i], pos2[i])
tn = perf_counter() - tn
assert dn > 2 * radius
nmiss += 1
else:
tmp = hm.htrans(dirn * dslide) @ pos1[i]
tn = perf_counter()
                dn, i1, i2 = bvh.bvh_min_dist(bvh1, bvh2, tmp, pos2[i])
tn = perf_counter() - tn
if not np.allclose(dn, 2 * radius, atol=1e-6):
print(dn, 2 * radius)
assert np.allclose(dn, 2 * radius, atol=1e-6)
# print(
# i,
# f"tnaivecpp {tn:1.6f} tbvh {tbvh:1.6f} tcpp/tbvh {tn/tbvh:8.1f}",
# np.linalg.norm(pos1[:3, 3]),
# dslide,
# )
totmin += tn
totbvh += tbvh
totbvhf += tbvhf
slides2 = bvh.bvh_slide_vec(bvh1, bvh2, pos1, pos2, radius, dirn)
assert np.allclose(slides, slides2)
N = N1 * N2
print(
f"slide test {N:,} iter bvhslide double: {int(N/totbvh):,}/s bvhmin {int(N/totmin):,}/s",
# f"slide test {N:,} iter bvhslide float: {int(N/totbvhf):,}/s double: {int(N/totbvh):,}/s bvhmin {int(N/totmin):,}/s",
f"fracmiss: {nmiss/N}",
)
def test_collect_pairs_simple():
print("test_collect_pairs_simple")
bufbvh = -np.ones((100, 2), dtype="i4")
bufnai = -np.ones((100, 2), dtype="i4")
bvh1 = BVH([[0, 0, 0], [0, 2, 0]])
bvh2 = BVH([[0.9, 0, 0], [0.9, 2, 0]])
assert len(bvh1) == 2
mindist = 1.0
pos1 = np.eye(4)
pos2 = np.eye(4)
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufbvh)
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufnai)
assert not o
print(pbvh.shape)
assert len(pbvh) == 2 and nnai == 2
assert np.all(pbvh == [[0, 0], [1, 1]])
assert np.all(bufnai[:nnai] == [[0, 0], [1, 1]])
pos1 = hm.htrans([0, 2, 0])
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufbvh)
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufnai)
assert not o
assert len(pbvh) == 1 and nnai == 1
assert np.all(pbvh == [[0, 1]])
assert np.all(bufnai[:nnai] == [[0, 1]])
pos1 = hm.htrans([0, -2, 0])
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufbvh)
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufnai)
assert not o
assert len(pbvh) == 1 and nnai == 1
assert np.all(pbvh == [[1, 0]])
assert np.all(bufnai[:nnai] == [[1, 0]])
def test_collect_pairs_simple_selection():
print("test_collect_pairs_simple_selection")
bufbvh = -np.ones((100, 2), dtype="i4")
bufnai = -np.ones((100, 2), dtype="i4")
crd1 = [[0, 0, 0], [0, 0, 0], [0, 2, 0], [0, 0, 0]]
crd2 = [[0, 0, 0], [0.9, 0, 0], [0, 0, 0], [0.9, 2, 0]]
mask1 = [1, 0, 1, 0]
mask2 = [0, 1, 0, 1]
bvh1 = BVH(crd1, mask1)
bvh2 = BVH(crd2, mask2)
assert len(bvh1) == 2
assert np.allclose(bvh1.radius(), 1.0, atol=1e-6)
assert np.allclose(bvh1.center(), [0, 1, 0], atol=1e-6)
mindist = 1.0
pos1 = np.eye(4)
pos2 = np.eye(4)
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufbvh)
assert not o
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufnai)
assert len(pbvh) == 2 and nnai == 2
assert np.all(pbvh == [[0, 1], [2, 3]])
assert np.all(bufnai[:nnai] == [[0, 1], [2, 3]])
pos1 = hm.htrans([0, 2, 0])
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufbvh)
assert not o
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufnai)
assert len(pbvh) == 1 and nnai == 1
assert np.all(pbvh == [[0, 3]])
assert np.all(bufnai[:nnai] == [[0, 3]])
pos1 = hm.htrans([0, -2, 0])
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufbvh)
assert not o
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufnai)
assert len(pbvh) == 1 and nnai == 1
assert np.all(pbvh == [[2, 1]])
assert np.all(bufnai[:nnai] == [[2, 1]])
def test_collect_pairs():
N1, N2 = 1, 50
N = N1 * N2
Npts = 500
    totbvh, totnai, totct, ntot = 0, 0, 0, 0
bufbvh = -np.ones((Npts * Npts, 2), dtype="i4")
bufnai = -np.ones((Npts * Npts, 2), dtype="i4")
for j in range(N1):
xyz1 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyz2 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
pos1, pos2 = list(), list()
while 1:
x1 = hm.rand_xform(cart_sd=0.5)
x2 = hm.rand_xform(cart_sd=0.5)
d = np.linalg.norm(x1[:, 3] - x2[:, 3])
if 0.8 < d < 1.3:
pos1.append(x1)
pos2.append(x2)
if len(pos1) == N2:
break
pos1 = np.stack(pos1)
pos2 = np.stack(pos2)
pairs = list()
mindist = 0.002 + np.random.rand() / 10
for i in range(N2):
tbvh = perf_counter()
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1[i], pos2[i], mindist, bufbvh)
tbvh = perf_counter() - tbvh
assert not o
tnai = perf_counter()
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1[i], pos2[i], mindist, bufnai)
tnai = perf_counter() - tnai
tct = perf_counter()
nct = bvh.bvh_count_pairs(bvh1, bvh2, pos1[i], pos2[i], mindist)
tct = perf_counter() - tct
ntot += nct
assert nct == len(pbvh)
pairs.append(pbvh.copy())
totbvh += tbvh
totnai += tnai
totct += tct
assert len(pbvh) == nnai
if len(pbvh) == 0:
continue
o = np.lexsort((pbvh[:, 1], pbvh[:, 0]))
pbvh[:] = pbvh[:][o]
o = np.lexsort((bufnai[:nnai, 1], bufnai[:nnai, 0]))
bufnai[:nnai] = bufnai[:nnai][o]
assert np.all(pbvh == bufnai[:nnai])
pair1 = pos1[i] @ hm.hpoint(xyz1[pbvh[:, 0]])[..., None]
pair2 = pos2[i] @ hm.hpoint(xyz2[pbvh[:, 1]])[..., None]
dpair = np.linalg.norm(pair2 - pair1, axis=1)
assert np.max(dpair) <= mindist
pcount = bvh.bvh_count_pairs_vec(bvh1, bvh2, pos1, pos2, mindist)
assert np.all(pcount == [len(x) for x in pairs])
pairs2, lbub = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1, pos2, mindist)
for i, p in enumerate(pairs):
lb, ub = lbub[i]
assert np.all(pairs2[lb:ub] == pairs[i])
x, y = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1[:3], pos2[0], mindist)
assert len(y) == 3
x, y = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1[0], pos2[:5], mindist)
assert len(y) == 5
print(
f"collect test {N:,} iter bvh {int(N/totbvh):,}/s naive {int(N/totnai):,}/s ratio {totnai/totbvh:7.2f} count-only {int(N/totct):,}/s avg cnt {ntot/N}"
)
def test_collect_pairs_range():
N1, N2 = 1, 500
N = N1 * N2
Npts = 1000
for j in range(N1):
xyz1 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyz2 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
pos1, pos2 = list(), list()
while 1:
x1 = hm.rand_xform(cart_sd=0.5)
x2 = hm.rand_xform(cart_sd=0.5)
d = np.linalg.norm(x1[:, 3] - x2[:, 3])
if 0.8 < d < 1.3:
pos1.append(x1)
pos2.append(x2)
if len(pos1) == N2:
break
pos1 = np.stack(pos1)
pos2 = np.stack(pos2)
pairs = list()
mindist = 0.002 + np.random.rand() / 10
pairs, lbub = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1, pos2, mindist)
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, mindist)
assert np.all(lbub == rlbub)
assert np.all(pairs == rpairs)
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, mindist, [250],
[750])
assert len(rlbub) == len(pos1)
assert np.all(rpairs[:, 0] >= 250)
assert np.all(rpairs[:, 0] <= 750)
filt_pairs = pairs[np.logical_and(pairs[:, 0] >= 250, pairs[:, 0] <= 750)]
# assert np.all(filt_pairs == rpairs) # sketchy???
assert np.allclose(np.unique(filt_pairs, axis=1), np.unique(rpairs, axis=1))
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, mindist, [600],
[1000], -1, [100], [400], -1)
assert len(rlbub) == len(pos1)
assert np.all(rpairs[:, 0] >= 600)
assert np.all(rpairs[:, 0] <= 1000)
assert np.all(rpairs[:, 1] >= 100)
assert np.all(rpairs[:, 1] <= 400)
filt_pairs = pairs[(pairs[:, 0] >= 600) * (pairs[:, 0] <= 1000) * (pairs[:, 1] >= 100) *
(pairs[:, 1] <= 400)]
assert np.all(filt_pairs == rpairs) # sketchy???
assert np.allclose(np.unique(filt_pairs, axis=1), np.unique(rpairs, axis=1))
def test_collect_pairs_range_sym():
# np.random.seed(132)
N1, N2 = 5, 100
N = N1 * N2
Npts = 1000
for j in range(N1):
xyz1 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyz2 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
pos1, pos2 = list(), list()
while 1:
x1 = hm.rand_xform(cart_sd=0.5)
x2 = hm.rand_xform(cart_sd=0.5)
d = np.linalg.norm(x1[:, 3] - x2[:, 3])
if 0.8 < d < 1.3:
pos1.append(x1)
pos2.append(x2)
if len(pos1) == N2:
break
pos1 = np.stack(pos1)
pos2 = np.stack(pos2)
pairs = list()
mindist = 0.002 + np.random.rand() / 10
pairs, lbub = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1, pos2, mindist)
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, mindist)
assert np.all(lbub == rlbub)
assert np.all(pairs == rpairs)
bounds = [100], [400], len(xyz1) // 2
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, mindist, *bounds)
assert len(rlbub) == len(pos1)
assert np.all(
np.logical_or(np.logical_and(100 <= rpairs[:, 0], rpairs[:, 0] <= 400),
np.logical_and(600 <= rpairs[:, 0], rpairs[:, 0] <= 900)))
filt_pairs = pairs[np.logical_or(np.logical_and(100 <= pairs[:, 0], pairs[:, 0] <= 400),
np.logical_and(600 <= pairs[:, 0], pairs[:, 0] <= 900))]
assert np.allclose(np.unique(filt_pairs, axis=1), np.unique(rpairs, axis=1))
bounds = [100], [400], len(xyz1) // 2, [20], [180], len(xyz1) // 5
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, mindist, *bounds)
def awful(p):
return np.logical_and(
np.logical_or(np.logical_and(100 <= p[:, 0], p[:, 0] <= 400),
np.logical_and(600 <= p[:, 0], p[:, 0] <= 900)),
np.logical_or(
np.logical_and(+20 <= p[:, 1], p[:, 1] <= 180),
np.logical_or(
np.logical_and(220 <= p[:, 1], p[:, 1] <= 380),
np.logical_or(
np.logical_and(420 <= p[:, 1], p[:, 1] <= 580),
np.logical_or(np.logical_and(620 <= p[:, 1], p[:, 1] <= 780),
np.logical_and(820 <= p[:, 1], p[:, 1] <= 980))))))
assert len(rlbub) == len(pos1)
assert np.all(awful(rpairs))
filt_pairs = pairs[awful(pairs)]
assert np.all(filt_pairs == rpairs) # sketchy???
assert np.allclose(np.unique(filt_pairs, axis=1), np.unique(rpairs, axis=1))
def test_slide_collect_pairs():
    # timings with -Ofast
# slide test 10,000 iter bvhslide float: 16,934/s double: 16,491/s bvhmin 17,968/s fracmiss: 0.0834
# np.random.seed(0)
N1, N2 = 2, 50
Npts = 5000
totbvh, totbvhf, totcol, totmin = 0, 0, 0, 0
nhit = 0
buf = -np.ones((Npts * Npts, 2), dtype="i4")
for j in range(N1):
xyz1 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyz2 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyzcol1 = xyz1[:int(Npts / 5)]
xyzcol2 = xyz2[:int(Npts / 5)]
# tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
bvhcol1 = BVH(xyzcol1)
bvhcol2 = BVH(xyzcol2)
# tcre = perf_counter() - tcre
for i in range(N2):
dirn = np.random.randn(3)
dirn /= np.linalg.norm(dirn)
radius = 0.001 + np.random.rand() / 10
pairdis = 3 * radius
pos1 = hm.rand_xform(cart_sd=0.5)
pos2 = hm.rand_xform(cart_sd=0.5)
tbvh = perf_counter()
dslide = bvh.bvh_slide(bvh1, bvh2, pos1, pos2, radius, dirn)
tbvh = perf_counter() - tbvh
if dslide > 9e8:
tn = perf_counter()
                dn, i1, i2 = bvh.bvh_min_dist(bvh1, bvh2, pos1, pos2)
tn = perf_counter() - tn
assert dn > 2 * radius
else:
nhit += 1
pos1 = hm.htrans(dirn * dslide) @ pos1
tn = perf_counter()
                dn, i1, i2 = bvh.bvh_min_dist(bvh1, bvh2, pos1, pos2)
tn = perf_counter() - tn
if not np.allclose(dn, 2 * radius, atol=1e-6):
print(dn, 2 * radius)
assert np.allclose(dn, 2 * radius, atol=1e-6)
tcol = perf_counter()
pair, o = bvh.bvh_collect_pairs(bvhcol1, bvhcol2, pos1, pos2, pairdis, buf)
assert not o
if len(pair) > 0:
tcol = perf_counter() - tcol
totcol += tcol
pair1 = pos1 @ hm.hpoint(xyzcol1[pair[:, 0]])[..., None]
pair2 = pos2 @ hm.hpoint(xyzcol2[pair[:, 1]])[..., None]
dpair = np.linalg.norm(pair2 - pair1, axis=1)
assert np.max(dpair) <= pairdis
totmin += tn
totbvh += tbvh
N = N1 * N2
print(
f"slide test {N:,} iter bvhslide double: {int(N/totbvh):,}/s bvhmin {int(N/totmin):,}/s",
# f"slide test {N:,} iter bvhslide float: {int(N/totbvhf):,}/s double: {int(N/totbvh):,}/s bvhmin {int(N/totmin):,}/s",
f"fracmiss: {nhit/N} collect {int(nhit/totcol):,}/s",
)
def test_bvh_accessors():
xyz = np.random.rand(10, 3) - [0.5, 0.5, 0.5]
b = BVH(xyz)
assert np.allclose(b.com()[:3], np.mean(xyz, axis=0))
p = b.centers()
dmat = np.linalg.norm(p[:, :3] - xyz[:, None], axis=2)
assert np.allclose(np.min(dmat, axis=1), 0)
def random_walk(N):
x = np.random.randn(N, 3).astype("f").cumsum(axis=0)
x -= x.mean(axis=0)
return 0.5 * x / x.std()
def test_bvh_isect_range(body=None, cart_sd=0.3, N2=10, mindist=0.02):
N1 = 1 if body else 2
N = N1 * N2
totbvh, totnaive, totbvh0, nhit = 0, 0, 0, 0
for ibvh in range(N1):
if body:
bvh1, bvh2 = body.bvh_bb, body.bvh_bb
else:
# xyz1 = np.random.rand(2000, 3) - [0.5, 0.5, 0.5]
# xyz2 = np.random.rand(2000, 3) - [0.5, 0.5, 0.5]
xyz1 = random_walk(1000)
xyz2 = random_walk(1000)
tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
tcre = perf_counter() - tcre
pos1 = hm.rand_xform(N2, cart_sd=cart_sd)
pos2 = hm.rand_xform(N2, cart_sd=cart_sd)
ranges = list()
for i in range(N2):
tbvh0 = perf_counter()
c = bvh.bvh_isect(bvh1=bvh1, bvh2=bvh2, pos1=pos1[i], pos2=pos2[i], mindist=mindist)
tbvh0 = perf_counter() - tbvh0
# if not c:
# continue
if c:
nhit += 1
tbvh = perf_counter()
range1 = bvh.isect_range_single(bvh1=bvh1, bvh2=bvh2, pos1=pos1[i], pos2=pos2[i],
mindist=mindist)
tbvh = perf_counter() - tbvh
tn = perf_counter()
            range2 = bvh.naive_isect_range(bvh1, bvh2, pos1[i], pos2[i], mindist)
            tn = perf_counter() - tn
            assert range1 == range2
ranges.append(range1)
# print(f"{str(range1):=^80}")
# body.move_to(pos1).dump_pdb("test1.pdb")
# body.move_to(pos2).dump_pdb("test2.pdb")
# return
# print(f"{i:3} range {range1} {tn / tbvh:8.2f}, {tn:1.6f}, {tbvh:1.6f}")
totbvh += tbvh
totnaive += tn
totbvh0 += tbvh0
lb, ub = bvh.isect_range(bvh1, bvh2, pos1, pos2, mindist)
ranges = np.array(ranges)
assert np.all(lb == ranges[:, 0])
assert np.all(ub == ranges[:, 1])
ok = np.logical_and(lb >= 0, ub >= 0)
isect, clash = bvh.bvh_isect_fixed_range_vec(bvh1, bvh2, pos1, pos2, mindist, lb, ub)
assert not np.any(isect[ok])
print(
f"iscet {nhit:,} hit of {N:,} iter bvh: {int(nhit/totbvh):,}/s fastnaive {int(nhit/totnaive):,}/s",
f"ratio {int(totnaive/totbvh):,}x isect-only: {totbvh/totbvh0:3.3f}x",
)
def test_bvh_isect_range_ids():
N1 = 50
N2 = 100
N = N1 * N2
# Nids = 100
cart_sd = 0.3
mindist = 0.03
Npts = 1000
factors = [1000, 500, 250, 200, 125, 100, 50, 40, 25, 20, 10, 8, 5, 4, 2, 1]
# Npts = 6
# factors = [3]
# mindist = 0.3
# N1 = 1
assert all(Npts % f == 0 for f in factors)
for ibvh in range(N1):
# for ibvh in [5]:
# np.random.seed(ibvh)
# print(ibvh)
Nids = factors[ibvh % len(factors)]
# xyz1 = np.random.rand(2000, 3) - [0.5, 0.5, 0.5]
# xyz2 = np.random.rand(2000, 3) - [0.5, 0.5, 0.5]
xyz1 = random_walk(Npts)
xyz2 = random_walk(Npts)
tcre = perf_counter()
        bvh1 = BVH(xyz1, [], np.repeat(np.arange(Nids), Npts // Nids))
        bvh2 = BVH(xyz2, [], np.repeat(np.arange(Nids), Npts // Nids))
tcre = perf_counter() - tcre
pos1 = hm.rand_xform(N2, cart_sd=cart_sd)
pos2 = hm.rand_xform(N2, cart_sd=cart_sd)
# pos1 = pos1[99:]
# pos2 = pos2[99:]
# print(bvh1.vol_lb())
# print(bvh1.vol_ub())
# print(bvh1.obj_id())
# assert 0
# assert bvh1.max_id() == Nids - 1
# assert bvh1.min_lb() == 0
# assert bvh1.max_ub() == Nids - 1
lb, ub = bvh.isect_range(bvh1, bvh2, pos1, pos2, mindist)
pos1 = pos1[lb != -1]
pos2 = pos2[lb != -1]
ub = ub[lb != -1]
lb = lb[lb != -1]
# print(lb, ub)
assert np.all(0 <= lb) and np.all(lb - 1 <= ub) and np.all(ub < Nids)
isectall = bvh.bvh_isect_vec(bvh1, bvh2, pos1, pos2, mindist)
assert np.all(isectall == np.logical_or(lb > 0, ub < Nids - 1))
isect, clash = bvh.bvh_isect_fixed_range_vec(bvh1, bvh2, pos1, pos2, mindist, lb, ub)
if np.any(isect):
print(np.where(isect)[0])
print('lb', lb[isect])
print('ub', ub[isect])
print('cA', clash[isect, 0])
print('cB', clash[isect, 1])
# print('is', isect.astype('i') * 100)
# print('isectlbub', np.sum(isect), np.sum(isect) / len(isect))
        assert not np.any(isect[lb <= ub])
# ========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ========================================================================
from SimpleITK.SimpleITK import *
from SimpleITK.SimpleITK import _GetMemoryViewFromImage
from SimpleITK.SimpleITK import _SetImageFromArray
from typing import List, Union
def Resample(image1, *args, **kwargs):
"""
Resample ( Image image1,
Transform transform = itk::simple::Transform(),
InterpolatorEnum interpolator = itk::simple::sitkLinear,
double defaultPixelValue = 0.0,
PixelIDValueEnum outputPixelType = itk::simple::sitkUnknown,
bool useNearestNeighborExtrapolator = false);
Resample ( Image image1,
Image referenceImage,
Transform transform = itk::simple::Transform(),
InterpolatorEnum interpolator = itk::simple::sitkLinear,
double defaultPixelValue = 0.0,
PixelIDValueEnum outputPixelType = sitkUnknown,
bool useNearestNeighborExtrapolator = false);
Resample ( const Image& image1,
VectorUInt32 size,
Transform transform = itk::simple::Transform(),
InterpolatorEnum interpolator = itk::simple::sitkLinear,
VectorDouble outputOrigin = std::vector<double>(3, 0.0),
VectorDouble outputSpacing = std::vector<double>(3, 1.0),
VectorDouble outputDirection = std::vector<double>(),
double defaultPixelValue = 0.0,
PixelIDValueEnum outputPixelType = sitkUnknown,
bool useNearestNeighborExtrapolator = false);
itk::simple::ResampleImageFilter procedural interface.
This is a custom overloaded python method, which fully supports the 3 signatures with positional and keyword
    arguments. The second positional parameter, the one without a default value, is used to determine which
    overloaded procedure signature to invoke.
"""
def _r_image(referenceImage,
transform=Transform(),
interpolator=sitkLinear,
defaultPixelValue=0.0,
outputPixelType=sitkUnknown,
useNearestNeighborExtrapolator=False):
resampler = ResampleImageFilter()
resampler.SetReferenceImage(referenceImage)
resampler.SetTransform(transform)
resampler.SetInterpolator(interpolator)
resampler.SetDefaultPixelValue(defaultPixelValue)
resampler.SetOutputPixelType(outputPixelType)
resampler.SetUseNearestNeighborExtrapolator(useNearestNeighborExtrapolator)
return resampler.Execute(image1)
def _r(size,
transform=Transform(),
interpolator=sitkLinear,
outputOrigin=(0.0, 0.0, 0.0),
outputSpacing=(1.0, 1.0, 1.0),
outputDirection=(),
defaultPixelValue=0.0,
outputPixelType=sitkUnknown,
useNearestNeighborExtrapolator=False):
resampler = ResampleImageFilter()
resampler.SetSize(size)
resampler.SetTransform(transform)
resampler.SetInterpolator(interpolator)
resampler.SetOutputOrigin(outputOrigin)
resampler.SetOutputSpacing(outputSpacing)
resampler.SetOutputDirection(outputDirection)
resampler.SetDefaultPixelValue(defaultPixelValue)
resampler.SetOutputPixelType(outputPixelType)
resampler.SetUseNearestNeighborExtrapolator(useNearestNeighborExtrapolator)
return resampler.Execute(image1)
if args:
if isinstance(args[0], Image):
return _r_image(*args, **kwargs)
if not isinstance(args[0], Transform):
try:
iter(args[0])
return _r(*args, **kwargs)
except TypeError as e:
pass
if "referenceImage" in kwargs:
return _r_image(*args, **kwargs)
if "size" in kwargs:
return _r(*args, **kwargs)
return _r_image(image1, *args, **kwargs)
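# Illustrative usage sketch (not part of the original file) of how the three overloads above
# dispatch; `moving` and `fixed` are assumed to be already-loaded Image objects:
#
#   out_a = Resample(moving, Transform())                                  # overload 1: output grid copied from `moving`
#   out_b = Resample(moving, fixed, Transform())                           # overload 2: output grid copied from `fixed`
#   out_c = Resample(moving, [64, 64, 64], outputSpacing=(1.0, 1.0, 1.0))  # overload 3: explicit size and spacing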
HAVE_NUMPY = True
try:
import numpy
except ImportError:
HAVE_NUMPY = False
def _get_numpy_dtype(sitkImage):
"""Given a SimpleITK image, returns the numpy.dtype which describes the data"""
if not HAVE_NUMPY:
raise ImportError('Numpy not available.')
# this is a mapping from sitk's pixel id to numpy's dtype
_sitk_np = {sitkUInt8: numpy.uint8,
sitkUInt16: numpy.uint16,
sitkUInt32: numpy.uint32,
sitkUInt64: numpy.uint64,
sitkInt8: numpy.int8,
sitkInt16: numpy.int16,
sitkInt32: numpy.int32,
sitkInt64: numpy.int64,
sitkFloat32: numpy.float32,
sitkFloat64: numpy.float64,
sitkComplexFloat32: numpy.complex64,
sitkComplexFloat64: numpy.complex128,
sitkVectorUInt8: numpy.uint8,
sitkVectorInt8: numpy.int8,
sitkVectorUInt16: numpy.uint16,
sitkVectorInt16: numpy.int16,
sitkVectorUInt32: numpy.uint32,
sitkVectorInt32: numpy.int32,
sitkVectorUInt64: numpy.uint64,
sitkVectorInt64: numpy.int64,
sitkVectorFloat32: numpy.float32,
sitkVectorFloat64: numpy.float64,
sitkLabelUInt8: numpy.uint8,
sitkLabelUInt16: numpy.uint16,
sitkLabelUInt32: numpy.uint32,
sitkLabelUInt64: numpy.uint64
}
return _sitk_np[sitkImage.GetPixelIDValue()]
def _get_sitk_pixelid(numpy_array_type):
"""Returns a SimpleITK PixelID given a numpy array."""
if not HAVE_NUMPY:
raise ImportError('Numpy not available.')
# This is a Mapping from numpy array types to sitks pixel types.
_np_sitk = {numpy.character: sitkUInt8,
numpy.uint8: sitkUInt8,
numpy.uint16: sitkUInt16,
numpy.uint32: sitkUInt32,
numpy.uint64: sitkUInt64,
numpy.int8: sitkInt8,
numpy.int16: sitkInt16,
numpy.int32: sitkInt32,
numpy.int64: sitkInt64,
numpy.float32: sitkFloat32,
numpy.float64: sitkFloat64,
numpy.complex64: sitkComplexFloat32,
numpy.complex128: sitkComplexFloat64
}
try:
return _np_sitk[numpy_array_type.dtype]
except KeyError:
for key in _np_sitk:
if numpy.issubdtype(numpy_array_type.dtype, key):
return _np_sitk[key]
raise TypeError('dtype: {0} is not supported.'.format(numpy_array_type.dtype))
def _get_sitk_vector_pixelid(numpy_array_type):
"""Returns a SimpleITK vector PixelID given a numpy array."""
if not HAVE_NUMPY:
raise ImportError('Numpy not available.')
# This is a Mapping from numpy array types to sitks pixel types.
_np_sitk = {numpy.character: sitkVectorUInt8,
numpy.uint8: sitkVectorUInt8,
numpy.uint16: sitkVectorUInt16,
numpy.uint32: sitkVectorUInt32,
numpy.uint64: sitkVectorUInt64,
numpy.int8: sitkVectorInt8,
numpy.int16: sitkVectorInt16,
numpy.int32: sitkVectorInt32,
numpy.int64: sitkVectorInt64,
numpy.float32: sitkVectorFloat32,
numpy.float64: sitkVectorFloat64,
}
try:
return _np_sitk[numpy_array_type.dtype]
except KeyError:
for key in _np_sitk:
if numpy.issubdtype(numpy_array_type.dtype, key):
return _np_sitk[key]
raise TypeError('dtype: {0} is not supported as an array.'.format(numpy_array_type.dtype))
# SimpleITK <-> Numpy Array conversion support.
def GetArrayViewFromImage(image):
"""Get a NumPy ndarray view of a SimpleITK Image.
    Returns a Numpy ndarray object as a "view" of the SimpleITK Image's buffer. This reduces pixel buffer copies, but
requires that the SimpleITK image object is kept around while the buffer is being used.
"""
if not HAVE_NUMPY:
raise ImportError('NumPy not available.')
pixel_id = image.GetPixelIDValue()
    assert pixel_id != sitkUnknown, "A SimpleITK image of Unknown pixel type should not exist!"
dtype = _get_numpy_dtype(image)
shape = image.GetSize()
if image.GetNumberOfComponentsPerPixel() > 1:
shape = (image.GetNumberOfComponentsPerPixel(), ) + shape
image.MakeUnique()
image_memory_view = _GetMemoryViewFromImage(image)
array_view = numpy.asarray(image_memory_view).view(dtype = dtype)
array_view.shape = shape[::-1]
return array_view
def GetArrayFromImage(image):
"""Get a NumPy ndarray from a SimpleITK Image.
This is a deep copy of the image buffer and is completely safe and without potential side effects.
"""
# TODO: If the image is already not unique then a second copy may be made before the numpy copy is done.
array_view = GetArrayViewFromImage(image)
# perform deep copy of the image buffer
return numpy.array(array_view, copy=True)
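# Usage sketch (not in the original source) contrasting the two helpers above; `img` is
# assumed to be an existing SimpleITK Image:
#
#   view = GetArrayViewFromImage(img)   # zero-copy view; keep `img` alive while using it
#   arr  = GetArrayFromImage(img)       # independent deep copy, safe to modify freely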
def GetImageFromArray(arr, isVector=None):
""" Get a SimpleITK Image from a numpy array.
If isVector is True, then the Image will have a Vector pixel type, and the last dimension of the array will be
considered the component index. By default when isVector is None, 4D arrays
are automatically considered 3D vector images, but 3D arrays are 3D images.
"""
if not HAVE_NUMPY:
raise ImportError('Numpy not available.')
z = | numpy.asarray(arr) | numpy.asarray |
import numpy as np
from numpy.linalg import inv
from geomdl import NURBS
from geomdl import multi
from geomdl import construct
from geomdl import convert
from geomdl.visualization import VisVTK as vis
from geomdl.visualization import VisMpL
from geomdl import exchange
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# CTRL_knot_list = [[0.0, 0.0], [0.0, 1.0 / 3.0], [0.0, 2.0 / 3.0], [0.0, 1.0],
# [1.0 / 3.0, 0.0], [1.0 / 3.0, 1.0 / 3.0], [1.0 / 3.0, 2.0 / 3.0], [1.0 / 3.0, 1.0],
# [2.0 / 3.0, 0.0], [2.0 / 3.0, 1.0 / 3.0], [2.0 / 3.0, 2.0 / 3.0], [2.0 / 3.0, 1.0],
# [1.0, 0.0], [1.0, 1.0 / 3.0], [1.0, 2.0 / 3.0], [1.0, 1.0]]
# CNTRL_Knot_Side = [[0.0, 0.0], [0.0, 0.0], [0.0, 1.0], [0.0, 1.0],
# [0.0, 1.0], [0.0, 1.0], [1.0, 1.0], [1.0, 1.0],
# [1.0, 1.0], [1.0, 1.0], [1.0, 0.0], [1.0, 0.0],
# [1.0, 0.0], [1.0, 0.0], [0.0, 0.0], [0.0, 0.0]]
# tri_list = [[0, 1, 4], [5, 4, 1], [1, 2, 5], [6, 5, 2], [2, 3, 6], [7, 6, 3],
# [4, 5, 8], [9, 8, 5], [5, 6, 9], [10, 9, 6], [6, 7, 10], [11, 10, 7],
# [8, 9, 12], [13, 12, 9], [9, 10, 13], [14, 13, 10], [10, 11, 14], [15, 14, 11]]
tri_list_side = [[0, 2, 1], [0, 3, 1], [3, 2, 1], [3, 2, 0],
[3, 11, 7], [3, 15, 7], [15, 11, 7], [15, 11, 3],
[15, 13, 14], [15, 12, 14], [12, 13, 14], [12, 13, 15],
[12, 4, 8], [12, 0, 8], [0, 4, 8], [0, 4, 12]]
def bound_box(cntrl_pt):
bmin = [min(cntrl_pt[:, 0]), min(cntrl_pt[:, 1]), min(cntrl_pt[:, 2])]
bmax = [max(cntrl_pt[:, 0]), max(cntrl_pt[:, 1]), max(cntrl_pt[:, 2])]
return bmin, bmax
def bound_box_simul(ctrl_pts):
b_max = np.empty([ctrl_pts.shape[0], 3], dtype=np.float32)
b_min = np.empty([ctrl_pts.shape[0], 3], dtype=np.float32)
e = 4
for i in range(0, b_max.shape[0]):
b_min[i, :] = [min(ctrl_pts[i, :, 0]),
min(ctrl_pts[i, :, 1]),
min(ctrl_pts[i, :, 2])]
b_max[i, :] = np.array([max(ctrl_pts[i, :, 0]),
max(ctrl_pts[i, :, 1]),
max(ctrl_pts[i, :, 2])])
pass
return b_min, b_max
def padding_simul(b_min, b_max, vox_count):
origin = np.empty([b_max.shape[0], 3], dtype=np.float32)
vox_size = np.empty([b_max.shape[0]], dtype=np.float32)
for i in range(0, b_min.shape[0]):
g_max = [max(b_max[i, :, 0]), max(b_max[i, :, 1]), max(b_max[i, :, 2])]
g_min = [min(b_min[i, :, 0]), min(b_min[i, :, 1]), min(b_min[i, :, 2])]
maj_x = g_max[0] - g_min[0]
maj_y = g_max[1] - g_min[1]
maj_z = g_max[2] - g_min[2]
maj_axis = max(max(maj_x, maj_y), maj_z)
vox_size[i] = maj_axis / vox_count
pad_x = maj_axis - maj_x
pad_y = maj_axis - maj_y
pad_z = maj_axis - maj_z
if pad_x != 0:
g_max[0] += pad_x / 2
g_min[0] -= pad_x / 2
if pad_y != 0:
g_max[1] += pad_y / 2
g_min[1] -= pad_y / 2
if pad_z != 0:
g_max[2] += pad_z / 2
g_min[2] -= pad_z / 2
origin[i] = [g_min[0] + (vox_size[i] / 2), g_min[1] + (vox_size[i] / 2), g_min[2] + (vox_size[i] / 2)]
return origin, vox_size
pass
def voxel_assign_single(voxels_all, val, direc, i, t_count, vox_count):
if direc == 0:
for inst in range(0, voxels_all.shape[0]):
if voxels_all[inst][i // vox_count][i % vox_count][t_count] == 0:
voxels_all[inst][i // vox_count][i % vox_count][t_count] = val
break
elif voxels_all[inst][i // vox_count][i % vox_count][t_count] == val:
break
elif direc == 1:
for inst in range(0, voxels_all.shape[0]):
if voxels_all[inst][i % vox_count][t_count][i // vox_count] == 0:
voxels_all[inst][i % vox_count][t_count][i // vox_count] = val
break
elif voxels_all[inst][i % vox_count][t_count][i // vox_count] == val:
break
elif direc == 2:
for inst in range(0, voxels_all.shape[0]):
if voxels_all[inst][t_count][i // vox_count][i % vox_count] == 0:
voxels_all[inst][t_count][i // vox_count][i % vox_count] = val
break
elif voxels_all[inst][t_count][i // vox_count][i % vox_count] == val:
break
pass
pass
def nr_inter_single(b_max, b_min, vox_count, vox_size, origin, dir_1, dir_2, ray_d,
tri_list_3, ctrl_pts, knot_list_3, vox_all, direc, arr_idx):
tri = np.empty([3, 3], dtype=np.float32)
for j in range(0, vox_count * vox_count):
ray = [origin[0] + ((j // vox_count) * vox_size * dir_1[0]) + ((j % vox_count) * vox_size * dir_2[0]),
origin[1] + ((j // vox_count) * vox_size * dir_1[1]) + ((j % vox_count) * vox_size * dir_2[1]),
origin[2] + ((j // vox_count) * vox_size * dir_1[2]) + ((j % vox_count) * vox_size * dir_2[2])]
for k in range(0, b_max.shape[0]):
if ray_box_inter(b_min[k], b_max[k], ray, ray_d):
for t in range(0, tri_list_3.shape[0]):
                    tri_pts = ctrl_pts[k // 6][k % 6]
                    tri[0] = [tri_pts[tri_list_3[t][0]][0], tri_pts[tri_list_3[t][0]][1], tri_pts[tri_list_3[t][0]][2]]
tri[1] = [tri_pts[tri_list_3[t][1]][0], tri_pts[tri_list_3[t][1]][1], tri_pts[tri_list_3[t][1]][2]]
tri[2] = [tri_pts[tri_list_3[t][2]][0], tri_pts[tri_list_3[t][2]][1], tri_pts[tri_list_3[t][2]][2]]
A = np.array([[-ray_d[0], tri[2][0] - tri[0][0], tri[1][0] - tri[0][0]],
[-ray_d[1], tri[2][1] - tri[0][1], tri[1][1] - tri[0][1]],
[-ray_d[2], tri[2][2] - tri[0][2], tri[1][2] - tri[0][2]]])
B = np.array([[ray[0] - tri[0][0]], [ray[1] - tri[0][1]], [ray[2] - tri[0][2]]])
param = np.matmul(inv(A), B)
if param[1] >= 0.0 and param[2] >= 0.0:
if param[1] + param[2] <= 1.0:
# print('intersection')
knot_inter = [knot_list_3[tri_list_3[t][0]][0], knot_list_3[tri_list_3[t][0]][1]]
t_inter = param[0]
if t % 2 == 0:
u_inter = knot_inter[0] + (param[1] * 0.33)
v_inter = knot_inter[1] + (param[2] * 0.33)
else:
u_inter = knot_inter[0] - (param[1] * 0.33)
v_inter = knot_inter[1] - (param[2] * 0.33)
[bol, t_count] = newton_method(t_inter, u_inter, v_inter, ray, ray_d, vox_size, tri_pts, 3)
if bol:
# val = (int(k // 6) + 1)
val = int(arr_idx[j])
voxel_assign_single(vox_all, val, direc, j, t_count, vox_count)
return vox_all
def post_process(voxels_all, voxel_master, vox_x, vox_y, vox_z, direc, vox_1, vox_2):
for i in range(0, vox_1 * vox_2):
inout_vox = np.empty(2, dtype=np.uint8)
vox_list_1 = np.zeros(5, dtype=np.float32)
vox_list_2 = np.zeros(5, dtype=np.float32)
if direc == 0:
for j in range(0, vox_z):
if voxels_all[0][i // vox_y][i % vox_y][j] != 0:
for inst in range(0, voxels_all.shape[0]):
vox_list_2[inst] = voxels_all[inst][i // vox_y][i % vox_y][j]
elem = list_compare(voxels_all.shape[0], vox_list_1, vox_list_2)
if elem != -1:
inout_vox[1] = j
for vox in range(inout_vox[0], inout_vox[1] + 1):
voxel_master[i // vox_y][i % vox_y][vox] = elem
for inst in range(0, voxels_all.shape[0]):
vox_list_1[inst] = voxels_all[inst][i // vox_y][i % vox_y][j]
inout_vox[0] = j
elif direc == 1:
for j in range(0, vox_y):
if voxels_all[0][i % vox_x][j][i // vox_x] != 0:
for inst in range(0, voxels_all.shape[0]):
vox_list_2[inst] = voxels_all[inst][i % vox_x][j][i // vox_x]
elem = list_compare(voxels_all.shape[0], vox_list_1, vox_list_2)
if elem != -1:
inout_vox[1] = j
for vox in range(inout_vox[0], inout_vox[1] + 1):
voxel_master[i % vox_x][vox][i // vox_x] = elem
for inst in range(0, voxels_all.shape[0]):
vox_list_1[inst] = voxels_all[inst][i % vox_x][j][i // vox_x]
inout_vox[0] = j
elif direc == 2:
for j in range(0, vox_x):
if voxels_all[0][j][i // vox_z][i % vox_z] != 0:
for inst in range(0, voxels_all.shape[0]):
vox_list_2[inst] = voxels_all[inst][j][i // vox_z][i % vox_z]
elem = list_compare(voxels_all.shape[0], vox_list_1, vox_list_2)
if elem != -1:
inout_vox[1] = j
for vox in range(inout_vox[0], inout_vox[1] + 1):
voxel_master[vox][i // vox_z][i % vox_z] = elem
for inst in range(0, voxels_all.shape[0]):
vox_list_1[inst] = voxels_all[inst][j][i // vox_z][i % vox_z]
inout_vox[0] = j
return voxel_master
pass
def list_compare(depth, vox_list_1, vox_list_2):
elem = -1
for idx_1 in range(0, depth):
if vox_list_1[idx_1] != 0:
for idx_2 in range(0, depth):
if vox_list_2[idx_2] != 0:
if vox_list_1[idx_1] == vox_list_2[idx_2]:
elem = vox_list_1[idx_1]
return elem
pass
# def gauss_val(vox_master, vox_count, origin, vox_size, stress): # for two chamber
def gauss_val(vox_master, vox_count, origin, vox_size, ctrl_pts, stress): # for Aorta
for i in range(0, vox_count * vox_count):
for j in range(0, vox_count):
if vox_master[i // vox_count][i % vox_count][j] != 0:
elem = int(vox_master[i // vox_count][i % vox_count][j]) - 1
gauss_vals = min_dist_vox(j, origin, vox_count, vox_size, i, ctrl_pts[elem])
vox_master[i // vox_count][i % vox_count][j] = gauss_vals
return vox_master
pass
def newton_single(t, u, v, ray, direc, cps, degree):
non_conv = 0
iter_count = 0
t_count = 0
knot_u = np.array([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0])
knot_v = np.array([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0])
s_1 = surf_pt(u, v, cps, knot_u, knot_v, degree, degree)
p_1 = ray + (t * direc)
obj_func = np.abs(np.array([[s_1[0] - p_1[0]], [s_1[1] - p_1[1]], [s_1[2] - p_1[2]]]))
# obj_func = np.array([[s_1[0] - p_1[0]], [s_1[1] - p_1[1]], [s_1[2] - p_1[2]]])
dist = np.linalg.norm(s_1 - p_1)
print(dist)
while dist > 0.001:
deri = deri_surf(u, v, 1, cps, knot_u, knot_v, degree, degree)
jacob = np.array([[-direc[0], deri[1][0][0], deri[0][1][0]],
[-direc[1], deri[1][0][1], deri[0][1][1]],
[-direc[2], deri[1][0][2], deri[0][1][2]]])
opti_sub = np.matmul(inv(jacob), obj_func)
t -= opti_sub[0]
u -= opti_sub[1]
v -= opti_sub[2]
print(t, u, v)
if u < 0.0: u = np.random.random()
if u > 1.0: u = np.random.random()
if v < 0.0: v = np.random.random()
if v > 1.0: v = np.random.random()
if t < 0.0:
print(t, u, v, ' ==== its negative')
t = 0.0
if (u == 0.0 and v == 0.0) or (u == 1.0 and v == 1.0):
non_conv += 1
print('Non Conver')
if non_conv == 50 or iter_count == 50:
print(non_conv, iter_count)
return False, t_count
s_1 = surf_pt(u, v, cps, knot_u, knot_v, degree, degree)
p_1 = ray + (t * direc)
obj_func = np.array([[s_1[0] - p_1[0]], [s_1[1] - p_1[1]], [s_1[2] - p_1[2]]])
# obj_func = np.abs(np.array([[s_1[0] - p_1[0]], [s_1[1] - p_1[1]], [s_1[2] - p_1[2]]]))
dist = np.linalg.norm(s_1 - p_1)
print(dist)
iter_count += 1
pts = surf_pt(u, v, cps, knot_u, knot_v, degree, degree)
return True, pts
pass
def newton_method(t, u, v, ray, ray_d, vox_size, ctrl_pts, degree):
non_conv = 0
iter_count = 0
t_count = 0
s_1 = surf_pt(u, v, ctrl_pts, degree)
p_1 = ray + (t * ray_d)
obj_func = np.array([[s_1[0] - p_1[0]], [s_1[1] - p_1[1]], [s_1[2] - p_1[2]]])
dist = np.linalg.norm(s_1 - p_1)
while dist > 0.001:
deri = deri_surf(u, v, 1, ctrl_pts, 3)
jacob = np.array([[-ray_d[0], deri[1][0][0], deri[0][1][0]],
[-ray_d[1], deri[1][0][1], deri[0][1][1]],
[-ray_d[2], deri[1][0][2], deri[0][1][2]]])
opti_sub = np.matmul(inv(jacob), obj_func)
t -= opti_sub[0]
u -= opti_sub[1]
v -= opti_sub[2]
if u < 0.0: u = 0.0
if u > 1.0: u = 1.0
if v < 0.0: v = 0.0
if v > 1.0: v = 1.0
if (u == 0.0 and v == 0.0) or (u == 1.0 and v == 1.0):
non_conv += 1
if non_conv == 50 or iter_count == 50:
return False, t_count
s_1 = surf_pt(u, v, ctrl_pts, degree)
p_1 = ray + (t * ray_d)
obj_func = np.array([[s_1[0] - p_1[0]], [s_1[1] - p_1[1]], [s_1[2] - p_1[2]]])
dist = np.linalg.norm(s_1 - p_1)
iter_count += 1
if dist < 0.001:
t_count = int(t // vox_size)
if t % vox_size >= vox_size / 2:
t_count += 1
pass
return True, t_count
def ray_box_inter(b_min, b_max, ray_o, ray_d):
    invR = np.empty([3], dtype=np.float32)
import os
import sys
from collections import namedtuple
from itertools import product
import numpy as np
from numpy.matlib import repmat
import pandas as pd
from scipy.stats import t
from ctg.core.config import config
import ctg.core.calculate_abundance as calculate_abundance
# import warnings
# from pandas.core.common import SettingWithCopyWarning
# warnings.simplefilter('error', SettingWithCopyWarning)
'''
The file format below is hardcoded for Amanda's files. This can be changed
later by passing a global config object for the run (or just additional
function arguments).
Currently, the time points should be of the format
construct_id probe_a_id probe_b_id target_a_id target_b_id {NAME}_T{DAYS}_{REP}
In addition, reps are defined to be the first set of levels in the loading functions
'''
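# Example (illustrative, not part of the original module) of splitting a timepoint column
# that follows the {NAME}_T{DAYS}_{REP} convention described above; the sample values are
# made up for demonstration:
#
#   import re
#   m = re.match(r"(?P<name>.+)_T(?P<days>\d+)_(?P<rep>.+)$", "screenA_T21_repB")
#   m.group("name"), int(m.group("days")), m.group("rep")   # ('screenA', 21, 'repB')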
def ma_cov(x,y, axis=0):
"""Calculates the covariance from masked numpy arrays"""
    return np.ma.mean(x*y, axis=axis) - (np.ma.mean(x, axis=axis)*np.ma.mean(y, axis=axis))
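# Minimal usage sketch (added for illustration). Each np.ma.mean above is taken over its own
# unmasked entries, giving the population-style estimate E[xy] - E[x]E[y]:
#
#   a = np.ma.masked_invalid(np.array([1.0, 2.0, np.nan, 4.0]))
#   b = np.ma.array([2.0, 4.0, 6.0, 8.0])
#   ma_cov(a, b)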
# To import required modules:
import numpy as np
import time
import os
import sys
import matplotlib
import matplotlib.cm as cm #for color maps
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec #for specifying plot attributes
from matplotlib import ticker #for setting contour plots to log scale
import scipy.integrate #for numerical integration
import scipy.misc #for factorial function
from scipy.special import erf #error function, used in computing CDF of normal distribution
import scipy.interpolate #for interpolation functions
import corner #corner.py package for corner plots
#matplotlib.rc('text', usetex=True)
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from src.functions_general import *
from src.functions_compare_kepler import *
from src.functions_load_sims import *
from src.functions_plot_catalogs import *
from src.functions_plot_params import *
savefigures = False
savefigures_directory = '/Users/hematthi/Documents/GradSchool/Research/ExoplanetsSysSim_Clusters/Figures/Model_Optimization/AMD_system/Split_stars/Singles_ecc/Params11_KS/durations_norm_circ_singles_multis_GF2020_KS/Best_models/GP_med/'
##### To load the underlying populations:
loadfiles_directory = '/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/AMD_system/Split_stars/Singles_ecc/Params11_KS/Distribute_AMD_per_mass/durations_norm_circ_singles_multis_GF2020_KS/GP_med/' #Lognormal_mass_Earthlike_rocky/
run_number = ''
N_sim, cos_factor, P_min, P_max, radii_min, radii_max = read_targets_period_radius_bounds(loadfiles_directory + 'periods%s.out' % run_number)
param_vals_all = read_sim_params(loadfiles_directory + 'periods%s.out' % run_number)
sssp_per_sys, sssp = compute_summary_stats_from_cat_phys(file_name_path=loadfiles_directory, run_number=run_number, load_full_tables=True)
##### To load some mass-radius tables:
# NWG-2018 model:
MR_table_file = '../../data/MRpredict_table_weights3025_R1001_Q1001.txt'
with open(MR_table_file, 'r') as file:
lines = (line for line in file if not line.startswith('#'))
MR_table = np.genfromtxt(lines, names=True, delimiter=', ')
# Li Zeng models:
MR_earthlike_rocky = np.genfromtxt('../../data/MR_earthlike_rocky.txt', names=['mass','radius']) # mass and radius are in Earth units
MR_pure_iron = np.genfromtxt('../../data/MR_pure_iron.txt', names=['mass','radius']) # mass and radius are in Earth units
# To construct an interpolation function for each MR relation:
MR_NWG2018_interp = scipy.interpolate.interp1d(10.**MR_table['log_R'], 10.**MR_table['05'])
MR_earthlike_rocky_interp = scipy.interpolate.interp1d(MR_earthlike_rocky['radius'], MR_earthlike_rocky['mass'])
MR_pure_iron_interp = scipy.interpolate.interp1d(MR_pure_iron['radius'], MR_pure_iron['mass'])
# To find where the Earth-like rocky relation intersects with the NWG2018 mean relation (between 1.4-1.5 R_earth):
def diff_MR(R):
M_NWG2018 = MR_NWG2018_interp(R)
M_earthlike_rocky = MR_earthlike_rocky_interp(R)
return np.abs(M_NWG2018 - M_earthlike_rocky)
# The intersection is approximately 1.472 R_earth
radii_switch = 1.472
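# Sketch (not in the original script) of how this crossover can be located numerically using
# diff_MR defined above; scipy.optimize is an extra import beyond those at the top of the file:
#
#   import scipy.optimize
#   res = scipy.optimize.minimize_scalar(diff_MR, bounds=(1.4, 1.5), method='bounded')
#   # res.x is approximately 1.472 (Earth radii)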
# IDEA 1: Normal distribution for rho centered around Earth-like rocky, with a sigma_rho that grows with radius
# To define sigma_rho such that log10(sigma_rho) is a linear function of radius:
rho_earthlike_rocky = rho_from_M_R(MR_earthlike_rocky['mass'], MR_earthlike_rocky['radius']) # mean density (g/cm^3) for Earth-like rocky as a function of radius
rho_pure_iron = rho_from_M_R(MR_pure_iron['mass'], MR_pure_iron['radius']) # mean density (g/cm^3) for pure iron as a function of radius
sigma_rho_at_radii_switch = 3. # std of mean density (g/cm^3) at radii_switch
sigma_rho_at_radii_min = 1. # std of mean density (g/cm^3) at radii_min
rho_radius_slope = (np.log10(sigma_rho_at_radii_switch)-np.log10(sigma_rho_at_radii_min)) / (radii_switch - radii_min) # dlog(rho)/dR; slope between radii_min and radii_switch in log(rho)
sigma_rho = 10.**( rho_radius_slope*(MR_earthlike_rocky['radius'] - radii_min) + np.log10(sigma_rho_at_radii_min) )
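# Sketch (added for illustration) of how IDEA 1 could be sampled for a planet of radius R_p,
# assuming M_from_R_rho (used in the plotting code below) converts a (radius, density) pair
# into a mass:
#
#   rho_mean_interp = scipy.interpolate.interp1d(MR_earthlike_rocky['radius'], rho_earthlike_rocky)
#   sigma_rho_interp = scipy.interpolate.interp1d(MR_earthlike_rocky['radius'], sigma_rho)
#   def draw_mass_idea1(R_p):
#       rho_draw = np.random.normal(rho_mean_interp(R_p), sigma_rho_interp(R_p))
#       return M_from_R_rho(R_p, rho=rho_draw)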
# IDEA 2: Lognormal distribution for mass centered around Earth-like rocky, with a sigma_log_M that grows with radius
# To define sigma_log_M as a linear function of radius:
sigma_log_M_at_radii_switch = 0.3 # std of log_M (Earth masses) at radii_switch
sigma_log_M_at_radii_min = 0.04 # std of log_M (Earth masses) at radii_min
sigma_log_M_radius_slope = (sigma_log_M_at_radii_switch - sigma_log_M_at_radii_min) / (radii_switch - radii_min)
sigma_log_M = sigma_log_M_radius_slope*(MR_earthlike_rocky['radius'] - radii_min) + sigma_log_M_at_radii_min
##### To make mass-radius plots:
afs = 20 #axes labels font size
tfs = 20 #text labels font size
lfs = 16 #legend labels font size
bins = 100
# Density vs. radius for new model based on Li Zeng's Earth-like rocky:
fig = plt.figure(figsize=(8,8))
plot = GridSpec(4, 1, left=0.15, bottom=0.1, right=0.98, top=0.98, wspace=0, hspace=0)
ax = plt.subplot(plot[0,:]) # sigma_rho vs. radius
plt.plot(MR_earthlike_rocky['radius'], sigma_rho, color='orange', ls='-', lw=3, label=r'Linear $\log(\sigma_\rho)$ vs $R_p$')
plt.gca().set_yscale("log")
ax.tick_params(axis='both', labelsize=afs)
plt.xticks([])
plt.yticks([1., 2., 3., 4., 5.])
ax.yaxis.set_major_formatter(ticker.ScalarFormatter())
ax.yaxis.get_major_formatter().set_scientific(False)
ax.yaxis.get_major_formatter().set_useOffset(False)
plt.xlim([radii_min, radii_switch])
plt.ylim([0.9, 4.])
plt.ylabel(r'$\sigma_\rho$ ($g/cm^3$)', fontsize=tfs)
plt.legend(loc='upper left', bbox_to_anchor=(0.01,0.99), ncol=1, frameon=False, fontsize=lfs)
ax = plt.subplot(plot[1:,:]) # rho vs. radius
plt.plot(MR_pure_iron['radius'], rho_pure_iron, color='r', ls='--', lw=3, label='Pure iron')
plt.plot(MR_earthlike_rocky['radius'], rho_earthlike_rocky, color='orange', ls='--', lw=3, label='Earth-like rocky')
plt.fill_between(MR_earthlike_rocky['radius'], rho_earthlike_rocky - sigma_rho, rho_earthlike_rocky + sigma_rho, color='orange', alpha=0.5, label=r'Earth-like rocky $\pm \sigma_\rho$')
plt.fill_between(MR_earthlike_rocky['radius'], rho_earthlike_rocky - 2.*sigma_rho, rho_earthlike_rocky + 2.*sigma_rho, color='orange', alpha=0.3, label=r'Earth-like rocky $\pm 2\sigma_\rho$')
plt.fill_between(MR_earthlike_rocky['radius'], rho_earthlike_rocky - 3.*sigma_rho, rho_earthlike_rocky + 3.*sigma_rho, color='orange', alpha=0.1, label=r'Earth-like rocky $\pm 3\sigma_\rho$')
plt.axhline(y=1., color='c', lw=3, label='Water density (1 g/cm^3)')
plt.gca().set_yscale("log")
ax.tick_params(axis='both', labelsize=afs)
plt.minorticks_off()
plt.yticks([1., 2., 3., 4., 5., 7., 10., 15.])
ax.yaxis.set_minor_formatter(ticker.ScalarFormatter())
ax.yaxis.set_major_formatter(ticker.ScalarFormatter())
ax.yaxis.get_major_formatter().set_scientific(False)
ax.yaxis.get_major_formatter().set_useOffset(False)
plt.xlim([radii_min, radii_switch])
plt.ylim([0.9, 20.])
plt.xlabel(r'$R_p$ ($R_\oplus$)', fontsize=tfs)
plt.ylabel(r'$\rho$ ($g/cm^3$)', fontsize=tfs)
plt.legend(loc='lower right', bbox_to_anchor=(0.99,0.01), ncol=1, frameon=False, fontsize=lfs)
if savefigures:
plt.savefig(savefigures_directory + 'Density_radius.pdf')
plt.close()
plt.show()
# Mass vs. radius:
fig = plt.figure(figsize=(16,8))
plot = GridSpec(5, 5, left=0.1, bottom=0.1, right=0.98, top=0.98, wspace=0, hspace=0)
ax = plt.subplot(plot[1:,:4])
masses_all = sssp_per_sys['mass_all'][sssp_per_sys['mass_all'] > 0.]
radii_all = sssp_per_sys['radii_all'][sssp_per_sys['radii_all'] > 0.]
corner.hist2d(np.log10(radii_all), np.log10(masses_all), bins=50, plot_density=True, contour_kwargs={'colors': ['0.6','0.4','0.2','0']}, data_kwargs={'color': 'k'})
plt.plot(MR_table['log_R'], MR_table['05'], '-', color='g', label='Mean prediction (NWG2018)')
plt.fill_between(MR_table['log_R'], MR_table['016'], MR_table['084'], color='g', alpha=0.5, label=r'16%-84% (NWG2018)')
plt.plot(MR_table['log_R'], np.log10(M_from_R_rho(10.**MR_table['log_R'], rho=5.51)), color='b', label='Earth density (5.51 g/cm^3)')
plt.plot(MR_table['log_R'], np.log10(M_from_R_rho(10.**MR_table['log_R'], rho=3.9)), color='m', label='Mars density (3.9 g/cm^3)')
plt.plot(MR_table['log_R'], np.log10(M_from_R_rho(10.**MR_table['log_R'], rho=1.)), color='c', label='Water density (1 g/cm^3)')
plt.plot(MR_table['log_R'], np.log10(M_from_R_rho(10.**MR_table['log_R'], rho=7.9)), color='r', label='Iron density (7.9 g/cm^3)')
plt.plot(MR_table['log_R'], np.log10(M_from_R_rho(10.**MR_table['log_R'], rho=100.)), color='k', label='100 g/cm^3')
plt.plot(np.log10(MR_earthlike_rocky['radius']), np.log10(MR_earthlike_rocky['mass']), color='orange', ls='--', lw=3, label='Earth-like rocky')
#plt.fill_between(np.log10(MR_earthlike_rocky['radius']), np.log10(M_from_R_rho(MR_earthlike_rocky['radius'], rho=rho_earthlike_rocky-sigma_rho)), np.log10(M_from_R_rho(MR_earthlike_rocky['radius'], rho=rho_earthlike_rocky+sigma_rho)), color='orange', alpha=0.5, label=r'16%-84% ($\rho \sim \mathcal{N}(\rho_{\rm Earthlike\:rocky}, \sigma_\rho(R_p))$)') #label=r'$\rho \sim \mathcal{N}(\rho_{\rm Earthlike\:rocky}, 10^{[\frac{d\log\rho}{dR_p}(R_p - 0.5) + \log{\rho_0}]})$'
plt.fill_between(np.log10(MR_earthlike_rocky['radius']), np.log10(MR_earthlike_rocky['mass']) - sigma_log_M, np.log10(MR_earthlike_rocky['mass']) + sigma_log_M, color='orange', alpha=0.5, label=r'16%-84% ($\log{M_p} \sim \mathcal{N}(\log{M_{\rm Earthlike\:rocky}}, \sigma_{\log{M}}(R_p))$)') # NOTE: this call was truncated in the source; the +/- sigma_log_M band and label are a reconstruction based on IDEA 2 above
# Author: <NAME> <<EMAIL>> KTH 2018
#
# Original paper for the CNN model:
# @inproceedings{kimyoon_cnn,
# author = {<NAME>},
# title = {Convolutional Neural Networks for Sentence Classification},
# booktitle = {{EMNLP}},
# pages = {1746--1751},
# publisher = {{ACL}},
# year = 2014
# }
#
# Tensorflow implementation inspiration from <NAME>:
# https://github.com/dennybritz/cnn-text-classification-tf
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import pre_process
import argparse
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import f1_score
from tensorflow.python.tools import freeze_graph
"""
Script for training a text classifier with Kim Yoon's CNN, either with pre-trained or random initialized embeddings.
Can use either noisy or binary labels and either a single input channel or multi-channel.
"""
# Enable deterministic comparisons between executions
tf.set_random_seed(0)
# Constants
NUM_CLASSES = 13
NUM_CHANNELS = 2
def define_placeholders(sequence_length, multichannel=False):
""" Define placeholders for input features,labels, and dropout """
if(multichannel):
x_placeholder = tf.placeholder(tf.int32, [None, sequence_length, NUM_CHANNELS], name="input_x")
else:
x_placeholder = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
y_placeholder = tf.placeholder(tf.float32, [None, NUM_CLASSES], name="input_y")
dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
return x_placeholder, y_placeholder, dropout_keep_prob
def build_graph(x_placeholder, vocab_size, embedding_size, dropout_placeholder, sequence_length, filter_sizes,
num_filters, initW, pretrained=False, multichannel=False):
""" Build the computational graph for forward and backward propagation """
# Keeping track of l2 regularization loss
l2_loss = tf.constant(0.0)
# Embedding layer
with tf.name_scope("embedding"):
if(pretrained):
W = initW
else:
W = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0), name="W")
if(multichannel):
#Lookup word-ids in the embedding matrix
embedded_chars = tf.nn.embedding_lookup(W, x_placeholder)
#Transpose to get correct format
embedded_chars_expanded = tf.transpose(embedded_chars, [0,1,3,2])
else:
#Lookup word-ids in the embedding matrix
embedded_chars = tf.nn.embedding_lookup(W, x_placeholder)
#CNN expects 3D input, expand to be 1 channel so it fits
embedded_chars_expanded = tf.expand_dims(embedded_chars, -1)
# Create a convolution + maxpool layer for each filter size
pooled_outputs = []
for i, filter_size in enumerate(filter_sizes):
with tf.name_scope("conv-maxpool-%s" % filter_size):
# Convolution Layer
if(multichannel):
filter_shape = [filter_size, embedding_size, NUM_CHANNELS, num_filters]
else:
filter_shape = [filter_size, embedding_size, 1, num_filters]
#Initialize weights randomly
W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
#Initialize bias
b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
#Convolution operation, 2D convolution (patch strides over 2d surface for all input channels one at a time) on 4D input
            #VALID padding => no padding, so output width = (input_width - filter_width + 1) / stride
#strides = [1,1,1,1], one stride for each dimension
conv = tf.nn.conv2d(embedded_chars_expanded, W, strides=[1, 1, 1, 1], padding="VALID", name="conv")
# Apply RELU nonlinearity to the output of conv operation added with the bias
h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
# Maxpooling over the outputs of RELU
# ksize is the dimensions of patch
# the patch is slided over the input and outputs the max element of each region
# (intuitively sub-sample the input by focusing on keywords and dropping noise)
pooled = tf.nn.max_pool(h, ksize=[1, sequence_length - filter_size + 1, 1, 1], strides=[1, 1, 1, 1],
padding='VALID', name="pool")
# Since we have one pooling for each conv channel we store all outputs (multi dimensional) in an array
pooled_outputs.append(pooled)
# Combine all the pooled features
num_filters_total = num_filters * len(filter_sizes)
# append pooled features on last axis
h_pool = tf.concat(pooled_outputs, 3)
# flatten output
h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])
# Add dropout
with tf.name_scope("dropout"):
h_drop = tf.nn.dropout(h_pool_flat, dropout_placeholder)
# Final (unnormalized) scores and predictions
with tf.name_scope("output"):
# Weights between pooled features and output, uses "Xavier" initialization from paper "Understanding the difficulty of training deep feedforward neural networks"
W = tf.get_variable(
"W",
shape=[num_filters_total, NUM_CLASSES],
initializer=tf.contrib.layers.xavier_initializer())
# initialize bias
b = tf.Variable(tf.constant(0.1, shape=[NUM_CLASSES]), name="b")
# l2 loss
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
# h_drop x weights + b
logits = tf.nn.xw_plus_b(h_drop, W, b, name="scores")
# cast logits to binary predictions
predictions = tf.where(logits > 0.5, tf.ones_like(logits), tf.zeros_like(logits), name="predictions")
return logits, predictions, l2_loss
def define_optimizer(learning_rate, logits, y_placeholder, l2_loss, predictions, l2_reg_lambda):
""" Define the optimizer, loss, accuracy etc for doing the learning """
# Calculate mean cross-entropy loss
with tf.name_scope("loss"):
# Binary logistic loss for each class (works with both probabilistic labels and binary labels)
losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y_placeholder,
name="losses")
# Sum the log-loss for each class and add l2 regularization
loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
tf.summary.scalar("regularized_loss", loss)
# When using probabilistic labels this casting is necessary to get binary labels for computing statistics
y_preds = tf.where(y_placeholder > 0.5, tf.ones_like(y_placeholder), tf.zeros_like(y_placeholder))
# Compare labels with predictions
correct_predictions = tf.equal(tf.cast(predictions, dtype=tf.int32), tf.cast(y_preds, dtype=tf.int32))
# Compute stats and update tensorboard
with tf.name_scope("accuracy"):
accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
tf.summary.scalar("accuracy", accuracy)
with tf.name_scope("streaming_acc"):
streaming_accuracy, str_acc_update = tf.metrics.accuracy(labels=y_preds, predictions=predictions)
tf.summary.scalar("streaming_ accuracy", str_acc_update)
with tf.name_scope('recall'):
recall, rec_update = tf.metrics.recall(labels=y_preds, predictions=predictions)
tf.summary.scalar("recall", rec_update)
with tf.name_scope('precision'):
precision, pre_update = tf.metrics.precision(labels=y_preds, predictions=predictions)
tf.summary.scalar("precision", precision)
with tf.name_scope('F1'):
F1 = (2 * pre_update * rec_update) / (pre_update + rec_update)
tf.summary.scalar("F1", F1)
TP = tf.count_nonzero(tf.cast(predictions, dtype=tf.int32) * tf.cast(y_preds, dtype=tf.int32), dtype=tf.float32)
TN = tf.count_nonzero((tf.cast(predictions, dtype=tf.int32) - 1) * (tf.cast(y_preds, dtype=tf.int32) - 1),dtype=tf.float32)
FP = tf.count_nonzero(tf.cast(predictions, dtype=tf.int32) * (tf.cast(y_preds, dtype=tf.int32) - 1),dtype=tf.float32)
FN = tf.count_nonzero((tf.cast(predictions, dtype=tf.int32) - 1) * tf.cast(y_preds, dtype=tf.int32), dtype=tf.float32)
batch_precision = TP / (TP + FP)
batch_recall = TP / (TP + FN)
batch_f1 = 2 * ((batch_precision * batch_recall) / (batch_precision + batch_recall))
tf.summary.scalar("batch_precision", batch_precision)
tf.summary.scalar("batch_recall", batch_recall)
tf.summary.scalar("batch_f1", batch_f1)
# Define Training procedure
# Uncomment this if using exp decay
# global_step = tf.Variable(0, name="global_step", trainable=False)
# starter_learning_rate = learning_rate
# learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, 500, 0.96, staircase=True)
tf.summary.scalar("learning rate", learning_rate)
optimizer = tf.train.AdamOptimizer(learning_rate)
train_step = optimizer.minimize(loss)
# train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)
return train_step, accuracy, loss, recall, rec_update, precision, pre_update, F1, streaming_accuracy, str_acc_update, batch_precision, batch_recall, batch_f1, y_preds
def init_graph():
""" Initialize the graph and variables for Tensorflow engine """
# initialize and run start operation
init_g = tf.global_variables_initializer()
init_l = tf.local_variables_initializer()
sess = tf.Session()
sess.run(init_g)
sess.run(init_l)
return sess
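# Minimal wiring sketch (not part of the original script) showing how the pieces above fit
# together; `vocab_size` and `seq_len` are assumed to come from the pre_process step, and
# pre-trained embeddings are not used here:
#
#   x_ph, y_ph, keep_prob_ph = define_placeholders(seq_len)
#   logits, preds, l2 = build_graph(x_ph, vocab_size, 128, keep_prob_ph, seq_len,
#                                   filter_sizes=[3, 4, 5], num_filters=100,
#                                   initW=None, pretrained=False)
#   train_op, acc, loss, *rest = define_optimizer(1e-3, logits, y_ph, l2, preds, 0.0)
#   sess = init_graph()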
def training_step(i, update_dev_data, update_train_data, update_test_data, x_placeholder, y_placeholder,
dropout_placeholder,
x_train_batch, y_train_batch, x_dev_batch, y_dev_batch, x_test, y_test,
dropout_keep_prob, training_step, accuracy, loss, sess,
predictions, train_writer, test_writer,
merged, recall, rec_update, precision, pre_update, F1, streaming_accuracy, str_acc_update, batch_precision,
batch_recall, batch_f1, logits, y_preds, verbose, multichannel):
"""
Function representing a single iteration during training.
Returns a tuple of accuracy and loss statistics.
"""
if(multichannel):
x_train_batch = np.transpose(x_train_batch,axes=[0,2,1])
x_dev_batch = np.transpose(x_dev_batch,axes=[0,2,1])
x_test = np.transpose(x_test,axes=[0,2,1])
# the backpropagation training step
sess.run(training_step,
feed_dict={x_placeholder: x_train_batch, y_placeholder: y_train_batch,
dropout_placeholder: dropout_keep_prob})
# evaluating model performance for printing purposes
# evaluation used to later visualize how well the model did at a particular time in the training
train_a = [] # Array of training-accuracy for a single iteration
train_str_a = [] # Array of streaming training-accuracy
train_c = [] # Array of training-cost for a single iteration
train_r = [] # Array of streaming training-recall
train_p = [] # Array of streaming training-precision
train_f = [] # Array of streaming training-F1
train_hl = [] # Array of train hamming loss
dev_a = [] # Array of dev-accuracy for a single iteration
dev_c = [] # Array of dev-cost for a single iteration
dev_r = [] # Array of dev-recall for a single iteration
dev_p = [] # Array of dev-precision for a single iteration
dev_f = [] # Array of dev-F1 for a single iteration
dev_hl = [] # Array of dev hamming loss
test_a = [] # Array of test-accuracy for a single iteration
test_c = [] # Array of test-cost for a single iteration
test_r = [] # Array of test-recall for a single iteration
test_p = [] # Array of test-precision for a single iteration
test_f = [] # Array of test-F1 for a single iteration
test_hl = [] # Array of test-hamming loss
test_class_precision = [] #Array of precision for each class
test_class_recall = [] #Array of precision for each class
test_class_ap = [] #Array of avg precision for each class
test_class_f1 = [] #Array of f1 for each class
test_m_f = [] #Array of macro f1 for test set
# Compute streaming recall, precision, accuracy on train set
train_recall, train_precision, train_str_acc = sess.run([rec_update, pre_update, str_acc_update],
feed_dict={x_placeholder: x_train_batch,
y_placeholder: y_train_batch,
dropout_placeholder: dropout_keep_prob})
# If stats for train-data should be updated, compute loss and accuracy for the batch and store it
if update_train_data:
train_acc, train_loss, train_preds, train_logits, summary, train_f1, y_tr_pred = sess.run([accuracy, loss, predictions, logits, merged, F1, y_preds],
feed_dict={x_placeholder: x_train_batch,
y_placeholder: y_train_batch,
dropout_placeholder: dropout_keep_prob})
train_hls = hamming_loss(train_preds, y_tr_pred)
tf.summary.scalar("hamming_loss",train_hls)
train_writer.add_summary(summary, i)
train_a.append(train_acc)
train_c.append(train_loss)
train_r.append(train_recall)
train_p.append(train_precision)
train_f.append(train_f1)
train_str_a.append(train_str_acc)
train_hl.append(train_hls)
if(verbose):
print("train loss: {}".format(train_loss))
print("train batch accuracy: {}".format(train_acc))
print("train recall: {}".format(train_recall))
print("train precision: {}".format(train_precision))
print("train micro-averaged f1: {}".format(train_f1))
print("train streaming accuracy: {}".format(train_str_acc))
print("train hamming loss: {}".format(train_hls))
# If stats for dev-data should be updated, compute loss and accuracy for the batch and store it
if update_dev_data:
dev_acc, dev_loss, dev_preds, dev_logits, summary, dev_recall, dev_precision, dev_f1, y_d_pred = sess.run(
[accuracy, loss, predictions, logits, merged, batch_recall, batch_precision, batch_f1, y_preds],
feed_dict={x_placeholder: x_dev_batch,
y_placeholder: y_dev_batch,
dropout_placeholder: 1.0})
dev_hls = hamming_loss(dev_preds, y_d_pred)
tf.summary.scalar("hamming_loss",dev_hls)
dev_a.append(dev_acc)
dev_c.append(dev_loss)
dev_r.append(dev_recall)
dev_p.append(dev_precision)
dev_f.append(dev_f1)
dev_hl.append(dev_hls)
test_writer.add_summary(summary, i)
if(verbose):
print("dev loss: {}".format(dev_loss))
print("dev accuracy: {}".format(dev_acc))
print("dev recall: {}".format(dev_recall))
print("dev precision: {}".format(dev_precision))
print("dev micro-averaged f1: {}".format(dev_f1))
print("dev hamming loss: {}".format(dev_hls))
# At the end of training, test on the held-out ground truth testset
if update_test_data:
test_acc, test_loss, test_preds, test_logits, test_recall, test_precision, test_f1, y_t_pred= sess.run([accuracy, loss, predictions, logits, batch_recall, batch_precision, batch_f1, y_preds],
feed_dict={x_placeholder: x_test,
y_placeholder: y_test,
dropout_placeholder: 1.0})
test_hls = hamming_loss(test_preds, y_t_pred)
test_macro_f1 = f1_score(y_test, test_preds, average='macro')
test_a.append(test_acc)
test_c.append(test_loss)
test_r.append(test_recall)
test_p.append(test_precision)
test_f.append(test_f1)
test_hl.append(test_hls)
test_m_f.append(test_macro_f1)
if(verbose):
print("test loss: {}".format(test_loss))
print("test accuracy: {}".format(test_acc))
print("test recall: {}".format(test_recall))
print("test precision: {}".format(test_precision))
print("test micro-averaged f1: {}".format(test_f1))
print("macro averaged f1: {}".format(test_macro_f1))
print("test hamming loss: {}".format(test_hls))
# For each class
precision = dict()
recall = dict()
average_precision = dict()
for i in range(NUM_CLASSES):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
test_preds[:, i])
average_precision[i] = average_precision_score(y_test[:, i], test_preds[:, i])
f1_per_class = f1_score(y_test, test_preds, average=None)
test_class_precision.append(" ".join(map(lambda x: str(x), precision.values())))
test_class_recall.append(" ".join(map(lambda x: str(x), recall.values())))
test_class_ap.append(" ".join(map(lambda x: str(x), average_precision.values())))
test_class_f1.append(" ".join(map(lambda x: str(x), f1_per_class)))
if(verbose):
for i in range(NUM_CLASSES):
print("precision for class {}: {}".format(i, precision[i]))
print("recall for class {}: {}".format(i, recall[i]))
print("average_precision for class {}: {}".format(i, average_precision[i]))
print("f1 for class {}: {}".format(i, f1_per_class[i]))
    return train_a, train_c, train_r, train_p, train_f, train_str_a, train_hl, dev_a, dev_c, dev_r, dev_p, dev_f, dev_hl, test_a, test_c, test_r, test_p, test_f, test_hl, test_class_precision, test_class_recall, test_class_ap, test_m_f
def hype_grid(args):
""" Grid search for hyperparameter tuning """
learning_rates = [0.0001, 0.001, 0.003, 0.05, 0.03]
dropout_rates = [0.1, 0.2, 0.3, 0.6, 0.8]
l2reglambda_rates = [0.1,0.2,0.3,0.6,0.8]
if(args.pretrained):
# Preprocess data to the right format
x, vocab_processor, y, x_test, y_test, W = pre_process.combine_labels_features(
args.featurestrain,
args.labelstrain,
args.featurestest,
args.labelstest,
args.vectors,
args.vectordim,
args.maxdocumentsize
)
else:
# Preprocess data to the right format
x, vocab_processor, y, x_test, y_test = pre_process.combine_labels_features(
args.featurestrain,
args.labelstrain,
args.featurestest,
args.labelstest,
"",
args.vectordim,
args.maxdocumentsize,
)
x_train, x_dev, y_train, y_dev = pre_process.split(x, y, args.testsplit, vocab_processor.vocabulary_)
sequence_length = x_train.shape[1]
results = []
results.append("learning_rate,dropout_rate,l2reglamda,test_accuracy")
for learning_rate in learning_rates:
for dropout_rate in dropout_rates:
for l2lambda in l2reglambda_rates:
#To be able to run hyperparam tuning on the same graph
tf.reset_default_graph()
print(
"Trying following " + str(learning_rate) + " learning rate and " + str(dropout_rate) + ' dropout rate and l2reglambda: ' + str(l2lambda))
if(args.pretrained):
                    test_accuracy = main(sequence_length, len(vocab_processor.vocabulary_), x_train, y_train, x_dev, y_dev,
                                x_test, y_test, W=W, pretrained=True, num_epochs=args.epochs, batch_size=args.batchsize, vectorDim=args.vectordim,
                                learning_rate=learning_rate, dropout_keep_prob=dropout_rate, filter_sizes=args.filtersizes,
                                num_filters=args.numfilters,
                                l2_reg_lambda=l2lambda)
else:
                    test_accuracy = main(sequence_length, len(vocab_processor.vocabulary_), x_train, y_train, x_dev, y_dev,
                                x_test, y_test, num_epochs=args.epochs, batch_size=args.batchsize, vectorDim=args.vectordim,
                                learning_rate=learning_rate, dropout_keep_prob=dropout_rate, filter_sizes=args.filtersizes,
                                num_filters=args.numfilters,
                                l2_reg_lambda=l2lambda, maxiterations=args.maxiterations)
print('Test accuracy ' + str(test_accuracy))
                results.append(str(learning_rate) + "," + str(dropout_rate) + "," + str(l2lambda) + "," + str(test_accuracy))
np.savetxt('./results/tuning/tuning.txt', np.array(results), delimiter=',', fmt="%s")
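# The tuning file written above is a plain CSV whose rows match the header,
# e.g. (values purely illustrative):
#
#   learning_rate,dropout_rate,l2reglamda,test_accuracy
#   0.001,0.3,0.2,0.8412
#
# hype_random below follows the same pattern but samples the hyperparameter
# values uniformly at random instead of enumerating fixed lists.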
def hype_random(args):
"""
Random search for hyperparameter tuning
"""
learning_rates = np.random.uniform(0.0001, 0.03, 10).tolist()
dropout_rates = np.random.uniform(0.1, 0.8, 1).tolist()
l2reglambda_rates = np.random.uniform(0.1, 0.7, 10).tolist()
if(args.pretrained):
# Preprocess data to the right format
x, vocab_processor, y, x_test, y_test, W = pre_process.combine_labels_features(
args.featurestrain,
args.labelstrain,
args.featurestest,
args.labelstest,
args.vectors,
args.vectordim,
args.maxdocumentsize
)
else:
# Preprocess data to the right format
x, vocab_processor, y, x_test, y_test = pre_process.combine_labels_features(
args.featurestrain,
args.labelstrain,
args.featurestest,
args.labelstest,
"",
args.vectordim,
args.maxdocumentsize,
)
x_train, x_dev, y_train, y_dev = pre_process.split(x, y, args.testsplit, vocab_processor.vocabulary_)
sequence_length = x_train.shape[1]
results = []
results.append("learning_rate,dropout_rate,l2reglamda,test_accuracy")
for learning_rate in learning_rates:
for dropout_rate in dropout_rates:
for l2lambda in l2reglambda_rates:
#To be able to run hyperparam tuning on the same graph
tf.reset_default_graph()
print(
"Trying following " + str(learning_rate) + " learning rate and " + str(dropout_rate) + ' dropout rate and l2reglambda: ' + str(l2lambda))
if(args.pretrained):
                    test_accuracy = main(sequence_length, len(vocab_processor.vocabulary_), x_train, y_train, x_dev, y_dev,
                                x_test, y_test, W=W, pretrained=True, num_epochs=args.epochs, batch_size=args.batchsize, vectorDim=args.vectordim,
                                learning_rate=learning_rate, dropout_keep_prob=dropout_rate, filter_sizes=args.filtersizes,
                                num_filters=args.numfilters,
                                l2_reg_lambda=l2lambda)
else:
                    test_accuracy = main(sequence_length, len(vocab_processor.vocabulary_), x_train, y_train, x_dev, y_dev,
                                x_test, y_test, num_epochs=args.epochs, batch_size=args.batchsize, vectorDim=args.vectordim,
                                learning_rate=learning_rate, dropout_keep_prob=dropout_rate, filter_sizes=args.filtersizes,
                                num_filters=args.numfilters,
                                l2_reg_lambda=l2lambda, maxiterations=args.maxiterations)
print('Test accuracy ' + str(test_accuracy))
                results.append(str(learning_rate) + "," + str(dropout_rate) + "," + str(l2lambda) + "," + str(test_accuracy))
np.savetxt('./results/tuning/tuning.txt', np.array(results), delimiter=',', fmt="%s")
def main(sequence_length, vocabSize, x_train, y_train, x_dev, y_dev, x_test, y_test, W = [], pretrained = False,
vectorDim=300, learning_rate=0.01, dropout_keep_prob=0.7,
batch_size=64, num_epochs=100, filter_sizes=[3, 4, 5], num_filters=128, l2_reg_lambda=0.0,
output="./results", maxiterations=100000000000000, verbose=False, plot=False, multichannel=False):
"""
Orchestrates the training, initiates and builds graph, performs training, saves results
"""
# Containers for results
train_accuracy = []
train_streaming_accuracy = []
train_loss = []
train_recall = []
train_precision = []
train_f1 = []
train_hl = []
dev_accuracy = []
dev_loss = []
dev_recall = []
dev_precision = []
dev_f1 = []
dev_hl = []
test_accuracy = []
test_loss = []
test_recall = []
test_precision = []
test_f1 = []
test_hl = []
test_class_precision = []
test_class_recall = []
test_class_ap = []
test_m_f = []
# Get placeholders
x_placeholder, y_placeholder, dropout_placeholder = define_placeholders(sequence_length, multichannel=multichannel)
# Build graph and get necessary variables
if(pretrained):
logits, predictions, l2_loss = build_graph(x_placeholder, vocabSize, vectorDim, dropout_placeholder,
sequence_length, filter_sizes, num_filters, W, pretrained=True, multichannel=multichannel)
else:
logits, predictions, l2_loss = build_graph(x_placeholder, vocabSize, vectorDim, dropout_placeholder,
sequence_length, filter_sizes, num_filters, W, pretrained=False, multichannel=multichannel)
# Define optimizer and get reference to operations to train with
training_step_tf, accuracy, cross_entropy_loss, recall, rec_update, precision, \
pre_update, F1, streaming_accuracy, str_acc_update, batch_precision, batch_recall, batch_f1, y_preds = define_optimizer(
learning_rate, logits, y_placeholder, l2_loss,
predictions, l2_reg_lambda)
# Initialize TF
sess = init_graph()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Merge all the summaries and write them to tensorboard
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(output + "/tensorboard" + '/train',
sess.graph)
test_writer = tf.summary.FileWriter(output + "/tensorboard" + '/test')
# Train NumEpochs over the entire train set
# Each iteration uses BatchSize number of examples
epoch = 1
total_i = 0
while epoch <= num_epochs:
i = 0
# Generate batches
batches = pre_process.batch_iter(
list(zip(x_train, y_train)), batch_size, num_epochs)
for batch in batches:
if(total_i < maxiterations):
x_batch, y_batch = zip(*batch)
x_batch = np.array(list(x_batch))
y_batch = np.array(list(y_batch))
train_test = False
dev_test = False
test_test = False
if i % 10 == 0:
# save checkpoint
saver.save(sess, output + "/models/model.ckpt")
train_test = True
dev_test = True
test_test = False
if(len(dev_f1) > 0):
print("######### epoch: {} , iteration: {}, dev f1: {} ##########".format(epoch, i, dev_f1[-1]))
else:
print("######### epoch: {} , iteration: {}, dev f1: -1 ##########".format(epoch, i))
a, c, r, p, f, stra, hl, da, dc, dr, dp, df1, dhl, ta, tc, tr, tp, tf1, thl, tcp, tcr, tca, tmf = training_step(i * epoch, dev_test,
train_test, test_test,
x_placeholder, y_placeholder,
dropout_placeholder, x_batch,
y_batch,
x_dev, y_dev, x_test, y_test,
dropout_keep_prob,
training_step_tf, accuracy,
cross_entropy_loss, sess,
predictions, train_writer,
test_writer, merged, recall,
rec_update, precision,
pre_update, F1,
streaming_accuracy,
str_acc_update, batch_precision,
batch_recall, batch_f1, logits, y_preds, verbose, multichannel)
# Update training stats
train_accuracy += a
train_streaming_accuracy += stra
train_loss += c
train_recall += r
train_precision += p
train_f1 += f
train_hl += hl
dev_accuracy += da
dev_loss += dc
dev_recall += dr
dev_precision += dp
dev_f1 += df1
dev_hl += dhl
test_accuracy += ta
test_loss += tc
test_recall += tr
test_precision += tp
test_f1 += tf1
test_hl += thl
test_class_precision += tcp
test_class_recall += tcr
test_class_ap += tca
test_m_f += tmf
i += 1
total_i += 1
epoch += 1
# Compute stats on the test set:
train_test = False
dev_test = False
test_test = True
a, c, r, p, f, stra, hl, da, dc, dr, dp, df1, dhl, ta, tc, tr, tp, tf1, thl, tcp, tcr, tca, tmf = training_step(i * epoch, dev_test,
train_test, test_test,
x_placeholder, y_placeholder,
dropout_placeholder, x_batch,
y_batch,
x_dev, y_dev, x_test, y_test,
dropout_keep_prob,
training_step_tf, accuracy,
cross_entropy_loss, sess,
predictions, train_writer,
test_writer, merged, recall,
rec_update, precision,
pre_update, F1,
streaming_accuracy,
str_acc_update, batch_precision,
batch_recall, batch_f1, logits, y_preds, verbose, multichannel)
# Update training stats
test_accuracy += ta
test_loss += tc
test_recall += tr
test_precision += tp
test_f1 += tf1
test_hl += thl
test_class_precision += tcp
test_class_recall += tcr
test_class_ap += tca
test_m_f += tmf
# Save the variables to disk.
save_model(output, sess, saver)
# Save results
np.savetxt('./results/train_stats/train_acc.txt', np.array(train_accuracy), delimiter=',')
np.savetxt('./results/train_stats/train_str_acc.txt', np.array(train_streaming_accuracy), delimiter=',')
np.savetxt('./results/train_stats/train_loss.txt', np.array(train_loss), delimiter=',')
np.savetxt('./results/train_stats/train_recall.txt', np.array(train_recall), delimiter=',')
np.savetxt('./results/train_stats/train_precision.txt', np.array(train_precision), delimiter=',')
np.savetxt('./results/train_stats/train_f1.txt', np.array(train_f1), delimiter=',')
np.savetxt('./results/train_stats/train_hamming_loss.txt', np.array(train_hl), delimiter=',')
np.savetxt('./results/train_stats/dev_acc.txt', np.array(dev_accuracy), delimiter=',')
np.savetxt('./results/train_stats/dev_loss.txt', np.array(dev_loss), delimiter=',')
np.savetxt('./results/train_stats/dev_recall.txt', np.array(dev_recall), delimiter=',')
np.savetxt('./results/train_stats/dev_precision.txt', np.array(dev_precision), delimiter=',')
np.savetxt('./results/train_stats/dev_f1.txt', np.array(dev_f1), delimiter=',')
np.savetxt('./results/train_stats/dev_hamming_loss.txt', np.array(dev_hl), delimiter=',')
np.savetxt('./results/train_stats/test_acc.txt', np.array(test_accuracy), delimiter=',')
np.savetxt('./results/train_stats/test_loss.txt', np.array(test_loss), delimiter=',')
np.savetxt('./results/train_stats/test_recall.txt', np.array(test_recall), delimiter=',')
np.savetxt('./results/train_stats/test_precision.txt', np.array(test_precision), delimiter=',')
np.savetxt('./results/train_stats/test_f1.txt', np.array(test_f1), delimiter=',')
    np.savetxt('./results/train_stats/test_hamming_loss.txt', np.array(test_hl), delimiter=',')
### Authors: <NAME> (CEA Saclay), <NAME> (CEA Saclay)
### Contact: <EMAIL>, <EMAIL>
###
### Note:
### Please see https://desi.lbl.gov/trac/attachment/wiki/TargetSelectionWG/TSTeleconMinutes/DESITS210113/QSO_completeness_Ravoux_13_01_2021_v2.pdf for details about the MgII fitter
###
### the "optimal"/chosen parameters are (can be modify when desi_qso_mgii_afterburner is called):
### lambda_width = 250
### max_sigma = 200
### min_sigma = 10
### min_deltachi2 = 16
### min_signifiance_A = 3
### min_A = 0
import sys
import numpy as np
from astropy.table import Table
from scipy.optimize import curve_fit
from scipy.ndimage.filters import gaussian_filter
import redrock.templates
from desispec.io import read_spectra
from desispec.coaddition import coadd_cameras
from desispec.interpolation import resample_flux
import logging
logger = logging.getLogger("mgii_afterburner")
def load_redrock_templates(template_dir=None):
'''
< COPY from prospect.plotframes to avoid to load prospect in desispec >
Load redrock templates; redirect stdout because redrock is chatty
'''
saved_stdout = sys.stdout
sys.stdout = open('/dev/null', 'w')
try:
templates = dict()
for filename in redrock.templates.find_templates(template_dir=template_dir):
tx = redrock.templates.Template(filename)
templates[(tx.template_type, tx.sub_type)] = tx
except Exception as err:
sys.stdout = saved_stdout
raise(err)
sys.stdout = saved_stdout
return templates
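# Usage sketch (illustrative): the returned dictionary is keyed by
# (template_type, sub_type) tuples, e.g.
#
#   templates = load_redrock_templates()
#   tx = templates[('QSO', '')]          # assuming a QSO template is installed
#   print(tx.wave.shape, tx.flux.shape, tx.nbasis)
#
# create_model() below uses exactly this keying to pick the template that
# matches each redrock best fit (SPECTYPE, SUBTYPE).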
def create_model(spectra, redshifts,
archetype_fit=False,
archetypes_dir=None,
template_dir=None):
'''
< COPY from prospect.plotframes to avoid to load prospect in desispec >
Returns model_wave[nwave], model_flux[nspec, nwave], row matched to redshifts,
which can be in a different order than spectra.
- redshifts must be entry-matched to spectra.
'''
if archetype_fit:
from redrock.archetypes import All_archetypes
if np.any(redshifts['TARGETID'] != spectra.fibermap['TARGETID']) :
raise RuntimeError('zcatalog and spectra do not match (different targetids)')
templates = load_redrock_templates(template_dir=template_dir)
#- Empty model flux arrays per band to fill
model_flux = dict()
for band in spectra.bands:
model_flux[band] = np.zeros(spectra.flux[band].shape)
for i in range(len(redshifts)):
zb = redshifts[i]
if archetype_fit:
archetypes = All_archetypes(archetypes_dir=archetypes_dir).archetypes
archetype = archetypes[zb['SPECTYPE']]
coeff = zb['COEFF']
for band in spectra.bands:
wave = spectra.wave[band]
wavehash = hash((len(wave), wave[0], wave[1], wave[-2], wave[-1], spectra.R[band].data.shape[0]))
dwave = {wavehash: wave}
mx = archetype.eval(zb['SUBTYPE'], dwave, coeff, wave, zb['Z'])
model_flux[band][i] = spectra.R[band][i].dot(mx)
else:
tx = templates[(zb['SPECTYPE'], zb['SUBTYPE'])]
coeff = zb['COEFF'][0:tx.nbasis]
model = tx.flux.T.dot(coeff).T
for band in spectra.bands:
mx = resample_flux(spectra.wave[band], tx.wave*(1+zb['Z']), model)
model_flux[band][i] = spectra.R[band][i].dot(mx)
#- Now combine, if needed, to a single wavelength grid across all cameras
if spectra.bands == ['brz'] :
model_wave = spectra.wave['brz']
mflux = model_flux['brz']
elif np.all([ band in spectra.bands for band in ['b','r','z'] ]) :
br_split = 0.5*(spectra.wave['b'][-1] + spectra.wave['r'][0])
rz_split = 0.5*(spectra.wave['r'][-1] + spectra.wave['z'][0])
keep = dict()
keep['b'] = (spectra.wave['b'] < br_split)
keep['r'] = (br_split <= spectra.wave['r']) & (spectra.wave['r'] < rz_split)
keep['z'] = (rz_split <= spectra.wave['z'])
model_wave = np.concatenate( [
spectra.wave['b'][keep['b']],
spectra.wave['r'][keep['r']],
spectra.wave['z'][keep['z']],
] )
mflux = np.concatenate( [
model_flux['b'][:, keep['b']],
model_flux['r'][:, keep['r']],
model_flux['z'][:, keep['z']],
], axis=1 )
else :
raise RuntimeError("create_model: Set of bands for spectra not supported")
return model_wave, mflux
def get_spectra(spectra_name, redrock_name, lambda_width, index_to_fit,
template_dir=None, archetypes_dir=None):
"""
Get the spectra and the best fit model from a given spectra and redrock file.
Args:
spectra_name (str): The name of the spectra file.
redrock_name (str): The name of the redrock file associated to the spectra
file.
lambda_width (float): The width in wavelength (in Angstrom) considered
        for the fitting around the MgII peak
        index_to_fit (boolean numpy array): boolean array of size 500 specifying which spectra have to be used
template_dir (str): If the redrock template variable is not loaded by
the desi environment, specify the template path
archetypes_dir (str): If not None, use the archetypes templates in the
path specified
Returns:
        target_id (numpy array): Array containing target id of the object
        to fit
        redshift_redrock (numpy array): Array containing the redshift of the
        object to fit
flux (numpy array): Array containing the full flux arrays of every
object to fit
ivar_flux (numpy array): Array containing the inverse variance arrays
of every object to fit
model_flux (numpy array): Array containing the best fit redrock model
for every object to fit
wavelength (numpy array): Array containing the wavelength
        index_with_fit (boolean numpy array): boolean array of index_to_fit size masking indices where the MgII fitter is not applied
"""
spectra = read_spectra(spectra_name)
spectra = spectra.select(targets=spectra.fibermap["TARGETID"][index_to_fit])
if 'brz' not in spectra.bands:
spectra = coadd_cameras(spectra)
redshifts = Table.read(redrock_name, 'REDSHIFTS')[index_to_fit]
# astropy 5.x incorrectly interprets blank strings as masked values; undo
if hasattr(redshifts['SUBTYPE'], 'mask'):
redshifts['SUBTYPE'][redshifts['SUBTYPE'].mask] = ''
if archetypes_dir is not None:
model_wave, model_flux = create_model(spectra,
redshifts,
archetype_fit=True,
archetypes_dir=archetypes_dir,
template_dir=template_dir)
else:
model_wave, model_flux = create_model(spectra,
redshifts,
archetype_fit=False,
archetypes_dir=None,
template_dir=template_dir)
redshift_redrock = redshifts["Z"]
wavelength = spectra.wave['brz']
mgii_peak_1, mgii_peak_2 = 2803.5324, 2796.3511
mean_mgii_peak = (mgii_peak_1 + mgii_peak_2)/2
non_visible_peak = (redshift_redrock+1) * mean_mgii_peak < np.min(wavelength) + lambda_width/2
non_visible_peak |= (redshift_redrock+1) * mean_mgii_peak > np.max(wavelength) - lambda_width/2
index_with_fit = ~non_visible_peak
target_id = spectra.fibermap["TARGETID"][index_with_fit]
redshift_redrock = redshift_redrock[index_with_fit]
flux = spectra.flux['brz'][index_with_fit]
ivar_flux = spectra.ivar['brz'][index_with_fit]
model_flux = model_flux[index_with_fit]
return target_id, redshift_redrock, flux, ivar_flux, model_flux, wavelength, index_with_fit
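# Usage sketch (file names are placeholders): select the first 10 fibers of a
# coadd and prepare them for the MgII fit with a 250 A window,
#
#   index_to_fit = np.zeros(500, dtype=bool)
#   index_to_fit[:10] = True
#   (tid, z, flux, ivar, model, wave,
#    index_with_fit) = get_spectra("coadd-EXAMPLE.fits", "redrock-EXAMPLE.fits",
#                                  lambda_width=250, index_to_fit=index_to_fit)
#
# index_with_fit flags, among the selected spectra, those whose MgII doublet
# actually falls inside the observed wavelength range (see non_visible_peak
# above).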
def fit_mgii_line(target_id,
redshift_redrock,
flux,
ivar_flux,
model_flux,
wavelength,
lambda_width,
add_linear_term=False,
gaussian_smoothing_fit=None,
mask_mgii=None):
"""
Fitting routine. Fit a Gaussian peak on preselected spectra and return the
main parameters of the fit including parameter errors.
Args:
        target_id (numpy array): Array containing target id of the object
        to fit
        redshift_redrock (numpy array): Array containing the redshift of the
        object to fit
flux (numpy array): Array containing the full flux arrays of every
object to fit
ivar_flux (numpy array): Array containing the inverse variance arrays
of every object to fit
model_flux (numpy array): Array containing the best fit redrock model
for every object to fit
wavelength (numpy array): Array containing the wavelength
lambda_width (float): The width in wavelength (in Angstrom) considered
        for the fitting around the MgII peak
add_linear_term (boolean): Add a linear term to the Gaussian peak term
to fit the continuum.
gaussian_smoothing_fit (float): If not None, the spectra is smoothed by
the given value before the fit
        mask_mgii (float): If not None, mask a region near the MgII peak with
        the given width to fit a double MgII peak (in progress)
Returns:
fit_results (numpy array): Array containing the parameters of the fit
"""
mgii_peak_1 = 2803.5324
mgii_peak_2 = 2796.3511
mean_mgii_peak = (mgii_peak_1 + mgii_peak_2)/2
mgii_peak_observed_frame = (redshift_redrock+1) * mean_mgii_peak
if(add_linear_term):
fit_results = np.zeros((target_id.shape[0],11))
else:
fit_results = np.zeros((target_id.shape[0],9))
for i in range(len(flux)):
centered_wavelenght = wavelength - mgii_peak_observed_frame[i]
mask_wave = np.abs(centered_wavelenght) < lambda_width/2
        if mask_mgii is not None:
mask_wave &= np.abs(centered_wavelenght) > mask_mgii/2
flux_centered = flux[i][mask_wave]
model_flux_centered = model_flux[i][mask_wave]
with np.errstate(divide='ignore',invalid='ignore'): # if zero division --> sigma = np.inf --> in curve_fit and the rest we only have 1/sigma --> 1/inf = 0.0
sigma_flux_centered = (1 /np.sqrt(ivar_flux[i]))[mask_wave]
if(gaussian_smoothing_fit is not None):
flux_centered = gaussian_filter(flux_centered, gaussian_smoothing_fit)
if(add_linear_term):
fit_function = lambda x, A, sigma, B, C : A * np.exp(-1.0 * (x)**2 / (2 * sigma**2)) + B + C * x
try:
popt, pcov = curve_fit(fit_function,
xdata=centered_wavelenght[mask_wave],
ydata=flux_centered,
sigma=sigma_flux_centered,
p0=[1.0,lambda_width/2,np.mean(flux_centered),0.0],
bounds=([-np.inf,-np.inf,-np.inf,-0.01], [np.inf,np.inf,np.inf,0.01]))
except RuntimeError:
print("Fit not converged")
popt = np.full((4),0)
pcov = np.full((4,4),0)
fit_results[i][1:4] = popt[0:3]
fit_results[i][4:7] = np.diag(pcov)[0:3]
fit_results[i][7] = popt[3]
fit_results[i][8] = np.diag(pcov)[3]
else:
            fit_function = lambda x, A, sigma, B : A * np.exp(-1.0 * (x)**2 / (2 * sigma**2)) + B
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 26 11:38:14 2021
@author: christian
"""
from astropy import constants as const
from astropy.io import fits
from astropy.convolution import Gaussian1DKernel, convolve
import datetime as dt
import math
import matplotlib.backends.backend_pdf
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, AutoMinorLocator
import numpy as np
from scipy.optimize import curve_fit
import scipy.stats as stats
from spectres import spectres
from tqdm import tqdm
import unyt as u
import warnings
def add_weight(line_pos, line_wid, w, err, pix_wid):
"""Lines up the two spectra by the amount of light absorpted in the area
around the line.
Parameters
----------
line_pos : float
The position of the absorption line.
line_wid : float
The width of the absorption line.
w : Array like
A subarray with wavelength values around the line.
err : Array like
The corresponding error array.
pix_wid : float
The width of a pixel in wavelength.
Returns
-------
    Two variables:
        weight : Array like
            An array that weights the corresponding flux values for the
            wavelength array w.
        npix : float
            The effective number of pixels within the line window.
"""
i = 0
j = -1
npix = len(w)
# Initially every pixel is weighted by their inverse variance
weight = np.divide(np.ones(len(w)), np.square(err))
# Pixel at a lower wavelength than the specified window have weight = 0
while w[i] + pix_wid / 2 < line_pos - line_wid:
weight[i] = 0.0
i += 1
npix -= 1
# Pixel at a higher wavelength than the specified window have weight = 0
while w[j] - pix_wid / 2 > line_pos + line_wid:
weight[j] = 0.0
j -= 1
npix -= 1
# The pixels on the edge of the window have a reduced weight according to
# their width within the window.
weight[i] = weight[i] * (w[i] + pix_wid / 2 -
line_pos + line_wid) / pix_wid
weight[j] = weight[j] * (pix_wid / 2 +
line_pos + line_wid - w[j]) / pix_wid
# Number of pixels within the window takes into account fractions of pixels
npix = npix - 2.0 + (pix_wid / 2 + line_pos + line_wid - w[j]) / \
pix_wid + (w[i] + pix_wid / 2 - line_pos + line_wid) / pix_wid
# Normalising the weight by the heighest weight
weight = np.divide(weight, max(weight))
return weight, npix
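# Worked example (illustrative numbers): for a line at 5500.0 A with
# line_wid = 0.5 A and a pixel width of 0.05 A, pixels whose centres fall
# outside [5499.5, 5500.5] A get weight 0, the two pixels straddling the window
# edges keep a fractional weight, and every interior pixel keeps its
# inverse-variance weight 1/err**2 before the final normalisation by the
# largest weight. npix is then the window width in (fractional) pixels,
# here roughly 1.0 / 0.05 = 20.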
def addSN(flux, time, vmag, DarkN, SkyN, n, norm_f, Boff=0.654, Roff=-0.352,
Ioff=-0.7, HARSN=1000, HAR=False):
"""Adds noice to the inserted flux. The noise is dependent on the
brightness of the target, the observation time, the dark noice and the
sky noice. It simulates noice for a solar star. This simulates noise for
a HERMES spectrum according to the capabilities of the spectrograph and
telescope.
Parameters
----------
flux : Array like
An array holding the flux.
time : float
Observing time (s).
vmag : float
Brightness in the V band (mag).
DarkN : float
Dark noise total photon count.
SkyN : float
Relative sky brightness.
n : int
Band identifier (0: B, 1: V, 2: R, 3: IR).
norm_f : Array like
Normalised flux array.
Boff : float
B band offset from V band (mag). Solar offset by default.
Roff : float
R band offset from V band (mag). Solar offset by default.
Ioff : float
IR band offset from V band (mag). Solar offset by default.
HARSN : float
Previous SNR in the original HARPS spectrum.
(negligible by default)
HAR : Boolean
        True if this was a HARPS spectrum before. Will take the previous
        noise of the spectrum into account.
Returns
-------
A variable:
results : Library
Contains:
'SN' keyword for the resulting SN as a float
'SNpp' keyword for SN per pixel as a float
'e' keyword for the error numpy array
'f' keyword for the new flux array
"""
results = {}
# Determine the number of electrons observed in the specified band
if n == 0:
ne = time / 3600 * 10**(-0.4 * (0.993 * (vmag + Boff) - 24.05))
nepp = ne / 3.81 # number of measured electrons per pixel
# Find the SNR of the initial HARPS spectrum for the wavelength region.
# Increases SNR per pixel for HERMES cause of larger pixels
try:
harSN = min(HARSN[31:36]) * 2
except TypeError:
harSN = HARSN * 2
harSNpp = harSN / 3.81 # HARPS SNR per HERMES pixel
elif n == 1:
ne = time / 3600 * 10**(-0.4*(1.18 * vmag - 26.25))
nepp = ne / 4.69
try:
harSN = min(HARSN[52:56]) * 2
except TypeError:
harSN = HARSN * 2
harSNpp = harSN / 4.69
elif n == 2:
ne = time / 3600 * 10**(-0.4*(1.07 * (vmag + Roff) - 24.98))
nepp = ne / 3.74
try:
harSN = min(HARSN[66:70]) * 2
except TypeError:
harSN = HARSN * 2
harSNpp = harSN / 3.74
elif n == 3:
ne = time / 3600 * 10**(-0.4*(0.89 * (vmag + Ioff) - 22.33))
nepp = ne / 3.74
harSN = HARSN * 2
harSNpp = harSN / 3.74
# Calculate the SNR (and SNR per pixel) and the number of sky pixel.
skypp = SkyN * nepp * pow(2.5, vmag-17.552)
SN = np.sqrt(ne)
SNpp = math.sqrt(nepp + skypp)
# Compute results for HARPS spectra (calculate individual uncertainties and
# add random noise to the spectrum)
if HAR:
if harSN < SN:
results['e'] = np.abs(np.divide(flux,
np.sqrt(np.abs(norm_f))) / harSNpp)
results['f'] = flux + DarkN * flux / ne
results['SN'] = harSN
results['SNpp'] = harSNpp
else:
SNadd = 1/math.sqrt(1/(SNpp**2) + 1/(harSNpp**2))
adderr = flux / SNadd
results['f'] = np.add(flux, np.random.normal(0, adderr,
len(flux))) + DarkN * flux / ne
results['e'] = np.abs(np.divide(flux,
np.sqrt(np.abs(norm_f))) / SNpp)
results['SN'] = SN
results['SNpp'] = SNpp
# Compute results for HERMES spectra (calculate individual uncertainties and
# add random noise to the spectrum)
else:
results['SN'] = SN
results['SNpp'] = SNpp
results['e'] = np.abs(np.divide(flux, np.sqrt(np.abs(norm_f))) / SNpp)
results['f'] = np.add(flux, np.random.normal(0, results['e'],
len(flux)))
print(max(np.divide(results['f'], results['e'])))
return results
def addSN_simple(flux, SNR, norm_f):
"""Adds noice to the inserted flux. This is the most simple way to do it.
We take only a SNR and project noise of the projected strength on the flux
array.
Parameters
----------
flux : Array like
An array holding the flux.
SNR : float
        The signal to noise ratio that shall be simulated on the flux
array.
norm_f : Array like
An array holding the normalised flux.
Returns
-------
A variable:
results: Library
Contains:
            'SN' keyword for the resulting SN as a float
'SNpp' keyword for SN per pixel as a float
'e' keyword for the error numpy array
'f' keyword for the new flux array
"""
results = {}
# Calculate the flux uncertainties and apply random noise to the spectrum
results['SN'] = SNR
results['SNpp'] = SNR
results['e'] = np.abs(np.divide(flux, np.sqrt(np.abs(norm_f))) / SNR)
results['f'] = np.add(flux, np.random.normal(0, results['e'], len(flux)))
return results
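# Usage sketch: degrade a normalised spectrum to a signal-to-noise ratio of 50,
#
#   noisy = addSN_simple(flux, 50, norm_f)
#   flux_noisy, flux_err = noisy['f'], noisy['e']
#
# Unlike addSN above, no exposure time, magnitude or band information is used;
# the per-pixel uncertainty is simply |flux| / (sqrt(|norm_f|) * SNR).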
def air_indexEdlen53(l, t=15., p=760.):
"""Return the index of refraction of air at given temperature, pressure,
and wavelength in Angstroms.
The formula is from Edlen 1953, provided directly by ESO.
Parameters
----------
l : float
Vacuum wavelength in Angstroms
t : float
Temperature in °C. (Don't actually change this from the default!)
p : float
Pressure in mmHg. (Don't actually change this from the default!)
Returns
-------
n : float
The index of refraction for air at the given parameters.
"""
n = 1e-6 * p * (1 + (1.049-0.0157*t)*1e-6*p) / 720.883 / (1 + 0.003661*t)\
* (64.328 + 29498.1/(146-(1e4/l)**2) + 255.4/(41-(1e4/l)**2))
n = n + 1
return n
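# Usage sketch: the correction is a few parts in 1e4 in the optical, e.g.
#
#   n = air_indexEdlen53(5500.0)       # refractive index of air at 5500 A
#   wave_vac = 5500.0 * n              # first-order air -> vacuum conversion
#
# air2vacESO below iterates this conversion until successive estimates agree
# to within the 2e-12 tolerance used in its loop.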
def air2vacESO(air_wavelengths_array):
"""Take an array of air wavelengths and return an array of vacuum
wavelengths in the same units.
Parameters
----------
    air_wavelengths_array : `unyt.unyt_array`
A list of wavelengths in air, with dimensions length. Will be converted
to Angstroms internally.
Returns
-------
`unyt.unyt_array`
A unyt_array of wavelengths in vacuum, in the original units.
"""
reshape = False
original_units = air_wavelengths_array.units
if air_wavelengths_array.ndim == 2:
# We need to flatten the array to 1-D, then reshape it afterwards.
reshape = True
original_shape = air_wavelengths_array.shape
tqdm.write(str(original_shape))
air_wavelengths_array = air_wavelengths_array.flatten()
air_wavelengths_array.convert_to_units(u.angstrom)
tolerance = 2e-12
num_iter = 100
vacuum_wavelengths_list = []
# tqdm.write('Converting air wavelengths to vacuum using Edlen 1953.')
for i in range(0, len(air_wavelengths_array)):
new_wavelength = air_wavelengths_array[i].value
old_wavelength = 0.
iterations = 0
past_iterations = [new_wavelength]
while abs(old_wavelength - new_wavelength) > tolerance:
old_wavelength = new_wavelength
n_refraction = air_indexEdlen53(new_wavelength)
new_wavelength = air_wavelengths_array[i].value * n_refraction
iterations += 1
past_iterations.append(new_wavelength)
if iterations > num_iter:
print(past_iterations)
raise RuntimeError('Max number of iterations exceeded!')
vacuum_wavelengths_list.append(new_wavelength)
vacuum_array = u.unyt_array(vacuum_wavelengths_list, u.angstrom)
if reshape:
tqdm.write(f'Converting back to original shape: {original_shape}.')
# Reshape the array back to its original shape.
return vacuum_array.reshape(original_shape).to(original_units)
else:
return vacuum_array.to(original_units)
def center_line(w, f):
"""Measures the center of an absorption line.
Parameters
----------
w : Array like
        A subarray with wavelengths within a certain line.
    f : Array like
        A subarray with flux values within a certain line.
Returns
-------
A variable:
x_min : float
The wavelength value of the line after centering on the minimum.
"""
a = np.argmin(f)
w_pix = w[a] - w[a-1]
f2 = f[a-1:a+2]
A = np.array([[1, -1, 1], [0, 0, 1], [1, 1, 1]])
B = np.array([f2[0], f2[1], f2[2]])
X = np.linalg.inv(A).dot(B)
w_npix = - X[1] / (2*X[0])
if np.absolute(w_npix) > 1:
print("Error: Minimum not close to minimum")
if X[0] < 0:
print("Error: Minimum is actually maximum")
    # Return the line centre from the vertex of the fitted parabola
return w[a] + (w_npix * w_pix)
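# Worked example of the parabola fit above (illustrative numbers): with the
# three fluxes around the minimum f2 = [0.50, 0.30, 0.45] at pixel offsets
# -1, 0, +1, solving A.X = B gives
#   X[0] = (0.50 + 0.45) / 2 - 0.30 = 0.175    (curvature)
#   X[1] = (0.45 - 0.50) / 2        = -0.025   (slope)
# so w_npix = -X[1] / (2 * X[0]) ~ 0.071 pixels, and the returned centre is
# w[a] + 0.071 * w_pix.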
def Gauss(x, a, x0, sigma, sigma0):
return 1 - a * np.exp(-(x - x0)**2 / (2 * (sigma0**2 + sigma**2)))
def determine_resolving_power(w, f, deg=2, band=0, specres=28000, w2=[],
f2=[]):
"""Determines the resolving power of an absorption feature.
Parameters
----------
w: Array like
        A subarray with wavelengths within a certain line.
f: Array like
A subarray with flux values within a certain line.
deg: int
The degree of the fitting polynomial.
band: int
The band in which the line is within the HERMES spectrograph
Returns
-------
    Three variables:
        R : float
            The resolving power measured from the fitted line width.
        sig_final : float
            The additional line broadening (in km/s) beyond the assumed
            instrumental width.
        sig_err : float
            The uncertainty of the additional broadening (in km/s).
"""
# calculate the sigma of the band
boun = [[4715, 4900], [5649, 5873], [6478, 6737], [7585, 7885]]
c = const.c.to('km/s')
sig_final = 0
sig = (boun[band][1] + boun[band][0]) / (2.0 * 2.355 * specres)
# Fit a Gaussian to the line
mean = sum(w * f) / sum(f)
warnings.filterwarnings("ignore", "Covariance of the parameters could not"
+ " be estimated")
try:
popt, pcov = curve_fit(lambda x, a, x0, sigma:
Gauss(x, a, x0, sigma, 0), w, f,
p0=[1-min(f), mean, sig])
except RuntimeError:
return -1, -1, -1
# w_plot = np.linspace(w[0], w[-1], 100)
# plt.step(w_plot, Gauss(w_plot, popt[0], popt[1], popt[2]))
# plt.step(w, f)
# plt.show()
# plt.clf()
if 0.1 < popt[0] and popt[0] < 0.7:
sig2 = popt[2]
if np.diag(pcov)[2] > 0:
sig2_err = np.sqrt(np.diag(pcov)[2])
else:
return -1, -1, -1
R = float(popt[1] / (2.355 * popt[2]))
else:
return -1, -1, -1
sig_b = np.square(sig2) - np.square(sig)
if sig_b < 0:
sig_abs = np.sqrt(np.abs(sig_b))
sig_err = np.sqrt(np.square(sig2 * sig2_err * c.value /
(popt[1] * sig_abs)) +
np.square(sig_abs * c.value / popt[1]**2))
sig_final = 0
elif sig_b >= 0:
sig_b2 = np.sqrt(sig_b)
sig_err = np.sqrt(np.square(sig2 * sig2_err * c.value /
(popt[1] * sig_b2)) +
np.square(sig_b2 * c.value / popt[1]**2))
sig_final = sig_b2 * c.value / popt[1]
return R, sig_final, sig_err
def determine_resolving_power2(w, f, deg=2, band=0, specres=28000, w2=[],
f2=[]):
"""Determines the resolving power of an absorption feature.
Parameters
----------
w: Array like
        A subarray with wavelengths within a certain line.
f: Array like
A subarray with flux values within a certain line.
deg: int
The degree of the fitting polynomial.
band: int
The band in which the line is within the HERMES spectrograph
Returns
-------
    Three variables:
        R : float
            The resolving power measured from the fitted line width.
        sig_final : float
            The additional line broadening (in km/s) beyond the assumed
            instrumental width.
        sig_err : float
            The uncertainty of the additional broadening (in km/s).
"""
# calculate the sigma of the band
boun = [[4715, 4900], [5649, 5873], [6478, 6737], [7585, 7885]]
c = const.c.to('km/s')
sig_final = 0
sig = (boun[band][1] + boun[band][0]) / (2.0 * 2.355 * specres)
# Fit a Gaussian to the line
mean = sum(w * f) / sum(f)
warnings.filterwarnings("ignore", "Covariance of the parameters could not"
+ " be estimated")
try:
popt, pcov = curve_fit(lambda x, a, x0, sigma:
Gauss(x, a, x0, sigma, sig), w, f,
p0=[1-min(f), mean, 4])
except RuntimeError:
return -1, -1, -1
if 0.1 < popt[0] and popt[0] < 0.7:
sig_b = np.abs(popt[2])
if np.diag(pcov)[2] > 0:
sig_b_err = np.sqrt(np.diag(pcov)[2])
else:
return -1, -1, -1
R = float(popt[1] / (2.355 * np.abs(popt[2])))
else:
return -1, -1, -1
w_plot = np.linspace(w[0], w[-1], 100)
# plt.step(w_plot, Gauss(w_plot, popt[0], popt[1], popt[2], sig))
# plt.step(w, f)
# plt.show()
# plt.clf()
sig_final = sig_b * c.value / popt[1]
sig_err = sig_b_err * c.value / popt[1]
return R, sig_final, sig_err
def determine_radvel(ref_flux, tar_flux, pixel, rv_weight, mpix=0,
plot_correlation=False, band=0,
mid_wav=0.0, secondary=False):
"""Determine the radial velocity between two Spectra
Parameters
----------
ref_flux : Array like
A subarray holding the reference's flux.
tar_flux : Array like
A subarray holding the targets flux.
pixel : float
The width of a pixel to determine the total shift in wavelength.
rv_weight : Array like
        An array with the weights of all pixels within this correction.
mpix : float
Eliminates pixels from the process as a fraction of total pixels.
        E.g. 0.2 will eliminate 20% of pixels from both edges of the
        spectrum (this will reduce the number of pixels by 40%).
plot_correlation : Boolean
If True, plots the correlation function. Mainly used for
debugging.
band : Int
        Queries the band in which this correction happens. This is only
relevant for the IR band (band=3) for which the algorithm needs
to be cautious because of sky correction residuals.
mid_wav : float
The middle of the correction to determine the radial velocity in
km/s.
secondary : Boolean
If True, the correction is in a more precise correction mode to
correct only small radial velocities (<8 HERMES pixel).
Returns
-------
A variable:
        shift : float
            The wavelength shift between the spectra (in the same wavelength
            units as pixel). If mid_wav is given, the correlation curve in
            velocity space (corr_rv) and the correlation values (corr) are
            returned as well.
"""
max_pix = 20
c = const.c.to('km/s')
tar_flux = np.array(tar_flux)
if band == 3:
tar_flux[tar_flux > 0.5] = np.ones_like(tar_flux[tar_flux > 0.5]) * 0.5
if mpix > 0.0 and mpix < 0.3:
pix_elim = int(len(ref_flux)*mpix)
ref_flux = ref_flux[pix_elim:-pix_elim]
corr = np.array([])
rv_weight = np.array(rv_weight)
rv_weight = np.where([f < 0 for f in tar_flux], 0.0, rv_weight)
if all(rv_weight == 0):
rv_weight = np.ones_like(rv_weight)
k = 0
while len(ref_flux)+k <= len(tar_flux):
weight = rv_weight[k:len(ref_flux)+k]
corr_temp = np.divide(
np.sum(np.multiply(np.multiply(
ref_flux, tar_flux[k:len(ref_flux)+k]), weight)),
np.multiply(np.sqrt(np.sum(np.multiply(
weight, np.square(ref_flux)))),
np.sqrt(np.sum(np.multiply(
weight, np.square(
tar_flux[k:len(ref_flux)+k]))))))
corr = np.append(corr, corr_temp)
k = k+1
pix_zero = int(len(corr) / 2)
if plot_correlation is True:
plt.plot(range(len(corr)), corr)
plt.show()
plt.close()
if secondary is True:
min_i = np.argmax(corr[pix_zero-max_pix:pix_zero+max_pix]) + \
pix_zero-max_pix
else:
min_i = np.argmax(corr)
shift = (min_i - pix_zero) * pixel
if plot_correlation is True:
plt.plot(range(len(ref_flux)) + min_i, 1 - ref_flux)
plt.plot(range(len(tar_flux)), 1 - tar_flux)
plt.show()
plt.close()
if mid_wav != 0:
corr_range = np.linspace(0, len(corr) - 1, len(corr))
corr_rv = (corr_range - pix_zero) * pixel * c.value / mid_wav
return shift, corr_rv, corr
else:
return shift
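# Worked example (illustrative numbers): with a pixel width of 0.05 A, a
# correlation peak found 3 pixels away from the centre gives
#   shift = 3 * 0.05 = 0.15 A
# and, for mid_wav = 5700 A, a velocity of roughly
#   0.15 * 299792.458 / 5700 ~ 7.9 km/s,
# which is the same conversion applied to corr_rv when mid_wav is supplied.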
def prepare_reference_rv(wave_r_old, flux_r_old, wave_t, res_power, center_w,
stacked=False, single_out=True, harps=False,
test=False):
"""Convolves and resamples a high resolution reference spectrum onto the
target wavelength grid.
Parameters
----------
wave_r_old : Array like
The old reference wavelength array.
flux_r_old : Array like
The old reference flux array.
wave_t : Array like
        The wavelength array onto which the spectrum will be projected.
res_power : Array like
The resolving power of the target spectrum.
center_w : float
Wavelength at the centre of the array. Used to determine the
width of the convolving gaussian from the resolving power.
stacked : Boolean
If True, assumes the spectrum to be a stacked spectrum or to have
a "generic" resolving power layout as a result of being convolved
with HERMES resolving power.
single_out : Boolean
If True, doesn't return f_conv or w_temp (True by default).
harps : Boolean
Recognises HARPS spectra and takes into account their limited
resolving power.
Returns
-------
Variable:
flux_r : Numpy Array
The resulting reference flux array that fits the target wavelength
array.
f_conv : Numpy Array
The flux array on the old wavelength grid.
w_temp : Numpy Array
The old wavelength grid.
"""
if stacked is True:
band_correction = [0.8, 0.775, 0.75, 0.83]
j = int(wave_t[0] / 1000 - 4)
res_power = 28000 * band_correction[j]
if harps is True:
harps_res = 75000
res_power = 1 / np.sqrt(
1/np.square(res_power) - 1/np.square(harps_res))
w_temp = wave_r_old[np.bitwise_and(wave_t[0] - 10 < wave_r_old,
wave_r_old < wave_t[-1] + 10)]
f_temp = flux_r_old[np.bitwise_and(wave_t[0] - 10 < wave_r_old,
wave_r_old < wave_t[-1] + 10)]
w_pix = w_temp[1] - w_temp[0]
sigma = center_w / (2.355 * res_power * w_pix)
if test is True:
c = const.c.to('km/s')
lw2_wav = w_temp[0] * 15.0 / c.value / w_pix
sigma2 = w_temp[int(len(w_temp)/2)] / (2.355 * 28000 * w_pix)
mu = w_temp[0]
x = np.linspace(mu - 5*sigma, mu + 5*sigma, 100)
y = np.linspace(mu - lw2_wav, mu + lw2_wav, 100)
plt.plot(x, stats.norm.pdf(x, mu, sigma))
plt.plot(x, stats.norm.pdf(x, mu, sigma2))
plt.axvline(mu + lw2_wav, color='black')
plt.axvline(mu - lw2_wav, color='black')
print(np.sum(stats.norm.pdf(y, mu, sigma)) /
np.sum(stats.norm.pdf(y, mu, sigma2)))
plt.show()
Gauss = Gaussian1DKernel(stddev=sigma)
f_conv = convolve(f_temp, Gauss)
flux_r = spectres(wave_t, w_temp, f_conv)
if test is True:
print(sigma)
plt.plot(wave_t, flux_r)
plt.plot(w_temp, f_temp)
plt.show()
plt.clf()
if single_out is True:
return flux_r
else:
return flux_r, f_conv, w_temp
def prepare_reference(wave_r_old, flux_r_old, res_power,
stacked=False):
"""Convolves and resamples a high resolution reference spectrum onto the
target wavelength grid.
Parameters
----------
wave_r_old : Array like
The old reference wavelength array.
flux_r_old : Array like
The old reference flux array.
res_power : Array like
The resolving power of the target spectrum.
stacked : Boolean
If True, assumes the spectrum to be a stacked spectrum or to have
a "generic" resolving power layout as a result of being convolved
with HERMES resolving power.
Returns
-------
Variable:
flux_r : Array like
The resulting reference flux array that fits the target wavelength
array.
"""
if stacked is True:
band_correction = [0.8, 0.775, 0.75, 0.83]
j = int(wave_r_old[0] / 1000 - 4)
res_power = 28000 * band_correction[j]
w_temp = wave_r_old
f_temp = flux_r_old
w_pix = w_temp[1] - w_temp[0]
sigma = w_temp[int(len(w_temp)/2)] / (2.355 * res_power * w_pix)
Gauss = Gaussian1DKernel(stddev=sigma)
f_conv = convolve(f_temp, Gauss)
return f_conv
def lineup(f_ref, f_tar, e_ref, e_tar, band=0, low_perc=False,
rv_weight=[0], Li_plot=False):
"""Lines up the two spectra by the amount of light absorpted in the
area around the line.
Parameters
----------
f_ref: Array like
A subarray with flux values around a certain line for the
reference spectrum.
f_tar: Array like
A subarray with flux values around a certain line for the
target spectrum.
e_ref: Array like
A subarray with error values around a certain line for the
reference spectrum.
e_tar: Array like
A subarray with error values around a certain line for the
target spectrum.
band : int
The band in which we want to use this algorithm.
(0: B, 1: V, 2: R, 3: IR)
low_perc : Boolean
If True, ignores the lowest 75% of flux values in the reference
spectrum and the corresponding pixel in the target.
rv_weight : Array like
        Gives relative weights to all pixels. Note that the array length
must be the same as the length of f_ref and e_ref.
Returns
-------
A variable:
        raise_tar : float
            A number by which the target spectrum is multiplied in order
            to line it up with the reference.
"""
if Li_plot is True:
plt.step(np.linspace(0, len(f_ref)-1, len(f_ref)), f_ref)
plt.step(np.linspace(0, len(f_tar)-1, len(f_tar)), f_tar)
plt.axvline(len(f_ref)/2)
plt.show()
plt.clf()
i = band
perc = 1.0
if low_perc is True:
perc = 0.25
b_coeff = [3, 3, 3, 3]
b_coeff2 = [5, 5, 5, 5]
if all(rv_weight == 0) or len(rv_weight) != len(e_tar):
rv_weight = np.ones_like(e_tar)
weight = 1 / np.square(e_tar) * rv_weight
cut_value = np.sort(f_ref)[int(len(f_ref)*(1-perc))]
f_tar = f_tar[f_ref > cut_value]
weight = weight[f_ref > cut_value]
# weight = np.ones_like(weight[f_ref > cut_value])
f_ref = f_ref[f_ref > cut_value]
sum1 = sum(f_tar * weight)
if sum1 == 0:
return False
raise_tar = sum(f_ref * weight) / sum1
m_flag = False
for j in range(4):
f_tar_new = f_tar * raise_tar
e_tar_new = e_tar * raise_tar
con = f_tar_new < np.max(
[1.05 + e_tar_new * b_coeff[i],
1.0 + e_tar_new * b_coeff2[i]])
if np.median(f_tar_new[con]) > 1.05 or m_flag is True:
con = np.bitwise_and(con, f_tar_new > np.min(
[0.9 - e_tar_new * b_coeff[i],
1.0 - e_tar_new * b_coeff2[i]]))
m_flag = True
f_ref_new = f_ref[con]
weight_new = weight[con]
f_tar_new = f_tar_new[con]
raise_tar = raise_tar * sum(f_ref_new * weight_new) / \
sum(f_tar_new * weight_new)
return raise_tar
def line_prep_plot(center_w, center_f, linew, linew_old, window,
post_resolv_w, post_resolv_f, target_resolv_w,
target_resolv_f, post_norm_w, post_norm_f, reference_norm_w,
reference_norm_f, weights, twavcon):
"""
Makes a plot for section 2 of the Lehmann et al. 2021 paper.
"""
c = const.c.to('km/s')
l1_vel = 15
l2_vel = 400
lower_bound = linew - window*1.4
upper_bound = linew + window
pre_resolv_w, pre_resolv_f = center_w, center_f
pre_norm_w, pre_norm_f = target_resolv_w, target_resolv_f
reference_EW_w, reference_EW_f, target_EW_w, target_EW_f = \
reference_norm_w, reference_norm_f, post_norm_w, post_norm_f
l1_wav = linew * l1_vel / c.value
l2_wav = linew * l2_vel / c.value
output1 = 'Paper_Prep_plot1.pdf'
output2 = 'Paper_Prep_plot2.pdf'
output3 = 'Paper_Prep_plot3.pdf'
output4 = 'Paper_Prep_plot4.pdf'
output5 = 'MCR_spectrum.pdf'
pdf1 = matplotlib.backends.backend_pdf.PdfPages(output1)
pdf2 = matplotlib.backends.backend_pdf.PdfPages(output2)
pdf3 = matplotlib.backends.backend_pdf.PdfPages(output3)
pdf4 = matplotlib.backends.backend_pdf.PdfPages(output4)
pdf5 = matplotlib.backends.backend_pdf.PdfPages(output5)
fig, ax = plt.subplots(nrows=1, ncols=1)
fig.set_size_inches(8.5, 4.5)
ax.step(center_w, center_f, lw=4, where='mid', color='blue',
label='Reference spectrum')
ax.axhline(1, color='black', ls='--', lw=4)
ax.axvline(linew, color='black', ls='-', lw=4, label='Reference Centroid')
ax.axvline(linew_old-0.02, color='black', ls='dotted', lw=4,
label='Line list wavelength')
ax.set_xlim(lower_bound, upper_bound)
ax.set_ylim(0.4, 1.05)
# ax.set_xlabel(r'\LARGE Wavelength [\AA]')
ax.set_ylabel(r'Normalized flux')
ax.legend(loc='lower left', handlelength=1.0)
ax.xaxis.set_major_locator(MultipleLocator(0.5))
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_major_locator(MultipleLocator(0.2))
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.set_rasterization_zorder(-10)
fig.subplots_adjust(left=0.01, right=0.99, bottom=0.02, top=0.98,
wspace=0.0, hspace=0.0)
pdf1.savefig(fig, bbox_inches='tight', pad_inches=0.02)
pdf1.close()
ax.clear()
ax.step(pre_resolv_w, pre_resolv_f, lw=4, where='mid', color='blue',
label='Unconvolved reference')
ax.step(post_resolv_w, post_resolv_f, lw=4, where='mid', color='purple',
label='Convolved reference')
ax.step(target_resolv_w, target_resolv_f, lw=4, where='mid', color='red',
label='HERMES target')
ax.axhline(1, color='black', ls='--', lw=4)
ax.axvline(linew, color='black', ls='-', lw=4)
ax.set_xlim(lower_bound, upper_bound)
ax.set_ylim(0.4, 1.05)
ax.set_xlabel(r'\LARGE Wavelength [\AA]')
ax.set_ylabel(r'Normalized flux')
ax.legend(loc='lower left', handlelength=1.0)
ax.xaxis.set_major_locator(MultipleLocator(0.5))
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_major_locator(MultipleLocator(0.2))
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.set_rasterization_zorder(-10)
fig.subplots_adjust(left=0.01, right=0.99, bottom=0.02, top=0.98,
wspace=0.0, hspace=0.0)
pdf2.savefig(fig, bbox_inches='tight', pad_inches=0.02)
pdf2.close()
ax.clear()
ax.step(pre_norm_w, pre_norm_f-0.1, lw=4, where='mid', color='red',
label='Pre-norm target')
ax.step(post_norm_w, post_norm_f, lw=4, where='mid', color='orange',
label='Post-norm target')
ax.step(reference_norm_w, reference_norm_f, lw=4, where='mid',
color='purple', label='Reference')
ax.axhline(1, color='black', ls='--', lw=4)
ax.axvline(linew, color='black', ls='-', lw=4)
ax.set_xlim(linew - l2_wav, linew + l2_wav)
ax.set_ylim(0.4, 1.05)
# ax.set_xlabel(r'wavelength [\AA]')
ax.set_ylabel(r'Normalized flux')
ax.legend(loc='lower left', handlelength=1.0)
ax.xaxis.set_major_locator(MultipleLocator(5))
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_major_locator(MultipleLocator(0.2))
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.set_rasterization_zorder(-10)
fig.subplots_adjust(left=0.01, right=0.99, bottom=0.02, top=0.98,
wspace=0.0, hspace=0.0)
pdf3.savefig(fig, bbox_inches='tight', pad_inches=0.02)
pdf3.close()
ax.clear()
ax.step(reference_EW_w, reference_EW_f, lw=4, where='mid', color='purple',
label='Reference')
ax.step(target_EW_w, target_EW_f, lw=4, where='mid', color='orange',
label='HERMES target')
ax.step(target_EW_w[twavcon], weights/max(weights)/3 + 0.39, lw=4,
where='mid', label="Weights")
ax.axhline(1, color='black', ls='--', lw=4)
ax.set_xlim(lower_bound, upper_bound)
ax.axvline(linew, color='black', ls='-', lw=4)
ax.axvline(linew - l1_wav, color='red', ls='--', lw=4)
ax.axvline(linew + l1_wav, color='red', ls='--', lw=4)
ax.set_ylim(0.4, 1.05)
ax.set_xlabel(r'Wavelength [\AA]')
ax.set_ylabel(r'Normalized flux')
ax.legend(loc='lower left', handlelength=1.0)
ax.xaxis.set_major_locator(MultipleLocator(0.5))
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_major_locator(MultipleLocator(0.2))
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.set_rasterization_zorder(-10)
fig.subplots_adjust(left=0.01, right=0.99, bottom=0.02, top=0.98,
wspace=0.0, hspace=0.0)
pdf4.savefig(fig, bbox_inches='tight', pad_inches=0.02)
pdf4.close()
ax.clear()
ax.step(pre_resolv_w, pre_resolv_f, lw=2.5, where='mid',
label='R>100,000')
ax.step(post_resolv_w, post_resolv_f, lw=2.5, where='mid',
            label=r'R$\sim$28,000')
ax.axhline(1, color='black', ls='--', lw=2.5)
ax.set_xlim(linew - l2_wav-0.7, linew + l2_wav-8)
ax.set_ylim(0.4, 1.05)
ax.set_xlabel(r'Wavelength [\AA]')
ax.set_ylabel(r'Normalized flux')
ax.legend(loc='lower left', handlelength=1.0)
ax.xaxis.set_major_locator(MultipleLocator(4))
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_major_locator(MultipleLocator(0.2))
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.set_rasterization_zorder(-10)
fig.subplots_adjust(left=0.01, right=0.99, bottom=0.02, top=0.98,
wspace=0.0, hspace=0.0)
pdf5.savefig(fig, bbox_inches='tight', pad_inches=0.02)
pdf5.close()
ax.clear()
def measure_EW(w, f, err, weight, line_wid):
"""Uses the weight and the given pixel to measure the EW
Parameters
----------
w : Array like
A subarray with wavelength values around a certain
line for the reference spectrum.
f : Array like
A subarray with flux values around a certain
line for the reference spectrum.
err: Array like
The flux error array
weight : Array like
A subarray with weights for flux values around
a certain line for the target spectrum.
line_wid : float
Width of the absorption line window in which to measure the EW.
Returns
-------
    Two variables:
EW : float
The equivalent width of the line.
EW_sig : float
            The uncertainty of the equivalent width.
"""
absorb = np.subtract(1.0, f)
abs_bar = np.sum(np.multiply(absorb, weight)) / np.sum(weight)
sig = np.sqrt(np.sum(np.multiply(np.square(weight), np.square(err)))
/ np.square(np.sum(weight)))
EW = abs_bar * line_wid * 1000
EWs = sig * line_wid * 1000
return EW, EWs
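# Worked example (illustrative numbers): if the weighted mean absorption depth
# over a 1.0 A window is abs_bar = 0.12, the function returns
#   EW = 0.12 * 1.0 * 1000 = 120
# i.e. the equivalent width expressed in milli-Angstrom (assuming line_wid is
# given in Angstrom), with EW_sig scaled in the same way.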
def prepare_target_wavelegnth(wave_new, wave_old, flux):
"""Re-samples a spectrum with a high pixelwidth to a spectrum with low
pixelwidth. This will not increase the accuracy of the spectrum.
Parameters
----------
wave_new : Array like
The new wavelength array on which the flux is projected.
wave_old : Array like
        The old wavelength array for the flux.
flux : Array like
The flux array of the spectrum.
Returns
-------
Variable:
flux_new : Array like
The resulting flux array for the wave_new wavelength array.
"""
flux_new = []
wav_disp_new = (wave_new[-1] - wave_new[0]) / len(wave_new)
wav_disp_old = (wave_old[-1] - wave_old[0]) / len(wave_old)
if wav_disp_new > wav_disp_old:
        print('Error: Old wavelength array is finer than the new one.')
return 1
if wave_new[0] < wave_old[0] or wave_new[-1] > wave_old[-1]:
print('Error: New wavelength array must be contained '
+ 'within the old one.')
return 1
for w_new in wave_new:
i = np.argwhere(np.array(wave_old) < w_new)[-1][0]
dist1, dist2 = w_new - wave_old[i], wave_old[i+1] - w_new
w1, w2 = 1 - (dist1 / wav_disp_old), 1 - (dist2 / wav_disp_old)
flux_new = np.append(flux_new, flux[i] * w1 + flux[i+1] * w2)
return flux_new
def readlinelistw(linelistfile):
"""Read a linelist and return the lines Wavelenghts, element, ionisation
and excitation potential.
Parameters
----------
linelistfile : str or Path object
A path to a linelist file to be read.
Returns
-------
    Four separate variables:
w : Numpy array
The wavelength values of all the lines.
elem : Numpy array
The element type of the corresponding lines.
ion : Numpy array
The ionisation of the corresponding lines.
ep : Numpy array
The excitation potential of the corresponding lines.
"""
# print(linelistfile)
with open(linelistfile) as linelist:
lines = linelist.readlines()
del_index = []
w = np.zeros_like(lines, dtype=float)
elem = np.chararray((len(lines),), unicode=True, itemsize=6)
ion = np.ones_like(lines, dtype=int)
ep = np.zeros_like(lines, dtype=float)
for i, line in enumerate(lines):
if line.startswith(';') or line.startswith('#'):
del_index.append(i)
continue
line = line.replace("|", " ")
w[i] = line.split()[1]
elem[i] = line.split()[0]
if all(ws <= 3 for ws in w):
del_index = []
ion = np.array(w)
for i, line in enumerate(lines):
if line.startswith(';') or line.startswith('#'):
del_index.append(i)
continue
line = line.replace("|", " ")
w[i] = float(line.split()[2])
if len(line.split()) > 3:
ep[i] = float(line.split()[3])
w = np.delete(w, del_index)
elem = np.delete(elem, del_index)
ion = np.delete(ion, del_index)
ep = np.delete(ep, del_index)
return w, elem, ion, ep
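# Expected linelist layout (an assumption inferred from the parsing above): one
# whitespace- or '|'-separated row per transition, either
#   Fe  5434.524                      (element, wavelength)
# or, when every second-column value is <= 3 so it is read as the ionisation stage,
#   Fe  1  5434.524  1.011            (element, ionisation, wavelength, exc. potential)
# Rows starting with ';' or '#' are skipped as comments.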
def read_ap_correct(w, elem, ion, ap_number, callibration, ap_weight, band=0):
"""Read the correction file for lines and return the correction values.
Parameters
----------
w: array-like
Wavelength of the lines.
elem: array like
Element of the lines.
ion: array-like
Ionization of the lines.
ap_number : int or array like
The aperture number in which the spectrum was observed. Can be array
if spectrum is observed over multiple nights in different apertures.
callibration : str or Path object
The file in which the aperture scaling corrections are stored.
ap_weight : array like
Contains the weights for each contributing aperture.
Normally this is weighted with the SNR of each spectrum for the
combination process.
Returns
-------
ap_correct: Numpy array
Correction for each line. The array is in the same structure as the
line arrays.
"""
b_ex = [[4000, 8000], [4000, 5000], [5000, 6000], [6000, 7000],
[7000, 8000]]
ap_correct = np.ones_like(w)
ap_correct2 = [[]] * len(ap_number)
if ap_number[0] == 0:
return ap_correct
else:
for j in range(len(ap_number)):
with open(callibration + 'Resolving_map_correction/aperture' +
str(ap_number[j]) + '_correct.dat') as corr_file:
lines = corr_file.readlines()
if j == 0:
w2 = np.zeros_like(lines, dtype=float)
elem2 = np.chararray((len(lines),), unicode=True, itemsize=6)
ion2 = np.ones_like(lines, dtype=int)
ap_correct2[j] = np.ones_like(lines, dtype=float)
for i, line in enumerate(lines):
if line.startswith(';') or line.startswith('#'):
continue
if float(line.split()[2]) < b_ex[band][0] \
or float(line.split()[2]) > b_ex[band][1]:
continue
if j == 0:
elem2[i] = line.split()[0]
ion2[i] = int(line.split()[1])
w2[i] = float(line.split()[2])
ap_correct2[j][i] = float(line.split()[3]) * ap_weight[j]
ap_correct3 = np.sum(ap_correct2, axis=0)
for i in range(len(w)):
try:
corr_index = np.where(np.bitwise_and(np.bitwise_and(
elem2 == elem[i], ion2 == ion[i]), w2 == w[i]))[0][0]
ap_correct[i] = ap_correct3[corr_index]
except IndexError:
ap_correct[i] = 1.0
return ap_correct
def combine_ap(combine_file, spec_name):
"""Find all apertures used in HERMES to combine a resulting aperture array.
The weights of the spectrum are given by the SNR^2.
Parameters
----------
combine_file: str or Path object
The name of the file which contains the necessary information.
Normally kept in the calibration folder.
spec_name: str or Path object
The identifier used in the table for this target
Returns
-------
ap_array: Numpy array
All aperture numbers that participated in the combined spectrum.
weights: Numpy array
The weight of all apertures that are part of the observation.
"""
aperture_array = []
snr_array = []
with open(combine_file, 'r') as comb:
lines = comb.readlines()
for line in lines:
if line.startswith('Name'):
continue
if line.startswith(spec_name):
aperture_array = np.array(line.split(',')[3::5])
snr_array = np.array(line.split(',')[5::5])
snr_array = np.array(snr_array[aperture_array != 'NaN'],
dtype='float')
aperture_array = np.array(
np.array(aperture_array[aperture_array != 'NaN'],
dtype='float'), dtype='int')
if len(aperture_array) == 0:
return [], []
weight_array = np.square(snr_array) / np.sum(np.square(snr_array))
for i in range(len(aperture_array)):
if len(aperture_array) < i+1:
break
indic = np.argwhere(aperture_array == aperture_array[i])
weight_array[i] = np.sum(weight_array[indic])
weight_array = np.delete(weight_array, indic[1:])
aperture_array = np.delete(aperture_array, indic[1:])
aperture_array = np.where([a == 0 for a in aperture_array],
1, aperture_array)
return aperture_array, weight_array
def rHERMES(FITSfile, datahdu=0, SN=False, e_hdu=1, plot_sky=False):
"""Read a HERMES FITS file and returns data information.
Parameters
----------
FITSfile : str or Path object
A path to a HERMES FITS file to be read.
datahdu : int
Decides which data hdulist to read the data from
0 is Flux, 4 is normalized Flux in HERMES spectra.
Returns
-------
dict
A dictionary containing the following key-value pairs:
w : Numpy array
The wavelength array.
f : Numpy array
The flux array.
e : Numpy array
The flux error array, read from the HDU selected by e_hdu.
"""
result = {}
if FITSfile.endswith('.fits'):
with fits.open(FITSfile) as hdulist:
header0 = hdulist[0].header
f = hdulist[datahdu].data
unnorm_f = hdulist[0].data
sky_f = hdulist[2].data
e = hdulist[e_hdu].data
cdelta1 = header0['CDELT1']
crval1 = header0['CRVAL1']
rv_weight = np.ones_like(f)
for i in range(len(sky_f)):
if sky_f[i] < 0:
rv_weight[i] = 0
# create wavelength and error (only 0 values by this point) array
w = np.linspace(0, len(f) - 1, len(f)) * cdelta1 + crval1
# If we want to use the normalized spectrum, we should use a
# normalized error
# if datahdu == 4:
# e = np.divide(np.multiply(e, f), hdulist[0].data)
# write array on output
result['w'] = w
result['f'] = f
result['e'] = e
result['disp'] = w[1] - w[0]
result['rv_weight'] = rv_weight
if SN and 'SNR' in header0:
SNR = header0['SNR']
result['SNR'] = SNR
else:
result['SNR'] = 1000
if plot_sky is True:
print(np.subtract(sky_f, unnorm_f))
fig, ax = plt.subplots(nrows=1, ncols=1)
fig.set_size_inches(8.5, 4.5)
ax.step(w, sky_f, lw=2, where='mid', color='blue',
label='with sky')
ax.step(w, unnorm_f, lw=2, where='mid', color='red',
label='without sky')
ax.axhline(1, color='black', ls='--', lw=4)
ax.set_xlabel(r'\LARGE Wavelength [\AA]')
ax.set_ylabel(r'Normalized flux')
ax.legend(loc='lower left', handlelength=1.0)
ax.set_rasterization_zorder(-10)
plt.show()
return result
else:
wave = np.array([], dtype='float64')
flux = np.array([], dtype='float64')
with open(FITSfile) as data:
for line in data:
if line.startswith('#'):
continue
wave = np.append(wave, float(line.split(',')[0]))
flux = np.abs(np.append(flux, float(line.split(',')[1])))
result['w'] = wave
result['f'] = flux
result['e'] = np.absolute(np.divide(np.power(flux, 0.4), 1000))
result['SNR'] = 1000
return result
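# Illustrative usage (the file name is hypothetical): read the normalised-flux HDU
# and the header SNR of a reduced HERMES spectrum.
#   spec = rHERMES('hermes_target.fits', datahdu=4, SN=True)
#   w, f, e, snr = spec['w'], spec['f'], spec['e'], spec['SNR']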
def r_resolving_map(FITSfile, ap_number, warn_flag=True, print_max_diff=False,
weight=[1]):
"""Read a HERMES FITS file containing a resolving power map as written in
Kos et al 2016.
Parameters
----------
FITSfile : str or Path object
A path to the FITS file to be read.
ap_number : int or array like
Aperture number(s) used for the target spectrum.
Returns
-------
dict
A dictionary containing the following key-value pairs:
w : Numpy array
The wavelength array.
R : Numpy array
Array of resolving powers at all queried lines.
"""
R = [[]] * len(ap_number)
with fits.open(FITSfile, mode='readonly') as hdulist:
if print_max_diff is True:
stuff = hdulist[0].data[hdulist[0].data > 0]
print(len(stuff) / len(hdulist[0].data[0]))
stuff = np.reshape(stuff, (int(len(stuff) /
len(hdulist[0].data[0])),
len(hdulist[0].data[0])))
print(np.amax(np.subtract(np.amax(stuff, axis=0),
np.amin(stuff, axis=0))))
for i in range(len(ap_number)):
header0 = hdulist[0].header
R[i] = np.multiply(hdulist[0].data[ap_number[i]], weight[i])
for r in R[i]:
if (r < 10000 and len(ap_number) == 1) or r == 0:
if warn_flag is True:
print('Warning: Aperture does not contain resolving' +
' power.')
R[i] = np.multiply(hdulist[0].data[ap_number[i]+1],
weight[i])
break
R_full = np.sum(R, axis=0)
cdelta1 = header0['CDELT1']
crval1 = header0['CRVAL1']
wav = np.linspace(0, len(R_full) - 1, len(R_full)) * cdelta1 + crval1
return {'w': wav, 'R': R_full}
def rHARPS(FITSfile, obj=False, wavelenmin=False, date_obs=False,
spec_bin=False, med_snr=False, hdnum=False, radvel=False,
coeffs=False, SN=False):
"""Read a HARPS ADP FITS file and return a dictionary of information.
Parameters
----------
FITSfile : str or Path object
A path to a HARPS FITS file to be read.
obj : bool, Default: False
If *True*, the output will contain the contents of the OBJECT FITS
header card.
wavelenmin : bool, Default: False
If *True*, the output will contain the contents of the WAVELMIN FITS
header card.
date_obs : bool, Default: False
If *True*, the output will contain the contents of the DATE-OBS FITS
header card.
spec_bin : bool, Default: False
If *True*, the output will contain the contents of the SPEC_BIN FITS
header card.
med_snr : bool, Default: False
If *True*, the output will contain the contents of the SNR FITS header
card.
hdnum : bool, Default: False
If *True*, the output will contain the contents of the custom-added
HDNUM FITS header card. (Added to unify object identifiers across all
stars, some of which were occasionally identified by things other than
HD number.)
radvel : bool, Default: False
If *True*, the output will contain the contents of the custom-added
RADVEL FITS header card. (Added to unify the radial velocity for each
star, as a small minority of stars had different radial velocity
information in their HIERARCH ESO TEL TAFG RADVEL header cards.)
coeffs : bool, Default: False
If *True*, the output will contain the contents of the various
*ESO DRS CAL TH COEFF LLX* header cards, where *X* ranges from 0 to
287.
Returns
-------
dict
A dictionary containing the following key-value pairs:
w : Numpy array
The wavelength array.
f : Numpy array
The flux array.
e : Numpy array
The estimated error array (HARPS returns no error array by
default).
Optionally
==========
obj : str
The object name from the 'OBJECT' flag.
wlmin : float
The minimum wavelength.
date_obs : datetime object
The date the file was observed.
spec_bin : float
The wavelength bin size.
med_snr : float
The median SNR of the flux array.
hd_num : str
The HD identifier of the star in the format "HDxxxxxx".
radvel : float
The radial velocity of the star in km/s.
If the `coeffs` keyword argument is *True*, there will be 288 entries
of the form "ESO DRS CAL TH COEFF LLX": *value*, where X will range
from 0 to 287.
"""
result = {}
try:
with fits.open(FITSfile) as hdulist:
try:
header0 = hdulist[0].header
header1 = hdulist[1].header
data = hdulist[1].data
w = data.WAVE[0]
gain = header0['GAIN']
# Multiply by the gain to convert from ADUs to photoelectrons
f = data.FLUX[0] * gain
e = 1.e6 * np.absolute(f)
result['w'] = w
result['f'] = f
result['e'] = e
if obj:
result['obj'] = header1['OBJECT']
if wavelenmin:
result['wavelmin'] = header0['WAVELMIN']
if date_obs:
result['date_obs'] = dt.datetime.strptime(
header0['DATE-OBS'], '%Y-%m-%dT%H:%M:%S.%f')
if spec_bin:
result['spec_bin'] = header0['SPEC_BIN']
if med_snr:
result['med_snr'] = header0['SNR']
if hdnum:
result['hdnum'] = header0['HDNUM']
if radvel:
result['radvel'] = header0['RADVEL']
if SN:
SNR = []
for i in range(72):
card = 'HIERARCH ESO DRS SPE EXT SN' + str(i)
SNR.append(header0[card])
result['SN'] = SNR
# If the coeffs keyword is given, return all
# 288 wavelength solution coefficients.
if coeffs:
for i in range(0, 288, 1):
key_string = 'ESO DRS CAL TH COEFF LL{0}'.format(
str(i))
result[key_string] = header0[key_string]
return result
except Exception:
result['HAR'] = 1
header0 = hdulist[0].header
header1 = hdulist[1].header
data = hdulist[1].data
# Convert wavenumber (presumably in 1/cm) to wavelength in Angstroms
w = [1/x[0]*100000000 for x in np.flip(data)]
# Read the flux values from the second column of the (flipped) table
f = [x[1] for x in np.flip(data)]
result['w'] = w
result['f'] = f
result['e'] = np.divide(np.ones_like(w), 1000)
if obj:
result['obj'] = header1['OBJECT']
if wavelenmin:
result['wavelmin'] = header0['WAVELMIN']
if date_obs:
result['date_obs'] = dt.datetime.strptime(
header0['DATE-OBS'], '%Y-%m-%dT%H:%M:%S.%f')
if spec_bin:
result['spec_bin'] = header0['SPEC_BIN']
if med_snr:
result['med_snr'] = header0['SNR']
if hdnum:
result['hdnum'] = header0['HDNUM']
if radvel:
result['radvel'] = header0['RADVEL']
# if SN:
# SNR = []
# for i in range(72):
# card = 'HIERARCH ESO DRS SPE EXT SN' + str(i)
# SNR.append(header0[card])
# result['SN'] = SNR
# If the coeffs keyword is given, return all 288 wavelength solution
# coefficients.
if coeffs:
for i in range(0, 288, 1):
key_string = 'ESO DRS CAL TH COEFF LL{0}'.format(
str(i))
result[key_string] = header0[key_string]
return result
except OSError:
with open(FITSfile) as ascii_table:
w_line = ascii_table.readline()
f_line = ascii_table.readline()
w = [float(x) for x in w_line.split(',')]
f = [float(x) for x in f_line.split(',')]
result['w'] = w
result['f'] = f
result['e'] = np.absolute(np.divide(np.power(f, 0.4), 1000))
return result
def rflatHARPS(FITSfile, obj=False, wavelenmin=False, date_obs=False,
spec_bin=False, med_snr=False, hdnum=False, radvel=False,
coeffs=False, SN=False):
"""Read a HARPS ADP FITS file and return a dictionary of information.
Parameters
----------
FITSfile : str or Path object
A path to a HARPS FITS file to be read.
obj : bool, Default: False
If *True*, the output will contain the contents of the OBJECT FITS
header card.
wavelenmin : bool, Default: False
If *True*, the output will contain the contents of the WAVELMIN FITS
header card.
date_obs : bool, Default: False
If *True*, the output will contain the contents of the DATE-OBS FITS
header card.
spec_bin : bool, Default: False
If *True*, the output will contain the contents of the SPEC_BIN FITS
header card.
med_snr : bool, Default: False
If *True*, the output will contain the contents of the SNR FITS header
card.
hdnum : bool, Default: False
If *True*, the output will contain the contents of the custom-added
HDNUM FITS header card. (Added to unify object identifiers across all
stars, some of which were occasionally identified by things other than
HD number.)
radvel : bool, Default: False
If *True*, the output will contain the contents of the custom-added
RADVEL FITS header card. (Added to unify the radial velocity for each
star, as a small minority of stars had different radial velocity
information in their HIERARCH ESO TEL TAFG RADVEL header cards.)
coeffs : bool, Default: False
If *True*, the output will contain the contents of the various
*ESO DRS CAL TH COEFF LLX* header cards, where *X* ranges from 0 to
287.
Returns
-------
dict
A dictionary containing the following key-value pairs:
w : Numpy array
The wavelength array.
f : Numpy array
The flux array.
e : Numpy array
The estimated error array (HARPS returns no error array by
default).
Optionally
==========
obj : str
The object name from the 'OBJECT' flag.
wlmin : float
The minimum wavelength.
date_obs : datetime object
The date the file was observed.
spec_bin : float
The wavelength bin size.
med_snr : float
The median SNR of the flux array.
hd_num : str
The HD identifier of the star in the format "HDxxxxxx".
radvel : float
The radial velocity of the star in km/s.
If the `coeffs` keyword argument is *True*, there will be 288 entries
of the form "ESO DRS CAL TH COEFF LLX": *value*, where X will range
from 0 to 287.
"""
result = {}
with fits.open(FITSfile) as hdulist:
header0 = hdulist[0].header
f = hdulist[0].data
cdelta1 = header0['CDELT1']
crval1 = header0['CRVAL1']
# Use len(f) - 1 so the wavelength step matches CDELT1 (as in rHERMES and r_resolving_map)
w = np.linspace(0, len(f) - 1, len(f)) * cdelta1 + crval1
# Placeholder error array (all zeros); no error information is read here
e = np.zeros(len(f))
if SN:
SNR = []
for i in range(72):
card = 'HIERARCH ESO DRS SPE EXT SN' + str(i)
SNR.append(header0[card])
result['SN'] = SNR
result['w'] = w
result['f'] = f
result['e'] = e
return result
def HAR2HER(spec, specres, pixelw, band_cor=True):
if max(spec['w']) < 7885.0027:
boun = [[4713.5737, 4901.3360], [5649.1206, 5872.0078],
[6478.3989, 6736.1442]]
npix = 4096 # number of pixels per band
w = np.array(spec['w'])
f = np.array(spec['f'])
wreduced = [[], [], []]
wspec = [[], [], [], []]
freduced = [[], [], []]
avpix = np.ones(3)
sigma = np.ones(3)
if band_cor is True:
band_correction = [0.8, 0.775, 0.73]
else:
band_correction = [1., 1., 1.]
for i in range(3):
wreduced[i] = w[((boun[i][0] - 50) < w) & (w < (boun[i][1] + 50))]
freduced[i] = f[((boun[i][0] - 50) < w) & (w < (boun[i][1] + 50))]
aver = np.zeros(len(wreduced[i])-1)
for j in range(len(wreduced[i])-1):
aver[j] = wreduced[i][j+1] - wreduced[i][j]
avpix[i] = np.average(aver)
minus_pix = int(10.0/avpix[i])
npixold = len(wreduced[i]) - minus_pix*2
wspec[i] = np.linspace(boun[i][0]-40, boun[i][1]+40, num=npixold)
freduced[i] = spectres(wspec[i], wreduced[i], freduced[i])
wreduced[i] = wspec[i]
avpix[i] = wreduced[i][1] - wreduced[i][0]
# Convolving the flux with gaussians (smooth them out)
# Calculate for each band the sigma (from HERMES) and Gaussian
for j in range(3):
sigma[j] = (boun[j][1] + boun[j][0]) / \
(2.0 * 2.355 * specres * band_correction[j] * avpix[j])
# For a broadened spectrum use the factor 2.25
Gauss = Gaussian1DKernel(stddev=sigma[j])
# Convolve the flux with the Gaussian to "blur it out"
freduced[j] = convolve(freduced[j], Gauss)
wnew = [[], [], []]
for j in range(3):
wnew[j] = np.linspace(boun[j][0], boun[j][1], num=npix)
import json
import open3d
import numpy as np
from pathlib import Path
from PIL import Image
from tqdm import tqdm, trange
from scipy.ndimage import map_coordinates
from misc.post_proc import np_coor2xy, np_coory2v
from misc.panostretch import pano_connect_points
from reconstruction.make_obj_mtl_files import make_obj_file, make_obj_file_horizontal, make_mtl_file
from preprocess import preprocess
def xyz_2_coorxy(xs, ys, zs, H, W):
''' Map 3D xyz coordinates to equirectangular image coordinates '''
us = np.arctan2(xs, -ys)
vs = -np.arctan(zs / np.sqrt(xs**2 + ys**2))
coorx = (us / (2 * np.pi) + 0.5) * W
coory = (vs / np.pi + 0.5) * H
return coorx, coory
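# Quick check (illustrative values): a point on the +x axis at camera height maps to
# three quarters of the panorama width and the vertical centre; for a 2048x1024 image
#   xyz_2_coorxy(np.array([1.]), np.array([0.]), np.array([0.]), H=1024, W=2048)
# returns (array([1536.]), array([512.])).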
def create_ceiling_floor_mask(cor_id, H, W):
'''
Binary mask on the equirectangular image
where 1 indicates floor or ceiling
'''
# Prepare 1d ceiling-wall/floor-wall boundary
c_pts = []
f_pts = []
n_cor = len(cor_id)
for i in range(n_cor // 2):
# Ceiling boundary points
xys = pano_connect_points(cor_id[i*2],
cor_id[(i*2+2) % n_cor],
z=-50, w=W, h=H)
c_pts.extend(xys)
# Floor boundary points
xys = pano_connect_points(cor_id[i*2+1],
cor_id[(i*2+3) % n_cor],
z=50, w=W, h=H)
f_pts.extend(xys)
# Sort for interpolate
c_pts = np.array(c_pts)
c_pts = c_pts[np.argsort(c_pts[:, 0] * H - c_pts[:, 1])]
f_pts = np.array(f_pts)
f_pts = f_pts[np.argsort(f_pts[:, 0] * H + f_pts[:, 1])]
#!/usr/bin/env python
#
# <NAME>
# Dartmouth College
# <EMAIL>
#
# This script finds, catalogs, and draws borders around
# interesting paratextual objects and ornaments in page
# images. It is used for the extraction of objects from
# ECCO page images of eighteenth-century texts.
import cv2
import imutils
import os, sys, shutil, glob
import numpy as np
import pickle
import nltk
from nltk.corpus import words
from bs4 import BeautifulSoup
import argparse
# set default options
ocrboundaries = False
single_image = False
# parse arguments
parser = argparse.ArgumentParser(
description='locates objects and annotates ECCO TIF documents')
parser.add_argument('object')
parser.add_argument('--draw-ocr-boundaries',help='place green boxes around paragraphs', dest='ocrboundaries', action='store_true')
parser.add_argument('--single-image', help='mark-up just a single page image', dest='single_image', action='store')
args = parser.parse_args()
object = args.object
if args.ocrboundaries:
ocrboundaries = True
if args.single_image:
single_image_switch = True
single_image = args.single_image
if object is None:
print("Error: need ECCO object")
exit()
# load English language vocabulary
vocab = words.words()
################################################################################
# pre-load and process all images used for pattern matching
# convert to grayscale on load
################################################################################
manicule_gray = cv2.imread('share/manicule1.jpg',0)
arabesque_gray = cv2.imread('share/arabesque.jpg',0)
rosette_gray = cv2.imread('share/rosette.png',0)
annotation3_gray = cv2.imread('share/annotation3.jpg',0)
longdash_gray = cv2.imread('share/longdash.jpg',0)
# asterisms
asterism_gray = cv2.imread('share/asterism.jpg',0)
inverted_asterism_gray = cv2.imread('share/inverted_asterism.jpg',0)
asterism_block_gray = cv2.imread('share/asterism_block.jpg',0)
asterism_line_gray = cv2.imread('share/asterism_line.jpg',0)
#asterisk_image = cv2.imread('share/asterisk1.jpg')
#asterisk_gray = cv2.cvtColor(asterisk_image, cv2.COLOR_BGR2GRAY)
def find_inverted_asterism(target,output):
(tH, tW) = inverted_asterism_gray.shape[:2]
res = cv2.matchTemplate(target,inverted_asterism_gray,cv2.TM_CCOEFF_NORMED)
threshold = 0.60
locations = np.where(res >= threshold)
count=0
for pt in zip(*locations[::-1]):
count = count + 1
cv2.rectangle(output, pt, (pt[0] + tW, pt[1] + tH), (0,0,255), 2)
return(count)
def find_asterism(target,output):
(tH, tW) = asterism_gray.shape[:2]
res = cv2.matchTemplate(target,asterism_gray,cv2.TM_CCOEFF_NORMED)
threshold = 0.60
locations = np.where(res >= threshold)
count=0
for pt in zip(*locations[::-1]):
count = count + 1
cv2.rectangle(output, pt, (pt[0] + tW, pt[1] + tH), (0,0,255), 2)
return(count)
def find_asterism_line(target,output):
(tH, tW) = asterism_line_gray.shape[:2]
res = cv2.matchTemplate(target,asterism_line_gray,cv2.TM_CCOEFF_NORMED)
threshold = 0.60
locations = np.where(res >= threshold)
count=0
for pt in zip(*locations[::-1]):
count = count + 1
cv2.rectangle(output, pt, (pt[0] + tW, pt[1] + tH), (0,0,255), 2)
return(count)
def find_asterism_block(target,output):
(tH, tW) = asterism_block_gray.shape[:2]
res = cv2.matchTemplate(target,asterism_block_gray,cv2.TM_CCOEFF_NORMED)
threshold = 0.60
locations = np.where(res >= threshold)
count=0
for pt in zip(*locations[::-1]):
count = count + 1
cv2.rectangle(output, pt, (pt[0] + tW, pt[1] + tH), (0,0,255), 2)
return(count)
def find_longdash(target,output):
(tH, tW) = longdash_gray.shape[:2]
res = cv2.matchTemplate(target,longdash_gray,cv2.TM_CCOEFF_NORMED)
threshold = 0.75
locations = np.where(res >= threshold)
count=0
for pt in zip(*locations[::-1]):
count = count + 1
cv2.rectangle(output, pt, (pt[0] + tW, pt[1] + tH), (0,0,255), 2)
return(count)
def find_manicule(target,output):
(tH, tW) = manicule_gray.shape[:2]
res = cv2.matchTemplate(target,manicule_gray,cv2.TM_CCOEFF_NORMED)
threshold = 0.75
locations = np.where(res >= threshold)
count=0
for pt in zip(*locations[::-1]):
count = count + 1
cv2.rectangle(output, pt, (pt[0] + tW, pt[1] + tH), (0,0,255), 2)
return(count)
def find_arabesque(target,output):
(tH, tW) = arabesque_gray.shape[:2]
res = cv2.matchTemplate(target,arabesque_gray,cv2.TM_CCOEFF_NORMED)
threshold = 0.60
locations = np.where(res >= threshold)
count=0
for pt in zip(*locations[::-1]):
count = count + 1
cv2.rectangle(output, pt, (pt[0] + tW, pt[1] + tH), (0,0,255), 2)
return(count)
def find_rosette(target,output):
(tH, tW) = rosette_gray.shape[:2]
res = cv2.matchTemplate(target,rosette_gray,cv2.TM_CCOEFF_NORMED)
threshold = 0.65
locations = np.where(res >= threshold)
count=0
for pt in zip(*locations[::-1]):
count = count + 1
cv2.rectangle(output, pt, (pt[0] + tW, pt[1] + tH), (0,0,255), 2)
return(count)
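# Illustrative usage (the page file name is hypothetical): pass the grayscale page for
# template matching and the colour page onto which the red boxes are drawn.
#   page = cv2.imread('pages/0001.TIF')
#   page_gray = cv2.cvtColor(page, cv2.COLOR_BGR2GRAY)
#   n_rosettes = find_rosette(page_gray, page)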
def find_asterisk(target,output):
(tH, tW) = asterisk_gray.shape[:2]
res = cv2.matchTemplate(target,asterisk_gray,cv2.TM_CCOEFF_NORMED)
threshold = 0.70
locations = np.where(res >= threshold)
#! /usr/bin/python
# Various coordinate system transform utilities. DIS uses
# an Earth-Centered, Earth-Fixed coordinate system, with the
# origin at the center of the (WGS84) earth, positive x out
# at the equator and prime meridian, z out through the north
# pole, and y out at the equator and 90 deg east. We often want
# to convert those coordinates to latitude, longitude, and altitude
# on the WGS84 globe. This utility does that. (It's swiped from
# the net, specifically the stoqs project at MBARI)
__author__ = ""
__credits = "https://github.com/GAVLab/fhwa2_viz/blob/master/fhwa2_gui/src/util.py"
"""
Container for general GPS functions and classes
Functions:
deg2rad
rad2deg
euclideanDistance
gpsWeekCheck
keplerE
Classes:
GPS - includes functions:
lla2ecef
ecef2lla
WGS84 - constant parameters for GPS class
"""
#Import required packages
from math import sqrt, pi, sin, cos, tan, atan, atan2, asin
from numpy import array, dot
#from numarray import array, dot, zeros, Float64
#def diag(l):
# length = len(l)
# a = zeros((length, length), Float64)
# for index in range(length):
# a[index, index] = l[index]
# return a
def deg2rad(deg):
"""Converts degrees to radians"""
return deg * pi / 180
def rad2deg(rad):
"""Converts radians to degrees"""
return rad * 180 / pi
def isEven(num):
"""Boolean function returning true if num is even, false if not"""
return num%2 == 0
def euclideanDistance(data, dataRef=None):
"""Calculates the Euclidian distance between the given data and zero.
This works out to be equivalent to the distance between two points if their
difference is given as the input"""
total = 0
for index in range(len(data)):
if dataRef is None:
total += data[index]**2
else:
total += (data[index] - dataRef[index])**2
return sqrt(total)
def gpsWeekCheck(t):
"""Makes sure the time is in the interval [-302400 302400] seconds, which
corresponds to number of seconds in the GPS week"""
if t > 302400.:
t = t - 604800.
elif t < -302400.:
t = t + 604800.
return t
def keplerE(M_k, ecc, tolerance=1e-12):
"""Iteratively calculates E_k using Kepler's equation:
E_k = M_k + ecc * sin(E_k)"""
E_k = M_k
E_0 = E_k + tolerance * 10.
while abs(E_k - E_0) > tolerance:
E_0 = E_k
E_k = M_k + ecc * sin(E_k)
return E_k
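# Worked example (hypothetical values, angles in radians): for M_k = 1.0 and ecc = 0.1
# the fixed-point iteration converges to E_k ~= 1.0886, which indeed satisfies
# E_k - 0.1 * sin(E_k) = 1.0 to within the default tolerance.
#   keplerE(1.0, 0.1)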
class WGS84:
"""General parameters defined by the WGS84 system"""
#Semimajor axis length (m)
a = 6378137.0
#Semiminor axis length (m)
b = 6356752.3142
#Ellipsoid flatness (unitless)
f = (a - b) / a
#Eccentricity (unitless)
e = sqrt(f * (2 - f))
#Speed of light (m/s)
c = 299792458.
#Relativistic constant
F = -4.442807633e-10
#Earth's universal gravitational constant
mu = 3.986005e14
#Earth rotation rate (rad/s)
omega_ie = 7.2921151467e-5
def g0(self, L):
"""acceleration due to gravity at the elipsoid surface at latitude L"""
return 9.7803267715 * (1 + 0.001931851353 * sin(L)**2) / \
sqrt(1 - 0.0066943800229 * sin(L)**2)
class GPS:
"""Working class for GPS module"""
wgs84 = WGS84()
fGPS = 1023
fL1 = fGPS * 1.54e6
fL2 = fGPS * 1.2e6
def lla2ecef(self, lla):
"""Convert lat, lon, alt to Earth-centered, Earth-fixed coordinates.
Input: lla - (lat, lon, alt) in (decimal degrees, decimal degees, m)
Output: ecef - (x, y, z) in (m, m, m)
"""
#Decompose the input
lat = deg2rad(lla[0])
lon = deg2rad(lla[1])
alt = lla[2]
#Calculate length of the normal to the ellipsoid
N = self.wgs84.a / sqrt(1 - (self.wgs84.e * sin(lat))**2)
#Calculate ecef coordinates
x = (N + alt) * cos(lat) * cos(lon)
y = (N + alt) * cos(lat) * sin(lon)
z = (N * (1 - self.wgs84.e**2) + alt) * sin(lat)
#Return the ecef coordinates
return (x, y, z)
def lla2gcc(self, lla, geoOrigin=''):
"""
Same as lla2ecef, but accepts an X3D-style geoOrigin string that is subtracted in ECEF (GCC) coordinates
"""
if geoOrigin:
lon0, lat0, a0 = [float(c) for c in geoOrigin.split()]
x0, y0, z0 = self.lla2ecef((lat0, lon0, a0))
else:
x0, y0, z0 = 0, 0, 0
x, y, z = self.lla2ecef(lla)
return (x - x0, y - y0, z -z0)
def ecef2lla(self, ecef, tolerance=1e-9):
"""Convert Earth-centered, Earth-fixed coordinates to lat, lon, alt.
Input: ecef - (x, y, z) in (m, m, m)
Output: lla - (lat, lon, alt) in (decimal degrees, decimal degrees, m)
"""
#Decompose the input
x = ecef[0]
y = ecef[1]
z = ecef[2]
#Calculate lon
lon = atan2(y, x)
#Initialize the variables to calculate lat and alt
alt = 0
N = self.wgs84.a
p = sqrt(x**2 + y**2)
lat = 0
previousLat = 90
#Iterate until tolerance is reached
while abs(lat - previousLat) >= tolerance:
previousLat = lat
sinLat = z / (N * (1 - self.wgs84.e**2) + alt)
lat = atan((z + self.wgs84.e**2 * N * sinLat) / p)
N = self.wgs84.a / sqrt(1 - (self.wgs84.e * sinLat)**2)
alt = p / cos(lat) - N
#Return the lla coordinates
return (rad2deg(lat), rad2deg(lon), alt)
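# Round-trip sanity check (illustrative coordinates, not from the original source):
#   gps = GPS()
#   x, y, z = gps.lla2ecef((36.8, -121.9, 10.0))
#   gps.ecef2lla((x, y, z))   # ~(36.8, -121.9, 10.0) to within the iteration tolerance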
def ecef2ned(self, ecef, origin):
"""Converts ecef coordinates into local tangent plane where the
origin is the origin in ecef coordinates.
Input: ecef - (x, y, z) in (m, m, m)
origin - (x0, y0, z0) in (m, m, m)
Output: ned - (north, east, down) in (m, m, m)
"""
llaOrigin = self.ecef2lla(origin)
lat = deg2rad(llaOrigin[0])
lon = deg2rad(llaOrigin[1])
Re2t = array([[-sin(lat)*cos(lon), -sin(lat)*sin(lon), cos(lat)],
[-sin(lon), cos(lon), 0],
[-cos(lat)*cos(lon), -cos(lat)*sin(lon), -sin(lat)]])
return list(dot(Re2t, array(ecef) - array(origin)))
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Library for converting certain types of Marian models to a standalone ONNX model.
Because Marian and ONNX use very different philosophies, a conversion is not possible
for all possible Marian models. Specifically, currently we don't support recurrent
networks in the encoder, and we can only decode with greedy search (not beam search).
This works by running a Marian decode for 2 output steps, and capturing three pieces of
Marian's internal graph that correspond to the encoder, the first decoding step, and the
second decoding step. The graph of the second decoding step can be applied repeatedly in
order to decode a variable-length sequence.
The three pieces are then composed with a greedy-search implementation, which is realized
directly via ONNX operators. This is facilitated by the onnx_fx library. As of this writing,
onnx_fx is still in experimental stage, and is not yet included in Release branches of
the onnxconverter-common distribution. Hence, you must use the latest master branch, not
the release.
The code below assumes that the onnxconverter_common repo is cloned next to the marian-dev
repo, and that you use the standard CMake build process on Linux. If not, please make sure
that the onnxconverter-common repo is included in PYTHONPATH, and you may need to pass the
binary path of Marian to export_marian_model_components() explicitly.
Prerequisites:
```
pip install onnxruntime
git clone https://github.com/microsoft/onnxconverter-common.git
```
You will also need to compile Marian with -DUSE_ONNX=ON.
Known issue: If the number of decoder layers is not 6, you need to manually adjust one
line of code in loop_body() below.
"""
import os, sys, inspect, subprocess
from typing import List, Dict, Optional, Callable
# get the Marian root path
_marian_root_path = os.path.dirname(inspect.getfile(inspect.currentframe())) + "/../.."
# we assume onnxconverter-common to be available next to the marian-dev repo; you may need to adjust this
sys.path.append(_marian_root_path + "/../onnxconverter-common")
from onnxconverter_common.onnx_fx import Graph
from onnxconverter_common.onnx_fx import GraphFunctionType as _Ty
from onnxconverter_common import optimize_onnx_graph
import onnxruntime as _ort
from onnxruntime import quantization
def _ort_apply_model(model, inputs): # ORT execution is a callback so that Graph itself does not need to depend on ORT
sess = _ort.InferenceSession(model.SerializeToString())
return sess.run(None, inputs)
Graph.inference_runtime = _ort_apply_model
Graph.opset = 11
def _optimize_graph_in_place(graph: Graph):
# @TODO: This should really be methods on onnx_fx.Graph.
g = graph._oxml.graph
g_opt = optimize_onnx_graph(
onnx_nodes=g.node, # the onnx node list in onnx model.
nchw_inputs=None, # the name list of the inputs needed to be transposed as NCHW
inputs=g.input, # the model input
outputs=g.output, # the model output
initializers=g.initializer, # the model initializers
stop_initializers=None, # 'stop' optimization on these initializers
model_value_info=g.value_info, # the model value_info
model_name=g.name, # the internal name of model
target_opset=graph.opset)
graph._oxml.graph.CopyFrom(g_opt)
def export_marian_model_components(marian_model_path: str, marian_vocab_paths: List[str],
marian_executable_path: Optional[str]=None) -> Dict[str,Graph]:
"""
Export the Marian graph to a set of models.
Args:
marian_model_path: path to Marian model to convert
marian_vocab_paths: paths of vocab files (normally, this requires 2 entries, which may be identical)
marian_executable_path: path to Marian executable; will default to THIS_SCRIPT_PATH/../../build/marian
Returns:
Dict of onnx_fx.Graph instances corresponding to pieces of the Marian model.
"""
assert isinstance(marian_vocab_paths, list), "marian_vocab_paths must be a list of paths"
# default marian executable is found relative to location of this script (Linux/CMake only)
if marian_executable_path is None:
marian_executable_path = _marian_root_path + "/build/marian"
# partial models are written to /tmp
output_path_stem = "/tmp/" + os.path.basename(marian_model_path)
# exporting is done via invoking Marian via its command-line interface; models are written to tmp files
command = marian_executable_path
args = [
"convert",
"--from", marian_model_path,
"--vocabs", *marian_vocab_paths,
"--to", output_path_stem,
"--export-as", "onnx-encode"
]
subprocess.run([command] + args, check=True)
# load the tmp files into onnx_fx.Graph objects
graph_names = ["encode_source", "decode_first", "decode_next"] # Marian generates graphs with these names
output_paths = [output_path_stem + "." + graph_name + ".onnx" for graph_name in graph_names] # form pathnames under which Marian wrote the files
res = { graph_name: Graph.load(output_path) for graph_name, output_path in zip(graph_names, output_paths) }
# optimize the partial models in place, as Marian may not have used the most optimal way of expressing all operations
for graph_name in res.keys():
_optimize_graph_in_place(res[graph_name])
# clean up after ourselves
for output_path in output_paths:
os.unlink(output_path)
return res
def quantize_models_in_place(partial_models: Dict[str,Graph], to_bits: int=8):
"""
Quantize the partial models in place.
Args:
partial_models: models returned from export_marian_model_components()
to_bits: number of bits to quantize to, currently only supports 8
"""
for graph_name in partial_models.keys(): # quantize each partial model
partial_models[graph_name]._oxml = quantization.quantize(
partial_models[graph_name]._oxml,
nbits=to_bits,
quantization_mode=quantization.QuantizationMode.IntegerOps,
symmetric_weight=True,
force_fusions=True)
def compose_model_components_with_greedy_search(partial_models: Dict[str,Graph], num_decoder_layers: int):
"""
Create an ONNX model that implements greedy search over the exported Marian pieces.
Args:
partial_models: models returned from export_marian_model_components()
num_decoder_layers: must be specified, since it cannot be inferred from the model files presently (e.g. 6)
Returns:
ONNX model that can be called as
result_ids = greedy_search_fn(np.array(source_ids, dtype=np.int64), np.array([target_eos_id], dtype=np.int64))[0]
"""
decoder_state_dim = num_decoder_layers * 2 # each decoder has two state variables
# load our partial functions
# ONNX graph inputs and outputs are named but not ordered. Therefore, we must define the parameter order here.
def define_parameter_order(graph, inputs, outputs):
tmppath = "/tmp/tmpmodel.onnx"
graph.save(tmppath) # unfortunately, Graph.load() cannot load from another Graph, so use a tmp file
graph = Graph.load(tmppath, inputs=inputs, outputs=outputs)
os.unlink(tmppath)
return graph
encode_source = define_parameter_order(partial_models["encode_source"],
inputs=['data_0', 'data_0_mask', 'data_0_posrange'], # define the order of arguments
outputs=['encoder_context_0'])
decode_first = define_parameter_order(partial_models["decode_first"],
inputs=['data_1_posrange', 'encoder_context_0', 'data_0_mask'],
outputs=['first_logits'] +
[f"first_decoder_state_{i}" for i in range(decoder_state_dim)])
decode_next = define_parameter_order(partial_models["decode_next"],
inputs=['prev_word', 'data_1_posrange', 'encoder_context_0', 'data_0_mask'] +
[f"decoder_state_{i}" for i in range(decoder_state_dim)],
outputs=['next_logits'] +
[f"next_decoder_state_{i}" for i in range(decoder_state_dim)])
# create an ONNX graph that implements full greedy search
# The greedy search is implemented via the @onnx_fx.Graph.trace decorator, which allows us to
# author the greedy search in Python, similar to @CNTK.Function and PyTorch trace-based jit.
# The decorator executes greedy_search() below on a dummy input in order to generate an ONNX graph
# via invoking operators from the onnx.fx library.
# The partial functions exported from Marian are invoked (=inlined) by this.
# The result is a full ONNX graph that implements greedy search using the Marian model.
@Graph.trace(
input_types=[_Ty.I(shape=['N']), _Ty.I([1])],
output_types=[_Ty.I(shape=['T'])],
outputs="Y")
def greedy_search(X, eos_id):
"""
Args:
X: sequence of input tokens, including EOS symbol, as integer indices into the input vocabulary
eos_id: id of the EOS symbol in the output vocabulary
"""
ox = X.ox
data_0 = X
data_0_shape = data_0.shape()
data_0_mask = ox.constant_of_shape(data_0_shape, value=1.0)
seq_len = data_0_shape[-1]
data_0_index_range = ox.range([ox.constant(value=0), seq_len, ox.constant(value=1)]).cast(to=ox.float)
data_0_index_range = ox.unsqueeze(data_0_index_range, axes=[1, 2])
max_len = seq_len * 3
encoder_context_0 = encode_source(data_0=data_0, data_0_mask=data_0_mask,
data_0_posrange=data_0_index_range)
y_len_0 = ox.constant(value=0.0)
logp, *out_decoder_states = decode_first(data_1_posrange=y_len_0,
encoder_context_0=encoder_context_0, data_0_mask=data_0_mask)
y_t = logp[0, 0, 0].argmax(axis=-1, keepdims=True) # note: rank-1 tensor, not a scalar
eos_token = eos_id + 0
test_y_t = (y_t != eos_token)
@Graph.trace(outputs=['ty_t', 'y_t_o', *(f'ods_{i}' for i in range(decoder_state_dim)), 'y_t_o2'],
output_types=[_Ty.b, _Ty.i] + [_Ty.f] * decoder_state_dim + [_Ty.i],
input_types=[_Ty.I([1]), _Ty.b, _Ty.i] + [_Ty.f] * decoder_state_dim)
def loop_body(iteration_count, condition, # these are not actually used inside
y_t,
out_decoder_states_0, out_decoder_states_1, out_decoder_states_2, out_decoder_states_3, out_decoder_states_4, out_decoder_states_5,
out_decoder_states_6, out_decoder_states_7, out_decoder_states_8, out_decoder_states_9, out_decoder_states_10, out_decoder_states_11):
# @BUGBUG: Currently, we do not support variable number of arguments to the callable.
# @TODO: We have the information from the type signature in Graph.trace(), so this should be possible.
assert decoder_state_dim == 12, "Currently, decoder layers other than 6 require a manual code change"
out_decoder_states = [out_decoder_states_0, out_decoder_states_1, out_decoder_states_2, out_decoder_states_3, out_decoder_states_4, out_decoder_states_5,
out_decoder_states_6, out_decoder_states_7, out_decoder_states_8, out_decoder_states_9, out_decoder_states_10, out_decoder_states_11]
"""
Loop body follows the requirements of ONNX Loop:
"The graph run each iteration.
It has 2+N inputs: (iteration_num, condition, loop carried dependencies...).
It has 1+N+K outputs: (condition, loop carried dependencies..., scan_outputs...).
Each scan_output is created by concatenating the value of the specified output value at the end of each iteration of the loop.
It is an error if the dimensions or data type of these scan_outputs change across loop iterations."
Inputs:
iteration_num (not used by our function)
test_y_t: condition (not used as an input)
y_t, *out_decoder_states: N=(decoder_state_dim+1) loop-carried dependencies
Outputs:
test_y_t: condition, return True if there is more to decode
y_t, *out_decoder_states: N=(decoder_state_dim+1) loop-carried dependencies (same as in the Inputs section)
y_t: K=1 outputs
"""
pos = iteration_count + 1
data_1_posrange = pos.cast(to=1).unsqueeze(axes=[0, 1, 2])
logp, *out_decoder_states = decode_next(
prev_word=y_t, data_1_posrange=data_1_posrange,
encoder_context_0=encoder_context_0, data_0_mask=data_0_mask,
**{f"decoder_state_{i}": out_decoder_states[i] for i in range(len(out_decoder_states))})
y_t = logp[0, 0, 0].argmax(axis=-1, keepdims=True)
test_y_t = (y_t != eos_token)
return [test_y_t, y_t] + out_decoder_states + [y_t]
# "Final N loop carried dependency values then K scan_outputs"
ret_vals = ox.loop(max_len, test_y_t, loop_body,
inputs=[y_t] + out_decoder_states,
outputs=['gy_t_o', *[f"gods_{i}" for i in range(len(out_decoder_states))], 'greedy_out'])
y = ret_vals[-1] # scan_output
# we must prepend the very first token
Y = ox.concat([ox.unsqueeze(y_t), y], axis=0) # note: y_t are rank-1 tensors, not scalars (ORT concat fails with scalars)
return ox.squeeze(Y, axes=[1])
greedy_search.to_model() # this triggers the model tracing (which is lazy)
# optimize the final model as well
# @BUGBUG: This leads to a malformed or hanging model.
#_optimize_graph_in_place(greedy_search)
return greedy_search
def apply_model(greedy_search_fn: Graph, source_ids: List[int], target_eos_id: int) -> List[int]:
"""
Apply model to an input sequence, e.g. run translation.
This function is meant for quick testing, and as an example of how to invoke the final graph.
Args:
greedy_search_fn: ONNX model created with combine_model_components_with_greedy_search()\
source_ids: list of source tokens, as indices into source vocabulary, ending in EOS symbol
target_eos_id: id of EOS symbol in target vocabulary
Returns:
Result as list of ids into target vocabulary
"""
import numpy as np
Y = greedy_search_fn(
np.array(source_ids, dtype=np.int64),
np.array([target_eos_id], dtype=np.int64))[0]
return Y
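# Illustrative end-to-end usage (paths and token ids below are hypothetical):
#   parts = export_marian_model_components('model.npz', ['vocab.spm', 'vocab.spm'])
#   quantize_models_in_place(parts, to_bits=8)
#   greedy = compose_model_components_with_greedy_search(parts, num_decoder_layers=6)
#   out_ids = apply_model(greedy, source_ids=[17, 23, 5, 0], target_eos_id=0)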
"""All TCR metrics incorporated into multi-variate logistic regression to predict illness severity."""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import mannwhitneyu
from statsmodels.stats.multitest import multipletests
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder,OneHotEncoder, StandardScaler, MinMaxScaler
from sklearn.metrics import roc_curve,roc_auc_score
import matplotlib
plt.style.use('ggplot')
matplotlib.rc('font', family='sans-serif')
df_metrics = pd.read_csv('../../Data/ImmuneCODE/samples.tsv',sep='\t')
df_metrics['sample_name'] = df_metrics['sample_name']+'.tsv'
metrics = df_metrics.columns[1:10]
df = pd.read_csv('../../Data/ImmuneCODE/ImmuneCODE-Repertoire-Tags-002.2.tsv',sep='\t')
df['who_ordinal_scale_bin'] = None
df.loc[df['who_ordinal_scale'] <= 4, 'who_ordinal_scale_bin'] = 'mild'
df.loc[df['who_ordinal_scale'] > 4, 'who_ordinal_scale_bin'] = 'severe'
df['who_ordinal_scale'] = df['who_ordinal_scale_bin']
df['Age'] = df['Age'].str.split(' ',expand=True)[0]
df['Age'] = df['Age'].astype(float)
df['Dataset'] = df['Dataset'].str.split('COVID-19-',expand=True)[1]
df['sample_name'] = df['sample_name']+'.tsv'
df['severe'] = None
df.loc[(df['icu_admit']==True) | (df['who_ordinal_scale']=='severe'), 'severe'] = 'severe'
df.loc[(df['icu_admit']==False) | (df['who_ordinal_scale']=='mild'), 'severe'] = 'mild'
df_merge = pd.merge(df,df_metrics,on=['sample_name'])
ds = ['NIH/NIAID','ISB']
label_sel = 'severe'
df_merge = df_merge[df_merge['Dataset'].isin(ds)]
df_merge.dropna(subset=[label_sel],inplace=True)
ss = StandardScaler()
for m in metrics:
df_merge[m] = ss.fit_transform(np.array(df_merge[m]).reshape(-1,1))
lr = LogisticRegression()
skf = StratifiedKFold(shuffle=True,n_splits=2)
lb = LabelEncoder()
iterations = 100
DFs = []
for d in ds:
df_temp = df_merge[df_merge['Dataset'] == d].reset_index(drop=True)
X = np.array(df_temp[metrics])
Y = np.array(df_temp['severe'])
Y = lb.fit_transform(Y)
y_pred = []
y_test = []
sample_list = []
for it in range(iterations):
for train_idx, test_idx in skf.split(X,Y):
y_test.append(Y[test_idx])
sample_list.append(df_temp['sample_name'][test_idx])
lr.fit(X[train_idx],Y[train_idx])
pred = lr.predict_proba(X[test_idx])
y_pred.append(pred[:,1])
y_pred = np.hstack(y_pred)
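# Possible continuation (a sketch, not from the original source): pool the folds and
# score the per-dataset classifier with ROC AUC.
#   y_test = np.hstack(y_test)
#   auc = roc_auc_score(y_test, y_pred)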
"""Rangeland Production Model."""
import os
import logging
import tempfile
import shutil
from builtins import range
import re
import math
import pickle
import numpy
import pandas
from osgeo import ogr
from osgeo import osr
from osgeo import gdal
import pygeoprocessing
from rangeland_production import utils
from rangeland_production import validation
LOGGER = logging.getLogger('rangeland_production.forage')
# we only have these types of soils
SOIL_TYPE_LIST = ['clay', 'silt', 'sand']
# temporary directory to store intermediate files
PROCESSING_DIR = None
# user-supplied crude protein of vegetation
CRUDE_PROTEIN = None
# state variables and parameters take their names from Century
# _SITE_STATE_VARIABLE_FILES contains state variables that are a
# property of the site, including:
# carbon in each soil compartment
# (structural, metabolic, som1, som2, som3) and layer (1=surface, 2=soil)
# e.g., som2c_2 = carbon in soil som2;
# N and P in each soil layer and compartment (1=N, 2=P)
# e.g., som2e_1_1 = N in surface som2, som2e_1_2 = P in surface som2;
# water in each soil layer, asmos_<layer>
# state variables fully described in this table:
# https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
_SITE_STATE_VARIABLE_FILES = {
'metabc_1_path': 'metabc_1.tif',
'metabc_2_path': 'metabc_2.tif',
'som1c_1_path': 'som1c_1.tif',
'som1c_2_path': 'som1c_2.tif',
'som2c_1_path': 'som2c_1.tif',
'som2c_2_path': 'som2c_2.tif',
'som3c_path': 'som3c.tif',
'strucc_1_path': 'strucc_1.tif',
'strucc_2_path': 'strucc_2.tif',
'strlig_1_path': 'strlig_1.tif',
'strlig_2_path': 'strlig_2.tif',
'metabe_1_1_path': 'metabe_1_1.tif',
'metabe_2_1_path': 'metabe_2_1.tif',
'som1e_1_1_path': 'som1e_1_1.tif',
'som1e_2_1_path': 'som1e_2_1.tif',
'som2e_1_1_path': 'som2e_1_1.tif',
'som2e_2_1_path': 'som2e_2_1.tif',
'som3e_1_path': 'som3e_1.tif',
'struce_1_1_path': 'struce_1_1.tif',
'struce_2_1_path': 'struce_2_1.tif',
'metabe_1_2_path': 'metabe_1_2.tif',
'metabe_2_2_path': 'metabe_2_2.tif',
'plabil_path': 'plabil.tif',
'secndy_2_path': 'secndy_2.tif',
'parent_2_path': 'parent_2.tif',
'occlud_path': 'occlud.tif',
'som1e_1_2_path': 'som1e_1_2.tif',
'som1e_2_2_path': 'som1e_2_2.tif',
'som2e_1_2_path': 'som2e_1_2.tif',
'som2e_2_2_path': 'som2e_2_2.tif',
'som3e_2_path': 'som3e_2.tif',
'struce_1_2_path': 'struce_1_2.tif',
'struce_2_2_path': 'struce_2_2.tif',
'asmos_1_path': 'asmos_1.tif',
'asmos_2_path': 'asmos_2.tif',
'asmos_3_path': 'asmos_3.tif',
'asmos_4_path': 'asmos_4.tif',
'asmos_5_path': 'asmos_5.tif',
'asmos_6_path': 'asmos_6.tif',
'asmos_7_path': 'asmos_7.tif',
'asmos_8_path': 'asmos_8.tif',
'asmos_9_path': 'asmos_9.tif',
'avh2o_3_path': 'avh2o_3.tif',
'minerl_1_1_path': 'minerl_1_1.tif',
'minerl_2_1_path': 'minerl_2_1.tif',
'minerl_3_1_path': 'minerl_3_1.tif',
'minerl_4_1_path': 'minerl_4_1.tif',
'minerl_5_1_path': 'minerl_5_1.tif',
'minerl_6_1_path': 'minerl_6_1.tif',
'minerl_7_1_path': 'minerl_7_1.tif',
'minerl_8_1_path': 'minerl_8_1.tif',
'minerl_9_1_path': 'minerl_9_1.tif',
'minerl_10_1_path': 'minerl_10_1.tif',
'minerl_1_2_path': 'minerl_1_2.tif',
'minerl_2_2_path': 'minerl_2_2.tif',
'minerl_3_2_path': 'minerl_3_2.tif',
'minerl_4_2_path': 'minerl_4_2.tif',
'minerl_5_2_path': 'minerl_5_2.tif',
'minerl_6_2_path': 'minerl_6_2.tif',
'minerl_7_2_path': 'minerl_7_2.tif',
'minerl_8_2_path': 'minerl_8_2.tif',
'minerl_9_2_path': 'minerl_9_2.tif',
'minerl_10_2_path': 'minerl_10_2.tif',
'snow_path': 'snow.tif',
'snlq_path': 'snlq.tif',
}
# _PFT_STATE_VARIABLES contains state variables that are a
# property of a PFT, including:
# carbon, nitrogen, and phosphorus in aboveground biomass
# where 1=N, 2=P
# e.g. aglivc = C in aboveground live biomass,
# aglive_1 = N in aboveground live biomass;
# carbon, nitrogen, and phosphorus in aboveground standing dead
# biomass, stdedc and stdede;
# carbon, nitrogen and phosphorus in belowground live biomass,
# bglivc and bglive
# state variables fully described in this table:
# https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
_PFT_STATE_VARIABLES = [
'aglivc', 'bglivc', 'stdedc', 'aglive_1', 'bglive_1',
'stdede_1', 'aglive_2', 'bglive_2', 'stdede_2', 'avh2o_1',
'crpstg_1', 'crpstg_2',
]
# intermediate parameters that do not change between timesteps,
# including field capacity and wilting point of each soil layer,
# coefficients describing effect of soil texture on decomposition
# rates
_PERSISTENT_PARAMS_FILES = {
'afiel_1_path': 'afiel_1.tif',
'afiel_2_path': 'afiel_2.tif',
'afiel_3_path': 'afiel_3.tif',
'afiel_4_path': 'afiel_4.tif',
'afiel_5_path': 'afiel_5.tif',
'afiel_6_path': 'afiel_6.tif',
'afiel_7_path': 'afiel_7.tif',
'afiel_8_path': 'afiel_8.tif',
'afiel_9_path': 'afiel_9.tif',
'awilt_1_path': 'awilt_1.tif',
'awilt_2_path': 'awilt_2.tif',
'awilt_3_path': 'awilt_3.tif',
'awilt_4_path': 'awilt_4.tif',
'awilt_5_path': 'awilt_5.tif',
'awilt_6_path': 'awilt_6.tif',
'awilt_7_path': 'awilt_7.tif',
'awilt_8_path': 'awilt_8.tif',
'awilt_9_path': 'awilt_9.tif',
'wc_path': 'wc.tif',
'eftext_path': 'eftext.tif',
'p1co2_2_path': 'p1co2_2.tif',
'fps1s3_path': 'fps1s3.tif',
'orglch_path': 'orglch.tif',
'fps2s3_path': 'fps2s3.tif',
'rnewas_1_1_path': 'rnewas_1_1.tif',
'rnewas_2_1_path': 'rnewas_2_1.tif',
'rnewas_1_2_path': 'rnewas_1_2.tif',
'rnewas_2_2_path': 'rnewas_2_2.tif',
'rnewbs_1_1_path': 'rnewbs_1_1.tif',
'rnewbs_1_2_path': 'rnewbs_1_2.tif',
'rnewbs_2_1_path': 'rnewbs_2_1.tif',
'rnewbs_2_2_path': 'rnewbs_2_2.tif',
'vlossg_path': 'vlossg.tif',
}
# site-level values that are updated once per year
_YEARLY_FILES = {
'annual_precip_path': 'annual_precip.tif',
'baseNdep_path': 'baseNdep.tif',
}
# pft-level values that are updated once per year
_YEARLY_PFT_FILES = ['pltlig_above', 'pltlig_below']
# intermediate values for each plant functional type that are shared
# between submodels, but do not need to be saved as output
_PFT_INTERMEDIATE_VALUES = [
'h2ogef_1', 'tgprod_pot_prod',
'cercrp_min_above_1', 'cercrp_min_above_2',
'cercrp_max_above_1', 'cercrp_max_above_2',
'cercrp_min_below_1', 'cercrp_min_below_2',
'cercrp_max_below_1', 'cercrp_max_below_2',
'tgprod', 'rtsh', 'flgrem', 'fdgrem']
# intermediate site-level values that are shared between submodels,
# but do not need to be saved as output
_SITE_INTERMEDIATE_VALUES = [
'amov_1', 'amov_2', 'amov_3', 'amov_4', 'amov_5', 'amov_6', 'amov_7',
'amov_8', 'amov_9', 'amov_10', 'snowmelt', 'bgwfunc', 'diet_sufficiency']
# fixed parameters for each grazing animal type are adapted from the GRAZPLAN
# model as described by Freer et al. 2012, "The GRAZPLAN animal biology model
# for sheep and cattle and the GrazFeed decision support tool"
_FREER_PARAM_DICT = {
'b_indicus': {
'CN1': 0.0115,
'CN2': 0.27,
'CN3': 0.4,
'CI1': 0.025,
'CI2': 1.7,
'CI8': 62,
'CI9': 1.7,
'CI15': 0.5,
'CI19': 0.416,
'CI20': 1.5,
'CR1': 0.8,
'CR2': 0.17,
'CR3': 1.7,
'CR4': 0.00078,
'CR5': 0.6,
'CR6': 0.00074,
'CR7': 0.5,
'CR12': 0.8,
'CR13': 0.35,
'CK1': 0.5,
'CK2': 0.02,
'CK3': 0.85,
'CK5': 0.4,
'CK6': 0.02,
'CK8': 0.133,
'CL0': 0.375,
'CL1': 4,
'CL2': 30,
'CL3': 0.6,
'CL5': 0.94,
'CL6': 3.1,
'CL15': 0.032,
'CM1': 0.09,
'CM2': 0.31,
'CM3': 0.00008,
'CM4': 0.84,
'CM6': 0.0025,
'CM7': 0.9,
'CM16': 0.0026,
'CRD1': 0.3,
'CRD2': 0.25,
'CRD4': 0.007,
'CRD5': 0.005,
'CRD6': 0.35,
'CRD7': 0.1,
'CA1': 0.05,
'CA2': 0.85,
'CA3': 5.5,
'CA4': 0.178,
'CA6': 1,
'CA7': 0.6,
'CP1': 285,
'CP4': 0.33,
'CP5': 1.8,
'CP6': 2.42,
'CP7': 1.16,
'CP8': 4.11,
'CP9': 343.5,
'CP10': 0.0164,
'CP15': 0.07,
},
'b_taurus': {
'CN1': 0.0115,
'CN2': 0.27,
'CN3': 0.4,
'CI1': 0.025,
'CI2': 1.7,
'CI8': 62,
'CI9': 1.7,
'CI15': 0.5,
'CI19': 0.416,
'CI20': 1.5,
'CR1': 0.8,
'CR2': 0.17,
'CR3': 1.7,
'CR4': 0.00078,
'CR5': 0.6,
'CR6': 0.00074,
'CR7': 0.5,
'CR12': 0.8,
'CR13': 0.35,
'CK1': 0.5,
'CK2': 0.02,
'CK3': 0.85,
'CK5': 0.4,
'CK6': 0.02,
'CK8': 0.133,
'CL0': 0.375,
'CL1': 4,
'CL2': 30,
'CL3': 0.6,
'CL5': 0.94,
'CL6': 3.1,
'CL15': 0.032,
'CM1': 0.09,
'CM2': 0.36,
'CM3': 0.00008,
'CM4': 0.84,
'CM6': 0.0025,
'CM7': 0.9,
'CM16': 0.0026,
'CRD1': 0.3,
'CRD2': 0.25,
'CRD4': 0.007,
'CRD5': 0.005,
'CRD6': 0.35,
'CRD7': 0.1,
'CA1': 0.05,
'CA2': 0.85,
'CA3': 5.5,
'CA4': 0.178,
'CA6': 1,
'CA7': 0.6,
'CP1': 285,
'CP4': 0.33,
'CP5': 1.8,
'CP6': 2.42,
'CP7': 1.16,
'CP8': 4.11,
'CP9': 343.5,
'CP10': 0.0164,
'CP15': 0.07,
},
'indicus_x_taurus': {
'CN1': 0.0115,
'CN2': 0.27,
'CN3': 0.4,
'CI1': 0.025,
'CI2': 1.7,
'CI8': 62,
'CI9': 1.7,
'CI15': 0.5,
'CI19': 0.416,
'CI20': 1.5,
'CR1': 0.8,
'CR2': 0.17,
'CR3': 1.7,
'CR4': 0.00078,
'CR5': 0.6,
'CR6': 0.00074,
'CR7': 0.5,
'CR12': 0.8,
'CR13': 0.35,
'CK1': 0.5,
'CK2': 0.02,
'CK3': 0.85,
'CK5': 0.4,
'CK6': 0.02,
'CK8': 0.133,
'CL0': 0.375,
'CL1': 4,
'CL2': 30,
'CL3': 0.6,
'CL5': 0.94,
'CL6': 3.1,
'CL15': 0.032,
'CM1': 0.09,
'CM2': 0.335,
'CM3': 0.00008,
'CM4': 0.84,
'CM6': 0.0025,
'CM7': 0.9,
'CM16': 0.0026,
'CRD1': 0.3,
'CRD2': 0.25,
'CRD4': 0.007,
'CRD5': 0.005,
'CRD6': 0.35,
'CRD7': 0.1,
'CA1': 0.05,
'CA2': 0.85,
'CA3': 5.5,
'CA4': 0.178,
'CA6': 1,
'CA7': 0.6,
'CP1': 285,
'CP4': 0.33,
'CP5': 1.8,
'CP6': 2.42,
'CP7': 1.16,
'CP8': 4.11,
'CP9': 343.5,
'CP10': 0.0164,
'CP15': 0.07,
},
'sheep': {
'CN1': 0.0157,
'CN2': 0.27,
'CN3': 0.4,
'CI1': 0.04,
'CI2': 1.7,
'CI8': 28,
'CI9': 1.4,
'CI12': 0.15,
'CI13': 0.02,
'CI14': 0.002,
'CI20': 1.5,
'CR1': 0.8,
'CR2': 0.17,
'CR3': 1.7,
'CR4': 0.00112,
'CR5': 0.6,
'CR6': 0.00112,
'CR7': 0,
'CR12': 0.8,
'CR13': 0.35,
'CK1': 0.5,
'CK2': 0.02,
'CK3': 0.85,
'CK5': 0.4,
'CK6': 0.02,
'CK8': 0.133,
'CL0': 0.486,
'CL1': 2,
'CL2': 22,
'CL3': 1,
'CL5': 0.94,
'CL6': 4.7,
'CL15': 0.045,
'CM1': 0.09,
'CM2': 0.26,
'CM3': 0.00008,
'CM4': 0.84,
'CM6': 0.02,
'CM7': 0.9,
'CM16': 0.0026,
'CRD1': 0.3,
'CRD2': 0.25,
'CRD4': 0.007,
'CRD5': 0.005,
'CRD6': 0.35,
'CRD7': 0.1,
'CA1': 0.05,
'CA2': 0.85,
'CA3': 5.5,
'CA4': 0.178,
'CA6': 1,
'CA7': 0.6,
'CW1': 24,
'CW2': 0.004,
'CW3': 0.7,
'CW5': 0.25,
'CW6': 0.072,
'CW7': 1.35,
'CW8': 0.016,
'CW9': 1,
'CW12': 0.025,
'CP1': 150,
'CP4': 0.33,
'CP5': 1.43,
'CP6': 3.38,
'CP7': 0.91,
'CP8': 4.33,
'CP9': 4.37,
'CP10': 0.965,
'CP15': 0.1,
},
}
# Target nodata is for general rasters that are positive, and _IC_NODATA are
# for rasters that are any range
_TARGET_NODATA = -1.0
_IC_NODATA = float(numpy.finfo('float32').min)
# SV_NODATA is for state variables
_SV_NODATA = -1.0
def execute(args):
"""InVEST Forage Model.
[model description]
Parameters:
args['workspace_dir'] (string): path to target output workspace.
args['results_suffix'] (string): (optional) string to append to any
output file names
args['starting_month'] (int): what month to start reporting where
the range 1..12 is equivalent to Jan..Dec.
args['starting_year'] (int): what year to start runs. this value is
used to notate outputs in the form [month_int]_[year]
args['n_months'] (int): number of months to run model, the model run
will start reporting in `args['starting_month']`.
args['aoi_path'] (string): path to polygon vector indicating the
desired spatial extent of the model. This has the effect of
clipping the computational area of the input datasets to be the
area intersected by this polygon.
args['management_threshold'] (float): biomass in kg/ha required to be
left standing at each model step after offtake by grazing animals
args['proportion_legume_path'] (string): path to raster containing
fraction of pasture that is legume, by weight
args['bulk_density_path'] (string): path to bulk density raster.
args['ph_path'] (string): path to soil pH raster.
args['clay_proportion_path'] (string): path to raster representing
per-pixel proportion of soil component that is clay
args['silt_proportion_path'] (string): path to raster representing
per-pixel proportion of soil component that is silt
args['sand_proportion_path'] (string): path to raster representing
per-pixel proportion of soil component that is sand
args['precip_dir'] (string): path to a directory containing monthly
precipitation rasters. The model requires at least 12 months of
precipitation and expects to find a precipitation file input for
every month of the simulation, so the number of precipitation
files should be the maximum of 12 and `n_months`. The file name of
each precipitation raster must end with the year, followed by an
underscore, followed by the month number. E.g., Precip_2016_1.tif
for January of 2016.
args['min_temp_dir'] (string): path to a directory containing monthly
minimum temperature rasters. The model requires one minimum
temperature raster for each month of the year, or each month that
the model is run, whichever is smaller. The file name of each
minimum temperature raster must end with the month number. E.g.,
Min_temperature_1.tif for January.
args['max_temp_dir'] (string): path to a directory containing monthly
maximum temperature rasters. The model requires one maximum
temperature raster for each month of the year, or each month that
the model is run, whichever is smaller. The file name of each
maximum temperature raster must end with the month number. E.g.,
Max_temperature_1.tif for January.
args['site_param_table'] (string): path to csv file giving site
parameters. This file must contain a column named "site" that
contains unique integers. These integer values correspond to site
type identifiers which are values in the site parameter spatial
index raster. Other required fields for this table are site and
"fixed" parameters from the Century model, i.e., the parameters
in the Century input files site.100 and fix.100.
args['site_param_spatial_index_path'] (string): path to a raster file
that indexes site parameters, indicating which set of site
parameter values should apply at each pixel in the raster. The
raster should be composed of integers that correspond to values in
the field "site" in `site_param_table`.
args['veg_trait_path'] (string): path to csv file giving vegetation
traits for each plant functional type available for grazing. This
file must contain a column named "PFT" that contains unique
integers. These integer values correspond to PFT identifiers of
veg spatial composition rasters. Other required fields for this
table are vegetation input parameters from the Century model, for
example maximum intrinsic growth rate, optimum temperature for
production, minimum C/N ratio, etc.
args['veg_spatial_composition_path_pattern'] (string): path to
vegetation rasters, one per plant functional type available for
grazing, where <PFT> can be replaced with an integer that is
indexed in the veg trait csv.
Example: if this value is given as `./vegetation/pft_<PFT>.tif`
and the directory `./vegetation/` contains these files:
"pft_1.tif"
"pft_12.tif"
"pft_50.tif",
then the "PFT" field in the vegetation trait table must contain
the values 1, 12, and 50.
args['animal_trait_path'] (string): path to csv file giving animal
traits for each animal type - number - duration combination. This
table must contain a column named "animal_id" that contains unique
integers. These integer values correspond to features in the
animal management layer.
Other required fields in this table are:
type (allowable values: b_indicus, b_taurus,
indicus_x_taurus, sheep, camelid, hindgut_fermenter)
sex (allowable values: entire_m, castrate, breeding_female,
NA)
age (days)
weight (kg)
SRW (standard reference weight, kg; the weight of a mature
female in median condition)
SFW (standard fleece weight, kg; the average weight of fleece
of a mature adult; for sheep only)
birth_weight (kg)
grz_months (a string of integers, separated by ','; months of
the simulation when animals are present,
relative to `starting_month`. For example, if `n_months`
is 3, and animals are present during the entire simulation
period, `grz_months` should be "1,2,3")
args['animal_grazing_areas_path'] (string): path to animal vector
inputs giving the location of grazing animals. Must have a field
named "animal_id", containing unique integers that correspond to
the values in the "animal_id" column of the animal trait csv, and
a field named "num_animal" giving the number of animals grazing
inside each polygon feature.
args['initial_conditions_dir'] (string): optional input, path to
directory containing initial conditions. If this directory is not
supplied, a site_initial_table and pft_initial_table must be
supplied. If supplied, this directory must contain a series of
rasters with initial values for each PFT and for the site.
Required rasters for each PFT:
initial variables that are a property of PFT in the table
https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
e.g., aglivc_<PFT>.tif
Required for the site:
initial variables that are a property of site in the table
https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
args['site_initial_table'] (string): optional input, path to table
containing initial conditions for each site state variable. If an
initial conditions directory is not supplied, this table must be
supplied. This table must contain a value for each site code and
each state variable listed in the following table:
https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
args['pft_initial_table'] (string): optional input, path to table
containing initial conditions for each plant functional type state
variable. If an initial conditions directory is not supplied, this
table must be supplied. This table must contain a value for each
plant functional type index and each state variable listed in the
following table:
https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
args['save_sv_rasters'] (boolean): optional input, default false.
Should rasters containing all state variables be saved for each
model time step?
args['animal_density'] (string): optional input, path to raster giving
the density of grazing animals in animals per hectare.
args['crude_protein'] (float): optional input, crude protein
concentration of forage for the purposes of animal diet selection.
Should be a value between 0-1. If included, this value is
substituted for N content of forage when calculating digestibility
and "ingestibility" of forage, and protein content of the diet, for
grazing animals.
Returns:
None.
"""
LOGGER.info("model execute: %s", args)
starting_month = int(args['starting_month'])
starting_year = int(args['starting_year'])
n_months = int(args['n_months'])
try:
delete_sv_folders = not args['save_sv_rasters']
except KeyError:
delete_sv_folders = True
try:
global CRUDE_PROTEIN
CRUDE_PROTEIN = args['crude_protein']
except KeyError:
pass
try:
animal_density_path = args['animal_density']
except KeyError:
args['animal_density'] = None
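# (the assignment above only normalizes the optional key so that later
# truth-tests on args['animal_density'] do not raise KeyError)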
# this set will build up the integer months that are used so we can index
# them with temperature later
temperature_month_set = set()
# this dict will be used to build the set of input rasters associated with
# a reasonable lookup ID so we can have a nice dataset to align for raster
# stack operations
base_align_raster_path_id_map = {}
precip_dir_list = [
os.path.join(args['precip_dir'], f) for f in
os.listdir(args['precip_dir'])]
for month_index in range(n_months):
month_i = (starting_month + month_index - 1) % 12 + 1
temperature_month_set.add(month_i)
year = starting_year + (starting_month + month_index - 1) // 12
year_month_match = re.compile(
r'.*[^\d]%d_%d\.[^.]+$' % (year, month_i))
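# e.g., for year 2016 and month 1 this regex matches 'Precip_2016_1.tif'
# but not 'Precip_2016_11.tif', because a literal '.' must immediately
# follow the month number and a non-digit must precede the year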
file_list = [
month_file_path for month_file_path in precip_dir_list if
year_month_match.match(month_file_path)]
if len(file_list) == 0:
raise ValueError(
"No precipitation data found for year %d, month %d" %
(year, month_i))
if len(file_list) > 1:
raise ValueError(
"Ambiguous set of files found for year %d, month %d: %s" %
(year, month_i, file_list))
base_align_raster_path_id_map[
'precip_{}'.format(month_index)] = file_list[0]
# the model requires 12 months of precipitation data to calculate
# atmospheric N deposition and potential production from annual precip
n_precip_months = int(args['n_months'])
if n_precip_months < 12:
m_index = int(args['n_months'])
while n_precip_months < 12:
month_i = (starting_month + m_index - 1) % 12 + 1
year = starting_year + (starting_month + m_index - 1) // 12
year_month_match = re.compile(
r'.*[^\d]%d_%d\.[^.]+$' % (year, month_i))
file_list = [
month_file_path for month_file_path in precip_dir_list if
year_month_match.match(month_file_path)]
if len(file_list) == 0:
break
if len(file_list) > 1:
raise ValueError(
"Ambiguous set of files found for year %d, month %d: %s" %
(year, month_i, file_list))
base_align_raster_path_id_map[
'precip_%d' % m_index] = file_list[0]
n_precip_months = n_precip_months + 1
m_index = m_index + 1
if n_precip_months < 12:
raise ValueError("At least 12 months of precipitation data required")
# collect monthly temperature data
min_temp_dir_list = [
os.path.join(args['min_temp_dir'], f) for f in
os.listdir(args['min_temp_dir'])]
for month_i in temperature_month_set:
month_file_match = re.compile(r'.*[^\d]%d\.[^.]+$' % month_i)
file_list = [
month_file_path for month_file_path in min_temp_dir_list if
month_file_match.match(month_file_path)]
if len(file_list) == 0:
raise ValueError(
"No minimum temperature data found for month %d" % month_i)
if len(file_list) > 1:
raise ValueError(
"Ambiguous set of files found for month %d: %s" %
(month_i, file_list))
base_align_raster_path_id_map[
'min_temp_%d' % month_i] = file_list[0]
max_temp_dir_list = [
os.path.join(args['max_temp_dir'], f) for f in
os.listdir(args['max_temp_dir'])]
for month_i in temperature_month_set:
month_file_match = re.compile(r'.*[^\d]%d\.[^.]+$' % month_i)
file_list = [
month_file_path for month_file_path in max_temp_dir_list if
month_file_match.match(month_file_path)]
if len(file_list) == 0:
raise ValueError(
"No maximum temperature data found for month %d" % month_i)
if len(file_list) > 1:
raise ValueError(
"Ambiguous set of files found for month %d: %s" %
(month_i, file_list))
base_align_raster_path_id_map[
'max_temp_%d' % month_i] = file_list[0]
# lookup to provide path to soil percent given soil type
for soil_type in SOIL_TYPE_LIST:
base_align_raster_path_id_map[soil_type] = (
args['%s_proportion_path' % soil_type])
if not os.path.exists(base_align_raster_path_id_map[soil_type]):
raise ValueError(
"Couldn't find %s for %s" % (
base_align_raster_path_id_map[soil_type], soil_type))
base_align_raster_path_id_map['bulk_d_path'] = args['bulk_density_path']
base_align_raster_path_id_map['ph_path'] = args['ph_path']
# make sure site initial conditions and parameters exist for each site
# identifier
base_align_raster_path_id_map['site_index'] = (
args['site_param_spatial_index_path'])
n_bands = pygeoprocessing.get_raster_info(
args['site_param_spatial_index_path'])['n_bands']
if n_bands > 1:
raise ValueError(
'Site spatial index raster must contain only one band')
site_datatype = pygeoprocessing.get_raster_info(
args['site_param_spatial_index_path'])['datatype']
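# GDAL datatype codes 1-5 correspond to Byte, UInt16, Int16, UInt32, and
# Int32, i.e., the integer types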
if site_datatype not in [1, 2, 3, 4, 5]:
raise ValueError('Site spatial index raster must be integer type')
# get unique values in site param raster
site_index_set = set()
for offset_map, raster_block in pygeoprocessing.iterblocks(
(args['site_param_spatial_index_path'], 1)):
site_index_set.update(numpy.unique(raster_block))
site_nodata = pygeoprocessing.get_raster_info(
args['site_param_spatial_index_path'])['nodata'][0]
if site_nodata in site_index_set:
site_index_set.remove(site_nodata)
site_param_table = utils.build_lookup_from_csv(
args['site_param_table'], 'site')
missing_site_index_list = list(
site_index_set.difference(site_param_table.keys()))
if missing_site_index_list:
raise ValueError(
"Couldn't find parameter values for the following site "
"indices: " + ", ".join(
str(site_i) for site_i in missing_site_index_list))
# make sure plant functional type parameters exist for each pft raster
pft_dir = os.path.dirname(args['veg_spatial_composition_path_pattern'])
pft_basename = os.path.basename(
args['veg_spatial_composition_path_pattern'])
files = [
f for f in os.listdir(pft_dir) if os.path.isfile(
os.path.join(pft_dir, f))]
pft_regex = re.compile(pft_basename.replace('<PFT>', r'(\d+)'))
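# the literal '<PFT>' in the basename is replaced by a capturing group so
# the integer PFT id can be read from each matching filename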
pft_matches = [
m for m in [pft_regex.search(f) for f in files] if m is not None]
pft_id_set = set([int(m.group(1)) for m in pft_matches])
for pft_i in pft_id_set:
pft_path = args['veg_spatial_composition_path_pattern'].replace(
'<PFT>', '%d' % pft_i)
base_align_raster_path_id_map['pft_%d' % pft_i] = pft_path
veg_trait_table = utils.build_lookup_from_csv(
args['veg_trait_path'], 'PFT')
missing_pft_trait_list = pft_id_set.difference(veg_trait_table.keys())
if missing_pft_trait_list:
raise ValueError(
"Couldn't find trait values for the following plant functional "
"types: " + ", ".join(
str(pft_i) for pft_i in missing_pft_trait_list))
frtcindx_set = set([
pft_i['frtcindx'] for pft_i in veg_trait_table.values()])
if frtcindx_set.difference(set([0, 1])):
raise ValueError("frtcindx parameter contains invalid values")
base_align_raster_path_id_map['proportion_legume_path'] = args[
'proportion_legume_path']
# track separate state variable files for each PFT
pft_sv_dict = {}
for pft_i in pft_id_set:
for sv in _PFT_STATE_VARIABLES:
pft_sv_dict['{}_{}_path'.format(
sv, pft_i)] = '{}_{}.tif'.format(sv, pft_i)
# make sure animal traits exist for each feature in animal management
# layer
anim_id_list = []
driver = ogr.GetDriverByName('ESRI Shapefile')
datasource = driver.Open(args['animal_grazing_areas_path'], 0)
layer = datasource.GetLayer()
for feature in layer:
anim_id_list.append(feature.GetField('animal_id'))
input_animal_trait_table = utils.build_lookup_from_csv(
args['animal_trait_path'], 'animal_id')
missing_animal_trait_list = set(
anim_id_list).difference(input_animal_trait_table.keys())
if missing_animal_trait_list:
raise ValueError(
"Couldn't find trait values for the following animal "
"ids: " + ", ".join(
str(animal_id) for animal_id in missing_animal_trait_list))
# if animal density is supplied, align inputs to match its resolution
# otherwise, match resolution of precipitation rasters
if args['animal_density']:
target_pixel_size = pygeoprocessing.get_raster_info(
args['animal_density'])['pixel_size']
base_align_raster_path_id_map['animal_density'] = args[
'animal_density']
else:
target_pixel_size = pygeoprocessing.get_raster_info(
base_align_raster_path_id_map['precip_0'])['pixel_size']
LOGGER.info(
"pixel size of aligned inputs: %s", target_pixel_size)
# temporary directory for intermediate files
global PROCESSING_DIR
PROCESSING_DIR = os.path.join(args['workspace_dir'], "temporary_files")
if not os.path.exists(PROCESSING_DIR):
os.makedirs(PROCESSING_DIR)
# set up a dictionary that uses the same keys as
# 'base_align_raster_path_id_map' to point to the clipped/resampled
# rasters to be used in raster calculations for the model.
aligned_raster_dir = os.path.join(
args['workspace_dir'], 'aligned_inputs')
if os.path.exists(aligned_raster_dir):
shutil.rmtree(aligned_raster_dir)
os.makedirs(aligned_raster_dir)
aligned_inputs = dict([(key, os.path.join(
aligned_raster_dir, 'aligned_%s' % os.path.basename(path)))
for key, path in base_align_raster_path_id_map.items()])
# align all the base inputs to be the minimum known pixel size and to
# only extend over their combined intersections
source_input_path_list = [
base_align_raster_path_id_map[k] for k in sorted(
base_align_raster_path_id_map.keys())]
aligned_input_path_list = [
aligned_inputs[k] for k in sorted(aligned_inputs.keys())]
pygeoprocessing.align_and_resize_raster_stack(
source_input_path_list, aligned_input_path_list,
['near'] * len(source_input_path_list),
target_pixel_size, 'intersection',
base_vector_path_list=[args['aoi_path']],
vector_mask_options={'mask_vector_path': args['aoi_path']})
_check_pft_fractional_cover_sum(aligned_inputs, pft_id_set)
file_suffix = utils.make_suffix_string(args, 'results_suffix')
# create animal trait spatial index raster from management polygon
aligned_inputs['animal_index'] = os.path.join(
aligned_raster_dir, 'animal_spatial_index.tif')
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], aligned_inputs['animal_index'],
gdal.GDT_Int32, [_TARGET_NODATA], fill_value_list=[_TARGET_NODATA])
pygeoprocessing.rasterize(
args['animal_grazing_areas_path'], aligned_inputs['animal_index'],
option_list=["ATTRIBUTE=animal_id"])
# create uniform animal density raster, if not supplied as input
if not args['animal_density']:
aligned_inputs['animal_density'] = os.path.join(
aligned_raster_dir, 'animal_density.tif')
_animal_density(aligned_inputs, args['animal_grazing_areas_path'])
# Initialization
sv_dir = os.path.join(args['workspace_dir'], 'state_variables_m-1')
os.makedirs(sv_dir)
initial_conditions_dir = None
try:
initial_conditions_dir = args['initial_conditions_dir']
except KeyError:
pass
if initial_conditions_dir:
# check that a raster for each required state variable is supplied
missing_initial_values = []
# set _SV_NODATA from initial rasters
state_var_nodata = set([])
# align initial state variables to resampled inputs
resample_initial_path_map = {}
for sv in _SITE_STATE_VARIABLE_FILES:
sv_path = os.path.join(
initial_conditions_dir, _SITE_STATE_VARIABLE_FILES[sv])
state_var_nodata.update(
set([pygeoprocessing.get_raster_info(sv_path)['nodata'][0]]))
resample_initial_path_map[sv] = sv_path
if not os.path.exists(sv_path):
missing_initial_values.append(sv_path)
for pft_i in pft_id_set:
for sv in _PFT_STATE_VARIABLES:
sv_key = '{}_{}_path'.format(sv, pft_i)
sv_path = os.path.join(
initial_conditions_dir, '{}_{}.tif'.format(sv, pft_i))
state_var_nodata.update(
set([pygeoprocessing.get_raster_info(sv_path)['nodata']
[0]]))
resample_initial_path_map[sv_key] = sv_path
if not os.path.exists(sv_path):
missing_initial_values.append(sv_path)
if missing_initial_values:
raise ValueError(
"Couldn't find the following required initial values: " +
"\n\t".join(missing_initial_values))
if len(state_var_nodata) > 1:
raise ValueError(
"Initial state variable rasters contain >1 nodata value")
global _SV_NODATA
_SV_NODATA = list(state_var_nodata)[0]
# align initial values with inputs
initial_path_list = (
[aligned_inputs['precip_0']] +
[resample_initial_path_map[key] for key in sorted(
resample_initial_path_map.keys())])
aligned_initial_path_list = (
[os.path.join(PROCESSING_DIR, 'aligned_input_template.tif')] +
[os.path.join(
sv_dir, os.path.basename(resample_initial_path_map[key])) for
key in sorted(resample_initial_path_map.keys())])
pygeoprocessing.align_and_resize_raster_stack(
initial_path_list, aligned_initial_path_list,
['near'] * len(initial_path_list),
target_pixel_size, 'intersection',
base_vector_path_list=[args['aoi_path']], raster_align_index=0,
vector_mask_options={'mask_vector_path': args['aoi_path']})
sv_reg = dict(
[(key, os.path.join(sv_dir, os.path.basename(path)))
for key, path in resample_initial_path_map.items()])
else:
# create initialization rasters from tables
try:
site_initial_conditions_table = utils.build_lookup_from_csv(
args['site_initial_table'], 'site')
except KeyError:
raise ValueError(
"If initial conditions rasters are not supplied, initial " +
"conditions tables must be supplied")
missing_site_index_list = list(
site_index_set.difference(site_initial_conditions_table.keys()))
if missing_site_index_list:
raise ValueError(
"Couldn't find initial conditions values for the following "
"site indices: " + ", ".join(
str(site_i) for site_i in missing_site_index_list))
try:
pft_initial_conditions_table = utils.build_lookup_from_csv(
args['pft_initial_table'], 'PFT')
except KeyError:
raise ValueError(
"If initial conditions rasters are not supplied, initial " +
"conditions tables must be supplied")
missing_pft_index_list = pft_id_set.difference(
pft_initial_conditions_table.keys())
if missing_pft_index_list:
raise ValueError(
"Couldn't find initial condition values for the following "
"plant functional types: " + ", ".join(
str(pft_i) for pft_i in missing_pft_index_list))
sv_reg = initial_conditions_from_tables(
aligned_inputs, sv_dir, pft_id_set, site_initial_conditions_table,
pft_initial_conditions_table)
# calculate persistent intermediate parameters that do not change during
# the simulation
persist_param_dir = os.path.join(
args['workspace_dir'], 'intermediate_parameters')
utils.make_directories([persist_param_dir])
pp_reg = utils.build_file_registry(
[(_PERSISTENT_PARAMS_FILES, persist_param_dir)], file_suffix)
# calculate derived animal traits that do not change during the simulation
freer_parameter_df = pandas.DataFrame.from_dict(
_FREER_PARAM_DICT, orient='index')
freer_parameter_df['type'] = freer_parameter_df.index
animal_trait_table = calc_derived_animal_traits(
input_animal_trait_table, freer_parameter_df)
# calculate maximum potential intake of each animal type
for animal_id in animal_trait_table.keys():
revised_animal_trait_dict = calc_max_intake(
animal_trait_table[animal_id])
animal_trait_table[animal_id] = revised_animal_trait_dict
# calculate field capacity and wilting point
LOGGER.info("Calculating field capacity and wilting point")
_afiel_awilt(
aligned_inputs['site_index'], site_param_table,
sv_reg['som1c_2_path'], sv_reg['som2c_2_path'], sv_reg['som3c_path'],
aligned_inputs['sand'], aligned_inputs['silt'],
aligned_inputs['clay'], aligned_inputs['bulk_d_path'], pp_reg)
# calculate other persistent parameters
LOGGER.info("Calculating persistent parameters")
_persistent_params(
aligned_inputs['site_index'], site_param_table,
aligned_inputs['sand'], aligned_inputs['clay'], pp_reg)
# calculate required ratios for decomposition of structural material
LOGGER.info("Calculating required ratios for structural decomposition")
_structural_ratios(
aligned_inputs['site_index'], site_param_table, sv_reg, pp_reg)
# make yearly directory for values that are updated every twelve months
year_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
year_reg = dict(
[(key, os.path.join(year_dir, path)) for key, path in
_YEARLY_FILES.items()])
for pft_i in pft_id_set:
for file in _YEARLY_PFT_FILES:
year_reg['{}_{}'.format(file, pft_i)] = os.path.join(
year_dir, '{}_{}.tif'.format(file, pft_i))
# make monthly directory for monthly intermediate parameters that are
# shared between submodels, but do not need to be saved as output
month_temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
month_reg = {}
for pft_i in pft_id_set:
for val in _PFT_INTERMEDIATE_VALUES:
month_reg['{}_{}'.format(
val, pft_i)] = os.path.join(
month_temp_dir, '{}_{}.tif'.format(val, pft_i))
for val in _SITE_INTERMEDIATE_VALUES:
month_reg[val] = os.path.join(month_temp_dir, '{}.tif'.format(val))
output_dir = os.path.join(args['workspace_dir'], "output")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# provisional state variable registry contains provisional biomass in
# absence of grazing
provisional_sv_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
provisional_sv_reg = utils.build_file_registry(
[(_SITE_STATE_VARIABLE_FILES, provisional_sv_dir),
(pft_sv_dict, provisional_sv_dir)], file_suffix)
intermediate_sv_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
# Main simulation loop
# for each step in the simulation
for month_index in range(n_months):
if (month_index % 12) == 0:
# Update yearly quantities
_yearly_tasks(
aligned_inputs, site_param_table, veg_trait_table, month_index,
pft_id_set, year_reg)
current_month = (starting_month + month_index - 1) % 12 + 1
current_year = starting_year + (starting_month + month_index - 1) // 12
# track state variables from previous step
prev_sv_reg = sv_reg
for animal_id in animal_trait_table.keys():
if animal_trait_table[animal_id]['sex'] == 'breeding_female':
revised_animal_trait_dict = update_breeding_female_status(
animal_trait_table[animal_id], month_index)
animal_trait_table[animal_id] = revised_animal_trait_dict
revised_animal_trait_dict = calc_max_intake(
animal_trait_table[animal_id])
animal_trait_table[animal_id] = revised_animal_trait_dict
# enforce absence of grazing as zero biomass removed
for pft_i in pft_id_set:
pygeoprocessing.new_raster_from_base(
aligned_inputs['pft_{}'.format(pft_i)],
month_reg['flgrem_{}'.format(pft_i)], gdal.GDT_Float32,
[_TARGET_NODATA], fill_value_list=[0])
pygeoprocessing.new_raster_from_base(
aligned_inputs['pft_{}'.format(pft_i)],
month_reg['fdgrem_{}'.format(pft_i)], gdal.GDT_Float32,
[_TARGET_NODATA], fill_value_list=[0])
# populate provisional_sv_reg with provisional biomass in absence of
# grazing
_potential_production(
aligned_inputs, site_param_table, current_month, month_index,
pft_id_set, veg_trait_table, prev_sv_reg, pp_reg, month_reg)
_root_shoot_ratio(
aligned_inputs, site_param_table, current_month, pft_id_set,
veg_trait_table, prev_sv_reg, year_reg, month_reg)
_soil_water(
aligned_inputs, site_param_table, veg_trait_table, current_month,
month_index, prev_sv_reg, pp_reg, pft_id_set, month_reg,
provisional_sv_reg)
_decomposition(
aligned_inputs, current_month, month_index, pft_id_set,
site_param_table, year_reg, month_reg, prev_sv_reg, pp_reg,
provisional_sv_reg)
_death_and_partition(
'stded', aligned_inputs, site_param_table, current_month,
year_reg, pft_id_set, veg_trait_table, prev_sv_reg,
provisional_sv_reg)
_death_and_partition(
'bgliv', aligned_inputs, site_param_table, current_month,
year_reg, pft_id_set, veg_trait_table, prev_sv_reg,
provisional_sv_reg)
_shoot_senescence(
pft_id_set, veg_trait_table, prev_sv_reg, month_reg, current_month,
provisional_sv_reg)
intermediate_sv_reg = copy_intermediate_sv(
pft_id_set, provisional_sv_reg, intermediate_sv_dir)
delta_agliv_dict = _new_growth(
pft_id_set, aligned_inputs, site_param_table, veg_trait_table,
month_reg, current_month, provisional_sv_reg)
_apply_new_growth(delta_agliv_dict, pft_id_set, provisional_sv_reg)
# estimate grazing offtake by animals relative to provisional biomass
# at an intermediate step, after senescence but before new growth
_calc_grazing_offtake(
aligned_inputs, args['aoi_path'], args['management_threshold'],
intermediate_sv_reg, pft_id_set, aligned_inputs['animal_index'],
animal_trait_table, veg_trait_table, current_month, month_reg)
# estimate actual biomass production for this step, integrating impacts
# of grazing
sv_dir = os.path.join(
args['workspace_dir'], 'state_variables_m%d' % month_index)
utils.make_directories([sv_dir])
sv_reg = utils.build_file_registry(
[(_SITE_STATE_VARIABLE_FILES, sv_dir),
(pft_sv_dict, sv_dir)], file_suffix)
_potential_production(
aligned_inputs, site_param_table, current_month, month_index,
pft_id_set, veg_trait_table, prev_sv_reg, pp_reg, month_reg)
_root_shoot_ratio(
aligned_inputs, site_param_table, current_month, pft_id_set,
veg_trait_table, prev_sv_reg, year_reg, month_reg)
_soil_water(
aligned_inputs, site_param_table, veg_trait_table, current_month,
month_index, prev_sv_reg, pp_reg, pft_id_set, month_reg, sv_reg)
_decomposition(
aligned_inputs, current_month, month_index, pft_id_set,
site_param_table, year_reg, month_reg, prev_sv_reg, pp_reg, sv_reg)
_death_and_partition(
'stded', aligned_inputs, site_param_table, current_month,
year_reg, pft_id_set, veg_trait_table, prev_sv_reg, sv_reg)
_death_and_partition(
'bgliv', aligned_inputs, site_param_table, current_month,
year_reg, pft_id_set, veg_trait_table, prev_sv_reg, sv_reg)
_shoot_senescence(
pft_id_set, veg_trait_table, prev_sv_reg, month_reg, current_month,
sv_reg)
delta_agliv_dict = _new_growth(
pft_id_set, aligned_inputs, site_param_table, veg_trait_table,
month_reg, current_month, sv_reg)
_animal_diet_sufficiency(
sv_reg, pft_id_set, aligned_inputs, animal_trait_table,
veg_trait_table, current_month, month_reg)
_grazing(
aligned_inputs, site_param_table, month_reg, animal_trait_table,
pft_id_set, sv_reg)
_apply_new_growth(delta_agliv_dict, pft_id_set, sv_reg)
_leach(aligned_inputs, site_param_table, month_reg, sv_reg)
_write_monthly_outputs(
aligned_inputs, provisional_sv_reg, sv_reg, month_reg, pft_id_set,
current_year, current_month, output_dir, file_suffix)
# summary results
summary_output_dir = os.path.join(output_dir, 'summary_results')
os.makedirs(summary_output_dir)
summary_shp_path = os.path.join(
summary_output_dir,
'grazing_areas_results_rpm{}.shp'.format(file_suffix))
create_vector_copy(
args['animal_grazing_areas_path'], summary_shp_path)
field_pickle_map, field_header_order_list = aggregate_and_pickle_results(
output_dir, summary_shp_path)
_add_fields_to_shapefile(
field_pickle_map, field_header_order_list, summary_shp_path)
# clean up
shutil.rmtree(persist_param_dir)
shutil.rmtree(PROCESSING_DIR)
if delete_sv_folders:
for month_index in range(-1, n_months):
shutil.rmtree(
os.path.join(
args['workspace_dir'],
'state_variables_m%d' % month_index))
def raster_multiplication(
raster1, raster1_nodata, raster2, raster2_nodata, target_path,
target_path_nodata):
"""Multiply raster1 by raster2.
Multiply raster1 by raster2 element-wise. In any pixel where raster1 or
raster2 is nodata, the result is nodata. The result is always of float
datatype.
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
def raster_multiply_op(raster1, raster2):
"""Multiply two rasters."""
valid_mask = (
(~numpy.isclose(raster1, raster1_nodata)) &
(~numpy.isclose(raster2, raster2_nodata)))
result = numpy.empty(raster1.shape, dtype=numpy.float32)
result[:] = target_path_nodata
result[valid_mask] = raster1[valid_mask] * raster2[valid_mask]
return result
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_multiply_op, target_path, gdal.GDT_Float32,
target_path_nodata)
def raster_division(
raster1, raster1_nodata, raster2, raster2_nodata, target_path,
target_path_nodata):
"""Divide raster1 by raster2.
Divide raster1 by raster2 element-wise. In any pixel where raster1 or
raster2 is nodata, the result is nodata. The result is always of float
datatype.
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
def raster_divide_op(raster1, raster2):
"""Divide raster1 by raster2."""
valid_mask = (
(~numpy.isclose(raster1, raster1_nodata)) &
(~numpy.isclose(raster2, raster2_nodata)))
raster1 = raster1.astype(numpy.float32)
raster2 = raster2.astype(numpy.float32)
result = numpy.empty(raster1.shape, dtype=numpy.float32)
result[:] = target_path_nodata
error_mask = ((raster1 != 0) & (raster2 == 0.) & valid_mask)
zero_mask = ((raster1 == 0.) & (raster2 == 0.) & valid_mask)
nonzero_mask = ((raster2 != 0.) & valid_mask)
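# division by zero with a nonzero numerator yields nodata; 0/0 is
# defined as 0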
result[error_mask] = target_path_nodata
result[zero_mask] = 0.
result[nonzero_mask] = raster1[nonzero_mask] / raster2[nonzero_mask]
return result
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_divide_op, target_path, gdal.GDT_Float32,
target_path_nodata)
def raster_list_sum(
raster_list, input_nodata, target_path, target_nodata,
nodata_remove=False):
"""Calculate the sum per pixel across rasters in a list.
Sum the rasters in `raster_list` element-wise, allowing nodata values
in the rasters to propagate to the result or treating nodata as zero. If
nodata is treated as zero, areas where all inputs are nodata will be nodata
in the output.
Parameters:
raster_list (list): list of paths to rasters to sum
input_nodata (float or int): nodata value in the input rasters
target_path (string): path to location to store the result
target_nodata (float or int): nodata value for the result raster
nodata_remove (bool): if true, treat nodata values in input
rasters as zero. If false, the sum in a pixel where any input
raster is nodata is nodata.
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
def raster_sum_op(*raster_list):
"""Add the rasters in raster_list without removing nodata values."""
invalid_mask = numpy.any(
numpy.isclose(numpy.array(raster_list), input_nodata), axis=0)
for r in raster_list:
numpy.place(r, numpy.isclose(r, input_nodata), [0])
sum_of_rasters = numpy.sum(raster_list, axis=0)
sum_of_rasters[invalid_mask] = target_nodata
return sum_of_rasters
def raster_sum_op_nodata_remove(*raster_list):
"""Add the rasters in raster_list, treating nodata as zero."""
invalid_mask = numpy.all(
numpy.isclose(numpy.array(raster_list), input_nodata), axis=0)
for r in raster_list:
numpy.place(r, numpy.isclose(r, input_nodata), [0])
sum_of_rasters = numpy.sum(raster_list, axis=0)
sum_of_rasters[invalid_mask] = target_nodata
return sum_of_rasters
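# the two operations differ only in the invalid mask: numpy.any marks a
# pixel invalid if any input is nodata, numpy.all only if every input is
# nodata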
if nodata_remove:
pygeoprocessing.raster_calculator(
[(path, 1) for path in raster_list], raster_sum_op_nodata_remove,
target_path, gdal.GDT_Float32, target_nodata)
else:
pygeoprocessing.raster_calculator(
[(path, 1) for path in raster_list], raster_sum_op,
target_path, gdal.GDT_Float32, target_nodata)
def raster_sum(
raster1, raster1_nodata, raster2, raster2_nodata, target_path,
target_nodata, nodata_remove=False):
"""Add raster 1 and raster2.
Add raster1 and raster2, allowing nodata values in the rasters to
propagate to the result or treating nodata as zero.
Parameters:
raster1 (string): path to one raster operand
raster1_nodata (float or int): nodata value in raster1
raster2 (string): path to second raster operand
raster2_nodata (float or int): nodata value in raster2
target_path (string): path to location to store the sum
target_nodata (float or int): nodata value for the result raster
nodata_remove (bool): if true, treat nodata values in input
rasters as zero. If false, the sum in a pixel where any
input raster is nodata is nodata.
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
def raster_sum_op(raster1, raster2):
"""Add raster1 and raster2 without removing nodata values."""
valid_mask = (
(~numpy.isclose(raster1, raster1_nodata)) &
(~numpy.isclose(raster2, raster2_nodata)))
result = numpy.empty(raster1.shape, dtype=numpy.float32)
result[:] = target_nodata
result[valid_mask] = raster1[valid_mask] + raster2[valid_mask]
return result
def raster_sum_op_nodata_remove(raster1, raster2):
"""Add raster1 and raster2, treating nodata as zero."""
numpy.place(raster1, numpy.isclose(raster1, raster1_nodata), [0])
numpy.place(raster2, numpy.isclose(raster2, raster2_nodata), [0])
result = raster1 + raster2
return result
if nodata_remove:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_sum_op_nodata_remove, target_path, gdal.GDT_Float32,
target_nodata)
else:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_sum_op, target_path, gdal.GDT_Float32,
target_nodata)
def raster_difference(
raster1, raster1_nodata, raster2, raster2_nodata, target_path,
target_nodata, nodata_remove=False):
"""Subtract raster2 from raster1.
Subtract raster2 from raster1 element-wise, allowing nodata values in the
rasters to propagate to the result or treating nodata as zero.
Parameters:
raster1 (string): path to raster from which to subtract raster2
raster1_nodata (float or int): nodata value in raster1
raster2 (string): path to raster which should be subtracted from
raster1
raster2_nodata (float or int): nodata value in raster2
target_path (string): path to location to store the difference
target_nodata (float or int): nodata value for the result raster
nodata_remove (bool): if true, treat nodata values in input
rasters as zero. If false, the difference in a pixel where any
input raster is nodata is nodata.
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
def raster_difference_op(raster1, raster2):
"""Subtract raster2 from raster1 without removing nodata values."""
valid_mask = (
(~numpy.isclose(raster1, raster1_nodata)) &
(~numpy.isclose(raster2, raster2_nodata)))
result = numpy.empty(raster1.shape, dtype=numpy.float32)
result[:] = target_nodata
result[valid_mask] = raster1[valid_mask] - raster2[valid_mask]
return result
def raster_difference_op_nodata_remove(raster1, raster2):
"""Subtract raster2 from raster1, treating nodata as zero."""
numpy.place(raster1, numpy.isclose(raster1, raster1_nodata), [0])
numpy.place(raster2, numpy.isclose(raster2, raster2_nodata), [0])
result = raster1 - raster2
return result
if nodata_remove:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_difference_op_nodata_remove, target_path, gdal.GDT_Float32,
target_nodata)
else:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_difference_op, target_path, gdal.GDT_Float32,
target_nodata)
def reclassify_nodata(target_path, new_nodata_value):
"""Reclassify the nodata value of a raster to a new value.
Convert all areas of nodata in the target raster to the new nodata
value, which must be an integer.
Parameters:
target_path (string): path to target raster
new_nodata_value (integer): new value to set as nodata
Side effects:
modifies the raster indicated by `target_path`
Returns:
None
"""
def reclassify_op(target_raster):
reclassified_raster = numpy.copy(target_raster)
reclassify_mask = (target_raster == previous_nodata_value)
reclassified_raster[reclassify_mask] = new_nodata_value
return reclassified_raster
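# copy the raster aside to a temporary file so that raster_calculator can
# write the reclassified result back over `target_path`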
fd, temp_path = tempfile.mkstemp(dir=PROCESSING_DIR)
shutil.copyfile(target_path, temp_path)
previous_nodata_value = pygeoprocessing.get_raster_info(
target_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(temp_path, 1)], reclassify_op, target_path, gdal.GDT_Float32,
new_nodata_value)
# clean up
os.close(fd)
os.remove(temp_path)
def weighted_state_variable_sum(
sv, sv_reg, aligned_inputs, pft_id_set, weighted_sum_path):
"""Calculate weighted sum of state variable across plant functional types.
To sum a state variable across PFTs within a grid cell, the state variable
must be weighted by the fractional cover of each PFT inside the grid cell.
First multiply the state variable by its fractional cover, and then add up
the weighted products.
Parameters:
sv (string): state variable to be summed across plant functional types
sv_reg (dict): map of key, path pairs giving paths to state variables,
including sv, the state variable to be summed
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including fractional cover of each plant
functional type
pft_id_set (set): set of integers identifying plant functional types
weighted_sum_path (string): path to raster that should contain the
weighted sum across PFTs
Side effects:
modifies or creates the raster indicated by `weighted_sum_path`
Returns:
None
"""
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
for pft_i in pft_id_set:
val = '{}_weighted'.format(sv)
temp_val_dict['{}_{}'.format(val, pft_i)] = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
weighted_path_list = []
for pft_i in pft_id_set:
target_path = temp_val_dict['{}_weighted_{}'.format(sv, pft_i)]
pft_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['pft_{}'.format(pft_i)])['nodata'][0]
raster_multiplication(
sv_reg['{}_{}_path'.format(sv, pft_i)], _SV_NODATA,
aligned_inputs['pft_{}'.format(pft_i)], pft_nodata,
target_path, _TARGET_NODATA)
weighted_path_list.append(target_path)
raster_list_sum(
weighted_path_list, _TARGET_NODATA, weighted_sum_path, _TARGET_NODATA,
nodata_remove=True)
# clean up temporary files
shutil.rmtree(temp_dir)
def _check_pft_fractional_cover_sum(aligned_inputs, pft_id_set):
"""Check the sum of fractional cover across plant functional types.
Parameters:
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including fractional cover of each plant
functional type
pft_id_set (set): set of integers identifying plant functional types
Raises:
ValueError if the pixel-wise sum of fractional cover values across
plant functional types exceeds 1
Returns:
None
"""
with tempfile.NamedTemporaryFile(
prefix='cover_sum', dir=PROCESSING_DIR) as cover_sum_temp_file:
cover_sum_path = cover_sum_temp_file.name
with tempfile.NamedTemporaryFile(
prefix='operand_temp', dir=PROCESSING_DIR) as operand_temp_file:
operand_temp_path = operand_temp_file.name
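# the NamedTemporaryFile blocks above are used only to reserve unique
# filenames inside PROCESSING_DIR; new rasters are created at those paths
# below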
# initialize sum to zero
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], cover_sum_path, gdal.GDT_Float32,
[_TARGET_NODATA], fill_value_list=[0])
for pft_i in pft_id_set:
shutil.copyfile(cover_sum_path, operand_temp_path)
pft_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['pft_{}'.format(pft_i)])['nodata'][0]
raster_sum(
aligned_inputs['pft_{}'.format(pft_i)], pft_nodata,
operand_temp_path, _TARGET_NODATA,
cover_sum_path, _TARGET_NODATA)
# get maximum sum of fractional cover
max_cover = 0.
for offset_map, raster_block in pygeoprocessing.iterblocks(
(cover_sum_path, 1)):
valid_mask = (raster_block != _TARGET_NODATA)
if raster_block[valid_mask].size > 0:
max_cover = max(max_cover, numpy.amax(raster_block[valid_mask]))
if max_cover > 1:
raise ValueError(
"Fractional cover across plant functional types exceeds 1")
# clean up
os.remove(cover_sum_path)
def initial_conditions_from_tables(
aligned_inputs, sv_dir, pft_id_set, site_initial_conditions_table,
pft_initial_conditions_table):
"""Generate initial state variable registry from initial conditions tables.
Parameters:
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including site spatial index raster and
fractional cover of each plant functional type
sv_dir (string): path to directory where initial state variable rasters
should be stored
pft_id_set (set): set of integers identifying plant functional types
site_initial_conditions_table (dict): map of site spatial index to
dictionaries that contain initial values for site-level state
variables
pft_initial_conditions_table (dict): map of plant functional type index
to dictionaries that contain initial values for plant functional
type-level state variables
Returns:
initial_sv_reg, map of key, path pairs giving paths to initial state
variable rasters
"""
def full_masked(pft_cover, fill_val):
"""Create a constant raster masked by pft fractional cover.
Parameters:
pft_cover (numpy.ndarray): input, fractional cover of the plant
functional type
fill_val (float): constant value with which to fill raster in areas
where fractional cover > 0
Returns:
full_masked, a raster containing `fill_val` in areas where
`pft_cover` > 0
"""
valid_mask = (
(~numpy.isclose(pft_cover, _SV_NODATA)) &
(pft_cover > 0))
full_masked = numpy.empty(pft_cover.shape, dtype=numpy.float32)
full_masked[:] = _SV_NODATA
full_masked[valid_mask] = fill_val
return full_masked
initial_sv_reg = {}
# site-level state variables
# check for missing state variable values
required_site_state_var = set(
[sv_key[:-5] for sv_key in _SITE_STATE_VARIABLE_FILES.keys()])
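# each sv_key ends in '_path'; strip that suffix to recover the state
# variable name used as a column header in the initial conditions table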
for site_code in site_initial_conditions_table.keys():
missing_site_state_var = required_site_state_var.difference(
site_initial_conditions_table[site_code].keys())
if missing_site_state_var:
raise ValueError(
"The following state variables were not found in the site " +
"initial conditions table: \n\t" + "\n\t".join(
missing_site_state_var))
for sv_key, basename in _SITE_STATE_VARIABLE_FILES.items():
state_var = sv_key[:-5]
site_to_val = dict(
[(site_code, float(table[state_var])) for (
site_code, table) in
site_initial_conditions_table.items()])
target_path = os.path.join(sv_dir, basename)
initial_sv_reg[sv_key] = target_path
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _SV_NODATA)
# PFT-level state variables
for pft_i in pft_id_set:
# check for missing values
missing_pft_state_var = set(_PFT_STATE_VARIABLES).difference(
pft_initial_conditions_table[pft_i].keys())
if missing_pft_state_var:
raise ValueError(
"The following state variables were not found in the plant " +
"functional type initial conditions table: \n\t" + "\n\t".join(
missing_pft_state_var))
for state_var in _PFT_STATE_VARIABLES:
fill_val = pft_initial_conditions_table[pft_i][state_var]
pft_cover_path = aligned_inputs['pft_{}'.format(pft_i)]
target_path = os.path.join(
sv_dir, '{}_{}.tif'.format(state_var, pft_i))
sv_key = '{}_{}_path'.format(state_var, pft_i)
initial_sv_reg[sv_key] = target_path
pygeoprocessing.raster_calculator(
[(pft_cover_path, 1), (fill_val, 'raw')],
full_masked, target_path, gdal.GDT_Float32, _SV_NODATA)
return initial_sv_reg
def _calc_ompc(
som1c_2_path, som2c_2_path, som3c_path, bulkd_path, edepth_path,
ompc_path):
"""Estimate total soil organic matter.
Total soil organic matter is the sum of soil carbon across
slow, active, and passive compartments, weighted by bulk
density and total modeled soil depth. Lines 220-222, Prelim.f
Parameters:
som1c_2_path (string): path to active organic soil carbon raster
som2c_2_path (string): path to slow organic soil carbon raster
som3c_path (string): path to passive organic soil carbon raster
bulkd_path (string): path to bulk density of soil raster
edepth_path (string): path to depth of soil raster
ompc_path (string): path to result, total soil organic matter
Side effects:
modifies or creates the raster indicated by `ompc_path`
Returns:
None
"""
def ompc_op(som1c_2, som2c_2, som3c, bulkd, edepth):
"""Estimate total soil organic matter.
Total soil organic matter is the sum of soil carbon across
slow, active, and passive compartments, weighted by bulk
density and total modeled soil depth. Lines 220-222, Prelim.f
Parameters:
som1c_2 (numpy.ndarray): state variable, active organic soil carbon
som2c_2 (numpy.ndarray): state variable, slow organic soil carbon
som3c (numpy.ndarray): state variable, passive organic soil carbon
bulkd (numpy.ndarray): input, bulk density of soil
edepth (numpy.ndarray): parameter, depth of soil for this
calculation
Returns:
ompc, total soil organic matter weighted by bulk
density.
"""
ompc = numpy.empty(som1c_2.shape, dtype=numpy.float32)
ompc[:] = _TARGET_NODATA
valid_mask = (
(~numpy.isclose(som1c_2, _SV_NODATA)) &
(~numpy.isclose(som2c_2, _SV_NODATA)) &
(~numpy.isclose(som3c, _SV_NODATA)) &
(~numpy.isclose(bulkd, bulkd_nodata)) &
(edepth != _IC_NODATA))
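# sum the three carbon pools, convert carbon to organic matter using the
# conventional 1.724 (van Bemmelen) factor, and normalize by bulk density
# and modeled depth (Lines 220-222, Prelim.f)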
ompc[valid_mask] = (
(som1c_2[valid_mask] + som2c_2[valid_mask] +
som3c[valid_mask]) * 1.724 /
(10000. * bulkd[valid_mask] * edepth[valid_mask]))
return ompc
bulkd_nodata = pygeoprocessing.get_raster_info(bulkd_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
som1c_2_path, som2c_2_path, som3c_path,
bulkd_path, edepth_path]],
ompc_op, ompc_path, gdal.GDT_Float32, _TARGET_NODATA)
def _calc_afiel(
sand_path, silt_path, clay_path, ompc_path, bulkd_path, afiel_path):
"""Calculate field capacity for one soil layer.
Parameters:
sand_path (string): path to proportion sand in soil raster
silt_path (string): path to proportion silt in soil raster
clay_path (string): path to proportion clay in soil raster
ompc_path (string): path to estimated total soil organic matter raster
bulkd_path (string): path to bulk density of soil raster
afiel_path (string): path to result raster, field capacity for this
soil layer
Side effects:
creates the raster indicated by `afiel_path`
Returns:
None
"""
def afiel_op(sand, silt, clay, ompc, bulkd):
"""Calculate field capacity for one soil layer.
Field capacity, maximum soil moisture retention capacity,
from <NAME> Larson 1979, 'Estimating soil and water
retention characteristics from particle size distribution,
organic matter percent and bulk density'. Water Resources
Research 15:1633.
Parameters:
sand (numpy.ndarray): input, proportion sand in soil
silt (numpy.ndarray): input, proportion silt in soil
clay (numpy.ndarray): input, proportion clay in soil
ompc (numpy.ndarray): derived, estimated total soil organic matter
bulkd (numpy.ndarray): input, bulk density of soil
Returns:
afiel, field capacity for this soil layer
"""
afiel = numpy.empty(sand.shape, dtype=numpy.float32)
afiel[:] = _TARGET_NODATA
valid_mask = (
(~numpy.isclose(sand, sand_nodata)) &
(~numpy.isclose(silt, silt_nodata)) &
(~numpy.isclose(clay, clay_nodata)) &
(ompc != _TARGET_NODATA) &
(~numpy.isclose(bulkd, bulkd_nodata)))
afiel[valid_mask] = (
0.3075 * sand[valid_mask] + 0.5886 * silt[valid_mask] +
0.8039 * clay[valid_mask] + 2.208E-03 * ompc[valid_mask] +
-0.1434 * bulkd[valid_mask])
return afiel
sand_nodata = pygeoprocessing.get_raster_info(sand_path)['nodata'][0]
silt_nodata = pygeoprocessing.get_raster_info(silt_path)['nodata'][0]
clay_nodata = pygeoprocessing.get_raster_info(clay_path)['nodata'][0]
bulkd_nodata = pygeoprocessing.get_raster_info(bulkd_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sand_path, silt_path, clay_path, ompc_path, bulkd_path]],
afiel_op, afiel_path, gdal.GDT_Float32, _TARGET_NODATA)
def _calc_awilt(
sand_path, silt_path, clay_path, ompc_path, bulkd_path, awilt_path):
"""Calculate wilting point for one soil layer.
Wilting point, minimum soil water required by plants before
wilting, from Gupta and Larson 1979, 'Estimating soil and
water retention characteristics from particle size distribution,
organic matter percent and bulk density'. Water Resources
Research 15:1633.
Parameters:
sand_path (string): path to proportion sand in soil raster
silt_path (string): path to proportion silt in soil raster
clay_path (string): path to proportion clay in soil raster
ompc_path (string): path to estimated total soil organic matter raster
bulkd_path (string): path to bulk density of soil raster
awilt_path (string): path to result raster, wilting point for this
soil layer
Side effects:
creates the raster indicated by `awilt_path`
Returns:
None
"""
def awilt_op(sand, silt, clay, ompc, bulkd):
"""Calculate wilting point for one soil layer.
Wilting point, minimum soil water required by plants before
wilting, from Gupta and Larson 1979, 'Estimating soil and
water retention characteristics from particle size distribution,
organic matter percent and bulk density'. Water Resources
Research 15:1633.
Parameters:
sand (numpy.ndarray): input, proportion sand in soil
silt (numpy.ndarray): input, proportion silt in soil
clay (numpy.ndarray): input, proportion clay in soil
ompc (numpy.ndarray): derived, estimated total soil organic matter
bulkd (numpy.ndarray): input, bulk density of soil
Returns:
awilt, wilting point for this soil layer
"""
awilt = numpy.empty(sand.shape, dtype=numpy.float32)
awilt[:] = _TARGET_NODATA
valid_mask = (
(~numpy.isclose(sand, sand_nodata)) &
(~numpy.isclose(silt, silt_nodata)) &
(~numpy.isclose(clay, clay_nodata)) &
(ompc != _TARGET_NODATA) &
(~numpy.isclose(bulkd, bulkd_nodata)))
awilt[valid_mask] = (
-0.0059 * sand[valid_mask] + 0.1142 * silt[valid_mask] +
0.5766 * clay[valid_mask] + 2.228E-03 * ompc[valid_mask] +
0.02671 * bulkd[valid_mask])
return awilt
sand_nodata = pygeoprocessing.get_raster_info(sand_path)['nodata'][0]
silt_nodata = pygeoprocessing.get_raster_info(silt_path)['nodata'][0]
clay_nodata = pygeoprocessing.get_raster_info(clay_path)['nodata'][0]
bulkd_nodata = pygeoprocessing.get_raster_info(bulkd_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sand_path, silt_path, clay_path, ompc_path, bulkd_path]],
awilt_op, awilt_path, gdal.GDT_Float32, _TARGET_NODATA)
def _afiel_awilt(
site_index_path, site_param_table, som1c_2_path, som2c_2_path,
som3c_path, sand_path, silt_path, clay_path, bulk_d_path, pp_reg):
"""Calculate field capacity and wilting point for each soil layer.
Computations based on Gupta and Larson 1979, 'Estimating soil and water
retention characteristics from particle size distribution, organic
matter percent and bulk density'. Water Resources Research 15:1633.
Field capacity is calculated for -0.33 bar; wilting point is
calculated for water content at -15 bars.
Parameters:
site_index_path (string): path to site spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters including 'edepth' field
som1c_2_path (string): path to the state variable 'som1c_2',
active organic soil carbon
som2c_2_path (string): path to the state variable 'som2c_2',
slow organic soil carbon
som3c_path (string): path to the state variable 'som3c',
passive organic soil carbon
sand_path (string): path to raster containing proportion sand in soil
silt_path (string): path to raster containing proportion silt in soil
clay_path (string): path to raster containing proportion clay in soil
bulk_d_path (string): path to raster containing bulk density of soil
pp_reg (dict): map of key, path pairs giving paths to persistent
intermediate parameters that do not change over the course of
the simulation
Modifies the rasters pp_reg['afiel_<layer>'] and pp_reg['awilt_<layer>']
for all soil layers.
Returns:
None
"""
def decrement_ompc(ompc_orig_path, ompc_dec_path):
"""Decrease estimated organic matter to 85% of its value.
In each subsequent soil layer, estimated organic matter is decreased
by 15%, to 85% of its previous value.
Parameters:
ompc_orig_path (string): path to estimated soil organic matter
raster
ompc_dec_path (string): path to result raster, estimated soil
organic matter decreased to 85% of its previous value
Side effects:
modifies or creates the raster indicated by `ompc_dec_path`
Returns:
None
"""
def decrement_op(ompc_orig):
"""Reduce organic matter to 85% of its previous value."""
ompc_dec = numpy.empty(ompc_orig.shape, dtype=numpy.float32)
ompc_dec[:] = _TARGET_NODATA
valid_mask = (ompc_orig != _TARGET_NODATA)
ompc_dec[valid_mask] = ompc_orig[valid_mask] * 0.85
return ompc_dec
pygeoprocessing.raster_calculator(
[(ompc_orig_path, 1)], decrement_op, ompc_dec_path,
gdal.GDT_Float32, _TARGET_NODATA)
# temporary intermediate rasters for calculating field capacity and
# wilting point
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
edepth_path = os.path.join(temp_dir, 'edepth.tif')
ompc_path = os.path.join(temp_dir, 'ompc.tif')
site_to_edepth = dict(
[(site_code, float(table['edepth'])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_edepth, edepth_path, gdal.GDT_Float32,
_IC_NODATA)
# estimate total soil organic matter
_calc_ompc(
som1c_2_path, som2c_2_path, som3c_path, bulk_d_path, edepth_path,
ompc_path)
# calculate field capacity and wilting point for each of soil layers 1-9,
# decreasing organic matter content to 85% of its previous value with each
# successive layer
for lyr in range(1, 10):
afiel_path = pp_reg['afiel_{}_path'.format(lyr)]
awilt_path = pp_reg['awilt_{}_path'.format(lyr)]
_calc_afiel(
sand_path, silt_path, clay_path, ompc_path, bulk_d_path,
afiel_path)
_calc_awilt(
sand_path, silt_path, clay_path, ompc_path, bulk_d_path,
awilt_path)
ompc_dec_path = os.path.join(temp_dir, 'ompc{}.tif'.format(lyr))
decrement_ompc(ompc_path, ompc_dec_path)
ompc_path = ompc_dec_path
# clean up temporary files
shutil.rmtree(temp_dir)
def _persistent_params(
site_index_path, site_param_table, sand_path, clay_path, pp_reg):
"""Calculate persistent parameters.
The calculated values do not change over the course of the simulation.
Parameters:
site_index_path (string): path to site spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters
sand_path (string): path to raster containing proportion sand in soil
clay_path (string): path to raster containing proportion clay in soil
pp_reg (dict): map of key, path pairs giving paths to persistent
intermediate parameters that do not change over the course of
the simulation.
Modifies the persistent parameter rasters indexed by the following
keys:
pp_reg['wc_path']
pp_reg['eftext_path']
pp_reg['p1co2_2_path']
pp_reg['fps1s3_path']
pp_reg['fps2s3_path']
pp_reg['orglch_path']
pp_reg['vlossg_path']
Returns:
None
"""
sand_nodata = pygeoprocessing.get_raster_info(sand_path)['nodata'][0]
clay_nodata = pygeoprocessing.get_raster_info(clay_path)['nodata'][0]
# temporary intermediate rasters for persistent parameters calculation
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
param_val_dict = {}
for val in [
'peftxa', 'peftxb', 'p1co2a_2', 'p1co2b_2', 'ps1s3_1',
'ps1s3_2', 'ps2s3_1', 'ps2s3_2', 'omlech_1', 'omlech_2', 'vlossg']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for (
site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_val, target_path, gdal.GDT_Float32,
_IC_NODATA)
def calc_wc(afiel_1, awilt_1):
"""Calculate water content of soil layer 1."""
return afiel_1 - awilt_1
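# wc, the plant-available water capacity of soil layer 1, is the
# difference between field capacity and wilting point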
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
pp_reg['afiel_1_path'], pp_reg['awilt_1_path']]],
calc_wc, pp_reg['wc_path'], gdal.GDT_Float32, _TARGET_NODATA)
def calc_eftext(peftxa, peftxb, sand):
"""Calculate effect of soil texture on microbial decomposition.
Use an empirical regression to estimate the effect of soil
sand content on the microbe decomposition rate. Line 359 Prelim.f
Parameters:
peftxa (numpy.ndarray): parameter, regression intercept
peftxb (numpy.ndarray): parameter, regression slope
sand (numpy.ndarray): input, proportion sand in soil
Returns:
eftext, coefficient that modifies microbe decomposition rate.
"""
eftext = numpy.empty(sand.shape, dtype=numpy.float32)
eftext[:] = _IC_NODATA
valid_mask = (
(peftxa != _IC_NODATA) &
(peftxb != _IC_NODATA) &
(~numpy.isclose(sand, sand_nodata)))
eftext[valid_mask] = (
peftxa[valid_mask] + (peftxb[valid_mask] * sand[valid_mask]))
return eftext
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['peftxa'], param_val_dict['peftxb'], sand_path]],
calc_eftext, pp_reg['eftext_path'], gdal.GDT_Float32, _IC_NODATA)
def calc_p1co2_2(p1co2a_2, p1co2b_2, sand):
"""Calculate the fraction of carbon lost to CO2 from som1c_2.
During decomposition from active organic soil carbon, a fraction
of decomposing material is lost to CO2 as the soil respires.
Line 366 Prelim.f
Parameters:
p1co2a_2 (numpy.ndarray): parameter, intercept of regression
predicting loss to CO2 from active organic soil carbon
p1co2b_2 (numpy.ndarray): parameter, slope of regression
predicting loss to CO2 from active organic soil carbon
sand (numpy.ndarray): input, proportion sand in soil
Returns:
p1co2_2, fraction of carbon that flows to CO2 from active
organic soil carbon
"""
p1co2_2 = numpy.empty(sand.shape, dtype=numpy.float32)
p1co2_2[:] = _IC_NODATA
valid_mask = (
(p1co2a_2 != _IC_NODATA) &
(p1co2b_2 != _IC_NODATA) &
(~numpy.isclose(sand, sand_nodata)))
p1co2_2[valid_mask] = (
p1co2a_2[valid_mask] + (p1co2b_2[valid_mask] * sand[valid_mask]))
return p1co2_2
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['p1co2a_2'],
param_val_dict['p1co2b_2'], sand_path]],
calc_p1co2_2, pp_reg['p1co2_2_path'], gdal.GDT_Float32, _IC_NODATA)
def calc_fps1s3(ps1s3_1, ps1s3_2, clay):
"""Calculate effect of clay content on decomposition from som1c_2.
Use an empirical regression to estimate the effect of clay content
of soil on flow from soil organic matter with fast turnover to
soil organic matter with slow turnover. Line 370 Prelim.f
Parameters:
ps1s3_1 (numpy.ndarray): parameter, regression intercept
ps1s3_2 (numpy.ndarray): parameter, regression slope
clay (numpy.ndarray): input, proportion clay in soil
Returns:
fps1s3, coefficient that modifies rate of decomposition
from som1c_2
"""
fps1s3 = numpy.empty(clay.shape, dtype=numpy.float32)
fps1s3[:] = _IC_NODATA
valid_mask = (
(ps1s3_1 != _IC_NODATA) &
(ps1s3_2 != _IC_NODATA) &
(~numpy.isclose(clay, clay_nodata)))
fps1s3[valid_mask] = (
ps1s3_1[valid_mask] + (ps1s3_2[valid_mask] * clay[valid_mask]))
return fps1s3
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['ps1s3_1'], param_val_dict['ps1s3_2'], clay_path]],
calc_fps1s3, pp_reg['fps1s3_path'], gdal.GDT_Float32, _IC_NODATA)
def calc_fps2s3(ps2s3_1, ps2s3_2, clay):
"""Calculate effect of clay content on decomposition from som2c_2.
Use an empirical regression to estimate the effect of clay content
of soil on flow from slow soil organic carbon to soil passive organic
carbon. Line 371 Prelim.f
Parameters:
ps2s3_1 (numpy.ndarray): parameter, regression intercept
ps2s3_2 (numpy.ndarray): parameter, regression slope
clay (numpy.ndarray): input, proportion clay in soil
Returns:
fps2s3, coefficient that modifies rate of decomposition from
som2c_2 to som3c
"""
fps2s3 = numpy.empty(clay.shape, dtype=numpy.float32)
fps2s3[:] = _IC_NODATA
valid_mask = (
(ps2s3_1 != _IC_NODATA) &
(ps2s3_2 != _IC_NODATA) &
(~numpy.isclose(clay, clay_nodata)))
fps2s3[valid_mask] = (
ps2s3_1[valid_mask] + (ps2s3_2[valid_mask] * clay[valid_mask]))
return fps2s3
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['ps2s3_1'], param_val_dict['ps2s3_2'], clay_path]],
calc_fps2s3, pp_reg['fps2s3_path'], gdal.GDT_Float32, _IC_NODATA)
def calc_orglch(omlech_1, omlech_2, sand):
"""Calculate the effect of sand content on leaching from soil.
Use an empirical regression to estimate the effect of sand content
of soil on rate of organic leaching from soil when there is drainage
of soil water from soil layer 1 to soil layer 2. Line 110 Predec.f
Parameters:
omlech_1 (numpy.ndarray): parameter, regression intercept
omlech_2 (numpy.ndarray): parameter, regression slope
sand (numpy.ndarray): input, proportion sand in soil
Returns:
orglch, the fraction of organic compounds leaching from soil
with drainage from soil layer 1 to layer 2
"""
orglch = numpy.empty(sand.shape, dtype=numpy.float32)
orglch[:] = _IC_NODATA
valid_mask = (
(omlech_1 != _IC_NODATA) &
(omlech_2 != _IC_NODATA) &
(~numpy.isclose(sand, sand_nodata)))
orglch[valid_mask] = (
omlech_1[valid_mask] + (omlech_2[valid_mask] * sand[valid_mask]))
return orglch
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['omlech_1'], param_val_dict['omlech_2'],
sand_path]],
calc_orglch, pp_reg['orglch_path'], gdal.GDT_Float32, _IC_NODATA)
def calc_vlossg(vlossg_param, clay):
"""Calculate proportion of gross mineralized N that is volatized.
        During decomposition, some N is lost to volatilization. The amount
        lost is a proportion of gross mineralized N, given by a multiplier
        that varies with soil clay content.
        Parameters:
            vlossg_param (numpy.ndarray): parameter, volatilization loss
                multiplier
clay (numpy.ndarray): input, proportion clay in soil
Returns:
vlossg, proportion of gross mineralized N that is volatized
"""
valid_mask = (
(vlossg_param != _IC_NODATA) &
(~numpy.isclose(clay, clay_nodata)))
vlossg = numpy.empty(vlossg_param.shape, dtype=numpy.float32)
vlossg[:] = _IC_NODATA
max_mask = ((clay > 0.3) & valid_mask)
min_mask = ((clay < 0.1) & valid_mask)
vlossg[valid_mask] = -0.1 * (clay[valid_mask] - 0.3) + 0.01
vlossg[max_mask] = 0.01
vlossg[min_mask] = 0.03
vlossg[valid_mask] = vlossg[valid_mask] * vlossg_param[valid_mask]
return vlossg
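    # Illustrative example of the piecewise clay response (values assumed):
    # clay = 0.2 gives -0.1 * (0.2 - 0.3) + 0.01 = 0.02; clay <= 0.1 is capped
    # at 0.03 and clay >= 0.3 at 0.01; the result is then scaled by the site
    # parameter vlossg.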
pygeoprocessing.raster_calculator(
[(path, 1) for path in [param_val_dict['vlossg'], clay_path]],
calc_vlossg, pp_reg['vlossg_path'], gdal.GDT_Float32, _IC_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def _aboveground_ratio(anps, tca, pcemic_1, pcemic_2, pcemic_3):
"""Calculate C/<iel> ratios of decomposing aboveground material.
This ratio is used to test whether there is sufficient <iel> (N or P)
in aboveground material for the material to decompose. Agdrat.f
Parameters:
anps (numpy.ndarray): state variable, N or P in the donor material
tca (numpy.ndarray): state variable, total C in the donor material
pcemic_1 (numpy.ndarray): parameter, maximum C/<iel> of new material
pcemic_2 (numpy.ndarray): parameter, minimum C/<iel> of new material
pcemic_3 (numpy.ndarray): parameter, minimum <iel> content of
decomposing material that gives minimum C/<iel> of new material
Returns:
agdrat, the C/<iel> ratio of new material
"""
valid_mask = (
(~numpy.isclose(anps, _SV_NODATA)) &
(~numpy.isclose(tca, _SV_NODATA)) &
(pcemic_1 != _IC_NODATA) &
(pcemic_2 != _IC_NODATA) &
(pcemic_3 != _IC_NODATA))
cemicb = numpy.empty(anps.shape, dtype=numpy.float32)
cemicb[:] = _IC_NODATA
cemicb[valid_mask] = (
(pcemic_2[valid_mask] - pcemic_1[valid_mask]) /
pcemic_3[valid_mask])
econt = numpy.empty(anps.shape, dtype=numpy.float32)
econt[:] = _TARGET_NODATA
econt[valid_mask] = 0
decompose_mask = ((tca > 0.) & valid_mask)
econt[decompose_mask] = anps[decompose_mask] / (tca[decompose_mask] * 2.5)
agdrat = numpy.empty(anps.shape, dtype=numpy.float32)
agdrat[:] = _TARGET_NODATA
agdrat[valid_mask] = pcemic_2[valid_mask]
compute_mask = ((econt <= pcemic_3) & valid_mask)
agdrat[compute_mask] = (
pcemic_1[compute_mask] + econt[compute_mask] * cemicb[compute_mask])
return agdrat
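# Illustrative example for _aboveground_ratio (values assumed, not defaults):
# with anps = 1.5, tca = 100, pcemic_1 = 16, pcemic_2 = 10, pcemic_3 = 0.02,
# econt = 1.5 / (100 * 2.5) = 0.006, which is <= pcemic_3, so
# cemicb = (10 - 16) / 0.02 = -300 and agdrat = 16 + 0.006 * -300 = 14.2;
# if econt exceeded pcemic_3, agdrat would equal the minimum ratio pcemic_2.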
def _belowground_ratio(aminrl, varat_1_iel, varat_2_iel, varat_3_iel):
"""Calculate C/<iel> ratios of decomposing belowground material.
This ratio is used to test whether there is sufficient <iel> (N or P)
in soil metabolic material to decompose. Bgdrat.f
Parameters:
aminrl (numpy.ndarray): derived, average surface mineral <iel>
varat_1_iel (numpy.ndarray): parameter, maximum C/<iel> ratio for
newly decomposed material
varat_2_iel (numpy.ndarray): parameter, minimum C/<iel> ratio
varat_3_iel (numpy.ndarray): parameter, amount of <iel> present
when minimum ratio applies
Returns:
bgdrat, the C/<iel> ratio of new material
"""
valid_mask = (
(~numpy.isclose(aminrl, _SV_NODATA)) &
(varat_1_iel != _IC_NODATA) &
(varat_2_iel != _IC_NODATA) &
(varat_3_iel != _IC_NODATA))
bgdrat = numpy.empty(aminrl.shape, dtype=numpy.float32)
bgdrat[:] = _TARGET_NODATA
bgdrat[valid_mask] = (
(1. - aminrl[valid_mask] / varat_3_iel[valid_mask]) *
(varat_1_iel[valid_mask] - varat_2_iel[valid_mask]) +
varat_2_iel[valid_mask])
max_mask = ((aminrl <= 0) & valid_mask)
bgdrat[max_mask] = varat_1_iel[max_mask]
min_mask = ((aminrl > varat_3_iel) & valid_mask)
bgdrat[min_mask] = varat_2_iel[min_mask]
return bgdrat
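# Illustrative example for _belowground_ratio (values assumed, not defaults):
# with aminrl = 0.5, varat_1 = 40, varat_2 = 20, varat_3 = 2,
# bgdrat = (1 - 0.5 / 2) * (40 - 20) + 20 = 35; aminrl <= 0 would give the
# maximum ratio 40, and aminrl > 2 would give the minimum ratio 20.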
def _structural_ratios(site_index_path, site_param_table, sv_reg, pp_reg):
"""Calculate maximum C/N and C/P ratios for structural material.
These ratios limit decomposition of structural material (i.e., material
containing lignin). Lines 31-77 Predec.f
Parameters:
site_index_path (string): path to site spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters
sv_reg (dict): map of key, path pairs giving paths to state
variables for the current month
pp_reg (dict): map of key, path pairs giving paths to persistent
intermediate parameters that do not change over the course of
the simulation.
Modifies the persistent parameter rasters indexed by the following
keys:
pp_reg['rnewas_1_1_path']
pp_reg['rnewas_1_2_path']
pp_reg['rnewas_2_1_path']
pp_reg['rnewas_2_2_path']
pp_reg['rnewbs_1_1_path']
pp_reg['rnewbs_1_2_path']
pp_reg['rnewbs_2_1_path']
pp_reg['rnewbs_2_2_path']
Returns:
None
"""
# temporary parameter rasters for structural ratios calculations
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
param_val_dict = {}
for iel in [1, 2]:
        for val in [
'pcemic1_2', 'pcemic1_1', 'pcemic1_3', 'pcemic2_2',
'pcemic2_1', 'pcemic2_3', 'rad1p_1', 'rad1p_2',
'rad1p_3', 'varat1_1', 'varat22_1']:
target_path = os.path.join(temp_dir, '{}_{}.tif'.format(val, iel))
param_val_dict['{}_{}'.format(val, iel)] = target_path
site_to_val = dict(
[(site_code, float(table['{}_{}'.format(val, iel)])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
def calc_rnewas_som2(
pcemic2_2, pcemic2_1, pcemic2_3, struce_1, strucc_1, rad1p_1,
rad1p_2, rad1p_3, pcemic1_2, rnewas1):
"""Calculate C/<iel> ratio for decomposition into som2.
This ratio is calculated separately for each nutrient (i.e., N, P).
When material decomposes into the surface slow organic pool, the
C/<iel> ratio of decomposing material must be smaller than or equal to
        this ratio. A portion of the material entering som1, the surface
        active pool, also flows to som2; its contribution to this ratio is
        calculated here.
Parameters:
pcemic2_2 (numpy.ndarray): parameter, minimum C/<iel> ratio for
surface slow organic pool
pcemic2_1 (numpy.ndarray): parameter, maximum C/<iel> ratio for
surface slow organic pool
            pcemic2_3 (numpy.ndarray): parameter, minimum <iel> content of
decomposing aboveground material, above which the C/<iel>
ratio of the surface slow organic pool equals pcemic1_2
struce_1 (numpy.ndarray): state variable, <iel> in surface
structural material
strucc_1 (numpy.ndarray): state variable, C in surface
structural material
rad1p_1 (numpy.ndarray): parameter, intercept of regression used
to calculate addition of <iel> from surface active pool
rad1p_2 (numpy.ndarray): parameter, slope of regression used
to calculate addition of <iel> from surface active pool
rad1p_3 (numpy.ndarray): parameter, minimum allowable C/<iel>
used to calculate addition term for C/<iel> ratio of som2
formed from surface active pool
pcemic1_2 (numpy.ndarray): parameter, minimum C/<iel> ratio for
surface active organic pool
rnewas1 (numpy.ndarray): derived, C/<iel> ratio for decomposition
into som1
Returns:
rnewas2, required ratio for decomposition of structural material
into som2 for one nutrient
"""
valid_mask = (
(pcemic2_2 != _IC_NODATA) &
(pcemic2_1 != _IC_NODATA) &
(pcemic2_3 != _IC_NODATA) &
(~numpy.isclose(struce_1, _SV_NODATA)) &
(~numpy.isclose(strucc_1, _SV_NODATA)) &
(rad1p_1 != _IC_NODATA) &
(rad1p_2 != _IC_NODATA) &
(rad1p_3 != _IC_NODATA) &
(pcemic1_2 != _IC_NODATA) &
(rnewas1 != _TARGET_NODATA))
rnewas2 = _aboveground_ratio(
struce_1, strucc_1, pcemic2_1, pcemic2_2, pcemic2_3)
radds1 = numpy.empty(strucc_1.shape, dtype=numpy.float32)
radds1[:] = _TARGET_NODATA
radds1[valid_mask] = (
rad1p_1[valid_mask] + rad1p_2[valid_mask] *
(rnewas1[valid_mask] - pcemic1_2[valid_mask]))
rnewas2[valid_mask] = rnewas1[valid_mask] + radds1[valid_mask]
rnewas2[valid_mask] = numpy.maximum(
rnewas2[valid_mask], rad1p_3[valid_mask])
return rnewas2
for iel in [1, 2]:
# calculate rnewas_iel_1 - aboveground material to SOM1
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sv_reg['struce_1_{}_path'.format(iel)],
sv_reg['strucc_1_path'],
param_val_dict['pcemic1_1_{}'.format(iel)],
param_val_dict['pcemic1_2_{}'.format(iel)],
param_val_dict['pcemic1_3_{}'.format(iel)]]],
_aboveground_ratio, pp_reg['rnewas_{}_1_path'.format(iel)],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate rnewas_iel_2 - aboveground material to SOM2
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['pcemic2_2_{}'.format(iel)],
param_val_dict['pcemic2_1_{}'.format(iel)],
param_val_dict['pcemic2_3_{}'.format(iel)],
sv_reg['struce_1_{}_path'.format(iel)],
sv_reg['strucc_1_path'],
param_val_dict['rad1p_1_{}'.format(iel)],
param_val_dict['rad1p_2_{}'.format(iel)],
param_val_dict['rad1p_3_{}'.format(iel)],
param_val_dict['pcemic1_2_{}'.format(iel)],
pp_reg['rnewas_{}_1_path'.format(iel)]]],
calc_rnewas_som2, pp_reg['rnewas_{}_2_path'.format(iel)],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate rnewbs_iel_1 - belowground material to SOM1
site_to_varat1_1 = dict([
(site_code, float(table['varat1_1_{}'.format(iel)])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_varat1_1,
pp_reg['rnewbs_{}_1_path'.format(iel)],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate rnewbs_iel_2 - belowground material to SOM2
# rnewbs(iel,2) = varat22(1,iel)
site_to_varat22_1 = dict([
(site_code, float(table['varat22_1_{}'.format(iel)])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_varat22_1,
pp_reg['rnewbs_{}_2_path'.format(iel)],
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def _yearly_tasks(
aligned_inputs, site_param_table, veg_trait_table, month_index,
pft_id_set, year_reg):
"""Calculate quantities that remain static for 12 months.
These quantities are annual precipitation, annual atmospheric N
    deposition, and the fraction of plant residue that is lignin for each pft.
    Century also calculates non-symbiotic soil N fixation once yearly, but here
    that calculation has been moved to monthly tasks. Century uses
    precipitation in the 12 future months (prcgrw) to predict root:shoot
    ratios; here we instead use the sum of monthly precipitation over 12 months
    including the current one when data for 12 future months are not available.
Lines 79-82, 164 Eachyr.f
Parameters:
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including monthly precipitation and site
spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters
veg_trait_table (dict): map of pft id to dictionaries containing
plant functional type parameters
month_index (int): current monthly step, relative to 0 so that
month_index=0 at first monthly time step
pft_id_set (set): set of integers identifying plant functional types
year_reg (dict): map of key, path pairs giving paths to the annual
precipitation and N deposition rasters
Side effects:
modifies or creates the rasters indicated by:
year_reg['annual_precip_path']
year_reg['baseNdep_path']
year_reg['pltlig_above_<pft>'] for each pft
year_reg['pltlig_below_<pft>'] for each pft
Returns:
None
Raises:
ValueError if fewer than 12 monthly precipitation rasters can be found
"""
def calc_base_N_dep(epnfa_1, epnfa_2, prcann):
"""Calculate base annual atmospheric N deposition.
Parameters:
epnfa_1 (numpy.ndarray): parameter, intercept of regression
predicting atmospheric N deposition from precipitation
epnfa_2 (numpy.ndarray): parameter, slope of regression predicting
atmospheric N deposition from precipitation
prcann (numpy.ndarray): derived, annual precipitation
Returns:
baseNdep, annual atmospheric N deposition
"""
baseNdep = numpy.empty(prcann.shape, dtype=numpy.float32)
baseNdep[:] = 0.
valid_mask = (
(epnfa_1 != _IC_NODATA) &
(epnfa_2 != _IC_NODATA) &
(prcann != _TARGET_NODATA))
baseNdep[valid_mask] = (
epnfa_1[valid_mask] +
(epnfa_2[valid_mask] * numpy.minimum(prcann[valid_mask], 80.)))
baseNdep[baseNdep < 0] = 0.
return baseNdep
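    # Illustrative example (parameter values assumed): with epnfa_1 = 0.1,
    # epnfa_2 = 0.02 and prcann = 90, precipitation is capped at 80, so
    # baseNdep = 0.1 + 0.02 * 80 = 1.7; negative results are set to 0.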
def calc_pltlig(fligni_1_lyr, fligni_2_lyr, prcann):
"""Calculate the fraction of residue that is lignin. Cmplig.f
This fraction is used to calculate the fraction of residue (i.e.,
incoming litter from fall of standing dead or incoming soil from death
of roots) that is partitioned to metabolic vs structural pools. It is
calculated once per year from annual precipitation and fixed
parameters.
Parameters:
fligni_1_lyr (numpy.ndarray): parameter, intercept for regression
predicting lignin content fraction from rainfall
fligni_2_lyr (numpy.ndarray): parameter, slope for regression
predicting lignin content fraction from rainfall
prcann (numpy.ndarray): derived, annual precipitation
Returns:
pltlig_lyr, fraction of residue that is lignin
"""
valid_mask = (
(fligni_1_lyr != _IC_NODATA) &
(fligni_2_lyr != _IC_NODATA) &
(prcann != _TARGET_NODATA))
pltlig = numpy.empty(fligni_1_lyr.shape, dtype=numpy.float32)
pltlig[:] = _TARGET_NODATA
pltlig[valid_mask] = (
fligni_1_lyr[valid_mask] + fligni_2_lyr[valid_mask] *
prcann[valid_mask])
pltlig[valid_mask] = numpy.clip(pltlig[valid_mask], 0.02, 0.5)
return pltlig
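    # Illustrative example (parameter values assumed): with fligni_1_lyr = 0.02
    # and fligni_2_lyr = 0.0012, annual precipitation of 40 gives
    # pltlig = 0.02 + 0.0012 * 40 = 0.068, within the clipped range
    # [0.02, 0.5].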
offset = -12
annual_precip_rasters = []
while len(annual_precip_rasters) < 12:
offset += 1
if offset == 12:
raise ValueError("Insufficient precipitation rasters were found")
precip_month = month_index + offset
try:
annual_precip_rasters.append(
aligned_inputs['precip_%d' % precip_month])
except KeyError:
continue
precip_nodata = set([])
for precip_raster in annual_precip_rasters:
precip_nodata.update(
set([pygeoprocessing.get_raster_info(precip_raster)['nodata'][0]]))
if len(precip_nodata) > 1:
raise ValueError("Precipitation rasters include >1 nodata value")
precip_nodata = list(precip_nodata)[0]
raster_list_sum(
annual_precip_rasters, precip_nodata, year_reg['annual_precip_path'],
_TARGET_NODATA)
# intermediate parameter rasters for this operation
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
param_val_dict = {}
    for val in ['epnfa_1', 'epnfa_2']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
for val in ['fligni_1_1', 'fligni_2_1', 'fligni_1_2', 'fligni_2_2']:
for pft_i in pft_id_set:
target_path = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
param_val_dict['{}_{}'.format(val, pft_i)] = target_path
fill_val = veg_trait_table[pft_i][val]
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], target_path, gdal.GDT_Float32,
[_IC_NODATA], fill_value_list=[fill_val])
# calculate base N deposition
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['epnfa_1'], param_val_dict['epnfa_2'],
year_reg['annual_precip_path']]],
calc_base_N_dep, year_reg['baseNdep_path'], gdal.GDT_Float32,
_TARGET_NODATA)
for pft_i in pft_id_set:
# fraction of surface residue that is lignin
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['fligni_1_1_{}'.format(pft_i)],
param_val_dict['fligni_2_1_{}'.format(pft_i)],
year_reg['annual_precip_path']]],
calc_pltlig, year_reg['pltlig_above_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
# fraction of soil residue that is lignin
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['fligni_1_2_{}'.format(pft_i)],
param_val_dict['fligni_2_2_{}'.format(pft_i)],
year_reg['annual_precip_path']]],
calc_pltlig, year_reg['pltlig_below_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def calc_latitude(template_raster, latitude_raster_path):
"""Calculate latitude at the center of each pixel in a template raster."""
pygeoprocessing.new_raster_from_base(
template_raster, latitude_raster_path, gdal.GDT_Float32,
[_IC_NODATA])
latitude_raster = gdal.OpenEx(
latitude_raster_path, gdal.OF_RASTER | gdal.GA_Update)
target_band = latitude_raster.GetRasterBand(1)
base_raster_info = pygeoprocessing.get_raster_info(template_raster)
geotransform = base_raster_info['geotransform']
for offset_map, raster_block in pygeoprocessing.iterblocks(
(template_raster, 1)):
n_y_block = raster_block.shape[0]
n_x_block = raster_block.shape[1]
# offset by .5 so we're in the center of the pixel
xoff = offset_map['xoff'] + 0.5
yoff = offset_map['yoff'] + 0.5
# calculate the projected x and y coordinate bounds for the block
x_range = numpy.linspace(
geotransform[0] + geotransform[1] * xoff,
geotransform[0] + geotransform[1] * (xoff + n_x_block - 1),
n_x_block)
y_range = numpy.linspace(
geotransform[3] + geotransform[5] * yoff,
geotransform[3] + geotransform[5] * (yoff + n_y_block - 1),
n_y_block)
        # latitude is written for every pixel in the block; no nodata masking
        # is applied here
# these indexes correspond to projected coordinates
# y_vector is what we want, an array of latitude coordinates
x_vector, y_vector = numpy.meshgrid(x_range, y_range)
target_band.WriteArray(
y_vector, xoff=offset_map['xoff'], yoff=offset_map['yoff'])
    # make sure the band and dataset are flushed to disk and closed
    target_band.FlushCache()
    target_band = None
    gdal.Dataset.__swig_destroy__(latitude_raster)
    latitude_raster = None
def _calc_daylength(template_raster, month, daylength_path):
"""Calculate estimated hours of daylength. Daylen.c.
Parameters:
template_raster (string): path to a raster in geographic coordinates
that is aligned with model inputs
        month (int): current month of the year, such that month=1 indicates
            January
        daylength_path (string): path to result, raster of estimated hours of
            daylength
Side effects:
modifies or creates the raster indicated by `daylength_path`
Returns:
None
"""
def daylength(month):
def _daylength(latitude):
"""Estimate hours of daylength for a given month and latitude."""
# Julian day at beginning of each month
jday_list = [
1, 32, 61, 92, 122, 153, 183, 214, 245, 275, 306, 337]
jday = jday_list[month - 1]
# Convert latitude from degrees to radians
rlatitude = latitude * (numpy.pi / 180.0)
declin = 0.4014 * numpy.sin(6.283185 * (jday - 77.0) / 365)
temp = 1.0 - (-numpy.tan(rlatitude) * numpy.tan(declin))**2
temp[temp < 0] = 0
par1 = numpy.sqrt(temp)
par2 = -numpy.tan(rlatitude) * numpy.tan(declin)
ahou = numpy.arctan2(par1, par2)
hours_of_daylength = (ahou / numpy.pi) * 24
return hours_of_daylength
return _daylength
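    # Rough worked example (approximate, for orientation only): for month = 6
    # (June), jday = 153; at latitude 45 degrees, declin is about 0.388 rad,
    # ahou is about 1.99 rad, and estimated daylength is (1.99 / pi) * 24,
    # roughly 15.2 hours.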
# calculate an intermediate input, latitude at each pixel center
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
latitude_raster_path = os.path.join(temp_dir, 'latitude.tif')
calc_latitude(template_raster, latitude_raster_path)
pygeoprocessing.raster_calculator(
[(latitude_raster_path, 1)], daylength(month), daylength_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def _shortwave_radiation(template_raster, month, shwave_path):
"""Calculate shortwave radiation outside the atmosphere.
Shortwave radiation outside the atmosphere is calculated according to
Penman (1948), "Natural evaporation from open water, bare soil and grass",
Proc. Roy. Soc. London. The latitude of each pixel is required to
calculate radiation and is calculated as an intermediate step from the
input `template_raster`. shwave.f
Parameters:
template_raster (string): path to a raster in geographic coordinates
that is aligned with model inputs
        month (int): current month of the year, such that month=1 indicates
            January
shwave_path (string): path to shortwave radiation raster
Side effects:
Modifies the raster indicated by `shwave_path`
Returns:
None
"""
def shwave(month):
def _shwave(latitude):
"""Calculate shortwave radiation outside the atmosphere.
Parameters:
latitude (float): latitude of current site in degrees
month (int): current month of the year, such that month=1
indicates January
Returns:
shwave, short wave solar radiation outside the atmosphere
"""
# Julian date in middle of each month of the year
jday_list = [
16, 46, 75, 106, 136, 167, 197, 228, 259, 289, 320, 350]
jday = jday_list[month - 1]
transcof = 0.8
# Convert latitude from degrees to radians
rlatitude = latitude * (numpy.pi / 180.0)
# short wave solar radiation on a clear day
declin = 0.401426 * numpy.sin(6.283185 * (jday - 77.0) / 365.0)
temp = 1.0 - (-numpy.tan(rlatitude) * numpy.tan(declin))**2
temp[temp < 0.] = 0.
par1 = numpy.sqrt(temp)
par2 = (-numpy.tan(rlatitude) * numpy.tan(declin))
ahou = numpy.arctan2(par1, par2)
ahou[ahou < 0.] = 0.
solrad = (
917.0 * transcof * (
ahou * numpy.sin(rlatitude) * numpy.sin(declin) +
numpy.cos(rlatitude) *
numpy.cos(declin) * numpy.sin(ahou)))
# short wave radiation outside the atmosphere
shwave = solrad / transcof
return shwave
return _shwave
# calculate an intermediate input, latitude at each pixel center
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
latitude_raster_path = os.path.join(temp_dir, 'latitude.tif')
calc_latitude(template_raster, latitude_raster_path)
pygeoprocessing.raster_calculator(
[(latitude_raster_path, 1)],
shwave(month), shwave_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def _reference_evapotranspiration(
max_temp_path, min_temp_path, shwave_path, fwloss_4_path,
pevap_path):
"""Calculate reference evapotranspiration.
    Reference evapotranspiration is calculated with the Hargreaves radiation-
    and temperature-based equation presented in "Guidelines for computing crop
    water requirements", FAO Irrigation and drainage paper 56
    (http://www.fao.org/docrep/X0490E/x0490e08.htm), modified by the parameter
    fwloss(4).
Parameters:
max_temp_path (string): path to maximum monthly temperature
min_temp_path (string): path to minimum monthly temperature
shwave_path (string): path to shortwave radiation outside the
atmosphere
fwloss_4_path (string): path to parameter, scaling factor for
reference evapotranspiration
pevap_path (string): path to result, reference evapotranspiration
raster
Side effects:
modifies or creates the raster indicated by `pevap_path`
Returns:
None
"""
def _calc_pevap(max_temp, min_temp, shwave, fwloss_4):
"""Calculate reference evapotranspiration.
Pevap.f
Parameters:
max_temp (numpy.ndarray): input, maximum monthly temperature
min_temp (numpy.ndarray): input, minimum monthly temperature
shwave (numpy.ndarray): derived, shortwave radiation outside the
atmosphere
fwloss_4 (numpy.ndarray): parameter, scaling factor for reference
evapotranspiration
Returns:
pevap, reference evapotranspiration
"""
const1 = 0.0023
const2 = 17.8
langleys2watts = 54.0
valid_mask = (
(~numpy.isclose(max_temp, maxtmp_nodata)) &
(~numpy.isclose(min_temp, mintmp_nodata)) &
(shwave != _TARGET_NODATA) &
(fwloss_4 != _IC_NODATA))
trange = numpy.empty(fwloss_4.shape, dtype=numpy.float32)
trange[:] = _TARGET_NODATA
trange[valid_mask] = max_temp[valid_mask] - min_temp[valid_mask]
tmean = numpy.empty(fwloss_4.shape, dtype=numpy.float32)
tmean[:] = _IC_NODATA
tmean[valid_mask] = (max_temp[valid_mask] + min_temp[valid_mask]) / 2.0
# daily reference evapotranspiration
daypet = numpy.empty(fwloss_4.shape, dtype=numpy.float32)
daypet[:] = _TARGET_NODATA
daypet[valid_mask] = (
const1 * (tmean[valid_mask] + const2) *
numpy.sqrt(trange[valid_mask]) *
(shwave[valid_mask] / langleys2watts))
# monthly reference evapotranspiration, from mm to cm,
# bounded to be at least 0.5
monpet = (daypet * 30.) / 10.
monpet[monpet <= 0.5] = 0.5
pevap = numpy.empty(fwloss_4.shape, dtype=numpy.float32)
pevap[:] = _TARGET_NODATA
pevap[valid_mask] = monpet[valid_mask] * fwloss_4[valid_mask]
return pevap
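    # Illustrative example (values assumed): max_temp = 25, min_temp = 11 gives
    # tmean = 18 and trange = 14; with shwave = 1000 langleys,
    # daypet = 0.0023 * (18 + 17.8) * sqrt(14) * (1000 / 54), about 5.7 mm/day,
    # so monpet is about 17.1 cm/month, scaled finally by fwloss_4.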
maxtmp_nodata = pygeoprocessing.get_raster_info(
max_temp_path)['nodata'][0]
mintmp_nodata = pygeoprocessing.get_raster_info(
min_temp_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
max_temp_path, min_temp_path, shwave_path, fwloss_4_path]],
_calc_pevap, pevap_path, gdal.GDT_Float32, _TARGET_NODATA)
def _potential_production(
aligned_inputs, site_param_table, current_month, month_index,
pft_id_set, veg_trait_table, prev_sv_reg, pp_reg, month_reg):
"""Calculate above- and belowground potential production.
Potential production of each plant functional type is calculated
as total potential production given incoming solar radiation,
limited by temperature, soil moisture, and obstruction by biomass and
litter. Further modification of potential production according to
limitation by water and nutrient availability is calculated in the
root:shoot ratio submodel. Lines 57-148 Potcrp.f
Parameters:
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including precipitation, temperature,
plant functional type composition, and site spatial index
site_param_table (dict): map of site spatial indices to dictionaries
containing site parameters
current_month (int): month of the year, such that current_month=1
indicates January
month_index (int): month of the simulation, such that month_index=13
indicates month 13 of the simulation
pft_id_set (set): set of integers identifying plant functional types
veg_trait_table (dict): map of pft id to dictionaries containing
plant functional type parameters
prev_sv_reg (dict): map of key, path pairs giving paths to state
variables for the previous month
pp_reg (dict): map of key, path pairs giving paths to persistent
intermediate parameters that do not change over the course of
the simulation
month_reg (dict): map of key, path pairs giving paths to intermediate
calculated values that are shared between submodels
Side effects:
creates the raster indicated by `month_reg['h2ogef_1_<PFT>']` for each
plant functional type (PFT) where growth is scheduled to occur in
this month
creates the raster indicated by `month_reg['tgprod_pot_prod_<PFT>']`
for each plant functional type (PFT) where growth is scheduled to
occur in this month
Returns:
None
"""
    # if growth does not occur this month for any PFT,
    # skip the rest of the function
do_PFT = []
for pft_i in pft_id_set:
if str(current_month) in veg_trait_table[pft_i]['growth_months']:
do_PFT.append(pft_i)
if not do_PFT:
return
def calc_ctemp(aglivc, pmxbio, maxtmp, pmxtmp, mintmp, pmntmp):
"""Calculate soil temperature relative to its effect on growth.
Soil temperature is calculated from monthly temperature inputs and
modified by total standing live biomass. Lines 69-84 Potcrp.f
Parameters:
aglivc (numpy.ndarray): derived, sum of aglivc (carbon in
aboveground live biomass) across plant functional types
pmxbio (numpy.ndarray): parameter, maximum biomass impact on
temperature
maxtmp (numpy.ndarray): input, average maximum monthly
temperature
pmxtmp (numpy.ndarray): parameter, scaling factor for effect of
biomass on monthly maximum temperature
mintmp (numpy.ndarray): input, average minimum monthly temperature
pmntmp (numpy.ndarray): parameter, scaling factor for effect of
biomass on monthly minimum temperature
Returns:
ctemp, effect of soil temperature on potential production
"""
bio = numpy.empty(aglivc.shape, dtype=numpy.float32)
bio[:] = _IC_NODATA
valid_mask = (
(aglivc >= 0.) &
(pmxbio != _IC_NODATA) &
(~numpy.isclose(maxtmp, maxtmp_nodata)) &
(pmxtmp != _IC_NODATA) &
(~numpy.isclose(mintmp, mintmp_nodata)) &
(pmntmp != _IC_NODATA))
bio[valid_mask] = aglivc[valid_mask] * 2.5
bio[bio > pmxbio] = pmxbio[bio > pmxbio]
bio[pmxbio < 0] = _IC_NODATA
# Maximum temperature
tmxs = numpy.empty(aglivc.shape, dtype=numpy.float32)
tmxs[:] = _IC_NODATA
tmxs[valid_mask] = (
maxtmp[valid_mask] + (
(25.4/(1. + 18. * numpy.exp(-0.20 * maxtmp[valid_mask]))) *
(numpy.exp(pmxtmp[valid_mask] * bio[valid_mask]) - 0.13)))
# Minimum temperature
tmns = numpy.empty(aglivc.shape, dtype=numpy.float32)
tmns[:] = _IC_NODATA
tmns[valid_mask] = (
mintmp[valid_mask] +
(pmntmp[valid_mask] * bio[valid_mask] - 1.78))
# Average temperature
ctemp = numpy.empty(aglivc.shape, dtype=numpy.float32)
ctemp[:] = _IC_NODATA
ctemp[valid_mask] = (tmxs[valid_mask] + tmns[valid_mask])/2.
return ctemp
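    # Illustrative example (parameter values assumed, not defaults): with
    # aglivc = 100 (so bio = 250), pmxbio = 600, maxtmp = 25, pmxtmp = -0.0035,
    # mintmp = 10, pmntmp = 0.004: tmxs is about 31.5, tmns is about 9.2, and
    # ctemp is about (31.5 + 9.2) / 2 = 20.4 degrees C.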
def calc_potprd(mintmp, maxtmp, ctemp, ppdf_1, ppdf_2, ppdf_3, ppdf_4):
"""Calculate the limiting effect of temperature on growth.
Estimated soil temperature restricts potential production according to
a Poisson Density Function curve described by the plant functional
        type-specific parameters ppdf_1-4. Lines 73-84 Potcrp.f
Parameters:
mintmp (numpy.ndarray): input, average minimum monthly temperature
maxtmp (numpy.ndarray): input, average maximum monthly
temperature
ctemp (numpy.ndarray): derived, soil temperature as calculated from
monthly temperature and modified by standing live biomass
ppdf_1 (numpy.ndarray): parameter, optimum temperature for growth
ppdf_2 (numpy.ndarray): parameter, maximum temperature for growth
ppdf_3 (numpy.ndarray): parameter, left curve shape for Poisson
Density Function curve describing growth as function of
temperature
ppdf_4 (numpy.ndarray): parameter, right curve shape for Poisson
Density Function curve describing growth as function of
temperature
Returns:
potprd, scaling factor describing potential production limited
by temperature
"""
valid_mask = (
(~numpy.isclose(mintmp, mintmp_nodata)) &
(~numpy.isclose(maxtmp, maxtmp_nodata)) &
(ctemp != _IC_NODATA) &
(ppdf_1 != _IC_NODATA) &
(ppdf_2 != _IC_NODATA) &
(ppdf_3 != _IC_NODATA) &
(ppdf_4 != _IC_NODATA))
frac = numpy.empty(ctemp.shape, dtype=numpy.float32)
frac[:] = _TARGET_NODATA
frac[valid_mask] = (
(ppdf_2[valid_mask] - ctemp[valid_mask]) /
(ppdf_2[valid_mask] - ppdf_1[valid_mask]))
avg_tmp = numpy.empty(ctemp.shape, dtype=numpy.float32)
avg_tmp[valid_mask] = (mintmp[valid_mask] + maxtmp[valid_mask]) / 2.
grow_mask = (
(avg_tmp > 0) &
(frac > 0) &
valid_mask)
potprd = numpy.empty(ctemp.shape, dtype=numpy.float32)
potprd[:] = _TARGET_NODATA
potprd[valid_mask] = 0.
potprd[grow_mask] = (numpy.exp(
(ppdf_3[grow_mask]/ppdf_4[grow_mask]) *
(1. - numpy.power(frac[grow_mask], ppdf_4[grow_mask]))) *
numpy.power(frac[grow_mask], ppdf_3[grow_mask]))
return potprd
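    # Illustrative example (parameter values assumed): with ppdf_1 = 20,
    # ppdf_2 = 35, ppdf_3 = 1, ppdf_4 = 3 and ctemp = 30,
    # frac = (35 - 30) / (35 - 20) = 0.333 and
    # potprd = exp((1/3) * (1 - 0.333**3)) * 0.333**1, about 0.46; at
    # ctemp = ppdf_1 = 20 the factor is 1 (no temperature limitation).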
def calc_h2ogef_1(
pevap, avh2o_1, precip, wc, pprpts_1, pprpts_2, pprpts_3):
"""Calculate the limiting factor of water availability on growth.
Soil moisture restricts potential production according to the ratio
of available water to reference evapotranspiration. The shape of the
linear relationship of this ratio to potential production is
controlled by the site parameters pprpts_1, pprpts_2, and pprpts_3.
Lines 57-64 Potcrp.f
Parameters:
pevap (numpy.ndarray): derived, reference evapotranspiration
avh2o_1 (numpy.ndarray): state variable, water available to this
plant functional type for growth
precip (numpy.ndarray): input, precipitation for the current month
wc (numpy.ndarray): derived, water content in soil layer 1
pprpts_1 (numpy.ndarray): parameter, the minimum ratio of
available water to reference evapotranspiration that limits
production completely
pprpts_2 (numpy.ndarray): parameter, influences the slope of the
line predicting potential production from available water
pprpts_3 (numpy.ndarray): parameter, the ratio of available water
to reference evapotranspiration above which production is
not restricted
Returns:
h2ogef_1, scaling factor describing potential production limited
by soil moisture
"""
valid_mask = (
(pevap != _TARGET_NODATA) &
(~numpy.isclose(avh2o_1, _SV_NODATA)) &
(~numpy.isclose(precip, precip_nodata)) &
(wc != _TARGET_NODATA) &
(pprpts_1 != _IC_NODATA) &
(pprpts_2 != _IC_NODATA) &
(pprpts_3 != _IC_NODATA))
h2ogef_prior = numpy.empty(pevap.shape, dtype=numpy.float32)
h2ogef_prior[:] = _TARGET_NODATA
h2ogef_prior[valid_mask] = numpy.where(
pevap[valid_mask] >= 0.01,
(avh2o_1[valid_mask] + precip[valid_mask])/pevap[valid_mask],
0.01)
intcpt = (
pprpts_1[valid_mask] + (pprpts_2[valid_mask] * wc[valid_mask]))
slope = 1. / (pprpts_3[valid_mask] - intcpt)
h2ogef_1 = numpy.empty(pevap.shape, dtype=numpy.float32)
h2ogef_1[:] = _TARGET_NODATA
h2ogef_1[valid_mask] = (
1.0 + slope *
(h2ogef_prior[valid_mask] - pprpts_3[valid_mask]))
h2ogef_1[valid_mask] = numpy.clip(h2ogef_1[valid_mask], 0.01, 1.)
return h2ogef_1
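    # Illustrative example (parameter values assumed): with pevap = 15,
    # avh2o_1 = 3 and precip = 6, the moisture ratio is 9 / 15 = 0.6; with
    # pprpts_1 = 0, pprpts_2 = 1, pprpts_3 = 0.8 and wc = 0.2, intcpt = 0.2,
    # slope = 1 / 0.6 and h2ogef_1 = 1 + slope * (0.6 - 0.8), about 0.67.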
def calc_biof(sum_stdedc, sum_aglivc, strucc_1, pmxbio, biok5):
"""Calculate the effect of obstruction on growth.
Live biomass, standing dead biomass, and litter reduce potential
production through obstruction. The shape of the relationship between
standing biomass and litter and potential production is controlled by
the site parameter pmxbio and the plant functional type parameter
biok5. Lines 91-120 Potcrp.f
Parameters:
sum_stdedc (numpy.ndarray): derived, total carbon in standing dead
biomass across plant functional types
sum_aglivc (numpy.ndarray): derived, total carbon in aboveground
live biomass across plant functional types
strucc_1 (numpy.ndarray): derived, carbon in surface litter
pmxbio (numpy.ndarray): parameter, maximum biomass impact on
potential production
biok5 (numpy.ndarray): parameter, level of standing dead biomass
and litter
Returns:
biof, scaling factor describing potential production limited
by obstruction
"""
valid_mask = (
(~numpy.isclose(strucc_1, _SV_NODATA)) &
(pmxbio != _IC_NODATA) &
(biok5 != _IC_NODATA))
bioc = numpy.empty(sum_stdedc.shape, dtype=numpy.float32)
bioc[:] = _IC_NODATA
bioc[valid_mask] = numpy.where(
((sum_stdedc[valid_mask] + 0.1*strucc_1[valid_mask]) <= 0.), 0.01,
(sum_stdedc[valid_mask] + 0.1*strucc_1[valid_mask]))
bioc[valid_mask] = numpy.where(
(bioc[valid_mask] > pmxbio[valid_mask]), pmxbio[valid_mask],
bioc[valid_mask])
bioprd = numpy.empty(sum_stdedc.shape, dtype=numpy.float32)
bioprd[:] = _IC_NODATA
bioprd[valid_mask] = 1. - (
bioc[valid_mask] / (biok5[valid_mask] + bioc[valid_mask]))
temp1 = 1. - bioprd
temp2 = temp1 * 0.75
temp3 = temp1 * 0.25
ratlc = numpy.empty(sum_stdedc.shape, dtype=numpy.float32)
ratlc[:] = _IC_NODATA
ratlc[valid_mask] = sum_aglivc[valid_mask] / bioc[valid_mask]
biof = numpy.empty(sum_stdedc.shape, dtype=numpy.float32)
biof[:] = _TARGET_NODATA
biof[valid_mask] = numpy.where(
ratlc[valid_mask] <= 1.,
(bioprd[valid_mask] + (temp2[valid_mask] * ratlc[valid_mask])),
numpy.where(
ratlc[valid_mask] <= 2.,
(bioprd[valid_mask] + temp2[valid_mask]) +
temp3[valid_mask] * (ratlc[valid_mask] - 1.),
1.))
return biof
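    # Illustrative example (parameter values assumed): sum_stdedc = 50 and
    # strucc_1 = 100 give bioc = 60; with biok5 = 60, bioprd = 1 - 60/120 = 0.5
    # (temp2 = 0.375, temp3 = 0.125); sum_aglivc = 90 gives ratlc = 1.5, so
    # biof = (0.5 + 0.375) + 0.125 * (1.5 - 1), about 0.94.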
def calc_tgprod_pot_prod(prdx_1, shwave, potprd, h2ogef_1, biof):
"""Calculate total potential production.
Total above- and belowground potential biomass production is calculated
as the total potential production given solar radiation and the
        intrinsic growth capacity of the plant functional type, modified by
limiting factors of temperature, soil moisture, and obstruction by
standing biomass and litter. Line 147 Potcrp.f
Parameters:
prdx_1 (numpy.ndarray): parameter, the intrinsic capacity of the
plant functional type for growth per unit of solar radiation
shwave (numpy.ndarray): derived, shortwave solar radiation outside
the atmosphere
            potprd (numpy.ndarray): derived, scaling factor describing the
limiting effect of temperature
h2ogef_1 (numpy.ndarray): derived, scaling factor describing the
limiting effect of soil moisture
biof (numpy.ndarray): derived, scaling factor describing the
limiting effect of obstruction by standing biomass and litter
Returns:
tgprod_pot_prod, total above- and belowground potential biomass
production (g biomass)
"""
valid_mask = (
(prdx_1 != _IC_NODATA) &
(shwave != _TARGET_NODATA) &
(potprd != _TARGET_NODATA) &
(h2ogef_1 != _TARGET_NODATA) &
(biof != _TARGET_NODATA))
tgprod_pot_prod = numpy.empty(prdx_1.shape, dtype=numpy.float32)
tgprod_pot_prod[:] = _TARGET_NODATA
tgprod_pot_prod[valid_mask] = (
prdx_1[valid_mask] * shwave[valid_mask] * potprd[valid_mask] *
h2ogef_1[valid_mask] * biof[valid_mask])
return tgprod_pot_prod
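    # Illustrative example combining the factors above (values assumed): with
    # prdx_1 = 0.45, shwave = 1000, potprd = 0.46, h2ogef_1 = 0.67 and
    # biof = 0.94, tgprod_pot_prod = 0.45 * 1000 * 0.46 * 0.67 * 0.94,
    # roughly 130 g biomass.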
# temporary intermediate rasters for calculating total potential production
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
# site-level temporary calculated values
for val in ['sum_aglivc', 'sum_stdedc', 'ctemp', 'shwave', 'pevap']:
temp_val_dict[val] = os.path.join(temp_dir, '{}.tif'.format(val))
# PFT-level temporary calculated values
for pft_i in pft_id_set:
for val in [
'aglivc_weighted', 'stdedc_weighted', 'potprd', 'biof']:
temp_val_dict['{}_{}'.format(val, pft_i)] = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
# temporary parameter rasters for calculating total potential production
param_val_dict = {}
# site-level parameters
for val in [
'pmxbio', 'pmxtmp', 'pmntmp', 'fwloss_4', 'pprpts_1',
'pprpts_2', 'pprpts_3']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
# PFT-level parameters
for val in [
'ppdf_1', 'ppdf_2', 'ppdf_3', 'ppdf_4', 'biok5', 'prdx_1']:
for pft_i in do_PFT:
target_path = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
param_val_dict['{}_{}'.format(val, pft_i)] = target_path
fill_val = veg_trait_table[pft_i][val]
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], target_path, gdal.GDT_Float32,
[_IC_NODATA], fill_value_list=[fill_val])
maxtmp_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['max_temp_{}'.format(current_month)])['nodata'][0]
mintmp_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['min_temp_{}'.format(current_month)])['nodata'][0]
precip_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['precip_{}'.format(month_index)])['nodata'][0]
# calculate intermediate quantities that do not differ between PFTs:
# sum of aglivc (standing live biomass) and stdedc (standing dead biomass)
# across PFTs, weighted by % cover of each PFT
for sv in ['aglivc', 'stdedc']:
weighted_sum_path = temp_val_dict['sum_{}'.format(sv)]
weighted_state_variable_sum(
sv, prev_sv_reg, aligned_inputs, pft_id_set, weighted_sum_path)
# ctemp, soil temperature relative to impacts on growth
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['sum_aglivc'],
param_val_dict['pmxbio'],
aligned_inputs['max_temp_{}'.format(current_month)],
param_val_dict['pmxtmp'],
aligned_inputs['min_temp_{}'.format(current_month)],
param_val_dict['pmntmp']]],
calc_ctemp, temp_val_dict['ctemp'], gdal.GDT_Float32, _IC_NODATA)
# shwave, shortwave radiation outside the atmosphere
_shortwave_radiation(
aligned_inputs['site_index'], current_month, temp_val_dict['shwave'])
# pet, reference evapotranspiration modified by fwloss parameter
_reference_evapotranspiration(
aligned_inputs['max_temp_{}'.format(current_month)],
aligned_inputs['min_temp_{}'.format(current_month)],
temp_val_dict['shwave'],
param_val_dict['fwloss_4'],
temp_val_dict['pevap'])
# calculate quantities that differ between PFTs
for pft_i in do_PFT:
# potprd, the limiting effect of temperature
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
aligned_inputs['min_temp_{}'.format(current_month)],
aligned_inputs['max_temp_{}'.format(current_month)],
temp_val_dict['ctemp'],
param_val_dict['ppdf_1_{}'.format(pft_i)],
param_val_dict['ppdf_2_{}'.format(pft_i)],
param_val_dict['ppdf_3_{}'.format(pft_i)],
param_val_dict['ppdf_4_{}'.format(pft_i)]]],
calc_potprd, temp_val_dict['potprd_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
# h2ogef_1, the limiting effect of soil water availability
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['pevap'],
prev_sv_reg['avh2o_1_{}_path'.format(pft_i)],
aligned_inputs['precip_{}'.format(month_index)],
pp_reg['wc_path'],
param_val_dict['pprpts_1'],
param_val_dict['pprpts_2'],
param_val_dict['pprpts_3']]],
calc_h2ogef_1, month_reg['h2ogef_1_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
# biof, the limiting effect of obstruction
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['sum_stdedc'],
temp_val_dict['sum_aglivc'],
prev_sv_reg['strucc_1_path'],
param_val_dict['pmxbio'],
param_val_dict['biok5_{}'.format(pft_i)]]],
calc_biof, temp_val_dict['biof_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
# total potential production
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['prdx_1_{}'.format(pft_i)],
temp_val_dict['shwave'],
temp_val_dict['potprd_{}'.format(pft_i)],
month_reg['h2ogef_1_{}'.format(pft_i)],
temp_val_dict['biof_{}'.format(pft_i)]]],
calc_tgprod_pot_prod,
month_reg['tgprod_pot_prod_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def _calc_favail_P(sv_reg, param_val_dict):
"""Calculate the fraction of P in surface layer available to plants.
This must be performed after the sum of mineral N in the surface layer
is calculated because the fraction of labile P available to plants is
impacted by the amount of mineral N in the surface layer.
Parameters:
sv_reg (dict): map of key, path pairs giving paths to state variables
for the current month, including minerl_1_1, mineral N in the
surface layer
param_val_dict (dict): map of key, path pairs giving paths to
site-level parameters, including favail_4, favail_5, favail_6,
and favail_2
Side effects:
modifies or creates the raster indicated by
`param_val_dict['favail_2']`
Returns:
None
"""
def favail_P_op(minerl_1_1, favail_4, favail_5, favail_6):
"""Calculate the fraction of P in surface layer available to plants.
The fraction of labile P available to plants depends on mineral N in
the surface layer and site parameters favail_4, favail_5, favail_6.
Line 395 Simsom.f
Parameters:
minerl_1_1 (numpy.ndarray): state variable, mineral N in the
surface layer
favail_4 (numpy.ndarray): parameter, minimum fraction of P
available
favail_5 (numpy.ndarray): parameter, maximum fraction of P
available
favail_6 (numpy.ndarray): parameter, mineral N in surface layer
required to attain maximum fraction of P available
Returns:
favail_P, fraction of mineral P available to plants
"""
valid_mask = (
(~numpy.isclose(minerl_1_1, _SV_NODATA)) &
(favail_4 != _IC_NODATA) &
(favail_5 != _IC_NODATA) &
(favail_6 != _IC_NODATA))
interim = numpy.empty(minerl_1_1.shape, dtype=numpy.float32)
interim[:] = _IC_NODATA
interim[valid_mask] = (
favail_4[valid_mask] + minerl_1_1[valid_mask] *
(favail_5[valid_mask] - favail_4[valid_mask]) /
favail_6[valid_mask])
favail_P = numpy.empty(minerl_1_1.shape, dtype=numpy.float32)
favail_P[:] = _IC_NODATA
favail_P[valid_mask] = numpy.maximum(
favail_4[valid_mask], numpy.minimum(
interim[valid_mask], favail_5[valid_mask]))
return favail_P
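    # Illustrative example (parameter values assumed): with favail_4 = 0.2,
    # favail_5 = 0.5, favail_6 = 2 and minerl_1_1 = 1, the interim value is
    # 0.2 + 1 * (0.5 - 0.2) / 2 = 0.35, which already lies between favail_4
    # and favail_5, so favail_P = 0.35.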
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sv_reg['minerl_1_1_path'],
param_val_dict['favail_4'],
param_val_dict['favail_5'],
param_val_dict['favail_6']]],
favail_P_op, param_val_dict['favail_2'],
gdal.GDT_Float32, _IC_NODATA)
def _calc_avail_mineral_nutrient(pft_param_dict, sv_reg, iel, target_path):
"""Calculate one mineral nutrient available to one plant functional type.
The mineral nutrient available to a plant functional type is calculated
from the mineral nutrient content of soil layers accessible by that
plant function type.
Parameters:
pft_param_dict (dict): map of key, value pairs giving the values of
parameters for this plant functional type
(i.e., veg_trait_table[pft_i] for this pft_i)
sv_reg (dict): map of key, path pairs giving paths to state
variables for the current month
iel (int): integer index for current nutrient (1=N, 2=P)
target_path (string): path to raster to contain available mineral
nutrient for this plant functional type and nutrient
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
nlay = int(pft_param_dict['nlaypg'])
mineral_raster_list = [
sv_reg['minerl_{}_{}_path'.format(lyr, iel)] for lyr in range(
1, nlay + 1)]
raster_list_sum(
mineral_raster_list, _SV_NODATA, target_path, _TARGET_NODATA,
nodata_remove=True)
def _calc_available_nutrient(
pft_i, iel, pft_param_dict, sv_reg, site_param_table, site_index_path,
availm_path, favail_path, tgprod_path, eavail_path):
"""Calculate nutrient available to a plant functional type.
The nutrient available is the sum of mineral nutrient (N or P) in soil
layers accessible by the roots of the plant functional type, modified
by the fraction of nutrient available to plants and the current root
biomass.
Parameters:
pft_i (int): plant functional type index
iel (int): nutrient index (iel=1 indicates N, iel=2 indicates P)
pft_param_dict (dict): map of key, value pairs giving the values of
parameters for this plant functional type
(i.e., veg_trait_table[pft_i] for this pft_i)
sv_reg (dict): map of key, path pairs giving paths to state
variables for the current month
        site_param_table (dict): map of site spatial index to dictionaries
            that contain site-level parameters
        site_index_path (string): path to site spatial index raster
        availm_path (string): path to raster containing available mineral
            nutrient for the given plant functional type and nutrient
favail_path (string): path to raster containing the appropriate value
of the parameter favail. For nitrogen, this parameter is supplied
directly as user input, but for phosphorus, it must be calculated
from other parameters.
tgprod_path (string): path to raster containing total potential
production (g biomass)
eavail_path (string): path to location to store the result, nutrient
available to the plant functional type
Side effects:
modifies or creates the raster indicated by `eavail_path`
Returns:
None
"""
def calc_eavail(rictrl, bglivc, riint, availm, favail, crpstg):
"""Calculate available nutrient.
Parameters:
rictrl (numpy.ndarray): parameter, scaling factor used to
calculate the impact of root biomass on nutrient availability
bglivc (numpy.ndarray): state variable, carbon in belowground
live biomass
riint (numpy.ndarray): parameter, intercept used to calculate the
impact of root biomass on nutrient availability
availm (numpy.ndarray): derived, the sum of mineral nutrient in
soil layers accessible by this plant functional type
favail (numpy.ndarray): parameter, fraction of the nutrient
available each month to plants
crpstg (numpy.ndarray): state variable, nutrient in
retranslocation storage pool for the plant functional type
Returns:
eavail, the nutrient available to the plant functional type
"""
valid_mask = (
(rictrl != _IC_NODATA) &
(~numpy.isclose(bglivc, _SV_NODATA)) &
(riint != _IC_NODATA) &
(availm != _TARGET_NODATA) &
(favail != _IC_NODATA) &
(~numpy.isclose(crpstg, _SV_NODATA)))
rimpct = numpy.empty(rictrl.shape, dtype=numpy.float32)
rimpct[:] = _TARGET_NODATA
rimpct[valid_mask] = numpy.where(
((rictrl[valid_mask] * bglivc[valid_mask] * 2.5) > 33.),
1., 1. - riint[valid_mask] * numpy.exp(
-rictrl[valid_mask] * bglivc[valid_mask] * 2.5))
eavail = numpy.empty(rictrl.shape, dtype=numpy.float32)
eavail[:] = _TARGET_NODATA
eavail[valid_mask] = (
(availm[valid_mask] * favail[valid_mask] * rimpct[valid_mask]) +
crpstg[valid_mask])
return eavail
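    # Illustrative example (parameter values assumed): rictrl = 0.015 and
    # bglivc = 100 give rictrl * bglivc * 2.5 = 3.75 <= 33, so with
    # riint = 0.8, rimpct = 1 - 0.8 * exp(-3.75), about 0.98; with availm = 5,
    # favail = 0.9 and crpstg = 0.2, eavail = 5 * 0.9 * 0.98 + 0.2, about 4.6.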
def add_symbiotic_fixed_N(eavail_prior, snfxmx, tgprod):
"""Add nitrogen fixed by the plant to nutrient available.
Some nitrogen may be fixed by the plant, and this must be added
to available mineral nitrogen. Nitrogen fixed by the plant is
calculated from total potential production and the maximum
rate of N fixation.
Parameters:
eavail_prior (numpy.ndarray): derived, mineral nitrogen available
to the plant functional type, calculated with calc_eavail()
snfxmx (numpy.ndarray): parameter, maximum rate of symbiotic
nitrogen fixation
tgprod (numpy.ndarray): derived, total above- and belowground
potential production (g biomass)
Returns:
eavail, total N available including N fixed by the plant
"""
valid_mask = (
(eavail_prior != _TARGET_NODATA) &
(snfxmx != _IC_NODATA) &
(tgprod != _TARGET_NODATA))
maxNfix = numpy.empty(eavail_prior.shape, dtype=numpy.float32)
maxNfix[:] = _TARGET_NODATA
maxNfix[valid_mask] = snfxmx[valid_mask] * (tgprod[valid_mask] / 2.5)
eavail = numpy.empty(eavail_prior.shape, dtype=numpy.float32)
eavail[:] = _TARGET_NODATA
eavail[valid_mask] = eavail_prior[valid_mask] + maxNfix[valid_mask]
return eavail
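    # Illustrative example (parameter values assumed): with snfxmx = 0.01 and
    # tgprod = 250 g biomass, maxNfix = 0.01 * (250 / 2.5) = 1 unit of N added
    # to eavail_prior.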
# temporary intermediate rasters for calculating available nutrient
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
param_val_dict = {}
for val in ['rictrl', 'riint']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
for val in ['snfxmx_1']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
fill_val = pft_param_dict[val]
pygeoprocessing.new_raster_from_base(
site_index_path, target_path, gdal.GDT_Float32,
[_IC_NODATA], fill_value_list=[fill_val])
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['rictrl'],
sv_reg['bglivc_{}_path'.format(pft_i)],
param_val_dict['riint'],
availm_path, favail_path,
sv_reg['crpstg_{}_{}_path'.format(iel, pft_i)]]],
calc_eavail, eavail_path,
gdal.GDT_Float32, _TARGET_NODATA)
if iel == 1:
eavail_prior_path = os.path.join(temp_dir, 'eavail_prior.tif')
shutil.copyfile(eavail_path, eavail_prior_path)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
eavail_prior_path,
param_val_dict['snfxmx_1'],
tgprod_path]],
add_symbiotic_fixed_N, eavail_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def _calc_nutrient_demand(
biomass_production_path, fraction_allocated_to_roots_path,
cercrp_min_above_path, cercrp_min_below_path, demand_path):
"""Calculate the demand of one nutrient by a plant functional type.
Demand is calculated from total biomass production, the fraction of biomass
production allocated to roots, and the minimum carbon/nutrient ratios of
above- and belowground live biomass. Lines 88-92 CropDynC.f and line
65, Nutrlm.f
Parameters:
biomass_production_path (string): path to raster giving total
biomass production
fraction_allocated_to_roots_path (string): path to raster giving
            the fraction of total biomass production allocated to roots
        cercrp_min_above_path (string): path to raster giving the minimum
            ratio of carbon to nutrient in aboveground live biomass
        cercrp_min_below_path (string): path to raster giving the minimum
            ratio of carbon to nutrient in belowground live biomass
        demand_path (string): path to raster where the result, nutrient demand,
            should be stored
Side effects:
modifies or creates the raster indicated by `demand_path`
Returns:
None
"""
def nutrient_demand_op(
biomass_production, root_fraction, cercrp_min_above,
cercrp_min_below):
"""Calculate nutrient demand.
Parameters:
biomass_production (numpy.ndarray): derived, total biomass
production
root_fraction (numpy.ndarray): derived, fraction of biomass
allocated to roots
cercrp_min_above (numpy.ndarray): derived, minimum carbon to
nutrient ratio of new aboveground live material
cercrp_min_below (numpy.ndarray): derived, minimum carbon to
nutrient ratio of new belowground live material
Returns:
demand_e, nutrient demand
"""
valid_mask = (
(biomass_production != _TARGET_NODATA) &
(root_fraction != _TARGET_NODATA) &
(cercrp_min_above != _TARGET_NODATA) &
(cercrp_min_above > 0) &
(cercrp_min_below > 0) &
(cercrp_min_below != _TARGET_NODATA))
demand_above = numpy.empty(root_fraction.shape, dtype=numpy.float32)
demand_above[:] = _TARGET_NODATA
demand_above[valid_mask] = (
((biomass_production[valid_mask] *
(1. - root_fraction[valid_mask])) / 2.5) *
(1. / cercrp_min_above[valid_mask]))
demand_below = numpy.empty(root_fraction.shape, dtype=numpy.float32)
demand_below[:] = _TARGET_NODATA
demand_below[valid_mask] = (
((biomass_production[valid_mask] *
(root_fraction[valid_mask])) / 2.5) *
(1. / cercrp_min_below[valid_mask]))
demand_e = numpy.empty(root_fraction.shape, dtype=numpy.float32)
demand_e[:] = _TARGET_NODATA
demand_e[valid_mask] = (
demand_above[valid_mask] + demand_below[valid_mask])
return demand_e
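    # Illustrative example (values assumed): biomass_production = 250 and
    # root_fraction = 0.4 give aboveground C of 250 * 0.6 / 2.5 = 60 and
    # belowground C of 40; with cercrp_min_above = 30 and cercrp_min_below = 40,
    # demand_e = 60 / 30 + 40 / 40 = 3 units of nutrient.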
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
biomass_production_path, fraction_allocated_to_roots_path,
cercrp_min_above_path, cercrp_min_below_path]],
nutrient_demand_op, demand_path,
gdal.GDT_Float32, _TARGET_NODATA)
def calc_provisional_fracrc(
annual_precip, frtcindx, bgppa, bgppb, agppa, agppb,
cfrtcw_1, cfrtcw_2, cfrtcn_1, cfrtcn_2):
"""Calculate provisional fraction of carbon allocated to roots.
A temporary provisional fraction of carbon allocated to roots must be
calculated prior to calculating plant demand for N and P. The value
of this provisional fraction depends on whether the plant functional
type is modeled as a perennial plant or with the "Great Plains"
equation of Parton et al. 1987, "Analysis of factors controlling soil
organic matter levels in Great Plains grasslands", Soil Science
Society of America Journal. Lines 36-47 cropDynC.f
Parameters:
annual_precip (numpy.ndarray): derived, sum of monthly
precipitation over twelve months including the current month
        frtcindx (numpy.ndarray): parameter, flag indicating whether
            root:shoot allocation follows the Great Plains equation
            (frtcindx=0) or the perennial plant algorithm (frtcindx=1)
bgppa (numpy.ndarray): parameter, intercept in regression
estimating belowground production from annual precipitation
if frtcindx=0
bgppb (numpy.ndarray): parameter, slope in regression estimating
belowground production from annual precipitation if
frtcindx=0
agppa (numpy.ndarray): parameter, intercept in regression
estimating aboveground production from annual precipitation
if frtcindx=0
agppb (numpy.ndarray): parameter, slope in regression estimating
aboveground production from annual precipitation if
frtcindx=0
cfrtcw_1 (numpy.ndarray): parameter, maximum fraction of carbon
allocated to roots under maximum water stress if frtcindx=1
cfrtcw_2 (numpy.ndarray): parameter, minimum fraction of carbon
allocated to roots without water stress if frtcindx=1
cfrtcn_1 (numpy.ndarray): parameter, maximum fraction of carbon
allocated to roots under maximum nutrient stress if frtcindx=1
cfrtcn_2 (numpy.ndarray): parameter, minimum fraction of carbon
allocated to roots under no nutrient stress if frtcindx=1
Returns:
fracrc_p, provisional fraction of carbon allocated to roots
"""
valid_mask = (
(annual_precip != _TARGET_NODATA) &
(frtcindx != _IC_NODATA) &
(bgppa != _IC_NODATA))
rtsh = numpy.empty(annual_precip.shape, dtype=numpy.float32)
rtsh[:] = _TARGET_NODATA
rtsh[valid_mask] = (
(bgppa[valid_mask] +
annual_precip[valid_mask] * bgppb[valid_mask]) /
(agppa[valid_mask] + annual_precip[valid_mask] *
agppb[valid_mask]))
fracrc_p = numpy.empty(annual_precip.shape, dtype=numpy.float32)
fracrc_p[:] = _TARGET_NODATA
fracrc_p[valid_mask] = numpy.where(
frtcindx[valid_mask] == 0,
(1.0 / (1.0 / rtsh[valid_mask] + 1.0)),
((cfrtcw_1[valid_mask] + cfrtcw_2[valid_mask] +
cfrtcn_1[valid_mask] + cfrtcn_2[valid_mask]) / 4.0))
return fracrc_p
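# Illustrative sketch (not part of the model run): a scalar worked example of
# the two branches of `calc_provisional_fracrc` above. All parameter values
# are made up; the Great Plains branch derives a root:shoot ratio from annual
# precipitation, while the perennial branch averages the four cfrt* limits.
def _example_provisional_fracrc_scalar():
    annual_precip = 40.              # hypothetical annual precipitation
    bgppa, bgppb = 100., 7.          # hypothetical belowground regression
    agppa, agppb = -40., 7.7         # hypothetical aboveground regression
    rtsh = (bgppa + annual_precip * bgppb) / (agppa + annual_precip * agppb)
    fracrc_great_plains = 1.0 / (1.0 / rtsh + 1.0)   # frtcindx == 0
    cfrtcw_1, cfrtcw_2 = 0.8, 0.4    # hypothetical water-stress limits
    cfrtcn_1, cfrtcn_2 = 0.7, 0.3    # hypothetical nutrient-stress limits
    fracrc_perennial = (
        cfrtcw_1 + cfrtcw_2 + cfrtcn_1 + cfrtcn_2) / 4.0  # frtcindx == 1
    return fracrc_great_plains, fracrc_perennial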
def calc_ce_ratios(
pramn_1_path, pramn_2_path, aglivc_path, biomax_path,
pramx_1_path, pramx_2_path, prbmn_1_path, prbmn_2_path,
prbmx_1_path, prbmx_2_path, annual_precip_path, pft_i, iel,
month_reg):
"""Calculate minimum and maximum carbon to nutrient ratios.
Minimum and maximum C/E ratios are used to calculate demand for a
nutrient by a plant functional type. This function calculates the
ratios for above- and belowground plant portions, for one plant
functional type and one nutrient. Fltce.f
Parameters:
pramn_1_path (string): path to raster containing the parameter
pramn_<iel>_1, the minimum aboveground ratio with zero biomass
pramn_2_path (string): path to raster containing the parameter
pramn_<iel>_2, the minimum aboveground ratio with biomass greater
than or equal to biomax
aglivc_path (string): path to raster containing carbon in
aboveground live biomass
biomax_path (string): path to raster containing the parameter
biomax, the biomass above which the ratio equals pramn_2
or pramx_2
pramx_1_path (string): path to raster containing the parameter
pramx_<iel>_1, the maximum aboveground ratio with zero biomass
pramx_2_path (string): path to raster containing the parameter
pramx_<iel>_2, the maximum aboveground ratio with biomass greater
than or equal to biomax
prbmn_1_path (string): path to raster containing the parameter
prbmn_<iel>_1, intercept of regression to predict minimum
belowground ratio from annual precipitation
prbmn_2_path (string): path to raster containing the parameter
prbmn_<iel>_2, slope of regression to predict minimum belowground
ratio from annual precipitation
prbmx_1_path (string): path to raster containing the parameter
prbmx_<iel>_1, intercept of regression to predict maximum
belowground ratio from annual precipitation
prbmx_2_path (string): path to raster containing the parameter
prbmx_<iel>_2, slope of regression to predict maximum belowground
ratio from annual precipitation
annual_precip_path (string): path to annual precipitation raster
pft_i (int): plant functional type index
iel (int): nutrient index (iel=1 indicates N, iel=2 indicates P)
month_reg (dict): map of key, path pairs giving paths to
intermediate calculated values that are shared between
submodels
Side effects:
creates the rasters indicated by
`month_reg['cercrp_min_above_<iel>_<pft_i>']`,
`month_reg['cercrp_max_above_<iel>_<pft_i>']`,
`month_reg['cercrp_min_below_<iel>_<pft_i>']`,
`month_reg['cercrp_max_below_<iel>_<pft_i>']`,
Returns:
None
"""
def calc_above_ratio(pra_1, pra_2, aglivc, biomax):
"""Calculate carbon to nutrient ratio for aboveground material.
Parameters:
pra_1 (numpy.ndarray): parameter, minimum or maximum ratio
with zero biomass
pra_2 (numpy.ndarray): parameter, minimum or maximum ratio
with biomass greater than or equal to biomax
aglivc (numpy.ndarray): state variable, carbon in aboveground
live material
            biomax (numpy.ndarray): parameter, biomass above which the
ratio equals pra_2
Returns:
cercrp_above, carbon to nutrient ratio for aboveground
material
"""
valid_mask = (
(pra_1 != _IC_NODATA) &
(pra_2 != _IC_NODATA) &
(~numpy.isclose(aglivc, _SV_NODATA)) &
(biomax != _IC_NODATA))
cercrp_above = numpy.empty(pra_1.shape, dtype=numpy.float32)
cercrp_above[:] = _TARGET_NODATA
cercrp_above[valid_mask] = numpy.minimum(
(pra_1[valid_mask] + (pra_2[valid_mask] - pra_1[valid_mask]) *
2.5 * aglivc[valid_mask] / biomax[valid_mask]),
pra_2[valid_mask])
return cercrp_above
def calc_below_ratio(prb_1, prb_2, annual_precip):
"""Calculate carbon to nutrient ratio for belowground material.
Parameters:
prb_1 (numpy.ndarray): parameter, intercept of regression
to predict ratio from annual precipitation
prb_2 (numpy.ndarray): parameter, slope of regression to
predict ratio from annual precipitation
annual_precip (numpy.ndarray): derived, precipitation in twelve
months including the current month
Returns:
cercrp_below, carbon to nutrient ratio for belowground
material
"""
valid_mask = (
(prb_1 != _IC_NODATA) &
(prb_2 != _IC_NODATA) &
(annual_precip != _TARGET_NODATA))
cercrp_below = numpy.empty(prb_1.shape, dtype=numpy.float32)
cercrp_below[:] = _TARGET_NODATA
cercrp_below[valid_mask] = (
prb_1[valid_mask] +
(prb_2[valid_mask] * annual_precip[valid_mask]))
return cercrp_below
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
pramn_1_path, pramn_2_path, aglivc_path, biomax_path]],
calc_above_ratio,
month_reg['cercrp_min_above_{}_{}'.format(iel, pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
pramx_1_path, pramx_2_path, aglivc_path, biomax_path]],
calc_above_ratio,
month_reg['cercrp_max_above_{}_{}'.format(iel, pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
prbmn_1_path, prbmn_2_path, annual_precip_path]],
calc_below_ratio,
month_reg['cercrp_min_below_{}_{}'.format(iel, pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
prbmx_1_path, prbmx_2_path, annual_precip_path]],
calc_below_ratio,
month_reg['cercrp_max_below_{}_{}'.format(iel, pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
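# Illustrative sketch (not part of the model run): a scalar worked example of
# the C/E ratio equations applied pixelwise by `calc_ce_ratios` above, with
# made-up parameter values. The aboveground ratio scales with standing live
# biomass (aglivc * 2.5) up to biomax; the belowground ratio is a linear
# function of annual precipitation.
def _example_ce_ratio_scalar():
    pra_1, pra_2 = 20., 35.    # hypothetical ratios at zero and max biomass
    aglivc = 100.              # hypothetical carbon in aboveground live biomass
    biomax = 400.              # hypothetical biomass at which the ratio saturates
    cercrp_above = min(
        pra_1 + (pra_2 - pra_1) * 2.5 * aglivc / biomax, pra_2)
    prb_1, prb_2 = 25., 0.02   # hypothetical intercept and slope
    annual_precip = 40.        # hypothetical annual precipitation
    cercrp_below = prb_1 + prb_2 * annual_precip
    return cercrp_above, cercrp_below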
def calc_revised_fracrc(
frtcindx_path, fracrc_p_path, totale_1_path, totale_2_path,
demand_1_path, demand_2_path, h2ogef_1_path, cfrtcw_1_path,
cfrtcw_2_path, cfrtcn_1_path, cfrtcn_2_path, fracrc_r_path):
"""
Calculate revised fraction of carbon allocated to roots.
The revised fraction of carbon allocated to roots includes the
impacts of water and nutrient limitation. The method of the
revised calculation depends on whether the plant functional
type is modeled as a perennial plant or with the "Great Plains"
equation of Parton et al. 1987, "Analysis of factors controlling soil
organic matter levels in Great Plains grasslands", Soil Science
Society of America Journal. Lines 96-104, cropDynC.f, froota.f
Parameters:
frtcindx_path (string): path to raster containing the parameter
frtcindx
fracrc_p_path (string): path to raster containing provisional
fraction of carbon allocated to roots
totale_1_path (string): path to raster containing total available
nitrogen
totale_2_path (string): path to raster containing total available
phosphorus
demand_1_path (string): path to raster containing nitrogen demand
demand_2_path (string): path to raster containing phosphorus demand
h2ogef_1_path (string): path to raster containing the limiting
effect of water availability on growth
cfrtcw_1_path (string): path to raster containing the parameter
cfrtcw_1
cfrtcw_2_path (string): path to raster containing the parameter
cfrtcw_2
cfrtcn_1_path (string): path to raster containing the parameter
cfrtcn_1
cfrtcn_2_path (string): path to raster containing the parameter
cfrtcn_2
fracrc_r_path (string): path to raster that should contain the
result, revised fraction of carbon allocated to roots
Side effects:
creates the raster indicated by `fracrc_r_path`
Returns:
None
"""
def calc_a2drat(totale, demand):
"""Calculate the ratio of available nutrient to nutrient demand.
The ratio of nutrient available to demand for the nutrient is
restricted to be between 0 and 1.
Parameters:
totale (numpy.ndarray): derived, nutrient available
demand (numpy.ndarray): derived, demand for the nutrient
Returns:
a2drat, the ratio of available nutrient to demand, restricted
to be between 0 and 1
"""
valid_mask = (
(totale != _TARGET_NODATA) &
(demand != _TARGET_NODATA))
a2drat = numpy.empty(totale.shape, dtype=numpy.float32)
a2drat[:] = _TARGET_NODATA
demand_mask = ((demand > 0) & valid_mask)
a2drat[valid_mask] = 1.
a2drat[demand_mask] = numpy.clip(
totale[demand_mask] / demand[demand_mask], 0., 1.)
return a2drat
def calc_perennial_fracrc(
h2ogef, cfrtcw_1, cfrtcw_2, a2drat_1, a2drat_2, cfrtcn_1,
cfrtcn_2):
"""Calculate fraction C allocated to roots for a perennial plant.
The fraction of carbon allocated to roots is determined by
water availability, described by h2ogef, and nutrient availability,
described by a2drat_1 for nitrogen and a2drat_2 for phosphorus.
Lines 114-125 froota.f
Parameters:
h2ogef (numpy.ndarray): derived, the limiting factor of water
availability on growth
cfrtcw_1 (numpy.ndarray): parameter, the maximum fraction of
carbon allocated to roots with maximum water stress
cfrtcw_2 (numpy.ndarray): parameter, the minimum fraction of
carbon allocated to roots with no water stress
a2drat_1 (numpy.ndarray): derived, the ratio of available
nitrogen to nitrogen demand, restricted to be between 0
and 1
a2drat_2 (numpy.ndarray): derived, the ratio of available
phosphorus to phosphorus demand, restricted to be between
0 and 1
cfrtcn_1 (numpy.ndarray): parameter, maximum fraction of
carbon allocated to roots with maximum nutrient stress
cfrtcn_2 (numpy.ndarray): parameter, minimum fraction of
carbon allocated to roots with no nutrient stress
Returns:
fracrc_perennial, revised fraction of C allocated to roots for
a perennial plant
"""
valid_mask = (
(h2ogef != _TARGET_NODATA) &
(cfrtcw_1 != _IC_NODATA) &
(cfrtcw_2 != _IC_NODATA) &
(a2drat_1 != _TARGET_NODATA) &
(a2drat_2 != _TARGET_NODATA) &
(cfrtcn_1 != _IC_NODATA) &
(cfrtcn_2 != _IC_NODATA))
h2oeff = numpy.empty(h2ogef.shape, dtype=numpy.float32)
h2oeff[:] = _TARGET_NODATA
h2oeff[valid_mask] = (
(cfrtcw_2[valid_mask] - cfrtcw_1[valid_mask]) *
(h2ogef[valid_mask] - 1.) + cfrtcw_2[valid_mask])
ntreff_1 = numpy.empty(h2ogef.shape, dtype=numpy.float32)
ntreff_1[:] = _TARGET_NODATA
ntreff_1[valid_mask] = (
(cfrtcn_2[valid_mask] - cfrtcn_1[valid_mask]) *
(a2drat_1[valid_mask] - 1.0) + cfrtcn_2[valid_mask])
ntreff_2 = numpy.empty(h2ogef.shape, dtype=numpy.float32)
ntreff_2[:] = _TARGET_NODATA
        ntreff_2[valid_mask] = (
(cfrtcn_2[valid_mask] - cfrtcn_1[valid_mask]) *
(a2drat_2[valid_mask] - 1.0) + cfrtcn_2[valid_mask])
ntreff = numpy.empty(h2ogef.shape, dtype=numpy.float32)
ntreff[:] = _TARGET_NODATA
ntreff[valid_mask] = numpy.maximum(
ntreff_1[valid_mask], ntreff_2[valid_mask])
fracrc_perennial = numpy.empty(
h2ogef.shape, dtype=numpy.float32)
fracrc_perennial[:] = _TARGET_NODATA
fracrc_perennial[valid_mask] = numpy.minimum(
numpy.maximum(h2oeff[valid_mask], ntreff[valid_mask]), 0.99)
return fracrc_perennial
def revised_fracrc_op(frtcindx, fracrc_p, fracrc_perennial):
"""Calculate revised fraction of carbon allocated to roots.
The revised fraction of carbon allocated to roots is calculated
according to the parameter frtcindx. If frtcindx=0 (use the "Great
Plains equation"), the revised fraction is equal to the provisional
fraction. If frtcindx=1 (a perennial plant), the revised fraction
is calculated from water and nutrient stress.
Parameters:
frtcindx (numpy.ndarray): parameter, indicates whether revised
fraction of carbon allocated to roots should follow the
"Great Plains equation" or the algorithm for a perennial
plant
fracrc_p (numpy.ndarray): derived, provisional fraction of
carbon allocated to roots
fracrc_perennial (numpy.ndarray): derived, fraction of
carbon allocated to roots for a perennial plant
Returns:
fracrc_r, revised fraction of carbon allocated to roots
"""
valid_mask = (
(frtcindx != _IC_NODATA) &
(fracrc_p != _TARGET_NODATA) &
(fracrc_perennial != _TARGET_NODATA))
fracrc_r = numpy.empty(frtcindx.shape, dtype=numpy.float32)
fracrc_r[:] = _TARGET_NODATA
fracrc_r[valid_mask] = numpy.where(
frtcindx[valid_mask] == 0, fracrc_p[valid_mask],
fracrc_perennial[valid_mask])
return fracrc_r
# temporary intermediate rasters for calculating revised fracrc
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
for val in ['a2drat_1', 'a2drat_2', 'fracrc_perennial']:
temp_val_dict[val] = os.path.join(
temp_dir, '{}.tif'.format(val))
pygeoprocessing.raster_calculator(
[(path, 1) for path in [totale_1_path, demand_1_path]],
calc_a2drat, temp_val_dict['a2drat_1'], gdal.GDT_Float32,
_TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [totale_2_path, demand_2_path]],
calc_a2drat, temp_val_dict['a2drat_2'], gdal.GDT_Float32,
_TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
h2ogef_1_path, cfrtcw_1_path, cfrtcw_2_path,
temp_val_dict['a2drat_1'], temp_val_dict['a2drat_2'],
cfrtcn_1_path, cfrtcn_2_path]],
calc_perennial_fracrc, temp_val_dict['fracrc_perennial'],
gdal.GDT_Float32, _TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
frtcindx_path, fracrc_p_path,
temp_val_dict['fracrc_perennial']]],
revised_fracrc_op, fracrc_r_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
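# Illustrative sketch (not part of the model run): a scalar worked example of
# the perennial branch of `calc_revised_fracrc` above, with made-up values.
# The water effect and the nutrient effects are each linear in their limiting
# factor, and the larger of the two (the stronger push toward roots) is used,
# capped at 0.99.
def _example_perennial_fracrc_scalar():
    h2ogef = 0.6                     # hypothetical water limitation on growth
    cfrtcw_1, cfrtcw_2 = 0.8, 0.4    # hypothetical water-stress limits
    a2drat_1, a2drat_2 = 0.9, 0.5    # hypothetical N and P availability ratios
    cfrtcn_1, cfrtcn_2 = 0.7, 0.3    # hypothetical nutrient-stress limits
    h2oeff = (cfrtcw_2 - cfrtcw_1) * (h2ogef - 1.) + cfrtcw_2
    ntreff_1 = (cfrtcn_2 - cfrtcn_1) * (a2drat_1 - 1.0) + cfrtcn_2
    ntreff_2 = (cfrtcn_2 - cfrtcn_1) * (a2drat_2 - 1.0) + cfrtcn_2
    fracrc_perennial = min(max(h2oeff, max(ntreff_1, ntreff_2)), 0.99)
    return fracrc_perennial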
def grazing_effect_on_aboveground_production(tgprod, fracrc, flgrem, grzeff):
"""Adjust aboveground production with the impact of grazing.
Removal of biomass by herbivores directly impacts potential
aboveground production according to the amount of biomass removed
and the parameter grzeff, which acts as a switch to determine the
effect. If grzeff=0, 3, or 4, aboveground production is not
changed. If grzeff=1 or 6, production decreases linearly with
biomass removed; if grzeff=2 or 5, biomass removed has a quadratic
impact on production. Grazrst.f
Parameters:
tgprod (numpy.ndarray): derived, total potential biomass
production restricted by water and nutrient availability
fracrc (numpy.ndarray): derived, fraction of carbon allocated
to roots according to water and nutrient availability
flgrem (numpy.ndarray): derived, fraction of live biomass
removed by grazing in previous monthly step
grzeff (numpy.ndarray): parameter, the effect of defoliation on
production and root:shoot ratio
Returns:
agprod, aboveground production impacted by grazing
"""
valid_mask = (
(tgprod != _TARGET_NODATA) &
(fracrc != _TARGET_NODATA) &
(flgrem != _TARGET_NODATA) &
(grzeff != _IC_NODATA))
agprod_prior = numpy.empty(tgprod.shape, dtype=numpy.float32)
agprod_prior[:] = _TARGET_NODATA
agprod_prior[valid_mask] = (
tgprod[valid_mask] * (1. - fracrc[valid_mask]))
linear_effect = numpy.empty(tgprod.shape, dtype=numpy.float32)
linear_effect[:] = _TARGET_NODATA
linear_effect[valid_mask] = numpy.maximum(
(1. - (2.21*flgrem[valid_mask])) * agprod_prior[valid_mask],
0.02)
quadratic_effect = numpy.empty(tgprod.shape, dtype=numpy.float32)
quadratic_effect[:] = _TARGET_NODATA
quadratic_effect[valid_mask] = (
(1. + 2.6*flgrem[valid_mask] -
(5.83*(numpy.power(flgrem[valid_mask], 2)))) *
agprod_prior[valid_mask])
quadratic_effect[valid_mask] = numpy.maximum(
quadratic_effect[valid_mask], 0.02)
no_effect_mask = (valid_mask & numpy.isin(grzeff, [0, 3, 4]))
linear_mask = (valid_mask & numpy.isin(grzeff, [1, 6]))
quadratic_mask = (valid_mask & numpy.isin(grzeff, [2, 5]))
agprod = numpy.empty(tgprod.shape, dtype=numpy.float32)
agprod[:] = _TARGET_NODATA
agprod[no_effect_mask] = agprod_prior[no_effect_mask]
agprod[linear_mask] = linear_effect[linear_mask]
agprod[quadratic_mask] = quadratic_effect[quadratic_mask]
return agprod
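# Illustrative sketch (not part of the model run): a scalar worked example of
# the grazing adjustment made pixelwise by
# `grazing_effect_on_aboveground_production` above, with made-up values.
# grzeff selects no effect (0, 3, 4), a linear effect (1, 6), or a quadratic
# effect (2, 5) of the fraction of biomass removed (flgrem).
def _example_grazing_effect_scalar():
    tgprod, fracrc, flgrem = 300., 0.5, 0.2  # hypothetical inputs
    agprod_prior = tgprod * (1. - fracrc)
    linear_effect = max((1. - 2.21 * flgrem) * agprod_prior, 0.02)
    quadratic_effect = max(
        (1. + 2.6 * flgrem - 5.83 * flgrem ** 2) * agprod_prior, 0.02)
    return {
        'no_effect': agprod_prior,      # grzeff in (0, 3, 4)
        'linear': linear_effect,        # grzeff in (1, 6)
        'quadratic': quadratic_effect,  # grzeff in (2, 5)
    }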
def grazing_effect_on_root_shoot(fracrc, flgrem, grzeff, gremb):
"""Adjust root:shoot ratio according to the impact of grazing.
Removal of biomass by herbivores directly impacts the root:shoot
ratio of production according to the amount of biomass removed and
the parameter grzeff, which acts as a switch to determine the
effect. If grzeff=0 or 1, the root:shoot ratio is not changed.
If grzeff=2 or 3, biomass removed has a quadratic impact on the
root:shoot ratio. If grzeff=4, 5, or 6, biomass removed has a
linear effect on the root:shoot ratio. The parameter gremb
multiplies the linear impact of grazing when grzeff=4, 5 or 6.
Grzrst.f
Parameters:
fracrc (numpy.ndarray): derived, fraction of carbon allocated
to roots according to water and nutrient availability
flgrem (numpy.ndarray): derived, fraction of live biomass
removed by grazing in previous monthly step
grzeff (numpy.ndarray): parameter, the effect of defoliation on
production and root:shoot ratio
        gremb (numpy.ndarray): parameter, grazing effect multiplier
Returns:
rtsh, root:shoot ratio impacted by grazing
"""
valid_mask = (
(fracrc != _TARGET_NODATA) &
(flgrem != _TARGET_NODATA) &
(grzeff != _IC_NODATA) &
(gremb != _IC_NODATA))
rtsh_prior = numpy.empty(fracrc.shape, dtype=numpy.float32)
rtsh_prior[:] = _TARGET_NODATA
rtsh_prior[valid_mask] = (
fracrc[valid_mask] / (1. - fracrc[valid_mask]))
quadratic_effect = numpy.empty(fracrc.shape, dtype=numpy.float32)
quadratic_effect[:] = _TARGET_NODATA
quadratic_effect[valid_mask] = numpy.maximum(
rtsh_prior[valid_mask] + 3.05 * flgrem[valid_mask] -
11.78 * numpy.power(flgrem[valid_mask], 2),
0.01)
linear_effect = numpy.empty(fracrc.shape, dtype=numpy.float32)
linear_effect[:] = _TARGET_NODATA
linear_effect[valid_mask] = numpy.maximum(
1. - (flgrem[valid_mask] * gremb[valid_mask]),
0.01)
no_effect_mask = (valid_mask & numpy.isin(grzeff, [0, 1]))
quadratic_mask = (valid_mask & numpy.isin(grzeff, [2, 3]))
linear_mask = (valid_mask & numpy.isin(grzeff, [4, 5, 6]))
rtsh = numpy.empty(fracrc.shape, dtype=numpy.float32)
rtsh[:] = _TARGET_NODATA
rtsh[no_effect_mask] = rtsh_prior[no_effect_mask]
rtsh[quadratic_mask] = quadratic_effect[quadratic_mask]
rtsh[linear_mask] = linear_effect[linear_mask]
return rtsh
def calc_tgprod_final(rtsh, agprod):
"""Calculate final total potential production.
Final total potential production is calculated from aboveground
production impacted by grazing and the final root:shoot ratio
impacted by grazing.
Parameters:
rtsh (numpy.ndarray): derived, final root:shoot ratio impacted
by grazing
agprod (numpy.ndarray): derived, final aboveground potential
production impacted by grazing
Returns:
tgprod, final total potential production
"""
valid_mask = (
(rtsh != _TARGET_NODATA) &
(agprod != _TARGET_NODATA))
tgprod = numpy.empty(rtsh.shape, dtype=numpy.float32)
tgprod[:] = _TARGET_NODATA
tgprod[valid_mask] = (
agprod[valid_mask] + (rtsh[valid_mask] * agprod[valid_mask]))
return tgprod
def calc_final_tgprod_rtsh(
tgprod_pot_prod_path, fracrc_path, flgrem_path, grzeff_path,
gremb_path, tgprod_path, rtsh_path):
"""Calculate final potential production and root:shoot ratio.
Final potential production and root:shoot ratio include the impact of
grazing. First calculate final aboveground production including the
impact of grazing; then calculate rtsh, the final root:shoot ratio
including the impact of grazing; then calculate tgprod, final total
potential production, from final aboveground production and final
root:shoot ratio. Grazrst.f
Parameters:
tgprod_pot_prod_path (string): path to raster containing total
potential biomass production restricted by water and nutrient
availability, prior to effects of grazing
fracrc_path (string): path to raster containing the fraction of
carbon production allocated to roots according to restriction
by water and nutrient availability, prior to effects of
grazing
flgrem_path (string): path to raster containing the fraction of
live aboveground biomass removed by herbivores according to
diet selection in the previous step
grzeff_path (string): path to raster containing the parameter
            grzeff, the effect of defoliation on production and root:shoot
ratio
gremb_path (string): path to raster containing the parameter
gremb, the grazing effect multiplier
tgprod_path (string): path to raster containing final total
potential production (g biomass)
rtsh_path (string): path to raster containing final root:shoot
ratio of potential production
Side effects:
creates the raster indicated by tgprod_path
creates the raster indicated by rtsh_path
Returns:
None
"""
# temporary intermediate rasters for grazing effect
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
agprod_path = os.path.join(temp_dir, 'agprod.tif')
# grazing effect on aboveground production
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
tgprod_pot_prod_path, fracrc_path, flgrem_path,
grzeff_path]],
grazing_effect_on_aboveground_production,
agprod_path, gdal.GDT_Float32, _TARGET_NODATA)
# grazing effect on final root:shoot ratio
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
fracrc_path, flgrem_path, grzeff_path, gremb_path]],
grazing_effect_on_root_shoot, rtsh_path,
gdal.GDT_Float32, _TARGET_NODATA)
# final total potential production
pygeoprocessing.raster_calculator(
[(path, 1) for path in [rtsh_path, agprod_path]],
calc_tgprod_final, tgprod_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def _root_shoot_ratio(
aligned_inputs, site_param_table, current_month, pft_id_set,
veg_trait_table, prev_sv_reg, year_reg, month_reg):
"""Calculate final potential production and root:shoot ratio.
    Final potential biomass production and root:shoot ratio are calculated
according to nutrient availability and demand for the nutrient, and the
impact of defoliation by herbivores. CropDynC.f
Parameters:
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including the site spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters
current_month (int): month of the year, such that current_month=1
indicates January
pft_id_set (set): set of integers identifying plant functional types
veg_trait_table (dict): map of pft id to dictionaries containing
plant functional type parameters
prev_sv_reg (dict): map of key, path pairs giving paths to state
variables for the previous month
year_reg (dict): map of key, path pairs giving paths to rasters that
are modified once per year, including annual precipitation
month_reg (dict): map of key, path pairs giving paths to intermediate
calculated values that are shared between submodels
Side effects:
creates the raster indicated by
`month_reg['tgprod_<PFT>']`, total potential production (g biomass)
for each plant functional type (PFT)
creates the raster indicated by `month_reg['rtsh_<PFT>']` for each
plant functional type (PFT)
Returns:
None
"""
    # if growth does not occur this month for any PFT,
    # skip the rest of the function
do_PFT = []
for pft_i in pft_id_set:
# growth occurs in growth months and when senescence not scheduled
do_growth = (
current_month != veg_trait_table[pft_i]['senescence_month'] and
str(current_month) in veg_trait_table[pft_i]['growth_months'])
if do_growth:
do_PFT.append(pft_i)
if not do_PFT:
return
# temporary intermediate rasters for root:shoot submodel
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
for pft_i in do_PFT:
for val in ['fracrc_p', 'fracrc', 'availm']:
temp_val_dict['{}_{}'.format(val, pft_i)] = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
for iel in [1, 2]:
for val in ['eavail', 'demand']:
temp_val_dict[
'{}_{}_{}'.format(val, iel, pft_i)] = os.path.join(
temp_dir, '{}_{}_{}.tif'.format(val, iel, pft_i))
# temporary parameter rasters for root:shoot submodel
param_val_dict = {}
# site-level parameters
for val in [
'bgppa', 'bgppb', 'agppa', 'agppb', 'favail_1', 'favail_4',
'favail_5', 'favail_6']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
# PFT-level parameters
for pft_i in do_PFT:
for val in [
                'frtcindx', 'cfrtcw_1', 'cfrtcw_2', 'cfrtcn_1', 'cfrtcn_2',
                'biomax', 'grzeff', 'gremb']:
target_path = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
param_val_dict['{}_{}'.format(val, pft_i)] = target_path
fill_val = veg_trait_table[pft_i][val]
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], target_path, gdal.GDT_Float32,
[_IC_NODATA], fill_value_list=[fill_val])
for val in [
'pramn_1_1', 'pramn_1_2', 'pramx_1_1', 'pramx_1_2',
'prbmn_1_1', 'prbmn_1_2', 'prbmx_1_1', 'prbmx_1_2',
'pramn_2_1', 'pramn_2_2', 'pramx_2_1', 'pramx_2_2',
'prbmn_2_1', 'prbmn_2_2', 'prbmx_2_1', 'prbmx_2_2']:
target_path = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
param_val_dict[
'{}_{}'.format(val, pft_i)] = target_path
fill_val = veg_trait_table[pft_i][val]
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], target_path,
gdal.GDT_Float32, [_IC_NODATA], fill_value_list=[fill_val])
# the parameter favail_2 must be calculated from current mineral N in
# surface layer
param_val_dict['favail_2'] = os.path.join(temp_dir, 'favail_2.tif')
_calc_favail_P(prev_sv_reg, param_val_dict)
for pft_i in do_PFT:
# fracrc_p, provisional fraction of C allocated to roots
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
year_reg['annual_precip_path'],
param_val_dict['frtcindx_{}'.format(pft_i)],
param_val_dict['bgppa'],
param_val_dict['bgppb'],
param_val_dict['agppa'],
param_val_dict['agppb'],
param_val_dict['cfrtcw_1_{}'.format(pft_i)],
param_val_dict['cfrtcw_2_{}'.format(pft_i)],
param_val_dict['cfrtcn_1_{}'.format(pft_i)],
param_val_dict['cfrtcn_2_{}'.format(pft_i)]]],
calc_provisional_fracrc,
temp_val_dict['fracrc_p_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
for iel in [1, 2]:
# persistent ratios used here and in plant growth submodel
calc_ce_ratios(
param_val_dict['pramn_{}_1_{}'.format(iel, pft_i)],
param_val_dict['pramn_{}_2_{}'.format(iel, pft_i)],
prev_sv_reg['aglivc_{}_path'.format(pft_i)],
param_val_dict['biomax_{}'.format(pft_i)],
param_val_dict['pramx_{}_1_{}'.format(iel, pft_i)],
param_val_dict['pramx_{}_2_{}'.format(iel, pft_i)],
param_val_dict['prbmn_{}_1_{}'.format(iel, pft_i)],
param_val_dict['prbmn_{}_2_{}'.format(iel, pft_i)],
param_val_dict['prbmx_{}_1_{}'.format(iel, pft_i)],
param_val_dict['prbmx_{}_2_{}'.format(iel, pft_i)],
year_reg['annual_precip_path'], pft_i, iel, month_reg)
# sum of mineral nutrient in accessible soil layers
_calc_avail_mineral_nutrient(
veg_trait_table[pft_i], prev_sv_reg, iel,
temp_val_dict['availm_{}'.format(pft_i)])
# eavail_iel, available nutrient
_calc_available_nutrient(
pft_i, iel, veg_trait_table[pft_i], prev_sv_reg,
site_param_table, aligned_inputs['site_index'],
temp_val_dict['availm_{}'.format(pft_i)],
param_val_dict['favail_{}'.format(iel)],
month_reg['tgprod_pot_prod_{}'.format(pft_i)],
temp_val_dict['eavail_{}_{}'.format(iel, pft_i)])
# demand_iel, demand for the nutrient
_calc_nutrient_demand(
month_reg['tgprod_pot_prod_{}'.format(pft_i)],
temp_val_dict['fracrc_p_{}'.format(pft_i)],
month_reg['cercrp_min_above_{}_{}'.format(iel, pft_i)],
month_reg['cercrp_min_below_{}_{}'.format(iel, pft_i)],
temp_val_dict['demand_{}_{}'.format(iel, pft_i)])
# revised fraction of carbon allocated to roots
calc_revised_fracrc(
param_val_dict['frtcindx_{}'.format(pft_i)],
temp_val_dict['fracrc_p_{}'.format(pft_i)],
temp_val_dict['eavail_1_{}'.format(pft_i)],
temp_val_dict['eavail_2_{}'.format(pft_i)],
temp_val_dict['demand_1_{}'.format(pft_i)],
temp_val_dict['demand_2_{}'.format(pft_i)],
month_reg['h2ogef_1_{}'.format(pft_i)],
param_val_dict['cfrtcw_1_{}'.format(pft_i)],
param_val_dict['cfrtcw_2_{}'.format(pft_i)],
param_val_dict['cfrtcn_1_{}'.format(pft_i)],
param_val_dict['cfrtcn_2_{}'.format(pft_i)],
temp_val_dict['fracrc_{}'.format(pft_i)])
# final potential production and root:shoot ratio accounting for
# impacts of grazing
calc_final_tgprod_rtsh(
month_reg['tgprod_pot_prod_{}'.format(pft_i)],
temp_val_dict['fracrc_{}'.format(pft_i)],
month_reg['flgrem_{}'.format(pft_i)],
param_val_dict['grzeff_{}'.format(pft_i)],
param_val_dict['gremb_{}'.format(pft_i)],
month_reg['tgprod_{}'.format(pft_i)],
month_reg['rtsh_{}'.format(pft_i)])
# clean up temporary files
shutil.rmtree(temp_dir)
def _snow(
site_index_path, site_param_table, precip_path, tave_path,
max_temp_path, min_temp_path, prev_snow_path, prev_snlq_path,
current_month, snowmelt_path, snow_path, snlq_path,
inputs_after_snow_path, pet_rem_path):
"""Account for precipitation as snow and snowmelt from snowpack.
Determine whether precipitation falls as snow. Track the fate of
    new and existing snowpack, including evaporation and melting. Track the
    remaining snowpack, the liquid in snow, and the potential
evapotranspiration remaining after evaporation of snow. Snowcent.f
Parameters:
site_index_path (string): path to site spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters
precip_path (string): path to raster containing precipitation for the
current month
tave_path (string): path to raster containing average temperature for
the current month
max_temp_path (string): path to raster containing maximum temperature
for the current month
min_temp_path (string): path to raster containing minimum temperature
for the current month
prev_snow_path (string): path to raster containing current snowpack
prev_snlq_path (string): path to raster containing current liquid in
snow
        current_month (int): current month of the year, such that
            current_month=1 indicates January
        snowmelt_path (string): path to raster to contain snowmelt
snow_path (string): path to raster to contain modified snowpack
snlq_path (string): path to raster to contain modified liquid in snow
inputs_after_snow_path (string): path to raster containing water inputs
to the system after accounting for snow
pet_rem_path (string): path to raster containing potential
evapotranspiration remaining after any evaporation of snow
Side effects:
creates the raster indicated by `snowmelt_path`
creates the raster indicated by `snow_path`
creates the raster indicated by `snlq_path`
creates the raster indicated by `inputs_after_snow_path`
creates the raster indicated by `pet_rem_path`
Returns:
None
"""
def calc_snow_moisture(return_type):
"""Calculate change in snow, pet, snow liquid, and moisture inputs.
Record changes in snowpack, liquid in snow, potential
evapotranspiration energy, and liquid draining into soil from snow.
Parameters:
return_type (string): flag indicating whether modified snowpack,
modified liquid in snow, modified potential evapotranspiration,
or soil moisture inputs after snow should be returned
Returns:
the function `_calc_snow_moisture`
"""
def _calc_snow_moisture(
tave, precip, snow, snlq, pet, tmelt_1, tmelt_2, shwave):
"""Calculate the fate of moisture from snow.
Calculate new snowfall or rain on snow. Calculate direct
evaporation of snow and consumption of potential
evapotranspiration energy. Calculate snowmelt and liquid draining
from snow into the soil.
Parameters:
tave (numpy.ndarray): derived, average temperature
precip (numpy.ndarray): input, precipitation for this month
snow (numpy.ndarray): derived, existing snowpack prior to new
snowfall
snlq (numpy.ndarray): derived, existing liquid in snowpack
pet (numpy.ndarray): derived, potential evapotranspiration
tmelt_1 (numpy.ndarray): parameter, minimum temperature above
which snow will melt
tmelt_2 (numpy.ndarray): parameter, ratio between degrees above
the minimum temperature and cm of snow that will melt
shwave (numpy.ndarray): derived, shortwave radiation outside
the atmosphere
Returns:
snowmelt if return_type is 'snowmelt'
snow_revised if return_type is 'snow'
snlq_revised if return_type is 'snlq'
pet_revised if return_type is 'pet'
inputs_after_snow if return_type is 'inputs_after_snow'
"""
valid_mask = (
(tave != _IC_NODATA) &
(~numpy.isclose(precip, precip_nodata)) &
(~numpy.isclose(snow, _SV_NODATA)) &
(~numpy.isclose(snlq, _SV_NODATA)) &
(pet != _TARGET_NODATA) &
(tmelt_1 != _IC_NODATA) &
(tmelt_2 != _IC_NODATA) &
(shwave != _TARGET_NODATA))
inputs_after_snow = numpy.empty(precip.shape, dtype=numpy.float32)
inputs_after_snow[:] = _TARGET_NODATA
inputs_after_snow[valid_mask] = precip[valid_mask]
snowfall_mask = (valid_mask & (tave <= 0))
snow[snowfall_mask] = (snow[snowfall_mask] + precip[snowfall_mask])
inputs_after_snow[snowfall_mask] = 0.
rain_on_snow_mask = (
(valid_mask) &
(tave > 0) &
(snow > 0))
snlq[rain_on_snow_mask] = (
snlq[rain_on_snow_mask] + precip[rain_on_snow_mask])
inputs_after_snow[rain_on_snow_mask] = 0.
snowtot = numpy.zeros(snow.shape, dtype=numpy.float32)
snowtot[valid_mask] = numpy.maximum(
snow[valid_mask] + snlq[valid_mask], 0)
evap_mask = (valid_mask & (snowtot > 0.))
evsnow = numpy.zeros(snow.shape, dtype=numpy.float32)
evsnow[evap_mask] = numpy.minimum(
snowtot[evap_mask], pet[evap_mask] * 0.87)
snow_revised = numpy.empty(snow.shape, dtype=numpy.float32)
snow_revised[:] = _TARGET_NODATA
snow_revised[valid_mask] = snow[valid_mask]
snow_revised[evap_mask] = numpy.maximum(
snow[evap_mask] - evsnow[evap_mask] *
(snow[evap_mask] / snowtot[evap_mask]), 0.)
snlq_revised = numpy.zeros(snow.shape, dtype=numpy.float32)
snlq_revised[valid_mask] = snlq[valid_mask]
snlq_revised[evap_mask] = numpy.maximum(
snlq[evap_mask] - evsnow[evap_mask] *
(snlq[evap_mask] / snowtot[evap_mask]), 0.)
pet_revised = numpy.empty(snow.shape, dtype=numpy.float32)
pet_revised[:] = _TARGET_NODATA
pet_revised[valid_mask] = pet[valid_mask]
pet_revised[evap_mask] = numpy.maximum(
(pet[evap_mask] - evsnow[evap_mask] / 0.87), 0.)
melt_mask = (valid_mask & (tave >= tmelt_1))
snowmelt = numpy.zeros(snow.shape, dtype=numpy.float32)
snowmelt[melt_mask] = numpy.clip(
tmelt_2[melt_mask] * (tave[melt_mask] - tmelt_1[melt_mask]) *
shwave[melt_mask], 0., snow_revised[melt_mask])
snow_revised[melt_mask] = (
snow_revised[melt_mask] - snowmelt[melt_mask])
snlq_revised[melt_mask] = (
snlq_revised[melt_mask] + snowmelt[melt_mask])
drain_mask = (melt_mask & (snlq_revised > 0.5 * snow_revised))
inputs_after_snow[drain_mask] = (
snlq_revised[drain_mask] - 0.5 * snow_revised[drain_mask])
snlq_revised[drain_mask] = (
snlq_revised[drain_mask] - inputs_after_snow[drain_mask])
if return_type == 'snowmelt':
return snowmelt
elif return_type == 'snow':
return snow_revised
elif return_type == 'snlq':
return snlq_revised
elif return_type == 'pet':
return pet_revised
else:
return inputs_after_snow
return _calc_snow_moisture
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
for val in ['shwave', 'pet']:
temp_val_dict[val] = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict = {}
for val in ['tmelt_1', 'tmelt_2', 'fwloss_4']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for (
site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_val, target_path, gdal.GDT_Float32,
_IC_NODATA)
max_temp_nodata = pygeoprocessing.get_raster_info(
max_temp_path)['nodata'][0]
min_temp_nodata = pygeoprocessing.get_raster_info(
min_temp_path)['nodata'][0]
precip_nodata = pygeoprocessing.get_raster_info(
precip_path)['nodata'][0]
# solar radiation outside the atmosphere
_shortwave_radiation(precip_path, current_month, temp_val_dict['shwave'])
# pet, reference evapotranspiration modified by fwloss parameter
_reference_evapotranspiration(
max_temp_path, min_temp_path, temp_val_dict['shwave'],
param_val_dict['fwloss_4'], temp_val_dict['pet'])
# calculate snowmelt
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
tave_path, precip_path, prev_snow_path,
prev_snlq_path, temp_val_dict['pet'],
param_val_dict['tmelt_1'], param_val_dict['tmelt_2'],
temp_val_dict['shwave']]],
calc_snow_moisture('snowmelt'), snowmelt_path,
gdal.GDT_Float32, _TARGET_NODATA)
# calculate change in snow
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
tave_path, precip_path, prev_snow_path,
prev_snlq_path, temp_val_dict['pet'],
param_val_dict['tmelt_1'], param_val_dict['tmelt_2'],
temp_val_dict['shwave']]],
calc_snow_moisture("snow"), snow_path,
gdal.GDT_Float32, _TARGET_NODATA)
# calculate change in liquid in snow
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
tave_path, precip_path, prev_snow_path,
prev_snlq_path, temp_val_dict['pet'],
param_val_dict['tmelt_1'], param_val_dict['tmelt_2'],
temp_val_dict['shwave']]],
calc_snow_moisture("snlq"), snlq_path,
gdal.GDT_Float32, _TARGET_NODATA)
# calculate change in potential evapotranspiration energy
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
tave_path, precip_path, prev_snow_path,
prev_snlq_path, temp_val_dict['pet'],
param_val_dict['tmelt_1'], param_val_dict['tmelt_2'],
temp_val_dict['shwave']]],
calc_snow_moisture("pet"), pet_rem_path,
gdal.GDT_Float32, _TARGET_NODATA)
# calculate soil moisture inputs draining from snow after snowmelt
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
tave_path, precip_path, prev_snow_path,
prev_snlq_path, temp_val_dict['pet'],
param_val_dict['tmelt_1'], param_val_dict['tmelt_2'],
temp_val_dict['shwave']]],
calc_snow_moisture("inputs_after_snow"), inputs_after_snow_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
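# Illustrative sketch (not part of the model run): a scalar worked example of
# the snowmelt branch inside `_snow` above, with made-up values. When average
# temperature is at or above tmelt_1, melt is proportional to degrees above
# tmelt_1 and to shortwave radiation, and is capped at the existing snowpack.
def _example_snowmelt_scalar():
    tave, snow = 4., 3.           # hypothetical average temperature and snowpack
    tmelt_1, tmelt_2 = 0., 0.002  # hypothetical melt parameters
    shwave = 600.                 # hypothetical shortwave radiation
    if tave < tmelt_1:
        return 0.
    snowmelt = min(max(tmelt_2 * (tave - tmelt_1) * shwave, 0.), snow)
    return snowmelt  # water released from the snowpack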
def _calc_aboveground_live_biomass(sum_aglivc, sum_tgprod):
"""Calculate aboveground live biomass for purposes of soil water.
Live biomass impacts loss of moisture inputs through canopy
interception and evapotranspiration. Because soil moisture is computed
after potential production, but before actual growth of plants, some of
the predicted growth in biomass (i.e., tgprod) is added here to
existing standing live biomass (i.e., aglivc * 2.5; line 80,
potprod.f, in Century).
Parameters:
sum_aglivc (numpy.ndarray): the sum of aglivc across plant
functional types (pft), weighted by % cover of the pft
sum_tgprod (numpy.ndarray): sum of tgprod, potential production
limited by soil water, nutrient availability, and grazing,
across pfts weighted by % cover of the pft
Returns:
aliv, aboveground live biomass for soil water submodel
"""
valid_mask = (
(sum_aglivc != _TARGET_NODATA) &
(sum_tgprod != _TARGET_NODATA))
aliv = numpy.empty(sum_aglivc.shape, dtype=numpy.float32)
aliv[:] = _TARGET_NODATA
aliv[valid_mask] = (
sum_aglivc[valid_mask] * 2.5 + (0.25 * sum_tgprod[valid_mask]))
return aliv
def _calc_standing_biomass(aliv, sum_stdedc):
"""Calculate total aboveground standing biomass for soil water.
Total standing biomass impacts loss of moisture inputs by increasing
total canopy interception and decreasing bare soil evaporation. It is
the sum of live and dead standing biomass across plant functional
types, bounded to be <= 800 g/m2.
Parameters:
aliv (numpy.ndarray): aboveground live biomass, calculated from
aglivc and tgprod across plant functional types
sum_stdedc (numpy.ndarray): aboveground standing dead C summed
across plant functional types
Returns:
sd, total aboveground standing biomass for soil water.
"""
valid_mask = (
(aliv != _TARGET_NODATA) &
(sum_stdedc != _TARGET_NODATA))
sd = numpy.empty(aliv.shape, dtype=numpy.float32)
sd[:] = _TARGET_NODATA
sd[valid_mask] = numpy.minimum(
aliv[valid_mask] + (sum_stdedc[valid_mask] * 2.5), 800.)
return sd
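# Illustrative sketch (not part of the model run): a scalar worked example of
# the biomass quantities used by the soil water submodel, combining
# `_calc_aboveground_live_biomass` and `_calc_standing_biomass` above with
# made-up values. Carbon is converted to biomass by multiplying by 2.5, a
# quarter of predicted production is counted as already standing, and total
# standing biomass is capped at 800 g/m2.
def _example_standing_biomass_scalar():
    sum_aglivc = 120.   # hypothetical live C summed across PFTs
    sum_tgprod = 200.   # hypothetical potential production summed across PFTs
    sum_stdedc = 80.    # hypothetical standing dead C summed across PFTs
    aliv = sum_aglivc * 2.5 + 0.25 * sum_tgprod
    sd = min(aliv + sum_stdedc * 2.5, 800.)
    return aliv, sd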
def subtract_surface_losses(return_type):
"""Calculate surface losses to runoff and surface evaporation.
Calculate the loss of surface moisture to runoff, canopy interception,
and bare soil evaporation.
Parameters:
return_type (string): flag indicating whether soil moisture inputs
after surface losses or total surface evaporation should be
returned
Returns:
the function `_subtract_surface_losses`
"""
def _subtract_surface_losses(
inputs_after_snow, fracro, precro, snow, alit, sd, fwloss_1,
fwloss_2, pet_rem):
"""Subtract moisture losses to runoff, interception, and evaporation.
Of the surface water inputs from precipitation and snowmelt, some water
is lost to runoff (line 113, H2olos.f). After runoff, some water is
lost to canopy interception and bare soil evaporation, if there is no
snow cover. Loss to canopy interception and bare soil evaporation is
a function of live, standing dead, and surface litter biomass. The
total loss of moisture to interception and bare soil evaporation is
bounded to be less than or equal to 40% of reference
evapotranspiration.
Parameters:
inputs_after_snow (numpy.ndarray): derived, surface water inputs
from precipitation and snowmelt, prior to runoff
fracro (numpy.ndarray): parameter, fraction of surface water
above precro that is lost to runoff
precro (numpy.ndarray): parameter, amount of surface water that
must be available for runoff to occur
snow (numpy.ndarray): derived, current snowpack
alit (numpy.ndarray): derived, biomass in surface litter
sd (numpy.ndarray): derived, total standing biomass
fwloss_1 (numpy.ndarray): parameter, scaling factor for
interception and evaporation of precip by vegetation
fwloss_2 (numpy.ndarray): parameter, scaling factor for bare soil
evaporation of precip
pet_rem (numpy.ndarray): derived, potential evaporation remaining
after evaporation of snow
Returns:
inputs_after_surface, surface water inputs to soil after runoff
and surface evaporation are subtracted, if return_type is
'inputs_after_surface'
absevap, bare soil evaporation, if return_type is 'absevap'
evap_losses, total surface evaporation, if return_type is
'evap_losses'
"""
valid_mask = (
(inputs_after_snow != _TARGET_NODATA) &
(fracro != _IC_NODATA) &
(precro != _IC_NODATA) &
(snow != _TARGET_NODATA) &
(alit != _TARGET_NODATA) &
(sd != _TARGET_NODATA) &
(fwloss_1 != _IC_NODATA) &
(fwloss_2 != _IC_NODATA) &
(pet_rem != _TARGET_NODATA))
runoff = numpy.empty(inputs_after_snow.shape, dtype=numpy.float32)
runoff[:] = _TARGET_NODATA
runoff[valid_mask] = numpy.maximum(
fracro[valid_mask] *
(inputs_after_snow[valid_mask] - precro[valid_mask]), 0.)
inputs_after_runoff = numpy.empty(
inputs_after_snow.shape, dtype=numpy.float32)
inputs_after_runoff[:] = _TARGET_NODATA
inputs_after_runoff[valid_mask] = (
inputs_after_snow[valid_mask] - runoff[valid_mask])
evap_mask = (valid_mask & (snow <= 0))
# loss to interception
aint = numpy.zeros(inputs_after_snow.shape, dtype=numpy.float32)
aint[evap_mask] = (
(0.0003 * alit[evap_mask] + 0.0006 * sd[evap_mask]) *
fwloss_1[evap_mask])
# loss to bare soil evaporation
absevap = numpy.empty(inputs_after_snow.shape, dtype=numpy.float32)
absevap[:] = _TARGET_NODATA
absevap[valid_mask] = 0.
absevap[evap_mask] = (
0.5 *
numpy.exp((-0.002 * alit[evap_mask]) - (0.004 * sd[evap_mask])) *
fwloss_2[evap_mask])
# total losses to interception and evaporation
evap_losses = numpy.empty(inputs_after_snow.shape, dtype=numpy.float32)
evap_losses[:] = _TARGET_NODATA
evap_losses[valid_mask] = 0.
evap_losses[evap_mask] = (
numpy.minimum(((absevap[evap_mask] + aint[evap_mask]) *
inputs_after_runoff[evap_mask]), (0.4 * pet_rem[evap_mask])))
# remaining inputs after evaporation
inputs_after_surface = numpy.empty(
inputs_after_snow.shape, dtype=numpy.float32)
inputs_after_surface[:] = _TARGET_NODATA
inputs_after_surface[valid_mask] = inputs_after_runoff[valid_mask]
inputs_after_surface[evap_mask] = (
inputs_after_runoff[evap_mask] - evap_losses[evap_mask])
if return_type == 'inputs_after_surface':
return inputs_after_surface
elif return_type == 'absevap':
return absevap
elif return_type == 'evap_losses':
return evap_losses
return _subtract_surface_losses
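# Illustrative sketch (not part of the model run): a scalar worked example of
# the surface loss accounting applied pixelwise by `subtract_surface_losses`
# above, for a snow-free pixel with made-up values. Runoff is removed first;
# the combined loss to canopy interception and bare soil evaporation is then
# capped at 40% of the remaining potential evapotranspiration.
def _example_surface_losses_scalar():
    inputs_after_snow = 8.       # hypothetical surface water inputs
    fracro, precro = 0.15, 8.    # hypothetical runoff parameters
    alit, sd = 150., 400.        # hypothetical litter and standing biomass
    fwloss_1, fwloss_2 = 1., 1.  # hypothetical scaling factors
    pet_rem = 10.                # hypothetical remaining potential ET
    runoff = max(fracro * (inputs_after_snow - precro), 0.)
    inputs_after_runoff = inputs_after_snow - runoff
    aint = (0.0003 * alit + 0.0006 * sd) * fwloss_1   # canopy interception
    absevap = 0.5 * numpy.exp(-0.002 * alit - 0.004 * sd) * fwloss_2
    evap_losses = min(
        (absevap + aint) * inputs_after_runoff, 0.4 * pet_rem)
    return inputs_after_runoff - evap_losses  # inputs after surface losses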
def calc_potential_transpiration(return_type):
"""Calculate potential transpiration and evaporation from soil layer 1.
Calculate potential transpiration (trap), potential evaporation from
soil layer 1 (pevp), and initial transpiration water loss (tran).
Remove the initial transpiration water loss from soil moisture inputs
at this step.
Parameters:
return_type (string): flag indicating whether potential transpiration,
potential evaporation from soil layer 1, or modified moisture
inputs should be returned
Returns:
the function `_calc_potential_transpiration`
"""
def _calc_potential_transpiration(
pet_rem, evap_losses, tave, aliv, current_moisture_inputs):
"""Calculate potential water losses to transpiration.
Calculate potential transpiration (trap), the total potential
transpiration from all soil layers by plants. Calculate potential
evaporation from soil layer 1 (pevp); this amount is calculated prior
to transpiration but actually removed after water loss to transpiration
from all soil layers has been accounted. Calculate actual transpiration
(tran). Remove actual transpiration water losses from moisture inputs
before distributing water to soil layers. This is necessary for a
monthly time step to give plants in wet climates adequate access to
water for transpiration.
Parameters:
pet_rem (numpy.ndarray): derived, potential evapotranspiration
remaining after evaporation of snow
evap_losses (numpy.ndarray): derived, total surface evaporation
tave (numpy.ndarray): derived, average temperature
aliv (numpy.ndarray): aboveground live biomass, calculated from
aglivc and tgprod across plant functional types
current_moisture_inputs (numpy.ndarray): derived, moisture inputs
after surface losses
Returns:
trap if return_type is 'trap'
pevp if return_type is 'pevp'
modified_moisture_inputs if return_type is
'modified_moisture_inputs'
"""
valid_mask = (
(pet_rem != _TARGET_NODATA) &
(evap_losses != _TARGET_NODATA) &
(tave != _IC_NODATA) &
(aliv != _TARGET_NODATA) &
(current_moisture_inputs != _TARGET_NODATA))
trap = numpy.empty(pet_rem.shape, dtype=numpy.float32)
trap[:] = _TARGET_NODATA
trap[valid_mask] = pet_rem[valid_mask] - evap_losses[valid_mask]
no_transpiration_mask = (valid_mask & (tave < 2))
trap[no_transpiration_mask] = 0.
transpiration_mask = (valid_mask & (tave >= 2))
trap[transpiration_mask] = numpy.maximum(
numpy.minimum(
trap[transpiration_mask], pet_rem[transpiration_mask] *
0.65 * (1 - numpy.exp(-0.02 * aliv[transpiration_mask]))), 0.)
trap[valid_mask] = numpy.maximum(trap[valid_mask], 0.01)
pevp = numpy.empty(pet_rem.shape, dtype=numpy.float32)
pevp[:] = _TARGET_NODATA
pevp[valid_mask] = numpy.maximum(
pet_rem[valid_mask] - trap[valid_mask] - evap_losses[valid_mask],
0.)
tran = numpy.empty(pet_rem.shape, dtype=numpy.float32)
tran[:] = _TARGET_NODATA
tran[valid_mask] = numpy.minimum(
trap[valid_mask] - 0.01, current_moisture_inputs[valid_mask])
trap[valid_mask] = trap[valid_mask] - tran[valid_mask]
modified_moisture_inputs = numpy.empty(
pet_rem.shape, dtype=numpy.float32)
modified_moisture_inputs[:] = _TARGET_NODATA
modified_moisture_inputs[valid_mask] = (
current_moisture_inputs[valid_mask] - tran[valid_mask])
if return_type == 'trap':
return trap
elif return_type == 'pevp':
return pevp
elif return_type == 'modified_moisture_inputs':
return modified_moisture_inputs
return _calc_potential_transpiration
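# Illustrative sketch (not part of the model run): a scalar worked example of
# the potential transpiration logic applied pixelwise by
# `calc_potential_transpiration` above, with made-up values. Potential
# transpiration is limited by live biomass, kept above 0.01, and part of it
# (tran) is satisfied immediately from the incoming moisture.
def _example_potential_transpiration_scalar():
    pet_rem, evap_losses = 10., 2.   # hypothetical potential ET and surface losses
    tave, aliv = 15., 350.           # hypothetical temperature and live biomass
    current_moisture_inputs = 5.     # hypothetical moisture after surface losses
    trap = pet_rem - evap_losses
    if tave < 2:
        trap = 0.
    else:
        trap = max(
            min(trap, pet_rem * 0.65 * (1 - numpy.exp(-0.02 * aliv))), 0.)
    trap = max(trap, 0.01)
    pevp = max(pet_rem - trap - evap_losses, 0.)       # potential soil evaporation
    tran = min(trap - 0.01, current_moisture_inputs)   # immediate transpiration
    trap = trap - tran
    return trap, pevp, current_moisture_inputs - tran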
def distribute_water_to_soil_layer(return_type):
"""Distribute moisture inputs to one soil layer prior to transpiration.
Soil moisture inputs after runoff, evaporation, and initial
transpiration are distributed to soil layers sequentially according to the
field capacity of the layer. If moisture inputs exceed the field capacity
of the layer, the remainder of moisture inputs move down to the next
adjacent soil layer.
Returns:
the function `_distribute_water`
"""
def _distribute_water(adep, afiel, asmos, current_moisture_inputs):
"""Revise soil moisture in this soil layer prior to transpiration.
Moisture inputs coming into this soil layer are compared to the field
capacity of the layer. If the field capacity is exceeded, the excess
moisture moves from this layer to the next adjacent layer.
Parameters:
adep (numpy.ndarray): parameter, depth of this soil layer in cm
afiel (numpy.ndarray): derived, field capacity of this layer
asmos (numpy.ndarray): state variable, current soil moisture
content of this soil layer
current_moisture_inputs (numpy.ndarray): derived, moisture inputs
added to this soil layer
Returns:
asmos_revised, revised soil moisture in this layer, if return_type
is 'asmos_revised'
amov, moisture flowing from this layer into the next, if
return_type is 'amov'
"""
valid_mask = (
(adep != _IC_NODATA) &
(afiel != _TARGET_NODATA) &
(~numpy.isclose(asmos, _SV_NODATA)) &
(current_moisture_inputs != _TARGET_NODATA))
afl = numpy.empty(adep.shape, dtype=numpy.float32)
afl[:] = _TARGET_NODATA
afl[valid_mask] = adep[valid_mask] * afiel[valid_mask]
asmos_interm = numpy.empty(adep.shape, dtype=numpy.float32)
asmos_interm[:] = _TARGET_NODATA
asmos_interm[valid_mask] = (
asmos[valid_mask] + current_moisture_inputs[valid_mask])
amov = numpy.empty(adep.shape, dtype=numpy.float32)
amov[:] = _TARGET_NODATA
exceeded_mask = (valid_mask & (asmos_interm > afl))
amov[exceeded_mask] = asmos_interm[exceeded_mask]
asmos_revised = numpy.empty(adep.shape, dtype=numpy.float32)
asmos_revised[:] = _TARGET_NODATA
asmos_revised[valid_mask] = asmos_interm[valid_mask]
asmos_revised[exceeded_mask] = afl[exceeded_mask]
notexceeded_mask = (valid_mask & (asmos_interm <= afl))
amov[notexceeded_mask] = 0.
if return_type == 'asmos_revised':
return asmos_revised
elif return_type == 'amov':
return amov
return _distribute_water
def calc_available_water_for_transpiration(asmos, awilt, adep):
"""Calculate water available for transpiration in one soil layer.
The water available for transpiration is the amount of water in the soil
layer minus the wilting point of the soil layer.
Parameters:
asmos (numpy.ndarray): derived, interim moisture in the soil layer
awilt (numpy.ndarray): derived, wilting point of the soil layer
adep (numpy.ndarray): parameter, depth of the soil layer in cm
Returns:
avw, available water for transpiration
"""
valid_mask = (
(asmos != _TARGET_NODATA) &
(awilt != _TARGET_NODATA) &
(adep != _IC_NODATA))
avw = numpy.empty(asmos.shape, dtype=numpy.float32)
avw[:] = _TARGET_NODATA
avw[valid_mask] = numpy.maximum(
asmos[valid_mask] - awilt[valid_mask] * adep[valid_mask], 0.)
return avw
def revise_potential_transpiration(trap, tot):
"""Revise potential transpiration according to water available.
Total potential transpiration, trap, is revised to be less than or equal
to total water available for transpiration, tot. Total water available
for transpiration is the sum of available water per soil layer.
Line 241, H2olos.f
Parameters:
trap (numpy.ndarray): derived, potential transpiration water losses
tot (numpy.ndarray): derived, total soil water available for
transpiration
Returns:
trap_revised, revised total potential transpiration
"""
valid_mask = (
(trap != _TARGET_NODATA) &
(tot != _TARGET_NODATA))
trap_revised = numpy.empty(trap.shape, dtype=numpy.float32)
trap_revised[:] = _TARGET_NODATA
trap_revised[valid_mask] = numpy.minimum(trap[valid_mask], tot[valid_mask])
return trap_revised
def remove_transpiration(return_type):
"""Remove water from a soil layer via transpiration by plants.
Transpiration from one soil layer is apportioned from total potential
transpiration, trap, according to the available water for transpiration in
this soil layer. Lines 218-294, H2olos.f
Parameters:
return_type (string): flag indicating whether avinj (water in this soil
layer available to plants for growth) or asmos (total water in this
soil layer) should be returned
Returns:
the function `_remove_transpiration`
"""
def _remove_transpiration(asmos, awilt, adep, trap, awwt, tot2):
"""Remove water from a soil layer via transpiration by plants.
Parameters:
asmos (numpy.ndarray): derived, interim moisture in this soil layer
after additions from current month precipitation
awilt (numpy.ndarray): derived, wilting point of this soil layer
adep (numpy.ndarray): parameter, depth of this soil layer in cm
trap (numpy.ndarray): derived, total potential transpiration
across all soil layers accessible by plant roots
awwt (numpy.ndarray): derived, water available for transpiration
in this soil layer weighted by transpiration depth distribution
parameter
tot2 (numpy.ndarray): derived, the sum of weighted water available
for transpiration across soil layers
Returns:
avinj, water available to plants for growth in this layer after
losses to transpiration, if return type is 'avinj'
asmos_revised, total water in this layer after losses to
transpiration, if return type is 'asmos'
"""
valid_mask = (
(asmos != _TARGET_NODATA) &
(awilt != _TARGET_NODATA) &
(adep != _IC_NODATA) &
(trap != _TARGET_NODATA) &
(awwt != _TARGET_NODATA) &
(tot2 != _TARGET_NODATA))
avinj = numpy.empty(asmos.shape, dtype=numpy.float32)
avinj[:] = _TARGET_NODATA
avinj[valid_mask] = numpy.maximum(
asmos[valid_mask] - awilt[valid_mask] * adep[valid_mask], 0.)
transpire_mask = (valid_mask & (tot2 > 0))
transpiration_loss = numpy.zeros(asmos.shape, dtype=numpy.float32)
transpiration_loss[transpire_mask] = numpy.minimum(
(trap[transpire_mask] *
awwt[transpire_mask]) / tot2[transpire_mask],
avinj[transpire_mask])
avinj[valid_mask] = avinj[valid_mask] - transpiration_loss[valid_mask]
asmos_revised = numpy.empty(asmos.shape, dtype=numpy.float32)
asmos_revised[:] = _TARGET_NODATA
asmos_revised[valid_mask] = (
asmos[valid_mask] - transpiration_loss[valid_mask])
if return_type == 'avinj':
return avinj
elif return_type == 'asmos':
return asmos_revised
return _remove_transpiration
def calc_relative_water_content_lyr_1(asmos_1, adep_1, awilt_1, afiel_1):
"""Calculate the relative water content of soil layer 1.
The relative water content of soil layer 1, prior to any evaporation losses
from soil layer 1, is used to estimate water available for evaporation
from soil layer 1. Line 280, H2olos.f
Parameters:
asmos_1 (numpy.ndarray): derived, interim moisture in soil layer 1
after losses to transpiration
adep_1 (numpy.ndarray): parameter, depth of soil layer 1 in cm
awilt_1 (numpy.ndarray): derived, wilting point of soil layer 1
afiel_1 (numpy.ndarray): derived, field capacity of soil layer 1
Returns:
rwcf_1, relative water content of soil layer 1
"""
valid_mask = (
(asmos_1 != _TARGET_NODATA) &
(adep_1 != _IC_NODATA) &
(awilt_1 != _TARGET_NODATA) &
(afiel_1 != _TARGET_NODATA))
    rwcf_1 = numpy.empty(asmos_1.shape, dtype=numpy.float32)
    rwcf_1[:] = _TARGET_NODATA
    rwcf_1[valid_mask] = (
(asmos_1[valid_mask] / adep_1[valid_mask] - awilt_1[valid_mask]) /
(afiel_1[valid_mask] - awilt_1[valid_mask]))
return rwcf_1
def calc_evaporation_loss(rwcf_1, pevp, absevap, asmos_1, awilt_1, adep_1):
"""Calculate evaporation from soil layer 1.
Some moisture is lost from soil layer 1 (i.e., the top soil layer) to
evaporation, separate from surface evaporation and transpiration by plants.
This amount is calculated from potential soil evaporation, which was
calculated from potential evapotranspiration prior to allocation of water
to soil layers. It is restricted to be less than or equal to water
available in this soil layer.
Parameters:
rwcf_1 (numpy.ndarray): derived, relative water content of soil layer 1
pevp (numpy.ndarray): derived, potential evaporation from soil layer 1
absevap (numpy.ndarray): derived, bare soil evaporation
asmos_1 (numpy.ndarray): derived, interim moisture in soil layer 1
awilt_1 (numpy.ndarray): derived, wilting point of soil layer 1
adep_1 (numpy.ndarray): parameter, depth of soil layer 1 in cm
Returns:
evlos, moisture evaporated from soil layer 1
"""
valid_mask = (
(rwcf_1 != _TARGET_NODATA) &
(pevp != _TARGET_NODATA) &
(absevap != _TARGET_NODATA) &
(asmos_1 != _TARGET_NODATA) &
(awilt_1 != _TARGET_NODATA) &
(adep_1 != _IC_NODATA))
evmt = numpy.empty(rwcf_1.shape, dtype=numpy.float32)
evmt[:] = _TARGET_NODATA
evmt[valid_mask] = numpy.maximum(
(rwcf_1[valid_mask] - 0.25) / (1 - 0.25), 0.01)
evlos = numpy.empty(rwcf_1.shape, dtype=numpy.float32)
evlos[:] = _TARGET_NODATA
evlos[valid_mask] = numpy.minimum(
evmt[valid_mask] * pevp[valid_mask] * absevap[valid_mask] * 0.1,
numpy.maximum(
asmos_1[valid_mask] - awilt_1[valid_mask] *
adep_1[valid_mask], 0.))
return evlos
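# Illustrative sketch (not part of the original model): with the made-up values
# below, evmt = max((0.667 - 0.25) / 0.75, 0.01) ~= 0.56, so the evaporative
# loss is evlos = min(0.56 * pevp * absevap * 0.1, water above wilting point)
# ~= 0.17, well under the ~2 cm of plant-available water in the layer.
def _example_evaporation_loss():
    """Run calc_evaporation_loss on a one-pixel example array."""
    rwcf_1 = numpy.array([0.667], dtype=numpy.float32)  # relative water content
    pevp = numpy.array([2.], dtype=numpy.float32)       # potential evaporation
    absevap = numpy.array([1.5], dtype=numpy.float32)   # bare soil evaporation
    asmos_1 = numpy.array([3.], dtype=numpy.float32)
    awilt_1 = numpy.array([0.1], dtype=numpy.float32)
    adep_1 = numpy.array([10.], dtype=numpy.float32)
    return calc_evaporation_loss(rwcf_1, pevp, absevap, asmos_1, awilt_1, adep_1)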
def _soil_water(
aligned_inputs, site_param_table, veg_trait_table, current_month,
month_index, prev_sv_reg, pp_reg, pft_id_set, month_reg, sv_reg):
"""Allocate precipitation to runoff, transpiration, and soil moisture.
Simulate snowfall and account for evaporation and melting of the snow pack.
Allocate the flow of precipitation through interception by plants,
runoff and infiltration into the soil, percolation through the soil, and
transpiration by plants. Update soil moisture in each soil layer.
Estimate avh2o_1 for each PFT (water available to the PFT for growth),
avh2o_3 (water in first two soil layers), and amov_<lyr> (saturated flow
of water between soil layers, used in decomposition and mineral leaching).
Parameters:
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including precipitation, temperature,
plant functional type composition, and site spatial index
site_param_table (dict): map of site spatial indices to dictionaries
containing site parameters
veg_trait_table (dict): map of pft id to dictionaries containing
plant functional type parameters, including nlaypg, number of soil
            layers accessible by plant roots
current_month (int): month of the year, such that current_month=1
indicates January
month_index (int): month of the simulation, such that month_index=1
indicates month 1 of the simulation
prev_sv_reg (dict): map of key, path pairs giving paths to state
variables for the previous month
pp_reg (dict): map of key, path pairs giving persistent parameters
including field capacity of each soil layer
pft_id_set (set): set of integers identifying plant functional types
month_reg (dict): map of key, path pairs giving paths to intermediate
calculated values that are shared between submodels
sv_reg (dict): map of key, path pairs giving paths to state variables
for the current month
Side effects:
creates the raster indicated by `sv_reg['snow_path']`, current snowpack
creates the raster indicated by `sv_reg['snlq_path']`, current liquid
in snow
creates the raster indicated by
`sv_reg['asmos_<lyr>_path']`, soil moisture content, for each soil
layer accessible by roots of any plant functional type
creates the rasters indicated by `month_reg['amov_<lyr>']` for each
soil layer, saturated flow of water from that soil layer
creates the raster indicated by `sv_reg['avh2o_1_<PFT>_path']`, soil
moisture available for growth, for each plant functional type (PFT)
creates the raster indicated by `sv_reg['avh2o_3_path']`, available
water in the top two soil layers
Returns:
None
"""
def calc_avg_temp(max_temp, min_temp):
"""Calculate average temperature from maximum and minimum temp."""
valid_mask = (
(~numpy.isclose(max_temp, max_temp_nodata)) &
(~numpy.isclose(min_temp, min_temp_nodata)))
tave = numpy.empty(max_temp.shape, dtype=numpy.float32)
tave[:] = _IC_NODATA
tave[valid_mask] = (max_temp[valid_mask] + min_temp[valid_mask]) / 2.
return tave
def calc_surface_litter_biomass(strucc_1, metabc_1):
"""Calculate biomass in surface litter."""
valid_mask = (
(~numpy.isclose(strucc_1, _SV_NODATA)) &
(~numpy.isclose(metabc_1, _SV_NODATA)))
alit = numpy.empty(strucc_1.shape, dtype=numpy.float32)
alit[:] = _TARGET_NODATA
alit[valid_mask] = (strucc_1[valid_mask] + metabc_1[valid_mask]) * 2.5
alit = numpy.minimum(alit, 400)
return alit
max_temp_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['max_temp_{}'.format(current_month)])['nodata'][0]
min_temp_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['min_temp_{}'.format(current_month)])['nodata'][0]
# get max number of soil layers accessible by plants
nlaypg_max = int(max(val['nlaypg'] for val in veg_trait_table.values()))
# max number of soil layers simulated, beyond those accessible by plants
nlayer_max = int(max(val['nlayer'] for val in site_param_table.values()))
# temporary intermediate rasters for soil water submodel
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
for val in [
'tave', 'current_moisture_inputs', 'modified_moisture_inputs',
'pet_rem', 'alit', 'sum_aglivc', 'sum_stdedc', 'sum_tgprod',
'aliv', 'sd', 'absevap', 'evap_losses', 'trap', 'trap_revised',
'pevp', 'tot', 'tot2', 'rwcf_1', 'evlos', 'avinj_interim_1']:
temp_val_dict[val] = os.path.join(temp_dir, '{}.tif'.format(val))
# temporary intermediate values for each layer accessible by plants
for val in ['avw', 'awwt', 'avinj']:
for lyr in range(1, nlaypg_max + 1):
val_lyr = '{}_{}'.format(val, lyr)
temp_val_dict[val_lyr] = os.path.join(
temp_dir, '{}.tif'.format(val_lyr))
# temporary intermediate value for each layer total
for lyr in range(1, nlayer_max + 1):
val_lyr = 'asmos_interim_{}'.format(lyr)
temp_val_dict[val_lyr] = os.path.join(
temp_dir, '{}.tif'.format(val_lyr))
# PFT-level temporary calculated values
for pft_i in pft_id_set:
for val in ['tgprod_weighted', 'sum_avinj']:
temp_val_dict['{}_{}'.format(val, pft_i)] = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
param_val_dict = {}
for val in ['fracro', 'precro', 'fwloss_1', 'fwloss_2']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
for lyr in range(1, nlaypg_max + 1):
val_lyr = 'awtl_{}'.format(lyr)
target_path = os.path.join(temp_dir, '{}.tif'.format(val_lyr))
param_val_dict[val_lyr] = target_path
site_to_val = dict(
[(site_code, float(table[val_lyr])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
for lyr in range(1, nlayer_max + 1):
val_lyr = 'adep_{}'.format(lyr)
target_path = os.path.join(temp_dir, '{}.tif'.format(val_lyr))
param_val_dict[val_lyr] = target_path
site_to_val = dict(
[(site_code, float(table[val_lyr])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
# calculate canopy and litter cover that influence moisture inputs
# calculate biomass in surface litter
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
prev_sv_reg['strucc_1_path'], prev_sv_reg['metabc_1_path']]],
calc_surface_litter_biomass, temp_val_dict['alit'],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate the sum of aglivc (standing live biomass) and stdedc
# (standing dead biomass) across PFTs, weighted by % cover of each PFT
for sv in ['aglivc', 'stdedc']:
weighted_sum_path = temp_val_dict['sum_{}'.format(sv)]
weighted_state_variable_sum(
sv, prev_sv_reg, aligned_inputs, pft_id_set, weighted_sum_path)
# calculate the weighted sum of tgprod, potential production, across PFTs
weighted_path_list = []
for pft_i in pft_id_set:
do_growth = (
current_month != veg_trait_table[pft_i]['senescence_month'] and
str(current_month) in veg_trait_table[pft_i]['growth_months'])
if do_growth:
target_path = temp_val_dict['tgprod_weighted_{}'.format(pft_i)]
pft_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['pft_{}'.format(pft_i)])['nodata'][0]
raster_multiplication(
month_reg['tgprod_{}'.format(pft_i)], _TARGET_NODATA,
aligned_inputs['pft_{}'.format(pft_i)], pft_nodata,
target_path, _TARGET_NODATA)
weighted_path_list.append(target_path)
if weighted_path_list:
raster_list_sum(
weighted_path_list, _TARGET_NODATA,
temp_val_dict['sum_tgprod'], _TARGET_NODATA, nodata_remove=True)
else: # no potential production occurs this month, so tgprod = 0
pygeoprocessing.new_raster_from_base(
temp_val_dict['sum_aglivc'], temp_val_dict['sum_tgprod'],
gdal.GDT_Float32, [_TARGET_NODATA], fill_value_list=[0.])
# calculate average temperature
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
aligned_inputs['max_temp_{}'.format(current_month)],
aligned_inputs['min_temp_{}'.format(current_month)]]],
calc_avg_temp, temp_val_dict['tave'], gdal.GDT_Float32, _IC_NODATA)
# calculate aboveground live biomass
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['sum_aglivc'], temp_val_dict['sum_tgprod']]],
_calc_aboveground_live_biomass, temp_val_dict['aliv'],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate total standing biomass
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['aliv'], temp_val_dict['sum_stdedc']]],
_calc_standing_biomass, temp_val_dict['sd'],
gdal.GDT_Float32, _TARGET_NODATA)
# modify standing snow, liquid in snow, return moisture inputs after snow
_snow(
aligned_inputs['site_index'], site_param_table,
aligned_inputs['precip_{}'.format(month_index)],
temp_val_dict['tave'],
aligned_inputs['max_temp_{}'.format(current_month)],
aligned_inputs['min_temp_{}'.format(current_month)],
prev_sv_reg['snow_path'], prev_sv_reg['snlq_path'],
current_month, month_reg['snowmelt'], sv_reg['snow_path'],
sv_reg['snlq_path'], temp_val_dict['modified_moisture_inputs'],
temp_val_dict['pet_rem'])
# remove runoff and surface evaporation from moisture inputs
shutil.copyfile(
temp_val_dict['modified_moisture_inputs'],
temp_val_dict['current_moisture_inputs'])
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['current_moisture_inputs'],
param_val_dict['fracro'], param_val_dict['precro'],
sv_reg['snow_path'], temp_val_dict['alit'],
temp_val_dict['sd'], param_val_dict['fwloss_1'],
param_val_dict['fwloss_2'], temp_val_dict['pet_rem']]],
subtract_surface_losses('inputs_after_surface'),
temp_val_dict['modified_moisture_inputs'],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate bare soil evaporation
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['current_moisture_inputs'],
param_val_dict['fracro'], param_val_dict['precro'],
sv_reg['snow_path'], temp_val_dict['alit'],
temp_val_dict['sd'], param_val_dict['fwloss_1'],
param_val_dict['fwloss_2'], temp_val_dict['pet_rem']]],
subtract_surface_losses('absevap'),
temp_val_dict['absevap'],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate total losses to surface evaporation
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['current_moisture_inputs'],
param_val_dict['fracro'], param_val_dict['precro'],
sv_reg['snow_path'], temp_val_dict['alit'],
temp_val_dict['sd'], param_val_dict['fwloss_1'],
param_val_dict['fwloss_2'], temp_val_dict['pet_rem']]],
subtract_surface_losses('evap_losses'),
temp_val_dict['evap_losses'],
gdal.GDT_Float32, _TARGET_NODATA)
# remove losses due to initial transpiration from water inputs
shutil.copyfile(
temp_val_dict['modified_moisture_inputs'],
temp_val_dict['current_moisture_inputs'])
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['pet_rem'], temp_val_dict['evap_losses'],
temp_val_dict['tave'], temp_val_dict['aliv'],
temp_val_dict['current_moisture_inputs']]],
calc_potential_transpiration('modified_moisture_inputs'),
temp_val_dict['modified_moisture_inputs'],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate potential transpiration
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['pet_rem'], temp_val_dict['evap_losses'],
temp_val_dict['tave'], temp_val_dict['aliv'],
temp_val_dict['current_moisture_inputs']]],
calc_potential_transpiration('trap'), temp_val_dict['trap'],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate potential evaporation from top soil layer
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['pet_rem'], temp_val_dict['evap_losses'],
temp_val_dict['tave'], temp_val_dict['aliv'],
temp_val_dict['current_moisture_inputs']]],
calc_potential_transpiration('pevp'), temp_val_dict['pevp'],
gdal.GDT_Float32, _TARGET_NODATA)
# distribute water to each layer
for lyr in range(1, nlayer_max + 1):
shutil.copyfile(
temp_val_dict['modified_moisture_inputs'],
temp_val_dict['current_moisture_inputs'])
# revise moisture content of this soil layer
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['adep_{}'.format(lyr)],
pp_reg['afiel_{}_path'.format(lyr)],
prev_sv_reg['asmos_{}_path'.format(lyr)],
temp_val_dict['current_moisture_inputs']]],
distribute_water_to_soil_layer('asmos_revised'),
temp_val_dict['asmos_interim_{}'.format(lyr)],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate soil moisture moving to next layer
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['adep_{}'.format(lyr)],
pp_reg['afiel_{}_path'.format(lyr)],
prev_sv_reg['asmos_{}_path'.format(lyr)],
temp_val_dict['current_moisture_inputs']]],
distribute_water_to_soil_layer('amov'),
temp_val_dict['modified_moisture_inputs'],
gdal.GDT_Float32, _TARGET_NODATA)
# amov, water moving to next layer, persists between submodels
shutil.copyfile(
temp_val_dict['modified_moisture_inputs'],
month_reg['amov_{}'.format(lyr)])
# calculate available water for transpiration
avw_list = []
for lyr in range(1, nlaypg_max + 1):
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['asmos_interim_{}'.format(lyr)],
pp_reg['awilt_{}_path'.format(lyr)],
param_val_dict['adep_{}'.format(lyr)]]],
calc_available_water_for_transpiration,
temp_val_dict['avw_{}'.format(lyr)], gdal.GDT_Float32,
_TARGET_NODATA)
avw_list.append(temp_val_dict['avw_{}'.format(lyr)])
# total water available for transpiration
raster_list_sum(
avw_list, _TARGET_NODATA, temp_val_dict['tot'], _TARGET_NODATA)
# calculate water available for transpiration weighted by transpiration
# depth for that soil layer
awwt_list = []
for lyr in range(1, nlaypg_max + 1):
raster_multiplication(
temp_val_dict['avw_{}'.format(lyr)], _TARGET_NODATA,
param_val_dict['awtl_{}'.format(lyr)], _IC_NODATA,
temp_val_dict['awwt_{}'.format(lyr)], _TARGET_NODATA)
awwt_list.append(temp_val_dict['awwt_{}'.format(lyr)])
# total weighted available water for transpiration
raster_list_sum(
awwt_list, _TARGET_NODATA, temp_val_dict['tot2'], _TARGET_NODATA)
# revise total potential transpiration
pygeoprocessing.raster_calculator(
[(path, 1) for path in [temp_val_dict['trap'], temp_val_dict['tot']]],
revise_potential_transpiration, temp_val_dict['trap_revised'],
gdal.GDT_Float32, _TARGET_NODATA)
# remove water via transpiration
for lyr in range(1, nlaypg_max + 1):
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['asmos_interim_{}'.format(lyr)],
pp_reg['awilt_{}_path'.format(lyr)],
param_val_dict['adep_{}'.format(lyr)],
temp_val_dict['trap_revised'],
temp_val_dict['awwt_{}'.format(lyr)], temp_val_dict['tot2']]],
remove_transpiration('avinj'),
temp_val_dict['avinj_{}'.format(lyr)], gdal.GDT_Float32,
_TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['asmos_interim_{}'.format(lyr)],
pp_reg['awilt_{}_path'.format(lyr)],
param_val_dict['adep_{}'.format(lyr)],
temp_val_dict['trap_revised'],
temp_val_dict['awwt_{}'.format(lyr)], temp_val_dict['tot2']]],
remove_transpiration('asmos'), sv_reg['asmos_{}_path'.format(lyr)],
gdal.GDT_Float32, _TARGET_NODATA)
# no transpiration is removed from layers not accessible by plants
for lyr in range(nlaypg_max + 1, nlayer_max + 1):
shutil.copyfile(
temp_val_dict['asmos_interim_{}'.format(lyr)],
sv_reg['asmos_{}_path'.format(lyr)])
# relative water content of soil layer 1
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sv_reg['asmos_1_path'], param_val_dict['adep_1'],
pp_reg['awilt_1_path'], pp_reg['afiel_1_path']]],
calc_relative_water_content_lyr_1, temp_val_dict['rwcf_1'],
gdal.GDT_Float32, _TARGET_NODATA)
# evaporation from soil layer 1
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['rwcf_1'], temp_val_dict['pevp'],
temp_val_dict['absevap'], sv_reg['asmos_1_path'],
pp_reg['awilt_1_path'], param_val_dict['adep_1']]],
calc_evaporation_loss, temp_val_dict['evlos'],
gdal.GDT_Float32, _TARGET_NODATA)
# remove evaporation from total moisture in soil layer 1
shutil.copyfile(sv_reg['asmos_1_path'], temp_val_dict['asmos_interim_1'])
raster_difference(
temp_val_dict['asmos_interim_1'], _TARGET_NODATA,
temp_val_dict['evlos'], _TARGET_NODATA, sv_reg['asmos_1_path'],
_TARGET_NODATA)
# remove evaporation from moisture available to plants in soil layer 1
shutil.copyfile(temp_val_dict['avinj_1'], temp_val_dict['avinj_interim_1'])
raster_difference(
temp_val_dict['avinj_interim_1'], _TARGET_NODATA,
temp_val_dict['evlos'], _TARGET_NODATA, temp_val_dict['avinj_1'],
_TARGET_NODATA)
# calculate avh2o_1, soil water available for growth, for each PFT
for pft_i in pft_id_set:
pft_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['pft_{}'.format(pft_i)])['nodata'][0]
soil_layers_accessible = [
temp_val_dict['avinj_{}'.format(lyr)] for lyr in
range(1, int(veg_trait_table[pft_i]['nlaypg']) + 1)]
raster_list_sum(
soil_layers_accessible, _TARGET_NODATA,
temp_val_dict['sum_avinj_{}'.format(pft_i)],
_TARGET_NODATA, nodata_remove=True)
raster_multiplication(
temp_val_dict['sum_avinj_{}'.format(pft_i)], _TARGET_NODATA,
aligned_inputs['pft_{}'.format(pft_i)], pft_nodata,
sv_reg['avh2o_1_{}_path'.format(pft_i)], _SV_NODATA)
# calculate avh2o_3, moisture in top two soil layers
soil_layers_to_sum = [
temp_val_dict['avinj_{}'.format(lyr)] for lyr in [1, 2]]
raster_list_sum(
soil_layers_to_sum, _TARGET_NODATA, sv_reg['avh2o_3_path'],
_SV_NODATA, nodata_remove=False)
# set correct nodata value for all revised asmos rasters
for lyr in range(1, nlayer_max + 1):
reclassify_nodata(sv_reg['asmos_{}_path'.format(lyr)], _SV_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def calc_anerb(rprpet, pevap, drain, aneref_1, aneref_2, aneref_3):
"""Calculate the effect of soil anaerobic conditions on decomposition.
The impact of soil anaerobic conditions on decomposition is calculated from
soil moisture and reference evapotranspiration. Anerob.f.
Parameters:
rprpet (numpy.ndarray): derived, ratio of precipitation or snowmelt to
reference evapotranspiration
pevap (numpy.ndarray): derived, reference evapotranspiration
drain (numpy.ndarray): parameter, the fraction of excess water lost by
            drainage. Indicates whether a soil is sensitive to anaerobiosis
(drain = 0) or not (drain = 1)
aneref_1 (numpy.ndarray): parameter, value of rprpet below which there
is no negative impact of soil anaerobic conditions on decomposition
aneref_2 (numpy.ndarray): parameter, value of rprpet above which there
is maximum negative impact of soil anaerobic conditions on
decomposition
aneref_3 (numpy.ndarray): parameter, minimum value of the impact of
soil anaerobic conditions on decomposition
Returns:
anerb, the effect of soil anaerobic conditions on decomposition
"""
valid_mask = (
(rprpet != _TARGET_NODATA) &
(pevap != _TARGET_NODATA) &
(drain != _IC_NODATA) &
(aneref_1 != _IC_NODATA) &
(aneref_2 != _IC_NODATA) &
(aneref_3 != _IC_NODATA))
xh2o = numpy.empty(rprpet.shape, dtype=numpy.float32)
xh2o[:] = _TARGET_NODATA
xh2o[valid_mask] = (
(rprpet[valid_mask] - aneref_1[valid_mask]) * pevap[valid_mask] *
(1. - drain[valid_mask]))
anerb = numpy.empty(rprpet.shape, dtype=numpy.float32)
anerb[:] = _TARGET_NODATA
anerb[valid_mask] = 1.
high_rprpet_mask = (valid_mask & (rprpet > aneref_1) & (xh2o > 0))
anerb[high_rprpet_mask] = numpy.maximum(
1. + (1. - aneref_3[high_rprpet_mask]) /
(aneref_1[high_rprpet_mask] - aneref_2[high_rprpet_mask]) *
(aneref_1[high_rprpet_mask] +
(xh2o[high_rprpet_mask] / pevap[high_rprpet_mask]) -
aneref_1[high_rprpet_mask]),
aneref_3[high_rprpet_mask])
return anerb
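# Illustrative sketch (not part of the original model): the anaerobic effect for
# one pixel with made-up parameter values. With rprpet = 2.5, pevap = 10,
# drain = 0 and aneref = (1.5, 3.0, 0.3): xh2o = (2.5 - 1.5) * 10 * 1 = 10,
# slope = (1 - 0.3) / (1.5 - 3.0) ~= -0.47, so
# anerb = max(1 - 0.47 * (10 / 10), 0.3) ~= 0.53, i.e. decomposition is slowed
# by wet, poorly drained conditions.
def _example_anerb():
    """Run calc_anerb on a one-pixel example array."""
    arr = lambda v: numpy.array([v], dtype=numpy.float32)
    return calc_anerb(arr(2.5), arr(10.), arr(0.), arr(1.5), arr(3.0), arr(0.3))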
def esched(return_type):
"""Calculate flow of an element accompanying decomposition of C.
Calculate the movement of one element (N or P) as C decomposes from one
state variable (the donating stock, or box A) to another state variable
(the receiving stock, or box B). Esched.f
Parameters:
return_type (string): flag indicating whether to return material
leaving box A, material arriving in box B, or material flowing
into or out of the mineral pool
Returns:
the function `_esched`
"""
def _esched(cflow, tca, rcetob, anps, labile):
"""Calculate the flow of one element (iel) to accompany decomp of C.
This is a transcription of Esched.f: "Schedule N, P, or S flow and
associated mineralization or immobilization flow for decomposition
from Box A to Box B."
If there is enough of iel (N or P) in the donating stock to satisfy
the required ratio, that material flows from the donating stock to
the receiving stock and whatever iel is leftover goes to mineral
pool. If there is not enough iel to satisfy the required ratio, iel
is drawn from the mineral pool to satisfy the ratio; if there is
not enough iel in the mineral pool, the material does not leave the
donating stock.
Parameters:
cflow (numpy.ndarray): derived, total C that is decomposing from
box A to box B
tca (numpy.ndarray): state variable, C in donating stock, i.e.
box A
rcetob (numpy.ndarray): derived, required ratio of C/iel in the
receiving stock
anps (numpy.ndarray): state variable, iel (N or P) in the donating
stock
labile (numpy.ndarray): state variable, mineral iel (N or P)
Returns:
material_leaving_a, the amount of material leaving box A, if
return_type is 'material_leaving_a'
material_arriving_b, the amount of material arriving in box B,
if return_type is 'material_arriving_b'
mnrflo, flow to or from mineral pool, if return_type is
'mineral_flow'
"""
valid_mask = (
(cflow != _IC_NODATA) &
(~numpy.isclose(tca, _SV_NODATA)) &
(tca > 0) &
(rcetob != _TARGET_NODATA) &
(~numpy.isclose(anps, _SV_NODATA)) &
(~numpy.isclose(labile, _SV_NODATA)))
outofa = numpy.empty(cflow.shape, dtype=numpy.float32)
outofa[:] = _IC_NODATA
outofa[valid_mask] = (
anps[valid_mask] * (cflow[valid_mask] / tca[valid_mask]))
immobil_ratio = numpy.zeros(cflow.shape)
nonzero_mask = ((outofa > 0) & valid_mask)
immobil_ratio[nonzero_mask] = (
cflow[nonzero_mask] / outofa[nonzero_mask])
immflo = numpy.zeros(cflow.shape)
immflo[valid_mask] = (
cflow[valid_mask] / rcetob[valid_mask] - outofa[valid_mask])
labile_supply = numpy.zeros(cflow.shape)
labile_supply[valid_mask] = labile[valid_mask] - immflo[valid_mask]
atob = numpy.zeros(cflow.shape)
atob[valid_mask] = cflow[valid_mask] / rcetob[valid_mask]
# immobilization
immobilization_mask = (
(immobil_ratio > rcetob) &
(labile_supply > 0) &
valid_mask)
# mineralization
mineralization_mask = (
(immobil_ratio <= rcetob) &
valid_mask)
# no movement
no_movt_mask = (
(immobil_ratio > rcetob) &
(labile_supply <= 0) &
valid_mask)
material_leaving_a = numpy.empty(cflow.shape, dtype=numpy.float32)
material_leaving_a[:] = _IC_NODATA
material_arriving_b = numpy.empty(cflow.shape, dtype=numpy.float32)
material_arriving_b[:] = _IC_NODATA
mnrflo = numpy.empty(cflow.shape, dtype=numpy.float32)
mnrflo[:] = _IC_NODATA
material_leaving_a[immobilization_mask] = (
outofa[immobilization_mask])
material_arriving_b[immobilization_mask] = (
outofa[immobilization_mask] + immflo[immobilization_mask])
mnrflo[immobilization_mask] = -immflo[immobilization_mask]
material_leaving_a[mineralization_mask] = outofa[mineralization_mask]
material_arriving_b[mineralization_mask] = atob[mineralization_mask]
mnrflo[mineralization_mask] = (
outofa[mineralization_mask] - atob[mineralization_mask])
material_leaving_a[no_movt_mask] = 0.
material_arriving_b[no_movt_mask] = 0.
mnrflo[no_movt_mask] = 0.
if return_type == 'material_leaving_a':
return material_leaving_a
elif return_type == 'material_arriving_b':
return material_arriving_b
elif return_type == 'mineral_flow':
return mnrflo
return _esched
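# Illustrative sketch (not part of the original model): a scalar check of the
# element scheduling above, using made-up values. If 10 units of C decompose
# (cflow = 10) out of a donating box holding tca = 100 C and anps = 10 N
# (C/N = 10), and the receiving box requires C/N = 20 (rcetob = 20), then
# outofa = 10 * (10 / 100) = 1.0 N leaves box A, the receiving box only needs
# 10 / 20 = 0.5 N, and the remaining 0.5 N is mineralized.
def _example_esched_mineralization():
    """Run the three esched return types on a one-pixel example."""
    arr = lambda v: numpy.array([v], dtype=numpy.float32)
    args = (arr(10.), arr(100.), arr(20.), arr(10.), arr(5.))
    leaving_a = esched('material_leaving_a')(*args)    # expected: [1.0]
    arriving_b = esched('material_arriving_b')(*args)  # expected: [0.5]
    mineral_flow = esched('mineral_flow')(*args)       # expected: [0.5]
    return leaving_a, arriving_b, mineral_flow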
def fsfunc(minerl_1_2, sorpmx, pslsrb):
"""Calculate the fraction of mineral P that is in solution.
The fraction of P in solution is influenced by two soil properties:
the maximum sorption potential of the soil and sorption affinity.
Parameters:
minerl_1_2 (numpy.ndarray): state variable, mineral P in top layer
sorpmx (numpy.ndarray): parameter, maximum P sorption potential
pslsrb (numpy.ndarray): parameter, slope term which controls the
fraction of mineral P that is labile
Returns:
fsol, fraction of P in solution
"""
valid_mask = (
(~numpy.isclose(minerl_1_2, _SV_NODATA)) &
(minerl_1_2 > 0) &
(sorpmx != _IC_NODATA) &
(pslsrb != _IC_NODATA))
c_ar = numpy.zeros(minerl_1_2.shape, dtype=numpy.float32)
c_ar[valid_mask] = (
sorpmx[valid_mask] * (2.0 - pslsrb[valid_mask]) / 2.)
b_ar = numpy.zeros(minerl_1_2.shape, dtype=numpy.float32)
b_ar[valid_mask] = (
sorpmx[valid_mask] - minerl_1_2[valid_mask] + c_ar[valid_mask])
sq_ar = numpy.zeros(minerl_1_2.shape, dtype=numpy.float32)
sq_ar[valid_mask] = (
b_ar[valid_mask] * b_ar[valid_mask] + 4. * c_ar[valid_mask] *
minerl_1_2[valid_mask])
sqrt_ar = numpy.zeros(minerl_1_2.shape, dtype=numpy.float32)
sqrt_ar[valid_mask] = numpy.sqrt(sq_ar[valid_mask])
labile = numpy.zeros(minerl_1_2.shape, dtype=numpy.float32)
labile[valid_mask] = (-b_ar[valid_mask] + sqrt_ar[valid_mask]) / 2.
fsol = numpy.empty(minerl_1_2.shape, dtype=numpy.float32)
fsol[:] = _TARGET_NODATA
fsol[valid_mask] = labile[valid_mask] / minerl_1_2[valid_mask]
return fsol
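# Illustrative sketch (not part of the original model): a one-pixel check of the
# P-sorption calculation above with made-up values. With sorpmx = 2, pslsrb = 1
# and minerl_1_2 = 0.5: c = 1, b = 2.5,
# labile = (-2.5 + sqrt(2.5**2 + 4 * 1 * 0.5)) / 2 ~= 0.19, so roughly 37% of
# mineral P is in solution (fsol ~= 0.37).
def _example_fsfunc():
    """Run fsfunc on a one-pixel example array."""
    arr = lambda v: numpy.array([v], dtype=numpy.float32)
    return fsfunc(arr(0.5), arr(2.), arr(1.))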
def calc_surface_som2_ratio(
som1c_1, som1e_1_iel, rad1p_1_iel, rad1p_2_iel, rad1p_3_iel,
pcemic1_2_iel):
"""Calculate the required C/iel ratio for material entering surface SOM2.
The C/iel ratio of material decomposing from surface SOM1 into surface SOM2
fluctuates with each decomposition time step according to the current C/iel
content of SOM1.
Parameters:
som1c_1 (numpy.ndarray): state variable, C in surface SOM1
som1e_1_iel (numpy.ndarray): state variable, iel in surface SOM1
rad1p_1_iel (numpy.ndarray): parameter, intercept term
rad1p_2_iel (numpy.ndarray): parameter, slope term
rad1p_3_iel (numpy.ndarray): parameter, minimum allowable C/iel for
addition term
pcemic1_2_iel (numpy.ndarray): parameter, minimum C/iel ratio
Returns:
rceto2_surface, required C/iel ratio of material entering surface SOM2
"""
valid_mask = (
(~numpy.isclose(som1c_1, _SV_NODATA)) &
(~numpy.isclose(som1e_1_iel, _SV_NODATA)) &
(som1e_1_iel > 0) &
(rad1p_1_iel != _IC_NODATA) &
(rad1p_2_iel != _IC_NODATA) &
(pcemic1_2_iel != _IC_NODATA) &
(rad1p_3_iel != _IC_NODATA))
radds1 = numpy.empty(som1c_1.shape, dtype=numpy.float32)
radds1[:] = _TARGET_NODATA
radds1[valid_mask] = (
rad1p_1_iel[valid_mask] + rad1p_2_iel[valid_mask] *
((som1c_1[valid_mask] / som1e_1_iel[valid_mask]) -
pcemic1_2_iel[valid_mask]))
rceto2_surface = numpy.empty(som1c_1.shape, dtype=numpy.float32)
rceto2_surface[:] = _TARGET_NODATA
rceto2_surface[valid_mask] = numpy.maximum(
(som1c_1[valid_mask] / som1e_1_iel[valid_mask] + radds1[valid_mask]),
rad1p_3_iel[valid_mask])
return rceto2_surface
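# Illustrative sketch (not part of the original model): with made-up values
# som1c_1 = 100 and som1e_1_iel = 5 (C/iel = 20), rad1p = (12, 3, 5) and
# pcemic1_2 = 10, the addition term is radds1 = 12 + 3 * (20 - 10) = 42, so
# material entering surface SOM2 must have C/iel >= max(20 + 42, 5) = 62.
def _example_surface_som2_ratio():
    """Run calc_surface_som2_ratio on a one-pixel example."""
    arr = lambda v: numpy.array([v], dtype=numpy.float32)
    return calc_surface_som2_ratio(
        arr(100.), arr(5.), arr(12.), arr(3.), arr(5.), arr(10.))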
def calc_tcflow_strucc_1(
aminrl_1, aminrl_2, strucc_1, struce_1_1, struce_1_2, rnewas_1_1,
rnewas_2_1, strmax_1, defac, dec1_1, pligst_1, strlig_1, pheff_struc):
"""Calculate total flow out of surface structural C.
The total potential flow of C out of surface structural material is
calculated according to its lignin content, the decomposition factor, and
soil pH. The actual flow is limited by the availability of N and P. N and P
may be supplied by the mineral source, or by the element (N or P) in the
decomposing stock.
Parameters:
aminrl_1 (numpy.ndarray): derived, average surface mineral N
aminrl_2 (numpy.ndarray): derived, average surface mineral P
strucc_1 (numpy.ndarray): state variable, surface structural C
struce_1_1 (numpy.ndarray): state variable, surface structural N
struce_1_2 (numpy.ndarray): state variable, surface structural P
rnewas_1_1 (numpy.ndarray): derived, required C/N ratio for
aboveground material decomposing to SOM1
rnewas_2_1 (numpy.ndarray): derived, required C/P ratio for
aboveground material decomposing to SOM1
strmax_1 (numpy.ndarray): parameter, maximum decomposition amount
defac (numpy.ndarray): derived, decomposition factor
dec1_1 (numpy.ndarray): parameter, maximum decomposition rate
pligst_1 (numpy.ndarray): parameter, effect of lignin content on
decomposition rate
strlig_1 (numpy.ndarray): state variable, lignin content of decomposing
material
pheff_struc (numpy.ndarray): derived, effect of soil pH on
decomposition rate
Returns:
tcflow_strucc_1, total flow of C out of surface structural
material
"""
valid_mask = (
(~numpy.isclose(aminrl_1, _SV_NODATA)) &
(~numpy.isclose(aminrl_2, _SV_NODATA)) &
(~numpy.isclose(strucc_1, _SV_NODATA)) &
(~numpy.isclose(struce_1_1, _SV_NODATA)) &
(~numpy.isclose(struce_1_2, _SV_NODATA)) &
(rnewas_1_1 != _TARGET_NODATA) &
(rnewas_2_1 != _TARGET_NODATA) &
(strmax_1 != _IC_NODATA) &
(defac != _TARGET_NODATA) &
(dec1_1 != _IC_NODATA) &
(pligst_1 != _IC_NODATA) &
(~numpy.isclose(strlig_1, _SV_NODATA)) &
(pheff_struc != _TARGET_NODATA))
potential_flow = numpy.zeros(aminrl_1.shape, dtype=numpy.float32)
potential_flow[valid_mask] = (
numpy.minimum(strucc_1[valid_mask], strmax_1[valid_mask]) *
defac[valid_mask] * dec1_1[valid_mask] *
numpy.exp(-pligst_1[valid_mask] * strlig_1[valid_mask]) * 0.020833 *
pheff_struc[valid_mask])
decompose_mask = (
((aminrl_1 > 0.0000001) | ((strucc_1 / struce_1_1) <= rnewas_1_1)) &
((aminrl_2 > 0.0000001) | ((strucc_1 / struce_1_2) <= rnewas_2_1)) &
valid_mask)
tcflow_strucc_1 = numpy.empty(aminrl_1.shape, dtype=numpy.float32)
tcflow_strucc_1[:] = _IC_NODATA
tcflow_strucc_1[valid_mask] = 0.
tcflow_strucc_1[decompose_mask] = potential_flow[decompose_mask]
return tcflow_strucc_1
def calc_tcflow_strucc_2(
aminrl_1, aminrl_2, strucc_2, struce_2_1, struce_2_2, rnewbs_1_1,
rnewbs_2_1, strmax_2, defac, dec1_2, pligst_2, strlig_2, pheff_struc,
anerb):
"""Calculate total flow out of soil structural C.
The total potential flow of C out of soil structural material is
calculated according to its lignin content, the decomposition factor, and
soil pH. The actual flow is limited by the availability of N and P. N and P
may be supplied by the mineral source, or by the element (N or P) in the
decomposing stock.
Parameters:
aminrl_1 (numpy.ndarray): derived, average soil mineral N
aminrl_2 (numpy.ndarray): derived, average soil mineral P
strucc_2 (numpy.ndarray): state variable, soil structural C
struce_2_1 (numpy.ndarray): state variable, soil structural N
struce_2_2 (numpy.ndarray): state variable, soil structural P
rnewbs_1_1 (numpy.ndarray): derived, required C/N ratio for
belowground material decomposing to SOM1
rnewbs_2_1 (numpy.ndarray): derived, required C/P ratio for
belowground material decomposing to SOM1
strmax_2 (numpy.ndarray): parameter, maximum decomposition amount
defac (numpy.ndarray): derived, decomposition factor
dec1_2 (numpy.ndarray): parameter, maximum decomposition rate
pligst_2 (numpy.ndarray): parameter, effect of lignin content on
decomposition rate
strlig_2 (numpy.ndarray): state variable, lignin content of decomposing
material
pheff_struc (numpy.ndarray): derived, effect of soil pH on
decomposition rate
anerb (numpy.ndarray): derived, effect of soil anaerobic conditions on
decomposition rate
Returns:
tcflow_strucc_2, total flow of C out of soil structural
material
"""
valid_mask = (
(~numpy.isclose(aminrl_1, _SV_NODATA)) &
(~numpy.isclose(aminrl_2, _SV_NODATA)) &
(~numpy.isclose(strucc_2, _SV_NODATA)) &
(~numpy.isclose(struce_2_1, _SV_NODATA)) &
(~numpy.isclose(struce_2_2, _SV_NODATA)) &
(rnewbs_1_1 != _TARGET_NODATA) &
(rnewbs_2_1 != _TARGET_NODATA) &
(strmax_2 != _IC_NODATA) &
(defac != _TARGET_NODATA) &
(dec1_2 != _IC_NODATA) &
(pligst_2 != _IC_NODATA) &
(~numpy.isclose(strlig_2, _SV_NODATA)) &
(pheff_struc != _TARGET_NODATA) &
(anerb != _TARGET_NODATA))
potential_flow = numpy.zeros(aminrl_1.shape, dtype=numpy.float32)
potential_flow[valid_mask] = (
numpy.minimum(strucc_2[valid_mask], strmax_2[valid_mask]) *
defac[valid_mask] * dec1_2[valid_mask] *
numpy.exp(-pligst_2[valid_mask] * strlig_2[valid_mask]) * 0.020833 *
pheff_struc[valid_mask] * anerb[valid_mask])
decompose_mask = (
((aminrl_1 > 0.0000001) | ((strucc_2 / struce_2_1) <= rnewbs_1_1)) &
((aminrl_2 > 0.0000001) | ((strucc_2 / struce_2_2) <= rnewbs_2_1)) &
valid_mask)
tcflow_strucc_2 = numpy.empty(aminrl_1.shape, dtype=numpy.float32)
tcflow_strucc_2[:] = _IC_NODATA
tcflow_strucc_2[valid_mask] = 0.
tcflow_strucc_2[decompose_mask] = potential_flow[decompose_mask]
return tcflow_strucc_2
def calc_tcflow_surface(
aminrl_1, aminrl_2, cstatv, estatv_1, estatv_2, rcetob_1, rcetob_2,
defac, dec_param, pheff):
"""Calculate total flow of C out of a surface pool.
The total potential flow of C out of a surface pool is calculated according
to the decomposition factor and soil pH. The actual flow is limited by the
availability of N and P. N and P may be supplied by the mineral source, or
by the element (N or P) in the decomposing stock.
Parameters:
aminrl_1 (numpy.ndarray): derived, average surface mineral N
aminrl_2 (numpy.ndarray): derived, average surface mineral P
cstatv (numpy.ndarray): state variable, C in decomposing pool
estatv_1 (numpy.ndarray): state variable, N in decomposing pool
estatv_2 (numpy.ndarray): state variable, P in decomposing pool
rcetob_1 (numpy.ndarray): derived, required C/N ratio for
material entering the receiving pool
rcetob_2 (numpy.ndarray): derived, required C/P ratio for
material entering the receiving pool
defac (numpy.ndarray): derived, decomposition factor
dec_param (numpy.ndarray): parameter, maximum decomposition rate
pheff (numpy.ndarray): derived, effect of soil pH on
decomposition rate
Returns:
tcflow, total flow of C out of the decomposing pool
"""
valid_mask = (
(~numpy.isclose(aminrl_1, _SV_NODATA)) &
(~numpy.isclose(aminrl_2, _SV_NODATA)) &
(~numpy.isclose(cstatv, _SV_NODATA)) &
(~numpy.isclose(estatv_1, _SV_NODATA)) &
(~numpy.isclose(estatv_2, _SV_NODATA)) &
(rcetob_1 != _TARGET_NODATA) &
(rcetob_2 != _TARGET_NODATA) &
(defac != _TARGET_NODATA) &
(dec_param != _IC_NODATA) &
(pheff != _TARGET_NODATA))
potential_flow = numpy.zeros(aminrl_1.shape, dtype=numpy.float32)
potential_flow[valid_mask] = (
numpy.minimum(
cstatv[valid_mask] * defac[valid_mask] * dec_param[valid_mask] *
0.020833 * pheff[valid_mask], cstatv[valid_mask]))
decompose_mask = (
((aminrl_1 > 0.0000001) | ((cstatv / estatv_1) <= rcetob_1)) &
((aminrl_2 > 0.0000001) | ((cstatv / estatv_2) <= rcetob_2)) &
valid_mask)
tcflow = numpy.empty(aminrl_1.shape, dtype=numpy.float32)
tcflow[:] = _IC_NODATA
tcflow[valid_mask] = 0.
tcflow[decompose_mask] = potential_flow[decompose_mask]
return tcflow
def calc_tcflow_soil(
aminrl_1, aminrl_2, cstatv, estatv_1, estatv_2, rcetob_1,
rcetob_2, defac, dec_param, pheff, anerb):
"""Calculate total flow out of soil metabolic C.
The total potential flow of C out of soil metabolic material is
calculated according to the decomposition factor, soil pH, and soil
anaerobic conditions. The actual flow is limited by the availability of N
and P. N and P may be supplied by the mineral source, or by the element
(N or P) in the decomposing stock.
Parameters:
aminrl_1 (numpy.ndarray): derived, average soil mineral N
aminrl_2 (numpy.ndarray): derived, average soil mineral P
cstatv (numpy.ndarray): state variable, C in decomposing stock
estatv_1 (numpy.ndarray): state variable, N in decomposing stock
estatv_2 (numpy.ndarray): state variable, P in decomposing stock
rcetob_1 (numpy.ndarray): derived, required C/N ratio for
material entering receiving stock
        rcetob_2 (numpy.ndarray): derived, required C/P ratio for
material entering receiving stock
defac (numpy.ndarray): derived, decomposition factor
dec_param (numpy.ndarray): parameter, maximum decomposition rate
pheff (numpy.ndarray): derived, effect of soil pH on
decomposition rate
anerb (numpy.ndarray): derived, effect of soil anaerobic
conditions on decomposition rate
Returns:
tcflow_soil, total flow of C out of soil metabolic
material
"""
valid_mask = (
(~numpy.isclose(aminrl_1, _SV_NODATA)) &
(~numpy.isclose(aminrl_2, _SV_NODATA)) &
(~numpy.isclose(cstatv, _SV_NODATA)) &
(~numpy.isclose(estatv_1, _SV_NODATA)) &
(~numpy.isclose(estatv_2, _SV_NODATA)) &
(rcetob_1 != _TARGET_NODATA) &
(rcetob_2 != _TARGET_NODATA) &
(defac != _TARGET_NODATA) &
(dec_param != _IC_NODATA) &
(pheff != _TARGET_NODATA) &
(anerb != _TARGET_NODATA))
potential_flow = numpy.zeros(aminrl_1.shape, dtype=numpy.float32)
potential_flow[valid_mask] = (
numpy.minimum(
cstatv[valid_mask] * defac[valid_mask] * dec_param[valid_mask] *
0.020833 * pheff[valid_mask] * anerb[valid_mask],
cstatv[valid_mask]))
decompose_mask = (
((aminrl_1 > 0.0000001) | ((cstatv / estatv_1) <= rcetob_1)) &
((aminrl_2 > 0.0000001) | ((cstatv / estatv_2) <= rcetob_2)) &
valid_mask)
tcflow_soil = numpy.empty(aminrl_1.shape, dtype=numpy.float32)
tcflow_soil[:] = _IC_NODATA
tcflow_soil[valid_mask] = 0.
tcflow_soil[decompose_mask] = potential_flow[decompose_mask]
return tcflow_soil
def calc_tcflow_som1c_2(
aminrl_1, aminrl_2, som1c_2, som1e_2_1, som1e_2_2, rceto2_1,
rceto2_2, defac, dec3_2, eftext, anerb, pheff_metab):
"""Calculate total flow out of soil SOM1.
The total potential flow of C out of soil SOM1 is calculated
according to the effect of soil texture, anaerobic conditions,
and soil pH. The actual flow is limited by the availability of N
and P. N and P may be supplied by the mineral source, or by the
element (N or P) in the decomposing stock.
Parameters:
aminrl_1 (numpy.ndarray): derived, average surface mineral N
aminrl_2 (numpy.ndarray): derived, average surface mineral P
som1c_2 (numpy.ndarray): state variable, C in soil SOM1
som1e_2_1 (numpy.ndarray): state variable, N in soil SOM1
som1e_2_2 (numpy.ndarray): state variable, P in soil SOM1
rceto2_1 (numpy.ndarray): derived, required C/N ratio for
material decomposing to soil SOM2
rceto2_2 (numpy.ndarray): derived, required C/P ratio for
material decomposing to soil SOM2
defac (numpy.ndarray): derived, decomposition factor
dec3_2 (numpy.ndarray): parameter, maximum decomposition rate
eftext (numpy.ndarray): derived, effect of soil texture on
decomposition rate
anerb (numpy.ndarray): derived, effect of soil anaerobic conditions
on decomposition rate
pheff_metab (numpy.ndarray): derived, effect of soil pH on
decomposition rate
Returns:
tcflow_som1c_2, total flow of C out of soil SOM1
"""
valid_mask = (
(~numpy.isclose(aminrl_1, _SV_NODATA)) &
(~numpy.isclose(aminrl_2, _SV_NODATA)) &
(~numpy.isclose(som1c_2, _SV_NODATA)) &
(~numpy.isclose(som1e_2_1, _SV_NODATA)) &
(~numpy.isclose(som1e_2_2, _SV_NODATA)) &
(rceto2_1 != _TARGET_NODATA) &
(rceto2_2 != _TARGET_NODATA) &
(defac != _TARGET_NODATA) &
(dec3_2 != _IC_NODATA) &
(eftext != _TARGET_NODATA) &
(anerb != _TARGET_NODATA) &
(pheff_metab != _TARGET_NODATA))
potential_flow = numpy.zeros(aminrl_1.shape, dtype=numpy.float32)
potential_flow[valid_mask] = (
som1c_2[valid_mask] * defac[valid_mask] * dec3_2[valid_mask] *
eftext[valid_mask] * anerb[valid_mask] * 0.020833 *
pheff_metab[valid_mask])
decompose_mask = (
((aminrl_1 > 0.0000001) | ((som1c_2 / som1e_2_1) <= rceto2_1)) &
((aminrl_2 > 0.0000001) | ((som1c_2 / som1e_2_2) <= rceto2_2)) &
valid_mask)
tcflow_som1c_2 = numpy.empty(aminrl_1.shape, dtype=numpy.float32)
tcflow_som1c_2[:] = _IC_NODATA
tcflow_som1c_2[valid_mask] = 0.
tcflow_som1c_2[decompose_mask] = potential_flow[decompose_mask]
return tcflow_som1c_2
def calc_som3_flow(tcflow, fps, animpt, anerb):
"""Calculate the C that flows from soil SOM1 or SOM2 to SOM3.
The fraction of total flow leaving SOM1 or SOM2 that goes to SOM3 is
dependent on soil clay content and soil anaerobic conditions.
Parameters:
tcflow (numpy.ndarray): derived, total C leaving soil SOM1 or SOM2
fps (numpy.ndarray): derived, effect of soil clay content on
decomposition to SOM3
animpt (numpy.ndarray): parameter, slope of relationship between
anaerobic conditions and decomposition flow to SOM3
anerb (numpy.ndarray): derived, impact of soil anaerobic conditions
on decomposition
Returns:
tosom3, C flowing to SOM3
"""
valid_mask = (
(tcflow != _IC_NODATA) &
(fps != _IC_NODATA) &
(animpt != _IC_NODATA) &
(anerb != _TARGET_NODATA))
tosom3 = numpy.empty(tcflow.shape, dtype=numpy.float32)
tosom3[:] = _IC_NODATA
tosom3[valid_mask] = (
tcflow[valid_mask] * fps[valid_mask] *
(1. + animpt[valid_mask] * (1. - anerb[valid_mask])))
return tosom3
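# Illustrative sketch (not part of the original model): with made-up values
# tcflow = 5, fps = 0.2, animpt = 5 and anerb = 0.8, the flow to SOM3 is
# 5 * 0.2 * (1 + 5 * (1 - 0.8)) = 2.0; wetter (more anaerobic) conditions,
# i.e. lower anerb, push a larger share of the decomposing C into SOM3.
def _example_som3_flow():
    """Run calc_som3_flow on a one-pixel example array."""
    arr = lambda v: numpy.array([v], dtype=numpy.float32)
    return calc_som3_flow(arr(5.), arr(0.2), arr(5.), arr(0.8))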
def calc_som2_flow(som2c_1, cmix, defac):
"""Calculate the C that flows from surface SOM2 to soil SOM2.
Some C flows from surface SOM2 to soil SOM2 via mixing. This flow is
controlled by the parameter cmix.
Parameters:
som2c_1 (numpy.ndarray): state variable, C in surface SOM2
cmix (numpy.ndarray): parameter, amount of C flowing via mixing
defac (numpy.ndarray): derived, decomposition factor
Returns:
tcflow, C flowing to soil SOM2 via mixing
"""
valid_mask = (
(~numpy.isclose(som2c_1, _SV_NODATA)) &
(cmix != _IC_NODATA) &
(defac != _TARGET_NODATA))
tcflow = numpy.empty(som2c_1.shape, dtype=numpy.float32)
tcflow[:] = _IC_NODATA
tcflow[valid_mask] = (
som2c_1[valid_mask] * cmix[valid_mask] * defac[valid_mask] *
0.020833)
return tcflow
def calc_respiration_mineral_flow(cflow, frac_co2, estatv, cstatv):
"""Calculate mineral flow of one element associated with respiration.
As material decomposes from one stock to another, some CO2 is lost
to microbial respiration and some nutrient (N or P) moves to the
mineral pool. Respir.f
Parameters:
cflow (numpy.ndarray): derived, C decomposing from one stock
to another
frac_co2 (numpy.ndarray): parameter, fraction of decomposing
C lost as CO2
estatv (numpy.ndarray): state variable, iel (N or P) in the
decomposing stock
cstatv (numpy.ndarray): state variable, C in the decomposing
stock
Returns:
mineral_flow, flow of iel (N or P) accompanying respiration
"""
valid_mask = (
(cflow != _IC_NODATA) &
(frac_co2 != _IC_NODATA) &
(~numpy.isclose(estatv, _SV_NODATA)) &
(~numpy.isclose(cstatv, _SV_NODATA)))
co2_loss = numpy.zeros(cflow.shape, dtype=numpy.float32)
co2_loss[valid_mask] = cflow[valid_mask] * frac_co2[valid_mask]
mineral_flow = numpy.empty(cflow.shape, dtype=numpy.float32)
mineral_flow[:] = _IC_NODATA
mineral_flow[valid_mask] = 0.
flow_mask = ((cstatv > 0) & valid_mask)
mineral_flow[flow_mask] = (
co2_loss[flow_mask] * estatv[flow_mask] / cstatv[flow_mask])
return mineral_flow
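# Illustrative sketch (not part of the original model): with made-up values
# cflow = 10, frac_co2 = 0.3, estatv = 5 and cstatv = 100, respiration loses
# 10 * 0.3 = 3 units of C as CO2 and releases 3 * (5 / 100) = 0.15 units of
# the element (N or P) to the mineral pool.
def _example_respiration_mineral_flow():
    """Run calc_respiration_mineral_flow on a one-pixel example."""
    arr = lambda v: numpy.array([v], dtype=numpy.float32)
    return calc_respiration_mineral_flow(arr(10.), arr(0.3), arr(5.), arr(100.))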
def update_gross_mineralization(gross_mineralization, mineral_flow):
"""Update gross N mineralization with current mineral flow.
Gross mineralization of N during decomposition is used to calculate
volatilization loss of N after decomposition. It is updated with N
mineral flow if mineral flow is positive.
Parameters:
gross_mineralization (numpy.ndarray): gross N mineralization during
decomposition
mineral_flow (numpy.ndarray): N mineral flow
Returns:
gromin_updated, updated gross mineralization
"""
valid_mask = (
(gross_mineralization != _TARGET_NODATA) &
(mineral_flow != _IC_NODATA))
gromin_updated = numpy.empty(
gross_mineralization.shape, dtype=numpy.float32)
gromin_updated[:] = _TARGET_NODATA
gromin_updated[valid_mask] = gross_mineralization[valid_mask]
update_mask = ((mineral_flow > 0) & valid_mask)
gromin_updated[update_mask] = (
gross_mineralization[update_mask] + mineral_flow[update_mask])
return gromin_updated
def calc_net_cflow(cflow, frac_co2):
"""Calculate net flow of C after loss to CO2.
As material decomposes from one stock to another, some C is lost to
CO2 through microbial respiration. Calculate the net flow of C after
subtracting losses to CO2.
Parameters:
cflow (numpy.ndarray): derived, C decomposing from one stock
to another
frac_co2 (numpy.ndarray): parameter, fraction of decomposing
C lost as CO2
Returns:
net_cflow, amount of decomposing C that flows after accounting
for CO2 losses
"""
valid_mask = (
(cflow != _IC_NODATA) &
(frac_co2 != _IC_NODATA))
co2_loss = numpy.zeros(cflow.shape, dtype=numpy.float32)
co2_loss[valid_mask] = cflow[valid_mask] * frac_co2[valid_mask]
net_cflow = numpy.empty(cflow.shape, dtype=numpy.float32)
net_cflow[:] = _IC_NODATA
net_cflow[valid_mask] = cflow[valid_mask] - co2_loss[valid_mask]
return net_cflow
def calc_net_cflow_tosom2(tcflow, frac_co2, tosom3, cleach):
"""Calculate net flow of C from soil SOM1 to soil SOM2.
The C flowing from soil SOM1 to SOM2 is the remainder of total flow
from SOM1, after accounting for losses to CO2 through respiration,
decomposition to SOM3, and leaching.
Parameters:
tcflow (numpy.ndarray): derived, total C decomposing from soil
SOM1
frac_co2 (numpy.ndarray): parameter, fraction of decomposing
C lost as CO2
tosom3 (numpy.ndarray): derived, C flowing from SOM1 to SOM3
cleach (numpy.ndarray): derived, leached organic C
Returns:
        net_tosom2, amount of C that flows from soil SOM1 to soil SOM2
"""
valid_mask = (
(tcflow != _IC_NODATA) &
(frac_co2 != _IC_NODATA) &
(tosom3 != _IC_NODATA) &
(cleach != _TARGET_NODATA))
net_tosom2 = numpy.empty(tcflow.shape, dtype=numpy.float32)
net_tosom2[:] = _IC_NODATA
net_tosom2[valid_mask] = (
tcflow[valid_mask] - (tcflow[valid_mask] * frac_co2[valid_mask]) -
tosom3[valid_mask] - cleach[valid_mask])
return net_tosom2
def calc_net_cflow_tosom1(tcflow, frac_co2, tosom3):
"""Calculate net flow of C from soil SOM2 to soil SOM1.
The C flowing from soil SOM2 to SOM1 is the remainder of total flow
from SOM2, after accounting for losses to CO2 through respiration
and decomposition to SOM3.
Parameters:
        tcflow (numpy.ndarray): derived, total C decomposing from soil
            SOM2
frac_co2 (numpy.ndarray): parameter, fraction of decomposing
C lost as CO2
        tosom3 (numpy.ndarray): derived, C flowing from SOM2 to SOM3
Returns:
net_tosom1, amount of C that flows from soil SOM2 to soil SOM1
"""
valid_mask = (
(tcflow != _IC_NODATA) &
(frac_co2 != _IC_NODATA) &
(tosom3 != _IC_NODATA))
net_tosom1 = numpy.empty(tcflow.shape, dtype=numpy.float32)
net_tosom1[:] = _IC_NODATA
net_tosom1[valid_mask] = (
tcflow[valid_mask] - (tcflow[valid_mask] * frac_co2[valid_mask]) -
tosom3[valid_mask])
return net_tosom1
def respiration(
tcflow_path, frac_co2_path, cstatv_path, estatv_path,
delta_estatv_path, delta_minerl_1_iel_path, gromin_1_path=None):
"""Calculate and apply flow of N or P during respiration.
Microbial respiration accompanies decomposition of most stocks.
Calculate the flow of one element (N or P) to the mineral pool, which
accompanies this respiration.
Parameters:
tcflow_path (string): path to raster containing flow of C that
is accompanied by respiration
frac_co2_path (string): path to raster containing fraction of
C lost to co2
cstatv_path (string): path to raster containing C state variable
of decomposing pool
estatv_path (string): path to raster containing iel (N or P) state
variable of decomposing pool
delta_estatv_path (string): path to raster containing change
in the iel state variable of decomposing pool
delta_minerl_1_iel_path (string): path to raster containing
change in surface mineral iel
gromin_1_path (string): path to raster containing gross
mineralization of N
Side effects:
modifies or creates the raster indicated by `delta_estatv_path`
modifies or creates the raster indicated by `delta_minerl_1_iel_path`
modifies or creates the raster indicated by `gromin_1_path`, if
supplied
Returns:
None
"""
with tempfile.NamedTemporaryFile(
prefix='operand_temp', dir=PROCESSING_DIR) as operand_temp_file:
operand_temp_path = operand_temp_file.name
with tempfile.NamedTemporaryFile(
prefix='d_statv_temp', dir=PROCESSING_DIR) as d_statv_temp_file:
d_statv_temp_path = d_statv_temp_file.name
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
tcflow_path, frac_co2_path, estatv_path,
cstatv_path]],
calc_respiration_mineral_flow, operand_temp_path, gdal.GDT_Float32,
_IC_NODATA)
# mineral flow is removed from the decomposing iel state variable
shutil.copyfile(delta_estatv_path, d_statv_temp_path)
raster_difference(
d_statv_temp_path, _IC_NODATA, operand_temp_path, _IC_NODATA,
delta_estatv_path, _IC_NODATA)
# mineral flow is added to surface mineral iel
shutil.copyfile(delta_minerl_1_iel_path, d_statv_temp_path)
raster_sum(
d_statv_temp_path, _IC_NODATA, operand_temp_path, _IC_NODATA,
delta_minerl_1_iel_path, _IC_NODATA)
if gromin_1_path:
shutil.copyfile(gromin_1_path, d_statv_temp_path)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
d_statv_temp_path,
operand_temp_path]],
update_gross_mineralization, gromin_1_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up
os.remove(operand_temp_path)
os.remove(d_statv_temp_path)
def nutrient_flow(
cflow_path, cstatv_donating_path, estatv_donating_path, rcetob_path,
minerl_1_path, d_estatv_donating_path, d_estatv_receiving_path,
d_minerl_path, gromin_path=None):
"""Calculate and apply the flow of one nutrient accompanying C.
As C decomposes from one compartment to another, nutrients (N and P)
also flow from the donating compartment to the receiving compartment.
Some N or P may also flow to or from the mineral pool. Calculate and
apply the flow of iel (N or P) accompanying the given flow of C.
Parameters:
cflow_path (string): path to raster containing the flow of C
from the donating to the receiving pool
cstatv_donating_path (string): path to raster containing the C
state variable in the donating pool
estatv_donating_path (string): path to raster containing the iel
(N or P) in the donating pool
rcetob_path (string): path to raster containing required C/iel
ratio in the receiving pool
minerl_1_path (string): path to raster containing surface mineral iel
d_estatv_donating_path (string): path to raster containing change
in iel in the donating pool
d_estatv_receiving_path (string): path to raster containing change
in iel in the receiving pool
d_minerl_path (string): path to raster containing change in surface
mineral iel
gromin_path (string): path to raster containing gross mineralization
of N
Side effects:
modifies or creates the raster indicated by `d_estatv_donating_path`
modifies or creates the raster indicated by `d_estatv_receiving_path`
modifies or creates the raster indicated by `d_minerl_path`
modifies or creates the raster indicated by `gromin_path`, if supplied
Returns:
None
"""
with tempfile.NamedTemporaryFile(
prefix='operand_temp', dir=PROCESSING_DIR) as operand_temp_file:
operand_temp_path = operand_temp_file.name
with tempfile.NamedTemporaryFile(
prefix='d_statv_temp', dir=PROCESSING_DIR) as d_statv_temp_file:
d_statv_temp_path = d_statv_temp_file.name
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
cflow_path, cstatv_donating_path, rcetob_path,
estatv_donating_path, minerl_1_path]],
esched('material_leaving_a'), operand_temp_path, gdal.GDT_Float32,
_IC_NODATA)
shutil.copyfile(d_estatv_donating_path, d_statv_temp_path)
raster_difference(
d_statv_temp_path, _IC_NODATA, operand_temp_path, _IC_NODATA,
d_estatv_donating_path, _IC_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
cflow_path, cstatv_donating_path, rcetob_path,
estatv_donating_path, minerl_1_path]],
esched('material_arriving_b'), operand_temp_path, gdal.GDT_Float32,
_IC_NODATA)
shutil.copyfile(d_estatv_receiving_path, d_statv_temp_path)
raster_sum(
d_statv_temp_path, _IC_NODATA, operand_temp_path, _IC_NODATA,
d_estatv_receiving_path, _IC_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
cflow_path, cstatv_donating_path, rcetob_path,
estatv_donating_path, minerl_1_path]],
esched('mineral_flow'), operand_temp_path, gdal.GDT_Float32,
_IC_NODATA)
shutil.copyfile(d_minerl_path, d_statv_temp_path)
raster_sum(
d_statv_temp_path, _IC_NODATA, operand_temp_path, _IC_NODATA,
d_minerl_path, _IC_NODATA)
if gromin_path:
shutil.copyfile(gromin_path, d_statv_temp_path)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
d_statv_temp_path, operand_temp_path]],
update_gross_mineralization, gromin_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up
os.remove(operand_temp_path)
os.remove(d_statv_temp_path)
def calc_c_leach(amov_2, tcflow, omlech_3, orglch):
"""Calculate the amount of C leaching from soil SOM1 to stream flow.
Some C leaches from soil SOM1 if the water flow out of soil layer 2
is above a critical level.
Parameters:
amov_2 (numpy.ndarray): derived, moisture flowing out of soil layer
2
tcflow (numpy.ndarray): derived, total flow of C out of soil SOM1
omlech_3 (numpy.ndarray): parameter, threshold value for amov_2
orglch (numpy.ndarray): derived, effect of sand content on leaching
rate
Returns:
cleach, C leaching from soil SOM1 to stream flow
"""
valid_mask = (
(amov_2 != _TARGET_NODATA) &
(tcflow != _IC_NODATA) &
(omlech_3 != _IC_NODATA) &
(orglch != _IC_NODATA))
cleach = numpy.empty(amov_2.shape, dtype=numpy.float32)
cleach[:] = _TARGET_NODATA
cleach[valid_mask] = 0
linten = numpy.zeros(amov_2.shape)
linten[valid_mask] = numpy.minimum(
(1. - (omlech_3[valid_mask] - amov_2[valid_mask]) /
omlech_3[valid_mask]), 1.)
leach_mask = ((amov_2 > 0) & valid_mask)
cleach[leach_mask] = (
tcflow[leach_mask] * orglch[leach_mask] * linten[leach_mask])
return cleach
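# Illustrative sketch (not part of the original model): with made-up values
# amov_2 = 4, omlech_3 = 10, tcflow = 5 and orglch = 0.2, the leaching
# intensity is min(1 - (10 - 4) / 10, 1) = 0.4, so cleach = 5 * 0.2 * 0.4 = 0.4;
# if no water flows out of soil layer 2 (amov_2 = 0), nothing leaches.
def _example_c_leach():
    """Run calc_c_leach on a one-pixel example array."""
    arr = lambda v: numpy.array([v], dtype=numpy.float32)
    return calc_c_leach(arr(4.), arr(5.), arr(10.), arr(0.2))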
def remove_leached_iel(
som1c_2_path, som1e_2_iel_path, cleach_path, d_som1e_2_iel_path,
iel):
"""Remove N or P leached from soil SOM1.
As soil SOM1 decomposes into SOM3, some of N and P is lost from SOM1
through leaching. The amount lost is calculated from the amount of C
leaching from the soil and the proportion of iel (N or P) in soil SOM1.
Parameters:
som1c_2_path (string): path to raster containing C in soil SOM1
som1e_2_iel_path (string): path to raster containing iel in soil
SOM1
cleach_path (string): path to raster containing C leaching from
SOM1
d_som1e_2_iel_path (string): path to raster giving change in
som1e_2_iel
        iel (int): index indicating N (iel == 1) or P (iel == 2)
Side effects:
modifies the raster indicated by `d_som1e_2_iel_path`
Returns:
None
"""
def calc_leached_N(som1c_2, som1e_2_1, cleach):
"""Calculate the N leaching from soil SOM1."""
valid_mask = (
            (~numpy.isclose(som1c_2, _SV_NODATA)) &
            (~numpy.isclose(som1e_2_1, _SV_NODATA)) &
            (cleach != _TARGET_NODATA))
import os
from collections import defaultdict
import numpy as np
import copy
import pickle
import scipy.sparse
from PIL import Image
import h5py, json
import torch
from pycocotools.coco import COCO
from torch.utils.data import Dataset
from lib.scene_parser.rcnn.structures.bounding_box import BoxList
from lib.utils.box import bbox_overlaps
class vg_hdf5(Dataset):
def __init__(self, cfg, split="train", transforms=None, num_im=-1, num_val_im=0,
filter_duplicate_rels=True, filter_non_overlap=True, filter_empty_rels=True):
        assert split == "train" or split == "test", "split must be one of [train, test]"
        assert num_im >= -1, "num_im must be >= -1 (-1 means use all images)"
# split = 'train' if split == 'test' else 'test'
self.data_dir = cfg.DATASET.PATH
self.transforms = transforms
self.split = split
self.filter_non_overlap = filter_non_overlap
self.filter_duplicate_rels = filter_duplicate_rels and self.split == 'train'
self.roidb_file = os.path.join(self.data_dir, "VG-SGG.h5")
self.image_file = os.path.join(self.data_dir, "imdb_1024.h5")
# read in dataset from a h5 file and a dict (json) file
assert os.path.exists(self.data_dir), \
"cannot find folder {}, please download the visual genome data into this folder".format(self.data_dir)
self.im_h5 = h5py.File(self.image_file, 'r')
self.info = json.load(open(os.path.join(self.data_dir, "VG-SGG-dicts.json"), 'r'))
self.im_refs = self.im_h5['images'] # image data reference
im_scale = self.im_refs.shape[2]
# add background class
self.info['label_to_idx']['__background__'] = 0
self.class_to_ind = self.info['label_to_idx']
self.ind_to_classes = sorted(self.class_to_ind, key=lambda k:
self.class_to_ind[k])
# cfg.ind_to_class = self.ind_to_classes
self.predicate_to_ind = self.info['predicate_to_idx']
self.predicate_to_ind['__background__'] = 0
self.ind_to_predicates = sorted(self.predicate_to_ind, key=lambda k:
self.predicate_to_ind[k])
# cfg.ind_to_predicate = self.ind_to_predicates
self.split_mask, self.image_index, self.im_sizes, self.gt_boxes, self.gt_classes, self.relationships = load_graphs(
self.roidb_file, self.image_file,
self.split, num_im, num_val_im=num_val_im,
filter_empty_rels=filter_empty_rels,
filter_non_overlap=filter_non_overlap and split == "train",
)
self.json_category_id_to_contiguous_id = self.class_to_ind
self.contiguous_category_id_to_json_id = {
v: k for k, v in self.json_category_id_to_contiguous_id.items()
}
@property
def coco(self):
"""
:return: a Coco-like object that we can use to evaluate detection!
"""
anns = []
for i, (cls_array, box_array) in enumerate(zip(self.gt_classes, self.gt_boxes)):
for cls, box in zip(cls_array.tolist(), box_array.tolist()):
anns.append({
'area': (box[3] - box[1] + 1) * (box[2] - box[0] + 1),
'bbox': [box[0], box[1], box[2] - box[0] + 1, box[3] - box[1] + 1],
'category_id': cls,
'id': len(anns),
'image_id': i,
'iscrowd': 0,
})
fauxcoco = COCO()
fauxcoco.dataset = {
'info': {'description': 'ayy lmao'},
'images': [{'id': i} for i in range(self.__len__())],
'categories': [{'supercategory': 'person',
'id': i, 'name': name} for i, name in enumerate(self.ind_to_classes) if name != '__background__'],
'annotations': anns,
}
fauxcoco.createIndex()
return fauxcoco
def _im_getter(self, idx):
w, h = self.im_sizes[idx, :]
ridx = self.image_index[idx]
im = self.im_refs[ridx]
im = im[:, :h, :w] # crop out
im = im.transpose((1,2,0)) # c h w -> h w c
return im
def __len__(self):
return len(self.image_index)
def __getitem__(self, index):
"""
get dataset item
"""
# get image
img = Image.fromarray(self._im_getter(index)); width, height = img.size
# get object bounding boxes, labels and relations
obj_boxes = self.gt_boxes[index].copy()
obj_labels = self.gt_classes[index].copy()
obj_relation_triplets = self.relationships[index].copy()
if self.filter_duplicate_rels:
# Filter out dupes!
assert self.split == 'train'
old_size = obj_relation_triplets.shape[0]
all_rel_sets = defaultdict(list)
for (o0, o1, r) in obj_relation_triplets:
all_rel_sets[(o0, o1)].append(r)
obj_relation_triplets = [(k[0], k[1], np.random.choice(v)) for k,v in all_rel_sets.items()]
obj_relation_triplets = np.array(obj_relation_triplets)
obj_relations = np.zeros((obj_boxes.shape[0], obj_boxes.shape[0]))
for i in range(obj_relation_triplets.shape[0]):
subj_id = obj_relation_triplets[i][0]
obj_id = obj_relation_triplets[i][1]
pred = obj_relation_triplets[i][2]
obj_relations[subj_id, obj_id] = pred
target_raw = BoxList(obj_boxes, (width, height), mode="xyxy")
img, target = self.transforms(img, target_raw)
target.add_field("labels", torch.from_numpy(obj_labels))
target.add_field("pred_labels", torch.from_numpy(obj_relations))
target.add_field("relation_labels", torch.from_numpy(obj_relation_triplets))
target = target.clip_to_image(remove_empty=False)
return img, target, index
def get_groundtruth(self, index):
width, height = self.im_sizes[index, :]
# get object bounding boxes, labels and relations
obj_boxes = self.gt_boxes[index].copy()
obj_labels = self.gt_classes[index].copy()
obj_relation_triplets = self.relationships[index].copy()
if self.filter_duplicate_rels:
# Filter out dupes!
assert self.split == 'train'
old_size = obj_relation_triplets.shape[0]
all_rel_sets = defaultdict(list)
for (o0, o1, r) in obj_relation_triplets:
all_rel_sets[(o0, o1)].append(r)
obj_relation_triplets = [(k[0], k[1], np.random.choice(v)) for k,v in all_rel_sets.items()]
obj_relation_triplets = np.array(obj_relation_triplets)
obj_relations = np.zeros((obj_boxes.shape[0], obj_boxes.shape[0]))
for i in range(obj_relation_triplets.shape[0]):
subj_id = obj_relation_triplets[i][0]
obj_id = obj_relation_triplets[i][1]
pred = obj_relation_triplets[i][2]
obj_relations[subj_id, obj_id] = pred
target = BoxList(obj_boxes, (width, height), mode="xyxy")
target.add_field("labels", torch.from_numpy(obj_labels))
target.add_field("pred_labels", torch.from_numpy(obj_relations))
target.add_field("relation_labels", torch.from_numpy(obj_relation_triplets))
target.add_field("difficult", torch.from_numpy(obj_labels).clone().fill_(0))
return target
def get_img_info(self, img_id):
w, h = self.im_sizes[img_id, :]
return {"height": h, "width": w}
def map_class_id_to_class_name(self, class_id):
return self.ind_to_classes[class_id]
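# Hedged illustration (not part of the original loader): __getitem__ and
# get_groundtruth de-duplicate relations by keeping one randomly chosen
# predicate per (subject, object) pair. The toy helper below mirrors that
# logic on a hand-made triplet array; names and values are assumptions made
# for demonstration only.
def _example_filter_duplicate_rels():
    triplets = np.array([[0, 1, 3], [0, 1, 5], [2, 0, 7]])  # (subj, obj, predicate)
    rel_sets = defaultdict(list)
    for (o0, o1, r) in triplets:
        rel_sets[(o0, o1)].append(r)
    # one triplet per unique (subj, obj); predicate picked at random among duplicates
    return np.array([(k[0], k[1], np.random.choice(v)) for k, v in rel_sets.items()])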
def load_graphs(graphs_file, images_file, mode='train', num_im=-1, num_val_im=0, filter_empty_rels=True,
filter_non_overlap=False):
"""
Load the file containing the GT boxes and relations, as well as the dataset split
:param graphs_file: HDF5
:param mode: (train, val, or test)
:param num_im: Number of images we want
:param num_val_im: Number of validation images
    :param filter_empty_rels: If True, filter out images that have no relationships
    :param filter_non_overlap: If training, filter out relations between boxes that don't overlap.
:return: image_index: numpy array corresponding to the index of images we're using
boxes: List where each element is a [num_gt, 4] array of ground
truth boxes (x1, y1, x2, y2)
gt_classes: List where each element is a [num_gt] array of classes
relationships: List where each element is a [num_r, 3] array of
(box_ind_1, box_ind_2, predicate) relationships
"""
if mode not in ('train', 'val', 'test'):
raise ValueError('{} invalid'.format(mode))
roi_h5 = h5py.File(graphs_file, 'r')
im_h5 = h5py.File(images_file, 'r')
data_split = roi_h5['split'][:]
split = 2 if mode == 'test' else 0
split_mask = data_split == split
# Filter out images without bounding boxes
split_mask &= roi_h5['img_to_first_box'][:] >= 0
if filter_empty_rels:
split_mask &= roi_h5['img_to_first_rel'][:] >= 0
image_index = | np.where(split_mask) | numpy.where |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Create plots for Figure 2.
"""
# import packages
import numpy as np
import matplotlib.pyplot as plt
import os
#######################
# SET DIRECTORY
#######################
loaddir = 'data/fig2/'
savdir = 'figures/fig2/'
if not os.path.exists(savdir):
os.mkdir(savdir)
#######################
# LOAD SIMULATION DATA
#######################
# load main data dictionaries
data = np.load(loaddir+'relearning_results.npy',allow_pickle=True).item()
data0 = np.load(loaddir+'experiment_results.npy',allow_pickle=True).item()
# load parameters
dt = data0['params']['dt']
T = data0['params']['T']
time = data0['params']['time']
tsteps = data0['params']['tsteps']
pulse_length = data0['params']['pulse_length']
manifold_trials = data0['params']['manifold_trials']
stimulus = data0['stimulus']
target = data0['target']
reduced_dim = data0['decoding']['reduced_dim']
# load weights
w00 = np.load(loaddir+'W_initial.npy')
w0 = np.load(loaddir+'W_stabilized.npy')
w1w = np.load(loaddir+'W_within.npy')
w1o = np.load(loaddir+'W_outside.npy')
non0idx = np.where(w0!=0)
# load output
activity2 = data['manifold']['original']['activity2']
order_ori = data['manifold']['original']['order']
order_w = data['manifold']['within']['order']
order_o = data['manifold']['outside']['order']
o_original = activity2 @ data['perturbations']['T'].T
o_within_wrong = activity2 @ data['perturbations']['T_within'].T
o_outside_wrong = activity2 @ data['perturbations']['T_outside'].T
a_outside = data['manifold']['outside']['activity'].reshape((activity2.shape[0],
activity2.shape[1],-1))
o_outside = a_outside[:,:,:data['perturbations']['T_outside'].shape[-1]] @ \
data['perturbations']['T_outside'].T
a_within = data['manifold']['within']['activity'].reshape((activity2.shape[0],
activity2.shape[1],-1))
o_within = a_within[:,:,:data['perturbations']['T_within'].shape[-1]] @ \
data['perturbations']['T_within'].T
#######################
# ANALYSIS
#######################
# performance
performance = | np.zeros(5) | numpy.zeros |
#
# Copyright (c) 2018-present, the Authors of the OpenKE-PyTorch (old).
# All rights reserved.
#
# Link to the project: https://github.com/thunlp/OpenKE/tree/OpenKE-PyTorch(old)
#
# Note: This code was partially adapted by <NAME>
# to adapt to the case of HyperKG, described in:
# https://arxiv.org/abs/1908.04895
#
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
class Model(nn.Module):
def __init__(self,config):
super(Model,self).__init__()
self.config = config
self.gpu_activated = config.gpu_activated
def cuda_transform(self,variable):
if self.gpu_activated:
return variable.cuda()
return variable
def get_postive_instance(self):
self.postive_h = Variable(torch.from_numpy(np.asarray(self.config.batch_h[0:self.config.batch_size], dtype=np.int64)))
self.postive_t = Variable(torch.from_numpy(np.asarray(self.config.batch_t[0:self.config.batch_size], dtype=np.int64)))
self.postive_r = Variable(torch.from_numpy( | np.asarray(self.config.batch_r[0:self.config.batch_size], dtype=np.int64) | numpy.asarray |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 19 15:03:35 2021
@author: willdavison
"""
"""
What follows has been taken from the DASH software GitHub, with relevant
modifications indicated. For a more complete understanding please visit
(https://github.com/daniel-muthukrishna/astrodash).
"""
import os
import sys
import numpy as np
import astropy.io.fits as afits
from scipy.interpolate import interp1d, UnivariateSpline
from scipy.signal import medfilt
from scipy.integrate import cumtrapz
try:
import pandas as pd
USE_PANDAS = True
except ImportError:
print("Pandas module not installed. DASH will use numpy to load spectral files instead. "
"This can be up to 10x slower.")
USE_PANDAS = False
class ProcessingTools(object):
def redshift_spectrum(self, wave, flux, z):
wave_new = wave * (z + 1)
return wave_new, flux
def deredshift_spectrum(self, wave, flux, z):
wave_new = wave / (z + 1)
return wave_new, flux
def min_max_index(self, flux, outerVal=0):
"""
        :param flux: flux array to scan
        :param outerVal: the scalar value filling all entries before the minimum and after the maximum index
        :return: indices of the first and last entries that differ from outerVal
"""
nonZeros = np.where(flux != outerVal)[0]
if nonZeros.size:
minIndex, maxIndex = min(nonZeros), max(nonZeros)
else:
minIndex, maxIndex = len(flux), len(flux)
return minIndex, maxIndex
class ReadSpectrumFile(object):
def __init__(self, filename, w0, w1, nw):
self.filename = filename
self.w0 = w0
self.w1 = w1
self.nw = nw
self.processingTools = ProcessingTools()
def read_dat_file(self):
try:
if USE_PANDAS is True:
data = pd.read_csv(self.filename, header=None, delim_whitespace=True).values
else:
data = np.loadtxt(self.filename)
wave = data[:, 0]
flux = data[:, 1]
except:
print("COULDN'T USE LOADTXT FOR FILE: {0}\n READ LINE BY LINE INSTEAD.".format(self.filename))
wave = []
flux = []
with open(self.filename, 'r') as FileObj:
for line in FileObj:
if line.strip() != '' and line.strip()[0] != '#':
datapoint = line.rstrip('\n').strip().split()
wave.append(float(datapoint[0].replace('D', 'E')))
flux.append(float(datapoint[1].replace('D', 'E')))
wave = np.array(wave)
flux = np.array(flux)
sorted_indexes = np.argsort(wave)
wave = wave[sorted_indexes]
flux = flux[sorted_indexes]
return wave, flux
def file_extension(self, template=False):
if isinstance(self.filename, (list, np.ndarray)): # Is an Nx2 array
wave, flux = self.filename[:,0], self.filename[:,1]
return wave, flux
elif hasattr(self.filename, 'read'): # Is a file handle
self.filename.seek(0)
return self.read_dat_file()
else: # Is a filename string
filename = os.path.basename(self.filename)
extension = filename.split('.')[-1]
if extension == self.filename or extension in ['flm', 'txt', 'dat']:
return self.read_dat_file()
else:
try:
return self.read_dat_file()
except:
print("Invalid Input File")
return 0
def two_col_input_spectrum(self, wave, flux, z):
wave, flux = self.processingTools.deredshift_spectrum(wave, flux, z)
mask = (wave >= self.w0) & (wave < self.w1)
wave = wave[mask]
flux = flux[mask]
if not wave.any():
raise Exception("The spectrum {0} with redshift {1} is out of the wavelength range {2}A to {3}A, "
"and cannot be classified. Please remove this object or change the input redshift of this"
" spectrum.".format(self.filename, z, int(self.w0), int(self.w1)))
fluxNorm = (flux - min(flux)) / (max(flux) - min(flux))
return wave, fluxNorm
def zero_non_overlap_part(array, minIndex, maxIndex, outerVal=0.):
slicedArray = np.copy(array)
slicedArray[0:minIndex] = outerVal * np.ones(minIndex)
slicedArray[maxIndex:] = outerVal * np.ones(len(array) - maxIndex)
return slicedArray
def normalise_spectrum(flux):
if len(flux) == 0 or min(flux) == max(flux): # No data
fluxNorm = np.zeros(len(flux))
else:
fluxNorm = (flux - min(flux)) / (max(flux) - min(flux))
return fluxNorm
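# Hedged usage sketch (values are made up): normalise_spectrum() maps a flux
# array linearly onto [0, 1] and returns zeros for empty or constant input.
def _example_normalise_spectrum():
    return normalise_spectrum(np.array([1.0, 3.0, 5.0]))  # -> array([0. , 0.5, 1. ])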
class PreProcessSpectrum(object):
def __init__(self, w0, w1, nw):
self.w0 = w0
self.w1 = w1
self.nw = nw
self.dwlog = np.log(w1 / w0) / nw
self.processingTools = ProcessingTools()
def log_wavelength(self, wave, flux):
# Set up log wavelength array bins
wlog = self.w0 * np.exp(np.arange(0, self.nw) * self.dwlog)
fluxOut = self._vectorised_log_binning(wave, flux)
minIndex, maxIndex = self.processingTools.min_max_index(fluxOut, outerVal=0)
return wlog, fluxOut, minIndex, maxIndex
def _vectorised_log_binning(self, wave, flux):
""" Vectorised code version of the self._original_log_binning (for improved speed since this is the most called
function in the script during training). This is complicated, but it has been tested to match the slower
looping method """
spec = np.array([wave, flux]).T
mask = (wave >= self.w0) & (wave < self.w1)
spec = spec[mask]
wave, flux = spec.T
try:
fluxOut = np.zeros(int(self.nw))
waveMiddle = wave[1:-1]
waveTake1Index = wave[:-2]
wavePlus1Index = wave[2:]
s0List = 0.5 * (waveTake1Index + waveMiddle)
s1List = 0.5 * (waveMiddle + wavePlus1Index)
s0First = 0.5 * (3 * wave[0] - wave[1])
s0Last = 0.5 * (wave[-2] + wave[-1])
s1First = 0.5 * (wave[0] + wave[1])
s1Last = 0.5 * (3 * wave[-1] - wave[-2])
s0List = np.concatenate([[s0First], s0List, [s0Last]])
s1List = np.concatenate([[s1First], s1List, [s1Last]])
s0LogList = np.log(s0List / self.w0) / self.dwlog + 1
s1LogList = np.log(s1List / self.w0) / self.dwlog + 1
dnuList = s1List - s0List
s0LogListInt = s0LogList.astype(int)
s1LogListInt = s1LogList.astype(int)
numOfJLoops = s1LogListInt - s0LogListInt
jIndexes = np.flatnonzero(numOfJLoops)
jIndexVals = s0LogListInt[jIndexes]
prependZero = jIndexVals[0] if jIndexVals[0] < 0 else False
if prependZero is not False:
jIndexVals[0] = 0
numOfJLoops[0] += prependZero
numOfJLoops = (numOfJLoops[jIndexes])[jIndexVals < self.nw]
fluxValList = ((flux * 1 / (s1LogList - s0LogList) * dnuList)[jIndexes])[jIndexVals < self.nw]
fluxValList = np.repeat(fluxValList, numOfJLoops)
minJ = min(jIndexVals)
maxJ = (max(jIndexVals) + numOfJLoops[-1]) if (max(jIndexVals) + numOfJLoops[-1] < self.nw) else self.nw
fluxOut[minJ:maxJ] = fluxValList[:(maxJ - minJ)]
return fluxOut
except Exception as e:
print(e)
print('wave', wave)
print('flux', flux)
print("########################################ERROR#######################################\n\n\n\n")
return | np.zeros(self.nw) | numpy.zeros |
import time
import os
import json
import sys
from pprint import pformat
import joblib
from joblib import Parallel, delayed
import rasterio
from rasterio.plot import reshape_as_image
from rasterio.mask import mask
from rasterio.merge import merge
import geopandas as gpd
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, cohen_kappa_score
from shapely.geometry import mapping
from geoRpro.utils import NumpyEncoder, to_json, json_to_disk, load_json, gen_sublist
import geoRpro.raster as rst
import logging
import pdb
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def cohen_kappa(confusion):
"""
Simplified version of: cohen_kappa_score
https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/metrics/_classification.py
"""
n_classes = confusion.shape[0]
sum0 = np.sum(confusion, axis=0)
sum1 = np.sum(confusion, axis=1)
expected = np.outer(sum0, sum1) / np.sum(sum0)
w_mat = np.ones([n_classes, n_classes], dtype=int)
w_mat.flat[:: n_classes + 1] = 0
k = np.sum(w_mat * confusion) / np.sum(w_mat * expected)
return 1 - k
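# Hedged sanity check (assumed numbers, not project data): for the 2x2
# confusion matrix below, this simplified cohen_kappa() returns ~0.595, which
# matches sklearn.metrics.cohen_kappa_score computed from equivalent labels.
def _example_cohen_kappa():
    cm = np.array([[5, 1], [2, 7]])
    return cohen_kappa(cm)  # ~0.5946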
def train_RandomForestClf(X, y, estimators):
"""
    Use a train/test split to get the accuracy of a Random Forest classifier
"""
logger.info(f'Start to train the model with estimators: {estimators}')
data = {}
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y)
clf = RandomForestClassifier(n_estimators=estimators)
clf.fit(X_train, y_train)
# Acc on X_train (Internal)
y_pred_train = clf.predict(X_train)
cm_train = confusion_matrix(y_train,y_pred_train)
## NOTE!! This is a new confusion matrix without dummy classes
## Valid ONLY for a specific case. To be removed or generalized !!
cm_train = cm_train[:-2, :-2]
strg_cm_train = np.array2string(cm_train)
data['cm_train'] = cm_train
internal_accuracy=sum(np.diagonal(cm_train))/np.sum(cm_train)
internal_kappa=cohen_kappa(cm_train)
data['train_oa'] = internal_accuracy
data['train_ck'] = internal_kappa
logger.info(f"Train overall accuracy: {internal_accuracy}")
logger.info(f"Train cohen_kappa accuracy: {internal_kappa}")
# Acc on X_test (External)
y_pred = clf.predict(X_test)
cm_test = confusion_matrix(y_test,y_pred)
## NOTE!! This is a new confusion matrix without dummy classes
## Valid ONLY for a specific case. To be removed or generalized !!
cm_test = cm_test[:-2, :-2]
strg_cm_test = np.array2string(cm_test)
data['cm_test'] = cm_test
external_accuracy=sum(np.diagonal(cm_test))/np.sum(cm_test)
external_kappa=cohen_kappa(cm_test)
data['test_oa'] = external_accuracy
data['test_ck'] = external_kappa
logger.info(f"Test overall accuracy: {external_accuracy}")
logger.info(f"Test cohen_kappa accuracy: {external_kappa}")
diag = | np.diagonal(cm_test, offset=0) | numpy.diagonal |
import unittest
import numpy as np
from svreg.summation import Rho, FFG
from tests._testStructs import _all_test_structs, r0
class Test_Summation(unittest.TestCase):
def setUp(self):
self.ffg = FFG(
name='ffg',
allElements=['H', 'He'],
neighborElements=['H', 'He'],
components=['f_A', 'f_B', 'g_AA', 'g_BB', 'g_AB'],
inputTypes={
'f_A': ['H'],
'f_B': ['He'],
'g_AA': ['H', 'H'],
'g_AB': ['H', 'He'],
'g_BB': ['He', 'He']
},
numParams={'f_A': 7, 'f_B': 7, 'g_AA': 9, 'g_BB': 9, 'g_AB': 9},
restrictions={
'f_A': [(6, 0), (8, 0)],
'f_B': [(6, 0), (8, 0)],
'g_AA':[],
'g_AB':[],
'g_BB':[],
},
paramRanges={
'f_A': None,
'f_B': None,
'g_AA': None,
'g_AB': None,
'g_BB': None
},
bonds={
'ffg_AA': ['f_A', 'f_A', 'g_AA'],
'ffg_AB': ['f_A', 'f_B', 'g_AB'],
'ffg_BB': ['f_B', 'f_B', 'g_BB'],
},
bondMapping="lambda i,j: 'ffg_AA' if i+j==0 else ('ffg_AB' if i+j==1 else 'ffg_BB')",
cutoffs=[1.0, 30.0],
numElements=2,
bc_type='fixed',
)
self.rho = Rho(
name='rho',
allElements=['H', 'He'],
neighborElements=['H', 'He'],
components=['rho_A', 'rho_B'],
inputTypes={'rho_A': ['H'], 'rho_B': ['He']},
numParams={'rho_A': 7, 'rho_B': 7},
restrictions={'rho_A': [(6, 0), (8, 0)], 'rho_B': [(6, 0), (8, 0)]},
paramRanges={'rho_A': None, 'rho_B': None},
bonds={
'rho_A': ['rho_A'],
'rho_B': ['rho_B'],
},
bondMapping="lambda i: 'rho_A' if i == 0 else 'rho_B'",
cutoffs=[1.0, 3.1],
numElements=2,
bc_type='fixed',
)
def test_sv_rho_dimers(self):
params = {
'rho_A': np.array([1, 1, 1, 1, 1, 1, 1, 0, 0]),
'rho_B': | np.array([2, 2, 2, 2, 2, 2, 2, 0, 0]) | numpy.array |
#!/usr/bin/env python
import numpy as np
import netCDF4 as nc
import pandas as pd
import multiprocessing
import textwrap
import matplotlib.pyplot as plt
import lhsmdu
import glob
import json
import os
import ast
import shutil
import subprocess
from contextlib import contextmanager
import param_util as pu
import output_utils as ou
@contextmanager
def log_wrapper(message,tag=''):
'''
Likely will abandon or repurpose this function.
Not super helpful as a log printer.'''
print('[SA:{}] {}'.format(tag, message))
try:
yield
finally:
print()
def generate_uniform(N, param_props):
'''
Generate sample matrix using uniform method.
Sample matrix will have one row for each "sample" of the
parameters. There will be one column for each parameter in
the `param_props` list.
Parameters
----------
N : int
number of samples (rows) to create
param_props : list of dicts
Each item in `param_props` list will be a dictionary
with at least the following:
>>> param_props = {
... 'name': 'rhq10', # name in dvmdostem parameter file (cmt_*.txt)
... 'bounds': [5.2, 6.4], # the min and max values the parameter can have
... }
Returns
-------
df : pandas.DataFrame, shape (N, len(param_props))
There will be one column for each parameter in the
`param_props` list and N rows (samples).
'''
print(param_props)
l = np.random.uniform(size=(N, len(param_props)))
# Generate bounds, based on specification in params list
lows = | np.array([p['bounds'][0] for p in param_props]) | numpy.array |
import copy
import math
import random
import cv2
import numpy as np
from typing import List
from plaster.run.ims_import.ims_import_params import ImsImportParams
from plaster.run.ims_import.ims_import_result import ImsImportResult
from plaster.run.ims_import.ims_import_worker import OUTPUT_NP_TYPE
from plaster.run.priors import RegPSFPrior, RegIllumPrior
from plaster.run.sigproc_v2.c_gauss2_fitter import gauss2_fitter
from plaster.tools.image import imops
from plaster.tools.image.coord import XY, YX
from plaster.tools.schema import check
from plaster.tools.utils import utils
from plaster.tools.utils.tmp import tmp_folder
from plaster.tools.zlog.zlog import tell, spy
import logging
log = logging.getLogger(__name__)
# see comment below, above "PeaksModelPSF" regarding why this is commented out
# from plaster.run.sigproc_v2.psf_sample import psf_sample
class Synth:
"""
Generate synthetic images for testing.
    This system is organized so that generation of the synthetic image(s) is
    delayed until the render() command is called. This allows
    "reaching in" to the state and modifying it first.
    For example, suppose that in some test you need the position of a
    certain peak to be in a very specific place. To prevent a
    proliferation of one-off methods in this class, the idea is that you
    can use the method that creates the peaks and then "reach in" to
    tweak the positions directly before render.
Examples:
with Synth() as s:
p = PeaksModelGaussian()
p.locs_randomize()
CameraModel(100, 2)
s.render_chcy()
"""
synth = None
def __init__(
self, n_fields=1, n_channels=1, n_cycles=1, dim=(512, 512), save_as=None,
):
self.n_fields = n_fields
self.n_channels = n_channels
self.n_cycles = n_cycles
self.dim = dim
self.models = []
self.aln_offsets = np.random.uniform(-20, 20, size=(self.n_cycles, 2))
self.aln_offsets[0] = (0, 0)
self.ch_aln = None
Synth.synth = self
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
Synth.synth = None
if exception_type is not None:
raise exception_type(exception_value)
def zero_aln_offsets(self):
self.aln_offsets = np.zeros((self.n_cycles, 2))
def add_model(self, model):
self.models += [model]
def _save_debug(self):
path = "/erisyon/plaster/_synth_debug.npy"
np.save(path, self.render_flchcy())
tell(f"Wrote debugging to {path}")
def render_chcy(self, fl_i=0):
"""
Returns only chcy_ims (first field)
"""
ims = np.zeros((self.n_channels, self.n_cycles, *self.dim))
for ch_i in np.arange(self.n_channels):
ch_aln_offset = 0.0
if self.ch_aln is not None:
ch_aln_offset = self.ch_aln[ch_i]
for cy_i in np.arange(self.n_cycles):
im = ims[ch_i, cy_i]
for model in self.models:
model.render(
im, fl_i, ch_i, cy_i, self.aln_offsets[cy_i] + ch_aln_offset
)
ims[ch_i, cy_i] = im
return ims
def render_flchcy(self):
flchcy_ims = np.zeros(
(self.n_fields, self.n_channels, self.n_cycles, *self.dim)
)
for fl_i in range(self.n_fields):
flchcy_ims[fl_i] = self.render_chcy()
return flchcy_ims
def scale_peaks_by_max(self):
"""
For some tests it is nice to know that the max brightness of a peak
instead of the area under the curve.
"""
self.peak_ims = [peak_im / np.max(peak_im) for peak_im in self.peak_ims]
def channel_aln_offsets(self, ch_aln):
"""
TODO: This probably should move to Peaks so that it conforms
to similar pattern of channel_scale_factor()
"""
check.array_t(ch_aln, shape=(self.n_channels, 2))
self.ch_aln = ch_aln
class BaseSynthModel:
def __init__(self):
self.dim = Synth.synth.dim
Synth.synth.add_model(self)
def render(self, im, fl_i, ch_i, cy_i, aln_offset):
pass
class PeaksModel(BaseSynthModel):
def __init__(self, n_peaks=1000):
super().__init__()
self.n_channels = Synth.synth.n_channels
self.n_cycles = Synth.synth.n_cycles
self.n_fields = Synth.synth.n_fields
self.n_peaks = n_peaks
self.locs = np.zeros((n_peaks, 2))
self.row_k = np.ones((n_peaks,))
self.counts = np.ones((n_peaks, self.n_cycles, self.n_channels), dtype=int)
self._amps = np.ones((n_peaks, self.n_cycles))
self._channel_scale_factor = None
self._channel_offset = None
# locs related
# ------------------------------------------------------------------------
def locs_randomize(self):
self.locs = np.random.uniform(0, self.dim, (self.n_peaks, 2))
return self
def locs_randomize_no_subpixel(self):
self.locs = np.floor(np.random.uniform(0, self.dim, (self.n_peaks, 2))).astype(
float
)
return self
def locs_randomize_away_from_edges(self, dist=15):
self.locs = np.random.uniform(
[dist, dist], np.array(self.dim) - dist, (self.n_peaks, 2)
)
return self
def locs_grid(self, pad=10):
step = self.dim[0] // math.floor(math.sqrt(self.n_peaks))
y = np.arange(pad, self.dim[0] - pad, step)
x = np.arange(pad, self.dim[1] - pad, step)
self.locs = np.array(np.meshgrid(x, y)).T.reshape(-1, 2).astype(float)
return self
def locs_center(self):
# Good for one peak only
assert self.n_peaks == 1
self.locs = [(self.dim[0] / 2, self.dim[1] / 2,)]
return self
def locs_add_random_subpixel(self):
self.locs += np.random.uniform(-1, 1, self.locs.shape)
return self
def remove_near_edges(self, dist=20):
self.locs = np.array(
[
loc
for loc in self.locs
if dist < loc[0] < self.dim[0] - dist
and dist < loc[1] < self.dim[1] - dist
]
)
return self
# count related. Use this preferentially over direct amps assignment
# ------------------------------------------------------------------------
def counts_uniform(self, cnt):
self.counts = cnt * np.ones(
(self.n_peaks, self.n_cycles, self.n_channels), dtype=int
)
return self
def bleach(self, p_bleach):
r = np.random.uniform(
0.0, 1.0, size=(self.n_peaks, self.n_cycles, self.n_channels)
)
decrement = | np.where(r < p_bleach, -1, 0) | numpy.where |
import time
import matplotlib;
matplotlib.use('MacOSX')
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import numpy as np
from numpy import linalg as LA
from scipy.spatial import Delaunay
from scipy.sparse import csr_matrix
from scipy.sparse import lil_matrix
from scipy.sparse import hstack, vstack
from pyflann import *
# surface parameters
global N; N = 30 # number of samples (each axis)
global d1; d1 = np.outer(np.linspace(0, N-1, N), np.ones(N)) # surface axis 1
global d2; d2 = d1.copy().T # surface axis 2
global μ_noise; μ_noise = 0.0 # mean of noise
global σ_noise; σ_noise = 0.5 # standard deviation of noise
# mesh parameters
global obs; obs = 1.0 #fraction of observable points (0 < obs <= 1)
# regularisation parameters
global λ; λ = 2.0 # data term weighting
global τ; τ = 0.125 # primal step size
global σ; σ = 0.125 # dual step size
global α1; α1 = 0.3 # (TGV) penalty for discontinuities in first derivative
global α2; α2 = 0.8 # (TGV) penalty for discontinuities in second derivative
global β; β = 1.0 # (logTV) weighting factor on K matrix
global θ; θ = 1 # extra gradient step size
global L; L = 25; # number of iterations (convex loop, non-convex inner loop)
global M; M = 20; # number of iterations (non-convex outer loop)
np.random.seed(1)
def rect_2d(t):
"""Defines a rect function."""
return np.abs(t) < 0.5 * 1.0
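# Hedged aside (not in the original script): rect_2d is the indicator of
# |t| < 0.5, so rect_2d((t - 4.5) / 10) marks a band of width 10 centred at
# 4.5; the ground-truth surface below is built by stacking such bands.
def _example_rect_band():
    t = np.array([-1.0, 4.5, 9.0, 10.0])
    return rect_2d((t - 4.5) / 10)  # -> array([False,  True,  True, False])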
def generate_noise_2d(t):
"""Generates noise with fixed mean μ and standard deviation σ."""
noise = np.random.normal(size=t.shape) * σ_noise + μ_noise
return noise
start = time.time()
# ------------------ CONSTRUCT GROUND TRUTH SURFACE z_gnd ------------------
# MUST CHOOSE EITHER PIECEWISE_CONSTANT OR AFFINE SURFACE (UNCOMMENT THE RELEVANT LINES)
# AFFINE SURFACE
#z0 = 0.05*(d1 + d2)
#z1 = 0
#z2 = 0
# PIECEWISE_CONSTANT SURFACE
z0 = 1*rect_2d((d1-4.5)/(10)) - 1*rect_2d((d2-4.5)/(10))
z1 = 2*rect_2d((d1-14.5)/(10))
z2 = 0*rect_2d((d1-24.5)/(10)) +1*rect_2d((d2-24.5)/(10)) #* -(d1-24.5)*0.1
z_gnd = z0 + z1 + z2 + 2 # add 2 to make sure above 0 for visualisation
# ----------------------- CONSTRUCT NOISY SURFACE z -----------------------
z_unflat = (z_gnd + generate_noise_2d(d1)*1)
z = z_unflat.flatten()
# ----------------------- SAMPLE NOISY SURFACE z_mesh -----------------------
z_domain_mask = np.zeros(N*N)
z_domain_mask[:int(round((obs*N*N)))] = 1
np.random.seed(1)
np.random.shuffle(z_domain_mask)
z_mesh = []
for i in range(0, len(z)):
if(z_domain_mask[i]>0):
z_mesh.append(z[i])
z_mesh = np.array(z_mesh).reshape(len(z_mesh),1) / 1
mesh_vert = [] #vertices in mesh
for i in range(0, len(z)):
if(z_domain_mask[i]==1):
mesh_vert.append([i//N, i%N])
mesh_vert = np.array(mesh_vert).astype(int32)
d1_mesh = mesh_vert[:,0] # mesh axis 1
d2_mesh = mesh_vert[:,1] # mesh axis 2
# ----------------------- DELAUNAY TRIANGULATION -----------------------
tri = Delaunay(mesh_vert)
graph = []
edges = []
for i in range(len(z_mesh)):
graph.append([])
for i in range(len(tri.simplices)):
vert1 = tri.simplices[i][0]
vert2 = tri.simplices[i][1]
vert3 = tri.simplices[i][2]
edge_dist_12 = np.linalg.norm(mesh_vert[vert1]-mesh_vert[vert2], ord=2) #Euclidean distance is l2 norm
edge_dist_13 = np.linalg.norm(mesh_vert[vert1]-mesh_vert[vert3], ord=2)
edge_dist_23 = np.linalg.norm(mesh_vert[vert2]-mesh_vert[vert3], ord=2)
#deal with edge_12
if((vert1 not in graph[vert2]) and (vert2 not in graph[vert1])):
if(len(graph[vert1])<=len(graph[vert2])):
graph[vert1].extend([vert2])
edges.append((vert1,vert2))
else:
graph[vert2].extend([vert1])
edges.append((vert2,vert1))
#deal with edge_13
if((vert1 not in graph[vert3]) and (vert3 not in graph[vert1])):
if(len(graph[vert1])<=len(graph[vert3])):
graph[vert1].extend([vert3])
edges.append((vert1,vert3))
else:
graph[vert3].extend([vert1])
edges.append((vert3,vert1))
#deal with edge_23
if((vert2 not in graph[vert3]) and (vert3 not in graph[vert2])):
if(len(graph[vert2])<=len(graph[vert3])):
graph[vert2].extend([vert3])
edges.append((vert2,vert3))
else:
graph[vert3].extend([vert2])
edges.append((vert3,vert2))
# ----------------------- PRIMAL-DUAL OPTIMISATION -----------------------
### First-order primal-dual methods of Chambolle and Pock
#GOOD HYPERPARAMETERS: λ=2.0, τ=0.125, σ=0.125, θ=1, L=200
def TV_optimisation(z):
# initilise variables
x = np.copy(z) # main primal variable
p = np.zeros((len(edges),1)) # main dual variable
x_bar = np.copy(x) # extra-gradient variable
for k in range(L):
x_prev = np.copy(x)
p_prev = np.copy(p)
# ------------------------- DUAL STEP -------------------------
for i in range(0,len(edges)):
u_p = p[i][0] + σ * (x_bar[edges[i][1]][0] - x_bar[edges[i][0]][0])
p[i][0] = u_p/max(abs(u_p),1)
# ------------------------ PRIMAL STEP -----------------------
for i in range(0,len(edges)):
x[edges[i][0]][0] += τ * (p[i][0])
x[edges[i][1]][0] -= τ * (p[i][0])
        # MUST CHOOSE EITHER L1 norm or L2 norm (UNCOMMENT THE RELEVANT LINES)
        # NB OBSERVATION FOR L1 norm vs L2 norm: to get better results with the L1 norm, lower the value of λ and increase the number of iterations
#L1 norm data term
#f = lambda zi, xi: (xi - λ*τ if (xi-zi) > λ*τ else (xi + λ*τ if (xi-zi) < - λ*τ else xi))
#x = np.array([f(zi,xi) for zi,xi in zip(np.squeeze(np.array(z)),np.squeeze(np.array(x)))]).reshape(len(x),1)
#L2 norm data term
x = (x + λ * τ * z)/(1 + λ * τ)
# ------------- EXTRA GRADIENT STEP (RELAXATION) -------------
x_bar = x + θ*(x-x_prev)
return x
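# Hedged side note (not in the original script): the primal data step above is
# the closed-form proximal operator of the quadratic data term (λ/2)·||x - z||²,
# i.e. prox(x) = (x + λ·τ·z) / (1 + λ·τ). The scalar helper below restates it.
def _example_l2_prox(x_val, z_val, lam=λ, tau=τ):
    return (x_val + lam * tau * z_val) / (1 + lam * tau)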
#GOOD HYPERPARAMETERS: λ=0.8, τ=0.125, α1=0.3, α2=0.8, σ=0.125, θ=1, L=200
def TGV_optimisation(z):
# initilise variables
x = np.copy(z) # main primal variable
y = np.zeros((len(x),1)) # additional primal variable
p = np.zeros((len(edges),1)) # main dual variable
q = np.zeros((len(edges),1)) # additional dual variable
x_bar = np.copy(x) # extra-gradient variable (for primal x)
y_bar = np.copy(y) # extra-gradient variable (for primal y)
for k in range(L):
x_prev = np.copy(x)
y_prev = np.copy(y)
p_prev = np.copy(p)
q_prev = np.copy(q)
# ------------------------- DUAL STEP -------------------------
for i in range(0,len(edges)):
u_p_1 = p[i][0] + σ * α1 * ((x_bar[edges[i][1]][0] - x_bar[edges[i][0]][0]) - y_bar[edges[i][0]][0])
p[i][0] = u_p_1/max(abs(u_p_1),1)
u_p_2 = q[i][0] + σ * α2 * (y_bar[edges[i][1]][0] - y_bar[edges[i][0]][0])
q[i][0] = u_p_2/max(abs(u_p_2),1)
# ------------------------ PRIMAL STEP -----------------------
for i in range(0,len(edges)):
x[edges[i][0]][0] += τ * α1 * (p[i][0])
x[edges[i][1]][0] -= τ * α1 * (p[i][0])
y[edges[i][0]][0] += τ * (α1 * p_prev[i][0])
y[edges[i][1]][0] += τ * (α1 * p_prev[i][0])
y[edges[i][0]][0] += τ * (α2 * (q_prev[i][0]))
y[edges[i][1]][0] -= τ * (α2 * (q_prev[i][0]))
        # MUST CHOOSE EITHER L1 norm or L2 norm (UNCOMMENT THE RELEVANT LINES)
        # NB OBSERVATION FOR L1 norm vs L2 norm: to get better results with the L1 norm, lower the value of λ and increase the number of iterations
#L1 norm data term
#f = lambda zi, xi: (xi - λ*τ if (xi-zi) > λ*τ else (xi + λ*τ if (xi-zi) < - λ*τ else xi))
#x = np.array([f(zi,xi) for zi,xi in zip(np.squeeze(np.array(z)),np.squeeze(np.array(x)))]).reshape(len(x),1)
#L2 norm data term
x = (x + λ * τ * z)/(1 + λ * τ)
# ------------- EXTRA GRADIENT STEP (RELAXATION) -------------
x_bar = x + θ*(x-x_prev)
y_bar = y + θ*(y-y_prev)
return x
#GOOD HYPERPARAMETERS: λ=2.0, τ=0.125, β=1.0, σ=0.125, θ=1, L=25, M=20
def logTV_optimisation(z):
# initilise variables
x = | np.copy(z) | numpy.copy |
""" Implementation of the 'original' volume/area scaling glacier model from
Marzeion et al. 2012, see http://www.the-cryosphere.net/6/1295/2012/.
While the mass balance model is comparable to OGGM's past mass balance model,
the 'dynamic' part does not include any ice physics but works with area/volume
and length/volume scaling instead.
Author: <NAME>
"""
# Built ins
import os
import logging
import datetime
from time import gmtime, strftime
# External libs
import numpy as np
import pandas as pd
import xarray as xr
import netCDF4
from scipy.optimize import minimize_scalar
# import OGGM modules
import oggm
import oggm.cfg as cfg
from oggm.cfg import SEC_IN_YEAR, SEC_IN_MONTH
from oggm import __version__
from oggm import utils, entity_task, global_task
from oggm.utils import floatyear_to_date, ncDataset
from oggm.exceptions import InvalidParamsError, MassBalanceCalibrationError
from oggm.core import climate
from oggm.core.massbalance import MassBalanceModel
# Module logger
log = logging.getLogger(__name__)
def _compute_temp_terminus(temp, temp_grad, ref_hgt,
terminus_hgt, temp_anomaly=0):
"""Computes the (monthly) mean temperature at the glacier terminus,
following section 2.1.2 of Marzeion et. al., 2012. The input temperature
is scaled by the given temperature gradient and the elevation difference
between reference altitude and the glacier terminus elevation.
Parameters
----------
temp : netCDF4 variable
monthly mean climatological temperature (degC)
temp_grad : netCDF4 variable or float
temperature lapse rate [degC per m of elevation change]
ref_hgt : float
reference elevation for climatological temperature [m asl.]
terminus_hgt : float
elevation of the glacier terminus (m asl.)
temp_anomaly : netCDF4 variable or float, optional
monthly mean temperature anomaly, default 0
Returns
-------
netCDF4 variable
monthly mean temperature at the glacier terminus [degC]
"""
temp_terminus = temp + temp_grad * (terminus_hgt - ref_hgt) + temp_anomaly
return temp_terminus
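# Hedged numeric illustration (made-up scalar stand-ins for the netCDF series):
# with a lapse rate of -0.0065 degC m-1, a reference height of 3000 m a.s.l.
# and a terminus at 2500 m, a reference temperature of 0 degC maps to
# +3.25 degC at the terminus.
def _example_temp_terminus():
    return _compute_temp_terminus(0.0, -0.0065, 3000.0, 2500.0)  # -> 3.25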
def _compute_solid_prcp(prcp, prcp_factor, ref_hgt, min_hgt, max_hgt,
temp_terminus, temp_all_solid, temp_grad,
prcp_grad=0, prcp_anomaly=0):
"""Compute the (monthly) amount of solid precipitation onto the glacier
surface, following section 2.1.1 of Marzeion et. al., 2012. The fraction of
solid precipitation depends mainly on the terminus temperature and the
temperature thresholds for solid and liquid precipitation. It is possible
to scale the precipitation amount from the reference elevation to the
average glacier surface elevation given a gradient (zero per default).
Parameters
----------
prcp : netCDF4 variable
monthly mean climatological precipitation [kg/m2]
prcp_factor : float
precipitation scaling factor []
ref_hgt : float
reference elevation for climatological precipitation [m asl.]
min_hgt : float
minimum glacier elevation [m asl.]
max_hgt : float
maximum glacier elevation [m asl.]
temp_terminus : netCDF4 variable
monthly mean temperature at the glacier terminus [degC]
temp_all_solid : float
temperature threshold below which all precipitation is solid [degC]
temp_grad : netCDF4 variable or float
temperature lapse rate [degC per m of elevation change]
prcp_grad : netCDF4 variable or float, optional
precipitation lapse rate [kg/m2 per m of elevation change], default = 0
prcp_anomaly : netCDF4 variable or float, optional
monthly mean precipitation anomaly [kg/m2], default = 0
Returns
-------
netCDF4 variable
monthly mean solid precipitation [kg/m2]
"""
# compute fraction of solid precipitation
if max_hgt == min_hgt:
# prevent division by zero if max_hgt equals min_hgt
f_solid = (temp_terminus <= temp_all_solid).astype(int)
else:
# use scaling defined in paper
f_solid = (1 + (temp_terminus - temp_all_solid)
/ (temp_grad * (max_hgt - min_hgt)))
f_solid = np.clip(f_solid, 0, 1)
# compute mean elevation
mean_hgt = 0.5 * (min_hgt + max_hgt)
# apply precipitation scaling factor
prcp_solid = (prcp_factor * prcp + prcp_anomaly)
# compute solid precipitation
prcp_solid *= (1 + prcp_grad * (mean_hgt - ref_hgt)) * f_solid
return prcp_solid
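# Hedged companion example (same made-up numbers as above): with a terminus
# temperature of +3.25 degC, a solid-precipitation threshold of 0 degC, a lapse
# rate of -0.0065 degC m-1 and a 1000 m elevation range, the solid fraction is
# 0.5, so 100 kg m-2 of precipitation gives 50 kg m-2 of snowfall.
def _example_solid_prcp():
    return _compute_solid_prcp(100.0, 1.0, 3000.0, 2500.0, 3500.0,
                               3.25, 0.0, -0.0065)  # -> 50.0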
def get_min_max_elevation(gdir):
"""Reads the DEM and computes the minimal and maximal glacier surface
elevation in meters asl, from the given (RGI) glacier outline.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
Returns
-------
[float, float]
minimal and maximal glacier surface elevation [m asl.]
"""
# open DEM file and mask the glacier surface area
fpath = gdir.get_filepath('gridded_data')
with ncDataset(fpath) as nc:
mask = nc.variables['glacier_mask'][:]
topo = nc.variables['topo'][:]
# get relevant elevation information
min_elev = np.min(topo[np.where(mask == 1)])
max_elev = np.max(topo[np.where(mask == 1)])
return min_elev, max_elev
def get_yearly_mb_temp_prcp(gdir, time_range=None, year_range=None):
"""Read climate file and compute mass balance relevant climate parameters.
Those are the positive melting temperature at glacier terminus elevation
as energy input and the amount of solid precipitation onto the glacier
surface as mass input. Both parameters are computes as yearly sums.
Default is to read all data, but it is possible to specify a time range by
giving two (included) datetime bounds. Similarly, the year range limits the
returned data to the given bounds of (hydrological) years.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
time_range : datetime tuple, optional
[t0, t1] time bounds, default = None
year_range : float tuple, optional
[y0, y1] year range, default = None
Returns
-------
[float array, float array, float array]
hydrological years (index), melting temperature [degC],
solid precipitation [kg/m2]
"""
# convert hydrological year range into time range
if year_range is not None:
sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]
em = sm - 1 if (sm > 1) else 12
t0 = datetime.datetime(year_range[0]-1, sm, 1)
t1 = datetime.datetime(year_range[1], em, 1)
return get_yearly_mb_temp_prcp(gdir, time_range=[t0, t1])
# get needed parameters
temp_all_solid = cfg.PARAMS['temp_all_solid']
temp_melt = cfg.PARAMS['temp_melt']
prcp_fac = cfg.PARAMS['prcp_scaling_factor']
default_grad = cfg.PARAMS['temp_default_gradient']
g_minmax = cfg.PARAMS['temp_local_gradient_bounds']
# Marzeion et. al., 2012 used a precipitation lapse rate of 3%/100m.
# But the prcp gradient is omitted for now.
# prcp_grad = 3e-4
prcp_grad = 0
# read the climate file
igrad = None
with utils.ncDataset(gdir.get_filepath('climate_monthly'), mode='r') as nc:
# time
time = nc.variables['time']
time = netCDF4.num2date(time[:], time.units)
# limit data to given time range and
# raise errors is bounds are outside available data
if time_range is not None:
p0 = np.where(time == time_range[0])[0]
try:
p0 = p0[0]
except IndexError:
raise climate.MassBalanceCalibrationError('time_range[0] '
'not found in file')
p1 = np.where(time == time_range[1])[0]
try:
p1 = p1[0]
except IndexError:
raise climate.MassBalanceCalibrationError('time_range[1] not '
'found in file')
else:
p0 = 0
p1 = len(time)-1
time = time[p0:p1+1]
# read time series of temperature and precipitation
itemp = nc.variables['temp'][p0:p1+1]
iprcp = nc.variables['prcp'][p0:p1+1]
# read time series of temperature lapse rate
if 'gradient' in nc.variables:
igrad = nc.variables['gradient'][p0:p1+1]
# Security for stuff that can happen with local gradients
igrad = np.where(~np.isfinite(igrad), default_grad, igrad)
igrad = np.clip(igrad, g_minmax[0], g_minmax[1])
# read climate data reference elevation
ref_hgt = nc.ref_hgt
# use the default gradient if no gradient is supplied by the climate file
if igrad is None:
igrad = itemp * 0 + default_grad
# Up to this point, the code is mainly copy and paste from the
# corresponding OGGM routine, with some minor adaptions.
# What follows is my code: So abandon all hope, you who enter here!
# get relevant elevation information
min_hgt, max_hgt = get_min_max_elevation(gdir)
# get temperature at glacier terminus
temp_terminus = _compute_temp_terminus(itemp, igrad, ref_hgt, min_hgt)
# compute positive 'melting' temperature/energy input
temp = np.clip(temp_terminus - temp_melt, a_min=0, a_max=None)
# get solid precipitation
prcp_solid = _compute_solid_prcp(iprcp, prcp_fac, ref_hgt,
min_hgt, max_hgt,
temp_terminus, temp_all_solid,
igrad, prcp_grad)
# check if climate data includes all 12 month of all years
ny, r = divmod(len(time), 12)
if r != 0:
raise ValueError('Climate data should be N full years exclusively')
# last year gives the tone of the hydro year
years = np.arange(time[-1].year - ny + 1, time[-1].year + 1, 1)
# compute sums over hydrological year
temp_yr = np.zeros(len(years))
prcp_yr = np.zeros(len(years))
for i, y in enumerate(years):
temp_yr[i] = np.sum(temp[i * 12:(i + 1) * 12])
prcp_yr[i] = np.sum(prcp_solid[i * 12:(i + 1) * 12])
return years, temp_yr, prcp_yr
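# Hedged aside (not from OGGM): the yearly sums computed in the loop above are
# equivalent to reshaping the monthly series into (n_years, 12) and summing
# over the month axis, as the toy helper below shows with dummy values.
def _example_yearly_sum(monthly=np.arange(24.0)):
    return monthly.reshape(-1, 12).sum(axis=1)  # -> array([ 66., 210.])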
def _fallback_local_t_star(gdir):
"""A Fallback function if vascaling.local_t_star raises an Error.
This function will still write a `vascaling_mustar.json`, filled with NANs,
if vascaling.local_t_star fails and cfg.PARAMS['continue_on_error'] = True.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
"""
# Scalars in a small dict for later
df = dict()
df['rgi_id'] = gdir.rgi_id
df['t_star'] = np.nan
df['bias'] = np.nan
df['mu_star_glacierwide'] = np.nan
gdir.write_json(df, 'vascaling_mustar')
@entity_task(log, writes=['vascaling_mustar'], fallback=_fallback_local_t_star)
def local_t_star(gdir, ref_df=None, tstar=None, bias=None):
"""Compute the local t* and associated glacier-wide mu*.
If `tstar` and `bias` are not provided, they will be interpolated from the
reference t* list.
The mass balance calibration parameters (i.e. temperature lapse rate,
temperature thresholds for melting, solid and liquid precipitation,
precipitation scaling factor) are written to the climate_info.pkl file.
The results of the calibration process (i.e. t*, mu*, bias) are stored in
the `vascaling_mustar.json` file, to be used later by other tasks.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
ref_df : :py:class:`pandas.Dataframe`, optional
replace the default calibration list with a custom one
tstar : int, optional
the year when the glacier should be in equilibrium, default = None
bias : float, optional
the associated reference bias, default = None
"""
# specify relevant mass balance parameters
params = ['temp_default_gradient', 'temp_all_solid', 'temp_all_liq',
'temp_melt', 'prcp_scaling_factor']
if tstar is None or bias is None:
# Do our own interpolation of t_start for given glacier
if ref_df is None:
if not cfg.PARAMS['run_mb_calibration']:
# Make some checks and use the default one
climate_info = gdir.read_json('climate_info')
source = climate_info['baseline_climate_source']
ok_source = ['CRU TS4.01', 'CRU TS3.23', 'HISTALP']
                if not any(s in source.upper() for s in ok_source):
msg = ('If you are using a custom climate file you should '
'run your own MB calibration.')
raise MassBalanceCalibrationError(msg)
# major RGI version relevant
v = gdir.rgi_version[0]
# baseline climate
str_s = 'cru4' if 'CRU' in source else 'histalp'
vn = 'vas_ref_tstars_rgi{}_{}_calib_params'.format(v, str_s)
for k in params:
if cfg.PARAMS[k] != cfg.PARAMS[vn][k]:
msg = ('The reference t* you are trying to use was '
'calibrated with different MB parameters. You '
'might have to run the calibration manually.')
raise MassBalanceCalibrationError(msg)
ref_df = cfg.PARAMS['vas_ref_tstars_rgi{}_{}'.format(v, str_s)]
else:
# Use the the local calibration
fp = os.path.join(cfg.PATHS['working_dir'], 'ref_tstars.csv')
ref_df = pd.read_csv(fp)
# Compute the distance to each glacier
distances = utils.haversine(gdir.cenlon, gdir.cenlat,
ref_df.lon, ref_df.lat)
# Take the 10 closest
aso = np.argsort(distances)[0:9]
amin = ref_df.iloc[aso]
distances = distances[aso]**2
# If really close no need to divide, else weighted average
if distances.iloc[0] <= 0.1:
tstar = amin.tstar.iloc[0]
bias = amin.bias.iloc[0]
else:
tstar = int(np.average(amin.tstar, weights=1./distances))
bias = np.average(amin.bias, weights=1./distances)
# Add the climate related params to the GlacierDir to make sure
# other tools cannot fool around without re-calibration
out = gdir.read_json('climate_info')
out['mb_calib_params'] = {k: cfg.PARAMS[k] for k in params}
gdir.write_json(out, 'climate_info')
# We compute the overall mu* here but this is mostly for testing
# Climate period
mu_hp = int(cfg.PARAMS['mu_star_halfperiod'])
yr = [tstar - mu_hp, tstar + mu_hp]
# get monthly climatological values
# of terminus temperature and solid precipitation
years, temp, prcp = get_yearly_mb_temp_prcp(gdir, year_range=yr)
# solve mass balance equation for mu*
# note: calving is not considered
mustar = np.mean(prcp) / np.mean(temp)
# check for a finite result
if not np.isfinite(mustar):
raise climate.MassBalanceCalibrationError('{} has a non finite '
'mu'.format(gdir.rgi_id))
# Clip the mu
if not (cfg.PARAMS['min_mu_star'] < mustar < cfg.PARAMS['max_mu_star']):
raise climate.MassBalanceCalibrationError('mu* out of '
'specified bounds.')
# Scalars in a small dict for later
df = dict()
df['rgi_id'] = gdir.rgi_id
df['t_star'] = int(tstar)
df['bias'] = bias
df['mu_star'] = mustar
gdir.write_json(df, 'vascaling_mustar')
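# Hedged recap (not part of the original task): the calibration above solves the
# equilibrium condition mean(prcp_solid) - mu* * mean(melt_temp) = 0 over the
# climate window around t*, hence mu* = mean(prcp) / mean(temp). With assumed
# yearly sums of [900, 1100] kg m-2 and [4, 6] degC the result would be 200.
def _example_mu_star(prcp=np.array([900.0, 1100.0]), temp=np.array([4.0, 6.0])):
    return np.mean(prcp) / np.mean(temp)  # -> 200.0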
@entity_task(log, writes=['climate_info'])
def t_star_from_refmb(gdir, mbdf=None):
"""Computes the reference year t* for the given glacier and mass balance
measurements.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
mbdf : :py:class:`pd.Series`
observed MB data indexed by year. If None, read automatically from the
reference data, default = None
Returns
-------
dict
A dictionary {'t_star': [], 'bias': []} containing t* and the
corresponding mass balance bias
"""
# make sure we have no marine terminating glacier
assert gdir.terminus_type == 'Land-terminating'
# get reference time series of mass balance measurements
if mbdf is None:
mbdf = gdir.get_ref_mb_data()['ANNUAL_BALANCE']
# compute average observed mass-balance
ref_mb = np.mean(mbdf)
# Compute one mu candidate per year and the associated statistics
# Only get the years were we consider looking for t*
y0, y1 = cfg.PARAMS['tstar_search_window']
ci = gdir.read_json('climate_info')
y0 = y0 or ci['baseline_hydro_yr_0']
y1 = y1 or ci['baseline_hydro_yr_1']
years = np.arange(y0, y1+1)
ny = len(years)
mu_hp = int(cfg.PARAMS['mu_star_halfperiod'])
mb_per_mu = pd.Series(index=years)
# get mass balance relevant climate parameters
years, temp, prcp = get_yearly_mb_temp_prcp(gdir, year_range=[y0, y1])
# get climate parameters, but only for years with mass balance measurements
selind = np.searchsorted(years, mbdf.index)
sel_temp = temp[selind]
sel_prcp = prcp[selind]
sel_temp = np.mean(sel_temp)
sel_prcp = np.mean(sel_prcp)
# for each year in the climatic period around t* (ignoring the first and
# last 15-years), compute a mu-candidate by solving the mass balance
# equation for mu. afterwards compute the average (modeled) mass balance
# over all years with mass balance measurements using the mu-candidate
for i, y in enumerate(years):
        # ignore the first and last mu_hp years, where the climate window around the candidate year would be incomplete
if ((i - mu_hp) < 0) or ((i + mu_hp) >= ny):
continue
# compute average melting temperature
t_avg = np.mean(temp[i - mu_hp:i + mu_hp + 1])
# skip if if too cold, i.e. no melt occurs (division by zero)
if t_avg < 1e-3:
continue
# compute the mu candidate for the current year, by solving the mass
# balance equation for mu*
mu = np.mean(prcp[i - mu_hp:i + mu_hp + 1]) / t_avg
# compute mass balance using the calculated mu and the average climate
# conditions over the years with mass balance records
mb_per_mu[y] = np.mean(sel_prcp - mu * sel_temp)
# compute differences between computed mass balance and reference value
diff = (mb_per_mu - ref_mb).dropna()
# raise error if no mu could be calculated for any year
if len(diff) == 0:
raise MassBalanceCalibrationError('No single valid mu candidate for '
'this glacier!')
# choose mu* as the mu candidate with the smallest absolute bias
amin = np.abs(diff).idxmin()
# write results to the `climate_info.pkl`
d = gdir.read_json('climate_info')
d['t_star'] = amin
d['bias'] = diff[amin]
gdir.write_json(d, 'climate_info')
return {'t_star': amin, 'bias': diff[amin],
'avg_mb_per_mu': mb_per_mu, 'avg_ref_mb': ref_mb}
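# Hedged illustration (made-up numbers): t* is simply the candidate year whose
# modelled-minus-observed bias is smallest in absolute value.
def _example_pick_tstar():
    diff = pd.Series({1960: 250.0, 1975: -40.0, 1990: 120.0})
    amin = np.abs(diff).idxmin()
    return amin, diff[amin]  # -> (1975, -40.0)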
@global_task
def compute_ref_t_stars(gdirs):
"""Detects the best t* for the reference glaciers and writes them to disk
This task will be needed for mass balance calibration of custom climate
data. For CRU and HISTALP baseline climate a pre-calibrated list is
available and should be used instead.
Parameters
----------
gdirs : list of :py:class:`oggm.GlacierDirectory` objects
will be filtered for reference glaciers
"""
if not cfg.PARAMS['run_mb_calibration']:
raise InvalidParamsError('Are you sure you want to calibrate the '
'reference t*? There is a pre-calibrated '
'version available. If you know what you are '
'doing and still want to calibrate, set the '
'`run_mb_calibration` parameter to `True`.')
# Reference glaciers only if in the list and period is good
ref_gdirs = utils.get_ref_mb_glaciers(gdirs)
# Run
from oggm.workflow import execute_entity_task
out = execute_entity_task(t_star_from_refmb, ref_gdirs)
# Loop write
df = pd.DataFrame()
for gdir, res in zip(ref_gdirs, out):
# list of mus compatibles with refmb
rid = gdir.rgi_id
df.loc[rid, 'lon'] = gdir.cenlon
df.loc[rid, 'lat'] = gdir.cenlat
df.loc[rid, 'n_mb_years'] = len(gdir.get_ref_mb_data())
df.loc[rid, 'tstar'] = res['t_star']
df.loc[rid, 'bias'] = res['bias']
# Write out
df['tstar'] = df['tstar'].astype(int)
df['n_mb_years'] = df['n_mb_years'].astype(int)
file = os.path.join(cfg.PATHS['working_dir'], 'ref_tstars.csv')
df.sort_index().to_csv(file)
@entity_task(log)
def find_start_area(gdir, year_start=1851):
"""This task find the start area for the given glacier, which results in
the best results after the model integration (i.e., modeled glacier surface
closest to measured RGI surface in 2003).
All necessary prepro task (gis, centerline, climate) must be executed
beforehand, as well as the local_t_star() task.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
year_start : int, optional
year at the beginning of the model integration, default = 1851
(best choice for working with HISTALP data)
Returns
-------
:py:class:`scipy.optimize.OptimizeResult`
"""
# instance the mass balance models
mbmod = VAScalingMassBalance(gdir)
# get reference area and year from RGI
a_rgi = gdir.rgi_area_m2
rgi_df = utils.get_rgi_glacier_entities([gdir.rgi_id])
y_rgi = int(rgi_df.BgnDate.values[0][:4])
# get min and max glacier surface elevation
h_min, h_max = get_min_max_elevation(gdir)
# set up the glacier model with the reference values (from RGI)
model_ref = VAScalingModel(year_0=y_rgi, area_m2_0=a_rgi,
min_hgt=h_min, max_hgt=h_max,
mb_model=mbmod)
def _to_minimize(area_m2_start, ref, year_start=year_start):
"""Initialize VAS glacier model as copy of the reference model (ref)
and adjust the model to the given starting area (area_m2_start) and
starting year (1851). Let the model evolve to the same year as the
reference model. Compute and return the relative absolute area error.
Parameters
----------
area_m2_start : float
ref : :py:class:`oggm.VAScalingModel`
year_start : float, optional
the default value is inherited from the surrounding task
Returns
-------
float
relative absolute area estimate error
"""
# define model
model_tmp = VAScalingModel(year_0=ref.year_0,
area_m2_0=ref.area_m2_0,
min_hgt=ref.min_hgt_0,
max_hgt=ref.max_hgt,
mb_model=ref.mb_model)
# scale to desired starting size
model_tmp.create_start_glacier(area_m2_start, year_start=year_start)
# run and compare, return relative error
return np.abs(model_tmp.run_and_compare(ref))
# define bounds - between 100m2 and two times the reference size
area_m2_bounds = [100, 2 * model_ref.area_m2_0]
# run minimization
minimization_res = minimize_scalar(_to_minimize, args=(model_ref),
bounds=area_m2_bounds,
method='bounded')
return minimization_res
class VAScalingMassBalance(MassBalanceModel):
"""Original mass balance model, used in <NAME>. al., 2012.
The general concept is similar to the oggm.PastMassBalance model.
Thereby the main difference is that the Volume/Area Scaling mass balance
model returns only one glacier wide mass balance value per month or year.
"""
def __init__(self, gdir, mu_star=None, bias=None,
filename='climate_monthly', input_filesuffix='',
repeat=False, ys=None, ye=None, check_calib_params=True):
"""Initialize.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
mu_star : float, optional
set to the alternative value of mu* you want to use, while
the default is to use the calibrated value
bias : float, optional
set to the alternative value of the calibration bias [mm we yr-1]
you want to use (the default is to use the calibrated value)
            Note that this bias is *subtracted* from the computed MB. Indeed:
BIAS = MODEL_MB - REFERENCE_MB
filename : str, optional
set to a different BASENAME if you want to use alternative climate
data
input_filesuffix : str, optional
the file suffix of the input climate file, no suffix as default
repeat : bool
Whether the climate period given by [ys, ye] should be repeated
indefinitely in a circular way, default=False
ys : int
The start of the climate period where the MB model is valid
(default: the period with available data)
ye : int
The end of the climate period where the MB model is valid
(default: the period with available data)
check_calib_params : bool
OGGM will try hard not to use wrongly calibrated mu* by checking
the parameters used during calibration and the ones you are
using at run time. If they don't match, it will raise an error.
Set to False to suppress this check.
"""
# initalize of oggm.MassBalanceModel
super(VAScalingMassBalance, self).__init__()
# read mass balance parameters from file
if mu_star is None:
df = gdir.read_json('vascaling_mustar')
mu_star = df['mu_star']
if bias is None:
if cfg.PARAMS['use_bias_for_run']:
df = gdir.read_json('vascaling_mustar')
bias = df['bias']
else:
bias = 0.
# set mass balance parameters
self.mu_star = mu_star
self.bias = bias
# set mass balance calibration parameters
self.t_solid = cfg.PARAMS['temp_all_solid']
self.t_liq = cfg.PARAMS['temp_all_liq']
self.t_melt = cfg.PARAMS['temp_melt']
prcp_fac = cfg.PARAMS['prcp_scaling_factor']
default_grad = cfg.PARAMS['temp_default_gradient']
# Check the climate related params to the GlacierDir to make sure
if check_calib_params:
mb_calib = gdir.read_json('climate_info')['mb_calib_params']
for k, v in mb_calib.items():
if v != cfg.PARAMS[k]:
raise RuntimeError('You seem to use different mass-'
'balance parameters than used for the '
'calibration. '
'Set `check_calib_params=False` '
'to ignore this warning.')
# set public attributes
self.temp_bias = 0.
self.prcp_bias = 1.
self.repeat = repeat
self.hemisphere = gdir.hemisphere
# read climate file
fpath = gdir.get_filepath(filename, filesuffix=input_filesuffix)
with ncDataset(fpath, mode='r') as nc:
# time
time = nc.variables['time']
time = netCDF4.num2date(time[:], time.units)
ny, r = divmod(len(time), 12)
if r != 0:
raise ValueError('Climate data should be N full years')
# This is where we switch to hydro float year format
# Last year gives the tone of the hydro year
self.years = np.repeat(np.arange(time[-1].year-ny+1,
time[-1].year+1), 12)
self.months = np.tile( | np.arange(1, 13) | numpy.arange |
from .context import rubik_solver
from .context import available_moves
import numpy as np
import unittest
benchmark_scrambles = [
("F' D U' L2 B2 D2 B2 D B F U L2 R2 B2 U' L F' D U2 F' D2 U L U' B' U' B2 U R2 U",
np.array([ 1, 2, 0, 1, 2, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]),
np.array([ 7, 1, 4, 3, 2, 6, 5, 0,19, 8,15,12,17,16,18,11,14,13, 9,10])),
("U B2 D F2 D R B' R' D B D2 F R D' F R D2 L' B' D U' R B2 D B2 R U' F2 D' R2",
np.array([ 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0]),
np.array([ 0, 6, 7, 1, 2, 4, 5, 3, 18,13,15,10,17,14,19,11,16, 9, 8,12])),
("D' F2 L2 B2 L B D2 F L2 D2 U2 B2 F2 L R' D U2 L2 U L' U L F' R' F U' B2 F2 L' R'",
np.array([ 2, 1, 2, 1, 1, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0]),
np.array([ 7, 4, 5, 1, 3, 2, 0, 6,16, 9,19,18, 8,10,17,15,13,11,12,14])),
("D2 L' D R F R' D' U' R' U2 R' D F2 D U L' F' D' L2 U' B' F' D2 B' L2 U2 B D' U2 L'",
np.array([ 0, 1, 1, 0, 2, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0]),
np.array([ 2, 6, 4, 0, 3, 7, 5, 1,18,15,19,10, 9,12,13, 8,11,17,16,14])),
("U L2 B F L2 D' U' B D' B2 F' L2 R' F' L R2 F R' U B L2 F' U2 L D U' L2 R' B2 L",
np.array([ 0, 1, 1, 0, 2, 0, 2, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]),
np.array([ 7, 0, 6, 5, 4, 1, 2, 3,18,19, 8,15,11,14,12,13,10,16,17, 9]))]
class TestCube(unittest.TestCase):
def test_cube(self):
cube = rubik_solver.Cube()
self.assertTrue(cube.is_solved())
self.assertEqual(cube.scramble, None)
for scramble, orientation, permutation in benchmark_scrambles:
cube = rubik_solver.Cube(scramble)
self.assertFalse(cube.is_solved())
self.assertTrue(np.array_equal(cube.position['orientation'], orientation))
self.assertTrue(np.array_equal(cube.position['permutation'], permutation))
cube.reset()
cube.position = cube.apply_scramble(scramble)
self.assertFalse(cube.is_solved())
self.assertTrue(np.array_equal(cube.position['orientation'], orientation))
self.assertTrue(np.array_equal(cube.position['permutation'], permutation))
for i in range(100):
cube = rubik_solver.Cube(i)
if i == 0:
self.assertTrue(cube.is_solved())
else:
self.assertFalse(cube.is_solved())
self.assertEqual(len(cube.scramble.split()), i)
self.assertTrue(cube.is_solved(rubik_solver.Cube().position))
for move in available_moves[None]:
tmp1 = cube.make_move(move)
tmp2 = cube.make_move(move, rubik_solver.Cube().position)
self.assertFalse( | np.array_equal(tmp1['orientation'], tmp2['orientation']) | numpy.array_equal |
"""
This is detMAE.py, a class for obtaining various bits for the deterministic
limit of temporal difference reinforcement learning.
"""
import itertools as it
import numpy as np
import scipy.linalg as la
class detMAE(object):
def __init__(self,
TranstionTensor,
RewardTensor,
alpha,
beta,
gamma,
roundingprec=9):
"""doc."""
assert len(np.unique(TranstionTensor.shape[1:-1])) == 1,\
"Transition tensor has different action sets sizes"
assert len(np.unique(RewardTensor.shape[2:-1])) == 1,\
"Reward tensor has different action sets sizes"
self.R = RewardTensor
self.T = TranstionTensor
self.N = self.R.shape[0] # the number of agents
self.Z = self.T.shape[0] # the number of states
self.M = self.T.shape[1] # the number of actions for each agent
self.alpha = alpha # the agent's learning rate
self.beta = beta # the agent's exploitation level
# assert hasattr(gamma, "__iter__"), "gamma needs __iter__"
# self.gamma = gamma
# the agent's discount factor
if hasattr(gamma, '__iter__'):
self.gamma = gamma
else:
self.gamma = np.repeat(gamma, self.N)
self.Omega = self._obtain_OtherAgentsActionsSummationTensor()
self.OmegaD = self._obtain_OtherAgentsDerivativeSummationTensor()
self.roundingprec = roundingprec
# =========================================================================
# Behavior profiles
# =========================================================================
def zeroIntelligence_behavior(self):
"""Behavior profile with equal probabilities."""
return np.ones((self.N, self.Z, self.M)) / float(self.M)
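# Illustrative example (assumed sizes): with N=2 agents, Z=3 states and M=2 actions
# this returns an array of shape (2, 3, 2) in which every entry equals 0.5.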
def random_behavior(self, method="norm"):
"""Behavior profile with random probabilities."""
if method=="norm":
X = np.random.rand(self.N, self.Z, self.M)
X = X / X.sum(axis=2).repeat(self.M).reshape(self.N, self.Z, self.M)
elif method == "diff":
X = np.random.rand(self.N, self.Z, self.M-1)
X = np.concatenate((np.zeros((self.N, self.Z, 1)),
np.sort(X, axis=-1),
| np.ones((self.N, self.Z, 1)) | numpy.ones |
"""
Copyright 2017 <NAME>, Toyota Technological Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import os
import scipy.constants as sc
from .spectrum import Spectrum
import warnings
def load_default_spectrum(fname1, fname2):
cache_spectrum = {}
spectrumfile = np.loadtxt(fname1,
dtype=float, delimiter=',', skiprows=2)
cache_spectrum["AM1.5g"] = spectrumfile[:, [0, 2]]
cache_spectrum["AM1.5d"] = spectrumfile[:, [0, 3]]
cache_spectrum["AM0"] = spectrumfile[:, [0, 1]]
spectrumfile = np.loadtxt(fname2,
dtype=float, delimiter='\t')
spectrumfile[:, 1] /= 1000
cache_spectrum["AM1.5do"] = spectrumfile
return cache_spectrum
# Read default spectrum
this_dir = os.path.split(__file__)[0]
spec_data = load_default_spectrum(os.path.join(this_dir, "astmg173.csv"),
os.path.join(this_dir, "am15d.dat"))
def load_blackbody(T=6000, normalize_to=None):
"""
Load Blackbody spectrum
:param T: temperature
:param normalize_to: the value in W/m^2 that the output spectrum is normalized to. Set to None if no renormalization is required.
:return: Spectrum
"""
"""
# Initialize the wavelength in nm -> m
wl = np.arange(20, 2000, step=20) / 1e9
# Convert it to frequency
mu = sc.c / wl
# Intensity of Blackbody spectrum in (W/m^2)
blackbody_i = 2 * sc.pi * sc.h * np.power(mu, 3) / np.power(sc.c, 2) * (1 / (np.exp(sc.h * mu / sc.k / T) - 1))
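# (This is Planck's law per unit frequency, i.e. the spectral exitance in W m^-2 Hz^-1;
# integrating it over frequency recovers the total blackbody irradiance in W/m^2.)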
factor = 1
sp = Spectrum(x_data=mu, y_data=blackbody_i, x_unit='s**-1',
y_unit="m**-2", is_spec_density=True, is_photon_flux=False)
if normalize_to is not None:
factor = normalize_to / sp.rsum()
return sp * factor
def load_astm(spec_type="AM1.5g"):
"""
Load ASTMG173-03 spectrum
:param spec_type: the type of spectrum: "AM1.5g", "AM0" and "AM1.5d"
:return: designated ASTM ``Spectrum``
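Illustrative usage: ``load_astm("AM1.5g")`` returns the global AM1.5 reference spectrum.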
"""
if spec_type in spec_data.keys():
flux = spec_data[spec_type]
sp = Spectrum(flux[:, 0], flux[:, 1], x_unit='nm', y_unit='m**-2',
is_photon_flux=False, is_spec_density=True)
else:
s = "spec_type should be string of one of these:%s" % spec_data.keys()
raise ValueError(s)
return sp
class Illumination(Spectrum):
def __init__(self, spectrum="AM1.5g", concentration=1):
"""
Initialise a standard spectrum.
"""
# flux, wl = self.read_from_csv(spectrum)
warnings.warn("Illumination class will be deprecated in future version.", DeprecationWarning)
flux = spec_data[spectrum]
Spectrum.__init__(self, flux[:, 0], flux[:, 1] * concentration, 'nm',
y_unit='m**-2', is_photon_flux=False, is_spec_density=True)
def total_power(self):
# Calculate the total power by integrating the spectrum
return self.rsum()
class BpFilter(Spectrum):
def __init__(self, edge_in_eV, f_type="high_pass", OD=2, energy_bound=(0.5, 6)):
"""
Create a band pass filter
:param edge_in_eV: the cutoff frequency (in eV) of this filter
:param f_type: high_pass or low_pass. high_pass: photons with energy higher than band edge pass.
:param OD: optical density for attenuation
:param energy_bound: the bound of wavelengths
"""
a1 = np.linspace(energy_bound[0], edge_in_eV, num=100, endpoint=True)
a2 = np.linspace(edge_in_eV + 0.01, energy_bound[1], num=100, endpoint=False)
wavelength = np.concatenate((a1, a2))
attenuation = np.zeros(wavelength.shape)
if f_type == "high_pass":
attenuation[wavelength <= edge_in_eV] = OD
if f_type == "low_pass":
attenuation[wavelength >= edge_in_eV] = OD
attenuation = np.power(10, -attenuation)
Spectrum.__init__(self, wavelength, attenuation, 'eV')
class material_filter(Spectrum):
def __init__(self, material_abs, thickness):
assert isinstance(material_abs, Spectrum)
abs_spec = material_abs.get_spectrum(to_x_unit='m')
attenuation = abs_spec[1, :] * thickness
attenuation = | np.exp(-attenuation) | numpy.exp |
#--------------
# Script to generate histograms to show
# different local minima that occur
#--------------
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
import sys
from neorl import ES
import string
import random
sys.path.append("..")
from fitness_help import FitnessHelper, Objective, get_log
from p7_base import rid, make_objs, calc_cumavg, plot_progress, \
plot_objs
objs = make_objs() #in order react, psplits, diff_worth
wts = [.55, .4, .05]
BOUNDS = {"x%i"%i : ["float", -1.*np.pi, 1.*np.pi] for i in range(1, 9)}
lambda_ = 60
mu = 30
cxpb = 0.6
mutpb = 0.3
notes_str = "lambda=%i, mu=%i, cxpb=%f, mutpb=%f\n"%(lambda_, mu,
cxpb, mutpb)
histname = "log/hist_p55p4p05_2"
rlist = []
qsplit = []
diffworth = []
objectives = []
drumangles = []
I = 2000
for i in range(I):
print(i+1, "/", I)
fname = Path("log/es_%s.log"%rid())
es_helper = FitnessHelper(objs, wts, fname, notes = notes_str)
es = ES(mode="min", bounds = BOUNDS, fit = es_helper.fitness,
ncores=1, lambda_ = lambda_, mu = mu, cxpb = cxpb, mutpb = mutpb)
es_x, es_y, es_hist = es.evolute(150)
res = get_log(fname)
bi = | np.argmin(res["fitness"].values) | numpy.argmin |
"""
code from brooklyn1900's craft repository.
https://github.com/brooklyn1900/CRAFT_pytorch
"""
import math
import numpy as np
import cv2
from skimage import io
# RGB
NORMALIZE_MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32) * 255.0
NORMALIZE_VARIANCE = np.array([0.229, 0.224, 0.225], dtype=np.float32) * 255.0
"""
box_util
"""
def cal_slope(p1, p2):
return (p2[1] - p1[1]) / (p2[0] - p1[0] + 1e-5)
def above_line(p, start_point, slope):
y = (p[0] - start_point[0]) * slope + start_point[1]
return p[1] < y
def reorder_points(point_list):
"""
Reorder points of quadrangle.
(top-left, top-right, bottom-right, bottom-left).
:param point_list: List of point. Point is (x, y).
:return: Reorder points.
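Illustrative example (image coordinates, y increasing downwards):
reorder_points([(1, 1), (0, 0), (1, 0), (0, 1)]) -> [(0, 0), (1, 0), (1, 1), (0, 1)]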
"""
# Find the first point which x is minimum.
ordered_point_list = sorted(point_list, key=lambda x: (x[0], x[1]))
first_point = ordered_point_list[0]
# Find the third point: the one whose slope from the first point is the median of the three.
slope_list = [[cal_slope(first_point, p), p] for p in ordered_point_list[1:]]
ordered_slope_point_list = sorted(slope_list, key=lambda x: x[0])
first_third_slope, third_point = ordered_slope_point_list[1]
# Find the second point which is above the line between the first point and the third point.
# All that's left is the fourth point.
if above_line(ordered_slope_point_list[0][1], third_point, first_third_slope):
second_point = ordered_slope_point_list[0][1]
fourth_point = ordered_slope_point_list[2][1]
reverse_flag = False
else:
second_point = ordered_slope_point_list[2][1]
fourth_point = ordered_slope_point_list[0][1]
reverse_flag = True
# Find the top left point.
second_fourth_slope = cal_slope(second_point, fourth_point)
if first_third_slope < second_fourth_slope:
if reverse_flag:
reorder_point_list = [fourth_point, first_point, second_point, third_point]
else:
reorder_point_list = [second_point, third_point, fourth_point, first_point]
else:
reorder_point_list = [first_point, second_point, third_point, fourth_point]
return reorder_point_list
def cal_min_box_distance(box1, box2):
box_distance = [math.pow((p2[0] - p1[0]), 2) + math.pow((p2[1] - p1[1]), 2) for p1 in box1 for p2 in box2]
return np.min(box_distance)
def reorder_box(box_list):
"""
Reorder character boxes.
:param box_list: List of box. Box is a list of point. Point is (x, y).
:return: Reorder boxes.
"""
# Calculate the minimum distance between any two boxes.
box_count = len(box_list)
distance_mat = np.zeros((box_count, box_count), dtype=np.float32)
for i in range(box_count):
box1 = box_list[i]
for j in range(i + 1, box_count):
box2 = box_list[j]
distance = cal_min_box_distance(box1, box2)
distance_mat[i][j] = distance
distance_mat[j][i] = distance
# Find the boxes at both ends (the pair with the largest mutual distance).
end_box_index = np.argmax(distance_mat)
nan = distance_mat[end_box_index // box_count, end_box_index % box_count] + 1
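# `nan` here is simply a sentinel larger than any real pairwise distance, so boxes
# that have already been visited are never picked again by the argmin below.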
for i in range(box_count):
distance_mat[i, i] = nan
last_box_index = start_box_index = end_box_index // box_count
last_box = box_list[start_box_index]
# reorder box.
reordered_box_list = [last_box]
for i in range(box_count - 1):
distance_mat[:, last_box_index] = nan
closest_box_index = np.argmin(distance_mat[last_box_index])
reordered_box_list.append(box_list[closest_box_index])
last_box_index = closest_box_index
return reordered_box_list
def cal_triangle_area(p1, p2, p3):
"""
Calculate the area of triangle.
S = |(x2 - x1)(y3 - y1) - (x3 - x1)(y2 - y1)| / 2
:param p1: (x, y)
:param p2: (x, y)
:param p3: (x, y)
:return: The area of triangle.
"""
[x1, y1], [x2, y2], [x3, y3] = p1, p2, p3
return abs((x2 - x1) * (y3 - y1) - (x3 - x1) * (y2 - y1)) / 2
def cal_quadrangle_area(points):
"""
Calculate the area of quadrangle.
:return: The area of quadrangle.
"""
points = reorder_points(points)
p1, p2, p3, p4 = points
s1 = cal_triangle_area(p1, p2, p3)
s2 = cal_triangle_area(p3, p4, p1)
s3 = cal_triangle_area(p2, p3, p4)
s4 = cal_triangle_area(p4, p1, p2)
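# Both diagonal decompositions give the same total area only for a convex quadrangle;
# note this is an exact floating-point comparison, so near-equal sums fall through to 0.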
if s1 + s2 == s3 + s4:
return s1 + s2
else:
return 0
def cal_intersection(points):
"""
Calculate the intersection of diagonals.
x=[(x3-x1)(x4-x2)(y2-y1)+x1(y3-y1)(x4-x2)-x2(y4-y2)(x3-x1)]/[(y3-y1)(x4-x2)-(y4-y2)(x3-x1)]
y=(y3-y1)[(x4-x2)(y2-y1)+(x1-x2)(y4-y2)]/[(y3-y1)(x4-x2)-(y4-y2)(x3-x1)]+y1
:param points: (x1, y1), (x2, y2), (x3, y3), (x4, y4).
:return: (x, y).
"""
[x1, y1], [x2, y2], [x3, y3], [x4, y4] = points
x = ((x3 - x1) * (x4 - x2) * (y2 - y1) + x1 * (y3 - y1) * (x4 - x2) - x2 * (y4 - y2) * (x3 - x1)) \
/ ((y3 - y1) * (x4 - x2) - (y4 - y2) * (x3 - x1) + 1e-5)
y = (y3 - y1) * ((x4 - x2) * (y2 - y1) + (x1 - x2) * (y4 - y2)) \
/ ((y3 - y1) * (x4 - x2) - (y4 - y2) * (x3 - x1) + 1e-5) + y1
return [x, y]
def cal_center_point(points):
points = np.array(points)
return [round( | np.average(points[:, 0]) | numpy.average |
import numpy as np
import scipy as sc
import pandas as pd
import bct
import networkx as nx
"""
distance_wei_floyd
"""
def distance_wei_floyd(adjacency, transform=None):
if transform is not None:
if transform == 'log':
if np.logical_or(adjacency > 1, adjacency < 0).any():
raise ValueError("Connection strengths must be in the " +
"interval [0,1) to use the transform " +
"-log(w_ij).")
SPL = -np.log(adjacency)
elif transform == 'inv':
SPL = 1. / adjacency
else:
raise ValueError("Unexpected transform type. Only 'log' and " +
"'inv' are accepted")
else:
SPL = adjacency.copy().astype('float')
SPL[SPL == 0] = np.inf
n = adjacency.shape[1]
flag_find_paths = True
hops = np.array(adjacency != 0).astype('float')
Pmat = np.repeat(np.atleast_2d(np.arange(0, n)), n, 0)
for k in range(n):
i2k_k2j = np.repeat(SPL[:, [k]], n, 1) + np.repeat(SPL[[k], :], n, 0)
if flag_find_paths:
path = SPL > i2k_k2j
i, j = np.where(path)
hops[path] = hops[i, k] + hops[k, j]
Pmat[path] = Pmat[i, k]
SPL = np.min(np.stack([SPL, i2k_k2j], 2), 2)
eye = np.eye(n) > 0
SPL[eye] = 0
if flag_find_paths:
hops[eye], Pmat[eye] = 0, 0
return SPL, hops, Pmat
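# Illustrative usage (assumed variable names):
#   SPL, hops, Pmat = distance_wei_floyd(adj, transform='inv')
#   path = retrieve_shortest_path(0, 3, hops, Pmat)  # node indices along the shortest path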
"""
retrieve_shortest_path
"""
def retrieve_shortest_path(s, t, hops, Pmat):
path_length = hops[s, t]
if path_length != 0:
path = np.zeros((int(path_length + 1), 1), dtype='int')
path[0] = s
for ind in range(1, len(path)):
s = Pmat[s, t]
path[ind] = s
else:
path = []
return path
"""
search_information
"""
def search_information(adjacency, transform=None, has_memory=False):
N = len(adjacency)
if np.allclose(adjacency, adjacency.T):
flag_triu = True
else:
flag_triu = False
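# Row-normalise the adjacency matrix to obtain the transition probability matrix T.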
T = np.linalg.solve(np.diag(np.sum(adjacency, axis=1)), adjacency)
_, hops, Pmat = distance_wei_floyd(adjacency, transform)
SI = np.zeros((N, N))
SI[np.eye(N) > 0] = np.nan
for i in range(N):
for j in range(N):
if (j > i and flag_triu) or (not flag_triu and i != j):
path = retrieve_shortest_path(i, j, hops, Pmat)
lp = len(path) - 1
if flag_triu:
if np.any(path):
pr_step_ff = | np.zeros(lp) | numpy.zeros |
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
"""
Created on Mon Feb 10 17:24 2020
@author: <NAME>
======================================================================
Purpose: Outputs max trends in gsat for a range of N year periods in
each observational historical record, for use in Figure 3
======================================================================
"""
# Load in required directories
basedir = 'Priestley-Centre/Near_term_warming/observation_data'
savedir = 'Priestley-Centre/Near_term_warming/analysis_figure_code/'+\
'Figure3/saved_arrays'
# Load in data and apply scaling factors
# to convert from GBST to GSAT
temp_BE = np.loadtxt(basedir+'/BE_Land_and_Ocean.csv',\
delimiter=',')[:,1]*1.087
years_BE = np.loadtxt(basedir+'/BE_Land_and_Ocean.csv',\
delimiter=',')[:,0]
nyear_BE = len(years_BE)
temp_GI = np.loadtxt(basedir+'/GISTEMPv4.csv',\
delimiter=',')[:,1]*1.087
years_GI = np.loadtxt(basedir+'/GISTEMPv4.csv',\
delimiter=',')[:,0]
nyear_GI = len(years_GI)
temp_Ha = np.loadtxt(basedir+'/HadCRUT4.6.csv',\
delimiter=',')[:,1]*1.19
years_Ha = np.loadtxt(basedir+'/HadCRUT4.6.csv',\
delimiter=',')[:,0]
nyear_Ha = len(years_Ha)
temp_CW = np.loadtxt(basedir+'/CWv2_had4sst3.csv',\
delimiter=',')[:,1]*1.087
years_CW = np.loadtxt(basedir+'/CWv2_had4sst3.csv',\
delimiter=',')[:,0]
nyear_CW = len(years_CW)
# Calculate maximum Ny trends
trend_lengths = | np.linspace(10,50,41) | numpy.linspace |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
coordinate calculation
"""
# information
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2021 - <NAME>'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = '<NAME>'
__email__ = 'toru.ver.11 at-sign gmail.com'
__all__ = []
# import standard libraries
import os
# import third-party libraries
import numpy as np
# import my libraries
import test_pattern_generator2 as tpg
import font_control as fc
class GridCoordinate():
def __init__(
self, bg_width=1920, bg_height=1080,
fg_width=200, fg_height=100, h_num=5, v_num=4,
remove_tblr_margin=False):
"""
Example
-------
>>> gc = GridCoordinate(
... bg_width=1920, bg_height=1080,
... fg_width=200, fg_height=150,
... h_num=3, v_num=2, remove_tblr_margin=False)
>>> pos_list = gc.get_st_pos_list()
[[[ 330 260] [ 330 670]]
[[ 860 260] [ 860 670]]
[[1390 260] [1390 670]]]
>>> img = np.zeros((height, width, 3))
>>> fg_img = np.ones((fg_height, fg_width, 3))
>>> for v_idx in range(v_num):
... for h_idx in range(h_num):
... idx = v_idx * h_num + h_idx
... tpg.merge(img, fg_img, pos_list[h_idx][v_idx])
>>> tpg.img_wirte_float_as_16bit_int("./test_img.png", img)
"""
self.remove_tblr_margin = remove_tblr_margin
if (fg_width*h_num > bg_width) or (fg_height*v_num > bg_height):
print("Fatal Error!")
print(" fg_width or fg_height is too big.")
if self.remove_tblr_margin:
self.tblr_maring_offset = -1
else:
self.tblr_maring_offset = 1
h_margin_list, v_margin_list = self.calc_hv_margin_list(
bg_width=bg_width, bg_height=bg_height,
fg_width=fg_width, fg_height=fg_height, h_num=h_num, v_num=v_num)
self.calc_st_coordinate_list(
h_num=h_num, v_num=v_num, fg_width=fg_width, fg_height=fg_height,
h_margin_list=h_margin_list, v_margin_list=v_margin_list)
def get_st_pos_list(self):
return self.pos_list
def calc_hv_margin_list(
self, bg_width=1920, bg_height=1080,
fg_width=200, fg_height=100, h_num=5, v_num=4):
h_margin_total = bg_width - (fg_width * h_num)
v_margin_total = bg_height - (fg_height * v_num)
h_margin_list = tpg.equal_devision(
h_margin_total, h_num + self.tblr_maring_offset)
v_margin_list = tpg.equal_devision(
v_margin_total, v_num + self.tblr_maring_offset)
return h_margin_list, v_margin_list
def calc_st_coordinate_list(
self, h_num, v_num, fg_width, fg_height,
h_margin_list, v_margin_list):
"""
calculate start position.
Parameters
----------
h_num : int
horizontal number of the fg_pattern.
v_num : int
vertical number of the fg_pattern.
fg_width : int
width of the fg pattern.
fg_height : int
height of the fg pattern.
h_margin_list : list
horizontal margin list
v_margin_list : list
vertical margin list
Returns
-------
self.pos_list (not return, just keep on the ram) : ndarray
pos_list[h_idx][v_idx][0] : h_pos
pos_list[h_idx][v_idx][1] : v_pos
"""
if self.remove_tblr_margin:
st_offset_h = 0
st_offset_v = 0
else:
st_offset_h = h_margin_list.pop(0)
st_offset_v = v_margin_list.pop(0)
self.pos_list = np.zeros(
(h_num, v_num, 2), dtype=np.uint32)
v_pos = st_offset_v
for v_idx in range(v_num):
h_pos = st_offset_h
for h_idx in range(h_num):
self.pos_list[h_idx][v_idx][0] = h_pos
self.pos_list[h_idx][v_idx][1] = v_pos
if h_idx >= (h_num - 1):
break
h_magin = h_margin_list[h_idx]
h_pos += (h_magin + fg_width)
if v_idx >= (v_num - 1):
break
v_margin = v_margin_list[v_idx]
v_pos += (v_margin + fg_height)
class ImgWithTextCoordinate():
def __init__(
self, img_width, img_height,
text="8bit \nwith alpha", font_size=30,
text_pos="left",
font_path=fc.NOTO_SANS_MONO_BOLD,
margin_num_of_chara=0.5):
"""
Parameters
----------
img_width : int
width of the image
img_height : int
haight of the image
text_pos : string
"left", "right", "top", "bottom".
margin_num_of_chara : float
the margin rate between text and image.
1.0 means one character width.
Examples
--------
>>> text = "8bit \nwith alpha"
>>> font_size = 30
>>> font_path = fc.NOTO_SANS_MONO_REGULAR
>>> margin_num_of_chara = 1.0
>>> fg_img = np.ones((300, 400, 3)) * np.array([0, 1, 1])
>>> # left
>>> bg_img = np.zeros((720, 1280, 3))
>>> tpg.draw_outline(bg_img, np.array([0, 1, 0]), 1)
>>> img_text_coorinate = ImgWithTextCoordinate(
... img_width=fg_img.shape[1], img_height=fg_img.shape[0],
... text=text, font_size=font_size,
... text_pos="left", font_path=font_path,
... margin_num_of_chara=margin_num_of_chara)
>>> img_st_pos, text_st_pos\
... = img_text_coorinate.get_img_and_text_st_pos()
>>> tpg.merge(bg_img, fg_img, img_st_pos)
>>> text_drawer = fc.TextDrawer(
... bg_img, text=text, pos=text_st_pos,
... font_color=(0.5, 0.5, 0.5), font_size=font_size,
... font_path=font_path)
>>> text_drawer.draw()
>>> tpg.img_wirte_float_as_16bit_int("./img_left.png", bg_img)
"""
text_width, text_height = self.calc_text_size(
text=text, font_size=font_size, font_path=font_path)
text_img_margin = self.calc_text_img_margin(
text=text, text_width=text_width,
margin_num_of_chara=margin_num_of_chara)
if text_pos == "left":
text_st_pos = (0, 0)
img_st_pos = (text_width + text_img_margin, 0)
elif text_pos == "right":
text_st_pos = (img_width + text_img_margin, 0)
img_st_pos = (0, 0)
elif text_pos == "top":
text_st_pos = (0, 0)
img_st_pos = (0, text_height + text_img_margin)
elif text_pos == "bottom":
text_st_pos = (0, img_height + text_img_margin)
img_st_pos = (0, 0)
else:
print("Parameter error!")
print(f" text_pos={text_pos} is invalid")
self.text_st_pos = np.array(text_st_pos, dtype=np.uint32)
self.img_st_pos = np.array(img_st_pos, dtype=np.uint32)
def get_img_and_text_st_pos(self):
return self.img_st_pos, self.text_st_pos
def calc_text_size(self, text, font_size, font_path):
text_drawer = fc.TextDrawer(
None, text=text, font_size=font_size,
font_path=font_path)
text_drawer.make_text_img_with_alpha()
text_width, text_height = text_drawer.get_text_size()
return text_width, text_height
def calc_text_img_margin(
self, text, text_width, margin_num_of_chara=0.5):
"""
Parameters
----------
test : str
text
text_width : int
text width. unit is pixel
margin_num_of_chara : float
the margin rate between text and image.
1.0 means one character width.
"""
str_length = self.get_text_horizontal_length(text)
text_width_one_str = text_width / str_length * margin_num_of_chara
return int(text_width_one_str + 0.5)
def get_text_horizontal_length(self, text):
str_list_splitted_lf = text.split('\n')
str_length_num_list = [
len(strings) for strings in str_list_splitted_lf]
max_length = | np.array(str_length_num_list) | numpy.array |
import tvm
import numpy as np
def test_sort():
n = 2
l = 5
m = 3
data = tvm.placeholder((n, l, m), name='data')
sort_num = tvm.placeholder((n, m), name="sort_num", dtype="int32")
axis = 1
is_descend = True
out = tvm.extern(data.shape, [data, sort_num],
lambda ins, outs: tvm.call_packed(
"tvm.contrib.sort.argsort", ins[0],
ins[1], outs[0], axis, is_descend),
dtype='int32', name="sort_tensor")
input = [[[1, 2, 3], [2, 4.5, 3.5], [1.1, 0.5, 1], [3.2, -5, 0.5], [1.5, 0, 0]],
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]]]
sort_num_input = [[1, 2, 3], [4, 5, 5]]
sorted_index = [[[0, 1, 1], [1, 0, 0], [2, 2, 2], [3, 3, 3], [4, 4, 4]],
[[3, 4, 4], [2, 3, 3], [1, 2, 2], [0, 1, 1], [4, 0, 0]]]
ctx = tvm.cpu(0)
target = "llvm"
s = tvm.create_schedule(out.op)
f = tvm.build(s, [data, sort_num, out], target)
a = tvm.nd.array(np.array(input).astype(data.dtype), ctx)
b = tvm.nd.array(np.array(sort_num_input).astype(sort_num.dtype), ctx)
c = tvm.nd.array(np.zeros(a.shape, dtype=out.dtype), ctx)
f(a, b, c)
tvm.testing.assert_allclose(c.asnumpy(), np.array(sorted_index).astype(out.dtype), rtol=1e-5)
def test_sort_np():
dshape = (1, 2, 3, 4, 5, 6)
axis = 4
reduced_shape = (1, 2, 3, 4, 6)
is_descend = False
data = tvm.placeholder(dshape, name='data')
sort_num = tvm.placeholder(reduced_shape, name="sort_num", dtype="int32")
out = tvm.extern(data.shape, [data, sort_num],
lambda ins, outs: tvm.call_packed(
"tvm.contrib.sort.argsort", ins[0],
ins[1], outs[0], axis, is_descend),
dtype='int32', name="sort_tensor")
ctx = tvm.cpu(0)
target = "llvm"
s = tvm.create_schedule(out.op)
f = tvm.build(s, [data, sort_num, out], target)
np_data = np.random.uniform(size=dshape)
np_out = np.argsort(np_data, axis=axis)
sort_num_input = np.full(reduced_shape, dshape[axis])
a = tvm.nd.array(np.array(np_data).astype(data.dtype), ctx)
b = tvm.nd.array(np.array(sort_num_input).astype(sort_num.dtype), ctx)
c = tvm.nd.array( | np.zeros(a.shape, dtype=out.dtype) | numpy.zeros |
"""Tests for the transform module."""
import numpy as np
import unittest
import os
from lpot.data import TRANSFORMS, DataLoader
from lpot.utils.create_obj_from_config import get_postprocess, create_dataset
from lpot.utils.utility import LazyImport
mx = LazyImport('mxnet')
tf = LazyImport('tensorflow')
torchvision = LazyImport('torchvision')
class TestMetrics(unittest.TestCase):
def test_tensorflow_2(self):
image = np.ones([1, 256, 256, 1])
resize_kwargs = {"size":[224, 224]}
transforms = TRANSFORMS(framework="tensorflow", process="preprocess")
resize = transforms['Resize'](**resize_kwargs)
random_crop_kwargs = {"size": [1, 128, 128, 1]}
random_crop = transforms['RandomCrop'](**random_crop_kwargs)
transform_list = [resize, random_crop]
compose = transforms['Compose'](transform_list)
image_result = compose((image, None))
self.assertEqual(image_result[0].shape, (1, 128, 128, 1))
class TestONNXQLImagenetTransform(unittest.TestCase):
@classmethod
def setUpClass(cls):
from PIL import Image
cls.img = np.random.random_sample([600,600,3])*255
cls.PIL_img = Image.fromarray(cls.img.astype(np.uint8))
def testResizeCropImagenetTransform(self):
transforms = TRANSFORMS('onnxrt_qlinearops', "preprocess")
transform = transforms['ResizeCropImagenet'](height=224, width=224)
sample = (self.PIL_img, 0)
result = transform(sample)
resized_input = result[0]
self.assertEqual(len(resized_input), 3)
self.assertEqual(len(resized_input[0]), 224)
self.assertEqual(len(resized_input[0][0]), 224)
class TestONNXITImagenetTransform(unittest.TestCase):
@classmethod
def setUpClass(cls):
from PIL import Image
cls.img = np.random.random_sample([600,600,3])*255
cls.PIL_img = Image.fromarray(cls.img.astype(np.uint8))
def testResizeCropImagenetTransform(self):
transforms = TRANSFORMS('onnxrt_integerops', "preprocess")
transform = transforms['ResizeCropImagenet'](height=224, width=224)
sample = (self.PIL_img, 0)
result = transform(sample)
resized_input = result[0]
self.assertEqual(len(resized_input), 3)
self.assertEqual(len(resized_input[0]), 224)
self.assertEqual(len(resized_input[0][0]), 224)
class TestTensorflowImagenetTransform(unittest.TestCase):
tf.compat.v1.disable_v2_behavior()
def testBilinearImagenetTransform(self):
transforms = TRANSFORMS('tensorflow', "preprocess")
transform = transforms['BilinearImagenet'](height=224, width=224)
rand_input = np.random.random_sample([600,600,3]).astype(np.float32)
sample = (rand_input, 0)
result = transform(sample)
resized_input = result[0].eval(session=tf.compat.v1.Session())
self.assertEqual(len(resized_input), 224)
self.assertEqual(len(resized_input[0]), 224)
self.assertEqual(len(resized_input[0][0]), 3)
def testResizeCropImagenetTransform(self):
transforms = TRANSFORMS('tensorflow', "preprocess")
transform = transforms['ResizeCropImagenet'](height=224, width=224)
rand_input = np.random.random_sample([600,600,3]).astype(np.float32)
sample = (rand_input, 0)
result = transform(sample)
resized_input = result[0].eval(session=tf.compat.v1.Session())
self.assertEqual(len(resized_input), 224)
self.assertEqual(len(resized_input[0]), 224)
self.assertEqual(len(resized_input[0][0]), 3)
def testLabelShift(self):
transforms = TRANSFORMS('tensorflow', "postprocess")
transform = transforms['LabelShift'](label_shift=1)
rand_input = np.random.random_sample([600,600,3]).astype(np.float32)
sample = (rand_input, 1001)
label = transform(sample)[1]
self.assertEqual(label, 1000)
def testQuantizedInput(self):
transforms = TRANSFORMS('tensorflow', "preprocess")
transform = transforms['QuantizedInput'](dtype='uint8', scale=100)
rand_input = np.random.random_sample([600,600,3]).astype(np.float32)
sample = (rand_input, 1001)
result = transform(sample)
quantized_input = result[0].eval(session=tf.compat.v1.Session())
self.assertLessEqual(quantized_input.max(), 255)
self.assertGreaterEqual(quantized_input.min(), 0)
class TestSameTransfoms(unittest.TestCase):
@classmethod
def setUpClass(cls):
from PIL import Image
cls.img = np.random.random_sample([10,10,3])*255
cls.tf_trans = TRANSFORMS('tensorflow', 'preprocess')
cls.pt_trans = TRANSFORMS('pytorch', 'preprocess')
cls.mx_trans = TRANSFORMS('mxnet', 'preprocess')
cls.ox_trans = TRANSFORMS('onnxrt_qlinearops', 'preprocess')
cls.mx_img = mx.nd.array(cls.img)
cls.pt_img = Image.fromarray(cls.img.astype(np.uint8))
_ = TRANSFORMS('tensorflow', 'postprocess')
_ = TRANSFORMS('pytorch', 'postprocess')
_ = TRANSFORMS('mxnet', 'postprocess')
_ = TRANSFORMS('onnxrt_qlinearops' , 'postprocess')
_ = TRANSFORMS('onnxrt_integerops', 'postprocess')
def testCenterCrop(self):
args = {'size':[4,4]}
tf_func = TestSameTransfoms.tf_trans['CenterCrop'](**args)
tf_result = tf_func((TestSameTransfoms.img, None))
tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
pt_func = TestSameTransfoms.pt_trans['CenterCrop'](**args)
pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
mx_func = TestSameTransfoms.mx_trans['CenterCrop'](**args)
mx_result = mx_func((TestSameTransfoms.mx_img, None))
mx_result = mx_result[0].asnumpy()
self.assertEqual(tf_result.shape, (4,4,3))
self.assertEqual(pt_result.size, (4,4))
self.assertEqual(mx_result.shape, (4,4,3))
self.assertEqual(np.array(pt_result)[0][0][0], int(mx_result[0][0][0]))
self.assertEqual(np.array(pt_result)[0][0][0], int(tf_result[0][0][0]))
args = {'size':4}
tf_func = TestSameTransfoms.tf_trans['CenterCrop'](**args)
tf_result = tf_func((TestSameTransfoms.img, None))
tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
pt_func = TestSameTransfoms.pt_trans['CenterCrop'](**args)
pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
mx_func = TestSameTransfoms.mx_trans['CenterCrop'](**args)
mx_result = mx_func((TestSameTransfoms.mx_img, None))
mx_result = mx_result[0].asnumpy()
self.assertEqual(tf_result.shape, (4,4,3))
self.assertEqual(pt_result.size, (4,4))
self.assertEqual(mx_result.shape, (4,4,3))
self.assertEqual(np.array(pt_result)[0][0][0], int(mx_result[0][0][0]))
self.assertEqual(np.array(pt_result)[0][0][0], int(tf_result[0][0][0]))
args = {'size':[4]}
tf_func = TestSameTransfoms.tf_trans['CenterCrop'](**args)
tf_result = tf_func((TestSameTransfoms.img, None))
tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
self.assertEqual(tf_result.shape, (4,4,3))
def testResize(self):
tf_func = TestSameTransfoms.tf_trans['Resize'](**{'size':[4,5]})
tf_result = tf_func((TestSameTransfoms.img, None))
tf_result = tf_result[0].eval(session=tf.compat.v1.Session())
pt_func = TestSameTransfoms.pt_trans['Resize'](**{'size':[4,5]})
pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
mx_func = TestSameTransfoms.mx_trans['Resize'](**{'size':[4,5]})
mx_result = mx_func((TestSameTransfoms.mx_img, None))
mx_result = mx_result[0].asnumpy()
self.assertEqual(tf_result.shape, (4,5,3))
self.assertEqual(pt_result.size, (5,4))
self.assertEqual(mx_result.shape, (5,4,3))
pt_func = TestSameTransfoms.pt_trans['Resize'](**{'size':[4,4]})
pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
pt_vision_func = torchvision.transforms.Resize(size=4, interpolation=2)
pt_vision_result = pt_vision_func(TestSameTransfoms.pt_img)
self.assertEqual(np.array(pt_result)[0][1][2], | np.array(pt_vision_result) | numpy.array |
#!/usr/bin/env python3
"""
Read class averages mrc file and save it to jpg.
Automatically remove the edges.
INPUT: mrcs file of 2D class averages
OUTPUT: a dir for the jpg output
The name of the jpg file would be "particlename_diamxxkxx_classnumber.jpg"
"""
import os
import mrcfile
import numpy as np
from PIL import Image
import argparse
import shutil
def setupParserOptions():
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--input',
help="Input mrcs file of 2D class averages.")
ap.add_argument('-n', '--name', default='particle',
help="Name of the particle")
ap.add_argument('-o', '--output', default='2DAssess',
help="Output jpg dir.")
args = vars(ap.parse_args())
return args
def cutbyradius(img):
h = img.shape[0]
w = img.shape[1]
# empty_val = img[0,0] # because the image is already masked (2d class avg), the [0,0] point must be empty
edge_l = 0
for i in range(w):
if np.sum(img[i,:]) > 1e-7 or np.sum(img[i,:]) < -1e-7:
edge_l = i
break
edge_r = 0
for ii in range(w):
if np.sum(img[-ii,:]) > 1e-7 or np.sum(img[-ii,:]) < -1e-7:
edge_r = ii
break
edge_t = 0
for j in range(h):
if np.sum(img[:,j]) > 1e-7 or np.sum(img[:,j]) < -1e-7:
edge_t = j
break
edge_b = 0
for jj in range(h):
if np.sum(img[:,-jj]) > 1e-7 or | np.sum(img[:,-jj]) | numpy.sum |
'''
this is the KPITU (Knee Point Identification Based on Trade-Off Utility) algorithm
'''
import numpy as np
import copy
import math as m
class solution(object):
def __init__(self, m):
self.index = -1
self.objective = np.zeros([1, m])
self.neighbor = []
self.contribution = -1
self.repoints = None
self.left = -1
class reference_point(object):
def __init__(self):
self.direction = None
self.neighbor = []
self.associate = []
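# Descriptive note: transfer(A, B) returns 1 when the summed objective values of A
# exceed those of B, and select(A, B) returns that signed difference directly.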
def transfer(A, B):
if np.sum(A.objective - B.objective) > 0:
return 1
else:
return 0
def select(A, B):
return np.sum(A.objective - B.objective)
def Associate(p, w):
obj_mat = np.asarray([i.objective for i in p]).T
w_mat = np.asarray([i.direction for i in w])
d_mat = | np.dot(w_mat, obj_mat) | numpy.dot |
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
import os
import placentagen as pg
import csv
D0_list = [150,150,172.8,172.8,170.8,135.8,43,230,255.6,301.6,304.1,108.1,85.7,60.7,235.6,156.5,255.4,164.8,100.8,64.9]
Cpass_list = [1.168,1.168,1.416,1.416,1.386,1.067,0.316,1.697,1.417,1.672,1.857,0.843,0.655,0.371,1.802,1.043,1.719,1.141,0.687,0.459]
Cpassdash_list = [7.24,7.24,7.901,7.901,10.568,10.516,11.247,5.298,5.628,5.324,5.226,24.279,36.785,21.035,6.782,8.293,14.354,13.828,12.606,13.431]
Cact_list = [1.108, 1.103, 1.499, 1.858, 1.514, 1.202, 0.392, 3.995, 2.649, 1.395, 3.748, 1.665, 1.024, 0.654,
0.908, 3.491, 1.564, 1.36, 1.131, 0.405]
D0_list_act = [150,172.8,170.8,135.8,43,156.5,255.4,164.8,100.8,64.9]
Cmyo_list = [7.479,8.871,8.462,7.973,24.934,9.018,4.674,7.508,15.977,22.252]
expt_pressure = np.array([10.,30.,50.,70.,90.]) # defined in mmHg
passive_diameter_preg = np.array([76.258, 122.33566667, 145.152, 137.5625, 144.64166667])
passive_se_preg = np.array([10.8693589, 10.23274183, 13.36969036, 11.7338111, 12.88427201])
passive_diameter = np.array([54.11314286, 74.08128571, 88.831, 89.99828571, 86.769])
passive_se = np.array([3.71311161,5.78277879,9.940847,9.98130157,12.93325597])
active_diameter_preg = np.array([92.70733333,113.74933333,121.8715,107.93166667,101.19983333])
active_se_preg = np.array([8.36576993,6.12886374,15.68328409,15.01816237,19.29603708])
active_diameter = np.array([65.587,74.17528571,79.87185714,83.58714286,80.92285714])
active_se = | np.array([5.52633482,5.86497481,7.06835057,7.71278033,9.02834107]) | numpy.array |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Deep Recurrent Reinforcement Learning: 1 LSTM layer and 4 Dense layers, tanh activation function, 12 episodes, 50 iterations
drnnLSTMtanhMakespan0=[799, 798, 799, 799, 805, 806, 799, 805, 805, 800, 798, 798]
drnnLSTMtanhMakespan1=[800, 798, 796, 800, 796, 794, 795, 798, 800, 798, 805, 798]
drnnLSTMtanhMakespan2=[796, 800, 798, 804, 800, 798, 798, 798, 800, 800, 802, 797]
drnnLSTMtanhMakespan3=[805, 800, 800, 803, 794, 802, 800, 798, 799, 804, 799, 806]
drnnLSTMtanhMakespan4=[796, 798, 795, 798, 796, 799, 800, 796, 796, 798, 806, 800]
drnnLSTMtanhMakespan5=[798, 798, 799, 800, 800, 808, 798, 798, 801, 796, 799, 798]
drnnLSTMtanhMakespan6=[800, 796, 805, 798, 798, 796, 799, 800, 803, 800, 798, 800]
drnnLSTMtanhMakespan7=[799, 805, 802, 805, 800, 799, 800, 799, 805, 800, 794, 796]
drnnLSTMtanhMakespan8=[799, 798, 800, 798, 798, 800, 800, 800, 804, 799, 800, 804]
drnnLSTMtanhMakespan9=[795, 800, 795, 796, 798, 796, 797, 800, 797, 798, 796, 795]
drnnLSTMtanhMakespan10=[804, 799, 805, 798, 798, 798, 805, 800, 796, 804, 796, 799]
drnnLSTMtanhMakespan11=[795, 803, 805, 798, 795, 801, 798, 798, 804, 803, 799, 804]
drnnLSTMtanhMakespan12=[798, 798, 799, 800, 798, 798, 799, 799, 801, 796, 799, 798]
drnnLSTMtanhMakespan13=[798, 798, 799, 797, 796, 796, 800, 797, 805, 800, 800, 794]
drnnLSTMtanhMakespan14=[800, 798, 798, 796, 800, 800, 798, 798, 802, 798, 802, 798]
drnnLSTMtanhMakespan15=[796, 796, 800, 801, 800, 800, 796, 794, 796, 800, 796, 798]
drnnLSTMtanhMakespan16=[798, 798, 795, 797, 795, 799, 800, 796, 795, 796, 800, 800]
drnnLSTMtanhMakespan17=[794, 795, 800, 798, 795, 796, 798, 796, 795, 794, 798, 796]
drnnLSTMtanhMakespan18=[797, 795, 794, 794, 800, 796, 796, 795, 798, 795, 798, 794]
drnnLSTMtanhMakespan19=[797, 795, 795, 796, 798, 799, 795, 799, 795, 794, 795, 795]
drnnLSTMtanhMakespan20=[796, 794, 798, 797, 798, 799, 795, 795, 797, 795, 795, 792]
drnnLSTMtanhMakespan21=[797, 795, 797, 793, 794, 794, 800, 794, 798, 795, 797, 795]
drnnLSTMtanhMakespan22=[794, 800, 798, 795, 795, 796, 796, 799, 795, 794, 795, 795]
drnnLSTMtanhMakespan23=[795, 795, 794, 795, 794, 794, 797, 799, 796, 794, 794, 795]
drnnLSTMtanhMakespan24=[798, 795, 795, 795, 792, 794, 795, 794, 794, 795, 795, 795]
drnnLSTMtanhMakespan25=[794, 792, 794, 795, 795, 794, 794, 794, 794, 795, 794, 793]
drnnLSTMtanhMakespan26=[794, 794, 795, 796, 798, 795, 794, 794, 794, 794, 795, 794]
drnnLSTMtanhMakespan27=[795, 794, 795, 795, 795, 794, 794, 794, 794, 794, 795, 795]
drnnLSTMtanhMakespan28=[795, 794, 794, 795, 794, 795, 795, 795, 795, 794, 795, 794]
drnnLSTMtanhMakespan29=[792, 794, 795, 794, 794, 795, 794, 793, 795, 794, 795, 792]
drnnLSTMtanhMakespan30=[795, 794, 795, 795, 794, 794, 794, 795, 794, 794, 794, 794]
drnnLSTMtanhMakespan31=[794, 794, 795, 794, 795, 793, 795, 795, 795, 792, 794, 794]
drnnLSTMtanhMakespan32=[795, 795, 794, 793, 795, 795, 795, 795, 794, 794, 795, 794]
drnnLSTMtanhMakespan33=[793, 794, 795, 793, 792, 795, 794, 794, 794, 794, 794, 795]
drnnLSTMtanhMakespan34=[794, 795, 795, 794, 794, 794, 794, 793, 794, 794, 794, 794]
drnnLSTMtanhMakespan35=[794, 794, 797, 793, 792, 794, 793, 794, 795, 794, 795, 792]
drnnLSTMtanhMakespan36=[794, 794, 793, 794, 795, 797, 795, 795, 794, 795, 793, 794]
drnnLSTMtanhMakespan37=[795, 793, 795, 794, 795, 798, 795, 794, 795, 793, 795, 794]
drnnLSTMtanhMakespan38=[794, 795, 793, 795, 794, 794, 794, 794, 794, 794, 797, 795]
drnnLSTMtanhMakespan39=[794, 794, 795, 794, 795, 795, 794, 795, 794, 795, 798, 797]
drnnLSTMtanhMakespan40=[795, 795, 794, 795, 794, 795, 795, 794, 794, 794, 795, 795]
drnnLSTMtanhMakespan41=[794, 795, 792, 794, 794, 798, 795, 794, 794, 794, 793, 795]
drnnLSTMtanhMakespan42=[793, 795, 794, 793, 794, 794, 792, 794, 795, 794, 794, 793]
drnnLSTMtanhMakespan43=[793, 792, 793, 794, 794, 795, 792, 794, 795, 794, 795, 794]
drnnLSTMtanhMakespan44=[793, 794, 795, 795, 794, 794, 795, 798, 794, 792, 795, 794]
drnnLSTMtanhMakespan45=[795, 794, 794, 794, 794, 792, 794, 795, 794, 796, 795, 794]
drnnLSTMtanhMakespan46=[794, 793, 793, 795, 795, 794, 794, 794, 794, 796, 794, 794]
drnnLSTMtanhMakespan47=[794, 794, 795, 794, 794, 795, 792, 795, 794, 795, 795, 794]
drnnLSTMtanhMakespan48=[794, 795, 794, 794, 794, 792, 794, 795, 796, 794, 794, 795]
drnnLSTMtanhMakespan49=[794, 794, 794, 794, 794, 794, 792, 794, 793, 794, 795, 794]
drnnLSTMtanhRewards0=[-0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17725973169122497, -0.1759911894273128, -0.177078750549934, -0.177078750549934, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnLSTMtanhRewards1=[-0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765]
drnnLSTMtanhRewards2=[-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.1768976897689769, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17653532907770195, -0.17562802996914942]
drnnLSTMtanhRewards3=[-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17671654929577466, -0.17508269018743108, -0.17653532907770195, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.1768976897689769, -0.1759911894273128, -0.17725973169122497]
drnnLSTMtanhRewards4=[-0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17725973169122497, -0.17617264919621228]
drnnLSTMtanhRewards5=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.1776214552648934, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drnnLSTMtanhRewards6=[-0.17617264919621228, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17671654929577466, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228]
drnnLSTMtanhRewards7=[-0.1759911894273128, -0.177078750549934, -0.17653532907770195, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387]
drnnLSTMtanhRewards8=[-0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1759911894273128, -0.17617264919621228, -0.1768976897689769]
drnnLSTMtanhRewards9=[-0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026]
drnnLSTMtanhRewards10=[-0.1768976897689769, -0.1759911894273128, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17544633017412387, -0.1759911894273128]
drnnLSTMtanhRewards11=[-0.17526455026455026, -0.17671654929577466, -0.177078750549934, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17580964970257765, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.1759911894273128, -0.1768976897689769]
drnnLSTMtanhRewards12=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1763540290620872, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drnnLSTMtanhRewards13=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108]
drnnLSTMtanhRewards14=[-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765]
drnnLSTMtanhRewards15=[-0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.1763540290620872, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drnnLSTMtanhRewards16=[-0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17617264919621228]
drnnLSTMtanhRewards17=[-0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387]
drnnLSTMtanhRewards18=[-0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108]
drnnLSTMtanhRewards19=[-0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards20=[-0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards21=[-0.17562802996914942, -0.17526455026455026, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026]
drnnLSTMtanhRewards22=[-0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards23=[-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.1759911894273128, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards24=[-0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards25=[-0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221]
drnnLSTMtanhRewards26=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards27=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards28=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards29=[-0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards30=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards31=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards32=[-0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards33=[-0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards34=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards35=[-0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards36=[-0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108]
drnnLSTMtanhRewards37=[-0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards38=[-0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drnnLSTMtanhRewards39=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942]
drnnLSTMtanhRewards40=[-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards41=[-0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026]
drnnLSTMtanhRewards42=[-0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221]
drnnLSTMtanhRewards43=[-0.1749007498897221, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards44=[-0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards45=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards46=[-0.17508269018743108, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards47=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards48=[-0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards49=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
# Deep Recurrent Reinforcement Learning: 1 LSTM layer and 4 Dense layers, relu activation function, 12 episodes, 50 iterations
drnnLSTMreluMakespan0=[805, 800, 800, 800, 794, 800, 798, 809, 795, 800, 798, 798]
drnnLSTMreluMakespan1=[798, 798, 796, 799, 800, 796, 796, 798, 798, 794, 798, 800]
drnnLSTMreluMakespan2=[805, 805, 798, 799, 806, 799, 806, 799, 800, 798, 805, 795]
drnnLSTMreluMakespan3=[800, 800, 800, 796, 800, 800, 799, 806, 808, 798, 797, 798]
drnnLSTMreluMakespan4=[805, 805, 795, 796, 799, 804, 798, 794, 798, 794, 796, 810]
drnnLSTMreluMakespan5=[798, 798, 798, 795, 800, 798, 796, 802, 800, 800, 805, 801]
drnnLSTMreluMakespan6=[800, 798, 798, 795, 800, 796, 800, 798, 799, 796, 805, 800]
drnnLSTMreluMakespan7=[800, 800, 800, 799, 798, 798, 800, 805, 800, 799, 800, 801]
drnnLSTMreluMakespan8=[799, 800, 800, 799, 795, 795, 805, 795, 798, 800, 798, 800]
drnnLSTMreluMakespan9=[800, 796, 805, 798, 798, 795, 805, 800, 799, 795, 800, 805]
drnnLSTMreluMakespan10=[805, 798, 805, 800, 801, 805, 799, 805, 798, 800, 800, 798]
drnnLSTMreluMakespan11=[798, 803, 800, 797, 795, 796, 794, 799, 800, 800, 800, 796]
drnnLSTMreluMakespan12=[799, 798, 799, 795, 798, 795, 798, 798, 798, 795, 798, 798]
drnnLSTMreluMakespan13=[798, 798, 799, 796, 798, 796, 800, 799, 796, 794, 796, 795]
drnnLSTMreluMakespan14=[796, 798, 806, 799, 804, 798, 805, 798, 800, 805, 794, 800]
drnnLSTMreluMakespan15=[806, 795, 800, 796, 798, 796, 810, 798, 799, 798, 800, 800]
drnnLSTMreluMakespan16=[799, 796, 798, 798, 798, 800, 798, 810, 796, 805, 800, 795]
drnnLSTMreluMakespan17=[798, 798, 798, 794, 798, 805, 801, 798, 800, 799, 798, 798]
drnnLSTMreluMakespan18=[795, 800, 794, 798, 797, 798, 794, 800, 797, 796, 794, 794]
drnnLSTMreluMakespan19=[798, 802, 794, 798, 799, 795, 797, 795, 800, 796, 797, 796]
drnnLSTMreluMakespan20=[794, 797, 795, 794, 799, 795, 795, 795, 800, 797, 794, 798]
drnnLSTMreluMakespan21=[799, 798, 796, 795, 794, 798, 795, 795, 798, 798, 795, 794]
drnnLSTMreluMakespan22=[794, 794, 795, 797, 795, 795, 795, 792, 794, 795, 794, 794]
drnnLSTMreluMakespan23=[794, 794, 794, 794, 795, 796, 793, 794, 795, 794, 797, 795]
drnnLSTMreluMakespan24=[794, 792, 792, 794, 796, 792, 794, 795, 794, 792, 796, 795]
drnnLSTMreluMakespan25=[794, 795, 795, 794, 794, 792, 795, 792, 795, 794, 794, 794]
drnnLSTMreluMakespan26=[795, 794, 794, 795, 794, 794, 793, 794, 797, 795, 794, 795]
drnnLSTMreluMakespan27=[794, 794, 795, 796, 795, 797, 794, 794, 795, 801, 794, 795]
drnnLSTMreluMakespan28=[795, 795, 795, 795, 794, 792, 794, 797, 794, 795, 795, 795]
drnnLSTMreluMakespan29=[794, 792, 798, 794, 797, 795, 793, 795, 795, 794, 795, 795]
drnnLSTMreluMakespan30=[795, 794, 798, 794, 794, 795, 792, 796, 794, 796, 794, 794]
drnnLSTMreluMakespan31=[794, 795, 795, 794, 795, 794, 795, 795, 794, 794, 795, 795]
drnnLSTMreluMakespan32=[798, 794, 794, 794, 798, 792, 795, 795, 795, 796, 794, 795]
drnnLSTMreluMakespan33=[794, 796, 794, 794, 794, 795, 794, 794, 797, 793, 793, 795]
drnnLSTMreluMakespan34=[794, 794, 795, 794, 794, 793, 794, 795, 793, 795, 795, 794]
drnnLSTMreluMakespan35=[798, 796, 795, 794, 795, 795, 795, 795, 794, 795, 797, 795]
drnnLSTMreluMakespan36=[794, 796, 794, 794, 794, 794, 795, 795, 797, 796, 795, 795]
drnnLSTMreluMakespan37=[795, 794, 796, 795, 795, 795, 795, 794, 792, 797, 794, 793]
drnnLSTMreluMakespan38=[794, 798, 794, 792, 794, 792, 795, 797, 793, 794, 794, 797]
drnnLSTMreluMakespan39=[792, 794, 794, 794, 792, 795, 795, 795, 794, 794, 795, 794]
drnnLSTMreluMakespan40=[792, 795, 795, 792, 795, 795, 794, 795, 794, 795, 794, 795]
drnnLSTMreluMakespan41=[794, 797, 795, 794, 795, 795, 798, 794, 795, 796, 796, 794]
drnnLSTMreluMakespan42=[794, 795, 795, 795, 794, 795, 795, 794, 794, 795, 793, 795]
drnnLSTMreluMakespan43=[795, 794, 795, 794, 795, 795, 792, 794, 794, 795, 794, 795]
drnnLSTMreluMakespan44=[795, 794, 792, 795, 794, 794, 795, 794, 796, 795, 796, 794]
drnnLSTMreluMakespan45=[795, 794, 793, 794, 793, 795, 794, 794, 795, 794, 795, 794]
drnnLSTMreluMakespan46=[794, 796, 793, 794, 794, 795, 799, 795, 794, 794, 794, 794]
drnnLSTMreluMakespan47=[794, 794, 794, 794, 795, 793, 795, 795, 794, 795, 795, 795]
drnnLSTMreluMakespan48=[794, 794, 795, 794, 795, 795, 795, 794, 794, 795, 795, 794]
drnnLSTMreluMakespan49=[795, 795, 795, 794, 795, 795, 794, 795, 793, 793, 792, 792]
drnnLSTMreluRewards0=[-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.1778021978021978, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards1=[-0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17617264919621228]
drnnLSTMreluRewards2=[-0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17725973169122497, -0.1759911894273128, -0.17725973169122497, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.177078750549934, -0.17526455026455026]
drnnLSTMreluRewards3=[-0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17725973169122497, -0.1776214552648934, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765]
drnnLSTMreluRewards4=[-0.177078750549934, -0.177078750549934, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.1768976897689769, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.17798286090969018]
drnnLSTMreluRewards5=[-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17653532907770195, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.1763540290620872]
drnnLSTMreluRewards6=[-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.177078750549934, -0.17617264919621228]
drnnLSTMreluRewards7=[-0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.1763540290620872]
drnnLSTMreluRewards8=[-0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.177078750549934, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228]
drnnLSTMreluRewards9=[-0.17617264919621228, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17526455026455026, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17617264919621228, -0.177078750549934]
drnnLSTMreluRewards10=[-0.177078750549934, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drnnLSTMreluRewards11=[-0.17580964970257765, -0.17671654929577466, -0.17617264919621228, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387]
drnnLSTMreluRewards12=[-0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards13=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.1759911894273128, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnLSTMreluRewards14=[-0.17544633017412387, -0.17580964970257765, -0.17725973169122497, -0.1759911894273128, -0.1768976897689769, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17508269018743108, -0.17617264919621228]
drnnLSTMreluRewards15=[-0.17725973169122497, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17798286090969018, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnLSTMreluRewards16=[-0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17798286090969018, -0.17544633017412387, -0.177078750549934, -0.17617264919621228, -0.17526455026455026]
drnnLSTMreluRewards17=[-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.177078750549934, -0.1763540290620872, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards18=[-0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17562802996914942, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards19=[-0.17580964970257765, -0.17653532907770195, -0.17508269018743108, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387]
drnnLSTMreluRewards20=[-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17562802996914942, -0.17508269018743108, -0.17580964970257765]
drnnLSTMreluRewards21=[-0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards22=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards23=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drnnLSTMreluRewards24=[-0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17508269018743108, -0.17544633017412387, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17544633017412387, -0.17526455026455026]
drnnLSTMreluRewards25=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards26=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards27=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1763540290620872, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards28=[-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards29=[-0.17508269018743108, -0.17471872931833224, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards30=[-0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards31=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards32=[-0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards33=[-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026]
drnnLSTMreluRewards34=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards35=[-0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026]
drnnLSTMreluRewards36=[-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards37=[-0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17562802996914942, -0.17508269018743108, -0.1749007498897221]
drnnLSTMreluRewards38=[-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942]
drnnLSTMreluRewards39=[-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards40=[-0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards41=[-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17508269018743108]
drnnLSTMreluRewards42=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026]
drnnLSTMreluRewards43=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards44=[-0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drnnLSTMreluRewards45=[-0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards46=[-0.17508269018743108, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards47=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards48=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards49=[-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.1749007498897221, -0.17471872931833224, -0.17471872931833224]
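# Each drnnLSTMreluMakespanN list holds the 12 episode makespans obtained at iteration N,
# so the learning curve of this configuration can be summarised by averaging per iteration.
# A minimal sketch, assuming numpy is available and the 50 lists above are module-level names;
# the variable name drnnLSTMreluMakespanMeans is an illustrative choice, not from the original.
import numpy as np

drnnLSTMreluMakespanMeans = [
    np.mean(globals()[f"drnnLSTMreluMakespan{i}"]) for i in range(50)
]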
# Deep Recurrent Reinforcement Learning: 1 GRU layer and 4 Dense layers, tanh activation function, 12 episodes, 50 iterations
drnnGRUtanhMakespan0 = [798, 799, 798, 804, 805, 799, 801, 801, 801, 799, 798, 796]
drnnGRUtanhMakespan1 = [800, 798, 798, 798, 798, 798, 801, 798, 795, 796, 800, 796]
drnnGRUtanhMakespan2 = [795, 804, 805, 800, 800, 796, 804, 800, 795, 798, 798, 801]
drnnGRUtanhMakespan3 = [806, 796, 794, 797, 798, 800, 800, 808, 805, 798, 800, 809]
drnnGRUtanhMakespan4 = [805, 801, 795, 798, 798, 800, 796, 796, 805, 798, 799, 798]
drnnGRUtanhMakespan5 = [804, 799, 798, 804, 796, 799, 798, 805, 796, 805, 798, 800]
drnnGRUtanhMakespan6 = [800, 799, 794, 801, 799, 796, 800, 804, 797, 796, 800, 798]
drnnGRUtanhMakespan7 = [798, 800, 810, 810, 805, 800, 795, 798, 800, 805, 799, 800]
drnnGRUtanhMakespan8 = [798, 797, 800, 800, 804, 805, 798, 798, 801, 795, 798, 809]
drnnGRUtanhMakespan9 = [803, 800, 800, 805, 805, 798, 804, 803, 805, 801, 810, 801]
drnnGRUtanhMakespan10 = [798, 799, 798, 798, 805, 804, 805, 798, 799, 798, 800, 800]
drnnGRUtanhMakespan11 = [796, 795, 805, 800, 800, 798, 795, 804, 805, 798, 800, 800]
drnnGRUtanhMakespan12 = [799, 799, 809, 800, 799, 799, 797, 805, 799, 800, 798, 795]
drnnGRUtanhMakespan13 = [805, 800, 800, 805, 800, 799, 798, 801, 798, 797, 805, 800]
drnnGRUtanhMakespan14 = [800, 798, 800, 800, 800, 804, 804, 799, 799, 800, 798, 798]
drnnGRUtanhMakespan15 = [805, 800, 795, 800, 804, 795, 800, 798, 799, 798, 800, 796]
drnnGRUtanhMakespan16 = [806, 795, 801, 799, 799, 796, 796, 794, 802, 796, 800, 802]
drnnGRUtanhMakespan17 = [796, 800, 798, 800, 794, 800, 804, 805, 798, 810, 800, 798]
drnnGRUtanhMakespan18 = [798, 800, 794, 794, 797, 798, 800, 805, 798, 798, 804, 798]
drnnGRUtanhMakespan19 = [796, 800, 806, 799, 796, 800, 798, 805, 798, 799, 797, 805]
drnnGRUtanhMakespan20 = [805, 800, 799, 796, 805, 805, 805, 794, 809, 796, 800, 797]
drnnGRUtanhMakespan21 = [798, 800, 800, 800, 798, 801, 796, 801, 801, 801, 795, 799]
drnnGRUtanhMakespan22 = [798, 801, 797, 800, 799, 795, 799, 799, 800, 801, 800, 799]
drnnGRUtanhMakespan23 = [800, 798, 799, 805, 794, 800, 798, 796, 796, 804, 800, 794]
drnnGRUtanhMakespan24 = [800, 800, 798, 805, 804, 799, 798, 801, 800, 798, 798, 798]
drnnGRUtanhMakespan25 = [798, 798, 798, 795, 800, 803, 798, 798, 800, 799, 796, 798]
drnnGRUtanhMakespan26 = [796, 798, 798, 798, 805, 796, 798, 798, 805, 795, 801, 796]
drnnGRUtanhMakespan27 = [794, 796, 796, 800, 800, 798, 800, 798, 802, 798, 797, 798]
drnnGRUtanhMakespan28 = [799, 799, 800, 800, 798, 802, 799, 798, 795, 795, 794, 798]
drnnGRUtanhMakespan29 = [798, 796, 796, 797, 796, 798, 800, 800, 796, 798, 800, 795]
drnnGRUtanhMakespan30 = [799, 798, 795, 795, 800, 795, 798, 798, 799, 798, 805, 799]
drnnGRUtanhMakespan31 = [795, 799, 794, 794, 796, 795, 795, 794, 798, 797, 798, 795]
drnnGRUtanhMakespan32 = [797, 798, 795, 796, 798, 795, 797, 798, 795, 794, 795, 796]
drnnGRUtanhMakespan33 = [799, 795, 794, 794, 798, 795, 798, 797, 800, 796, 795, 794]
drnnGRUtanhMakespan34 = [798, 795, 798, 796, 798, 794, 796, 798, 798, 798, 796, 797]
drnnGRUtanhMakespan35 = [795, 798, 796, 798, 794, 801, 795, 800, 795, 800, 794, 800]
drnnGRUtanhMakespan36 = [798, 799, 796, 797, 795, 794, 800, 795, 795, 794, 795, 795]
drnnGRUtanhMakespan37 = [799, 798, 795, 795, 794, 795, 795, 796, 805, 795, 798, 796]
drnnGRUtanhMakespan38 = [798, 794, 795, 795, 795, 796, 795, 796, 800, 798, 797, 796]
drnnGRUtanhMakespan39 = [794, 795, 795, 797, 795, 795, 794, 794, 798, 795, 794, 798]
drnnGRUtanhMakespan40 = [795, 795, 795, 795, 795, 795, 794, 794, 793, 797, 794, 795]
drnnGRUtanhMakespan41 = [794, 794, 795, 793, 795, 795, 792, 794, 795, 794, 794, 794]
drnnGRUtanhMakespan42 = [795, 795, 795, 796, 794, 797, 795, 795, 792, 795, 796, 793]
drnnGRUtanhMakespan43 = [794, 795, 795, 794, 795, 794, 798, 794, 797, 795, 794, 794]
drnnGRUtanhMakespan44 = [795, 795, 793, 794, 795, 794, 795, 795, 794, 794, 795, 794]
drnnGRUtanhMakespan45 = [794, 794, 794, 794, 794, 794, 795, 794, 794, 794, 796, 795]
drnnGRUtanhMakespan46 = [795, 794, 795, 794, 794, 794, 793, 794, 795, 795, 794, 797]
drnnGRUtanhMakespan47 = [794, 794, 794, 794, 795, 794, 795, 792, 794, 795, 794, 794]
drnnGRUtanhMakespan48 = [795, 794, 794, 794, 795, 798, 794, 794, 794, 795, 794, 794]
drnnGRUtanhMakespan49 = [795, 795, 794, 795, 793, 795, 796, 794, 795, 794, 794, 797]
drnnGRUtanhRewards0 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.1768976897689769, -0.177078750549934, -0.1759911894273128, -0.1763540290620872, -0.1763540290620872, -0.1763540290620872, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387]
drnnGRUtanhRewards1 = [-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387]
drnnGRUtanhRewards2 = [-0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872]
drnnGRUtanhRewards3 = [-0.17725973169122497, -0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.1776214552648934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.1778021978021978]
drnnGRUtanhRewards4 = [-0.177078750549934, -0.1763540290620872, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUtanhRewards5 = [-0.1768976897689769, -0.1759911894273128, -0.17580964970257765, -0.1768976897689769, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17617264919621228]
drnnGRUtanhRewards6 = [-0.17617264919621228, -0.1759911894273128, -0.17508269018743108, -0.1763540290620872, -0.1759911894273128, -0.17544633017412387, -0.17617264919621228, -0.1768976897689769, -0.17562802996914942, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765]
drnnGRUtanhRewards7 = [-0.17580964970257765, -0.17617264919621228, -0.17798286090969018, -0.177078750549934, -0.17798286090969018, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17617264919621228]
drnnGRUtanhRewards8 = [-0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17580964970257765, -0.1778021978021978]
drnnGRUtanhRewards9 = [-0.17671654929577466, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.177078750549934, -0.1763540290620872, -0.17798286090969018, -0.1763540290620872]
drnnGRUtanhRewards10 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnGRUtanhRewards11 = [-0.17544633017412387, -0.17526455026455026, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnGRUtanhRewards12 = [-0.1759911894273128, -0.1759911894273128, -0.1778021978021978, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17562802996914942, -0.177078750549934, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026]
drnnGRUtanhRewards13 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17562802996914942, -0.177078750549934, -0.17617264919621228]
drnnGRUtanhRewards14 = [-0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1768976897689769, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnGRUtanhRewards15 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17526455026455026, -0.1768976897689769, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387]
drnnGRUtanhRewards16 = [-0.17725973169122497, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17508269018743108, -0.17653532907770195, -0.17544633017412387, -0.17617264919621228, -0.17653532907770195]
drnnGRUtanhRewards17 = [-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17798286090969018, -0.17617264919621228, -0.17580964970257765]
drnnGRUtanhRewards18 = [-0.17580964970257765, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.1768976897689769, -0.17580964970257765]
drnnGRUtanhRewards19 = [-0.17544633017412387, -0.17617264919621228, -0.17725973169122497, -0.1759911894273128, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17562802996914942, -0.1759911894273128, -0.177078750549934]
drnnGRUtanhRewards20 = [-0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17544633017412387, -0.177078750549934, -0.177078750549934, -0.177078750549934, -0.17508269018743108, -0.1778021978021978, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942]
drnnGRUtanhRewards21 = [-0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.17544633017412387, -0.1763540290620872, -0.1763540290620872, -0.1763540290620872, -0.17526455026455026, -0.1759911894273128]
drnnGRUtanhRewards22 = [-0.17580964970257765, -0.1763540290620872, -0.17562802996914942, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.1763540290620872, -0.17617264919621228, -0.1759911894273128]
drnnGRUtanhRewards23 = [-0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17508269018743108]
drnnGRUtanhRewards24 = [-0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.177078750549934, -0.1768976897689769, -0.17580964970257765, -0.1763540290620872, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765]
drnnGRUtanhRewards25 = [-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17671654929577466, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765]
drnnGRUtanhRewards26 = [-0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17526455026455026, -0.1763540290620872, -0.17544633017412387]
drnnGRUtanhRewards27 = [-0.17508269018743108, -0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765]
drnnGRUtanhRewards28 = [-0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drnnGRUtanhRewards29 = [-0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026]
drnnGRUtanhRewards30 = [-0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.1759911894273128]
drnnGRUtanhRewards31 = [-0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026]
drnnGRUtanhRewards32 = [-0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnGRUtanhRewards33 = [-0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drnnGRUtanhRewards34 = [-0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942]
drnnGRUtanhRewards35 = [-0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.1763540290620872, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228]
drnnGRUtanhRewards36 = [-0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnGRUtanhRewards37 = [-0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.177078750549934, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drnnGRUtanhRewards38 = [-0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17562802996914942, -0.17544633017412387]
drnnGRUtanhRewards39 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765]
drnnGRUtanhRewards40 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026]
drnnGRUtanhRewards41 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards42 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17544633017412387, -0.1749007498897221]
drnnGRUtanhRewards43 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards44 = [-0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnGRUtanhRewards45 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnGRUtanhRewards46 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942]
drnnGRUtanhRewards47 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards48 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards49 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942]
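# A quick visual check of convergence for the GRU/tanh configuration: plot the per-iteration
# mean makespan over the 50 iterations above. This is an illustrative sketch (matplotlib and
# the variable name gru_tanh_means are assumptions, not part of the original experiment code).
import numpy as np
import matplotlib.pyplot as plt

gru_tanh_means = [np.mean(globals()[f"drnnGRUtanhMakespan{i}"]) for i in range(50)]
plt.plot(range(50), gru_tanh_means, marker="o", markersize=3)
plt.xlabel("Iteration")
plt.ylabel("Mean makespan (12 episodes)")
plt.title("DRNN GRU/tanh learning curve")
plt.show()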
# Deep Recurrent Reinforcement Learning: 1 GRU layer and 4 Dense layers, relu activation function, 12 episodes, 50 iterations
drnnGRUreluMakespan0 = [800, 799, 798, 797, 798, 800, 800, 796, 800, 794, 800, 800]
drnnGRUreluMakespan1 = [798, 800, 805, 795, 799, 808, 795, 800, 796, 798, 799, 798]
drnnGRUreluMakespan2 = [799, 800, 806, 800, 800, 805, 805, 798, 799, 807, 800, 800]
drnnGRUreluMakespan3 = [798, 795, 799, 800, 800, 796, 798, 800, 800, 804, 805, 800]
drnnGRUreluMakespan4 = [811, 800, 799, 800, 805, 798, 798, 799, 796, 804, 805, 804]
drnnGRUreluMakespan5 = [799, 795, 797, 800, 798, 800, 800, 798, 800, 797, 800, 798]
drnnGRUreluMakespan6 = [798, 800, 798, 799, 797, 798, 800, 796, 801, 799, 795, 798]
drnnGRUreluMakespan7 = [800, 804, 795, 801, 796, 806, 805, 798, 800, 799, 799, 804]
drnnGRUreluMakespan8 = [800, 799, 799, 800, 805, 796, 800, 800, 810, 796, 800, 798]
drnnGRUreluMakespan9 = [794, 800, 799, 805, 800, 800, 798, 798, 796, 795, 798, 796]
drnnGRUreluMakespan10 = [798, 800, 798, 801, 795, 802, 796, 809, 800, 800, 798, 795]
drnnGRUreluMakespan11 = [804, 800, 799, 799, 798, 803, 798, 798, 805, 803, 800, 796]
drnnGRUreluMakespan12 = [800, 799, 805, 797, 798, 796, 799, 794, 799, 805, 799, 800]
drnnGRUreluMakespan13 = [796, 800, 798, 800, 795, 799, 800, 804, 800, 794, 805, 805]
drnnGRUreluMakespan14 = [800, 795, 796, 798, 798, 801, 805, 794, 800, 801, 801, 796]
drnnGRUreluMakespan15 = [798, 800, 796, 796, 798, 794, 797, 800, 796, 801, 795, 799]
drnnGRUreluMakespan16 = [800, 805, 794, 800, 799, 800, 805, 801, 798, 800, 801, 799]
drnnGRUreluMakespan17 = [797, 803, 801, 808, 794, 799, 799, 800, 805, 796, 801, 796]
drnnGRUreluMakespan18 = [805, 800, 800, 804, 799, 798, 800, 799, 804, 796, 800, 804]
drnnGRUreluMakespan19 = [804, 798, 800, 799, 799, 799, 805, 795, 801, 799, 799, 805]
drnnGRUreluMakespan20 = [799, 804, 796, 798, 796, 798, 800, 805, 799, 810, 800, 800]
drnnGRUreluMakespan21 = [798, 799, 799, 805, 798, 798, 805, 798, 794, 799, 798, 798]
drnnGRUreluMakespan22 = [799, 798, 798, 796, 798, 805, 799, 798, 798, 799, 796, 798]
drnnGRUreluMakespan23 = [798, 805, 808, 798, 798, 805, 810, 796, 804, 799, 800, 799]
drnnGRUreluMakespan24 = [798, 796, 798, 795, 800, 798, 799, 798, 797, 805, 798, 800]
drnnGRUreluMakespan25 = [799, 796, 799, 798, 805, 798, 798, 800, 796, 794, 810, 798]
drnnGRUreluMakespan26 = [799, 798, 805, 800, 802, 798, 799, 799, 799, 794, 802, 797]
drnnGRUreluMakespan27 = [798, 800, 805, 796, 798, 795, 802, 796, 798, 800, 798, 794]
drnnGRUreluMakespan28 = [796, 805, 798, 800, 800, 798, 810, 798, 798, 798, 796, 796]
drnnGRUreluMakespan29 = [800, 798, 798, 802, 794, 798, 796, 808, 800, 800, 798, 799]
drnnGRUreluMakespan30 = [798, 796, 798, 798, 794, 798, 794, 800, 796, 794, 800, 800]
drnnGRUreluMakespan31 = [794, 802, 797, 799, 798, 800, 799, 799, 796, 796, 798, 798]
drnnGRUreluMakespan32 = [799, 798, 794, 795, 798, 805, 804, 797, 795, 800, 796, 798]
drnnGRUreluMakespan33 = [803, 799, 805, 796, 794, 798, 797, 798, 798, 794, 794, 798]
drnnGRUreluMakespan34 = [810, 796, 795, 798, 799, 798, 796, 795, 795, 797, 798, 798]
drnnGRUreluMakespan35 = [799, 799, 799, 799, 795, 798, 795, 800, 796, 795, 795, 796]
drnnGRUreluMakespan36 = [795, 797, 798, 799, 799, 799, 800, 794, 796, 795, 798, 800]
drnnGRUreluMakespan37 = [800, 798, 799, 794, 800, 796, 798, 798, 797, 800, 794, 798]
drnnGRUreluMakespan38 = [800, 799, 794, 796, 795, 800, 796, 804, 800, 795, 800, 798]
drnnGRUreluMakespan39 = [794, 798, 795, 804, 805, 799, 798, 800, 796, 798, 795, 794]
drnnGRUreluMakespan40 = [799, 798, 796, 798, 798, 799, 800, 796, 798, 798, 799, 798]
drnnGRUreluMakespan41 = [796, 798, 800, 797, 799, 796, 797, 796, 799, 804, 805, 798]
drnnGRUreluMakespan42 = [798, 794, 795, 799, 799, 798, 797, 798, 798, 798, 798, 795]
drnnGRUreluMakespan43 = [799, 798, 794, 794, 795, 794, 795, 799, 799, 800, 799, 794]
drnnGRUreluMakespan44 = [795, 796, 795, 799, 794, 795, 794, 796, 795, 794, 795, 796]
drnnGRUreluMakespan45 = [794, 797, 794, 795, 796, 795, 794, 799, 795, 794, 798, 798]
drnnGRUreluMakespan46 = [795, 795, 794, 795, 794, 794, 792, 794, 795, 797, 794, 794]
drnnGRUreluMakespan47 = [798, 796, 797, 798, 794, 798, 794, 797, 794, 803, 798, 798]
drnnGRUreluMakespan48 = [795, 794, 796, 798, 795, 794, 796, 795, 796, 794, 796, 796]
drnnGRUreluMakespan49 = [798, 798, 796, 798, 798, 796, 796, 798, 798, 798, 796, 798]
drnnGRUreluRewards0 = [-0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards1 = [-0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17526455026455026, -0.1759911894273128, -0.1776214552648934, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUreluRewards2 = [-0.1759911894273128, -0.17617264919621228, -0.17725973169122497, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.1774406332453826, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards3 = [-0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.177078750549934, -0.17617264919621228]
drnnGRUreluRewards4 = [-0.1781634446397188, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.1768976897689769, -0.177078750549934, -0.1768976897689769]
drnnGRUreluRewards5 = [-0.1759911894273128, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards6 = [-0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765]
drnnGRUreluRewards7 = [-0.17617264919621228, -0.1768976897689769, -0.17526455026455026, -0.1763540290620872, -0.17544633017412387, -0.17725973169122497, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.1768976897689769]
drnnGRUreluRewards8 = [-0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17798286090969018, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards9 = [-0.17508269018743108, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drnnGRUreluRewards10 = [-0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.17526455026455026, -0.17653532907770195, -0.17544633017412387, -0.1778021978021978, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026]
drnnGRUreluRewards11 = [-0.1768976897689769, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17580964970257765, -0.17671654929577466, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17671654929577466, -0.17617264919621228, -0.17544633017412387]
drnnGRUreluRewards12 = [-0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17508269018743108, -0.1759911894273128, -0.177078750549934, -0.1759911894273128, -0.17617264919621228]
drnnGRUreluRewards13 = [-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.1768976897689769, -0.17617264919621228, -0.17508269018743108, -0.177078750549934, -0.177078750549934]
drnnGRUreluRewards14 = [-0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.1763540290620872, -0.1763540290620872, -0.17544633017412387]
drnnGRUreluRewards15 = [-0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.1763540290620872, -0.17526455026455026, -0.1759911894273128]
drnnGRUreluRewards16 = [-0.17617264919621228, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.1763540290620872, -0.17580964970257765, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128]
drnnGRUreluRewards17 = [-0.17562802996914942, -0.17671654929577466, -0.1763540290620872, -0.1776214552648934, -0.17508269018743108, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17544633017412387, -0.1763540290620872, -0.17544633017412387]
drnnGRUreluRewards18 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1768976897689769, -0.17544633017412387, -0.17617264919621228, -0.1768976897689769]
drnnGRUreluRewards19 = [-0.1768976897689769, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128, -0.1759911894273128, -0.177078750549934]
drnnGRUreluRewards20 = [-0.1759911894273128, -0.1768976897689769, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17798286090969018, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards21 = [-0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17508269018743108, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards22 = [-0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.177078750549934, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765]
drnnGRUreluRewards23 = [-0.17580964970257765, -0.177078750549934, -0.1776214552648934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17798286090969018, -0.17544633017412387, -0.1768976897689769, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128]
drnnGRUreluRewards24 = [-0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.177078750549934, -0.17580964970257765, -0.17617264919621228]
drnnGRUreluRewards25 = [-0.1759911894273128, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17798286090969018, -0.17580964970257765]
drnnGRUreluRewards26 = [-0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.17653532907770195, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17508269018743108, -0.17653532907770195, -0.17562802996914942]
drnnGRUreluRewards27 = [-0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17653532907770195, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17508269018743108]
drnnGRUreluRewards28 = [-0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17798286090969018, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387]
drnnGRUreluRewards29 = [-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17653532907770195, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.1776214552648934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128]
drnnGRUreluRewards30 = [-0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards31 = [-0.17508269018743108, -0.17653532907770195, -0.17562802996914942, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards32 = [-0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.1768976897689769, -0.177078750549934, -0.17562802996914942, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drnnGRUreluRewards33 = [-0.17671654929577466, -0.1759911894273128, -0.177078750549934, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765]
drnnGRUreluRewards34 = [-0.17798286090969018, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards35 = [-0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387]
drnnGRUreluRewards36 = [-0.17526455026455026, -0.17562802996914942, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228]
drnnGRUreluRewards37 = [-0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765]
drnnGRUreluRewards38 = [-0.17617264919621228, -0.1759911894273128, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards39 = [-0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108]
drnnGRUreluRewards40 = [-0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUreluRewards41 = [-0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.1759911894273128, -0.1768976897689769, -0.177078750549934, -0.17580964970257765]
drnnGRUreluRewards42 = [-0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026]
drnnGRUreluRewards43 = [-0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.17508269018743108]
drnnGRUreluRewards44 = [-0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387]
drnnGRUreluRewards45 = [-0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards46 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108]
drnnGRUreluRewards47 = [-0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17671654929577466, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards48 = [-0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387]
drnnGRUreluRewards49 = [-0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765]
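# A compact comparison of the four recurrent configurations listed above: mean makespan over
# the last five iterations (45-49). This sketch assumes the drnnLSTMtanhMakespan* lists defined
# earlier in this file are also in scope; the name final_means is an illustrative choice.
import numpy as np

final_means = {
    name: np.mean([np.mean(globals()[f"{name}{i}"]) for i in range(45, 50)])
    for name in (
        "drnnLSTMtanhMakespan",
        "drnnLSTMreluMakespan",
        "drnnGRUtanhMakespan",
        "drnnGRUreluMakespan",
    )
}
for name, mean_makespan in final_means.items():
    print(f"{name}: {mean_makespan:.2f}")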
# Deep Reinforcement Learning: 5 Dense layers, tanh activation function, 12 episodes, 50 iterations
drlTanhMakespan0 = [794, 794, 805, 799, 810, 800, 794, 810, 804, 806, 812, 808]
drlTanhMakespan1 = [796, 795, 795, 798, 799, 800, 800, 795, 797, 796, 797, 799]
drlTanhMakespan2 = [800, 797, 798, 801, 799, 800, 796, 795, 797, 796, 794, 798]
drlTanhMakespan3 = [800, 795, 799, 796, 799, 798, 795, 799, 795, 799, 798, 796]
drlTanhMakespan4 = [809, 795, 795, 800, 797, 795, 798, 798, 799, 799, 798, 798]
drlTanhMakespan5 = [795, 795, 795, 799, 795, 798, 795, 800, 795, 796, 795, 805]
drlTanhMakespan6 = [794, 800, 795, 793, 798, 795, 794, 798, 795, 799, 795, 796]
drlTanhMakespan7 = [795, 795, 795, 795, 798, 795, 797, 797, 795, 795, 798, 797]
drlTanhMakespan8 = [795, 795, 795, 794, 800, 800, 794, 795, 794, 794, 797, 795]
drlTanhMakespan9 = [793, 794, 796, 795, 796, 800, 794, 797, 793, 795, 798, 795]
drlTanhMakespan10 = [795, 795, 797, 794, 795, 798, 797, 795, 798, 794, 794, 794]
drlTanhMakespan11 = [795, 795, 795, 795, 797, 795, 795, 794, 795, 795, 795, 794]
drlTanhMakespan12 = [794, 798, 795, 794, 795, 795, 795, 797, 799, 795, 795, 795]
drlTanhMakespan13 = [795, 797, 795, 800, 796, 795, 796, 795, 795, 795, 798, 794]
drlTanhMakespan14 = [795, 795, 796, 794, 794, 794, 797, 795, 798, 795, 795, 793]
drlTanhMakespan15 = [799, 794, 795, 795, 795, 796, 801, 797, 795, 794, 795, 799]
drlTanhMakespan16 = [795, 795, 796, 798, 795, 795, 795, 795, 795, 798, 798, 796]
drlTanhMakespan17 = [800, 798, 795, 795, 798, 794, 795, 795, 797, 795, 796, 794]
drlTanhMakespan18 = [797, 800, 798, 797, 796, 794, 799, 797, 795, 796, 799, 798]
drlTanhMakespan19 = [797, 800, 795, 794, 794, 796, 795, 798, 796, 798, 797, 795]
drlTanhMakespan20 = [794, 795, 795, 799, 798, 797, 795, 795, 798, 795, 798, 795]
drlTanhMakespan21 = [796, 795, 795, 795, 795, 797, 798, 794, 797, 795, 796, 794]
drlTanhMakespan22 = [799, 796, 795, 795, 795, 795, 796, 795, 796, 798, 796, 795]
drlTanhMakespan23 = [799, 799, 795, 796, 796, 799, 796, 797, 794, 794, 798, 796]
drlTanhMakespan24 = [795, 795, 797, 800, 797, 795, 795, 796, 795, 795, 798, 799]
drlTanhMakespan25 = [795, 797, 795, 795, 795, 795, 800, 796, 795, 797, 795, 795]
drlTanhMakespan26 = [795, 795, 799, 794, 797, 794, 794, 798, 794, 796, 795, 798]
drlTanhMakespan27 = [796, 796, 795, 796, 798, 797, 794, 795, 794, 794, 794, 798]
drlTanhMakespan28 = [795, 795, 794, 798, 796, 796, 800, 797, 797, 796, 795, 794]
drlTanhMakespan29 = [795, 795, 798, 800, 797, 794, 796, 794, 792, 794, 794, 795]
drlTanhMakespan30 = [798, 797, 795, 799, 797, 800, 798, 799, 797, 800, 794, 796]
drlTanhMakespan31 = [794, 795, 800, 798, 800, 794, 800, 798, 799, 798, 798, 798]
drlTanhMakespan32 = [795, 795, 795, 794, 794, 794, 793, 795, 794, 793, 794, 795]
drlTanhMakespan33 = [794, 797, 792, 794, 795, 795, 797, 795, 795, 794, 792, 795]
drlTanhMakespan34 = [795, 794, 795, 798, 795, 796, 794, 795, 794, 794, 795, 794]
drlTanhMakespan35 = [796, 794, 797, 793, 794, 798, 795, 794, 793, 793, 795, 794]
drlTanhMakespan36 = [795, 795, 794, 795, 795, 795, 794, 795, 795, 793, 795, 794]
drlTanhMakespan37 = [794, 794, 798, 794, 794, 796, 795, 794, 793, 795, 795, 792]
drlTanhMakespan38 = [794, 796, 795, 794, 798, 798, 795, 795, 794, 794, 795, 794]
drlTanhMakespan39 = [794, 795, 795, 796, 792, 794, 795, 794, 795, 794, 794, 795]
drlTanhMakespan40 = [798, 795, 794, 795, 794, 794, 793, 795, 794, 794, 797, 794]
drlTanhMakespan41 = [795, 792, 795, 794, 794, 795, 794, 795, 792, 797, 795, 795]
drlTanhMakespan42 = [792, 794, 794, 795, 794, 794, 795, 794, 792, 794, 794, 794]
drlTanhMakespan43 = [794, 796, 794, 793, 795, 795, 793, 798, 794, 794, 798, 794]
drlTanhMakespan44 = [794, 794, 794, 794, 795, 794, 793, 794, 794, 795, 795, 794]
drlTanhMakespan45 = [790, 794, 793, 794, 793, 794, 795, 794, 791, 795, 795, 794]
drlTanhMakespan46 = [792, 794, 794, 794, 794, 794, 794, 793, 794, 794, 794, 794]
drlTanhMakespan47 = [794, 794, 794, 794, 794, 794, 794, 794, 792, 795, 793, 795]
drlTanhMakespan48 = [794, 794, 792, 792, 797, 794, 792, 794, 794, 795, 794, 795]
drlTanhMakespan49 = [795, 794, 794, 796, 794, 797, 794, 794, 794, 794, 794, 794]
drlTanhMakespan50 = [794, 792, 795, 794, 794, 794, 794, 794, 795, 794, 795, 794]
drlTanhMakespan51 = [794, 792, 796, 795, 794, 794, 795, 794, 795, 795, 795, 794]
drlTanhMakespan52 = [794, 794, 795, 792, 795, 795, 795, 792, 794, 793, 795, 794]
drlTanhMakespan53 = [794, 792, 794, 792, 794, 794, 794, 795, 795, 794, 794, 792]
drlTanhMakespan54 = [795, 793, 794, 794, 794, 792, 795, 794, 794, 792, 794, 796]
drlTanhMakespan55 = [795, 794, 794, 795, 795, 793, 794, 795, 794, 797, 795, 792]
drlTanhMakespan56 = [795, 795, 792, 795, 794, 795, 794, 794, 794, 795, 795, 795]
drlTanhMakespan57 = [795, 792, 795, 794, 795, 795, 792, 795, 794, 797, 792, 792]
drlTanhMakespan58 = [795, 795, 794, 795, 792, 794, 794, 794, 792, 792, 792, 793]
drlTanhMakespan59 = [795, 794, 792, 794, 794, 794, 792, 794, 794, 794, 793, 795]
drlTanhMakespan60 = [794, 795, 795, 795, 798, 794, 794, 794, 794, 794, 794, 792]
drlTanhMakespan61 = [792, 795, 794, 794, 795, 794, 792, 795, 795, 794, 794, 795]
drlTanhMakespan62 = [795, 794, 794, 794, 799, 794, 792, 794, 795, 795, 794, 793]
drlTanhMakespan63 = [791, 795, 792, 796, 794, 794, 792, 795, 793, 794, 792, 794]
drlTanhRewards0 = [-0.17508269018743108, -0.17508269018743108, -0.177078750549934, -0.1759911894273128, -0.17798286090969018, -0.17617264919621228, -0.17508269018743108, -0.17798286090969018, -0.1768976897689769, -0.17725973169122497, -0.17834394904458598, -0.1776214552648934]
drlTanhRewards1 = [-0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.17526455026455026, -0.17562802996914942, -0.17544633017412387, -0.17562802996914942, -0.1759911894273128]
drlTanhRewards2 = [-0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.1763540290620872, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17544633017412387, -0.17580964970257765]
drlTanhRewards3 = [-0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards4 = [-0.1778021978021978, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drlTanhRewards5 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.177078750549934]
drlTanhRewards6 = [-0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17544633017412387]
drlTanhRewards7 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942]
drlTanhRewards8 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drlTanhRewards9 = [-0.1749007498897221, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026]
drlTanhRewards10 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards11 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards12 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards13 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards14 = [-0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.1749007498897221]
drlTanhRewards15 = [-0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17562802996914942, -0.1763540290620872, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128]
drlTanhRewards16 = [-0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards17 = [-0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drlTanhRewards18 = [-0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.17562802996914942, -0.17544633017412387, -0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drlTanhRewards19 = [-0.17562802996914942, -0.17617264919621228, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026]
drlTanhRewards20 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026]
drlTanhRewards21 = [-0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drlTanhRewards22 = [-0.1759911894273128, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026]
drlTanhRewards23 = [-0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards24 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128]
drlTanhRewards25 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards26 = [-0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765]
drlTanhRewards27 = [-0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards28 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards29 = [-0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards30 = [-0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.1759911894273128, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387]
drlTanhRewards31 = [-0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drlTanhRewards32 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards33 = [-0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026]
drlTanhRewards34 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards35 = [-0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards36 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards37 = [-0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224]
drlTanhRewards38 = [-0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards39 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards40 = [-0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108]
drlTanhRewards41 = [-0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards42 = [-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards43 = [-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards44 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards45 = [-0.1749007498897221, -0.17435444714191128, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17453662842012357, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards46 = [-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards47 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026]
drlTanhRewards48 = [-0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards49 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards50 = [-0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards51 = [-0.17508269018743108, -0.17471872931833224, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards52 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards53 = [-0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224]
drlTanhRewards54 = [-0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17544633017412387]
drlTanhRewards55 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17471872931833224]
drlTanhRewards56 = [-0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards57 = [-0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17471872931833224]
drlTanhRewards58 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17471872931833224, -0.1749007498897221]
drlTanhRewards59 = [-0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026]
drlTanhRewards60 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224]
drlTanhRewards61 = [-0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards62 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1759911894273128, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221]
drlTanhRewards63 = [-0.17453662842012357, -0.17471872931833224, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108]
# Deep Reinforcement Learning: 5 Dense layers, relu activation function, 12 episodes, 50 iterations
drlReluMakespan0 = [796, 798, 809, 798, 796, 800, 798, 799, 800, 794, 800, 798]
drlReluMakespan1 = [800, 800, 801, 806, 804, 806, 808, 798, 796, 796, 798, 800]
drlReluMakespan2 = [805, 805, 798, 800, 800, 798, 801, 799, 800, 806, 800, 800]
drlReluMakespan3 = [798, 799, 798, 795, 798, 808, 803, 800, 798, 795, 799, 800]
drlReluMakespan4 = [805, 805, 799, 796, 798, 803, 799, 800, 800, 800, 795, 794]
drlReluMakespan5 = [799, 796, 795, 800, 801, 796, 800, 795, 803, 800, 800, 805]
drlReluMakespan6 = [799, 795, 798, 794, 805, 796, 795, 799, 798, 795, 804, 796]
drlReluMakespan7 = [795, 798, 799, 798, 798, 799, 795, 794, 796, 794, 795, 805]
drlReluMakespan8 = [805, 794, 794, 795, 798, 795, 798, 795, 799, 800, 796, 798]
drlReluMakespan9 = [797, 797, 797, 794, 795, 794, 794, 797, 796, 795, 801, 799]
drlReluMakespan10 = [799, 794, 797, 795, 794, 794, 795, 795, 795, 796, 797, 799]
drlReluMakespan11 = [796, 798, 800, 795, 805, 794, 798, 796, 795, 794, 798, 795]
drlReluMakespan12 = [800, 795, 794, 798, 800, 805, 800, 798, 804, 799, 794, 803]
drlReluMakespan13 = [796, 799, 798, 794, 800, 794, 795, 796, 798, 795, 794, 799]
drlReluMakespan14 = [795, 798, 798, 798, 805, 798, 798, 798, 795, 794, 800, 796]
drlReluMakespan15 = [795, 798, 795, 805, 798, 794, 795, 798, 796, 794, 795, 796]
drlReluMakespan16 = [798, 795, 796, 799, 796, 798, 798, 795, 795, 795, 795, 799]
drlReluMakespan17 = [794, 798, 796, 798, 795, 801, 794, 798, 797, 795, 796, 801]
drlReluMakespan18 = [798, 795, 798, 798, 801, 798, 795, 795, 797, 800, 794, 800]
drlReluMakespan19 = [795, 798, 794, 800, 796, 795, 798, 797, 795, 794, 796, 796]
drlReluMakespan20 = [794, 794, 795, 795, 795, 795, 796, 798, 799, 799, 799, 795]
drlReluMakespan21 = [802, 796, 794, 797, 797, 800, 794, 794, 804, 803, 798, 797]
drlReluMakespan22 = [794, 795, 795, 795, 798, 795, 794, 799, 794, 803, 795, 794]
drlReluMakespan23 = [794, 798, 799, 794, 795, 795, 799, 795, 796, 795, 797, 799]
drlReluMakespan24 = [795, 794, 797, 800, 794, 795, 795, 795, 795, 800, 800, 798]
drlReluMakespan25 = [795, 794, 797, 796, 798, 795, 795, 794, 799, 795, 794, 798]
drlReluMakespan26 = [801, 795, 800, 794, 794, 796, 800, 798, 798, 799, 794, 796]
drlReluMakespan27 = [796, 795, 796, 795, 796, 795, 795, 800, 794, 794, 794, 796]
drlReluMakespan28 = [794, 794, 795, 796, 794, 795, 795, 797, 794, 794, 796, 795]
drlReluMakespan29 = [793, 794, 795, 800, 795, 795, 794, 798, 798, 796, 795, 794]
drlReluMakespan30 = [802, 794, 794, 798, 794, 796, 805, 794, 800, 794, 796, 794]
drlReluMakespan31 = [797, 794, 794, 794, 800, 800, 794, 794, 798, 795, 794, 798]
drlReluMakespan32 = [794, 798, 794, 795, 794, 795, 798, 794, 794, 795, 794, 798]
drlReluMakespan33 = [798, 794, 798, 795, 794, 793, 797, 798, 794, 794, 801, 793]
drlReluMakespan34 = [794, 798, 794, 795, 794, 793, 798, 795, 794, 800, 794, 795]
drlReluMakespan35 = [794, 796, 794, 796, 806, 795, 795, 795, 796, 795, 795, 799]
drlReluMakespan36 = [795, 794, 794, 796, 796, 798, 794, 796, 794, 795, 794, 795]
drlReluMakespan37 = [795, 794, 795, 798, 794, 794, 794, 794, 794, 794, 795, 797]
drlReluMakespan38 = [794, 798, 794, 798, 797, 794, 794, 795, 795, 794, 795, 795]
drlReluMakespan39 = [797, 794, 795, 796, 796, 796, 798, 794, 794, 795, 794, 798]
drlReluMakespan40 = [798, 795, 795, 798, 792, 795, 795, 794, 795, 794, 798, 794]
drlReluMakespan41 = [795, 794, 794, 794, 794, 794, 798, 793, 794, 794, 794, 793]
drlReluMakespan42 = [794, 794, 794, 794, 799, 794, 795, 794, 796, 794, 794, 794]
drlReluMakespan43 = [794, 797, 795, 794, 795, 794, 794, 795, 794, 794, 793, 794]
drlReluMakespan44 = [794, 792, 793, 794, 794, 796, 794, 798, 795, 794, 794, 796]
drlReluMakespan45 = [795, 794, 799, 794, 794, 793, 794, 795, 795, 793, 796, 794]
drlReluMakespan46 = [794, 796, 794, 794, 794, 794, 794, 793, 799, 792, 794, 794]
drlReluMakespan47 = [795, 794, 793, 794, 796, 797, 794, 794, 795, 794, 794, 794]
drlReluMakespan48 = [794, 794, 794, 792, 794, 794, 795, 794, 794, 794, 794, 794]
drlReluMakespan49 = [794, 794, 795, 792, 797, 797, 794, 794, 792, 800, 795, 795]
drlReluRewards0 = [-0.17544633017412387, -0.17580964970257765, -0.1778021978021978, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drlReluRewards1 = [-0.17617264919621228, -0.17617264919621228, -0.1763540290620872, -0.17725973169122497, -0.1768976897689769, -0.17725973169122497, -0.1776214552648934, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228]
drlReluRewards2 = [-0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.1759911894273128, -0.17617264919621228, -0.17725973169122497, -0.17617264919621228, -0.17617264919621228]
drlReluRewards3 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.1776214552648934, -0.17671654929577466, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228]
drlReluRewards4 = [-0.177078750549934, -0.177078750549934, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17671654929577466, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17526455026455026, -0.17508269018743108]
drlReluRewards5 = [-0.1759911894273128, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.1763540290620872, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17671654929577466, -0.17617264919621228, -0.17617264919621228, -0.177078750549934]
drlReluRewards6 = [-0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.177078750549934, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.17544633017412387]
drlReluRewards7 = [-0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.177078750549934]
drlReluRewards8 = [-0.177078750549934, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drlReluRewards9 = [-0.17562802996914942, -0.17562802996914942, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128]
drlReluRewards10 = [-0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17562802996914942, -0.1759911894273128]
drlReluRewards11 = [-0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.177078750549934, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026]
drlReluRewards12 = [-0.17617264919621228, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.17580964970257765, -0.1768976897689769, -0.1759911894273128, -0.17508269018743108, -0.17671654929577466]
drlReluRewards13 = [-0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128]
drlReluRewards14 = [-0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387]
drlReluRewards15 = [-0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.177078750549934, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387]
drlReluRewards16 = [-0.17580964970257765, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128]
drlReluRewards17 = [-0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.1763540290620872]
drlReluRewards18 = [-0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228]
drlReluRewards19 = [-0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387]
drlReluRewards20 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17526455026455026]
drlReluRewards21 = [-0.17653532907770195, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.17562802996914942]
drlReluRewards22 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17671654929577466, -0.17526455026455026, -0.17508269018743108]
drlReluRewards23 = [-0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.1759911894273128]
drlReluRewards24 = [-0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drlReluRewards25 = [-0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards26 = [-0.1763540290620872, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17544633017412387]
drlReluRewards27 = [-0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387]
drlReluRewards28 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drlReluRewards29 = [-0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drlReluRewards30 = [-0.17653532907770195, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108]
drlReluRewards31 = [-0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765]
drlReluRewards32 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards33 = [-0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.1763540290620872, -0.1749007498897221]
drlReluRewards34 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026]
drlReluRewards35 = [-0.17508269018743108, -0.17544633017412387, -0.17725973169122497, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128]
drlReluRewards36 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlReluRewards37 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942]
drlReluRewards38 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drlReluRewards39 = [-0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards40 = [-0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlReluRewards41 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221]
drlReluRewards42 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards43 = [-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108]
drlReluRewards44 = [-0.17508269018743108, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387]
drlReluRewards45 = [-0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17544633017412387, -0.17508269018743108]
drlReluRewards46 = [-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.1759911894273128, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108]
drlReluRewards47 = [-0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards48 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards49 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17562802996914942, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026]
if __name__ == "__main__":
##############################################
##############################################
##############################################
# Deep Recurrent Reinforcement Learning with 1 GRU layer and 4 Dense layers
drnnGRUtanhMakespan = []
drnnGRUtanhRewards = []
drnnGRUtanhMakespanList = []
drnnGRUtanhRewardsList = []
drnnGRUtanhMakespanValues = []
drnnGRUtanhRewardsValues = []
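    # The 50 per-configuration means gathered one append at a time below are
    # equivalent to the following compact form (a sketch that assumes the data
    # lists remain module-level globals):
    #   drnnGRUtanhMakespan = [np.mean(globals()[f"drnnGRUtanhMakespan{i}"]) for i in range(50)]
    #   drnnGRUtanhRewards = [np.mean(globals()[f"drnnGRUtanhRewards{i}"]) for i in range(50)]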
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan0))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan1))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan2))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan3))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan4))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan5))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan6))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan7))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan8))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan9))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan10))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan11))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan12))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan13))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan14))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan15))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan16))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan17))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan18))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan19))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan20))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan21))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan22))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan23))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan24))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan25))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan26))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan27))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan28))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan29))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan30))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan31))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan32))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan33))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan34))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan35))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan36))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan37))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan38))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan39))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan40))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan41))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan42))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan43))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan44))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan45))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan46))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan47))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan48))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan49))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards0))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards1))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards2))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards3))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards4))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards5))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards6))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards7))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards8))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards9))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards10))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards11))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards12))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards13))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards14))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards15))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards16))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards17))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards18))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards19))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards20))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards21))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards22))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards23))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards24))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards25))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards26))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards27))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards28))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards29))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards30))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards31))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards32))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards33))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards34))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards35))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards36))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards37))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards38))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards39))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards40))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards41))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards42))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards43))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards44))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards45))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards46))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards47))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards48))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards49))
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan0)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan1)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan2)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan3)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan4)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan5)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan6)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan7)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan8)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan9)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan10)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan11)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan12)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan13)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan14)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan15)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan16)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan17)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan18)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan19)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan20)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan21)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan22)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan23)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan24)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan25)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan26)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan27)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan28)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan29)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan30)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan31)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan32)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan33)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan34)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan35)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan36)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan37)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan38)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan39)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan40)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan41)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan42)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan43)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan44)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan45)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan46)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan47)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan48)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan49)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards0)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards1)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards2)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards3)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards4)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards5)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards6)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards7)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards8)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards9)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards10)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards11)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards12)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards13)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards14)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards15)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards16)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards17)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards18)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards19)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards20)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards21)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards22)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards23)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards24)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards25)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards26)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards27)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards28)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards29)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards30)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards31)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards32)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards33)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards34)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards35)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards36)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards37)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards38)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards39)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards40)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards41)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards42)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards43)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards44)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards45)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards46)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards47)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards48)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards49)
drnnGRUreluMakespan = []
drnnGRUreluRewards = []
drnnGRUreluMakespanList = []
drnnGRUreluRewardsList = []
drnnGRUreluMakespanValues = []
drnnGRUreluRewardsValues = []
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan0))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan1))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan2))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan3))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan4))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan5))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan6))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan7))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan8))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan9))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan10))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan11))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan12))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan13))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan14))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan15))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan16))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan17))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan18))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan19))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan20))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan21))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan22))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan23))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan24))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan25))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan26))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan27))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan28))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan29))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan30))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan31))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan32))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan33))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan34))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan35))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan36))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan37))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan38))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan39))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan40))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan41))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan42))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan43))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan44))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan45))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan46))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan47))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan48))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan49))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards0))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards1))
    drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards2))
""" Code that plots fields from the CMAC radar object. """
import os
from datetime import datetime
import operator
import cartopy.crs as ccrs
import netCDF4
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pyart
from pyart.graph.common import (
generate_radar_name, generate_radar_time_begin)
from .config import get_plot_values, get_field_names
plt.switch_backend('agg')
def quicklooks_ppi(radar, config, sweep=None, image_directory=None,
dd_lobes=True):
"""
    Produce PPI quicklook images for a radar object that has CMAC applied.

    Parameters
    ----------
radar : Radar
Radar object that has CMAC applied to it.
config : str
        Radar name key defined in config.py, used to look up the plotting
        values specific to that radar.
Optional Parameters
-------------------
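    sweep : int, optional
        Sweep index to plot. If None, the sweep is taken from the plot
        configuration (sweep 2 is used for radars with fewer than 4 sweeps).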
image_directory : str
        Path to the directory in which to save the CMAC images. If no path
        is given, it defaults to the user's home directory.
dd_lobes : bool
Plot DD lobes between radars if dd_lobes is True.
"""
if image_directory is None:
image_directory = os.path.expanduser('~')
radar_start_date = netCDF4.num2date(
radar.time['data'][0], radar.time['units'],
only_use_cftime_datetimes=False, only_use_python_datetimes=True)
# Retrieve the plot parameter values based on the radar.
plot_config = get_plot_values(config)
field_config = get_field_names(config)
save_name = plot_config['save_name']
date_string = datetime.strftime(radar_start_date, '%Y%m%d.%H%M%S')
combined_name = '.' + save_name + '.' + date_string
#min_lat = plot_config['min_lat']
#max_lat = plot_config['max_lat']
#min_lon = plot_config['min_lon']
#max_lon = plot_config['max_lon']
max_lat = radar.gate_latitude['data'].max() + .1
min_lat = radar.gate_latitude['data'].min() - .1
max_lon = radar.gate_longitude['data'].max() + .1
min_lon = radar.gate_longitude['data'].min() - .1
# Creating a plot of reflectivity before CMAC.
lal = np.arange(min_lat, max_lat, .8)
lol = np.arange(min_lon, max_lon, .8)
if dd_lobes:
grid_lat = np.arange(min_lat, max_lat, 0.01)
grid_lon = np.arange(min_lon, max_lon, 0.01)
facility = plot_config['facility']
if facility == 'I4':
dms_radar1_coords = [plot_config['site_i4_dms_lon'],
plot_config['site_i4_dms_lat']]
dms_radar2_coords = [plot_config['site_i5_dms_lon'],
plot_config['site_i5_dms_lat']]
elif facility == 'I5':
dms_radar1_coords = [plot_config['site_i5_dms_lon'],
plot_config['site_i5_dms_lat']]
dms_radar2_coords = [plot_config['site_i4_dms_lon'],
plot_config['site_i4_dms_lat']]
elif facility == 'I6':
dms_radar1_coords = [plot_config['site_i6_dms_lon'],
plot_config['site_i6_dms_lat']]
dms_radar2_coords = [plot_config['site_i4_dms_lon'],
plot_config['site_i4_dms_lat']]
dec_radar1 = [_dms_to_decimal(
dms_radar1_coords[0][0], dms_radar1_coords[0][1],
dms_radar1_coords[0][2]), _dms_to_decimal(
dms_radar1_coords[1][0], dms_radar1_coords[1][1],
dms_radar1_coords[1][2])]
dec_radar2 = [_dms_to_decimal(
dms_radar2_coords[0][0], dms_radar2_coords[0][1],
dms_radar2_coords[0][2]), _dms_to_decimal(
dms_radar2_coords[1][0], dms_radar2_coords[1][1],
dms_radar2_coords[1][2])]
bca = _get_bca(dec_radar2[0], dec_radar2[1], dec_radar1[0],
dec_radar1[1], grid_lon, grid_lat)
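        # bca holds the beam crossing angle between the two radars at each grid
        # point; the pi/6 and 5*pi/6 (30 and 150 degree) contours drawn on the
        # panels below outline the dual-Doppler lobes.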
grid_lon, grid_lat = np.meshgrid(grid_lon, grid_lat)
if sweep is None:
if radar.nsweeps < 4:
sweep = 2
else:
sweep = plot_config['sweep']
# Plot of the raw reflectivity from the radar.
display = pyart.graph.RadarMapDisplay(radar)
fig, ax = plt.subplots(1, 1,
subplot_kw=dict(projection=ccrs.PlateCarree()),
figsize=[12, 8])
ax.set_aspect('auto')
display.plot_ppi_map('reflectivity', sweep=sweep, resolution='50m', ax=ax,
vmin=-8, vmax=64, mask_outside=False,
cmap=pyart.graph.cm_colorblind.HomeyerRainbow,
min_lat=min_lat, min_lon=min_lon,
max_lat=max_lat, max_lon=max_lon,
lat_lines=lal, lon_lines=lol,
projection=ccrs.PlateCarree())
if dd_lobes:
ax.contour(grid_lon, grid_lat, bca,
levels=[np.pi/6, 5*np.pi/6], linewidths=2,
colors='k')
fig.savefig(
image_directory
+ '/reflectivity' + combined_name + '.png')
    plt.close(fig)
    del fig, ax
# Four panel plot of gate_id, velocity_texture, reflectivity, and
# cross_correlation_ratio.
cat_dict = {}
print('##')
print('## Keys for each gate id are as follows:')
for pair_str in radar.fields['gate_id']['notes'].split(','):
print('## ', str(pair_str))
cat_dict.update({pair_str.split(':')[1]:int(pair_str.split(':')[0])})
sorted_cats = sorted(cat_dict.items(), key=operator.itemgetter(1))
cat_colors = {'rain': 'green',
'multi_trip': 'red',
'no_scatter': 'gray',
'snow': 'cyan',
'melting': 'yellow'}
lab_colors = ['red', 'cyan', 'grey', 'green', 'yellow']
if 'ground_clutter' in radar.fields.keys():
cat_colors['clutter'] = 'black'
lab_colors = np.append(lab_colors, 'black')
if 'terrain_blockage' in radar.fields['gate_id']['notes']:
cat_colors['terrain_blockage'] = 'brown'
lab_colors = np.append(lab_colors, 'brown')
lab_colors = [cat_colors[kitty[0]] for kitty in sorted_cats]
cmap = matplotlib.colors.ListedColormap(lab_colors)
display = pyart.graph.RadarMapDisplay(radar)
fig, ax = plt.subplots(2, 2,
figsize=[15, 10], subplot_kw=dict(projection=ccrs.PlateCarree()))
ax[0, 0].set_aspect('auto')
display.plot_ppi_map('gate_id', sweep=sweep, min_lon=min_lon, ax=ax[0, 0],
max_lon=max_lon, min_lat=min_lat,
max_lat=max_lat, resolution='50m',
lat_lines=lal, lon_lines=lol, cmap=cmap,
vmin=0, vmax=6, projection=ccrs.PlateCarree())
if dd_lobes:
ax[0, 0].contour(grid_lon, grid_lat, bca,
levels=[np.pi/6, 5*np.pi/6], linewidths=2,
colors='k')
cbax = ax[0, 0]
if 'ground_clutter' in radar.fields.keys() or 'terrain_blockage' in radar.fields['gate_id']['notes']:
tick_locs = np.linspace(
0, len(sorted_cats) - 1, len(sorted_cats)) + 0.5
else:
tick_locs = np.linspace(
0, len(sorted_cats), len(sorted_cats)) + 0.5
display.cbs[-1].locator = matplotlib.ticker.FixedLocator(tick_locs)
catty_list = [sorted_cats[i][0] for i in range(len(sorted_cats))]
display.cbs[-1].formatter = matplotlib.ticker.FixedFormatter(catty_list)
display.cbs[-1].update_ticks()
ax[0, 1].set_aspect('auto')
display.plot_ppi_map('reflectivity', sweep=sweep, vmin=-8, vmax=40.0,
ax=ax[0, 1], min_lon=min_lon, max_lon=max_lon,
min_lat=min_lat,
max_lat=max_lat, lat_lines=lal, lon_lines=lol,
resolution='50m',
cmap=pyart.graph.cm_colorblind.HomeyerRainbow,
projection=ccrs.PlateCarree())
if dd_lobes:
ax[0, 1].contour(grid_lon, grid_lat, bca,
levels=[np.pi/6, 5*np.pi/6], linewidths=2,
colors='k')
ax[1, 0].set_aspect('auto')
display.plot_ppi_map('velocity_texture', sweep=sweep, vmin=0, vmax=14,
min_lon=min_lon, max_lon=max_lon, min_lat=min_lat,
max_lat=max_lat, lat_lines=lal, lon_lines=lol,
resolution='50m', ax=ax[1, 0],
title=_generate_title(
radar, 'velocity_texture', sweep),
cmap=pyart.graph.cm.NWSRef,
projection=ccrs.PlateCarree())
if dd_lobes:
        ax[1, 0].contour(grid_lon, grid_lat, bca,
                         levels=[np.pi/6, 5*np.pi/6], linewidths=2,
                         colors='k')
rhv_field = field_config['cross_correlation_ratio']
ax[1, 1].set_aspect('auto')
display.plot_ppi_map(rhv_field, sweep=sweep, vmin=.5,
vmax=1, min_lon=min_lon, max_lon=max_lon,
min_lat=min_lat, max_lat=max_lat, lat_lines=lal,
lon_lines=lol, resolution='50m', ax=ax[1, 1],
cmap=pyart.graph.cm.Carbone42,
projection=ccrs.PlateCarree())
if dd_lobes:
ax[1, 1].contour(grid_lon, grid_lat, bca,
levels=[np.pi/6, 5*np.pi/6], linewidths=2,
colors='k')
fig.savefig(
image_directory
+ '/cmac_four_panel_plot' + combined_name + '.png')
plt.close(fig)
del fig, ax, display
# Creating a plot with reflectivity corrected with gate ids.
cmac_gates = pyart.correct.GateFilter(radar)
cmac_gates.exclude_all()
cmac_gates.include_equal('gate_id', cat_dict['rain'])
cmac_gates.include_equal('gate_id', cat_dict['melting'])
cmac_gates.include_equal('gate_id', cat_dict['snow'])
display = pyart.graph.RadarMapDisplay(radar)
fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()),
figsize=[12, 8])
ax.set_aspect('auto')
display.plot_ppi_map('reflectivity',
sweep=sweep, resolution='50m',
vmin=-8, vmax=40, mask_outside=False,
cmap=pyart.graph.cm_colorblind.HomeyerRainbow,
title=_generate_title(
radar, 'masked_corrected_reflectivity',
sweep), ax=ax,
min_lat=min_lat, min_lon=min_lon,
max_lat=max_lat, max_lon=max_lon,
lat_lines=lal, lon_lines=lol,
gatefilter=cmac_gates,
projection=ccrs.PlateCarree())
if dd_lobes:
ax.contour(grid_lon, grid_lat, bca,
levels=[np.pi/6, 5*np.pi/6], linewidths=2,
colors='k')
fig.savefig(
image_directory
+ '/masked_corrected_reflectivity' + combined_name + '.png')
plt.close(fig)
del fig, ax, display
# Creating a plot with reflectivity corrected with attenuation.
display = pyart.graph.RadarMapDisplay(radar)
fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()),
figsize=[12, 8])
ax.set_aspect('auto')
display.plot_ppi_map('corrected_reflectivity', sweep=sweep,
vmin=0, vmax=40.0, resolution='50m',
title=_generate_title(
radar, 'corrected_reflectivity',
sweep),
cmap=pyart.graph.cm_colorblind.HomeyerRainbow,
min_lat=min_lat, min_lon=min_lon,
max_lat=max_lat, max_lon=max_lon,
lat_lines=lal, lon_lines=lol, ax=ax,
projection=ccrs.PlateCarree())
if dd_lobes:
ax.contour(grid_lon, grid_lat, bca,
levels=[np.pi/6, 5*np.pi/6], linewidths=2,
colors='k')
fig.savefig(
image_directory
+ '/corrected_reflectivity' + combined_name + '.png')
plt.close(fig)
del fig, ax, display
# Creating a plot with differential phase.
phase_field = field_config['input_phidp_field']
display = pyart.graph.RadarMapDisplay(radar)
fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()),
figsize=[12, 8])
ax.set_aspect('auto')
display.plot_ppi_map(phase_field, sweep=sweep,
resolution='50m', ax=ax,
min_lat=min_lat, min_lon=min_lon,
max_lat=max_lat, max_lon=max_lon,
lat_lines=lal, lon_lines=lol,
projection=ccrs.PlateCarree())
fig.savefig(
image_directory
+ '/differential_phase' + combined_name + '.png')
plt.close(fig)
del fig, ax, display
# Creating a plot of specific attenuation.
display = pyart.graph.RadarMapDisplay(radar)
fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()),
figsize=[12, 8])
ax.set_aspect('auto')
display.plot_ppi_map('specific_attenuation', sweep=sweep, vmin=0,
vmax=1.0, resolution='50m', ax=ax,
min_lat=min_lat, min_lon=min_lon,
max_lat=max_lat, max_lon=max_lon,
lat_lines=lal, lon_lines=lol,
projection=ccrs.PlateCarree())
if dd_lobes:
ax.contour(grid_lon, grid_lat, bca,
levels=[np.pi/6, 5*np.pi/6], linewidths=2,
colors='k')
fig.savefig(
image_directory
+ '/specific_attenuation' + combined_name + '.png')
plt.close(fig)
del fig, ax, display
# Creating a plot of corrected differential phase.
display = pyart.graph.RadarMapDisplay(radar)
fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()),
figsize=[12, 8])
ax.set_aspect('auto')
display.plot_ppi_map('corrected_differential_phase', sweep=sweep,
title=_generate_title(
radar, 'corrected_differential_phase',
sweep), ax=ax,
resolution='50m', min_lat=min_lat,
min_lon=min_lon, max_lat=max_lat, max_lon=max_lon,
lat_lines=lal, lon_lines=lol,
projection=ccrs.PlateCarree())
if dd_lobes:
ax.contour(grid_lon, grid_lat, bca,
levels=[np.pi/6, 5*np.pi/6], linewidths=2,
colors='k')
fig.savefig(
image_directory
+ '/corrected_differential_phase' + combined_name + '.png')
plt.close(fig)
del fig, ax, display
# Creating a plot of corrected specific differential phase.
display = pyart.graph.RadarMapDisplay(radar)
fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()),
figsize=[12, 8])
ax.set_aspect('auto')
display.plot_ppi_map('corrected_specific_diff_phase', sweep=sweep,
vmin=0, vmax=6, resolution='50m',
title=_generate_title(
radar, 'corrected_specific_diff_phase',
sweep), ax=ax,
min_lat=min_lat, min_lon=min_lon, max_lat=max_lat,
max_lon=max_lon, lat_lines=lal, lon_lines=lol,
projection=ccrs.PlateCarree())
if dd_lobes:
ax.contour(grid_lon, grid_lat, bca,
levels=[np.pi/6, 5*np.pi/6], linewidths=2,
colors='k')
fig.savefig(
image_directory
+ '/corrected_specific_diff_phase' + combined_name + '.png')
plt.close(fig)
del fig, ax, display
# Creating a plot with region dealias corrected velocity.
display = pyart.graph.RadarMapDisplay(radar)
fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()),
figsize=[12, 8])
ax.set_aspect('auto')
display.plot_ppi_map('corrected_velocity', sweep=sweep, resolution='50m',
cmap=pyart.graph.cm.NWSVel, vmin=-30, ax=ax,
vmax=30, min_lat=min_lat, min_lon=min_lon,
max_lat=max_lat, max_lon=max_lon, lat_lines=lal,
lon_lines=lol, projection=ccrs.PlateCarree())
if dd_lobes:
ax.contour(grid_lon, grid_lat, bca,
levels=[np.pi/6, 5*np.pi/6], linewidths=2,
colors='k')
fig.savefig(
image_directory
+ '/corrected_velocity' + combined_name + '.png')
plt.close(fig)
del fig, ax, display
# Creating a plot of rain rate A
display = pyart.graph.RadarMapDisplay(radar)
fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()),
figsize=[12, 8])
ax.set_aspect('auto')
display.plot_ppi_map('rain_rate_A', sweep=sweep, resolution='50m',
vmin=0, vmax=120, min_lat=min_lat, min_lon=min_lon,
max_lat=max_lat, ax=ax, max_lon=max_lon, lat_lines=lal,
lon_lines=lol, projection=ccrs.PlateCarree())
if dd_lobes:
ax.contour(grid_lon, grid_lat, bca,
levels=[np.pi/6, 5*np.pi/6], linewidths=2,
colors='k')
fig.savefig(
image_directory
+ '/rain_rate_A' + combined_name + '.png')
plt.close(fig)
del fig, ax, display
# Creating a plot of filtered corrected differential phase.
display = pyart.graph.RadarMapDisplay(radar)
fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()),
figsize=[12, 8])
ax.set_aspect('auto')
display.plot_ppi_map('filtered_corrected_differential_phase', sweep=sweep,
title=_generate_title(
radar, 'filtered_corrected_differential_phase',
sweep),
resolution='50m', min_lat=min_lat, ax=ax,
min_lon=min_lon, max_lat=max_lat, max_lon=max_lon,
lat_lines=lal, lon_lines=lol,
cmap=pyart.graph.cm.Theodore16,
projection=ccrs.PlateCarree())
if dd_lobes:
ax.contour(grid_lon, grid_lat, bca,
levels=[np.pi/6, 5*np.pi/6], linewidths=2,
colors='k')
fig.savefig(
image_directory
+ '/filtered_corrected_differential_phase' + combined_name + '.png')
plt.close(fig)
del fig, ax, display
# Creating a plot of filtered corrected specific differential phase.
display = pyart.graph.RadarMapDisplay(radar)
fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()),
figsize=[12, 8])
ax.set_aspect('auto')
display.plot_ppi_map('filtered_corrected_specific_diff_phase', sweep=sweep,
title=_generate_title(
radar, 'filtered_corrected_specific_diff_phase',
sweep), ax=ax,
resolution='50m', min_lat=min_lat,
min_lon=min_lon, max_lat=max_lat, max_lon=max_lon,
lat_lines=lal, lon_lines=lol,
cmap=pyart.graph.cm.Theodore16,
projection=ccrs.PlateCarree())
if dd_lobes:
ax.contour(grid_lon, grid_lat, bca,
levels=[np.pi/6, 5*np.pi/6], linewidths=2,
colors='k')
fig.savefig(
image_directory
+ '/filtered_corrected_specific_diff_phase' + combined_name + '.png')
plt.close(fig)
del fig, ax, display
    # Creating a plot of specific differential attenuation.
display = pyart.graph.RadarMapDisplay(radar)
fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()),
figsize=[12, 8])
ax.set_aspect('auto')
display.plot_ppi_map('specific_differential_attenuation', sweep=sweep,
title=_generate_title(
radar, 'specific_differential_attenuation',
sweep), ax=ax,
resolution='50m', min_lat=min_lat,
min_lon=min_lon, max_lat=max_lat, max_lon=max_lon,
lat_lines=lal, lon_lines=lol, gatefilter=cmac_gates,
projection=ccrs.PlateCarree())
if dd_lobes:
ax.contour(grid_lon, grid_lat, bca,
levels=[np.pi/6, 5*np.pi/6], linewidths=2,
colors='k')
fig.savefig(
image_directory
+ '/specific_differential_attenuation' + combined_name + '.png')
plt.close(fig)
del fig, ax, display
    # Creating a plot of path integrated differential attenuation.
display = pyart.graph.RadarMapDisplay(radar)
fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()),
figsize=[12, 8])
ax.set_aspect('auto')
display.plot_ppi_map('path_integrated_differential_attenuation',
sweep=sweep,
title=_generate_title(
radar, 'path_integrated_differential_attenuation',
sweep), ax=ax,
resolution='50m', min_lat=min_lat,
min_lon=min_lon, max_lat=max_lat, max_lon=max_lon,
lat_lines=lal, lon_lines=lol, gatefilter=cmac_gates,
projection=ccrs.PlateCarree())
if dd_lobes:
ax.contour(grid_lon, grid_lat, bca,
levels=[np.pi/6, 5*np.pi/6], linewidths=2,
colors='k')
fig.savefig(
image_directory
+ '/path_integrated_differential_attenuation' + combined_name + '.png')
plt.close(fig)
del fig, ax, display
    # Creating a plot of corrected differential reflectivity.
display = pyart.graph.RadarMapDisplay(radar)
fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()),
figsize=[12, 8])
ax.set_aspect('auto')
display.plot_ppi_map('corrected_differential_reflectivity', sweep=sweep,
title=_generate_title(
radar, 'corrected_differential_reflectivity',
sweep), ax=ax,
resolution='50m', min_lat=min_lat,
min_lon=min_lon, max_lat=max_lat, max_lon=max_lon,
lat_lines=lal, lon_lines=lol, gatefilter=cmac_gates,
projection=ccrs.PlateCarree())
if dd_lobes:
ax.contour(grid_lon, grid_lat, bca,
levels=[np.pi/6, 5*np.pi/6], linewidths=2,
colors='k')
fig.savefig(
image_directory
+ '/corrected_differential_reflectivity' + combined_name + '.png')
plt.close(fig)
del fig, ax, display
    # Creating a plot of normalized coherent power.
display = pyart.graph.RadarMapDisplay(radar)
fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()),
figsize=[12, 8])
ax.set_aspect('auto')
display.plot_ppi_map('normalized_coherent_power', sweep=sweep,
resolution='50m',
title=_generate_title(
radar, 'normalized_coherent_power',
sweep),
min_lat=min_lat, min_lon=min_lon,
max_lat=max_lat, max_lon=max_lon,
lat_lines=lal, lon_lines=lol, ax=ax,
projection=ccrs.PlateCarree())
if dd_lobes:
ax.contour(grid_lon, grid_lat, bca,
levels=[np.pi/6, 5*np.pi/6], linewidths=2,
colors='k')
fig.savefig(
image_directory
+ '/normalized_coherent_power' + combined_name + '.png')
plt.close(fig)
del fig, ax, display
    # Creating a plot of signal to noise ratio.
display = pyart.graph.RadarMapDisplay(radar)
fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()),
figsize=[12, 8])
ax.set_aspect('auto')
display.plot_ppi_map('signal_to_noise_ratio', sweep=sweep,
resolution='50m',
title=_generate_title(
radar, 'signal_to_noise_ratio',
sweep),
min_lat=min_lat, min_lon=min_lon,
max_lat=max_lat, max_lon=max_lon,
lat_lines=lal, lon_lines=lol, ax=ax,
projection=ccrs.PlateCarree())
if dd_lobes:
ax.contour(grid_lon, grid_lat, bca,
levels=[np.pi/6, 5*np.pi/6], linewidths=2,
colors='k')
fig.savefig(
image_directory
+ '/signal_to_noise_ratio' + combined_name + '.png')
plt.close(fig)
del fig, ax, display
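# A minimal usage sketch (illustrative only: the file path and the 'xsapr_i5'
# config name below are assumptions, not values shipped with this module --
# substitute a real CMAC-processed radar file and a config entry defined in
# config.py):
#
#     import pyart
#     radar = pyart.io.read('/path/to/cmac_radar_file.nc')
#     quicklooks_ppi(radar, 'xsapr_i5', sweep=1,
#                    image_directory='/tmp/quicklooks', dd_lobes=False)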
def _generate_title(radar, field, sweep):
""" Generates a title for each plot. """
time_str = generate_radar_time_begin(radar).isoformat() + 'Z'
fixed_angle = radar.fixed_angle['data'][sweep]
line_one = "%s %.1f Deg. %s " % (generate_radar_name(radar), fixed_angle,
time_str)
field_name = str(field)
field_name = field_name.replace('_', ' ')
field_name = field_name[0].upper() + field_name[1:]
return line_one + '\n' + field_name
def _get_bca(rad1_lon, rad1_lat, rad2_lon, rad2_lat,
grid_lon, grid_lat):
# Beam crossing angle needs cartesian coordinate.
p = ccrs.PlateCarree()
p = p.as_geocentric()
rad1 = p.transform_points(ccrs.PlateCarree().as_geodetic(),
np.array(rad1_lon),
np.array(rad1_lat))
rad2 = p.transform_points(ccrs.PlateCarree().as_geodetic(),
np.array(rad2_lon),
np.array(rad2_lat))
grid_lon, grid_lat = np.meshgrid(grid_lon, grid_lat)
grid = p.transform_points(ccrs.PlateCarree().as_geodetic(),
grid_lon, grid_lat,
np.zeros(grid_lon.shape))
# Create grid with Radar 1 in center.
x = grid[:, :, 0] - rad1[0, 0]
y = grid[:, :, 1] - rad1[0, 1]
rad2 = rad2 - rad1
a = np.sqrt(np.multiply(x, x) + np.multiply(y, y))
b = np.sqrt(pow(x - rad2[0, 0], 2) + pow(y - rad2[0, 1], 2))
c = np.sqrt(rad2[0, 0] * rad2[0, 0] + rad2[0, 1] * rad2[0, 1])
    theta_1 = np.arccos(x/a)
import numpy as np
from collections import namedtuple
from sklearn.utils.extmath import cartesian
from scipy.stats import mode
from scipy.stats import itemfreq
from attrdict import AttrDict
import pdb
def max_product_update_var(state, messages, sender_id, recipient_id):
variable_index = sender_id[1:]
factor_index = recipient_id[1:]
outgoing_message = MaxProductVariableNode(sender_id,
messages).update_edge_message(recipient_id)
return outgoing_message
def max_product_update_fac(state, messages, sender_id, recipient_id):
variable_index = sender_id[1:]
factor_index = recipient_id[1:]
outgoing_message = MaxProductFactorNode(sender_id,
messages).update_edge_message(recipient_id)
return outgoing_message
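# Note on the expected inputs (inferred from MaxProductNode below and stated as
# an assumption, not a spec): `messages` maps each neighbouring node id to a
# scalar in [0, 1] that is treated as the probability of the "on" state, and
# node ids look like 'v<i>' for variables and 'f<j>' for factors, so
# sender_id[1:] recovers the index. A hypothetical call:
#
#     messages = {'f1': 0.3, 'f4': 0.9}
#     out = max_product_update_var(state=None, messages=messages,
#                                  sender_id='v2', recipient_id='f1')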
class MaxProductNode():
def __init__(self, node_id, incoming_messages):
self.node_id = node_id
self.incoming_messages = [AttrDict({'message':
np.array([1-neighbor_message, neighbor_message]), 'variable_cost': 1,
'node_id': neighbor_id}) for neighbor_id, neighbor_message in
incoming_messages.items()]
class MaxProductVariableNode(MaxProductNode):
def __init__(self, variable_id, incoming_messages):
MaxProductNode.__init__(self, variable_id, incoming_messages)
def update_edge_message(self, neighbor_to_update):
updated_edges = self.update_edges()
return [edge.message for edge in updated_edges if edge.node_id ==
neighbor_to_update][0][1]
def update_edges(self):
edges = self.incoming_messages
node_state = self.__node_state_from_edges(edges)
new_edges = self.__edges_from_node_state(node_state, edges)
return new_edges
def update_edge_marginals(self, edges):
marginal = self.__marginals_from_edges(edges)
edges_with_marginals = self.__edges_from_marginals(marginal, edges)
return edges_with_marginals
def __node_state_from_edges(self, edges):
variable_cost_mean = edges[0].variable_cost
variable_cost = variable_cost_mean#np.sign(variable_cost_mean)*np.random.exponential(np.abs(variable_cost_mean))
message_product = np.array([1, np.exp(-1*variable_cost)])*self.__compute_message_product(edges)
return self.__normalize_message(message_product)
def __edges_from_node_state(self, node_state, edges):
return [self.__compute_new_neighbor_message(node_state, edge) for edge in edges]
def __marginals_from_edges(self, edges):
unnormalized_marginal = self.__node_state_from_edges(edges)
marginal = self.__normalize_message(unnormalized_marginal)
return marginal
def __edges_from_marginals(self, marginal, edges):
[setattr(edge, 'message', marginal) for edge in edges]
return edges
# Helper Methods
def __compute_message_product(self, edges):
edge_array = np.array([edge.message for edge in edges])
message_product = np.prod(edge_array, axis=0)
return message_product
def __compute_new_neighbor_message(self, message_product, edge):
new_edge_message = \
self.__normalize_message(np.nan_to_num(message_product/edge.message))
edge.message = new_edge_message
return edge
def __normalize_message(self, message):
noise = 1#np.array([0,1])*np.exp(np.random.normal())
return message/float(message.sum()) if message.sum() > 0 else np.array([0.5, 0.5])*noise
class MaxProductFactorNode(MaxProductNode):
def __init__(self, factor_id, incoming_messages):
MaxProductNode.__init__(self, factor_id, incoming_messages)
if '4' in factor_id:
self.incoming_messages = [AttrDict({
'message': np.array([1-neighbor_message, neighbor_message]),
'variable_cost': 1,
'node_id': neighbor_id,
'id': factor_id,
'decimation_status': 0,
'factor_function': (np.ones(len(incoming_messages)), np.array([1,1]))}) for neighbor_id, neighbor_message in
incoming_messages.items()]
else:
self.incoming_messages = [AttrDict({
                'message': np.array([1-neighbor_message, neighbor_message])
"""This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
import os
import cv2
from ffwm.util.flow_util import flow2img
from os.path import basename
def tensor2im(input_image, idx=0, imtype=np.uint8):
if not isinstance(input_image, np.ndarray):
if isinstance(input_image, torch.Tensor): # get the data from a variable
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[idx].cpu().float().numpy() # convert it into a numpy array
if image_numpy.shape[0] == 1: # grayscale to RGB
image_numpy = np.tile((image_numpy - 0.5) * 2, (3, 1, 1))
image_numpy = (np.transpose(image_numpy, (1, 2, 0))) * 255.0
else: # if it is a numpy array, do nothing
image_numpy = input_image
return image_numpy.astype(imtype)
def tensor2flow(flow, idx=0, imtype=np.uint8, max=255):
### transform the flow from [-1, 1] which represent the sample location
# to [-h, h] which represent the pixel motion
B, _, H, W = flow.size()
xx = torch.arange(0, W).view(1, -1).repeat(H, 1)
yy = torch.arange(0, H).view(-1, 1).repeat(1, W)
xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1)
yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1)
base_grid = torch.cat((yy, xx), 1).float().type_as(flow)
flow_grid = torch.clamp((flow + 1) * (H / 2), 0, H - 1)
flow_grid = torch.cat((flow_grid[:, 1:2, :, :], flow_grid[:, 0:1, :, :]), 1)
flow = flow_grid - base_grid
image_numpy = flow.data[idx].cpu().float().numpy()
image_numpy = flow2img(np.transpose(image_numpy, (1, 2, 0)))
return image_numpy.astype(imtype)
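# Shape note (an assumption based on the code above, not an official contract):
# `flow` is expected as a (B, 2, H, W) tensor of normalized sample locations in
# [-1, 1]; the grid arithmetic converts it to per-pixel motion and flow2img
# returns an (H, W, 3) uint8 color-coded flow image for batch element `idx`.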
def tensor2mask(input_image, idx=0, imtype=np.uint8):
if not isinstance(input_image, np.ndarray):
if isinstance(input_image, torch.Tensor): # get the data from a variable
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[idx].cpu().float().numpy() # convert it into a numpy array
if image_numpy.shape[0] == 1: # grayscale to RGB
image_numpy = np.tile(image_numpy, (3, 1, 1))
        image_numpy = (np.transpose(image_numpy, (1, 2, 0))) * 255.0 # post-processing: transpose and scaling
else: # if it is a numpy array, do nothing
image_numpy = input_image
return image_numpy.astype(imtype)
def tensor2att(input_image, idx=0, imtype=np.uint8):
if not isinstance(input_image, np.ndarray):
if isinstance(input_image, torch.Tensor): # get the data from a variable
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[idx].cpu().float().numpy() # convert it into a numpy array
        image_numpy = (np.transpose(image_numpy, (1, 2, 0))) * 255.0 # post-processing: transpose and scaling
else: # if it is a numpy array, do nothing
image_numpy = input_image
image_numpy = cv2.applyColorMap(image_numpy[:, :, 0].astype('uint8'), cv2.COLORMAP_JET)[:,:,::-1]
return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
"""Calculate and print the mean of average absolute(gradients)
Parameters:
net (torch network) -- Torch network
name (str) -- the name of the network
"""
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path):
"""Save a numpy image to the disk
Parameters:
image_numpy (numpy array) -- input numpy array
image_path (str) -- the path of the image
"""
cv2.imwrite(image_path, cv2.cvtColor(image_numpy, cv2.COLOR_RGB2BGR))
# image_pil = Image.fromarray(image_numpy)
# image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
"""Print the mean, min, max, median, std, and size of a numpy array
Parameters:
val (bool) -- if print the values of the numpy array
shp (bool) -- if print the shape of the numpy array
"""
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
            np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
import tensorflow as tf
import numpy as np
import tensorflow.contrib.slim as slim
from utils.layer_utils import resnet101_body, resnet101_head
from utils.common_utils import assign_targets_oneimg, augmentation, decode, nms, eval_OneImg
IMAGE_SHAPE = [224, 224]
class resnet101(object):
def __init__(self, class_num, anchors_num, aspect_ratios=(2.0, 1.0, 0.5),
min_level=3, max_level=7, scales_per_octave=3, batch_norm_decay=0.999, reg_loss_weight=50.0):
self.class_num = class_num
self.anchors_num = anchors_num
self.aspect_ratios = aspect_ratios
self.min_level = min_level
self.max_level = max_level
self.scales_per_octave = scales_per_octave
self.batch_norm_decay = batch_norm_decay
self.reg_loss_weight = reg_loss_weight
def forward(self, inputs, is_training=False, reuse=False):
"""
The Inference of the retinaNet
Args:
The images [batch_size, H, W, C]
Returns:
Feature_maps, class_pred, box_pred. feature_maps is a list and class_pred is [batch_size, anchors, num_class+1]
box_pred is [batch_size, anchors, 4]
"""
        # The input img_size, form: [height, width]
        self.img_size = inputs.get_shape().as_list()[1:3]
        '''
        method1: resnet101, implemented from scratch; note that it is not
        fine-tuned from pretrained weights
        '''
# Set the batch norm params
batch_norm_param = {
'decay': self.batch_norm_decay,
'epsilon': 1e-5,
'scale': True,
'is_training': is_training,
'fused': None,
}
with slim.arg_scope([slim.conv2d, slim.batch_norm], reuse=reuse):
with slim.arg_scope([slim.conv2d],
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_param,
weights_initializer=tf.random_normal_initializer(stddev=0.01),
biases_initializer=tf.zeros_initializer(),
activation_fn=tf.nn.relu):
with tf.variable_scope('resnet_body'):
layer1, layer2, layer3 = resnet101_body(inputs)
with tf.variable_scope('resnet_head'):
with slim.arg_scope([slim.conv2d],
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_param,
activation_fn=None,
weights_initializer=tf.random_normal_initializer(stddev=0.01),
weights_regularizer=slim.l2_regularizer(0.0001),
biases_initializer=tf.zeros_initializer()):
feature_maps, class_pred, box_pred = resnet101_head(layer1, layer2, layer3, self.class_num, self.anchors_num)
return feature_maps, class_pred, box_pred
def generate_anchors(self, feature_maps):
"""
To generate the anchors
Args:
[P3, P4, P5, P6, P7]
Returns:
The anchors [N, 9, 4] and the structure is [ymin, xmin, ymax, xmax]
"""
anchors_list = []
for i, feature_map in enumerate(feature_maps):
level = i + 3
base_size = [2 ** level * 4, 2 ** level * 4]
stride = [2 ** level, 2 ** level]
grid_size = feature_map.get_shape().as_list()[1:3]
# W / H = octave_scale
octave_scale = [2 ** (float(scale) / self.scales_per_octave) for scale in range(self.scales_per_octave)]
            # Use np.array operations rather than a for loop to create the anchor heights and widths, for speed
octave_grid, ratio_grid = np.meshgrid(octave_scale, self.aspect_ratios)
octave_grid = np.reshape(octave_grid, -1)
ratio_grid = np.reshape(ratio_grid, -1)
anchors_height = base_size[0] * octave_grid / np.sqrt(ratio_grid)
anchors_width = base_size[1] * octave_grid * np.sqrt(ratio_grid)
# Get a grid of box centers
grid_x = np.arange(0, grid_size[1], 1, np.float32)
grid_y = np.arange(0, grid_size[0], 1, np.float32)
grid_x, grid_y = np.meshgrid(grid_x, grid_y)
# # If img_size % stride == 0, give the offset of 0.5(P3, P4, P5), else give the offset of 0(P6, P7)
# if (level < 6):
# x_centers = (grid_x + 0.5) * stride[1]
# y_centers = (grid_y + 0.5) * stride[0]
# else:
# x_centers = grid_x * stride[1]
# y_centers = grid_y * stride[0]
x_centers = (grid_x + 0.5) * stride[1]
y_centers = (grid_y + 0.5) * stride[0]
# Normalized
x_centers, anchors_width = x_centers / self.img_size[1], anchors_width / self.img_size[1]
y_centers, anchors_height = y_centers / self.img_size[0], anchors_height / self.img_size[0]
# Concat the x,y,h,w
anchors_width, x_centers = np.meshgrid(anchors_width, x_centers)
anchors_height, y_centers = np.meshgrid(anchors_height, y_centers)
anchors = np.stack([x_centers, y_centers, anchors_width, anchors_height], axis=-1)
ymin = anchors[:, :, 1] - 0.5 * anchors[:, :, 3]
xmin = anchors[:, :, 0] - 0.5 * anchors[:, :, 2]
ymax = anchors[:, :, 1] + 0.5 * anchors[:, :, 3]
xmax = anchors[:, :, 0] + 0.5 * anchors[:, :, 2]
            anchors = np.stack([ymin, xmin, ymax, xmax], axis=-1)
# Copyright 2018-2019 The <NAME> at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/Caliban/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# <EMAIL>
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tracking data utilities that allow for upload, modification, and output."""
from io import BytesIO
import json
import numpy as np
import os
import pathlib
import tarfile
import tempfile
import re
def sorted_nicely(l):
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(l, key=alphanum_key)
def generate_lineage(tracked, daughters):
"""
generates dictionary equivalent to `lineage.json` in .trk files.
these WILL be missing `capped` and `frame_div`, since there is no way
to always correctly infer this information.
"""
lineage = {}
# fill in `label` & `frames`
for frame in range(tracked.shape[0]):
X = tracked[frame]
        for cell in map(int, np.unique(X)):
""" Prepare AAOmega data for The Cannon """
import glob
import numpy as np
from astropy.table import Table
from astropy import stats
import pyfits
DATA_DIR = "/Users/annaho/Data/AAOmega/Run_13_July"
def weighted_std(values, weights):
""" Calculate standard deviation weighted by errors """
average = np.average(values, weights=weights)
variance = np.average((values-average)**2, weights=weights)
return np.sqrt(variance)
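# Quick sanity check (hypothetical numbers): values [1, 2, 3] with weights
# [1, 1, 4] give a weighted mean of 15/6 = 2.5 and a weighted standard
# deviation of sqrt((2.25 + 0.25 + 4*0.25)/6) ~= 0.76.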
def estimate_noise(fluxes, contmask):
""" Estimate the scatter in a region of the spectrum
taken to be continuum """
nstars = fluxes.shape[0]
scatter = np.zeros(nstars)
for i,spec in enumerate(fluxes):
cont = spec[contmask]
scatter[i] = stats.funcs.mad_std(cont)
return scatter
def load_ref_spectra():
""" Pull out wl, flux, ivar from files of training spectra """
data_dir = "/Users/annaho/Data/AAOmega/ref_spectra"
# Load the files & count the number of training objects
ff = glob.glob("%s/*.txt" %data_dir)
nstars = len(ff)
print("We have %s training objects" %nstars)
# Read the first file to get the wavelength array
f = ff[0]
data = Table.read(f, format="ascii.fast_no_header")
wl = data['col1']
npix = len(wl)
print("We have %s pixels" %npix)
tr_flux = np.zeros((nstars,npix))
tr_ivar = np.zeros(tr_flux.shape)
for i,f in enumerate(ff):
data = Table.read(f, format="ascii.fast_no_header")
flux = data['col2']
tr_flux[i,:] = flux
sigma = data['col3']
tr_ivar[i,:] = 1.0 / sigma**2
return np.array(ff), wl, tr_flux, tr_ivar
def test_spectra_initial_cull():
""" cull by radial velocity """
ff_all = glob.glob("testspectra_new/*.fits")
ff = []
for f in ff_all:
print(f)
a = pyfits.open(f)
vel = a[0].header['VHELIO']
a.close()
if np.abs(vel) < 500:
ff.append(f)
nobj = len(ff)
np.savez("test_id.npz", ff)
def test_spectra_no_cull():
""" Pull out test IDs of science spectra """
data_dir = "/Users/annaho/Data/AAOmega/science_spectra"
ff_all = glob.glob("%s/*.asc" %data_dir)
np.savez("test_id.npz", ff_all)
def load_test_spectra():
""" after you've done the initial cull, load the spectra """
data_dir = "/Users/annaho/Data/AAOmega"
ff = np.load("%s/test_id.npz" %data_dir)['arr_0']
nobj = len(ff)
    wl = np.load("%s/wl.npz" %data_dir)
'''
Author @ <NAME>
Date: 22 Apr 2021
Version : 0.0.1
Github : https://github.com/Nikeshbajaj/spkit
Contact: <EMAIL>
'''
from __future__ import absolute_import, division, print_function
name = "Signal Processing toolkit | CWT"
import sys
if sys.version_info[:2] < (3, 3):
old_print = print
def print(*args, **kwargs):
flush = kwargs.pop('flush', False)
old_print(*args, **kwargs)
if flush:
file = kwargs.get('file', sys.stdout)
# Why might file=None? IDK, but it works for print(i, file=None)
file.flush() if file is not None else sys.stdout.flush()
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import fft, ifft, fftshift, ifftshift
from scipy import fftpack
from scipy.special import factorial as Fac
from scipy.special import erf
from scipy.signal import convolve
from scipy import signal
#----------CWT--------------------------------------
def CTFT2(x,fs=128,nfft=None):
if nfft is None:
X = fftpack.fftn(x)
else:
X = fftpack.fftn(x,[nfft])
N = len(X)
f = (fs/N)*(np.arange(N)-N/2)
return X,f
def iCTFT2(X,fs=128,nfft=None):
if nfft is None:
x = fftpack.ifftn(X)
else:
x = fftpack.ifftn(X,[nfft])
N = len(x)
t = np.arange(N)/fs
return x,t
def CTFT1(x,t,axis=-1):
N = len(t)
Dt = t[1] - t[0]
Df = 1. / (N * Dt)
f = Df * (np.arange(N) - N / 2)
X = Dt * fftshift(fft(x),axes=axis)
return X,f
def iCTFT1(X,f,axis=-1):
N = len(f)
Df = f[1] - f[0]
Dt = 1. / (N * Df)
t = Dt * np.arange(N)
x = ifft(fftshift(X,axes=axis))
return x,t
def CTFT(x,t, axis=-1, method=1,ignoreWar=False):
assert t.ndim == 1
assert x.shape[axis] == t.shape[0]
N = len(t)
if not(ignoreWar):
if N % 2 != 0:
raise ValueError("number of samples must be even")
Dt = t[1] - t[0]
Df = 1. / (N * Dt)
t0 = t[N // 2]
f = Df * (np.arange(N) - N / 2)
shape = np.ones(x.ndim, dtype=int)
shape[axis] = N
phase = np.ones(N)
phase[1::2] = -1
phase = phase.reshape(shape)
if method == 1:
X = Dt * fft(x * phase, axis=axis)
else:
X = Dt * fftshift(fft(x, axis=axis), axes=axis)
X *= phase
X *= np.exp(-2j * np.pi * t0 * f.reshape(shape))
X *= np.exp(-1j * np.pi * N / 2)
return X,f
def iCTFT(X,f, axis=-1, method=1,noshift=False,ignoreWar=False):
assert f.ndim == 1
assert X.shape[axis] == f.shape[0]
N = len(f)
if not(ignoreWar):
if N % 2 != 0:
raise ValueError("number of samples must be even")
f0 = f[0]
Df = f[1] - f[0]
t0 = -0.5 / Df
if noshift:
t0=0
Dt = 1. / (N * Df)
t = t0 + Dt * np.arange(N)
shape = np.ones(X.ndim, dtype=int)
shape[axis] = N
t_calc = t.reshape(shape)
f_calc = f.reshape(shape)
X_prime = X * np.exp(2j * np.pi * t0 * f_calc)
x_prime = ifft(X_prime, axis=axis)
x = N * Df * np.exp(2j * np.pi * f0 * (t_calc - t0)) * x_prime
return x,t
def GaussWave(t,f,f0,Q,t0=0):
a = (f0 * 1. / Q) ** 2
Wt = np.exp(-a * (t - t0) ** 2)* np.exp(2j * np.pi * f0 * (t - t0))
Wf = np.sqrt(np.pi / a)* np.exp(-2j * np.pi * f * t0)* np.exp(-np.pi ** 2 *((f - f0) ** 2)/ a)
return Wt,Wf
def MorlateWave(t,f=1,sig=1):
Ks = np.exp(-0.5*sig**2)
Cs = (1.0 + np.exp(-sig**2) - 2.0*np.exp(-(3.0/4)*sig**2))**(-0.5)
#Cs=1
Wt = Cs*(np.pi**(-0.25))*np.exp(-0.5*(t**2))*(np.exp(1j*sig*t)-Ks)
w=2*np.pi*f
Wf = Cs*np.pi**(-0.25)*(np.exp(-0.5*(sig-w)**2) - Ks*np.exp(-0.5*w**2) )
return Wt,Wf
def GaborWave(t,f=1,t0=0,f0=1,a=0.1):
Wt = np.exp(-((t-t0)**2)/(a**2))*np.exp(-1j*f0*(t-t0))
Wf = np.exp(-((f-f0)*a)**2)*np.exp(-1j*t0*(f-f0))
return Wt,Wf
def PoissonWave(t,f=1,n=1,method=1):
n = n + np.zeros_like(n)
if method==1:
tx = t.copy()
ind = np.where(t<0)[0]
tx[ind]=0
if len(np.asarray(n))==1:
Wt = ((tx-n)/Fac(n))*tx**(n-1)*np.exp(-tx)
Wt[:,ind] =0
print(Wt.shape)
else:
n =np.asarray(n)
Wt = ((tx-n[:,None])/Fac(n[:,None]))*tx**(n[:,None]-1)*np.exp(-tx)
print(Wt.shape)
Wt[:,ind] =0
#Wt[ind]=0
w = 2*np.pi*f
Wf = -1j*w/(1+1j*w)**(n[:,None]+1)
elif method==2:
Wt = (1.0/np.pi)*(1-t**2)/(1+t**2)**2
w = 2*np.pi*f
Wf = abs(w)*np.exp(-abs(w))
elif method==3:
Wt = (1.0/(2*np.pi))*(1-1j*t)**(-n[:,None]-1)
w = 2*np.pi*f
uw = 1.0*(w>=0)
Wf = (1.0/Fac(n[:,None]))*(w**n[:,None])*np.exp(-w)*uw
return Wt,Wf
def cMaxicanHatWave(t,f):
Wt = (2.0/np.sqrt(3))*(np.pi**(-0.25))*(
np.sqrt(np.pi)*(1.0-t**2)*np.exp(-0.5*(t**2))
-( np.sqrt(2)*1j*t + np.sqrt(np.pi)*erf(1j*t/np.sqrt(2))*(1-t**2)*np.exp(-0.5*t**2))
)
w1 = 2*np.pi*f
w = 1.0*(w1>=0)*w1
#w=w1
Wf = 2*np.sqrt(2.0/3)*np.pi**(-0.25)*(w**2)*np.exp(-0.5*(w**2))
#w0 = 2*np.pi*f0
#w0 = w0 + np.zeros_like(f0)
#Wf = 2*np.sqrt(2.0/3)*np.pi**(-0.25)*(w0*w**2)*np.exp(-0.25*(w0*w**2))
#Wf = 2*np.sqrt(2.0/3)*np.pi**(-0.25)*((w-w0[:, None])**2)*np.exp(-0.25*((w-w0[:, None])**2))
#Wf = 2*np.sqrt(2.0/3)*np.pi**(-0.25)*((w+w0[:, None])**2)*np.exp(-0.25*((w+w0[:, None])**2))
#Wf = 0.5*Wf1+0.0*Wf2
return Wt,Wf
def cMaxicanHatWaveV1(t,f,f0=0,a=1):
Wt = (2.0/np.sqrt(3))*(np.pi**(-0.25))*(
np.sqrt(np.pi)*(1.0-t**2)*np.exp(-0.5*(t**2))
-( np.sqrt(2)*1j*t + np.sqrt(np.pi)*erf(1j*t/np.sqrt(2))*(1-t**2)*np.exp(-0.5*t**2))
)*np.exp(-2*np.pi*1j*f0*t)
w1 = 2*np.pi*(f-f0)
w = 1.0*(w1>=0)*w1
#w=w1
Wf = 2*np.sqrt(2.0/3)*np.pi**(-0.25)*(w**2)*np.exp(-0.5*a*(w**2))
return Wt,Wf
def ShannonWave(t,f):
Wt = np.sinc(t/2)*np.cos(3*np.pi*t/2)
w =2*np.pi*f
v1 = (w-3*np.pi/2.0)/np.pi
v2 = (w+3*np.pi/2.0)/np.pi
Wf = 1.0*(abs(v1)<=0.5) + 1.0*(abs(v2)<=0.5)
return Wt,Wf
def ShannonWaveV1(t,f,f0=3.0/4):
w = 2*np.pi*f
w0 = 2*np.pi*f0
Wt = np.sinc(t/2)*np.cos(w0*t)
v1 = (w-w0)/np.pi
v2 = (w+w0)/np.pi
Wf = 1.0*(abs(v1)<=0.5) + 1.0*(abs(v2)<=0.5)
return Wt,Wf
def ShannonWaveV2(t,f,f0=3.0/4,fw=0.5):
w = 2*np.pi*f
w0 = 2*np.pi*f0
Wt = np.sinc(t/2)*np.exp(-2*np.pi*f0*1j*t)
v1 = (w-w0)/np.pi
v2 = (w+w0)/np.pi
Wf = 1.0*(abs(v1)<=fw) + 0.0*(abs(v2)<=fw)
return Wt,Wf
def WavePSD(x,t,wType='Gauss',PlotW=False,PlotPSD =True,dFFT=False,nFFT=False,reshape=True,**Parameters):
'''
1. For Gauss : f0 =Array, Q=float, t0=float=0, f=Freq Range
2. For Morlet : sig=Array, f=Freq Range
3. For Gabor : f0 =Array, a=float, t0=float=0, f=Freq Range
4. For Poisson: n =Array, f=Freq Range
5. For Complex MaxicanHat, f0=freq shift f=Freq Range
6. For Complex Shannon , f0=freq shift,fw=BandWidth f=Freq Range
'''
N = len(x)
if dFFT:
X,f = CTFT1(x,t)
else:
X,f = CTFT(x,t)
if nFFT:
nfft = 2*N -1
X,f = CTFT2(x,fs=128,nfft=nfft)
#N = len(t)
t1 = t-t[N//2]
#t1=t
f1 = Parameters['f'] if ('f' in Parameters.keys() and Parameters['f'] is not None) else f
#----------Gauss Wavelet------------
if wType =='Gauss':
Q = Parameters['Q']
f0 = Parameters['f0']
t0 = Parameters['t0'] if 't0' in Parameters.keys() else 0
f1 = Parameters['f'] if ('f' in Parameters.keys() and Parameters['f'] is not None) else f
Wt,Wf = GaussWave(t=t1,f=f1,t0=t0,f0=f0[:,None],Q=Q)
S=f0
#----------Morlet Wavelet------------
elif wType =='Morlet':
sig = Parameters['sig']
f1 = Parameters['f'] if ('f' in Parameters.keys() and Parameters['f'] is not None) else f
t0 = Parameters['t0'] if 't0' in Parameters.keys() else 0
#t2 = t-t[len(t)//2]
t2 = t-t0
Wt,Wf = MorlateWave(t=t1,f=f1,sig=sig[:,None])
S=sig
#----------Gabor Wavelet------------
elif wType =='Gabor':
a = Parameters['a']
f0 = Parameters['f0']
t0 = Parameters['t0'] if 't0' in Parameters.keys() else 0
f1 = Parameters['f'] if ('f' in Parameters.keys() and Parameters['f'] is not None) else f
Wt,Wf =GaborWave(t=t1,f=f1,f0=f0[:,None],a=a,t0=t0)
S=f0
#----------Poisson Wavelet------------
elif wType=='Poisson':
method = Parameters['method']
n = Parameters['n']
f1 = Parameters['f'] if ('f' in Parameters.keys() and Parameters['f'] is not None) else f
Wt,Wf = PoissonWave(t=t1,f=f1,n=n,method=method)
S=n
elif wType=='cMaxican':
f0 = Parameters['f0'] if ('f0' in Parameters.keys() and Parameters['f0'] is not None) else np.arange(5)[:,None]
a = Parameters['a'] if 'a' in Parameters.keys() else 1.0
print(a)
Wt,Wf = cMaxicanHatWaveV1(t=t1,f=f1,f0=f0,a=a)
S = f0
elif wType=='cShannon':
f0 = Parameters['f0'] if ('f0' in Parameters.keys() and Parameters['f0'] is not None) else 0.1*np.arange(10)[:,None]
fw = Parameters['fw'] if 'fw' in Parameters.keys() else 0.5
Wt,Wf = ShannonWaveV2(t=t1,f=f1,f0=f0,fw=fw)
S = f0
else:
        print('Wavelet type was not recognized')
        raise ValueError('Wavelet type was not recognized.')
if nFFT:
#XWf = X*np.conj(fftshift(Wf,axes=-1))
#XW,ty = iCTFT2(XWf,fs=128,nfft=None)
#XW = XW[:,:x.shape[0]]
Wf1 = fftshift(Wf,axes=-1)
xw = ifft(X*np.conj(Wf1))
if reshape:
XW = xw[:,:x.shape[0]]
else:
XW = xw
else:
if dFFT:
#XW,ty = iCTFT1(X*np.conj(Wf),f)
XW1 = X*np.conj(Wf)
print(XW1.shape)
XW = fftpack.ifftn(XW1,shape=[2*X.shape[0]-1],axes=[-1])
if reshape:
XW = XW[:,:x.shape[0]]
ty = np.arange(XW.shape[1])/128.0
else:
XW,ty = iCTFT(X*np.conj(Wf),f)
if PlotW:
plt.figure(figsize=(13,6))
plt.subplot(221)
plt.plot(t,Wt.T.real)
plt.plot(t,Wt.T.imag)
plt.xlim([t[0],t[-1]])
plt.subplot(222)
plt.plot(f,abs(Wf).T)
plt.xlim([f[0],f[-1]])
plt.subplot(224)
plt.plot(f,np.angle(Wf).T)
plt.xlim([f[0],f[-1]])
plt.show()
if PlotPSD:
plt.figure(figsize=(13,6))
plt.subplot(211)
plt.plot(t,x)
plt.xlim([t[0],t[-1]])
plt.subplot(212)
plt.imshow(abs(XW),aspect='auto',origin ='lower', cmap=plt.cm.jet, extent=[t[0], t[-1], S[0], S[-1]],interpolation='sinc' )
#plt.subplot(313)
#plt.imshow(np.angle(XW),aspect='auto',origin ='lower', cmap=plt.cm.jet, extent=[t[0], t[-1], S[0], S[-1]],interpolation='sinc' )
plt.show()
return XW,S
def ScalogramCWT(x,t,wType='Gauss',fs=128,PlotPSD=False,PlotW=False,fftMeth=True,interpolation='sinc',**Parameters):
'''
    Compute the scalogram using the Continuous Wavelet Transform for a given wavelet type (wType) and scale range
Parameters
----------
x: array-like, input signal,
t: array-like, time array corresponding to x, same length as x
fs: sampling rate
PlotPSD: bool, if True, plot Scalogram
    PlotW : bool, if True, plot the wavelets in time and frequency domains for the different scaling versions
    fftMeth: if True, the FFT method is used, else the convolution method is used. The FFT method is faster.
    interpolation: str, or None, interpolation used while plotting the Scalogram.
Parameters for different wavelet functions
--------
Common Parameters for all the Wavelet functions
f : array of frequency range to be analysed, e.g. np.linspace(-10,10,2*N-1), where N = len(x)
: if None, frequency range of signal is considered from -fs/2 to fs/2
: ( fs/n1*(np.arange(n1)-n1/2))
A list of wavelets will be generated for each value of scale (e.g. f0, sigma, n etc)
1. Gauss: (wType =='Gauss')
    f0 = array of center frequencies for wavelets, default: np.linspace(0.1,10,100) [scale value]
    Q = float or array of q-factor for each wavelet, e.g. 0.5 (default) or np.linspace(0.1,5,100)
      : if array, should be of same size as f0
    t0 = float=0, time shift of wavelet, or phase shift in frequency. Changing this is not recommended.
2. For Morlet: (wType =='Morlet')
sig = array of sigma values for morlet wavelet, default: np.linspace(0.1,10,100) [scale value]
fw = array of frequency range, e.g. np.linspace(-10,10,2*N-1), where N = len(x)
ref: https://en.wikipedia.org/wiki/Morlet_wavelet
3. For Gabor: (wType =='Gabor')
    Gauss and Gabor wavelets are essentially the same
    f0 = array of center frequencies for wavelets, default: np.linspace(1,40,100) [scale value]
    a = float, oscillation parameter, default 0.5,
         could be an array (not recommended), similar to Gauss, e.g. np.linspace(0.1,1,100) or np.logspace(0.001,0.5,100)
    t0 = float=0, time shift of wavelet, or phase shift in frequency. Changing this is not recommended.
4. For Poisson: (wType=='Poisson')
    n = array of integers, default np.arange(100), [scale value]
    method = 1,2,3, different implementations of the Poisson function, default 3
         keep method=3; the other methods are under development and not yet fully compatible with the framework,
ref: https://en.wikipedia.org/wiki/Poisson_wavelet
5. For Complex MaxicanHat: (wType=='cMaxican')
    f0 = array of center frequencies for wavelets, default: np.linspace(1,40,100) [scale value]
    a = float, oscillation parameter, default 1.0, could be an array (not recommended)
ref: https://en.wikipedia.org/wiki/Complex_Mexican_hat_wavelet
6. For Complex Shannon: (wType=='cShannon')
    f0 = array of center frequencies for wavelets, default: 0.1*np.arange(10) [scale value],
    fw = bandwidth of each wavelet, default 0.5, could be an array (not recommended)
ref: https://en.wikipedia.org/wiki/Shannon_wavelet
Returns
-------
XW: Complex-valued matrix of time-scale - Scalogram, with shape (len(S), len(x)). scale vs time
S : scale values
Examples
--------
import numpy as np
import matplotlib.pyplot as plt
from spkit.cwt import ScalogramCWT
# Example 1 - EEG Signal
import spkit as sp
from spkit.cwt import compare_cwt_example
x,fs = sp.load_data.eegSample_1ch()
t = np.arange(len(x))/fs
print(x.shape, t.shape)
compare_cwt_example(x,t,fs=fs)
# Example 2.1 - different wavelets
XW,S = ScalogramCWT(x,t,fs=fs,wType='Gauss',PlotPSD=True)
# Example 2.2 - set scale values and number of points
nS = 100
f0 = np.linspace(0.1,10,nS) # range of scale values - frquency
Q = np.linspace(0.1,5,nS) # different q-factor for each scale value
# Q = 0.5
XW,S = ScalogramCWT(x,t,fs=fs,wType='Gauss',PlotPSD=True,f0=f0,Q=Q)
# Example 2.3 - plot scalled wavelets too
XW,S = ScalogramCWT(x,t,fs=fs,wType='Gauss',PlotPSD=True,PlotW=True,f0=f0,Q=Q)
# Example 3
t = np.linspace(-5, 5, 10*100)
x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) + 0.1*np.sin(2*np.pi*1.25*t + 1) + 0.18*np.cos(2*np.pi*3.85*t))
xn = x + np.random.randn(len(t)) * 0.5
XW,S = ScalogramCWT(xn,t,fs=100,wType='Gauss',PlotPSD=True)
# Example 4
f0 = np.linspace(0.1,30,100)
Q = np.linspace(0.1,5,100) # or = 0.5
XW,S = ScalogramCWT(xn,t,fs=128,wType='Gauss',PlotPSD=True,f0=f0,Q=Q)
'''
N = len(x)
    n1 = 2*N-1 # considering both signals to be of length N, convolution length n1 = N+N-1
f = fs/n1*(np.arange(n1)-n1/2)
if fftMeth: X = fft(x,n=n1)
t1 = t-t[N//2] # time length for wavelets
f1 = Parameters['f'] if ('f' in Parameters and Parameters['f'] is not None) else f
t0 = Parameters['t0'] if 't0' in Parameters else 0
#----------Gauss Wavelet------------
if wType =='Gauss':
Q = Parameters['Q'] if 'Q' in Parameters else 0.5
f0 = Parameters['f0'] if 'f0' in Parameters else np.linspace(0.1,10,100)
if isinstance(Q,np.ndarray) and Q.ndim==1: Q = Q[:,None]
Wt,Wf = GaussWave(t=t1,f=f1,t0=t0,f0=f0[:,None],Q=Q)
S=f0
#----------Morlet Wavelet------------
elif wType =='Morlet':
        sig = Parameters['sig'] if 'sig' in Parameters else np.linspace(0.1,10,100)
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Add(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Add(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
# no add, shouldnt change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [float, int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.Add(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_add_floats(self):
# specific tests with floats
aug = iaa.Add(value=0.75)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
aug = iaa.Add(value=0.45)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
def test_keypoints_dont_change(self):
# keypoints shouldnt be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Add(value=1)
aug_det = iaa.Add(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_per_channel(self):
# test channelwise
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 1 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
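        # The bounds above are loose binomial limits (a rough check, not an
        # exact derivation): 400 trials at p=0.5 give an expected count of 200
        # with a standard deviation of sqrt(400*0.25) = 10, so 150..250 is a
        # roughly +/-5 sigma window and should essentially never fail by chance.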
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Add(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
aug = iaa.Add(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
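# float16 offers only ~3 significant decimal digits, so it gets a much looser tolerance than float32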
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.Add((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10)
class TestAddElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.AddElementwise(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.AddElementwise(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
# no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_add_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.AddElementwise(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
aug = iaa.AddElementwise(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AddElementwise(value=1)
aug_det = iaa.AddElementwise(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(-50, 50))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
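# elementwise value sampling: almost all neighbouring pixel values should differ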
assert nb_different > 0.9 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
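# per_channel=0.5: roughly half of the 400 runs should show per-channel behaviour (channel sums covering 0..3)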
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.AddElementwise(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.AddElementwise(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.AddElementwise((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class TestAdditiveGaussianNoise(unittest.TestCase):
def setUp(self):
reseed()
def test_loc_zero_scale_zero(self):
# no noise, shouldn't change anything
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_loc_zero_scale_nonzero(self):
# zero-centered noise
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_std_dev_of_added_noise_matches_scale(self):
# std correct?
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0])
values = np.array(values)
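# the std of the sampled values, normalized by 255, should be close to the requested scale of 0.2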
assert np.min(values) == 0
assert 0.1 < np.std(values) / 255.0 < 0.4
def test_nonzero_loc(self):
# non-zero loc
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.25 * 255, scale=0.01 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0] - 128)
values = np.array(values)
assert 54 < np.average(values) < 74  # loc=0.25*255 should shift values by roughly 64 on average
def test_tuple_as_loc(self):
# varying locs
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=(0, 0.5 * 255), scale=0.0001 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_loc(self):
# varying locs by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=iap.Choice([-20, 20]), scale=0.0001 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
seen = [0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
mean = np.mean(observed)
diff_m20 = abs(mean - (128-20))
diff_p20 = abs(mean - (128+20))
if diff_m20 <= 1:
seen[0] += 1
elif diff_p20 <= 1:
seen[1] += 1
else:
assert False
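# each of the two loc values (-20, +20) should be observed in roughly half of the 200 iterations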
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_tuple_as_scale(self):
# varying stds
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=(0.01 * 255, 0.2 * 255))
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_scale(self):
# varying stds by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=iap.Choice([1, 20]))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 128
seen = [0, 0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
std = np.std(observed.astype(np.int32) - 128)
diff_1 = abs(std - 1)
diff_20 = abs(std - 20)
if diff_1 <= 2:
seen[0] += 1
elif diff_20 <= 5:
seen[1] += 1
else:
seen[2] += 1
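# each of the two scales (1 and 20) should be sampled in roughly half of the 200 iterations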
assert seen[2] <= 5
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(loc="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(scale="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.5, scale=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.AdditiveGaussianNoise(scale=(0.1, 10), per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class TestDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
# no dropout, shouldn't change anything
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Dropout(p=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_p_is_one(self):
# 100% dropout, should drop everything
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Dropout(p=1.0)
observed = aug.augment_images(images)
expected = np.zeros((1, 512, 512, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.zeros((512, 512, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_p_is_50_percent(self):
# 50% dropout
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Dropout(p=0.5)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
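# with p=0.5, between 35% and 65% of the pixels should be zeroed in each augmented image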
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_tuple_as_p(self):
# varying p
aug = iaa.Dropout(p=(0.0, 1.0))
aug_det = aug.to_deterministic()
images = np.ones((1, 8, 8, 1), dtype=np.uint8) * 255
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_list_as_p(self):
aug = iaa.Dropout(p=[0.0, 0.5, 1.0])
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
nb_seen = [0, 0, 0, 0]
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
n_dropped = np.sum(observed_aug == 0)
p_observed = n_dropped / observed_aug.size
if 0 <= p_observed <= 0.01:
nb_seen[0] += 1
elif 0.5 - 0.05 <= p_observed <= 0.5 + 0.05:
nb_seen[1] += 1
elif 1.0-0.01 <= p_observed <= 1.0:
nb_seen[2] += 1
else:
nb_seen[3] += 1
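# each of the three p values should be drawn in roughly a third (~330) of the 1000 iterations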
assert np.allclose(nb_seen[0:3], nb_iterations*0.33, rtol=0, atol=75)
assert nb_seen[3] < 30
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.Dropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])))
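# the parameter effectively yields a per-image dropout rate of either ~0.0 or ~0.5; each case should occur in roughly half of the 400 runs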
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for wrong parameter datatype
got_exception = False
try:
_aug = iaa.Dropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Dropout(p=1.0)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.Dropout(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = base_img
assert np.array_equal(observed, expected)
def test_p_is_one(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=1.0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = np.zeros_like(base_img)
assert np.array_equal(observed, expected)
def test_p_is_50_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
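# size_px=1 upscales a single sampled mask value to the whole image, so each draw is either fully kept (100) or fully dropped (0)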
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_size_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=0.001, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_per_channel(self):
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=True, min_size=1)
base_img = np.ones((4, 4, 3), dtype=np.uint8) * 100
found = False
for _ in sm.xrange(100):
observed = aug.augment_image(base_img)
avgs = np.average(observed, axis=(0, 1))
if len(set(avgs)) >= 2:
found = True
break
assert found
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.CoarseDropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])), size_px=50)
images = np.ones((1, 100, 100, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for bad parameters
got_exception = False
try:
_ = iaa.CoarseDropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test___init___size_px_and_size_percent_both_none(self):
got_exception = False
try:
_ = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseDropout(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseDropout(p=0.5, size_px=10, per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10, shape=(40, 40, 3))
class TestDropout2d(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
aug = iaa.Dropout2d(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 1
def test___init___p_is_float(self):
aug = iaa.Dropout2d(p=0.7)
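# Dropout2d stores the keep-probability internally, hence p=0.7 becomes Binomial(p=0.3)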
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 0.3)
assert aug.nb_keep_channels == 1
def test___init___nb_keep_channels_is_int(self):
aug = iaa.Dropout2d(p=0, nb_keep_channels=2)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 2
def test_no_images_in_batch(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
heatmaps = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=heatmaps)
assert np.allclose(heatmaps_aug.arr_0to1, heatmaps.arr_0to1)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_1_heatmaps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_1_segmentation_maps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_1_cbaois__keep_one_channel(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_heatmaps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075(self):
image = np.full((1, 1, 3000), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.75, nb_keep_channels=0)
image_aug = aug(image=image)
nb_kept = np.sum(image_aug == 255)
nb_dropped = image.shape[2] - nb_kept
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
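# with p=0.75, roughly 2250 of the 3000 channels should be dropped (atol=75, i.e. about +/-2.5%)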
assert np.isclose(nb_dropped, image.shape[2]*0.75, atol=75)
def test_force_nb_keep_channels(self):
image = np.full((1, 1, 3), 255, dtype=np.uint8)
images = np.array([image] * 1000)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
images_aug = aug(images=images)
ids_kept = [np.nonzero(image[0, 0, :]) for image in images_aug]
ids_kept_uq = np.unique(ids_kept)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
# on average, keep 1 of 3 channels
# due to p=1.0 we expect to get exactly 2/3 dropped
assert np.isclose(nb_dropped,
(len(images)*images.shape[3])*(2/3), atol=1)
# every channel dropped at least once, i.e. which one is kept is random
assert sorted(ids_kept_uq.tolist()) == [0, 1, 2]
def test_some_images_below_nb_keep_channels(self):
image_2c = np.full((1, 1, 2), 255, dtype=np.uint8)
image_3c = np.full((1, 1, 3), 255, dtype=np.uint8)
images = [image_2c if i % 2 == 0 else image_3c
for i in sm.xrange(100)]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=2)
images_aug = aug(images=images)
for i, image_aug in enumerate(images_aug):
assert np.sum(image_aug == 255) == 2
if i % 2 == 0:
assert np.sum(image_aug == 0) == 0
else:
assert np.sum(image_aug == 0) == 1
def test_all_images_below_nb_keep_channels(self):
image = np.full((1, 1, 2), 255, dtype=np.uint8)
images = np.array([image] * 100)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert nb_dropped == 0
def test_get_parameters(self):
aug = iaa.Dropout2d(p=0.7, nb_keep_channels=2)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert np.isclose(params[0].p.value, 0.3)
assert params[1] == 2
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.full(shape, 255, dtype=np.uint8)
aug = iaa.Dropout2d(1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if value == 0:
assert np.sum(image_aug == value) == 10
else:
assert np.sum(image_aug == value) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if _isclose(value, 0.0):
assert np.sum(_isclose(image_aug, value)) == 10
else:
assert (
np.sum(_isclose(image_aug, np.float128(value)))
== 3)
assert np.sum(image_aug == 0) == 7
def test_pickleable(self):
aug = iaa.Dropout2d(p=0.5, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3, shape=(1, 1, 50))
class TestTotalDropout(unittest.TestCase):
def setUp(self):
reseed()
def test___init___p(self):
aug = iaa.TotalDropout(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.sum(images_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=1.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_heatmaps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=0.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075_multiple_images_list(self):
images = [np.full((1, 1, 1), 255, dtype=np.uint8)] * 3000
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum([np.sum(image_aug == 255) for image_aug in images_aug])
nb_dropped = len(images) - nb_kept
for image_aug in images_aug:
assert image_aug.shape == images[0].shape
assert image_aug.dtype.name == images[0].dtype.name
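# each image is a single pixel, so nb_kept equals the number of non-dropped images; expect ~75% dropped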
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_p_is_075_multiple_images_array(self):
images = np.full((3000, 1, 1, 1), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = len(images) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_get_parameters(self):
aug = iaa.TotalDropout(p=0.0)
params = aug.get_parameters()
assert params[0] is aug.p
def test_unusual_channel_numbers(self):
shapes = [
(5, 1, 1, 4),
(5, 1, 1, 5),
(5, 1, 1, 512),
(5, 1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.zeros(shape, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert np.all(images_aug == 0)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == shape
def test_zero_sized_axes(self):
shapes = [
(5, 0, 0),
(5, 0, 1),
(5, 1, 0),
(5, 0, 1, 0),
(5, 1, 0, 0),
(5, 0, 1, 1),
(5, 1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.full(shape, 255, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == images.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 0
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0) or value == 0:
assert np.sum(images_aug == 0) == 5*3
else:
assert np.sum(images_aug == value) == 5*3
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0):
assert np.sum(_isclose(images_aug, 0.0)) == 5*3
else:
assert (
np.sum(_isclose(images_aug, np.float128(value)))
== 5*3)
def test_pickleable(self):
aug = iaa.TotalDropout(p=0.5, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=30, shape=(4, 4, 2))
class TestMultiply(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
# no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Multiply(mul=1.2)
aug_det = iaa.Multiply(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Multiply(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_per_channel(self):
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=True)
observed = aug.augment_image(np.ones((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 2 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
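# per_channel=0.5: roughly half of the 400 runs should contain two distinct channel values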
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Multiply(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Multiply(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Multiply(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Multiply(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
if np.dtype(dtype).kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1.2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(1.2 * int(center_value)))
if np.dtype(dtype).kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.Multiply(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.Multiply(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# using tolerances of -100 - 1e-2 and 100 + 1e-2 is not enough for float16, had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.Multiply((0.5, 1.5), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
class TestMultiplyElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
# no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = iaa.MultiplyElementwise(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0.5, 1.5))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.95 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.ones((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
assert observed.shape == (100, 100, 3)
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.MultiplyElementwise(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.MultiplyElementwise(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), int(center_value), dtype=dtype)
# aug = iaa.MultiplyElementwise(1.2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == int(1.2 * int(center_value)))
# deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == min_value)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10.0, dtype=dtype)
# aug = iaa.MultiplyElementwise(2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# using tolerances of -100 - 1e-2 and 100 + 1e-2 is not enough for float16, had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.MultiplyElementwise((0.5, 1.5), per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestReplaceElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mask_is_always_zero(self):
# no replace, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
aug = iaa.ReplaceElementwise(mask=0, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mask_is_always_one(self):
# replace at 100 percent prob., should change everything
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_mask_is_stochastic_parameter(self):
# replace half
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
img = np.ones((100, 100, 1), dtype=np.uint8)
nb_iterations = 100
nb_diff_all = 0
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
nb_diff = np.sum(img != observed)
nb_diff_all += nb_diff
p = nb_diff_all / (nb_iterations * 100 * 100)
assert 0.45 <= p <= 0.55
def test_mask_is_list(self):
# mask is list
aug = iaa.ReplaceElementwise(mask=[0.2, 0.7], replacement=1)
img = np.zeros((20, 20, 1), dtype=np.uint8)
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_image(img)
p = np.mean(observed)
if 0.1 < p < 0.3:
seen[0] += 1
elif 0.6 < p < 0.8:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
aug_det = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_replacement_is_stochastic_parameter(self):
# different replacements
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Choice([100, 200]))
img = np.zeros((1000, 1000, 1), dtype=np.uint8)
img100 = img + 100
img200 = img + 200
observed = aug.augment_image(img)
nb_diff_100 = np.sum(img100 != observed)
nb_diff_200 = np.sum(img200 != observed)
p100 = nb_diff_100 / (1000 * 1000)
p200 = nb_diff_200 / (1000 * 1000)
assert 0.45 <= p100 <= 0.55
assert 0.45 <= p200 <= 0.55
# test channelwise
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.ones((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.ReplaceElementwise(mask=iap.Choice([0, 1]), replacement=1, per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.ReplaceElementwise(mask="test", replacement=1)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.ReplaceElementwise(mask=1, replacement=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ReplaceElementwise(1.0, 1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ReplaceElementwise(1.0, 1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.ReplaceElementwise(mask=0.5, replacement=2, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert isinstance(params[0].p, iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert 0.5 - 1e-6 < params[0].p.value < 0.5 + 1e-6
assert params[1].value == 2
assert params[2].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.5)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
image = np.full((3, 3), True, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), True, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.7)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.2)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=2)
image = np.full((3, 3), 1, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 2)
# deterministic stochastic parameters are by default int32 for
# any integer value and hence cannot cover the full uint32 value
# range
if dtype.name != "uint32":
aug = iaa.ReplaceElementwise(mask=1, replacement=max_value)
image = np.full((3, 3), min_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=min_value)
image = np.full((3, 3), max_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Uniform(1.0, 10.0))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 1
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.DiscreteUniform(1, 10))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 1
aug = iaa.ReplaceElementwise(mask=0.5, replacement=iap.DiscreteUniform(1, 10), per_channel=True)
image = np.full((1, 1, 100), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(0 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 2
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32, np.float64]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
atol = 1e-3*max_value if dtype == np.float16 else 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1.0)
image = np.full((3, 3), 0.0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, 1.0)
aug = iaa.ReplaceElementwise(mask=1, replacement=2.0)
image = np.full((3, 3), 1.0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, 2.0)
# deterministic stochastic parameters are by default float32 for
# any float value and hence cannot cover the full float64 value
# range
if dtype.name != "float64":
aug = iaa.ReplaceElementwise(mask=1, replacement=max_value)
image = np.full((3, 3), min_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=min_value)
image = np.full((3, 3), max_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Uniform(1.0, 10.0))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[1:, :], image_aug[:-1, :], atol=0.01)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.DiscreteUniform(1, 10))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[1:, :], image_aug[:-1, :], atol=0.01)
aug = iaa.ReplaceElementwise(mask=0.5, replacement=iap.DiscreteUniform(1, 10), per_channel=True)
image = np.full((1, 1, 100), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(0 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1], atol=0.01)
def test_pickleable(self):
aug = iaa.ReplaceElementwise(mask=0.5, replacement=(0, 255),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
# not more tests necessary here as SaltAndPepper is just a tiny wrapper around
# ReplaceElementwise
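# The relationship noted above can be sketched roughly as follows (a conceptual,
# hypothetical mapping, not imgaug's actual implementation): the p of SaltAndPepper
# plays the role of the mask of ReplaceElementwise, and the replacement distribution
# decides whether replaced pixels become bright (salt), dark (pepper), or both, e.g.
#
#   salt_and_pepper_like = iaa.ReplaceElementwise(
#       mask=0.5,                          # fraction of pixels to replace, like p=0.5
#       replacement=iap.Uniform(0, 255))   # hypothetical bright-and-dark replacement values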
class TestSaltAndPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_p_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper > 200
assert nb_salt > 200
def test_pickleable(self):
aug = iaa.SaltAndPepper(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseSaltAndPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
aug2 = iaa.CoarseSaltAndPepper(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarseSaltAndPepper(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarseSaltAndPepper(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
for nb_seen in hist:
density = nb_seen / len(ps)
assert 1.0/nb_bins - tolerance < density < 1.0/nb_bins + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarseSaltAndPepper(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.CoarseSaltAndPepper(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseSaltAndPepper(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=(4, 15),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
# not more tests necessary here as Salt is just a tiny wrapper around
# ReplaceElementwise
class TestSalt(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
# Salt() occasionally replaces with 127, which probably should be the center-point here anyways
assert np.all(observed >= 127)
def test_p_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper == 0
assert nb_salt > 200
def test_pickleable(self):
aug = iaa.Salt(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseSalt(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSalt(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarseSalt(p=0.5, size_px=100)
aug2 = iaa.CoarseSalt(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarseSalt(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarseSalt(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
for nb_seen in hist:
density = nb_seen / len(ps)
assert 1.0/nb_bins - tolerance < density < 1.0/nb_bins + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarseSalt(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
def test_size_px_or_size_percent_not_none(self):
got_exception = False
try:
_ = iaa.CoarseSalt(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseSalt(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseSalt(p=0.5, size_px=(4, 15),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
# not more tests necessary here as Salt is just a tiny wrapper around
# ReplaceElementwise
class TestPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_probability_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Pepper(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
assert np.all(observed <= 128)
def test_probability_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Pepper(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper > 200
assert nb_salt == 0
def test_pickleable(self):
aug = iaa.Pepper(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarsePepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarsePepper(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarsePepper(p=0.5, size_px=100)
aug2 = iaa.CoarsePepper(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
# global modules
import numpy as np
import warnings
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.mplot3d.axes3d import Axes3D
import pandas as pd
plt.style.use('ggplot')
# consav
from consav import linear_interp
# local modules
import transitions
import funs
lw = 3
fs = 17
def MyPlot(G,xlim=None,ylim=None,save=True,**kwargs):
""" wrapper for plotting """
# initialize
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
def update(g):
fs = 17
if not 'fontsize' in g:
g['fontsize'] = fs
if not 'labelsize' in g:
g['labelsize'] = fs
if not 'legendsize' in g:
g['legendsize'] = fs
if not 'linestyle' in g:
g['linestyle'] = ('-',)*len(g['y'])
if not 'marker' in g:
g['marker'] = None
def graph(g):
for i in range(len(g['y'])):
if 'color' in g:
ax.plot(g['x'],g['y'][i],label=g['label'][i],color=g['color'][i],linestyle=g['linestyle'][i],marker=g['marker'],**kwargs)
else:
ax.plot(g['x'],g['y'][i],label=g['label'][i],linestyle=g['linestyle'][i],marker=g['marker'],**kwargs)
if type(G) == list:
for i in range(len(G)):
g = G[i]
update(g)
graph(g)
else:
g = G
update(g)
graph(g)
# details
ax.legend(fontsize=g['legendsize'])
ax.set_xlabel(g['xlabel'], fontsize=g['fontsize'])
ax.set_ylabel(g['ylabel'], fontsize=g['fontsize'])
if 'xticks' in g:
ax.set_xticks(g['xticks'])
if xlim != None:
ax.set_xlim(xlim)
if ylim != None:
ax.set_ylim(ylim)
ax.tick_params(axis='both', which='major', labelsize=g['labelsize'])
fig.tight_layout()
if save:
return fig
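# Illustrative sketch (not part of the original module): it shows the dict layout that
# MyPlot expects, inferred from the keys read above ('x', 'y', 'label', 'xlabel',
# 'ylabel', plus optional styling keys). The names and numbers below are placeholders.
def _myplot_example():
    x = np.arange(57, 68)
    G = {'x': x,
         'y': [0.01 * x, 0.02 * x],                 # one array per curve
         'label': ['low skilled', 'high skilled'],  # one label per curve
         'xlabel': 'Age',
         'ylabel': 'Labor supply'}
    return MyPlot(G, xlim=(57, 67))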
def LaborSupply(LS,indi,ages,start_age):
x = np.arange(ages[0], ages[1]+1)
x_inv = x - start_age
if indi == 'educ':
hs = LS['d'][1]['hs'][x_inv]-LS['d'][0]['hs'][x_inv]
ls = LS['d'][1]['ls'][x_inv]-LS['d'][0]['ls'][x_inv]
assert np.allclose(hs+ls,LS['d'][1]['base'][x_inv]-LS['d'][0]['base'][x_inv])
elif indi == 'educ_w':
hs = LS['d'][1]['hs_f'][x_inv]-LS['d'][0]['hs_f'][x_inv]
ls = LS['d'][1]['ls_f'][x_inv]-LS['d'][0]['ls_f'][x_inv]
assert np.allclose(hs+ls,LS['d'][1]['base_f'][x_inv]-LS['d'][0]['base_f'][x_inv])
elif indi == 'educ_m':
hs = LS['d'][1]['hs_m'][x_inv]-LS['d'][0]['hs_m'][x_inv]
ls = LS['d'][1]['ls_m'][x_inv]-LS['d'][0]['ls_m'][x_inv]
assert np.allclose(hs+ls,LS['d'][1]['base_m'][x_inv]-LS['d'][0]['base_m'][x_inv])
elif indi == 'gender':
hs = LS['d'][1]['base_f'][x_inv]-LS['d'][0]['base_f'][x_inv]
ls = LS['d'][1]['base_m'][x_inv]-LS['d'][0]['base_m'][x_inv]
assert np.allclose(hs+ls,LS['d'][1]['base'][x_inv]-LS['d'][0]['base'][x_inv])
return hs,ls,x
def LS_bar(LS,indi,N,start_age,ages,fs=17,ls=12,save=True):
# set up
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
# compute
HS,LS,x = LaborSupply(LS,indi,ages,start_age)
# plot
if indi == 'gender':
label = ['Women', 'Men']
else:
label = ['High skilled', 'Low skilled']
ax.bar(x,HS*100/N, label=label[0])
ax.bar(x,LS*100/N,bottom=HS*100/N, label=label[1])
ax.legend(fontsize=ls)
ax.set_xlabel('Age',fontsize=fs)
ax.set_ylabel('Pct. change in labor supply',fontsize=fs)
ax.tick_params(axis='both', which='major', labelsize=ls)
fig.tight_layout()
if save:
return fig
def RetAge_linear(G,x='base'):
age = []
for i in range(len(G)):
age.append(G[i][x])
age = np.array(age)
return age[1:]-age[0]
def RetAge_plot(x,Y,labels,xlab,ylab,lw=3,fs=17,ls=12,line_45=True,save=True):
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
# plot
if line_45:
ax.plot(x,x, linewidth=lw, label='45 degree line', linestyle='--', color='k')
for i in range(len(Y)):
g = RetAge_linear(Y[i]['RetAge'])
ax.plot(x,g, linewidth=lw, label=labels[i], marker='o')
# details
ax.set_xlabel(xlab, fontsize=fs)
ax.set_ylabel(ylab, fontsize=fs)
ax.legend(fontsize=ls)
ax.tick_params(axis='both', which='major', labelsize=ls)
fig.tight_layout()
if save:
return fig
def Surplus(g,N,change='Pct'):
if change == 'Pct':
return (g[1:]-g[0])/abs(g[0])*100
elif change == 'Abs':
return (g[1:]-g[0])/N
def GovS_plot(x,Y,labels,xlab,ylab,lw=3,fs=17,ls=12,N=[1,1,1],change='Pct',save=True):
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
# plot
for i in range(len(Y)):
g = Surplus(Y[i],N[i],change)
ax.plot(x,g, linewidth=lw, label=labels[i], marker='o')
# details
ax.set_xlabel(xlab, fontsize=fs)
ax.set_ylabel(ylab, fontsize=fs)
ax.legend(fontsize=ls)
ax.tick_params(axis='both', which='major', labelsize=ls)
fig.tight_layout()
if save:
return fig
def pct_change_plot(x,Y,labels,xlab,ylab,lw=3,fs=17,ls=12,save=True):
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
# pre
lst = []
for i in range(len(Y)):
lst.append(Surplus(Y[i],1,'Abs'))
g1 = (np.array(lst[0])/np.array(lst[1])-1)*100
g2 = (np.array(lst[0])/np.array(lst[2])-1)*100
# plot
lns1 = ax1.plot(x,g1, linewidth=lw, label=labels[0], marker='o')
ax2 = ax1.twinx()
lns2 = ax2.plot(x,g2, 'b', marker='o', linewidth=lw, label=labels[1])
# details
ax1.set_xlabel(xlab, fontsize=fs)
ax1.set_ylabel(ylab, fontsize=fs)
ax1.legend(fontsize=ls)
ax1.tick_params(axis='both', which='major', labelsize=ls)
ax2.set_xlabel(xlab, fontsize=fs)
lns = lns1+lns2
labs = [l.get_label() for l in lns]
ax1.legend(lns, labs, fontsize=ls)
ax1.set_ylim([np.min(g1)*0.8, np.max(g1)*1.2])
ax2.set_ylim([np.min(g2)*0.8, np.max(g2)*1.2])
ax2.tick_params(axis='both', which='major', labelsize=ls)
ax2.grid(False)
fig.tight_layout()
if save:
return fig
def policy(model,var,T,MA,ST,RA,D,label=False,xlim=None,ylim=None,bottom=0,top=False):
# unpack
sol = model.sol
par = model.par
solvardict = dict([('c','C_t'),
('v','v_t')])
y_lst = []
label_lst = []
m = sol.m
if not top:
top = len(m)
# loop through options
for age in T:
for ma in MA:
for st in ST:
for ra in RA:
for d in D:
t = transitions.inv_age(age,par)
if d == 1:
ra = transitions.ra_look_up(t,st,ra,d,par)
x = m[bottom:top]
y = getattr(sol,var)[t,ma,st,ra,d,bottom:top]
y_lst.append(y)
if not label:
lab = None
else:
if 't' in label and 'ra' in label and len(label) == 2:
lab = f"$(t = {age}, ra = {ra})$"
if 't' in label and len(label) == 1:
lab = f"$(t = {age})$"
label_lst.append(lab)
# return
return {'y': y_lst, 'x': x, 'label': label_lst, 'xlabel': '$m_t$', 'ylabel': '${}$'.format(solvardict[var])}
def policy_c(model,ax,var,T,AD,ST_h,ST_w,RA_h,RA_w,D_h,D_w,label=False,xlim=None,ylim=None,bottom=0):
""" plot either consumption or value functions for couples """
# unpack
sol = model.sol
par = model.par
solvardict = dict([('c','C_t'),
('v','v_t')])
m = sol.m
ad_min = par.ad_min
# loop through options
for t in T:
for ad in AD:
ad = ad + ad_min
for st_h in ST_h:
for st_w in ST_w:
for ra_h in RA_h:
for ra_w in RA_w:
for d_h in D_h:
for d_w in D_w:
if d_h == 1:
ra_xh = transitions.ra_look_up(t,st_h,ra_h,d_h,par)
else:
ra_xh = ra_h
if d_w == 1:
ra_xw = transitions.ra_look_up(t,st_w,ra_w,d_w,par)
else:
ra_xw = ra_w
d = transitions.d_c(d_h,d_w)
x = m[bottom:]
y = getattr(sol,var)[t,ad,st_h,st_w,ra_xh,ra_xw,d,bottom:]
if label == False:
ax.plot(x,y)
else:
# if 't' in label and 'ra' in label and len(label) == 2:
# lab = f"$(t = {transitions.age(t,par)}, ra = {ra})$"
if 't' in label and len(label) == 1:
lab = f"$(t = {transitions.age(t,par)})$"
elif 'd' in label and len(label) == 1:
lab = f"$(d^h = {d_h}, d^w = {d_w})$"
ax.plot(x,y,label=lab)
# details
if xlim != None:
ax.set_xlim(xlim)
if ylim != None:
ax.set_ylim(ylim)
if label:
ax.legend()
ax.grid(True)
ax.set_xlabel('$m_t$')
ax.set_ylabel('${}$'.format(solvardict[var]))
def choice_probs(model,ma,ST=[0,1,2,3],ages=[57,67]):
# unpack
sol = model.sol
par = model.par
v = sol.v
# initialize
ages = np.arange(ages[0], ages[1]+1)
"""
Implementation of Matrix Transfer approach to calculating transmission, reflection, and absorption.
Many ideas were taken from <NAME> implementation in the tmm package
https://github.com/sbyrnes321/tmm/blob/master/tmm_core.py (used under terms of MIT license)
The current implementation allows for multidimensional dielectric arrays.
"""
import numpy as np
import scipy as sp
import WrightTools as wt
def e_to_n(e):
return np.sqrt(e)
def n_to_e(n):
return n ** 2
def _R_from_r(r):
"""
Calculate reflected power R, starting with reflection amplitude r.
"""
return np.abs(r) ** 2
def _T_from_t(pol, t, n_i, n_f, th_i, th_f):
"""
Calculate transmitted power T, starting with transmission amplitude t.
"""
if pol == "s":
return np.abs(t ** 2) * (
((n_f * np.cos(th_f)).real) / (n_i * np.cos(th_i)).real
)
elif pol == "p":
return np.abs(t ** 2) * (
((n_f * np.conj(np.cos(th_f))).real) / (n_i * np.conj(np.cos(th_i))).real
)
else:
raise ValueError("Polarization must be 's' or 'p'")
def _r_from_M(M):
return M[..., 1, 0] / M[..., 0, 0]
def _t_from_M(M):
return 1 / M[..., 0, 0]
def _Mlist_prod(Mlist):
Mout = Mlist.pop(0)
for M in Mlist:
Mout = Mout @ M
return Mout
def _t_calc(pol, n_i, n_f, th_i, th_f):
if pol == "s":
return 2 * n_i * np.cos(th_i) / (n_i * np.cos(th_i) + n_f * np.cos(th_f))
elif pol == "p":
return 2 * n_i * np.cos(th_i) / (n_f * np.cos(th_i) + n_i * np.cos(th_f))
else:
raise ValueError("Polarization must be 's' or 'p'")
def _r_calc(pol, n_i, n_f, th_i, th_f):
if pol == "s":
out = n_i * np.cos(th_i) - n_f * np.cos(th_f)
out /= n_i * np.cos(th_i) + n_f * np.cos(th_f)
return out
elif pol == "p":
out = n_f * np.cos(th_i) - n_i * np.cos(th_f)
out /= n_f * np.cos(th_i) + n_i * np.cos(th_f)
return out
else:
raise ValueError("Polarization must be 's' or 'p'")
def _M_generator(pol, n_i, n_f, th_i, th_f, deltan):
# eq 11 in byrnes notes
rnn1 = _r_calc(pol, n_i, n_f, th_i, th_f)
tnn1 = _t_calc(pol, n_i, n_f, th_i, th_f)
M1 = np.zeros(deltan.shape + (2, 2), dtype=complex)
M1[..., 0, 0] = np.exp(
-1j * deltan
) # TODO ensure matrix construction is as intended
M1[..., 1, 1] = np.exp(
1j * deltan
) # TODO ensure matrix construction is as intended
M2 = np.ones(deltan.shape + (2, 2), dtype=complex)
M2[..., 0, 1] = rnn1 # TODO ensure matrix construction is as intended
M2[..., 1, 0] = rnn1 # TODO ensure matrix construction is as intended
out = M1 @ M2
out /= tnn1[..., None, None]
return out
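# For reference, the product formed above is the single-step transfer matrix
# (eq. 11 in the Byrnes notes cited above):
#
#   M_n = (1 / t_{n,n+1}) * [[exp(-1j*delta_n),            r_{n,n+1}*exp(-1j*delta_n)],
#                            [r_{n,n+1}*exp(+1j*delta_n),  exp(+1j*delta_n)          ]]
#
# i.e. propagation through layer n (phase delta_n) combined with the Fresnel
# reflection/transmission amplitudes of the n -> n+1 interface.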
def _M_bootstrap(pol, n, th, deltan):
assert n.shape == th.shape == deltan.shape, "input arrays have mismatched shapes"
Mout = []
for i in range(1, n.shape[0]):
M = _M_generator(pol, n[i - 1], n[i], th[i - 1], th[i], deltan[i - 1])
Mout.append(M)
return Mout
def _snells_law_calc(n_1, n_2, th_1):
# TODO this is super naive. Consider making sure we are in the correct branch cut
th_2_guess = np.arcsin(n_1 * np.sin(th_1) / n_2)  # numpy's arcsin accepts complex arguments
return th_2_guess
def _snells_bootstrap(ns, th_0):
theta_out = np.zeros(ns.shape, dtype=complex)
theta_out[0] = th_0
for i in range(1, ns.shape[0]):
theta_old = theta_out[i - 1]
n_old = ns[i - 1]
n_new = ns[i]
theta_new = _snells_law_calc(n_old, n_new, theta_old)
theta_out[i] = theta_new
return theta_out
def stack_calculation(pol, n_arr, d_arr, th_0, hw_vac):
""" Calculate optical properties of a stack of optical structures.
This calculator assumes arrays are well shaped.
0th dimension of arrays correlate to optical stack number.
1st dimension of arrays correlate to energy/wavelength of light
2nd and more dimensions correlate to user specified refractive index changes
Parameters
----------
pol : string
's' or 'p' specifies the polarization type
n_arr : array
refractive indices of the optical stack.
For x layers (including the input, leading, and output, trailing, layers) the required shape is
(x, y, ...).
By convention, the first and last layers have exclusively real refractive indices.
d_arr : array
thicknesses of the optical stack in nanometers.
For x layers the required shape is
By convention, first and last layers have zero thickness.
th_0 : float
angle of forward traveling light from 0th to 1st layer
hw_vac : array
energy per photon of light in vacuum (units of eV)
must be of shape (1, y, ...)
Returns
-------
tuple
R, T, A: arrays
arrays have shape (y, ...)
R : reflectance
T : transmittance
A : absorptance
"""
# ensure d_arr has zero thickness for first and last layers
d_arr[0] = 0
d_arr[-1] = 0
# convert to nm
lam_vac = wt.units.converter(hw_vac, "eV", "nm")
# calculate arrays
th_arr = _snells_bootstrap(n_arr, th_0)
kz_arr = 2 * np.pi * n_arr * np.cos(th_arr) / lam_vac
delta_arr = kz_arr * d_arr
# create list of M arrays
Mlist = _M_bootstrap(pol, n_arr, th_arr, delta_arr)
# now take their product
Mout = _Mlist_prod(Mlist)
# calculate useful quantities
r = _r_from_M(Mout)
t = _t_from_M(Mout)
R = _R_from_r(r)
T = _T_from_t(pol, t, n_arr[0], n_arr[-1], th_arr[0], th_arr[-1])
A = 1 - R - T
return R, T, A
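# Illustrative usage sketch (not part of the original module). Shapes follow the
# docstring above: the layer axis comes first, the photon-energy axis second. All
# numbers (refractive indices, thickness, angle) are placeholder values.
def _stack_calculation_example():
    hw = np.linspace(1.5, 3.0, 200)[None, :]        # photon energies in eV, shape (1, y)
    n_in = np.ones((1, hw.size), dtype=complex)     # input medium (e.g. air)
    n_film = np.full((1, hw.size), 2.0 + 0.1j)      # absorbing film
    n_out = np.full((1, hw.size), 1.5 + 0.0j)       # output medium (e.g. glass)
    n_arr = np.concatenate([n_in, n_film, n_out])   # shape (3, y)
    d_arr = np.array([0.0, 100.0, 0.0])[:, None]    # thicknesses in nm, shape (3, 1)
    return stack_calculation("s", n_arr, d_arr, th_0=0.0, hw_vac=hw)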
def easy_stack(w, epsilon_samp, n_sub1, n_sub2, samp_thickness_nm):
# assumes w and epsilon_samp are 1D
n_samp = e_to_n(epsilon_samp)
zero = n_sub1 * np.ones(w.shape)
second = n_sub2 * np.ones(w.shape)
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 21 14:41:54 2019
@author: lukas
"""
import subprocess
import os
from shutil import copytree
import numpy as np
import random
# %% run simulation
def run_ghdl_linux(filenames,tb_entity,vcd_name="output.vcd"):
"""
runs the testbench using ghdl and saves the output of ghdl in tmp/ghdl.log
ATTENTION: Function not tested yet!
    Double-check whether shell=True works on Linux!
Parameters
----------
filenames : tuple of strings
        filenames of the VHDL files.
tb_entity : string
entity name of the testbench.
vcd_name : string, optional
name of the vcd output file. The default is "output.vcd".
Returns
-------
None.
"""
if not os.path.isdir('tmp'):
try : os.mkdir('tmp')
except : print("Error creating tmp folder!")
command_s = "ghdl -s --workdir=tmp"
command_a = "ghdl -a --workdir=tmp"
for i in filenames:
command_s = command_s + " " + i
command_a = command_a + " " + i
command_e = "ghdl -e --workdir=tmp " + tb_entity
command_r = "ghdl -r --workdir=tmp " + tb_entity + " --vcd=tmp/" + vcd_name
print(command_s)
print(command_a)
print(command_e)
print(command_r)
with open("tmp/ghdl.log","a+") as f:
subprocess.run(command_s,shell=True, stdout=f, text=True, check=True)
subprocess.run(command_a,shell=True, stdout=f, text=True, check=True)
subprocess.run(command_e,shell=True, stdout=f, text=True, check=True)
subprocess.run(command_r,shell=True, stdout=f, text=True, check=True)
def run_ghdl_win(filenames,tb_entity,vcd_name="output.vcd"):
"""
runs the testbench using ghdl and saves the output of ghdl in tmp/ghdl.log
Parameters
----------
filenames : tuple of strings
        filenames of the VHDL files.
tb_entity : string
entity name of the testbench.
vcd_name : string, optional
name of the vcd output file. The default is "output.vcd".
Returns
-------
None.
"""
if not os.path.isdir('tmp'):
try : os.mkdir('tmp')
except : print("Error creating tmp folder!")
command_s = "ghdl -s --workdir=tmp"
command_a = "ghdl -a --workdir=tmp"
for i in filenames:
command_s = command_s + " " + i
command_a = command_a + " " + i
command_s = command_s + " > tmp\ghdl.log"
command_a = command_a + " > tmp\ghdl.log"
command_e = "ghdl -e --workdir=tmp " + tb_entity +" > tmp\ghdl.log"
command_r = "ghdl -r --workdir=tmp " + tb_entity + " --vcd=tmp/" + vcd_name +" > tmp\ghdl.log"
print(command_s)
print(command_a)
print(command_e)
print(command_r)
if not os.path.isfile("tmp\ghdl.log"):
open("tmp\ghdl.log", 'w').close()
os.popen("cmd")
subprocess.run(command_s,shell=True, check=True)
subprocess.run(command_a,shell=True, check=True)
subprocess.run(command_e,shell=True, check=True)
subprocess.run(command_r,shell=True, check=True)
#with open("tmp/ghdl.log","a+") as f:
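# Illustrative usage sketch (not part of the original script): a typical invocation of the
# GHDL helpers above. The source file names and testbench entity are hypothetical.
def _example_run_ghdl():
    vhdl_sources = ("src/my_module.vhd", "tb/tb_my_module.vhd")  # hypothetical files
    run_ghdl_win(vhdl_sources, "tb_my_module", vcd_name="my_module.vcd")
    # On Linux the (still untested) counterpart would be:
    # run_ghdl_linux(vhdl_sources, "tb_my_module", vcd_name="my_module.vcd")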
def run_vivado_sim_win():
"""
runs the testbench using vivado and saves the output of vivado in
tmp/sim.log
This function is specially tailored for the tb_memctrl testbench.
    If anything changes, re-export the simulation in Vivado. The shell commands
can be found in tb_memctrl.sh and the path of the files in vlog.prj and in
vhdl.prj
Parameters
----------
Returns
-------
None.
"""
print("Start simulation")
print(os.getcwd())
if not os.path.isdir('tmp'):
try : os.mkdir('tmp')
except : print("Error creating tmp folder!")
copytree('xsim', 'tmp/xsim') # copy xsim folder to generate output products in tmp folder
os.chdir('tmp/xsim')
compile_vlog = "xvlog --relax -prj vlog.prj 2>&1 | tee compile.log"
compile_vhdl = "xvhdl --relax -prj vhdl.prj 2>&1 | tee compile.log"
elaborate = 'xelab --relax --debug typical --mt auto -L blk_mem_gen_v8_4_1'\
' -L xil_defaultlib -L fifo_generator_v13_2_1 -L unisims_ver -L'\
' unimacro_ver -L secureip -L xpm --snapshot tb_memctrl'\
' xil_defaultlib.tb_memctrl xil_defaultlib.glbl -log elaborate.log'
simulate = "xsim tb_memctrl -key {Behavioral:sim_1:Functional:tb_memctrl} -tclbatch cmd.tcl -log simulate.log"
err_verilog = subprocess.Popen(compile_vlog,shell=True,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if err_verilog.poll() is None:
print("Wait till process finished..")
err_verilog.wait(timeout=60.0)
if err_verilog.returncode != 0:
out, err = err_verilog.communicate()
err_verilog.kill()
print(out)
print(err)
else:
print("compile verilog files done!")
err_vhdl = subprocess.Popen(compile_vhdl,shell=True,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if err_vhdl.poll() is None:
print("Wait till process finished..")
err_vhdl.wait(timeout=60.0)
if err_vhdl.returncode != 0:
out, err = err_vhdl.communicate()
err_vhdl.kill()
print(out)
print(err)
else:
print("compile vhdl files done!")
#err_elaborate = subprocess.Popen(elaborate,shell=True,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# if err_elaborate.poll() == None:
# print("Wait till process finished..")
# err_elaborate.wait(timeout=60.0)
# if err_elaborate.returncode != 0:
# out, err = err_elaborate.communicate()
# err_elaborate.kill()
# print(out)
# print(err)
# else:
# print("elaborate design done!")
subprocess.call(elaborate,shell=True)
subprocess.call(simulate,shell=True) # For some reason simulation doesn't work with Popen
os.chdir('../..')
print(os.getcwd())
print("End simulation")
# %% write to file
def gen_testdata(blocksize,blocknumber,chans,filename="testdata",drange=255,dtype=np.uint8):
"""
Generates random testdata to be used in the testbench
Parameters
----------
blocksize : integer
size of each data block.
blocknumber : integer
number of generated blocks.
chans : integer
number of input channels
filename : string, optional
file name. The default is "testdata".
drange : integer, optional
range of random numbers. The default is 255.
dtype : numpy type, optional
data type of returned numpy array. The default is np.uint8.
Returns
-------
random_data: numpy array
        generated random numbers.
"""
random_data = np.zeros((blocknumber,blocksize,chans),dtype=dtype)
for k in range(chans):
with open("tmp/"+ filename + str(k) + ".txt","a+") as f:
for i in range(blocknumber):
for j in range(blocksize):
random_data[i,j,k] = random.randrange(drange)
f.write("{}\n".format(random_data[i,j,k]))
return random_data
def write_features_to_file(features,filename="feature_map",layernumber=1):
"""
Parameters
----------
features: numpy array [B,W*H,Co] dtype=np.uint8
B.. Batch size
        W*H.. Image width times height
Co.. output channel number
feature matrix
filename : string, optional
file name. The default is "feature_map"
Returns
-------
None.
"""
for i in range(features.shape[2]):
with open("tmp/"+filename+"_L{}".format(layernumber) +"_c{}.txt".format(i),"a+") as f:
for j in range(features.shape[0]):
for k in range(features.shape[1]):
f.write("{}\n".format(features[j,k,i]))
# %% memory controller
def get_vectors_from_data(test_data,img_width,img_hight,kernel_size=3,dtype=np.uint8):
"""
Generates 3x1 vectors from test data
Parameters
----------
test_data : numpy array
generated test data.
    img_width : integer
        width of the test matrix.
    img_hight : integer
        height of the test matrix.
kernel_size : integer, optional
size of the kernel. The default is 3.
dtype : numpy dtype, optional
Data type of numpy array. The default is np.uint8.
Returns
-------
vectors : numpy array
Vector to compare with the output of the memory controller
"""
vectors = np.zeros((test_data.shape[0],test_data.shape[1],kernel_size,test_data.shape[2]),dtype=dtype)
for i in range(test_data.shape[0]):
vector_cnt = 0
for j in range(test_data.shape[1]):
if j < img_width:
vectors[i,vector_cnt,0,:] = 0
vectors[i,vector_cnt,1,:] = test_data[i,j,:]
vectors[i,vector_cnt,2,:] = test_data[i,j+img_width,:]
vector_cnt += 1
elif j >= (img_width*(img_hight-1)):
#print(j)
vectors[i,vector_cnt,0,:] = test_data[i,j-img_width,:]
vectors[i,vector_cnt,1,:] = test_data[i,j,:]
vectors[i,vector_cnt,2,:] = 0
vector_cnt += 1
else:
vectors[i,vector_cnt,0,:] = test_data[i,j-img_width,:]
vectors[i,vector_cnt,1,:] = test_data[i,j,:]
vectors[i,vector_cnt,2,:] = test_data[i,j+img_width,:]
vector_cnt += 1
return vectors
def get_Kernels(test_vectors,img_width):
"""
    Creates the 3x3 kernels that are processed by the conv2d operation
Parameters
----------
test_vectors : numpy array
Generated test vectors 3x1.
    img_width : integer
        width of the test matrix.
Returns
-------
Kernel : numpy array
Kernel to compare with the output of the shiftregister
"""
kernels = np.zeros((test_vectors.shape[0],test_vectors.shape[1],test_vectors.shape[2],test_vectors.shape[2],test_vectors.shape[3]),dtype=np.uint8)
for i in range(test_vectors.shape[0]):
for j in range(test_vectors.shape[1]):
if j%img_width == 0:
kernels[i,j,:,0,:] = 0
kernels[i,j,:,1,:] = test_vectors[i,j,:,:]
kernels[i,j,:,2,:] = test_vectors[i,j+1,:,:]
elif j%img_width == img_width-1:
kernels[i,j,:,0,:] = test_vectors[i,j-1,:,:]
kernels[i,j,:,1,:] = test_vectors[i,j,:,:]
kernels[i,j,:,2,:] = 0
else:
kernels[i,j,:,0,:] = test_vectors[i,j-1,:,:]
kernels[i,j,:,1,:] = test_vectors[i,j,:,:]
kernels[i,j,:,2,:] = test_vectors[i,j+1,:,:]
return kernels
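# Illustrative usage sketch (not part of the original script): how the helpers above chain
# together for a hypothetical 4x4 image with 2 channels. Assumes the tmp/ folder already
# exists (the run_* helpers above create it), because gen_testdata writes its files there.
def _example_memctrl_reference_data():
    img_width, img_hight = 4, 4
    test_data = gen_testdata(blocksize=img_width * img_hight, blocknumber=1, chans=2)
    vectors = get_vectors_from_data(test_data, img_width, img_hight)
    kernels = get_Kernels(vectors, img_width)
    return kernels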
# %% convolutional layer
def conv_2d(kernels,weights,msb,bias,data_width=8):
"""
Emulates the operation carried out by the conv2d module in the FPGA
Parameters
----------
kernel : numpy array [B,W*H,Kh,Kw,Ci]
B.. Batch size
W*H.. Image width times hight
Kh.. Kernel hight
Kw.. Kernel width
Ci.. channel number
Input kernels
weights : numpy array [Co,Ci,Kh,Kw]
Co.. output channel number
Ci.. input channel number
        Kh.. Kernel height
        Kw.. Kernel width
        Weight matrix for each kernel
msb : numpy array [Co,Ci]
Co.. output channel number
MSB values for quantization
Returns
-------
features: numpy array [B,W*H,Co] dtype=np.uint8
B.. Batch size
        W*H.. Image width times height
Co.. output channel number
8 bit output Matrix
"""
features = np.zeros((kernels.shape[0],kernels.shape[1],weights.shape[0]),dtype=np.uint8)
for i in range(kernels.shape[0]):
for j in range(kernels.shape[1]):
for k in range (weights.shape[0]):
features[i,j,k] = conv_channel(kernels[i,j,:,:,:],weights[k,:,:,:],msb[k],data_width, bias[k])
return features
def conv_channel(kernels,weights,msb,data_width=8, bias = 0):
"""
Emulates the operation carried out by the conv_channel module in the FPGA
Parameters
----------
kernels : numpy array [B,W*H,Kh,Kw,Ci]
B.. Batch size
        W*H.. Image width times height
        Kh.. Kernel height
Kw.. Kernel width
Ci.. channel number
Input kernels
weights : numpy array [Ci,Kh,Kw]
Ci.. input channel number
        Kh.. Kernel height
        Kw.. Kernel width
        Weight matrix for each kernel
msb : integer
        MSB position for quantization
Returns
-------
weighted_sum: np.uint8
B.. Batch size
        W*H.. Image width times height
8 bit output Matrix
"""
weighted_sum = np.int32(0)
for k in range (weights.shape[0]):
weighted_sum+= kernel_3x3(kernels[:,:,k],weights[k,:,:])
weighted_sum += bias
# Relu (Additional benefit np.int16(int("0x00FF",16)) & feature would not work for negative numbers because of 2's complement)
if weighted_sum < 0:
weighted_sum = 0
else: # Quantization
weighted_sum >>= msb+1-data_width
if weighted_sum > int(2**data_width - 1):
weighted_sum = int(2**data_width - 1)
return | np.uint8(weighted_sum) | numpy.uint8 |
from __future__ import print_function
import numpy as np
import astropy.convolution
__all__ = ['shiftnd', 'cross_correlation_shifts']
try:
import fftw3
has_fftw = True
def fftwn(array, nthreads=1):
array = array.astype('complex').copy()
outarray = array.copy()
fft_forward = fftw3.Plan(array, outarray, direction='forward',
flags=['estimate'], nthreads=nthreads)
fft_forward.execute()
return outarray
def ifftwn(array, nthreads=1):
array = array.astype('complex').copy()
outarray = array.copy()
fft_backward = fftw3.Plan(array, outarray, direction='backward',
flags=['estimate'], nthreads=nthreads)
fft_backward.execute()
return outarray / np.size(array)
except ImportError:
fftn = np.fft.fftn
ifftn = np.fft.ifftn
has_fftw = False
# I performed some fft speed tests and found that scipy is slower than numpy
# http://code.google.com/p/agpy/source/browse/trunk/tests/test_ffts.py However,
# the speed varied on machines - YMMV. If someone finds that scipy's fft is
# faster, we should add that as an option here... not sure how exactly
def get_ffts(nthreads=1, use_numpy_fft=not has_fftw):
"""
Returns fftn,ifftn using either numpy's fft or fftw
"""
if has_fftw and not use_numpy_fft:
def fftn(*args, **kwargs):
return fftwn(*args, nthreads=nthreads, **kwargs)
def ifftn(*args, **kwargs):
return ifftwn(*args, nthreads=nthreads, **kwargs)
elif use_numpy_fft:
fftn = np.fft.fftn
ifftn = np.fft.ifftn
else:
# yes, this is redundant, but I feel like there could be a third option...
fftn = np.fft.fftn
ifftn = np.fft.ifftn
return fftn,ifftn
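# Illustrative sketch (not part of the original module): get_ffts hands back a matching
# fftn/ifftn pair, so a forward/inverse round trip should recover the input up to
# floating-point error.
def _example_get_ffts():
    fftn, ifftn = get_ffts(use_numpy_fft=True)
    data = np.random.randn(8, 8)
    return np.allclose(ifftn(fftn(data)).real, data)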
def shiftnd(data, offset, phase=0, nthreads=1, use_numpy_fft=False,
return_abs=False, return_real=True):
"""
FFT-based sub-pixel image shift.
Will turn NaNs into zeros
Shift Theorem:
.. math::
FT[f(t-t_0)](x) = e^{-2 \pi i x t_0} F(x)
Parameters
----------
data : np.ndarray
Data to shift
offset : (int,)*ndim
Offsets in each direction. Must be iterable.
phase : float
Phase, in radians
Other Parameters
----------------
use_numpy_fft : bool
Force use numpy's fft over fftw? (only matters if you have fftw
installed)
    nthreads : int
Number of threads to use for fft (only matters if you have fftw
installed)
return_real : bool
Return the real component of the shifted array
return_abs : bool
Return the absolute value of the shifted array
Returns
-------
The input array shifted by offsets
"""
fftn,ifftn = get_ffts(nthreads=nthreads, use_numpy_fft=use_numpy_fft)
if np.any( | np.isnan(data) | numpy.isnan |
import cv2
import munkres
import numpy as np
import torch
# solution proposed in https://github.com/pytorch/pytorch/issues/229#issuecomment-299424875
def flip_tensor(tensor, dim=0):
"""
flip the tensor on the dimension dim
"""
inv_idx = torch.arange(tensor.shape[dim] - 1, -1, -1).to(tensor.device)
return tensor.index_select(dim, inv_idx)
#
# derived from https://github.com/leoxiaobin/deep-high-resolution-net.pytorch
def flip_back(output_flipped, matched_parts):
assert len(output_flipped.shape) == 4, 'output_flipped has to be [batch_size, num_joints, height, width]'
output_flipped = flip_tensor(output_flipped, dim=-1)
for pair in matched_parts:
tmp = output_flipped[:, pair[0]].clone()
output_flipped[:, pair[0]] = output_flipped[:, pair[1]]
output_flipped[:, pair[1]] = tmp
return output_flipped
def fliplr_joints(joints, joints_vis, width, matched_parts):
# Flip horizontal
joints[:, 0] = width - joints[:, 0] - 1
# Change left-right parts
for pair in matched_parts:
joints[pair[0], :], joints[pair[1], :] = \
joints[pair[1], :], joints[pair[0], :].copy()
joints_vis[pair[0], :], joints_vis[pair[1], :] = \
joints_vis[pair[1], :], joints_vis[pair[0], :].copy()
return joints * joints_vis, joints_vis
def get_affine_transform(center, scale, rot, output_size, shift=np.array([0, 0], dtype=np.float32), inv=0):
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
print(scale)
scale = np.array([scale, scale])
scale_tmp = scale * 1.0 * 200.0 # It was scale_tmp = scale * 200.0
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
def affine_transform(pt, t):
new_pt = np.array([pt[0], pt[1], 1.]).T
new_pt = np.dot(t, new_pt)
return new_pt[:2]
def get_3rd_point(a, b):
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
def get_dir(src_point, rot_rad):
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
def crop(img, center, scale, output_size, rot=0, interpolation=cv2.INTER_LINEAR):
trans = get_affine_transform(center, scale, rot, output_size)
dst_img = cv2.warpAffine(
img, trans, (int(output_size[0]), int(output_size[1])),
flags=interpolation
)
return dst_img
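# Illustrative usage sketch (not part of the original module): cropping a person patch and
# mapping an image-space keypoint into the cropped coordinates. The center, scale, and
# keypoint values are hypothetical; scale follows the "* 200 px" convention used above.
def _example_crop_and_transform(img):
    center = np.array([320.0, 240.0], dtype=np.float32)  # assumed person center (x, y)
    scale = np.array([1.2, 1.2])  # assumed scale factors
    output_size = (192, 256)  # (width, height)
    patch = crop(img, center, scale, output_size)
    trans = get_affine_transform(center, scale, 0, output_size)
    keypoint_in_patch = affine_transform(np.array([300.0, 200.0]), trans)
    return patch, keypoint_in_patch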
#
#
#
# derived from https://github.com/leoxiaobin/deep-high-resolution-net.pytorch
def calc_dists(preds, target, normalize):
preds = preds.type(torch.float32)
target = target.type(torch.float32)
dists = torch.zeros((preds.shape[1], preds.shape[0])).to(preds.device)
for n in range(preds.shape[0]):
for c in range(preds.shape[1]):
if target[n, c, 0] > 1 and target[n, c, 1] > 1:
normed_preds = preds[n, c, :] / normalize[n]
normed_targets = target[n, c, :] / normalize[n]
# # dists[c, n] = np.linalg.norm(normed_preds - normed_targets)
dists[c, n] = torch.norm(normed_preds - normed_targets)
else:
dists[c, n] = -1
return dists
def dist_acc(dists, thr=0.5):
"""
Return percentage below threshold while ignoring values with a -1
"""
dist_cal = torch.ne(dists, -1)
num_dist_cal = dist_cal.sum()
if num_dist_cal > 0:
return torch.lt(dists[dist_cal], thr).float().sum() / num_dist_cal
else:
return -1
def evaluate_pck_accuracy(output, target, hm_type='gaussian', thr=0.5):
"""
Calculate accuracy according to PCK,
but uses ground truth heatmap rather than x,y locations
First value to be returned is average accuracy across 'idxs',
followed by individual accuracies
"""
idx = list(range(output.shape[1]))
if hm_type == 'gaussian':
pred, _ = get_max_preds(output)
target, _ = get_max_preds(target)
h = output.shape[2]
w = output.shape[3]
norm = torch.ones((pred.shape[0], 2)) * torch.tensor([h, w],
dtype=torch.float32) / 10 # Why they divide this by 10?
norm = norm.to(output.device)
else:
raise NotImplementedError
dists = calc_dists(pred, target, norm)
acc = torch.zeros(len(idx)).to(dists.device)
avg_acc = 0
cnt = 0
for i in range(len(idx)):
acc[i] = dist_acc(dists[idx[i]], thr=thr)
if acc[i] >= 0:
avg_acc = avg_acc + acc[i]
cnt += 1
avg_acc = avg_acc / cnt if cnt != 0 else 0
return acc, avg_acc, cnt, pred, target
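# Illustrative sketch (not part of the original module): evaluating PCK on random heatmaps
# shaped [batch, joints, height, width]. Relies on the get_max_preds helper that the
# function above already assumes to be available in this module.
def _example_pck():
    heatmaps_pred = torch.rand(2, 17, 64, 48)  # hypothetical predicted heatmaps
    heatmaps_true = torch.rand(2, 17, 64, 48)  # hypothetical ground-truth heatmaps
    acc, avg_acc, cnt, pred, target = evaluate_pck_accuracy(heatmaps_pred, heatmaps_true)
    return avg_acc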
#
#
#
# Operations on bounding boxes (rectangles)
def bbox_area(bbox):
"""
    Area of a bounding box (a rectangle).
Args:
bbox (:class:`np.ndarray`): rectangle in the form (x_min, y_min, x_max, y_max)
Returns:
float: Bounding box area.
"""
x1, y1, x2, y2 = bbox
dx = x2 - x1
dy = y2 - y1
return dx * dy
def bbox_intersection(bbox_a, bbox_b):
"""
    Intersection between two bounding boxes (two rectangles).
Args:
bbox_a (:class:`np.ndarray`): rectangle in the form (x_min, y_min, x_max, y_max)
bbox_b (:class:`np.ndarray`): rectangle in the form (x_min, y_min, x_max, y_max)
Returns:
(:class:`np.ndarray`, float):
Intersection limits and area.
Format: (x_min, y_min, x_max, y_max), area
"""
x1 = np.max((bbox_a[0], bbox_b[0])) # Left
x2 = np.min((bbox_a[2], bbox_b[2])) # Right
y1 = np.max((bbox_a[1], bbox_b[1])) # Top
y2 = np.min((bbox_a[3], bbox_b[3])) # Bottom
if x2 < x1 or y2 < y1:
bbox_i = np.asarray([0, 0, 0, 0])
area_i = 0
else:
bbox_i = np.asarray([x1, y1, x2, y2], dtype=bbox_a.dtype)
area_i = bbox_area(bbox_i)
return bbox_i, area_i
def bbox_union(bbox_a, bbox_b):
"""
    Union between two bounding boxes (two rectangles).
Args:
bbox_a (:class:`np.ndarray`): rectangle in the form (x_min, y_min, x_max, y_max)
bbox_b (:class:`np.ndarray`): rectangle in the form (x_min, y_min, x_max, y_max)
Returns:
float: Union.
"""
area_a = bbox_area(bbox_a)
area_b = bbox_area(bbox_b)
bbox_i, area_i = bbox_intersection(bbox_a, bbox_b)
area_u = area_a + area_b - area_i
return area_u
def bbox_iou(bbox_a, bbox_b):
"""
    Intersection over Union (IoU) between two bounding boxes (two rectangles).
Args:
bbox_a (:class:`np.ndarray`): rectangle in the form (x_min, y_min, x_max, y_max)
bbox_b (:class:`np.ndarray`): rectangle in the form (x_min, y_min, x_max, y_max)
Returns:
float: Intersection over Union (IoU).
"""
area_u = bbox_union(bbox_a, bbox_b)
bbox_i, area_i = bbox_intersection(bbox_a, bbox_b)
iou = area_i / area_u
return iou
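# Illustrative sketch (not part of the original module): IoU of two hypothetical
# axis-aligned boxes with a 50x50 overlap.
def _example_bbox_iou():
    box_a = np.asarray([0, 0, 100, 100])
    box_b = np.asarray([50, 50, 150, 150])
    return bbox_iou(box_a, box_b)  # 2500 / (10000 + 10000 - 2500) = 1/7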
#
#
#
# Bounding box/pose similarity and association
def oks_iou(g, d, a_g, a_d, sigmas=None, in_vis_thre=None):
if not isinstance(sigmas, np.ndarray):
sigmas = | np.array(
[.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89]) | numpy.array |
import numpy as np
import qutip as qtp
import scipy
import time
import logging
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
#np.set_printoptions(threshold=np.inf)
# operators
b = qtp.tensor(qtp.destroy(3), qtp.qeye(3)) # spectator qubit
a = qtp.tensor(qtp.qeye(3), qtp.destroy(3)) # fluxing qubit
n_q0 = a.dag() * a
n_q1 = b.dag() * b
# target in the case with no noise
# note that the Hilbert space is H_q1 /otimes H_q0
# so the ordering of basis states below is 00,01,02,10,11,12,20,21,22
U_target = qtp.Qobj([[1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, -1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1]],
type='oper',
dims=[[3, 3], [3, 3]])
U_target_diffdims = qtp.Qobj([[1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, -1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1]],
type='oper',
dims=[[9], [9]]) # otherwise average_gate_fidelity doesn't work
'''
remember that qutip uses the Liouville (matrix) representation for superoperators,
with column stacking.
This means that
rho_{xy,x'y'}=rho[3*x+y,3*x'+y']
rho_{xy,x'y'}=operator_to_vector(rho)[3*x+y+27*x'+9*y']
where xy is the row and x'y' is the column
'''
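# Minimal sketch (not in the original file): numerically checking the index correspondence
# stated above with an arbitrary random two-qutrit density matrix.
def _example_vectorization_indexing():
    rho = qtp.tensor(qtp.rand_dm(3), qtp.rand_dm(3))
    x, y, xp, yp = 1, 2, 0, 1
    return np.isclose(rho.full()[3 * x + y, 3 * xp + yp],
                      qtp.operator_to_vector(rho).full()[3 * x + y + 27 * xp + 9 * yp, 0])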
##### Functions to construct hamiltonian, collapse operators, and compute single quantities of interest
def coupled_transmons_hamiltonian_new(w_q0, w_q1, alpha_q0, alpha_q1, J):
"""
Hamiltonian of two coupled anharmonic transmons.
Because the intention is to tune one qubit into resonance with the other,
the number of levels is limited.
q1 -> static qubit, 3-levels
q0 -> fluxing qubit, 3-levels
intended avoided crossing:
11 <-> 02 (q1 is the first qubit and q0 the second one)
N.B. the frequency of q0 is expected to be larger than that of q1
w_q0 > w_q1
and the anharmonicities alpha negative
"""
H = w_q0 * n_q0 + w_q1 * n_q1 + \
1/2*alpha_q0*(a.dag()*a.dag()*a*a) + 1/2*alpha_q1*(b.dag()*b.dag()*b*b) +\
J * (a.dag() + a) * (b + b.dag())
H = H * (2*np.pi)
return H
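# Illustrative sketch (not part of the original module): building the bare Hamiltonian with
# hypothetical, roughly transmon-like parameters (plain frequencies in GHz; the function
# multiplies by 2*pi internally).
def _example_hamiltonian():
    H = coupled_transmons_hamiltonian_new(w_q0=6.8, w_q1=5.7,
                                          alpha_q0=-0.3, alpha_q1=-0.3, J=0.015)
    return H.eigenenergies()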
def calc_hamiltonian(amp,fluxlutman,noise_parameters_CZ):
# all inputs should be given in terms of frequencies, i.e. without the 2*np.pi factor
# instead, the output includes already that factor
w_q0=fluxlutman.calc_amp_to_freq(amp,'01')
w_q0_sweetspot=fluxlutman.calc_amp_to_freq(0,'01')
w_q1=fluxlutman.calc_amp_to_freq(amp,'10')
alpha_q0=fluxlutman.calc_amp_to_freq(amp,'02')-2*w_q0
alpha_q1=noise_parameters_CZ.alpha_q1()
J=fluxlutman.q_J2()/np.sqrt(2)
w_bus=noise_parameters_CZ.w_bus()
delta_q1=w_q1-w_bus
delta_q0_sweetspot=(w_q0_sweetspot)-w_bus
delta_q0=(w_q0)-w_bus
J_temp = J / ((delta_q1+delta_q0_sweetspot)/(delta_q1*delta_q0_sweetspot)) * (delta_q1+delta_q0)/(delta_q1*delta_q0)
H=coupled_transmons_hamiltonian_new(w_q0=w_q0, w_q1=w_q1, alpha_q0=alpha_q0, alpha_q1=alpha_q1, J=J_temp)
return H
def rotating_frame_transformation_propagator_new(U, t: float, H):
"""
Transforms the frame of the unitary according to
U' = U_{RF}*U
NOTE: remember that this is how the time evolution operator changes from one picture to another
Args:
U (QObj): Unitary to be transformed
t (float): time at which to transform
H (QObj): hamiltonian to be rotated away
"""
U_RF = (1j*H*t).expm()
if U.type=='super':
U_RF=qtp.to_super(U_RF)
U_prime = U_RF * U
""" U_RF only on one side because that's the operator that
satisfies the Schroedinger equation in the interaction picture.
"""
return U_prime
def rotating_frame_transformation_operators(operator, t: float, H):
"""
Transforms the frame of an operator (hamiltonian, or jump operator) according to
O' = U_{RF}*O*U_{RF}^dag
Args:
operator (QObj): operator to be transformed
t (float): time at which to transform
H (QObj): hamiltonian to be rotated away
"""
    U_RF = (1j*H*t).expm()
    return U_RF * operator * U_RF.dag()
def c_ops_amplitudedependent(T1_q0,T1_q1,Tphi01_q0_vec,Tphi01_q1):
    # case where the incoherent noise for qubit q0 is time dependent, or rather pulse-amplitude dependent
c_ops=[]
if T1_q0 != 0:
c_ops.append( | np.sqrt(1/T1_q0) | numpy.sqrt |
import random
import numpy as np
import ConfigSpace
from ConfigSpace import hyperparameters as CSH
from smac.configspace import convert_configurations_to_array
from smac.epm.gaussian_process import GaussianProcess
from smac.epm.gp_base_prior import LognormalPrior, HorseshoePrior
from smac.epm.gp_kernels import ConstantKernel, Matern, HammingKernel, WhiteKernel
from smac.optimizer.acquisition import EI
from smac.optimizer.ei_optimization import LocalSearch
from smac.runhistory.runhistory import RunHistory
from smac.tae.execute_ta_run import StatusType
import hp_transfer_optimizers.core.master
from hp_transfer_optimizers._transfer_utils import get_configspace_partitioning_cond
from hp_transfer_optimizers.core.successivehalving import SuccessiveHalving
def _configspace_to_types_and_bounds(configspace):
types = []
bounds = []
for hyperparameter in configspace.get_hyperparameters():
is_categorical = isinstance(hyperparameter, CSH.CategoricalHyperparameter)
if is_categorical:
types.append(len(hyperparameter.choices))
            bounds.append((len(hyperparameter.choices), np.nan))
else:
types.append(0)
bounds.append((hyperparameter.lower, hyperparameter.upper))
    types = np.array(types, dtype=int)  # np.int was removed in recent NumPy releases
return types, bounds
def _impute_conditional_data(array, configspace):
return_array = np.empty_like(array)
for i in range(array.shape[0]):
datum = np.copy(array[i])
nan_indices = np.argwhere(np.isnan(datum.astype(np.float64))).flatten()
while np.any(nan_indices):
nan_idx = nan_indices[0]
valid_indices = np.argwhere(
np.isfinite(array.astype(np.float64)[:, nan_idx])
).flatten()
if len(valid_indices) > 0:
# Pick one of them at random and overwrite all NaN values
row_idx = np.random.choice(valid_indices)
datum[nan_indices] = array.astype(np.float64)[row_idx, nan_indices]
else:
# no good point in the data has this value activated, so fill it with a
# valid but random value
hparam_name = configspace.get_hyperparameter_by_idx(nan_idx)
hparam = configspace.get_hyperparameter(hparam_name)
if isinstance(hparam, CSH.CategoricalHyperparameter):
sample = hparam.sample(np.random.RandomState())
# Map to internal representation
datum[nan_idx] = hparam.choices.index(sample)
elif isinstance(hparam, CSH.UniformFloatHyperparameter) or isinstance(hparam, CSH.UniformIntegerHyperparameter):
datum[nan_idx] = np.random.random() # TODO, log sample
else:
raise ValueError
nan_indices = np.argwhere(np.isnan(datum.astype(np.float64))).flatten()
return_array[i, :] = datum
return return_array
def _construct_model(configspace, rng):
types, bounds = _configspace_to_types_and_bounds(configspace)
cont_dims = np.nonzero(types == 0)[0]
cat_dims = np.nonzero(types != 0)[0]
cov_amp = ConstantKernel(
2.0,
constant_value_bounds=(np.exp(-10), np.exp(2)),
prior=LognormalPrior(mean=0.0, sigma=1.0, rng=rng),
)
if len(cont_dims) > 0:
exp_kernel = Matern(
np.ones([len(cont_dims)]),
[(np.exp(-6.754111155189306), np.exp(0.0858637988771976)) for _ in
range(len(cont_dims))],
nu=2.5,
operate_on=cont_dims,
)
if len(cat_dims) > 0:
ham_kernel = HammingKernel(
np.ones([len(cat_dims)]),
[(np.exp(-6.754111155189306), np.exp(0.0858637988771976)) for _ in
range(len(cat_dims))],
operate_on=cat_dims,
)
noise_kernel = WhiteKernel(
noise_level=1e-8,
noise_level_bounds=(np.exp(-25), np.exp(2)),
prior=HorseshoePrior(scale=0.1, rng=rng),
)
if len(cont_dims) > 0 and len(cat_dims) > 0:
# both
kernel = cov_amp * (exp_kernel * ham_kernel) + noise_kernel
elif len(cont_dims) > 0 and len(cat_dims) == 0:
# only cont
kernel = cov_amp * exp_kernel + noise_kernel
elif len(cont_dims) == 0 and len(cat_dims) > 0:
        # only cat
kernel = cov_amp * ham_kernel + noise_kernel
else:
raise ValueError()
def _impute_inactive(self, X):
X = X.copy()
return _impute_conditional_data(X, self.configspace)
seed = random.randint(0, 100)
GaussianProcess._impute_inactive = _impute_inactive
return GaussianProcess(
configspace=configspace, types=types, bounds=bounds, seed=seed, kernel=kernel
)
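# Illustrative sketch (not part of the original module): constructing the GP model for a
# small hypothetical search space with one continuous and one categorical hyperparameter.
def _example_construct_model():
    cs = ConfigSpace.ConfigurationSpace()
    cs.add_hyperparameter(CSH.UniformFloatHyperparameter("lr", lower=1e-4, upper=1e-1))
    cs.add_hyperparameter(CSH.CategoricalHyperparameter("optimizer", choices=["sgd", "adam"]))
    return _construct_model(cs, np.random.RandomState(0))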
class GPCondSampler:
def __init__(
self,
configspace,
random_fraction=1 / 2,
logger=None,
previous_results=None
):
self.logger = logger
self.random_fraction = random_fraction
self.runhistory = RunHistory()
self.configs = list()
self.losses = list()
rng = np.random.RandomState(random.randint(0, 100))
if previous_results is not None and len(previous_results.batch_results) > 0:
# Assume same-task changing-configspace trajectory for now
results_previous_adjustment = previous_results.batch_results[-1]
configspace_previous = results_previous_adjustment.configspace
# Construct combined config space
configspace_combined = ConfigSpace.ConfigurationSpace()
development_step = CSH.CategoricalHyperparameter("development_step", choices=["old", "new"])
configspace_combined.add_hyperparameter(
development_step
)
configspace_only_old, configspace_both, configspace_only_new = get_configspace_partitioning_cond(configspace, configspace_previous)
configspace_combined.add_hyperparameters(configspace_both.get_hyperparameters())
configspace_combined.add_hyperparameters(configspace_only_old.get_hyperparameters())
configspace_combined.add_hyperparameters(configspace_only_new.get_hyperparameters())
for hyperparameter in configspace_only_old.get_hyperparameters():
configspace_combined.add_condition(
ConfigSpace.EqualsCondition(hyperparameter, development_step, "old")
)
for hyperparameter in configspace_only_new.get_hyperparameters():
configspace_combined.add_condition(
ConfigSpace.EqualsCondition(hyperparameter, development_step, "new")
)
# Read old configs and losses
result_previous = results_previous_adjustment.results[0]
all_runs = result_previous.get_all_runs(only_largest_budget=False)
self.losses_old = [run.loss for run in all_runs]
self.configs_old = [run.config_id for run in all_runs]
id2conf = result_previous.get_id2config_mapping()
self.configs_old = [id2conf[id_]["config"] for id_ in self.configs_old]
# Map old configs to combined space
for config in self.configs_old:
config["development_step"] = "old"
self.configs_old = [ConfigSpace.Configuration(configspace_combined, config) for config in self.configs_old]
for config, cost in zip(self.configs_old, self.losses_old):
self.runhistory.add(config, cost, 0, StatusType.SUCCESS)
# Construct and fit model
self.configspace = configspace_combined
self.model = _construct_model(self.configspace, rng)
self.acquisition_func = EI(model=self.model)
self.acq_optimizer = LocalSearch(acquisition_function=self.acquisition_func,
config_space=self.configspace, rng=rng)
X = convert_configurations_to_array(self.configs_old)
Y = np.array(self.losses_old, dtype=np.float64)
self.model.train(X, Y)
self.acquisition_func.update(
model=self.model,
eta=min(self.losses_old),
)
else:
self.configspace = configspace
self.model = _construct_model(self.configspace, rng)
self.acquisition_func = EI(model=self.model)
self.acq_optimizer = LocalSearch(acquisition_function=self.acquisition_func,
config_space=self.configspace, rng=rng)
self.min_points_in_model = len(self.configspace.get_hyperparameters()) # TODO
@property
def has_model(self):
return len(self.configs) >= self.min_points_in_model
def get_config(self, budget): # pylint: disable=unused-argument
self.logger.debug("start sampling a new configuration.")
is_random_fraction = np.random.rand() < self.random_fraction
if is_random_fraction or not self.has_model:
if "development_step" in self.configspace.get_hyperparameter_names():
while True:
sample = self.configspace.sample_configuration()
if sample["development_step"] == "new":
break
else:
sample = self.configspace.sample_configuration()
else:
# Use private _maximize to not return challenger list object
sample = self.acq_optimizer._maximize(
runhistory=self.runhistory,
stats=None,
num_points=1,
)
sample = sample[0][1]
sample = ConfigSpace.util.deactivate_inactive_hyperparameters(sample.get_dictionary(), self.configspace)
sample = sample.get_dictionary()
info = {}
self.logger.debug("done sampling a new configuration.")
return sample, info
def new_result(self, job, config_info): # pylint: disable=unused-argument
if job.exception is not None:
self.logger.warning(f"job {job.id} failed with exception\n{job.exception}")
if job.result is None:
# One could skip crashed results, but we decided to
# assign a +inf loss and count them as bad configurations
loss = np.inf
else:
# same for non numeric losses.
# Note that this means losses of minus infinity will count as bad!
loss = job.result["loss"] if np.isfinite(job.result["loss"]) else np.inf
config = ConfigSpace.Configuration(self.configspace, job.kwargs["config"])
self.configs.append(config)
self.losses.append(loss)
if self.has_model:
# TODO: include old
X = convert_configurations_to_array(self.configs)
Y = | np.array(self.losses, dtype=np.float64) | numpy.array |
################################################################################
# Copyright (C) 2013-2014 <NAME>
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for `dot` module.
"""
import unittest
import numpy as np
import scipy
from numpy import testing
from ..dot import Dot, SumMultiply
from ..gaussian import Gaussian, GaussianARD
from bayespy.nodes import GaussianGamma
from ...vmp import VB
from bayespy.utils import misc
from bayespy.utils import linalg
from bayespy.utils import random
from bayespy.utils.misc import TestCase
class TestSumMultiply(TestCase):
def test_parent_validity(self):
"""
Test that the parent nodes are validated properly in SumMultiply
"""
V = GaussianARD(1, 1)
X = Gaussian(np.ones(1), np.identity(1))
Y = Gaussian(np.ones(3), np.identity(3))
Z = Gaussian(np.ones(5), np.identity(5))
A = SumMultiply(X, ['i'])
self.assertEqual(A.dims, ((), ()))
A = SumMultiply('i', X)
self.assertEqual(A.dims, ((), ()))
A = SumMultiply(X, ['i'], ['i'])
self.assertEqual(A.dims, ((1,), (1,1)))
A = SumMultiply('i->i', X)
self.assertEqual(A.dims, ((1,), (1,1)))
A = SumMultiply(X, ['i'], Y, ['j'], ['i','j'])
self.assertEqual(A.dims, ((1,3), (1,3,1,3)))
A = SumMultiply('i,j->ij', X, Y)
self.assertEqual(A.dims, ((1,3), (1,3,1,3)))
A = SumMultiply(V, [], X, ['i'], Y, ['i'], [])
self.assertEqual(A.dims, ((), ()))
A = SumMultiply(',i,i->', V, X, Y)
self.assertEqual(A.dims, ((), ()))
# Gaussian-gamma parents
C = GaussianGamma(np.ones(3), np.identity(3), 1, 1)
A = SumMultiply(Y, ['i'], C, ['i'], ['i'])
self.assertEqual(A.dims, ((3,), (3,3), (), ()))
A = SumMultiply('i,i->i', Y, C)
self.assertEqual(A.dims, ((3,), (3,3), (), ()))
C = GaussianGamma(np.ones(3), np.identity(3), 1, 1)
A = SumMultiply(Y, ['i'], C, ['i'], [])
self.assertEqual(A.dims, ((), (), (), ()))
A = SumMultiply('i,i->', Y, C)
self.assertEqual(A.dims, ((), (), (), ()))
# Error: not enough inputs
self.assertRaises(ValueError,
SumMultiply)
self.assertRaises(ValueError,
SumMultiply,
X)
# Error: too many keys
self.assertRaises(ValueError,
SumMultiply,
Y,
['i', 'j'])
self.assertRaises(ValueError,
SumMultiply,
'ij',
Y)
# Error: not broadcastable
self.assertRaises(ValueError,
SumMultiply,
Y,
['i'],
Z,
['i'])
self.assertRaises(ValueError,
SumMultiply,
'i,i',
Y,
Z)
# Error: output key not in inputs
self.assertRaises(ValueError,
SumMultiply,
X,
['i'],
['j'])
self.assertRaises(ValueError,
SumMultiply,
'i->j',
X)
# Error: non-unique input keys
self.assertRaises(ValueError,
SumMultiply,
X,
['i','i'])
self.assertRaises(ValueError,
SumMultiply,
'ii',
X)
# Error: non-unique output keys
self.assertRaises(ValueError,
SumMultiply,
X,
['i'],
['i','i'])
self.assertRaises(ValueError,
SumMultiply,
'i->ii',
X)
# String has too many '->'
self.assertRaises(ValueError,
SumMultiply,
'i->i->i',
X)
# String has too many input nodes
self.assertRaises(ValueError,
SumMultiply,
'i,i->i',
X)
# Same parent several times
self.assertRaises(ValueError,
SumMultiply,
'i,i->i',
X,
X)
# Same parent several times via deterministic node
Xh = SumMultiply('i->i', X)
self.assertRaises(ValueError,
SumMultiply,
'i,i->i',
X,
Xh)
def test_message_to_child(self):
"""
Test the message from SumMultiply to its children.
"""
def compare_moments(u0, u1, *args):
Y = SumMultiply(*args)
u_Y = Y.get_moments()
self.assertAllClose(u_Y[0], u0)
self.assertAllClose(u_Y[1], u1)
# Test constant parent
y = np.random.randn(2,3,4)
compare_moments(y,
linalg.outer(y, y, ndim=2),
'ij->ij',
y)
# Do nothing for 2-D array
Y = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y = Y.get_moments()
compare_moments(y[0],
y[1],
'ij->ij',
Y)
compare_moments(y[0],
y[1],
Y,
[0,1],
[0,1])
# Sum over the rows of a matrix
Y = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y = Y.get_moments()
mu = np.einsum('...ij->...j', y[0])
cov = np.einsum('...ijkl->...jl', y[1])
compare_moments(mu,
cov,
'ij->j',
Y)
compare_moments(mu,
cov,
Y,
[0,1],
[1])
# Inner product of three vectors
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
plates=(),
shape=(2,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x2 = X2.get_moments()
X3 = GaussianARD(np.random.randn(7,6,5,2),
np.random.rand(7,6,5,2),
plates=(7,6,5),
shape=(2,))
x3 = X3.get_moments()
mu = np.einsum('...i,...i,...i->...', x1[0], x2[0], x3[0])
cov = np.einsum('...ij,...ij,...ij->...', x1[1], x2[1], x3[1])
compare_moments(mu,
cov,
'i,i,i',
X1,
X2,
X3)
compare_moments(mu,
cov,
'i,i,i->',
X1,
X2,
X3)
compare_moments(mu,
cov,
X1,
[9],
X2,
[9],
X3,
[9])
compare_moments(mu,
cov,
X1,
[9],
X2,
[9],
X3,
[9],
[])
# Outer product of two vectors
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
plates=(5,),
shape=(2,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x2 = X2.get_moments()
mu = np.einsum('...i,...j->...ij', x1[0], x2[0])
cov = np.einsum('...ik,...jl->...ijkl', x1[1], x2[1])
compare_moments(mu,
cov,
'i,j->ij',
X1,
X2)
compare_moments(mu,
cov,
X1,
[9],
X2,
[7],
[9,7])
# Matrix product
Y1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
plates=(),
shape=(3,2))
y1 = Y1.get_moments()
Y2 = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y2 = Y2.get_moments()
mu = np.einsum('...ik,...kj->...ij', y1[0], y2[0])
cov = np.einsum('...ikjl,...kmln->...imjn', y1[1], y2[1])
compare_moments(mu,
cov,
'ik,kj->ij',
Y1,
Y2)
compare_moments(mu,
cov,
Y1,
['i','k'],
Y2,
['k','j'],
['i','j'])
# Trace of a matrix product
Y1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
plates=(),
shape=(3,2))
y1 = Y1.get_moments()
Y2 = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y2 = Y2.get_moments()
mu = np.einsum('...ij,...ji->...', y1[0], y2[0])
cov = np.einsum('...ikjl,...kilj->...', y1[1], y2[1])
compare_moments(mu,
cov,
'ij,ji',
Y1,
Y2)
compare_moments(mu,
cov,
'ij,ji->',
Y1,
Y2)
compare_moments(mu,
cov,
Y1,
['i','j'],
Y2,
['j','i'])
compare_moments(mu,
cov,
Y1,
['i','j'],
Y2,
['j','i'],
[])
# Vector-matrix-vector product
X1 = GaussianARD(np.random.randn(3),
np.random.rand(3),
plates=(),
shape=(3,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x2 = X2.get_moments()
Y = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
plates=(),
shape=(3,2))
y = Y.get_moments()
mu = np.einsum('...i,...ij,...j->...', x1[0], y[0], x2[0])
cov = np.einsum('...ia,...ijab,...jb->...', x1[1], y[1], x2[1])
compare_moments(mu,
cov,
'i,ij,j',
X1,
Y,
X2)
compare_moments(mu,
cov,
X1,
[1],
Y,
[1,2],
X2,
[2])
# Complex sum-product of 0-D, 1-D, 2-D and 3-D arrays
V = GaussianARD(np.random.randn(7,6,5),
np.random.rand(7,6,5),
plates=(7,6,5),
shape=())
v = V.get_moments()
X = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x = X.get_moments()
Y = GaussianARD(np.random.randn(3,4),
np.random.rand(3,4),
plates=(5,),
shape=(3,4))
y = Y.get_moments()
Z = GaussianARD(np.random.randn(4,2,3),
np.random.rand(4,2,3),
plates=(6,5),
shape=(4,2,3))
z = Z.get_moments()
mu = np.einsum('...,...i,...kj,...jik->...k', v[0], x[0], y[0], z[0])
cov = np.einsum('...,...ia,...kjcb,...jikbac->...kc', v[1], x[1], y[1], z[1])
compare_moments(mu,
cov,
',i,kj,jik->k',
V,
X,
Y,
Z)
compare_moments(mu,
cov,
V,
[],
X,
['i'],
Y,
['k','j'],
Z,
['j','i','k'],
['k'])
# Test with constant nodes
N = 10
D = 5
a = np.random.randn(N, D)
B = Gaussian(
np.random.randn(D),
random.covariance(D),
)
X = SumMultiply('i,i->', B, a)
np.testing.assert_allclose(
X.get_moments()[0],
np.einsum('ni,i->n', a, B.get_moments()[0]),
)
np.testing.assert_allclose(
X.get_moments()[1],
np.einsum('ni,nj,ij->n', a, a, B.get_moments()[1]),
)
#
# Gaussian-gamma parents
#
# Outer product of vectors
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
shape=(2,))
x1 = X1.get_moments()
X2 = GaussianGamma(
np.random.randn(6,1,2),
random.covariance(2),
np.random.rand(6,1),
np.random.rand(6,1),
plates=(6,1)
)
x2 = X2.get_moments()
Y = SumMultiply('i,j->ij', X1, X2)
u = Y._message_to_child()
y = np.einsum('...i,...j->...ij', x1[0], x2[0])
yy = np.einsum('...ik,...jl->...ijkl', x1[1], x2[1])
self.assertAllClose(u[0], y)
self.assertAllClose(u[1], yy)
self.assertAllClose(u[2], x2[2])
self.assertAllClose(u[3], x2[3])
# Test with constant nodes
N = 10
M = 8
D = 5
a = np.random.randn(N, 1, D)
B = GaussianGamma(
np.random.randn(M, D),
random.covariance(D, size=(M,)),
np.random.rand(M),
np.random.rand(M),
ndim=1,
)
X = SumMultiply('i,i->', B, a)
np.testing.assert_allclose(
X.get_moments()[0],
np.einsum('nmi,mi->nm', a, B.get_moments()[0]),
)
np.testing.assert_allclose(
X.get_moments()[1],
np.einsum('nmi,nmj,mij->nm', a, a, B.get_moments()[1]),
)
np.testing.assert_allclose(
X.get_moments()[2],
B.get_moments()[2],
)
np.testing.assert_allclose(
X.get_moments()[3],
B.get_moments()[3],
)
pass
def test_message_to_parent(self):
"""
Test the message from SumMultiply node to its parents.
"""
data = 2
tau = 3
def check_message(true_m0, true_m1, parent, *args, F=None):
if F is None:
A = SumMultiply(*args)
B = GaussianARD(A, tau)
B.observe(data*np.ones(A.plates + A.dims[0]))
else:
A = F
(A_m0, A_m1) = A._message_to_parent(parent)
self.assertAllClose(true_m0, A_m0)
self.assertAllClose(true_m1, A_m1)
pass
# Check: different message to each of multiple parents
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x2 = X2.get_moments()
m0 = tau * data * x2[0]
m1 = -0.5 * tau * x2[1] * np.identity(2)
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
[9],
X2,
[9],
[9])
m0 = tau * data * x1[0]
m1 = -0.5 * tau * x1[1] * np.identity(2)
check_message(m0, m1, 1,
'i,i->i',
X1,
X2)
check_message(m0, m1, 1,
X1,
[9],
X2,
[9],
[9])
# Check: key not in output
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x1 = X1.get_moments()
m0 = tau * data * np.ones(2)
m1 = -0.5 * tau * np.ones((2,2))
check_message(m0, m1, 0,
'i',
X1)
check_message(m0, m1, 0,
'i->',
X1)
check_message(m0, m1, 0,
X1,
[9])
check_message(m0, m1, 0,
X1,
[9],
[])
# Check: key not in some input
X1 = GaussianARD(np.random.randn(),
np.random.rand())
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x2 = X2.get_moments()
m0 = tau * data * np.sum(x2[0], axis=-1)
m1 = -0.5 * tau * np.sum(x2[1] * np.identity(2),
axis=(-1,-2))
check_message(m0, m1, 0,
',i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
[],
X2,
[9],
[9])
m0 = tau * data * x1[0] * np.ones(2)
m1 = -0.5 * tau * x1[1] * np.identity(2)
check_message(m0, m1, 1,
',i->i',
X1,
X2)
check_message(m0, m1, 1,
X1,
[],
X2,
[9],
[9])
# Check: keys in different order
Y1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
ndim=2)
y1 = Y1.get_moments()
Y2 = GaussianARD(np.random.randn(2,3),
np.random.rand(2,3),
ndim=2)
y2 = Y2.get_moments()
m0 = tau * data * y2[0].T
m1 = -0.5 * tau * np.einsum('ijlk->jikl', y2[1] * misc.identity(2,3))
check_message(m0, m1, 0,
'ij,ji->ij',
Y1,
Y2)
check_message(m0, m1, 0,
Y1,
['i','j'],
Y2,
['j','i'],
['i','j'])
m0 = tau * data * y1[0].T
m1 = -0.5 * tau * np.einsum('ijlk->jikl', y1[1] * misc.identity(3,2))
check_message(m0, m1, 1,
'ij,ji->ij',
Y1,
Y2)
check_message(m0, m1, 1,
Y1,
['i','j'],
Y2,
['j','i'],
['i','j'])
# Check: plates when different dimensionality
X1 = GaussianARD(np.random.randn(5),
np.random.rand(5),
shape=(),
plates=(5,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(5,3),
np.random.rand(5,3),
shape=(3,),
plates=(5,))
x2 = X2.get_moments()
m0 = tau * data * np.sum(np.ones((5,3)) * x2[0], axis=-1)
m1 = -0.5 * tau * np.sum(x2[1] * misc.identity(3), axis=(-1,-2))
check_message(m0, m1, 0,
',i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
[],
X2,
['i'],
['i'])
m0 = tau * data * x1[0][:,np.newaxis] * np.ones((5,3))
m1 = -0.5 * tau * x1[1][:,np.newaxis,np.newaxis] * misc.identity(3)
check_message(m0, m1, 1,
',i->i',
X1,
X2)
check_message(m0, m1, 1,
X1,
[],
X2,
['i'],
['i'])
        # Check: other parent's moments broadcast over plates when node has the
        # same plates
X1 = GaussianARD(np.random.randn(5,4,3),
np.random.rand(5,4,3),
shape=(3,),
plates=(5,4))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(5,4))
x2 = X2.get_moments()
m0 = tau * data * np.ones((5,4,3)) * x2[0]
m1 = -0.5 * tau * x2[1] * misc.identity(3)
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
['i'],
X2,
['i'],
['i'])
        # Check: other parent's moments broadcast over plates when node does
        # not have that plate
X1 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=())
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(5,4))
x2 = X2.get_moments()
m0 = tau * data * np.sum(np.ones((5,4,3)) * x2[0], axis=(0,1))
m1 = -0.5 * tau * np.sum(np.ones((5,4,1,1))
* misc.identity(3)
* x2[1],
axis=(0,1))
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
['i'],
X2,
['i'],
['i'])
        # Check: other parent's moments broadcast over plates when the node
        # only broadcasts that plate
X1 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(1,1))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(5,4))
x2 = X2.get_moments()
m0 = tau * data * np.sum(np.ones((5,4,3)) * x2[0], axis=(0,1), keepdims=True)
m1 = -0.5 * tau * np.sum(np.ones((5,4,1,1))
* misc.identity(3)
* x2[1],
axis=(0,1),
keepdims=True)
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
['i'],
X2,
['i'],
['i'])
# Check: broadcasted dimensions
X1 = GaussianARD(np.random.randn(1,1),
np.random.rand(1,1),
ndim=2)
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
ndim=2)
x2 = X2.get_moments()
m0 = tau * data * np.sum( | np.ones((3,2)) | numpy.ones |
# -*- coding: UTF-8 -*-
from .io import imread, imwrite
from skimage import draw
from ..utils import is_seq, is_np_array
from .transforms import bgr2rgb
from ..vis.color import VIS_COLOR, get_color_tuple, get_text_color
import random
import numpy as np
import matplotlib.pyplot as plt
import cv2
TEXT_MARGIN = 2
def imshow(img):
"""Show an image.
Args:
img (str or ndarray): The image to be displayed.
"""
plt.axis('off')
plt.imshow(imread(img,rgb_mode=True))
plt.show()
def imshow_bboxes(
img,
bboxes,
labels=None,
scores=None,
classes=None,
score_thresh=0,
masks=None,
color=(244, 67, 54),
thickness=1,
use_normalized_coordinates=False,
is_show=False,
save_path=None):
assert classes is None or is_seq(classes) or is_np_array(classes)
assert labels is None or is_seq(labels) or is_np_array(labels)
assert scores is None or is_seq(scores) or is_np_array(scores)
assert masks is None or is_seq(masks) or is_np_array(masks)
from icv.data.core import BBoxList, BBox
if isinstance(bboxes, BBoxList):
bboxes = np.array(bboxes.tolist())
elif isinstance(bboxes, list) and len(bboxes) > 0 and isinstance(bboxes[0], BBox):
bboxes = np.array([bbox.bbox for bbox in bboxes])
else:
bboxes = np.array(bboxes)
image = imread(img)
if bboxes.shape[0] == 0:
if is_show:
imshow(image)
if save_path:
imwrite(image, save_path)
return image
bboxes = bboxes[np.where(np.array(scores) >= score_thresh)] if scores is not None else bboxes
if labels is not None:
if not is_np_array(labels):
labels = | np.array(labels) | numpy.array |
import argparse
import os
import os.path as osp
import json
import shutil
import sys
import random
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import AgglomerativeClustering, SpectralClustering
from sklearn_extra.cluster import KMedoids
from utils import time_to_string
MEAN_COLOUR = 'navy'
STD_COLOUR = 'royalblue'
SIZE_COLOUR = 'red'
class Grouping(object):
def __init__(self, cmd):
self.im_dir = cmd['im_dir']
self.res_dir = cmd['res_dir']
self.order_by = cmd['order_by']
self.n_folders = cmd['n_folders'] # The number of groups saved
self.write_group_info = cmd['write_group_info']
self.song_dict = {}
for line in open(osp.join(self.res_dir, 'song_list.txt'), 'r'):
index_song, song = line.split('\n')[0].split('\t')
index_song = int(index_song)
self.song_dict[index_song] = song
self.n_songs = len(self.song_dict)
self.dists = np.loadtxt(osp.join(self.res_dir, 'dists.txt'), delimiter='\t')
if self.n_folders < 0:
self.n_folders = self.n_songs
self.n_folders = min(self.n_folders, self.n_songs)
def save_results(self, grouping_dict, name, title, xlabel, plot_size):
'''
This function saves the result of a grouping technique.
It does so by first saving the grouping dictionary as a json file and then plots a summary of the results in a figure.
Its inputs are:
- grouping_dict: a dictionary of dictionaries, where each subdictionary contains the information of a single group.
- name: one of 'clusters' or 'neighbours', to indentify the dictionary.
- title: the title of the figure to be plotted.
- xlabel: one of 'cluster' or 'song', to be used as label for the horizontal axis.
- plot_size: boolean. If True, the size of the groups are represented in the figure.
'''
with open(osp.join(self.res_dir, name + '.json'), 'w') as grouping:
json.dump(grouping_dict, grouping, indent=2)
grouping.close()
y = []
std = []
size = []
for group_info in sorted(grouping_dict.values(), key=lambda x:x['number']):
if group_info.get('size', 2) > 1:
y.append(group_info['dist'])
std.append(group_info['std_dist'])
size.append(group_info.get('size', 1))
if y:
x = 1 + np.arange(len(y))
y = np.array(y)
std = np.array(std)
size = np.array(size)
size = size*np.max(std)/ | np.max(size) | numpy.max |
from reader import Reader
import numpy as np
import matplotlib.pyplot as plt
# Given that a rotation vector represents a rotation in the Lie algebra, we can apply
# the Rodrigues formula to map it to the Lie group and rotate the points directly.
def calculate_rotation_by_rodrigues_formula(rotation_vecs, points_in_3d):
    # theta is the rotation angle: the norm of each rotation vector (one per row).
    # np.newaxis adds one more dimension so the result is an N*1 matrix that broadcasts.
    theta = np.linalg.norm(rotation_vecs, axis=1)[:, np.newaxis]
    with np.errstate(invalid='ignore'):
        unit_vecs = rotation_vecs / theta
        unit_vecs = np.nan_to_num(unit_vecs)  # guard against zero-angle rotation vectors
    cos_theta = np.cos(theta)
    sin_theta = np.sin(theta)
    dot = np.sum(points_in_3d * unit_vecs, axis=1)[:, np.newaxis]
    # Rodrigues formula: v_rot = v*cos(theta) + (k x v)*sin(theta) + k*(k.v)*(1 - cos(theta))
    return (cos_theta * points_in_3d
            + sin_theta * np.cross(unit_vecs, points_in_3d)
            + dot * (1 - cos_theta) * unit_vecs)
def world_to_pixel_coordinates(camera_params, points_in_3d):
num_observations = camera_params.shape[0]
rotation = camera_params[:, 0:3]
translation = camera_params[:, 3:6]
rot = calculate_rotation_by_rodrigues_formula(rotation, points_in_3d)
translated_rotated_points = | np.cross(points_in_3d, rot) | numpy.cross |
#----------------------------------------------------------------------------------------------------
'''
kmm.py
This file contains the definition of related functions for kernal mean matching
Coded by <NAME>
Date: 2018-11-25
All Rights Reserved.
'''
#----------------------------------------------------------------------------------------------------
import numpy as np
import random
import scipy.linalg as la
from datetime import *
from cala import *
from kernel import *
from nmse import *
def updMean(X, mx, Y):
xDim, xSam = np.shape(X)
yDim, ySam = np.shape(Y)
assert xDim == yDim, 'The dimensionality of X and Y are not identical !'
n = xSam + ySam
for i in range(xDim):
mx[i] = mx[i] * xSam
for j in range(ySam):
mx[i] = mx[i] + Y[i][j]
mx[i] = mx[i] / n
return mx
def updY(X, tX):
xDim, xSam = np.shape(X)
    tDim, tSam = np.shape(tX)
assert xDim == tDim, 'The dimensionality of X and tX are not identical !'
n = xSam + tSam
Y = np.column_stack((X, tX))
return Y
def getAind(X, n):
xDim, xSam = np.shape(X)
tmk = xyK(X, X, 'Sinc')
tm = np.sum(tmk, axis=0)
assert len(tm) == xSam, 'The direction of operation may be incorrect !'
idx = np.argsort(tm)
ix = idx[0:n]
return ix
def getBind(X, n, rn):
xDim, xSam = np.shape(X)
index = np.arange(xSam)
random.shuffle(index)
ind = index[0:rn]
tX = X[:, ind]
tmk = xyK(tX, X, 'Sinc')
tm = np.sum(tmk, axis=0)
assert len(tm) == xSam, 'The direction of operation may be incorrect !'
idx = np.argsort(tm)
ix = idx[0:n]
return ix
def ginv(M):
mRow, mCol = np.shape(M)
U, s, V = la.svd(M)
V = np.transpose(V)
s, r = getRank(s)
U = U[:, 0:r]
V = V[:, 0:r]
s = s[0:r]
S = np.diag(s)
tmp = np.dot(V, S)
tmq = np.transpose(U)
tm = np.dot(tmp, tmq)
return tm
def getWeight(X, ind):
xDim, xSam = np.shape(X)
#tDim, tSam = np.shape(tX)
#assert xDim == tDim, 'The dimensionality of X and tX are not identical !'
mx = np.mean(X, axis=1)
mw = np.zeros((xSam, 1))
for i in range(xSam):
tmp = X[:, i] - mx
tmp = tmp * tmp
tmp = np.sum(tmp)
tmp = np.exp(-tmp)
mw[i, 0] = tmp
tmw = mw[ind, 0]
sw = np.sum(mw)
stw = np.sum(tmw)
weight = float(stw) / sw
return weight
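# Illustrative sketch (not part of the original file): selecting candidate indices and
# their weight for a hypothetical 5-dimensional sample matrix. Relies on xyK from the
# accompanying kernel module, exactly as the helpers above do.
def _example_kmm_selection():
    X = np.random.randn(5, 200)  # 5 dimensions, 200 samples
    ind = getAind(X, 20)
    weight = getWeight(X, ind)
    return ind, weight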
# +++++ The kmm functions +++++
def setLayer(b, P, k):
bDep, bRow, bCol = np.shape(b)
pRow, pCol = np.shape(P)
assert bRow == pRow, 'The dimensionality of b and P are not identical !'
assert bCol == pCol, 'The dimensionality of b and P are not identical !'
for i in range(pRow):
for j in range(pCol):
b[k, i, j] = P[i, j]
return b
def together(b):
bDep, bRow, bCol = np.shape(b)
assert bDep > 1, 'The depth of b is incorrect !'
m = np.zeros((bRow, bCol))
for i in range(bRow):
for j in range(bCol):
for k in range(bDep):
m[i, j] = m[i, j] + b[k, i, j]
return m
def iTogether(B):
bDep, bRow, bCol = np.shape(B)
assert bDep >= 1, 'The depth of b is incorrect !'
sKxx = xysK(self.__X, self.__X, 'Gaussian', self.__kw)
sKxy = xysK(self.__X, self.__Y, 'Gaussian', self.__kw)
P = np.zeros((bDep, bDep))
q = np.zeros((bDep, 1))
for i in range(bDep):
tmb = B[i, :, :]
tmp = np.dot(np.transpose(tmb), sKxx)
tmp = np.dot(tmp, tmb)
tmq = np.sum(np.sum(tmp))
tm = 1 / 2
P[i, i] = tm * tmq
tmp = np.dot( | np.transpose(tmb) | numpy.transpose |
# Original work Copyright (c) 2015, Danish Geodata Agency <<EMAIL>>
# Modified work Copyright (c) 2015, 2016, Geoboxers <<EMAIL>>
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
############################
# Pointcloud utility class - wraps many useful methods
# silyko, 2014 - 2016
############################
import os
import ctypes
import numpy as np
from math import ceil
import logging
from osgeo import gdal, ogr, osr
try:
import thatsDEM2.triangle as triangle
except ImportError:
# prereqs for triangle might not be installed
HAS_TRIANGLE = False
else:
HAS_TRIANGLE = True
import thatsDEM2.array_geometry as array_geometry
import thatsDEM2.osr_utils as osr_utils
import thatsDEM2.vector_io as vector_io
# Should perhaps be moved to method in order to speed up import...
import thatsDEM2.grid as grid
import thatsDEM2.remote_files as remote_files
# Import las reader modules
try:
import laspy.file
except ImportError:
HAS_LASPY = False
else:
HAS_LASPY = True
try:
import slash
except Exception:
HAS_SLASH = False
else:
HAS_SLASH = True
LOG = logging.getLogger(__name__)
class InvalidArrayError(Exception):
pass
class NotAvailableError(Exception):
pass
# Translation of short lidar attr codes to laspy names
LASPY_ATTRS = {"c": "raw_classification",
"pid": "pt_src_id",
"rn": "return_num",
"i": "intensity"}
def slice_array(arr, slicer=None):
"""
Convenience method to slice an array,
without creating a view if slicer is None.
Args:
arr: Numpy array
slicer: fancy indexing 'slice' object (slice, mask, int array)
Returns:
Sliced array or original array if slicer is None.
"""
if slicer is not None:
return arr[slicer]
return arr
def empty_like(pc):
"""
Construct an empty Pointcloud object with the same attributes as the input pointcloud.
Args:
pc: Pointcloud.pointcloud object.
Returns:
Pointcloud object.
"""
out = type(pc)(np.empty((0, 2), dtype=np.float64),
np.empty((0,), dtype=np.float64))
for a in pc.attributes:
array = pc.get_array(a)
out.set_attribute(a, np.empty((0,), dtype=array.dtype))
if pc.srs is not None:
out.set_srs(pc.srs.Clone())
return out
class Pointcloud(object):
"""
Pointcloud class constructed from a xy and a z array.
z will usually be the third spatial dimension,
but can really be any additional dimension/attribute as many methods work in 2 (and a half) D.
----
Additional properties given in pc_attrs must be 1d numpy arrays.
Pointcloud properties as well as xy and z will be directly modifiable by design,
for example like pc.xy += 1.35 and pc.c[pc.c == 4] = 5,
but make sure you know what you're doing in order to keep consistency in sizes.
And note that if you do direct modifications like that, derived attributes like
triangulation, sorting and bounding box may be inconsistent - remember to clear
with Pointcloud.clear_derived_attrs()
"""
def __init__(self, xy, z, srs=None, **pc_attrs):
xy = self._validate_array("xy", xy, check_size=False)
z = self._validate_array("z", z, check_size=False)
if z.shape[0] != xy.shape[0]:
raise ValueError("z must have length equal to number of xy-points")
# All pointcloudy arrays, including xy and z
self.__pc_attrs = {"xy", "z"}
self.xy = xy
self.z = z
self.set_srs(srs) # Can be an osr.SpatialReference or None
# Derived attrs
self.clear_derived_attrs()
# store the additional attributes
for a in pc_attrs:
self.set_attribute(a, pc_attrs[a])
def __setattr__(self, name, value):
"""Try to keep consistency if pointcloud attributes are set directly"""
# By logic shortcut we can ALWAYS add a new attribute
# But we cannot add an attribute twice before __pc_attrs is defined!
if name in self.__dict__ and name in self.__pc_attrs:
try:
self._set_array(name, value, True)
except Exception as e:
raise InvalidArrayError(str(e))
else:
object.__setattr__(self, name, value)
def __getitem__(self, i):
"""Return a dict with values at a specific index"""
return {a: self.get_array(a)[i] for a in self.__pc_attrs}
def astype(self, subclass):
"""
Return data as a subclass.
subclass must be a subclass of Pointcloud.
"""
if not issubclass(subclass, Pointcloud):
raise ValueError("Not a Pointcloud subclass")
new_instance = subclass(self.xy, self.z)
for a in self.attributes:
new_instance.set_attribute(a, self.get_array(a))
if self.srs is not None:
new_instance.set_srs(self.srs.Clone())
return new_instance
def set_srs(self, srs):
if (srs is not None) and not isinstance(srs, osr.SpatialReference):
raise TypeError("srs must be an osr.SpatialReference")
self.srs = srs
def copy(self):
"""
Return a copy of self as a new instance.
"""
return self.astype(self.__class__)
@property
def attributes(self):
"""Return the attributes minus xy and z"""
return self.__pc_attrs.difference({"xy", "z"})
def set_attribute(self, name, value):
"""
Set or add an additional pointcloud attribute.
Args:
name: name of attribute
value: array like, must have dimension 1 and same size as self.
"""
if name in ("xy", "z", "srs"):
raise ValueError(
"Name of an additional attribute cannot be xy, z or srs")
self.set_array(name, value)
def _validate_array(self, name, value, check_size=True):
"""Do the array checking stuff for all arrays:
xy, z as well as additional attributes"""
value = np.asarray(value)
if name == "xy" or name == "z":
value = np.require(value, requirements=[
'A', 'O', 'C'], dtype=np.float64)
else:
value = np.require(value, requirements=['A', 'O', 'C'])
if check_size:
assert value.shape[0] == self.xy.shape[0]
if name != "xy":
assert value.ndim == 1
else:
assert value.ndim == 2
return value
def set_array(self, name, array):
"""A method to do the array checking stuff and
set array for all pointcloudy arrays, including xy and z"""
self._set_array(name, array, True)
def _set_array(self, name, array, size_check=False):
"""Internal version of set array, with no size checks"""
# Unless someone tampers with __pc_attrs or deletes attributes,
# there should be consistency
# between names in __pc_attrs and attributes of self
array = self._validate_array(name, array, size_check)
self.__pc_attrs.add(name)
object.__setattr__(self, name, array)
def get_array(self, name):
if name in self.__pc_attrs:
return self.__dict__[name]
raise ValueError("Pointcloud does not have %s attribute" % name)
def remove_attribute(self, name):
if name in self.attributes:
delattr(self, name)
self.__pc_attrs.remove(name)
def get_unique_attribute_values(self, name):
if name in self.attributes:
return np.unique(self.get_array(name))
raise ValueError("Pointcloud does not have %s attribute" % name)
def extend(self, other, least_common=False):
"""
Extend the pointcloud 'in place' by adding another pointcloud.
Attributes of the current pointcloud must be a subset of the attributes of other.
Args:
other: A pointcloud.Pointcloud object
least_common: Whether to restrict to least common set of attributes.
Raises:
ValueError: If other pointcloud does not have at least the same attributes as self.
"""
if not isinstance(other, Pointcloud):
raise ValueError("Other argument must be a Pointcloud")
common = self.attributes.intersection(other.attributes)
additional = self.attributes.difference(common)
if len(additional) > 0:
if not least_common:
raise ValueError(
"Other pointcloud does not have all attributes of self.")
# else delete additional
for a in additional:
self.remove_attribute(a)
self.clear_derived_attrs()
for a in self.__pc_attrs:
# Will not invoke __setattr__
self._set_array(a, np.concatenate(
(self.get_array(a), other.get_array(a))))
def thin(self, I):
"""
Modify the pointcloud 'in place' by slicing to a mask or index array.
Args:
I: Mask, index array (1d) or slice to use for fancy numpy indexing.
"""
# Modify in place
self.clear_derived_attrs()
for a in self.__pc_attrs:
self._set_array(a, self.get_array(a)[I])
def cut(self, mask):
"""
Cut the pointcloud by a mask or index array using fancy indexing.
Args:
mask: Mask or index array (1d) to use for fancy numpy indexing.
Returns:
The 'sliced' Pointcloud object.
"""
if self.xy.size == 0: # just return something empty to protect chained calls...
return empty_like(self)
pc = type(self)(self.xy[mask], self.z[mask])
for a in self.attributes:
pc.set_attribute(a, self.get_array(a)[mask])
return pc
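# Illustrative usage (hypothetical attribute values): cut() returns a new Pointcloud,
# while thin() below modifies self in place, e.g.
# ground = pc.cut(pc.c == 2) # new object holding only class-2 points
# pc.thin(pc.z < 100.0) # pc itself now keeps only points with z below 100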
def sort_spatially(self, cs, shape=None, xy_ul=None, keep_sorting=False):
"""
Primitive spatial sorting by creating a 'virtual' 2D grid covering the pointcloud
and thus a 1D index by consecutive c style numbering of cells.
Keep track of 'slices' of the pointcloud within each 'virtual' cell.
As the pointcloud is reordered all derived attributes will be cleared.
Returns:
A reference to self.
"""
if self.get_size() == 0:
raise Exception("No way to sort an empty pointcloud.")
if (bool(shape) != bool(xy_ul)): # either both None or both given
raise ValueError(
"Neither or both of shape and xy_ul should be specified.")
self.clear_derived_attrs()
if shape is None:
x1, y1, x2, y2 = self.get_bounds()
ncols = int((x2 - x1) / cs) + 1
nrows = int((y2 - y1) / cs) + 1
else:
x1, y2 = xy_ul
nrows, ncols = shape
arr_coords = ((self.xy - (x1, y2)) / (cs, -cs)).astype(np.int32)
# do we cover the whole area?
mx, my = arr_coords.min(axis=0)
Mx, My = arr_coords.max(axis=0)
assert(min(mx, my) >= 0 and Mx < ncols and My < nrows)
B = arr_coords[:, 1] * ncols + arr_coords[:, 0]
I = np.argsort(B)
B = B[I]
self.thin(I) # This will clear derived attrs
# fix attr setting order - call thin later...
self.spatial_index = np.ones((ncols * nrows * 2,), dtype=np.int32) * -1
res = array_geometry.lib.fill_spatial_index(
B, self.spatial_index, B.shape[0], ncols * nrows)
if res != 0:
raise Exception(
"Size of spatial index array too small! Programming error!")
if keep_sorting:
self.sorting_indices = I
self.index_header = np.asarray(
(ncols, nrows, x1, y2, cs), dtype=np.float64)
return self
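# Illustrative note: the flat cell index used above is row * ncols + col with
# col = int((x - x1) / cs) and row = int((y2 - y) / cs). For example, with ncols = 100
# and cs = 1.0, a point one cell right of and two cells below the upper-left corner
# falls in cell 2 * 100 + 1 = 201.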
def sort_back(self):
"""If pc is sorted, sort it back... in place ....
"""
if self.sorting_indices is not None:
I = np.argsort(self.sorting_indices)
self.thin(I)
else:
raise ValueError("No sorting indices")
def clear_derived_attrs(self):
"""
Clear derived attributes which will change after an in place modification, like an extension.
"""
# Clears attrs which become invalid by an extension or sorting
self.triangulation = None
self.index_header = None
self.spatial_index = None
self.bbox = None
self.triangle_validity_mask = None
self.sorting_indices = None
def might_overlap(self, other):
return self.might_intersect_box(other.get_bounds())
def might_intersect_box(self, box): # box=(x1,y1,x2,y2)
if self.xy.shape[0] == 0 or box is None:
return False
b1 = self.get_bounds()
xhit = box[0] <= b1[0] <= box[2] or b1[0] <= box[0] <= b1[2]
yhit = box[1] <= b1[1] <= box[3] or b1[1] <= box[1] <= b1[3]
return xhit and yhit
# Properties - nice shortcuts
@property
def bounds(self):
return self.get_bounds()
@property
def size(self):
return self.get_size()
@property
def z_bounds(self):
return self.get_z_bounds()
@property
def extent(self):
if self.xy.shape[0] > 0:
bbox = self.get_bounds()
z1, z2 = self.get_z_bounds()
extent = np.zeros((6,), dtype=np.float64)
extent[0:2] = bbox[0:2]
extent[3:5] = bbox[2:4]
extent[2] = z1
extent[5] = z2
return extent
return None
def get_bounds(self):
"""Return planar bounding box as (x1,y1,x2,y2) or None if empty."""
if self.bbox is None:
if self.xy.shape[0] > 0:
self.bbox = array_geometry.get_bounds(self.xy)
else:
return None
return self.bbox
def get_z_bounds(self):
"""Return z bounding box as (z1,z2) or None if empty."""
if self.z.size > 0:
return np.min(self.z), np.max(self.z)
else:
return None
def get_size(self):
"""Return point count."""
return self.xy.shape[0]
def cut_to_polygon(self, rings):
"""
Cut the pointcloud to a polygon.
Args:
rings: list of rings as numpy arrays.
The first entry is the outer ring, while subsequent are holes. Holes in holes not supported.
Returns:
A new Pointcloud object.
"""
I = array_geometry.points_in_polygon(self.xy, rings)
return self.cut(I)
def cut_to_line_buffer(self, vertices, dist):
"""
Cut the pointcloud to a buffer around a line (quite fast).
Args:
vertices: The vertices of the line string as a (n,2) float64 numpy array.
dist: The buffer distance.
Returns:
A new Pointcloud object.
"""
I = array_geometry.points_in_buffer(self.xy, vertices, dist)
return self.cut(I)
def cut_to_box(self, xmin, ymin, xmax, ymax):
"""Cut the pointcloud to a planar bounding box"""
I = np.logical_and((self.xy >= (xmin, ymin)),
(self.xy <= (xmax, ymax))).all(axis=1)
return self.cut(I)
def get_grid_mask(self, M, georef):
"""
Get the boolean mask indicating which points lie within a (nrows,ncols) mask.
Args:
M: A numpy boolean array of shape (nrows,ncols).
georef: The GDAL style georeference of the input mask.
Returns:
A numpy 1d boolean mask.
"""
ac = ((self.xy - (georef[0], georef[3])) /
(georef[1], georef[5])).astype(np.int32)
N = np.logical_and(ac >= (0, 0), ac < (
M.shape[1], M.shape[0])).all(axis=1)
ac = ac[N]
MM = np.zeros((self.xy.shape[0],), dtype=np.bool_)
MM[N] = M[ac[:, 1], ac[:, 0]]
return MM
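# Illustrative note: with a GDAL geotransform georef = (x0, dx, 0, y0, 0, -dy), the
# integer grid position of a point is col = int((x - x0) / dx) and
# row = int((y - y0) / -dy), which is what the vectorised expression above computes
# before discarding points that fall outside the mask.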
def cut_to_grid_mask(self, M, georef):
"""
Cut to the points which lie within a (nrows,ncols) mask.
Args:
M: A numpy boolean array of shape (nrows,ncols).
georef: The GDAL style georeference of the input mask.
Returns:
A new Pointcloud object.
"""
MM = self.get_grid_mask(M, georef)
return self.cut(MM)
def cut_to_z_interval(self, zmin, zmax):
"""
Cut the pointcloud to points in a z interval.
Args:
zmin: minimum z
zmax: maximum z
Returns:
New Pointcloud object
"""
I = np.logical_and((self.z >= zmin), (self.z <= zmax))
return self.cut(I)
def triangulate(self):
"""
Triangulate the pointcloud. Will do nothing if triangulation is already calculated.
Raises:
ValueError: If not at least 3 points in pointcloud
"""
if not HAS_TRIANGLE:
raise NotAvailableError("The triangle module does not seem to be available!")
if self.triangulation is None:
if self.xy.shape[0] > 2:
self.triangulation = triangle.Triangulation(self.xy)
else:
raise ValueError("Less than 3 points - unable to triangulate.")
def set_validity_mask(self, mask):
"""
Explicitly set a triangle validity mask.
Args:
mask: A boolean numpy array of size the number of triangles.
Raises:
ValueError: If triangulation not created or mask of improper shape.
"""
if self.triangulation is None:
raise ValueError("Triangulation not created yet!")
if mask.shape[0] != self.triangulation.ntrig:
raise ValueError("Invalid size of triangle validity mask.")
self.triangle_validity_mask = mask
def clear_validity_mask(self):
"""Clear the triangle validity mask (set it to None)"""
self.triangle_validity_mask = None
def calculate_validity_mask(self, max_angle=45, tol_xy=2, tol_z=1):
"""
Calculate a triangle validity mask from geometry constraints.
Args:
max_angle: maximal angle/slope in degrees.
tol_xy: maximal size of xy bounding box.
tol_z: maximal size of z bounding box.
"""
tanv2 = np.tan(max_angle * np.pi / 180.0) ** 2 # tanv squared
geom = self.get_triangle_geometry()
self.triangle_validity_mask = (
geom < (tanv2, tol_xy, tol_z)).all(axis=1)
def get_validity_mask(self):
# just return the validity mask
return self.triangle_validity_mask
def get_grid(self, ncols=None, nrows=None, x1=None, x2=None, y1=None, y2=None,
cx=None, cy=None, nd_val=-999, method="triangulation", attr="z", srad=None, params=None):
"""
Grid (an attribute of) the pointcloud.
Will calculate grid size and georeference from supplied input (or pointcloud extent).
Args:
ncols: number of columns.
nrows: number of rows.
x1: left pixel corner/edge (GDAL style).
x2: right pixel corner/edge (GDAL style).
y1: lower pixel corner/edge (GDAL style).
y2: upper pixel corner/edge (GDAL style).
cx: horizontal cell size.
cy: vertical cell size.
nd_val: grid no data value.
method: One of the supported method names:
triangulation, return_triangles, cellcount, most_frequent,
idw_filter, mean_filter, max_filter, min_filter, median_filter, var_filter,
density_filter
or:
A callable which accepts a numpy array and returns a scalar.
The latter will execute the callable on the subarray of values within each cell.
attr: The attribute to grid - defaults to z.
Will cast attr to float64 for the triangulation method, and int32 for most_frequent.
srad: The search radius to use for the filter variant methods.
params: Possibly a list of params to pass to the filter method.
Returns:
A grid.Grid object and a grid.Grid object with triangle sizes if 'return_triangles' is specified.
Raises:
ValueError: If unable to calculate grid size or location from supplied input,
or using triangulation and triangulation not calculated or supplied with invalid method name.
"""
# x1 = left 'corner' of "pixel", not center.
# y2 = upper 'corner', not center.
# TODO: Fix surprises in the logic below!!!!!
if x1 is None:
bbox = self.get_bounds()
x1 = bbox[0]
if x2 is None:
bbox = self.get_bounds()
x2 = bbox[2]
if y1 is None:
bbox = self.get_bounds()
y1 = bbox[1]
if y2 is None:
bbox = self.get_bounds()
y2 = bbox[3]
if ncols is None and cx is None:
raise ValueError("Unable to compute grid extent from input data")
if nrows is None and cy is None:
raise ValueError("Unable to compute grid extent from input data")
if ncols is None:
ncols = int(ceil((x2 - x1) / cx))
else:
assert cx is None
cx = (x2 - x1) / float(ncols)
if nrows is None:
nrows = int(ceil((y2 - y1) / cy))
else:
assert cy is None
cy = (y2 - y1) / float(nrows)
# geo ref gdal style...
geo_ref = [x1, cx, 0, y2, 0, -cy]
if method in ("triangulation", "return_triangles"):
if self.triangulation is None:
raise ValueError("Create a triangulation first...")
val = np.require(self.get_array(attr), dtype=np.float64)
if method == "triangulation":
g = self.triangulation.make_grid(
val, ncols, nrows, x1, cx, y2, cy, nd_val, return_triangles=False)
return grid.Grid(g, geo_ref, nd_val, self.srs)
else:
g, t = self.triangulation.make_grid(
val, ncols, nrows, x1, cx, y2, cy, nd_val, return_triangles=True)
return grid.Grid(g, geo_ref, nd_val, srs=self.srs), grid.Grid(t, geo_ref, nd_val, srs=self.srs)
elif method == "cellcount": # density grid
arr_coords = ((self.xy - (geo_ref[0], geo_ref[3])) /
(geo_ref[1], geo_ref[5])).astype(np.int32)
M = np.logical_and(arr_coords[:, 0] >= 0, arr_coords[:, 0] < ncols)
M &= np.logical_and(
arr_coords[:, 1] >= 0, arr_coords[:, 1] < nrows)
arr_coords = arr_coords[M]
# Wow - this gridding is sooo simple! and fast!
# create flattened index
B = arr_coords[:, 1] * ncols + arr_coords[:, 0]
bins = np.arange(0, ncols * nrows + 1)
h, b = | np.histogram(B, bins) | numpy.histogram |
# -*- coding: utf-8 -*-
"""
Implementation of surrogate map generation as in Burt et al., 2018, Nat Neuro
"""
from joblib import Parallel, delayed
import numpy as np
from scipy.optimize import least_squares
from scipy import sparse as ssp
from scipy.stats import boxcox
def _make_weight_matrix(x, d0):
"""
Constructs weight matrix from distance matrix + autocorrelation estimate
Parameters
----------
x : array_like
Distance matrix
d0 : float
Estimate of spatial scale of autocorrelation
Returns
-------
W : numpy.ndarray
Weight matrix
"""
# "W is the row-normalized weight matrix with zero diagonal and"
# "off-diagonal elements proportional to W[ij] = z[i]^-1 exp(-D[ij]/d0),"
# "where D[ij] is the surface-based geodesic distance between cortical"
# "areas i and j, and z[i] is a row-wise normalization factor."
# z[i] = row sum exp(-D[ij]/d0)
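# Worked example (illustrative): for two areas at geodesic distance d, the unnormalised
# weights are [[0, exp(-d/d0)], [exp(-d/d0), 0]]; each row sums to exp(-d/d0), so after
# row normalisation both off-diagonal entries become exactly 1.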
with np.errstate(over='ignore'):
weight = np.exp(-x / d0) * np.logical_not(np.eye(len(x), dtype=bool))
# avoid divide-by-zero errors
with np.errstate(invalid='ignore'):
return weight / | np.sum(weight, axis=1) | numpy.sum |
import numpy as np
import pandas as pd
from ms_mint.tools import get_mz_mean_from_formulas, gaussian, scale_dataframe
def test__get_mz_mean_from_formulas():
result = get_mz_mean_from_formulas(['C', 'CCCC', 'CNO'])
expected = [12, 48, 41.998]
assert result==expected, result
def test__get_mz_mean_from_formulas__positive_ion():
result = get_mz_mean_from_formulas(['C', 'CCCC', 'CNO'], ms_mode='positive')
expected = [13.0078, 49.0078, 43.0058]
assert result==expected, result
def test__get_mz_mean_from_formulas__negative_ion():
result = get_mz_mean_from_formulas(['C', 'CCCC', 'CNO'], ms_mode='negative')
expected = [10.9922, 46.9922, 40.9902]
assert result==expected, result
def test__gaussian():
mu = 0
sig = 1
x = np.array([-2, -1 , 0, 1, 2]) * sig + mu
result = gaussian(x, mu, sig)
expected = np.array([0.13533528, 0.60653066, 1, 0.60653066, 0.13533528])
ε = max(result - expected)
assert ε<1e-8, ε
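# Note (illustrative): the expected values above are exp(-z**2 / 2) for z = -2, -1, 0, 1, 2,
# i.e. an unnormalised Gaussian with peak value 1 at x = mu.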
def test__gaussian_mu1():
mu = 1
sig = 1
x = np.array([-2, -1 , 0, 1, 2]) * sig + mu
result = gaussian(x, mu, sig)
expected = | np.array([0.13533528, 0.60653066, 1, 0.60653066, 0.13533528]) | numpy.array |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 10 13:45:27 2018
@author: sreimond
"""
import numpy as np
from datetime import datetime
def is_leap( years, cal='auto' ):
"""
The `is_leap` function enables array input.
Documentation see the `_is_leap` function.
"""
years = np.array(years,ndmin=1)
years_count = np.size(years)
ret = np.zeros(years_count,dtype=np.bool_)
for ix in range(years_count):
try:
ret[ix] = _is_leap( years[ix], cal=cal )
except:
ret[ix] = np.nan
return ret
def is_julian( y, m, d ):
"""
The `is_julian` function enables array input.
Documentation see the `_is_julian` function.
"""
years = np.array(y,ndmin=1)
months = np.array(m,ndmin=1)
days = np.array(d,ndmin=1)
years_count = np.size(years)
dim_check = ((years_count == np.size(months))
and (years_count == np.size(days)))
if not dim_check:
raise ValueError('dimension mismatch')
ret = np.zeros(years_count,dtype=np.bool_)
for ix in range(years_count):
try:
ret[ix] = _is_julian( years[ix], months[ix], days[ix] )
except:
ret[ix] = np.nan
return ret
def is_gregorian( y, m, d ):
"""
The `is_gregorian` function enables array input.
Documentation see the `_is_gregorian` function.
"""
years = np.array(y,ndmin=1)
months = np.array(m,ndmin=1)
days = np.array(d,ndmin=1)
years_count = np.size(years)
dim_check = ((years_count == np.size(months))
and (years_count == np.size(days)))
if not dim_check:
raise ValueError('dimension mismatch')
ret = np.zeros(years_count,dtype=np.bool_)
for ix in range(years_count):
try:
ret[ix] = _is_gregorian( years[ix], months[ix], days[ix] )
except:
ret[ix] = np.nan
return ret
def ymd2jd( y, m, d, cal='auto' ):
"""
The `ymd2jd` function enables array input.
Documentation see the `_ymd2jd` function.
"""
years = np.array(y,ndmin=1)
months = np.array(m,ndmin=1)
days = np.array(d,ndmin=1)
years_count = np.size(years)
dim_check = ((years_count == np.size(months))
and (years_count == np.size(days)))
if not dim_check:
raise ValueError('dimension mismatch')
ret = np.zeros(years_count,dtype=np.float_)
for ix in range(years_count):
try:
ret[ix] = _ymd2jd( years[ix], months[ix], days[ix], cal=cal )
except:
ret[ix] = np.nan
return ret
def jd2ymd( jd, cal='auto' ):
"""
The `jd2ymd` function enables array input.
Documentation see the `_jd2ymd` function.
"""
jd = np.array(jd,ndmin=1)
jd_count = np.size(jd)
years = np.zeros(jd_count,dtype=np.int_)
months = np.zeros(jd_count,dtype=np.int_)
days = np.zeros(jd_count,dtype=np.float_)
for ix in range(jd_count):
try:
years[ix], months[ix], days[ix] = _jd2ymd( jd[ix], cal=cal )
except:
years[ix], months[ix], days[ix] = np.nan, np.nan, np.nan
return years, months, days
def ymd2mjd( y, m, d, cal='auto' ):
"""
The `ymd2mjd` function enables array input.
Documentation see the `_ymd2mjd` function.
"""
years = np.array(y,ndmin=1)
months = np.array(m,ndmin=1)
days = np.array(d,ndmin=1)
years_count = np.size(years)
dim_check = ((years_count == np.size(months))
and (years_count == np.size(days)))
if not dim_check:
raise ValueError('dimension mismatch')
ret = np.zeros(years_count,dtype=np.float_)
for ix in range(years_count):
try:
ret[ix] = _ymd2mjd( years[ix], months[ix], days[ix], cal=cal )
except:
ret[ix] = np.nan
return ret
def mjd2ymd( mjd, cal='auto' ):
"""
The `mjd2ymd` function enables array input.
Documentation see the `_mjd2ymd` function.
"""
mjd = np.array(mjd,ndmin=1)
mjd_count = np.size(mjd)
years = np.zeros(mjd_count,dtype=np.int_)
months = np.zeros(mjd_count,dtype=np.int_)
days = np.zeros(mjd_count,dtype=np.float_)
for ix in range(mjd_count):
try:
years[ix], months[ix], days[ix] = _mjd2ymd( mjd[ix], cal=cal )
except:
years[ix], months[ix], days[ix] = np.nan, np.nan, np.nan
return years, months, days
def jd2dow( jd ):
"""
The `jd2dow` function enables array input.
Documentation see the `_jd2dow` function.
"""
jd = np.array(jd,ndmin=1)
jd_count = np.size(jd)
days_number = np.zeros(jd_count,dtype=np.int_)
days_name = np.zeros(jd_count,dtype='|S3')
for ix in range(jd_count):
try:
days_number[ix], days_name[ix] = _jd2dow( jd[ix] )
except:
days_number[ix], days_name[ix] = np.nan, 'nan'
return days_number, days_name
def mjd2dow( mjd ):
"""
The `mjd2dow` function enables array input.
Documentation see the `_mjd2dow` function.
"""
mjd = np.array(mjd,ndmin=1)
mjd_count = np.size(mjd)
days_number = np.zeros(mjd_count,dtype=np.int_)
days_name = np.zeros(mjd_count,dtype='|S3')
for ix in range(mjd_count):
try:
days_number[ix], days_name[ix] = _mjd2dow( mjd[ix] )
except:
days_number[ix], days_name[ix] = np.nan, 'nan'
return days_number, days_name
def dhms2day( d, h, m, s ):
"""
The `dhms2day` function enables array input.
Documentation see the `_dhms2day` function.
"""
days = np.array(d,ndmin=1)
hours = np.array(h,ndmin=1)
minutes = np.array(m,ndmin=1)
seconds = np.array(s,ndmin=1)
days_count = np.size(days)
dim_check = ((days_count == np.size(hours))
and (days_count == np.size(minutes))
and (days_count == np.size(seconds)))
if not dim_check:
raise ValueError('dimension mismatch')
ret = np.zeros(days_count,dtype=np.float_)
for ix in range(days_count):
try:
ret[ix] = _dhms2day( days[ix], hours[ix], minutes[ix], seconds[ix] )
except:
ret[ix] = np.nan
return ret
def day2dhms( day ):
"""
The `day2dhms` function enables array input.
Documentation see the `_day2dhms` function.
"""
day = np.array(day,ndmin=1)
day_count = np.size(day)
days = np.zeros(day_count,dtype=np.int_)
hours = np.zeros(day_count,dtype=np.int_)
minutes = np.zeros(day_count,dtype=np.int_)
seconds = np.zeros(day_count,dtype=np.float_)
for ix in range(day_count):
try:
days[ix], hours[ix], minutes[ix], seconds[ix] = _day2dhms( day[ix] )
except:
days[ix], hours[ix], minutes[ix], seconds[ix] = np.nan, np.nan, np.nan, np.nan
return days, hours, minutes, seconds
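# Illustrative example (assuming the underlying _day2dhms splits a fractional day into
# whole days, hours, minutes and seconds):
# day2dhms(1.75) # -> (array([1]), array([18]), array([0]), array([0.]))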
def ymd2doy( y, m, d ):
"""
The `ymd2doy` function enables array input.
Documentation see the `_ymd2doy` function.
"""
years = np.array(y,ndmin=1)
months = np.array(m,ndmin=1)
days = np.array(d,ndmin=1)
years_count = np.size(years)
dim_check = ((years_count == np.size(months))
and (years_count == np.size(days)))
if not dim_check:
raise ValueError('dimension mismatch')
ret = np.zeros(years_count,dtype=np.int_)
for ix in range(years_count):
try:
ret[ix] = _ymd2doy( years[ix], months[ix], days[ix] )
except:
ret[ix] = np.nan
return ret
def jd2doy( jd ):
"""
The `jd2doy` function enables array input.
Documentation see the `_jd2doy` function.
"""
jd = np.array(jd,ndmin=1)
jd_count = np.size(jd)
doys = np.zeros(jd_count,dtype=np.int_)
for ix in range(jd_count):
try:
doys[ix] = _jd2doy( jd[ix] )
except:
doys[ix] = np.nan
return doys
def mjd2doy( mjd ):
"""
The `mjd2doy` function enables array input.
Documentation see the `_mjd2doy` function.
"""
mjd = np.array(mjd,ndmin=1)
mjd_count = np.size(mjd)
doys = np.zeros(mjd_count,dtype=np.int_)
for ix in range(mjd_count):
try:
doys[ix] = _mjd2doy( mjd[ix] )
except:
doys[ix] = np.nan
return doys
def doy2ymd( y, doy ):
"""
The `doy2ymd` function enables array input.
Documentation see the `_doy2ymd` function.
"""
ys = np.array(y,ndmin=1)
doys = np.array(doy,ndmin=1)
ys_count = np.size(ys)
dim_check = (ys_count == np.size(doys))
if not dim_check:
raise ValueError('dimension mismatch')
years = np.zeros(ys_count,dtype=np.int_)
months = np.zeros(ys_count,dtype=np.int_)
days = np.zeros(ys_count,dtype=np.float_)
for ix in range(ys_count):
try:
years[ix], months[ix], days[ix] = _doy2ymd( ys[ix], doys[ix] )
except:
years[ix], months[ix], days[ix] = np.nan, np.nan, np.nan
return years, months, days
def doy2jd( y, doy ):
"""
The `doy2jd` function enables array input.
Documentation see the `_doy2jd` function.
"""
ys = np.array(y,ndmin=1)
doys = np.array(doy,ndmin=1)
ys_count = np.size(ys)
ret = np.zeros(ys_count,dtype=np.float_)
for ix in range(ys_count):
try:
ret[ix] = _doy2jd( ys[ix], doys[ix] )
except:
ret[ix] = np.nan
return ret
def doy2mjd( y, doy ):
"""
The `doy2mjd` function enables array input.
Documentation see the `_doy2mjd` function.
"""
ys = np.array(y,ndmin=1)
doys = np.array(doy,ndmin=1)
ys_count = np.size(ys)
ret = np.zeros(ys_count,dtype=np.float_)
for ix in range(ys_count):
try:
ret[ix] = _doy2mjd( ys[ix], doys[ix] )
except:
ret[ix] = np.nan
return ret
def mjd2jd( mjd ):
"""
The `mjd2jd` function enables array input.
Documentation see the `_mjd2jd` function.
"""
mjd = np.array(mjd,ndmin=1)
mjd_count = np.size(mjd)
jd = np.zeros(mjd_count,dtype=np.float_)
for ix in range(mjd_count):
try:
jd[ix] = _mjd2jd( mjd[ix] )
except:
jd[ix] = np.nan
return jd
def jd2mjd( jd ):
"""
The `jd2mjd` function enables array input.
Documentation see the `_jd2mjd` function.
"""
jd = | np.array(jd,ndmin=1) | numpy.array |
from time import perf_counter
from math import degrees, radians
from numpy.matlib import zeros
from numpy.linalg import norm, inv
from .latticeresult import LatticeResult, GammaResult
from .latticesystem import LatticeSystem
from ..tools.trim import LoopingTrim, TurningTrim
from ..tools.mass import Mass
class LatticeTrim(LatticeResult):
CLt = None
CYt = None
Clt = None
Cmt = None
Cnt = None
trmfrc = None
trmmom = None
trmlft = None
def __init__(self, name: str, sys: LatticeSystem):
super().__init__(name, sys)
self.set_trim_loads()
def set_targets(self, CLt: float=0.0, CYt: float=0.0,
Clt: float=0.0, Cmt: float=0.0, Cnt: float=0.0):
self.CLt = CLt
self.CYt = CYt
self.Clt = Clt
self.Cmt = Cmt
self.Cnt = Cnt
def set_trim_loads(self, trmfrc: bool=True, trmmom: bool=True, trmlft: bool=False):
self.trmfrc = trmfrc
self.trmmom = trmmom
self.trmlft = trmlft
def delta_C(self):
Ctgt = self.target_Cmat()
Ccur = self.current_Cmat()
return Ctgt-Ccur
def target_Cmat(self):
if self.trmlft:
Ctgt = zeros((1, 1), dtype=float)
Ctgt[0, 0] = self.CLt
elif self.trmfrc and self.trmmom:
Ctgt = zeros((5, 1), dtype=float)
Ctgt[0, 0] = self.CLt
Ctgt[1, 0] = self.CYt
Ctgt[2, 0] = self.Clt
Ctgt[3, 0] = self.Cmt
Ctgt[4, 0] = self.Cnt
elif self.trmfrc:
Ctgt = zeros((2, 1), dtype=float)
Ctgt[0, 0] = self.CLt
Ctgt[1, 0] = self.CYt
elif self.trmmom:
Ctgt = zeros((3, 1), dtype=float)
Ctgt[0, 0] = self.Clt
Ctgt[1, 0] = self.Cmt
Ctgt[2, 0] = self.Cnt
else:
Ctgt = zeros((0, 1), dtype=float)
return Ctgt
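# Illustrative note: with both force and moment trimming enabled, the target vector is
# stacked as [CLt, CYt, Clt, Cmt, Cnt]^T, matching the ordering used by current_Cmat()
# below when it assembles the current coefficients.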
def current_Cmat(self):
if self.trmlft:
Ccur = zeros((1, 1), dtype=float)
Ccur[0, 0] = self.nfres.CL
elif self.trmfrc and self.trmmom:
Ccur = zeros((5, 1), dtype=float)
Ccur[0, 0] = self.nfres.CL
Ccur[1, 0] = self.nfres.CY
Ccur[2, 0] = self.nfres.Cl
Ccur[3, 0] = self.nfres.Cm
Ccur[4, 0] = self.nfres.Cn
if self.sys.cdo != 0.0:
Ccur[0, 0] += self.pdres.CL
Ccur[1, 0] += self.pdres.CY
Ccur[2, 0] += self.pdres.Cl
Ccur[3, 0] += self.pdres.Cm
Ccur[4, 0] += self.pdres.Cn
elif self.trmfrc:
Ccur = zeros((2, 1), dtype=float)
Ccur[0, 0] = self.nfres.CL
Ccur[1, 0] = self.nfres.CY
if self.sys.cdo != 0.0:
Ccur[0, 0] += self.pdres.CL
Ccur[1, 0] += self.pdres.CY
elif self.trmmom:
Ccur = | zeros((3, 1), dtype=float) | numpy.matlib.zeros |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for the improver.metadata.probabilistic module"""
import unittest
import iris
import numpy as np
from iris.exceptions import CoordinateNotFoundError
from iris.tests import IrisTest
from improver.metadata.probabilistic import (
find_percentile_coordinate,
find_threshold_coordinate,
format_cell_methods_for_diagnostic,
format_cell_methods_for_probability,
get_diagnostic_cube_name_from_probability_name,
get_threshold_coord_name_from_probability_name,
in_vicinity_name_format,
is_probability,
probability_is_above_or_below,
)
from improver.synthetic_data.set_up_test_cubes import (
set_up_percentile_cube,
set_up_probability_cube,
set_up_variable_cube,
)
class Test_probability_is_above_or_below(unittest.TestCase):
"""Test that the probability_is_above_or_below function correctly
identifies whether the spp__relative_to_threshold attribute is above
or below with respect to the threshold."""
def setUp(self):
"""Set up data and thresholds for the cubes."""
self.data = np.ones((3, 3, 3), dtype=np.float32)
self.threshold_points = np.array([276, 277, 278], dtype=np.float32)
def test_above(self):
""" Tests the case where spp__relative_threshold is above"""
cube = set_up_probability_cube(
self.data, self.threshold_points, spp__relative_to_threshold="above"
)
result = probability_is_above_or_below(cube)
self.assertEqual(result, "above")
def test_below(self):
""" Tests the case where spp__relative_threshold is below"""
cube = set_up_probability_cube(
self.data, self.threshold_points, spp__relative_to_threshold="below"
)
result = probability_is_above_or_below(cube)
self.assertEqual(result, "below")
def test_greater_than(self):
""" Tests the case where spp__relative_threshold is greater_than"""
cube = set_up_probability_cube(
self.data, self.threshold_points, spp__relative_to_threshold="greater_than"
)
result = probability_is_above_or_below(cube)
self.assertEqual(result, "above")
def test_greater_than_or_equal_to(self):
""" Tests the case where spp__relative_threshold is
greater_than_or_equal_to"""
cube = set_up_probability_cube(
self.data,
self.threshold_points,
spp__relative_to_threshold="greater_than_or_equal_to",
)
result = probability_is_above_or_below(cube)
self.assertEqual(result, "above")
def test_less_than(self):
""" Tests the case where spp__relative_threshold is less_than"""
cube = set_up_probability_cube(
self.data, self.threshold_points, spp__relative_to_threshold="less_than"
)
result = probability_is_above_or_below(cube)
self.assertEqual(result, "below")
def test_less_than_or_equal_to(self):
""" Tests the case where spp__relative_threshold is
less_than_or_equal_to"""
cube = set_up_probability_cube(
self.data,
self.threshold_points,
spp__relative_to_threshold="less_than_or_equal_to",
)
result = probability_is_above_or_below(cube)
self.assertEqual(result, "below")
def test_no_spp__relative_to_threshold(self):
"""Tests it returns None if there is no spp__relative_to_threshold
attribute."""
cube = set_up_probability_cube(self.data, self.threshold_points,)
cube.coord("air_temperature").attributes = {
"relative_to_threshold": "greater_than"
}
result = probability_is_above_or_below(cube)
self.assertEqual(result, None)
def test_incorrect_attribute(self):
"""Tests it returns None if the spp__relative_to_threshold
attribute has an invalid value."""
cube = set_up_probability_cube(self.data, self.threshold_points,)
cube.coord("air_temperature").attributes = {
"spp__relative_to_threshold": "higher"
}
result = probability_is_above_or_below(cube)
self.assertEqual(result, None)
class Test_in_vicinity_name_format(unittest.TestCase):
"""Test that the 'in_vicinity' above/below threshold probability
cube naming function produces the correctly formatted names."""
def setUp(self):
"""Set up test cube"""
data = np.ones((3, 3, 3), dtype=np.float32)
threshold_points = np.array([276, 277, 278], dtype=np.float32)
self.cube = set_up_probability_cube(data, threshold_points)
self.cube.rename("probability_of_X_above_threshold")
def test_in_vicinity_name_format(self):
"""Test that 'in_vicinity' is added correctly to the name for both
above and below threshold cases"""
correct_name_above = "probability_of_X_in_vicinity_above_threshold"
new_name_above = in_vicinity_name_format(self.cube.name())
self.cube.rename("probability_of_X_below_threshold")
correct_name_below = "probability_of_X_in_vicinity_below_threshold"
new_name_below = in_vicinity_name_format(self.cube.name())
self.assertEqual(new_name_above, correct_name_above)
self.assertEqual(new_name_below, correct_name_below)
def test_between_thresholds(self):
"""Test for "between_thresholds" suffix"""
self.cube.rename("probability_of_visibility_between_thresholds")
correct_name = "probability_of_visibility_in_vicinity_between_thresholds"
new_name = in_vicinity_name_format(self.cube.name())
self.assertEqual(new_name, correct_name)
def test_no_above_below_threshold(self):
"""Test the case of name without above/below_threshold is handled
correctly"""
self.cube.rename("probability_of_X")
correct_name_no_threshold = "probability_of_X_in_vicinity"
new_name_no_threshold = in_vicinity_name_format(self.cube.name())
self.assertEqual(new_name_no_threshold, correct_name_no_threshold)
def test_in_vicinity_already_exists(self):
"""Test the case of 'in_vicinity' already existing in the cube name"""
self.cube.rename("probability_of_X_in_vicinity")
result = in_vicinity_name_format(self.cube.name())
self.assertEqual(result, "probability_of_X_in_vicinity")
class Test_get_threshold_coord_name_from_probability_name(unittest.TestCase):
"""Test utility to derive threshold coordinate name from probability cube name"""
def test_above_threshold(self):
"""Test correct name is returned from a standard (above threshold)
probability field"""
result = get_threshold_coord_name_from_probability_name(
"probability_of_air_temperature_above_threshold"
)
self.assertEqual(result, "air_temperature")
def test_below_threshold(self):
"""Test correct name is returned from a probability below threshold"""
result = get_threshold_coord_name_from_probability_name(
"probability_of_air_temperature_below_threshold"
)
self.assertEqual(result, "air_temperature")
def test_between_thresholds(self):
"""Test correct name is returned from a probability between thresholds
"""
result = get_threshold_coord_name_from_probability_name(
"probability_of_visibility_in_air_between_thresholds"
)
self.assertEqual(result, "visibility_in_air")
def test_in_vicinity(self):
"""Test correct name is returned from an "in vicinity" probability.
Name "cloud_height" is used in this test to illustrate why suffix
cannot be removed with "rstrip"."""
diagnostic = "cloud_height"
result = get_threshold_coord_name_from_probability_name(
f"probability_of_{diagnostic}_in_vicinity_above_threshold"
)
self.assertEqual(result, diagnostic)
def test_error_not_probability(self):
"""Test exception if input is not a probability cube name"""
with self.assertRaises(ValueError):
get_threshold_coord_name_from_probability_name("lwe_precipitation_rate")
class Test_get_diagnostic_cube_name_from_probability_name(unittest.TestCase):
"""Test utility to derive diagnostic cube name from probability cube name"""
def test_basic(self):
"""Test correct name is returned from a point probability field"""
diagnostic = "air_temperature"
result = get_diagnostic_cube_name_from_probability_name(
f"probability_of_{diagnostic}_above_threshold"
)
self.assertEqual(result, diagnostic)
def test_in_vicinity(self):
"""Test the full vicinity name is returned from a vicinity probability
field"""
diagnostic = "precipitation_rate"
result = get_diagnostic_cube_name_from_probability_name(
f"probability_of_{diagnostic}_in_vicinity_above_threshold"
)
self.assertEqual(result, f"{diagnostic}_in_vicinity")
def test_error_not_probability(self):
"""Test exception if input is not a probability cube name"""
with self.assertRaises(ValueError):
get_diagnostic_cube_name_from_probability_name("lwe_precipitation_rate")
class Test_is_probability(IrisTest):
"""Test the is_probability function"""
def setUp(self):
"""Set up test data"""
self.data = np.ones((3, 3, 3), dtype=np.float32)
self.threshold_points = np.array([276, 277, 278], dtype=np.float32)
self.prob_cube = set_up_probability_cube(self.data, self.threshold_points)
def test_true(self):
"""Test a probability cube evaluates as true"""
result = is_probability(self.prob_cube)
self.assertTrue(result)
def test_scalar_threshold_coord(self):
"""Test a probability cube with a single threshold evaluates as true"""
cube = iris.util.squeeze(self.prob_cube[0])
result = is_probability(cube)
self.assertTrue(result)
def test_false(self):
"""Test cube that does not contain thresholded probabilities
evaluates as false"""
cube = set_up_variable_cube(
self.data, name="probability_of_rain_at_surface", units="1"
)
result = is_probability(cube)
self.assertFalse(result)
class Test_find_threshold_coordinate(IrisTest):
"""Test the find_threshold_coordinate function"""
def setUp(self):
"""Set up test probability cubes with old and new threshold coordinate
naming conventions"""
data = np.ones((3, 3, 3), dtype=np.float32)
self.threshold_points = np.array([276, 277, 278], dtype=np.float32)
cube = set_up_probability_cube(data, self.threshold_points)
self.cube_new = cube.copy()
self.cube_old = cube.copy()
self.cube_old.coord("air_temperature").rename("threshold")
def test_basic(self):
"""Test function returns an iris.coords.Coord"""
threshold_coord = find_threshold_coordinate(self.cube_new)
self.assertIsInstance(threshold_coord, iris.coords.Coord)
def test_old_convention(self):
"""Test function recognises threshold coordinate with name "threshold"
"""
threshold_coord = find_threshold_coordinate(self.cube_old)
self.assertEqual(threshold_coord.name(), "threshold")
self.assertArrayAlmostEqual(threshold_coord.points, self.threshold_points)
def test_new_convention(self):
"""Test function recognises threshold coordinate with standard
diagnostic name and "threshold" as var_name"""
threshold_coord = find_threshold_coordinate(self.cube_new)
self.assertEqual(threshold_coord.name(), "air_temperature")
self.assertEqual(threshold_coord.var_name, "threshold")
self.assertArrayAlmostEqual(threshold_coord.points, self.threshold_points)
def test_fails_if_not_cube(self):
"""Test error if given a non-cube argument"""
msg = "Expecting data to be an instance of iris.cube.Cube"
with self.assertRaisesRegex(TypeError, msg):
find_threshold_coordinate([self.cube_new])
def test_fails_if_no_threshold_coord(self):
"""Test error if no threshold coordinate is present"""
self.cube_new.coord("air_temperature").var_name = None
msg = "No threshold coord found"
with self.assertRaisesRegex(CoordinateNotFoundError, msg):
find_threshold_coordinate(self.cube_new)
class Test_find_percentile_coordinate(IrisTest):
"""Test whether the cube has a percentile coordinate."""
def setUp(self):
"""Create a wind-speed and wind-gust cube with percentile coord."""
data = | np.zeros((2, 3, 3), dtype=np.float32) | numpy.zeros |
# Authors: <NAME> <<EMAIL>>
"""
----------------------------------------------------------------------
--- jumeg.decompose.fourier_ica --------------------------------------
----------------------------------------------------------------------
author : <NAME>
email : <EMAIL>
last update: 09.11.2016
version : 1.2
----------------------------------------------------------------------
This simple implementation of ICASSO is based on the following
publication:
----------------------------------------------------------------------
<NAME>, <NAME>, and <NAME>. 'Validating the
independent components of neuroimaging time-series via
clustering and visualization', Neuroimage, 22:3(1214-1222), 2004.
Should you use this code, we kindly request you to cite the
aforementioned publication.
<http://research.ics.aalto.fi/ica/icasso/about+download.shtml
DOWNLOAD ICASSO from here>
----------------------------------------------------------------------
Overview
----------------------------------------------------------------------
Perform ICASSO estimation. ICASSO is based on running ICA
multiple times with slightly different conditions and
clustering the obtained components. Note, here FourierICA
is applied
1. Runs ICA with given parameters M times on data X.
2. Clusters the estimates and computes other statistics.
3. Returns (and visualizes) the best estimates.
----------------------------------------------------------------------
How to use ICASSO?
----------------------------------------------------------------------
from jumeg.decompose import icasso
icasso_obj = JuMEG_icasso()
W, A, quality, fourier_ica_obj = icasso_obj.fit(fn_raw, stim_name='STI 013',
event_id=1, tmin_stim=-0.5,
tmax_stim=0.5, flow=4.0, fhigh=34.0)
--> for further comments we refer directly to the functions or to
fourier_ica_test.py
----------------------------------------------------------------------
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
import numpy as np
########################################################
# #
# JuMEG_icasso class #
# #
########################################################
class JuMEG_icasso(object):
def __init__(self, ica_method='fourierica', average=False, nrep=50,
fn_inv=None, src_loc_method='dSPM', snr=1.0,
morph2fsaverage=True, stim_name=None, event_id=1,
flow=4.0, fhigh=34.0, tmin_win=0.0, tmax_win=1.0,
pca_dim=None, dim_reduction='MDL', conv_eps=1e-9,
max_iter=2000, tICA=False, lrate=1.0, cost_function=None,
decim_epochs=False):
"""
Generate ICASSO object.
Parameters
----------
ica_method: string which ICA method should be used
default: ica_method='FourierICA'
average: should ICA be performed on data averaged across
subjects?
default: average=False
nrep: number of repetitions ICA should be performed
default: nrep=50
fn_inv: file name of inverse operator. If given
FourierICA is applied on data transformed to
source space
src_loc_method: method used for source localization.
Only of interest if 'fn_inv' is set
default: src_loc_method='dSPM'
snr: signal-to-noise ratio for performing source
localization
default: snr=1.0
morph2fsaverage: should data be morphed to the
'fsaverage' brain?
default: morph2fsaverage=True
stim_name: string which contains the name of the
stimulus channel. Only necessary if ICA should
be applied to evoked data.
event_id: integer or list of integers containing the
event IDs which should be used to generate epochs
default: event_id=1
flow: lower frequency border for estimating the optimal
de-mixing matrix using FourierICA
default: flow=4.0
fhigh: upper frequency border for estimating the optimal
de-mixing matrix using FourierICA
default: fhigh=34.0
Note: here default flow and fhigh are choosen to
contain:
- theta (4-7Hz)
- low (7.5-9.5Hz) and high alpha (10-12Hz),
- low (13-23Hz) and high beta (24-34Hz)
tmin_win: time of interest prior to stimulus onset.
Important for generating epochs to apply FourierICA
default=0.0
tmax_win: time of interest after stimulus onset.
Important for generating epochs to apply FourierICA
default=1.0
dim_reduction: {'', 'AIC', 'BIC', 'GAP', 'MDL', 'MIBS', 'explVar'}
Method for dimension selection. For further information about
the methods please check the script 'dimension_selection.py'.
pca_dim: Integer. The number of components used for PCA
decomposition.
conv_eps: iteration stops when weight changes are smaller
than this number
default: conv_eps = 1e-9
max_iter: integer containing the maximal number of
iterations to be performed in ICA estimation
default: max_iter=2000
tICA: bool, whether temporal ICA should be applied instead of
FourierICA
default: tICA=False
lrate: float containing the learning rate which should be
used in the applied ICA algorithm
default: lrate=1.0
cost_function: string containing the cost-function to
use in the applied ICA algorithm. For further information
look in fourier_ica.py
default: cost_function=None
decim_epochs: integer. If set, the number of epochs used
to estimate the optimal demixing matrix is decimated
to the given number.
default: decim_epochs=False
Returns
-------
object: ICASSO object
"""
self._ica_method = ica_method
self.average = average
self._nrep = nrep
self.fn_inv = fn_inv
self.src_loc_method = src_loc_method
self.snr = snr
self.morph2fsaverage = morph2fsaverage
self.whitenMat = [] # whitening matrix
self.dewhitenMat = [] # de-whitening matrix
self.W_est = [] # de-mixing matrix
self.A_est = [] # mixing matrix
self.dmean = [] # data mean
self.dstd = [] # data standard-deviation
self.stim_name = stim_name
self.event_id = event_id
self.flow = flow
self.fhigh = fhigh
self._sfreq = 0.0
self.tmin_win = tmin_win
self.tmax_win = tmax_win
# ICA parameter
self.conv_eps = conv_eps # stopping threshold
self.max_iter = max_iter
self.lrate = lrate # learning rate for the ICA algorithm
self.tICA = tICA # should temporal ICA be performed?
self.pca_dim = pca_dim
self.dim_reduction= dim_reduction
self.cost_function = cost_function
self.decim_epochs = decim_epochs
# make sure to choose meaningful parameters
# when not FourierICA is used
if self.ica_method != 'fourierica':
if conv_eps == 1e-9:
self.conv_eps = 1e-12 # stopping threshold
if max_iter == 2000:
self.max_iter = 200
if lrate == 1:
self.lrate = None # learning rate for the ICA algorithm
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get maximum number of repetitions
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_nrep(self, nrep):
self._nrep = nrep
def _get_nrep(self):
return int(self._nrep)
nrep = property(_get_nrep, _set_nrep)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get ICA method
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_ica_method(self, ica_method):
possible_methods = ['extended-infomax', 'fastica',
'fourierica', 'infomax']
if ica_method in possible_methods:
self._ica_method = ica_method
else:
print('WARNING: chosen ICA method does not exist!')
print('Must be one of the following methods: ', possible_methods)
print('But your choice was: ', ica_method)
print('Program stops!')
import pdb
pdb.set_trace()
def _get_ica_method(self):
return self._ica_method
ica_method = property(_get_ica_method, _set_ica_method)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# estimate linkage between components
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _linkage(self, dis):
# initialize some variables
dlen, dim = dis.shape
Md = dis.copy()
Md += np.diag(np.ones(dlen)*np.inf)
# ------------------------------------------
# estimate clusters
# ------------------------------------------
# --> each vector is at first in its own cluster
Z = np.zeros((dlen-1, 3)) + np.NaN
clusters = np.arange(dlen)
Cdist = Md.copy()
for idx in np.arange(dlen-1):
d_min = np.min(Cdist)
if np.isinf(d_min):
break # no more connected clusters
else:
min_idx = np.argmin(np.min(Cdist, axis=0))
c1 = np.argmin(Cdist[:, min_idx]) # cluster1
c2 = clusters[min_idx] # cluster2
# combine the two clusters
c1_inds = (clusters == c1).nonzero()[0] # vectors belonging to c1
c2_inds = (clusters == c2).nonzero()[0] # vectors belonging to c2
c_inds = np.concatenate((c1_inds, c2_inds)) # members of the new cluster
nc_inds = len(c_inds)
# find bigger cluster
if len(c2_inds) > len(c1_inds):
c, k = c2, c1
else:
c, k = c1, c2
clusters[c_inds] = c # update cluster info
Z[idx, :] = [c, k, d_min] # save info into Z
# ------------------------------------------
# update cluster distances
# ------------------------------------------
# remove the subclusters from the cdist table
for idxC in c_inds:
Cdist[idxC, c_inds] = np.Inf # distance of clusters to its members = Inf
k_inds = c_inds[c_inds != c] # vector of the smallest cluster
Cdist[k_inds, :] = np.Inf # set distance of the subcluster to
Cdist[:, k_inds] = np.Inf # other clusters = Inf
# update the distance of this cluster to the other clusters
idxC = (clusters != c).nonzero()[0]
if len(idxC) > 0:
cl = np.unique(clusters[idxC])
for l in cl:
o_inds = (clusters == l).nonzero()[0] # indices belonging to cluster l
no_inds = len(o_inds)
vd = np.zeros((nc_inds, no_inds))
for ivd in range(nc_inds):
vd[ivd, :] = Md[c_inds[ivd], o_inds]
vd = vd.flatten()
idxvd = np.isfinite(vd).nonzero()[0]
nidxvd = len(idxvd)
sd = np.Inf if nidxvd == 0 else np.sum(vd[idxvd])/nidxvd
Cdist[c, l] = sd
Cdist[l, c] = sd
last = Z[idx, 0]
if np.isnan(last):
last = Z[idx-1, 0]
rest = np.setdiff1d(np.unique(clusters), last)
Z[idx:dlen-2, 0] = rest.transpose()
Z[idx:dlen-2, 1] = last
Z[idx:dlen-2, 2] = np.Inf
idx -= 1
else:
rest = []
# ------------------------------------------
# return values
# ------------------------------------------
# calculate the order of the samples
order = np.array([last])
# go through the combination matrix from top to bottom
for k in range(idx, -1, -1):
c_var = Z[k, 0]
k_var = np.array([Z[k, 1]])
idx_var = np.where(order == c_var)[0]
if len(idx_var) == 0:
order = np.concatenate((k_var, order))
else:
order = np.concatenate((order[:idx_var[0]], k_var, order[idx_var[0]:]))
order = np.concatenate((rest, order))[::-1]
# to maintain compatibility with Statistics Toolbox, the values
# in Z must still be transformed so that they are similar to the
# output of the LINKAGE function
Zs = Z.copy()
current_cluster = np.array(list(range(dlen)))
iter_stop = len(Z[:, 0])
for idx in range(iter_stop):
Zs[idx, 0] = current_cluster[int(Z[idx, 0])]
Zs[idx, 1] = current_cluster[int(Z[idx, 1])]
current_cluster[int(Z[idx, 0])] = dlen + idx
current_cluster[int(Z[idx, 1])] = dlen + idx
return Zs, order
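# Illustrative note: each row of Zs holds [cluster_a, cluster_b, merge_distance], and
# newly formed clusters are numbered upwards from dlen, mirroring the convention of
# MATLAB's linkage() output mentioned above.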
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# estimate similarities
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _corrw(self):
# get some dimension information
npc = int(self.W_est[0].shape[0])
nchan = int(self.W_est[0].shape[1])
ntimes = int(len(self.W_est))
# save estimated demixing matrices W in one matrix
weight = np.zeros((ntimes*npc, nchan), dtype=np.complex)
for idx in range(ntimes):
weight[(idx*npc):((idx+1)*npc), :] = self.W_est[idx]
weight = np.dot(weight, self.dewhitenMat)
# normalize rows to unit length
weight_norm = np.abs(np.sqrt(np.sum(weight*weight.conj(), axis=1))).reshape((npc*ntimes, 1))
weight /= | np.repeat(weight_norm, npc, axis=1) | numpy.repeat |
"""
Where the model is actually trained and validated
"""
import torch
import numpy as np
import tools_for_model as tools
from tools_for_estimate import cal_pesq, cal_stoi
#######################################################################
# For train #
#######################################################################
# T-F masking
def model_train(model, optimizer, train_loader, DEVICE):
# initialization
train_loss = 0
batch_num = 0
# arr = []
# train
model.train()
for inputs, targets in tools.Bar(train_loader):
batch_num += 1
# to cuda
inputs = inputs.float().to(DEVICE)
targets = targets.float().to(DEVICE)
_, _, outputs = model(inputs, targets)
loss = model.loss(outputs, targets)
# # if you want to check the scale of the loss
# print('loss: {:.4}'.format(loss))
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss += loss
train_loss /= batch_num
return train_loss
def model_perceptual_train(model, optimizer, train_loader, DEVICE):
# initialization
train_loss = 0
train_main_loss = 0
train_perceptual_loss = 0
batch_num = 0
# train
model.train()
for inputs, targets in tools.Bar(train_loader):
batch_num += 1
# to cuda
inputs = inputs.float().to(DEVICE)
targets = targets.float().to(DEVICE)
real_spec, img_spec, outputs = model(inputs)
main_loss = model.loss(outputs, targets)
perceptual_loss = model.loss(outputs, targets, real_spec, img_spec, perceptual=True)
# the constraint ratio
r1 = 1
r2 = 1
r3 = r1 + r2
loss = (r1 * main_loss + r2 * perceptual_loss) / r3
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss += loss
train_main_loss += r1 * main_loss
train_perceptual_loss += r2 * perceptual_loss
train_loss /= batch_num
train_main_loss /= batch_num
train_perceptual_loss /= batch_num
return train_loss, train_main_loss, train_perceptual_loss
def fullsubnet_train(model, optimizer, train_loader, DEVICE):
# initialization
train_loss = 0
batch_num = 0
# arr = []
# train
model.train()
for inputs, targets in tools.Bar(train_loader):
batch_num += 1
# to cuda
inputs = inputs.float().to(DEVICE)
targets = targets.float().to(DEVICE)
noisy_complex = tools.stft(inputs)
clean_complex = tools.stft(targets)
noisy_mag, _ = tools.mag_phase(noisy_complex)
cIRM = tools.build_complex_ideal_ratio_mask(noisy_complex, clean_complex)
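# Illustrative note: the complex ideal ratio mask (cIRM) encodes the real/imaginary
# ratio between the clean and noisy STFTs, so the cRM predicted below from the noisy
# magnitude is trained to approximate that target mask.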
cRM = model(noisy_mag)
loss = model.loss(cIRM, cRM)
# # if you want to check the scale of the loss
# print('loss: {:.4}'.format(loss))
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss += loss
train_loss /= batch_num
return train_loss
# Spectral mapping
def dccrn_direct_train(model, optimizer, train_loader, DEVICE):
# initialization
train_loss = 0
batch_num = 0
# train
model.train()
for inputs, targets in tools.Bar(train_loader):
batch_num += 1
# to cuda
inputs = inputs.float().to(DEVICE)
targets = targets.float().to(DEVICE)
output_real, target_real, output_imag, target_imag, _ = model(inputs, targets)
real_loss = model.loss(output_real, target_real)
imag_loss = model.loss(output_imag, target_imag)
loss = (real_loss + imag_loss) / 2
# # if you want to check the scale of the loss
# print('loss: {:.4}'.format(loss))
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss += loss
train_loss /= batch_num
return train_loss
def crn_direct_train(model, optimizer, train_loader, DEVICE):
# initialization
train_loss = 0
batch_num = 0
# train
model.train()
for inputs, targets in tools.Bar(train_loader):
batch_num += 1
# to cuda
inputs = inputs.float().to(DEVICE)
targets = targets.float().to(DEVICE)
output_mag, target_mag, _ = model(inputs, targets)
loss = model.loss(output_mag, target_mag)
# # if you want to check the scale of the loss
# print('loss: {:.4}'.format(loss))
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss += loss
train_loss /= batch_num
return train_loss
#######################################################################
# For validation #
#######################################################################
# T-F masking
def model_validate(model, validation_loader, writer, dir_to_save, epoch, DEVICE):
# initialization
validation_loss = 0
batch_num = 0
avg_pesq = 0
avg_stoi = 0
# to record the score of each sample
f_score = open(dir_to_save + '/Epoch_' + '%d_SCORES' % epoch, 'a')
model.eval()
with torch.no_grad():
for inputs, targets in tools.Bar(validation_loader):
batch_num += 1
# to cuda
inputs = inputs.float().to(DEVICE)
targets = targets.float().to(DEVICE)
_, _, outputs = model(inputs, targets)
loss = model.loss(outputs, targets)
validation_loss += loss
# estimate the output speech with pesq and stoi
estimated_wavs = outputs.cpu().detach().numpy()
clean_wavs = targets.cpu().detach().numpy()
pesq = cal_pesq(estimated_wavs, clean_wavs)
stoi = cal_stoi(estimated_wavs, clean_wavs)
# pesq: 0.1 better / stoi: 0.01 better
for i in range(len(pesq)):
f_score.write('PESQ {:.6f} | STOI {:.6f}\n'.format(pesq[i], stoi[i]))
# reshape for sum
pesq = np.reshape(pesq, (1, -1))
stoi = np.reshape(stoi, (1, -1))
avg_pesq += sum(pesq[0]) / len(inputs)
avg_stoi += sum(stoi[0]) / len(inputs)
# save the samples to tensorboard
if epoch % 10 == 0:
writer.log_wav(inputs[0], targets[0], outputs[0], epoch)
validation_loss /= batch_num
avg_pesq /= batch_num
avg_stoi /= batch_num
return validation_loss, avg_pesq, avg_stoi
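# Example (illustrative sketch only; `model`, `optimizer`, `train_loader`,
# `validation_loader`, `writer`, `dir_to_save` and `DEVICE` are assumed to be
# created by a separate driver script that is not shown here):
#
#   for epoch in range(1, n_epochs + 1):
#       train_loss = model_train(model, optimizer, train_loader, DEVICE)
#       val_loss, val_pesq, val_stoi = model_validate(
#           model, validation_loader, writer, dir_to_save, epoch, DEVICE)
#       print('Epoch {}: train {:.4f}, val {:.4f}, PESQ {:.2f}, STOI {:.2f}'.format(
#             epoch, float(train_loss), float(val_loss), val_pesq, val_stoi))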
def model_perceptual_validate(model, validation_loader, writer, dir_to_save, epoch, DEVICE):
# initialization
validation_loss = 0
validation_main_loss = 0
validation_perceptual_loss = 0
batch_num = 0
avg_pesq = 0
avg_stoi = 0
# to record the score of each sample
f_score = open(dir_to_save + '/Epoch_' + '%d_SCORES' % epoch, 'a')
model.eval()
with torch.no_grad():
for inputs, targets in tools.Bar(validation_loader):
batch_num += 1
# to cuda
inputs = inputs.float().to(DEVICE)
targets = targets.float().to(DEVICE)
real_spec, img_spec, outputs = model(inputs)
main_loss = model.loss(outputs, targets)
perceptual_loss = model.loss(outputs, targets, real_spec, img_spec, perceptual=True)
# the constraint ratio
r1 = 1
r2 = 1
r3 = r1 + r2
loss = (r1 * main_loss + r2 * perceptual_loss) / r3
validation_loss += loss
validation_main_loss += r1 * main_loss
validation_perceptual_loss += r2 * perceptual_loss
# estimate the output speech with pesq and stoi
estimated_wavs = outputs.cpu().detach().numpy()
clean_wavs = targets.cpu().detach().numpy()
pesq = cal_pesq(estimated_wavs, clean_wavs)
stoi = cal_stoi(estimated_wavs, clean_wavs)
# pesq: 0.1 better / stoi: 0.01 better
for i in range(len(pesq)):
f_score.write('PESQ {:.6f} | STOI {:.6f}\n'.format(pesq[i], stoi[i]))
# reshape for sum
pesq = | np.reshape(pesq, (1, -1)) | numpy.reshape |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Name: csv_text_tfidf.py
# Description:
#
# Author: m.akei
# Copyright: (c) 2021 by m.na.akei
# Time-stamp: <2021-07-10 14:21:16>
# Licence:
# ----------------------------------------------------------------------
import argparse
import fileinput
import textwrap
import sys
from pathlib import Path
import re
import json
from collections import Counter
import pickle
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics.pairwise import cosine_similarity
from scipy.sparse import dia_matrix  # used by SeqVectorizer.sequential_fit below but not imported elsewhere
from textmining_lib import get_words, check_hiragana, check_ascii_symbol, check_zenkaku_symbol_with_japanese, removeSentences, replaceSentences, get_count_of_word
from typing import Union, List, Dict, Callable, Any, Type, Tuple, Optional
import typing
VERSION = 1.0
DEBUG = False
PATH_OF_USERDICT = ""
EXTEND_WORD_LIST: Union[Dict[str, List[str]], Any] = None
MINIMUM_LENGTH_OF_WORD = 2
REMOVE_WORDS = ["。", "、", "?", ".", ",", "?"]
def init():
arg_parser = argparse.ArgumentParser(description="",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''
remark:
when using '--model_file', the csv file and column arguments must still be given, but they are not used and may be dummies.
using '--extend_word_file', the bag of words of a sentence is extended with additional words.
this option may be useful when the configuration of devices is important.
format of 'extend word file': each line is 'word word1,word2,...'.
using '--status', simple statistics are printed.
you may remove words that are too frequent by using '--remove_pattern' or '--replace_pattern_file'.
learning steps:
1) make model by '--only_learn'
2) estimate sentence by '--model_file'
3) make new sequential learned model, by '--sequential_learn'
example:
head wagahaiwa_nekodearu.csv | csv_text_tfidf.py - content
head wagahaiwa_nekodearu.csv | csv_text_tfidf.py --index date --output_mode dot - content
head wagahaiwa_nekodearu.csv | csv_text_tfidf.py --index date --output_mode dot --dot_cut_off=0.25 - content > test.dot
THR=0.4
head wagahaiwa_nekodearu.csv | csv_text_tfidf.py --index date --output_mode dot --dot_cut_off=0.3 --use_tf - content |\\
perl -ne "if(/label=\"([.\d]+)\"/ && \$1>${THR}){s/\]/ color=\"red\"]/} print;" > test.dot
head wagahaiwa_nekodearu.csv | csv_text_tfidf.py --index date --output_mode dot --dot_cut_off=0 --remove_pattern='《[^》]*》' - content > test.dot
cat <<EOF > rep_pat.txt
s/《[^》]*》//
s/(?i)GHI/KLM/
EOF
head -40 wagahaiwa_nekodearu.csv |\\
csv_text_tfidf.py --index date --output_mode dot --dot_cut_off=0 --check_frequency=0.1 --replace_pattern_file=rep_pat.txt --debug - content> test.dot
cat <<EOF > tfidf_extend_word.txt
# regexp word1,word2,...
書生 学生,学校
原$ 野原
EOF
head -40 wagahaiwa_nekodearu.csv |\\
csv_text_tfidf.py --index date --output_mode dot --dot_cut_off=0 \\
--replace_pattern_file=rep_pat.txt --extend_word_file=tfidf_extend_word.txt - content> test.dot
# using learned model
csv_text_tfidf.py --index date --output_mode dot --dot_cut_off=0 \\
--replace_pattern_file=rep_pat.txt --extend_word_file=tfidf_extend_word.txt --only_learn=tfidf.db wagahaiwa_nekodearu.csv content
echo 吾輩《わがはい》は猫である。名前はまだ無い。 |\\
csv_text_tfidf.py --index date --output_mode dot --dot_cut_off=0 \\
--replace_pattern_file=rep_pat.txt --extend_word_file=tfidf_extend_word.txt --model_file=tfidf.db wagahaiwa_nekodearu.csv content
# sequential learning
echo -e "date,content\\n2021-07-22,引き止めて、一ヶ月経って、裏の書斎にこもっている。" > new_data.csv
csv_text_tfidf.py --index date --output_mode dot --dot_cut_off=0 \\
--replace_pattern_file=rep_pat.txt --extend_word_file=tfidf_extend_word.txt --sequential_learn=tfidf.db,tfidf2.db new_data.csv content
echo -e "引き止めて、一ヶ月経って、裏の書斎にこもっている。" |\\
csv_text_tfidf.py --index date --output_mode dot --dot_cut_off=0 \\
--replace_pattern_file=rep_pat.txt --extend_word_file=tfidf_extend_word.txt --model_file=tfidf2.db wagahaiwa_nekodearu.csv content
'''))
arg_parser.add_argument('-v', '--version', action='version', version='%(prog)s {}'.format(VERSION))
arg_parser.add_argument("--index",
dest="INDEX",
help="column name for index, default=row number",
type=str,
metavar='COLUMN',
default=None)
arg_parser.add_argument("--additional_columns",
dest="ACOLUMNS",
help="list of column names for additional columns into simlarity table, default=None",
type=str,
metavar='COLUMN[,COLUMN...]',
default=None)
arg_parser.add_argument("--user_dictionary", dest="UDICT", help="path of user dictionary", type=str, metavar='FILE', default="")
arg_parser.add_argument("--minmum_length_of_word",
dest="MLENGTH",
help="minimum length of word, default=2",
type=int,
metavar='INT',
default=2)
arg_parser.add_argument("--remove_words",
dest="RWORDS",
help="list of words to remove, default='。,、,?,.,\,,?'",
type=list,
metavar='WORDS[,WORD,...]',
default="。,、,?,.,\,,?")
arg_parser.add_argument("--remove_pattern",
dest="RPATTERN",
help="regex pattern to remove before analyzing or file",
type=str,
metavar='REGEX_OR_FILE',
default="")
arg_parser.add_argument("--replace_pattern_file",
dest="REP_FILE",
help="path of file that has regex pattern to replace before analyzing",
type=str,
metavar='FILE',
default="")
arg_parser.add_argument("--extend_word_file",
dest="EXT_FILE",
help="path of file that has regex pattern and word to add at deriving words",
type=str,
metavar='FILE',
default=None)
arg_parser.add_argument("--output_mode",
dest="MODE",
help="format of output, default=simple",
type=str,
choices=["simple", "json", "dot"],
default="simple")
arg_parser.add_argument("--only_learn",
dest="VECS_FILE_0",
help="path of file to store model",
type=str,
metavar='FILE',
default=None)
arg_parser.add_argument("--model_file", dest="VECS_FILE_1", help="path of file to load model", type=str, metavar='FILE', default=None)
arg_parser.add_argument("--sequential_learn",
dest="SEQ_MODE",
help="path of file that has result of learned model and new one",
type=str,
metavar='IN_FILE,OUT_FILE',
default=None)
arg_parser.add_argument("--dot_cut_off",
dest="CUT_OFF",
help="threshold for cutting off, only available with '--output_mode=dot'",
type=float,
metavar="FLOAT",
default=None)
arg_parser.add_argument("--check_frequency",
dest="CFLIMIT",
help="ratio for checking frequency of words, only available with '--debug', default=0.2",
type=float,
metavar="FLOAT",
default=0.2)
arg_parser.add_argument("--use_tf", dest="USE_TF", help="use term frequency", action="store_true", default=False)
arg_parser.add_argument("--use_idf", dest="USE_IDF", help="use inverse document frequency", action="store_true", default=False)
arg_parser.add_argument("--status", dest="STATUS", help="print status", action="store_true", default=False)
arg_parser.add_argument("--debug", dest="DEBUG", help="debug mode", action="store_true", default=False)
arg_parser.add_argument('csv_file', metavar='FILE', help='csv file to read, if empty, stdin is used')
arg_parser.add_argument('column', metavar='COLUMN', help="column name to process")
args = arg_parser.parse_args()
return args
# Incremental learning with scikit-learn's CountVectorizer / TfidfVectorizer | ITに頼って生きていく https://boomin.yokohama/archives/1468
class SeqVectorizer(TfidfVectorizer):
"""追加学習機能対応のTfidfVectotizer
"""
def __init__(self, analyzer="word", binary=True, use_idf=False, encoding="utf-8", strip_accents="unicode"):
super().__init__(analyzer=analyzer, binary=binary, use_idf=use_idf, encoding=encoding, strip_accents=strip_accents)
self.__seq_analyzer = analyzer
self.__seq_binary = binary
self.__seq_use_idf = use_idf
self.__seq_encoding = encoding
self.__seq_strip_accents = strip_accents
def fit(self, X):
result = super().fit(X)
self.n_docs = len(X)
def sequential_fit(self, X):
max_idx = max(self.vocabulary_.values())
intervec = TfidfVectorizer(analyzer=self.__seq_analyzer,
binary=self.__seq_binary,
use_idf=self.__seq_use_idf,
encoding=self.__seq_encoding,
strip_accents=self.__seq_strip_accents)
for a in X:
#update vocabulary_
if self.lowercase: a = a.lower()
intervec.fit([a])
tokens = intervec.get_feature_names()
for w in tokens:
if w not in self.vocabulary_:
max_idx += 1
self.vocabulary_[w] = max_idx
# update idf_
if self.__seq_use_idf:
df = (self.n_docs + self.smooth_idf) / np.exp(self.idf_ - 1) - self.smooth_idf
self.n_docs += 1
df.resize(len(self.vocabulary_), refcheck=False)
for w in tokens:
df[self.vocabulary_[w]] += 1
idf = np.log((self.n_docs + self.smooth_idf) / (df + self.smooth_idf)) + 1
self._tfidf._idf_diag = dia_matrix((idf, 0), shape=(len(idf), len(idf)))
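# Example (illustrative sketch, not part of the original script): SeqVectorizer is
# meant to be fitted once on an initial corpus and then extended with new documents,
# updating the vocabulary (and the idf weights when use_idf=True) in place.
#
#   vec = SeqVectorizer(analyzer=get_words_0, binary=True, use_idf=True)
#   vec.fit(["first document text", "second document text"])
#   vec.sequential_fit(["a later document with new words"])
#   X = vec.transform(["query document"])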
def check_frequency_words(sentences: List[str], limit_ratio: float = 0.2) -> typing.Counter[Any]:
"""語句の出現頻度を多い順に印字する
:param sentences: 文字列のリスト
:param limit_ratio: 印字範囲、文字列リストの長さに対する比率で指定する
:returns: 語句の頻度を格納したcollection.Counter
:rtype: collection.Counter
"""
sts = " ".join(sentences)
nlimit = int(len(sentences) * limit_ratio + 0.5)
words = get_words_0(sts, debug=False)
wc = Counter(words)
# wc = get_count_of_word(sts, mecab_option="", path_of_userdict=PATH_OF_USERDICT, remove_words=["。", "、", "?", ".", ",", "?"])
# wc_mc = wc.most_common(most_common)
result_str = ""
for k in sorted(wc, key=wc.get, reverse=True):
v = wc[k]
if v >= nlimit:
r = v / len(sentences) * 100
result_str += f"\t{k}:{v} ({r:6.2f}%)\n"
if len(result_str) > 0:
print(f"%inf:csv_text_tfidf:check_frequency_words:more than limit:ratio={limit_ratio}, count={nlimit}", file=sys.stderr)
print(result_str, file=sys.stderr)
else:
print(f"%inf:csv_text_tfidf:check_frequency_words:no words whose count are more than limit:ratio={limit_ratio}, count={nlimit}",
file=sys.stderr)
return wc
def get_words_0(sentence: str,
min_length: int = None,
debug: bool = True,
user_dict: str = None,
remove_words: List[str] = None,
extend_words: Dict[str, List[str]] = None) -> Union[List[str], str]:
"""文字列からわかち書きで語句リストを生成する
:param sentence: 文字列
:param min_length: 語句として選定する文字列下限値
:param debug:
:returns: 語句リスト
:rtype:
"""
if user_dict is not None:
udict = user_dict
else:
udict = PATH_OF_USERDICT
if remove_words is not None:
rwords = remove_words
else:
rwords = REMOVE_WORDS
if extend_words is not None:
ewords = extend_words
else:
ewords = EXTEND_WORD_LIST
# words = get_words(sentence, mecab_option="", path_of_userdict=PATH_OF_USERDICT, remove_words=["。", "、", "?", ".", ",", "?"])
words = get_words(sentence, mecab_option="", path_of_userdict=udict, remove_words=rwords)
if min_length is None:
min_length = MINIMUM_LENGTH_OF_WORD
ws = []
for w in words:
p1 = check_hiragana(w)
p2 = check_ascii_symbol(w)
p3 = check_zenkaku_symbol_with_japanese(w)
if len(w) >= min_length and not p1 and not p2 and not p3:
ws.append(w)
if extend_words is not None:
for p in ewords:
if re.search(p, w) is not None:
ws.extend(ewords[p])
if len(ws) == 0:
# ws = [""]
ws = sentence
if debug and DEBUG:
print(f"%inf:csv_text_tfidf:get_words_0:list of words\n{sentence}\n =>{words}=>{ws}", file=sys.stderr)
return ws
# How to compute sentence similarity in Python (TF-IDF and cosine similarity) | データサイエンス情報局 https://analysis-navi.com/?p=688
def get_words_vecs(sentences: List[str], use_tf: bool = False, use_idf: bool = False) -> Any:
"""
:param sentences:
:param use_tf:
:param use_idf:
:returns:
:rtype:
"""
# [sklearn] How to use TfidfVectorizer, explained carefully - gotutiyan's blog https://gotutiyan.hatenablog.com/entry/2020/09/10/181919
# vectorizer = TfidfVectorizer(analyzer=get_words_0, binary=not use_tf, use_idf=use_idf, encoding='utf-8', strip_accents="unicode")
vectorizer = SeqVectorizer(analyzer=get_words_0, binary=not use_tf, use_idf=use_idf, encoding='utf-8', strip_accents="unicode")
docs = | np.array(sentences) | numpy.array |
###############################################################################
# normalizer.py: top-level class for normalizer
###############################################################################
import warnings
import numpy as np
from astroNN.config import MAGIC_NUMBER
from astroNN.nn.numpy import sigmoid_inv, sigmoid
from astroNN.shared.dict_tools import list_to_dict, to_iterable
class Normalizer(object):
"""Top-level class for a normalizer"""
def __init__(self, mode=None):
"""
NAME:
__init__
PURPOSE:
To define a normalizer
HISTORY:
2018-Jan-06 - Written - <NAME> (University of Toronto)
"""
self.normalization_mode = mode
self.featurewise_center = {}
self.datasetwise_center = {}
self.featurewise_stdalization = {}
self.datasetwise_stdalization = {}
self.mean_labels = {}
self.std_labels = {}
self._custom_norm_func = None
self._custom_denorm_func = None
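# Summary of the supported `normalization_mode` values, as implemented in
# mode_checker() below (descriptive comment only):
#   '0'   : no normalization (mean 0, std 1 placeholders)
#   '1'   : dataset-wise centering and scaling
#   '2'   : feature-wise centering and scaling
#   '3'   : feature-wise centering only
#   '3s'  : custom normalization function (defaults to sigmoid / sigmoid_inv)
#   '4'   : feature-wise scaling only
#   '255' : divide by 255 (for 8-bit images)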
def mode_checker(self, data):
if type(data) is not dict:
dict_flag = False
data = {"Temp": data}
self.mean_labels = {"Temp": self.mean_labels}
self.std_labels = {"Temp": self.std_labels}
else:
dict_flag = True
master_data = {}
if type(self.normalization_mode) is not dict:
self.normalization_mode = list_to_dict(data.keys(), to_iterable(self.normalization_mode))
for name in data.keys(): # normalize data for each named inputs
if data[name].ndim == 1:
data_array = np.expand_dims(data[name], 1)
else:
data_array = np.array(data[name])
self.normalization_mode.update({name: str(self.normalization_mode[name])}) # just to prevent unnecessary type issue
if data_array.dtype == bool:
if self.normalization_mode[name] != '0': # binary classification case
warnings.warn("Data type is detected as bool, setting normalization_mode to 0 which is "
"doing nothing because no normalization can be done on bool")
self.normalization_mode[name] = '0'
data_array = data_array.astype(np.float32, copy=False) # need to convert data to float in every case
if self.normalization_mode[name] == '0':
self.featurewise_center.update({name: False})
self.datasetwise_center.update({name: False})
self.featurewise_stdalization.update({name: False})
self.datasetwise_stdalization.update({name: False})
self.mean_labels.update({name: np.array([0.])})
self.std_labels.update({name: np.array([1.])})
elif self.normalization_mode[name] == '1':
self.featurewise_center.update({name: False})
self.datasetwise_center.update({name: True})
self.featurewise_stdalization.update({name: False})
self.datasetwise_stdalization.update({name: True})
elif self.normalization_mode[name] == '2':
self.featurewise_center.update({name: True})
self.datasetwise_center.update({name: False})
self.featurewise_stdalization.update({name: True})
self.datasetwise_stdalization.update({name: False})
elif self.normalization_mode[name] == '3':
self.featurewise_center.update({name: True})
self.datasetwise_center.update({name: False})
self.featurewise_stdalization.update({name: False})
self.datasetwise_stdalization.update({name: False})
elif self.normalization_mode[name] == '3s': # allow custom function, default to use sigmoid to normalize
self.featurewise_center.update({name: False})
self.datasetwise_center.update({name: False})
self.featurewise_stdalization.update({name: False})
self.datasetwise_stdalization.update({name: False})
if self._custom_norm_func is None:
self._custom_norm_func = sigmoid
if self._custom_denorm_func is None:
self._custom_denorm_func = sigmoid_inv
self.mean_labels.update({name: np.array([0.])})
self.std_labels.update({name: np.array([1.])})
elif self.normalization_mode[name] == '4':
self.featurewise_center.update({name: False})
self.datasetwise_center.update({name: False})
self.featurewise_stdalization.update({name: True})
self.datasetwise_stdalization.update({name: False})
elif self.normalization_mode[name] == '255':
# Used to normalize 8bit images
self.featurewise_center.update({name: False})
self.datasetwise_center.update({name: False})
self.featurewise_stdalization.update({name: False})
self.datasetwise_stdalization.update({name: False})
self.mean_labels.update({name: np.array([0.])})
self.std_labels.update({name: np.array([255.])})
else:
raise ValueError(f"Unknown Mode -> {self.normalization_mode[name]}")
master_data.update({name: data_array})
return master_data, dict_flag
def normalize(self, data, calc=True):
data_array, dict_flag = self.mode_checker(data)
for name in data_array.keys(): # normalize data for each named inputs
magic_mask = [(data_array[name] == MAGIC_NUMBER)]
try:
self.mean_labels[name]
except KeyError:
self.mean_labels.update({name: np.array([0.])})
try:
self.std_labels[name]
except KeyError:
self.std_labels.update({name: | np.array([1.]) | numpy.array |
'''
Mouse Motion Tracker
====================
TBA
Usage
-----
motrack.py -video "path/to/file.ext"
Keys
----
ESC - exit
'''
#!/usr/bin/env python
import numpy as np
import cv2
import argparse
import getcoords
import sys
import utils
import os
# Parse arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", type = str, help= "path to the video file")
ap.add_argument("-fr", "--frame_range", type = str, help ="[sart_frame, stop_frame]",
default = [])
ap.add_argument("-p", "--params", type = str, help= "parameter set",
default = "params")
try:
args = ap.parse_args()
video_src = args.video
parameter_set = args.params
frame_range = utils.convert_list_str(args.frame_range)
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
parameter_set = "params"
frame_range = []
video_src = []
init_flag = True if parameter_set == "params_init" else False
if init_flag:
import params_init as p
else:
import params as p
def set_review_flags(dists, num_nonzeros, times):
"""Sets flags for supervised control
Tracking may fail for multiple reasons, and failure often manifests itself as any of:
1) the detected object being stationary for too long a period of time, 2) the travelled
distance between consecutive frames being unexpectedly large, or 3) changes in illumination
that persist over multiple consecutive frames.
Parameters
----------
dists : list
traveled distances corresponding to all consecutive (constant) time deltas
num_nonzeros : int
number of nonzero pixels in the binary representation of foreground and background
(fg = 1, bg = 0)
times :
list of all times at which frames are collected
Returns
----------
max_num_static : int
maximum number of frames over which the detected object didn't move
max_num_large : int
maximum number of frames over which object area was unexpectedly large
(possibly due to persistent change of illumination)
max_dist : int
maximal distance travelled by object over all time-deltas
References
------------
segment_stationary_background, label_contours
"""
from itertools import groupby
#1
gradients = np.around(np.gradient(dists)) # alternative: np.diff(dists)
group_reps = [[len(list(vals)), key] for (key, vals) in groupby(gradients)]
group_reps = np.asarray(group_reps)
try:
max_num_static = np.max(group_reps[group_reps[:,1] == 0,0])
except ValueError: # Object never static
max_num_static = 0
#2
thresh = 40000
num_nonzeros_thresh = [True if x > thresh else False for x in num_nonzeros]
group_reps = [[ len(list(vals)), key] for (key, vals) in
groupby(num_nonzeros_thresh)]
group_reps = np.asarray(group_reps)
if group_reps.size and any(group_reps[:, 1] == True):  # logical 'and', not bitwise '&', which would depend on the parity of .size
max_num_large = np.max(group_reps[group_reps[:, 1] == True,0])
else: # Never above threshold
max_num_large = 0;
#3
diffs = np.diff(dists)
diffs = np.sort(diffs)
max_dist = np.abs(diffs[0])
return max_num_static, max_num_large, max_dist
def flag_file_review(f, dists, num_nonzeros, times):
"""Marks file for manual control by appending warnings to output
Parameters
----------
f : file handle
handle to file to write warnings into
dists : list
traveled distances corresponding to all consecutive (constant) time deltas
num_nonzeros : int
number of nonzero pixels in the binary representation of foreground and background
(fg = 1, bg = 0)
times :
list of all times at which frames are collected
Returns
------------
f : file handle
handle to file with flags appended
References
---------------
set_review_flags
"""
max_rep_count, max_large_count, max_dist = \
set_review_flags(dists, num_nonzeros, times)
if max_rep_count > 30:
f.write("WARNING:NoMotion: Object static for {:d}*`step` frames\n". \
format( | np.int(max_rep_count) | numpy.int |
import contextlib
import logging
import math
import os
from collections import namedtuple
from inspect import getmembers, isfunction
from pathlib import Path
from astropy.time import Time
import bilby
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.interpolate import RegularGridInterpolator, interp1d
from scipy.stats import gaussian_kde
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
import redback
from redback.constants import *
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'plot_styles/paper.mplstyle')
plt.style.use(filename)
logger = logging.getLogger('redback')
_bilby_logger = logging.getLogger('bilby')
def citation_wrapper(r):
def wrapper(f):
f.citation = r
return f
return wrapper
def get_csm_properties(nn, eta):
csm_properties = namedtuple('csm_properties', ['AA', 'Bf', 'Br'])
filepath = f"{dirname}/tables/csm_table.txt"
ns, ss, bfs, brs, aas = np.loadtxt(filepath, delimiter=',', unpack=True)
bfs = np.reshape(bfs, (10, 30)).T
brs = np.reshape(brs, (10, 30)).T
aas = np.reshape(aas, (10, 30)).T
ns = np.unique(ns)
ss = np.unique(ss)
bf_func = RegularGridInterpolator((ss, ns), bfs)
br_func = RegularGridInterpolator((ss, ns), brs)
aa_func = RegularGridInterpolator((ss, ns), aas)
Bf = bf_func([nn, eta])[0]
Br = br_func([nn, eta])[0]
AA = aa_func([nn, eta])[0]
csm_properties.AA = AA
csm_properties.Bf = Bf
csm_properties.Br = Br
return csm_properties
def lambda_to_nu(wavelength):
"""
:param wavelength: wavelength in Angstrom
:return: frequency in Hertz
"""
return speed_of_light_si / (wavelength * 1.e-10)
def nu_to_lambda(frequency):
"""
:param frequency: frequency in Hertz
:return: wavelength in Angstrom
"""
return 1.e10 * (speed_of_light_si / frequency)
def calc_kcorrected_properties(frequency, redshift, time):
"""
Perform k-correction
:param frequency: observer frame frequency
:param redshift: source redshift
:param time: observer frame time
:return: k-corrected frequency and source frame time
"""
time = time / (1 + redshift)
frequency = frequency * (1 + redshift)
return frequency, time
def mjd_to_jd(mjd):
return Time(mjd, format="mjd").jd
def jd_to_mjd(jd):
return Time(jd, format="jd").mjd
def jd_to_date(jd):
year, month, day, _, _, _ = Time(jd, format="jd").to_value("ymdhms")
return year, month, day
def mjd_to_date(mjd):
year, month, day, _, _, _ = Time(mjd, format="mjd").to_value("ymdhms")
return year, month, day
def date_to_jd(year, month, day):
return Time(dict(year=year, month=month, day=day), format="ymdhms").jd
def date_to_mjd(year, month, day):
return Time(dict(year=year, month=month, day=day), format="ymdhms").mjd
def get_filter_frequency(filter):
pass
def deceleration_timescale(**kwargs):
e0 = 10 ** kwargs['loge0']
gamma0 = kwargs['g0']
nism = 10 ** kwargs['logn0']
denom = 32 * np.pi * gamma0 ** 8 * nism * proton_mass * speed_of_light ** 5
num = 3 * e0
t_peak = (num / denom) ** (1. / 3.)
return t_peak
def calc_ABmag_from_flux_density(fluxdensity):
return (fluxdensity * uu.mJy).to(uu.ABmag)
def convert_absolute_mag_to_apparent(magnitude, distance):
"""
Convert absolute magnitude to apparent
:param magnitude: AB absolute magnitude
:param distance: Distance in parsecs
"""
app_mag = magnitude + 5 * (np.log10(distance) - 1)
return app_mag
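# Worked example (comment only): an absolute magnitude of -19 at 10 pc gives an
# apparent magnitude of -19 (5 * (log10(10) - 1) = 0); at 1e7 pc the correction is
# 5 * (7 - 1) = 30, giving an apparent magnitude of 11.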
def calc_flux_density_from_ABmag(magnitudes):
return (magnitudes * uu.ABmag).to(uu.mJy)
def check_element(driver, id_number):
"""
checks that an element exists on a website, and provides an exception
"""
try:
driver.find_element_by_id(id_number)
except NoSuchElementException as e:
print(e)
return False
return True
def calc_flux_density_error(magnitude, magnitude_error, reference_flux, magnitude_system='AB'):
if magnitude_system == 'AB':
reference_flux = 3631
prefactor = np.log(10) / (-2.5)
dfdm = 1000 * prefactor * reference_flux * np.exp(prefactor * magnitude)
flux_err = ((dfdm * magnitude_error) ** 2) ** 0.5
return flux_err
def calc_flux_from_mag(magnitude, reference_flux, magnitude_system='AB'):
if magnitude_system == 'AB':
reference_flux = 3631
flux = 10 ** (magnitude / -2.5) * reference_flux # Jansky
return 1000 * flux # return in mJy
def bands_to_frequency(bands):
"""Converts a list of bands into an array of frequency in Hz
:param bands: List of bands.
:type bands: list[str]
:return: An array of frequency associated with the given bands.
:rtype: np.ndarray
"""
if bands is None:
bands = []
df = pd.read_csv(f"{dirname}/tables/filters.csv")
bands_to_freqs = {band: wavelength for band, wavelength in zip(df['bands'], df['wavelength [Hz]'])}
res = []
for band in bands:
try:
res.append(bands_to_freqs[band])
except KeyError as e:
logger.info(e)
raise KeyError(f"Band {band} is not defined in filters.csv!")
return np.array(res)
def fetch_driver():
# open the webdriver
return webdriver.PhantomJS()
def calc_credible_intervals(samples, interval=0.9):
if not 0 <= interval <= 1:
raise ValueError
lower_bound = np.quantile(samples, 0.5 - interval/2, axis=0)
upper_bound = np.quantile(samples, 0.5 + interval/2, axis=0)
median = np.quantile(samples, 0.5, axis=0)
return lower_bound, upper_bound, median
def calc_one_dimensional_median_and_error_bar(samples, quantiles=(0.16, 0.84), fmt='.2f'):
summary = namedtuple('summary', ['median', 'lower', 'upper', 'string'])
if len(quantiles) != 2:
raise ValueError("quantiles must be of length 2")
quants_to_compute = np.array([quantiles[0], 0.5, quantiles[1]])
quants = np.percentile(samples, quants_to_compute * 100)
summary.median = quants[1]
summary.plus = quants[2] - summary.median
summary.minus = summary.median - quants[0]
fmt = "{{0:{0}}}".format(fmt).format
string_template = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
summary.string = string_template.format(
fmt(summary.median), fmt(summary.minus), fmt(summary.plus))
return summary
def kde_scipy(x, bandwidth=0.05, **kwargs):
"""Kernel Density Estimation with Scipy"""
# Note that scipy weights its bandwidth by the covariance of the
# input data. To make the results comparable to the other methods,
# we divide the bandwidth by the sample standard deviation here.
kde = gaussian_kde(x, bw_method=bandwidth / x.std(ddof=1), **kwargs)
return kde
def cdf(x, plot=True, *args, **kwargs):
x, y = sorted(x), np.arange(len(x)) / len(x)
return plt.plot(x, y, *args, **kwargs) if plot else (x, y)
def bin_ttes(ttes, bin_size):
counts, bin_edges = np.histogram(ttes, np.arange(ttes[0], ttes[-1], bin_size))
times = np.array([bin_edges[i] + (bin_edges[i + 1] - bin_edges[i]) / 2 for i in range(len(bin_edges) - 1)])
return times, counts
def find_path(path):
if path == 'default':
return os.path.join(dirname, '../data/GRBData')
else:
return path
def setup_logger(outdir='.', label=None, log_level='INFO'):
"""Setup logging output: call at the start of the script to use
:param outdir: If supplied, write the logging output to outdir/label.log
:type outdir: str
:param label: If supplied, write the logging output to outdir/label.log
:type label: str
:param log_level:
['debug', 'info', 'warning']
Either a string from the list above, or an integer as specified
in https://docs.python.org/2/library/logging.html#logging-levels
(Default value = 'INFO')
"""
log_file = f'{outdir}/{label}.log'
with contextlib.suppress(FileNotFoundError):
os.remove(log_file) # remove existing log file with the same name instead of appending to it
bilby.core.utils.setup_logger(outdir=outdir, label=label, log_level=log_level, print_version=True)
level = _bilby_logger.level
logger.setLevel(level)
if not any([type(h) == logging.StreamHandler for h in logger.handlers]):
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter(
'%(asctime)s %(name)s %(levelname)-8s: %(message)s', datefmt='%H:%M'))
stream_handler.setLevel(level)
logger.addHandler(stream_handler)
if not any([type(h) == logging.FileHandler for h in logger.handlers]):
if label is not None:
Path(outdir).mkdir(parents=True, exist_ok=True)
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(name)s %(levelname)-8s: %(message)s', datefmt='%H:%M'))
file_handler.setLevel(level)
logger.addHandler(file_handler)
for handler in logger.handlers:
handler.setLevel(level)
logger.info(f'Running redback version: {redback.__version__}')
class MetaDataAccessor(object):
"""
Generic descriptor class that allows handy access of properties without long
boilerplate code. Allows easy access to meta_data dict entries
"""
def __init__(self, property_name, default=None):
self.property_name = property_name
self.container_instance_name = 'meta_data'
self.default = default
def __get__(self, instance, owner):
try:
return getattr(instance, self.container_instance_name)[self.property_name]
except KeyError:
return self.default
def __set__(self, instance, value):
getattr(instance, self.container_instance_name)[self.property_name] = value
class DataModeSwitch(object):
"""
Descriptor class to access boolean data_mode switches.
"""
def __init__(self, data_mode):
self.data_mode = data_mode
def __get__(self, instance, owner):
return instance.data_mode == self.data_mode
def __set__(self, instance, value):
if value:
instance.data_mode = self.data_mode
else:
instance.data_mode = None
class KwargsAccessorWithDefault(object):
"""
Descriptor class to access a kwarg dictionary with defaults.
"""
def __init__(self, kwarg, default=None):
self.kwarg = kwarg
self.default = default
def __get__(self, instance, owner):
return instance.kwargs.get(self.kwarg, self.default)
def __set__(self, instance, value):
instance.kwargs[self.kwarg] = value
def get_functions_dict(module):
models_dict = {}
_functions_list = [o for o in getmembers(module) if isfunction(o[1])]
_functions_dict = {f[0]: f[1] for f in _functions_list}
models_dict[module.__name__.split('.')[-1]] = _functions_dict
return models_dict
def interpolated_barnes_and_kasen_thermalisation_efficiency(mej, vej):
"""
Uses Barnes+2016 and interpolation to calculate the r-process thermalisation efficiency
depending on the input mass and velocity
:param mej: ejecta mass in solar masses
:param vej: initial ejecta velocity as a fraction of speed of light
:return: av, bv, dv constants in the thermalisation efficiency equation Eq 25 in Metzger 2017
"""
v_array = np.array([0.1, 0.2, 0.3])
mass_array = np.array([1.0e-3, 5.0e-3, 1.0e-2, 5.0e-2])
a_array = np.asarray([[2.01, 4.52, 8.16], [0.81, 1.9, 3.2],
[0.56, 1.31, 2.19], [.27, .55, .95]])
b_array = np.asarray([[0.28, 0.62, 1.19], [0.19, 0.28, 0.45],
[0.17, 0.21, 0.31], [0.10, 0.13, 0.15]])
d_array = np.asarray([[1.12, 1.39, 1.52], [0.86, 1.21, 1.39],
[0.74, 1.13, 1.32], [0.6, 0.9, 1.13]])
a_func = RegularGridInterpolator((mass_array, v_array), a_array, bounds_error=False, fill_value=None)
b_func = RegularGridInterpolator((mass_array, v_array), b_array, bounds_error=False, fill_value=None)
d_func = RegularGridInterpolator((mass_array, v_array), d_array, bounds_error=False, fill_value=None)
av = a_func([mej, vej])[0]
bv = b_func([mej, vej])[0]
dv = d_func([mej, vej])[0]
return av, bv, dv
def electron_fraction_from_kappa(kappa):
"""
Uses interpolation from Tanaka+19 to calculate
the electron fraction based on the temperature independent gray opacity
:param kappa: temperature independent gray opacity
:return: electron_fraction
"""
kappa_array = | np.array([1, 3, 5, 20, 30]) | numpy.array |
#---------------------------------------
#Since : Jun/17/2012
#Update: 2020/12/25
# -*- coding: utf-8 -*-
#---------------------------------------
import numpy as np
import math as mt
import pylab as pl
import networkx as nx
import sys
from scipy import ndimage
from sklearn import cluster, datasets
import matplotlib.pyplot as plt
from scipy.stats import rankdata
class NG(object):
def __init__(self, num = 256, end = 1000000, lam_i = 20.0, lam_f = 0.1, ew_i = 0.5, ew_f = 0.05, amax_i = 80.0, amax_f = 800, sig_kernel = 0.5):
# Set Parameters
# max of units
self.NUM = num
# relationship of neighbors
self.lam_i = lam_i
self.lam_f = lam_f
# Learning coefficient
self.Ew_i = ew_i
self.Ew_f = ew_f
# threshold to remove an edge (lifetime of an edge, T)
self.AMAX_i = amax_i
self.AMAX_f = amax_f
# Stopping condition
self.END = end
#kernel
self.sig_kernel = sig_kernel
def initialize_units(self, data):
self.N = data.shape[0] # the number of data points
self.g_units = nx.Graph()
# initialize the units
self.units = data[np.random.permutation(self.N)[range(self.NUM)]]
for i in range(self.NUM):
self.g_units.add_node(i)
def dists(self, x, units):
#calculate distance
return np.linalg.norm(units - x, axis=1)
def dw(self, x, unit):
return x - unit
def kernel(self, x):
return(np.exp(- np.linalg.norm(np.expand_dims(x, axis = 1) - x,axis=2)**2/2/(self.sig_kernel**2)))
def affinity(self):
A = nx.adjacency_matrix(self.g_units)
A = np.array(A.todense())
A = np.where(A > 0, 1, 0)
A = A * self.kernel(self.units)
return A
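# Note (descriptive comment): affinity() returns the units' graph adjacency,
# binarized and weighted by a Gaussian kernel on the unit positions, i.e. an
# affinity matrix suitable for e.g. spectral clustering (sklearn.cluster is
# imported above, so that is presumably the intended consumer).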
def normalize(self, data):
# normalize dataset
self.mindata = data[np.argmin(np.linalg.norm(data, axis=1))]
self.diff_max_min = np.linalg.norm( data[np.argmax( | np.linalg.norm(data, axis=1) | numpy.linalg.norm |
import torch
from torch import nn
import numpy as np
from collections import OrderedDict
from torch.utils.data import DataLoader
from torch.utils.data import Sampler
from contextlib import nullcontext
import yaml
from yaml import SafeLoader as yaml_Loader, SafeDumper as yaml_Dumper
import os,sys
from tqdm import tqdm
from lib.utils.dotdict import HDict
HDict.L.update_globals({'path':os.path})
def str_presenter(dumper, data):
if len(data.splitlines()) > 1: # check for multiline string
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
return dumper.represent_scalar('tag:yaml.org,2002:str', data)
yaml.representer.SafeRepresenter.add_representer(str, str_presenter)
def read_config_from_file(config_file):
with open(config_file, 'r') as fp:
return yaml.load(fp, Loader=yaml_Loader)
def save_config_to_file(config, config_file):
with open(config_file, 'w') as fp:
return yaml.dump(config, fp, sort_keys=False, Dumper=yaml_Dumper)
class StopTrainingException(Exception):
pass
class CollatedBatch(list):
pass
class DistributedTestDataSampler(Sampler):
def __init__(self, data_source, batch_size, rank, world_size):
data_len = len(data_source)
all_indices = | np.arange(data_len, dtype=int) | numpy.arange |
from collections import namedtuple
import operator
import typing
import warnings
import numpy as np
import audeer
from audmetric.core.utils import (
assert_equal_length,
infer_labels,
scores_per_subgroup_and_class,
)
def accuracy(
truth: typing.Sequence[typing.Any],
prediction: typing.Sequence[typing.Any],
labels: typing.Sequence[typing.Union[str, int]] = None
) -> float:
r"""Classification accuracy.
.. math::
\text{accuracy} = \frac{\text{number of correct predictions}}
{\text{number of incorrect predictions}}
Args:
truth: ground truth values/classes
prediction: predicted values/classes
labels: included labels in preferred ordering.
Sample is considered in computation if either prediction or
ground truth (logical OR) is contained in labels.
If no labels are supplied,
they will inferred from :math:`\{\text{prediction}, \text{truth}\}`
and ordered alphabetically.
Returns:
accuracy of prediction :math:`\in [0, 1]`
Raises:
ValueError: if ``truth`` and ``prediction`` differ in length
Example:
>>> accuracy([0, 0], [0, 1])
0.5
"""
assert_equal_length(truth, prediction)
if labels is None:
labels = infer_labels(truth, prediction)
prediction = np.array(prediction)
truth = np.array(truth)
# keep where both prediction and truth contained in `labels`
label_mask = np.nonzero(
np.logical_or(
np.isin(truth, labels),
np.isin(prediction, labels)
)
)
truth = truth[label_mask]
prediction = prediction[label_mask]
if len(prediction) == 0:
return np.nan
else:
return float(sum(prediction == truth) / len(prediction))
def concordance_cc(
truth: typing.Sequence[float],
prediction: typing.Sequence[float],
) -> float:
r"""Concordance correlation coefficient.
.. math::
\rho_c = \frac{2\rho\sigma_\text{prediction}\sigma_\text{truth}}
{\sigma_\text{prediction}^2 + \sigma_\text{truth}^2 + (
\mu_\text{prediction}-\mu_\text{truth})^2}
where :math:`\rho` is the Pearson correlation coefficient,
:math:`\mu` the mean and :math:`\sigma^2` the variance.
Args:
truth: ground truth values
prediction: predicted values
Returns:
concordance correlation coefficient :math:`\in [-1, 1]`
Raises:
ValueError: if ``truth`` and ``prediction`` differ in length
Example:
>>> concordance_cc([0, 1, 2], [0, 1, 1])
0.6666666666666666
"""
assert_equal_length(truth, prediction)
prediction = np.array(prediction)
truth = np.array(truth)
if len(prediction) < 2:
return np.nan
r = pearson_cc(prediction, truth)
x_mean = prediction.mean()
y_mean = truth.mean()
x_std = prediction.std()
y_std = truth.std()
denominator = (
x_std * x_std
+ y_std * y_std
+ (x_mean - y_mean) * (x_mean - y_mean)
)
if denominator == 0:
ccc = np.nan
else:
ccc = 2 * r * x_std * y_std / denominator
return float(ccc)
def confusion_matrix(
truth: typing.Sequence[typing.Any],
prediction: typing.Sequence[typing.Any],
labels: typing.Sequence[typing.Any] = None,
*,
normalize: bool = False,
) -> typing.List[typing.List[typing.Union[int, float]]]:
r"""Confusion matrix.
Args:
truth: ground truth values/classes
prediction: predicted values/classes
labels: included labels in preferred ordering.
If no labels are supplied,
they will inferred from :math:`\{\text{prediction}, \text{truth}\}`
and ordered alphabetically.
normalize: normalize confusion matrix over the rows
Returns:
confusion matrix
Raises:
ValueError: if ``truth`` and ``prediction`` differ in length
Example:
>>> truth = [0, 1, 2]
>>> prediction = [0, 2, 0]
>>> confusion_matrix(truth, prediction)
[[1, 0, 0], [0, 0, 1], [1, 0, 0]]
"""
assert_equal_length(truth, prediction)
if labels is None:
labels = infer_labels(truth, prediction)
truth = np.array(truth)
prediction = np.array(prediction)
matrix = []
for row in labels:
row_indices = np.where(truth == row)
y_row = prediction[row_indices]
row_matrix = []
for column in labels:
row_matrix += [len(np.where(y_row == column)[0])]
matrix += [row_matrix]
if normalize:
for idx, row in enumerate(matrix):
if np.sum(row) != 0:
row_sum = float(np.sum(row))
matrix[idx] = [x / row_sum for x in row]
return matrix
def detection_error_tradeoff(
truth: typing.Union[
typing.Union[bool, int],
typing.Sequence[typing.Union[bool, int]]
],
prediction: typing.Union[
typing.Union[bool, int, float],
typing.Sequence[typing.Union[bool, int, float]]
],
) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]:
r"""Detection error tradeoff for verification experiments.
The `detection error tradeoff (DET)`_
is a graph showing
the false non-match rate (FNMR)
against the false match rate (FMR).
The FNMR indicates
how often an enrolled speaker was missed.
The FMR indicates
how often an impostor was verified as the enrolled speaker.
This function does not return a figure,
but the FMR and FNMR,
together with the corresponding verification thresholds
at which a similarity value
was regarded to belong to the enrolled speaker.
``truth`` may only contain entries like ``[1, 0, True, False...]``,
whereas prediction values
can also contain similarity scores, e.g. ``[0.8, 0.1, ...]``.
The implementation is identical with the one provided
by the pyeer_ package.
.. _detection error tradeoff (DET): https://en.wikipedia.org/wiki/Detection_error_tradeoff
.. _pyeer: https://github.com/manuelaguadomtz/pyeer
Args:
truth: ground truth classes
prediction: predicted classes or similarity scores
Returns:
* false match rate (FMR)
* false non-match rate (FNMR)
* verification thresholds
Raises:
ValueError: if ``truth`` contains values
different from ``1, 0, True, False``
Example:
>>> truth = [1, 0]
>>> prediction = [0.9, 0.1]
>>> detection_error_tradeoff(truth, prediction)
(array([1., 0.]), array([0., 0.]), array([0.1, 0.9]))
""" # noqa: E501
truth = np.array(truth)
allowed_truth_values = set([1, 0, True, False])
if not set(truth).issubset(allowed_truth_values):
raise ValueError(
"'truth' is only allowed to contain "
"[1, 0, True, False], "
'yours contains:\n'
f"[{', '.join([str(t) for t in set(truth)])}]"
)
truth = truth.astype(bool)
prediction = np.array(prediction).astype(np.float64)
# Genuine matching scores
gscores = prediction[truth]
# Impostor matching scores
iscores = prediction[~truth]
gscores_number = len(gscores)
iscores_number = len(iscores)
# Labeling genuine scores as 1 and impostor scores as 0
gscores = list(zip(gscores, [1] * gscores_number))
iscores = list(zip(iscores, [0] * iscores_number))
# Stacking scores
scores = np.array(sorted(gscores + iscores, key=operator.itemgetter(0)))
cumul = np.cumsum(scores[:, 1])
# Grouping scores
thresholds, u_indices = np.unique(scores[:, 0], return_index=True)
# Calculating FNM and FM distributions
fnm = cumul[u_indices] - scores[u_indices][:, 1] # rejecting s < t
fm = iscores_number - (u_indices - fnm)
# Calculating FMR and FNMR
fnmr = fnm / gscores_number
fmr = fm / iscores_number
return fmr, fnmr, thresholds
def edit_distance(
truth: typing.Union[str, typing.Sequence[int]],
prediction: typing.Union[str, typing.Sequence[int]]
) -> int:
r"""Edit distance between two strings of characters or sequences of ints.
The implementation follows the `Wagner-Fischer algorithm`_.
.. _Wagner-Fischer algorithm:
https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm
Args:
truth: ground truth sequence
prediction: predicted sequence
Returns:
edit distance
Example:
>>> truth = 'lorem'
>>> prediction = 'lorm'
>>> edit_distance(truth, prediction)
1
>>> truth = [0, 1, 2]
>>> prediction = [0, 1]
>>> edit_distance(truth, prediction)
1
"""
if truth == prediction:
return 0
elif len(prediction) == 0:
return len(truth)
elif len(truth) == 0:
return len(prediction)
m0 = [None] * (len(truth) + 1)
m1 = [None] * (len(truth) + 1)
for i in range(len(m0)):
m0[i] = i
for i in range(len(prediction)):
m1[0] = i + 1
for j in range(len(truth)):
cost = 0 if prediction[i] == truth[j] else 1
m1[j + 1] = min(m1[j] + 1, # deletion
m0[j + 1] + 1, # insertion
m0[j] + cost) # substitution
for j in range(len(m0)):
m0[j] = m1[j]
return m1[len(truth)]
def equal_error_rate(
truth: typing.Union[
typing.Union[bool, int],
typing.Sequence[typing.Union[bool, int]]
],
prediction: typing.Union[
typing.Union[bool, int, float],
typing.Sequence[typing.Union[bool, int, float]]
],
) -> typing.Tuple[float, namedtuple]:
r"""Equal error rate for verification tasks.
The equal error rate (EER) is the point
where false non-match rate (FNMR)
and the impostors or false match rate (FMR)
are identical.
The FNMR indicates
how often an enrolled speaker was missed.
The FMR indicates
how often an impostor was verified as the enrolled speaker.
In practice the score distribution is not continuous
and an interval is returned instead.
The EER value will be set as the midpoint
of this interval::footcite:`Maio2002`
.. math::
\text{EER} = \frac{
\min(\text{FNMR}[t], \text{FMR}[t])
+ \max(\text{FNMR}[t], \text{FMR}[t])
}{2}
with :math:`t = \text{argmin}(|\text{FNMR} - \text{FMR}|)`.
``truth`` may only contain entries like ``[1, 0, True, False...]``,
whereas prediction values
can also contain similarity scores, e.g. ``[0.8, 0.1, ...]``.
The implementation is identical with the one provided
by the pyeer_ package.
.. footbibliography::
.. _pyeer: https://github.com/manuelaguadomtz/pyeer
Args:
truth: ground truth classes
prediction: predicted classes or similarity scores
Returns:
* equal error rate (EER)
* namedtuple containing
``fmr``,
``fnmr``,
``thresholds``,
``threshold``
whereas the last one corresponds to the threshold
corresponding to the returned EER
Raises:
ValueError: if ``truth`` contains values
different from ``1, 0, True, False``
Example:
>>> truth = [0, 1, 0, 1, 0]
>>> prediction = [0.2, 0.8, 0.4, 0.5, 0.5]
>>> eer, stats = equal_error_rate(truth, prediction)
>>> eer
0.16666666666666666
>>> stats.threshold
0.5
"""
Stats = namedtuple(
'stats',
[
'fmr', # False match rates (FMR)
'fnmr', # False non-match rates (FNMR)
'thresholds', # Thresholds
'threshold', # verification threshold for EER
],
)
fmr, fnmr, thresholds = detection_error_tradeoff(truth, prediction)
diff = fmr - fnmr
# t1 and t2 are our time indices
t2 = np.where(diff <= 0)[0]
if len(t2) > 0:
t2 = t2[0]
else:
warnings.warn(
'The false match rate '
'and false non-match rate curves '
'do not intersect each other.',
RuntimeWarning,
)
eer = 1.0
threshold = float(thresholds[0])
return eer, Stats(fmr, fnmr, thresholds, threshold)
t1 = t2 - 1 if diff[t2] != 0 and t2 != 0 else t2
if fmr[t1] + fnmr[t1] <= fmr[t2] + fnmr[t2]:
eer = (fnmr[t1] + fmr[t1]) / 2.0
threshold = thresholds[t1]
else: # pragma: nocover (couldn't find a test to trigger this)
eer = (fnmr[t2] + fmr[t2]) / 2.0
threshold = thresholds[t2]
eer = float(eer)
threshold = float(threshold)
return eer, Stats(fmr, fnmr, thresholds, threshold)
def event_error_rate(
truth: typing.Union[
str, typing.Sequence[typing.Union[str, typing.Sequence[int]]]
],
prediction: typing.Union[
str, typing.Sequence[typing.Union[str, typing.Sequence[int]]]
],
) -> float:
r"""Event error rate based on edit distance.
The event error rate is computed by aggregating the mean edit
distances of each (truth, prediction)-pair and averaging the
aggregated score by the number of pairs.
The mean edit distance of each (truth, prediction)-pair is computed
as an average of the edit distance over the length of the longer sequence
of the corresponding pair. By normalizing over the longer sequence the
normalized distance is bound to [0, 1].
Args:
truth: ground truth classes
prediction: predicted classes
Returns:
event error rate
Raises:
ValueError: if ``truth`` and ``prediction`` differ in length
Example:
>>> event_error_rate([[0, 1]], [[0]])
0.5
>>> event_error_rate([[0, 1], [2]], [[0], [2]])
0.25
>>> event_error_rate(['lorem'], ['lorm'])
0.2
>>> event_error_rate(['lorem', 'ipsum'], ['lorm', 'ipsum'])
0.1
"""
truth = audeer.to_list(truth)
prediction = audeer.to_list(prediction)
assert_equal_length(truth, prediction)
eer = 0.
for t, p in zip(truth, prediction):
n = max(len(t), len(p))
n = n if n > 1 else 1
eer += edit_distance(t, p) / n
num_samples = len(truth) if len(truth) > 1 else 1
return eer / num_samples
def fscore_per_class(
truth: typing.Sequence[typing.Any],
prediction: typing.Sequence[typing.Any],
labels: typing.Sequence[typing.Any] = None,
*,
zero_division: float = 0,
) -> typing.Dict[str, float]:
r"""F-score per class.
.. math::
\text{fscore}_k = \frac{\text{true positive}_k}
{\text{true positive}_k + \frac{1}{2}
(\text{false positive}_k + \text{false negative}_k)}
Args:
truth: ground truth values/classes
prediction: predicted values/classes
labels: included labels in preferred ordering.
If no labels are supplied,
they will inferred from :math:`\{\text{prediction}, \text{truth}\}`
and ordered alphabetically.
zero_division: set the value to return when there is a zero division
Returns:
dictionary with label as key and F-score as value
Example:
>>> fscore_per_class([0, 0], [0, 1])
{0: 0.6666666666666666, 1: 0.0}
"""
if labels is None:
labels = infer_labels(truth, prediction)
precision = precision_per_class(
truth,
prediction,
labels,
zero_division=zero_division,
)
recall = recall_per_class(
truth,
prediction,
labels,
zero_division=zero_division,
)
fscore = {}
for label, p, r in zip(labels, precision.values(), recall.values()):
if p * r == 0:
fscore[label] = 0.0
elif (p == 0.0 and np.isnan(r)) or (r == 0.0 and np.isnan(p)):
fscore[label] = 0.0
else:
fscore[label] = (2 * p * r) / (p + r)
return fscore
def mean_absolute_error(
truth: typing.Sequence[float],
prediction: typing.Sequence[float],
) -> float:
r"""Mean absolute error.
.. math::
\text{MAE} = \frac{1}{n} \sum^n_{i=1}
|\text{prediction} - \text{truth}|
Args:
truth: ground truth values
prediction: predicted values
Returns:
mean absolute error
Raises:
ValueError: if ``truth`` and ``prediction`` differ in length
Example:
>>> mean_absolute_error([0, 0], [0, 1])
0.5
"""
assert_equal_length(truth, prediction)
prediction = np.array(prediction)
truth = np.array(truth)
return float( | np.abs(truth - prediction) | numpy.abs |
import pickle
import numpy as np
import sys
def eigen(num, split_num, layer_num):
prefix = 'min_'
layer_num = int(layer_num)
num = str(num)
#cur = [8, 8, 8, 8, 16, 16, 24, 24, 24, 24, 24, 24, 32, 32]
#cur = [10, 12, 13, 13, 21, 29, 35, 37, 35, 25, 28, 28, 37, 32]
#cur = [12, 12, 18, 17, 28, 54, 55, 45, 40, 25, 28, 28, 37, 32]
#cur = [16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16]
cur = [22, 23, (20, 2), 25, (22, 2), 25, (24, 2), 20, 18, 19, 19, 20, (18, 2), 20]
cur = [27, 39, (22, 2), 39, (37, 2), 40, (30, 2), 20, 18, 21, 21, 21, (19, 2), 20]
cur = [29, 74, (24, 2), 54, (50, 2), 64, (42, 2), 21, 18, 24, 21, 21, (19, 2), 20]
cur = [33, 132, (28, 2), 69, (59, 2), 104, (53, 2), 21, 18, 24, 21, 21, (19, 2), 20]
cur = [33, 209, (34, 2), 90, (72, 2), 160, (64, 2), 21, 18, 24, 21, 21, (19, 2), 20]
cur[2] = cur[2][0]
cur[4] = cur[4][0]
cur[6] = cur[6][0]
cur[12] = cur[12][0]
cur = [4,4,4,4]
cur = [4, 7, 5, 4]
cur = [10, 12, 21, 11]
cur = [11, 18, 29, 12]
cur = [11, 18, 29, 12]
cur = [11, 30, 38, 12]
print(cur)
cur = pickle.load(open('cifar10max' + str(num) + '.pkl', 'rb'))
curt = []
DD = 0
layer_num = len(cur)
'''
for i in cur:
if i != 'M':
curt.append(i)
for i in range(layer_num):
#w = pickle.load(open('eigen/' + prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
try:
w = pickle.load(open('eigenm/' + prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
#w1 = pickle.load(open('eigen/' + prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
print(w)
#print(w.shape)
except:
DD = DD + 1
continue
if i == DD:
W = w
else:
W = np.concatenate([W, w], 0)
'''
prefix = 'max_'
r = [0.116849326, 0.038422294, 0.02061177, 0.02997986, 0.014377874, 0.0062844744, 0.012592447, 0.006363712, 0.008475702, 0.02377023, 0.038945824, 0.03370137, 0.03196905, 0.06754288]
r = np.ones([14])
#r = pickle.load(open('cifar10max' + str(num) + 'mag.pkl','rb'))
for i in range(layer_num):
#w = pickle.load(open('eigen/' + prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
try:
w = pickle.load(open('eigenmax/' + prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
#print(np.mean(w))
w *= np.sqrt(r[i])
print(np.mean(w))
#w1 = pickle.load(open('eigen/' + prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
#print(w.shape)
except:
DD = DD + 1
continue
if i == DD:
W = w
else:
W = np.concatenate([W, -w], 0)
st = np.argsort(W)
L = W.shape[0]
t = int(0.15 * L)
thre = W[st[t]]
SP = {}
VP = {}
SP1 = {}
VP1 = {}
DL = []
dp = []
prefix = sys.argv[3] + '_'
for i in range(layer_num):
if i == 0:
k = 3
else:
k = 1
try:
w = pickle.load(open('eigenmax/' +prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
v = pickle.load(open('eigenmax/' +prefix+ 'V_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
w *= np.sqrt(r[i])
except:
print(i)
l = int(0.1 * curt[i])
D = np.random.randint(0, curt[i], size=[int(0.1 * curt[i]), 1])
SP[i] = D
VD = np.zeros([1, 1, 1])
#VP[i] = np.reshape(v, [v.shape[0], -1, k, k])
VP[i] = np.zeros([curt[i], curt[i-1], 1, 1])
DL.append(l)
continue
if prefix == 'max_':
ic = -1
else:
ic = 1
D = np.argwhere((ic * w) < thre)
l = D.shape[0]
SP[i] = np.squeeze(D)
#SP1[i] = np.random.randint(0, curt[i], size=[D.shape[0], 1])
VD = v[D].astype(float)
VP[i] = np.reshape(v, [v.shape[0], -1, k, k])
#VP1[i] = np.zeros_like(VD)
dp.append(l)
DL.append(l)
print(SP[i].shape)
print(VP[i].shape)
print(cur[i])
pickle.dump(SP, open('eigenmax/' + num + prefix + 'global.pkl', 'wb'))
pickle.dump(VP, open('eigenmax/' + num + prefix + 'globalv.pkl', 'wb'))
print(DL)
DL = | np.array(DL) | numpy.array |
import numpy as np
from functools import reduce
from bisect import bisect
def getZero(size=32):
return np.zeros((size,size),dtype="uint8")
def getI(size=32):
return np.identity(size,dtype="uint8")
def getShift(n,size=32):
return np.eye(size,k=n,dtype="uint8")
def getTrans():
t11,t12,t13,t14 = getZero(),getI(),getZero(),getZero()
t21,t22,t23,t24 = getZero(),getZero(),getI(),getZero()
t31,t32,t33,t34 = getZero(),getZero(),getZero(),getI()
t41,t42,t43,t44 = (getI()^getShift(-8))@(getI()^getShift(11))%2,getZero(),getZero(),getI()^getShift(-19)
trans = np.block([
[t11,t12,t13,t14],
[t21,t22,t23,t24],
[t31,t32,t33,t34],
[t41,t42,t43,t44],
])
return trans
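# Note (added observation, not stated in the file): the shift amounts 11, 8 and 19
# used in the bottom block row match the xorshift128 state update, so `trans`
# appears to be the one-step transition matrix of that generator over GF(2).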
def getS(intervals,rows = 39):
intervals = intervals[:rows]
t = getTrans()
t_ = getTrans()
s = | np.zeros((4*rows,128),"uint8") | numpy.zeros |
import tensorflow as tf
import numpy as np
import tqdm
__all__ = ('pad_ragged_2d', 'shuffle_ragged_2d',
'inputs_to_labels', 'get_pos_encoding',
'get_quant_time', 'softmax_with_temp',
'generate_midis')
def pad_ragged_2d(ragged_tensor, pad_idx):
# ragged_tensor -> RAGGED(batch_size, None)
lens = ragged_tensor.row_lengths(axis=-1)
maxlen = tf.math.reduce_max(lens)
mask = tf.sequence_mask(lens, maxlen, tf.bool)
zero_padded = ragged_tensor.to_tensor()
# zero_padded -> (batch_size, maxlen)
padding = tf.constant(pad_idx, dtype=zero_padded.dtype)
padded_tensor = tf.where(mask, zero_padded, padding)
# padded_tensor -> (batch_size, maxlen)
return padded_tensor
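# Hedged usage sketch for pad_ragged_2d (not part of the original module); the
# sample values below are assumptions chosen only to show the padding behaviour.
def _pad_ragged_2d_example():
    ragged = tf.ragged.constant([[7, 8, 9], [1, 2]], dtype=tf.int32)
    padded = pad_ragged_2d(ragged, pad_idx=-1)
    # Rows shorter than the longest row are filled with pad_idx (-1) instead of
    # the implicit zeros of RaggedTensor.to_tensor():
    # [[ 7,  8,  9],
    #  [ 1,  2, -1]]
    return padded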
def shuffle_ragged_2d(ragged_tensors, pad_idx, lowest_idx=5):
if not isinstance(ragged_tensors, (list, tuple)):
ragged_tensors = [ragged_tensors]
# ragged_tensor -> RAGGED(batch_size, None)
lens = ragged_tensors[0].row_lengths(axis=-1)
    kth_lowest = -tf.nn.top_k(-lens, lowest_idx).values[-1]  # k-th smallest row length (via top_k of negated lengths)
shuffled_tensors = [[] for _ in ragged_tensors]
for len_, *rows in zip(lens, *ragged_tensors):
assert all(row.shape[0] == len_ for row in rows)
if len_ <= kth_lowest:
new_rows = [tf.pad(row, paddings=[[0, kth_lowest - len_]],
constant_values=pad_idx) for row in rows]
else:
start_idx = tf.random.uniform(
(), minval=0, maxval=len_ - kth_lowest + 1, dtype=tf.int64)
new_rows = [row[start_idx: start_idx + kth_lowest]
for row in rows]
for tensor, row in zip(shuffled_tensors, new_rows):
tensor.append(row[tf.newaxis, :])
shuffled_tensors = [tf.concat(shuffled_tensor, axis=0)
for shuffled_tensor in shuffled_tensors]
return shuffled_tensors
def inputs_to_labels(inputs, pad_idx):
# inputs -> (batch_size, seq_len)
inputs_padded = tf.pad(inputs[:, 1:], paddings=[
[0, 0], [0, 1]], constant_values=pad_idx)
return inputs_padded
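# Hedged sketch of what inputs_to_labels produces (not in the original file):
# labels are the inputs shifted one position left with pad_idx appended, the
# usual next-token prediction targets. The token values are assumptions.
def _inputs_to_labels_example():
    inputs = tf.constant([[11, 12, 13, 14]])
    labels = inputs_to_labels(inputs, pad_idx=0)
    # labels -> [[12, 13, 14, 0]]
    return labels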
def get_pos_encoding(seq_len, d_model):
numerator = np.arange(seq_len, dtype=np.float32)
numerator = numerator[:, np.newaxis]
denominator = np.arange(0, d_model, 2, dtype=np.float32)
denominator = denominator / d_model
    denominator = np.power(np.array(10000, dtype=np.float32), denominator)
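    # The function is truncated at this point in the source; the lines below are
    # a hedged sketch of the standard sinusoidal completion (interleaved
    # sin/cos, assuming an even d_model) and may differ from the author's code.
    angle_rads = numerator / denominator
    pos_encoding = np.zeros((seq_len, d_model), dtype=np.float32)
    pos_encoding[:, 0::2] = np.sin(angle_rads)
    pos_encoding[:, 1::2] = np.cos(angle_rads)
    # pos_encoding -> (seq_len, d_model)
    return pos_encoding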
import os
import numpy as np
from fury import shaders
from fury import actor, window
from fury.actor import grid
from fury.utils import shallow_copy
import itertools
import numpy.testing as npt
from fury.tmpdirs import InTemporaryDirectory
from fury.decorators import xvfb_it
from tempfile import mkstemp
# Allow import, but disable doctests if we don't have dipy
from fury.optpkg import optional_package
dipy, have_dipy, _ = optional_package('dipy')
if have_dipy:
from dipy.tracking.streamline import (center_streamlines,
transform_streamlines)
from dipy.align.tests.test_streamlinear import fornix_streamlines
from dipy.reconst.dti import color_fa, fractional_anisotropy
from dipy.data import get_sphere
use_xvfb = os.environ.get('TEST_WITH_XVFB', False)
skip_it = use_xvfb == 'skip'
@npt.dec.skipif(skip_it)
@xvfb_it
def test_slicer():
scene = window.Scene()
data = (255 * np.random.rand(50, 50, 50))
affine = np.eye(4)
slicer = actor.slicer(data, affine)
slicer.display(None, None, 25)
scene.add(slicer)
scene.reset_camera()
scene.reset_clipping_range()
# window.show(scene)
# copy pixels in numpy array directly
arr = window.snapshot(scene, 'test_slicer.png', offscreen=True)
import scipy
print(scipy.__version__)
print(scipy.__file__)
print(arr.sum())
print(np.sum(arr == 0))
print(np.sum(arr > 0))
print(arr.shape)
print(arr.dtype)
report = window.analyze_snapshot(arr, find_objects=True)
npt.assert_equal(report.objects, 1)
# print(arr[..., 0])
# The slicer can cut directly a smaller part of the image
slicer.display_extent(10, 30, 10, 30, 35, 35)
scene.ResetCamera()
scene.add(slicer)
# save pixels in png file not a numpy array
with InTemporaryDirectory() as tmpdir:
fname = os.path.join(tmpdir, 'slice.png')
# window.show(scene)
window.snapshot(scene, fname, offscreen=True)
report = window.analyze_snapshot(fname, find_objects=True)
npt.assert_equal(report.objects, 1)
npt.assert_raises(ValueError, actor.slicer, np.ones(10))
scene.clear()
rgb = np.zeros((30, 30, 30, 3))
rgb[..., 0] = 1.
rgb_actor = actor.slicer(rgb)
scene.add(rgb_actor)
scene.reset_camera()
scene.reset_clipping_range()
arr = window.snapshot(scene, offscreen=True)
report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
npt.assert_equal(report.objects, 1)
npt.assert_equal(report.colors_found, [True])
lut = actor.colormap_lookup_table(scale_range=(0, 255),
hue_range=(0.4, 1.),
saturation_range=(1, 1.),
value_range=(0., 1.))
scene.clear()
slicer_lut = actor.slicer(data, lookup_colormap=lut)
slicer_lut.display(10, None, None)
slicer_lut.display(None, 10, None)
slicer_lut.display(None, None, 10)
slicer_lut.opacity(0.5)
slicer_lut.tolerance(0.03)
slicer_lut2 = slicer_lut.copy()
npt.assert_equal(slicer_lut2.GetOpacity(), 0.5)
npt.assert_equal(slicer_lut2.picker.GetTolerance(), 0.03)
slicer_lut2.opacity(1)
slicer_lut2.tolerance(0.025)
slicer_lut2.display(None, None, 10)
scene.add(slicer_lut2)
scene.reset_clipping_range()
arr = window.snapshot(scene, offscreen=True)
report = window.analyze_snapshot(arr, find_objects=True)
npt.assert_equal(report.objects, 1)
scene.clear()
data = (255 * np.random.rand(50, 50, 50))
affine = np.diag([1, 3, 2, 1])
slicer = actor.slicer(data, affine, interpolation='nearest')
slicer.display(None, None, 25)
scene.add(slicer)
scene.reset_camera()
scene.reset_clipping_range()
arr = window.snapshot(scene, offscreen=True)
report = window.analyze_snapshot(arr, find_objects=True)
npt.assert_equal(report.objects, 1)
npt.assert_equal(data.shape, slicer.shape)
scene.clear()
data = (255 * np.random.rand(50, 50, 50))
affine = np.diag([1, 3, 2, 1])
from dipy.align.reslice import reslice
data2, affine2 = reslice(data, affine, zooms=(1, 3, 2),
new_zooms=(1, 1, 1))
slicer = actor.slicer(data2, affine2, interpolation='linear')
slicer.display(None, None, 25)
scene.add(slicer)
scene.reset_camera()
scene.reset_clipping_range()
# window.show(scene, reset_camera=False)
arr = window.snapshot(scene, offscreen=True)
report = window.analyze_snapshot(arr, find_objects=True)
npt.assert_equal(report.objects, 1)
npt.assert_array_equal([1, 3, 2] * np.array(data.shape),
np.array(slicer.shape))
@npt.dec.skipif(skip_it)
@xvfb_it
def test_surface():
import math
import random
from scipy.spatial import Delaunay
size = 11
vertices = list()
for i in range(-size, size):
for j in range(-size, size):
fact1 = - math.sin(i) * math.cos(j)
fact2 = - math.exp(abs(1 - math.sqrt(i ** 2 + j ** 2) / math.pi))
z_coord = -abs(fact1 * fact2)
vertices.append([i, j, z_coord])
c_arr = np.random.rand(len(vertices), 3)
random.shuffle(vertices)
vertices = np.array(vertices)
tri = Delaunay(vertices[:, [0, 1]])
faces = tri.simplices
c_loop = [None, c_arr]
f_loop = [None, faces]
s_loop = [None, "butterfly", "loop"]
for smooth_type in s_loop:
for face in f_loop:
for color in c_loop:
scene = window.Scene(background=(1, 1, 1))
surface_actor = actor.surface(vertices, faces=face,
colors=color, smooth=smooth_type)
scene.add(surface_actor)
# window.show(scene, size=(600, 600), reset_camera=False)
arr = window.snapshot(scene, 'test_surface.png',
offscreen=True)
report = window.analyze_snapshot(arr, find_objects=True)
npt.assert_equal(report.objects, 1)
@npt.dec.skipif(skip_it)
@xvfb_it
def test_directed_arrow():
start = [5, 5, 5]
direction = [1, 1, 1]
arrow_actor = actor.directed_arrow(start, direction,
scale=(5, 1, 1), color=(1, 0, 0))
# axes_actor = actor.axes(scale=(12, 1, 1))
scene = window.Scene(background=(1, 1, 1))
scene.add(arrow_actor)
# scene.add(axes_actor)
arr = window.snapshot(scene, 'test_da.png',
offscreen=True)
report = window.analyze_snapshot(arr, find_objects=True)
npt.assert_equal(report.objects, 1)
@npt.dec.skipif(skip_it)
@xvfb_it
def test_contour_from_roi():
# Render volume
scene = window.Scene()
data = np.zeros((50, 50, 50))
data[20:30, 25, 25] = 1.
data[25, 20:30, 25] = 1.
affine = np.eye(4)
surface = actor.contour_from_roi(data, affine,
color=np.array([1, 0, 1]),
opacity=.5)
scene.add(surface)
scene.reset_camera()
scene.reset_clipping_range()
# window.show(scene)
# Test binarization
scene2 = window.Scene()
data2 = np.zeros((50, 50, 50))
data2[20:30, 25, 25] = 1.
data2[35:40, 25, 25] = 1.
affine = np.eye(4)
surface2 = actor.contour_from_roi(data2, affine,
color=np.array([0, 1, 1]),
opacity=.5)
scene2.add(surface2)
scene2.reset_camera()
scene2.reset_clipping_range()
# window.show(scene2)
arr = window.snapshot(scene, 'test_surface.png', offscreen=True)
arr2 = window.snapshot(scene2, 'test_surface2.png', offscreen=True)
report = window.analyze_snapshot(arr, find_objects=True)
report2 = window.analyze_snapshot(arr2, find_objects=True)
npt.assert_equal(report.objects, 1)
npt.assert_equal(report2.objects, 2)
# test on real streamlines using tracking example
from dipy.data import read_stanford_labels
from dipy.reconst.shm import CsaOdfModel
from dipy.data import default_sphere
from dipy.direction import peaks_from_model
from dipy.tracking.local import ThresholdTissueClassifier
from dipy.tracking import utils
from dipy.tracking.local import LocalTracking
from fury.colormap import line_colors
hardi_img, gtab, labels_img = read_stanford_labels()
data = hardi_img.get_data()
labels = labels_img.get_data()
affine = hardi_img.affine
white_matter = (labels == 1) | (labels == 2)
csa_model = CsaOdfModel(gtab, sh_order=6)
csa_peaks = peaks_from_model(csa_model, data, default_sphere,
relative_peak_threshold=.8,
min_separation_angle=45,
mask=white_matter)
classifier = ThresholdTissueClassifier(csa_peaks.gfa, .25)
seed_mask = labels == 2
seeds = utils.seeds_from_mask(seed_mask, density=[1, 1, 1], affine=affine)
# Initialization of LocalTracking.
# The computation happens in the next step.
streamlines = LocalTracking(csa_peaks, classifier, seeds, affine,
step_size=2)
# Compute streamlines and store as a list.
streamlines = list(streamlines)
# Prepare the display objects.
streamlines_actor = actor.line(streamlines, line_colors(streamlines))
seedroi_actor = actor.contour_from_roi(seed_mask, affine, [0, 1, 1], 0.5)
# Create the 3d display.
r = window.Scene()
r2 = window.Scene()
r.add(streamlines_actor)
arr3 = window.snapshot(r, 'test_surface3.png', offscreen=True)
report3 = window.analyze_snapshot(arr3, find_objects=True)
r2.add(streamlines_actor)
r2.add(seedroi_actor)
arr4 = window.snapshot(r2, 'test_surface4.png', offscreen=True)
report4 = window.analyze_snapshot(arr4, find_objects=True)
# assert that the seed ROI rendering is not far
# away from the streamlines (affine error)
    npt.assert_equal(report3.objects, report4.objects)
'''
ROOMSIMOVE_SINGLE Compute shoebox room filters for a single source
###########################################################################
# Copyright 2003 <NAME>
# Copyright 2008-2016 <NAME>
# Copyright 2017 <NAME>
# This software is a python version of the stripped-down version of the Roomsim toolbox version
# 3.3 by <NAME> ,
# The matlab function for the stripped down version can be found here:
Roomsimove, http://homepages.loria.fr/evincent/software/Roomsimove.zip
# This code is distributed under the terms of the GNU Public License version 3
# (http://www.gnu.org/licenses/gpl.txt)
#
# If you find it useful, please cite the following reference:
###########################################################################
One difference between the Matlab version and this code is that the
RT60 value is assumed to be the same for all frequencies.
Tested for a sampling rate of 16000 Hz.
Usage:
=========
As standalone file:
------------------
python roomsimove_single.py config_file source_pos_x source_pos_y source_pos_z output_file
The help options will also give the details
python roomsimove_single.py -h
As a module:
------------
using config_file
-----------------
import roomsimove_single
sim_rir = roomsimove_single.RoomSim.init_from_config_file(config_file)
source_pos = [1, 1, 1]
rir = sim_rir.create_rir(source_pos)
using default values of absorption coeffecients
-----------------------------------------------
import roomsimove_single
rt60 = 0.5 # in seconds
room_dim = [4.2, 3.4, 5.2] # in meters
absorption = roomsimove_single.rt60_to_absorption(room_dim, rt60)
room = roomsimove_single.Room(room_dim, abs_coeff=absorption)
mic_pos = [2, 2, 2] # in meters
mic1 = roomsimove_single.Microphone(mic_pos, 1, \
orientation=[0.0, 0.0, 0.0], direction='omnidirectional')
mic_pos = [2, 2, 1] # in meters
mic2 = roomsimove_single.Microphone(mic_pos, 2, \
orientation=[0.0, 0.0, 0.0], direction='cardioid')
mics = [mic1, mic2]
sampling_rate = 16000
sim_rir = roomsimove_single.RoomSim(sampling_rate, room, mics, RT60=rt60)
source_pos = [1, 1, 1] # in meters
rir = sim_rir.create_rir(source_pos)
Applying RIR to data
-------------------
import olafilt
import soundfile as sf
# Assuming single channel data
[data, fs] = sf.read(wav_file)
reverb_data = olafilt.olafilt(rir,data)
'''
import argparse
import numpy as np
from scipy.interpolate import interp1d
import scipy.signal as scipy_sig
import olafilt
import utils
class RandomRIR(object):
"""
Generate a random room, microphone and source position and generate the corresponding RIR.
# Arguments
sampling_rate: Sampling rate of the RIR
max_rt_60: Maximum value of RT60 in seconds. Actual RT60 is random between [0.1, max_rt_60]
    min_room_dim, max_room_dim: Minimum and maximum values of the room dimensions.
Room dimensions are random picks between [min_room_dim, max_room_dim]
# Usage
rir_if = RandomRIR(sampling_rate=16000)
src = [np.random.rand(10000), np.random.rand(10000)]
rev_sig = rir_if.reverberate(src)
"""
def __init__(self, sampling_rate, max_rt_60=0.5, min_room_dim=3, max_room_dim=5):
self.sampling_rate = sampling_rate
self.max_rt_60 = max_rt_60
self.max_room_dim = max_room_dim
self.min_room_dim = min_room_dim
def create_rir(self, src_cnt, mic_cnt=1):
room_dim = utils.create_new_room(self.min_room_dim, self.max_room_dim)
room = Room(room_dim.dim)
rt60 = utils.generate_rt60(0.1, self.max_rt_60)
all_ele = []
all_mics = []
for mic_id in np.arange(mic_cnt):
mic_pos = utils.new_element_pos(room_dim, all_ele)
mic = Microphone(mic_pos.dim, 2, \
orientation=[0.0, 0.0, 0.0], direction='cardioid')
all_mics.append(mic)
all_ele.append(mic_pos)
all_srcs = []
for mic_id in np.arange(src_cnt):
src_pos = utils.new_element_pos(room_dim, all_ele)
all_srcs.append(src_pos)
all_ele.append(src_pos)
all_rir = []
sim_rir = RoomSim(self.sampling_rate, room, all_mics, RT60=rt60)
for src in all_srcs:
rir = sim_rir.create_rir(src.dim)
all_rir.append(rir)
return all_rir
def reverberate(self, src_list, mic_cnt=1):
"""
        Create RIRs with random values and convolve them with the sources
        # Arguments:
            src_list: wav signals for different sources
            mic_cnt: Number of microphones
        # Returns a list of reverberated sources. Each element in the list \
            is of dimension [src_len x mic_cnt]
"""
src_cnt = len(src_list)
rirs = self.create_rir(src_cnt, mic_cnt=mic_cnt)
rev_sig = []
for src_idx, src_rir in enumerate(rirs):
src_ch = [] # multiple channels
for mic_src_rir in src_rir.T:
data_rev = olafilt.olafilt(mic_src_rir, src_list[src_idx])
src_ch.append(data_rev)
src_ch = np.stack(src_ch, 1)
rev_sig.append(src_ch)
return rev_sig
def do_everything(room_dim, mic_positions, source_pos, rt60):
absorption = rt60_to_absorption(room_dim, rt60)
room = Room(room_dim, abs_coeff=absorption)
mics = []
for idx, mic in enumerate(mic_positions):
temp_mic = Microphone(mic, idx, \
orientation=[0.0, 0.0, 0.0], direction='omnidirectional')
mics.append(temp_mic)
sim_rir = RoomSim(16000, room, mics, RT60=rt60)
rir = sim_rir.create_rir(source_pos)
return rir
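# Hedged usage sketch for do_everything (not part of the original file); the
# room size, microphone positions, source position and RT60 below reuse the
# illustrative values from the module docstring.
def _do_everything_example():
    room_dim = [4.2, 3.4, 5.2]              # metres
    mic_positions = [[2, 2, 2], [2, 2, 1]]  # metres
    source_pos = [1, 1, 1]                  # metres
    rt60 = 0.5                              # seconds
    return do_everything(room_dim, mic_positions, source_pos, rt60)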
def get_rt60(F_abs, room_size, A):
'''
Get RT 60 given the room characteristics
'''
m_air = 6.875e-4*(F_abs.T/1000)**(1.7)
# attenuation factors for one metre travelled in air
room_size = np.array(room_size)
atten_air = np.exp(-0.5*m_air).T
Lx = room_size[0]
Ly = room_size[1]
Lz = room_size[2]
#Volume of room m^3
V_room=Lx*Ly*Lz
area_xz=Lx*Lz
area_yz=Ly*Lz
area_xy=Lx*Ly
total_area = 2*(area_xz+area_yz+area_xy)# Total area of shoebox room surfaces
# Effective absorbing area of room surfaces at each frequency
Se=area_yz*(A[0]+A[1])+area_xz*(A[2]+A[3])+area_xy*(A[5]+A[4])
a_bar=Se/total_area # Mean absorption of each room surface
# Norris-Eyring estimate adjusted for air absorption
RT60=0.1611*V_room/(4*m_air.T*V_room-total_area*np.log(1-a_bar))
return RT60
def rt60_to_absorption(room_obj_dim, rt60):
'''
    Norris-Eyring formula
Converts a given reverberation time into a single absorption coefficient for all surfaces
'''
room_vol = np.prod(room_obj_dim)
area_xz=room_obj_dim[0] * room_obj_dim[2]
area_yz=room_obj_dim[1] * room_obj_dim[2]
area_xy=room_obj_dim[0] * room_obj_dim[1]
    total_area = 2*(area_xz+area_yz+area_xy) # Total area of shoebox room surfaces
absorption = 1-np.exp(-0.1611*room_vol/(total_area*rt60))
return absorption
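# Worked illustration of rt60_to_absorption (not in the original file), using the
# docstring's room: for a 4.2 x 3.4 x 5.2 m room and RT60 = 0.5 s,
#   V = 4.2*3.4*5.2 = 74.256 m^3
#   S = 2*(4.2*3.4 + 4.2*5.2 + 3.4*5.2) = 107.6 m^2
#   absorption = 1 - exp(-0.1611*74.256/(107.6*0.5)) ~= 0.199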
class Microphone(object):
'''
Deal with a single microphone
'''
def __init__(self, pos, id_val, \
orientation=[0.0, 0.0, 0.0], direction='omnidirectional'):
self.x_pos = pos[0]
self.y_pos = pos[1]
self.z_pos = pos[2]
self.pos = pos
self._id = str(id_val)
self.orientation = orientation
self.direction = direction
class Room(object):
'''
Room characteristics
'''
def __init__(self, dim, F_abs=None, abs_coeff=None):
self.x_val = dim[0]
self.y_val = dim[1]
self.z_val = dim[2]
self.room_size = np.array(dim)
self.freq_dep_absorption = {}
if F_abs is None:
self.freq_dep_absorption['F_abs'] = np.array([125, 250, 500, 1000, 2000, 4000, 8000])
else:
self.freq_dep_absorption['F_abs'] = np.array(F_abs)
if abs_coeff is None:
self.__set_absorption()
else:
if isinstance(abs_coeff, float) or isinstance(abs_coeff, int):
self.__set_absorption(abs_val=abs_coeff)
else:
                self.freq_dep_absorption['Ax1'] = np.array(abs_coeff[0])
#!/usr/bin/python3
import numpy as np
from pathlib import Path
import pdb
import torch
from mseg.utils.names_utils import (
load_class_names,
get_universal_class_names,
get_classname_to_dataloaderid_map
)
from mseg.utils.tsv_utils import read_tsv_column_vals
from mseg.taxonomy.taxonomy_converter import (
parse_entry,
parse_uentry,
parse_test_entry,
TaxonomyConverter,
populate_linear_mapping,
RELABELED_TRAIN_DATASETS,
UNRELABELED_TRAIN_DATASETS
)
_ROOT = Path(__file__).resolve().parent.parent
def entries_equal(dname, tsv_fpath, is_train_dataset):
"""
Compare classnames in *_names.txt file against tsv column entries.
For training datasets, these must be *exactly* the same.
"""
tsv_classnames = read_tsv_column_vals(tsv_fpath, col_name=dname, convert_val_to_int=False)
nonempty_classnames = [name for name in tsv_classnames if name != '']
tsv_classnames = []
for entry in nonempty_classnames:
tsv_classnames.extend(parse_entry(entry))
txt_classnames = load_class_names(dname)
if set(txt_classnames) != set(tsv_classnames):
pdb.set_trace()
if is_train_dataset:
assert len(txt_classnames) == len(tsv_classnames)
# ensure no duplicates among training dataset classnames
assert len(list(tsv_classnames)) == len(set(tsv_classnames))
return set(txt_classnames) == set(tsv_classnames)
def test_names_complete():
"""
Test on dataset_config and on TaxonomyConverter
Make sure tsv entries in a single column match EXACTLY
to _names.txt file.
"""
tsv_fpath = f'{_ROOT}/mseg/class_remapping_files/MSeg_master.tsv'
train_dnames = UNRELABELED_TRAIN_DATASETS + RELABELED_TRAIN_DATASETS
for dname in train_dnames:
print(f'On {dname}...')
assert entries_equal(dname, tsv_fpath, is_train_dataset=True)
print(f'{dname} passed.')
print()
test_dnames = [
'camvid-11',
'kitti-19',
#'pascal-context-60', # {'flower', 'wood'} missing
'scannet-20',
'voc2012',
'wilddash-19'
]
for dname in test_dnames:
print(f'On {dname}')
assert entries_equal(dname, tsv_fpath, is_train_dataset=False)
def test_parse_entry_blank():
""" """
entry = ''
classes = parse_entry(entry)
assert classes == []
def test_parse_entry_brackets1():
"""
"""
entry = '{house,building, skyscraper, booth, hovel, tower, grandstand}'
classes = parse_entry(entry)
gt_classes = [
'house',
'building',
'skyscraper',
'booth',
'hovel',
'tower',
'grandstand'
]
assert classes == gt_classes
def test_parse_entry_space_sep():
"""
Note: ADE20K class "conveyer" is typo of "conveyor"
"""
entry = 'conveyer belt'
classes = parse_entry(entry)
assert classes == ['conveyer belt']
def test_parse_uentry():
""" """
uentry = 'animal_other'
fullname = parse_uentry(uentry)
assert fullname == 'animal_other'
def test_label_transform():
"""
Bring label from training taxonomy (mapillary-public65)
to the universal taxonomy.
21 is the motorcyclist class in mapillary-public65
"""
dname = 'mapillary-public65'
txt_classnames = load_class_names(dname)
train_idx = txt_classnames.index('Motorcyclist')
tc = TaxonomyConverter()
# training dataset label
traind_label = torch.ones(4,4)*train_idx
traind_label = traind_label.type(torch.LongTensor)
# Get back the universal label
u_label = tc.transform_label(traind_label, dname)
u_idx = get_universal_class_names().index('motorcyclist')
gt_u_label = np.ones((4,4)).astype(np.int64) * u_idx
assert np.allclose(u_label.numpy(), gt_u_label)
def test_label_transform_unlabeled():
"""
Make sure 255 stays mapped to 255 at each level (to be ignored in cross-entropy loss).
"""
IGNORE_LABEL = 255
dname = 'mapillary-public65'
txt_classnames = load_class_names(dname)
name2id = get_classname_to_dataloaderid_map(dname, include_ignore_idx_cls = True)
train_idx = name2id['unlabeled']
tc = TaxonomyConverter()
# training dataset label
traind_label = torch.ones(4,4)*train_idx
traind_label = traind_label.type(torch.LongTensor)
# Get back the universal label
u_label = tc.transform_label(traind_label, dname)
u_idx = IGNORE_LABEL
gt_u_label = np.ones((4,4)).astype(np.int64) * u_idx
assert np.allclose(u_label.numpy(), gt_u_label)
def test_transform_predictions_test():
"""
Consider predictions made within the universal taxonomy
over a tiny 2x3 image. We use a linear mapping to bring
these predictions into a test dataset's taxonomy
(summing the probabilities where necessary).
For Camvid, universal probabilities for `person',`bicycle'
should both go into the 'Bicyclist' class.
"""
u_classnames = get_universal_class_names()
person_uidx = u_classnames.index('person')
bicycle_uidx = u_classnames.index('bicycle')
sky_uidx = u_classnames.index('sky')
tc = TaxonomyConverter()
    input = np.zeros((194,2,3))
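    # The test is truncated at this point in the source. As a hedged numpy
    # illustration of the linear mapping described in the docstring (not the
    # original test body and not the actual populate_linear_mapping API),
    # universal 'person' and 'bicycle' probabilities are summed into a single
    # Camvid 'Bicyclist' entry.
    probs = np.zeros(194)
    probs[person_uidx] = 0.3
    probs[bicycle_uidx] = 0.2
    mapping = np.zeros((1, 194))      # one test-taxonomy class: 'Bicyclist'
    mapping[0, person_uidx] = 1.0
    mapping[0, bicycle_uidx] = 1.0
    bicyclist_prob = mapping @ probs  # -> array([0.5])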
# Machine Learning Online Class - Exercise 2: Logistic Regression
#
# Instructions
# ------------
#
# This file contains code that helps you get started on the second part
# of the exercise which covers regularization with logistic regression.
#
# You will need to complete the following functions in this exericse:
#
# sigmoid.m
# costFunction.m
# predict.m
# costFunctionReg.m
#
# For this exercise, you will not need to change any code in this file,
# or any other files other than those mentioned above.
#
import copy
import numpy
import scipy.optimize as optimization
import log_reg_funcs
# Load Data
# The first two columns contains the X values and the third column
# contains the label (y).
data = numpy.loadtxt("./ex2data2.txt", delimiter=",", dtype="float64")
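# Hedged sketch (not part of the original exercise file) of two of the functions
# named above, the sigmoid and the regularized cost; the graded versions live in
# sigmoid.m / costFunctionReg.m (or log_reg_funcs here) and may differ in detail.
def _sigmoid(z):
    return 1.0 / (1.0 + numpy.exp(-z))

def _cost_function_reg(theta, X, y, lam):
    # X is (m, n) with an intercept column of ones; theta is (n,); y is (m,).
    m = y.size
    h = _sigmoid(X.dot(theta))
    cost = (-y.dot(numpy.log(h)) - (1 - y).dot(numpy.log(1 - h))) / m
    cost += lam / (2.0 * m) * numpy.sum(theta[1:] ** 2)  # intercept not regularized
    return cost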
import numpy as np
from copy import deepcopy
from antco import (
updateUndAS,
updateDirAS,
updateUndMMAS,
updateDirMMAS,
updateUndEliteMMAS,
updateDirEliteMMAS,
updateDirEliteAS,
updateUndEliteAS,
updateDirLocalPher,
updateUndLocalPher,
updateUndACS,
updateDirACS)
from antco import Ant
def test_directed_AS_update():
""" antco.pheromone.updateDirAS() unit testing """
np.random.seed(1997)
evaporation = 0.2
P_t0 = np.random.uniform(size=(4, 4)).astype(np.float64)
np.fill_diagonal(P_t0, 0)
P_t0 = (P_t0 + P_t0.T) / 2 # Symmetric matrix
paths = np.array([
# Ant 1
[[0, 1, 0, 1],
[1, 0, 0, 0],
[0, 0, 0, 1],
[1, 0, 1, 0]],
# Ant 2
[[0, 1, 0, 1],
[1, 0, 0, 0],
[0, 0, 0, 1],
[1, 0, 1, 0]],
# Ant 3
[[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1],
[1, 1, 1, 0]]], dtype=np.int8)
expected = np.array([
[0.0, 0.9267931249792329, 0.4776117072586296, 1.6791352931971335],
[0.9267931249792329, 0.0, 0.5591658434565883, 0.7150135839042728],
[0.4776117072586296, 0.5591658434565883, 0.0, 1.0865920636193305],
[1.6791352931971335, 0.7150135839042728, 1.0865920636193305, 0.0]], dtype=np.float64)
    ant_scores = np.array([0.2, 0.3, 0.4], dtype=np.float64)
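    # The test is truncated here in the source. The lines below are a hedged
    # illustration (an assumption, not the library's documented behaviour) of
    # the classic AS rule the expected matrix appears to encode: every edge
    # keeps (1 - evaporation) of its pheromone, and each edge traversed by an
    # ant additionally receives that ant's score.
    deposit = np.tensordot(ant_scores, paths.astype(np.float64), axes=(0, 0))
    manual_update = (1.0 - evaporation) * P_t0 + deposit
    # Under that assumption, manual_update should reproduce `expected` above.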
# <NAME>
# python 3.6
""" Input:
------
It reads the individual driver's correlation nc files
Also uses regional masks of SREX regions to find dominant drivers regionally
Output:
-------
* Timeseries of the percent distribution of dominant drivers at different lags
"""
from scipy import stats
from scipy import ndimage
import glob
import sys
import netCDF4 as nc4
import numpy as np
import datetime as dt
from calendar import monthrange
import matplotlib as mpl
#mpl.use('Agg')
import matplotlib.pyplot as plt
#importing my functions
from functions import time_dim_dates, index_and_dates_slicing, geo_idx, mpi_local_and_global_index, create_seq_mat, cumsum_lagged,patch_with_gaps_and_eventsize, norm
from timeit import default_timer as timer
from scipy.stats.stats import pearsonr
import pandas as pd
import argparse
import collections
import os
import xarray as xr
#1- Hack to fix missing PROJ4 env var for Basemaps Error
import os
"""
import conda
conda_file_dir = conda.__file__
conda_dir = conda_file_dir.split('lib')[0]
proj_lib = os.path.join(os.path.join(conda_dir, 'share'), 'proj')
os.environ["PROJ_LIB"] = proj_lib
#-1 Hack end
from mpl_toolkits.basemap import Basemap
from matplotlib import cm
import matplotlib.patches as patches
"""
parser = argparse.ArgumentParser()
#parser.add_argument('--driver_ano' , '-dri_a' , help = "Driver anomalies" , type= str , default= 'pr' ) #pr
parser.add_argument('--variable' , '-var' , help = "Anomalies of carbon cycle variable" , type= str , default= 'gpp' )
parser.add_argument('--source' , '-src' , help = "Model (Source_Run)" , type= str , default= 'CESM2' ) # Model Name
parser.add_argument('--member_idx' , '-m_idx' , help = "Member Index" , type= int , default= 0 ) # Index of the member
#parser.add_argument ('--cum_lag' ,'-lag' , help = 'cum lag months? (multiple lag optional) use , to add multiple' , type = str , default = '01,02,03' )
args = parser.parse_args()
# run plot_dominant_climate_driver_correlation_tce_regional_graphs.py -var gpp -src CESM2
print (args)
variable = args.variable
#drivers_string = args.driver_ano
source_run = args.source
member_idx = args.member_idx
# List of the drivers that will be considered and their names for Plotting
# -----------
if source_run == 'CESM2':
driver_consider = 4
drivers = np.array(['pr','mrso','tas','fFireAll']) [:driver_consider]
drivers_names = np.array(['Prcp','Soil Moisture', 'TAS','Fire']) [:driver_consider]
drivers_code = np.array([ 10, 20, 30, 40]) [:driver_consider]
else:
driver_consider = 3
drivers = np.array(['pr','mrso','tas']) [:driver_consider]
drivers_names = np.array(['Prcp','Soil Moisture', 'TAS']) [:driver_consider]
drivers_code = np.array([ 10, 20, 30]) [:driver_consider]
# Paths for reading the main files
# --------------------------------
cori_scratch = '/global/cscratch1/sd/bharat/'
members_list = os.listdir(cori_scratch+"add_cmip6_data/%s/ssp585/"%source_run)
member_run = members_list[member_idx]
# Storing the file name and abr of the drivers to be considered
# -------------------------------------------------------------
features = {}
features['abr'] = drivers
features['filenames'] = {}
# The name with which the variables are stored in the nc files:
features['Names'] = {}
features['Names']['pr'] = 'pr'
features['Names']['mrso'] = 'mrso'
features['Names']['tas'] = 'tas'
if source_run == 'CESM2':
features['Names']['fFireAll'] = 'Fire'
#features['Names']['tasmax'] = 'tasmax'
features['filenames'][variable] = {} # Creating a empty directory for storing multi members if needed
features['filenames'][variable][member_run] = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/CESM2_ssp585_%s_%s_anomalies_gC.nc"%(source_run,member_run, variable,member_run,variable)
# Reading the Correlations Data
# -----------------------------
exp= 'ssp585'
path_corr = cori_scratch + 'add_cmip6_data/%s/%s/%s/%s/Correlations/'%(source_run,exp,member_run,variable)
nc_corr = nc4.Dataset(path_corr + 'dominant_driver_correlation_%s.nc'%(variable))
# Reading the variables from the variable (gpp) anomalies file
# ------------------------------------------------------------
nc_var = nc4.Dataset(features['filenames'][variable][member_run])
time = nc_var .variables['time']
# Reading the variables from the correlation file
# -----------------------------------------------
ranks = nc_corr .variables['rank' ]
wins = nc_corr .variables['win' ]
lags = nc_corr .variables['lag' ]
dom_dri_ids = nc_corr .variables['dri_id' ]
dom_dri_cc = nc_corr .variables['dri_coeff']
# Grids:
# -------
lat = nc_var .variables ['lat']
lon = nc_var .variables ['lon']
lat_bounds = nc_var .variables [nc_var.variables['lat'].bounds ]
lon_bounds = nc_var .variables [nc_var.variables['lon'].bounds ]
lon_edges = np.hstack (( lon_bounds[:,0], lon_bounds[-1,-1]))
lat_edges = np.hstack (( lat_bounds[:,0], lat_bounds[-1,-1]))
# Creating mask of the regions based on the resolution of the model
import regionmask
srex_mask = regionmask.defined_regions.srex.mask(lon[...], lat[...]).values # it has nans
srex_mask_ma= np.ma.masked_invalid(srex_mask) # got rid of nans; values from 1 to 26
# important regional information:
srex_abr = regionmask.defined_regions.srex.abbrevs
srex_names = regionmask.defined_regions.srex.names
srex_nums = regionmask.defined_regions.srex.numbers
srex_centroids = regionmask.defined_regions.srex.centroids
srex_polygons = regionmask.defined_regions.srex.polygons
# Organizing time
# ---------------
window = 25 #years
win_len = 12 * window #number of months in window years
nwin = int(time.size/win_len) #number of windows
#wins = np.array([format(i,'02' ) for i in range(nwin)])
dates_ar = time_dim_dates ( base_date= dt.date(1850,1,1), total_timestamps=time.size)
start_dates = [dates_ar[i*win_len] for i in range(nwin)]#list of start dates of 25 year window
end_dates = [dates_ar[i*win_len+win_len -1] for i in range(nwin)]#list of end dates of the 25 year window
# String
# ------
wins_str = [format(int(i),'02') for i in wins[...]]
lags_str = [format(int(i),'02') for i in lags[...]]
ranks_str = [format(int(i),'02') for i in ranks[...]]
# Regional masks
# --------------
import regionmask
# To store all the DataFrames of counts of dominant climate drivers in a dictionary for every region
DataFrames_counts = {}
#format>>> DataFrames [regions] [wins] [lags] [ranks]
# range: regions: 26
# wins : 10
# lags : 1
# ranks: 1
for region_abr in srex_abr:
DataFrames_counts[region_abr] = {}
for w in wins_str:
DataFrames_counts[region_abr][w] = {}
for l in lags_str[1:3]:
DataFrames_counts[region_abr][w][l] = {}
save_path = "/global/cscratch1/sd/bharat/add_cmip6_data/%s/ssp585/%s/%s/Correlations/Regional/DataFrames/"%(
source_run, member_run, variable)
if os.path.isdir(save_path) == False:
os.makedirs(save_path)
# Storing the dataframes for regions,win,lag, rk
# ----------------------------------------------
dict_counts = {}
for region_abr in srex_abr:
dict_counts[region_abr] = {}
for win in np.asarray(wins[...], dtype =int):
dict_counts[region_abr][win] = {}
for lg in np.asarray(lags[...] [1:],dtype = int):
dict_counts[region_abr][win][lg] = {}
for rk in np.asarray(ranks[...][0:1], dtype = int):
dict_counts[region_abr][win][lg][rk] = {}
# Computing the DataFrames
# ------------------------
for region_abr in srex_abr: #testing for AMZ only
srex_idxs = np.arange(len(srex_names))
filter_region = np.array(srex_abr) == region_abr
region_idx = srex_idxs[filter_region][0]
region_number = np.array(srex_nums)[filter_region][0]
region_name = np.array(srex_names)[filter_region][0]
region_abr = np.array(srex_abr)[filter_region][0]
region_mask_not = np.ma.masked_not_equal(srex_mask_ma, region_number).mask # Masked everthing but the region
region_mask = ~region_mask_not # Only the regions is masked
for win in np.asarray(wins[...], dtype =int):
for lg in np.asarray(lags[...] [1:3],dtype = int): # interested in lag = 1 month i.e. index = 1
for rk in np.asarray(ranks[...][0:1], dtype = int): # interested in the dominant driver only
counts = np.unique( np.ma.masked_equal( np.ma.masked_invalid(
dom_dri_ids[rk,win,lg,:,:][region_mask]),0),
return_counts=True)
# there are np.nans and 0's in the array that have to be masked
counts_drivers = np.array([counts[1][i] for i in range(counts[1].size)])
                # Since many drivers were rarely dominant, only the relevant ones are kept in the plot
print ("counts for dom rank %s and lag %s...:"%(format(rk,'002'), format(lg,'002')))
tmp_drivers_code = np.copy(drivers_code)
for d in counts[0].data:
tmp_drivers_code = np.ma.masked_equal (tmp_drivers_code, d)
df_counts = pd.DataFrame({'Counts':counts_drivers[:-1]}) #the last value corresponds to the masks
df_counts.index = drivers [tmp_drivers_code.mask]
perc = [round(i*100./sum(df_counts['Counts'].values),2) for i in df_counts['Counts'].values]
df_counts['percentage']=perc
#Calculating the mean and std of the climate drivers
mean_cc = []
std_cc = []
for code_id in drivers_code[tmp_drivers_code.mask]:
#print "code_ID...", code_id
mean_cc.append(np.ma.mean(dom_dri_cc[rk,win,lg,:,:][~np.ma.masked_not_equal(dom_dri_ids[rk,win,lg,:,:],code_id).mask]))
std_cc.append(np.ma.std(dom_dri_cc[rk,win,lg,:,:][~np.ma.masked_not_equal(dom_dri_ids[rk,win,lg,:,:],code_id).mask]))
df_counts['mean_coeff'] = mean_cc
df_counts['std_coeff'] = std_cc
# Saving the Data Frame in a dic:
DataFrames_counts[ region_abr] [wins_str[win]] [lags_str[lg]] [ranks_str[rk]] = df_counts #since the numbers are indexs are same
print ('dataframe_win_%s_lag_%s_and_rank_%s.csv'%(format(win,'02'),format(lg,'02'),format(rk,'02')))
df_counts .to_csv(save_path + 'df_reg_%s_win_%s_lag_%s_and_rank_%s.csv'%(region_abr, format(win,'02'),format(lg,'02'),format(rk,'02')),sep=',')
# Regional dominant driver to dictionary
# --------------
df_counts_t = df_counts[df_counts.loc[:,'percentage'] == df_counts.loc[:,'percentage'].max()]
if df_counts_t.size > 0:
dict_counts[region_abr][win][lg][rk] ['Dri_Name'] = df_counts_t.index[0]
dict_counts[region_abr][win][lg][rk] ['Corr_Coeff'] = df_counts_t['mean_coeff'][0]
dict_counts[region_abr][win][lg][rk] ['Dri_Code'] = drivers_code[drivers == df_counts_t.index[0]][0]
elif df_counts_t.size == 0:
dict_counts[region_abr][win][lg][rk] ['Dri_Name'] = np.nan
dict_counts[region_abr][win][lg][rk] ['Corr_Coeff'] = np.nan
dict_counts[region_abr][win][lg][rk] ['Dri_Code'] = np.nan
#df_counts .to_csv(path_corr + 'dataframes/dataframe_win_%s_lag_%s_and_rank_%s_np2.csv'%(format(win,'02'),format(lg,'02'),format(rk,'02')),sep=',') # [Changed] No pvalue filter
#print(breakit)
"""
# =============================================================
# based on " ecp_triggers_percent_distribution_dom_drivers.py "
# =============================================================
# Plotting the timeseries of dominant drivers
# -------------------------------------------
in_yr = 1850
win_yr = [str(in_yr+i*25) + '-'+str(in_yr +(i+1)*25-1)[2:] for i in range(wins.size)]
plot_lags = ['01','02','03']
data_percent = np.zeros((len(win_yr), len(drivers_names)))
print ("data_percent shape: ", data_percent.shape)
data_lag = {}
for LAG in plot_lags :
data_percent = np.zeros((len(win_yr), len(drivers_names)))
print ("data_percent shape: ", data_percent.shape)
print ("data shape", np.transpose(DataFrames_counts[w] [LAG] [ranks_str[rk]]['percentage']).shape)
df = pd.DataFrame( data_percent , index = win_yr, columns = drivers_names) #main dataframe
for w in wins_str:
data = DataFrames_counts [w] [LAG] [ranks_str[rk]]
drivers = data.iloc[:,0]
data_df = pd.DataFrame( DataFrames_counts[w] [LAG] [ranks_str[rk]]['percentage'].values.reshape(1,len(drivers)),index = [win_yr[int(w)]], columns = drivers) # dataframe for a particuar window
for idx,dr in enumerate (drivers):
df.loc[data_df.index,drivers_names[idx]] = data_df[dr]
data_lag [LAG] = df
# Plotting Subplots
# -----------------
if source_run == 'CESM2':
color_list = ['b','b','g','r']
linestyle_list = ['--','-','-','-']
else:
color_list = ['b','g','r']
linestyle_list = ['-','-','-']
fig,ax = plt.subplots(nrows=3,ncols= 1,gridspec_kw = {'wspace':0, 'hspace':0.02},tight_layout = True, figsize = (7,8.5), dpi = 400)
ax = ax.ravel()
for lag_idx,LAG in enumerate(plot_lags):
for dri_idx in range(len(drivers_names)):
ax[lag_idx].plot( range(wins.size), data_lag[LAG].iloc[:,dri_idx], label = drivers_names[dri_idx], color=color_list[dri_idx],linestyle=linestyle_list[dri_idx], linewidth = 1)
ax[lag_idx].set_xticks(range(wins.size))
ax[lag_idx].tick_params(axis="x",direction="in")
ax[lag_idx].set_xticklabels([])
ax[lag_idx].set_ylabel("Lag: %s"%(LAG))
ax[lag_idx].set_ylim([0,50])
ax[lag_idx].grid(which='major', linestyle=':', linewidth='0.3', color='gray')
ax[lag_idx].set_xticklabels(df.index,fontsize =9)
for tick in ax[lag_idx].get_xticklabels():
tick.set_rotation(90)
ax[lag_idx].set_xlabel('Time ($25-yr)$ wins',fontsize =14)
fig.text(0.03, 0.5, 'Percent Distribution of Climate Drivers', va='center', ha='center', rotation='vertical', fontsize=14)
ax[0].legend(loc = 'upper center',ncol=len(drivers_names), bbox_to_anchor=(.44,1.15),frameon =False,fontsize=9,handletextpad=0.1)
plt.gcf().subplots_adjust(bottom=0.1)
fig.savefig(path_corr + 'per_dom/percent_dominance_multilag_123_rank_%s.pdf'%(ranks_str[rk]))
#fig.savefig(path_corr + 'per_dom/percent_dominance_multilag_123_rank_%s_np2.pdf'%(ranks_str[rk])) # [changed] no p-value filter
"""
# Plotting Subplots
# -----------------
if source_run == 'CESM2':
color_list = ['b','b','g','r']
linestyle_list = ['--','-','-','-']
else:
color_list = ['b','g','r']
linestyle_list = ['-','-','-']
web_path = '/global/homes/b/bharat/results/web/Regional/Attribution/'
path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/"%(source_run,member_run, variable)
# x - axis
in_yr = 1850
win_yr = [str(in_yr+i*25) + '-'+str(in_yr +(i+1)*25-1)[2:] for i in range(wins.size)]
# Initializing the dataframe
data_percent = np.zeros((len(win_yr), len(drivers_names)))
# Choose the lag
# -------------
lg =1
#Creating an empty dict for storing the dataframes:
# ------------------------------------------------
dict_dataframe = {}
for r_idx, region_abr in enumerate(srex_abr):
df = pd.DataFrame( data_percent , index = win_yr, columns = drivers) #main dataframe
for w_idx, w in enumerate (wins_str):
data = DataFrames_counts[region_abr][w][lags_str[lg]] [ranks_str[rk]]
drivers_tmp = data.iloc[:,0]
for col in df.columns :
try:
df .loc[win_yr[w_idx],col] = data.loc[col,'percentage']
except:
df .loc[win_yr[w_idx],col] = 0
dict_dataframe[region_abr] = df.copy(deep = True)
del df
# Plotting the dominant driver distribution for all the regions:
# --------------------------------------------------------------
import pylab as plot
params = {'legend.fontsize': 6,
'legend.handlelength': 1,
'legend.frameon': 'False',
'axes.labelsize':'small',
'ytick.labelsize': 'small',
'font.size':5 }
plot.rcParams.update(params)
fig, axs = plt.subplots(nrows=9, ncols=3, sharex='col',
gridspec_kw={'hspace': .4, 'wspace': .4}, figsize=(6,9))
plt.suptitle ("TS of dominant drivers during TCEs (lag:%d)"%lg, fontsize = 14)
txt ="The left y-axis represents the percent count of drivers in that region"
axs = axs.ravel()
for k_idx, key in enumerate(dict_dataframe.keys()):
df = dict_dataframe[key]
for dri_idx in range(len(drivers)):
axs[k_idx].plot( range(wins.size), df.iloc[:,dri_idx], label = drivers_names[dri_idx], color=color_list[dri_idx],linestyle=linestyle_list[dri_idx], linewidth = 0.8)
#axs[k_idx].set_xticks(range(wins.size))
axs[k_idx].set_ylabel("%s"%key)
axs[k_idx].grid(which='major', linestyle=':', linewidth='0.3', color='gray')
for tick in axs[-3].get_xticklabels():
tick.set_rotation(45)
for tick in axs[-2].get_xticklabels():
tick.set_rotation(45)
for tick in axs[-1].get_xticklabels():
tick.set_rotation(45)
#axs[1].legend(loc = 'upper center',ncol=len(drivers_names), bbox_to_anchor=(.5,1.15),frameon =False,fontsize=9,handletextpad=0.1)
plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=12) #Caption
fig.savefig(web_path + 'percent_dom_%s_%s_lag_%s_regions_%s.pdf'%(source_run, member_run, format(lg,'02'),variable.upper()) )
# Common Information for spatial plots
# ====================================
sub_fig_text = ['(a)', '(b)', '(c)',
'(d)', '(e)', '(f)']
Wins_to_Plot = ['1850-74', '1900-24', '1950-74', '2000-24', '2050-74', '2075-99']
Wins_to_Plot_idxs = [0,2,4,6,8,9]
import cartopy.crs as ccrs
from matplotlib.axes import Axes
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from mpl_toolkits.axes_grid1 import AxesGrid
proj_trans = ccrs.PlateCarree()
proj_output = ccrs.PlateCarree()
# Plotting individual drivers
# ===========================
# Spatial plot of individual driver correlatons
# for idx, dri in enumerate (drivers_names):
sub_fig_text = ['(a)', '(b)', '(c)',
'(d)', '(e)', '(f)']
Wins_to_Plot = ['1850-74', '1900-24', '1950-74', '2000-24', '2050-74', '2075-99']
Wins_to_Plot_idxs = [0,2,4,6,8,9]
ymax = 1
ymin = -1
import cartopy.crs as ccrs
from matplotlib.axes import Axes
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from mpl_toolkits.axes_grid1 import AxesGrid
proj_trans = ccrs.PlateCarree()
proj_output = ccrs.PlateCarree()
for dri_idx, dri in enumerate (drivers_names):
fig = plt.figure(figsize = (12,9), dpi = 200)
#pwin = Wins_to_Plot_idxs[0]
plag = 1
ax = {}
gl = {}
for plot_idx, win_idx in enumerate(Wins_to_Plot_idxs):
#plot_idx = 0 #
gl[plot_idx] = 0
if plot_idx == 0:
ax[plot_idx] = fig.add_subplot(
2, 3, plot_idx+1, projection= proj_output
)
# Mean Correlation Coefficient of the Selected climate Drivers at any rank
plot_data = np.ma.mean(np.ma.masked_array(data=dom_dri_cc[:,win_idx,plag,:,:],
mask = np.ma.masked_not_equal(dom_dri_ids[:,win_idx,plag,:,:],
drivers_code[dri_idx]) .mask),axis = 0)
h = ax[plot_idx].pcolormesh(lon_edges[...],lat_edges[...], plot_data,
transform=ccrs.PlateCarree(), vmax=ymax, vmin=ymin, cmap='PuOr')
for srex_idx,abr in enumerate (srex_abr):
ax[plot_idx].add_geometries([srex_polygons[srex_idx]], crs = proj_trans, facecolor='none', edgecolor='black', alpha=0.4)
elif plot_idx>0:
ax[plot_idx] = fig.add_subplot(
2, 3, plot_idx+1, projection= proj_output,
sharex=ax[0], sharey=ax[0]
)
# Mean Correlation Coefficient of the Selected climate Drivers at any rank
plot_data = np.ma.mean(np.ma.masked_array(data=dom_dri_cc[:,win_idx,plag,:,:],
mask = np.ma.masked_not_equal(dom_dri_ids[:,win_idx,plag,:,:],
drivers_code[dri_idx]) .mask),axis = 0)
h = ax[plot_idx].pcolormesh(lon_edges[...],lat_edges[...], plot_data,
transform=ccrs.PlateCarree(), vmax=ymax, vmin=ymin, cmap='PuOr')
for srex_idx,abr in enumerate (srex_abr):
ax[plot_idx].add_geometries([srex_polygons[srex_idx]], crs = proj_trans, facecolor='none', edgecolor='black', alpha=0.4)
for plot_idx in range(len(Wins_to_Plot)):
ax[plot_idx].coastlines(alpha=0.75)
ax[plot_idx].text(-85, -10, sub_fig_text[plot_idx] + ' '+ Wins_to_Plot[plot_idx],
horizontalalignment="right",
verticalalignment='center',
fontsize = 9)
gl[plot_idx] = ax[plot_idx].gridlines(crs=ccrs.PlateCarree(), draw_labels=False,
linewidth=.5, color='gray', alpha=0.5, linestyle='--')
gl[3].xlabels_bottom = True
gl[4].xlabels_bottom = True
gl[5].xlabels_bottom = True
gl[3].xformatter = LONGITUDE_FORMATTER
gl[4].xformatter = LONGITUDE_FORMATTER
gl[5].xformatter = LONGITUDE_FORMATTER
gl[0].ylabels_left = True
gl[3].ylabels_left = True
gl[0].yformatter = LATITUDE_FORMATTER
gl[3].yformatter = LATITUDE_FORMATTER
plt.subplots_adjust(wspace=0.02,hspace=-.695)
cax = plt.axes([0.92, 0.335, 0.015, 0.34])
plt.colorbar( h, cax=cax, orientation='vertical', pad=0.04, shrink=0.95);
ax[1].set_title("Correlation Coefficient of %s with %s extremes"%(dri,variable.upper()), fontsize=14)
fig.savefig(web_path + "Spatial_Corr_%s_%s_lag_%d.pdf"%(variable,dri,plag),
bbox_inches = "tight", edgecolor="w")
fig.savefig(web_path + "Spatial_Corr_%s_%s_lag_%d.png"%(variable,dri,plag),
bbox_inches = "tight", edgecolor="w")
fig.savefig(path_save + "Correlations/Spatial_Maps/Spatial_Corr_%s_%s_lag_%d.pdf"%(variable,dri,plag),
bbox_inches = "tight", edgecolor="w")
del fig
# Dominant Driver spatial plot at lag =1 month
# ===========================================
# Spatial plot of Dominant driver correlatons
# for idx, dri in enumerate (drivers_names):
ymax = 45
ymin = 5
rk = 0 #Dominant driver
plag = 1 # lag =1 month
fig = plt.figure(figsize = (12,9), dpi = 200)
ax = {}
gl = {}
for plot_idx, win_idx in enumerate(Wins_to_Plot_idxs):
#plot_idx = 0 #
gl[plot_idx] = 0
if plot_idx == 0:
ax[plot_idx] = fig.add_subplot(
2, 3, plot_idx+1, projection= proj_output
)
plot_data = np.ma.masked_equal(np.ma.masked_invalid(dom_dri_ids[rk,win_idx,plag,:,:]),0)
cmap = plt.get_cmap('rainbow', drivers_code.size)
h = ax[plot_idx].pcolormesh(lon_edges[...],lat_edges[...], plot_data,
transform=ccrs.PlateCarree(), vmax=ymax, vmin=ymin, cmap=cmap)
for srex_idx,abr in enumerate (srex_abr):
ax[plot_idx].add_geometries([srex_polygons[srex_idx]], crs = proj_trans, facecolor='none', edgecolor='black', alpha=0.4)
elif plot_idx>0:
ax[plot_idx] = fig.add_subplot(
2, 3, plot_idx+1, projection= proj_output,
sharex=ax[0], sharey=ax[0]
)
plot_data = np.ma.masked_equal(np.ma.masked_invalid(dom_dri_ids[rk,win_idx,plag,:,:]),0)
h = ax[plot_idx].pcolormesh(lon_edges[...],lat_edges[...], plot_data,
transform=ccrs.PlateCarree(), vmax=ymax, vmin=ymin, cmap= cmap)
for srex_idx,abr in enumerate (srex_abr):
ax[plot_idx].add_geometries([srex_polygons[srex_idx]], crs = proj_trans, facecolor='none', edgecolor='black', alpha=0.4)
for plot_idx in range(len(Wins_to_Plot)):
ax[plot_idx].coastlines(alpha=0.75)
ax[plot_idx].text(-85, -10, sub_fig_text[plot_idx] + ' '+ Wins_to_Plot[plot_idx],
horizontalalignment="right",
verticalalignment='center',
fontsize = 9)
gl[plot_idx] = ax[plot_idx].gridlines(crs=ccrs.PlateCarree(), draw_labels=False,
linewidth=.5, color='gray', alpha=0.5, linestyle='--')
gl[3].xlabels_bottom = True
gl[4].xlabels_bottom = True
gl[5].xlabels_bottom = True
gl[3].xformatter = LONGITUDE_FORMATTER
gl[4].xformatter = LONGITUDE_FORMATTER
gl[5].xformatter = LONGITUDE_FORMATTER
gl[0].ylabels_left = True
gl[3].ylabels_left = True
gl[0].yformatter = LATITUDE_FORMATTER
gl[3].yformatter = LATITUDE_FORMATTER
plt.subplots_adjust(wspace=0.02,hspace=-.695)
cax = plt.axes([0.92, 0.335, 0.015, 0.34])
cbar = plt.colorbar(h, cax=cax, ticks = range(drivers_code[0],drivers_code[-1]+1,10))
cbar .ax.set_yticklabels(drivers_names)
#plt.colorbar( h, cax=cax, orientation='vertical', pad=0.04, shrink=0.95);
ax[1].set_title("Dominant Drivers of %s extremes"%(variable.upper()), fontsize=14)
fig.savefig(web_path + "Spatial_Dominant_Driver_%s_lag_%d.pdf"%(variable,plag),
bbox_inches = "tight", edgecolor="w")
fig.savefig(web_path + "Spatial_Dominant_Driver_%s_lag_%d.png"%(variable,plag),
bbox_inches = "tight", edgecolor="w")
fig.savefig(path_save + "Correlations/Spatial_Maps/Dominant_Driver_%s_lag_%d.pdf"%(variable,plag),
bbox_inches = "tight", edgecolor="w")
del fig
# Plotting of "Regional Dominance"
# =====================================
#dict_counts[region_abr][win][lg][rk] ['Dri_Name']
#dict_counts[region_abr][win][lg][rk] ['Corr_Coeff']
rk=0
lg=1
plag=1
values_range = []
sign = {}
for r in srex_abr:
sign[r] = {}
for win_idx, wi in enumerate(Wins_to_Plot):
values_range.append(dict_counts[r][Wins_to_Plot_idxs[win_idx]][lg][rk] ['Corr_Coeff'])
#print(win_idx,dict_counts[r][Wins_to_Plot_idxs[win_idx]][lg][rk] ['Corr_Coeff'] )
if dict_counts[r][Wins_to_Plot_idxs[win_idx]][lg][rk] ['Corr_Coeff'] > 0:
sign[r][wi] = '+'
elif dict_counts[r][Wins_to_Plot_idxs[win_idx]][lg][rk] ['Corr_Coeff'] < 0:
sign[r][wi] = u"\u2212"
else:
sign[r][wi] = ' '
print ("To check for the range of values")
print (np.array(values_range).min())
print (np.array(values_range).max())
ymax = 45
ymin = 5
# Creating the NBP Values for 1850-74 for all regions for NBP du Ext
ploting_stats = {}
for win_idx, wi in enumerate(Wins_to_Plot):
ploting_stats[wi] = {}
all_masked = np.ma.masked_equal(np.ma.zeros(srex_mask_ma.shape),0)
for s_idx in srex_idxs:
tmp = np.ma.masked_equal(srex_mask_ma,s_idx+ 1).mask # +1 because srex_idxs start from 1
all_masked[tmp] = dict_counts[srex_abr[s_idx]][Wins_to_Plot_idxs[win_idx]][lg][rk] ['Dri_Code']
del tmp
all_masked = np.ma.masked_array(all_masked, mask = srex_mask_ma.mask)
ploting_stats[wi] ['Dri_Codes'] = np.ma.masked_equal(np.ma.masked_invalid(all_masked),0)
# test plot
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from mpl_toolkits.axes_grid1 import AxesGrid
proj_trans = ccrs.PlateCarree()
#proj_output = ccrs.Robinson(central_longitude=0)
proj_output = ccrs.PlateCarree()
fig = plt.figure(figsize = (12,9), dpi = 400)
plt.style.use("classic")
ax = {}
gl = {}
for plot_idx in range(len(Wins_to_Plot)):
gl[plot_idx] = 0
if plot_idx == 0 :
ax[plot_idx] = fig.add_subplot(
2, 3, plot_idx+1, projection= proj_output
)
plot_data = np.ma.masked_equal(np.ma.masked_invalid(ploting_stats[Wins_to_Plot[plot_idx]]['Dri_Codes']),0)
cmap = plt.get_cmap('rainbow', drivers_code.size)
h = ax[plot_idx].pcolormesh(lon_edges[...],lat_edges[...], plot_data,
transform=ccrs.PlateCarree(),vmax=ymax, vmin=ymin,cmap= cmap)
for srex_idx,abr in enumerate (srex_abr):
ax[plot_idx].text ( srex_centroids[srex_idx][0], srex_centroids[srex_idx][-1], sign[abr][Wins_to_Plot[plot_idx]],
horizontalalignment='center',
color = 'white', fontweight = 'bold',fontsize=10,
transform = proj_trans)
ax[plot_idx].add_geometries([srex_polygons[srex_idx]], crs = proj_trans, facecolor='none', edgecolor='black', alpha=0.4)
elif plot_idx>0:
ax[plot_idx] = fig.add_subplot(
2, 3, plot_idx+1, projection= proj_output,
sharex=ax[0], sharey=ax[0]
)
plot_data = np.ma.masked_equal(np.ma.masked_invalid(ploting_stats[Wins_to_Plot[plot_idx]]['Dri_Codes']),0)
h = ax[plot_idx].pcolormesh(lon_edges[...],lat_edges[...], plot_data,
transform=ccrs.PlateCarree(),vmax=ymax,vmin=ymin,cmap= cmap)
for srex_idx,abr in enumerate (srex_abr):
ax[plot_idx].text ( srex_centroids[srex_idx][0], srex_centroids[srex_idx][-1],
sign[abr][Wins_to_Plot[plot_idx]],
horizontalalignment='center',
color = 'white', fontweight = 'bold',fontsize=10,
transform = proj_trans)
ax[plot_idx].add_geometries([srex_polygons[srex_idx]], crs = proj_trans, facecolor='none', edgecolor='black', alpha=0.4)
for plot_idx in range(len(Wins_to_Plot)):
ax[plot_idx].coastlines(alpha=0.75)
ax[plot_idx].text(80, -60, sub_fig_text[plot_idx] + ' '+ Wins_to_Plot[plot_idx],
horizontalalignment="right",
verticalalignment='center',
fontsize = 12)
gl[plot_idx] = ax[plot_idx].gridlines(crs=ccrs.PlateCarree(), draw_labels=False,
linewidth=.5, color='gray', alpha=0.5, linestyle='--')
gl[3].xlabels_bottom = True
gl[4].xlabels_bottom = True
gl[5].xlabels_bottom = True
gl[3].xformatter = LONGITUDE_FORMATTER
gl[4].xformatter = LONGITUDE_FORMATTER
gl[5].xformatter = LONGITUDE_FORMATTER
gl[0].ylabels_left = True
gl[3].ylabels_left = True
gl[0].yformatter = LATITUDE_FORMATTER
gl[3].yformatter = LATITUDE_FORMATTER
plt.subplots_adjust(wspace=0.02,hspace=-.695)
cax = plt.axes([0.92, 0.335, 0.015, 0.34])
cbar = plt.colorbar(h, cax=cax, ticks = range(drivers_code[0],drivers_code[-1]+1,10))
drivers_names_plotting = np.array(['Prcp', 'SM','TAS','Fire'])
cbar .ax.set_yticklabels(drivers_names_plotting)
# cbar .ax.set_yticklabels(drivers_names)
#plt.colorbar(h, orientation='horizontal', pad=0.04);
ax[1].set_title("Regional Distribution of Dominant Drivers of %s extremes \n"%(variable.upper()), fontsize=14)
fig.savefig(web_path + "Spatial_Regional_Dominant_Driver_%s_lag_%d.pdf"%(variable,plag),
edgecolor = "w", bbox_inches = "tight")
fig.savefig(web_path + "Spatial_Regional_Dominant_Driver_%s_lag_%d.png"%(variable,plag),
bbox_inches = "tight")
fig.savefig(path_save + "Correlations/Spatial_Maps/Dominant_Regional_Driver_%s_lag_%d.pdf"%(variable,plag),
edgecolor = "w", bbox_inches = "tight")
# Calculation of the count of pixels of different regions...
# ...with positive and negative correlation coefficients!
# ========================================================
# For MRSO
# --------
dri_idx = 1 #for MRSO
plag = 1
# Dict to store the counts of pos/neg extremes
# --------------------------------------------
dict_mrso_cc_count = {}
for region_abr in srex_abr:
dict_mrso_cc_count[region_abr] = {}
for win_idx, win_str in enumerate(win_yr):
dict_mrso_cc_count[region_abr][win_str] = {}
del region_abr,win_idx, win_str
# Calculation of counts:
for region_abr in srex_abr:
for win_idx, win_str in enumerate(win_yr):
driver_cc_win_tmp = np.ma.masked_array(data=dom_dri_cc[:,win_idx,plag,:,:],
mask = np.ma.masked_not_equal(dom_dri_ids[:,win_idx,plag,:,:],
drivers_code[dri_idx]) .mask)
filter_region = np.array(srex_abr) == region_abr
region_idx = srex_idxs[filter_region][0]
region_number = np.array(srex_nums)[filter_region][0]
region_name = np.array(srex_names)[filter_region][0]
region_abr = np.array(srex_abr)[filter_region][0]
region_mask_not = np.ma.masked_not_equal(srex_mask_ma, region_number).mask # Masked everthing but the region
region_mask = ~region_mask_not # Only the regions is masked
cc_values_tmp = driver_cc_win_tmp[np.array([region_mask]*4)][driver_cc_win_tmp[np.array([region_mask]*4)].mask ==False]
dict_mrso_cc_count[region_abr][win_str]['pos'] = (cc_values_tmp > 0).sum()
dict_mrso_cc_count[region_abr][win_str]['neg'] = (cc_values_tmp < 0).sum()
del region_abr,win_idx, win_str,cc_values_tmp,region_mask
# For TAS
# --------
dri_idx = 2 #for TAS
plag = 1
# Dict to store the counts of pos/neg extremes
# --------------------------------------------
dict_tas_cc_count = {}
for region_abr in srex_abr:
dict_tas_cc_count[region_abr] = {}
for win_idx, win_str in enumerate(win_yr):
dict_tas_cc_count[region_abr][win_str] = {}
del region_abr,win_idx, win_str
# Calculation of counts:
for region_abr in srex_abr:
for win_idx, win_str in enumerate(win_yr):
driver_cc_win_tmp = np.ma.masked_array(data=dom_dri_cc[:,win_idx,plag,:,:],
mask = np.ma.masked_not_equal(dom_dri_ids[:,win_idx,plag,:,:],
drivers_code[dri_idx]) .mask)
filter_region = np.array(srex_abr) == region_abr
region_idx = srex_idxs[filter_region][0]
region_number = np.array(srex_nums)[filter_region][0]
region_name = np.array(srex_names)[filter_region][0]
region_abr = np.array(srex_abr)[filter_region][0]
region_mask_not = np.ma.masked_not_equal(srex_mask_ma, region_number).mask # Masked everthing but the region
region_mask = ~region_mask_not # Only the regions is masked
cc_values_tmp = driver_cc_win_tmp[np.array([region_mask]*4)][driver_cc_win_tmp[np.array([region_mask]*4)].mask ==False]
dict_tas_cc_count[region_abr][win_str]['pos'] = (cc_values_tmp > 0).sum()
dict_tas_cc_count[region_abr][win_str]['neg'] = (cc_values_tmp < 0).sum()
del region_abr,win_idx, win_str,cc_values_tmp,region_mask
# Analysis and presentation of data on correlation coefficient:
# -------------------------------------------------------------
# MRSO
df_mrso_cc = {}
for region_abr in srex_abr:
df_mrso_cc[region_abr] = pd.DataFrame.from_dict(dict_mrso_cc_count[region_abr], orient='index')
df_mrso_cc[region_abr].loc[:,"%pos"] = (df_mrso_cc[region_abr].loc[:,"pos"]*100/(
df_mrso_cc[region_abr].loc[:,"pos"] +
df_mrso_cc[region_abr].loc[:,"neg"])
).round(decimals=1)
df_mrso_cc[region_abr].loc[:,"%neg"] = (df_mrso_cc[region_abr].loc[:,"neg"]*100/(
df_mrso_cc[region_abr].loc[:,"pos"] +
df_mrso_cc[region_abr].loc[:,"neg"])
).round(decimals=1)
del region_abr
#TAS
df_tas_cc = {}
for region_abr in srex_abr:
df_tas_cc[region_abr] = pd.DataFrame.from_dict(dict_tas_cc_count[region_abr], orient='index')
df_tas_cc[region_abr].loc[:,"%pos"] = (df_tas_cc[region_abr].loc[:,"pos"]*100/(
df_tas_cc[region_abr].loc[:,"pos"] +
df_tas_cc[region_abr].loc[:,"neg"])
).round(decimals=1)
df_tas_cc[region_abr].loc[:,"%neg"] = (df_tas_cc[region_abr].loc[:,"neg"]*100/(
df_tas_cc[region_abr].loc[:,"pos"] +
df_tas_cc[region_abr].loc[:,"neg"])
).round(decimals=1)
del region_abr
# Plotting in Jupyter Notebook
# ---------------------------
# Percent count of pixels that are positively...
# ...or negatively correlated with MRSO
region_abr = srex_abr[2]
import pylab as plot
params = {'legend.fontsize': 20,
'legend.handlelength': 2}
plot.rcParams.update(params)
df_mrso_cc[region_abr].iloc[2:,2:].plot.bar(stacked =False,
figsize=(9,4),
fontsize = 14,
grid='--')
plt.legend(loc='upper right', bbox_to_anchor=(1.25,.6), fontsize=14, ncol=1)
plt.ylim([0,100])
plt.title(f"Percent count of the pixel with pos/neg correlation with TAS for {region_abr}",
loc='left',fontsize =15)
#plt.text(0,18,"Total Regions: 26", fontsize=14, fontweight='bold', color='brown')
# The number 10 or y axis represents the number of pixels
for w_idx,w_str in enumerate(win_yr[2:]):
plt.text(w_idx,10,f"{int(np.round(df_mrso_cc[region_abr].loc[w_str,'pos']))}",
ha='right', va='top',color='white',rotation=90,fontsize=10,weight='bold')
plt.text(w_idx,10,f"{int(np.round(df_mrso_cc[region_abr].loc[w_str,'neg']))}",
ha='left', va='top',color='white',rotation=90,fontsize=10,weight='bold')
# Percent count of pixels that are positively...
# ...or negatively correlated with TAS
# The Srex_index for NAS is 17
region_abr = srex_abr[17]
#fig1 = plt.figure(figsize = (9,5), dpi = 400)
import pylab as plot
params = {'legend.fontsize': 20,
'legend.handlelength': 2}
plot.rcParams.update(params)
plt.style.use("classic")
df_tas_cc[region_abr].iloc[2:,2:].plot.bar(stacked =False,
figsize=(9,4),
fontsize = 14,
color = ['royalblue','darkorange'])
plt.legend(['Positive Correlation', 'Negative Correlation'],
loc='upper right', fontsize=12, ncol=1)
plt.ylim([0,100])
plt.title(f"Correlation of {variable.upper()} Extremes with TAS for {region_abr}",
fontsize =16)
#plt.text(0,18,"Total Regions: 26", fontsize=14, fontweight='bold', color='brown')
plt.ylabel ("Percent Count of Grid-cells", fontsize=14)
plt.xlabel ("Time", fontsize=14)
plt.yticks (fontsize=12)
plt.xticks (fontsize=12, rotation=60)
plt.grid (which='both', ls='--', lw='.5', alpha=.4 )
# The text placed at y = 10 shows the raw counts of positively/negatively correlated pixels
for w_idx,w_str in enumerate(win_yr[2:]):
plt.text(w_idx+.04,10,f"{int(np.round(df_tas_cc[region_abr].loc[w_str,'pos']))}",
ha='right', va='top',color='white',rotation=90,fontsize=12)
plt.text(w_idx+.04,10,f"{int(np.round(df_tas_cc[region_abr].loc[w_str,'neg']))}",
ha='left', va='top',color='white',rotation=90,fontsize=12)
plt.savefig(web_path + f"Change_in_Corr_of_{variable}_for_{region_abr}_lag_{plag}.pdf",
edgecolor = "w", bbox_inches = "tight")
plt.savefig(web_path + f"Change_in_Corr_of_{variable}_for_{region_abr}_lag_{plag}.png",
edgecolor = "w", bbox_inches = "tight")
plt.savefig(path_save + f"Change_in_Corr_of_{variable}_for_{region_abr}_lag_{plag}.pdf",
edgecolor = "w", bbox_inches = "tight")
# Finding the locations of the extremes TCEs and correlations with TAS in NAS
save_txt_tas_nas = 'n'
if save_txt_tas_nas in ['y','Y','yes']:
import sys
stdout_fileno = sys.stdout
# Redirect sys.stdout to the file
cli_var = 'tas'
path_tas_nas = f"{cori_scratch}add_cmip6_data/{source_run}/ssp585/{member_run}/{cli_var}/Correlations/Region_NAS/"
if os.path.isdir(path_tas_nas) == False:
os.makedirs(path_tas_nas)
sys.stdout = open(path_tas_nas+'loc_nbp_tas_nas.txt', 'w')
sys.stdout.write (f"win_idx,lt_idx,ln_idx,lag1,lag2,lag3,lag4],Dom-T/F,Dom-T/F,Dom-T/F,Dom-T/F" + '\n')
# Dom-T/F: if True indicates the Dominant Drivers
# TAS is often dominant at lag 2
locs_tas_nas = {}
list_all_wins_tas_nas = []
for win_idx in range(len(win_yr)):
# Correlation coefficients for lag = 1 for 'NAS'
CC_TAS_all_ranks = np.ma.masked_array(data=dom_dri_cc[:,win_idx,plag,:,:],
mask = np.ma.masked_not_equal(dom_dri_ids[:,win_idx,plag,:,:],
drivers_code[dri_idx]) .mask)
# figure out how to read only the non masked lat_lon for NAS
tas_mask_true = (np.max(abs(CC_TAS_all_ranks),0).mask )
lt_ln_mat = create_seq_mat(nlat=lat.size, nlon=lon.size)
        # list of all location_ids globally with a valid TAS cc:
tas_global_1d_locs = lt_ln_mat[~tas_mask_true]
# list of all location_ids in the SREX region:
region_1d_locs = lt_ln_mat[region_mask]
# list of location_ids in a region with a valid tas cc
tas_region_1d_locs = np.intersect1d ( tas_global_1d_locs,region_1d_locs )
list_locs_tmp = []
for pixel in tas_region_1d_locs:
lt,ln = np.argwhere(lt_ln_mat == pixel)[0]
#print (win_idx, lt,ln, CC_TAS_all_ranks[:,lt,ln].data,CC_TAS_all_ranks[:,lt,ln].mask)
tmp_text= (f"{win_idx},{lt},{ln},{CC_TAS_all_ranks[:,lt,ln].data[0]},"
f"{CC_TAS_all_ranks[:,lt,ln].data[1]},{CC_TAS_all_ranks[:,lt,ln].data[2]},"
+ f"{CC_TAS_all_ranks[:,lt,ln].data[3]},{CC_TAS_all_ranks[:,lt,ln].mask[0]},"
+ f"{CC_TAS_all_ranks[:,lt,ln].mask[1]},{CC_TAS_all_ranks[:,lt,ln].mask[2]},"
+ f"{CC_TAS_all_ranks[:,lt,ln].mask[3]}")
list_locs_tmp.append(f"{lt}_{ln}")
list_all_wins_tas_nas.append(f"{lt}_{ln}")
                # Write to the redirected stdout (loc_nbp_tas_nas.txt)
sys.stdout.write(tmp_text + '\n')
# Prints to the actual saved stdout handler
stdout_fileno.write(tmp_text + '\n')
locs_tas_nas[win_idx] = np.array(list_locs_tmp)
    # List and count of the locations with valid correlation coefficients,
    # common across all windows; saved to file below
    tas_nas_unique_locs, tas_nas_counts = np.unique(np.array(list_all_wins_tas_nas), return_counts=True)
stdout_fileno = sys.stdout
sys.stdout = open(path_tas_nas+'locs_count_nbp_tas_nas.txt', 'w')
sys.stdout.write (f"locs, count" + '\n')
for idx in range(len(tas_nas_unique_locs)):
tmp_text = f"{tas_nas_unique_locs[idx]},{tas_nas_counts[idx]}"
sys.stdout.write(tmp_text + '\n')
stdout_fileno.write(tmp_text + '\n')
    # Analysis of the locations is done in an Excel sheet in the Document/cmip6/Region_NAS
# Calculating the change in TAS at different quantiles
# ====================================================
Calculate_quantile = 'n'
if Calculate_quantile in ['y','Y','yes']:
# Calculating the quantiles of climate variable at pixel levels
import xarray as xr
cli_var = 'tas'
path_cli_var = f"{cori_scratch}add_cmip6_data/{source_run}/ssp585/{member_run}/{cli_var}"
file_cli_var = f"{path_cli_var}/{source_run}_ssp585_{member_run}_{cli_var}.nc"
# Reading the tas
nc_cli = xr.open_dataset(file_cli_var) # reading nc file
tas = nc_cli.tas # tas as object
# extracting data for a location
# -------------------------------
lat_idx = 167
lon_idx = 90
# cli variable for a pixel
tas_px = tas.isel(lat=lat_idx,lon=lon_idx).data
# extracting data for a location
# -------------------------------
lat_idx = 157
lon_idx = 52
# cli variable for a pixel
tas_px = tas.isel(lat=lat_idx,lon=lon_idx).data
Quantiles = np.arange(0.1,1,.1)
# Saving the quantiles and tas in Celsius
tas_quant_px = {}
for Quant in Quantiles:
tas_quant_px[Quant] = {}
    # Finding the lowest and highest quantile temperatures of tas:
    tas_low = np.inf
    tas_high = -np.inf
for Quant in Quantiles:
for w_idx in range(10):
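            # each window covers 300 consecutive time steps of the tas series
            # (assumed monthly data, i.e. 25-year windows)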
tas_px_win = tas_px[w_idx*300:(w_idx+1)*300] - 273.15
tas_q_px = np.quantile(tas_px_win,Quant)
tas_quant_px[Quant][w_idx] = tas_q_px
if tas_q_px < tas_low:
tas_low = tas_q_px
if tas_q_px > tas_high:
tas_high = tas_q_px
# Dataframe from dict of quantiles of a pixel
df_quant_px = pd.DataFrame.from_dict(tas_quant_px)
# the columns are the quantiles and the rows are the window index
# rate of increase of Tas per window
slope_px = []
for Quant in Quantiles:
slope_px.append(stats.linregress(range(10),list(tas_quant_px[Quant].values())))
df_quant_px = pd.DataFrame.from_dict(tas_quant_px)
#q = .1
fig = plt.figure()
ax = plt.subplot(111)
for q in Quantiles:
ax.plot(range(10),df_quant_px.loc[:,q], label= f"{q:.1f}")
# text of slope of rise in temperature per window
ax.text(8,df_quant_px.loc[7,.1], f"{slope_px[0][0]:.2f}" ) # Quant = .1
ax.text(8,df_quant_px.loc[7,.5], f"{slope_px[4][0]:.2f}" ) # Quant = .5
ax.text(8,df_quant_px.loc[7,.9], f"{slope_px[-1][0]:.2f}" ) # Quant = .9
# Shrink current axis's height by 10% on the bottom
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height * .9])
# Put a legend below current axis
ax.legend(loc='center left', bbox_to_anchor=(1.1, 0.5),
fancybox=True, shadow=True, ncol=1,
title='Quantiles')
#ax.set_ylim(np.floor(tas_low)-1,np.ceil(tas_high)+1)
# Show duplicate y-axis:
plt.tick_params(labeltop=False, labelright=True)
# Show grid
ax.grid (which='both', ls='--', lw='.5', alpha=.4 )
ax.set_ylabel ("Temperature (Celsius)", fontsize=14)
#ax.set_yticklabels(fontsize= 10)
ax.set_xticklabels(win_yr)
for tick in ax.get_xticklabels():
tick.set_rotation(60)
ax.set_xlabel ("Time", fontsize=14)
ax = plt.gca()
ax.tick_params(axis = 'both', which = 'major', labelsize = 12)
ax.tick_params(axis = 'both', which = 'minor', labelsize = 12)
plt.title (f"TAS at lat={lat_idx},lon={lon_idx}", fontsize = 14)
# Area weighted mean and quantile tas distribution of the region of TAS
# =====================================================================
# To calculate the area-weighted average of temperature:
# Reading files/data
tas = nc_cli.tas # tas as object
area = nc_cli.areacella # area as object
#mask of the region
region_abr = srex_abr[5] # for NAS
filter_region = np.array(srex_abr) == region_abr # for NAS
region_number = np.array(srex_nums)[filter_region][0]
region_name = np.array(srex_names)[filter_region][0]
    region_mask_not = np.ma.masked_not_equal(srex_mask_ma, region_number).mask # Masks everything but the region
    region_mask = ~region_mask_not # True (1s) only inside the region
# Masking the area to only the region of interest
area_region= np.ma.masked_array(area,mask=region_mask_not)
# mean area weighted average for every window
print (f"Region: {srex_abr[5]}")
print ("Area Weighted Mean Temperature")
print ("---------")
for w_idx in range(10):
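        # time-average tas over the window, then take the area-weighted spatial
        # average over the region's grid cells (converted from Kelvin to Celsius)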
tas_awm_region = np.ma.average(np.mean(tas[w_idx*300:(w_idx+1)*300].data,axis=0), weights=area_region) - 273.15
print (f"for window {win_yr[w_idx]}, AWM: {tas_awm_region:.2f} Celsius")
# Quantiles for the region of NAS
# Saving the quantiles and tas in Celsius
tas_quant_reg = {}
for Quant in Quantiles:
tas_quant_reg[Quant] = {}
    tas_reg = tas.data * np.array([region_mask] * tas.shape[0])
import unittest
import pandas as pd
import numpy as np
from borf.get_orfs import read_fasta
from borf.get_orfs import find_next_stop
from borf.get_orfs import find_max_orf_index
from borf.get_orfs import orf_start_stop_from_aa
from borf.get_orfs import find_longest_orfs
from borf.get_orfs import replace_last_stop
from borf.get_orfs import add_upstream_aas
from borf.get_orfs import filter_objects
from borf.get_orfs import translate_all_frames
from borf.get_orfs import convert_start_stop_to_nt
from borf.get_orfs import check_first_aa
from borf.get_orfs import unique_number_from_list
from borf.get_orfs import find_all_orfs
from borf.get_orfs import add_orf_classification
from borf.get_orfs import get_orfs
class TestReadFasta(unittest.TestCase):
def test_read_fasta(self):
# check that files are read into correct format"
read_sequence = read_fasta('test_data/test_mutliple_frame_orfs.fa')
seq_array = [str(x.seq) for x in read_sequence]
# check sequence matches
# (only check first/last few nts, and total length)
t_start = seq_array[0][0:20] == 'GCTTCGGGTTGGTGTCATGG'
t_end = seq_array[0][-1:-20:-1] == 'AGTTGTGTTACCGGGACGG'
t_len = len(seq_array[0]) == 2757
self.assertTrue(t_start and t_end and t_len)
class TestFindNextStop(unittest.TestCase):
def test_next_stop_not_longest(self):
# "check this finds the NEXT stop codon"
# assert find_next_stop("AAAMBBB*CCC*", 4) == 8
next_stop = find_next_stop("AMEATBALL*", 0)
self.assertEqual(next_stop, 10)
def test_next_stop_from_within(self):
# "check this finds the NEXT stop codon when given a start position
# greater than 0/1"
orf = "AMEATY*METABALL*"
next_stop = find_next_stop(orf, 7)
self.assertEqual(next_stop, len(orf))
def test_next_stop_final(self):
# "check that this returns the length of the given string when no stop
# codon is found"
orf = "AMEATBALL"
next_stop = find_next_stop(orf, 0)
self.assertEqual(next_stop, len(orf))
class TestFindMaxOrfIndex(unittest.TestCase):
def test_find_max_orf_index(self):
# test basic usage of finding the two maximum values
self.assertEqual(find_max_orf_index(start_locs=[0, 100],
end_locs=[1000, 200]), (0, 1000))
def test_find_max_orf_index_offby1(self):
# test when second index is greater by one
self.assertEqual(find_max_orf_index(start_locs=[0, 100],
end_locs=[999, 1100]), (100, 1100))
def test_find_max_orf_index_equal(self):
# test that first instance of the max is returned
self.assertEqual(find_max_orf_index(start_locs=[0, 100],
end_locs=[1000, 1100]), (0, 1000))
class TestOrfStartStopFromAA(unittest.TestCase):
def test_correct_start_stop(self):
# tests that the correct start/stop locations are given
# in non-pythonic (1-indexed) manner
self.assertEqual(orf_start_stop_from_aa('AMEATBALL*'), (1, 10))
def test_start_stop_no_stop_codon(self):
# tests that stop location is the final aa when no stop codon is found
self.assertEqual(orf_start_stop_from_aa('AMEATBALL'), (1, 9))
def test_start_stop_longest(self):
# tests that the start/stop locations are given for the LONGEST orf
self.assertEqual(orf_start_stop_from_aa('MAUL*AMEATBALL'), (6, 14))
class TestFindLongestORF(unittest.TestCase):
def test_find_longest_orf_output_format(self):
        # tests that the output is a length-5 tuple and each element is the
        # correct numpy array type
long_orf = find_longest_orfs(['AMEATBALL'])
t_len = len(long_orf) == 5
# test numpy types of all outputs
t0 = long_orf[0].dtype == '<U8'
t1 = long_orf[1].dtype == 'int64'
t2 = long_orf[2].dtype == 'int64'
t3 = long_orf[3].dtype == 'int64'
t4 = long_orf[4].dtype == 'bool'
all_right_types = t0 and t1 and t2 and t3 and t4 and t_len
self.assertTrue(all_right_types)
def test_find_longest_orf_trimmed(self):
# check that the last * is trimmed from the orf sequence
self.assertEqual(find_longest_orfs(['AMEATBALL*'])[0], ['MEATBALL'])
def test_find_longest_orf_multiple(self):
input = ['AMEATBALL*', 'TWOMEATBALLS']
result = find_longest_orfs(input)
self.assertEqual(len(result[0]), len(input))
def test_find_longest_orf_stopsites(self):
# check that the stop site is calculated as the * for seqs with it,
# and the last AA for those without
stop_loc_with_stop = find_longest_orfs(['AMEATBALL*'])[2]
stop_loc_without_stop = find_longest_orfs(['AMEATBALL'])[2]
self.assertEqual(stop_loc_with_stop, stop_loc_without_stop + 1)
class TestReplaceLastStop(unittest.TestCase):
def test_replace_last_stop(self):
# check that the last * is trimmed from the orf sequence
self.assertEqual(replace_last_stop('MEATBALL'),
replace_last_stop('MEATBALL*'))
class TestAddUpstreamAAs(unittest.TestCase):
def test_add_upstream_aa_output(self):
# check all outputs generated and all in correct type
aa_sequence = np.array(['ALONGERUPSTREAMMEATBALL'])
orf_sequence, start_sites, stop_sites, orf_length, last_aa_is_stop = find_longest_orfs(
aa_sequence)
output = add_upstream_aas(aa_sequence, stop_sites, start_sites,
orf_sequence, orf_length,
min_upstream_length=5)
t_len = len(output) == 3
# test numpy types of all outputs
t0 = output[0].dtype.type == np.str_
t1 = output[1].dtype == 'int64'
t2 = output[2].dtype == 'int64'
all_right_types = t0 and t1 and t2 and t_len
self.assertTrue(all_right_types)
def test_add_upstream_aa(self):
# test expected output
aa_sequence = np.array(['ALONGERUPSTREAMMEATBALL'])
orf_sequence, start_sites, stop_sites, orf_length, last_aa_is_stop = find_longest_orfs(
aa_sequence)
output = add_upstream_aas(
aa_sequence,
stop_sites,
start_sites,
orf_sequence,
orf_length,
min_upstream_length=5)
self.assertEqual(output[0], 'ALONGERUPSTREAMMEATBALL')
def test_add_upstream_aa_multi(self):
# test with multiple inputs
aa_sequence = np.array(
['ALONGERUPSTREAMMEATBALL', 'TWODOZENMEATBALLS', 'BROWNBEARMAULSGIANTSQUID'])
orf_sequence, start_sites, stop_sites, orf_length, last_aa_is_stop = find_longest_orfs(
aa_sequence)
output = add_upstream_aas(
aa_sequence,
stop_sites,
start_sites,
orf_sequence,
orf_length,
min_upstream_length=5)
self.assertTrue(np.all(output[0] == np.array(
['ALONGERUPSTREAMMEATBALL', 'TWODOZENMEATBALLS', 'BROWNBEARMAULSGIANTSQUID'])))
def test_add_upstream_aa_noupstream(self):
# test with no viable upstream AAs
aa_sequence = np.array(['BEAREATS*MEATBALLS'])
orf_sequence, start_sites, stop_sites, orf_length, last_aa_is_stop = find_longest_orfs(
aa_sequence)
output = add_upstream_aas(
aa_sequence,
stop_sites,
start_sites,
orf_sequence,
orf_length,
min_upstream_length=5)
self.assertEqual(output[0], 'MEATBALLS')
def test_add_upstream_aa_shortupstream(self):
# test with upstream AAs too short
aa_sequence = np.array(['BEARMEATBALLS'])
orf_sequence, start_sites, stop_sites, orf_length, last_aa_is_stop = find_longest_orfs(
aa_sequence)
output = add_upstream_aas(
aa_sequence,
stop_sites,
start_sites,
orf_sequence,
orf_length,
min_upstream_length=5)
self.assertEqual(output[0], 'MEATBALLS')
def test_add_upstream_aa_exactupstream(self):
# test with upstream AAs of exactly min_upstream_length
aa_sequence = np.array(['BEARMEATBALLS'])
orf_sequence, start_sites, stop_sites, orf_length, last_aa_is_stop = find_longest_orfs(
aa_sequence)
output = add_upstream_aas(
aa_sequence,
stop_sites,
start_sites,
orf_sequence,
orf_length,
min_upstream_length=4)
self.assertEqual(output[0], 'BEARMEATBALLS')
class TestFilterObjects(unittest.TestCase):
def test_filter_objects(self):
# check input arrays can be filtered
letters = np.array(['A', 'B', 'C', 'D', 'E', 'F', 'H', 'I', 'J'])
values = np.array([1, 2, 3, 4, 5, 4, 3, 2, 1])
filter = values < 3
output = filter_objects(filter, letters, values)
self.assertTrue(np.all(output[0] == np.array(['A', 'B', 'I', 'J'])) and
np.all(output[1] == np.array([1, 2, 2, 1])))
class TestTranslateAllFrames(unittest.TestCase):
def test_translate_output_format(self):
        # tests that the output is a length-6 tuple and each element is the
        # correct numpy array type
sequences = read_fasta('test_data/test_trans_all_frames.fa')
output = translate_all_frames(sequences, both_strands=False)
t_len = len(output) == 6
# test numpy types of all outputs
t0 = output[0].dtype.type == np.str_
t1 = output[1].dtype.type == np.str_
t2 = output[2].dtype == 'int64'
t3 = output[3].dtype.type == np.str_
t4 = output[4].dtype == 'int64'
t5 = output[5].dtype == 'int64'
all_right_types = t0 and t1 and t2 and t3 and t4 and t5 and t_len
self.assertTrue(all_right_types)
def test_translate_allframes(self):
sequences = read_fasta('test_data/test_trans_all_frames.fa')
ids, aa_frames, frame, strand, seq_length_nt, seq_length = translate_all_frames(
sequences, both_strands=False)
self.assertTrue(np.all(frame == np.array([1, 2, 3])))
def test_translate_alltransframes(self):
sequences = read_fasta('test_data/test_trans_all_frames.fa')
ids, aa_frames, frame, strand, seq_length_nt, seq_length = translate_all_frames(
sequences, both_strands=False)
self.assertTrue(np.all(aa_frames == np.array(
['MANATEE*', 'WRTRPKN', 'GERDRRI'])))
def test_translate_posstrand(self):
sequences = read_fasta('test_data/test_trans_all_frames.fa')
ids, aa_frames, frame, strand, seq_length_nt, seq_length = translate_all_frames(
sequences, both_strands=False)
self.assertTrue(np.all(strand == np.array(['+', '+', '+'])))
def test_translate_seq_length_nt(self):
sequences = read_fasta('test_data/test_trans_all_frames.fa')
ids, aa_frames, frame, strand, seq_length_nt, seq_length = translate_all_frames(
sequences, both_strands=False)
self.assertTrue(np.all(seq_length_nt == np.array([24, 24, 24])))
def test_translate_seq_length(self):
sequences = read_fasta('test_data/test_trans_all_frames.fa')
ids, aa_frames, frame, strand, seq_length_nt, seq_length = translate_all_frames(
sequences, both_strands=False)
self.assertTrue(np.all(seq_length == np.array([8, 7, 7])))
def test_translate_bothstrands(self):
sequences = read_fasta('test_data/test_trans_all_frames.fa')
ids, aa_frames, frame, strand, seq_length_nt, seq_length = translate_all_frames(
sequences, both_strands=True)
frame_correct = np.all(frame == np.array([1, 1, 2, 2, 3, 3]))
strand_correct = np.all(strand == np.array(
['+', '-', '+', '-', '+', '-']))
trans_correct = np.all(aa_frames == np.array(
['MANATEE*', 'LFFGRVRH', 'WRTRPKN', 'YSSVAFA', 'GERDRRI', 'ILRSRSP']))
self.assertTrue(frame_correct and strand_correct and trans_correct)
class TestConvertAANT(unittest.TestCase):
def test_convert_nt_output_format(self):
        # tests that the output is a length-3 tuple and each element is the
        # correct numpy array type
sequences = read_fasta('test_data/test_frames.fa')
ids, aa_frames, frame, strand, seq_length_nt, seq_length = translate_all_frames(
sequences, both_strands=False)
orf_sequence, start_sites, stop_sites, orf_length, last_aa_is_stop = find_longest_orfs(
aa_frames)
# filter data by minimum orf length
keep = orf_length >= 6
aa_frames, frame, strand, seq_length_nt, ids, seq_length, start_sites, stop_sites, orf_sequence, last_aa_is_stop, orf_length = filter_objects(
keep, aa_frames, frame, strand, seq_length_nt, ids, seq_length, start_sites, stop_sites, orf_sequence, last_aa_is_stop, orf_length)
output = convert_start_stop_to_nt(
start_sites,
stop_sites,
seq_length_nt,
orf_length,
frame,
last_aa_is_stop)
t_len = len(output) == 3
# test numpy types of all outputs
t0 = output[0].dtype == 'int64'
t1 = output[1].dtype == 'int64'
t2 = output[2].dtype == 'int64'
all_right_types = t0 and t1 and t2 and t_len
self.assertTrue(all_right_types)
def test_convert_start_nt(self):
sequences = read_fasta('test_data/test_frames.fa')
ids, aa_frames, frame, strand, seq_length_nt, seq_length = translate_all_frames(
sequences, both_strands=False)
orf_sequence, start_sites, stop_sites, orf_length, last_aa_is_stop = find_longest_orfs(
aa_frames)
# filter data by minimum orf length
keep = orf_length >= 6
aa_frames, frame, strand, seq_length_nt, ids, seq_length, start_sites, stop_sites, orf_sequence, last_aa_is_stop, orf_length = filter_objects(
keep, aa_frames, frame, strand, seq_length_nt, ids, seq_length, start_sites, stop_sites, orf_sequence, last_aa_is_stop, orf_length)
start_site_nt, stop_site_nt, utr3_length = convert_start_stop_to_nt(
start_sites, stop_sites, seq_length_nt, orf_length, frame, last_aa_is_stop)
self.assertTrue(np.all(start_site_nt == np.array([1, 2, 3])))
def test_convert_stop_nt(self):
sequences = read_fasta('test_data/test_frames.fa')
ids, aa_frames, frame, strand,seq_length_nt, seq_length = translate_all_frames(sequences, both_strands=False)
orf_sequence, start_sites, stop_sites, orf_length, last_aa_is_stop = find_longest_orfs(
aa_frames)
# filter data by minimum orf length
keep = orf_length >= 6
aa_frames, frame, strand, seq_length_nt, ids, seq_length, start_sites, stop_sites, orf_sequence, last_aa_is_stop, orf_length = filter_objects(
keep, aa_frames, frame, strand, seq_length_nt, ids, seq_length, start_sites, stop_sites, orf_sequence, last_aa_is_stop, orf_length)
start_site_nt, stop_site_nt, utr3_length = convert_start_stop_to_nt(
start_sites, stop_sites, seq_length_nt, orf_length, frame, last_aa_is_stop)
self.assertTrue(np.all(stop_site_nt == np.array([21, 22, 23])))
def test_convert_stop_nt_3incomplete(self):
sequences = read_fasta('test_data/test_stopsitent.fa')
ids, aa_frames, frame, strand,seq_length_nt, seq_length = translate_all_frames(sequences, both_strands=False)
orf_sequence, start_sites, stop_sites, orf_length, last_aa_is_stop = find_longest_orfs(aa_frames)
# filter data by minimum orf length
keep = orf_length >= 6
aa_frames, frame, strand, seq_length_nt, ids, seq_length, start_sites, stop_sites, orf_sequence, last_aa_is_stop, orf_length = filter_objects(
keep, aa_frames, frame, strand, seq_length_nt, ids, seq_length, start_sites, stop_sites, orf_sequence, last_aa_is_stop, orf_length)
start_site_nt, stop_site_nt, utr3_length = convert_start_stop_to_nt(
start_sites, stop_sites, seq_length_nt, orf_length, frame, last_aa_is_stop)
self.assertTrue(np.all(stop_site_nt == seq_length_nt))
def test_convert_utr_nt(self):
sequences = read_fasta('test_data/test_frames.fa')
ids, aa_frames, frame, strand, seq_length_nt, seq_length = translate_all_frames(
sequences, both_strands=False)
orf_sequence, start_sites, stop_sites, orf_length, last_aa_is_stop = find_longest_orfs(
aa_frames)
# filter data by minimum orf length
keep = orf_length >= 6
aa_frames, frame, strand, seq_length_nt, ids, seq_length, start_sites, stop_sites, orf_sequence, last_aa_is_stop, orf_length = filter_objects(
keep, aa_frames, frame, strand, seq_length_nt, ids, seq_length, start_sites, stop_sites, orf_sequence, last_aa_is_stop, orf_length)
start_site_nt, stop_site_nt, utr3_length = convert_start_stop_to_nt(
start_sites, stop_sites, seq_length_nt, orf_length, frame, last_aa_is_stop)
self.assertTrue(np.all(utr3_length == np.array([5, 4, 3])))
class TestCheckFirstAA(unittest.TestCase):
def test_check_first_aa_pos(self):
        # check that a sequence starting with M is classified as 'M'
aa_sequence = np.array(['MEATBALL'])
self.assertEqual(check_first_aa(aa_sequence), 'M')
def test_check_first_aa_neg(self):
        # check that a sequence not starting with M is classified as 'ALT'
aa_sequence = np.array(['NOTAMEATBALL'])
self.assertEqual(check_first_aa(aa_sequence), 'ALT')
def test_check_first_aa_multi(self):
        # check classification of the first amino acid for multiple sequences
aa_sequence = np.array(['MEATBALL', 'NOTAMEATBALL'])
self.assertTrue(np.all(check_first_aa(
aa_sequence) == np.array(['M', 'ALT'])))
class TestCheckUniqueN(unittest.TestCase):
def test_check_unique_n(self):
        # check that repeated values are numbered by order of occurrence
values = np.array(
['MEATBALL', 'MEATBALL', 'BEAR', 'MEATBALL', 'MEATBALLS'])
self.assertEqual(unique_number_from_list(values), [1, 2, 1, 3, 1])
class TestFindAllORFs(unittest.TestCase):
def test_find_all_orfs_output_format(self):
aa_seqs = np.array(['MEATBALL*MEATBALLBEAR*'])
output = find_all_orfs(aa_seqs, min_orf_length=5)
t_len = len(output) == 6
# test numpy types of all outputs
t0 = output[0].dtype.type == np.str_
t1 = output[1].dtype == 'int64'
t2 = output[2].dtype == 'int64'
t3 = output[3].dtype == 'int64'
t4 = output[4].dtype == 'bool'
t5 = output[5].dtype == 'int64'
all_right_types = t0 and t1 and t2 and t3 and t4 and t5 and t_len
self.assertTrue(all_right_types)
def test_find_two_orfs(self):
        # check that both ORFs are found, with the correct sequences,
        # start/stop sites, lengths and stop-codon flags
aa_seqs = np.array(['MEATBALL*MEATBALLBEAR*'])
orf_sequence, start_sites, stop_sites, orf_length, last_aa_is_stop, matched_index = find_all_orfs(
aa_seqs, min_orf_length=5)
orf_correct = np.all(orf_sequence == np.array(
['MEATBALL', 'MEATBALLBEAR']))
start_correct = np.all(start_sites == np.array([1, 10]))
stop_correct = np.all(stop_sites == np.array([9, 22]))
orf_length_correct = np.all(orf_length == np.array([8, 12]))
last_aa_is_stop_correct = np.all(
            last_aa_is_stop == np.array([True, True]))
        # assumed completion of the truncated test: combine the individual checks
        self.assertTrue(orf_correct and start_correct and stop_correct and
                        orf_length_correct and last_aa_is_stop_correct)
import numpy as np
import cv2
import os
def nothing(x):
pass
vc = cv2.VideoCapture(0)
cv2.namedWindow("hand")
# 1) Creating trackbar for lower hue value so as to find the desired colored object in frame.
cv2.createTrackbar("hue_lower", "hand", 0, 255, nothing)
# Creating trackbar for upper hue value for same reason as above.
cv2.createTrackbar("hue_upper", "hand", 30, 255, nothing)
# Creating trackbar for lower saturation value for same reason as above.
cv2.createTrackbar("saturation_lower", "hand", 41, 255, nothing)
# Creating trackbar for upper saturation value for same reason as above.
cv2.createTrackbar("saturation_upper", "hand", 152, 255, nothing)
# Creating trackbar for lower value for same reason as above.
cv2.createTrackbar("value_lower", "hand", 69, 255, nothing)
# Creating trackbar for upper value for same reason as above.
cv2.createTrackbar("value_upper", "hand", 220, 255, nothing)
# for remove face
current_file_path = os.path.dirname(os.path.realpath(__file__))
cascade = cv2.CascadeClassifier(cv2.samples.findFile(current_file_path + "/haarcascade_frontalface_alt.xml"))
def detect(img, cascade):
rects = cascade.detectMultiScale(img, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
if len(rects) == 0:
return []
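    # detectMultiScale returns boxes as (x, y, w, h); convert them to corner
    # coordinates (x1, y1, x2, y2)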
rects[:, 2:] += rects[:, :2]
return rects
def removeFaceAra(img, cascade):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)
rects = detect(gray, cascade)
height, width = img.shape[:2]
for x1, y1, x2, y2 in rects:
cv2.rectangle(img, (x1 - 10, 0), (x2 + 10, height), (0, 0, 0), -1)
return img
# fgbg = cv2.createBackgroundSubtractorMOG2(varThreshold=100)
thumbs_up_cnt = 0
while True:
ret, frame = vc.read() # Reading one image frame from webcam. 1280 X 720
frame = cv2.flip(frame, 1)
# resizing
frame = frame[100:, 100:1180]
# removing background
# fgmask = fgbg.apply(frame)
#
# nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(fgmask)
#
# for index, centroid in enumerate(centroids):
# if stats[index][0] == 0 and stats[index][1] == 0:
# continue
# if np.any(np.isnan(centroid)):
# continue
#
# x, y, width, height, area = stats[index]
# centerX, centerY = int(centroid[0]), int(centroid[1])
#
# if area > 10:
# cv2.circle(frame, (centerX, centerY), 1, (0, 255, 0), 2)
# cv2.rectangle(frame, (x, y), (x + width, y + height), (0, 0, 255))
#
# removing face
frame = removeFaceAra(frame, cascade=cascade)
frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Converting RGB system to HSV system.
hl = cv2.getTrackbarPos("hue_lower", "hand")
hu = cv2.getTrackbarPos("hue_upper", "hand")
sl = cv2.getTrackbarPos("saturation_lower", "hand")
su = cv2.getTrackbarPos("saturation_upper", "hand")
vl = cv2.getTrackbarPos("value_lower", "hand")
vu = cv2.getTrackbarPos("value_upper", "hand")
    hand_lower = np.array([hl, sl, vl])
import time
import sys
import os
import glob
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import pandas as pd
import numpy as np
import tensorflow as tf
# comment this if running on GPU
tf.config.set_visible_devices([], 'GPU')
from sklearn.model_selection import train_test_split
import preprocess_sequences
import utils
import neural_network
import train_model
PATH_PRE = "data/ncov_global/"
PATH_SEQ = PATH_PRE + "spikeprot0815.fasta"
GALAXY_CLADE_ASSIGNMENT = PATH_PRE + "clade_assignment_2.9_Mil_samples.tabular"
PATH_SAMPLES_CLADES = PATH_PRE + "sample_clade_sequence_df.csv"
PATH_F_DICT = PATH_PRE + "f_word_dictionaries.json"
PATH_R_DICT = PATH_PRE + "r_word_dictionaries.json"
PATH_KMER_F_DICT = "data/ncov_global/kmer_f_word_dictionaries.json"
PATH_KMER_R_DICT = "data/ncov_global/kmer_r_word_dictionaries.json"
PATH_TRAINING_CLADES = "data/train_clade_in_out.json"
PATH_UNRELATED_CLADES = "data/unrelated_clades.json"
PRETRAIN_DATA = "data/pretrain/pretrain.csv"
PRETRAIN_GEN_LOSS = "data/generated_files/pretrain_gen_train_loss.txt"
PRETRAIN_GEN_TEST_LOSS = "data/generated_files/pretrain_gen_test_loss.txt"
TRAIN_GEN_TOTAL_LOSS = "data/generated_files/train_gen_total_loss.txt"
TRAIN_GEN_FAKE_LOSS = "data/generated_files/train_gen_fake_loss.txt"
TRAIN_GEN_TRUE_LOSS = "data/generated_files/train_gen_true_loss.txt"
TRAIN_DISC_TOTAL_LOSS = "data/generated_files/train_disc_total_loss.txt"
TRAIN_DISC_FAKE_LOSS = "data/generated_files/train_disc_fake_loss.txt"
TRAIN_DISC_TRUE_LOSS = "data/generated_files/train_disc_true_loss.txt"
TEST_LOSS = "data/generated_files/train_te_loss.txt"
PRETRAIN_GEN_ENC_MODEL = "data/generated_files/pretrain_gen_encoder"
PRETRAIN_GEN_DEC_MODEL = "data/generated_files/pretrain_gen_decoder"
TRAIN_GEN_ENC_MODEL = "data/generated_files/gen_enc_model"
TRAIN_GEN_DEC_MODEL = "data/generated_files/gen_dec_model"
SAVE_TRUE_PRED_SEQ = "data/generated_files/true_predicted_df.csv"
TR_MUT_INDICES = "data/generated_files/tr_mut_indices.json"
PRETR_MUT_INDICES = "data/generated_files/pretr_mut_indices.json"
'''
Best run
s_kmer = 3
LEN_AA = 1274
len_aa_subseq = 50
#len_final_aa_padding = len_aa_subseq + 1
len_final_aa_padding = len_aa_subseq - s_kmer + 2
# Neural network parameters
embedding_dim = 32
batch_size = 4
te_batch_size = batch_size
n_te_batches = 2
enc_units = 128
'''
s_kmer = 3
LEN_AA = 16 # 1273 for considering entire seq length
len_aa_subseq = LEN_AA
#len_final_aa_padding = len_aa_subseq + 1
len_final_aa_padding = len_aa_subseq - s_kmer + 2 # write 2 here when there is padding of zero in in and out sequences
size_stateful = 10
# Neural network parameters
embedding_dim = 128
batch_size = 8
te_batch_size = batch_size
n_te_batches = 20
enc_units = 32
pretrain_epochs = 20
epochs = 20
max_l_dist = 11
test_train_size = 0.85
pretrain_train_size = 0.5
random_clade_size = 2500
to_pretrain = True
pretrained_model = False
gan_train = True
start_token = 0
stale_folders = ["data/generated_files/", "data/train/", "data/test/", "data/tr_unrelated/", "data/te_unrelated/", "data/pretrain/"]
amino_acid_codes = "QNKWFPYLMTEIARGHSDVC"
def verify_ldist(X, Y):
lev_list = list()
for index, (x, y) in enumerate(zip(X, Y)):
seq_x = x
seq_y = y
#print(seq_x, seq_y)
lev = utils.compute_Levenshtein_dist(x, y)
lev_list.append(lev)
print(lev_list)
print(np.mean(lev_list))
def get_samples_clades():
print("Reading clade assignments...")
#samples_clades = preprocess_sequences.get_samples_clades(GALAXY_CLADE_ASSIGNMENT)
samples_clades = preprocess_sequences.get_galaxy_samples_clades(GALAXY_CLADE_ASSIGNMENT)
print("Preprocessing sequences...")
encoded_sequence_df, forward_dict, rev_dict = preprocess_sequences.preprocess_seq_galaxy_clades(PATH_SEQ, samples_clades, LEN_AA)
print(encoded_sequence_df)
def read_files():
#to preprocess once, uncomment get_samples_clades
#get_samples_clades()
forward_dict = utils.read_json(PATH_F_DICT)
rev_dict = utils.read_json(PATH_R_DICT)
encoder = None
decoder = None
pf_model = None
#kmer_f_dict, kmer_r_dict = utils.get_all_possible_words(amino_acid_codes, s_kmer)
if pretrained_model is False:
print("Cleaning up stale folders...")
utils.clean_up(stale_folders)
print("Preprocessing sample-clade assignment file...")
dataf = pd.read_csv(PATH_SAMPLES_CLADES, sep=",")
filtered_dataf = preprocess_sequences.filter_samples_clades(dataf)
clades_in_clades_out = utils.read_json(PATH_TRAINING_CLADES)
print(clades_in_clades_out)
#unrelated_clades = utils.read_json(PATH_UNRELATED_CLADES)
print("Generating cross product of real parent child...")
preprocess_sequences.make_cross_product(clades_in_clades_out, filtered_dataf, len_aa_subseq, start_token, train_size=test_train_size, edit_threshold=max_l_dist, random_size=random_clade_size, unrelated=False)
#print("Generating cross product of real sequences but not parent-child...")
#preprocess_sequences.make_cross_product(unrelated_clades, filtered_dataf, len_aa_subseq, train_size=1.0, edit_threshold=max_l_dist, random_size=random_clade_size, unrelated=True)
#sys.exit()
else:
encoder = tf.keras.models.load_model(PRETRAIN_GEN_ENC_MODEL)
decoder = tf.keras.models.load_model(PRETRAIN_GEN_DEC_MODEL)
start_training(forward_dict, rev_dict, encoder, decoder)
def start_training(forward_dict, rev_dict, gen_encoder=None, gen_decoder=None):
pos_variations = dict()
pos_variations_count = dict()
start_time = time.time()
print("Loading datasets...")
tr_clade_files = glob.glob('data/train/*.csv')
te_clade_files = glob.glob('data/test/*.csv')
combined_X = list()
combined_y = list()
# load train data
print("Loading training datasets...")
for name in tr_clade_files:
tr_clade_df = pd.read_csv(name, sep="\t")
X = tr_clade_df["X"].tolist()
y = tr_clade_df["Y"].tolist()
combined_X.extend(X)
combined_y.extend(y)
#verify_ldist(combined_X, combined_y)
#print(combined_X[0])
#sys.exit()
combined_te_X = list()
combined_te_y = list()
# load test data
print("Loading test datasets...")
for te_name in te_clade_files:
te_clade_df = pd.read_csv(te_name, sep="\t")
te_X = te_clade_df["X"].tolist()
te_y = te_clade_df["Y"].tolist()
combined_te_X.extend(te_X)
combined_te_y.extend(te_y)
print(len(te_X), len(te_y))
print()
#verify_ldist(combined_te_X, combined_te_y)
tr_unrelated_files = glob.glob("data/tr_unrelated/*.csv")
print("Loading unrelated datasets...")
unrelated_X = list()
unrelated_y = list()
for tr_unrelated in tr_unrelated_files:
unrelated_clade_df = pd.read_csv(tr_unrelated, sep="\t")
un_X = unrelated_clade_df["X"].tolist()
un_y = unrelated_clade_df["Y"].tolist()
unrelated_X.extend(un_X)
unrelated_y.extend(un_y)
print(len(un_X), len(un_y))
unrelated_X = np.array(unrelated_X)
unrelated_y = np.array(unrelated_y)
print("Unrelated data sizes")
print(len(unrelated_X), len(unrelated_y))
print("train and test data sizes")
print(len(combined_X), len(combined_y), len(combined_te_X), len(combined_te_y))
# convert test and train datasets to kmers
'''kmers_global = list()
#print(forward_dict)
train_kmers = utils.get_all_kmers(combined_X, combined_y, forward_dict, s_kmer)
kmers_global.extend(train_kmers)
test_kmers = utils.get_all_kmers(combined_te_X, combined_te_y, forward_dict, s_kmer)
kmers_global.extend(test_kmers)
kmers_global = list(set(kmers_global))
kmer_f_dict = {i + 1: kmers_global[i] for i in range(0, len(kmers_global))}
kmer_r_dict = {kmers_global[i]: i + 1 for i in range(0, len(kmers_global))}
utils.save_as_json(PATH_KMER_F_DICT, kmer_f_dict)
utils.save_as_json(PATH_KMER_R_DICT, kmer_r_dict)
kmer_f_dict[0] = "<start>"
#kmer_f_dict[len(kmers_global)+1] = "<end>"
kmer_r_dict["<start>"] = 0
#kmer_r_dict["<end>"] = len(kmers_global)+1
print(kmer_f_dict, len(kmer_f_dict))
print()
print(print(kmer_r_dict, len(kmer_r_dict)))
#sys.exit()
vocab_size = len(kmer_f_dict) + 1
print("Number of kmers: {}".format(str(len(kmer_f_dict) - 1)))
print("Vocab size: {}".format(str(len(kmer_f_dict) + 1)))
combined_X, combined_y = utils.encode_sequences_kmers(forward_dict, kmer_r_dict, combined_X, combined_y, s_kmer)
combined_te_X, combined_te_y = utils.encode_sequences_kmers(forward_dict, kmer_r_dict, combined_te_X, combined_te_y, s_kmer)
print(combined_X[0])
print(combined_y[0])'''
kmer_f_dict = utils.read_json(PATH_KMER_F_DICT)
kmer_r_dict = utils.read_json(PATH_KMER_R_DICT)
#print(kmer_f_dict)
vocab_size = len(kmer_f_dict) + 1
print("Number of kmers: {}".format(str(len(kmer_f_dict))))
print("Vocab size: {}".format(str(len(kmer_f_dict) + 1)))
#kmer_f_dict = dict()
#kmer_r_dict = dict()
#vocab_size = len(forward_dict) + 1
combined_X = np.array(combined_X)
combined_y = np.array(combined_y)
    test_dataset_in = np.array(combined_te_X)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-06-13 10:34:43
# @Author : <NAME> & <NAME> (<EMAIL>)
# @Link : http://iridescent.ink
# @Version : $1.0$
import numpy as np
def normalization(x):
x = x.astype('float32')
mu = np.average(x)
    std = np.std(x)
    # assumed completion of the truncated function: standard z-score normalisation
    return (x - mu) / std
# -*- coding: utf-8 -*-
"""
Created on Thu May 27 11:53:42 2021
@author: Shubham
"""
import os, numpy as np
import cv2
import random
import torch
import torch.utils.data as data
import xml.etree.ElementTree as ET
from abc import ABCMeta, abstractmethod
import scipy.cluster.vq as vq
import pickle
import pandas as pd
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from cv2 import imread, resize
from numpy import concatenate
from sklearn.metrics import accuracy_score
from sklearn.semi_supervised import LabelPropagation
from sklearn.model_selection import train_test_split
import argparse
from imblearn.under_sampling import RandomUnderSampler
from skimage import feature
import warnings
warnings.filterwarnings("ignore")
"""
Data Loader reading the files, extracting individual objects from each image
"""
class DataLoader(data.Dataset):
def __init__(self,data_path="", trainval='trainval',transform=None):
self.data_path = data_path
self.transform = transform
self.trainval = trainval
self.__init_classes()
self.names, self.labels, self.lable_set, self.bounding_box = self.__dataset_info()
def __getitem__(self, index):
self.data = []
self.lables = []
x = imread(self.data_path+'JPEGImages/'+self.names[index]+'.jpg')
#x = resize(x, (256,256))
#print(self.bounding_box[index])
x_min, y_min, x_max, y_max = self.bounding_box[index]
for i in range(len(x_min)):
#print(i)
sub_img = x[y_min[i]:y_max[i],x_min[i]:x_max[i]]
#print(sub_img.shape)
#sub_img = resize(sub_img, (64,64))
sub_img = cv2.resize(sub_img, (64, 64),
interpolation=cv2.INTER_NEAREST)
self.data.append(sub_img)
self.lables.append(self.lable_set[index][i])
#print(self.lable_set[index])
#print(len(self.lable_set[index]))
#print(len(self.bounding_box[index]))
#x = Image.fromarray(x)
if self.transform !=None:
x = self.transform(x)
y = self.labels[index]
#return x, y
def __fetchdata__(self):
return self.data, self.lables
def __len__(self):
return len(self.names)
def __dataset_info(self):
#annotation_files = os.listdir(self.data_path+'/Annotations')
with open(self.data_path+'ImageSets/Main/'+self.trainval+'.txt') as f:
annotations = f.readlines()
annotations = [n[:-1] for n in annotations]
names = []
labels = []
lable_set = []
bounding_box = []
for af in annotations:
filename = os.path.join(self.data_path,'Annotations',af)
tree = ET.parse(filename+'.xml')
objs = tree.findall('object')
num_objs = len(objs)
bdg_box = [obj.find('bndbox') for obj in objs]
x_min = [int(box.find('xmin').text.lower().strip()) for box in bdg_box]
y_min = [int(box.find('ymin').text.lower().strip()) for box in bdg_box]
x_max = [int(box.find('xmax').text.lower().strip()) for box in bdg_box]
y_max = [int(box.find('ymax').text.lower().strip()) for box in bdg_box]
coords = (x_min, y_min, x_max, y_max)
boxes_cl = np.zeros((num_objs), dtype=np.int32)
temp_lbls = []
for ix, obj in enumerate(objs):
cls = self.class_to_ind[obj.find('name').text.lower().strip()]
boxes_cl[ix] = cls
temp_lbls.append(cls)
lbl = np.zeros(self.num_classes)
lbl[boxes_cl] = 1
labels.append(lbl)
names.append(af)
lable_set.append(temp_lbls)
bounding_box.append(coords)
return np.array(names), np.array(labels).astype(np.float32), lable_set, bounding_box
def __init_classes(self):
self.classes = ('aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
self.num_classes = len(self.classes)
self.class_to_ind = dict(zip(self.classes, range(self.num_classes)))
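
# Illustrative usage sketch (not part of the original script); the data_path below is
# an assumption and must point to a local PASCAL VOC folder containing Annotations/,
# ImageSets/ and JPEGImages/. Indexing the loader crops every annotated object of that
# image to 64x64, and __fetchdata__() returns the crops and labels of the image
# processed last.
#
# voc = DataLoader(data_path='VOCdevkit/VOC2012/', trainval='trainval')
# voc[0]                                      # extract object crops from image 0
# crops, crop_labels = voc.__fetchdata__()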
"""
local binary pattern
"""
class LocalBinaryPatterns:
def __init__(self, numPoints, radius):
# store the number of points and radius
self.numPoints = numPoints
self.radius = radius
def describe(self, image, eps=1e-7):
# compute the Local Binary Pattern representation
# of the image, and then use the LBP representation
# to build the histogram of patterns
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
lbp = feature.local_binary_pattern(image, self.numPoints,
self.radius, method="uniform")
(hist, _) = np.histogram(lbp.ravel(),
bins=np.arange(0, self.numPoints + 3),
range=(0, self.numPoints + 2))
# normalize the histogram
hist = hist.astype("float")
hist /= (hist.sum() + eps)
# return the histogram of Local Binary Patterns
return hist
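
# Illustrative usage sketch (not part of the original script); the image path is an
# assumption. describe() converts the BGR image to grayscale and returns a normalised
# uniform-LBP histogram with numPoints + 2 bins.
#
# lbp = LocalBinaryPatterns(numPoints=24, radius=8)
# hist = lbp.describe(cv2.imread('object_crop.jpg'))   # 26-dimensional feature vector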
"""
color layout descriptor
"""
class DescriptorComputer:
__metaclass__ = ABCMeta
@abstractmethod
def compute(self, frame):
pass
class ColorLayoutComputer(DescriptorComputer):
def __init__(self):
self.rows = 8
self.cols = 8
self.prefix = "CLD"
def compute(self, img):
averages = np.zeros((self.rows,self.cols,3))
imgH, imgW, _ = img.shape
for row in range(self.rows):
for col in range(self.cols):
row_start = int(imgH/self.rows * row)
row_end = int(imgH/self.rows * (row+1))
col_start = int(imgW/self.cols*col)
col_end = int(imgW/self.cols*(col+1))
slice1 = img[row_start:row_end, col_start:col_end]
#slice1 = img[imgH/self.rows * row: imgH/self.rows * (row+1), imgW/self.cols*col : imgW/self.cols*(col+1)]
#print(slice)
average_color_per_row = np.mean(slice1, axis=0)
average_color = np.mean(average_color_per_row, axis=0)
average_color = np.uint8(average_color)
averages[row][col][0] = average_color[0]
averages[row][col][1] = average_color[1]
averages[row][col][2] = average_color[2]
icon = cv2.cvtColor(np.array(averages, dtype=np.uint8), cv2.COLOR_BGR2YCR_CB)
y, cr, cb = cv2.split(icon)
dct_y = cv2.dct(np.float32(y))
dct_cb = cv2.dct(np.float32(cb))
dct_cr = cv2.dct(np.float32(cr))
dct_y_zigzag = []
dct_cb_zigzag = []
dct_cr_zigzag = []
flip = True
flipped_dct_y = np.fliplr(dct_y)
flipped_dct_cb = np.fliplr(dct_cb)
flipped_dct_cr = np.fliplr(dct_cr)
for i in range(self.rows + self.cols -1):
k_diag = self.rows - 1 - i
diag_y = np.diag(flipped_dct_y, k=k_diag)
            diag_cb = np.diag(flipped_dct_cb, k=k_diag)
import torch
import torch.nn.functional as F
import argparse
import cv2
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
num_classes = 10
img_height, img_width = 28, 28
channel = 1
GPU = False
torch.manual_seed(0)
# GPU
device = torch.device("cuda" if GPU else "cpu")
class Generator(torch.nn.Module):
def __init__(self):
self.in_h = img_height // 4
self.in_w = img_width // 4
self.base = 128
super(Generator, self).__init__()
self.lin = torch.nn.ConvTranspose2d(100 + num_classes, self.base * 2, kernel_size=self.in_h, stride=1, bias=False)
self.bnin = torch.nn.BatchNorm2d(self.base * 2)
self.l3 = torch.nn.ConvTranspose2d(self.base * 2, self.base, kernel_size=4, stride=2, padding=1, bias=False)
self.bn3 = torch.nn.BatchNorm2d(self.base)
self.l4 = torch.nn.ConvTranspose2d(self.base, channel, kernel_size=4, stride=2, padding=1, bias=False)
def forward(self, x, y, test=False):
#x = torch.cat((x, y), dim=1)
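        # build a one-hot label tensor of shape (N, num_classes, 1, 1) and append it to
        # the noise vector along the channel axis (conditional GAN input)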
con_x = np.zeros((len(y), num_classes, 1, 1), dtype=np.float32)
con_x[np.arange(len(y)), y] = 1
con_x = torch.tensor(con_x, dtype=torch.float).to(device)
x = torch.cat((x, con_x), dim=1)
x = self.lin(x)
x = self.bnin(x)
x = torch.nn.functional.relu(x)
x = self.l3(x)
x = self.bn3(x)
x = torch.nn.functional.relu(x)
x = self.l4(x)
x = torch.tanh(x)
if test:
return x
else:
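            # during adversarial training, tile the label as one-hot feature maps of the
            # full image size so the discriminator also receives the condition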
con_x = np.zeros((len(y), num_classes, img_height, img_width), dtype=np.float32)
con_x[np.arange(len(y)), y] = 1
con_x = torch.tensor(con_x).to(device)
out_x = torch.cat((x, con_x), dim=1)
return out_x
class Discriminator(torch.nn.Module):
def __init__(self):
self.base = 64
super(Discriminator, self).__init__()
self.l1 = torch.nn.Conv2d(channel + num_classes, self.base, kernel_size=5, padding=2, stride=2)
self.l2 = torch.nn.Conv2d(self.base, self.base * 2, kernel_size=5, padding=2, stride=2)
#self.bn2 = torch.nn.BatchNorm2d(self.base * 2)
self.l5 = torch.nn.Linear((img_height // 4) * (img_width // 4) * self.base * 2, 1)
def forward(self, x):
x = self.l1(x)
x = torch.nn.functional.leaky_relu(x, 0.2)
x = self.l2(x)
#x = self.bn2(x)
x = torch.nn.functional.leaky_relu(x, 0.2)
x = x.view([-1, (img_height // 4) * (img_width // 4) * self.base * 2])
x = self.l5(x)
x = torch.sigmoid(x)
return x
class GAN(torch.nn.Module):
def __init__(self, g, d):
super(GAN, self).__init__()
self.g = g
self.d = d
def forward(self, x, y):
x = self.g(x, y)
x = self.d(x)
return x
import gzip
import os  # needed for the dataset download/caching below
import numpy as np
import matplotlib.pyplot as plt
def load_mnist():
dir_path = 'drive/My Drive/Colab Notebooks/' + "mnist_datas"
files = ["train-images-idx3-ubyte.gz",
"train-labels-idx1-ubyte.gz",
"t10k-images-idx3-ubyte.gz",
"t10k-labels-idx1-ubyte.gz"]
# download mnist datas
if not os.path.exists(dir_path):
os.makedirs(dir_path)
data_url = "http://yann.lecun.com/exdb/mnist/"
for file_url in files:
after_file = file_url.split('.')[0]
if os.path.exists(dir_path + '/' + after_file):
continue
os.system("wget {}/{}".format(data_url, file_url))
os.system("mv {} {}".format(file_url, dir_path))
# load mnist data
# load train data
with gzip.open(dir_path + '/' + files[0], 'rb') as f:
train_x = np.frombuffer(f.read(), np.uint8, offset=16)
train_x = train_x.astype(np.float32)
train_x = train_x.reshape((-1, 28, 28, 1))
print("train images >>", train_x.shape)
with gzip.open(dir_path + '/' + files[1], 'rb') as f:
train_y = np.frombuffer(f.read(), np.uint8, offset=8)
print("train labels >>", train_y.shape)
# load test data
with gzip.open(dir_path + '/' + files[2], 'rb') as f:
test_x = np.frombuffer(f.read(), np.uint8, offset=16)
test_x = test_x.astype(np.float32)
test_x = test_x.reshape((-1, 28, 28, 1))
print("test images >>", test_x.shape)
with gzip.open(dir_path + '/' + files[3], 'rb') as f:
test_y = np.frombuffer(f.read(), np.uint8, offset=8)
print("test labels >>", test_y.shape)
return train_x, train_y ,test_x, test_y
# train
def train():
# GPU
device = torch.device("cuda" if GPU else "cpu")
# model
gen = Generator().to(device)
dis = Discriminator().to(device)
    gan = GAN(gen, dis)
#gan = torch.nn.Sequential(gen, dis)
opt_d = torch.optim.Adam(dis.parameters(), lr=0.0002, betas=(0.5, 0.999))
opt_g = torch.optim.Adam(gen.parameters(), lr=0.0002, betas=(0.5, 0.999))
    train_x, train_y, test_x, test_y = load_mnist()
xs = train_x / 127.5 - 1
xs = xs.transpose(0, 3, 1, 2)
ys = np.zeros([train_y.shape[0], num_classes, 1, 1], np.float32)
ys[np.arange(train_y.shape[0]), train_y] = 1
# training
mb = 64
mbi = 0
train_ind = np.arange(len(xs))
np.random.seed(0)
np.random.shuffle(train_ind)
for i in range(20000):
if mbi + mb > len(xs):
mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
# The purpose of these tests is to ensure that calling ufuncs with quantities
# returns quantities with the right units, or raises exceptions.
import numpy as np
from numpy.testing.utils import assert_allclose
from ... import units as u
from ...tests.helper import pytest, raises
class TestUfuncCoverage(object):
"""Test that we cover all ufunc's"""
def test_coverage(self):
all_np_ufuncs = set([ufunc for ufunc in np.core.umath.__dict__.values()
if type(ufunc) == np.ufunc])
from .. import quantity_helper as qh
all_q_ufuncs = (qh.UNSUPPORTED_UFUNCS |
set(qh.UFUNC_HELPERS.keys()))
assert all_np_ufuncs - all_q_ufuncs == set([])
assert all_q_ufuncs - all_np_ufuncs == set([])
class TestQuantityTrigonometricFuncs(object):
"""
Test trigonometric functions
"""
def test_sin_scalar(self):
q = np.sin(30. * u.degree)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value, 0.5)
def test_sin_array(self):
q = np.sin(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value,
np.array([0., 1. / np.sqrt(2.), 1.]), atol=1.e-15)
def test_arcsin_scalar(self):
q1 = 30. * u.degree
q2 = np.arcsin(np.sin(q1)).to(q1.unit)
assert_allclose(q1.value, q2.value)
def test_arcsin_array(self):
q1 = np.array([0., np.pi / 4., np.pi / 2.]) * u.radian
q2 = np.arcsin(np.sin(q1)).to(q1.unit)
assert_allclose(q1.value, q2.value)
def test_sin_invalid_units(self):
with pytest.raises(TypeError) as exc:
np.sin(3. * u.m)
assert exc.value.args[0] == ("Can only apply 'sin' function "
"to quantities with angle units")
def test_arcsin_invalid_units(self):
with pytest.raises(TypeError) as exc:
np.arcsin(3. * u.m)
assert exc.value.args[0] == ("Can only apply 'arcsin' function to "
"dimensionless quantities")
def test_cos_scalar(self):
q = np.cos(np.pi / 3. * u.radian)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value, 0.5)
def test_cos_array(self):
q = np.cos(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value,
np.array([1., 1. / np.sqrt(2.), 0.]), atol=1.e-15)
def test_arccos_scalar(self):
q1 = np.pi / 3. * u.radian
q2 = np.arccos(np.cos(q1)).to(q1.unit)
assert_allclose(q1.value, q2.value)
def test_arccos_array(self):
q1 = np.array([0., np.pi / 4., np.pi / 2.]) * u.radian
q2 = np.arccos(np.cos(q1)).to(q1.unit)
assert_allclose(q1.value, q2.value)
def test_cos_invalid_units(self):
with pytest.raises(TypeError) as exc:
np.cos(3. * u.s)
assert exc.value.args[0] == ("Can only apply 'cos' function "
"to quantities with angle units")
def test_arccos_invalid_units(self):
with pytest.raises(TypeError) as exc:
np.arccos(3. * u.s)
assert exc.value.args[0] == ("Can only apply 'arccos' function to "
"dimensionless quantities")
def test_tan_scalar(self):
q = np.tan(np.pi / 3. * u.radian)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value, np.sqrt(3.))
def test_tan_array(self):
        q = np.tan(np.array([0., 45., 135., 180.]) * u.degree)