| prompt (string, lengths 19–879k) | completion (string, lengths 3–53.8k) | api (string, lengths 8–59) |
|---|---|---|
#!/usr/bin/env python
import AcqirisWrapper as Aq
import InstrumentDriver
from InstrumentConfig import InstrumentQuantity
import numpy as np
# for long integer py2/py3 compatibility
from builtins import int
class Error(Exception):
pass
class Driver(InstrumentDriver.InstrumentWorker):
""" This class implements the Acqiris card driver"""
def performOpen(self, options={}):
"""Perform the operation of opening the instrument connection"""
# init object
self.dig = None
self.timeout = self.dComCfg['Timeout']
# keep track of sampled traces, elements are I, Q, signal, single shot
self.lTrace = [np.array([]), np.array([]), 0.0, np.array([], dtype=complex)]
self.lSignalNames = ['Ch1 - Data', 'Ch2 - Data', 'Signal', 'Signal - Single shot']
self.dt = 1.0
try:
# open connection
self.dig = Aq.AcqirisDigitizer()
self.dig.init(self.comCfg.address, True, True)
except Exception as e:
# re-cast afdigitizer errors as a generic communication error
msg = str(e)
raise InstrumentDriver.CommunicationError(msg)
def performClose(self, bError=False, options={}):
"""Perform the close instrument connection operation"""
# check if digitizer object exists
try:
if self.dig is None:
# do nothing, object doesn't exist (probably was never opened)
return
except:
# never return error here, do nothing, object doesn't exist
return
try:
# close and remove object
self.dig.close()
self.dig.closeAll()
del self.dig
except:
# never return error here
pass
def performSetValue(self, quant, value, sweepRate=0.0, options={}):
"""Perform the Set Value instrument operation. This function should
return the actual value set by the instrument"""
# start with setting current quant value
quant.setValue(value)
# get values from relevant quants
if quant.name == 'Acquisition type':
mode = int(quant.getCmdStringFromValue(value))
self.dig.configMode(mode)
# update # of samples parameter, since it may change when averaging
self.readValueFromOther('Number of samples')
elif quant.name in ('Number of samples', 'Number of segments'):
# first, single trace cfg, get values from relevant quants and set all
nSample = int(self.getValue('Number of samples'))
nSegment = int(self.getValue('Number of segments'))
self.dig.configMemory(nSample, nSegment)
# set averager settings
if quant.name == 'Number of samples':
self.dig.configAvgConfig(1, 'NbrSamples', int(value))
self.dig.configAvgConfig(2, 'NbrSamples', int(value))
elif quant.name == 'Number of segments':
self.dig.configAvgConfig(1, 'NbrSegments', int(value))
self.dig.configAvgConfig(2, 'NbrSegments', int(value))
elif quant.name == 'Number of averages':
self.dig.configAvgConfig(1, 'NbrWaveforms', int(value))
self.dig.configAvgConfig(2, 'NbrWaveforms', int(value))
elif quant.name in ('Sample interval', 'Delay time'):
sampInterval = self.getValue('Sample interval')
delayTime = self.getValue('Delay time')
# set single trace or sample interval
self.dig.configHorizontal(sampInterval, delayTime)
if quant.name == 'Delay time':
# for averaging mode, set delay in data points
self.dig.configAvgConfig(1, 'StartDelay', int(value/sampInterval))
self.dig.configAvgConfig(2, 'StartDelay', int(value/sampInterval))
elif quant.name in ('Trig source', 'Trig coupling', 'Trig slope', 'Trig level'):
# get values from relevant quants and set all
trigSource = int(self.getCmdStringFromValue('Trig source'))
trigCoupling = int(self.getCmdStringFromValue('Trig coupling'))
trigSlope = int(self.getCmdStringFromValue('Trig slope'))
trigLevel = self.getValue('Trig level')
# for Ch1/Ch2 trig sources the card expects the level as a percentage of full range, so convert from voltage
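# (the expression below reduces to 100*(trigLevel - offset)/fullRange, i.e. the level as a
# percentage of full scale measured from the channel offset)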
if trigSource == 1:
fullRange = float(self.getCmdStringFromValue('Ch1 - Range'))
offset = float(self.getValue('Ch1 - Offset'))
trigLevel = 100*(0.5 - (offset + fullRange/2.0 - trigLevel)/fullRange)
elif trigSource == 2:
fullRange = float(self.getCmdStringFromValue('Ch2 - Range'))
offset = float(self.getValue('Ch2 - Offset'))
trigLevel = 100*(0.5 - (offset + fullRange/2.0 - trigLevel)/fullRange)
else:
# trig level is in millivolt
trigLevel = trigLevel*1000.0
self.dig.configTrigSource(trigSource, trigCoupling, trigSlope, trigLevel,
trigLevel2=0.0)
# change active trigger if source was changed
if quant.name == 'Trig source':
dPattern = {1: int(0x00000001), 2: int(0x00000002), -1: int(0x80000000)}
self.dig.configTrigClass(dPattern[trigSource])
elif quant.name == '10 MHz Reference':
# get values from relevant quants and set all
clockType = int(self.getCmdStringFromValue('10 MHz Reference'))
self.dig.configExtClock(clockType)
elif quant.name == 'Ch1 - Enabled':
# do nothing for enabling/disabling
pass
elif quant.name in ('Ch1 - Coupling', 'Ch1 - Bandwidth', 'Ch1 - Range', 'Ch1 - Offset'):
# get values from relevant quants and set all
fullScale = float(self.getCmdStringFromValue('Ch1 - Range'))
offset = float(self.getValue('Ch1 - Offset'))
coupling = int(self.getCmdStringFromValue('Ch1 - Coupling'))
bandwidth = int(self.getCmdStringFromValue('Ch1 - Bandwidth'))
self.dig.configVertical(1, fullScale, -offset, coupling, bandwidth)
# re-set trigger level, if needed (to reflect new offset/range)
trigSource = int(self.getCmdStringFromValue('Trig source'))
if trigSource == 1:
trigLev = float(self.getValue('Trig level'))
self.sendValueToOther('Trig level', trigLev)
elif quant.name == 'Ch2 - Enabled':
# do nothing
pass
elif quant.name in ('Ch2 - Coupling', 'Ch2 - Bandwidth', 'Ch2 - Range', 'Ch2 - Offset'):
# get values from relevant quants and set all
fullScale = float(self.getCmdStringFromValue('Ch2 - Range'))
offset = float(self.getValue('Ch2 - Offset'))
coupling = int(self.getCmdStringFromValue('Ch2 - Coupling'))
bandwidth = int(self.getCmdStringFromValue('Ch2 - Bandwidth'))
self.dig.configVertical(2, fullScale, -offset, coupling, bandwidth)
# re-set trigger level, if needed (to reflect new offset/range)
trigSource = int(self.getCmdStringFromValue('Trig source'))
if trigSource == 2:
trigLev = float(self.getValue('Trig level'))
self.sendValueToOther('Trig level', trigLev)
elif quant.name in ('Modulation frequency', 'Skip start', 'Length',
'Use Ch2 as reference'):
# do nothing for these quantities, the value will be stored in local quant
pass
# finish set value with get value, to make sure we catch any coercing
return self.performGetValue(quant)
def performGetValue(self, quant, options={}):
"""Perform the Get Value instrument operation"""
aqType = self.getValue('Acquisition type')
if quant.name == 'Acquisition type':
value = quant.getValueFromCmdString(str(self.dig.getMode()[0]))
elif quant.name == 'Number of samples':
if aqType == 'Normal':
value = float(self.dig.getMemory()[0])
else:
value = float(self.dig.getAvgConfig(1, 'NbrSamples'))
elif quant.name == 'Number of segments':
if aqType == 'Normal':
value = float(self.dig.getMemory()[1])
else:
value = float(self.dig.getAvgConfig(1, 'NbrSegments'))
elif quant.name == 'Number of averages':
value = float(self.dig.getAvgConfig(1, 'NbrWaveforms'))
elif quant.name == 'Sample interval':
value = float(self.dig.getHorizontal()[0])
elif quant.name == 'Delay time':
if aqType == 'Normal':
value = float(self.dig.getHorizontal()[1])
else:
# convert from delay in points to delay in time
sampInterval = self.getValue('Sample interval')
value = sampInterval * self.dig.getAvgConfig(1, 'StartDelay')
elif quant.name == 'Trig source':
pattern = abs(self.dig.getTrigClass()[0])
dPattern = {int(0x00000001): 1, int(0x00000002): 2, int(0x80000000): -1}
value = quant.getValueFromCmdString(str(dPattern[pattern]))
elif quant.name == 'Trig coupling':
# get from current trig source
trigSource = int(self.getCmdStringFromValue('Trig source'))
value = quant.getValueFromCmdString( \
str(self.dig.getTrigSource(trigSource)[0]))
elif quant.name == 'Trig slope':
# get from current trig source
trigSource = int(self.getCmdStringFromValue('Trig source'))
value = quant.getValueFromCmdString( \
str(self.dig.getTrigSource(trigSource)[1]))
elif quant.name == 'Trig level':
# get from current trig source
trigSource = int(self.getCmdStringFromValue('Trig source'))
trigLevel = self.dig.getTrigSource(trigSource)[2]
# if Ch1/Ch2, trig level is percentage of full range
if trigSource == 1:
fullRange = float(self.getCmdStringFromValue('Ch1 - Range'))
offset = float(self.getValue('Ch1 - Offset'))
value = offset + fullRange*trigLevel/100.0
elif trigSource == 2:
fullRange = float(self.getCmdStringFromValue('Ch2 - Range'))
offset = float(self.getValue('Ch2 - Offset'))
value = offset + fullRange*trigLevel/100.0
else:
# trig level is in millivolt
value = trigLevel/1000.0
elif quant.name == '10 MHz Reference':
# get values from relevant quants and set all
value = quant.getValueFromCmdString(str(self.dig.getExtClock()[0]))
elif quant.name == 'Ch1 - Enabled':
# do nothing for enabling/disabling
value = quant.getValue()
elif quant.name == 'Ch1 - Coupling':
value = quant.getValueFromCmdString(str(self.dig.getVertical(1)[2]))
elif quant.name == 'Ch1 - Bandwidth':
value = quant.getValueFromCmdString(str(self.dig.getVertical(1)[3]))
elif quant.name == 'Ch1 - Range':
value = quant.getValueFromCmdString('%.2f' % self.dig.getVertical(1)[0])
elif quant.name == 'Ch1 - Offset':
value = - self.dig.getVertical(1)[1]
elif quant.name == 'Ch2 - Enabled':
# do nothing
value = quant.getValue()
elif quant.name == 'Ch2 - Coupling':
value = quant.getValueFromCmdString(str(self.dig.getVertical(2)[2]))
elif quant.name == 'Ch2 - Bandwidth':
value = quant.getValueFromCmdString(str(self.dig.getVertical(2)[3]))
elif quant.name == 'Ch2 - Range':
value = quant.getValueFromCmdString('%.2f' % self.dig.getVertical(2)[0])
elif quant.name == 'Ch2 - Offset':
value = - self.dig.getVertical(2)[1]
# signals
elif quant.name in self.lSignalNames:
# special case for hardware looping
if self.isHardwareLoop(options):
value = self.getSignalHardwareLoop(quant, options)
else:
# no hardware loop, just get traces if first call
if self.isFirstCall(options):
self.getTraces(bArm=not self.isHardwareTrig(options))
# return correct data
indx = self.lSignalNames.index(quant.name)
if quant.name in ('Ch1 - Data', 'Ch2 - Data'):
value = InstrumentQuantity.getTraceDict(self.lTrace[indx], dt=self.dt)
else:
value = self.lTrace[indx]
elif quant.name in ('Modulation frequency', 'Skip start', 'Length',
'Use Ch2 as reference', 'Enable demodulation'):
# just return the quantity value
value = quant.getValue()
return value
def _callbackProgress(self, progress):
"""Report progress to server, as text string"""
s = 'Acquiring traces (%.0f%%)' % (100*progress)
self.reportStatus(s)
def performArm(self, quant_names, options={}):
"""Perform the instrument arm operation"""
# make sure we are arming for reading traces, if not return
signal_names = ['Ch%d - Data' % (n + 1) for n in range(2)]
signal_arm = [name in signal_names for name in quant_names]
if not np.any(signal_arm):
return
# start acquisition
if self.isHardwareLoop(options):
(seq_no, n_seq) = self.getHardwareLoopIndex(options)
nSample = int(self.getValue('Number of samples'))
nAverage = int(self.getValue('Number of averages'))
self.dig.getRoundRobinData(nSample, n_seq, nAverage,
bConfig=True, bArm=True, bMeasure=False)
else:
self.getTraces(bArm=True, bMeasure=False)
def getSignalHardwareLoop(self, quant, options):
"""Get data from round-robin type averaging"""
(seq_no, n_seq) = self.getHardwareLoopIndex(options)
# if first sequence call, get data
if seq_no == 0 and self.isFirstCall(options):
nSample = int(self.getValue('Number of samples'))
nAverage = int(self.getValue('Number of averages'))
bDemodulation = bool(self.getValue('Enable demodulation'))
self.lTrace = [np.array([]), np.array([])  # api: numpy.array
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize
# In[2]:
# now fit Tc data and plot
isingBeta=0.326
isingAlpha=0.110
def getTau(phi,a,b,s,phiC,tC):
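# Solve phi = phiC + (-1)**(phi <= phiC) * a*tau**isingBeta + s*b*tau**(1 - isingAlpha) for the
# reduced temperature tau at each data point, then convert back to temperature via t = tC*(1 - tau)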
booleanCondition=(phi<=phiC)
tau=np.zeros(phi.shape[0])
for i in range(phi.shape[0]):
tau[i]=scipy.optimize.fsolve(lambda tau: phiC+((-1)**booleanCondition[i])*a*tau**isingBeta+s*b*tau**(1-isingAlpha)-phi[i],0.00001)
t=tC*(1-tau)
return t
def fitTauFunction(data,sC,phiC,tC):
popt,_=scipy.optimize.curve_fit(lambda phi,a,b:getTau(phi,a,b,sC,phiC,tC),data[:,0],data[:,1],[1,1])
a,b=popt[0],popt[1]
return a,b
# In[3]:
#when you have replicates, average over them
def criticalPointAveraging(tcFilename):
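# Note: tcReps (the number of replicate runs) is assumed to be defined in an earlier
# notebook cell; it is not set anywhere in this snippet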
betaCList=[]
phiCList=[]
muCList=[]
sCList=[]
for repNo in range(tcReps):
thisBetaC,thisSC,thisPhiC,thisMuC=np.load(tcFilename+'_rep'+str(repNo)+'.npy')
betaCList.append(thisBetaC)
phiCList.append(thisPhiC)
muCList.append(thisMuC)
sCList.append(thisSC)
betaC=np.mean(betaCList)
phiC=np.mean(phiCList)
muC = np.mean(muCList)  # api: numpy.mean
"""
Implementation of DDPG - Deep Deterministic Policy Gradient
Algorithm and hyperparameter details can be found here:
http://arxiv.org/pdf/1509.02971v2.pdf
The algorithm is tested on the Pendulum-v0 OpenAI gym task
and developed with tflearn + Tensorflow
Author: <NAME>
improved upon the original code by <NAME>
"""
import tensorflow as tf
import numpy as np
import gym
from gym import wrappers
import tflearn
import argparse
import pprint as pp
import os
import logz
from replay_buffer import ReplayBuffer
# ===========================
# Actor and Critic DNNs
# ===========================
class ActorNetwork(object):
"""
Input to the network is the state, output is the action
under a deterministic policy.
The output layer activation is a tanh to keep the action
between -action_bound and action_bound
"""
def __init__(self, sess, state_dim, action_dim, action_bound, learning_rate, tau, batch_size):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.action_bound = action_bound
self.learning_rate = learning_rate
self.tau = tau
self.batch_size = batch_size
# Actor Network
self.inputs, self.out, self.scaled_out = self.create_actor_network()
self.network_params = tf.trainable_variables()
self.saver = tf.train.Saver(self.network_params, max_to_keep=1)
# Target Network
self.target_inputs, self.target_out, self.target_scaled_out = self.create_actor_network()
self.target_network_params = tf.trainable_variables()[
len(self.network_params):]
# Op for periodically updating target network with online network
# weights
self.update_target_network_params = \
[self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) +
tf.multiply(self.target_network_params[i], 1. - self.tau))
for i in range(len(self.target_network_params))]
# This gradient will be provided by the critic network
self.action_gradient = tf.placeholder(tf.float32, [None, self.a_dim])
# Combine the gradients here
# negate the action gradients so that applying the optimizer performs gradient ascent on the critic's Q-value
self.unnormalized_actor_gradients = tf.gradients(
self.scaled_out, self.network_params, -self.action_gradient)
self.actor_gradients = list(map(lambda x: tf.div(x, self.batch_size), self.unnormalized_actor_gradients))
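# tf.gradients sums the per-sample gradients over the minibatch, so dividing by the
# batch size turns the sum into an average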
# Optimization Op
self.optimize = tf.train.AdamOptimizer(self.learning_rate). \
apply_gradients(zip(self.actor_gradients, self.network_params))
self.num_trainable_vars = len(
self.network_params) + len(self.target_network_params)
def create_actor_network(self):
inputs = tflearn.input_data(shape=[None, self.s_dim])
net = tflearn.fully_connected(inputs, 400)
net = tflearn.layers.normalization.batch_normalization(net)
net = tflearn.activations.relu(net)
net = tflearn.fully_connected(net, 300)
net = tflearn.layers.normalization.batch_normalization(net)
net = tflearn.activations.relu(net)
# Final layer weights are init to Uniform[-3e-3, 3e-3]
w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
out = tflearn.fully_connected(
net, self.a_dim, activation='tanh', weights_init=w_init)
# Scale output to -action_bound to action_bound
scaled_out = tf.multiply(out, self.action_bound)
return inputs, out, scaled_out
def train(self, inputs, a_gradient):
self.sess.run(self.optimize, feed_dict={
self.inputs: inputs,
self.action_gradient: a_gradient
})
def predict(self, inputs):
return self.sess.run(self.scaled_out, feed_dict={
self.inputs: inputs
})
def predict_target(self, inputs):
return self.sess.run(self.target_scaled_out, feed_dict={
self.target_inputs: inputs
})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
def get_num_trainable_vars(self):
return self.num_trainable_vars
class CriticNetwork(object):
"""
Input to the network is the state and action, output is Q(s,a).
The action must be obtained from the output of the Actor network.
"""
def __init__(self, sess, state_dim, action_dim, learning_rate, tau, gamma, num_actor_vars):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.learning_rate = learning_rate
self.tau = tau
self.gamma = gamma
# Create the critic network
self.inputs, self.action, self.out = self.create_critic_network()
self.network_params = tf.trainable_variables()[num_actor_vars:]
self.saver = tf.train.Saver(self.network_params, max_to_keep=1)
# Target Network
self.target_inputs, self.target_action, self.target_out = self.create_critic_network()
self.target_network_params = tf.trainable_variables()[(len(self.network_params) + num_actor_vars):]
# Op for periodically updating target network with online network
# weights with regularization
self.update_target_network_params = \
[self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) \
+ tf.multiply(self.target_network_params[i], 1. - self.tau))
for i in range(len(self.target_network_params))]
# Network target (y_i)
self.predicted_q_value = tf.placeholder(tf.float32, [None, 1])
# Define loss and optimization Op
self.loss = tflearn.mean_square(self.predicted_q_value, self.out)
self.optimize = tf.train.AdamOptimizer(
self.learning_rate).minimize(self.loss)
# Get the gradient of the net w.r.t. the action.
# For each action in the minibatch (i.e., for each x in xs),
# this will sum up the gradients of each critic output in the minibatch
# w.r.t. that action. Each output is independent of all
# actions except for one.
self.action_grads = tf.gradients(self.out, self.action)
def create_critic_network(self):
inputs = tflearn.input_data(shape=[None, self.s_dim])
action = tflearn.input_data(shape=[None, self.a_dim])
net = tflearn.fully_connected(inputs, 400)
net = tflearn.layers.normalization.batch_normalization(net)
net = tflearn.activations.relu(net)
# Add the action tensor in the 2nd hidden layer
# Use two temp layers to get the corresponding weights and biases
t1 = tflearn.fully_connected(net, 300)
t2 = tflearn.fully_connected(action, 300)
net = tflearn.activation(
tf.matmul(net, t1.W) + tf.matmul(action, t2.W) + t2.b, activation='relu')
# linear layer connected to 1 output representing Q(s,a)
# Weights are init to Uniform[-3e-3, 3e-3]
w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
out = tflearn.fully_connected(net, 1, weights_init=w_init)
return inputs, action, out
def train(self, inputs, action, predicted_q_value):
return self.sess.run([self.out, self.optimize], feed_dict={
self.inputs: inputs,
self.action: action,
self.predicted_q_value: predicted_q_value
})
def predict(self, inputs, action):
return self.sess.run(self.out, feed_dict={
self.inputs: inputs,
self.action: action
})
def predict_target(self, inputs, action):
return self.sess.run(self.target_out, feed_dict={
self.target_inputs: inputs,
self.target_action: action
})
def action_gradients(self, inputs, actions):
return self.sess.run(self.action_grads, feed_dict={
self.inputs: inputs,
self.action: actions
})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
# Taken from https://github.com/openai/baselines/blob/master/baselines/ddpg/noise.py, which is
# based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab
class OrnsteinUhlenbeckActionNoise:
def __init__(self, mu, sigma=0.3, theta=.15, dt=1e-2, x0=None):
self.theta = theta
self.mu = mu
self.sigma = sigma
self.dt = dt
self.x0 = x0
self.reset()
def __call__(self):
x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + \
self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)
self.x_prev = x
return x
def reset(self):
self.x_prev = self.x0 if self.x0 is not None else np.zeros_like(self.mu)
def __repr__(self):
return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)
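# A minimal usage sketch (hypothetical 1-D action space): sample exploration noise at each
# step, add it to the deterministic action, and reset the process at episode boundaries.
#   actor_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(1))
#   a = actor.predict(np.reshape(s, (1, actor.s_dim))) + actor_noise()
#   actor_noise.reset()  # at the start of each new episode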
# ===========================
# Tensorflow Summary Ops
# ===========================
def build_summaries():
episode_reward = tf.Variable(0.)
tf.summary.scalar("Reward", episode_reward)
episode_ave_max_q = tf.Variable(0.)
tf.summary.scalar("Qmax Value", episode_ave_max_q)
summary_vars = [episode_reward, episode_ave_max_q]
summary_ops = tf.summary.merge_all()
return summary_ops, summary_vars
# ===========================
# Agent Training
# ===========================
def test(sess, env, args, actor, critic):
checkpoint_actor_dir = os.path.join(os.curdir, 'Actor_InvertedPendulum')
if not os.path.exists(checkpoint_actor_dir):
os.makedirs(checkpoint_actor_dir)
ckpt_1 = tf.train.get_checkpoint_state(checkpoint_actor_dir)
checkpoint_critic_dir = os.path.join(os.curdir, 'Critic_InvertedPendulum')
if not os.path.exists(checkpoint_critic_dir):
os.makedirs(checkpoint_critic_dir)
ckpt_2 = tf.train.get_checkpoint_state(checkpoint_critic_dir)
if ckpt_1 and tf.train.checkpoint_exists(ckpt_1.model_checkpoint_path):
print("Reading actor parameters from %s" % ckpt_1.model_checkpoint_path)
actor.saver.restore(sess, ckpt_1.model_checkpoint_path)
if ckpt_2 and tf.train.checkpoint_exists(ckpt_2.model_checkpoint_path):
print("Reading critic parameters from %s" % ckpt_2.model_checkpoint_path)
critic.saver.restore(sess, ckpt_2.model_checkpoint_path)
uninitialized_vars = []
for var in tf.all_variables():
try:
sess.run(var)
except tf.errors.FailedPreconditionError:
uninitialized_vars.append(var)
if len(uninitialized_vars) > 0:
init_new_vars_op = tf.variables_initializer(uninitialized_vars)
sess.run(init_new_vars_op)
# Initialize target network weights
actor.update_target_network()
critic.update_target_network()
s = env.reset()
done = False
total_reward = 0
max_steps = env.spec.timestep_limit
step = 0
from PIL import Image
frames = []
while not done:
frames.append(Image.fromarray(env.render(mode='rgb_array')))
a = actor.predict(np.reshape(s, (1, actor.s_dim)))  # api: numpy.reshape
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import numpy as np
from urllib.parse import urlencode
from io import BytesIO
from astropy.utils.data import download_file
from astropy import units as u
from astropy.io import fits
from astropy.coordinates import ICRS, Galactic, BaseCoordinateFrame
from astropy.coordinates import SkyCoord, Angle, Longitude, Latitude
from astropy import wcs
import cdshealpix
try:
from astropy_healpix import HEALPix
except ImportError:
pass
from ..abstract_moc import AbstractMOC
from ..interval_set import IntervalSet
from .. import mocpy
from .boundaries import Boundaries
from .plot import fill, border
__author__ = "<NAME>, <NAME>, <NAME>"
__copyright__ = "CDS, Centre de Données astronomiques de Strasbourg"
__license__ = "BSD 3-Clause License"
__email__ = "<EMAIL>, <EMAIL>, <EMAIL>"
class MOC(AbstractMOC):
"""
Multi-order spatial coverage class.
A MOC describes the coverage of an arbitrary region on the unit sphere.
MOCs are usually used for describing the global coverage of catalog/image surveys such as GALEX or SDSS.
A MOC corresponds to a list of `HEALPix <https://healpix.sourceforge.io/>`__ cells at different depths.
This class gives you the possibility to:
1. Define `~mocpy.moc.MOC` objects:
- From a FITS file that stores HEALPix cells (see `load(path, 'fits')`).
- Directly from a list of HEALPix cells expressed either as a numpy structured array (see `from_healpix_cells`) or a simple
python dictionary (see `from_json`).
- From a list of sky coordinates (see `from_skycoords`, `from_lonlat`).
- From a convex/concave polygon (see `from_polygon`).
- From a cone (will be implemented in a next version).
2. Perform fast logical operations between `~mocpy.moc.MOC` objects:
- The `intersection`
- The `union`
- The `difference`
- The `complement`
3. Plot the `~mocpy.moc.MOC` objects:
- Draw the MOC with its HEALPix cells (see `fill`)
- Draw the perimeter of a MOC (see `border`)
4. Get the sky coordinates defining the border(s) of `~mocpy.moc.MOC` objects (see `get_boundaries`).
5. Serialize `~mocpy.moc.MOC` objects to `astropy.io.fits.HDUList` or JSON dictionary and save it to a file.
"""
# I introduced, but do not like, the duplicated `make_consistent` (MOC + IntervalSet),
# but `coverage_merge_time_intervals` is no longer generic
# and I can't remove `make_consistent` from `IntervalSet` without changing tests
def __init__(self, interval_set=None, make_consistent=True, min_depth=None):
"""
Moc constructor.
The merging step of the overlapping intervals is done here.
Parameters
----------
interval_set : `~numpy.ndarray`
an N x 2 numpy array representing the set of intervals.
make_consistent : bool, optional
True by default. Merge overlapping intervals so that the result is
a valid MOC (i.e. one that can be plotted, serialized and manipulated).
"""
super(MOC, self).__init__(interval_set)
if make_consistent:
if min_depth is None:
min_depth = -1
min_depth = np.int8(min_depth)
self._merge_intervals(min_depth)
def _merge_intervals(self, min_depth):
if not self.empty():
self._interval_set._intervals = mocpy.coverage_merge_hpx_intervals(self._interval_set._intervals, min_depth)
@property
def max_order(self):
"""
Depth of the smallest HEALPix cells found in the MOC instance.
"""
depth = mocpy.hpx_coverage_depth(self._interval_set._intervals)
depth = np.uint8(depth)  # api: numpy.uint8
import os
import sys
import traceback
import numpy as np
from scipy import signal
import pandas as pd
from dozen import z3d_directory, z3dio, timeio, process
# Inputs and settings
# survey campaign
rx_file = '../data/preprocess/campaign_rx.csv'
tx_file = '../data/preprocess/campaign_tx.csv'
overlaps_file = '../data/preprocess/overlaps.csv'
# calibration files
cal_dir = '../data/calibrations/'
antcal_file = '../data/calibrations/antenna.cal'
# Results file
results_file = 'DoZen.avg'
# Save odd harmonic fourier coefficients for every window for every time series?
save_coefficients = True
coefficients_dir = 'coeff'
os.makedirs(coefficients_dir, exist_ok=True)
# mag orientations
# subset data offsets
start_offset = 2
end_offset = 0
# filtering settings
pad_before = 34
pad_after = 50
notch_bandwidth = 0.2
# short time Fourier transform settings
# window_shape = ('kaiser',3*np.pi)
# window_shape = ('tukey',1./3.)
# window_shape = 'tukey_ten_sample_taper'
window_shape = ('hann')
# window_shape = 'boxcar'
cycles_per_window = 2 # cycle defined as one transmitter waveform
strides_per_cycle = 0.25
# Read z3d directory info
print('Reading directory info...')
rx = pd.read_csv(rx_file,index_col=0)
tx = pd.read_csv(tx_file,index_col=0)
# get start and end as dates
for i_row in rx.index:
file_info = z3dio.get_file_info(rx.loc[i_row,'fullpath'])
if file_info['num_records']==0:
rx.at[i_row,'start'] = -1
rx.at[i_row,'end']= -1
rx.at[i_row,'valid']=False
else:
[start,end] = timeio.get_start_and_end_times_mountain(file_info)
rx.at[i_row,'start'] = pd.Timestamp(start).tz_convert('US/Mountain')
rx.at[i_row,'end']= pd.Timestamp(end).tz_convert('US/Mountain')
for i_row in tx.index:
file_info = z3dio.get_file_info(tx.loc[i_row,'fullpath'])
if file_info['num_records']==0:
tx.at[i_row,'start'] = -1
tx.at[i_row,'end']= -1
tx.at[i_row,'valid']=False
else:
[start,end] = timeio.get_start_and_end_times_mountain(file_info)
tx.at[i_row,'start'] = pd.Timestamp(start).tz_convert('US/Mountain')
tx.at[i_row,'end']= pd.Timestamp(end).tz_convert('US/Mountain')
# Fix errors in station numbering, invalid/duplicate files
# check for duplicate files
# remove duplicates, keeping only the first
tx.drop_duplicates(subset='filename',keep='first',inplace=True)
rx.drop_duplicates(subset='filename',keep='first',inplace=True)
# drop tx Ex files
tx.drop(tx[tx.type=='RX'].index,inplace=True)
# drop tx 256 hz files
tx.drop(tx[tx.sampling_rate==256].index,inplace=True)
# drop invalid files
rx.drop(rx[~rx.valid].index,inplace=True)
tx.drop(tx[~tx.valid].index,inplace=True)
# drop aborted tx files (fewer than 30 seconds)
tx.drop(tx[tx.num_records<30].index,inplace=True)
# drop unassigned stations
rx.dropna(subset=['rx_station_qc'],inplace=True)
# TODO: drop bad tx files (user ID'd?)
# find TX-RX overlaps
print('Finding overlaps...')
overlaps = z3d_directory.find_overlaps(tx,rx,overlaps_csv=overlaps_file)
# trim one bad TX signal: 2019-07-24, 11:41:30, 0.5 Hz
# clip_time_209 = pd.Timestamp('2019-07-24 11:45:20').tz_localize('US/Mountain')
# overlaps.loc[overlaps.tx_ind==209,'end'] = clip_time_209
# Read calibration files
# cal_head = z3dio.read_syscal_header(cal_file)
print('Reading calibration files...')
cals = z3d_directory.read_zen_cals(cal_dir,ask_dir=False)
antcal = z3dio.read_antcal(antcal_file)
# store settings to be accessed in get_calibrated_fc
settings = {}
settings['notch_bandwidth'] = notch_bandwidth
settings['cycles_per_window'] = cycles_per_window
settings['strides_per_cycle'] = strides_per_cycle
settings['window_shape'] = window_shape
settings['cals'] = cals
settings['antcal'] = antcal
def get_calibrated_fc(overlap_data,transmitter_frequency,tx_mask,
sampling_rate,component,box_number,card_number,
antenna_number,settings):
'''
return calibrated Fourier coefficients, useful for computing transfer functions
'settings' is a dictionary with these keywords:
notch_bandwidth
cycles_per_window
strides_per_cycle
window_shape
cals
antcal
settings holds variables that don't change from overlap to overlap
'''
samples_per_cycle = int(round(sampling_rate/transmitter_frequency))
# filter 60 Hz noise
print('Filtering...')
if transmitter_frequency > 16:
# notch filter
f0 = 60.0
bw = settings['notch_bandwidth'] # -3 dB bandwidth
Q = f0/bw
w0 = f0*2./sampling_rate
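# signal.iirnotch expects the notch frequency normalized so that 1.0 corresponds to the
# Nyquist rate, hence w0 = f0 * 2 / sampling_rate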
numerator,denominator = signal.iirnotch(w0,Q)
filt_data = signal.filtfilt(numerator,denominator,overlap_data)
else:
# use mean running period
filt_data = process.waveform_average_filter(
overlap_data,sampling_rate=sampling_rate,mask=tx_mask)
# compute robust running mean
print('Applying drift correction...')
taur = process.robust_running_mean(filt_data,samples_per_cycle)
# apply drift correction
drift = filt_data-taur
# compute short time Fourier transform
print('Computing Fourier coefficients...')
samples_per_window = int(round(samples_per_cycle*settings['cycles_per_window']))
stride=int(round(samples_per_cycle/settings['strides_per_cycle']))
if settings['window_shape'] == 'tukey_ten_sample_taper':
samples_per_window += 10
ten_sample_taper = 20./samples_per_window
window_shape = ('tukey',ten_sample_taper)
else:
window_shape = settings['window_shape']
# stft_str = '{} sampling_rate, {} nperseg, {} noverlap'
# print(stft_str.format(sampling_rate,samples_per_window,samples_per_window-stride))
f,t,fc = signal.stft(drift,sampling_rate,window=window_shape,nperseg=samples_per_window,
noverlap=samples_per_window-stride,boundary=None,padded=False)
num_windows = len(t)
num_freq = len(f)
# apply calibrations
print('Applying calibrations...')
try:
zen_cal = settings['cals'].loc[box_number]
fccb = process.apply_board_cal(fc,f,zen_cal,card_number,sampling_rate)
except KeyError:
print('Zen {} board calibration not found'.format(box_number))
fccb = fc
if component[0] == 'H':
fcc = process.apply_antenna_cal(fccb,f,settings['antcal'],antenna_number)
else:
fcc=fccb
return (f,t,fcc)
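# A hedged usage sketch (hypothetical argument values; the function is presumably called
# once per TX-RX overlap later in the script):
#   f, t, fcc = get_calibrated_fc(overlap_data, transmitter_frequency=0.5, tx_mask=tx_mask,
#                                 sampling_rate=4096, component='Hx', box_number=24,
#                                 card_number=1, antenna_number=287, settings=settings)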
# initialize results arrays
n_pairs = overlaps.shape[0]
processed = [False]*n_pairs
tx_stations = np.empty(n_pairs)
rx_stations = np.empty(n_pairs)
rx_runs = ['']*n_pairs
rx_components = ['']*n_pairs
tx_frequencies = np.empty(n_pairs)
sampling_rates = np.empty(n_pairs)
num_sampless = np.empty(n_pairs)
signal_durations = np.empty(n_pairs)
samples_per_cycles = np.empty(n_pairs)
num_cycless = np.empty(n_pairs)
tx_filenames = ['']*n_pairs
rx_filenames = ['']*n_pairs
dx1s = np.empty(n_pairs)
dy1s = np.empty(n_pairs)
dz1s = np.empty(n_pairs)
dx2s = np.empty(n_pairs)
dy2s = np.empty(n_pairs)
dz2s = np.empty(n_pairs)
azimuths = np.empty(n_pairs)  # api: numpy.empty
import torch
import copy
import sys
import numpy as np
from utils import one_hot_encode, capsnet_testing_loss
from torch.autograd import Variable
from torch.backends import cudnn
from quantization_methods import *
from quantized_models import *
def quantized_test(model, num_classes, data_loader, quantization_function, quantization_bits,
quantization_bits_routing):
""" Function to test the accuracy of the quantized models
Args:
model: pytorch model
num_classes: number of classes of the dataset
data_loader: data loader of the test dataset
quantization_function: quantization function of the quantization method to use
quantization_bits: list, quantization bits for the activations
quantization_bits_routing: list, quantization bits for the dynamic routing
Returns:
accuracy_percentage: accuracy of the quantized model expressed in percentage """
# Switch to evaluate mode
model.eval()
loss = 0
correct = 0
num_batches = len(data_loader)
for data, target in data_loader:
batch_size = data.size(0)
target_one_hot = one_hot_encode(target, length=num_classes)
if torch.cuda.device_count() > 0: # if there are available GPUs, move data to the first visible
device = torch.device("cuda:0")
data = data.to(device)
target = target.to(device)
target_one_hot = target_one_hot.to(device)
# Output predictions
output = model(data, quantization_function, quantization_bits, quantization_bits_routing)
# Sum up batch loss
m_loss = \
capsnet_testing_loss(output, target_one_hot)
loss += m_loss.data
# Count number of correct predictions
# Compute the norm of the vector capsules
v_length = torch.sqrt((output ** 2).sum(dim=2))
assert v_length.size() == torch.Size([batch_size, num_classes])
# Find the index of the longest vector
_, max_index = v_length.max(dim=1)
assert max_index.size() == torch.Size([batch_size])
# vector with 1 where the model makes a correct prediction, 0 where false
correct_pred = torch.eq(target.cpu(), max_index.data.cpu())
correct += correct_pred.sum()
# Log test accuracies
num_test_data = len(data_loader.dataset)
accuracy_percentage = float(correct) * 100.0 / float(num_test_data)
return accuracy_percentage
def qcapsnets(model, model_parameters, full_precision_filename, num_classes, data_loader, top_accuracy,
accuracy_tolerance, memory_budget, quantization_scheme):
""" Q-CapsNets framework - Quantization
Args:
model: string, name of the model
model_parameters: list, parameters to use for the instantiation of the model class
full_precision_filename: string, directory of the full-precision weights
num_classes: number of classes of the dataset
data_loader: data loader of the testing dataset
top_accuracy : maximum accuracy reached by the full_precision trained model (percentage)
accuracy_tolerance: tolerance of the quantized model accuracy with respect to the full precision accuracy.
Provided in percentage
memory_budget: memory budget for the weights of the model. Provided in MB (MegaBytes)
quantization_scheme: quantization scheme to be used by the framework (string, e.g., "truncation")
Returns:
void
"""
print("==> Q-CapsNets Framework")
# instantiate the quantized model with the full-precision weights
model_quant_class = getattr(sys.modules[__name__], model)
model_quant_original = model_quant_class(*model_parameters)
model_quant_original.load_state_dict(torch.load(full_precision_filename))
# Move the model to GPU if available
if torch.cuda.device_count() > 0:
device = torch.device("cuda:0")
model_quant_original.to(device)
cudnn.benchmark = True
# create the quantization functions
possible_functions = globals().copy()
possible_functions.update(locals())
quantization_function_activations = possible_functions.get(quantization_scheme)
if not quantization_function_activations:
raise NotImplementedError("Quantization function %s not implemented" % quantization_scheme)
quantization_function_weights = possible_functions.get(quantization_scheme + "_inplace")
if not quantization_function_weights:
raise NotImplementedError("Quantization function %s not implemented (inplace version)" % quantization_scheme)
# compute the accuracy reduction available for each step
minimum_accuracy = top_accuracy - accuracy_tolerance / 100 * top_accuracy
acc_reduction = top_accuracy - minimum_accuracy
step1_reduction = 5 / 100 * acc_reduction
step1_min_acc = top_accuracy - step1_reduction
print("Full-precision accuracy: ", top_accuracy, "%")
print("Minimum quantized accuracy: ", minimum_accuracy, "%")
print("Memory budget: ", memory_budget, "MB")
print("Quantization method: ", quantization_scheme)
print("\n")
# STEP 1: Layer-Uniform quantization of weights and activations
print("STEP 1")
def step1_quantization_test(quantization_bits):
""" Function to test the model at STEP 1 of the algorithm
The function receives a single "quantization_bits" value N, and creates two lists [N, N, ..., N] and
[N, N, ..., N] for the activations and the dynamic routing, since at STEP 1 all the layers are quantized
uniformly. The weights of each layer are quantized with N bits too and then the accuracy of the model
is computed.
Args:
quantization_bits: single value used for quantizing all the weights and activations
Returns:
acc_temp: accuracy of the model quantized uniformly with quantization_bits bits
"""
quantized_model_temp = copy.deepcopy(model_quant_original)
step1_act_bits_f = [] # list with the quantization bits for the activations
step1_dr_bits_f = [] # list with the quantization bits for the dynamic routing
for c in quantized_model_temp.children():
step1_act_bits_f.append(quantization_bits)
if c.capsule_layer:
if c.dynamic_routing:
step1_dr_bits_f.append(quantization_bits)
for p in c.parameters():
with torch.no_grad():
quantization_function_weights(p, quantization_bits) # Quantize the weights
# test with quantized weights and activations
acc_temp = quantized_test(quantized_model_temp, num_classes, data_loader,
quantization_function_activations, step1_act_bits_f, step1_dr_bits_f)
del quantized_model_temp
return acc_temp
# BINARY SEARCH of the bitwidth for step 1, starting from 32 bits
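# The search halves the interval between the last bitwidth that met step1_min_acc and the
# last one that did not, stopping once the two differ by a single bit and keeping the
# bitwidth that still satisfies the accuracy constraint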
step1_bit_search = [32]
step1_acc_list = [] # list of accuracy at each step of the search algorithm
step1_acc = step1_quantization_test(32)
step1_acc_list.append(step1_acc)
if step1_acc > step1_min_acc:
step1_bit_search_sat = [True] # True if the accuracy is higher than the minimum required
step1_bit_search.append(16)
while True:
step1_acc = step1_quantization_test(step1_bit_search[-1])
step1_acc_list.append(step1_acc)
if step1_acc > step1_min_acc:
step1_bit_search_sat.append(True)
else:
step1_bit_search_sat.append(False)
if (abs(step1_bit_search[-1] - step1_bit_search[-2])) == 1:
step1_bit_search_sat.reverse()
step1_bits = step1_bit_search[
len(step1_bit_search_sat) - 1 - next(k for k, val in enumerate(step1_bit_search_sat) if val)]
step1_bit_search_sat.reverse()
step1_acc = step1_acc_list[
len(step1_bit_search_sat) - 1 - next(k for k, val in enumerate(step1_bit_search_sat) if val)]
break
else:
if step1_acc > step1_min_acc:
step1_bit_search.append(
int(step1_bit_search[-1] - abs(step1_bit_search[-1] - step1_bit_search[-2]) / 2))
else:
step1_bit_search.append(
int(step1_bit_search[-1] + abs(step1_bit_search[-1] - step1_bit_search[-2]) / 2))
else:
step1_bits = 32
step1_acc = step1_acc_list[1]
# Create the lists of bits of STEP 1
step1_act_bits = []
step1_dr_bits = []
step1_weight_bits = []
for c in model_quant_original.children():
step1_act_bits.append(step1_bits)
step1_weight_bits.append(step1_bits)
if c.capsule_layer:
if c.dynamic_routing:
step1_dr_bits.append(step1_bits)
print("STEP 1 output: ")
print("\t Weight bits: \t\t", step1_weight_bits)
print("\t Activation bits: \t\t", step1_act_bits)
print("\t Dynamic Routing bits: \t\t", step1_dr_bits)
print("STEP 1 accuracy: ", step1_acc)
print("\n")
# STEP2 - satisfy memory requirement
# compute the number of weights and biases of each layer/block
print("STEP 2")
number_of_weights_inlayers = []
for c in model_quant_original.children():
param_intra_layer = 0
for p in c.parameters():
param_intra_layer = param_intra_layer + p.numel()
number_of_weights_inlayers.append(param_intra_layer)
number_of_blocks = len(number_of_weights_inlayers)
memory_budget_bits = memory_budget * 8000000 # From MB to bits
minimum_mem_required = np.sum(number_of_weights_inlayers)
if memory_budget_bits < minimum_mem_required:
raise ValueError("The memory budget can not be satisfied, increase it to",
minimum_mem_required / 8000000, " MB at least")
# Compute the number of bits that satisfy the memory budget.
# First try with [N, N-1, N-2, N-3, N-4, N-4, ...].
# If it is not possible, try with [N, N-1, N-2, N-3, N-3, ...]
# and so on until [N, N, N, N, ...] (number of bits uniform across the layers)
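# For example, with bits_memory_sat = 8 and 6 blocks the first pattern tried is
# [8, 7, 6, 5, 4, 4] fractional bits per block (one extra integer bit per weight is
# counted when checking the budget)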
decrease_amount = 5
while decrease_amount >= 0:
bit_decrease = []
if number_of_blocks <= decrease_amount:
i = 0
for r in range(0, number_of_blocks):
bit_decrease.append(i)
i = i - 1
else:
i = 0
for r in range(0, decrease_amount):
bit_decrease.append(i)
i = i - 1
for r in range(decrease_amount, number_of_blocks):
bit_decrease.append(i + 1)
bits_memory_sat = 33
while True:
# decrease N (bits_memory_sat) until the memory budget is satisfied.
bits_memory_sat = bits_memory_sat - 1
memory_occupied = np.sum(np.multiply(number_of_weights_inlayers, np.add(bits_memory_sat + 1, bit_decrease)))
# +1 because bits_memory_sat are the fractional part bits, but we need one for the integer part
if memory_occupied <= memory_budget_bits:
break
step2_weight_bits = list(np.add(bits_memory_sat, bit_decrease))
if step2_weight_bits[-1] >= 0:
break
else:
decrease_amount = decrease_amount - 1
# lists of bitwidths for activations and dynamic routing at STEP 1
step2_act_bits = copy.deepcopy(step1_act_bits)
step2_dr_bits = copy.deepcopy(step1_dr_bits)
# Quantize the weights
model_memory = copy.deepcopy(model_quant_original)
for i, c in enumerate(model_memory.children()):
for p in c.parameters():
with torch.no_grad():
quantization_function_weights(p, step2_weight_bits[i])
step2_acc = quantized_test(model_memory, num_classes, data_loader,
quantization_function_activations, step2_act_bits, step2_dr_bits)
print("STEP 2 output: ")
print("\t Weight bits: \t\t", step2_weight_bits)
print("\t Activation bits: \t\t", step2_act_bits)
print("\t Dynamic Routing bits: \t\t", step2_dr_bits)
print("STEP 2 accuracy: ", step2_acc)
print("\n")
# IF the step 2 accuracy is higher that the minimum required accuracy --> BRANCH A
if step2_acc > minimum_accuracy:
# What is the accuracy that can still be consumed?
branchA_accuracy_budget = step2_acc - minimum_accuracy
step3A_min_acc = step2_acc - branchA_accuracy_budget * 55 / 100
# STEP 3A - layer-wise quantization of activations
print("STEP 3A")
# get the position of the layers that use dynamic routing bits
dynamic_routing_bits_bool = []
for c in model_memory.children():
if c.capsule_layer:
if c.dynamic_routing:
dynamic_routing_bits_bool.append(True)
else:
dynamic_routing_bits_bool.append(False)
layers_dr_position = [pos for pos, val in enumerate(dynamic_routing_bits_bool) if val]
step3a_weight_bits = copy.deepcopy(step2_weight_bits)
step3a_act_bits = copy.deepcopy(step2_act_bits)
step3a_dr_bits = copy.deepcopy(step2_dr_bits)
for l in range(0, len(step3a_act_bits)):
while True:
step3a_acc = quantized_test(model_memory, num_classes, data_loader,
quantization_function_activations, step3a_act_bits, step3a_dr_bits)
if step3a_acc >= step3A_min_acc:
step3a_act_bits[l:] = list(np.add(step3a_act_bits[l:], -1))
for x in range(len(layers_dr_position)):
step3a_dr_bits[x] = step3a_act_bits[layers_dr_position[x]]
else:
step3a_act_bits[l:] = list(np.add(step3a_act_bits[l:], +1))
for x in range(len(layers_dr_position)):
step3a_dr_bits[x] = step3a_act_bits[layers_dr_position[x]]
break
step3a_acc = quantized_test(model_memory, num_classes, data_loader,
quantization_function_activations, step3a_act_bits, step3a_dr_bits)
print("STEP 3A output: ")
print("\t Weight bits: \t\t", step3a_weight_bits)
print("\t Activation bits: \t\t", step3a_act_bits)
print("\t Dynamic Routing bits: \t\t", step3a_dr_bits)
print("STEP 3A accuracy: ", step3a_acc)
print("\n")
# STEP 4A - layer-wise quantization of dynamic routing
print("STEP 4A")
step4a_weight_bits = copy.deepcopy(step2_weight_bits)
step4a_act_bits = copy.deepcopy(step3a_act_bits)
step4a_dr_bits = copy.deepcopy(step3a_dr_bits)
# need to variate only the bits of the layers in which the dynamic routing is actually performed
# (iterations > 1)
dynamic_routing_quantization = []
for c in model_memory.children():
if c.capsule_layer:
if c.dynamic_routing:
if c.dynamic_routing_quantization:
dynamic_routing_quantization.append(True)
else:
dynamic_routing_quantization.append(False)
dr_quantization_pos = [pos for pos, val in enumerate(dynamic_routing_quantization) if val]
# new set of bits only if dynamic routing is performed
dr_quantization_bits = [step4a_dr_bits[x] for x in dr_quantization_pos]
for l in range(0, len(dr_quantization_bits)):
while True:
step4a_acc = quantized_test(model_memory, num_classes, data_loader,
quantization_function_activations, step4a_act_bits, step4a_dr_bits)
if step4a_acc >= minimum_accuracy:
dr_quantization_bits[l:] = list(np.add(dr_quantization_bits[l:], -1))
# update the whole vector step4a_dr_bits
for x in range(0, len(dr_quantization_bits)):
step4a_dr_bits[dr_quantization_pos[x]] = dr_quantization_bits[x]
else:
dr_quantization_bits[l:] = list(np.add(dr_quantization_bits[l:], +1))  # api: numpy.add
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python libraries
import numpy as np
import os
import logging
from time import time
from scipy.sparse import issparse
from sklearn.neighbors import radius_neighbors_graph
from collections import Counter, defaultdict
import igraph # "install -c conda-forge python-igraph"
import matplotlib.pyplot as plt
# import cupy.sparse
# import GPUtil
# memory_pool = cupy.cuda.MemoryPool()
# cupy.cuda.set_allocator(memory_pool.malloc)
# pinned_memory_pool = cupy.cuda.PinnedMemoryPool()
# cupy.cuda.set_pinned_memory_allocator(pinned_memory_pool.malloc)
# Local imports
from rdigraphs.sim_graph.th_ops import ThOps # base_ThOps as ThOps
EPS = np.finfo(float).tiny
def JSdist(p, q):
"""
Compute the Jensen-Shannon distance between probability distributions p
and q.
It assumes that both p and q are normalized and sum up to 1
Parameters
----------
p : numpy array
Probability distribution
q : numpy array
Probability distribution (with the same size as p)
Returns
-------
d : float
JS distance
"""
pe = p + EPS
qe = q + EPS
m = 0.5 * (pe + qe)
# I used entropy method in older versions, but it is much slower.
# D = 0.5 * (entropy(p, m, base=2) + entropy(q, m, base=2))
D = 0.5 * (np.sum(pe * np.log2(pe / m)) + np.sum(qe * np.log2(qe / m)))
return np.sqrt(D)
def JSdist_sp(p, q):
"""
Compute the Jensen-Shannon distance between probability distributions p
and q.
It assumes that both p and q are normalized and sum up to 1. p and q can
be sparse vectors (this is the main difference w.r.t. JSdist())
Parameters
----------
p : numpy array or sparse vector
Probability distribution
q : numpy array or sparse vector
Probability distribution (with the same size as p)
Returns
-------
d : float
JS distance
"""
pi = p.toarray().flatten() + EPS
qi = q.toarray().flatten() + EPS
m = 0.5 * (pi + qi)
D = 0.5 * (np.sum(pi * np.log2(pi / m)) + np.sum(qi * np.log2(qi / m)))
return np.sqrt(D)
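# A small usage sketch of the two distance functions above (assuming inputs normalized to
# sum to 1; the sparse variant takes scipy.sparse row vectors instead):
#   p = np.array([0.5, 0.3, 0.2])
#   q = np.array([0.1, 0.6, 0.3])
#   d = JSdist(p, q)  # a value in [0, 1], equal to 0 only when p == q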
class SimGraph(ThOps):
"""
Generic class to generate similarity graphs from data
"""
def __init__(self, X, blocksize=25_000, useGPU=False, tmp_folder=None,
save_every=1e300):
"""
Stores the main attributes of a class instance
Parameters
----------
X : scipy.sparse.csr or numpy.array
Matrix of node attribute vectors
blocksize : int, optional (default=25_000)
Size (number of rows) of each block in blocwise processing.
useGPU : bool, optional (default=False)
If True, matrix operations are accelerated using GPU
tmp_folder : str or None, optional (defautl = None)
Name of the folder to save temporary files
save_every : int, optional (default=1e300)
Maximum size of the growing lists. The output lists are constructed
incrementally. To avoid memory overload, growing lists are saved
every time they reach this size limit. The full lists are thus
incrementally saved in files.
The default value is extremely large, which de facto implies no
temporary saving.
"""
# Call the initialization method from the parent class to set the
# self.blocksize attribute
super().__init__(blocksize, useGPU, tmp_folder, save_every)
# ###############
# Graph variables
self.X = X # Data matrix
self.n_nodes, self.dim = X.shape # Data dimensions
self.R = None # Radius (distance threshold)
# ###############
# Other variables
# Edges and weights
self.edge_ids = None # List of edges, as pairs (i, j) of indices.
self.weights = None
# Variables for equivalence classes
self.Xeq = None # Reduced feature matrix (one row per equiv. class)
self.n_clusters = None # Number of equivalence classes
self.n_preclusters = None # No. of distinct nonzero feature patterns
self.cluster_ids = None # Equivalence class of each node
# WARNING: the following variable does not represent the number of
# edges in the complete graph, but possibly of the equivalence graph.
# (TBD: rename this variable)
self.n_edges = None # Number of edges of the equivalence graph
return
def _d2_to_sim(self, d2, sim, g=1, rescale=False, R=None):
"""
Transforms a list of squared distance values into a list of similarity
values
Parameters
----------
d2 : list
A list of squared distance values
sim : string
Similarity measure. It is used to compute the radius bound
g : int or float, optional (default=1)
Power factor. Use g != 1 to apply nonlinear mapping
rescale : boolean, optional (default=False)
If True, similarities are rescaled such that distance R is mapped
to zero similarity
R : float or None, optional (default=None)
(only for rescale=True). Radius bound
Returns
-------
s : list
A list of similarity values with the same size as d2
"""
# Here we need to reassign R to avoid R >> 2 (which is the max
# He value). Otherwise, normalization z/R**2 could be harmful.
if sim in ['JS', 'He->JS', 'He2->JS', 'l1->JS']:
Rmax = 1
R2max = 1
elif sim in ['l1']:
Rmax = 2
R2max = 4
elif sim in ['He', 'He2', 'l2']:
Rmax = np.sqrt(2)
R2max = 2
else:
logging.error("-- -- Error in _d2_to_sim: unknown similarity")
if rescale:
Rmax = min(R, Rmax)
R2max = min(R**2, R2max)
# Compute similarities from distances
if g != 1:
# s = [(1 - x**g / Rmax**(2 * g)) for x in d2]
s = [(1 - x**g / R2max**g) for x in d2]
else:
# This is a particular case, but it is the default case, and can
# be computed avoiding x**g
# s = [(1 - x / Rmax**2) for x in d2]
s = [(1 - x / R2max) for x in d2]
return s
def computeGraph(self, R=None, n_edges=None, **kwargs):
"""
Computes a sparse graph for a given radius or for a given number of
edges
Parameters
----------
R : float
(only for rescale=True). Radius bound
n_edges : int
Number of edges
"""
if R is not None:
# Call the standard graph computation method
self.R = R
self._computeGraph(R, **kwargs)
elif n_edges is not None:
self._compute_graph_from_nedges(n_edges, **kwargs)
else:
logging.error("-- -- At least R or n_edges must be specified")
return
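# A hedged usage sketch (hypothetical data matrix X and parameter values):
#   sg = SimGraph(X, blocksize=25_000)
#   sg.computeGraph(R=0.3, sim='He2->JS')        # fix the radius, or ...
#   sg.computeGraph(n_edges=100_000, sim='He2')  # ... fix the number of edges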
def _computeGraph(self, R=None, sim='JS', g=1, th_gauss=0.1, rescale=False,
verbose=True):
"""
Computes a sparse graph for the self graph structure.
The self graph must containg a T-matrix, self.T
Parameters
----------
R : float
Radius. Edges link all data pairs at distance lower than R.
This is to force a sparse graph.
sim : string
Similarity measure used to compute affinity matrix
Available options are:
'JS', 1 minus Jensen-Shannon (JS) divergence (too slow);
'l1', 1 minus l1 distance
'He', 1-squared Hellinger's distance (sklearn-based implementation)
'He2', 1 minus squared Hellinger distance (self implementation)
'Gauss', an exponential function of the squared l2 distance
'l1->JS', same as JS, but the graph is computed after pre-selecting
edges using l1 distances and a theoretical bound
'He->JS', same as JS, but the graph is computed after preselecting
edges using Hellinger's distances and a theoretical bound
'He2->JS', same as He-Js, but using the self implementation of He
g : float
Exponent for the affinity mapping (not used for 'Gauss')
th_gauss : float
Similarity threshold. All similarity values below this threshold are
set to zero. This only applies to the Gauss method (the other methods
compute the threshold automatically from R).
rescale : boolean, optional (default=False)
If True, affinities are computed from distances by rescaling
values so that the minimum is zero and maximum is 1.
verbose : boolean, optional (default=True)
(Only for he_neighbors_graph()). If False, block-by-block
messaging is omitted
Returns
-------
self : object
Changes in attributes self.edge_ids (List of edges, as pairs (i, j)
of indices) and self.weights (list of affinity values for each pair
in edge_ids)
"""
logging.info(f"-- Computing graph with {self.n_nodes} nodes")
logging.info(f"-- Similarity measure: {sim}")
# #########################
# Computing Distance Matrix
# This is just to abbreviate
X = self.X
# Select Distance measure for radius_neighbor_graph
if sim in ['Gauss', 'He', 'He->JS']:
d = 'l2' # Note: l2 seems equivalent to minkowski (p=2)
elif sim in ['l1', 'l1->JS']:
d = 'l1' # Note: l1 seems equivalent to manhattan
elif sim == 'JS':
if issparse(X):
logging.warning(
'At the time of writing this code, SKLEARN does not ' +
'admit callable metrics for sparse inputs. Thus, an ' +
'error will raise very likely right now ...')
d = JSdist_sp
else:
d = JSdist
elif sim in ['He2', 'He2->JS']:
# No distance metric is selected, because a proper implementation
# is used instead sklearn
pass
else:
logging.error("computeTsubGraph ERROR: Unknown similarity measure")
exit()
# Select secondary radius
if sim == 'l1->JS':
R0 = np.sqrt(8 * np.log(2)) * R
# Refined R0. Not relevant effect for small R0
R0 = (12 / np.sqrt(2) * np.sqrt(np.sqrt(1 + R0**2 / 36) - 1))
logging.info(f'-- -- L1-radius bound for JS: {R0}')
elif sim in ['He->JS', 'He2->JS']:
R0 = np.sqrt(2) * R
logging.info(f'-- -- Hellinger-radius bound for JS: {R0}')
else:
# The target radius
R0 = R
# Compute the connectivity graph of all pair of nodes at distance
# below R0
# IMPORTANT: although radius_neighbors_graph has a 'distance' mode
# that returns the distance values, it cannot be used here because
# the resulting distance matrix does not distinguish between
# nodes at distance > R0 and nodes at distance = 0
t0 = time()
logging.info(f'-- -- Computing neighbors_graph ...')
if sim == 'He2->JS':
self.edge_ids = self.he_neighbors_graph(
X, R0, mode='connectivity', verbose=verbose)
elif sim == 'He2':
self.edge_ids, he_dist2 = self.he_neighbors_graph(
X, R0, mode='distance', verbose=verbose)
elif sim in ['He', 'He->JS']:
# We must compute the connectivity graph because module
# radius_neighbors_graph looses edges between nodes at zero
# distance
D = radius_neighbors_graph(np.sqrt(X), radius=R0,
mode='connectivity', metric=d)
elif sim in ['l1', 'l1->JS', 'Gauss', 'JS']:
D = radius_neighbors_graph(X, radius=R0, mode='connectivity',
metric=d)
logging.info(f' in {time()-t0} seconds')
# ##############################################
# From distance matrix to list of weighted edges
if sim not in ['He2', 'He2->JS']:
# Compute lists with origin, destination and value for all edges in
# the graph affinity matrix.
orig_id, dest_id = D.nonzero()
# Since the graph is undirected, we select ordered pairs orig_id,
# dest_id only
self.edge_ids = list(filter(lambda i: i[0] < i[1],
zip(orig_id, dest_id)))
# ####################
# Computing Affinities
n_edges = len(self.edge_ids)
logging.info(f"-- -- Computing affinities for {n_edges} edges...")
t0 = time()
if sim in ['JS', 'l1->JS', 'He->JS', 'He2->JS']:
# For methods ->JS, the distance computed by the neighbors_graph
# method is not the target distance JS.
# A new self.edge_ids is returned because the function filters out
# affinity values below th.
self.edge_ids, self.weights = self.JS_affinity(
X, R=R, g=g, rescale=rescale)
elif sim == 'He':
self.edge_ids, self.weights = self.he_affinity(
X, R=R, g=g, rescale=rescale)
elif sim == 'He2':
# Transform list of distances into similarities
self.weights = self._d2_to_sim(he_dist2, sim, g, rescale, R)
elif sim == 'l1':
self.edge_ids, self.weights = self.l1_affinity(
X, R=R, g=g, rescale=rescale)
elif sim == 'Gauss':
self.edge_ids, self.weights = self.l2_affinity(X, R, th_gauss)
n_edges = len(self.edge_ids)
logging.info(f" reduced to {n_edges} edges")
logging.info(f' Computed in {time()-t0} seconds')
logging.info(f"-- -- Graph generated with {self.n_nodes} nodes and " +
f" {n_edges} edges")
return
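    # Illustrative usage of the graph construction above (a sketch only; the
    # radius and blocksize below are hypothetical values, not taken from any
    # pipeline):
    #     sg = SimGraph(X, blocksize=25000)
    #     sg.computeGraph(R=0.3, sim='He2', g=1, rescale=False, verbose=True)
    #     edges, weights = sg.edge_ids, sg.weights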
def _compute_graph_from_nedges(self, n_edges, sim='JS', g=1, th_gauss=0.1,
rescale=False, verbose=True):
"""
Computes a sparse graph for a fixed number of edges.
        It computes the sparse graph from matrix X. The distance threshold R
        used to sparsify the graph is chosen so that the resulting graph
        has n_edges edges.
Parameters
----------
n_edges: int
Target number of edges
sim : string
Similarity measure used to compute affinity matrix
Available options are:
'JS', 1 minus Jensen-Shannon (JS) divergence (too slow);
'l1', 1 minus l1 distance
            'He', 1 minus squared Hellinger distance (sklearn-based implementation)
'He2', 1 minus squared Hellinger distance (self implementation)
'Gauss', an exponential function of the squared l2 distance
'l1->JS', same as JS, but the graph is computed after pre-selecting
edges using l1 distances and a theoretical bound
'He->JS', same as JS, but the graph is computed after preselecting
edges using Hellinger's distances and a theoretical bound
            'He2->JS', same as He->JS, but using the self implementation of He
g : float
Exponent for the affinity mapping (not used for 'Gauss')
th_gauss : float
            Similarity threshold. All similarity values below this threshold
            are set to zero. This is used only for the 'Gauss' method; the
            other methods compute the threshold automatically from R.
rescale : boolean, optional (default=False)
If True, affinities are computed from distances by rescaling
values so that the minimum is zero and maximum is 1.
verbose : boolean
(Only for he_neighbors_graph()). If False, block-by-block
messaging is omitted
Returns
-------
self : object
Changes in attributes self.edge_ids (List of edges, as pairs (i, j)
of indices) and self.weights (list of affinity values for each pair
in edge_ids)
"""
# Compute sub graph
size_ok = False
        # Since there is no direct and exact method to compute a graph with
        # n_edges edges, we will try to find a graph with approximately
        # n_edges_top edges, where n_edges_top > n_edges
n_edges_top = n_edges # Initial equality, but revised below
while not size_ok:
# Excess number of edges.
n_edges_top = int(1.2 * n_edges_top)
# ##############################################################
# First goal: find a dense graph, with less nodes but n_edges...
# Initial number of nodes to get a dense graph with n_edges_top
n_n = min(int(np.sqrt(2 * n_edges_top)), self.n_nodes)
# Initial radius to guarantee a dense graph
R = 1e100 # A very large number higher than any upper bound ...
# Take n_n nodes selected at random
np.random.seed(3)
idx = sorted(np.random.choice(range(self.n_nodes), n_n,
replace=False))
X_sg = self.X[idx]
# Compute dense graph
subg = SimGraph(X_sg, blocksize=self.blocksize)
subg.computeGraph(R=R, sim=sim, g=g, rescale=False,
verbose=verbose)
# Check if the number of edges is higher than the target. This
# should not happen. Maybe only for X_sg with repeated rows
n_e = len(subg.weights)
size_ok = (n_e >= n_edges) | (n_n == self.n_nodes)
if not size_ok:
logging.info(f'-- -- Insufficient graph with {n_e} < ' +
f'{n_edges} edges. Trying with more nodes')
        # Scale factor for the expected number of edges in the second trial.
        # The main idea is the following: if, for a fixed threshold R, we get
        # two graphs, one with n nodes and e edges, and another with n' nodes
        # and e' edges, we can expect
        #     n'**2 / n**2 = e' / e
        # (with approximate equality).
        # Our graphs: the target graph, with n' = self.n_nodes and e' = n_edges;
        #             the subgraph, with n = n_n nodes and e = n_e edges.
        # In order to satisfy the above relation, the subgraph should have:
        #     n_edges_subg = e' * n**2 / n'**2 = ...
alpha = (n_n / self.n_nodes) ** 2
# n_edges_subg = int(n_edges_top / alpha)
n_edges_subg = int(n_e * alpha)
        # Since n_e > n_edges_subg, we can compute the threshold value
        # providing n_edges_subg, which should be approximately equal to the
        # one providing n_edges_top in the full graph.
        # Compute the threshold required to get the target number of links.
        # This is the radius that should provide approximately that many edges.
if sim in ['JS', 'He->JS', 'He2->JS', 'l1->JS']:
Rmax = 1
elif sim in ['l1']:
Rmax = 2
elif sim in ['He', 'He2']:
Rmax = np.sqrt(2)
if n_n == self.n_nodes:
size_ok = True
            # The final graph has been computed. Just read it from the subgraph
self.edge_ids = subg.edge_ids
self.weights = subg.weights
else:
size_ok = False
# Compute the similarity value to get n_edges_subg
w = sorted(list(zip(subg.weights, range(n_e))), reverse=True)
w_min = w[n_edges_subg - 1][0]
R = Rmax * (1 - w_min) ** (1 / (2 * g))
while not size_ok:
# Compute graph with the target number of links
self.computeGraph(R=R, sim=sim, g=g, rescale=False,
verbose=verbose)
size_ok = (len(self.weights) >= n_edges)
if not size_ok:
                # If the method failed, increase the radius threshold
R = 1.2 * R
# This is to deal with the case R = 0
if R == 0:
                    # Take the value of R corresponding to the highest w
                    # less than 1
w_min = np.max([x if x < 1 else 0 for x in subg.weights])
if w_min > 0:
R = Rmax * (1 - w_min) ** (1 / (2 * g))
else:
# If R is still zero, take a fixed value.
R = 0.01
logging.warning(f'-- -- Too sparse graph. Trying R = {R}...')
        # If we are here, we have got a graph with more than n_edges edges and
        # all nodes. We just need to tune the threshold to get exactly n_edges
n_e = len(self.weights)
w = sorted(list(zip(self.weights, range(n_e))), reverse=True)
w = w[:n_edges]
w_min = w[-1][0]
ew = [x for x in zip(self.edge_ids, self.weights) if x[1] >= w_min]
if len(ew) > 0:
self.edge_ids, self.weights = zip(*ew)
else:
self.edge_ids, self.weights = [], []
self.R = R
# Rescale weights if necessary
if rescale and R < Rmax:
# The following equation results from computing square distances
# from the weights, and recomputing the weight with rescaling:
# d2**g = (1 - w) * Rmax**(2*g)
# w = 1 - d2**g / R**(2*g)
# which is equivalent to...
if R > 0:
self.weights = [max(1 - (1 - w) * (Rmax / R)**(2 * g), 0)
for w in self.weights]
                # (The max operator is just to ensure no negative weights
                # caused by finite precision errors)
# IF R = 0 then w_min = 1 and, thus, all weights should be 1,
# which implies that no normalization is possible
return
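    # Sketch of intended use (hypothetical values): to build a graph with a
    # prescribed edge budget instead of a fixed radius,
    #     sg = SimGraph(X, blocksize=25000)
    #     sg._compute_graph_from_nedges(n_edges=100000, sim='He2->JS', g=1)
    # Afterwards sg.edge_ids and sg.weights hold approximately n_edges entries
    # (ties at the threshold weight may leave a few extra edges) and sg.R
    # stores the radius that was finally used.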
def compute_id_graph(self, R=1e-100, verbose=True):
"""
Computes identity graph.
        The identity graph connects nodes a and b with weight 1 iff a and b
        have the same feature vector in self.X. Otherwise, a and b are
        disconnected
Parameters
----------
R : float
Radius. Edges link all data pairs at distance lower than R.
            It should be a very small value in order to link only nodes with
            almost equal attributes. Nonzero values may be used to allow
            slight deviations from equality.
verbose : boolean
If False, block-by-block messaging is omitted in the call to
he_neighbors graph.
Returns
-------
self : object
Updated attribute self.edge_ids (list of edges, as pairs (i, j) of
indices)
"""
logging.info(f"-- Computing graph with {self.n_nodes} nodes")
# Compute the connectivity graph of all pair of nodes at distance
# below R0
t0 = time()
self.edge_ids = self.he_neighbors_graph(
self.X, R, mode='connectivity', verbose=verbose)
logging.info(f' in {time()-t0} seconds')
logging.info(f"-- -- Graph generated with {self.n_nodes} nodes and " +
f"{len(self.edge_ids)} edges")
return
def cluster_equivalent_nodes(self, reduceX=False):
"""
Computes a graph where each node is formed by all nodes at zero
distance
Parameters
----------
reduceX : boolean
If True, it computes self.Xeq, a data matrix without rows at zero
distance
"""
logging.info(f'-- -- Computing equivalence classes')
# ###################################
        # Partition matrix by non-zero topics
# This is just to abbreviate
X = self.X
n_nodes, n_topics = X.shape
# We assign an integer to each set of nonzero topics, based on the
# binary rows. To do so, we need an array with the powers of 2
id_nodes, id_topics = (X > 0).nonzero()
powers2 = [2**(n_topics - 1 - n) for n in range(n_topics)]
        # I wanted to do this, but it is not allowed for very large ints:
        # tclass = ((X > 0) @ powers2).T.tolist()[0]
        # So I do the following instead
# Replace topic ids by their powers of two:
pw_ids = [powers2[n] for n in id_topics]
partition0 = [0] * n_nodes
for i, node in enumerate(id_nodes):
partition0[node] += pw_ids[i]
# The elements of the partition are named preclusters to avoid
# confusion with the final clusters.
# - Preclusters group all nodes with the same nonzero topics
# (i.e., with the same integer value in partition0)
# - Clusters group all nodes at zero distance
        # Each precluster will be partitioned into clusters.
precluster_sizes = Counter(partition0)
# Compute inverse dictionary
pc2nodes = defaultdict(list)
for i, s in enumerate(partition0):
pc2nodes[s].append(i)
# edge_ids = [] # List of edges
# Cluster of each node
cluster_ids = np.zeros((n_nodes, 1)).astype(int)
n_clusters = 0 # Counter of clusters (equivalence classes)
n_edges = 0 # Counter of edges
n = 0 # Node counter (only for status printing)
for pc, pc_size in precluster_sizes.items():
            # Get the data submatrix of precluster pc
ind = pc2nodes[pc]
n += len(ind)
if pc_size > 1:
print(f"-- -- Processing node {n} out of {n_nodes} " +
f"in precluster with {pc_size} nodes \r", end="")
Xc = X[ind]
if Xc[0].count_nonzero() > 1:
# Compute zero-radius similarity graph.
sg = SimGraph(Xc)
if len(ind) < 5000:
# This is to disable large logging messaging
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
for handler in logger.handlers:
handler.setLevel(logging.ERROR)
else:
print(f"-- -- Processing node {n} out of {n_nodes} " +
f"in precluster with {pc_size} nodes ")
sg.compute_id_graph(R=1e-100, verbose=False)
if len(ind) < 5000:
# Restore logging messages
logger.setLevel(logging.INFO)
for handler in logger.handlers:
handler.setLevel(logging.INFO)
# Get connected components
G = igraph.Graph(n=pc_size, edges=sg.edge_ids,
directed=False)
cc = G.clusters()
# Get the membership vector
cluster_labels = cc.membership
# Assign labels to new clusters:
cluster_ids[ind] = (n_clusters +
np.array([cluster_labels]).T)
n_clusters += len(set(cluster_labels))
n_edges += len(sg.edge_ids)
else:
                    # If the topic vectors have a single nonzero element, all
                    # nodes in the precluster must be equal, so there is no
                    # need to call SimGraph
cluster_ids[ind] = n_clusters
n_clusters += 1
n_edges += pc_size * (pc_size - 1) // 2
else:
cluster_ids[ind] = n_clusters
n_clusters += 1
# Convert np array of cluster ids into list
self.cluster_ids = cluster_ids.T.tolist()[0]
self.n_edges = n_edges
self.n_clusters = n_clusters
self.n_preclusters = len(precluster_sizes)
if reduceX:
self.computeXeq()
return
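    # Worked micro-example of the precluster encoding used above (for
    # illustration only): with n_topics = 3, powers2 = [4, 2, 1], so a row
    # whose nonzero topics are {0, 2} is mapped to 4 + 1 = 5; two rows fall
    # into the same precluster iff they share exactly the same set of nonzero
    # topics.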
def computeXeq(self):
"""
        Computes the reduced feature matrix self.Xeq, with a single row per
        equivalence class.
        self.Xeq[i] contains the feature vector shared by all nodes from
        equivalence class i.
"""
ind = [0] * self.n_clusters
for n, c in enumerate(self.cluster_ids):
ind[c] = n
self.Xeq = self.X[ind]
return
def he_affinity(self, X, R=2, g=1, rescale=True):
"""
        Compute the Hellinger affinities between all connected nodes in the
        graph, based on the node attribute vectors in matrix X.
        It assumes that all attribute vectors are normalized to sum up to 1
        Attribute matrix X can be sparse
Parameters
----------
X : numpy array
Input matrix of probabilistic attribute vectors
        R : float
            Radius (maximum He distance; edges at higher distance are removed)
g : float
Exponent for the final affinity mapping
rescale : boolean
            If True, affinity values are rescaled so that the minimum value is
            zero and the maximum value is one.
Returns
-------
edge_id : list of tuples
List of edges
weights : list
List of edge weights
"""
# ################################
# Compute affinities for all edges
        # The square root is taken here. This is inefficient if X has many
        # rows and just a few edges will be computed. However, we can
        # expect the opposite (the list of edges involves most of the
        # nodes).
Z = np.sqrt(X)
        # Divergences are computed by blocks. This is much faster than a
        # row-by-row computation, especially when X is sparse.
d2_he = []
for i in range(0, len(self.edge_ids), self.blocksize):
edge_ids = self.edge_ids[i: i + self.blocksize]
# Take the (matrix) of origin and destination attribute vectors
i0, i1 = zip(*edge_ids)
if issparse(X):
P = Z[list(i0)].toarray()
Q = Z[list(i1)].toarray()
else:
P = Z[list(i0)]
Q = Z[list(i1)]
# Squared Hellinger's distance
# The maximum is used here just to avoid 2-2s<0 due to
# precision errors
s = np.sum(P * Q, axis=1)
d2_he += list(np.maximum(2 - 2 * s, 0))
# #########
# Filtering
# Filter out edges with He distance above R.
ed = [z for z in zip(self.edge_ids, d2_he) if z[1] < R**2]
if len(ed) > 0:
edge_id, d2 = zip(*ed)
else:
edge_id, d2 = [], []
# ####################
# Computing affinities
# Transform distances into affinity values.
weights = self._d2_to_sim(d2, 'He', g, rescale, R)
return edge_id, weights
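    # Note (illustrative): for two normalized vectors p and q, the squared
    # Hellinger distance computed above is 2 - 2 * sum_i sqrt(p_i * q_i); it
    # is 0 for identical vectors and reaches its maximum, 2, for disjoint
    # supports, which is why Rmax = sqrt(2) is used as the He radius bound
    # elsewhere in this class.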
def l1_affinity(self, X, R=2, g=1, rescale=True):
"""
        Compute the l1 affinities between all connected nodes in the graph,
        based on the node attribute vectors
It assumes that all attribute vectors are normalized to sum up to 1
Attribute matrix X can be sparse
Parameters
----------
X : numpy array
Input matrix of probabilistic attribute vectors
        R : float
            Radius (maximum l1 distance; edges at higher distance are removed)
g : float
Exponent for the final affinity mapping
rescale : boolean
            If True, affinity values are rescaled so that the minimum value is
            zero and the maximum value is one.
Returns
-------
edge_id : list of tuples
List of edges
weights : list
List of edge weights
"""
# ################################
# Compute affinities for all edges
        # Distances are computed by blocks. This is much faster than a
        # row-by-row computation, especially when X is sparse.
d_l1 = []
for i in range(0, len(self.edge_ids), self.blocksize):
edge_ids = self.edge_ids[i: i + self.blocksize]
# Take the (matrix) of origin and destination attribute vectors
i0, i1 = zip(*edge_ids)
if issparse(X):
P = X[list(i0)].toarray()
Q = X[list(i1)].toarray()
else:
P = X[list(i0)]
Q = X[list(i1)]
# l1 distance
d_l1 += list(np.sum(np.abs(P - Q), axis=1))
# #########
# Filtering
# Filter out edges with L1 distance above R.
ed = [z for z in zip(self.edge_ids, d_l1) if z[1] < R]
if len(ed) > 0:
edge_id, d_l1 = zip(*ed)
else:
edge_id, d_l1 = [], []
# ####################
# Computing affinities
        # Transform distances into affinity values. Note that we do not use
        # the self._d2_to_sim method because distances in d_l1 are not squared.
Rmax = 2
if rescale:
# Here we need to reassign R to avoid R >> 2 (which is the max
# l1 value). Otherwise, normalization z/R**2 could be harmful.
Rmax = min(R, Rmax)
# Rescale affinities so that the minimum value is zero.
weights = [(1 - z**g / (Rmax**g + EPS)) for z in d_l1 if z < Rmax]
return edge_id, weights
def l2_affinity(self, X, R=2, th_gauss=0.1):
"""
        Compute the Gaussian (l2-based) affinities between all connected nodes
        in the graph, based on the node attribute vectors
It assumes that all attribute vectors are normalized to sum up to 1
Attribute matrix X can be sparse
Parameters
----------
X : numpy array
Input matrix of probabilistic attribute vectors
        R : float
            Radius (maximum l2 distance; edges at higher distance are removed)
th_gauss : float
            Similarity threshold. All similarity values below this threshold
            are set to zero.
Returns
-------
edge_id : list of tuples
List of edges
weights : list
List of edge weights
"""
# ################################
# Compute affinities for all edges
        # Distances are computed by blocks. This is much faster than a
        # row-by-row computation, especially when X is sparse.
d_l2 = []
for i in range(0, len(self.edge_ids), self.blocksize):
edge_ids = self.edge_ids[i: i + self.blocksize]
# Take the (matrix) of origin and destination attribute vectors
i0, i1 = zip(*edge_ids)
if issparse(X):
P = X[list(i0)].toarray()
Q = X[list(i1)].toarray()
else:
P = X[list(i0)]
Q = X[list(i1)]
            # Squared l2 distance
d_l2 += list(np.sum((P - Q)**2, axis=1))
# #########
# Filtering
# Filter out edges with JS distance above R (divergence above R**2).
edge_id = [z[0] for z in zip(self.edge_ids, d_l2) if z[1] < R**2]
# ####################
# Computing affinities
# The value of gamma to get min edge weight th_gauss at distance R
gamma = - np.log(th_gauss) / (R**2 + EPS)
# Nonzero affinity values
weights = [np.exp(-gamma * z) for z in d_l2 if z < R**2]
return edge_id, weights
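    # Worked check of the Gaussian width used above: with
    # gamma = -log(th_gauss) / R**2, an edge at squared distance R**2 gets
    # weight exp(-gamma * R**2) = th_gauss, so th_gauss acts as the lower
    # bound of the retained affinities.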
def JS_affinity(self, X, Y=None, R=1, g=1, rescale=True):
"""
Compute all truncated Jensen-Shannon affinities between all pairs of
connected nodes in the graph based on the node attribute vectors
It assumes that all attribute vectors are normalized to sum up to 1
Attribute matrices in X and Y can be sparse
Parameters
----------
X : numpy array
Input matrix of probabilistic attribute vectors
Y : numpy array or None, optional (default=None)
Input matrix of probabilistic attribute vectors. If None, it is
assumed Y=X
R : float, optional (default=1)
            Radius (maximum JS distance; edges at higher distance are
            removed)
g : float, optional (default=1)
Exponent for the final affinity mapping
        rescale : boolean, optional (default=True)
            If True, affinity values are rescaled so that the minimum value is
            zero and the maximum value is one.
Returns
-------
edge_id : list of tuples
List of edges
weights : list
List of edge weights
"""
# ################
# Set right matrix
if Y is None:
Y = X
# ################################
# Compute affinities for all edges
        # Divergences are computed by blocks. This is much faster than a
        # row-by-row computation, especially when X is sparse.
divJS = []
for i in range(0, len(self.edge_ids), self.blocksize):
edge_ids = self.edge_ids[i: i + self.blocksize]
i0, i1 = zip(*edge_ids)
if issparse(X):
P = X[list(i0)].toarray() + EPS
Q = Y[list(i1)].toarray() + EPS
else:
P = X[list(i0)] + EPS
Q = Y[list(i1)] + EPS
M = 0.5 * (P + Q)
divJS += list(0.5 * (np.sum(P * np.log2(P / M), axis=1) +
np.sum(Q * np.log2(Q / M), axis=1)))
# #########
# Filtering
        # Filter out edges with JS distance not below R
        # (i.e., with divergence not below R**2).
ed = [z for z in zip(self.edge_ids, divJS) if z[1] < R**2]
if len(ed) > 0:
edge_id, d2 = zip(*ed)
else:
edge_id, d2 = [], []
# ####################
# Computing affinities
# Transform distances into affinity values.
weights = self._d2_to_sim(d2, 'JS', g, rescale, R)
return edge_id, weights
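    # Note (illustrative): with base-2 logarithms the Jensen-Shannon
    # divergence computed above lies in [0, 1]; the code treats
    # sqrt(divergence) as the JS distance, which is why edges are kept when
    # the divergence is below R**2 rather than below R.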
def show_JS_bounds(self, R=None, sim=None, g=1, out_path=None,
verbose=True):
"""
Computes JS bounds for a given similarity measure.
Parameters
----------
R : float
Radius. Edges link all data pairs at distance lower than R
            This is to force a sparse graph.
sim : string
Similarity measure used to compute affinity matrix
Available options are:
'l1->JS', same as JS, but the graph is computed after preselecting
edges using l1 distances and a theoretical bound
'He->JS', same as JS, but the graph is computed after preselecting
edges using Hellinger's distances and a theoretical bound
            'He2->JS', same as He->JS, but using the self implementation of He.
g : float
Exponent for the affinity mapping
verbose : boolean
(Only for he_neighbors_graph()). If False, block-by-block
messaging is omitted
Returns
-------
self : object
            Modifies self.edge_ids (list of edges, as pairs (i, j) of indices)
            and self.weights (list of affinity values for each pair in
            edge_ids)
"""
# #########################
# Computing Distance Matrix
# This is just to abbreviate
X = self.X
# Select Distance measure for radius_neighbor_graph
if sim == 'l1->JS':
d = 'l1' # Note: l1 seems equivalent to manhattan
label = 'l1'
elif sim == 'He->JS':
d = 'l2' # (He is an L2 distance over the square root of X)
label = 'Hellinger'
elif sim == 'He2->JS':
            # No distance metric is selected, because a proper implementation
            # is used instead of sklearn
label = 'Hellinger'
else:
logging.error(f"-- JS bounds not available for similarity = {sim}")
exit()
# Select secondary radius
if sim == 'l1->JS':
R0 = np.sqrt(8 * np.log(2)) * R
# Refined R0. Not relevant effect for small R0
R0 = (12 / np.sqrt(2) * np.sqrt(np.sqrt(1 + R0**2 / 36) - 1))
elif sim in ['He->JS', 'He2->JS']:
R0 = np.sqrt(2) * R
logging.info(f'-- -- {label}-radius bound for JS: {R0}')
        # Compute the connectivity graph of all pairs of nodes at distance
        # below R0
        # IMPORTANT: Note that, although radius_neighbors_graph has a mode
        # 'distance' that returns the distance values, it cannot be used
        # here because the resulting distance matrix does not distinguish
        # between nodes at distance > R0 and nodes at distance = 0
t0 = time()
logging.info(f'-- -- Computing neighbors_graph ...')
if sim == 'He2->JS':
self.edge_ids = self.he_neighbors_graph(
X, R0, mode='connectivity', verbose=verbose)
elif sim == 'He->JS':
            # We must compute the connectivity graph because
            # radius_neighbors_graph loses edges between nodes at zero
            # distance
D = radius_neighbors_graph(np.sqrt(X), radius=R0,
mode='connectivity', metric=d)
elif sim == 'l1->JS':
D = radius_neighbors_graph(X, radius=R0, mode='connectivity',
metric=d)
logging.info(f' in {time()-t0} seconds')
# ##############################################
# From distance matrix to list of weighted edges
if sim != 'He2->JS':
# Compute lists with origin, destination and value for all edges in
# the graph affinity matrix.
orig_id, dest_id = D.nonzero()
# Since the graph is undirected, we select ordered pairs orig_id,
# dest_id only
self.edge_ids = list(filter(lambda i: i[0] < i[1],
zip(orig_id, dest_id)))
# ####################
# Computing Affinities
logging.info(f"-- -- Computing affinities for {len(self.edge_ids)}" +
" edges ...",)
t0 = time()
        # For the ->JS methods, the distance computed by the neighbors_graph
        # call above is not the target JS distance.
        # A new self.edge_ids is returned because the function filters out
        # affinity values below the threshold.
self.edge_ids, self.weights = self.JS_affinity(X, R, g)
n_edges = len(self.edge_ids)
# A temporary plot to visualize the differences between l1 or He and JS
if sim in ['l1->JS', 'He->JS']:
if sim == 'l1->JS':
# Compute the distance graph
D = radius_neighbors_graph(X, radius=R0, mode='distance',
metric=d)
elif sim == 'He->JS':
# Compute the distance graph
D = radius_neighbors_graph(np.sqrt(X), radius=R0,
mode='distance', metric=d)
fpath = os.path.join(out_path, f'simtest_{sim}.png')
self.simTest(D, R, sim, g=g, fpath=fpath, label=label)
logging.info(f" reduced to {n_edges} edges")
logging.info(f' Computed in {time()-t0} seconds')
logging.info(f"-- -- Graph generated with {self.n_nodes} nodes and " +
f"{n_edges} edges")
return
def simTest(self, D, R, sim=None, g=1, fpath=None, label='l1'):
"""
Plot the values in weights vs the values in D selected by the indices
in edge_id.
        This is used to visualize the effect of selecting samples using one
        measure (l1, l2) as a first step to reduce the sample set used to
        select edges based on the Jensen-Shannon divergence.
Parameters
----------
        D : 2D array
            Matrix of reference distance values (as returned by
            radius_neighbors_graph with mode='distance')
R : float
Radius bound
sim : string
Name of the similarity function
g : float
Exponent value
fpath : string
Output path
label : string
Label for the figure plot
"""
# Get values of the distance measure used as a reference
div = [D[x] for x in self.edge_ids]
plt.figure()
# Plot the sampled points
plt.plot(div, self.weights, '.', label='Data samples')
if sim in ['He->JS', 'He2->JS']:
bound = R * np.sqrt(2)
elif sim == 'l1->JS':
r = R * np.sqrt((8 * np.log(2)))
bound = (12 / np.sqrt(2) * np.sqrt(np.sqrt(1 + r**2 / 36) - 1))
if bound is not None:
            # Plot a vertical line marking the distance threshold applied to
            # select the sampled points
plt.plot([bound, bound], [0, 1], ':', label='Bound')
        # Plot the theoretical curve relating the reference distance to the
        # JS-based affinity
if sim is not None:
aff = np.linspace(0, 1, 100)
if sim in ['He->JS', 'He2->JS']:
                r = R * np.sqrt(2)
# you should write your functions in nn.py
import numpy as np
from nn import *
from util import *
import copy
# fake data
# feel free to plot it in 2D
# what do you think these 4 classes are?
g0 = np.random.multivariate_normal([3.6,40],[[0.05,0],[0,10]],10)
g1 = np.random.multivariate_normal([3.9,10],[[0.01,0],[0,5]],10)
g2 = np.random.multivariate_normal([3.4,30],[[0.25,0],[0,5]],10)
g3 = np.random.multivariate_normal([2.0,10],[[0.5,0],[0,10]],10)
x = np.vstack([g0,g1,g2,g3])
# we will do XW + B
# that implies that the data is N x D
# create labels
y_idx = np.array([0 for _ in range(10)] + [1 for _ in range(10)] + [2 for _ in range(10)] + [3 for _ in range(10)])
# print (y_idx)
# turn to one_hot
y = np.zeros((y_idx.shape[0],y_idx.max()+1))
y[np.arange(y_idx.shape[0]),y_idx] = 1
print (y.shape)
# parameters in a dictionary
params = {}
# Q 2.1
# initialize a layer
initialize_weights(2,25,params,'layer1')
initialize_weights(25,4,params,'output')
assert(params['Wlayer1'].shape == (2,25))
assert(params['blayer1'].shape == (25,))
#expect 0, [0.05 to 0.12]
print("{}, {:.2f}".format(params['blayer1'].sum(),params['Wlayer1'].std()**2))
print("{}, {:.2f}".format(params['boutput'].sum(),params['Woutput'].std()**2))
# Q 2.2.1
# implement sigmoid
test = sigmoid(np.array([-1000,1000]))
print('should be zero and one\t',test.min(),test.max())
# implement forward
h1 = forward(x,params,'layer1')
# print(h1.shape)
# # Q 2.2.2
# # implement softmax
probs = forward(h1,params,'output',softmax)
# # make sure you understand these values!
# positive, ~1, ~1, (40,4)
print(probs.min(),min(probs.sum(1)),max(probs.sum(1)),probs.shape)
# # Q 2.2.3
# # implement compute_loss_and_acc
loss, acc = compute_loss_and_acc(y, probs)
# # should be around -np.log(0.25)*40 [~55] and 0.25
# # if it is not, check softmax!
print("{}, {:.2f}".format(loss,acc))
# # here we cheat for you
# # the derivative of cross-entropy(softmax(x)) is probs - 1[correct actions]
delta1 = probs
delta1[np.arange(probs.shape[0]),y_idx] -= 1
# print (delta1)
# # we already did derivative through softmax
# # so we pass in a linear_deriv, which is just a vector of ones
# # to make this a no-op
delta2 = backwards(delta1,params,'output',linear_deriv)
# # Implement backwards!
backwards(delta2,params,'layer1',sigmoid_deriv)
# W and b should match the shapes of their gradients
for k,v in sorted(list(params.items())):
if 'grad' in k:
name = k.split('_')[1]
print(name,v.shape, params[name].shape)
# Q 2.4
batches = get_random_batches(x,y,5)
# print batch sizes
print([_[0].shape[0] for _ in batches])
batch_num = len(batches)
def apply_gradient(params, name, learning_rate):
W = params['W' + name]
b = params['b' + name]
grad_W = params['grad_W' + name]
grad_b = params['grad_b' + name]
W = W - learning_rate*grad_W
b = b - learning_rate*grad_b
params['W'+name] = W
params['b'+name] = b
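# Quick sanity check of the update rule above (illustrative numbers): with
# learning_rate = 1e-3, a gradient entry of 1.0 moves the corresponding weight
# by -0.001 on each call, i.e. plain gradient descent without momentum.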
# # WRITE A TRAINING LOOP HERE
max_iters = 500
learning_rate = 1e-3
# with default settings, you should get loss < 35 and accuracy > 75%
for itr in range(max_iters):
total_loss = 0
avg_acc = 0
for xb,yb in batches:
# forward
h1 = forward(xb, params, 'layer1')
probs = forward(h1, params, 'output', softmax)
# loss
loss, acc = compute_loss_and_acc(yb, probs)
# be sure to add loss and accuracy to epoch totals
total_loss += loss
avg_acc += acc/batch_num
# backward
delta1 = probs
yb_idx = np.argmax(yb, axis=1)
delta1[np.arange(probs.shape[0]),yb_idx] -= 1
delta2 = backwards(delta1,params,'output',linear_deriv)
backwards(delta2,params,'layer1',sigmoid_deriv)
# apply gradient
apply_gradient(params, 'output', learning_rate)
apply_gradient(params, 'layer1', learning_rate)
if itr % 100 == 0:
print("itr: {:02d} \t loss: {:.2f} \t acc : {:.2f}".format(itr,total_loss,avg_acc))
# # Q 2.5 should be implemented in this file
# # you can do this before or after training the network.
# save the old params
h1 = forward(x, params, 'layer1')
probs = forward(h1, params, 'output', softmax)
loss, acc = compute_loss_and_acc(y, probs)
delta1 = probs
yb_idx = np.argmax(y, axis=1)
delta1[np.arange(probs.shape[0]),y_idx] -= 1
delta2 = backwards(delta1,params,'output',linear_deriv)
backwards(delta2,params,'layer1',sigmoid_deriv)
params_orig = copy.deepcopy(params)
eps = 1e-6
for k,v in params.items():
if '_' in k:
continue
# we have a real parameter!
# for each value inside the parameter
v_orig = v
for index, j in np.ndenumerate(v):
params[k][index] = v[index]+eps
h1 = forward(x, params, 'layer1')
probs = forward(h1, params, 'output', softmax)
lossplus, _ = compute_loss_and_acc(y, probs)
params[k][index] = v[index]-2*eps
h1 = forward(x, params, 'layer1')
probs = forward(h1, params, 'output', softmax)
lossminus, _ = compute_loss_and_acc(y, probs)
        params['grad_' + k][index] = np.divide(
            np.subtract(lossplus, lossminus), 2 * eps)
import numpy as np
import numpy.testing as npt
import pytest
from sklearn.preprocessing import OneHotEncoder
from timeserio.preprocessing import (
FeatureIndexEncoder, StatelessOneHotEncoder,
StatelessTemporalOneHotEncoder, StatelessPeriodicEncoder
)
from timeserio.preprocessing.encoding import PeriodicEncoder
class TestFeatureIndexEncoder:
@pytest.mark.parametrize(
'n_labels, expected_encoding', [
(1, np.arange(1)),
(2, np.arange(2)),
(3, np.arange(3)),
]
)
def test_feature_encoder(self, n_labels, expected_encoding):
encoder = FeatureIndexEncoder()
labels = np.array(
[f'label{idx}' for idx in range(n_labels)]
).reshape(-1, 1)
new_ids = encoder.fit_transform(labels)
assert isinstance(new_ids, np.ndarray)
assert len(new_ids.shape) == 2
assert new_ids.shape[1] == 1
        assert set(new_ids.ravel()) == set(expected_encoding.ravel())
class TestStatelessOneHotEncoder:
n_rows = 10
def test_invalid_n_values(self):
with pytest.raises(ValueError):
StatelessOneHotEncoder(n_features=1, n_values='auto')
@pytest.mark.parametrize(
'n_features, n_values, categories', [
(1, 3, [[0, 1, 2]]),
(2, 3, [[0, 1, 2], [0, 1, 2]])
]
)
def test_same_as_stateful(
self, n_features, n_values, categories, random
):
x = np.random.randint(
0, np.min(n_values), size=(self.n_rows, n_features)
)
stateful_enc = OneHotEncoder(
categories=categories,
sparse=False
)
stateless_enc = StatelessOneHotEncoder(
n_features=n_features,
n_values=n_values,
sparse=False
)
x0 = stateful_enc.fit_transform(x)
x1 = stateless_enc.transform(x)
npt.assert_allclose(x1, x0)
@pytest.mark.parametrize(
'n_features, n_values, categories', [
(1, [3], [[0, 1, 2]]),
(2, [3, 2], [[0, 1, 2], [0, 1]])
]
)
def test_same_as_stateful_for_multiple_n_values(
self, n_features, n_values, categories, random
):
x = np.hstack([
np.random.randint(0, np.min(_n_values), size=(self.n_rows, 1))
for _n_values in n_values
])
stateful_enc = OneHotEncoder(
categories=categories,
sparse=False
)
stateless_enc = StatelessOneHotEncoder(
n_features=n_features,
n_values=n_values,
sparse=False
)
x0 = stateful_enc.fit_transform(x)
x1 = stateless_enc.transform(x)
npt.assert_allclose(x1, x0)
class TestStatelessTemporalOneHotEncoder:
n_rows = 3
@pytest.mark.parametrize('n_values', ['all', [True], [0]])
def test_invalid_n_values(self, n_values):
with pytest.raises(ValueError):
StatelessTemporalOneHotEncoder(n_features=1, n_values=n_values)
def test_temporal_onehot(self):
x = np.array([
[0, 0, 1, 1],
[0, 1, 0, 1],
])
y_expected = np.array(
[
[1, 1, 0, 0, 0, 0, 1, 1],
[1, 0, 1, 0, 0, 1, 0, 1],
]
)
n_values = 2
enc = StatelessTemporalOneHotEncoder(
n_features=x.shape[1], n_values=n_values, sparse=False
)
y = enc.fit_transform(x)
npt.assert_allclose(y, y_expected)
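    # Reading of the fixture above (for illustration): the temporal encoder
    # emits one indicator block per category value, each spanning all 4 time
    # steps; the first 4 outputs flag where the input equals 0 and the last 4
    # flag where it equals 1, giving 4 * 2 = 8 columns per row.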
class TestPeriodicEncoder:
n_rows = 10
column = np.linspace(0, 1, num=n_rows)
    column_sin = np.sin(2 * np.pi * column)
"""
map_sense.py
Copyright 2016 University of Melbourne.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
import numpy as np
from fourdvar.datadef import SensitivityData, PhysicalAdjointData
import fourdvar.util.date_handle as dt
import fourdvar.params.template_defn as template
import fourdvar.util.netcdf_handle as ncf
import fourdvar.params.cmaq_config as cmaq_config
from fourdvar.params.input_defn import inc_icon
unit_key = 'units.<YYYYMMDD>'
unit_convert_dict = None
def get_unit_convert():
"""
    extension: get unit conversion dictionary for sensitivity to each day's emissions
input: None
output: dict ('units.<YYYYMMDD>': np.ndarray( shape_of( template.sense_emis ) )
notes: SensitivityData.emis units = CF/(ppm/s)
PhysicalAdjointData.emis units = CF/(mol/(s*m^2))
"""
global unit_key
#physical constants:
#molar weight of dry air (precision matches cmaq)
mwair = 28.9628
#convert proportion to ppm
ppm_scale = 1E6
#convert g to kg
kg_scale = 1E-3
unit_dict = {}
#all spcs have same shape, get from 1st
tmp_spc = ncf.get_attr( template.sense_emis, 'VAR-LIST' ).split()[0]
target_shape = ncf.get_variable( template.sense_emis, tmp_spc )[:].shape
#layer thickness constant between files
lay_sigma = list( ncf.get_attr( template.sense_emis, 'VGLVLS' ) )
#layer thickness measured in scaled pressure units
lay_thick = [ lay_sigma[ i ] - lay_sigma[ i+1 ] for i in range( len( lay_sigma ) - 1 ) ]
    lay_thick = np.array(lay_thick)
from __future__ import print_function
import itertools
import math
import os
import random
import shutil
import tempfile
import unittest
import uuid
import numpy as np
import pytest
import tensorflow as tf
import coremltools
import coremltools.models.datatypes as datatypes
from coremltools.models import _MLMODEL_FULL_PRECISION, _MLMODEL_HALF_PRECISION
from coremltools.models import neural_network as neural_network
from coremltools.models.neural_network import flexible_shape_utils
from coremltools.models.utils import macos_version, is_macos
np.random.seed(10)
MIN_MACOS_VERSION_REQUIRED = (10, 13)
LAYERS_10_15_MACOS_VERSION = (10, 15)
def _get_unary_model_spec(x, mode, alpha=1.0):
input_dim = x.shape
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_unary(name='unary', input_name='data',
output_name='output', mode=mode, alpha=alpha)
return builder.spec
class CorrectnessTest(unittest.TestCase):
def runTest(self):
pass
def _compare_shapes(self, np_preds, coreml_preds):
return np.squeeze(np_preds).shape == np.squeeze(coreml_preds).shape
def _compare_nd_shapes(self, np_preds, coreml_preds, shape=()):
if shape:
return coreml_preds.shape == shape
else:
# check if shape has 0 valued dimension
if np.prod(np_preds.shape) == 0 and np.prod(coreml_preds.shape) == 0:
return True
return coreml_preds.shape == np_preds.shape
def _compare_predictions(self, np_preds, coreml_preds, delta=.01):
np_preds = np_preds.flatten()
coreml_preds = coreml_preds.flatten()
for i in range(len(np_preds)):
max_den = max(1.0, np_preds[i], coreml_preds[i])
if np.abs(
np_preds[i] / max_den - coreml_preds[i] / max_den) > delta:
return False
return True
@staticmethod
def _compare_moments(model, inputs, expected, use_cpu_only=True, num_moments=10):
"""
This utility function is used for validate random distributions layers.
It validates the first 10 moments of prediction and expected values.
"""
def get_moment(data, k):
return np.mean(np.power(data - np.mean(data), k))
if isinstance(model, str):
model = coremltools.models.MLModel(model)
model = coremltools.models.MLModel(model, useCPUOnly=use_cpu_only)
prediction = model.predict(inputs, useCPUOnly=use_cpu_only)
for output_name in expected:
np_preds = expected[output_name]
coreml_preds = prediction[output_name]
np_moments = [get_moment(np_preds.flatten(), k) for k in range(num_moments)]
coreml_moments = [get_moment(coreml_preds.flatten(), k) for k in range(num_moments)]
np.testing.assert_almost_equal(np_moments, coreml_moments, decimal=2)
# override expected values to allow element-wise compares
for output_name in expected:
expected[output_name] = prediction[output_name]
def _test_model(self,
model,
input,
expected,
model_precision=_MLMODEL_FULL_PRECISION,
useCPUOnly=False,
output_name_shape_dict={},
validate_shapes_only=False):
model_dir = None
# if we're given a path to a model
if isinstance(model, str):
model = coremltools.models.MLModel(model)
# If we're passed in a specification, save out the model
# and then load it back up
elif isinstance(model, coremltools.proto.Model_pb2.Model):
model_dir = tempfile.mkdtemp()
model_name = str(uuid.uuid4()) + '.mlmodel'
model_path = os.path.join(model_dir, model_name)
coremltools.utils.save_spec(model, model_path)
model = coremltools.models.MLModel(model, useCPUOnly=useCPUOnly)
# If we want to test the half precision case
if model_precision == _MLMODEL_HALF_PRECISION:
model = coremltools.utils.convert_neural_network_weights_to_fp16(
model)
try:
prediction = model.predict(input, useCPUOnly=useCPUOnly)
for output_name in expected:
if self.__class__.__name__ == "SimpleTest":
assert (self._compare_shapes(expected[output_name],
prediction[output_name]))
else:
if output_name in output_name_shape_dict:
output_shape = output_name_shape_dict[output_name]
else:
output_shape = []
if len(output_shape) == 0 and len(expected[output_name].shape) == 0:
output_shape = (1,)
assert (self._compare_nd_shapes(expected[output_name],
prediction[output_name],
output_shape))
if not validate_shapes_only:
assert (self._compare_predictions(expected[output_name],
prediction[output_name]))
finally:
# Remove the temporary directory if we created one
if model_dir and os.path.exists(model_dir):
shutil.rmtree(model_dir)
@unittest.skipIf(not is_macos() or macos_version() < MIN_MACOS_VERSION_REQUIRED,
'macOS 10.13+ is required. Skipping tests.')
class SimpleTest(CorrectnessTest):
def test_tiny_upsample_linear_mode(self):
input_dim = (1, 1, 3) # (C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_upsample(name='upsample',
scaling_factor_h=2, scaling_factor_w=3,
input_name='data', output_name='output',
mode='BILINEAR')
input = {
'data': np.reshape(np.array([1.0, 2.0, 3.0]), (1, 1, 3))
}
expected = {
'output': np.array(
[[1, 1.333, 1.666, 2, 2.333, 2.666, 3, 3, 3],
[1, 1.333, 1.6666, 2, 2.33333, 2.6666, 3, 3, 3]
])
}
self._test_model(builder.spec, input, expected)
        self.assertEqual(len(input_dim), builder._get_rank('output'))
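    # Worked check of the expected values above (illustrative): the input row
    # [1, 2, 3] is stretched to 9 columns in steps of 1/3 between samples
    # (1, 1.333, 1.666, 2, ...), clamping at 3 near the right edge, and the
    # height factor of 2 simply repeats that row.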
def test_LRN(self):
input_dim = (1, 3, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_lrn(name='lrn', input_name='data', output_name='output',
alpha=2, beta=3, local_size=1, k=8)
input = {
'data': np.ones((1, 3, 3))
}
expected = {
'output': 1e-3 * np.ones((1, 3, 3))
}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(input_dim), builder._get_rank('output'))
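    # Worked check (illustrative): with an all-ones input, local_size=1,
    # alpha=2, beta=3 and k=8, the local sum of squares is 1, so each output
    # is 1 / (8 + 2 * 1)**3 = 1e-3, matching the expected tensor above.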
def test_MVN(self):
input_dim = (2, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_mvn(name='mvn', input_name='data', output_name='output',
across_channels=False, normalize_variance=False)
input = {
'data': np.reshape(np.arange(8, dtype=np.float32), (2, 2, 2))
}
expected = {
'output': np.reshape(np.arange(8) - np.array(
[1.5, 1.5, 1.5, 1.5, 5.5, 5.5, 5.5, 5.5]), (2, 2, 2))
}
self._test_model(builder.spec, input, expected)
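    # Worked check (illustrative): with across_channels=False and
    # normalize_variance=False the layer only subtracts the per-channel mean;
    # channel 0 holds 0..3 (mean 1.5) and channel 1 holds 4..7 (mean 5.5),
    # which is exactly what the expected array above removes.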
def test_L2_normalize(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_l2_normalize(name='mvn', input_name='data',
output_name='output')
input = {
'data': np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
}
expected = {
'output': np.reshape(np.arange(4, dtype=np.float32),
(1, 2, 2)) / np.sqrt(14)
}
self._test_model(builder.spec, input, expected)
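    # Worked check (illustrative): the input values are [0, 1, 2, 3], whose
    # L2 norm is sqrt(0 + 1 + 4 + 9) = sqrt(14), hence the division by
    # np.sqrt(14) in the expected output above.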
def test_unary_sqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.sqrt(x)}
spec = _get_unary_model_spec(x, 'sqrt')
self._test_model(spec, input, expected)
def test_unary_rsqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / np.sqrt(x)}
spec = _get_unary_model_spec(x, 'rsqrt')
self._test_model(spec, input, expected)
def test_unary_inverse(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / x}
spec = _get_unary_model_spec(x, 'inverse')
self._test_model(spec, input, expected)
def test_unary_power(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x ** 3}
spec = _get_unary_model_spec(x, 'power', 3)
self._test_model(spec, input, expected)
def test_unary_exp(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.exp(x)}
spec = _get_unary_model_spec(x, 'exp')
self._test_model(spec, input, expected)
def test_unary_log(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.log(x)}
spec = _get_unary_model_spec(x, 'log')
self._test_model(spec, input, expected)
def test_unary_abs(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.abs(x)}
spec = _get_unary_model_spec(x, 'abs')
self._test_model(spec, input, expected)
def test_unary_threshold(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.maximum(x, 2)}
spec = _get_unary_model_spec(x, 'threshold', 2)
self._test_model(spec, input, expected)
def test_split(self):
input_dim = (9, 2, 2)
x = np.random.rand(*input_dim)
input_features = [('data', datatypes.Array(*input_dim))]
output_names = []
output_features = []
for i in range(3):
out = 'out_' + str(i)
output_names.append(out)
output_features.append((out, None))
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_split(name='split', input_name='data',
output_names=output_names)
input = {'data': x}
expected = {
'out_0': x[0: 3, :, :],
'out_1': x[3: 6, :, :],
'out_2': x[6: 9, :, :]
}
self._test_model(builder.spec, input, expected)
for output_ in output_names:
self.assertEqual(len(input_dim), builder._get_rank(output_))
def test_scale_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_scale(name='scale', W=5, b=45, has_bias=True,
input_name='data', output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 5 * x + 45}
self._test_model(builder.spec, input, expected)
def test_scale_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_scale(name='scale', W=W, b=None, has_bias=False,
input_name='data', output_name='output',
shape_scale=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': W * x}
self._test_model(builder.spec, input, expected)
def test_bias_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_bias(name='bias', b=45, input_name='data',
output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + 45}
self._test_model(builder.spec, input, expected)
def test_bias_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_bias(name='bias', b=b, input_name='data',
output_name='output',
shape_bias=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected)
def test_load_constant(self, model_precision=_MLMODEL_FULL_PRECISION):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_load_constant(name='load_constant', output_name='bias',
constant_value=b, shape=[1, 2, 2])
builder.add_elementwise(name='add', input_names=['data', 'bias'],
output_name='output', mode='ADD')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected, model_precision)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_load_constant_half_precision(self):
self.test_load_constant(model_precision=_MLMODEL_HALF_PRECISION)
def test_min(self):
input_dim = (1, 2, 2)
input_features = [('data_0', datatypes.Array(*input_dim)),
('data_1', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_elementwise(name='min', input_names=['data_0', 'data_1'],
output_name='output', mode='MIN')
x1 = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
x2 = np.reshape(np.arange(2, 6, dtype=np.float32), (1, 2, 2))
input = {'data_0': x1, 'data_1': x2}
expected = {'output': np.minimum(x1, x2)}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_conv_same_padding(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.random.rand(3, 3, 10, 20)
builder.add_convolution(name='conv', kernel_channels=10,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='same', groups=1,
W=W, b=None, has_bias=False,
input_name='data', output_name='output',
same_padding_asymmetry_mode='TOP_LEFT_HEAVY')
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 8, 8)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_deconv_valid_padding(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.random.rand(3, 3, 10, 20)
builder.add_convolution(name='deconv', kernel_channels=10,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='valid', groups=1,
W=W, b=None, has_bias=False,
is_deconv=True,
input_name='data', output_name='output',
padding_top=2, padding_bottom=3,
padding_left=2, padding_right=3)
        x = np.random.rand(*input_dim)
import os.path
import os
import pickle
import time
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal, assert_allclose
import scipy.stats as stats
# Before removing what appear to be unused imports think twice.
# Some of the tests use eval, which requires the imports.
import refnx.reflect._reflect as _reflect
from refnx.analysis import (
Transform,
Objective,
CurveFitter,
Parameter,
Interval,
Parameters,
)
from refnx.reflect import (
SLD,
ReflectModel,
MixedReflectModel,
reflectivity,
Structure,
Slab,
FresnelTransform,
choose_dq_type,
use_reflect_backend,
)
import refnx.reflect.reflect_model as reflect_model
from refnx.dataset import ReflectDataset
from refnx._lib import MapWrapper
BACKENDS = reflect_model.available_backends()
class TestReflect(object):
def setup_method(self):
self.pth = os.path.dirname(os.path.abspath(__file__))
sio2 = SLD(3.47, name="SiO2")
air = SLD(0, name="air")
si = SLD(2.07, name="Si")
d2o = SLD(6.36, name="D2O")
polymer = SLD(1, name="polymer")
self.structure = air | sio2(100, 2) | si(0, 3)
theoretical = np.loadtxt(os.path.join(self.pth, "theoretical.txt"))
qvals, rvals = np.hsplit(theoretical, 2)
self.qvals = qvals.flatten()
self.rvals = rvals.flatten()
# e361 is an older dataset, but well characterised
self.structure361 = si | sio2(10, 4) | polymer(200, 3) | d2o(0, 3)
self.model361 = ReflectModel(self.structure361, bkg=2e-5)
self.model361.scale.vary = True
self.model361.bkg.vary = True
self.model361.scale.range(0.1, 2)
self.model361.bkg.range(0, 5e-5)
# d2o
self.structure361[-1].sld.real.vary = True
self.structure361[-1].sld.real.range(6, 6.36)
self.structure361[1].thick.vary = True
self.structure361[1].thick.range(5, 20)
self.structure361[2].thick.vary = True
self.structure361[2].thick.range(100, 220)
self.structure361[2].sld.real.vary = True
self.structure361[2].sld.real.range(0.2, 1.5)
e361 = ReflectDataset(os.path.join(self.pth, "e361r.txt"))
self.qvals361, self.rvals361, self.evals361 = (
e361.x,
e361.y,
e361.y_err,
)
def test_abeles(self):
slabs = self.structure.slabs()[..., :4]
for backend in BACKENDS:
# test reflectivity calculation with values generated from Motofit
with use_reflect_backend(backend) as abeles:
calc = abeles(self.qvals, slabs)
assert_almost_equal(calc, self.rvals)
def test_noncontig_abeles(self):
# test for non-contiguous Q values
tempq = self.qvals[0::5]
slabs = self.structure.slabs()[..., :4]
assert tempq.flags["C_CONTIGUOUS"] is False
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc = abeles(tempq, slabs)
assert_almost_equal(calc, self.rvals[0::5])
def test_abeles_multithreaded(self):
slabs = self.structure.slabs()[..., :4]
for backend in BACKENDS:
# test reflectivity calculation with values generated from Motofit
with use_reflect_backend(backend) as abeles:
calc = abeles(self.qvals, slabs, threads=4)
assert_almost_equal(calc, self.rvals)
def test_available_backends(self):
assert "python" in BACKENDS
assert "c" in BACKENDS
import refnx.reflect._creflect as _creflect
import refnx.reflect._reflect as _reflect
assert _reflect.__file__ != _creflect.__file__
if "cython" in BACKENDS:
import refnx.reflect._cyreflect as _cyreflect
assert _creflect.__file__ != _cyreflect.__file__
def test_first_principles(self):
# Test a first principles reflectivity calculation, rather than
# relying on a previous calculation from Motofit code.
# Here we only examine Fresnel reflectivity from an infinitely
# sharp interface, we do not examine a rough surface. This is
# tested by profile slicing in test_structure.
def kn(q, sld_layer, sld_fronting):
# wave vector in a given layer
kvec = np.zeros_like(q, np.complex128)
sld = complex(sld_layer - sld_fronting) * 1.0e-6
kvec[:] = np.sqrt(q[:] ** 2.0 / 4.0 - 4.0 * np.pi * sld)
return kvec
q = np.linspace(0.001, 1.0, 1001)
# Is the fresnel reflectivity correct?
sld1 = 2.07
sld2 = 6.36
# first principles calcn
kf = kn(q, sld1, sld1)
kb = kn(q, sld2, sld1)
reflectance = (kf - kb) / (kf + kb)
reflectivity = reflectance * np.conj(reflectance)
# now from refnx code
struct = SLD(sld1)(0, 0) | SLD(sld2)(0, 0)
slabs = struct.slabs()[..., :4]
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
assert_allclose(abeles(q, slabs), reflectivity, rtol=1e-14)
# reverse the direction
kf = kn(q, sld2, sld2)
kb = kn(q, sld1, sld2)
reflectance = (kf - kb) / (kf + kb)
reflectivity = reflectance * np.conj(reflectance)
# now from refnx code
struct = SLD(sld2)(0, 0) | SLD(sld1)(0, 0)
slabs = struct.slabs()[..., :4]
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
assert_allclose(abeles(q, slabs), reflectivity, rtol=1e-14)
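    # Note (illustrative): in kn() the wave vector is
    # sqrt(q**2 / 4 - 4 * pi * (sld_layer - sld_fronting) * 1e-6), so below
    # the critical edge q_c = 4 * sqrt(pi * (sld2 - sld1) * 1e-6) the backing
    # wave vector is purely imaginary and |reflectance| = 1 (total external
    # reflection).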
def test_scale_bkg_abeles(self):
s = self.structure.slabs()[..., :4]
calcs = []
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc = abeles(self.qvals, s, scale=2.0)
calcs.append(calc)
for calc in calcs[1:]:
assert_allclose(calc, calcs[0])
calcs = []
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc = abeles(self.qvals, s, scale=0.5, bkg=0.1)
calcs.append(calc)
for calc in calcs[1:]:
assert_allclose(calc, calcs[0])
calcs = []
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc = abeles(self.qvals, s, scale=0.5, bkg=0.1, threads=2)
calcs.append(calc)
for calc in calcs[1:]:
assert_allclose(calc, calcs[0])
"""
@np.testing.decorators.knownfailure
def test_cabeles_parallelised(self):
# I suppose this could fail if someone doesn't have a multicore
# computer
if not TEST_C_REFLECT:
return
coefs = np.array([[0, 0, 0, 0],
[300, 3, 1e-3, 3],
[10, 3.47, 1e-3, 3],
[0, 6.36, 0, 3]])
x = np.linspace(0.01, 0.2, 1000000)
pstart = time.time()
_creflect.abeles(x, coefs, threads=0)
pfinish = time.time()
sstart = time.time()
_creflect.abeles(x, coefs, threads=1)
sfinish = time.time()
print(sfinish - sstart, pfinish - pstart)
assert_(0.7 * (sfinish - sstart) > (pfinish - pstart))
"""
def test_compare_abeles0(self):
# test one layer system against the python implementation
layer0 = np.array([[0, 2.07, 0.01, 3], [0, 6.36, 0.1, 3]])
with use_reflect_backend("python") as abeles:
calc1 = abeles(self.qvals, layer0, scale=0.99, bkg=1e-8)
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc2 = abeles(self.qvals, layer0, scale=0.99, bkg=1e-8)
assert_almost_equal(calc1, calc2)
# test a negative background
with use_reflect_backend("python") as abeles:
calc1 = abeles(self.qvals, layer0, scale=0.99, bkg=-5e-7)
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc2 = abeles(self.qvals, layer0, scale=0.99, bkg=-5e-7)
assert_almost_equal(calc1, calc2)
def test_compare_abeles2(self):
# test two layer system against the python implementation
layer2 = np.array(
[
[0, 2.07, 0.01, 3],
[10, 3.47, 0.01, 3],
[100, 1.0, 0.01, 4],
[0, 6.36, 0.1, 3],
]
)
with use_reflect_backend("python") as abeles:
calc1 = abeles(self.qvals, layer2, scale=0.99, bkg=1e-8)
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc2 = abeles(self.qvals, layer2, scale=0.99, bkg=1e-8)
assert_almost_equal(calc1, calc2)
def test_abeles_absorption(self):
# https://github.com/andyfaff/refl1d_analysis/tree/master/notebooks
q = np.linspace(0.008, 0.05, 500)
depth = [0, 850, 0]
rho = [2.067, 4.3, 6.0]
irho_zero = [0.0, 0.1, 0.0]
refnx_sigma = [np.nan, 35, 5.0]
w_zero = np.c_[depth, rho, irho_zero, refnx_sigma]
with use_reflect_backend("python") as abeles:
calc1 = abeles(q, w_zero)
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc2 = abeles(q, w_zero)
assert_almost_equal(calc1, calc2)
def test_abeles_absorption2(self):
# https://github.com/andyfaff/refl1d_analysis/tree/master/notebooks
# this has an appreciable notch just below the critical edge
refl1d = np.load(os.path.join(self.pth, "absorption.npy"))
q = np.geomspace(0.005, 0.3, 201)
depth = [0, 1200, 0]
rho = [2.07, 4.66, 6.36]
irho = [0, 0.016, 0]
refnx_sigma = [np.nan, 10, 3]
slabs = np.c_[depth, rho, irho, refnx_sigma]
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc = abeles(q, slabs)
assert_almost_equal(calc, refl1d[1])
def test_compare_refl1d(self):
# refl1d calculated with:
# from refl1d import abeles
# x = np.linspace(0.005, 0.5, 1001)
# z = abeles.refl(x / 2,
# [0, 100, 200, 0],
# [2.07, 3.45, 5., 6.],
# irho=[0.0, 0.1, 0.01, 0],
# sigma=[3, 1, 5, 0])
# a = z.real ** 2 + z.imag ** 2
layers = np.array(
[
[0, 2.07, 0, 0],
[100, 3.45, 0.1, 3],
[200, 5.0, 0.01, 1],
[0, 6.0, 0, 5],
]
)
x = np.linspace(0.005, 0.5, 1001)
refl1d = np.load(os.path.join(self.pth, "refl1d.npy"))
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc = abeles(x, layers)
            assert_almost_equal(calc, refl1d)
import os, sys
import numpy as np
import pandas as pd
# only meant for reading spr_file's
# spr_files have following structure
# 1. magic keyword (optional)
# .spr (SPRAAK package) or .key (HMM package) otherwise NOHEADER file is assumed
# 2. header (optional)
# a header with multiple lines consisting of KEY VALUE pairs where
# the first word is the KEY and the REMAINDER the VALUE
# 3. separator (required if 1. or 2. is present)
# line starting with "#"
# 4. data
# formatted as specified in the header or via other arguments
#
# noheader files are assumed to be text and read as sequence of lines
# e.g. dictionaries, phone lists, ...
#
def spr_open_file(fname,flag="r",encoding="latin1",noheader=False):
    if flag != "r":
        print("spr_open_file(): is designed for READING spr files only")
        sys.exit()
if not os.path.isfile(fname):
print("File path {} does not exist. Exiting...".format(fname))
sys.exit()
fp = open(fname,'r',encoding=encoding)
first_time = True
hdr = {}
while(1):
line = fp.readline()
line = line.strip()
# determine the header type of the file
if ( first_time ):
first_time = False
if( line == ".key" ) : hdr['HEADER'] = "key"
elif (line == ".spr") : hdr['HEADER'] = "spr"
else:
hdr['HEADER'] = "nohdr"
hdr['DATA'] = "LIST"
fp.seek(0)
# break
continue
# continue reading header KEY VALUE pairs till EOH is detected
if len(line) == 0: continue
elif line[0]=="#":
break
else:
w = line.split(None,1)
if len(w) == 1: hdr[w[0]] = True
else: hdr[w[0]] = w[1]
return fp, hdr
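# Usage sketch (illustrative only): "features.spr" below is a hypothetical file
# name; any file with a ".spr"/".key" header (or no header) can be opened this way.
def _example_spr_header(fname="features.spr"):
    fp, hdr = spr_open_file(fname)
    fp.close()
    return hdr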
def spr_read_data(fp,hdr):
if 'DATATYPE' in hdr.keys():
hdr['DATA'] = hdr['DATATYPE']
if not 'DATA' in hdr.keys(): hdr['DATA'] = 'LIST'
if(hdr['DATA']=='TRACK'):
nfr = int(hdr['DIM1'])
nparam = int(hdr['DIM2'])
itemtype = 'float32'
# print(nfr,nparam,itemtype)
data = np.fromfile(fp,dtype=itemtype,count=-1)
        data = np.reshape(data,(nfr,nparam))
import numpy as np
import numpy.ma as ma
#import sys
import os
from scipy import linalg
from scipy.signal import detrend, butter, lfilter
from scipy import signal
from scipy.interpolate import griddata
from joblib import Parallel, delayed
#from joblib import load, dump
#from netCDF4 import Dataset
import tempfile
import shutil
import xarray as xr
#import dist
#import math
import datetime
from numpy.linalg import eig, inv
import scipy.spatial.qhull as qhull
#
def distance(origin,destination,radius=6371):
'''
# Haversine formula
# Author: <NAME>
#
# INPUT DATA
# origin :: (lat1, lon1)
# destination :: (lat2, lon2)
#
# RETURNS
#
# d :: distance in km
'''
#
lat1, lon1 = origin
lat2, lon2 = destination
#radius = 6371 # km
#
dlat = np.radians(lat2-lat1)
dlon = np.radians(lon2-lon1)
#
a = np.sin(dlat/2) * np.sin(dlat/2) + np.cos(np.radians(lat1))* np.cos(np.radians(lat2)) * np.sin(dlon/2) * np.sin(dlon/2)
c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1-a))
d = radius * c
#
return d
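# Usage sketch (illustrative only): great-circle distance in km between two
# (lat, lon) pairs; the coordinates below are arbitrary example values.
def _example_distance():
    # roughly Helsinki -> Stockholm, about 400 km
    return distance((60.17, 24.94), (59.33, 18.07))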
def rot_m(theta):
'''
    # Create a rotation matrix for a given angle (rotations counter-clockwise)
'''
#
c,s = np.cos(theta), np.sin(theta)
#
return np.array(((c,-s), (s, c)))
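# Usage sketch (illustrative only): rotating the unit x-vector by 90 degrees
# counter-clockwise should give approximately [0, 1].
def _example_rot_m():
    R = rot_m(np.pi / 2)
    return R.dot(np.array([1.0, 0.0]))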
def create_A(angles=range(90)):
'''
# Create a counter-clockwise rotation matrix A in the matrix equation k=A*K
# note that this *counter-clockwise* so make sure the angle makes sense
# for your case. For example if your data is at 10 deg rotation from x-y plane
# you should call the function with angles=np.array([360-10])
# -> this will rotate a full circle back to x-y plane
#
# A[angle0,:]=[cos(angle0)**2, sin(angle0)**2, sin(2*angle0)]
# A[angle0,:]=[sin(angle0)**2, cos(angle0)**2, -sin(2*angle0)]
# .
# .
# .
# A[angleN,:]=[cos(angleN)**2, sin(angleN)**2, sin(2*angleN)]
# A[angleN,:]=[sin(angleN)**2, cos(angleN)**2, -sin(2*angleN)]
#
# the input variable is a list (or an array) of angles
'''
#
A=np.zeros((len(angles)*2,3))
c=0
for ang in angles:
A[c,0]=np.cos(np.radians(ang))**2
A[c,1]=np.sin(np.radians(ang))**2
A[c,2]=np.sin(np.radians(2*ang))
A[c+1,0]=np.sin(np.radians(ang))**2
A[c+1,1]=np.cos(np.radians(ang))**2
A[c+1,2]=-np.sin(np.radians(2*ang))
c=c+2
#
return A
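# Usage sketch (illustrative only): build A for a set of angles and solve the
# least-squares problem k = A*K for the three unknowns in K; the observation
# vector k below is synthetic.
def _example_create_A():
    angles = np.arange(0, 90, 10)
    A = create_A(angles=angles)
    k = np.ones(A.shape[0])                      # synthetic observations
    K, *_ = np.linalg.lstsq(A, k, rcond=None)
    return K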
def griddata_interp_weights(in_points, out_points, d=2):
'''
# This function returns the triangulations weights used by scipy.griddata
# the weights can then be used with the griddata_interpolation below to
# produce the same results as griddata, but without the need to re-calculate the weights
# -> overall much faster than looping over griddata calls
#
# * This is direct copy from https://stackoverflow.com/questions/20915502/speedup-scipy-griddata-for-multiple-interpolations-between-two-irregular-grids
# Big thanks to Jaime/unutbu for saving my day
'''
tri = qhull.Delaunay(in_points)
simplex = tri.find_simplex(out_points)
vertices = np.take(tri.simplices, simplex, axis=0)
temp = np.take(tri.transform, simplex, axis=0)
delta = out_points - temp[:, d]
bary = np.einsum('njk,nk->nj', temp[:, :d, :], delta)
return vertices, np.hstack((bary, 1 - bary.sum(axis=1, keepdims=True)))
def griddata_interpolation(values, vtx, wts):
'''
# This is essentially the interpolation part of griddata
# Use griddata_interp_weights to get the vtx, wts (vertices and weights)
# and then call this function to do the interpolation
'''
return np.einsum('nj,nj->n', np.take(values, vtx), wts)
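# Usage sketch (illustrative only): compute the triangulation weights once and
# reuse them to interpolate several fields between the same irregular grids.
def _example_griddata_weights():
    rng = np.random.RandomState(0)
    in_points = rng.rand(100, 2)                 # scattered source points
    out_points = rng.rand(50, 2)                 # target points
    vtx, wts = griddata_interp_weights(in_points, out_points)
    field1 = np.sin(in_points[:, 0])
    field2 = np.cos(in_points[:, 1])
    return (griddata_interpolation(field1, vtx, wts),
            griddata_interpolation(field2, vtx, wts))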
def smooth2D_loop(k2,h2,n,ymax,xmax,jind,iind,lat,lon,datain,data_out,weights_out,use_weights,weights_only,use_median,use_dist,xscaling):
"""This is the loop to be run paralllel by smooth2D_parallel. Should not be called directly. """
for k in range(k2,min([k2+h2,len(jind)])):
j = jind[k]
i = iind[k]
nx = xscaling*n
jind2 = []
iind2 = []
dxy = []
c = 0
for ib in range(-nx,nx+1):
for jb in range(-n,n+1):
if ((j+jb)>=ymax or (j+jb)<0):
jind2.append(j)
else:
jind2.append(j+jb)
if (i+ib)>=xmax: #note that xmin case is automatically covered thanks to python indexing
iind2.append((i+ib)-xmax)
elif (i+ib)<0:
iind2.append(xmax+(i+ib))
else:
iind2.append(i+ib)
if datain.mask[jind2[-1],iind2[-1]]:
jind2[-1]=j
iind2[-1]=i
if use_weights and use_dist:
if len(lon.shape)==1:
dxy.append(distance([lat[j],lon[i]],[lat[jind2[c]],lon[iind2[c]]]))
else:
dxy.append(distance([lat[j,i],lon[j,i]],[lat[jind2[c],iind2[c]],lon[jind2[c],iind2[c]]]))
c=c+1
if k%10000.==0:
print(k, c, j, i)
if use_weights:
if use_dist:
dxy=np.array(dxy)
else:
if len(lon.shape)==1:
lon2,lat2=np.meshgrid(lon,lat)
else:
lon2=lon
lat2=lat
dxy=np.cos(lat2[jind2,iind2]*np.pi/180.)
if ma.sum(dxy)==0:
weights=np.ones(len(dxy))
diind=np.argsort(dxy)
else:
diind=np.argsort(dxy)
weights=(float(ma.sum(np.sort(dxy)))-np.sort(dxy))/ma.sum(float(ma.sum(np.sort(dxy)))-np.sort(dxy))
weights_out[k,:,0]=weights
weights_out[k,:,1]=np.array(jind2)[diind]
weights_out[k,:,2]=np.array(iind2)[diind]
else:
weights_out[k,:,0]=0
weights_out[k,:,1]=np.array(jind2)
weights_out[k,:,2]=np.array(iind2)
if not weights_only:
if use_weights:
data_out[j,i]=ma.sum(datain[jind2[diind],iind2[diind]]*weights)/ma.sum(weights)
elif use_median:
data_out[j,i]=ma.median(datain[jind2,iind2])
else:
data_out[j,i]=ma.mean(datain[jind2,iind2])
def smooth2D_parallel(lon,lat,datain,n=1,num_cores=30,use_weights=False,weights_only=False,use_median=False,save_weights=False,save_path='', use_dist=False, xscaling=2):
"""
    2D smoothing of a (preferably masked) array datain (should be shape (lat,lon)), using a halo of n; if n=1 (default) each point will be a 9-point average. Option to use distance weights.
Parameters
----------
lon : longitudes of the input data (1D or 2D array)
lat : latitudes of the input data (1D or 2D array)
datain : input data (should be shape (lat,lon)) and prefereably masked
n : Size of the halo over which the smoothing is applied.
        If n=1 (default) then each point will be a 9-point average
Use xscaling to use a different halo in x direction
xscaling : Scale the halo in x-direction (default 2), this is reasonable if data is on lat, lon grid
num_cores : number of cores to use (default 30)
use_weights : Controls if specific weights will be calculated (default is False)
If False then will return the indices of the grid cells that should be used for smoothing
with equal weights (set to 0). If True then weights will be calculated (see below for different options)
use_dist : If true then the weights will be calculated based on distance (in km) from the central cell.
Default is False in which case distance in degrees will be used.
weights_only : If True only calculate weights, do not apply to the data (dataout will be empty).
Default is False i.e. weights will be applied!
use_median : Only used if weights_only=False and use_weights=False
In this case one has an option to smooth either by calculating the median (use_median=True)
or by using the mean of the surrounding points (use_median=False)
save_weights : If True the weights will be saved to npz file (default is False).
        This is useful if the domain is large and the smoothing will be applied often
save_path : Location in which the weights will be saved. Default is to save in the work directory
"""
#dataout=ma.zeros(datain.shape)
ymax,xmax=datain.shape
if ma.is_masked(datain):
jind,iind=ma.where(1-datain.mask)
else:
jind,iind=ma.where(np.ones(datain.shape))
#
    h2 = len(jind)//num_cores # integer division so it can be used as a range step below
folder1 = tempfile.mkdtemp()
path1 = os.path.join(folder1, 'dum1.mmap')
data_out = np.memmap(path1, dtype=float, shape=(datain.shape), mode='w+')
#
folder2 = tempfile.mkdtemp()
path2 = os.path.join(folder2, 'dum2.mmap')
weights_out = np.memmap(path2, dtype=float, shape=((len(jind),len(range(-n,n+1))*len(range(-2*n,2*n+1)),3)), mode='w+')
#weights_out=np.memmap(path2, dtype=float, shape=((len(jind),len(range(-n,n+1))**2,3)), mode='w+')
#
Parallel(n_jobs=num_cores)(delayed(smooth2D_loop)(k2,h2,n,ymax,xmax,jind,iind,lat,lon,datain,data_out,weights_out,use_weights,weights_only,use_median,use_dist,xscaling) for k2 in range(0,len(jind),h2))
data_out=ma.masked_array(np.asarray(data_out),mask=datain.mask)
weights_out=np.asarray(weights_out)
if save_weights:
np.savez(save_path+str(n)+'_degree_smoothing_weights_coslat_y'+str(n)+'_x'+str(xscaling*n)+'.npz',weights_out=weights_out,jind=jind,iind=iind)
try:
shutil.rmtree(folder1)
except OSError:
pass
#
try:
shutil.rmtree(folder2)
except OSError:
pass
#
return data_out,weights_out
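# Usage sketch (illustrative only): smooth a small synthetic masked field with a
# 1-cell halo and equal weights; num_cores is kept small here on purpose.
def _example_smooth2D():
    lat = np.linspace(-80, 80, 20)
    lon = np.linspace(0, 350, 36)
    data = np.random.rand(20, 36)
    mask = np.zeros(data.shape, dtype=bool)
    mask[:3, :3] = True                          # pretend a small land patch
    data = ma.masked_array(data, mask=mask)
    smoothed, _ = smooth2D_parallel(lon, lat, data, n=1, num_cores=2,
                                    use_weights=False, use_median=False)
    return smoothed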
def smooth_with_weights_loop(k2,h2,datain,data_out,weights,jind,iind,use_weights,use_median,loop=False):
if loop:
for k in range(k2,min([k2+h2,len(jind)])):
if k%10000.==0:
print(k)
j=jind[k]
i=iind[k]
c=0
if use_weights:
data_out[j,i]=ma.sum(datain[weights[k,:,1].astype('int'),weights[k,:,2].astype('int')]*weights[k,:,0])/ma.sum(weights[k,:,0])
elif use_median:
data_out[j,i]=ma.median(datain[weights[k,:,1].astype('int'),weights[k,:,2].astype('int')])
else:
data_out[j,i]=ma.mean(datain[weights[k,:,1].astype('int'),weights[k,:,2].astype('int')])
else:
k3=min([k2+h2,len(jind)])
if use_weights:
data_out[k2:k2+h2]=ma.sum(datain[weights[k2:k3,:,1].astype('int'),weights[k2:k3,:,2].astype('int')]*weights[k2:k3,:,0],-1)/ma.sum(weights[k2:k3,:,0])
elif use_median:
data_out[k2:k3]=ma.median(datain[weights[k2:k3,:,1].astype('int'),weights[k2:k3,:,2].astype('int')],-1)
else:
data_out[k2:k3]=ma.mean(datain[weights[k2:k3,:,1].astype('int'),weights[k2:k3,:,2].astype('int')],-1)
def smooth_with_weights_parallel(datain,n=1,num_cores=30,weights=None,jind=None,iind=None,use_weights=False,use_median=False,loop=False,save_path=''):
"""
Given that one has already calculated and saved smoothing weights/indices with smooth2D_parallel one can simply apply them with this script
    Turns out this is fastest to do in serial, so do that instead!
"""
# load the data if needed - don't use this if you're smoothing a timeseries
if weights is None:
data=np.load(save_path+str(n)+'_degree_smoothing_weights_new.npz')
weights=data['weights_out'][:]
jind=data['jind'][:]
iind=data['iind'][:]
    # prepare for the parallel loop
    h2=len(jind)//num_cores # integer division so it can be used as a range step below
folder1 = tempfile.mkdtemp()
path1 = os.path.join(folder1, 'dum1.mmap')
if loop:
data_out=np.memmap(path1, dtype=float, shape=(datain.shape), mode='w+')
# Parallel(n_jobs=num_cores)(delayed(smooth_with_weights_loop)(k2,h2,datain,data_out,weights,jind,iind,use_weights,use_median,loop) for k2 in range(0,len(jind),h2))
else:
data_out=np.memmap(path1, dtype=float, shape=(len(jind)), mode='w+')
# Parallel(n_jobs=num_cores)(delayed(smooth_with_weights_loop)(k2,h2,datain.flatten(),data_out,weights,jind,iind,use_weights,use_median,loop) for k2 in range(0,len(jind),h2))
    # this should work but seems to be slow
#
Parallel(n_jobs=num_cores)(delayed(smooth_with_weights_loop)(k2,h2,datain,data_out,weights,jind,iind,use_weights,use_median,loop) for k2 in range(0,len(jind),h2))
# mask output
if loop:
data_out=ma.masked_array(np.asarray(data_out),mask=datain.mask)
else:
data_out2=np.zeros(datain.shape)
data_out2[jind,iind]=data_out
data_out=ma.masked_array(data_out2,mask=datain.mask)
# close temp file
try:
shutil.rmtree(folder1)
except OSError:
pass
#
return data_out
def butter_bandstop(lowcut, highcut, fs, btype, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype=btype)
return b, a
def butter_bandstop_filter(data, lowcut, highcut, fs, order=5, ax=-1, btype='bandstop'):
"""
bandstop filter, usage:
x_grid = MicroInverse_utils.butter_bandstop_filter((x_grid-np.nanmean(x_grid,0)), 7./375., 7/355., 1, order=3,ax=0)
"""
b, a = butter_bandstop(lowcut, highcut, fs, btype, order=order)
y = signal.filtfilt(b, a, data, axis=ax)
return y
def Implement_Notch_Filter(data, lowcut, highcut, fs=1, order=3, ripple=20, atten=20, filter_type='butter', ax=-1, btype='bandstop'):
"""
    Required input definitions are as follows
Parameters
----------
fs : Sampling frequency
lowcut,highcut : The bandwidth bounds you wish to filter
ripple : The maximum passband ripple that is allowed in db
    order : The filter order. For FIR notch filters this is best set to 2 or 3;
            IIR filters are best suited for higher orders. This implementation
            uses an IIR filter (scipy.signal.iirfilter).
filter_type : 'butter', 'bessel', 'cheby1', 'cheby2', 'ellip'
data : the data to be filtered
"""
nyq = 0.5 * fs
# low = freq - band/2.0
# high = freq + band/2.0
low = lowcut/nyq
high = highcut/nyq
b, a = signal.iirfilter(order, [low, high], rp=ripple, rs=atten, btype=btype,analog=False, ftype=filter_type)
filtered_data = signal.filtfilt(b, a, data,axis=ax)
#
return filtered_data
def remove_climatology_loop(jj,h2,dum,dum_out,dt,rem6month):
"""
Remove climatology, i.e. 12 month and optionally 6 month
(rem6month=True, default setting) from the data
"""
print(jj, 'removing climatology...')
dum1=dum[:,jj:jj+h2] # .data
f1=1*dt/365.
f2=2*dt/365.
t=np.arange(dum1.shape[0])
#
x1 = np.ones((len(t),3))
x1[:,1] = np.cos((2*np.pi)*f1*t)
x1[:,2] = np.sin((2*np.pi)*f1*t)
#
if rem6month:
x2 = np.ones((len(t),3))
x2[:,1] = np.cos((2*np.pi)*f2*t)
x2[:,2] = np.sin((2*np.pi)*f2*t)
#
inds=np.where(np.isfinite(dum1[0,:]))[0]
#
if len(inds)>0: # do nothing if only land points otherwise enter the loop
for j in inds:
y = dum1[:,j]
# fit one year signal
beta = np.linalg.lstsq(x1, y, rcond=None)[0]
y12mo = beta[0]+beta[1]*np.cos((2*np.pi)*f1*t)+beta[2]*np.sin((2*np.pi)*f1*t)
#
if rem6month:
# fit 6 month signal
beta=np.linalg.lstsq(x2, y, rcond=None)[0]
y6mo = beta[0]+beta[1]*np.cos((2*np.pi)*f2*t)+beta[2]*np.sin((2*np.pi)*f2*t)
dum_out[:,jj+j]=y-y12mo-y6mo
else:
dum_out[:,jj+j]=y-y12mo
def remove_climatology(var,dt,num_cores=18,rem6month=True):
"""
Remove annual cycle (fitted sine curve) from a numpy.array which has dimensions (nt,nx*ny)
Parameters
----------
var : numpy.array
Data from which annual cycle is to be removed.
Dimensions (nt,nx*ny), no nan's allowed!
dt : int
timestep in days
num_cores : int, optional
Number of cores for multiprocessing (default 18)
rem6month : bool, optional
If True (default) also 6 month (180 day) signal is removed
Returns
-------
output : numpy.array
The output array from which the annual cycle has been removed
"""
# num_cores=20
h2=var.shape[-1]//num_cores
#
var=var-np.nanmean(var,0)
#
folder1 = tempfile.mkdtemp()
path1 = os.path.join(folder1, 'dum1.mmap')
dum=np.memmap(path1, dtype=float, shape=(var.shape), mode='w+')
dum[:]=var[:]
#
folder2 = tempfile.mkdtemp()
path2 = os.path.join(folder2, 'dum2.mmap')
X_par=np.memmap(path2, dtype=float, shape=(var.shape), mode='w+')
#
# Parallel(n_jobs=num_cores)(delayed(remove_climatology_loop)(jj,h2,dum1,X_par) for jj in range(0,var.shape[-1],h2))
# Parallel(n_jobs=num_cores)(delayed(remove_climatology_loop)(jj,h2,dum[:,jj:jj+h2],X_par[:,jj:jj+h2]) for jj in range(0,var.shape[-1],h2))
Parallel(n_jobs=num_cores)(delayed(remove_climatology_loop)(jj,h2,dum,X_par,dt,rem6month) for jj in range(0,var.shape[-1],h2))
# Parallel(n_jobs=num_cores)(delayed(remove_climatology_loop)(jj,h2,dum1,X_par) for jj in range(0,block_num_lons))
#
output=np.asarray(X_par)
try:
shutil.rmtree(folder1)
except OSError:
pass
try:
shutil.rmtree(folder2)
except OSError:
pass
#
return output
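# Usage sketch (illustrative only): remove a fitted annual (and semi-annual)
# cycle from a small synthetic daily timeseries of shape (nt, nx*ny).
def _example_remove_climatology():
    nt, npts = 3 * 365, 8                        # three years of daily data, 8 points
    t = np.arange(nt)[:, None]
    var = np.sin(2 * np.pi * t / 365.) + 0.1 * np.random.randn(nt, npts)
    return remove_climatology(var, dt=1, num_cores=2)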
def remove_climatology2(dum,rem6month=True):
"""
Remove climatology, serial code
"""
print('removing climatology...')
f1=1/365.
f2=2/365.
t=np.arange(dum.shape[0])
dum=dum-np.nanmean(dum,0)
dum2=np.zeros(dum.shape)
for j in range(dum.shape[-1]):
y = dum[:,j].data
# fit one year signal
x = np.ones((len(y),3))
x[:,1] = np.cos((2*np.pi)*f1*t)
x[:,2] = np.sin((2*np.pi)*f1*t)
beta, resid, rank, sigma = np.linalg.lstsq(x, y)
y12mo = beta[0]+beta[1]*np.cos((2*np.pi)*f1*t)+beta[2]*np.sin((2*np.pi)*f1*t)
#
# fit 6 month signal
if rem6month:
x = np.ones((len(y),3))
x[:,1] = np.cos((2*np.pi)*f2*t)
x[:,2] = np.sin((2*np.pi)*f2*t)
beta, resid, rank, sigma = np.linalg.lstsq(x, y)
y6mo = beta[0]+beta[1]*np.cos((2*np.pi)*f2*t)+beta[2]*np.sin((2*np.pi)*f2*t)
dum2[:,j]=y-y12mo-y6mo
else:
dum2[:,j]=y-y12mo
return dum2
def read_files(j,nts,jinds,iinds,filepath,fnames2,var_par,varname,sum_over_depth, depth_lim, depth_lim0, model_data=False):
"""
    Read files in parallel. This function should not be called directly, but via the load_data() function
var_par should be of shape (len(filenames), time_steps_per_file, ny, nx)
"""
#
fname=fnames2[j]
print(fname)
ds = xr.open_dataset(filepath+fname,decode_times=False)
ds = ds.squeeze() #just in case depth etc.
#
# reading a file with a timeseries of 2D field (i.e. 3D matrix)
if len(var_par.shape)==3 and sum_over_depth==False:
nt,ny,nx=ds[varname].shape
nts[j]=nt
# this is a quick fix without a need to change upstream calls - supposedly faster?
if False:
jlen=np.unique(jinds).shape[0]
ilen=np.unique(iinds).shape[0]
j1=np.reshape(jinds,(jlen,ilen))[:,0]
i1=np.reshape(iinds,(jlen,ilen))[1,:]
exec('dum=ds.'+varname+'[:,j1,i1].values')
dum=ds[varname][:,j1,i1].values
dum=np.reshape(dum,(nt,-1))
else:
# old version - very slow!
dum=ds[varname].values[:,jinds,iinds]
dum[np.where(dum>1E30)]=np.nan
#
var_par[j,:nt,:]=dum
var_par[j,nt:,:]=np.nan # in order to calculate the climatology
# reading a model data file, with a timeseries of 3D field (i.e. 4D matrix) and calculating the volume mean over depth)
elif len(var_par.shape)==3 and sum_over_depth==True and model_data==True:
nt,nz,ny,nx=ds[varname].shape
zaxis=ds['st_edges_ocean'].values
dz=np.diff(zaxis)[depth_lim0:depth_lim]
nts[j]=nt
var_par[j,:nt,:]=np.sum(np.swapaxes(ds[varname].values[:,depth_lim0:depth_lim,jinds,iinds],1,0).T*dz,-1).T/np.sum(dz)
# reading a file with only one 2D field in one file
elif len(var_par.shape)==2 and sum_over_depth==False:
ny,nx=ds[varname].squeeze().shape
var_par[j,:]=ds[varname].squeeze().values[jinds,iinds]
var_par[np.where(var_par>1E30)]=np.nan
# reading a file with only one 3D field in one file, and calculating the volume mean over depth
elif len(var_par.shape)==2 and sum_over_depth==True:
# this is for sum(dz*T)/sum(dz) so weighted mean temperature - areas where depth is less than depth_lim are nan
# var_par[j,:]=np.sum(ds[varname].values[depth_lim0:depth_lim,jinds,iinds].T*np.diff(ds['depth'].values)[depth_lim0:depth_lim],-1).T/np.sum(np.diff(ds['depth'].values)[depth_lim0:depth_lim])
# this is for dz*T
# var_par[j,:]=np.nansum(ds[varname].values[depth_lim0:depth_lim,jinds,iinds].T*np.diff(ds['depth'].values)[depth_lim0:depth_lim],-1).T
#
# this is for nansum(dz*T)/nansum(dz) so weighted mean temperature - areas where depth is less than depth_lim will have a temperature
var_par[j,:]=np.nansum(ds[varname].values[depth_lim0:depth_lim,jinds,iinds].T*np.diff(ds['depth'].values)[depth_lim0:depth_lim],-1).T/np.nansum(abs(np.sign(ds[varname].values[depth_lim0:depth_lim,jinds,iinds].T))*np.diff(ds['depth'].values)[depth_lim0:depth_lim],-1).T
# consider making another one here which is heat content ds[varname]*density where density=gsw.density(ds[varname],ds['salinity'],p=0)
#
print('closing the file')
ds.close()
def load_data(filepath,fnames,jinds,iinds,varname,num_cores=20,dim4D=True, sum_over_depth=False, depth_lim=13, model_data=False, remove_clim=False,dt=1, depth_lim0=0):
"""
Load a timeseries of a 2D field (where possibly summing over depth if a 3D variable) in parallel
Parameters
----------
filepath : str
Directory path pointing to the data folder
Can be empty string if path is included in fnames
fnames : list
List of file names
jinds : list
List of non-nan indices in y-direction.
iinds : list
List of non-nan indices in x-direction
Note that one should create jinds and iinds as follows
1) create a 2D mask: 1 where nan, else 0
usually landmask for ocean data
2) then do the following
jinds,iinds = np.where(mask)
jinds,iinds = np.meshgrid(jinds,iinds)
jinds = jinds.flatten()
iinds = iinds.flatten()
varname : str
Name of the variable of interest in the data file
num_cores : int
Number of cores to use (default 20)
dim4D : bool
True (default) if a file has more than one timestep
sum_over_depth : bool
False (default) if the data has a depth axis
and one wants a sum over a depth range.
    depth_lim0 : integer
            Upper (start) index for the depth average
    depth_lim : integer
            Lower (end) index for the depth average
remove_clim : boolean
If True a daily climatology will be removed.
Best used only if the data is at daily time resolution
dt : integer
Time resolution of the input data in days
Returns
-------
var : numpy.array
Timeseries of the requested variable (varname).
Has the shape (time,jinds,iinds).
var_clim : numpy.array
Climatology of the requested variable (varname).
None if remove_clim=False (default)
"""
# create temp files to host the shared memory variables
folder1 = tempfile.mkdtemp()
folder2 = tempfile.mkdtemp()
path1 = os.path.join(folder1, 'dum0.mmap')
path2 = os.path.join(folder2, 'dum1.mmap')
if dim4D: # incase the files have more than one timestep in each file
vshape=(len(fnames),366,len(jinds))
var_par=np.memmap(path1, dtype=float, shape=vshape, mode='w+')
else: # incase there is only one timestep in a file
vshape=(len(fnames),len(jinds))
var_par=np.memmap(path1, dtype=float, shape=vshape, mode='w+')
# nts will keep track of number of days in a year
nts=np.memmap(path2, dtype=float, shape=(len(fnames)), mode='w+')
fnames2=np.memmap(path2, dtype='U'+str(len(fnames[0])+1), shape=(len(fnames)), mode='w+')
fnames2[:]=fnames #np.asarray(fnames[:])
# launch the parallel reading
Parallel(n_jobs=num_cores)(delayed(read_files)(j,nts,jinds,iinds,filepath,fnames2,var_par,varname,sum_over_depth, depth_lim, depth_lim0, model_data=model_data) for j,fname in enumerate(fnames))
if dim4D:
        print('calculating climatology')
var_clim=np.nanmean(var_par,0)
if remove_clim:
print('removing climatology')
# smooth the daily climatology with monthly filter, as the climatology will be still noisy at daily scales
var_clim=np.concatenate([var_clim[-120//dt:,],var_clim,var_clim[:120//dt,]],axis=0)
b,a=signal.butter(3,2./(30/dt))
jnonan=np.where(np.isfinite(np.sum(var_clim,0)))
var_clim[:,jnonan]=signal.filtfilt(b,a,var_clim[:,jnonan],axis=0)
var_clim=var_clim[120//dt:120//dt+366//dt,]
#
# this is the on off switch for removing the climatology
var_clim=var_clim*int(remove_clim)
var=var_par[0,:int(nts[0]),:]-var_clim[:int(nts[0]),:]
# concatenate the data - note that here nts is used to strip down the 366th day when it's not a leap year
# and include the 366th day when it is a leap year
for j in range(1,len(fnames)):
print(j)
var=np.concatenate([var,var_par[j,:int(nts[j]),:]-var_clim[:int(nts[j]),:]],axis=0)
#
else:
# if only one timestep per file
var=np.asarray(var_par)
var[np.where(var==0)]=np.nan
if remove_clim:
print('removing climatology')
year0=datetime.date(int(fnames[0][-20:-16]),int(fnames[0][-16:-14]),int(fnames[0][-14:-12])).isocalendar()[0]
year1=datetime.date(int(fnames[-1][-20:-16]),int(fnames[-1][-16:-14]),int(fnames[-1][-14:-12])).isocalendar()[0]
var2=np.ones((year1-year0+1,int(np.ceil(366./dt)),var.shape[1]))*np.nan
#
for j, fname in enumerate(fnames):
year = int(fname[-20:-16])
month = int(fname[-16:-14])
day = int(fname[-14:-12])
c,c1 = datetime.date(year,month,day).isocalendar()[:2]
c = c-year0
c1 = c1-1
var2[c,c1,:] = var[j,:]
#
var_clim=np.nanmean(var2,0)
ind=np.where(np.nansum(var2,-1)[0,:]>0)[0]
var=var2[0,ind,:]-var_clim[ind,:]
for j in range(1,var2.shape[0]):
ind=np.where(np.nansum(var2,-1)[j,:]>0)[0]
var=np.concatenate([var,var2[j,ind,:]-var_clim[ind,:]],axis=0)
else:
var_clim=None
#
print('close files')
#
try:
shutil.rmtree(folder1)
except OSError:
pass
try:
shutil.rmtree(folder2)
except OSError:
pass
#
return var, var_clim
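# Usage sketch (illustrative only): build jinds/iinds from a mask following the
# recipe in the docstring above, then load a variable. The file path, file names
# and variable name below are hypothetical placeholders.
def _example_load_data(filepath="/data/sst/", fnames=("sst_1990.nc", "sst_1991.nc"),
                       varname="sst"):
    mask = np.zeros((10, 20), dtype=bool)
    mask[2:5, 3:8] = True                        # region of valid (non-land) points
    jinds, iinds = np.where(mask)
    jinds, iinds = np.meshgrid(jinds, iinds)
    jinds, iinds = jinds.flatten(), iinds.flatten()
    return load_data(filepath, list(fnames), jinds, iinds, varname,
                     num_cores=2, dim4D=True, remove_clim=True, dt=1)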
#
def parallel_inversion_9point(j,x_grid,block_vars,Stencil_center,Stencil_size,block_num_samp,block_num_lats,block_num_lons,block_lat,block_lon,Tau,Dt_secs,inversion_method='integral',dx_const=None, dy_const=None, DistType='interp', radius=6371, nn=4):
"""
"""
for i in range(2,block_num_lats-2):
if np.isfinite(np.sum(x_grid[i,j,:])):
xn = np.zeros((Stencil_size,block_num_samp))
# count non-border neighbors of grid point
numnebs = 0
#
for inn in range(i-1,i+2):
for jnn in range(j-1,j+2):
if np.isfinite(x_grid[inn,jnn,0]):
numnebs=numnebs+1
# only invert if point has 9 non-border neighbors
if numnebs==9:
ib = i
jb = j
#
sads = [-1,+1,-2,+2,-3,+3,-4,+4] # indices for ds - Stencil_center will be the central point - these are spiraling out
jads = [-1,+1, 0, 0,-1,+1,+1,-1] # left,right,down,up,down-left,up-right,right-down,left-up
iads = [ 0, 0,-1,+1,-1,+1,-1,+1]
#
s_ads = [-1,+1,-2,+2,-3,+3,-4,+4,-5,+5,-6,+6,-7,+7,-8,+8,-9,+9,-10,+10,-11,+11,-12,+12]
j_ads = [-1,+1, 0, 0,-1,+1,+1,-1,-2,-2,-2,+2,+2,+2,-1, 0,+1,+1, 0, -1, -2, +2, +2, -2]
i_ads = [ 0, 0,-1,+1,-1,+1,-1,+1,+1, 0,-1,+1, 0,-1,-2,-2,-2,+2, +2, +2, -2, +2, -2, +2]
#
ds = np.zeros(len(s_ads)+1) # distance to the Stencil_center
dx = np.zeros(len(s_ads)+1)
dy = np.zeros(len(s_ads)+1)
ang2 = [180,0,270,90,225,45,315,135] # left,right,down,up, down-left,up-right,right-down,left-up
ds2 = np.zeros((len(ang2),len(ds)))
cent = len(s_ads)//2
#
# CALCULATE THE DISTANCE BETWEEN THE CENTRAL AND SURROUNDING POINTS
for s,ss in enumerate(s_ads):
#
ds[cent+ss] = distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib+i_ads[s],jb+j_ads[s]],block_lon[ib+i_ads[s],jb+j_ads[s]]], radius=radius)*1000
dx[cent+ss] = np.sign(j_ads[s])*distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib,jb],block_lon[ib+i_ads[s],jb+j_ads[s]]], radius=radius)*1000
dy[cent+ss] = np.sign(i_ads[s])*distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib+i_ads[s],jb+j_ads[s]],block_lon[ib,jb]], radius=radius)*1000
#
ang=np.arctan2(dy,dx)*180/np.pi
ang[np.where(ang<0)]=ang[np.where(ang<0)]+360
#
if DistType in ['interp'] and np.any(dx_const==None) and np.any(dy_const==None):
# we need to interpolate x_grid values to be at the same distance from the central point - this is because the inversion doesn't know about the distance.
#
# CHOOSE A DISTANCE TO USE - HERE 1km off from the median of the 4 closest cells
dr = np.nanmedian(ds[[cent-2,cent-1,cent+1,cent+2]])+1E3
ds2[:,Stencil_center] = dr
# find out how far each point is from the unit circle point facing each grid cell.
# axis=0 loops over each point of interest, and axis=1 loops over all the surrounding points
for s,a2 in enumerate(ang2):
for s2,ss2 in enumerate(s_ads):
ds2[s,cent+ss2]=np.sqrt(ds[cent+ss2]**2+dr**2-2*dr*ds[cent+ss2]*np.cos((ang[cent+ss2]-a2)*np.pi/180.))
#
# calculate weighted mean of the surrounding cells (linear interpolation)
ds2[:,cent] = dr
winds = np.argsort(ds2,axis=1) #
ds2_sort = np.sort(ds2,axis=1)
weigths = ((1/ds2_sort[:,:nn]).T/(np.sum(1/ds2_sort[:,:nn],1))).T # 6 closest points
weigths[np.where(np.isnan(weigths))] = 1
#
xn[Stencil_center+np.array(sads),:] = np.sum(x_grid[ib+np.array(i_ads),jb+np.array(j_ads),:][winds[:,:nn],:].T*weigths.T,1).T
xn[Stencil_center,:] = x_grid[ib,jb,:]
else:
#
dr = ds
xn[Stencil_center+np.array(sads),:] = x_grid[ib+np.array(iads),jb+np.array(jads),:]
xn[Stencil_center,:] = x_grid[ib,jb,:]
#
# use only those stencil members that are finite - setting others to zero
fin_inds=np.isfinite(xn[:,0])
xn[np.where(~fin_inds)[0],:]=0
Stencil_center2=Stencil_center
# integral method
if inversion_method in ['integral']:
xnlag = np.concatenate((xn[:,Tau:], np.zeros((xn.shape[0],Tau))),axis=1)
a=np.dot(xnlag,xn.T)
b=np.dot(xn,xn.T)
a[np.where(np.isnan(a))]=0
b[np.where(np.isnan(b))]=0
tmp = np.dot(a, np.linalg.pinv(b)) #pseudo-inverse
# tmp = np.dot(a.data, np.linalg.inv(b.data))
tmp[np.isnan(tmp)] = 0
tmp[np.isinf(tmp)] = 0
#
if np.isfinite(np.sum(tmp)) and np.sum(abs(tmp-tmp[0]))>1E-10:
try:
bb = (1./(Tau*Dt_secs))*linalg.logm(tmp)
except (ValueError,ZeroDivisionError,OverflowError):
bn = np.zeros(Stencil_size)
else:
bn = np.real(bb[Stencil_center,:])
else:
bn=np.zeros(Stencil_size)
bn[~np.isfinite(bn)] = 0
# inverse by derivative method
elif inversion_method in ['derivative']:
xnfut = np.concatenate((xn[:,1:], np.zeros((xn.shape[0],1))),axis=1)
xnlag = np.concatenate((np.zeros((xn.shape[0],Tau)), xn[:,1:xn.shape[1]-Tau+1]),axis=1)
a=np.dot((xnfut-xn),xnlag.T)
b=np.dot(xn,xnlag.T)
a[np.where(np.isnan(a))]=0
b[np.where(np.isnan(b))]=0
tmp = np.dot(a, np.linalg.pinv(b))
bn_matrix = (1./Dt_secs)*tmp
bn = np.real(bn_matrix[Stencil_center2,:])
bn[np.isnan(bn)] = 0
bn[np.isinf(bn)] = 0
# Alternative integral method
elif inversion_method in ['integral_2']:
xnfut = np.concatenate((xn[:,1:], np.zeros((Stencil_size,1))),axis=1)
xnlag = np.concatenate((np.zeros((Stencil_size,Tau)), xn[:,1:xn.shape[1]-Tau+1]),axis=1)
a=np.dot(xnfut,xnlag.T)
b=np.dot(xn,xnlag.T)
a[np.where(np.isnan(a))]=0
b[np.where(np.isnan(b))]=0
# tmp = np.linalg.lstsq(b.T, a.T)[0] #one way to do it
tmp = np.dot(a, np.linalg.pinv(b)) #another way
tmp[np.isnan(tmp)] = 0
tmp[np.isinf(tmp)] = 0
if np.isfinite(np.sum(tmp)) and np.sum(abs(tmp-tmp[0]))>1E-10: #check that not all the values are the same
try:
bb = (1./(Dt_secs))*linalg.logm(tmp) #this is not working for somereason
except (ValueError,ZeroDivisionError,OverflowError):
bn = np.zeros(Stencil_size)
else:
bn = np.real(bb[Stencil_center,:])
else:
bn=np.zeros(Stencil_size)
bn[~np.isfinite(bn)] = 0
else:
bn = np.zeros(tmp.shape[0])
############################################
# -- solve for U K and R from row of bn -- #
############################################
# actually just save bn - calculate the rest later
block_vars[0,:,i,j]=bn
block_vars[1,:,i,j]=dr
block_vars[2,:,i,j]=fin_inds
def parallel_monte_carlo_inversion(j,x_grid,block_vars,Stencil_center,Stencil_size,block_num_samp,block_num_lats,block_num_lons,block_lat,block_lon,Tau,Dt_secs,block_vars2=None,inversion_method='integral',dx_const=None,dy_const=None, DistType='mean',radius=6371,dt_min=365*5, ens=30, percentiles=[25,50,75]):
"""
    Invert 2D data using a 5 point stencil. This function should not be called directly; instead call the inversion() function.
"""
sads=[-1,+1,-2,+2,-1,+1,-2,+2][:4]
jads=[-1,+1, 0, 0,-1,+1,+1,-1][:4]
iads=[ 0, 0,-1,+1,-1,+1,-1,+1][:4]
#
s_ads=[-1,+1,-2,+2,-3,+3,-4,+4,-5,+5,-6,+6,-7,+7,-8,+8,-9,+9,-10,+10,-11,+11,-12,+12]
j_ads=[-1,+1, 0, 0,-1,+1,+1,-1,-2,-2,-2,+2,+2,+2,-1, 0,+1,+1, 0, -1, -2, +2, +2, -2]
i_ads=[ 0, 0,-1,+1,-1,+1,-1,+1,+1, 0,-1,+1, 0,-1,-2,-2,-2,+2, +2, +2, -2, +2, -2, +2]
#
tstep = (block_num_samp-dt_min)//ens
#
for i in range(1,block_num_lats-1):
numnebs=np.sum(np.isfinite(x_grid[i+np.array(iads),j+np.array(jads),0]))
if numnebs==len(sads):
xn = np.zeros((Stencil_size,block_num_samp))
ib = i
jb = j
if DistType in ['mean'] and np.any(dx_const==None) and np.any(dy_const==None):
# USING MEAN DISTANCE
ds=np.zeros(Stencil_size)
for s,ss in enumerate(sads):
ds[Stencil_center+ss]=distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib+iads[s],jb+jads[s]],block_lon[ib+iads[s],jb+jads[s]]],radius=radius)*1000
#
xn[Stencil_center+np.array(sads),:]=x_grid[i+np.array(iads),j+np.array(jads),:]
xn[Stencil_center,:] = x_grid[i,j,:]
# calculate the mean dx,dy along two major axes
dx = np.mean(ds[Stencil_center+np.array(sads[:2])])
dy = np.mean(ds[Stencil_center+np.array(sads[2:])])
elif DistType in ['interp'] and np.any(dx_const==None) and np.any(dy_const==None):
# INTERPOLATED VERSION
# Interpolate x_grid values to be at the same distance from the central point - this is because the inversion doesn't know about the distance.
# first find the minimum distance - we will interpolate all the other points to be at this distance
                cent=len(s_ads)//2
ds=np.zeros(len(s_ads)+1)
ang=np.zeros(len(s_ads)+1)
for s,ss in enumerate(s_ads):
ds[cent+ss]=distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib+i_ads[s],jb+j_ads[s]],block_lon[ib+i_ads[s],jb+j_ads[s]]],radius=radius)*1000
ang[cent+np.array(s_ads)]=np.arctan2(i_ads,j_ads)*180/np.pi
ang[np.where(ang<0)]=ang[np.where(ang<0)]+360
#
dr=np.median(ds[np.where(ds>0)])
ds2=np.zeros((5,len(ds)))
# find out how far each point is from the unit circle point facing each grid cell.
for s,ss in enumerate(sads):
for s2,ss2 in enumerate(s_ads):
ds2[2+ss,cent+ss2]=np.sqrt(ds[cent+ss2]**2+dr**2-2*dr*ds[cent+ss2]*np.cos((ang[cent+ss2]-ang[cent+ss])*np.pi/180.))
#
ds2=np.delete(ds2,2,axis=0) # remove the central point from the points of interest - we know the value already
ds2=np.delete(ds2,cent,axis=1) # remove the central point from the points that affect interpolation - we don't want to transform any information outside
winds=np.argsort(ds2,axis=1) #
ds2_sort=np.sort(ds2,axis=1) #
weigths=((1/ds2_sort[:,:3]).T/(np.sum(1/ds2_sort[:,:3],1))).T #
weigths[np.where(np.isnan(weigths))]=1
# interpolate the surrounding points to the new unit circle
xn[Stencil_center+np.array(sads),:]=np.sum(x_grid[i+np.array(i_ads),j+np.array(j_ads),:][winds[:,:3],:].T*weigths.T,1).T
xn[Stencil_center,:] = x_grid[i,j,:]
# distance is the same to each direction
dx=dy=dr
#
elif np.any(dx_const!=None) and np.any(dy_const!=None):
                # constant dx and dy were provided, use them directly
xn[Stencil_center+np.array(sads),:]=x_grid[i+np.array(iads),j+np.array(jads),:]
xn[Stencil_center,:] = x_grid[i,j,:]
dx=dx_const
dy=dy_const
else:
# ORIGINAL VERSION
# calc distances
dx = distance([block_lat[ib,jb],block_lon[ib,jb-1]],[block_lat[ib,jb],block_lon[ib,jb]],radius=radius)*1000
dy = distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib-1,jb],block_lon[ib,jb]],radius=radius)*1000
# correct negative distances due to blocks spanning meridian
if (block_lon[ib,jb]*block_lon[ib,jb+1]<0):
dx = distance([block_lat[ib,jb],block_lon[ib,jb-1]],[block_lat[ib,jb],block_lon[ib,jb]],radius=radius)*1000
#
if (block_lat[ib,jb]*block_lat[ib+1,jb]<0):
dy = distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib-1,jb],block_lon[ib,jb]],radius=radius)*1000
# fill xn with timeseries of center point and neighbors
for ci in range(Stencil_center):
if ci==0:
xn[Stencil_center-1,:] = x_grid[i,j-1,:]
xn[Stencil_center+1,:] = x_grid[i,j+1,:]
elif ci==1:
xn[Stencil_center-2,:] = x_grid[i+1,j,:]
xn[Stencil_center+2,:] = x_grid[i-1,j,:]
xn[Stencil_center,:] = x_grid[i,j,:]
#
if inversion_method in ['integral']:
#
tstep = (block_num_samp-dt_min)//ens
bn = np.zeros((Stencil_size,ens))
res = np.zeros((Stencil_size,ens))
#
for e in range(ens):
t0=e*tstep
t1=e*tstep+dt_min
if block_num_samp-(t1+Tau)>=0:
xnlag = xn[:,t0+Tau:t1+Tau]
else:
xnlag=np.concatenate([xn[:,t0+Tau:t1+Tau], np.zeros((Stencil_size,(t1+Tau)-block_num_samp))],axis=1)
#
a=np.dot(xnlag,xn[:,t0:t1].T)
b=np.dot(xn[:,t0:t1],xn[:,t0:t1].T)
a[np.where(np.isnan(a))]=0
b[np.where(np.isnan(b))]=0
tmp = np.dot(a.data, np.linalg.pinv(b.data))
tmp[np.isnan(tmp)] = 0
tmp[np.isinf(tmp)] = 0
# this is very robust - time variability is perhaps more interesting
res[:,e] = abs((a-np.dot(tmp,b))[Stencil_center,:]/a[Stencil_center,:])
#
if np.isfinite(np.sum(tmp)) and np.sum(abs(tmp-tmp[0]))>1E-10: #check that not all the values are the same
try:
bb, bb_err = linalg.logm(tmp,disp=False)
bb = (1./(Tau*Dt_secs))*bb
bb_err = (1./(Tau*Dt_secs))*bb_err
#bb, bb_err = (1./(Tau*Dt_secs))*linalg.logm(tmp,disp=False)
except (ValueError,ZeroDivisionError,OverflowError):
bn[:,e] = np.zeros(Stencil_size)
else:
bn[:,e] = np.real(bb[Stencil_center,:])
#res[:,e] = np.real(bb_err[])
else:
bn[:,e]=np.zeros(Stencil_size)
#
#bb = (1./(Tau*Dt_secs))*linalg.logm(tmp)
#bn[:,e] = np.real(bb[Stencil_center,:])
#
############################################
# -- solve for U K and R from row of bn -- #
############################################
#
block_vars[:,0,i,j] = -dx*(bn[Stencil_center+1,:]-bn[Stencil_center-1,:]) #-dx*np.nanpercentile(bn[Stencil_center+1,:]-bn[Stencil_center-1,:],percentiles) # u
block_vars[:,1,i,j] = -dy*(bn[Stencil_center+2,:]-bn[Stencil_center-2,:]) #-dy*np.nanpercentile(bn[Stencil_center+2,:]-bn[Stencil_center-2,:],percentiles) # v
block_vars[:,2,i,j] = 1./2*dx**2*(bn[Stencil_center+1,:]+bn[Stencil_center-1,:]) #1./2*dx**2*np.nanpercentile(bn[Stencil_center+1,:]+bn[Stencil_center-1,:],percentiles) # Kx
block_vars[:,3,i,j] = 1./2*dy**2*(bn[Stencil_center+2,:]+bn[Stencil_center-2,:]) #1./2*dy**2*np.nanpercentile(bn[Stencil_center+2,:]+bn[Stencil_center-2,:],percentiles) # Ky
block_vars[:,4,i,j] = -1./np.nansum(bn,0) #np.nanpercentile(-1./np.nansum(bn,0),percentiles) # R
if not (block_vars2 is None):
block_vars2[:,i,j] = np.nanmean(res,0)
def parallel_inversion(j,x_grid,block_vars,Stencil_center,Stencil_size,block_num_samp,block_num_lats,block_num_lons,block_lat,block_lon,Tau,Dt_secs,rot=False,block_vars2=None,inversion_method='integral',dx_const=None,dy_const=None, DistType='mean',radius=6371):
"""
    Invert 2D data using a 5 point stencil. This function should not be called directly; instead call the inversion() function.
Possibility to use either 'classic' north-south, east-west stencil (rot=False, default), or a stencil rotated 45 deg counter-clockwise (west).
"""
#
#
if not rot:
# indices for the surrounding 8 points
sads=[-1,+1,-2,+2,-1,+1,-2,+2][:4] # indices for ds - Stencil_center will be the central point - these are spiraling out
jads=[-1,+1, 0, 0,-1,+1,+1,-1][:4] # left,right,down,up,down-left,up-right,right-down,left-up
iads=[ 0, 0,-1,+1,-1,+1,-1,+1][:4]
# indices for the surrounding 24 points -important to have the same first 4 points (the rest don't matter)
s_ads=[-1,+1,-2,+2,-3,+3,-4,+4,-5,+5,-6,+6,-7,+7,-8,+8,-9,+9,-10,+10,-11,+11,-12,+12]
j_ads=[-1,+1, 0, 0,-1,+1,+1,-1,-2,-2,-2,+2,+2,+2,-1, 0,+1,+1, 0, -1, -2, +2, +2, -2]
i_ads=[ 0, 0,-1,+1,-1,+1,-1,+1,+1, 0,-1,+1, 0,-1,-2,-2,-2,+2, +2, +2, -2, +2, -2, +2]
else:
# x and y axis are rotated 45 to the left
# indices for the surrounding 8 points
sads=[-1,+1,-2,+2,-1,+1,-2,+2][4:] # indices for ds - Stencil_center will be the central point - these are spiraling out
jads=[-1,+1, 0, 0,-1,+1,+1,-1][4:] # left,right,down,up,down-left,up-right,right-down,left-up
iads=[ 0, 0,-1,+1,-1,+1,-1,+1][4:]
        # indices for the surrounding 24 points
s_ads=[-1,+1,-2,+2,-3,+3,-4,+4,-5,+5,-6,+6,-7,+7,-8,+8,-9,+9,-10,+10,-11,+11,-12,+12]
j_ads=[-1,+1,+1,-1,-1,+1, 0, 0,-2,-2,+2,+2,+2,+2,-2,-2,-2,+2, 0, 0, +1, +1, -1, -1]
i_ads=[-1,+1,-1,+1, 0, 0,-1,+1,-2,-1,+2,+1,-2,-1,+2,+1, 0, 0, -2, +2, +2, -2, +2, -2]
for i in range(1,block_num_lats-1): #change this back if no interpolatiion is used
# for i in range(2,block_num_lats-2): #if interpolation is used
numnebs=np.sum(np.isfinite(x_grid[i+np.array(iads),j+np.array(jads),0]))
        # only invert if all the points in the stencil are finite
if numnebs==len(sads):
xn = np.zeros((Stencil_size,block_num_samp))
ib = i
jb = j
# calculate the dx and dy and fill the stencil
if DistType in ['mean'] and np.any(dx_const==None) and np.any(dy_const==None):
# USING MEAN DISTANCE
ds=np.zeros(Stencil_size)
for s,ss in enumerate(sads):
ds[Stencil_center+ss]=distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib+iads[s],jb+jads[s]],block_lon[ib+iads[s],jb+jads[s]]],radius=radius)*1000
#
xn[Stencil_center+np.array(sads),:]=x_grid[i+np.array(iads),j+np.array(jads),:]
xn[Stencil_center,:] = x_grid[i,j,:]
# calculate the mean dx,dy along two major axes
dx = np.mean(ds[Stencil_center+np.array(sads[:2])])
dy = np.mean(ds[Stencil_center+np.array(sads[2:])])
elif DistType in ['interp'] and np.any(dx_const==None) and np.any(dy_const==None):
# INTERPOLATED VERSION
# Interpolate x_grid values to be at the same distance from the central point - this is because the inversion doesn't know about the distance.
# first find the minimum distance - we will interpolate all the other points to be at this distance
                cent=len(s_ads)//2
ds=np.zeros(len(s_ads)+1)
ang=np.zeros(len(s_ads)+1)
for s,ss in enumerate(s_ads):
ds[cent+ss]=distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib+i_ads[s],jb+j_ads[s]],block_lon[ib+i_ads[s],jb+j_ads[s]]],radius=radius)*1000
ang[cent+np.array(s_ads)]=np.arctan2(i_ads,j_ads)*180/np.pi
ang[np.where(ang<0)]=ang[np.where(ang<0)]+360
#
dr=np.median(ds[np.where(ds>0)])
ds2=np.zeros((5,len(ds)))
# find out how far each point is from the unit circle point facing each grid cell.
for s,ss in enumerate(sads):
for s2,ss2 in enumerate(s_ads):
ds2[2+ss,cent+ss2]=np.sqrt(ds[cent+ss2]**2+dr**2-2*dr*ds[cent+ss2]*np.cos((ang[cent+ss2]-ang[cent+ss])*np.pi/180.))
#
ds2=np.delete(ds2,2,axis=0) # remove the central point from the points of interest - we know the value already
ds2=np.delete(ds2,cent,axis=1) # remove the central point from the points that affect interpolation - we don't want to transform any information outside
winds=np.argsort(ds2,axis=1) #
ds2_sort=np.sort(ds2,axis=1) #
weigths=((1/ds2_sort[:,:3]).T/(np.sum(1/ds2_sort[:,:3],1))).T #
weigths[np.where(np.isnan(weigths))]=1
# interpolate the surrounding points to the new unit circle
xn[Stencil_center+np.array(sads),:]=np.sum(x_grid[i+np.array(i_ads),j+np.array(j_ads),:][winds[:,:3],:].T*weigths.T,1).T
xn[Stencil_center,:] = x_grid[i,j,:]
# distance is the same to each direction
dx=dy=dr
#
elif np.any(dx_const!=None) and np.any(dy_const!=None):
                # constant dx and dy were provided, use them directly
xn[Stencil_center+np.array(sads),:]=x_grid[i+np.array(iads),j+np.array(jads),:]
xn[Stencil_center,:] = x_grid[i,j,:]
dx=dx_const
dy=dy_const
else:
# ORIGINAL VERSION
# calc distances
dx = distance([block_lat[ib,jb],block_lon[ib,jb-1]],[block_lat[ib,jb],block_lon[ib,jb]],radius=radius)*1000
dy = distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib-1,jb],block_lon[ib,jb]],radius=radius)*1000
# correct negative distances due to blocks spanning meridian
if (block_lon[ib,jb]*block_lon[ib,jb+1]<0):
dx = distance([block_lat[ib,jb],block_lon[ib,jb-1]],[block_lat[ib,jb],block_lon[ib,jb]],radius=radius)*1000
#
if (block_lat[ib,jb]*block_lat[ib+1,jb]<0):
dy = distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib-1,jb],block_lon[ib,jb]],radius=radius)*1000
# fill xn with timeseries of center point and neighbors
for ci in range(Stencil_center):
if ci==0:
xn[Stencil_center-1,:] = x_grid[i,j-1,:]
xn[Stencil_center+1,:] = x_grid[i,j+1,:]
elif ci==1:
xn[Stencil_center-2,:] = x_grid[i+1,j,:]
xn[Stencil_center+2,:] = x_grid[i-1,j,:]
xn[Stencil_center,:] = x_grid[i,j,:]
# TODO : HERE IS AN OPTION TO RUN ALL THE TAUS AT ONCE
if False: #inversion_method in ['integral'] and False:
bns=np.zeros((Stencil_size,Tau-1))
for tau in range(1,Tau):
xnlag = np.concatenate((xn[:,tau:], np.zeros((Stencil_size,tau))),axis=1)
a=np.dot(xnlag,xn.T)
b=np.dot(xn,xn.T)
a[np.where(np.isnan(a))]=0
b[np.where(np.isnan(b))]=0
tmp = np.dot(a, np.linalg.pinv(b))
tmp[np.isnan(tmp)] = 0
tmp[np.isinf(tmp)] = 0
if np.isfinite(np.sum(tmp)) and np.sum(abs(tmp-tmp[0]))>1E-10:
try:
bb = (1./(tau*Dt_secs))*linalg.logm(tmp)
except (ValueError,ZeroDivisionError,OverflowError):
continue
else:
bns[:,tau-1] = np.real(bb[Stencil_center,:])
#
bns[~np.isfinite(bns)] = 0
# select the case when the central cell is most negative
b_ind=np.where(bns[Stencil_center,:].squeeze()==np.min(bns[Stencil_center,:],0))[0]
if len(b_ind)>1:
b_ind=b_ind[0]
bn=bns[:,b_ind[0]]
#
elif inversion_method in ['integral']:
# inverse by integral method
xnlag = np.concatenate((xn[:,Tau:], np.zeros((Stencil_size,Tau))),axis=1)
# tmp = (np.dot(xnlag,xn.T))/(np.dot(xn,xn.T))
# in matlab: tmp = (xnlag*xn')/(xn*xn') let's take a=xnlag*xn' and b=xn*xn'
# this line in matlab basically means solving for xb=a
# what we can do in python is # xb = a: solve b.T x.T = a.T
# see http://stackoverflow.com/questions/1007442/mrdivide-function-in-matlab-what-is-it-doing-and-how-can-i-do-it-in-python
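                # a minimal numpy sketch of that step (equivalent alternatives):
                #   tmp = np.dot(a, np.linalg.pinv(b))                # a * pinv(b)
                #   tmp = np.linalg.lstsq(b.T, a.T, rcond=None)[0].T  # solve b.T x.T = a.T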
#
                a=np.dot(xnlag,xn.T)
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2020-09-09 04:01:25
# @Last Modified by: <NAME>
# @Last Modified time: 2021-10-10 21:49:31
import os, matplotlib, math
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
import seaborn as sns
import numpy as np
import numpy.ma as ma
import scipy.stats as ss
import statsmodels.api as sa
import scikit_posthocs as sp
from sklearn import linear_model
import settings as settings
# ================= files =================
### get the foldres and files within a particular path
def parse_folder_info(path):
"""
    Function: get the folders and files within a particular path.
Input: path
Output: lists of folders and files
"""
folders = [f for f in os.listdir(path) if not os.path.isfile(os.path.join(path, f))]
files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
if('.DS_Store' in files):
files.remove('.DS_Store')
if('._.DS_Store' in files):
files.remove('._.DS_Store')
return folders, files
### sort dictionary according to keys or values
def sort_dic(dic, switch, is_reverse):
"""
Function: sort dictionary according to keys or values.
Input:
- dic: Dictionary.
- switch: str. "keys" or "values" to sort.
- is_reverse: whether or not to sort reversely.
Output: Dictionary. sorted.
"""
if(switch == 'keys'):
return {k: v for k, v in sorted(dic.items(), key=lambda item: item[0], reverse = is_reverse)}
elif(switch == 'values'):
return {k: v for k, v in sorted(dic.items(), key=lambda item: item[1], reverse = is_reverse)}
### group DataFrame columns: get DataFrame column names that have the same pattern
def group_headers(df, header_tag, isContain):
'''
Function: get DataFrame column names that have the same pattern (contain or doesn't contain a particular string)
Input:
- df -- dataframe
- header_tag -- string
- isContain -- True/False
Output: list of strings
'''
if isContain:
return [col for col in df.columns.values if header_tag in col]
else:
return [col for col in df.columns.values if header_tag not in col]
### add columns to DataFrame
def dataframe_add_column(df, column_list):
'''
Function: add columns to a DataFrame.
Input:
- df: DataFrame.
- column_list: columns to add.
Output: df (w/ new columns)
'''
### check if columns in column_list exist in dataframe already
new_col = []
for col in column_list:
if(col not in df.columns):
new_col.append(col)
### if not, append.
if(len(new_col) > 0):
df = df.reindex( columns = df.columns.tolist() + new_col )
return df
# ================= geometry =================
### Inner angle calculation
# source: https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python
def unit_vector(vector):
"""
Function: Returns the unit vector of the vector.
Input: vector
Output: vector
"""
return vector / np.linalg.norm(vector)
def inner_angle(v1, v2, is_radians):
"""
Function: Returns the angle in radians(or degree) between vectors 'v1' and 'v2'
Input:
- v1/v2: vectors
- is_radians: True/False
Output: radians (or degree) of the inner angle
"""
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
if is_radians:
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
else:
return np.rad2deg(np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)))
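# Usage sketch (illustrative only): the angle between the x- and y-axis unit
# vectors is 90 degrees.
def _example_inner_angle():
    return inner_angle(np.array([1.0, 0.0]), np.array([0.0, 1.0]), is_radians=False)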
### angle normalization to -pi ~ pi
def angle_normalization(angles):
"""
Function: normalize angle (or list of angles) to -pi ~ pi
Input: angle as float or numpy array (in radians)
Output: angle as float or numpy array (in radians)
"""
if(np.isscalar(angles)):
if(angles<-np.pi):
angles = angles + 2*np.pi
if(angles>np.pi):
angles = angles - 2*np.pi
return angles
elif(type(angles) == np.ndarray):
angles[angles>np.pi] = angles[angles>np.pi] - 2*np.pi
angles[angles<-np.pi] = angles[angles<-np.pi] + 2*np.pi
return angles
else:
print(f'{type(angles)} datatype not supported in angle_normalization!')
return None
### smallest difference between two angles
def smallest_angle(x, y):
"""
source: https://stackoverflow.com/questions/1878907/the-smallest-difference-between-2-angles
    Function: calculate the smallest difference between two angles.
Input: x,y -- angles (in radians)
Output: angle (in radians)
"""
return min((2 * np.pi) - abs(x - y), abs(x - y))
### get_counter_angle
def get_counter_angle(start, end, is_radians):
"""
    Function: normalize angle between start and end to >= 0.
Inputs:
- start, end -- angles (in radians)
- is_radians: Boolean. True if return radians, False if return degrees.
Output: angle (in radians or degrees)
"""
angle = end - start
if(angle < 0):
if(is_radians):
angle = angle + np.pi*2
else:
angle = angle+360
return angle
### get_vector_length
def get_vector_length(v):
"""
Function: get length of a vector.
Input: numpy array. vector.
Output: float. length of the vector
"""
return np.linalg.norm(v)
### Calculate line intersections
def line_intersection(line1, line2):
"""
Function: Calculate intersection between two lines.
Source: https://stackoverflow.com/questions/20677795/how-do-i-compute-the-intersection-point-of-two-lines
Input: line1 - (point1, point2); line2 - (point1, point2)
Output: x,y - floats. x and y coordinates of intersection.
"""
xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])
ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])
def det(a, b):
return a[0] * b[1] - a[1] * b[0]
div = det(xdiff, ydiff)
if div == 0:
raise Exception('lines do not intersect')
d = (det(*line1), det(*line2))
x = det(d, xdiff) / div
y = det(d, ydiff) / div
return x, y
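# Usage sketch (illustrative only): the diagonals of the unit square intersect
# at (0.5, 0.5).
def _example_line_intersection():
    line1 = ((0, 0), (1, 1))
    line2 = ((0, 1), (1, 0))
    return line_intersection(line1, line2)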
# ================= reformatting grid =================
### polar-cartesian conversion
def cartesian_to_polar(x,y):
r = np.sqrt(x**2 + y**2)
    theta = np.arctan2(y, x) # arctan2 handles all quadrants and x == 0
return r,theta
def polar_to_cartesian(r,theta):
x = r*np.cos(theta)
y = r*np.sin(theta)
return x,y
### re-center coordinates
def carcedian_re_center_coords(coords, center):
"""
Function: re-center array of coordinates according to the center coordinate
Inputs:
- coords: numpy array. array of coordinates (can either be nx2 or 2xn)
- center: numpy array. coordinate of the center (1x2 or 2x1)
Outputs:
- new_coords: numpy array. array of coordinates re-centered. same format as coords.
"""
new_coords = np.copy(coords)
shape = coords.shape
if(shape[0] == 2):
new_coords[0,:] = new_coords[0,:] - center[0]
new_coords[1,:] = new_coords[1,:] - center[1]
elif(shape[1] == 2):
new_coords[:,0] = new_coords[:,0] - center[0]
new_coords[:,1] = new_coords[:,1] - center[1]
return new_coords
### flip coordinates horizontally or vertically.
def flip_coords(coords, axis):
"""
Function: flip coordinates horizontally or vertically
Inputs:
- coords: numpy array. array of coordinates (can either be nx2 or 2xn)
- axis: str. 'v' = flip vertically; 'h' = flip horizontally.
Outputs:
- new_coords: numpy array. array of coordinates re-centered. same format as coords.
"""
new_coords = np.copy(coords)
shape = coords.shape
if(axis == 'h'):
if(shape[0] == 2):
new_coords[0,:] = - new_coords[0,:]
elif(shape[1] == 2):
new_coords[:,0] = - new_coords[:,0]
if(axis == 'v'):
if(shape[0] == 2):
new_coords[1,:] = - new_coords[1,:]
elif(shape[1] == 2):
new_coords[:,1] = - new_coords[:,1]
return new_coords
### rotate coordinates counter-clockwise.
def rotate_points(center_point, coords, angle):
"""
Function: Rotates coordinates counter-clockwise around a center point. Rotation angle is in radians.
Source: adapted from https://gist.github.com/somada141/d81a05f172bb2df26a2c
Input:
- center_point: numpy array. 1x2 or 2x1.
- coords: numpy array. array of coordinates (nx2).
- angle: float. rotation angle in radians.
Output:
- new_coords: numpy array (nx2). new coordinates after rotation.
"""
    new_coords = np.zeros(coords.shape)
    # shift so that the rotation center sits at the origin
    dx = coords[:,0] - center_point[0]
    dy = coords[:,1] - center_point[1]
    # rotate counter-clockwise by `angle` (using the un-rotated dx for the y component), then shift back
    new_coords[:,0] = dx * math.cos(angle) - dy * math.sin(angle) + center_point[0]
    new_coords[:,1] = dx * math.sin(angle) + dy * math.cos(angle) + center_point[1]
return new_coords
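# Usage sketch (illustrative only): rotating the point (1, 0) by 90 degrees
# counter-clockwise about the origin should give approximately (0, 1).
def _example_rotate_points():
    return rotate_points(np.array([0.0, 0.0]), np.array([[1.0, 0.0]]), np.pi / 2)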
### get centroids of given coordinates.
def get_centroid(coords):
"""
Function: get centroids of given coordinates.
Input:
- coords: numpy array. mx2xn. m = number of centroids; n = number of points per centroid.
Output:
- new_coords: numpy array (mx2). centroids.
"""
new_coords = np.zeros((coords.shape[0], coords.shape[1]))
for i in range(coords.shape[0]):
new_coords[i,0] = np.sum(coords[i,0,:])/coords.shape[2]
new_coords[i,1] = np.sum(coords[i,1,:])/coords.shape[2]
return new_coords
### get heel coordinates
def get_heel_coords_sum(bundle_no, annots_df, **kwargs):
"""
Function: get coordinate of heel positions of a given bundle
Inputs:
- bundle_no: numpy array. array of coordinates (can either be nx2 or 2xn)
- bundles_df: DataFrame. containing informations of bundles.
- kwargs: additional parameters
        - dim: int. dimension of returned coordinates. 2 or 3.
- is_pixel: Boolean. whether or not return coordinates in pixel (True) or um (False)
- pixel_to_um: numpy array (1x2 or 1x3). um/pixel for each dimension.
Outputs:
- heel_coords: numpy array. array of heel coordinates.
"""
### unravel params
dim = 2
### get heel coordinates
heel_coords = np.zeros((6,dim))
heel_coords[:,0] = list(annots_df.loc[bundle_no, group_headers(annots_df, 'coord_X_R', True)])
heel_coords[:,1] = list(annots_df.loc[bundle_no, group_headers(annots_df, 'coord_Y_R', True)])
return heel_coords
### get target coordinates
def get_target_coords_sum(bundle_no, annots_df, **kwargs):
"""
    Function: get coordinates of target positions of a given bundle
    Inputs:
    - bundle_no: numpy array. array of coordinates (can either be nx2 or 2xn)
    - bundles_df: DataFrame. containing information of bundles.
    - kwargs: additional parameters
        - dim: int. dimension of returned coordinates. 2 or 3.
        - is_pixel: Boolean. whether or not return coordinates in pixel (True) or um (False)
        - pixel_to_um: numpy array (1x2 or 1x3). um/pixel for each dimension.
    Outputs:
    - target_coords: numpy array. array of target coordinates.
"""
### unravel params
dim = 2
index_to_target_id = settings.matching_info.index_to_target_id
### get target coordinates
target_coords = np.zeros((6,dim))
target_coords[:,0] = list(annots_df.loc[bundle_no, group_headers(annots_df, 'coord_X_T', True)])
target_coords[:,1] = list(annots_df.loc[bundle_no, group_headers(annots_df, 'coord_Y_T', True)])
return target_coords
### get angle unit information from theoretical grid.
def get_angle_unit_theory(return_type):
"""
Function: get angle unit information from theoretical grid.
Input:
- return_type: str.
- phi_unit: return radian value of the unit of standardized angle.
- aTiCT4: return radian value of angles between targets, center, and T4.
- aRiCT4: return radian value of angles between heels, center, and T4.
Outputs:
- phi_unit: float. radian value of the unit of standardized angle.
- aTiCT4: numpy array (6x1). radian value of angles between targets, center, and T4.
- aRiCT4: numpy array (6x1). radian value of angles between heels, center, and T4.
"""
### before standardization
#### distance: normal
dT0T2 = dT0T5 = dT2T4 = dT4T5 = 1
dT0T4 = dT2T3 = (dT0T5 ** 2 + dT4T5 ** 2 -2*dT4T5*dT0T5*math.cos(math.radians(100)))**0.5
dT2T5 = dT3T7 = (dT0T5 ** 2 + dT4T5 ** 2 -2*dT0T2*dT0T5*math.cos(math.radians(80)))**0.5
dT0T3 = dT0T7 = ((dT2T5/2) ** 2 + (dT2T3*1.5) ** 2) ** 0.5
#### angles: normal
aT0T2 = math.radians(80)/2
aT0T5 = - math.radians(80)/2
aT0T3 = math.acos((dT0T3 ** 2 + dT0T7 ** 2 - dT3T7 ** 2)/(2*dT0T3*dT0T7))/2
aT0T7 = - aT0T3
aT0T4 = 0
#### target coordinates
T0 = np.array((0,0))
T2 = np.array((aT0T2, dT0T2))
T3 = np.array((aT0T3, dT0T3))
T4 = np.array((aT0T4, dT0T4))
T5 = np.array((aT0T5, dT0T2))
T7 = np.array((aT0T7, dT0T7))
target_grid_polar = np.stack((T0, T2, T3, T4, T5, T7), axis = 0)
target_grid_cart = np.zeros((6,2))
for i in range(6):
target_grid_cart[i,:] = polar_to_cartesian(target_grid_polar[i,1], target_grid_polar[i,0])
#### heel coordinates
alpha = 0.2354
a = 0.2957
b = 0.5
r_heels_cart = np.zeros((6,2))
r_heels_polar = np.zeros((6,2))
for n in range(1,7):
phi_n = -(alpha + (n-1)*(np.pi - 2*alpha)/5)
x = a*np.cos(phi_n)
y = b*np.sin(phi_n)
r, theta = cartesian_to_polar(-y, x)
r_heels_cart[n-1, :] = [-y,x]
r_heels_polar[n-1, :] = [theta, r]
### intersect
c = line_intersection((r_heels_cart[2,:], target_grid_cart[2,:]),(r_heels_cart[3,:], target_grid_cart[5,:]))
### after standardization
dTiC = np.zeros((6,1))
for i in range(1,6):
dTiC[i] = np.linalg.norm(target_grid_cart[i,:] - c)
dTiC = dTiC/dTiC[3]
aTiCT4 = np.zeros((6,1))
for i in range(1,6):
aTiCT4[i] = inner_angle(target_grid_cart[i,:] - c, target_grid_cart[3,:] - c, True)
if(i in [4,5]):
aTiCT4[i] = - aTiCT4[i]
aRiCT4 = np.zeros((6,1))
for i in range(1,6):
aRiCT4[i] = inner_angle(r_heels_cart[i,:] - c, target_grid_cart[3,:] - c, True)
if(i in [4,5]):
aRiCT4[i] = - aRiCT4[i]
### phi_unit
phi_unit = aTiCT4[2,0]
### return
if(return_type == 'phi_unit'):
return phi_unit
elif(return_type == 'aTiCT4'):
return aTiCT4
elif(return_type == 'aRiCT4'):
return aRiCT4
### get angle unit information from measured target positions.
def get_angle_unit_data(sum_df, **kwargs):
"""
Function: get angle unit information from measured target positions.
Input:
- sum_df: DataFrame. processed DataFrame that contains both bundle heel and target info and growth cone length and angle info.
- kwargs: additional parameters
- 'criteria': Dataframe with Boolean values. filtering which bundles to include in the calculation.
Output:
- phi_unit: radian value of "1" in standardized coordinate.
"""
if('criteria' in kwargs.keys()):
criteria = kwargs['criteria']
sum_df = sum_df.loc[criteria, :]
# print(f"get_angle_unit_num={len(sum_df)}")
phi_unit = sum_df['aT3cT7'].mean()/2
return phi_unit
### get polar coordinates of target grid from standardized coordinates.
def get_target_grid_polar_summary(**kwargs):
"""
    Function: get polar coordinates of the target grid.
Input:
- kwargs:
- 'return_type': str. calculate angle based on theoretical grid ("theory") or measured grid ("data")
- 'dTiCs': dictionary. {target_id : distance value}. |Ti-C| normalized.
- 'aTiCT4s': numpy array. radian values of angles between Ti, C, T4.
Output:
- grid: numpy array (6x2). polar coordinate values of target grid (T0, T2, T3, T4, T5, T3')
"""
### unravel params
index_to_target_id = settings.matching_info.index_to_target_id
return_type = kwargs['return_type']
if(return_type == 'theory'):
dTiCs = kwargs['dTiCs']
aTiCT4s = get_angle_unit_theory('aTiCT4')
elif(return_type == 'data'):
dTiCs = kwargs['dTiCs']
aTiCT4s = kwargs['aTiCT4s']
### get grid
grid = np.zeros((6,2))
for i in range(6):
#### theta
grid[i,0] = aTiCT4s[i]
#### r
if(index_to_target_id[i] in dTiCs.keys()):
grid[i,1] = dTiCs[index_to_target_id[i]]
return grid
### get polar coordinates of heel grid from standardized coordinates.
def get_heel_grid_polar_summary(**kwargs):
"""
    Function: get polar coordinates of the heel grid.
    Input:
    - kwargs:
        - 'return_type': str. calculate angle based on theoretical grid ("theory") or measured grid ("data")
        - 'dRiCs': dictionary. {heel_id : distance value}. |Ri-C| normalized.
        - 'aRiCT4': numpy array. radian values of angles between Ri, C, T4.
    Output:
    - grid: numpy array (6x2). polar coordinate values of the heel grid (R1-R6)
"""
### unravel parameters
index_to_target_id = settings.matching_info.index_to_target_id
return_type = kwargs['return_type']
if(return_type == 'theory'):
dRiCs = kwargs['dRiCs']
aRiCT4 = get_angle_unit_theory('aRiCT4')
elif(return_type == 'data'):
dRiCs = kwargs['dRiCs']
aRiCT4 = kwargs['aRiCT4']
### get grid info.
grid = np.zeros((6,2))
for i in range(6):
grid[i,0] = aRiCT4[i]
if(i+1 in dRiCs.keys()):
grid[i,1] = dRiCs[i+1]
return grid
### Standardized coordinates --> grid in Cartesian coordinates.
def get_cartasian_grid_from_stc(sum_df_ri, ch, cat_angle, cat_length, phi_unit):
### params
target_id_to_index = settings.matching_info.target_id_to_index
### target grid polar
dTiCs = {
3:sum_df_ri['T3c'],
7:sum_df_ri['T7c'],
2:sum_df_ri['T2c'],
5:sum_df_ri['T5c'],
4:1,
}
aTiCT4s = np.zeros((6))
aTiCT4s[target_id_to_index[3]] = phi_unit
aTiCT4s[target_id_to_index[7]] = - phi_unit
aTiCT4s[target_id_to_index[2]] = (sum_df_ri['aT2cT4']/sum_df_ri['aT3cT4'])*phi_unit
aTiCT4s[target_id_to_index[5]] = -(sum_df_ri['aT5cT4']/sum_df_ri['aT7cT4'])*phi_unit
target_stc_polar = get_target_grid_polar_summary(return_type = 'data', dTiCs = dTiCs, aTiCT4s = aTiCT4s)
### heel grid polar
dRiCs = {}
aRiCT4s = np.zeros((6))
for i in [1,2,5,6]:
dRiCs[i] = get_vector_length(ch[i-1,:])/sum_df_ri['length_one_um']
if(i in [1,2]):
aRiCT4s[i-1] = inner_angle(ch[i-1,:], np.array([1,0]), True)/sum_df_ri['aT3cT4'] * phi_unit
elif(i in [5,6]):
aRiCT4s[i-1] = - inner_angle(ch[i-1,:], np.array([1,0]), True)/sum_df_ri['aT7cT4'] * phi_unit
dRiCs[3] = sum_df_ri['R3']
dRiCs[4] = sum_df_ri['R4']
aRiCT4s[3-1] = target_stc_polar[2,0]
aRiCT4s[4-1] = target_stc_polar[5,0]
heels_stc_polar = get_heel_grid_polar_summary(return_type = 'data', dRiCs = dRiCs, aRiCT4 = aRiCT4s)
    ### growth cone tip polar
gc_tip_polar = np.zeros((1,2))
gc_tip_polar[0,0] = sum_df_ri[cat_angle] * phi_unit
gc_tip_polar[0,1] = sum_df_ri[cat_length]
    ### polar to Cartesian
target_stc_car = np.zeros((6,2))
heels_stc_car = np.zeros((6,2))
gc_tip_car = np.zeros((1,2))
for i in range(6):
heels_stc_car[i,0], heels_stc_car[i,1] = polar_to_cartesian(heels_stc_polar[i,1], heels_stc_polar[i,0])
target_stc_car[i,0], target_stc_car[i,1] = polar_to_cartesian(target_stc_polar[i,1], target_stc_polar[i,0])
gc_tip_car[0,0], gc_tip_car[0,1] = polar_to_cartesian(gc_tip_polar[0,1], gc_tip_polar[0,0])
return target_stc_car, heels_stc_car, gc_tip_car
### get angle and length of growth cones.
def get_gc_angle_length(sum_df_ri, coord_heels, phi_unit, cat_angle, cat_length, r_type):
    ### from standardized coordinates to Cartesian coordinates
target_stc_car, heels_stc_car, gc_tip_car = get_cartasian_grid_from_stc(sum_df_ri, coord_heels, cat_angle, cat_length, phi_unit)
### get vector of growth cone extension
if(r_type == 3):
ori = heels_stc_car[2,:]
else:
ori = heels_stc_car[3,:]
v_gc = gc_tip_car - ori
### relative angle
gc_angle = inner_angle(v_gc, np.array([1,0]), True)
gc_angle_rel = gc_angle/phi_unit
if(v_gc[0,1] < 0):
gc_angle_rel = - gc_angle_rel
### relative length.
    gc_length = get_vector_length(v_gc)
    return gc_length, gc_angle_rel
# ================= mutual repulsion calculation =================
### new vector based on two base vectors and its weights (alphas)
def get_angle_prediction_two_vectors(v1, v2, origin, alphas):
v1_uni = unit_vector(v1)
v2_uni = unit_vector(v2)
v_new = alphas[0] * v1_uni + alphas[1] * v2_uni
point = origin + v_new
v_new = unit_vector(v_new)
point = origin + v_new
return point, v_new
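# Minimal usage sketch (hypothetical values): the prediction is the weighted sum
# of the two unit base vectors, re-normalized and anchored at `origin`.
#   p, v = get_angle_prediction_two_vectors(np.array([1.0, 0.0]),
#                                           np.array([0.0, 1.0]),
#                                           np.array([0.0, 0.0]), [0.5, 0.5])
#   v is approximately [0.707, 0.707].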
### calculate theoretical angle
def calculate_mutual_repulsion_theory(coord_heels, coord_target, r_type):
r_type = int(r_type)
### params and initialization
target_id_to_index = settings.matching_info.target_id_to_index
### basics
ori = coord_heels[r_type-1, :]
if(r_type == 3):
v1 = ori - coord_heels[2 -1, :]
v2 = ori - coord_heels[4 -1, :]
v_base = coord_heels[4-1,:] - coord_heels[3-1,:]
elif(r_type == 4):
v1 = ori - coord_heels[5 -1, :]
v2 = ori - coord_heels[3 -1, :]
v_base = coord_heels[3-1,:] - coord_heels[4-1,:]
ls = np.zeros((2))
ls[0] = get_vector_length(v1)
ls[1] = get_vector_length(v2)
# print(f"v1={v1}, v2={v2}.")
### repulse from neighbor heels, weighted equally
alpha = 0.5
p, v = get_angle_prediction_two_vectors(v1, v2, ori, [alpha, 1-alpha])
# print(f"p={p}, v = {v}")
point = np.transpose(p)
vector = np.transpose(v)
theta = inner_angle(vector, v_base, True)
angle = inner_angle(v, np.array([1,0]), True)
return point, vector, theta, angle, np.vstack((v1, v2))
### calculate actual angle.
def calculate_mutual_repulsion_data(sum_df_ri, ch, phi_unit, cat_angle, cat_length, r_type):
target_stc_car, heels_stc_car, gc_tip_car = get_cartasian_grid_from_stc(sum_df_ri, ch, cat_angle, cat_length, phi_unit)
if(r_type == 3):
gc_vector = gc_tip_car[0,:] - heels_stc_car[2,:]
gc_theta = inner_angle(heels_stc_car[3,:] - heels_stc_car[2,:], gc_vector, True)
elif(r_type == 4):
gc_vector = gc_tip_car[0,:] - heels_stc_car[3,:]
gc_theta = inner_angle(heels_stc_car[2,:] - heels_stc_car[3,:], gc_vector, True)
gc_angle = inner_angle(gc_vector, np.array([1,0]), True)
return gc_tip_car, gc_vector, gc_theta, gc_angle
### data for regression.
def generate_regression_data(sum_df):
X = np.zeros((len(sum_df) * 2, 2))
y = np.zeros((len(sum_df) * 2))
for i,ind in enumerate(sum_df.index):
v1 = np.array([sum_df.loc[ind, 'ml_x_v1'], sum_df.loc[ind, 'ml_y_v1']])
v2 = np.array([sum_df.loc[ind, 'ml_x_v2'], sum_df.loc[ind, 'ml_y_v2']])
vy = np.array([sum_df.loc[ind, 'ml_x_vgc'], sum_df.loc[ind, 'ml_y_vgc']])
v1_uni = unit_vector(v1)
v2_uni = unit_vector(v2)
vy_uni = unit_vector(vy)
X[2*i+0, 0] = v1_uni[0]
X[2*i+0, 1] = v2_uni[0]
y[2*i+0] = vy_uni[0]
X[2*i+1, 0] = v1_uni[1]
X[2*i+1, 1] = v2_uni[1]
y[2*i+1] = vy_uni[1]
return X,y
### regression analysis for mutual repulsion
def mutual_repulsion_regression(sum_df, annots_df):
### parameters
paths = settings.paths
### regression fitting
criteria = (sum_df['symmetry']<=0.5) & (sum_df['time_id']<=26)
sum_df_regression = sum_df.loc[criteria,:]
print(len(sum_df_regression))
df_regression_results = pd.DataFrame(columns = ['a', 'b', 'r2'])
print("Regression result:")
for i, r_type in enumerate(["R3", "R4"]):
sum_df_r = sum_df_regression.groupby("type_plot").get_group(r_type)
df_data = sum_df_r[['ml_x_v1', 'ml_y_v1', 'ml_x_v2', 'ml_y_v2', 'ml_x_vgc', 'ml_y_vgc']].dropna()
X, y = generate_regression_data(df_data)
model = linear_model.LassoCV(alphas=np.logspace(-6, -3, 7),
max_iter=100000,
cv=5,
fit_intercept=False,
positive=True)
reg = model.fit(X,y)
print(f"r_type = {r_type}: alpha = {reg.coef_[0]:.2f}, beta = {reg.coef_[1]:.2f}, R^2 = {reg.score(X,y):.2f}")
df_tmp = pd.DataFrame(columns = df_regression_results.columns)
df_tmp.loc[0, 'type_plot'] = r_type
df_tmp.loc[0, 'a'] = reg.coef_[0]
df_tmp.loc[0, 'b'] = reg.coef_[1]
df_tmp.loc[0, 'r2'] = reg.score(X,y)
        df_regression_results = pd.concat([df_regression_results, df_tmp], ignore_index=True)
### calculate regression direction
sum_df_ctrl_group = sum_df_regression.groupby(["time_id", "sample_no"])
phi_unit = get_angle_unit_data(annots_df,
criteria = (annots_df['is_Edge'] == 0) & (annots_df['symmetry'] <= 0.5))
print("Regression direction calculation:", end = " ")
for gp in sum_df_ctrl_group.groups.keys():
time_id, sample_id = gp
print(f"{time_id}_hrs_sample_{sample_id}", end = "; ")
sum_df_current = sum_df_ctrl_group.get_group(gp)
annots_df_current = annots_df.groupby(["time_id", "sample_no"]).get_group(gp).set_index('bundle_no')
for ind in sum_df_current.index:
r_type = int(sum_df_current.loc[ind, 'type_Rcell'])
bundle_no = sum_df_current.loc[ind,'bundle_no']
coord_heels = get_heel_coords_sum(bundle_no, annots_df_current)
ori = coord_heels[r_type-1, :]
if(r_type == 3):
v_base = coord_heels[4-1,:] - coord_heels[3-1,:]
elif(r_type == 4):
v_base = coord_heels[3-1,:] - coord_heels[4-1,:]
type_plot = sum_df_current.loc[ind, 'type_plot']
i_reg = df_regression_results['type_plot'] == type_plot
alphas = np.zeros((2))
alphas[0] = df_regression_results.loc[i_reg, 'a'].values[0]
alphas[1] = df_regression_results.loc[i_reg, 'b'].values[0]
v1 = np.array((sum_df_current.loc[ind, 'ml_x_v1'], sum_df_current.loc[ind, 'ml_y_v1']))
v2 = np.array((sum_df_current.loc[ind, 'ml_x_v2'], sum_df_current.loc[ind, 'ml_y_v2']))
_, v_pred = get_angle_prediction_two_vectors(v1, v2, ori, alphas)
theta = inner_angle(v_base, v_pred, True)
angle = inner_angle(np.array([1,0]), v_pred, True)
sum_df.loc[ind, 'ml_theory_theta_reg'] = theta
sum_df.loc[ind, 'ml_theory_angle_reg'] = angle
sum_df.loc[ind, 'ml_theory_vec_x_reg'] = v_pred[0]
sum_df.loc[ind, 'ml_theory_vec_y_reg'] = v_pred[1]
for plot_cat in ['angle', 'theta']:
theory_cat = f"ml_theory_{plot_cat}"
actual_cat = f"ml_actual_{plot_cat}"
sum_df[f"ml_diff_{plot_cat}"] = (sum_df[theory_cat] - sum_df[actual_cat])
theory_cat = f"ml_theory_{plot_cat}_reg"
actual_cat = f"ml_actual_{plot_cat}"
sum_df[f"ml_diff_{plot_cat}_reg"] = (sum_df[theory_cat] - sum_df[actual_cat])
return df_data
# ================= process annots_df ================= #
### process annotation files.
def process_annots_df(annots_df, rel_poses):
"""
Function: processing Dataframe with heel/target coordinates of bundles.
Inputs:
- annots_df: DataFrame. Imported bundle information csv.
- rel_poses: Dictionaries. Relative position info from the image quantification process.
Output:
- annots_df: DataFrame. Processed DataFrame that combines relative position info and heel/target coordinates (center, orientation, and axis aligned).
"""
paths = settings.paths
target_id_to_index = settings.matching_info.target_id_to_index
index_to_target_id = settings.matching_info.index_to_target_id
annots_df_group = annots_df.groupby(['time_id', 'sample_no'])
### process individual time and sample
for gp in annots_df_group.groups.keys():
time_id, sample_id = gp
print(f'{time_id}, {sample_id}; ', end = "")
rel_pos = rel_poses[gp]
annot_bundles_df = annots_df_group.get_group(gp).reset_index().set_index('bundle_no')
annot_bundles_df.sort_index(inplace = True)
### align target and heel positions.
for i_bd, bundle_no in enumerate(annot_bundles_df.index):
ind_annot = annot_bundles_df.loc[bundle_no, 'index']
orientation = annot_bundles_df.loc[bundle_no, ['Orientation_AP', 'Orientation_DV']]
### original target and heel coordinates.
ct_ori = get_target_coords_sum(bundle_no, annot_bundles_df)
ch_ori = get_heel_coords_sum(bundle_no, annot_bundles_df)
center = line_intersection((ch_ori[2,:], ct_ori[target_id_to_index[3],:]),
(ch_ori[3,:], ct_ori[target_id_to_index[7],:]))
center = np.array(center)
### new coordinate initialization
ct_new = carcedian_re_center_coords(ct_ori, center)
ch_new = carcedian_re_center_coords(ch_ori, center)
### flip coordinates so that heels are at same orientation.
if(orientation['Orientation_AP'] != "A"):
ct_new = flip_coords(ct_new, 'v')
ch_new = flip_coords(ch_new, 'v')
if(orientation['Orientation_DV'] != "R"):
ct_new = flip_coords(ct_new, 'h')
ch_new = flip_coords(ch_new, 'h')
### rotate coordinates so that center-T4 line is x-axis.
angle = inner_angle(np.array([1,0]) - np.array([0,0]), ct_new[3,:] - np.array([0,0]), True)
if(ct_new[3,1] > 0):
angle = 2*np.pi - angle
ch_new = rotate_points(np.array([0,0]), ch_new, angle)
ct_new = rotate_points(np.array([0,0]), ct_new, angle)
### update the new coordinates to annots_df.
for i in range(ch_new.shape[0]):
annots_df.loc[ind_annot, f'coord_X_R{i+1}'] = ch_new[i,0]
annots_df.loc[ind_annot, f'coord_Y_R{i+1}'] = ch_new[i,1]
annots_df.loc[ind_annot, f'coord_X_T{index_to_target_id[i]}'] = ct_new[i,0]
annots_df.loc[ind_annot, f'coord_Y_T{index_to_target_id[i]}'] = ct_new[i,1]
### update other information to annots_df.
phi_range_1 = rel_pos[bundle_no]["phi_range_1"]
phi_range_2 = rel_pos[bundle_no]["phi_range_2"]
symmetry = abs(phi_range_1 - phi_range_2)/max(phi_range_2, phi_range_1)
annots_df.loc[ind_annot, 'symmetry'] = symmetry
annots_df.loc[ind_annot, 'aT7cT4'] = rel_pos[bundle_no]['phi_range_1']
annots_df.loc[ind_annot, 'aT3cT4'] = rel_pos[bundle_no]['phi_range_2']
annots_df.loc[ind_annot, 'aT3cT7'] = phi_range_1 + phi_range_2
annots_df.loc[ind_annot, 'aT2cT4'] = inner_angle(ct_new[target_id_to_index[2],:], ct_new[target_id_to_index[4],:], True)
annots_df.loc[ind_annot, 'aT5cT4'] = inner_angle(ct_new[target_id_to_index[5],:], ct_new[target_id_to_index[4],:], True)
annots_df.loc[ind_annot, 'aT2cT5'] = inner_angle(ct_new[target_id_to_index[2],:], ct_new[target_id_to_index[5],:], True)
annots_df.loc[ind_annot, 'R3'] = rel_pos[bundle_no]["R3"]
annots_df.loc[ind_annot, 'R4'] = rel_pos[bundle_no]["R4"]
annots_df.loc[ind_annot, 'length_one_um'] = rel_pos[bundle_no]["length_one_um"]
annots_df.loc[ind_annot, 'T3c'] = rel_pos[bundle_no]["T3c"]
annots_df.loc[ind_annot, 'T7c'] = rel_pos[bundle_no]["T7c"]
print("")
return annots_df
# ================= process summary_df =================
### supporting function: fill sum_df information for each bundle.
def fill_sum_df_info(sum_df, annots_df_current, rel_pos, num_rcells, bundle_no, iR, r_type, phi_unit_real, phi_unit_theory):
"""
Function: fill sum_df information for each bundle
Inputs:
- sum_df: DataFrame. summary of length/angle and annotations.
- annots_df_current: DataFrame. Annotation csv.
- rel_pos: Dictionary. relative lengths of targets and heels.
- num_rcells: number of R cells per bundle.
- bundle_no: Bundle No. of bundle-of-interest
- iR: int. index of sum_df for this R-cell
- r_type: int. type of R-cell (3 for R3 and 4 for R4.)
    - phi_unit_real: float. measured angular unit used to convert standardized angles to radians.
    - phi_unit_theory: float. theoretical angular unit used for plotting-scale angles.
"""
qc_cols = group_headers(annots_df_current, 'is', True)
annot_angle_cols = group_headers(annots_df_current, 'aT', True)
phi_range_1 = rel_pos[bundle_no]["phi_range_1"]
phi_range_2 = rel_pos[bundle_no]["phi_range_2"]
aT30T7 = phi_range_1 + phi_range_2
symmetry = abs(phi_range_1 - phi_range_2)/max(phi_range_2, phi_range_1)
coord_heels = get_heel_coords_sum(bundle_no, annots_df_current)
# coord_targets = get_target_coords_sum(bundle_no, annots_df_current)
### convert R4 angle to mirror-symmetric
if(r_type == 4):
sum_df.loc[iR, f'angle_mrr'] = 0 - sum_df.loc[iR, 'angle']
elif(r_type == 3):
sum_df.loc[iR, f'angle_mrr'] = sum_df.loc[iR, 'angle']
### add total number of r cells in bundle
sum_df.loc[iR,'bundle_rcells_total'] = num_rcells
### add relative position info
for key in rel_pos[bundle_no].keys():
sum_df.loc[iR, key] = rel_pos[bundle_no][key]
### add grid angle value data
sum_df = dataframe_add_column(sum_df, annot_angle_cols)
sum_df.loc[iR, annot_angle_cols] = annots_df_current.loc[bundle_no, annot_angle_cols]
### add QC columns
sum_df.loc[iR, qc_cols] = annots_df_current.loc[bundle_no, qc_cols]
sum_df.loc[iR, 'symmetry'] = symmetry
### add positions of T3, T4, and T7 from heel.
for col_type in [3,4,7]:
cols = [f'T{col_type}l', f'T{col_type}c', f'T{col_type}h']
new_cols = [f'{i}_fromheel' for i in cols]
sum_df = dataframe_add_column(sum_df, new_cols)
if(col_type == 3):
sum_df.loc[iR, new_cols] = sum_df.loc[iR, cols].values - rel_pos[bundle_no]['R3']
elif(col_type == 7):
sum_df.loc[iR, new_cols] = sum_df.loc[iR, cols].values - rel_pos[bundle_no]['R4']
elif(col_type == 4):
if(r_type == 3):
sum_df.loc[iR, new_cols] = sum_df.loc[iR, cols].values - rel_pos[bundle_no]['R3']
elif(r_type == 4):
sum_df.loc[iR, new_cols] = sum_df.loc[iR, cols].values - rel_pos[bundle_no]['R4']
sum_df.loc[iR, 'T4l_fromheel']
### get growth cone angle and length from tip to heel
cat_angle = 'angle'
cat_length = 'length'
gc_length, gc_angle_rel = get_gc_angle_length(sum_df.loc[iR,:], coord_heels, phi_unit_real, cat_angle, cat_length, r_type)
sum_df.loc[iR, f"{cat_length}_gc"] = gc_length
sum_df.loc[iR, f"{cat_angle}_gc"] = gc_angle_rel
sum_df.loc[iR, f"{cat_angle}_gc_plot"] = gc_angle_rel * phi_unit_theory
if(r_type == 4):
sum_df.loc[iR, f"{cat_angle}_gc_mrr"] = 0 - gc_angle_rel
elif(r_type == 3):
sum_df.loc[iR, f"{cat_angle}_gc_mrr"] = gc_angle_rel
return sum_df
### processing data structure with annotated growth cone length and angle, and update bundle annotation data structure at the same time.
def process_sum_df(sum_df_old, annots_df, rel_poses, is_ml):
"""
Function: processing Dataframe with annotated growth cone length and angle, and update bundle annotation data structure at the same time.
Inputs:
- sum_df_old: DataFrame. Imported angle and length dataframe.
    - annots_df: DataFrame. Imported annotation csv dataframe.
- rel_poses: Dictionary. Relative position info from the image quantification process.
- is_ml: Boolean. whether or not to calculate repulsion model - related values.
Output:
- sum_df: DataFrame. processed DataFrame that contains both bundle heel and target info and growth cone length and angle info.
"""
### get phi_unit
criteria = (annots_df['is_Edge'] == 0) & (annots_df['symmetry'] <= 0.5)
phi_unit_avg = get_angle_unit_data(annots_df, criteria = criteria)
phi_unit_theory = get_angle_unit_theory('aTiCT4')[2]
# print(phi_unit_avg, phi_unit_theory)
### new sum_df dataframe with added columns
sum_df = sum_df_old.copy(deep = True)
paths = settings.paths
qc_cols = group_headers(annots_df, 'is_', True)
cols_add = ['heel_pos_type', 'bundle_rcells_total', 'length_fromheel']
cols_add += qc_cols
sum_df = dataframe_add_column(sum_df, cols_add)
### group by time and sample ID
annots_df_group = annots_df.groupby(['time_id', 'sample_no'])
sum_df_group = sum_df.groupby(['time_id', 'sample_no'])
### process each sample
for key in rel_poses.keys():
time_id = key[0]
sample_no = key[1]
rel_pos = rel_poses[key]
print(f"{time_id}, {sample_no}", end = "; ")
# if((time_id, sample_no) not in sum_df_group.groups):
# print(f"ERROR! {time_id}hrs_smp{sample_no} not in sum_df!")
if((time_id, sample_no) in sum_df_group.groups):
### sum_df
sum_df_current = sum_df_group.get_group((time_id, sample_no))
sum_df_current_gp = sum_df_current.groupby('bundle_no')
### annots_df
annots_df_current = annots_df_group.get_group((time_id, sample_no))
annots_df_current.loc[:,'bundle_no'] = annots_df_current.loc[:,'bundle_no'].values.astype(int)
annots_df_current = annots_df_current.reset_index().set_index('bundle_no')
### process each bundle
for bundle_no in annots_df_current.index:
### bundle geometry information.
phi_range_1 = rel_pos[bundle_no]["phi_range_1"]
phi_range_2 = rel_pos[bundle_no]["phi_range_2"]
symmetry = abs(phi_range_1 - phi_range_2)/max(phi_range_2, phi_range_1)
### heel and target grid
ch = get_heel_coords_sum(bundle_no, annots_df_current)
ct = get_target_coords_sum(bundle_no, annots_df_current)
### relative positions info
if(bundle_no not in rel_pos.keys()):
print(f"ERROR! Bundle No.{bundle_no} don't exist in output_data!")
else:
r3_heel = rel_pos[bundle_no]['R3']
r4_heel = rel_pos[bundle_no]['R4']
t3_pos = rel_pos[bundle_no]['T3c']
t7_pos = rel_pos[bundle_no]['T7c']
### matching summary_df with bundles_df
inds_sum = sum_df_current.index[(sum_df_current['bundle_no'] == bundle_no)]
### Error: more than two R cells recorded for the particular bundle.
if(len(inds_sum) > 2):
print(f'Error! multiple incidents (n = {inds_sum}) of same bundle! bundle_no = {bundle_no}')
### normal
elif((len(inds_sum) > 0) & (len(inds_sum) <= 2)):
r_types = sum_df_current.loc[inds_sum,['type_Rcell']]
num_rcells = len(inds_sum)
#### R3R4 case
if(sum_df_current.loc[inds_sum,['type_bundle']].values.flatten()[0] == 'R3R4'):
for iR in r_types.index:
r_type = r_types.loc[iR, 'type_Rcell']
if(r_type == 3):
sum_df.loc[iR, 'heel_pos_type'] = 3
sum_df.loc[iR, 'length_fromheel'] = sum_df.loc[iR, 'length'] - r3_heel
elif(r_type == 4):
sum_df.loc[iR,'heel_pos_type'] = 4
sum_df.loc[iR, 'length_fromheel'] = sum_df.loc[iR, 'length'] - r4_heel
else:
                                    print('ERROR! Not R3 nor R4!')
if(sum_df.loc[iR, 'angle'] < 0):
phi_unit_real = phi_range_1
else:
phi_unit_real = phi_range_2
sum_df = fill_sum_df_info(sum_df, annots_df_current, rel_pos, num_rcells, bundle_no, iR, r_type, phi_unit_real, phi_unit_theory)
#### mutual repulsion
if(is_ml):
##### grid in standardized coordinates
target_stc_car, heels_stc_car, _ = get_cartasian_grid_from_stc(sum_df.loc[iR,:], ch, 'angle', 'length', phi_unit_avg)
# print(f"phi_unit_avg={phi_unit_avg}")
# print(f"heels_stc_car={heels_stc_car}")
##### get theoretical angles
point, vector, theta, angle, vs = calculate_mutual_repulsion_theory(heels_stc_car, target_stc_car, r_type)
# print(f"theta={theta}, angle={angle}.")
sum_df.loc[iR, f'ml_theory_theta'] = theta
sum_df.loc[iR, f'ml_theory_angle'] = angle
sum_df.loc[iR, f'ml_theory_vec_x'] = vector[0]
sum_df.loc[iR, f'ml_theory_vec_y'] = vector[1]
for i in range(vs.shape[0]):
sum_df.loc[iR, f'ml_x_v{i+1}'] = vs[i,0]
sum_df.loc[iR, f'ml_y_v{i+1}'] = vs[i,1]
#### get reference points
if(r_type == 3):
theta_ref = inner_angle(target_stc_car[2,:] - heels_stc_car[2,:], heels_stc_car[3,:] - heels_stc_car[2,:], True)
                                        angle_ref = inner_angle(target_stc_car[2,:] - heels_stc_car[2,:], np.array([1,0]), True)
# -*- coding: utf-8 -*-
import numpy as np
from scipy.interpolate import interp1d
from scipy.ndimage import gaussian_filter1d
import os
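# The Pattern class below raises BkgNotInRangeError, but this file neither
# imports nor defines it; a minimal definition is assumed here so the module is
# self-contained (a sketch, not necessarily the original implementation).
class BkgNotInRangeError(Exception):
    def __init__(self, pattern_name):
        self.pattern_name = pattern_name

    def __str__(self):
        return "The background does not overlap with the x-range of pattern " + self.pattern_name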
class Pattern(object):
def __init__(self, x=None, y=None, name=''):
if x is None:
self._x = np.linspace(0.1, 15, 100)
else:
self._x = x
if y is None:
self._y = np.log(self._x ** 2) - (self._x * 0.2) ** 2
else:
self._y = y
self.name = name
self.offset = 0
self._scaling = 1
self.smoothing = 0
self.bkg_pattern = None
def load(self, filename, skiprows=0):
try:
if filename.endswith('.chi'):
skiprows = 4
data = np.loadtxt(filename, skiprows=skiprows)
self._x = data.T[0]
self._y = data.T[1]
self.name = os.path.basename(filename).split('.')[:-1][0]
except ValueError:
print('Wrong data format for pattern file! - ' + filename)
return -1
@staticmethod
def from_file(filename, skip_rows=0):
try:
if filename.endswith('.chi'):
skip_rows = 4
data = np.loadtxt(filename, skiprows=skip_rows)
x = data.T[0]
y = data.T[1]
name = os.path.basename(filename).split('.')[:-1][0]
return Pattern(x, y, name)
except ValueError:
print('Wrong data format for pattern file! - ' + filename)
return -1
def save(self, filename, header=''):
data = np.dstack((self._x, self._y))
np.savetxt(filename, data[0], header=header)
def set_background(self, pattern):
self.bkg_pattern = pattern
def reset_background(self):
self.bkg_pattern = None
def set_smoothing(self, amount):
self.smoothing = amount
def rebin(self, bin_size):
"""
Returns a new pattern which is a rebinned version of the current one.
"""
x, y = self.data
x_min = np.round(np.min(x) / bin_size) * bin_size
x_max = np.round(np.max(x) / bin_size) * bin_size
new_x = np.arange(x_min, x_max + 0.1 * bin_size, bin_size)
bins = np.hstack((x_min - bin_size * 0.5, new_x + bin_size * 0.5))
new_y = (np.histogram(x, bins, weights=y)[0] / np.histogram(x, bins)[0])
return Pattern(new_x, new_y)
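    # Usage sketch (hypothetical data): rebin a noisy pattern onto a coarser,
    # equally spaced x-grid.
    #   p = Pattern(np.linspace(0, 10, 1000), np.random.rand(1000))
    #   p_coarse = p.rebin(0.05)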
@property
def data(self):
if self.bkg_pattern is not None:
# create background function
x_bkg, y_bkg = self.bkg_pattern.data
if not np.array_equal(x_bkg, self._x):
# the background will be interpolated
f_bkg = interp1d(x_bkg, y_bkg, kind='linear')
# find overlapping x and y values:
ind = np.where((self._x <= np.max(x_bkg)) & (self._x >= np.min(x_bkg)))
x = self._x[ind]
y = self._y[ind]
if len(x) == 0:
# if there is no overlapping between background and pattern, raise an error
raise BkgNotInRangeError(self.name)
y = y * self._scaling + self.offset - f_bkg(x)
else:
# if pattern and bkg have the same x basis we just delete y-y_bkg
x, y = self._x, self._y * self._scaling + self.offset - y_bkg
else:
x, y = self.original_data
if self.smoothing > 0:
y = gaussian_filter1d(y, self.smoothing)
return x, y
@data.setter
def data(self, data):
(x, y) = data
self._x = x
self._y = y
self.scaling = 1
self.offset = 0
@property
def original_data(self):
return self._x, self._y * self._scaling + self.offset
@property
def x(self):
return self._x
@x.setter
def x(self, new_value):
self._x = new_value
@property
def y(self):
return self._y
@y.setter
def y(self, new_y):
self._y = new_y
@property
def scaling(self):
return self._scaling
@scaling.setter
def scaling(self, value):
if value < 0:
self._scaling = 0
else:
self._scaling = value
def limit(self, x_min, x_max):
x, y = self.data
return Pattern(x[np.where((x_min < x) & (x < x_max))],
y[np.where((x_min < x) & (x < x_max))])
def extend_to(self, x_value, y_value):
"""
Extends the current pattern to a specific x_value by filling it with the y_value. Does not modify inplace but
returns a new filled Pattern
:param x_value: Point to which extend the pattern should be smaller than the lowest x-value in the pattern or
vice versa
:param y_value: number to fill the pattern with
:return: extended Pattern
"""
x_step = np.mean(np.diff(self.x))
x_min = np.min(self.x)
x_max = np.max(self.x)
if x_value < x_min:
x_fill = np.arange(x_min - x_step, x_value-x_step*0.5, -x_step)[::-1]
y_fill = np.zeros(x_fill.shape)
y_fill.fill(y_value)
new_x = np.concatenate((x_fill, self.x))
new_y = np.concatenate((y_fill, self.y))
elif x_value > x_max:
x_fill = np.arange(x_max + x_step, x_value+x_step*0.5, x_step)
y_fill = np.zeros(x_fill.shape)
y_fill.fill(y_value)
new_x = np.concatenate((self.x, x_fill))
new_y = np.concatenate((self.y, y_fill))
else:
return self
return Pattern(new_x, new_y)
def plot(self, show=False, *args, **kwargs):
import matplotlib.pyplot as plt
plt.plot(self.x, self.y, *args, **kwargs)
if show:
plt.show()
# Operators:
def __sub__(self, other):
orig_x, orig_y = self.data
other_x, other_y = other.data
if orig_x.shape != other_x.shape:
            # todo: subtraction of patterns with different shapes seems to fail somehow...
            # the background will be interpolated
            other_fcn = interp1d(other_x, other_y, kind='linear')
# find overlapping x and y values:
ind = np.where((orig_x <= np.max(other_x)) & (orig_x >= np.min(other_x)))
x = orig_x[ind]
y = orig_y[ind]
if len(x) == 0:
# if there is no overlapping between background and pattern, raise an error
raise BkgNotInRangeError(self.name)
return Pattern(x, y - other_fcn(x))
else:
return Pattern(orig_x, orig_y - other_y)
def __add__(self, other):
orig_x, orig_y = self.data
other_x, other_y = other.data
if orig_x.shape != other_x.shape:
# the background will be interpolated
            other_fcn = interp1d(other_x, other_y, kind='linear')
# find overlapping x and y values:
            ind = np.where((orig_x <= np.max(other_x)) & (orig_x >= np.min(other_x)))
            x = orig_x[ind]
            y = orig_y[ind]
            if len(x) == 0:
                # if there is no overlapping between the two patterns, raise an error
                raise BkgNotInRangeError(self.name)
            return Pattern(x, y + other_fcn(x))
        else:
            return Pattern(orig_x, orig_y + other_y)
import numpy as np
from scipy import integrate
from scipy import interpolate
# Cosmological parameters
Om0 = 0.272
Ol0 = 1.0 - Om0
h = 0.704
ns = 0.961
sigma80 = 0.807
SPEEDOFLIGHT_KMS = 2.99792458e5
def nhat(alpha, delta):
nhat = np.zeros(3)
nhat[0] = np.cos(delta) * np.cos(alpha)
nhat[1] = np.cos(delta) * np.sin(alpha)
nhat[2] = np.sin(delta)
return nhat
def angsep(alpha1, alpha2, delta1, delta2):
    # angular separation on the sphere (spherical law of cosines)
    cos_ang = np.sin(delta1) * np.sin(delta2) + np.cos(delta1) * np.cos(delta2) * np.cos(alpha1 - alpha2)
    return np.arccos(cos_ang)
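# Sanity check (assumed usage): two directions 90 degrees apart on the equator
# should give a separation of pi/2.
#   angsep(0.0, np.pi / 2.0, 0.0, 0.0)  # -> 1.5707963...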
#!/usr/bin/env python3
import enum
import gym
import numpy as np
from caffe2.python import workspace
from ml.rl.test.gym.gym_predictor import (
GymDDPGPredictor,
GymDQNPredictor,
GymDQNPredictorPytorch,
)
from ml.rl.test.utils import default_normalizer
from ml.rl.training.training_data_page import TrainingDataPage
class ModelType(enum.Enum):
DISCRETE_ACTION = "discrete"
PARAMETRIC_ACTION = "parametric"
CONTINUOUS_ACTION = "continuous"
PYTORCH_DISCRETE_DQN = "pytorch_discrete_dqn"
PYTORCH_PARAMETRIC_DQN = "pytorch_parametric_dqn"
class EnvType(enum.Enum):
DISCRETE_ACTION = "discrete"
CONTINUOUS_ACTION = "continuous"
class OpenAIGymEnvironment:
def __init__(self, gymenv, epsilon, softmax_policy, max_replay_memory_size, gamma):
"""
Creates an OpenAIGymEnvironment object.
:param gymenv: String identifier for desired environment.
:param epsilon: Fraction of the time the agent should select a random
action during training.
:param softmax_policy: 1 to use softmax selection policy or 0 to use
max q selection.
        :param max_replay_memory_size: Upper bound on the number of transitions
            to store in replay memory.
        :param gamma: Reward discount factor.
"""
self.epsilon = epsilon
self.softmax_policy = softmax_policy
self.replay_memory = []
self.max_replay_memory_size = max_replay_memory_size
self.memory_num = 0
self.skip_insert_until = self.max_replay_memory_size
self.gamma = gamma
self._create_env(gymenv)
if not self.img:
self.state_features = [str(sf) for sf in range(self.state_dim)]
if self.action_type == EnvType.DISCRETE_ACTION:
self.actions = [str(a) for a in range(self.action_dim)]
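    # Example construction (hypothetical settings; any Box/Discrete gym id works):
    #   env = OpenAIGymEnvironment("CartPole-v0", epsilon=0.2, softmax_policy=0,
    #                              max_replay_memory_size=10000, gamma=0.99)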
def _create_env(self, gymenv):
"""
Creates a gym environment object and checks if it is supported. We
support environments that supply Box(x, ) state representations and
require Discrete(y) or Box(y,) action inputs.
:param gymenv: String identifier for desired environment.
"""
if gymenv not in [e.id for e in gym.envs.registry.all()]:
raise Exception("Env {} not found in OpenAI Gym.".format(gymenv))
self.env = gym.make(gymenv)
supports_state = isinstance(self.env.observation_space, gym.spaces.Box) and len(
self.env.observation_space.shape
) in [1, 3]
supports_action = type(self.env.action_space) in (
gym.spaces.Discrete,
gym.spaces.Box,
)
        if not (supports_state and supports_action):
raise Exception(
"Unsupported environment state or action type: {}, {}".format(
self.env.observation_space, self.env.action_space
)
)
self.action_space = self.env.action_space
if isinstance(self.env.action_space, gym.spaces.Discrete):
self.action_type = EnvType.DISCRETE_ACTION
self.action_dim = self.env.action_space.n
elif isinstance(self.env.action_space, gym.spaces.Box):
self.action_type = EnvType.CONTINUOUS_ACTION
self.action_dim = self.env.action_space.shape[0]
if len(self.env.observation_space.shape) == 1:
self.state_dim = self.env.observation_space.shape[0]
self.img = False
elif len(self.env.observation_space.shape) == 3:
self.height, self.width, self.num_input_channels = (
self.env.observation_space.shape
)
self.img = True
def sample_memories(self, batch_size, model_type):
"""
Samples transitions from replay memory uniformly at random.
:param batch_size: Number of sampled transitions to return.
:param model_type: Model type (discrete, parametric).
"""
cols = [[], [], [], [], [], [], [], [], []]
indices = np.random.permutation(len(self.replay_memory))[:batch_size]
for idx in indices:
memory = self.replay_memory[idx]
for col, value in zip(cols, memory):
col.append(value)
possible_next_actions_lengths = np.array(cols[7], dtype=np.int32)
next_states = np.array(cols[3], dtype=np.float32)
if model_type in (
ModelType.PARAMETRIC_ACTION.value,
ModelType.PYTORCH_PARAMETRIC_DQN.value,
):
possible_next_actions = []
for pna_matrix in cols[6]:
for row in pna_matrix:
possible_next_actions.append(row)
tiled_states = np.repeat(next_states, possible_next_actions_lengths, axis=0)
possible_next_actions = np.array(possible_next_actions, dtype=np.float32)
next_state_pnas_concat = np.concatenate(
(tiled_states, possible_next_actions), axis=1
)
else:
possible_next_actions = np.array(cols[6], dtype=np.float32)
next_state_pnas_concat = None
return TrainingDataPage(
states=np.array(cols[0], dtype=np.float32),
actions=np.array(cols[1], dtype=np.float32),
propensities=None,
rewards=np.array(cols[2], dtype=np.float32),
next_states=np.array(cols[3], dtype=np.float32),
next_actions=np.array(cols[4], dtype=np.float32),
possible_next_actions=possible_next_actions,
episode_values=None,
not_terminals=np.logical_not(np.array(cols[5]), dtype=np.bool),
time_diffs=np.array(cols[8], dtype=np.int32),
possible_next_actions_lengths=possible_next_actions_lengths,
next_state_pnas_concat=next_state_pnas_concat,
)
def sample_and_load_training_data_c2(self, num_samples, model_type):
"""
Loads and preprocesses shuffled, transformed transitions from
replay memory into the training net.
:param num_samples: Number of transitions to sample from replay memory.
:param model_type: Model type (discrete, parametric).
"""
tdp = self.sample_memories(num_samples, model_type)
workspace.FeedBlob("states", tdp.states)
workspace.FeedBlob("actions", tdp.actions)
workspace.FeedBlob("rewards", tdp.rewards.reshape(-1, 1))
workspace.FeedBlob("next_states", tdp.next_states)
workspace.FeedBlob("not_terminals", tdp.not_terminals.reshape(-1, 1))
workspace.FeedBlob("time_diff", tdp.time_diffs.reshape(-1, 1))
workspace.FeedBlob("next_actions", tdp.next_actions)
workspace.FeedBlob("possible_next_actions", tdp.possible_next_actions)
workspace.FeedBlob(
"possible_next_actions_lengths", tdp.possible_next_actions_lengths
)
@property
def normalization(self):
if self.img:
return None
else:
return default_normalizer(self.state_features)
@property
def normalization_action(self):
return default_normalizer(
[x for x in list(range(self.state_dim, self.state_dim + self.action_dim))]
)
def policy(self, predictor, next_state, test):
"""
Selects the next action.
:param predictor: RLPredictor object whose policy to follow.
:param next_state: State to evaluate predictor's policy on.
:param test: Whether or not to bypass an epsilon-greedy selection policy.
"""
# Add a dimension since this expects a batch of examples
next_state = np.expand_dims(next_state.astype(np.float32), axis=0)
action = np.zeros([self.action_dim], dtype=np.float32)
if isinstance(predictor, (GymDQNPredictor, GymDQNPredictorPytorch)):
            if not test and np.random.rand() < self.epsilon:
                # assumed epsilon-greedy branch: return a uniformly random
                # one-hot discrete action.
                random_idx = np.random.randint(self.action_dim)
                action[random_idx] = 1.0
                return action
#!/usr/bin/env python
###########################################################
#
# Script for spectrum analysis of a mono .wav file.
#
# Code inspired on example code from:
# http://stackoverflow.com/questions/18625085/how-to-plot-a-wav-file
# http://plot.ly/matplotlib/fft/
# http://stackoverflow.com/questions/23507217/python-plotting-2d-data-on-to-3d-axes/23968448#23968448
# http://glowingpython.blogspot.com/2011/08/how-to-plot-frequency-spectrum-with.html
# http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.specgram
#
# Includes a sample clip:
# https://www.freesound.org/people/Kyster/sounds/117719/
#
import sys
from scipy.io.wavfile import read
import matplotlib as mpl
import matplotlib.pyplot as pyplot
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
###########################################################
# Expects a mono .wav file
# Use a hard coded path or provide one at runtime
if(len(sys.argv) == 1):
input_path = ""
input_name = input_path+"117719__kyster__low-d.wav"
else:
input_name = sys.argv[1]
# Read and convert .wav to a list
input_wav = read(input_name)
input_audio = input_wav[1]
input_rate = input_wav[0]
#input_sample = input_audio[1:22050]
# Parse filename without extension for output and chart titles
input_name = input_name.split('/')[-1]
input_name = input_name.split('.')
input_name = '.'.join(input_name[0:-1])
###########################################################
# Compute FFT on entire input .wav file
fundamental = 313
xmin = 0
xmax = 2000
zmin = 0
zmax = 2000
y = input_audio
Fs = input_rate
n = len(y) # length of the signal
k = np.arange(n)
T = float(n)/Fs
frq = k/T # two sides frequency range
frq = frq[range(n//2)] # one side frequency range
Y = np.fft.fft(y)/n # fft computing and normalization
Y = Y[range(n//2)]
Ynorm = (1000*Y) / max(abs(Y))
###########################################################
# Plot entire FFT with matplotlib
pyplot.figure(figsize=(4,2), dpi=300, facecolor='w', edgecolor='w')
# Plot light vertical lines on even harmonics
for harmonic in range(0, xmax, fundamental*2):
pyplot.axvline(harmonic, color='0.9')
# Plot dark vertical lines on odd harmonics
for harmonic in range(fundamental, xmax, fundamental*2):
pyplot.axvline(harmonic, color='0.8')
pyplot.plot(frq,abs(Ynorm),'k') # plotting the spectrum
pyplot.title(input_name)
#pyplot.xlabel('Freq (Hz)')
#pyplot.ylabel('|Y (freq)|')
pyplot.axis([xmin, xmax, 0, 1000])
pyplot.xticks(np.arange(xmin, xmax, fundamental))
pyplot.savefig(input_name+".png", dpi=300, bbox_inches='tight')
pyplot.close()
#pyplot.show()
###########################################################
# Plot a spectrogram with matplotlib
hot_norm = mpl.colors.Normalize(vmin=-1.,vmax=1.)
pyplot.specgram(input_audio, mode='psd', scale='linear', detrend='none',
cmap='gist_heat', NFFT=4096, Fs=44100, noverlap=2048,
norm=mpl.colors.Normalize(vmin=0.,vmax=2000.))
pyplot.axis([0, T, xmin, xmax])
pyplot.yticks(np.arange(xmin, xmax, fundamental))
#!/usr/bin/env python
# coding: utf-8
# In[38]:
from scipy.io import loadmat
import glob
import cv2
from shutil import copyfile
import os
import numpy as np
import matplotlib.pylab as plt
from skimage.io import imread
from pathlib import Path
import skimage
from skimage import feature, morphology
from matplotlib.pyplot import figure
import matplotlib
from skimage.color import rgb2gray
import copy
import gc
import sys
# In[39]:
bird_labels = {'head':1, 'leye':2, 'reye':3, 'beak':4, 'torso':5, 'neck':6, 'lwing':7, 'rwing':8, 'lleg':9, 'lfoot':10, 'rleg':11, 'rfoot':12, 'tail':13}
cat_labels = {'head':1, 'leye':2, 'reye':3, 'lear':4, 'rear':5, 'nose':6, 'torso':7, 'neck':8, 'lfleg':9, 'lfpa':10, 'rfleg':11, 'rfpa':12, 'lbleg':13, 'lbpa':14, 'rbleg':15, 'rbpa':16, 'tail':17}
cow_labels = {'head':1, 'leye':2, 'reye':3, 'lear':4, 'rear':5, 'muzzle':6, 'lhorn':7, 'rhorn':8, 'torso':9, 'neck':10, 'lfuleg':11, 'lflleg':12, 'rfuleg':13, 'rflleg':14, 'lbuleg':15, 'lblleg':16, 'rbuleg':17, 'rblleg':18, 'tail':19}
dog_labels = {'head':1, 'leye':2, 'reye':3, 'lear':4, 'rear':5, 'nose':6, 'torso':7, 'neck':8, 'lfleg':9, 'lfpa':10, 'rfleg':11, 'rfpa':12, 'lbleg':13, 'lbpa':14, 'rbleg':15, 'rbpa':16, 'tail':17, 'muzzle':18}
horse_labels = {'head':1, 'leye':2, 'reye':3, 'lear':4, 'rear':5, 'muzzle':6, 'lfho':7, 'rfho':8, 'torso':9, 'neck':10, 'lfuleg':11, 'lflleg':12, 'rfuleg':13, 'rflleg':14, 'lbuleg':15, 'lblleg':16, 'rbuleg':17, 'rblleg':18, 'tail':19, 'lbho':20, 'rbho':21}
bottle_labels = {'cap':1, 'body':2}
person_labels = {'head':1, 'leye':2, 'reye':3, 'lear':4, 'rear':5, 'lebrow':6, 'rebrow':7, 'nose':8, 'mouth':9, 'hair':10, 'torso':11, 'neck': 12, 'llarm': 13, 'luarm': 14, 'lhand': 15, 'rlarm':16, 'ruarm':17, 'rhand': 18, 'llleg': 19, 'luleg':20, 'lfoot':21, 'rlleg':22, 'ruleg':23, 'rfoot':24}
bus_labels = { 'frontside':1, 'leftside':2, 'rightside':3, 'backside':4, 'roofside':5, 'leftmirror':6, 'rightmirror':7, 'fliplate':8, 'bliplate':9 }
for ii in range(0,10):
bus_labels['door_{}'.format(ii+1)] = 10+ii
for ii in range(0,10):
bus_labels['wheel_{}'.format(ii+1)] = 20+ii
for ii in range(0,10):
bus_labels['headlight_{}'.format(ii+1)] = 30+ii
for ii in range(0,10):
bus_labels['window_{}'.format(ii+1)] = 40+ii
aeroplane_labels = {'body': 1, 'stern': 2, 'lwing': 3, 'rwing':4, 'tail':5}
for ii in range(0, 10):
aeroplane_labels['engine_{}'.format(ii+1)] = 6+ii
for ii in range(0, 10):
aeroplane_labels['wheel_{}'.format(ii+1)] = 16+ii
motorbike_labels = {'fwheel': 1, 'bwheel': 2, 'handlebar': 3, 'saddle': 4}
for ii in range(0,10):
motorbike_labels['headlight_{}'.format(ii+1)] = 5+ii
motorbike_labels['body']=15
bicycle_labels = {'fwheel': 1, 'bwheel': 2, 'saddle': 3, 'handlebar': 4, 'chainwheel': 5}
for ii in range(0,10):
bicycle_labels['headlight_{}'.format(ii+1)] = 6+ii
bicycle_labels['body']=16
train_labels = {'head':1,'hfrontside':2,'hleftside':3,'hrightside':4,'hbackside':5,'hroofside':6}
for ii in range(0,10):
train_labels['headlight_{}'.format(ii+1)] = 7 + ii
for ii in range(0,10):
train_labels['coach_{}'.format(ii+1)] = 17 + ii
for ii in range(0,10):
train_labels['cfrontside_{}'.format(ii+1)] = 27 + ii
for ii in range(0,10):
train_labels['cleftside_{}'.format(ii+1)] = 37 + ii
for ii in range(0,10):
train_labels['crightside_{}'.format(ii+1)] = 47 + ii
for ii in range(0,10):
train_labels['cbackside_{}'.format(ii+1)] = 57 + ii
for ii in range(0,10):
train_labels['croofside_{}'.format(ii+1)] = 67 + ii
sheep_labels = cow_labels
car_labels = bus_labels
part_labels = {'bird': bird_labels, 'cat': cat_labels, 'cow': cow_labels, 'dog': dog_labels, 'sheep': sheep_labels, 'horse':horse_labels, 'car':car_labels, 'bus':bus_labels, 'bicycle':bicycle_labels, 'motorbike':motorbike_labels, 'person':person_labels,'aeroplane':aeroplane_labels, 'train':train_labels}
# In[40]:
object_name = sys.argv[1]
animals = [object_name]
# In[4]:
def rotate_im(image, angle):
# grab the dimensions of the image and then determine the
# centre
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
# grab the rotation matrix (applying the negative of the
# angle to rotate clockwise), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
# perform the actual rotation and return the image
image = cv2.warpAffine(image, M, (nW, nH))
# image = cv2.resize(image, (w,h))
return image
# In[5]:
def get_corners(bboxes):
width = (bboxes[:,2] - bboxes[:,0]).reshape(-1,1)
height = (bboxes[:,3] - bboxes[:,1]).reshape(-1,1)
x1 = bboxes[:,0].reshape(-1,1)
y1 = bboxes[:,1].reshape(-1,1)
x2 = x1 + width
y2 = y1
x3 = x1
y3 = y1 + height
x4 = bboxes[:,2].reshape(-1,1)
y4 = bboxes[:,3].reshape(-1,1)
corners = np.hstack((x1,y1,x2,y2,x3,y3,x4,y4))
return corners
# In[6]:
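# clip_box below relies on a bbox_area helper that is not defined in this file;
# the standard area computation is assumed here.
def bbox_area(bbox):
    # bbox columns are assumed to be (x1, y1, x2, y2, ...)
    return (bbox[:, 2] - bbox[:, 0]) * (bbox[:, 3] - bbox[:, 1])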
def clip_box(bbox, clip_box, alpha):
ar_ = (bbox_area(bbox))
x_min = np.maximum(bbox[:,0], clip_box[0]).reshape(-1,1)
y_min = np.maximum(bbox[:,1], clip_box[1]).reshape(-1,1)
x_max = np.minimum(bbox[:,2], clip_box[2]).reshape(-1,1)
y_max = np.minimum(bbox[:,3], clip_box[3]).reshape(-1,1)
bbox = np.hstack((x_min, y_min, x_max, y_max, bbox[:,4:]))
delta_area = ((ar_ - bbox_area(bbox))/ar_)
mask = (delta_area < (1 - alpha)).astype(int)
bbox = bbox[mask == 1,:]
return bbox
# In[7]:
def rotate_box(corners,angle, cx, cy, h, w):
corners = corners.reshape(-1,2)
corners = np.hstack((corners, np.ones((corners.shape[0],1), dtype = type(corners[0][0]))))
M = cv2.getRotationMatrix2D((cx, cy), angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cx
M[1, 2] += (nH / 2) - cy
# Prepare the vector to be transformed
calculated = np.dot(M,corners.T).T
calculated = calculated.reshape(-1,8)
return calculated
# In[8]:
def get_enclosing_box(corners):
x_ = corners[:,[0,2,4,6]]
y_ = corners[:,[1,3,5,7]]
    xmin = np.min(x_, 1).reshape(-1, 1)
    ymin = np.min(y_, 1).reshape(-1, 1)
    xmax = np.max(x_, 1).reshape(-1, 1)
    ymax = np.max(y_, 1).reshape(-1, 1)
    return np.hstack((xmin, ymin, xmax, ymax, corners[:, 8:]))
# import packages used
import numpy as np
import tools_Exercise_1_6 as tools
import scipy.optimize as optimize
import scipy.interpolate as interpolate
import time as time
from interpolation import interp
def setup():
class par: pass
# Model parameters
par.beta = 0.999
par.upsillon=20
par.Lt = 1
par.W = 20
par.G= 0.99
par.chi = 0
par.xi1 = 0
par.xi2 = 0
par.D = 0.005
par.d = 0.005
par.varphi = 0
par.kappa1 = 1
par.kappa2 = 8
par.Upsillon = 0.501*par.upsillon
par.Z = 75000
par.gamma1 = 0.05
par.tests = 0.01
par.varsigma = 13
par.varrho = 2
par.t = 1.8
par.phi1 = 0.2*0.37
par.phi2 = 0.2*0.33
par.phi3 = 0.2*0.3
par.sigma = 0.001
par.varrho = 0.4
par.alpha=0.3
par.rho=5
par.g=40
par.mu = 2
par.H = 0.65
par.B = 0.91
par.b=par.B
par.delta = 0.0001
par.varepsilon = 0.15
par.epsilon = 0.97
par.cash = 0
par.alt = 0
# Shock parameters
par.num_M = 6
par.M_max = 0.25
par.num_shocks = 8
par.num_MM = 6
par.max_M = 5000
# Convergens settings
par.max_iter = 7000 # maximum number of iterations
par.tol = 10e-2
# Simulation parameters
par.simN = 720
par.I_ini = 0.01
par.Q_ini = 0.00
par.R_ini = 0.00
par.lw_ini = 1
par.M_ini=500
# Setup grid
setup_grids(par)
return par
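# Typical driver (assumed usage): build parameters, solve the firm's problem by
# value-function iteration, then simulate the epidemic/economy forward.
#   par = setup()
#   sol = solve_cons_inf(par)
#   sim = simu(par, sol)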
def setup_grids(par):
#Grid of disease parameters
par.grid_I = tools.nonlinspace(1.0e-10,par.M_max,par.num_M,1.2) # non-linear spaced points: like np.linspace with unequal spacing
par.grid_Q = tools.nonlinspace(1.0e-10,par.M_max,par.num_M,1.2) # non-linear spaced points: like np.linspace with unequal spacing
par.grid_R = tools.nonlinspace(1.0e-10,0.8,par.num_M,1) # non-linear spaced points: like np.linspace with unequal spacing
par.grid_M = tools.nonlinspace(5,3000,par.num_MM,1)
par.grid_lw = tools.nonlinspace(1.0e-10,1,100,1)
#Gauss-Hermite
# x,w = tools.gauss_hermite(par.num_shocks)
# par.eps = np.exp(par.sigma*np.sqrt(2)*x)
# par.eps_w = w/np.sqrt(np.pi)
return par
def solve_cons_inf(par):
# Initalize
class sol: pass
sol.V = np.ones([par.num_M, par.num_M, par.num_M, par.num_MM])*0
sol.lw = np.ones([par.num_M, par.num_M, par.num_M, par.num_MM])
sol.it = 0 #Number of iteration
sol.delta = 1000.0 #Different between V+ and V
sol.S=[]
sol.lo=[]
sol.s=[]
sol.wi=[]
sol.Y=[]
sol.i=[]
sol.l=[]
sol.gamma2=[]
sol.gamma3=[]
sol.I_plus=[]
sol.Q_plus=[]
sol.R_plus=[]
sol.M_plus=[]
sol.p=[]
sol.pi=[]
prcompo = 0
#precomp
for I in (par.grid_I):
for Q in (par.grid_Q):
for R in (par.grid_R):
for M in (par.grid_M):
for lw in (par.grid_lw):
if lw+Q+par.D*R > 1:
break
S=(1-I-Q-R)
lo=(1 - lw - Q - par.D*R)
s=max((lw-(1-par.D)*R)*(1-I/(S+I)),0)
wi=max((lw-(1-par.D)*R)*(I/(S+I)),0)
#w=(lw+Q+lo*par.G)*par.W
w=par.W
#J=par.G*(1-par.alpha)*par.varsigma*(par.Z*par.phi2*(min(1-R-Q,0.01)/par.alpha*par.varsigma))**(1/(par.alpha-1))
###K=min(par.H*(((par.delta/J)**(1/(1-par.alpha )) - I*I-par.varphi*R )/ ((2-par.alpha)*((par.upsillon*lw+par.Upsillon*lo)**(2*par.B))))**(1/(1-2*par.B)),M)
p_prov = (1-par.alpha)*par.varsigma*((par.Z*par.phi2*I*max(1-R-Q, 0.001)/(par.alpha*par.varsigma))**(1/(par.alpha-1)))**par.alpha
K=min(max((par.delta/(par.H*(1-par.b)*par.varepsilon**(1-par.b)*(par.upsillon*lw+par.Upsillon*lo)**(par.b))*p_prov)**(1/-par.b),1),M)
Y=max(par.H*(par.upsillon*lw+par.Upsillon*lo)**(par.b)*((par.varepsilon*K)**(1-par.b)), 1.0e-8)
l=((par.Z*par.phi2*I*max(1-R-Q,0.1)/(par.alpha*par.varsigma))**(1/(par.alpha-1)))*Y
if l>1:
l=1
if l<0:
l=0
p=(1-par.alpha)*par.varsigma*l**par.alpha*Y**(-par.alpha)
if l<=0:
p=0
l=0
Y=0
K=0
elif l>=1:
if p*Y>w+par.g:
K=min(max(((w+par.g)/((1-par.alpha)*par.varsigma*par.H*((par.upsillon*lw+par.Upsillon*lo)**(par.b-par.alpha*par.b)*par.varepsilon**((1-par.b)*(1-par.alpha)))))**(1/((1-par.b)*(1-par.alpha))),1),M)
Y=max(par.H*(par.upsillon*lw+par.Upsillon*lo)**(par.b)*(par.varepsilon*K)**(1-par.b), 1.0e-8)
p=(w+par.g)/Y
l=((par.Z*par.phi2*I*max(1-R-Q,0.1)/(par.alpha*par.varsigma))**(1/(par.alpha-1)))*Y
if l>1:
l=1
if l<0:
l=0
else:
l=1
K=min(max((par.delta/(par.H**(1-par.alpha)*(1-par.b)*par.varepsilon**(1+par.alpha*par.b-par.b-par.alpha)*(par.upsillon*lw+par.Upsillon*lo)**(par.b-par.alpha*par.b)*(1+par.alpha*par.alpha-par.alpha*2)))**(1/(par.b*par.alpha-par.alpha-par.b)),1),M)
Y=max(par.H*(par.upsillon*lw+par.Upsillon*lo)**(par.b)*(par.varepsilon*K)**(1-par.b), 1.0e-8)
p=((1-par.alpha)*par.varsigma)*Y**-par.alpha
l=((par.Z*par.phi2*I*max(1-R-Q,0.1)/(par.alpha*par.varsigma))**(1/(par.alpha-1)))*Y
if l>1:
l=1
if l<0:
l=0
else:
if p*Y>w+par.g:
K=min(max(((w+par.g)/(p_prov*par.H*((par.upsillon*lw+par.Upsillon*lo)**par.b)*(par.varepsilon)**(1-par.b)))**(1/(1-par.b)),1),M)
Y=max(par.H*(par.upsillon*lw+par.Upsillon*lo)**(par.b)*(par.varepsilon*K)**(1-par.b), 1.0e-8)
p=(w+par.g)/Y
l=((par.Z*par.phi2*I*max(1-R-Q,0.001)/(par.alpha*par.varsigma))**(1/(par.alpha-1)))*Y
if l>1:
l=1
if l<0:
l=0
if par.alt ==1:
if Y*p - par.delta*K - par.xi1*I**2 - par.xi2*par.d*R < - (lw+Q)*par.W - lo*par.G*par.W:
Y=0
K=0
p=0
l=0
sol.pi.append(- (lw+Q)*par.W - lo*par.G*par.W)
wi=0
s=0
#print(p)
#print(l)
gamma2=np.array(par.sigma + par.t*par.tests/((1 + I*par.rho)**par.mu))
gamma3=np.array(par.gamma1 * (1+ par.kappa1/(1+Q**(1/par.kappa2))))
sol.I_plus.append(max(min((1-par.gamma1-gamma2)*I + par.phi1*s*wi + par.phi2*S*I*l*l + par.phi3*S*I,1),1.0e-9))
sol.Q_plus.append(max(min((1- gamma3)*Q + gamma2*I,1),1.0e-9))
sol.R_plus.append(max(min(R + par.gamma1*I + gamma3*Q,1),1.0e-9))
#print(Y)
#print(p)
if par.alt==1:
if Y*p - par.delta*K - par.xi1*I**2 - par.xi2*par.d*R > - (lw+Q)*par.W - lo*par.G*par.W:
                                sol.pi.append(max(Y*p - (lw+Q)*par.W - par.delta*K - lo*par.G*par.W - par.xi1*I**2 - par.xi2*par.d*R, 0))
if par.alt==0:
sol.pi.append(max(Y*p - (lw+Q)*par.W - par.delta*K - lo*par.G*par.W - par.xi1*I**2 - lw*par.xi2*par.d*R,0))
#print(Y*p - (lw+Q)*par.W - par.delta*K - lo*par.G*par.W - par.xi1*I**2 - par.xi2*par.d*R)
sol.M_plus.append(max(((M+Y*p - (lw+Q)*par.W - par.delta*K - lo*par.G*par.W - par.xi1*I**2 - par.xi2*par.d*R)*par.epsilon), 5))
prcompo +=1
#print(Y*p)
#rint(Y)
#print(p)
#print(Y*p)
#print(Y*p)
#print(K)
#print(sol.M_plus)
#points=np.meshgrid(par.grid_I, par.grid_Q, par.grid_R, copy=False, indexing='xy')
points = np.asarray(([par.grid_I, par.grid_Q, par.grid_R, par.grid_M]))
#print(np.shape(points))
#print(min(sol.I_plus))
#print(np.sum(sol.Q_plus))
#print(min(sol.Q_plus))
#print(max(sol.R_plus))
#print(np.sum(sol.R_plus))
#print(np.sum(sol.M_plus))
point = np.asarray(np.transpose(np.array([sol.I_plus, sol.Q_plus, sol.R_plus, sol.M_plus])))
#print(point)
while (sol.delta >= par.tol and sol.it < par.max_iter):
V_next = sol.V.copy()
V_plus = interpolate.interpn(points, V_next, point, method='linear', bounds_error=False, fill_value=None)
print(np.sum(V_plus))
#print(sum(sol.pi-V_plus))
#V_plus=interp(np.asarray(sol.I_plus), np.asarray(sol.Q_plus), np.asarray(sol.R_plus), np.asarray(sol.M_plus), np.asarray(V_next), np.asarray(point))
ind = -1
# find V
Ih = -1
Qh = -1
Rh = -1
Mh = -1
for I in (par.grid_I):
Ih +=1
for Q in (par.grid_Q):
Qh +=1
for R in (par.grid_R):
Rh +=1
for M in (par.grid_M):
Mh +=1
for lw in (par.grid_lw):
if lw+Q+par.D*R > 1:
break
ind += 1
V_guess =sol.pi[ind] + par.beta*V_plus[ind]
##sol.V[Ih, Qh, Rh, Mh] = V_guess
#sol.lw[Ih, Qh, Rh, Mh]=lw
if V_guess > sol.V[Ih, Qh, Rh, Mh]:
sol.V[Ih, Qh, Rh, Mh]=V_guess
sol.lw[Ih, Qh, Rh, Mh]=lw
Mh=-1
Rh=-1
Qh=-1
        # update delta and iteration counter
sol.it += 1
c_new = np.ravel(sol.V)
c_old = np.ravel(V_next)
#sol.delta = max(abs(sol.V - V_next))
sol.delta = max(abs(c_new - c_old))
print(sol.delta)
return(sol)
def simu(par, sol):
class simu: pass
simu.S=np.zeros([par.simN])
simu.lo=np.zeros([par.simN])
simu.s=np.zeros([par.simN])
simu.wi=np.zeros([par.simN])
simu.Y=np.zeros([par.simN])
simu.l=np.zeros([par.simN])
simu.p=np.zeros([par.simN])
simu.gamma2=np.zeros([par.simN])
simu.gamma3=np.zeros([par.simN])
simu.pi=np.zeros([par.simN])
simu.util=np.zeros([par.simN])
simu.K=np.zeros([par.simN])
simu.w=np.zeros([par.simN])
simu.c=np.zeros([par.simN])
simu.Pos=np.zeros([par.simN])
simu.I=np.zeros([par.simN+1])
simu.Q=np.zeros([par.simN+1])
simu.R=np.zeros([par.simN+1])
simu.M=np.zeros([par.simN+1])
simu.I[0]=(par.I_ini)
simu.Q[0]=(par.Q_ini)
simu.R[0]=(par.R_ini)
simu.M[0]=(par.M_ini)
simu.lw = np.zeros([par.simN])
ite=0
points = (par.grid_I, par.grid_Q, par.grid_R, par.grid_M)
while ite < par.simN:
#Start of simulation.
#point=np.asarray([simu.I[ite], simu.Q[ite], simu.R[ite], simu.M[ite]])
simu.lw[ite]=interpolate.interpn(points, sol.lw, ([simu.I[ite], simu.Q[ite], simu.R[ite], simu.M[ite]]), method='linear', bounds_error=False, fill_value=None)
simu.lw[ite]=min(simu.lw[ite], 1-simu.Q[ite]-simu.R[ite]*par.d)
simu.S[ite]=(1-simu.I[ite]-simu.Q[ite]-simu.R[ite])
simu.lo[ite]=(1 - simu.lw[ite] - simu.Q[ite] - par.D*simu.R[ite])
simu.s[ite]=(max((simu.lw[ite]-(1-par.D)*simu.R[ite])*(1-simu.I[ite]/(simu.S[ite]+simu.I[ite])),0))
simu.wi[ite]=(max((simu.lw[ite]-(1-par.D)*simu.R[ite])*(simu.I[ite]/(simu.S[ite]+simu.I[ite])),0))
simu.w[ite]=par.W
p_prov = (1-par.alpha)*par.varsigma*((par.Z*par.phi2*simu.I[ite]*max(1-simu.R[ite]-simu.Q[ite], 0.01)/(par.alpha*par.varsigma))**(1/(par.alpha-1)))**par.alpha
simu.K[ite]=min(max((par.delta/(par.H*(1-par.b)*par.varepsilon**(1-par.b)*(par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**(par.b))*p_prov)**(1/-par.b),1e-9),simu.M[ite])
simu.Y[ite]=max(par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**par.b)*((par.varepsilon*simu.K[ite])**(1-par.b)), 1.0e-8)
simu.l[ite]=(par.Z*par.phi2*simu.I[ite]*(1-simu.R[ite]-simu.Q[ite])/(par.alpha*par.varsigma))**(1/(par.alpha-1))*simu.Y[ite]
if simu.l[ite] > 1:
simu.l[ite]=1
if simu.l[ite] < 0:
simu.l[ite] = 0
simu.p[ite]=(1-par.alpha)*par.varsigma*simu.l[ite]**(par.alpha) * simu.Y[ite]**(-par.alpha)
#print(simu.Y[ite])
if simu.l[ite] < 0:
simu.l[ite]=0
simu.p[ite] = 0
simu.K[ite] = 0
simu.Y[ite] = 0
elif simu.l[ite]>=1:
if simu.p[ite]*simu.Y[ite]>simu.w[ite]+par.g:
simu.K[ite]=min(max(((simu.w[ite]+par.g)/((1-par.alpha)*par.varsigma*par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**(par.b-par.alpha*par.b)*par.varepsilon**((1-par.b)*(1-par.alpha)))))**(1/((1-par.b)*(1-par.alpha))),0),simu.M[ite])
#print(simu.K[ite]
simu.Y[ite]=max(par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**par.b)*((par.varepsilon*simu.K[ite])**(1-par.b)), 1.0e-8)
print(simu.K[ite])
#print(simu.Y[ite])
simu.p[ite]=(simu.w[ite]+par.g)/simu.Y[ite]
simu.l[ite]=(par.Z*par.phi2*simu.I[ite]*(1-simu.R[ite]-simu.Q[ite])/(par.alpha*par.varsigma))**(1/(par.alpha-1))*simu.Y[ite]
if simu.l[ite] > 1:
simu.l[ite]=1
if simu.l[ite] < 0:
simu.l[ite] = 0
print(1)
else:
simu.K[ite]=min(max((par.delta/(par.H**(1-par.alpha)*(1-par.b)*par.varepsilon**(1+par.alpha*par.b-par.b-par.alpha)*(par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**(par.b-par.alpha*par.b)*(1+par.alpha*par.alpha-2*par.alpha)))**(1/(par.alpha*par.b-par.b-par.alpha)),0),simu.M[ite])
#simu.K[ite]=min(max((par.delta/(par.H**(1-par.alpha)*(1-par.b)*par.varepsilon**(1+par.alpha*par.b-par.b-par.alpha)*(par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**(par.b-par.alpha*par.b)*(1-par.alpha*par.alpha)))**(1/(par.alpha*par.b-par.b-par.alpha)),0),simu.M[ite])
simu.Y[ite]=max(par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**par.b)*((par.varepsilon*simu.K[ite])**(1-par.b)), 1.0e-8)
simu.p[ite]=((1-par.alpha)*par.varsigma)/(simu.Y[ite]**par.alpha)
simu.l[ite]=(par.Z*par.phi2*simu.I[ite]*(1-simu.R[ite]-simu.Q[ite])/(par.alpha*par.varsigma))**(1/(par.alpha-1))*simu.Y[ite]
print(2)
if simu.p[ite]*simu.Y[ite]>simu.w[ite]+par.g:
simu.K[ite]=min(max(((simu.w[ite]+par.g)/((1-par.alpha)*par.varsigma*par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**(par.b-par.alpha*par.b)*par.varepsilon**((1-par.b)*(1-par.alpha)))))**(1/((1-par.b)*(1-par.alpha))),0),simu.M[ite])
                #print(simu.K[ite])
simu.Y[ite]=max(par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**par.b)*((par.varepsilon*simu.K[ite])**(1-par.b)), 1.0e-8)
print(simu.K[ite])
#print(simu.Y[ite])
simu.p[ite]=(simu.w[ite]+par.g)/simu.Y[ite]
simu.l[ite]=(par.Z*par.phi2*simu.I[ite]*(1-simu.R[ite]-simu.Q[ite])/(par.alpha*par.varsigma))**(1/(par.alpha-1))*simu.Y[ite]
print(3)
if simu.l[ite] > 1:
simu.l[ite]=1
if simu.l[ite] < 0:
simu.l[ite] = 0
else:
if simu.p[ite]*simu.Y[ite]>simu.w[ite]+par.g:
simu.K[ite]=min(max((((simu.w[ite]+par.g)/((1-par.alpha)*par.varsigma*par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**(par.b-par.alpha*par.b)*par.varepsilon**((1-par.b)*(1-par.alpha))))))**(1/((1-par.b)*(1-par.alpha))),0),simu.M[ite])
simu.Y[ite]=max(par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**par.b)*((par.varepsilon*simu.K[ite])**(1-par.b)), 1.0e-8)
simu.p[ite]=(simu.w[ite]+par.g)/simu.Y[ite]
simu.l[ite]=(par.Z*par.phi2*simu.I[ite]*(1-simu.R[ite]-simu.Q[ite])/(par.alpha*par.varsigma))**(1/(par.alpha-1))*simu.Y[ite]
if simu.l[ite] > 1:
simu.l[ite]=1
if simu.l[ite] < 0:
simu.l[ite] = 0
print(4)
simu.gamma2[ite]=(np.array(par.sigma + par.t*par.tests/(1 + simu.I[ite]*par.rho)**par.mu))
simu.gamma3[ite]=(np.array(par.gamma1 * (1+ par.kappa1/(1+simu.Q[ite]**(1/par.kappa2)))))
simu.pi[ite]=simu.Y[ite]*simu.p[ite] -simu.K[ite]*par.delta -(simu.lw[ite]+simu.Q[ite])*par.W - simu.lo[ite]*par.G*par.W - par.xi1*simu.I[ite]**2 - par.xi2*par.d*simu.R[ite]
if par.alt==1:
if simu.pi[ite] < - (simu.lw[ite]+simu.Q[ite])*par.W - simu.lo[ite]*par.G*par.W:
simu.pi[ite] = - (simu.lw[ite]+simu.Q[ite])*par.W - simu.lo[ite]*par.G*par.W
simu.Y[ite] = 0
simu.K[ite] = 0
simu.p[ite] = 0
simu.l[ite] = 0
simu.s[ite] = 0
simu.wi[ite] =0
simu.util[ite]=(par.varsigma*simu.l[ite]**par.alpha*simu.Y[ite]**(1-par.alpha)+simu.w[ite]+par.g-simu.p[ite]*simu.Y[ite]-par.Z*par.phi2*simu.I[ite]*simu.l[ite]*(1-simu.R[ite]-simu.Q[ite])- par.Z*par.phi3*simu.I[ite]*(1-simu.R[ite]-simu.Q[ite]))
simu.c[ite]=simu.w[ite]+par.g-simu.p[ite]*simu.Y[ite]
simu.Pos[ite]=(par.t*par.tests/(1 + simu.I[ite]*par.rho)**par.mu)*simu.I[ite]/(par.tests)*100
simu.I[ite+1]=(max(min((1-par.gamma1-simu.gamma2[ite])*simu.I[ite] + par.phi1*simu.s[ite]*simu.wi[ite] + par.phi2*simu.S[ite]*simu.I[ite]*simu.l[ite]*simu.l[ite] + par.phi3*simu.S[ite]*simu.I[ite],1),1.0e-9))
simu.Q[ite+1]=(max(min((1- simu.gamma3[ite])*simu.Q[ite] + simu.gamma2[ite]*simu.I[ite],1),1.0e-9))
simu.R[ite+1]=(max(min(simu.R[ite] + par.gamma1*simu.I[ite] + simu.gamma3[ite]*simu.Q[ite],1),1.0e-9))
simu.M[ite+1]=max((simu.M[ite]+simu.pi[ite])*par.epsilon,1)
if par.cash ==1 and ite==120:
simu.M[ite+1]=max((simu.M[ite]+simu.pi[ite])*par.epsilon,1)+1000
ite+=1
simu.grid = np.linspace(0,ite,ite)
simu.I = simu.I[0:ite]
simu.Q = simu.Q[0:ite]
simu.R = simu.R[0:ite]
simu.M = simu.M[0:ite]
simu.GDP = simu.p*simu.Y
return(simu)
def simu_wl(par, sol):
class simu: pass
simu.S=np.zeros([par.simN])
simu.lo=np.zeros([par.simN])
simu.s=np.zeros([par.simN])
simu.wi=np.zeros([par.simN])
simu.Y=np.zeros([par.simN])
simu.l=np.zeros([par.simN])
simu.p=np.zeros([par.simN])
simu.gamma2=np.zeros([par.simN])
simu.gamma3=np.zeros([par.simN])
simu.pi=np.zeros([par.simN])
simu.util=np.zeros([par.simN])
simu.K=np.zeros([par.simN])
simu.w=np.zeros([par.simN])
simu.c=np.zeros([par.simN])
simu.Pos=np.zeros([par.simN])
simu.I=np.zeros([par.simN+1])
simu.Q=np.zeros([par.simN+1])
simu.R=np.zeros([par.simN+1])
simu.M=np.zeros([par.simN+1])
simu.I[0]=(par.I_ini)
simu.Q[0]=(par.Q_ini)
simu.R[0]=(par.R_ini)
simu.M[0]=(par.M_ini)
simu.lw = np.zeros([par.simN])
ite=0
points = (par.grid_I, par.grid_Q, par.grid_R, par.grid_M)
shutdown=0
while ite < par.simN:
#Start of simulation.
#point=np.asarray([simu.I[ite], simu.Q[ite], simu.R[ite], simu.M[ite]])
simu.lw[ite]=interpolate.interpn(points, sol.lw, ([simu.I[ite], simu.Q[ite], simu.R[ite], simu.M[ite]]), method='linear', bounds_error=False, fill_value=None)
simu.lw[ite]=min(simu.lw[ite], 1-simu.Q[ite]-simu.R[ite]*par.d)
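        # Lockdown hysteresis: a shutdown starts once infections I exceed 2% and is
        # lifted only after I falls below 0.9%; while it is active, workplace labour
        # lw is capped at 0.25 below.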
if simu.I[ite] > 0.02:
shutdown = 1
if simu.I[ite] < 0.009:
shutdown = 0
if shutdown ==1:
simu.lw[ite]=min(min(simu.lw[ite], 1-simu.Q[ite]-simu.R[ite]*par.d),0.25)
simu.S[ite]=(1-simu.I[ite]-simu.Q[ite]-simu.R[ite])
simu.lo[ite]=(1 - simu.lw[ite] - simu.Q[ite] - par.D*simu.R[ite])
simu.s[ite]=(max((simu.lw[ite]-(1-par.D)*simu.R[ite])*(1-simu.I[ite]/(simu.S[ite]+simu.I[ite])),0))
simu.wi[ite]=(max((simu.lw[ite]-(1-par.D)*simu.R[ite])*(simu.I[ite]/(simu.S[ite]+simu.I[ite])),0))
simu.w[ite]=par.W
p_prov = (1-par.alpha)*par.varsigma*((par.Z*par.phi2*simu.I[ite]*max(1-simu.R[ite]-simu.Q[ite], 0.01)/(par.alpha*par.varsigma))**(1/(par.alpha-1)))**par.alpha
simu.K[ite]=min(max((par.delta/(par.H*(1-par.b)*par.varepsilon**(1-par.b)*(par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**(par.b))*p_prov)**(1/-par.b),1e-9),simu.M[ite])
simu.Y[ite]=max(par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**par.b)*((par.varepsilon*simu.K[ite])**(1-par.b)), 1.0e-8)
simu.l[ite]=(par.Z*par.phi2*simu.I[ite]*(1-simu.R[ite]-simu.Q[ite])/(par.alpha*par.varsigma))**(1/(par.alpha-1))*simu.Y[ite]
if simu.l[ite] > 1:
simu.l[ite]=1
if simu.l[ite] < 0:
simu.l[ite] = 0
simu.p[ite]=(1-par.alpha)*par.varsigma*simu.l[ite]**(par.alpha) * simu.Y[ite]**(-par.alpha)
#print(simu.Y[ite])
if simu.l[ite] < 0:
simu.l[ite]=0
simu.p[ite] = 0
simu.K[ite] = 0
simu.Y[ite] = 0
elif simu.l[ite]>=1:
if simu.p[ite]*simu.Y[ite]>simu.w[ite]+par.g:
simu.K[ite]=min(max(((simu.w[ite]+par.g)/((1-par.alpha)*par.varsigma*par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**(par.b-par.alpha*par.b)*par.varepsilon**((1-par.b)*(1-par.alpha)))))**(1/((1-par.b)*(1-par.alpha))),0),simu.M[ite])
                #print(simu.K[ite])
simu.Y[ite]=max(par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**par.b)*((par.varepsilon*simu.K[ite])**(1-par.b)), 1.0e-8)
#print(simu.Y[ite])
simu.p[ite]=(simu.w[ite]+par.g)/simu.Y[ite]
simu.l[ite]=(par.Z*par.phi2*simu.I[ite]*(1-simu.R[ite]-simu.Q[ite])/(par.alpha*par.varsigma))**(1/(par.alpha-1))*simu.Y[ite]
if simu.l[ite] > 1:
simu.l[ite]=1
if simu.l[ite] < 0:
simu.l[ite] = 0
else:
simu.K[ite]=min(max((par.delta/(par.H**(1-par.alpha)*(1-par.b)*par.varepsilon**(1+par.alpha*par.b-par.b-par.alpha)*(par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**(par.b-par.alpha*par.b)*(1+par.alpha*par.alpha-2*par.alpha)))**(1/(par.alpha*par.b-par.b-par.alpha)),0),simu.M[ite])
simu.Y[ite]=max(par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**par.b)*((par.varepsilon*simu.K[ite])**(1-par.b)), 1.0e-8)
simu.p[ite]=((1-par.alpha)*par.varsigma)/(simu.Y[ite]**par.alpha)
simu.l[ite]=(par.Z*par.phi2*simu.I[ite]*(1-simu.R[ite]-simu.Q[ite])/(par.alpha*par.varsigma))**(1/(par.alpha-1))*simu.Y[ite]
if simu.p[ite]*simu.Y[ite]>simu.w[ite]+par.g:
simu.K[ite]=min(max(((simu.w[ite]+par.g)/((1-par.alpha)*par.varsigma*par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**(par.b-par.alpha*par.b)*par.varepsilon**((1-par.b)*(1-par.alpha)))))**(1/((1-par.b)*(1-par.alpha))),0),simu.M[ite])
simu.Y[ite]=max(par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**par.b)*((par.varepsilon*simu.K[ite])**(1-par.b)), 1.0e-8)
simu.p[ite]=(simu.w[ite]+par.g)/simu.Y[ite]
simu.l[ite]=(par.Z*par.phi2*simu.I[ite]*(1-simu.R[ite]-simu.Q[ite])/(par.alpha*par.varsigma))**(1/(par.alpha-1))*simu.Y[ite]
if simu.l[ite] > 1:
simu.l[ite]=1
if simu.l[ite] < 0:
simu.l[ite]=0
else:
if simu.p[ite]*simu.Y[ite]>simu.w[ite]+par.g:
simu.K[ite]=min(max(((simu.w[ite]+par.g)/((1-par.alpha)*par.varsigma*par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**(par.b-par.alpha*par.b)*par.varepsilon**((1-par.b)*(1-par.alpha)))))**(1/((1-par.b)*(1-par.alpha))),0),simu.M[ite])
simu.Y[ite]=max(par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**par.b)*((par.varepsilon*simu.K[ite])**(1-par.b)), 1.0e-8)
simu.p[ite]=(simu.w[ite]+par.g)/simu.Y[ite]
simu.l[ite]=(par.Z*par.phi2*simu.I[ite]*(1-simu.R[ite]-simu.Q[ite])/(par.alpha*par.varsigma))**(1/(par.alpha-1))*simu.Y[ite]
if simu.l[ite] > 1:
simu.l[ite]=1
if simu.l[ite] < 0:
simu.l[ite] = 0
simu.gamma2[ite]=(np.array(par.sigma + par.t*par.tests/(1 + simu.I[ite]*par.rho)**par.mu))
simu.gamma3[ite]=(np.array(par.gamma1 * (1+ par.kappa1/(1+simu.Q[ite]**(1/par.kappa2)))))
simu.pi[ite]=simu.Y[ite]*simu.p[ite] -simu.K[ite]*par.delta -(simu.lw[ite]+simu.Q[ite])*par.W - simu.lo[ite]*par.G*par.W - par.xi1*simu.I[ite]**2 - par.xi2*par.d*simu.R[ite]
simu.util[ite]=(par.varsigma*simu.l[ite]**par.alpha*simu.Y[ite]**(1-par.alpha)+simu.w[ite]+par.g-simu.p[ite]*simu.Y[ite]-par.Z*par.phi2*simu.I[ite]*simu.l[ite]*(1-simu.R[ite]-simu.Q[ite])- par.Z*par.phi3*simu.I[ite]*(1-simu.R[ite]-simu.Q[ite]))
simu.c[ite]=simu.w[ite]+par.g-simu.p[ite]*simu.Y[ite]
simu.Pos[ite]=(par.t*par.tests/(1 + simu.I[ite]*par.rho)**par.mu)*simu.I[ite]/(par.tests)*100
simu.I[ite+1]=(max(min((1-par.gamma1-simu.gamma2[ite])*simu.I[ite] + par.phi1*simu.s[ite]*simu.wi[ite] + par.phi2*simu.S[ite]*simu.I[ite]*simu.l[ite]*simu.l[ite] + par.phi3*simu.S[ite]*simu.I[ite],1),1.0e-9))
simu.Q[ite+1]=(max(min((1- simu.gamma3[ite])*simu.Q[ite] + simu.gamma2[ite]*simu.I[ite],1),1.0e-9))
simu.R[ite+1]=(max(min(simu.R[ite] + par.gamma1*simu.I[ite] + simu.gamma3[ite]*simu.Q[ite],1),1.0e-9))
simu.M[ite+1]=max((simu.M[ite]+simu.pi[ite])*par.epsilon,1)
ite+=1
simu.grid = np.linspace(0,ite,ite)
simu.I = simu.I[0:ite]
simu.Q = simu.Q[0:ite]
simu.R = simu.R[0:ite]
simu.M = simu.M[0:ite]
simu.GDP = simu.p*simu.Y
return(simu)
def simu_sl(par, sol):
class simu: pass
simu.S=np.zeros([par.simN])
simu.lo=np.zeros([par.simN])
simu.s=np.zeros([par.simN])
simu.wi=np.zeros([par.simN])
simu.Y=np.zeros([par.simN])
simu.l=np.zeros([par.simN])
simu.p=np.zeros([par.simN])
simu.gamma2=np.zeros([par.simN])
simu.gamma3=np.zeros([par.simN])
simu.pi=np.zeros([par.simN])
simu.util=np.zeros([par.simN])
simu.K=np.zeros([par.simN])
simu.w=np.zeros([par.simN])
simu.c=np.zeros([par.simN])
simu.Pos=np.zeros([par.simN])
simu.I=np.zeros([par.simN+1])
simu.Q=np.zeros([par.simN+1])
simu.R=np.zeros([par.simN+1])
simu.M=np.zeros([par.simN+1])
simu.I[0]=(par.I_ini)
simu.Q[0]=(par.Q_ini)
simu.R[0]=(par.R_ini)
simu.M[0]=(par.M_ini)
simu.lw = np.zeros([par.simN])
ite=0
points = (par.grid_I, par.grid_Q, par.grid_R, par.grid_M)
shutdown=0
while ite < par.simN:
#Start of simulation.
#point=np.asarray([simu.I[ite], simu.Q[ite], simu.R[ite], simu.M[ite]])
simu.lw[ite]=interpolate.interpn(points, sol.lw, ([simu.I[ite], simu.Q[ite], simu.R[ite], simu.M[ite]]), method='linear', bounds_error=False, fill_value=None)
simu.lw[ite]=min(simu.lw[ite], 1-simu.Q[ite]-simu.R[ite]*par.d)
simu.S[ite]=(1-simu.I[ite]-simu.Q[ite]-simu.R[ite])
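        # Same hysteresis rule as in simu_wl (shutdown on above I = 2%, off below 0.9%),
        # but here the shutdown caps social activity l at 0.1 further below instead of lw.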
if simu.I[ite] > 0.02:
shutdown = 1
if simu.I[ite] < 0.009:
shutdown = 0
simu.lo[ite]=(1 - simu.lw[ite] - simu.Q[ite] - par.D*simu.R[ite])
simu.s[ite]=(max((simu.lw[ite]-(1-par.D)*simu.R[ite])*(1-simu.I[ite]/(simu.S[ite]+simu.I[ite])),0))
simu.wi[ite]=(max((simu.lw[ite]-(1-par.D)*simu.R[ite])*(simu.I[ite]/(simu.S[ite]+simu.I[ite])),0))
simu.w[ite]=par.W
p_prov = (1-par.alpha)*par.varsigma*(par.Z*par.phi2*simu.I[ite]*(max(1-simu.R[ite]-simu.Q[ite], 0.01)/(par.alpha*par.varsigma))**(1/(par.alpha-1)))**par.alpha
simu.K[ite]=min(max((par.delta/(par.H*(1-par.b)*par.varepsilon**(1-par.b)*(par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**(par.b))*p_prov)**(1/-par.b),1e-9),simu.M[ite])
simu.Y[ite]=max(par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**par.b)*((par.varepsilon*simu.K[ite])**(1-par.b)), 1.0e-8)
simu.l[ite]=(par.Z*par.phi2*simu.I[ite]*(1-simu.R[ite]-simu.Q[ite])/(par.alpha*par.varsigma))**(1/(par.alpha-1))*simu.Y[ite]
if shutdown==1:
simu.l[ite]=min(simu.l[ite],0.1)
if simu.l[ite] > 1:
simu.l[ite]=1
if simu.l[ite] < 0:
simu.l[ite] = 0
simu.p[ite]=(1-par.alpha)*par.varsigma*simu.l[ite]**(par.alpha) * simu.Y[ite]**(-par.alpha)
#print(simu.Y[ite])
if simu.l[ite] < 0:
simu.l[ite]=0
simu.p[ite] = 0
simu.K[ite] = 0
simu.Y[ite] = 0
elif simu.l[ite]>=1:
if simu.p[ite]*simu.Y[ite]>simu.w[ite]+par.g:
simu.K[ite]=min(max(((simu.w[ite]+par.g)/((1-par.alpha)*par.varsigma*par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**(par.b-par.alpha*par.b)*par.varepsilon**((1-par.b)*(1-par.alpha)))))**(1/((1-par.b)*(1-par.alpha))),0),simu.M[ite])
                #print(simu.K[ite])
simu.Y[ite]=max(par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**par.b)*((par.varepsilon*simu.K[ite])**(1-par.b)), 1.0e-8)
#print(simu.Y[ite])
simu.p[ite]=(simu.w[ite]+par.g)/simu.Y[ite]
simu.l[ite]=(par.Z*par.phi2*simu.I[ite]*(1-simu.R[ite]-simu.Q[ite])/(par.alpha*par.varsigma))**(1/(par.alpha-1))*simu.Y[ite]
if simu.l[ite] > 1:
simu.l[ite]=1
if simu.l[ite] < 0:
simu.l[ite] = 0
if shutdown==1:
simu.l[ite]=min(simu.l[ite],0.1)
else:
simu.K[ite]=min(max((par.delta/(par.H**(1-par.alpha)*(1-par.b)*par.varepsilon**(1+par.alpha*par.b-par.b-par.alpha)*(par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**(par.b-par.alpha*par.b)*(1+par.alpha*par.alpha-2*par.alpha)))**(1/(par.alpha*par.b-par.b-par.alpha)),0),simu.M[ite])
simu.Y[ite]=max(par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**par.b)*((par.varepsilon*simu.K[ite])**(1-par.b)), 1.0e-8)
simu.p[ite]=((1-par.alpha)*par.varsigma)/(simu.Y[ite]**par.alpha)
simu.l[ite]=(par.Z*par.phi2*simu.I[ite]*(1-simu.R[ite]-simu.Q[ite])/(par.alpha*par.varsigma))**(1/(par.alpha-1))*simu.Y[ite]
if simu.p[ite]*simu.Y[ite]>simu.w[ite]+par.g:
simu.K[ite]=min(max(((simu.w[ite]+par.g)/((1-par.alpha)*par.varsigma*par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**(par.b-par.alpha*par.b)*par.varepsilon**((1-par.b)*(1-par.alpha)))))**(1/((1-par.b)*(1-par.alpha))),0),simu.M[ite])
simu.Y[ite]=max(par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**par.b)*((par.varepsilon*simu.K[ite])**(1-par.b)), 1.0e-8)
simu.p[ite]=(simu.w[ite]+par.g)/simu.Y[ite]
simu.l[ite]=(par.Z*par.phi2*simu.I[ite]*(1-simu.R[ite]-simu.Q[ite])/(par.alpha*par.varsigma))**(1/(par.alpha-1))*simu.Y[ite]
if simu.l[ite] > 1:
simu.l[ite]=1
if simu.l[ite] < 0:
simu.l[ite]=0
if shutdown==1:
simu.l[ite]=min(simu.l[ite],0.1)
else:
if simu.p[ite]*simu.Y[ite]>simu.w[ite]+par.g:
simu.K[ite]=min(max(((simu.w[ite]+par.g)/((1-par.alpha)*par.varsigma*par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**(par.b-par.alpha*par.b)*par.varepsilon**((1-par.b)*(1-par.alpha)))))**(1/((1-par.b)*(1-par.alpha))),0),simu.M[ite])
simu.Y[ite]=max(par.H*((par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])**par.b)*((par.varepsilon*simu.K[ite])**(1-par.b)), 1.0e-8)
simu.p[ite]=(simu.w[ite]+par.g)/simu.Y[ite]
simu.l[ite]=(par.Z*par.phi2*simu.I[ite]*(1-simu.R[ite]-simu.Q[ite])/(par.alpha*par.varsigma))**(1/(par.alpha-1))*simu.Y[ite]
if simu.l[ite] > 1:
simu.l[ite]=1
if simu.l[ite] < 0:
simu.l[ite] = 0
if shutdown==1:
simu.l[ite]=min(simu.l[ite],0.1)
simu.gamma2[ite]=(np.array(par.sigma + par.t*par.tests/(1 + simu.I[ite]*par.rho)**par.mu))
simu.gamma3[ite]=(np.array(par.gamma1 * (1+ par.kappa1/(1+simu.Q[ite]**(1/par.kappa2)))))
simu.pi[ite]=simu.Y[ite]*simu.p[ite] -simu.K[ite]*par.delta -(simu.lw[ite]+simu.Q[ite])*par.W - simu.lo[ite]*par.G*par.W - par.xi1*simu.I[ite]**2 - par.xi2*par.d*simu.R[ite]
simu.util[ite]=(par.varsigma*simu.l[ite]**par.alpha*simu.Y[ite]**(1-par.alpha)+simu.w[ite]+par.g-simu.p[ite]*simu.Y[ite]-par.Z*par.phi2*simu.I[ite]*simu.l[ite]*(1-simu.R[ite]-simu.Q[ite])- par.Z*par.phi3*simu.I[ite]*(1-simu.R[ite]-simu.Q[ite]))
simu.c[ite]=simu.w[ite]+par.g-simu.p[ite]*simu.Y[ite]
simu.Pos[ite]=(par.t*par.tests/(1 + simu.I[ite]*par.rho)**par.mu)*simu.I[ite]/(par.tests)*100
simu.I[ite+1]=(max(min((1-par.gamma1-simu.gamma2[ite])*simu.I[ite] + par.phi1*simu.s[ite]*simu.wi[ite] + par.phi2*simu.S[ite]*simu.I[ite]*simu.l[ite]*simu.l[ite] + par.phi3*simu.S[ite]*simu.I[ite],1),1.0e-9))
simu.Q[ite+1]=(max(min((1- simu.gamma3[ite])*simu.Q[ite] + simu.gamma2[ite]*simu.I[ite],1),1.0e-9))
simu.R[ite+1]=(max(min(simu.R[ite] + par.gamma1*simu.I[ite] + simu.gamma3[ite]*simu.Q[ite],1),1.0e-9))
simu.M[ite+1]=max((simu.M[ite]+simu.pi[ite])*par.epsilon,1)
ite+=1
simu.grid = np.linspace(0,ite,ite)
simu.I = simu.I[0:ite]
simu.Q = simu.Q[0:ite]
simu.R = simu.R[0:ite]
simu.M = simu.M[0:ite]
simu.GDP = simu.p*simu.Y
return(simu)
def simu_swl(par, sol):
class simu: pass
simu.S=np.zeros([par.simN])
simu.lo=np.zeros([par.simN])
simu.s=np.zeros([par.simN])
simu.wi=np.zeros([par.simN])
simu.Y=np.zeros([par.simN])
simu.l=np.zeros([par.simN])
simu.p=np.zeros([par.simN])
simu.gamma2=np.zeros([par.simN])
simu.gamma3=np.zeros([par.simN])
simu.pi=np.zeros([par.simN])
simu.util=np.zeros([par.simN])
simu.K=np.zeros([par.simN])
simu.w= | np.zeros([par.simN]) | numpy.zeros |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 12 15:29:26 2020
@author: 92031
"""
import numpy as np
import matplotlib.pyplot as plt
file = "E:\\UserData\\Agkd\\Course\\PatternRecognition\\data.txt"
SampleNum = 10
class MSE():
def __init__(self, yita = 0.01, b0=0.3):
self.w = np.ones((3,4), dtype=float)
def read(self,file,trainrt=0.8):
with open(file) as f:
lines = f.readlines()
lines = [line.strip().split(' ') for line in lines]
x1 = np.array(lines[0:SampleNum],dtype=float)
x2 = np.array(lines[SampleNum:2*SampleNum],dtype=float)
x3 = np.array(lines[2*SampleNum:3*SampleNum],dtype=float)
x4 = np.array(lines[3*SampleNum:],dtype=float)
idx = int(trainrt*SampleNum)
self.x_train = np.transpose(np.concatenate((x1[:idx,:],
x2[:idx,:],
x3[:idx,:],
x4[:idx,:]),axis=0))
self.x_test = np.transpose(np.concatenate((x1[idx:,:],
x2[idx:,:],
x3[idx:,:],
x4[idx:,:]),axis=0))
self.x_train[2,:] = 1
self.x_test[2,:] = 1
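        # Row index 2 is the homogeneous (bias) coordinate of each augmented sample,
        # matching the 3-row weight matrix self.w with one column per class.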
self.y_train = np.zeros((4,4*8))
self.y_test = | np.zeros((4,4*2)) | numpy.zeros |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
interpret_qspr_models.py
This script contains interpretability of the qspr models. We will use the Shapley
values as a method of quantifying the feature importance.
Created on: Tue Jul 13 10:32:42 2021
Author: <NAME> (<EMAIL>)
Copyright Schrodinger, LLC. All rights reserved.
# Installing Shapley
$SCHRODINGER/internal/bin/python3 -m pip install shapley <-- doesn't get the right shap'
$SCHRODINGER/internal/bin/python3 -m pip install shap
Reference:
For Random forest models
https://www.kaggle.com/vikumsw/explaining-random-forest-model-with-shapely-values
For general idea:
https://christophm.github.io/interpretable-ml-book/shapley.html
"""
# Importing module
import os
import shap
import pandas as pd
import numpy as np
from scipy.stats import pearsonr
import matplotlib.pyplot as plt
import copy
# From generate qspr models
from .qspr_models import main_generate_qspr_models_CV
# Importing plot tools
from . import plot_tools
# Setting defaults
plot_tools.set_mpl_defaults()
# Setting random seed
np.random.seed(0)
# Function to remove coefficients and delete X training dataframes for lasso
def remove_lasso_zero_coef(lasso_model,
X_train_df
):
"""
    This function removes coefficients from the lasso model that have zero values. This
    is intended to speed up LASSO analysis by removing non-important features.
By removing coefficients and training columns, the performance of the
models could dramatically improve! The times are:
0.12 seconds for all nonzero coefficients removed
3 minutes and 7 seconds for keeping all features.
    Hence, removing features results in 6.41% of the total time, significantly
speeding up Shapley calculations on lasso models.
Parameters
----------
    lasso_model: obj
        fitted Lasso object from scikit-learn
X_train_df: dataframe
dataframe containing training data, which was used to train the
lasso model.
Returns
-------
lasso_model_nonzero: obj
lasso model with the coefficients of zeros removed
X_train_nonzero: dataframe
X train dataframe with columns of coefficients removed.
"""
# Testing the removal of coefficients
idx_nonzero_coef = np.where(lasso_model.coef_ != 0)[0]
# Getting new X train
X_train_nonzero = X_train_df.iloc[:,idx_nonzero_coef]
# Changing the coefficients
lasso_model_nonzero = copy.deepcopy(lasso_model)
lasso_model_nonzero.coef_ = lasso_model.coef_[idx_nonzero_coef]
return lasso_model_nonzero, X_train_nonzero
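# Minimal usage sketch for remove_lasso_zero_coef (illustrative only: the toy data,
# the local scikit-learn import and the alpha value are assumptions, not part of the
# original workflow).
def _example_remove_lasso_zero_coef():
    from sklearn.linear_model import Lasso
    rng = np.random.RandomState(0)
    X_train_df = pd.DataFrame(rng.randn(50, 5), columns=['f0', 'f1', 'f2', 'f3', 'f4'])
    y = 2.0 * X_train_df['f0'] + 0.1 * rng.randn(50)
    lasso_model = Lasso(alpha=0.5).fit(X_train_df, y)  # strong penalty -> many zero coefficients
    lasso_nonzero, X_nonzero = remove_lasso_zero_coef(lasso_model, X_train_df)
    # X_nonzero keeps only the columns whose coefficients survived the L1 penalty
    return lasso_nonzero, X_nonzero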
# Function to convert shapley to mean abs shapley
def compute_mean_abs_shap(shap_values):
"""
This function computes the mean absolute values of the Shapley. It
tells you the average impact on the output magnitude.
Parameters
-------
shap_values: np.array
        Shapley values with shape (n_instances, n_features).
Returns
-------
mean_abs_shap: np.array
mean absolute shapley values: mean | SHAP value |
This tells you the average impact of model on output magnitude.
"""
# Getting shap values
mean_abs_shap = np.abs(shap_values).mean(axis=0)
return mean_abs_shap
# Function to get explainer and shap values
def compute_shap_values(model,
X_train_df,
speed_up = True):
"""
    This function computes the Shapley values for a model. It inspects the
    model type and applies the appropriate explainer.
Parameters
-------
model: [obj]
random forest model
X_train_df: [dataframe]
dataframe of the training data with the columns of descriptors
speed_up: logical, optional
True if you want to speed up the calculation by simplifying features.
For instance, LASSO models have zero coefficients. We could remove
        these coefficients by toggling this on. Default value is True.
Returns
-------
explainer: obj
explainer object
shap_values: np.array
array of shapley values
X_training_to_use: dataframe
X training used
"""
# Defining model type
model_type = str(type(model))
# Defining available models
available_models = ['linear_model', 'RandomForestRegressor']
# Defining default model and X train to use
model_to_use = model
X_training_to_use = X_train_df
# For LASSO model
if 'linear_model' in model_type:
if speed_up is True:
# Simplying model by removing coef
model_to_use, X_training_to_use = remove_lasso_zero_coef(lasso_model = model,
X_train_df = X_train_df,
)
explainer = shap.Explainer(model_to_use.predict, X_training_to_use)
shap_values = explainer(X_training_to_use).values
elif 'RandomForestRegressor' in model_type:
# Editing lgbm to resolve issues
# Error is noted in: https://github.com/slundberg/shap/issues/1042
# if 'LGBMRegressor' in model_type:
# model_to_use.booster_.params['objective'] = 'regression'
explainer = shap.TreeExplainer(model_to_use)
shap_values = explainer.shap_values(X_training_to_use)
else:
try:
explainer = shap.Explainer(model_to_use.predict, X_training_to_use)
shap_values = explainer(X_training_to_use).values
except Exception:
print("Error! Model type not found: %s"%(model_type))
print("Available models for shapley values: %s"%(', '.join(available_models)))
return explainer, shap_values, X_training_to_use
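# Minimal sketch of compute_shap_values on a tree model (illustrative only: the toy
# data and the local scikit-learn import are assumptions, not part of the original
# workflow).
def _example_compute_shap_values_rf():
    from sklearn.ensemble import RandomForestRegressor
    rng = np.random.RandomState(0)
    X_train_df = pd.DataFrame(rng.randn(40, 3), columns=['a', 'b', 'c'])
    y = X_train_df['a'] - X_train_df['b'] + 0.05 * rng.randn(40)
    rf = RandomForestRegressor(n_estimators=20, random_state=0).fit(X_train_df, y)
    # Dispatches to shap.TreeExplainer because the model type contains 'RandomForestRegressor'
    explainer, shap_values, X_used = compute_shap_values(rf, X_train_df)
    return shap_values  # array of shape (n_instances, n_features)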
# Class function to analyze rf models
class interpret_models:
"""
    This class computes Shapley-based interpretations for a fitted model (e.g. LASSO or random forest).
Parameters
-------
model: [obj]
random forest model
X_train_df: [dataframe]
dataframe of the training data with the columns of descriptors
speed_up: logical, optional
True if you want to speed up the calculation by simplifying features.
For instance, LASSO models have zero coefficients. We could remove
        these coefficients by toggling this on. Default value is True.
Returns
-------
self.mean_abs_shap_df: dataframe
contains the mean absolute shapley values after sorting by ascending values
self.correlation_shap_to_descriptors: dataframe
        contains Pearson's r correlation between Shapley values and descriptors.
        It also contains the sign.
        If Pearson's r is N/A (which happens if a descriptor does not vary),
        then we will output a negative sign for that one.
"""
def __init__(self,
model,
X_train_df,
speed_up = True):
# Storing inputs
self.model = model
self.X_train_df_orig = X_train_df
self.speed_up = speed_up
# Getting shapley values
self.explainer, self.shap_values, self.X_train_df = compute_shap_values(model = self.model,
X_train_df = self.X_train_df_orig,
speed_up = speed_up)
# Getting mean abs shapley values
mean_abs_shap = compute_mean_abs_shap(shap_values = self.shap_values)
# Getting dataframe
self.mean_abs_shap_df = pd.DataFrame( np.array([self.X_train_df.columns, mean_abs_shap ]).T, columns = ['Feature', 'Mean Shap'] )
# Turning off the sorting for now
# # Sorting dataframe
# self.mean_abs_shap_df = mean_abs_shap_df.sort_values(by = 'Mean Shap', ascending = False).reset_index(drop = True)
#
# Getting correlation of shap to descriptors
self.correlation_shap_to_descriptors = compute_pearsonsr_btn_shap_and_descriptors(X_train_df = self.X_train_df,
shap_values = self.shap_values
)
return
# Generating summary plot
def plot_summary(self,
plot_type="bar",):
"""
This function plots the summary plot for the shapley outputs.
"""
# Adding summary plot
shap.summary_plot(self.shap_values,
self.X_train_df,
plot_type=plot_type,
show=False)
fig = plt.gcf()
return fig
# Getting shap versus descriptors
def plot_shap_vs_descriptor(self,
descriptor_name = 'vdw surface area/Ang.^2',):
"""
This function plots the shapley values versus descriptor space.
        It will tell you what the correlation is between these two.
Parameters
----------
descriptor_name : str, optional
name of the descriptor to plot. The default is 'vdw surface area/Ang.^2'.
            For more, use self.X_train_df.columns
Returns
-------
fig : obj
figure object.
ax : obj
axis object.
"""
# Plotting the correlations
fig, ax = plot_shap_vs_descriptor(shap_values = self.shap_values,
descriptor_name = descriptor_name,
X_train_df = self.X_train_df,
corr_df = self.correlation_shap_to_descriptors
)
return fig, ax
# Function to convert Pearson's R to signs
def add_pearsons_r_sign_to_df(correlation_shap_to_descriptors):
"""
    This function adds a sign column to the dataframe using the Pearson's r correlation.
    Values greater than zero are labelled 'positive'; everything else (including
    zero and NaN) is labelled 'negative'.
Parameters
----------
correlation_shap_to_descriptors: dataframe
dataframe of pearson's r correlation versus feature.
Returns
-------
correlation_shap_to_descriptors: dataframe
updated dataframe with the sign column
"""
# Getting the sign
pears_r = correlation_shap_to_descriptors['Pearsons_r_to_SHAP'].values
# Seeing if any is positive or nan
correlation_shap_to_descriptors['sign'] = np.where(pears_r > 0, 'positive', 'negative')
return correlation_shap_to_descriptors
# Function to get SHAP correlation to descriptors
def compute_pearsonsr_btn_shap_and_descriptors(X_train_df,
shap_values
):
"""
Parameters
----------
X_train_df : dataframe
        training data
shap_values : np.array
shapley values with the same shape as the training dataframe.
Returns
-------
correlation_shap_to_descriptors: dataframe
Pearson's correlation between Shapley and feature space.
"""
# Defining storage for it
correlation_shap_to_descriptors = []
# Getting sign using shapley values
for idx, col_name in enumerate(X_train_df.columns):
# Getting the Shapley values
shap_v = shap_values[:, idx]
# Getting the descriptor
descriptor_values = X_train_df.values[:, idx]
# Getting Pearson's r correlaton
pear_r = pearsonr(shap_v, descriptor_values)[0]
# Storing
correlation_shap_to_descriptors.append({'Feature': col_name,
'Pearsons_r_to_SHAP': pear_r})
# Creating correlation
correlation_shap_to_descriptors = pd.DataFrame(correlation_shap_to_descriptors)
# Adding sign
add_pearsons_r_sign_to_df(correlation_shap_to_descriptors)
return correlation_shap_to_descriptors
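# Tiny worked example of the sign convention above (illustrative data only): SHAP
# values that rise with the descriptor give Pearson's r > 0, so the feature is
# labelled 'positive'.
def _example_shap_sign_convention():
    X_demo = pd.DataFrame({'x1': [0.0, 1.0, 2.0, 3.0]})
    shap_demo = np.array([[-0.2], [-0.1], [0.1], [0.2]])  # increases with x1
    return compute_pearsonsr_btn_shap_and_descriptors(X_demo, shap_demo)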
# Function to plot correlation for a descriptor
def plot_shap_vs_descriptor(shap_values,
X_train_df,
descriptor_name = 'vdw surface area/Ang.^2',
corr_df = None,
fig_size_cm = plot_tools.FIGURE_SIZES_DICT_CM['1_col']
):
"""
This function plots the shapley values versus the feature of interest.
It will show how the feature impacts the Shapley values.
Parameters
----------
shap_values : np.array
Shapley values with the array size n_instances and n_features.
X_train_df : np.array
Raw X_train data with the same shape and size as shap_values
descriptor_name: str
name of the descriptor that you want to show
corr_df : dataframe, optional
dataframe containing columns of 'Feature' and 'Pearsons_r_to_SHAP'.
The default value for this is None, which will then generate Pearon's r
correlation coefficient by itself.
fig_size_cm : tuple, optional
figure size in cm. By default, we take a 1-col example.
Returns
-------
None.
"""
# Creating figure
fig, ax = plot_tools.create_fig_based_on_cm(fig_size_cm = fig_size_cm)
# Adding labels
ax.set_xlabel(descriptor_name)
ax.set_ylabel("Shapley values")
# Getting index
    index_of_feature = np.where(X_train_df.columns == descriptor_name)[0][0]
    # Defining x and y
    x = X_train_df[descriptor_name].values
    y = shap_values[:, index_of_feature]
# Getting pearsons r
if corr_df is None:
pear_r = pearsonr(y,x)[0]
else:
pear_r = corr_df[corr_df['Feature'] == descriptor_name]['Pearsons_r_to_SHAP']
# Adding box text
box_text = "Pearson's $r$: %.2f"%(pear_r)
# Plotting
ax.scatter(x,y,color = 'k', label = box_text)
ax.legend()
# =============================================================================
# # Adding text to axis
# ax.text(0.95, 0.05, box_text,
# horizontalalignment='right',
# verticalalignment='bottom',
# transform = ax.transAxes,
# bbox=dict(facecolor='none', edgecolor= 'none', pad=5.0))
# =============================================================================
return fig, ax
# Function to generate multiple interpretations
def interpret_multiple_models(model_list,
X_train_df_list,
speed_up = True,
):
"""
This function interprets multiple models and outputs them into a list.
Parameters
----------
model_list: list
list of model interpretations
X_train_df_list: list
list of X training dataframes
speed_up: logical, optional
True if you want to speed up the calculation by simplifying features.
For instance, LASSO models have zero coefficients. We could remove
        these coefficients by toggling this on. Default value is True.
Returns
-------
store_dfs: dataframe
dataframe storing all information of mean abs shap and sign dataframes.
"""
# Storing each of them
store_dfs = []
# Looping through the models
for idx, model in enumerate(model_list):
# Getting dataframe
X_train_df = X_train_df_list[idx]
# Getting interpret rf
interpretation = interpret_models(model = model,
X_train_df = X_train_df,
speed_up = speed_up)
# Storing outputs
output_dict = {
'mean_abs_shap_df': interpretation.mean_abs_shap_df,
'sign_df': interpretation.correlation_shap_to_descriptors}
# Appending
store_dfs.append(output_dict)
return store_dfs
# Function to rapidly generate X_train_df list
def generate_X_train_df_list(descriptor_dict_output):
"""
This function rapidly generates the training dataframe for a given fold.
Parameters
----------
descriptor_dict_output: dict
dictionary of the model that you are looking at.
Returns
-------
X_train_df_list: list
list of training dataframes
"""
# Getting X train list
X_train_df_list = []
# Getting Dataframe
X_df = descriptor_dict_output['X_df']
# Going through the index
for idx in range(len(descriptor_dict_output['fold_list'])):
X_train = descriptor_dict_output['fold_list'][idx]['X_train']
X_train_df = pd.DataFrame(X_train, columns = X_df.columns)
# Storing
X_train_df_list.append(X_train_df)
return X_train_df_list
# Function to combine all dataframes
def combine_shap_dfs(store_dfs):
"""
This function combines multiple dataframes, such as the ones from the
shapley dataframe. It will iteratively loop through each dataframe and
store any new information other than the default column of "feature".
Parameters
----------
store_dfs: list
list of dataframes containing shapley parameters
Returns
-------
combined_df_dict: [dict]
dictionary containing combined information from the dataframes.
"""
# Defining default feature column
default_col = 'Feature'
merged_df_args = dict(left_on = default_col, right_on = default_col, how = 'inner')
# Defining combined dict
combined_df_dict = {}
# Loop for each type
for each_df_type in store_dfs[0].keys():
# Getting list of dataframes
list_of_dfs = [store_dfs[idx][each_df_type] for idx in range(len(store_dfs))]
# Generating single dataframe
for df_idx, df_info in enumerate(list_of_dfs):
# Relabelling the dataframe
suffix_str = '_%d'%(df_idx)
df_info = df_info.add_suffix(suffix_str)
df_info = df_info.rename(index=str, columns={'%s%s'%(default_col, suffix_str):default_col})
# If first iteration, we say that is the merged one.
if df_idx == 0:
merged_df = df_info.copy()
else:
# Begin attaching dataframes on.
# Start by adding suffix and renaming feature
merged_df = merged_df.merge(df_info, **merged_df_args) # suffixes = ('_1','_2'),
# After merging, store it
combined_df_dict[each_df_type] = merged_df.copy()
return combined_df_dict
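# Note on the merged layout produced above: each fold's columns carry a numeric
# suffix ('Mean Shap_0', 'Mean Shap_1', ..., 'sign_0', 'sign_1', ...), all joined on
# the shared 'Feature' column.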
# Function to summarize combined dataframes
def summarize_shap_df(combined_df_dict):
"""
This function will compute the mean and standard deviation of Shapley
values. In addition, it will take the sign and convert it to a final
value. These will be stored to a single dataframe that contains
the model results.
Parameters
----------
    combined_df_dict: [dict]
dictionary containing multiple dataframes for Shapley values.
Returns
-------
results_df: [dataframe]
contains the results for average Shapley + std Shapley.
"""
# Defining default feature column
default_col = 'Feature'
merged_df_args = dict(left_on = default_col, right_on = default_col, how = 'inner')
# Getting merged dataframe
combined_df_merged = combined_df_dict['mean_abs_shap_df'].merge(combined_df_dict['sign_df'], **merged_df_args)
# Getting only mean cols
mean_cols = [each_col for each_col in combined_df_merged.columns if each_col.startswith("Mean Shap")]
# Getting only sign cols
sign_cols = [each_col for each_col in combined_df_merged.columns if each_col.startswith("sign_")]
# Generating a Features dataframe
results_df = combined_df_merged[['Feature']].copy()
# Getting mean and std of Shapley values
results_df['Avg_Shap'] = combined_df_merged[mean_cols].mean(axis = 1)
results_df['Std_Shap'] = combined_df_merged[mean_cols].std(axis = 1)
# Getting the sign of the value
    results_df['Mode_sign'] = combined_df_merged[sign_cols].mode(axis=1)[0]
# Getting +1 for positive or -1 for negative
sign_array = | np.where(results_df['Mode_sign'] == 'positive', 1, -1) | numpy.where |
try:
print("Loading libraries please stand by...")
import numpy as np
import matplotlib.pyplot as plt
print("Done.")
except ImportError:
    print("You don't have the necessary libraries")
class DetectorState():
def __init__(self):
self.__detections = 0
def register_detection(self):
self.__detections += 1
@property
def detections(self):
return self.__detections
def __str__(self):
return "Ball has been detected {} times".format(self.__detections)
class PlotsState():
def __init__(self):
f, ax = plt.subplots(2)
# Arrange plot environment for data
x = np.arange(10000)
y = | np.random.randn(10000) | numpy.random.randn |
#!/usr/bin/env python
from __future__ import print_function, division
import bayesloop as bl
import numpy as np
import sympy.stats as stats
class TestTwoParameterModel:
def test_fit_0hp(self):
# carry out fit (this test is designed to fall back on the fit method of the Study class)
S = bl.HyperStudy()
S.loadData(np.array([1, 2, 3, 4, 5]))
S.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20), 'sigma', bl.oint(0, 2, 20), prior=lambda m, s: 1/s**3))
S.setTM(bl.tm.Static())
S.fit()
# test parameter distributions
np.testing.assert_allclose(S.getParameterDistributions('mean', density=False)[1][:, 5],
[0.00707902, 0.00707902, 0.00707902, 0.00707902, 0.00707902],
rtol=1e-05, err_msg='Erroneous posterior distribution values.')
# test parameter mean values
np.testing.assert_allclose(S.getParameterMeanValues('mean'),
[3., 3., 3., 3., 3.],
rtol=1e-05, err_msg='Erroneous posterior mean values.')
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -16.1946904707, decimal=5,
err_msg='Erroneous log-evidence value.')
def test_fit_1hp(self):
# carry out fit
S = bl.HyperStudy()
S.loadData(np.array([1, 2, 3, 4, 5]))
S.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20), 'sigma', bl.oint(0, 2, 20), prior=lambda m, s: 1/s**3))
S.setTM(bl.tm.GaussianRandomWalk('sigma', bl.cint(0, 0.2, 2), target='mean'))
S.fit()
# test parameter distributions
np.testing.assert_allclose(S.getParameterDistributions('mean', density=False)[1][:, 5],
[0.01042107, 0.00766233, 0.00618352, 0.00554651, 0.00548637],
rtol=1e-05, err_msg='Erroneous posterior distribution values.')
# test parameter mean values
np.testing.assert_allclose(S.getParameterMeanValues('mean'),
[2.88534505, 2.93135361, 3., 3.06864639, 3.11465495],
rtol=1e-05, err_msg='Erroneous posterior mean values.')
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -16.0629517262, decimal=5,
err_msg='Erroneous log-evidence value.')
# test hyper-parameter distribution
x, p = S.getHyperParameterDistribution('sigma')
print(np.array([x, p]))
np.testing.assert_allclose(np.array([x, p]),
[[0., 0.2], [0.43828499, 0.56171501]],
rtol=1e-05, err_msg='Erroneous values in hyper-parameter distribution.')
def test_fit_2hp(self):
# carry out fit
S = bl.HyperStudy()
S.loadData(np.array([1, 2, 3, 4, 5]))
S.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20), 'sigma', bl.oint(0, 2, 20), prior=lambda m, s: 1/s**3))
T = bl.tm.CombinedTransitionModel(bl.tm.GaussianRandomWalk('sigma', bl.cint(0, 0.2, 2), target='mean'),
bl.tm.RegimeSwitch('log10pMin', [-3, -1]))
S.setTM(T)
S.fit()
# test parameter distributions
np.testing.assert_allclose(S.getParameterDistributions('mean', density=False)[1][:, 5],
[5.80970506e-03, 1.12927905e-01, 4.44501254e-02, 1.00250119e-02, 1.72751309e-05],
rtol=1e-05, err_msg='Erroneous posterior distribution values.')
# test parameter mean values
np.testing.assert_allclose(S.getParameterMeanValues('mean'),
[0.96492471, 2.09944204, 2.82451616, 3.72702495, 5.0219119],
rtol=1e-05, err_msg='Erroneous posterior mean values.')
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -10.7601875492, decimal=5,
err_msg='Erroneous log-evidence value.')
# test hyper-parameter distribution
x, p = S.getHyperParameterDistribution('sigma')
np.testing.assert_allclose(np.array([x, p]),
[[0., 0.2], [0.48943645, 0.51056355]],
rtol=1e-05, err_msg='Erroneous values in hyper-parameter distribution.')
# test joint hyper-parameter distribution
x, y, p = S.getJointHyperParameterDistribution(['log10pMin', 'sigma'])
np.testing.assert_allclose(np.array([x, y]),
[[-3., -1.], [0., 0.2]],
rtol=1e-05, err_msg='Erroneous parameter values in joint hyper-parameter '
'distribution.')
np.testing.assert_allclose(p,
[[0.00701834, 0.0075608], [0.48241812, 0.50300274]],
rtol=1e-05, err_msg='Erroneous probability values in joint hyper-parameter '
'distribution.')
def test_fit_hyperprior_array(self):
# carry out fit
S = bl.HyperStudy()
S.loadData(np.array([1, 2, 3, 4, 5]))
S.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20), 'sigma', bl.oint(0, 2, 20), prior=lambda m, s: 1/s**3))
S.setTM(bl.tm.GaussianRandomWalk('sigma', bl.cint(0, 0.2, 2), target='mean', prior=np.array([0.2, 0.8])))
S.fit()
# test parameter distributions
np.testing.assert_allclose(S.getParameterDistributions('mean', density=False)[1][:, 5],
[0.01205759, 0.00794796, 0.00574501, 0.00479608, 0.00470649],
rtol=1e-05, err_msg='Erroneous posterior distribution values.')
# test parameter mean values
np.testing.assert_allclose(S.getParameterMeanValues('mean'),
[2.82920111, 2.89773902, 3., 3.10226098, 3.17079889],
rtol=1e-05, err_msg='Erroneous posterior mean values.')
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -15.9915077133, decimal=5,
err_msg='Erroneous log-evidence value.')
# test hyper-parameter distribution
x, p = S.getHyperParameterDistribution('sigma')
np.testing.assert_allclose(np.array([x, p]),
[[0., 0.2], [0.16322581, 0.83677419]],
rtol=1e-05, err_msg='Erroneous values in hyper-parameter distribution.')
def test_fit_hyperprior_function(self):
# carry out fit
S = bl.HyperStudy()
S.loadData(np.array([1, 2, 3, 4, 5]))
S.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20), 'sigma', bl.oint(0, 2, 20), prior=lambda m, s: 1/s**3))
S.setTM(bl.tm.GaussianRandomWalk('sigma', bl.cint(0.1, 0.3, 2), target='mean', prior=lambda s: 1./s))
S.fit()
# test parameter distributions
np.testing.assert_allclose(S.getParameterDistributions('mean', density=False)[1][:, 5],
[0.04071021, 0.00783661, 0.00527211, 0.00484169, 0.00480379],
rtol=1e-05, err_msg='Erroneous posterior distribution values.')
# test parameter mean values
np.testing.assert_allclose(S.getParameterMeanValues('mean'),
[2.68460027, 2.81872578, 3., 3.18127422, 3.31539973],
rtol=1e-05, err_msg='Erroneous posterior mean values.')
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -15.9898700147, decimal=5,
err_msg='Erroneous log-evidence value.')
# test hyper-parameter distribution
x, p = S.getHyperParameterDistribution('sigma')
np.testing.assert_allclose( | np.array([x, p]) | numpy.array |
"""
Unit testing for the measure module.
"""
import numpy as np
import pytest
import molecool
def test_calculate_distance():
r1 = | np.array([0, 0, 0]) | numpy.array |
# coding=utf-8
import os
import math
import time
from collections import OrderedDict
import cv2
import numpy as np
__all__ = ['reader']
multi_scales = [0.3, 0.6, 0.9]
def bbox_vote(det):
order = det[:, 4].ravel().argsort()[::-1]
det = det[order, :]
if det.shape[0] == 0:
dets = np.array([[10, 10, 20, 20, 0.002]])
det = np.empty(shape=[0, 5])
while det.shape[0] > 0:
# IOU
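        # Overlap of the current highest-scoring box (row 0 after the descending sort)
        # against every remaining box.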
area = (det[:, 2] - det[:, 0] + 1) * (det[:, 3] - det[:, 1] + 1)
xx1 = np.maximum(det[0, 0], det[:, 0])
yy1 = | np.maximum(det[0, 1], det[:, 1]) | numpy.maximum |
"""
Copyright (C) 2020 <NAME>, <NAME> Ltd
Copyright (C) 2019 <NAME>, ETH Zurich
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -*- coding: utf-8 -*-
from datetime import datetime
import warnings
import time
import os
import numpy as np
import pandas as pd
from numpy import sum as array_sum_to_scalar
from autograd import elementwise_grad
from autograd import numpy as anp
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.optim as optim
from covews.apps.util import info
from lifelines.fitters import SemiParametricRegressionFittter
from lifelines.fitters.mixins import ProportionalHazardMixin
from lifelines.utils.printer import Printer
from lifelines.utils import (
_get_index,
_to_list,
# check_for_overlapping_intervals,
check_for_numeric_dtypes_or_raise,
check_low_var,
check_complete_separation_low_variance,
check_for_immediate_deaths,
check_for_instantaneous_events_at_time_zero,
check_for_instantaneous_events_at_death_time,
check_for_nonnegative_intervals,
pass_for_numeric_dtypes_or_raise_array,
ConvergenceError,
normalize,
StepSizer,
check_nans_or_infs,
string_justify,
coalesce,
)
__all__ = ["CoxNonLinearTimeVaryingFitter"]
matrix_axis_0_sum_to_1d_array = lambda m: np.sum(m, 0)
class CoxNonLinearTimeVaryingFitter(SemiParametricRegressionFittter, ProportionalHazardMixin):
r"""
This class implements fitting Cox's nonlinear time-varying proportional hazard model:
.. math:: h(t|x(t)) = h_0(t)\exp((x(t)-\overline{x})'\beta)
Parameters
----------
    learning_rate: float, optional (default=0.001)
        the learning rate used by the AdamW optimizer.
l2_weight: float, optional
the coefficient of an L2 penalizer in the regression
Attributes
----------
params_ : Series
        The estimated coefficients. Changed in version 0.22.0: used to be ``.hazards_``
hazard_ratios_ : Series
The exp(coefficients)
confidence_intervals_ : DataFrame
The lower and upper confidence intervals for the hazard coefficients
event_observed: Series
The event_observed variable provided
    weights: Series
        The weights variable provided
variance_matrix_ : numpy array
The variance matrix of the coefficients
strata: list
the strata provided
standard_errors_: Series
the standard errors of the estimates
baseline_cumulative_hazard_: DataFrame
baseline_survival_: DataFrame
"""
_KNOWN_MODEL = True
def __init__(self, learning_rate=0.001, l2_weight=0.0, l1_ratio=0.0, strata=None, num_units=32, num_layers=2,
p_dropout=0.0, batch_size=128, num_epochs=100, output_directory=""):
super(CoxNonLinearTimeVaryingFitter, self).__init__(alpha=learning_rate)
self.learning_rate = learning_rate
self.l2_weight = l2_weight
self.strata = strata
self.l1_ratio = l1_ratio
self.num_units = num_units
self.num_layers = num_layers
self.p_dropout = p_dropout
self.batch_size = batch_size
self.num_epochs = num_epochs
self.type_pt = torch.float
self.tmp_file_name = "model.tmp.pt"
self.output_directory = output_directory
def preprocess_df(self, df, event_col, start_col, stop_col, weights_col, id_col):
df = df.copy()
if not (event_col in df and start_col in df and stop_col in df):
raise KeyError("A column specified in the call to `fit` does not exist in the DataFrame provided.")
if weights_col is None:
self.weights_col = None
assert "__weights" not in df.columns, "__weights is an internal lifelines column, please rename your column first."
df["__weights"] = 1.0
else:
self.weights_col = weights_col
if (df[weights_col] <= 0).any():
raise ValueError("values in weights_col must be positive.")
df = df.rename(columns={event_col: "event", start_col: "start", stop_col: "stop", weights_col: "__weights"})
if self.strata is not None and self.id_col is not None:
df = df.set_index(_to_list(self.strata) + [id_col])
df = df.sort_index()
elif self.strata is not None and self.id_col is None:
df = df.set_index(_to_list(self.strata))
elif self.strata is None and self.id_col is not None:
df = df.set_index([id_col])
events, start, stop = (
pass_for_numeric_dtypes_or_raise_array(df.pop("event")).astype(bool),
df.pop("start"),
df.pop("stop"),
)
weights = df.pop("__weights").astype(float)
df = df.astype(float)
self._check_values(df, events, start, stop)
return df, events, start, stop, weights
def fit(
self,
df,
event_col,
start_col="start",
stop_col="stop",
weights_col=None,
id_col=None,
show_progress=False,
robust=False,
strata=None,
initial_point=None,
val_df=None
): # pylint: disable=too-many-arguments
"""
Fit the Cox Nonlinear Hazard model to a time varying dataset. Tied survival times
are handled using Efron's tie-method.
Parameters
-----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and
`event_col`, plus other covariates. `duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
the 'death' events was observed: 1 if observed, 0 else (censored).
event_col: string
the column in DataFrame that contains the subjects' death
observation. If left as None, assume all individuals are non-censored.
start_col: string
the column that contains the start of a subject's time period.
stop_col: string
the column that contains the end of a subject's time period.
weights_col: string, optional
the column that contains (possibly time-varying) weight of each subject-period row.
id_col: string, optional
A subject could have multiple rows in the DataFrame. This column contains
the unique identifier per subject. If not provided, it's up to the
user to make sure that there are no violations.
show_progress: since the fitter is iterative, show convergence
diagnostics.
        robust: bool, optional (default: False)
Compute the robust errors using the Huber sandwich estimator, aka Wei-Lin estimate. This does not handle
ties, so if there are high number of ties, results may significantly differ. See
"The Robust Inference for the Cox Proportional Hazards Model", Journal of the American Statistical Association, Vol. 84, No. 408 (Dec., 1989), pp. 1074- 1078
step_size: float, optional
set an initial step size for the fitting algorithm.
strata: list or string, optional
specify a column or list of columns n to use in stratification. This is useful if a
categorical covariate does not obey the proportional hazard assumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
initial_point: (d,) numpy array, optional
initialize the starting point of the iterative
algorithm. Default is the zero vector.
Returns
--------
self: CoxNonLinearTimeVaryingFitter
self, with additional properties like ``hazards_`` and ``print_summary``
"""
self.strata = coalesce(strata, self.strata)
self.robust = robust
if self.robust:
raise NotImplementedError("Not available yet.")
self.event_col = event_col
self.id_col = id_col
self.stop_col = stop_col
self.start_col = start_col
self._time_fit_was_called = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + " UTC"
df, events, start, stop, weights = self.preprocess_df(df, event_col, start_col, stop_col, weights_col, id_col)
val_df, val_events, val_start, val_stop, val_weights = \
self.preprocess_df(val_df, event_col, start_col, stop_col, weights_col, id_col)
self._norm_mean = df.mean(0)
self._norm_std = df.std(0)
self._norm_std[self._norm_std == 0] = 1.0 # Avoid div by zero.
# Network architecture
in_features = df.values.shape[-1]
out_features = 1
self.type_pt = torch.float
self.net = Net(in_features, self.num_units, out_features, self.num_layers, self.p_dropout, self.type_pt)
self.net = self._neural_cox(
normalize(df, self._norm_mean, self._norm_std),
events,
start,
stop,
weights,
normalize(val_df, self._norm_mean, self._norm_std),
val_events,
val_start,
val_stop,
val_weights,
net=self.net,
show_progress=show_progress,
training_epochs=self.num_epochs,
batch_size=self.batch_size,
step_size=self.learning_rate,
)
self.beta_params_ = pd.Series(list(self.net.beta.parameters())[0].detach().numpy().ravel(), name="coef")
self.baseline_cumulative_hazard_ = self._compute_cumulative_baseline_hazard(df, events, start, stop, weights)
self.baseline_survival_ = self._compute_baseline_survival()
self.event_observed = events
self.start_stop_and_events = pd.DataFrame({"event": events, "start": start, "stop": stop})
self.weights = weights
self._n_examples = df.shape[0]
self._n_unique = df.index.unique().shape[0]
return self
def _neural_cox(self, X, events, start, stop, weights,
val_X, val_events, val_start, val_stop, val_weights, net,
show_progress=True, training_epochs=10, batch_size=16, step_size=0.01):
events = events.values.reshape(-1, 1)
start = start.values.reshape(-1, 1)
stop = stop.values.reshape(-1, 1)
weights = weights.values.reshape(-1, 1)
val_events = val_events.values.reshape(-1, 1)
val_start = val_start.values.reshape(-1, 1)
val_stop = val_stop.values.reshape(-1, 1)
val_weights = val_weights.values.reshape(-1, 1)
n, d = X.shape
val_n, val_d = val_X.shape
assert d == val_d
optimizer = optim.AdamW(net.parameters(), lr=step_size, weight_decay=self.l2_weight)
full_table = np.concatenate([X, events, start, stop, weights], axis=1)
val_full_table = np.concatenate([val_X, val_events, val_start, val_stop, val_weights], axis=1)
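        # Note: batch_size equals the full table length, so each epoch runs a single
        # full batch (presumably so the Cox partial likelihood sees the complete risk set).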
loader = DataLoader(
full_table,
batch_size=len(full_table),
shuffle=True,
sampler=None,
batch_sampler=None,
num_workers=0,
collate_fn=None,
pin_memory=False,
drop_last=True,
timeout=0,
worker_init_fn=None,
)
val_loader = DataLoader(
val_full_table,
batch_size=len(val_full_table),
shuffle=False,
sampler=None,
batch_sampler=None,
num_workers=0,
collate_fn=None,
pin_memory=False,
drop_last=True,
timeout=0,
worker_init_fn=None,
)
checkpoint_path = os.path.join(self.output_directory, self.tmp_file_name)
min_loss = | np.finfo(float) | numpy.finfo |
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.backends.backend_pdf import PdfPages
"""
Notes for future:
You set up the basic elements of a curve, but not everything
is truly automated. Need to:
1. Automate Coordinates of S,D text
"""
# shift1 is demand shift, shift 2 is supply shift
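# Usage sketch (illustrative; the curve arrays below are assumptions, not values
# taken from the original script):
#   q = np.arange(0, 10000, 1)
#   demand = 9000 - 0.9 * q
#   supply = 1000 + 0.9 * q
#   supplyAndDemandWithShifts(supply, demand, shift1="Demand-Right", name="AD-AS demo")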
def supplyAndDemandWithShifts(supply, demand, vertSupply=False, shift1=None, shift2=None, inc=1, name= "Loanable Funds", pp = PdfPages("Default.pdf")):
pp = pp
fig = plt.figure(dpi=128, figsize=(10,6))
frame = plt.gca()
plt.title(name, fontsize=20, ha='center')
if vertSupply:
supply = round(len(supply)/2)
print(supply)
if shift1:
if (shift1 != "Supply-Left" and shift1 != "Supply-Right") or vertSupply == False:
firstShift = selectShiftCurve(demand, supply, shift1,order=1)
else:
if shift1 == "Supply-Right":
firstShift = 7000
if shift1 == "Supply-Left":
firstShift = 3000
if shift2:
secondShift = selectShiftCurve(demand, supply,shift1, shift2,order=2)
i = 0
if shift1 and shift2:
xi,yi= findIntersection(supply, demand, inc)
plotCurves(supply, demand,vertSupply, firstShift, secondShift, inc)
placePrimaryText(vertSupply)
p0, q0 = plotVertAndHorizLines(demand,supply,inc,i, "k--", vertSupply=vertSupply)
i +=1
# Horizontal and Vertical Lines for First Shift
if shift1 == "Demand-Left" or shift1 == "Demand-Right":
p1, q1 = plotVertAndHorizLines(firstShift, supply, inc,i, "k--",vertSupply=vertSupply, shift1=shift1,xi=xi,yi=yi)
if shift1 == "Supply-Left" or shift1 == "Supply-Right":
p1, q1 = plotVertAndHorizLines(firstShift, demand, inc,i, "k--",vertSupply=vertSupply, shift1=shift1,xi=xi,yi=yi)
i += 1
if (shift2 == "Demand-Left" or shift2 == "Demand-Right"):
if (shift1 == "Demand-Left" or shift1 == "Demand-Right"):
p2, q2 = plotVertAndHorizLines(secondShift, supply, inc,i, "k--", xi, yi,vertSupply=vertSupply, shift2=shift2)
if shift1 != shift2:
                    p0.remove()
                    q0.remove()
if (shift1 == "Supply-Left" or shift1 == "Supply-Right"):
x1, y1 = findIntersection(demand, firstShift, inc)
p2, q2 = plotVertAndHorizLines(secondShift, firstShift, inc, i, "k--", x1, y1,vertSupply=vertSupply,shift2=shift2)
if (shift2 == "Demand-Left" and shift1 == "Supply-Right") or (shift2 == "Demand-Right" and shift1 == "Supply-Left") :
                    q0.remove()
if shift2 == "Supply-Left" or shift2 == "Supply-Right":
if (shift1 == "Demand-Left" or shift1 == "Demand-Right"):
p2, q2 = plotVertAndHorizLines(secondShift, firstShift, inc,i, "k--", xi, yi,vertSupply=vertSupply,shift2=shift2)
if (shift1 == "Demand-Left" and shift2 == "Supply-Right") or (shift1 == "Demand-Right" and shift2 == "Supply-Left") :
                    q0.remove()
if (shift1 == "Supply-Left" or shift1 == "Supply-Right"):
p2, q2 = plotVertAndHorizLines(secondShift, demand, inc,i, "k--", xi, yi,vertSupply=vertSupply,shift2=shift2)
if shift1 != shift2:
                    p0.remove()
                    q0.remove()
if shift1 == None and shift2 == None:
plotCurves(supply, demand, vertSupply = vertSupply)
p0, q0 = plotVertAndHorizLines(demand,supply,inc,i, "k--",vertSupply=vertSupply)
if shift1 and not shift2:
placePrimaryText(vertSupply)
p0, q0 = plotVertAndHorizLines(demand,supply,inc,i, "k--",vertSupply=vertSupply)
# Horizontal and Vertical Lines for First Shift
i +=1
if shift1 == "Demand-Left" or shift1 == "Demand-Right":
p1, q1 = plotVertAndHorizLines(firstShift, supply, inc,i, "k--",vertSupply=vertSupply, shift1=shift1)
if shift1 == "Supply-Left" or shift1 == "Supply-Right":
p1, q1 = plotVertAndHorizLines(firstShift, demand, inc,i, "k--",vertSupply=vertSupply, shift1 = shift1)
plotCurves(supply, demand, vertSupply,firstShift, None, inc)
if not shift1 and shift2:
plotCurves(supply, demand,vertSupply, None, secondShift, inc)
p0, q0 = plotVertAndHorizLines(demand,supply,inc,i, "k--",vertSupply=vertSupply)
# Horizontal and Vertical Lines for First Shift
i +=1
if shift1 == "Demand-Left" or shift1 == "Demand-Right":
p1, q1 = plotVertAndHorizLines(firstShift, supply, inc,i, "k--",vertSupply=vertSupply,xi=xi,yi=yi)
if shift1 == "Supply-Left" or shift1 == "Supply-Right":
p1, q1 = plotVertAndHorizLines(firstShift, demand, inc,i, "k--",vertSupply=vertSupply,xi=xi,yi=yi)
placePrimaryText(vertSupply)
placeShiftText(shift1, shift2,vertSupply=vertSupply)
setupAxes(frame)
plt.savefig(name.replace("\n"," "))
pp.savefig(fig)
# plt.close()
# pp.close()
def placePrimaryText(vertSupply=False):
#plt.text(x,y,text,fontsize)
    p = plt.text(-600, 10000, r"$\pi$", fontsize=24)
if vertSupply == False:
s = plt.text(8200, 8800,"$SRAS_0$", fontsize = 24)
else:
s = plt.text(5100, 8800, "$LRAS_0$", fontsize = 24)
d = plt.text(8200, 2000,"$AD_0$", fontsize = 24)
    q = plt.text(10000, -650, r"$\%\Delta y$", fontsize=24)
return p , s , d , q
def placeShiftText(shift1, shift2=None, vertSupply=False):
if shift1 == None:
if (shift2):
placeShiftText(shift2)
else:
return
if shift1 == "Demand-Left":
plt.text(5500, 1650,"$AD_1$", fontsize = 24)
if shift1 == "Demand-Right":
plt.text(8500, 3800,"$AD_1$", fontsize = 24)
if shift1 == "Supply-Left":
if vertSupply == False:
plt.text(6600, 8800,"$LRAS_1$", fontsize = 24)
else:
plt.text(3100, 8800,"$LRAS_1$", fontsize = 24)
if shift1 == "Supply-Right":
if vertSupply == False:
plt.text(8500, 7600,"$LRAS_1$", fontsize = 24)
else:
plt.text(7100, 8800,"$LRAS_1$", fontsize = 24)
# safety check . . .
if shift1 and shift2:
if shift2 == "Demand-Left":
if shift1 == "Supply-Left" or shift1 == "Supply-Right":
plt.text(6200, 1000,"$AD_1$", fontsize = 24)
if shift1 == "Demand-Left":
plt.text(4000, 1600,"$AD_2$", fontsize = 24)
if shift1 == "Demand-Right":
plt.text(8200, 2000,"$AD_{0,2}$", fontsize = 24) # same as initial
if shift2 == "Demand-Right":
if shift1 == "Supply-Left" or shift1 == "Supply-Right":
plt.text(8200, 3450,"$AD_1$", fontsize = 24)
if shift1 == "Demand-Left":
plt.text(8200, 2000,"$AD_{0,2}$", fontsize = 24) # same as initial
if shift1 == "Demand-Right":
plt.text(9000, 5750,"$AD_2$", fontsize = 24)
if shift2 == "Supply-Left":
if shift1 == "Demand-Left" or shift1 == "Demand-Right":
plt.text(6600, 8800,"$LRAS_1$", fontsize = 24)
if shift1 == "Supply-Left":
plt.text(5100, 8800,"$LRAS_2$", fontsize = 24)
if shift1 == "Supply-Right":
plt.text(7755, 8800,"$LRAS_2$", fontsize = 24) # same as initial
if shift2 == "Supply-Right":
if shift1 == "Demand-Left" or shift1 == "Demand-Right":
plt.text(8500, 7600,"$LRAS_1$", fontsize = 24)
if shift1 == "Supply-Left":
plt.text(7755, 8800,"$LRAS_{0,2}$", fontsize = 24) # same as initial
if shift1 == "Supply-Right":
plt.text(9750, 6000,"$LRAS_2$", fontsize = 24)
def plotCurves(supply, demand, vertSupply=False, shift1=None, shift2=None, inc=1):
# plt.plot((x1,x2), (y1,y2), linestyle/color, linewidth)
if vertSupply == False:
plt.plot(supply, 'C0-', linewidth=3)
else:
plt.axvline(x=supply, color = 'C0', linewidth=3)
plt.plot(demand, 'C0-', linewidth=3)
try:
if isinstance(shift1,np.ndarray):
plt.plot(shift1, 'C3-', linewidth=3)
else:
if shift1 != None:
plt.axvline(x=shift1, color = 'C3', linewidth=3)
except NameError:
print("shift1 = None")
# if not np.all([shift2, supply]) and not np.all([shift2, demand]):
try:
if isinstance(shift2,np.ndarray):
plt.plot(shift2, 'C3-', linewidth=3)
else:
            if shift2 is not None:
                plt.axvline(x=shift2, color='C3', linewidth=3)
    except NameError:
        print("shift2 = None")
def plotVertAndHorizLines(curve1, curve2, inc, i, line,
                          xi=None, yi=None, vertSupply=False, shift1=None, shift2=None):
    # draw a dashed guide from the y-axis out to the relevant intersection
    x2, y2 = findIntersection(curve1, curve2, inc)
    # plt.plot((x2, x2), (0, y2), line, linewidth=1.5)
    plt.plot((0, x2), (y2, y2), line, linewidth=1.5)
    if i == 0:
        p0 = plt.text(-600, y2, r"$\pi_0$", fontsize=20)
        q0 = plt.text(x2 - 200, -650, r"$\%\Delta y_0$", fontsize=20)
        return p0, q0
    if i == 1:
        p1 = plt.text(-600, y2, r"$\pi_1$", fontsize=20)
        if vertSupply:
            if shift1 == "Supply-Left" or shift1 == "Supply-Right":
                q1 = plt.text(x2 - 200, -650, r"$\%\Delta y_1$", fontsize=20)
            else:
                q1 = plt.text(x2 - 200, -650, "", fontsize=20)
        else:
            if shift1 == "Supply-Left" or shift1 == "Supply-Right":
                q1 = plt.text(x2 - 200, -650, r"$\%\Delta y_1$", fontsize=20)
            else:
                # placeholder so q1 is always defined before returning
                q1 = plt.text(x2 - 200, -650, "", fontsize=20)
        return p1, q1
    if i == 2:
        if yi != y2:
            p2 = plt.text(-600, y2, r"$\pi_2$", fontsize=20)
        else:
            p2 = plt.text(-1450, y2, r"$\pi_2=$", fontsize=20)
        if xi != x2:
            q2 = plt.text(x2 - 200, -650, r"$\%\Delta y_2$", fontsize=20)
        else:
            q2 = plt.text(x2 + 200, -650, "$_{,2}$", fontsize=20)
        return p2, q2
def setupAxes(frame):
frame.axes.get_xaxis().set_visible(False)
frame.axes.get_yaxis().set_visible(False)
plt.ylim(0, 10000)
plt.xlim(xmin = 0, xmax = 10000)
plt.xlabel("Real Income", fontsize=20)
plt.ylabel("Price Level", fontsize = 20)
plt.tick_params(axis='both', which='major', labelsize=16)
def findIntersection(curve1, curve2, inc):
    # Walk both curves and return (x, y) where they (nearly) cross. If one
    # "curve" is actually a scalar x-position (a vertical supply curve), the
    # indexing below raises and the except blocks read the price off the other curve.
    try:
        for x in range(len(curve1)):
            dist = curve1[x] - curve2[x]
            if abs(dist) < inc * 1.01:
print(curve1[x])
print(curve2[x])
print("curve1 and curve2 are " + str(dist) + " units apart at x= " + str(x))
return x, curve1[x]
except:
try:
return curve1, curve2[curve1]
except:
return curve2, curve1[curve2]
def selectShiftCurve(demand, supply, shift1, shift2=None, order=1):
    # note: the ranges below use the module-level `inc` defined after these
    # function definitions, not a parameter of this function
    print(shift1)
if order == 1:
if shift1 == "Demand-Left":
return np.arange(7000,-3000, -1 * inc)
if shift1 == "Demand-Right":
return np.arange(12000,2000, -1 * inc)
if shift1 == "Supply-Left":
return np.arange(1500, 11500, 1 * inc)
if shift1 == "Supply-Right":
return np.arange(-1500,8500, 1 * inc)
if order == 2:
if shift2 == "Demand-Left" and shift1 == "Demand-Left":
return np.arange(5500,-4500, -1 * inc)
if shift2 == "Demand-Left" and shift1 == "Demand-Right":
return demand
if shift2 == "Demand-Right" and shift1 == "Demand-Right":
return np.arange(14500,4500, -1 * inc)
if shift2 == "Demand-Right" and shift1 == "Demand-Left":
return demand
if shift2 == "Supply-Left" and shift1 == "Supply-Left":
return np.arange(3000, 13000, 1 * inc)
if shift2 == "Supply-Left" and shift1 == "Supply-Right":
return supply
if shift2 == "Supply-Right" and shift1 == "Supply-Right":
return np.arange(-3000,7000, 1 * inc)
if shift2 == "Supply-Right" and shift1 == "Supply-Left":
return supply
else:
if shift2 == "Demand-Left":
return np.arange(8000,-2000, -1 * inc)
if shift2 == "Demand-Right":
return np.arange(11450,1450, -1 * inc)
if shift2 == "Supply-Left":
return np.arange(1500, 11500, 1 * inc)
if shift2 == "Supply-Right":
return np.arange(-1500,8500, 1 * inc)
inc = 1
demandInc = inc
supplyInc = inc
Supply = np.arange(0, 10000, 1 * supplyInc)
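

# --- Illustrative usage sketch (not part of the original script) -------------
# The rest of the original file is not shown here, so the block below is only a
# hedged example of how the plotting helpers above could be driven. The Demand
# array, the output filenames and the figure title are assumptions; it relies
# on the numpy / matplotlib.pyplot / PdfPages imports at the top of this script.
if __name__ == "__main__":
    Demand = np.arange(10000, 0, -1 * demandInc)  # assumed downward-sloping demand
    example_pp = PdfPages("ad_as_examples.pdf")   # hypothetical output file
    supplyAndDemandWithShifts(Supply, Demand, vertSupply=False,
                              shift1="Supply-Left",
                              name="AD-AS Supply Shock Example",
                              pp=example_pp)
    example_pp.close()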
#!/usr/bin/env python3
import argparse
import numpy as np
import csv
MIN_STD = 0.01
class IctDatum:
def __init__(self, name, weights, data):
self.name = name
self.weights = weights
self.data = data
    def normalize_data(self):
        # column-wise totals; guard against division by zero before normalizing
        dsums = np.sum(self.data, axis=0)
        dsums[dsums == 0] = 1
        return self.data/dsums
def parse_ict(filename):
ict_data = []
cur_name = ''
data = []
weights = []
first = True
with open(filename, 'r') as ict_file:
ict_reader = csv.reader(ict_file, delimiter=',')
for row in ict_reader:
if len(row) >= 2:
split_label = row[0].split('_')
name = split_label[0]
weight = split_label[1][1:].replace('.', ',')
if name != cur_name:
if not first:
                        # new mid (name), add completed entry to ict_data
ict_data.append(IctDatum(cur_name, weights, data))
else:
first = False
# start new data set
cur_name = name
weights = [weight]
data = [list(map(float, (row[1:])))]
else:
# append data
weights.append(weight)
data.append(list(map(float, (row[1:]))))
# write final dataset
ict_data.append(IctDatum(cur_name, weights, data))
return ict_data
def to_fluxml(name, weight, value, stddev):
    print(' <datum id="{0}" stddev="{1}" weight="{2}">{3}</datum>'.format(name, stddev, weight, value))
def main():
    parser = argparse.ArgumentParser(description='Convert the output of ICT to fluxml notation')
parser.add_argument('filename', metavar='corr.txt', nargs=1, help='the file with the corrected data from ICT')
args = parser.parse_args()
ict_data = parse_ict(args.filename[0])
for ict_datum in ict_data:
normalized_data = ict_datum.normalize_data()
for i in range(0, len(normalized_data)):
mgroup_name = ict_datum.name
weight = ict_datum.weights[i]
stddev = max(MIN_STD, np.std(normalized_data[i]))
            value = np.mean(normalized_data[i])
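            # --- sketch of the presumed remainder (truncated in this excerpt) ---
            # to_fluxml() takes exactly the four values computed in this loop, so
            # the original presumably emits one <datum> line per weight here.
            to_fluxml(mgroup_name, weight, value, stddev)


# Assumed entry point; the original tail of the file is not shown.
if __name__ == '__main__':
    main()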
"""
Author: <NAME>
Affiliation: NAIST & OSX
"""
from typing import Any, Optional
import gym
import jax.numpy as jnp
import numpy as np
from chex import Array
from matplotlib.axes import Axes
from shinrl import OBS_FN, REW_FN, TRAN_FN, ShinEnv
from .core import calc, plot
from .core.config import PendulumConfig
class Pendulum(ShinEnv):
"""Dynamics and reward are based on OpenAI gym's implementation of Pendulum-v0"""
DefaultConfig = PendulumConfig
@property
def config(self) -> PendulumConfig:
return self._config
@property
def dS(self) -> int:
return self.config.theta_res * self.config.vel_res
@property
def dA(self) -> int:
return self.config.dA
@property
def observation_space(self) -> gym.spaces.Space:
if self.config.obs_mode == PendulumConfig.OBS_MODE.tuple:
space = gym.spaces.Box(
low=np.array([0, 0, -self.config.vel_max]),
high=np.array([1, 1, self.config.vel_max]),
dtype=float,
)
elif self.config.obs_mode == PendulumConfig.OBS_MODE.image:
space = gym.spaces.Box(
low=np.zeros((28, 28, 1)),
high=np.ones((28, 28, 1)),
dtype=float,
)
return space
@property
def action_space(self) -> gym.spaces.Space:
if self.config.act_mode == PendulumConfig.ACT_MODE.discrete:
space = gym.spaces.Discrete(self.config.dA)
elif self.config.act_mode == PendulumConfig.ACT_MODE.continuous:
space = gym.spaces.Box(
low=np.array(-self.config.torque_max),
high=np.array(self.config.torque_max),
dtype=float,
)
return space
def _init_probs(self) -> Array:
th_step = (2 * jnp.pi) / (self.config.theta_res - 1)
vel_step = (2 * self.config.vel_max) / (self.config.vel_res - 1)
ini_ths = jnp.arange(-jnp.pi, jnp.pi, th_step)
ini_vels = jnp.arange(-1, 1, vel_step)
idxs = []
for ini_th in ini_ths:
for ini_vel in ini_vels:
idxs.append(calc.th_vel_to_state(self.config, ini_th, ini_vel))
        idxs = np.unique(np.array(idxs))
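        # Sketch of the presumed continuation (the original body is truncated
        # here): spread the initial-state probability mass uniformly over the
        # computed state indices. The exact construction may differ upstream.
        probs = np.zeros(self.dS)
        probs[idxs] = 1.0 / len(idxs)
        return jnp.asarray(probs)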
# -*- coding: utf-8 -*-
"""Tools for working with epoched data."""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD-3-Clause
from functools import partial
from collections import Counter
from copy import deepcopy
import json
import operator
import os.path as op
import numpy as np
from .io.utils import _construct_bids_filename
from .io.write import (start_and_end_file, start_block, end_block,
write_int, write_float, write_float_matrix,
write_double_matrix, write_complex_float_matrix,
write_complex_double_matrix, write_id, write_string,
_get_split_size, _NEXT_FILE_BUFFER, INT32_MAX)
from .io.meas_info import (read_meas_info, write_meas_info, _merge_info,
_ensure_infos_match)
from .io.open import fiff_open, _get_next_fname
from .io.tree import dir_tree_find
from .io.tag import read_tag, read_tag_info
from .io.constants import FIFF
from .io.fiff.raw import _get_fname_rep
from .io.pick import (channel_indices_by_type, channel_type,
pick_channels, pick_info, _pick_data_channels,
_DATA_CH_TYPES_SPLIT, _picks_to_idx)
from .io.proj import setup_proj, ProjMixin
from .io.base import BaseRaw, TimeMixin, _get_ch_factors
from .bem import _check_origin
from .evoked import EvokedArray, _check_decim
from .baseline import rescale, _log_rescale, _check_baseline
from .channels.channels import (ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin)
from .filter import detrend, FilterMixin, _check_fun
from .parallel import parallel_func
from .event import _read_events_fif, make_fixed_length_events
from .fixes import rng_uniform
from .viz import (plot_epochs, plot_epochs_psd, plot_epochs_psd_topomap,
plot_epochs_image, plot_topo_image_epochs, plot_drop_log)
from .utils import (_check_fname, check_fname, logger, verbose,
_time_mask, check_random_state, warn, _pl,
sizeof_fmt, SizeMixin, copy_function_doc_to_method_doc,
_check_pandas_installed,
_check_preload, GetEpochsMixin,
_prepare_read_metadata, _prepare_write_metadata,
_check_event_id, _gen_events, _check_option,
_check_combine, ShiftTimeMixin, _build_data_frame,
_check_pandas_index_arguments, _convert_times,
_scale_dataframe_data, _check_time_format, object_size,
_on_missing, _validate_type, _ensure_events,
_path_like)
from .utils.docs import fill_doc
from .data.html_templates import epochs_template
def _pack_reject_params(epochs):
reject_params = dict()
for key in ('reject', 'flat', 'reject_tmin', 'reject_tmax'):
val = getattr(epochs, key, None)
if val is not None:
reject_params[key] = val
return reject_params
def _save_split(epochs, fname, part_idx, n_parts, fmt, split_naming,
overwrite):
"""Split epochs.
Anything new added to this function also needs to be added to
BaseEpochs.save to account for new file sizes.
"""
# insert index in filename
base, ext = op.splitext(fname)
if part_idx > 0:
if split_naming == 'neuromag':
fname = '%s-%d%s' % (base, part_idx, ext)
else:
assert split_naming == 'bids'
fname = _construct_bids_filename(base, ext, part_idx,
validate=False)
_check_fname(fname, overwrite=overwrite)
next_fname = None
if part_idx < n_parts - 1:
if split_naming == 'neuromag':
next_fname = '%s-%d%s' % (base, part_idx + 1, ext)
else:
assert split_naming == 'bids'
next_fname = _construct_bids_filename(base, ext, part_idx + 1,
validate=False)
next_idx = part_idx + 1
else:
next_idx = None
with start_and_end_file(fname) as fid:
_save_part(fid, epochs, fmt, n_parts, next_fname, next_idx)
def _save_part(fid, epochs, fmt, n_parts, next_fname, next_idx):
info = epochs.info
meas_id = info['meas_id']
start_block(fid, FIFF.FIFFB_MEAS)
write_id(fid, FIFF.FIFF_BLOCK_ID)
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
# Write measurement info
write_meas_info(fid, info)
# One or more evoked data sets
start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
start_block(fid, FIFF.FIFFB_MNE_EPOCHS)
# write events out after getting data to ensure bad events are dropped
data = epochs.get_data()
_check_option('fmt', fmt, ['single', 'double'])
if np.iscomplexobj(data):
if fmt == 'single':
write_function = write_complex_float_matrix
elif fmt == 'double':
write_function = write_complex_double_matrix
else:
if fmt == 'single':
write_function = write_float_matrix
elif fmt == 'double':
write_function = write_double_matrix
start_block(fid, FIFF.FIFFB_MNE_EVENTS)
write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, epochs.events.T)
write_string(fid, FIFF.FIFF_DESCRIPTION, _event_id_string(epochs.event_id))
end_block(fid, FIFF.FIFFB_MNE_EVENTS)
# Metadata
if epochs.metadata is not None:
start_block(fid, FIFF.FIFFB_MNE_METADATA)
metadata = _prepare_write_metadata(epochs.metadata)
write_string(fid, FIFF.FIFF_DESCRIPTION, metadata)
end_block(fid, FIFF.FIFFB_MNE_METADATA)
# First and last sample
first = int(round(epochs.tmin * info['sfreq'])) # round just to be safe
last = first + len(epochs.times) - 1
write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first)
write_int(fid, FIFF.FIFF_LAST_SAMPLE, last)
# save baseline
if epochs.baseline is not None:
bmin, bmax = epochs.baseline
write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, bmin)
write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, bmax)
# The epochs itself
decal = np.empty(info['nchan'])
for k in range(info['nchan']):
decal[k] = 1.0 / (info['chs'][k]['cal'] *
info['chs'][k].get('scale', 1.0))
data *= decal[np.newaxis, :, np.newaxis]
write_function(fid, FIFF.FIFF_EPOCH, data)
# undo modifications to data
data /= decal[np.newaxis, :, np.newaxis]
write_string(fid, FIFF.FIFF_MNE_EPOCHS_DROP_LOG,
json.dumps(epochs.drop_log))
reject_params = _pack_reject_params(epochs)
if reject_params:
write_string(fid, FIFF.FIFF_MNE_EPOCHS_REJECT_FLAT,
json.dumps(reject_params))
write_int(fid, FIFF.FIFF_MNE_EPOCHS_SELECTION,
epochs.selection)
# And now write the next file info in case epochs are split on disk
if next_fname is not None and n_parts > 1:
start_block(fid, FIFF.FIFFB_REF)
write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE)
write_string(fid, FIFF.FIFF_REF_FILE_NAME, op.basename(next_fname))
if meas_id is not None:
write_id(fid, FIFF.FIFF_REF_FILE_ID, meas_id)
write_int(fid, FIFF.FIFF_REF_FILE_NUM, next_idx)
end_block(fid, FIFF.FIFFB_REF)
end_block(fid, FIFF.FIFFB_MNE_EPOCHS)
end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
end_block(fid, FIFF.FIFFB_MEAS)
def _event_id_string(event_id):
return ';'.join([k + ':' + str(v) for k, v in event_id.items()])
def _merge_events(events, event_id, selection):
"""Merge repeated events."""
event_id = event_id.copy()
new_events = events.copy()
event_idxs_to_delete = list()
unique_events, counts = np.unique(events[:, 0], return_counts=True)
for ev in unique_events[counts > 1]:
# indices at which the non-unique events happened
idxs = (events[:, 0] == ev).nonzero()[0]
# Figure out new value for events[:, 1]. Set to 0, if mixed vals exist
unique_priors = np.unique(events[idxs, 1])
new_prior = unique_priors[0] if len(unique_priors) == 1 else 0
# If duplicate time samples have same event val, "merge" == "drop"
# and no new event_id key will be created
ev_vals = np.unique(events[idxs, 2])
if len(ev_vals) <= 1:
new_event_val = ev_vals[0]
# Else, make a new event_id for the merged event
else:
# Find all event_id keys involved in duplicated events. These
# keys will be merged to become a new entry in "event_id"
event_id_keys = list(event_id.keys())
event_id_vals = list(event_id.values())
new_key_comps = [event_id_keys[event_id_vals.index(value)]
for value in ev_vals]
# Check if we already have an entry for merged keys of duplicate
# events ... if yes, reuse it
for key in event_id:
if set(key.split('/')) == set(new_key_comps):
new_event_val = event_id[key]
break
# Else, find an unused value for the new key and make an entry into
# the event_id dict
else:
ev_vals = np.unique(
np.concatenate((list(event_id.values()),
events[:, 1:].flatten()),
axis=0))
if ev_vals[0] > 1:
new_event_val = 1
else:
diffs = np.diff(ev_vals)
idx = np.where(diffs > 1)[0]
idx = -1 if len(idx) == 0 else idx[0]
new_event_val = ev_vals[idx] + 1
new_event_id_key = '/'.join(sorted(new_key_comps))
event_id[new_event_id_key] = int(new_event_val)
# Replace duplicate event times with merged event and remember which
# duplicate indices to delete later
new_events[idxs[0], 1] = new_prior
new_events[idxs[0], 2] = new_event_val
event_idxs_to_delete.extend(idxs[1:])
# Delete duplicate event idxs
new_events = np.delete(new_events, event_idxs_to_delete, 0)
new_selection = np.delete(selection, event_idxs_to_delete, 0)
return new_events, event_id, new_selection
def _handle_event_repeated(events, event_id, event_repeated, selection,
drop_log):
"""Handle repeated events.
Note that drop_log will be modified inplace
"""
assert len(events) == len(selection)
selection = np.asarray(selection)
unique_events, u_ev_idxs = np.unique(events[:, 0], return_index=True)
# Return early if no duplicates
if len(unique_events) == len(events):
return events, event_id, selection, drop_log
# Else, we have duplicates. Triage ...
_check_option('event_repeated', event_repeated, ['error', 'drop', 'merge'])
drop_log = list(drop_log)
if event_repeated == 'error':
raise RuntimeError('Event time samples were not unique. Consider '
                           'setting the `event_repeated` parameter.')
elif event_repeated == 'drop':
logger.info('Multiple event values for single event times found. '
'Keeping the first occurrence and dropping all others.')
new_events = events[u_ev_idxs]
new_selection = selection[u_ev_idxs]
drop_ev_idxs = np.setdiff1d(selection, new_selection)
for idx in drop_ev_idxs:
drop_log[idx] = drop_log[idx] + ('DROP DUPLICATE',)
selection = new_selection
elif event_repeated == 'merge':
logger.info('Multiple event values for single event times found. '
'Creating new event value to reflect simultaneous events.')
new_events, event_id, new_selection = \
_merge_events(events, event_id, selection)
drop_ev_idxs = np.setdiff1d(selection, new_selection)
for idx in drop_ev_idxs:
drop_log[idx] = drop_log[idx] + ('MERGE DUPLICATE',)
selection = new_selection
drop_log = tuple(drop_log)
# Remove obsolete kv-pairs from event_id after handling
keys = new_events[:, 1:].flatten()
event_id = {k: v for k, v in event_id.items() if v in keys}
return new_events, event_id, selection, drop_log
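

# Illustrative sketch (not part of the module): how the 'merge' strategy above
# resolves two different event values landing on the same sample. The toy
# arrays are made up for illustration only.
#
#     >>> import numpy as np
#     >>> events = np.array([[10, 0, 1], [10, 0, 2], [20, 0, 1]])
#     >>> ev, eid, sel, log = _handle_event_repeated(
#     ...     events, {'a': 1, 'b': 2}, 'merge', np.arange(3), ((), (), ()))
#     >>> ev[0, 2], eid['a/b']  # duplicates collapsed into a new merged id
#     (3, 3)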
@fill_doc
class BaseEpochs(ProjMixin, ContainsMixin, UpdateChannelsMixin, ShiftTimeMixin,
SetChannelsMixin, InterpolationMixin, FilterMixin,
TimeMixin, SizeMixin, GetEpochsMixin):
"""Abstract base class for `~mne.Epochs`-type classes.
.. warning:: This class provides basic functionality and should never be
instantiated directly.
Parameters
----------
%(info_not_none)s
data : ndarray | None
If ``None``, data will be read from the Raw object. If ndarray, must be
of shape (n_epochs, n_channels, n_times).
%(events_epochs)s
%(event_id)s
%(epochs_tmin_tmax)s
%(baseline_epochs)s
        Defaults to ``(None, 0)``, i.e. beginning of the data until
time point zero.
%(epochs_raw)s
%(picks_all)s
%(reject_epochs)s
%(flat)s
%(decim)s
%(epochs_reject_tmin_tmax)s
%(epochs_detrend)s
%(proj_epochs)s
%(epochs_on_missing)s
preload_at_end : bool
%(epochs_preload)s
selection : iterable | None
Iterable of indices of selected epochs. If ``None``, will be
automatically generated, corresponding to all non-zero events.
drop_log : tuple | None
Tuple of tuple of strings indicating which epochs have been marked to
be ignored.
filename : str | None
The filename (if the epochs are read from disk).
%(epochs_metadata)s
%(epochs_event_repeated)s
%(verbose)s
Notes
-----
The ``BaseEpochs`` class is public to allow for stable type-checking in
user code (i.e., ``isinstance(my_epochs, BaseEpochs)``) but should not be
used as a constructor for Epochs objects (use instead :class:`mne.Epochs`).
"""
@verbose
def __init__(self, info, data, events, event_id=None, tmin=-0.2, tmax=0.5,
baseline=(None, 0), raw=None, picks=None, reject=None,
flat=None, decim=1, reject_tmin=None, reject_tmax=None,
detrend=None, proj=True, on_missing='raise',
preload_at_end=False, selection=None, drop_log=None,
filename=None, metadata=None, event_repeated='error',
verbose=None): # noqa: D102
self.verbose = verbose
if events is not None: # RtEpochs can have events=None
events = _ensure_events(events)
events_max = events.max()
if events_max > INT32_MAX:
raise ValueError(
f'events array values must not exceed {INT32_MAX}, '
f'got {events_max}')
event_id = _check_event_id(event_id, events)
self.event_id = event_id
del event_id
if events is not None: # RtEpochs can have events=None
for key, val in self.event_id.items():
if val not in events[:, 2]:
msg = ('No matching events found for %s '
'(event id %i)' % (key, val))
_on_missing(on_missing, msg)
# ensure metadata matches original events size
self.selection = np.arange(len(events))
self.events = events
# same as self.metadata = metadata, but suppress log in favor
# of logging below (after setting self.selection)
GetEpochsMixin.metadata.fset(self, metadata, verbose=False)
del events
values = list(self.event_id.values())
selected = np.where(np.in1d(self.events[:, 2], values))[0]
if selection is None:
selection = selected
else:
selection = np.array(selection, int)
if selection.shape != (len(selected),):
raise ValueError('selection must be shape %s got shape %s'
% (selected.shape, selection.shape))
self.selection = selection
if drop_log is None:
self.drop_log = tuple(
() if k in self.selection else ('IGNORED',)
for k in range(max(len(self.events),
max(self.selection) + 1)))
else:
self.drop_log = drop_log
self.events = self.events[selected]
self.events, self.event_id, self.selection, self.drop_log = \
_handle_event_repeated(
self.events, self.event_id, event_repeated,
self.selection, self.drop_log)
# then subselect
sub = np.where(np.in1d(selection, self.selection))[0]
if isinstance(metadata, list):
metadata = [metadata[s] for s in sub]
elif metadata is not None:
metadata = metadata.iloc[sub]
# Remove temporarily set metadata from above, and set
# again to get the correct log ("adding metadata", instead of
# "replacing existing metadata")
GetEpochsMixin.metadata.fset(self, None, verbose=False)
self.metadata = metadata
del metadata
n_events = len(self.events)
if n_events > 1:
if np.diff(self.events.astype(np.int64)[:, 0]).min() <= 0:
warn('The events passed to the Epochs constructor are not '
'chronologically ordered.', RuntimeWarning)
if n_events > 0:
logger.info('%d matching events found' % n_events)
else:
raise ValueError('No desired events found.')
else:
self.drop_log = tuple()
self.selection = np.array([], int)
self.metadata = metadata
# do not set self.events here, let subclass do it
if (detrend not in [None, 0, 1]) or isinstance(detrend, bool):
raise ValueError('detrend must be None, 0, or 1')
self.detrend = detrend
self._raw = raw
info._check_consistency()
self.picks = _picks_to_idx(info, picks, none='all', exclude=(),
allow_empty=False)
self.info = pick_info(info, self.picks)
del info
self._current = 0
if data is None:
self.preload = False
self._data = None
self._do_baseline = True
else:
assert decim == 1
if data.ndim != 3 or data.shape[2] != \
round((tmax - tmin) * self.info['sfreq']) + 1:
raise RuntimeError('bad data shape')
if data.shape[0] != len(self.events):
raise ValueError(
'The number of epochs and the number of events must match')
self.preload = True
self._data = data
self._do_baseline = False
self._offset = None
if tmin > tmax:
raise ValueError('tmin has to be less than or equal to tmax')
# Handle times
sfreq = float(self.info['sfreq'])
start_idx = int(round(tmin * sfreq))
self._raw_times = np.arange(start_idx,
int(round(tmax * sfreq)) + 1) / sfreq
self._set_times(self._raw_times)
# check reject_tmin and reject_tmax
if reject_tmin is not None:
if (np.isclose(reject_tmin, tmin)):
# adjust for potential small deviations due to sampling freq
reject_tmin = self.tmin
elif reject_tmin < tmin:
raise ValueError(f'reject_tmin needs to be None or >= tmin '
f'(got {reject_tmin})')
if reject_tmax is not None:
if (np.isclose(reject_tmax, tmax)):
# adjust for potential small deviations due to sampling freq
reject_tmax = self.tmax
elif reject_tmax > tmax:
raise ValueError(f'reject_tmax needs to be None or <= tmax '
f'(got {reject_tmax})')
if (reject_tmin is not None) and (reject_tmax is not None):
if reject_tmin >= reject_tmax:
                raise ValueError(f'reject_tmin ({reject_tmin}) needs to be '
                                 f'< reject_tmax ({reject_tmax})')
self.reject_tmin = reject_tmin
self.reject_tmax = reject_tmax
# decimation
self._decim = 1
self.decimate(decim)
# baseline correction: replace `None` tuple elements with actual times
self.baseline = _check_baseline(baseline, times=self.times,
sfreq=self.info['sfreq'])
if self.baseline is not None and self.baseline != baseline:
logger.info(f'Setting baseline interval to '
f'[{self.baseline[0]}, {self.baseline[1]}] sec')
logger.info(_log_rescale(self.baseline))
# setup epoch rejection
self.reject = None
self.flat = None
self._reject_setup(reject, flat)
# do the rest
valid_proj = [True, 'delayed', False]
if proj not in valid_proj:
raise ValueError('"proj" must be one of %s, not %s'
% (valid_proj, proj))
if proj == 'delayed':
self._do_delayed_proj = True
logger.info('Entering delayed SSP mode.')
else:
self._do_delayed_proj = False
activate = False if self._do_delayed_proj else proj
self._projector, self.info = setup_proj(self.info, False,
activate=activate)
if preload_at_end:
assert self._data is None
assert self.preload is False
self.load_data() # this will do the projection
elif proj is True and self._projector is not None and data is not None:
# let's make sure we project if data was provided and proj
# requested
# we could do this with np.einsum, but iteration should be
# more memory safe in most instances
for ii, epoch in enumerate(self._data):
self._data[ii] = np.dot(self._projector, epoch)
self._filename = str(filename) if filename is not None else filename
self._check_consistency()
def _check_consistency(self):
"""Check invariants of epochs object."""
if hasattr(self, 'events'):
assert len(self.selection) == len(self.events)
assert len(self.drop_log) >= len(self.events)
assert len(self.selection) == sum(
(len(dl) == 0 for dl in self.drop_log))
assert hasattr(self, '_times_readonly')
assert not self.times.flags['WRITEABLE']
assert isinstance(self.drop_log, tuple)
assert all(isinstance(log, tuple) for log in self.drop_log)
assert all(isinstance(s, str) for log in self.drop_log for s in log)
def reset_drop_log_selection(self):
"""Reset the drop_log and selection entries.
This method will simplify ``self.drop_log`` and ``self.selection``
so that they are meaningless (tuple of empty tuples and increasing
integers, respectively). This can be useful when concatenating
many Epochs instances, as ``drop_log`` can accumulate many entries
which can become problematic when saving.
"""
self.selection = np.arange(len(self.events))
self.drop_log = (tuple(),) * len(self.events)
self._check_consistency()
def load_data(self):
"""Load the data if not already preloaded.
Returns
-------
epochs : instance of Epochs
The epochs object.
Notes
-----
This function operates in-place.
.. versionadded:: 0.10.0
"""
if self.preload:
return self
self._data = self._get_data()
self.preload = True
self._do_baseline = False
self._decim_slice = slice(None, None, None)
self._decim = 1
self._raw_times = self.times
assert self._data.shape[-1] == len(self.times)
self._raw = None # shouldn't need it anymore
return self
@verbose
def decimate(self, decim, offset=0, verbose=None):
"""Decimate the epochs.
Parameters
----------
%(decim)s
%(decim_offset)s
%(verbose_meth)s
Returns
-------
epochs : instance of Epochs
The decimated Epochs object.
See Also
--------
mne.Evoked.decimate
mne.Epochs.resample
mne.io.Raw.resample
Notes
-----
%(decim_notes)s
If ``decim`` is 1, this method does not copy the underlying data.
.. versionadded:: 0.10.0
References
----------
.. footbibliography::
"""
decim, offset, new_sfreq = _check_decim(self.info, decim, offset)
start_idx = int(round(-self._raw_times[0] * (self.info['sfreq'] *
self._decim)))
self._decim *= decim
i_start = start_idx % self._decim + offset
decim_slice = slice(i_start, None, self._decim)
with self.info._unlock():
self.info['sfreq'] = new_sfreq
if self.preload:
if decim != 1:
self._data = self._data[:, :, decim_slice].copy()
self._raw_times = self._raw_times[decim_slice].copy()
else:
self._data = np.ascontiguousarray(self._data)
self._decim_slice = slice(None)
self._decim = 1
else:
self._decim_slice = decim_slice
self._set_times(self._raw_times[self._decim_slice])
return self
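
    # Illustrative sketch, not part of the class: decimating epochs sampled at
    # 1000 Hz by a factor of 4 keeps every 4th sample and yields sfreq == 250.
    #
    #     >>> epochs.decimate(4)        # doctest: +SKIP
    #     >>> epochs.info['sfreq']      # doctest: +SKIP
    #     250.0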
@verbose
def apply_baseline(self, baseline=(None, 0), *, verbose=None):
"""Baseline correct epochs.
Parameters
----------
%(baseline_epochs)s
            Defaults to ``(None, 0)``, i.e. beginning of the data until
time point zero.
%(verbose_meth)s
Returns
-------
epochs : instance of Epochs
The baseline-corrected Epochs object.
Notes
-----
Baseline correction can be done multiple times, but can never be
reverted once the data has been loaded.
.. versionadded:: 0.10.0
"""
baseline = _check_baseline(baseline, times=self.times,
sfreq=self.info['sfreq'])
if self.preload:
if self.baseline is not None and baseline is None:
raise RuntimeError('You cannot remove baseline correction '
'from preloaded data once it has been '
'applied.')
self._do_baseline = True
picks = self._detrend_picks
rescale(self._data, self.times, baseline, copy=False, picks=picks)
self._do_baseline = False
else: # logging happens in "rescale" in "if" branch
logger.info(_log_rescale(baseline))
assert self._do_baseline is True
self.baseline = baseline
return self
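
    # Illustrative sketch, not part of the class: re-baselining preloaded
    # epochs to the 200 ms immediately preceding the event.
    #
    #     >>> epochs.load_data().apply_baseline((-0.2, 0))  # doctest: +SKIP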
def _reject_setup(self, reject, flat):
"""Set self._reject_time and self._channel_type_idx."""
idx = channel_indices_by_type(self.info)
reject = deepcopy(reject) if reject is not None else dict()
flat = deepcopy(flat) if flat is not None else dict()
for rej, kind in zip((reject, flat), ('reject', 'flat')):
if not isinstance(rej, dict):
raise TypeError('reject and flat must be dict or None, not %s'
% type(rej))
bads = set(rej.keys()) - set(idx.keys())
if len(bads) > 0:
raise KeyError('Unknown channel types found in %s: %s'
% (kind, bads))
for key in idx.keys():
# don't throw an error if rejection/flat would do nothing
if len(idx[key]) == 0 and (np.isfinite(reject.get(key, np.inf)) or
flat.get(key, -1) >= 0):
# This is where we could eventually add e.g.
# self.allow_missing_reject_keys check to allow users to
# provide keys that don't exist in data
raise ValueError("No %s channel found. Cannot reject based on "
"%s." % (key.upper(), key.upper()))
# check for invalid values
for rej, kind in zip((reject, flat), ('Rejection', 'Flat')):
for key, val in rej.items():
if val is None or val < 0:
raise ValueError('%s value must be a number >= 0, not "%s"'
% (kind, val))
# now check to see if our rejection and flat are getting more
# restrictive
old_reject = self.reject if self.reject is not None else dict()
old_flat = self.flat if self.flat is not None else dict()
bad_msg = ('{kind}["{key}"] == {new} {op} {old} (old value), new '
'{kind} values must be at least as stringent as '
'previous ones')
# copy thresholds for channel types that were used previously, but not
# passed this time
for key in set(old_reject) - set(reject):
reject[key] = old_reject[key]
# make sure new thresholds are at least as stringent as the old ones
for key in reject:
if key in old_reject and reject[key] > old_reject[key]:
raise ValueError(
bad_msg.format(kind='reject', key=key, new=reject[key],
old=old_reject[key], op='>'))
# same for flat thresholds
for key in set(old_flat) - set(flat):
flat[key] = old_flat[key]
for key in flat:
if key in old_flat and flat[key] < old_flat[key]:
raise ValueError(
bad_msg.format(kind='flat', key=key, new=flat[key],
old=old_flat[key], op='<'))
# after validation, set parameters
self._bad_dropped = False
self._channel_type_idx = idx
self.reject = reject if len(reject) > 0 else None
self.flat = flat if len(flat) > 0 else None
if (self.reject_tmin is None) and (self.reject_tmax is None):
self._reject_time = None
else:
if self.reject_tmin is None:
reject_imin = None
else:
idxs = np.nonzero(self.times >= self.reject_tmin)[0]
reject_imin = idxs[0]
if self.reject_tmax is None:
reject_imax = None
else:
idxs = np.nonzero(self.times <= self.reject_tmax)[0]
reject_imax = idxs[-1]
self._reject_time = slice(reject_imin, reject_imax)
@verbose # verbose is used by mne-realtime
def _is_good_epoch(self, data, verbose=None):
"""Determine if epoch is good."""
if isinstance(data, str):
return False, (data,)
if data is None:
return False, ('NO_DATA',)
n_times = len(self.times)
if data.shape[1] < n_times:
# epoch is too short ie at the end of the data
return False, ('TOO_SHORT',)
if self.reject is None and self.flat is None:
return True, None
else:
if self._reject_time is not None:
data = data[:, self._reject_time]
return _is_good(data, self.ch_names, self._channel_type_idx,
self.reject, self.flat, full_report=True,
ignore_chs=self.info['bads'])
@verbose
def _detrend_offset_decim(self, epoch, picks, verbose=None):
"""Aux Function: detrend, baseline correct, offset, decim.
Note: operates inplace
"""
if (epoch is None) or isinstance(epoch, str):
return epoch
# Detrend
if self.detrend is not None:
# We explicitly detrend just data channels (not EMG, ECG, EOG which
# are processed by baseline correction)
use_picks = _pick_data_channels(self.info, exclude=())
epoch[use_picks] = detrend(epoch[use_picks], self.detrend, axis=1)
# Baseline correct
if self._do_baseline:
rescale(
epoch, self._raw_times, self.baseline, picks=picks, copy=False,
verbose=False)
# Decimate if necessary (i.e., epoch not preloaded)
epoch = epoch[:, self._decim_slice]
# handle offset
if self._offset is not None:
epoch += self._offset
return epoch
def iter_evoked(self, copy=False):
"""Iterate over epochs as a sequence of Evoked objects.
The Evoked objects yielded will each contain a single epoch (i.e., no
averaging is performed).
This method resets the object iteration state to the first epoch.
Parameters
----------
copy : bool
If False copies of data and measurement info will be omitted
to save time.
"""
self.__iter__()
while True:
try:
out = self.__next__(True)
except StopIteration:
break
data, event_id = out
tmin = self.times[0]
info = self.info
if copy:
info = deepcopy(self.info)
data = data.copy()
yield EvokedArray(data, info, tmin, comment=str(event_id))
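
    # Illustrative sketch, not part of the class: iterating over single-trial
    # Evoked objects, e.g. to collect a per-epoch peak latency.
    #
    #     >>> peaks = [ev.get_peak() for ev in epochs.iter_evoked()]  # doctest: +SKIP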
def subtract_evoked(self, evoked=None):
"""Subtract an evoked response from each epoch.
Can be used to exclude the evoked response when analyzing induced
activity, see e.g. [1]_.
Parameters
----------
evoked : instance of Evoked | None
The evoked response to subtract. If None, the evoked response
is computed from Epochs itself.
Returns
-------
self : instance of Epochs
The modified instance (instance is also modified inplace).
References
----------
.. [1] David et al. "Mechanisms of evoked and induced responses in
MEG/EEG", NeuroImage, vol. 31, no. 4, pp. 1580-1591, July 2006.
"""
logger.info('Subtracting Evoked from Epochs')
if evoked is None:
picks = _pick_data_channels(self.info, exclude=[])
evoked = self.average(picks)
# find the indices of the channels to use
picks = pick_channels(evoked.ch_names, include=self.ch_names)
# make sure the omitted channels are not data channels
if len(picks) < len(self.ch_names):
sel_ch = [evoked.ch_names[ii] for ii in picks]
diff_ch = list(set(self.ch_names).difference(sel_ch))
diff_idx = [self.ch_names.index(ch) for ch in diff_ch]
diff_types = [channel_type(self.info, idx) for idx in diff_idx]
bad_idx = [diff_types.index(t) for t in diff_types if t in
_DATA_CH_TYPES_SPLIT]
if len(bad_idx) > 0:
bad_str = ', '.join([diff_ch[ii] for ii in bad_idx])
raise ValueError('The following data channels are missing '
'in the evoked response: %s' % bad_str)
logger.info(' The following channels are not included in the '
'subtraction: %s' % ', '.join(diff_ch))
# make sure the times match
if (len(self.times) != len(evoked.times) or
np.max(np.abs(self.times - evoked.times)) >= 1e-7):
raise ValueError('Epochs and Evoked object do not contain '
'the same time points.')
# handle SSPs
if not self.proj and evoked.proj:
warn('Evoked has SSP applied while Epochs has not.')
if self.proj and not evoked.proj:
evoked = evoked.copy().apply_proj()
# find the indices of the channels to use in Epochs
ep_picks = [self.ch_names.index(evoked.ch_names[ii]) for ii in picks]
# do the subtraction
if self.preload:
self._data[:, ep_picks, :] -= evoked.data[picks][None, :, :]
else:
if self._offset is None:
self._offset = np.zeros((len(self.ch_names), len(self.times)),
dtype=np.float64)
self._offset[ep_picks] -= evoked.data[picks]
logger.info('[done]')
return self
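
    # Illustrative sketch, not part of the class: removing the phase-locked
    # (evoked) component before an induced-activity analysis.
    #
    #     >>> epochs_induced = epochs.copy().subtract_evoked()  # doctest: +SKIP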
@fill_doc
def average(self, picks=None, method="mean", by_event_type=False):
"""Compute an average over epochs.
Parameters
----------
%(picks_all_data)s
method : str | callable
How to combine the data. If "mean"/"median", the mean/median
are returned.
Otherwise, must be a callable which, when passed an array of shape
(n_epochs, n_channels, n_time) returns an array of shape
(n_channels, n_time).
Note that due to file type limitations, the kind for all
these will be "average".
%(by_event_type)s
Returns
-------
%(by_event_type_returns_average)s
Notes
-----
Computes an average of all epochs in the instance, even if
they correspond to different conditions. To average by condition,
do ``epochs[condition].average()`` for each condition separately.
When picks is None and epochs contain only ICA channels, no channels
are selected, resulting in an error. This is because ICA channels
are not considered data channels (they are of misc type) and only data
channels are selected when picks is None.
The ``method`` parameter allows e.g. robust averaging.
For example, one could do:
>>> from scipy.stats import trim_mean # doctest:+SKIP
>>> trim = lambda x: trim_mean(x, 0.1, axis=0) # doctest:+SKIP
>>> epochs.average(method=trim) # doctest:+SKIP
This would compute the trimmed mean.
"""
if by_event_type:
evokeds = list()
for event_type in self.event_id.keys():
ev = self[event_type]._compute_aggregate(picks=picks,
mode=method)
ev.comment = event_type
evokeds.append(ev)
else:
evokeds = self._compute_aggregate(picks=picks, mode=method)
return evokeds
@fill_doc
def standard_error(self, picks=None, by_event_type=False):
"""Compute standard error over epochs.
Parameters
----------
%(picks_all_data)s
%(by_event_type)s
Returns
-------
%(by_event_type_returns_stderr)s
"""
return self.average(picks=picks, method="std",
by_event_type=by_event_type)
def _compute_aggregate(self, picks, mode='mean'):
"""Compute the mean, median, or std over epochs and return Evoked."""
# if instance contains ICA channels they won't be included unless picks
# is specified
if picks is None:
check_ICA = [x.startswith('ICA') for x in self.ch_names]
if np.all(check_ICA):
raise TypeError('picks must be specified (i.e. not None) for '
'ICA channel data')
elif np.any(check_ICA):
warn('ICA channels will not be included unless explicitly '
'selected in picks')
n_channels = len(self.ch_names)
n_times = len(self.times)
if self.preload:
n_events = len(self.events)
fun = _check_combine(mode, valid=('mean', 'median', 'std'))
data = fun(self._data)
assert len(self.events) == len(self._data)
if data.shape != self._data.shape[1:]:
raise RuntimeError(
                    'You passed a function that resulted in data of shape {}, '
'but it should be {}.'.format(
data.shape, self._data.shape[1:]))
else:
if mode not in {"mean", "std"}:
raise ValueError("If data are not preloaded, can only compute "
"mean or standard deviation.")
data = np.zeros((n_channels, n_times))
n_events = 0
for e in self:
if np.iscomplexobj(e):
data = data.astype(np.complex128)
data += e
n_events += 1
if n_events > 0:
data /= n_events
else:
data.fill(np.nan)
# convert to stderr if requested, could do in one pass but do in
# two (slower) in case there are large numbers
if mode == "std":
data_mean = data.copy()
data.fill(0.)
for e in self:
data += (e - data_mean) ** 2
data = np.sqrt(data / n_events)
if mode == "std":
kind = 'standard_error'
data /= np.sqrt(n_events)
else:
kind = "average"
return self._evoked_from_epoch_data(data, self.info, picks, n_events,
kind, self._name)
@property
def _name(self):
"""Give a nice string representation based on event ids."""
if len(self.event_id) == 1:
comment = next(iter(self.event_id.keys()))
else:
count = Counter(self.events[:, 2])
comments = list()
for key, value in self.event_id.items():
comments.append('%.2f × %s' % (
float(count[value]) / len(self.events), key))
comment = ' + '.join(comments)
return comment
def _evoked_from_epoch_data(self, data, info, picks, n_events, kind,
comment):
"""Create an evoked object from epoch data."""
info = deepcopy(info)
# don't apply baseline correction; we'll set evoked.baseline manually
evoked = EvokedArray(data, info, tmin=self.times[0], comment=comment,
nave=n_events, kind=kind, baseline=None,
verbose=self.verbose)
evoked.baseline = self.baseline
# the above constructor doesn't recreate the times object precisely
# due to numerical precision issues
evoked.times = self.times.copy()
# pick channels
picks = _picks_to_idx(self.info, picks, 'data_or_ica', ())
ch_names = [evoked.ch_names[p] for p in picks]
evoked.pick_channels(ch_names)
if len(evoked.info['ch_names']) == 0:
raise ValueError('No data channel found when averaging.')
if evoked.nave < 1:
warn('evoked object is empty (based on less than 1 epoch)')
return evoked
@property
def ch_names(self):
"""Channel names."""
return self.info['ch_names']
@copy_function_doc_to_method_doc(plot_epochs)
def plot(self, picks=None, scalings=None, n_epochs=20, n_channels=20,
title=None, events=None, event_color=None,
order=None, show=True, block=False, decim='auto', noise_cov=None,
butterfly=False, show_scrollbars=True, show_scalebars=True,
epoch_colors=None, event_id=None, group_by='type'):
return plot_epochs(self, picks=picks, scalings=scalings,
n_epochs=n_epochs, n_channels=n_channels,
title=title, events=events, event_color=event_color,
order=order, show=show, block=block, decim=decim,
noise_cov=noise_cov, butterfly=butterfly,
show_scrollbars=show_scrollbars,
show_scalebars=show_scalebars,
epoch_colors=epoch_colors, event_id=event_id,
group_by=group_by)
@copy_function_doc_to_method_doc(plot_epochs_psd)
def plot_psd(self, fmin=0, fmax=np.inf, tmin=None, tmax=None,
proj=False, bandwidth=None, adaptive=False, low_bias=True,
normalization='length', picks=None, ax=None, color='black',
xscale='linear', area_mode='std', area_alpha=0.33,
dB=True, estimate='auto', show=True, n_jobs=1,
average=False, line_alpha=None, spatial_colors=True,
sphere=None, exclude='bads', verbose=None):
return plot_epochs_psd(self, fmin=fmin, fmax=fmax, tmin=tmin,
tmax=tmax, proj=proj, bandwidth=bandwidth,
adaptive=adaptive, low_bias=low_bias,
normalization=normalization, picks=picks, ax=ax,
color=color, xscale=xscale, area_mode=area_mode,
area_alpha=area_alpha, dB=dB, estimate=estimate,
show=show, n_jobs=n_jobs, average=average,
line_alpha=line_alpha,
spatial_colors=spatial_colors, sphere=sphere,
exclude=exclude, verbose=verbose)
@copy_function_doc_to_method_doc(plot_epochs_psd_topomap)
def plot_psd_topomap(self, bands=None, tmin=None,
tmax=None, proj=False, bandwidth=None, adaptive=False,
low_bias=True, normalization='length', ch_type=None,
cmap=None, agg_fun=None, dB=True,
n_jobs=1, normalize=False, cbar_fmt='auto',
outlines='head', axes=None, show=True,
sphere=None, vlim=(None, None), verbose=None):
return plot_epochs_psd_topomap(
self, bands=bands, tmin=tmin, tmax=tmax,
proj=proj, bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias, normalization=normalization, ch_type=ch_type,
cmap=cmap, agg_fun=agg_fun, dB=dB, n_jobs=n_jobs,
normalize=normalize, cbar_fmt=cbar_fmt, outlines=outlines,
axes=axes, show=show, sphere=sphere, vlim=vlim, verbose=verbose)
@copy_function_doc_to_method_doc(plot_topo_image_epochs)
def plot_topo_image(self, layout=None, sigma=0., vmin=None, vmax=None,
colorbar=None, order=None, cmap='RdBu_r',
layout_scale=.95, title=None, scalings=None,
border='none', fig_facecolor='k', fig_background=None,
font_color='w', show=True):
return plot_topo_image_epochs(
self, layout=layout, sigma=sigma, vmin=vmin, vmax=vmax,
colorbar=colorbar, order=order, cmap=cmap,
layout_scale=layout_scale, title=title, scalings=scalings,
border=border, fig_facecolor=fig_facecolor,
fig_background=fig_background, font_color=font_color, show=show)
@verbose
def drop_bad(self, reject='existing', flat='existing', verbose=None):
"""Drop bad epochs without retaining the epochs data.
Should be used before slicing operations.
.. warning:: This operation is slow since all epochs have to be read
from disk. To avoid reading epochs from disk multiple
times, use :meth:`mne.Epochs.load_data()`.
.. note:: To constrain the time period used for estimation of signal
quality, set ``epochs.reject_tmin`` and
``epochs.reject_tmax``, respectively.
Parameters
----------
%(reject_drop_bad)s
%(flat_drop_bad)s
%(verbose_meth)s
Returns
-------
epochs : instance of Epochs
The epochs with bad epochs dropped. Operates in-place.
Notes
-----
Dropping bad epochs can be done multiple times with different
``reject`` and ``flat`` parameters. However, once an epoch is
        dropped, it is dropped forever, so if more lenient thresholds might
        be needed later, `epochs.copy <mne.Epochs.copy>` should be used.
"""
if reject == 'existing':
if flat == 'existing' and self._bad_dropped:
return
reject = self.reject
if flat == 'existing':
flat = self.flat
if any(isinstance(rej, str) and rej != 'existing' for
rej in (reject, flat)):
raise ValueError('reject and flat, if strings, must be "existing"')
self._reject_setup(reject, flat)
self._get_data(out=False, verbose=verbose)
return self
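
    # Illustrative sketch, not part of the class: rejecting epochs whose EEG
    # peak-to-peak amplitude exceeds 150 µV or never reaches 5 µV (flat).
    #
    #     >>> epochs.drop_bad(reject=dict(eeg=150e-6),
    #     ...                 flat=dict(eeg=5e-6))      # doctest: +SKIP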
def drop_log_stats(self, ignore=('IGNORED',)):
"""Compute the channel stats based on a drop_log from Epochs.
Parameters
----------
ignore : list
The drop reasons to ignore.
Returns
-------
perc : float
Total percentage of epochs dropped.
See Also
--------
plot_drop_log
"""
return _drop_log_stats(self.drop_log, ignore)
@copy_function_doc_to_method_doc(plot_drop_log)
def plot_drop_log(self, threshold=0, n_max_plot=20, subject='Unknown subj',
color=(0.9, 0.9, 0.9), width=0.8, ignore=('IGNORED',),
show=True):
if not self._bad_dropped:
raise ValueError("You cannot use plot_drop_log since bad "
"epochs have not yet been dropped. "
"Use epochs.drop_bad().")
return plot_drop_log(self.drop_log, threshold, n_max_plot, subject,
color=color, width=width, ignore=ignore,
show=show)
@copy_function_doc_to_method_doc(plot_epochs_image)
def plot_image(self, picks=None, sigma=0., vmin=None, vmax=None,
colorbar=True, order=None, show=True, units=None,
scalings=None, cmap=None, fig=None, axes=None,
overlay_times=None, combine=None, group_by=None,
evoked=True, ts_args=None, title=None, clear=False):
return plot_epochs_image(self, picks=picks, sigma=sigma, vmin=vmin,
vmax=vmax, colorbar=colorbar, order=order,
show=show, units=units, scalings=scalings,
cmap=cmap, fig=fig, axes=axes,
overlay_times=overlay_times, combine=combine,
group_by=group_by, evoked=evoked,
ts_args=ts_args, title=title, clear=clear)
@verbose
def drop(self, indices, reason='USER', verbose=None):
"""Drop epochs based on indices or boolean mask.
.. note:: The indices refer to the current set of undropped epochs
rather than the complete set of dropped and undropped epochs.
They are therefore not necessarily consistent with any
external indices (e.g., behavioral logs). To drop epochs
based on external criteria, do not use the ``preload=True``
flag when constructing an Epochs object, and call this
method before calling the :meth:`mne.Epochs.drop_bad` or
:meth:`mne.Epochs.load_data` methods.
Parameters
----------
indices : array of int or bool
Set epochs to remove by specifying indices to remove or a boolean
mask to apply (where True values get removed). Events are
correspondingly modified.
reason : str
Reason for dropping the epochs ('ECG', 'timeout', 'blink' etc).
Default: 'USER'.
%(verbose_meth)s
Returns
-------
epochs : instance of Epochs
The epochs with indices dropped. Operates in-place.
"""
indices = np.atleast_1d(indices)
if indices.ndim > 1:
raise ValueError("indices must be a scalar or a 1-d array")
if indices.dtype == bool:
indices = np.where(indices)[0]
try_idx = np.where(indices < 0, indices + len(self.events), indices)
out_of_bounds = (try_idx < 0) | (try_idx >= len(self.events))
if out_of_bounds.any():
first = indices[out_of_bounds][0]
raise IndexError("Epoch index %d is out of bounds" % first)
keep = np.setdiff1d(np.arange(len(self.events)), try_idx)
self._getitem(keep, reason, copy=False, drop_event_id=False)
count = len(try_idx)
logger.info('Dropped %d epoch%s: %s' %
(count, _pl(count), ', '.join(map(str, np.sort(try_idx)))))
return self
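
    # Illustrative sketch, not part of the class: dropping epochs by index,
    # e.g. trials flagged in a (hypothetical) behavioral log.
    #
    #     >>> epochs.drop([2, 7, 11], reason='bad response')  # doctest: +SKIP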
def _get_epoch_from_raw(self, idx, verbose=None):
"""Get a given epoch from disk."""
raise NotImplementedError
def _project_epoch(self, epoch):
"""Process a raw epoch based on the delayed param."""
# whenever requested, the first epoch is being projected.
if (epoch is None) or isinstance(epoch, str):
# can happen if t < 0 or reject based on annotations
return epoch
proj = self._do_delayed_proj or self.proj
if self._projector is not None and proj is True:
epoch = np.dot(self._projector, epoch)
return epoch
@verbose
def _get_data(self, out=True, picks=None, item=None, *, units=None,
tmin=None, tmax=None, verbose=None):
"""Load all data, dropping bad epochs along the way.
Parameters
----------
out : bool
Return the data. Setting this to False is used to reject bad
epochs without caching all the data, which saves memory.
%(picks_all)s
item : slice | array-like | str | list | None
See docstring of get_data method.
%(units)s
tmin : int | float | None
Start time of data to get in seconds.
tmax : int | float | None
End time of data to get in seconds.
%(verbose_meth)s
"""
start, stop = self._handle_tmin_tmax(tmin, tmax)
if item is None:
item = slice(None)
elif not self._bad_dropped:
raise ValueError(
'item must be None in epochs.get_data() unless bads have been '
'dropped. Consider using epochs.drop_bad().')
select = self._item_to_select(item) # indices or slice
use_idx = np.arange(len(self.events))[select]
n_events = len(use_idx)
# in case there are no good events
if self.preload:
# we will store our result in our existing array
data = self._data
else:
# we start out with an empty array, allocate only if necessary
data = np.empty((0, len(self.info['ch_names']), len(self.times)))
msg = (f'for {n_events} events and {len(self._raw_times)} '
'original time points')
if self._decim > 1:
msg += ' (prior to decimation)'
if getattr(self._raw, "preload", False):
logger.info(f'Using data from preloaded Raw {msg} ...')
else:
logger.info(f'Loading data {msg} ...')
orig_picks = picks
if orig_picks is None:
picks = _picks_to_idx(self.info, picks, "all", exclude=())
else:
picks = _picks_to_idx(self.info, picks)
# handle units param only if we are going to return data (out==True)
if (units is not None) and out:
ch_factors = _get_ch_factors(self, units, picks)
if self._bad_dropped:
if not out:
return
if self.preload:
data = data[select]
if orig_picks is not None:
data = data[:, picks]
if units is not None:
data *= ch_factors[:, np.newaxis]
if start != 0 or stop != self.times.size:
data = data[..., start:stop]
return data
# we need to load from disk, drop, and return data
detrend_picks = self._detrend_picks
for ii, idx in enumerate(use_idx):
# faster to pre-allocate memory here
epoch_noproj = self._get_epoch_from_raw(idx)
epoch_noproj = self._detrend_offset_decim(
epoch_noproj, detrend_picks)
if self._do_delayed_proj:
epoch_out = epoch_noproj
else:
epoch_out = self._project_epoch(epoch_noproj)
if ii == 0:
data = np.empty((n_events, len(self.ch_names),
len(self.times)), dtype=epoch_out.dtype)
data[ii] = epoch_out
else:
# bads need to be dropped, this might occur after a preload
# e.g., when calling drop_bad w/new params
good_idx = []
n_out = 0
drop_log = list(self.drop_log)
assert n_events == len(self.selection)
if not self.preload:
detrend_picks = self._detrend_picks
for idx, sel in enumerate(self.selection):
if self.preload: # from memory
if self._do_delayed_proj:
epoch_noproj = self._data[idx]
epoch = self._project_epoch(epoch_noproj)
else:
epoch_noproj = None
epoch = self._data[idx]
else: # from disk
epoch_noproj = self._get_epoch_from_raw(idx)
epoch_noproj = self._detrend_offset_decim(
epoch_noproj, detrend_picks)
epoch = self._project_epoch(epoch_noproj)
epoch_out = epoch_noproj if self._do_delayed_proj else epoch
is_good, bad_tuple = self._is_good_epoch(
epoch, verbose=verbose)
if not is_good:
assert isinstance(bad_tuple, tuple)
assert all(isinstance(x, str) for x in bad_tuple)
drop_log[sel] = drop_log[sel] + bad_tuple
continue
good_idx.append(idx)
# store the epoch if there is a reason to (output or update)
if out or self.preload:
# faster to pre-allocate, then trim as necessary
if n_out == 0 and not self.preload:
data = np.empty((n_events, epoch_out.shape[0],
epoch_out.shape[1]),
dtype=epoch_out.dtype, order='C')
data[n_out] = epoch_out
n_out += 1
self.drop_log = tuple(drop_log)
del drop_log
self._bad_dropped = True
logger.info("%d bad epochs dropped" % (n_events - len(good_idx)))
# adjust the data size if there is a reason to (output or update)
if out or self.preload:
if data.flags['OWNDATA'] and data.flags['C_CONTIGUOUS']:
data.resize((n_out,) + data.shape[1:], refcheck=False)
else:
data = data[:n_out]
if self.preload:
self._data = data
        # Now update our properties (except data, which is already fixed)
self._getitem(good_idx, None, copy=False, drop_event_id=False,
select_data=False)
if out:
if orig_picks is not None:
data = data[:, picks]
if units is not None:
data *= ch_factors[:, np.newaxis]
if start != 0 or stop != self.times.size:
data = data[..., start:stop]
return data
else:
return None
@property
def _detrend_picks(self):
if self._do_baseline:
return _pick_data_channels(
self.info, with_ref_meg=True, with_aux=True, exclude=())
else:
return []
@fill_doc
def get_data(self, picks=None, item=None, units=None, tmin=None,
tmax=None):
"""Get all epochs as a 3D array.
Parameters
----------
%(picks_all)s
item : slice | array-like | str | list | None
The items to get. See :meth:`mne.Epochs.__getitem__` for
a description of valid options. This can be substantially faster
for obtaining an ndarray than :meth:`~mne.Epochs.__getitem__`
for repeated access on large Epochs objects.
None (default) is an alias for ``slice(None)``.
.. versionadded:: 0.20
%(units)s
.. versionadded:: 0.24
tmin : int | float | None
Start time of data to get in seconds.
.. versionadded:: 0.24.0
tmax : int | float | None
End time of data to get in seconds.
.. versionadded:: 0.24.0
Returns
-------
data : array of shape (n_epochs, n_channels, n_times)
A view on epochs data.
"""
return self._get_data(picks=picks, item=item, units=units, tmin=tmin,
tmax=tmax)
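        # Usage sketch (hypothetical epochs object): restrict to EEG channels and a 0-0.5 s window,
        #   arr = epochs.get_data(picks='eeg', tmin=0, tmax=0.5)
        # which returns an array of shape (n_epochs, n_picked_channels, n_window_times).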
@verbose
def apply_function(self, fun, picks=None, dtype=None, n_jobs=1,
channel_wise=True, verbose=None, **kwargs):
"""Apply a function to a subset of channels.
%(applyfun_summary_epochs)s
Parameters
----------
%(applyfun_fun)s
%(picks_all_data_noref)s
%(applyfun_dtype)s
%(n_jobs)s
%(applyfun_chwise_epo)s
%(verbose_meth)s
%(kwarg_fun)s
Returns
-------
self : instance of Epochs
The epochs object with transformed data.
"""
_check_preload(self, 'epochs.apply_function')
picks = _picks_to_idx(self.info, picks, exclude=(), with_ref_meg=False)
if not callable(fun):
raise ValueError('fun needs to be a function')
data_in = self._data
if dtype is not None and dtype != self._data.dtype:
self._data = self._data.astype(dtype)
if channel_wise:
if n_jobs == 1:
_fun = partial(_check_fun, fun, **kwargs)
# modify data inplace to save memory
for idx in picks:
self._data[:, idx, :] = np.apply_along_axis(
_fun, -1, data_in[:, idx, :])
else:
# use parallel function
parallel, p_fun, _ = parallel_func(_check_fun, n_jobs)
data_picks_new = parallel(p_fun(
fun, data_in[:, p, :], **kwargs) for p in picks)
for pp, p in enumerate(picks):
self._data[:, p, :] = data_picks_new[pp]
else:
self._data = _check_fun(fun, data_in, **kwargs)
return self
@property
def times(self):
"""Time vector in seconds."""
return self._times_readonly
def _set_times(self, times):
"""Set self._times_readonly (and make it read only)."""
# naming used to indicate that it shouldn't be
# changed directly, but rather via this method
self._times_readonly = times.copy()
self._times_readonly.flags['WRITEABLE'] = False
@property
def tmin(self):
"""First time point."""
return self.times[0]
@property
def filename(self):
"""The filename."""
return self._filename
@property
def tmax(self):
"""Last time point."""
return self.times[-1]
def __repr__(self):
"""Build string representation."""
s = ' %s events ' % len(self.events)
s += '(all good)' if self._bad_dropped else '(good & bad)'
s += ', %g - %g sec' % (self.tmin, self.tmax)
s += ', baseline '
if self.baseline is None:
s += 'off'
else:
s += f'{self.baseline[0]:g} – {self.baseline[1]:g} sec'
if self.baseline != _check_baseline(
self.baseline, times=self.times, sfreq=self.info['sfreq'],
on_baseline_outside_data='adjust'):
s += ' (baseline period was cropped after baseline correction)'
s += ', ~%s' % (sizeof_fmt(self._size),)
s += ', data%s loaded' % ('' if self.preload else ' not')
s += ', with metadata' if self.metadata is not None else ''
max_events = 10
counts = ['%r: %i' % (k, sum(self.events[:, 2] == v))
for k, v in list(self.event_id.items())[:max_events]]
if len(self.event_id) > 0:
s += ',' + '\n '.join([''] + counts)
if len(self.event_id) > max_events:
not_shown_events = len(self.event_id) - max_events
s += f"\n and {not_shown_events} more events ..."
class_name = self.__class__.__name__
class_name = 'Epochs' if class_name == 'BaseEpochs' else class_name
return '<%s | %s>' % (class_name, s)
def _repr_html_(self):
if self.baseline is None:
baseline = 'off'
else:
baseline = tuple([f'{b:.3f}' for b in self.baseline])
baseline = f'{baseline[0]} – {baseline[1]} sec'
if isinstance(self.event_id, dict):
events = ''
for k, v in sorted(self.event_id.items()):
n_events = sum(self.events[:, 2] == v)
events += f'{k}: {n_events}<br>'
elif isinstance(self.event_id, list):
events = ''
for k in self.event_id:
n_events = sum(self.events[:, 2] == k)
events += f'{k}: {n_events}<br>'
elif isinstance(self.event_id, int):
n_events = len(self.events[:, 2])
events = f'{self.event_id}: {n_events}<br>'
else:
events = None
return epochs_template.substitute(epochs=self, baseline=baseline,
events=events)
@verbose
def crop(self, tmin=None, tmax=None, include_tmax=True, verbose=None):
"""Crop a time interval from the epochs.
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
%(include_tmax)s
%(verbose_meth)s
Returns
-------
epochs : instance of Epochs
The cropped epochs object, modified in-place.
Notes
-----
%(notes_tmax_included_by_default)s
"""
# XXX this could be made to work on non-preloaded data...
_check_preload(self, 'Modifying data of epochs')
if tmin is None:
tmin = self.tmin
elif tmin < self.tmin:
warn('tmin is not in epochs time interval. tmin is set to '
'epochs.tmin')
tmin = self.tmin
if tmax is None:
tmax = self.tmax
elif tmax > self.tmax:
warn('tmax is not in epochs time interval. tmax is set to '
'epochs.tmax')
tmax = self.tmax
include_tmax = True
tmask = _time_mask(self.times, tmin, tmax, sfreq=self.info['sfreq'],
include_tmax=include_tmax)
self._set_times(self.times[tmask])
self._raw_times = self._raw_times[tmask]
self._data = self._data[:, :, tmask]
# Adjust rejection period
if self.reject_tmin is not None and self.reject_tmin < self.tmin:
logger.info(
f'reject_tmin is not in epochs time interval. '
f'Setting reject_tmin to epochs.tmin ({self.tmin} sec)')
self.reject_tmin = self.tmin
if self.reject_tmax is not None and self.reject_tmax > self.tmax:
logger.info(
f'reject_tmax is not in epochs time interval. '
f'Setting reject_tmax to epochs.tmax ({self.tmax} sec)')
self.reject_tmax = self.tmax
return self
def copy(self):
"""Return copy of Epochs instance.
Returns
-------
epochs : instance of Epochs
A copy of the object.
"""
return deepcopy(self)
def __deepcopy__(self, memodict):
"""Make a deepcopy."""
cls = self.__class__
result = cls.__new__(cls)
for k, v in self.__dict__.items():
# drop_log is immutable and _raw is private (and problematic to
# deepcopy)
if k in ('drop_log', '_raw', '_times_readonly'):
memodict[id(v)] = v
else:
v = deepcopy(v, memodict)
result.__dict__[k] = v
return result
@verbose
def save(self, fname, split_size='2GB', fmt='single', overwrite=False,
split_naming='neuromag', verbose=True):
"""Save epochs in a fif file.
Parameters
----------
fname : str
The name of the file, which should end with ``-epo.fif`` or
``-epo.fif.gz``.
split_size : str | int
Large raw files are automatically split into multiple pieces. This
parameter specifies the maximum size of each piece. If the
parameter is an integer, it specifies the size in Bytes. It is
also possible to pass a human-readable string, e.g., 100MB.
Note: Due to FIFF file limitations, the maximum split size is 2GB.
.. versionadded:: 0.10.0
fmt : str
Format to save data. Valid options are 'double' or
'single' for 64- or 32-bit float, or for 128- or
            64-bit complex numbers respectively. Note: Data are processed with
            double precision. If single precision is chosen, the saved data
            will differ slightly due to the reduced precision.
.. versionadded:: 0.17
%(overwrite)s
To overwrite original file (the same one that was loaded),
data must be preloaded upon reading. This defaults to True in 0.18
but will change to False in 0.19.
.. versionadded:: 0.18
%(split_naming)s
.. versionadded:: 0.24
%(verbose_meth)s
Notes
-----
Bad epochs will be dropped before saving the epochs to disk.
"""
check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz',
'_epo.fif', '_epo.fif.gz'))
# check for file existence and expand `~` if present
fname = _check_fname(fname=fname, overwrite=overwrite)
split_size_bytes = _get_split_size(split_size)
_check_option('fmt', fmt, ['single', 'double'])
# to know the length accurately. The get_data() call would drop
# bad epochs anyway
self.drop_bad()
# total_size tracks sizes that get split
# over_size tracks overhead (tags, things that get written to each)
if len(self) == 0:
warn('Saving epochs with no data')
total_size = 0
else:
d = self[0].get_data()
# this should be guaranteed by subclasses
assert d.dtype in ('>f8', '<f8', '>c16', '<c16')
total_size = d.nbytes * len(self)
self._check_consistency()
over_size = 0
if fmt == "single":
total_size //= 2 # 64bit data converted to 32bit before writing.
over_size += 32 # FIF tags
# Account for all the other things we write, too
# 1. meas_id block plus main epochs block
over_size += 132
# 2. measurement info (likely slight overestimate, but okay)
over_size += object_size(self.info) + 16 * len(self.info)
# 3. events and event_id in its own block
total_size += self.events.size * 4
over_size += len(_event_id_string(self.event_id)) + 72
# 4. Metadata in a block of its own
if self.metadata is not None:
total_size += len(_prepare_write_metadata(self.metadata))
over_size += 56
# 5. first sample, last sample, baseline
over_size += 40 * (self.baseline is not None) + 40
# 6. drop log: gets written to each, with IGNORE for ones that are
# not part of it. So make a fake one with all having entries.
drop_size = len(json.dumps(self.drop_log)) + 16
drop_size += 8 * (len(self.selection) - 1) # worst case: all but one
over_size += drop_size
# 7. reject params
reject_params = _pack_reject_params(self)
if reject_params:
over_size += len(json.dumps(reject_params)) + 16
# 8. selection
total_size += self.selection.size * 4
over_size += 16
# 9. end of file tags
over_size += _NEXT_FILE_BUFFER
logger.debug(f' Overhead size: {str(over_size).rjust(15)}')
logger.debug(f' Splittable size: {str(total_size).rjust(15)}')
logger.debug(f' Split size: {str(split_size_bytes).rjust(15)}')
# need at least one per
n_epochs = len(self)
n_per = total_size // n_epochs if n_epochs else 0
min_size = n_per + over_size
if split_size_bytes < min_size:
raise ValueError(
f'The split size {split_size} is too small to safely write '
'the epochs contents, minimum split size is '
f'{sizeof_fmt(min_size)} ({min_size} bytes)')
# This is like max(int(ceil(total_size / split_size)), 1) but cleaner
n_parts = max(
(total_size - 1) // (split_size_bytes - over_size) + 1, 1)
assert n_parts >= 1, n_parts
if n_parts > 1:
logger.info(f'Splitting into {n_parts} parts')
if n_parts > 100: # This must be an error
raise ValueError(
f'Split size {split_size} would result in writing '
f'{n_parts} files')
if len(self.drop_log) > 100000:
warn(f'epochs.drop_log contains {len(self.drop_log)} entries '
f'which will incur up to a {sizeof_fmt(drop_size)} writing '
f'overhead (per split file), consider using '
f'epochs.reset_drop_log_selection() prior to writing')
        epoch_idxs = np.array_split(np.arange(n_epochs), n_parts)
"""
astrodynamics2.py - python library of astrodynamical functions for ASEN 5050
Author - <NAME>
"""
from numpy import *
import numpy as np
import matplotlib.pyplot as pp
import itertools, datetime
import ephem # Pyephem celestial ephemerides
G = 6.67e-11 #N m^2/s^2
m_earth = 5.9742e24 #kg
r_earth = 6371200 #m
mu = G*m_earth
#Cartesian Unit Vectors
I = array([1.,0.,0.])
J = array([0.,1.,0.])
K = array([0.,0.,1.])
def rot1(angle,vec,deg=False):
#Angle in radians unless deg=True
if deg:
angle = angle*pi/180.
c = cos(angle)
s = sin(angle)
rotmat = array([[1, 0, 0],
[0, c, s],
[0,-1*s, c]])
rotvec = dot(rotmat,vec.reshape((-1,1)))
return rotvec.reshape(vec.shape)
def rot2(angle,vec,deg=False):
#Angle in radians unless deg=True
if deg:
angle = angle*pi/180.
c = cos(angle)
s = sin(angle)
rotmat = array([[c, 0,-1*s],
[0, 1, 0],
[s, 0, c]])
rotvec = dot(rotmat,vec.reshape((-1,1)))
return rotvec.reshape(vec.shape)
def rot3(angle,vec,deg=False):
#Angle in radians unless deg=True
if deg:
angle = angle*pi/180.
c = cos(angle)
s = sin(angle)
rotmat = array([[ c, s, 0],
[-1*s, c, 0],
[ 0, 0, 1]])
rotvec = dot(rotmat,vec.reshape((-1,1)))
return rotvec.reshape(vec.shape)
def rot_tests():
for func in [rot1,rot2,rot3]:
for vec in [array([1,0,0]),array([[1],[0],[0]]),array([1,0,0]).flatten()]:
print("Applying %s with angle=pi/2 to %s" % (func.__name__,str(vec)))
print("Result: %s" % (func(pi/2,vec)))
def convert(input,type,inputUnits,outputUnits):
#Create a dictionary of conversion factors
length_systems = ['earth radii','km','m']
time_systems = ['']
def orbitPlotter(ecc,p=nan,a=nan,inputUnits='earth radii',step=.01,planetaryRadius=1.):
#ecc = eccentricity
#p = semiparameter
#a = semimajor axis
	#nu = true anomaly
	#Parse input (nu)
	if isnan(p) and isnan(a):
		raise ValueError('Please specify either: p, the semiparameter, or a, the semimajor axis')
	elif isnan(p) and not isnan(a):
		p = a*(1-ecc**2)
	elif isnan(a) and not isnan(p):
		a = p/(1-ecc**2)
nu = arange(0,2*pi,step)
r = p/(1+ecc*cos(nu))
#convert to cartesian
x = r*cos(nu)
y = r*sin(nu)
planet_x = planetaryRadius*cos(nu)
planet_y = planetaryRadius*sin(nu)
fig = pp.figure()
ax = pp.axes(aspect='equal')
ax.plot(x,y,'b-')
ax.hold(True)
ax.plot(planet_x,planet_y,'g-')
ax.set_xlabel(inputUnits)
ax.set_title('Trajectory Plot: eccentricity=%.2f, semiparameter=%.2f, semimajor=%.2f [%s]' % (ecc,p,a,inputUnits))
return fig
def truetoeccentric(nu,ecc,a=nan,b=nan,tolerence=.00001):
	#Convert true anomaly in degrees to eccentric anomaly in degrees
#a and b are unit independent
nu = nu*pi/180.
if ~isnan(a) and isnan(b):
b = a*sqrt(1-ecc**2)
elif ~isnan(b) and isnan(a):
a = b/sqrt(1-ecc**2)
p = b**2/a
r = p/(1+ecc*cos(nu))
Efroma = arccos((r*cos(nu)+a*ecc)/a)
Efromb = arcsin(r*sin(nu)/b)
Efromecc = 2*arctan(sqrt((1-ecc)/(1+ecc))*tan(nu/2))
	if abs(Efroma-Efromb) > tolerence:
		print("Warning: Eccentric anomaly from semimajor (cosine) is not within %f rad of eccentric anomaly from semiminor (sine)" %(tolerence))
	if abs(Efroma-Efromecc) > tolerence:
		print("Warning: Eccentric anomaly from semimajor (cosine) is not within %f rad of eccentric anomaly from eccentricity (tangent)" %(tolerence))
	if abs(Efromb-Efromecc) > tolerence:
		print("Warning: Eccentric anomaly from semiminor (cosine) is not within %f rad of eccentric anomaly from eccentricity (tangent)" %(tolerence))
return Efroma*180/pi, Efromb*180/pi, Efromecc*180/pi
def eccentrictotrue(E,ecc,a=nan,b=nan,tolerence=.00001):
	#Convert eccentric anomaly in degrees to true anomaly in degrees
#takes semimajor and semiminor axes in earth radii
#a and b are unit independent
E = E*pi/180.
if ~isnan(a) and isnan(b):
b = a*sqrt(1-ecc**2)
elif ~isnan(b) and isnan(a):
a = b/sqrt(1-ecc**2)
r = a*(1-ecc*cos(E))
nufroma = arccos((a*cos(E)-a*ecc)/r)
nufromb = arcsin(b*sin(E)/r)
nufromecc = 2*arctan(sqrt((1+ecc)/(1-ecc))*tan(E/2))
	if abs(nufroma-nufromb) > tolerence:
		print("Warning: True anomaly from semimajor (cosine) is not within %f rad \n of true anomaly from semiminor (sine)" %(tolerence))
	if abs(nufroma-nufromecc) > tolerence:
		print("Warning: True anomaly from semimajor (cosine) is not within %f rad \n of true anomaly from eccentricity (tangent)" %(tolerence))
	if abs(nufromb-nufromecc) > tolerence:
		print("Warning: True anomaly from semiminor (cosine) is not within %f rad \n of true anomaly from eccentricity (tangent)" %(tolerence))
return nufroma*180/pi, nufromb*180/pi, nufromecc*180/pi
def kepler(ecc,a,E=nan,M=nan,tminustp=nan,tolerence=.001,dist_units="ER"):
#ecc is eccentricity
#a is semi-major axis in earth radii
	#nu is true anomaly in degrees
	#E is eccentric anomaly in degrees
	#M is mean anomaly in degrees
#tminustp is time since periapse in seconds
#Returns (E,M,tminustp)
#Convert Units
if dist_units == "ER":
a = a*r_earth #ER to meters
elif dist_units == "m":
a = a
elif dist_units == "km":
a = a*1000.
else:
raise ValueError("Invalid dist_units value: %s, valid options are ER,m or km" % (dist_units))
if ~isnan(E):
E = E*pi/180. #Radians
if ~isnan(M):
M = M*pi/180. #Radians
#Compute mean motion
n = sqrt(mu/a**3)
if any(~isnan([E,M,tminustp])):
if isnan(E):
if isnan(M) and not isnan(tminustp):
#Solve for M using tminustp via M = n(t-t_p)
M = n*tminustp
elif isnan(tminustp) and not isnan(M):
tminustp = M/n
#Now we have M and tminustp so we can solve for E using newton-raphson
#Use Algorithm 2 in Vallado to guess for E
if (M > -1*pi and M < 0) or M > pi:
guessE = M-ecc
else:
guessE=M+ecc
E = newtonraphsonkepler(ecc,M,guessE)
else:
M = E-ecc*sin(E)
tminustp = M/n
return E*180/pi,M*180/pi,tminustp
else:
raise ValueError('Must specify either M, E, or tminustp to solve keplers equation')
return (nan,nan,nan)
def between_minus_pi_and_pi(angle,inunit='radians'):
if inunit in ['Degrees','deg','degrees']:
angle = angle*pi/180.
if angle > 2*pi:
angle = mod(angle,2*pi)
if angle > pi:
angle = angle-2*pi
return angle
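#Example (follows from the wrapping above): between_minus_pi_and_pi(3*pi/2) returns -pi/2,
#and between_minus_pi_and_pi(270,inunit='degrees') gives the same result in radians.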
def newtonraphsonkepler(ecc,M,guess,tolerence=.001):
delta=1000.
num=1.
Eprev = guess
while delta>tolerence:
Enext = Eprev + (M-Eprev+ecc*sin(Eprev))/(1-ecc*cos(Eprev))
delta = abs(Eprev-Enext)
print("Iteration %d: E=%.10f, delta=%.10f" % (num,Enext,delta))
num+=1
Eprev = Enext
return Enext
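#Example sketch (assumed values): solve Kepler's equation M = E - ecc*sin(E)
#for M = 0.5 rad and ecc = 0.1, starting from the guess E0 = M + ecc:
#  E = newtonraphsonkepler(0.1,0.5,0.5+0.1)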
def rv2coe(Rijk,Vijk,debug=True):
#Units of R and V are km and km/sec respectively
mu_km = mu/(1000**3)
r = linalg.norm(Rijk)
v = linalg.norm(Vijk)
if debug:
print("|R|: %f" % (r))
print("|V|: %f" % (v))
a = (2./r-v**2/mu_km)**-1.
ecc_vec = ((v**2-mu_km/r)*Rijk - dot(Rijk,Vijk)*Vijk)/mu_km
ecc = linalg.norm(ecc_vec)
if debug:
print("mu_km: %f" % (mu_km))
print("semimajor: %f" %(a))
print("ecc: %f" % (ecc))
#Angular Momentum
h_vec = cross(Rijk,Vijk)
h = linalg.norm(h_vec)
if debug:
print("angular mom: %f" % (h))
print("angular mom vec: [%f,%f,%f]" % (h_vec[0],h_vec[1],h_vec[2]))
#Inclination
inc = arccos(dot(K,h_vec)/(linalg.norm(K)*h))
if debug:
print("inclination: %f" % (inc))
#Right Ascention of Ascending Node
n_vec = cross(K,h_vec) #node vector
n = linalg.norm(n_vec)
	Omega = arccos(dot(I,n_vec)/(linalg.norm(I)*n)) #RAAN comes from the node vector, not h
if n_vec[1] < 0.:
Omega = 2*pi-Omega
if debug:
print("n_vec [%f,%f,%f]" % (n_vec[0],n_vec[1],n_vec[2]))
print("n: %f" % (n))
print("Omega: %f" %(Omega))
#Argument of periapse
w = arccos(dot(n_vec,ecc_vec)/(n*ecc))
if ecc_vec[2] < 0.:
w = 2*pi-w
#True Anomaly
nu = arccos(dot(ecc_vec,Rijk)/(linalg.norm(ecc_vec)*linalg.norm(Rijk)))
if dot(Rijk,Vijk) < 0.:
nu = 2*pi - nu
#convert all angle to degrees
inc = inc*180/pi
Omega = Omega*180/pi
w = w*180/pi
nu = nu*180/pi
return a,ecc,inc,Omega,w,nu
def readTLE(line1,line2,convertMeanMotion=True):
	card1 = int(line1[0])
	#1 blank
	satnum_1 = int(line1[2:7])
	satclass = line1[7]
	#8 blank
	international_designator = line1[9:17].strip()
	id_yr = int(line1[9:11])
	id_lchan_num = int(line1[11:14])
	id_piece = line1[14:17].strip()
	#17 blank
	epoch = float(line1[18:32])
	epoch_yr = int(line1[18:20])
	if epoch_yr < 50:
		epoch_yr = epoch_yr+2000.
	else:
		epoch_yr = epoch_yr+1900.
	epoch_day = float(line1[20:32])
	satnum_2 = int(line2[2:7])
	if satnum_1 != satnum_2:
		raise ValueError("Satellite Numbers do not agree between TLE line 1 (%d) and TLE line 2 (%d)!" % (satnum_1,satnum_2))
	i = float(line2[8:16])
	RAAN = float(line2[17:25]) # Right Ascension of Ascending Node [deg]
	ecc = float("0."+line2[26:33]) # Eccentricity
	w = float(line2[34:42]) #Argument of Perigee [deg]
	M = float(line2[43:51]) #Mean Anomaly [deg]
	n = float(line2[52:63]) #Mean Motion [rev/day]
	if convertMeanMotion:
		n = n*2*pi/86400. #Rev per day to rad per second
	revnum = float(line2[63:68]) #Revolution number at epoch [revs]
return i,ecc,RAAN,w,M,n,epoch_yr,epoch_day
def coe2rv(a,ecc,i,Omega,w,nu,debug=True):
#All distances in km, all angles in degrees
#Follows Vallado 4th ed. pg. 125
mu_km = mu/(1000**3)
#All angles to radians
i = i*pi/180
Omega = Omega*pi/180
w = w*pi/180
nu = nu*pi/180
#Compute semiparameter
p = a*(1-ecc**2)
#Vectors in Perifocal frame
Rpqw = array([p*cos(nu)/(1+ecc*cos(nu)),
p*sin(nu)/(1+ecc*cos(nu)),
0.])
alpha = sqrt(mu_km/p)
Vpqw = array([-1*alpha*sin(nu),alpha*(ecc+cos(nu)),0.])
if debug:
print("Perifocal R (R_pqw): [%f,%f,%f]" % (Rpqw[0],Rpqw[1],Rpqw[2]))
print("Perifocal V (V_pqw): [%f,%f,%f]" % (Vpqw[0],Vpqw[1],Vpqw[2]))
Rijk = rot3(-1*Omega,rot1(-1*i,rot3(-1*w,Rpqw)))
Vijk = rot3(-1*Omega,rot1(-1*i,rot3(-1*w,Vpqw)))
return Rijk,Vijk
#FUNCTIONS FROM HOMEWORK 4
#--------------------------
#Define a basic eci to ecef function
#I'll have it return the cartesian ECEF vector
def eci2ecef(R_ECI,theta_GST,deg=True):
#R_ECI is ECI cartesian vector
#Unit agnostic, use the deg switch to decide whether the angle will be degrees or radians
R_ECEF = rot3(theta_GST,R_ECI,deg=deg) #Keeping it simple, pipe the deg argument through to rot3
return R_ECEF
def ecef_cart2spherical(R_ECEF,deg=True):
#R_ECEF is the cartesian Earth Centered Earth Fixed vector in any units
#For clarity, function is not vectorized
R_ECEF = R_ECEF.flatten() #Make sure the vector is 1-d
r = sqrt(R_ECEF[0]**2+R_ECEF[1]**2+R_ECEF[2]**2)
x = R_ECEF[0]
y = R_ECEF[1]
z = R_ECEF[2]
longitude = arctan2(y,x) #Longitude is angle in x,y plane
latitude = arcsin(z/r) #Latitude is angle z-ward from x,y plane
#Convert to degrees for return if deg switch on
if deg:
longitude = longitude*180./pi
latitude = latitude*180./pi
return array([r,latitude,longitude])
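#Example (follows directly from the formulas): a point on the +X axis,
#  ecef_cart2spherical(array([7000.,0.,0.])) -> array([7000.,0.,0.]), i.e. r=7000, lat=0 deg, lon=0 deg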
#Define an ECEF spherical to cartesian transform
def ecef_spherical2cart(lat,lon,r,re=6371.2,deg=True):
#Offer option to use custom earth radius in km
#Convert to radians if nessecary
if deg:
lat = lat*pi/180.
lon = lon*pi/180.
#Height in km
#r = height+re
x = r*cos(lat)*cos(lon)
y = r*cos(lat)*sin(lon)
z = r*sin(lat)
return array([x,y,z])
#Cartesian ECEF to Cartesian ECI
def ecef2eci(R_ECEF,theta_GST,deg=True):
R_ECI = rot3(-1*theta_GST,R_ECEF,deg=deg)
return R_ECI
#Define some conversions between geocentric (spherical) and geodetic latitude
def spherical2geodetic(gclat,deg=True,ecc_earth=.081819221456):
#eccentricity from Vallado back cover
#Valid for points on earth surface only
if deg:
gclat = gclat*pi/180.
gdlat = arctan2(tan(gclat),(1.-ecc_earth**2.))
if deg:
		gdlat = gdlat*180./pi
return gdlat
def geodetic2spherical(gdlat,deg=True,ecc_earth=.081819221456):
#Valid for points on earth surface only
if deg:
gdlat = gdlat*pi/180.
gclat = arctan2(tan(gdlat),1./(1.-ecc_earth**2.))
if deg:
		gclat = gclat*180./pi
return gclat
#Main function
def ecef2topo(R_ECEF, gdlat, lon, height, deg=True,):
#Assume input lat is geodetic, if it were geocentric/spherical, use above spherical2geodetic
#gdlat = geodetic2spherical(gdlat)
R_site_ecef = ecef_spherical2cart(gdlat,lon,height,deg=deg)
#Find the ECEF vector of the site
rho_ecef = R_ECEF-R_site_ecef
#Compute ECEF range vector
rho_topo = rot3(lon,rho_ecef,deg=True)
#Rotate range vector about Z-axis by longitude
	rho_topo = rot2((90.-gdlat),rho_topo,deg=True)
#Rotate range vector about y-axis by colatitude
el = arcsin(rho_topo[2]/linalg.norm(rho_topo))
	#elevation is asin(rho_Z/|rho|), the angle up from the SE plane
	beta = pi-arctan2(rho_topo[1],rho_topo[0])
	#Like theta for spherical coords, the azimuth is the angle of rho in the SE plane
#But since it's referenced to local north instead of south, it's pi - atan(y/x)
betasin = arcsin(rho_topo[1]/sqrt(rho_topo[0]**2+rho_topo[1]**2))
betacos = arccos(-1*rho_topo[0]/sqrt(rho_topo[0]**2+rho_topo[1]**2))
rng = linalg.norm(rho_topo)
#The range is just the distance to the spacecraft from the site, so it's just the length of rho vector
#Convert to degrees for return
el = el*180./pi
beta = beta*180./pi
print("Beta from sin: %.5f" % (betasin*180./pi))
print("180-Beta from sin: %.5f" % (180.-betasin*180./pi))
print("Beta from cos: %.5f" % (betacos*180./pi))
print("-Beta from cos: %.5f" % (-1*betacos*180./pi))
print("Beta from tan: %.5f" % (beta))
return array([el,beta,rng]),rho_topo
def ecef2enu(R_ECEF,lat,lon):
#Rotate a vector from ecef to local east, north, up
#coordinates centered at lat,lon
lonrot = 90.+lon
#lonrot = lon
#lonrot[lonrot > 360.] = lonrot[lonrot>360.]-360.
if lonrot > 360.:
lonrot = lonrot-360.
colat = 90.-lat
R_ENU = rot1(colat,
rot3(lonrot,R_ECEF,deg=True)
,deg=True)
return R_ENU
def enu2ecef(R_ENU,lat,lon):
#Rotate a vector from ecef to local east, north, up
#coordinates centered at lat,lon
lonrot = 90.+lon
#lonrot = lon
#lonrot[lonrot > 360.] = lonrot[lonrot>360.]-360.
if lonrot > 360.:
lonrot = lonrot-360.
colat = 90.-lat
R_ECEF = rot1(-1*colat*pi/180., rot3(-1*lonrot*pi/180.,R_ENU) )
return R_ECEF
#Compute the theta_GST from Year and fractional day of year
def doy2ymdhms(year,doy):
#Not vectorized
#January - 31 Days
#February - 28 Days (Unless leap year - 29 Days)
#March - 31 Days
#April - 30 Days
#May - 31 Days
#June - 30 Days
#July - 31 Days
#August - 31 Days
#September - 30 Days
#October - 31 Days
#November - 30 Days
#December - 31 Days
	if hasattr(doy,'__len__') and len(doy)>1:
		raise ValueError('Not Vectorized!')
	decimaldoy = doy-floor(doy)
	doy = floor(doy)
	mons = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
	ndays = array([31,28,31,30,31,30,31,31,30,31,30,31])
	if mod(year,4) == 0:
		ndays[1] = 29 #February is index 1
	#Cumulative day of year at the start of each month
	doys = array([sum(ndays[0:k]) for k in arange(len(ndays))])
	diffdoys = doy-doys
	for (j,diffdoy) in enumerate(diffdoys):
		if diffdoy <= ndays[j] and diffdoy > 0:
			mo = j+1
			d = diffdoy
			break
	#Hour, Minute, Second parse
	h = floor(decimaldoy*24)
	mn = floor((decimaldoy*24-h)*60)
	s = ((decimaldoy*24-h)*60-mn)*60.
	return year,mo,d,h,mn,s
def ymdhms2jd(year,mon,day,hr,mn,sc):
#Takes UTC ymdhms time and returns julian date
#FIXME: Add leap second support
leapsecond = False
if year < 1900:
raise ValueError('Year must be 4 digit year')
t1 = 367.*year
t2 = int(7.*(year+int((mon+9.)/12.))/4.)
t3 = int(275.*mon/9.)
t4 = day + 1721013.5
if not leapsecond:
t5 = ((sc/60.+mn)/60+hr)/24
else:
t5 = ((sc/61.+mn)/60+hr)/24
#print t1,t2,t3,t4,t5
return t1-t2+t3+t4+t5
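#Quick check: the J2000.0 epoch, 2000-01-01 12:00:00 UTC, gives
#  ymdhms2jd(2000,1,1,12,0,0) -> 2451545.0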
def jd2ymdhms(jd):
dt = jd2datetime(jd)
return dt.year,dt.month,dt.day,dt.hour,dt.minute,dt.second
def jd2datetime(jd):
#Takes julian date and returns datetime.datetime in UTC
#The inverse of above, from Vallado pp 208. (algorithm 22)
T1900 = (jd-2415019.5)/365.25
year = 1900+int(T1900)
leapyrs = int((year-1900-1)*.25)
days = (jd-2415019.5)-((year-1900)*(365.0) + leapyrs)
if days < 1.0:
year-=1
leapyrs = int((year-1900-1)*.25)
days = (jd-2415019.5)-((year-1900)*(365.0) + leapyrs)
#doy = int(days)
return datetime.datetime(year,1,1,0,0,0)+datetime.timedelta(days=days-1)
def jd2gst(JD_UT1,deg=True):
#Following Vallado pg. 194, gets Greenwich Mean Sideral Time (GMST) in degrees if deg=True or radians otherwise
#Note that input time is in UT1 NOT UTC. If have UTC and want very accurate theta_gst, need to do UT1 = UTC + Delta_UT1
#Delta_UT1 is obtainable from the Earth Orientation Parameters (EOP)
T_UT1 = (JD_UT1-2451545.)/36525 #Get Julian centuries
#Note that this formula can be broken up into a two part (hours and seconds) version using a two part
#T_UT1. Where 876600 is multiplied by 3600., and in the exponentiation, the accuracy can be increased
#by breaking up the T_UT1
theta_GST_s = 67310.54841+(876600.*3600.+8640184.812866)*T_UT1+.093104*T_UT1**2-6.2e-6*T_UT1**3
#Make sure abs(theta_GST) <= 86400 seconds
if abs(theta_GST_s) > 86400.:
theta_GST_s = mod(theta_GST_s,86400.)
#Convert theta_GST to degrees from seconds
theta_GST = theta_GST_s/240.
if theta_GST < 0.:
		theta_GST = theta_GST + 360.
if theta_GST > 360.:
theta_GST = mod(theta_GST,360.)
if not deg:
theta_GST = theta_GST * pi / 180.
return theta_GST
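#Quick check: at the J2000.0 epoch (JD_UT1 = 2451545.0), T_UT1 = 0 and
#theta_GST = 67310.54841/240 ~ 280.46 deg, the standard GMST value at J2000.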
def groundtrack(year,decimaldoy,a,ecc,i,w,Omega,M0,n,timestep=60.,timelen=3*3600.,w_e=7.2921158553e-5):
	#year and decimaldoy are the UT1 timestamp/epoch for the orbital elements
	#w_e is earth rotation rate in rad/s
	#n is mean motion in rad/s
	#a is semimajor in km
	#ecc is eccentricity, i is inclination in degrees
	#timelen is length of time to propagate orbit for in seconds
	#timestep is the length of each time step in seconds
	ndeg = n * 180/pi #Convert n to degrees per second
	nsteps = int(floor(timelen/timestep))
	#Compute Julian Date
	yr,mo,dy,hr,mn,sc = doy2ymdhms(year,decimaldoy)
	jd = ymdhms2jd(yr,mo,dy,hr,mn,sc)
	#Init output arrays
	lat_arr = zeros((nsteps+1,1))
	lon_arr = zeros((nsteps+1,1))
	#Set initial values
	M = M0
	theta_GST = jd2gst(jd)
	for k in arange(nsteps):
		E, M_out, tminustp = kepler(ecc,a,M=M)
		nu_sin,nu_cos,nu_tan = eccentrictotrue(E,ecc,a=a)
		nu = quadrant_check(nu_sin,nu_cos)
		#def coe2rv(a,ecc,i,Omega,w,nu,debug=True):
		R_ECI,V_ECI = coe2rv(a,ecc,i,Omega,w,nu)
		R_ECEF = eci2ecef(R_ECI,theta_GST)
		r,lat_arr[k],lon_arr[k] = ecef_cart2spherical(R_ECEF)
		#Convert Spherical Latitude to Geodetic
		lat_arr[k] = spherical2geodetic(lat_arr[k],deg=True)
		#Increment theta_GST (degrees) and M (degrees)
		theta_GST = theta_GST + w_e*timestep*180./pi
		M = M+ndeg*timestep
	return lat_arr,lon_arr
def hour_angle(dt, lons, hours=False):
# Modified to use ephem sun
	# from algorithm on Stack Overflow: http://stackoverflow.com/questions/13314626/local-solar-time-function-from-utc-and-longitude
# @input UTC time (datetime)
# @input lon(float, degrees, negative west of Greenwich)
# @output hour angle, in degrees (float)
sun = ephem.Sun()
o = ephem.Observer()
o.lat,o.lon,o.date = 0.,0.,dt
sun.compute(o)
ra = sun.ra
#lons=-1.*lons
jd = ymdhms2jd(dt.year,dt.month,dt.day,dt.hour,dt.minute,dt.second)
gst = jd2gst(jd,deg=False)
ha = np.degrees(gst - np.radians(lons) - ra)
if hours:
ha = ha/180.*12
return ha
def hour_angle_approx(dt,lons):
"""
Returns hour angle in degrees
"""
lons[lons<0.] = lons[lons<0.]+360.
gamma = 2 * pi / 365 * (dt.timetuple().tm_yday - 1 + float(dt.hour - 12) / 24)
eqtime = 229.18 * (0.000075 + 0.001868 * cos(gamma) - 0.032077 * sin(gamma) \
- 0.014615 * cos(2 * gamma) - 0.040849 * sin(2 * gamma))
decl = 0.006918 - 0.399912 * cos(gamma) + 0.070257 * sin(gamma) \
- 0.006758 * cos(2 * gamma) + 0.000907 * sin(2 * gamma) \
- 0.002697 * cos(3 * gamma) + 0.00148 * sin(3 * gamma)
time_offset = eqtime + 4 * lons
tst = dt.hour * 60 + dt.minute + dt.second / 60 + time_offset
ha = tst / 4 - 180.
return ha
def lon2lt(dt,lons):
"""
	Converts an array of longitudes into solar local times
"""
phi = np.radians(lons)
#Returns in radians
gst,sdec,sra = solar_position_approx(dt)
#Calculate hour angle
sha = sra - (gst+phi)
#Convert to hours
lts = sha*12./np.pi+12.
return lts
def solar_position_approx(dt,degrees=False):
"""
From <NAME>, (1971) "Geophysical Coordinate Transformations",
Cosmic. Electrodyn. 2, 184-196
...
<NAME> (private communication) has written a simple subroutine to\
calculate the position of the Sun in GEI coordinates. It is accurate
for years 1901 through 2099, to within 0.006 deg. The input is the
year, day of year and seconds of the day in UT. The output is
Greenwich Mean Sideral Time in degrees, the ecliptic longitude,
apparent right ascension and declination of the Sun in degrees.
The listing of this program follows. We note that the cartesian
coordinates of the vector from the Earth to the Sun are:
X = cos(SRASN) cos(SDEC)
Y = sin(SRASN) cos(SDEC)
Z = sin(SDEC)
SUBROUTINE SUN(IYR, IDAY, SECS, GST, SLONG, SRASN, SDEC)
C PROGRAM TO CALCULATE SIDEREAL TIME AND POSITION OF THE SUN.
C GOOD FOR YEARS 1901 THROUGH 2099. ACCURACY 0.006 DEGREE.
C INPUT IS IYR, IDAY (INTEGERS), AND SECS, DEFINING UN. TIME.
C OUTPUT IS GREENWICH MEAN SIDEREAL TIME (GST) IN DEGREES,
C LONGITUDE ALONG ECLIPTIC (SLONG), AND APPARENT RIGHT ASCENSION
C AND DECLINATION (SRASN, SDEC) OF THE SUN, ALL IN DEGREES
DATA RAD /57.29578/
DOUBLE PRECISION DJ, FDAY
IF(IYR. LT. 1901. OR. IYR. GT. 2099) RETURN
FDAY = SECS/86400
DJ = 365* (IYR-1900) + (IYR-1901)/4 + IDAY + FDAY -0.5D0
T = DJ / 36525
VL = DMOD (279.696678 + 0.9856473354*DJ, 360.D0)
GST = DMOD (279.690983 + 0.9856473354*DJ + 360.*FDAY + 180., 360.D0)
G = DMOD (358.475845 + 0.985600267*DJ, 360.D0) / RAD
SLONG = VL + (1.91946 -0.004789*T)*SIN(G) + 0.020094*SIN (2.*G)
OBLIQ = (23.45229 -0.0130125*T) / RAD
SLP = (SLONG -0.005686) / RAD
SIND = SIN (OBLIQ)*SIN (SLP)
COSD = SQRT(1.-SIND**2)
SDEC = RAD * ATAN (SIND/COSD)
SRASN = 180. -RAD*ATAN2
(COTAN (OBLIQ)*SIND/COSD, -COS (SLP)/COSD)
RETURN
END
"""
iyear = dt.year
iday = dt.timetuple().tm_yday
secs = dt.hour*3600.+dt.minute*60.+dt.second
fday = secs/86400.
	dj = 365*(iyear-1900)+(iyear-1901)//4 + iday + fday - .5 #integer division, as in the original FORTRAN
t = dj/36525.
vl = np.mod(279.696678 + 0.9856473354*dj, 360)
gst = np.mod(279.690983 + 0.9856473354*dj + 360.*fday + 180., 360.)
g = np.mod(358.475845 + 0.985600267*dj, 360.) * np.pi/180.
slong = vl + (1.91946 -0.004789*t)*np.sin(g) + 0.020094*np.sin(2.*g)
obliq = (23.45229 -0.0130125*t) * np.pi/180.
slp = (slong - 0.005686) * np.pi/180.
sin_d = np.sin(obliq)*np.sin(slp)
cos_d = np.sqrt(1-sin_d**2)
sdec = np.arctan(sin_d/cos_d)
sransn = np.pi - np.arctan2(1/np.tan(obliq)*sin_d/cos_d,
-1*np.cos(slp)/cos_d)
#Since GST is in degrees convert declination and right ascension
if degrees:
sdec = sdec * 180./np.pi
sransn = sransn * 180./np.pi
return gst,sdec,sransn
else:
gst = np.radians(gst)
return gst,sdec,sransn
def solar_zenith_angle(dt,lats,lons,degrees=True):
"""
Finds solar zenith angle using Russel solar position
"""
lam = np.radians(lats)
phi = np.radians(lons)
gst,sdec,sra = solar_position_approx(dt)
#Calculate hour angle
sha = sra - (gst+phi)
cossza = np.sin(lam)*np.sin(sdec) + np.cos(lam)*np.cos(sdec)*np.cos(sha)
if degrees:
return np.degrees(np.arccos(cossza))
else:
		return np.arccos(cossza)
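#Usage sketch (hypothetical date and grid): near the June solstice subsolar point the
#zenith angle should be close to zero, e.g.
#  sza = solar_zenith_angle(datetime.datetime(2013,6,21,12,0,0),np.array([23.4]),np.array([0.]))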
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
File: img2txt_oscar.py
Author: liwei(<EMAIL>)
Date: 2021-10-25 16:06
Desc: img to text generation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import time
import numpy as np
import glob
import json
import codecs
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from eval.gen_eval import GenerationEval
from finetune.trigram_blocking import TrigramBlocking
from model.transformer_encoder import encoder as grounded_encoder
from model.transformer_encoder import pre_process_layer as grounded_pre_process_layer
from model.transformer_encoder import encoder as text_encoder
from model.transformer_encoder import pre_process_layer as text_pre_process_layer
from model.vision_transformer_encoder import encoder as vit_encoder
from model.vision_transformer_encoder import pre_process_layer as vit_pre_process_layer
from utils.pos_emb_interpolate import interpolate_pos_embed
class Img2Txt(object):
def __init__(self, args, vl_config, tokenizer):
self.vl_config = vl_config
self.weight_sharing = args.weight_sharing
self.max_seq_len = args.max_seq_len
self.max_obj_len = args.max_obj_len
self.label_smooth = args.label_smooth
self.tgt_type_id = args.tgt_type_id
self.tokenizer = tokenizer
self.vocab_size = vl_config["text_vocab_size"]
self._emb_dtype = "float32"
# for beam_search decoding
self.do_decode = args.do_decode
self.length_penalty = args.length_penalty
self.max_out_len = args.max_out_len
self.min_out_len = args.min_out_len
self.block_trigram = args.block_trigram
self.beam_size = args.beam_size
self.patch_seq_len = self.vl_config['image_size'] * self.vl_config['image_size'] // \
(self.vl_config['resolution'] * self.vl_config['resolution'])
# directly utilize Conv2d to extract path and linear transforming
self.patch_emb_size = self.vl_config['resolution'] * self.vl_config['resolution'] * 3
self.bos_id = tokenizer.cls_token_id
self.eos_id = tokenizer.sep_token_id
self.evaluator = GenerationEval(args)
self.task_type = "img2txt"
self.model_type = args.model_type
self.grounding_method = args.grounding_method
self.topk_value = args.topk_value
self.with_grounding_projection = args.with_grounding_projection
self.with_grounding_pos = args.with_grounding_pos
self.text_enc_layers = [int(i) for i in args.text_enc_layers.split(',')]
self.grounding_enc_layers = [int(i) for i in args.grounding_enc_layers.split(',')]
def cal_logit(self, enc_out, tgt_pos):
enc_out = fluid.layers.reshape(x=enc_out,
shape=[-1, self.vl_config["hidden_size"]])
if tgt_pos:
tgt_pos = fluid.layers.cast(x=tgt_pos, dtype='int32')
tgt_feat = fluid.layers.gather(input=enc_out, index=tgt_pos)
else:
tgt_feat = enc_out
tgt_trans_feat = fluid.layers.fc(
input=tgt_feat,
size=self.vl_config["hidden_size"],
act=self.vl_config["hidden_act"],
param_attr=fluid.ParamAttr(
name="grounded.mask_lm_trans_fc.w_0",
initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
bias_attr=fluid.ParamAttr(
name="grounded.mask_lm_trans_fc.b_0",
initializer=fluid.initializer.Constant(0.)))
tgt_trans_feat = fluid.layers.layer_norm(
tgt_trans_feat,
begin_norm_axis=len(tgt_trans_feat.shape) - 1,
param_attr=fluid.ParamAttr(
name='grounded.mask_lm_trans_layer_norm_scale',
initializer=fluid.initializer.Constant(1.)),
bias_attr=fluid.ParamAttr(
name='grounded.mask_lm_trans_layer_norm_bias',
initializer=fluid.initializer.Constant(1.)))
seq2seq_out_bias_attr = fluid.ParamAttr(
name="grounded.mask_lm_out_fc.b_0",
initializer=fluid.initializer.Constant(value=0.0))
if self.weight_sharing:
fc_out = fluid.layers.matmul(
x=tgt_trans_feat,
y=fluid.default_main_program().global_block().var(
"text.word_embedding"),
transpose_y=True)
fc_out += fluid.layers.create_parameter(
shape=[self.vl_config['text_vocab_size']],
dtype="float32",
attr=seq2seq_out_bias_attr,
is_bias=True)
else:
out_size = self.vl_config['text_vocab_size']
fc_out = fluid.layers.fc(input=tgt_trans_feat,
size=out_size,
param_attr=fluid.ParamAttr(
name="grounded.mask_lm_out_fc.w_0",
initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
bias_attr=seq2seq_out_bias_attr)
return fc_out
def to_tensor(self, shapes, dtypes, lod_levels):
return [fluid.layers.data(name="placeholder_" + str(i), shape=shapes[i], dtype=dtypes[i],
lod_level=lod_levels[i]) for i in range(len(shapes))]
def create_model(self, decoding=False):
"""create model"""
if decoding:
return self.fast_decode()
img_input_shapes = [[-1, self.vl_config['image_size'], self.vl_config['image_size'], 3], # image_pixel_input
[-1, 1, self.patch_seq_len + 1]] # image_mask
img_input_dtypes = ['float32', 'float32']
img_input_lod_levels = [0, 0]
emb_num = 3
text_input_shapes = [[-1, self.max_seq_len, 1]] * emb_num + \
[[-1, self.max_seq_len, self.max_seq_len], [-1, 1], [-1, 1]]
text_input_dtypes = ['int64'] * emb_num + ['float32', 'int64', 'int64']
text_input_lod_levels = [0] * emb_num + [0, 0, 0]
shapes = img_input_shapes + text_input_shapes
dtypes = img_input_dtypes + text_input_dtypes
lod_levels = img_input_lod_levels + text_input_lod_levels
inputs = self.to_tensor(shapes, dtypes, lod_levels)
pyreader = fluid.io.DataLoader.from_generator(feed_list=inputs, capacity=70, iterable=False)
image_input = {}
text_input = {}
image_input["pixel_embedding"], image_mask, text_input["text.word_embedding"], \
text_input["text.sent_embedding"], text_input["text.pos_embedding"], text_mask, tgt_labels, tgt_pos = inputs
if self.model_type == "grounded":
model = GroundingModelForImg2Txt(text_input=text_input,
text_mask=text_mask,
image_input=image_input,
image_mask=image_mask,
config=self.vl_config,
weight_sharing=self.weight_sharing,
grounding_method=self.grounding_method,
topk_value=self.topk_value,
with_grounding_projection=self.with_grounding_projection,
with_grounding_pos=self.with_grounding_pos,
text_enc_layers=self.text_enc_layers,
grounding_enc_layers=self.grounding_enc_layers)
elif self.model_type == "baseline":
model = BaselineForImg2Txt(text_input=text_input,
text_mask=text_mask,
image_input=image_input,
image_mask=image_mask,
config=self.vl_config,
weight_sharing=self.weight_sharing,
text_enc_layers=self.text_enc_layers,
grounding_enc_layers=self.grounding_enc_layers)
else:
raise ValueError("The model_type is invalid!!!")
enc_out = model.get_text_sequence_output()
fc_out = self.cal_logit(enc_out, tgt_pos)
if self.label_smooth:
out_size = self.vl_config['text_vocab_size']
labels = fluid.layers.label_smooth(
label=fluid.layers.one_hot(
input=tgt_labels, depth=out_size),
epsilon=self.label_smooth)
ce_loss = layers.softmax_with_cross_entropy(
logits=fc_out, label=labels, soft_label=True)
else:
ce_loss, probs = fluid.layers.softmax_with_cross_entropy(
logits=fc_out, label=tgt_labels, return_softmax=True)
loss = fluid.layers.mean(x=ce_loss)
graph_vars = {"loss": loss}
for k, v in graph_vars.items():
v.persistable = True
return pyreader, graph_vars, model.get_checkpoints()
def fast_decode(self):
input_shapes = [[-1, self.vl_config['image_size'], self.vl_config['image_size'], 3], # image_pixel_input
[-1, 1, self.patch_seq_len + 1], # image_mask
[-1, 1], # image_id
[-1, self.max_obj_len, 1], # padded_obj_token_id
[-1, self.max_obj_len, 1], # padded_obj_sent_ids
[-1, self.max_obj_len, 1], # padded_obj_pos_ids
[-1, self.max_obj_len, self.max_obj_len]] # obj_mask
input_dtypes = ['float32', 'float32', 'int32', 'int64', 'int64', 'int64', 'float32']
input_lod_levels = [0, 0, 0, 0, 0, 0, 0]
shapes = input_shapes + [[-1, 1, 1], [-1, 1, 1], [-1, 1], [-1], [-1, 1, self.max_obj_len]]
dtypes = input_dtypes + ['int64', 'int64', 'float32', 'int32', 'float32']
lod_levels = input_lod_levels + [2, 2, 2, 0, 0]
inputs = self.to_tensor(shapes, dtypes, lod_levels)
pyreader = fluid.io.DataLoader.from_generator(feed_list=inputs, capacity=70, iterable=False)
image_input = {}
obj_input = {}
image_input["pixel_embedding"], image_mask, image_ids, \
obj_input["text.word_embedding"], obj_input["text.sent_embedding"], obj_input["text.pos_embedding"], obj_mask,\
tgt_ids, tgt_pos, init_scores, parent_idx, tgt_input_mask = inputs
if self.model_type == "grounded":
model = GroundingModelForImg2Txt(text_input=obj_input,
text_mask=obj_mask,
image_input=image_input,
image_mask=image_mask,
config=self.vl_config,
weight_sharing=self.weight_sharing,
decoding=True,
gather_idx=parent_idx,
grounding_method=self.grounding_method,
topk_value=self.topk_value,
with_grounding_projection=self.with_grounding_projection,
with_grounding_pos=self.with_grounding_pos,
text_enc_layers=self.text_enc_layers,
grounding_enc_layers=self.grounding_enc_layers)
elif self.model_type == "baseline":
model = BaselineForImg2Txt(text_input=obj_input,
text_mask=obj_mask,
image_input=image_input,
image_mask=image_mask,
config=self.vl_config,
weight_sharing=self.weight_sharing,
decoding=True,
gather_idx=parent_idx,
text_enc_layers=self.text_enc_layers,
grounding_enc_layers=self.grounding_enc_layers)
else:
raise ValueError("The model_type is invalid!!!")
max_len = layers.fill_constant(
shape=[1], dtype=tgt_ids.dtype, value=self.max_out_len, force_cpu=True)
min_len = layers.fill_constant(
shape=[1], dtype=tgt_ids.dtype, value=self.min_out_len, force_cpu=True)
neg_inf = layers.fill_constant(
shape=[1], dtype='float32', value=-1e18)
step_idx = layers.fill_constant(
shape=[1], dtype=tgt_ids.dtype, value=0, force_cpu=True)
step_next_idx = layers.fill_constant(
shape=[1], dtype=tgt_ids.dtype, value=1, force_cpu=True)
cond = layers.less_than(x=step_idx, y=max_len)
while_op = layers.While(cond)
ids = layers.array_write(layers.reshape(tgt_ids, (-1, 1)), step_idx)
# pos_biases = layers.array_write(layers.reshape(tgt_pos, (-1, 1)), step_idx)
pos_biases = layers.array_write(tgt_pos, step_idx)
scores = layers.array_write(init_scores, step_idx)
batch_size = paddle.shape(tgt_ids)[0]
grounding_mask = paddle.ones(shape=[batch_size, 1, model.out_seq_len], dtype=self._emb_dtype)
grounding_masks = layers.array_write(grounding_mask, step_idx)
tgt_masks = layers.array_write(tgt_input_mask, step_idx)
trigram_blocking = TrigramBlocking(tgt_ids, self.tokenizer, beam_size=self.beam_size)
with while_op.block():
pre_ids = layers.array_read(array=ids, i=step_idx)
pre_ids = layers.reshape(pre_ids, (-1, 1, 1), inplace=True)
pre_scores = layers.array_read(array=scores, i=step_idx)
pos_bias = layers.array_read(array=pos_biases, i=step_idx)
pos_bias = layers.gather(input=pos_bias, index=parent_idx)
def gen_batch_like(value, dtype="int64", shape=[-1, 1, 1], is_scalar=True):
if is_scalar:
return layers.fill_constant_batch_size_like(
input=parent_idx, value=value, shape=shape, dtype=dtype)
else:
return layers.elementwise_mul(
x=layers.fill_constant_batch_size_like(
input=parent_idx, value=1, shape=shape, dtype=dtype),
y=value, axis=0)
tmp_grounding_mask = layers.array_read(grounding_masks, i=step_idx)
tmp_grounding_mask = layers.gather(input=tmp_grounding_mask, index=parent_idx)
append_1_mask = gen_batch_like(1.0, dtype=tmp_grounding_mask.dtype)
pre_grounding_mask = layers.concat([tmp_grounding_mask, append_1_mask], axis=2)
tmp_text_mask = layers.array_read(tgt_masks, i=step_idx)
tmp_text_mask = layers.gather(input=tmp_text_mask, index=parent_idx)
append_1_mask = gen_batch_like(1.0, dtype=tmp_text_mask.dtype)
pre_text_mask = layers.concat([tmp_text_mask, append_1_mask], axis=2)
pre_pos = gen_batch_like(step_idx, is_scalar=False)
pre_pos = pre_pos + pos_bias ####################### pos start from 2
pre_sent = gen_batch_like(self.tgt_type_id, dtype=pre_ids.dtype)
dec_emb_ids = {"text.word_embedding": pre_ids, "text.sent_embedding": pre_sent,
"text.pos_embedding": pre_pos}
dec_out, _ = model.encode(text_input=dec_emb_ids,
text_mask=pre_text_mask,
gather_idx=parent_idx,
decoding_step=True,
grounded_decoding_mask=pre_grounding_mask)
fc_out = self.cal_logit(dec_out, None)
# prevent generating end token if length less than min_out_len
eos_index = layers.fill_constant(shape=[layers.shape(fc_out)[0]],
dtype='int64',
value=self.eos_id)
eos_index = fluid.one_hot(eos_index, depth=self.vocab_size)
less_cond = layers.cast(layers.less_than(x=step_idx, y=min_len), dtype='float32')
less_val = layers.elementwise_mul(less_cond, neg_inf)
eos_val = layers.elementwise_mul(eos_index, less_val, axis=0)
revised_logits = layers.elementwise_add(fc_out, eos_val, axis=0)
# topK reduction across beams, also contain special handle of
# end beams and end sentences(batch reduction)
topk_scores, topk_indices = layers.topk(
input=layers.softmax(revised_logits), k=self.beam_size)
# Roll-Back previous-scores for length-penalty
# previous-scores has been length-penaltied, before this timestep length-penalty, need roll-back
# because of doing this, we need store the length-penaltied score in `scores`
# while calculating use the un-penaltied score
# -> safe for step_idx == 0 (initialization state), because previous-score == 0
pre_timestep_length_penalty = fluid.layers.pow(
((5.0 + fluid.layers.cast(step_idx, pre_scores.dtype)) / 6.0), self.length_penalty)
pre_scores_wo_len_penalty = fluid.layers.elementwise_mul(pre_scores, pre_timestep_length_penalty)
# calc trigram-blocking delta scores for current alive sequence
if self.block_trigram:
trigram_blocking.update_seq(pre_ids, parent_idx)
trigram_blocking.expand_cand_seq(topk_indices)
fluid.layers.py_func(func=trigram_blocking.blocking_forward,
x=[trigram_blocking.cand_seq,
trigram_blocking.id2is_full_token],
out=trigram_blocking.delta_score_out,
backward_func=None)
pre_scores_wo_len_penalty = fluid.layers.elementwise_add(x=trigram_blocking.delta_score_out,
y=pre_scores_wo_len_penalty,
axis=0)
# => [N, topk]
accu_scores = layers.elementwise_add(
x=layers.log(topk_scores), y=pre_scores_wo_len_penalty, axis=0)
cur_timestep_length_penalty = layers.pow(((5.0 + layers.cast(step_next_idx, accu_scores.dtype)) / 6.0),
self.length_penalty)
curr_scores = layers.elementwise_div(accu_scores, cur_timestep_length_penalty)
# beam_search op uses lod to differentiate branches.
curr_scores = layers.lod_reset(curr_scores, pre_ids)
topk_indices = layers.lod_reset(topk_indices, pre_ids)
selected_ids, selected_scores, gather_idx = layers.beam_search(
pre_ids=pre_ids,
pre_scores=pre_scores,
ids=topk_indices,
scores=curr_scores,
beam_size=self.beam_size,
end_id=self.eos_id,
return_parent_idx=True)
layers.increment(x=step_idx, value=1.0, in_place=True)
layers.increment(x=step_next_idx, value=1.0, in_place=True)
# cell states(caches) have been updated in wrap_decoder,
# only need to update beam search states here.
layers.array_write(selected_ids, i=step_idx, array=ids)
layers.array_write(selected_scores, i=step_idx, array=scores)
layers.array_write(pre_text_mask, i=step_idx, array=tgt_masks)
layers.array_write(pre_grounding_mask, i=step_idx, array=grounding_masks)
layers.array_write(pos_bias, i=step_idx, array=pos_biases)
layers.assign(gather_idx, parent_idx)
length_cond = layers.less_than(x=step_idx, y=max_len)
finish_cond = layers.logical_not(layers.is_empty(x=selected_ids))
layers.logical_and(x=length_cond, y=finish_cond, out=cond)
finished_ids, finished_scores = layers.beam_search_decode(
ids, scores, beam_size=self.beam_size, end_id=self.eos_id)
graph_vars = {
"finished_ids": finished_ids,
"finished_scores": finished_scores,
"image_ids": image_ids
}
for k, v in graph_vars.items():
v.persistable = True
return pyreader, graph_vars
def post_process_seq(self, seq):
"""
Post-process the beam-search decoded sequence. Truncate from the first
<eos> and remove the <bos> and <eos> tokens currently.
"""
eos_pos = len(seq)
for i, idx in enumerate(seq):
if idx == self.eos_id:
eos_pos = i
break
seq = seq[1:eos_pos]
return seq
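        # Example (hypothetical ids): with bos_id=0 and eos_id=2, a beam output [0, 5, 7, 2, 2]
        # is truncated at the first eos and stripped of the leading bos, yielding [5, 7].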
def remove_special_tokens(self, seq, special_tokens):
"""Remove special tokens from output sequence"""
seq = [idx for idx in seq if idx not in special_tokens]
return seq
def evaluate(self, resource, eval_phase, graph_vars, features=None,
output_path=None, dev_count=1, gpu_id=0):
exe, program, pyreader = resource["exe"], resource["program"], resource["pyreader"]
if eval_phase == "train":
fetch_list = [graph_vars["loss"].name]
outputs = exe.run(fetch_list=fetch_list)
np_loss = outputs[0]
ret = {"loss": np.mean(np_loss), "ppl": np.exp(np.mean(np_loss))}
return ret
if self.do_decode:
return_numpy = False
outfile = output_path + "/" + eval_phase
outfile_part = outfile + ".part" + str(gpu_id)
writer = codecs.open(outfile_part, "w", encoding='utf-8')
fetch_keys = ["finished_ids", "finished_scores", "image_ids"]
special_tokens = [self.tokenizer.cls_token_id,
self.tokenizer.sep_token_id,
self.tokenizer.mask_token_id,
self.tokenizer.pad_token_id,
self.tokenizer.unk_token_id]
else:
steps = 0
cost = 0.0
return_numpy = True
fetch_keys = ["loss"]
fetch_list = [graph_vars[key].name for key in fetch_keys]
time_begin = time.time()
pyreader.start()
while True:
try:
outputs = exe.run(program=program,
fetch_list=fetch_list,
return_numpy=return_numpy)
if not self.do_decode:
np_loss = outputs[0]
cost += np.mean(np_loss)
steps += 1
else:
seq_ids, seq_scores, image_ids = outputs
                    seq_ids_list, seq_scores_list = ([seq_ids], [seq_scores]) \
                        if isinstance(seq_ids, paddle.fluid.core.LoDTensor) else (seq_ids, seq_scores)
                    image_ids = np.array(image_ids)
from __future__ import division
'''
***********************************************************
File: softmaxModels.py
Allows for the creation, and use of Softmax functions
Version 1.3.0: Added Discretization function
Version 1.3.1: Added Likelihood weighted Importance sampling
***********************************************************
'''
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, Cohrint"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "GPL"
__version__ = "1.3.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import numpy as np;
import random;
from random import random;
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal as mvn
import warnings
import math
import copy
import time
from numpy.linalg import inv,det,svd,solve
from gaussianMixtures import Gaussian
from gaussianMixtures import GM
from mpl_toolkits.mplot3d import Axes3D
from scipy import compress
import scipy.linalg as linalg
from copy import deepcopy
from scipy import sparse
from sklearn.linear_model import LogisticRegression
class Softmax:
def __init__(self,weights= None,bias = None):
'''
Initialize with either:
1. Nothing, for empty softmax model
2. Vector of weights (n x d) and bias (nx1)
'''
self.weights = weights;
self.bias = bias;
if(self.weights is not None):
self.size = len(self.weights);
self.alpha = 3;
self.zeta_c = [0]*len(self.weights);
for i in range(0,len(self.weights)):
self.zeta_c[i] = random()*10;
def nullspace(self,A,atol=1e-13,rtol=0):
'''
Finds the nullspace of a matrix
'''
A = np.atleast_2d(A)
u, s, vh = svd(A)
tol = max(atol, rtol * s[0])
nnz = (s >= tol).sum()
ns = vh[nnz:].conj().T
return ns;
def distance(self,x1,y1,x2,y2):
'''
The distance formula for 2d systems
'''
dist = (x1-x2)*(x1-x2) + (y1-y2)*(y1-y2);
dist = math.sqrt(dist);
return dist;
def buildRectangleModel(self,recBounds,steepness = 1):
'''
Builds a softmax model in 2 dimensions with a rectangular interior class
Inputs
recBounds: A 2x2 list, with the coordinates of the lower left and upper right corners of the rectangle
steepness: A scalar determining how steep the bounds between softmax classes are
'''
B = np.matrix([-1,0,recBounds[0][0],1,0,-recBounds[1][0],0,1,-recBounds[1][1],0,-1,recBounds[0][1]]).T;
M = np.zeros(shape=(12,15));
		#Boundary: Left|Near
rowSB = 0;
classNum1 = 1;
classNum2 = 0;
for i in range(0,3):
M[3*rowSB+i,3*classNum2+i] = -1;
M[3*rowSB+i,3*classNum1+i] = 1;
		#Boundary: Right|Near
rowSB = 1;
classNum1 = 2;
classNum2 = 0;
for i in range(0,3):
M[3*rowSB+i,3*classNum2+i] = -1;
M[3*rowSB+i,3*classNum1+i] = 1;
		#Boundary: Up|Near
rowSB = 2;
classNum1 = 3;
classNum2 = 0;
for i in range(0,3):
M[3*rowSB+i,3*classNum2+i] = -1;
M[3*rowSB+i,3*classNum1+i] = 1;
		#Boundary: Down|Near
rowSB = 3;
classNum1 = 4;
classNum2 = 0;
for i in range(0,3):
M[3*rowSB+i,3*classNum2+i] = -1;
M[3*rowSB+i,3*classNum1+i] = 1;
A = np.hstack((M,B));
# print(np.linalg.matrix_rank(A))
# print(np.linalg.matrix_rank(M))
Theta = linalg.lstsq(M,B)[0].tolist();
weight = [];
bias = [];
for i in range(0,len(Theta)//3):
weight.append([Theta[3*i][0],Theta[3*i+1][0]]);
bias.append(Theta[3*i+2][0]);
steep = steepness;
self.weights = (np.array(weight)*steep).tolist();
self.bias = (np.array(bias)*steep).tolist();
self.size = len(self.weights);
self.alpha = 3;
self.zeta_c = [0]*len(self.weights);
for i in range(0,len(self.weights)):
self.zeta_c[i] = random()*10;
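	#Usage sketch (assumed bounds): an interior class covering the unit square,
	#  sm = Softmax(); sm.buildRectangleModel([[0,0],[1,1]],steepness=5)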
def buildOrientedRecModel(self,centroid,orient,length,width,steepness = 1):
'''
Builds a rectangular model at the specified centroid with the parameters given
'''
theta1 = orient*math.pi/180;
h = math.sqrt((width/2)*(width/2) + (length/2)*(length/2));
theta2 = math.asin((width/2)/h);
s1 = h*math.sin(theta1+theta2);
s2 = h*math.cos(theta1+theta2);
s3 = h*math.sin(theta1-theta2);
s4 = h*math.cos(theta1-theta2);
points = [];
points = [[centroid[0]+s2,centroid[1]+s1],[centroid[0]+s4,centroid[1]+s3],[centroid[0]-s2,centroid[1]-s1],[centroid[0]-s4,centroid[1]-s3]];
self.buildPointsModel(points,steepness=steepness);
def buildGeneralModel(self,dims,numClasses,boundries,B,steepness=1):
'''
		Builds a softmax model according to the full specification of boundaries and a normal vector
		Inputs
		dims: the dimensionality of the model
		numClasses: the number of classes in the model
		boundries: a list of [2x1] lists which specify the boundaries required in the model
		B: a list of normals and constants for each boundary
steepness: A scalar determining how steep the bounds between softmax classes are
'''
M = np.zeros(shape=(len(boundries)*(dims+1),numClasses*(dims+1)));
for j in range(0,len(boundries)):
for i in range(0,dims+1):
M[(dims+1)*j+i,(dims+1)*boundries[j][1]+i] = -1;
M[(dims+1)*j+i,(dims+1)*boundries[j][0]+i] = 1;
A = np.hstack((M,B));
Theta = linalg.lstsq(M,B)[0].tolist();
weight = [];
bias = [];
for i in range(0,len(Theta)//(dims+1)):
wtmp=[];
for j in range(0,dims):
wtmp.append(Theta[(dims+1)*i+j][0])
weight.append(wtmp);
bias.append(Theta[(dims+1)*i+dims][0]);
steep = steepness;
self.weights = (np.array(weight)*steep).tolist();
self.bias = (np.array(bias)*steep).tolist();
self.size = len(self.weights);
self.alpha = 3;
self.zeta_c = [0]*len(self.weights);
for i in range(0,len(self.weights)):
self.zeta_c[i] = random()*10;
def buildPointsModel(self,points,steepness=1):
'''
Builds a 2D softmax model by constructing an interior class from the given points
Inputs
points: list of 2D points that construct a convex polygon
steepness: A scalar determining how steep the bounds between softmax classes are
'''
dims = 2;
pointsx = [p[0] for p in points];
pointsy = [p[1] for p in points];
centroid = [sum(pointsx)/len(points),sum(pointsy)/len(points)];
#for each point to the next, find the normal between them.
B = [];
for i in range(0,len(points)):
p1 = points[i];
if(i == len(points)-1):
p2 = points[0];
else:
p2 = points[i+1];
mid = [];
for i in range(0,len(p1)):
mid.append((p1[i]+p2[i])/2)
H = np.matrix([[p1[0],p1[1],1],[p2[0],p2[1],1],[mid[0],mid[1],1]]);
Hnull = (self.nullspace(H)).tolist();
distMed1 = self.distance(mid[0]+Hnull[0][0],mid[1]+Hnull[1][0],centroid[0],centroid[1]);
distMed2 = self.distance(mid[0]-Hnull[0][0],mid[1]-Hnull[1][0],centroid[0],centroid[1]);
if(distMed1 < distMed2):
Hnull[0][0] = -Hnull[0][0];
Hnull[1][0] = -Hnull[1][0];
Hnull[2][0] = -Hnull[2][0];
for j in Hnull:
B.append(j[0]);
B = np.matrix(B).T;
numClasses = len(points)+1;
boundries = [];
for i in range(1,numClasses):
boundries.append([i,0]);
M = np.zeros(shape=(len(boundries)*(dims+1),numClasses*(dims+1)));
for j in range(0,len(boundries)):
for i in range(0,dims+1):
M[(dims+1)*j+i,(dims+1)*boundries[j][1]+i] = -1;
M[(dims+1)*j+i,(dims+1)*boundries[j][0]+i] = 1;
A = np.hstack((M,B));
#print(np.linalg.matrix_rank(A))
#print(np.linalg.matrix_rank(M))
Theta = linalg.lstsq(M,B)[0].tolist();
weight = [];
bias = [];
for i in range(0,len(Theta)//(dims+1)):
weight.append([Theta[(dims+1)*i][0],Theta[(dims+1)*i+1][0]]);
bias.append(Theta[(dims+1)*i+dims][0]);
steep = steepness;
self.weights = (np.array(weight)*steep).tolist();
self.bias = (np.array(bias)*steep).tolist();
self.size = len(self.weights);
self.alpha = 3;
self.zeta_c = [0]*len(self.weights);
for i in range(0,len(self.weights)):
self.zeta_c[i] = random()*10;
def buildTriView(self,pose,length = 3,steepness = 2):
'''
Builds a 2D softmax view-cone model anchored at pose = [x,y,heading(degrees)],
spanning roughly +/- 30 degrees about the heading
'''
l = length;
#Without Cutting
triPoints = [[pose[0],pose[1]],
[pose[0]+l*math.cos(2*-0.261799+math.radians(pose[2])),pose[1]+l*math.sin(2*-0.261799+math.radians(pose[2]))],
[pose[0]+l*math.cos(2*0.261799+math.radians(pose[2])),pose[1]+l*math.sin(2*0.261799+math.radians(pose[2]))]];
#With Cutting: truncate the apex of the cone at the short distance lshort
lshort = 0.5
triPoints = [[pose[0]+lshort*math.cos(2*0.261799+math.radians(pose[2])),pose[1]+lshort*math.sin(2*0.261799+math.radians(pose[2]))],
[pose[0]+lshort*math.cos(2*-0.261799+math.radians(pose[2])),pose[1]+lshort*math.sin(2*-0.261799+math.radians(pose[2]))],
[pose[0]+l*math.cos(2*-0.261799+math.radians(pose[2])),pose[1]+l*math.sin(2*-0.261799+math.radians(pose[2]))],
[pose[0]+l*math.cos(2*0.261799+math.radians(pose[2])),pose[1]+l*math.sin(2*0.261799+math.radians(pose[2]))]];
self.buildPointsModel(triPoints,steepness=steepness);
def Estep(self,weight,bias,prior_mean,prior_var,alpha = 0.5,zeta_c = 1,softClassNum=0):
'''
Runs the Expectation step of the Variational Bayes algorithm
'''
#start the VB EM step
lamb = [0]*len(weight);
for i in range(0,len(weight)):
lamb[i] = self._lambda(zeta_c[i]);
hj = 0;
suma = 0;
for c in range(0,len(weight)):
if(softClassNum != c):
suma += weight[c];
tmp2 = 0;
for c in range(0,len(weight)):
tmp2+=lamb[c]*(alpha-bias[c])*weight[c];
hj = 0.5*(weight[softClassNum]-suma)+2*tmp2;
Kj = 0;
for c in range(0,len(weight)):
Kj += lamb[c]*weight[c]*weight[c];
Kj = Kj*2;
Kp = prior_var**-1;
hp = Kp*prior_mean;
Kl = Kp+Kj;
hl = hp+hj;
mean = (Kl**-1)*hl;
var = Kl**-1;
yc = [0]*len(weight);
yc2= [0]*len(weight);
for c in range(0,len(weight)):
yc[c] = weight[c]*mean + bias[c];
yc2[c] = weight[c]*(var + mean*mean)*weight[c] + 2*weight[c]*mean*bias[c] + bias[c]**2;
return [mean,var,yc,yc2];
def Mstep(self,m,yc,yc2,zeta_c,alpha,steps):
'''
Runs the Maximization Step of the Variational Bayes algorithm
'''
z = zeta_c;
a = alpha;
for i in range(0,steps):
for c in range(0,len(yc)):
z[c] = math.sqrt(yc2[c] + a**2 - 2*a*yc[c]);
num_sum = 0;
den_sum = 0;
for c in range(0,len(yc)):
num_sum += self._lambda(z[c])*yc[c];
den_sum += self._lambda(z[c]);
a = ((m-2)/4 + num_sum)/den_sum;
return [z,a]
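#Illustrative sketch (added commentary, not from the original source) of how
#the E and M steps above are typically alternated to fuse a softmax
#observation with a Gaussian prior; the iteration counts are arbitrary choices:
#
# alpha, zeta_c = self.alpha, self.zeta_c
# for _ in range(10):
#     [mean,var,yc,yc2] = self.Estep(self.weights,self.bias,prior_mean,prior_var,alpha,zeta_c,softClassNum)
#     [zeta_c,alpha] = self.Mstep(len(self.weights),yc,yc2,zeta_c,alpha,steps=100)
# #mean,var then approximate the Gaussian posterior given the observed class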
def _lambda(self, zeta_c):
return 1 / (2 * zeta_c) * ( (1 / (1 + np.exp(-zeta_c))) - 0.5)
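#Added note: _lambda is the standard variational-bound helper for the logistic
#sigmoid, lambda(zeta) = 1/(2*zeta)*(sigmoid(zeta) - 1/2), which is
#algebraically equal to tanh(zeta/2)/(4*zeta) (as used, e.g., in the
#Jaakkola-Jordan bound).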
def calcCHat(self,prior_mean,prior_var,mean,var,alpha,zeta_c,yc,yc2,mod):
prior_var = np.matrix(prior_var);
prior_mean = np.matrix(prior_mean);
var_hat = np.matrix(var);
mu_hat = np.matrix(mean);
#KLD = 0.5*(np.log(prior_var/var) + prior_var**-1*var + (prior_mean-mean)*(prior_var**-1)*(prior_mean-mean));
#KLD between the Gaussian approximation N(mu_hat,var_hat) and the prior (matrix-determinant form of the expression commented above)
KLD = 0.5*(np.log(np.linalg.det(prior_var)/np.linalg.det(var_hat)) + np.trace(prior_var**-1*var_hat) + (prior_mean-mu_hat)*(prior_var**-1)*(prior_mean-mu_hat).T);
# -*- coding: utf-8 -*-
"""
ElViS Simulator
ELastic-VIscous-System Simulator
"""
import numpy as np
from elements import Spring, Dashpot, Force
POINT_static = 0
POINT_dynamic = 1
class MySim:
big_point_array = None
def __init__(self):
# set the end time and the time step
self.end_time = 10
self.h = 0.01
self.gamma = 0.01
self.m = 0.01
self.plot_point = 0
# initialize the empty lists
self.point_definitions = []
self.points = []
self.point_types = []
self.all_points = []
self.elements = []
if 0: # Maxwell
# add initial points
self.add_point(POINT_static, 0, 0)
self.add_point(POINT_dynamic, 1, 0)
self.add_point(POINT_dynamic, 2, 0)
self.add_point(POINT_dynamic, 3, 0)
# add initial elements
self.add_element(Spring(0, 1, rest=1, strength=1))
self.add_element(Dashpot(1, 2, strength=1))
self.add_element(Spring(2, 3, rest=1, strength=1))
self.add_element(Force(3, strength=1, t_start=1, t_end=3))
self.plot_point = 3
elif 0:
# add initial points
self.add_point(POINT_static, 0, 0)
self.add_point(POINT_dynamic, 1, 0)
self.add_point(POINT_dynamic, 2, 0)
# add initial elements
self.add_element(Dashpot(0, 1, strength=1))
self.add_element(Spring(1, 2, rest=1, strength=1))
self.add_element(Force(2, strength=1, t_start=1, t_end=3))
elif 1:
self.add_point(POINT_static, 0, 0)
self.add_point(POINT_dynamic, 1, 0)
# add initial elements
self.add_element(Spring(0, 1, rest=-1, strength=1))
self.add_element(Force(1, strength=1, t_start=1, t_end=3))
else: # <NAME>
# add initial points
self.add_point(POINT_static, 0, 0)
self.add_point(POINT_dynamic, 1, 0)
# add initial elements
#self.add_element(Spring(1, 0, rest=-1, strength=1, drawoffset=0.25))
self.add_element(Dashpot(1, 0, strength=1, drawoffset=-0.25))
self.add_element(Force(1, strength=1, t_start=0, t_end=3))
def setData(self, data):
if "plot_point" in data:
self.plot_point = data["plot_point"]
if "points" in data:
self.big_point_array = None
for point in data["points"]:
self.add_point(*point)
if "elements" in data:
self.elements = []
for element in data["elements"]:
self.add_element(eval(element[0])(*element[1:]))
self.updateDrawOffsets()
def updateDrawOffsets(self):
element_count = np.zeros(self.get_point_count())
for element in self.elements:
point = np.min(element.target_ids)
element.drawoffset = element_count[point]/2
element_count[point] += 1
for element in self.elements:
point = np.min(element.target_ids)
element.drawoffset -= (element_count[point]-1)/4
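# Added commentary: the two passes above first hand out offsets 0, 0.5, 1, ...
# to elements sharing the same lowest-numbered point, then recentre them by
# (count-1)/4 so they straddle the connection symmetrically; e.g. three
# parallel elements between the same pair of points end up at drawoffsets
# -0.5, 0.0 and +0.5.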
def serialize(self):
text = "points = "
points = []
for i in range(len(self.point_types)):
points.append("[%d, %s, %s]" % (self.big_point_array_movable[i], self.big_point_array[0, i, 0, 0], self.big_point_array[0, i, 0, 1]))
text += "["+", ".join(points)+"]"
text += "\n"
text += "elements = [%s]" % ", ".join([repr(e) for e in self.elements])
return text
def deserialize(self, text):
for line in text.split("\n"):
line = line.strip()
if line.startswith("points = "):
line = line[len("points = "):].strip()
points = eval(line)
for point in points:
self.add_point(*point)
if line.startswith("elements = "):
line = line[len("elements = "):].strip()
self.elements = eval(line)
def add_element(self, element):
# add an element to the list of elements
self.elements.append(element)
def add_point(self, type, x, y=0):
if self.big_point_array is None:
self.big_point_array = np.zeros([1, 0, 2, 2])
# -*- coding: utf-8 -*-
"""
This is the main class for the abacra model
"""
# enable for python2 execution
# from __future__ import print_function, division, absolute_import
import matplotlib.pylab as plt
import networkx as nx
import numpy as np
import pandas as pd
import time
import os
import pickle
import abacra.network_creation
# setting for printout of larger tables
pd.set_option("display.max_columns",200)
# setting all errors in numpy
# np.seterr(all='raise')
# ============================================================================
# =============== base class =================================================
# ============================================================================
class Model(object):
"""
The base class of the abacra model
"""
def __init__(self, par_dict=None, par_file="default", verbosity=0,
initial_conditions="default", network_type='grid_2d',
network_path=None, rng_seed=0):
print("Initializing model ...")
self.setup_time = time.process_time()
self.verbosity = verbosity
self.regenerate_network = False
self.rng_seed = "random"
self.t_max = 0
self.t = [0]
self.state_variables = ["P", "q", "k", "F", "S", "v", "y", "I", "C"]
self.sv_position = {"P": 0, "q": 1, "k": 2, "F": 3, "S": 4, "v": 5, "y": 6, "I": 7, "C": 8}
self.dim = len(self.state_variables)
self.no_aux_vars = 3
self.control_variables = ["d", "a", "r", "l", "m"]
self.cv_position = {"d": 0, "a": 1, "r": 2, "l": 3, "m": 4}
self.no_controls = len(self.control_variables)
self.plot_pars = {"dpi_saving": 150}
if rng_seed is not None:
self.rng_seed = rng_seed
# load parameter file
if par_file == "default":
self.load_parfile(os.path.dirname(__file__) + "/../default_parametrized.par")
elif par_file is not None:
self.load_parfile(par_file)
assert self.pars["S_0"] <= 0
if type(par_dict) is dict:
self.pars.update(par_dict)
print("Modified parameters:")
print(par_dict)
# ============ initialize network structure ==================================
self.G, self.node_pos = self._return_network(network_type=network_type, network_path=network_path)
if self.verbosity > 1:
self.print_network_properties()
if not "pie_radius" in self.plot_pars:
self.plot_pars["pie_radius"] = 0.3
self.adj_matrix = nx.adjacency_matrix(self.G, weight=None)
self.no_agents = self.adj_matrix.shape[0]
self.pars["network_type"] = network_type
self.network_type = network_type
self.network_path = network_path
# ============== ========================== ======================================
# ============== setting initial conditions ======================================
print("Setting initial conditions...")
self.control_vec = np.zeros(shape=[self.no_agents, self.no_controls])
#self.state_vec = np.zeros(shape=[self.no_agents, self.dim])
self.state_vec_new = np.zeros(shape=[self.no_agents, self.dim])
np.random.seed(self.rng_seed)
# state_vec = np.zeros(shape=[no_agents, dim])
# or better in a one-dim array: p_1, p_2, ..., p_n,
# q_1, q_2, ..., q_n ?
# randomization of initial soil quality
if self.pars["randomize_initial_soil_quality"] is "random_uniform":
initial_qp = np.random.uniform(self.pars["q_0_mean"] - self.pars["q_0_dev"],
self.pars["q_0_mean"] + self.pars["q_0_dev"], self.no_agents)
else:
initial_qp = self.pars["q_0_mean"] * np.ones(self.no_agents)
# randomization of initial savings
if self.pars["randomize_initial_savings"] is "random_uniform":
initial_savings = np.random.uniform(self.pars["k_0_mean"] - self.pars["k_0_dev"],
self.pars["k_0_mean"] + self.pars["k_0_dev"], self.no_agents)
elif self.pars["randomize_initial_savings"] is "random_pareto":
initial_savings = np.random.pareto(self.pars["k_0_pareto_shape"], size=self.no_agents)
elif self.pars["randomize_initial_savings"] is "random_lognormal":
mu = np.log(self.pars["k_0_mean"] / np.sqrt(1 + self.pars["k_0_std"]**2 / (self.pars["k_0_mean"] ** 2)))
sigma = np.sqrt(np.log(1 + self.pars["k_0_std"]**2 / (self.pars["k_0_mean"] ** 2)))
initial_savings = np.random.lognormal(mean=mu, sigma=sigma, size=self.no_agents)
else:
initial_savings = self.pars["k_0_mean"] * np.ones(self.no_agents)
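# added note: the mu/sigma used in the lognormal branch above are the usual
# moment-matching transformation, mu = ln(m/sqrt(1 + s**2/m**2)) and
# sigma = sqrt(ln(1 + s**2/m**2)) with m = k_0_mean and s = k_0_std, so the
# drawn savings have (approximately) that mean and standard deviation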
initial_qs = np.ones(self.no_agents)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
=============
desitarget.io
=============
Functions for reading, writing and manipulating files related to targeting.
"""
from __future__ import (absolute_import, division)
#
import numpy as np
import fitsio
from fitsio import FITS
import os
import re
from . import __version__ as desitarget_version
import numpy.lib.recfunctions as rfn
import healpy as hp
from glob import glob, iglob
from time import time
from desiutil import depend
from desitarget.geomask import hp_in_box, box_area, is_in_box
from desitarget.geomask import hp_in_cap, cap_area, is_in_cap
from desitarget.geomask import is_in_hp, nside2nside, pixarea2nside
from desitarget.targets import main_cmx_or_sv
# ADM set up the DESI default logger
from desiutil.log import get_logger
log = get_logger()
# ADM this is a lookup dictionary to map RELEASE to a simpler "North" or "South".
# ADM photometric system. This will expand with the definition of RELEASE in the
# ADM Data Model (e.g. https://desi.lbl.gov/trac/wiki/DecamLegacy/DR4sched).
# ADM 7999 were the dr8a test reductions, for which only 'S' surveys were processed.
releasedict = {3000: 'S', 4000: 'N', 5000: 'S', 6000: 'N', 7000: 'S', 7999: 'S',
8000: 'S', 8001: 'N', 9000: 'S', 9001: 'N'}
# ADM This is an empty array of most of the TS data model columns and
# ADM dtypes. Note that other columns are added in read_tractor and
# ADM from the "addedcols" data models below.
basetsdatamodel = np.array([], dtype=[
('RELEASE', '>i2'), ('BRICKID', '>i4'), ('BRICKNAME', 'S8'),
('OBJID', '>i4'), ('TYPE', 'S4'), ('RA', '>f8'), ('RA_IVAR', '>f4'),
('DEC', '>f8'), ('DEC_IVAR', '>f4'), ('DCHISQ', '>f4', (5,)), ('EBV', '>f4'),
('FLUX_G', '>f4'), ('FLUX_R', '>f4'), ('FLUX_Z', '>f4'),
('FLUX_IVAR_G', '>f4'), ('FLUX_IVAR_R', '>f4'), ('FLUX_IVAR_Z', '>f4'),
('MW_TRANSMISSION_G', '>f4'), ('MW_TRANSMISSION_R', '>f4'), ('MW_TRANSMISSION_Z', '>f4'),
('FRACFLUX_G', '>f4'), ('FRACFLUX_R', '>f4'), ('FRACFLUX_Z', '>f4'),
('FRACMASKED_G', '>f4'), ('FRACMASKED_R', '>f4'), ('FRACMASKED_Z', '>f4'),
('FRACIN_G', '>f4'), ('FRACIN_R', '>f4'), ('FRACIN_Z', '>f4'),
('NOBS_G', '>i2'), ('NOBS_R', '>i2'), ('NOBS_Z', '>i2'),
('PSFDEPTH_G', '>f4'), ('PSFDEPTH_R', '>f4'), ('PSFDEPTH_Z', '>f4'),
('GALDEPTH_G', '>f4'), ('GALDEPTH_R', '>f4'), ('GALDEPTH_Z', '>f4'),
('FLUX_W1', '>f4'), ('FLUX_W2', '>f4'), ('FLUX_W3', '>f4'), ('FLUX_W4', '>f4'),
('FLUX_IVAR_W1', '>f4'), ('FLUX_IVAR_W2', '>f4'),
('FLUX_IVAR_W3', '>f4'), ('FLUX_IVAR_W4', '>f4'),
('MW_TRANSMISSION_W1', '>f4'), ('MW_TRANSMISSION_W2', '>f4'),
('MW_TRANSMISSION_W3', '>f4'), ('MW_TRANSMISSION_W4', '>f4'),
('ALLMASK_G', '>i2'), ('ALLMASK_R', '>i2'), ('ALLMASK_Z', '>i2'),
('FIBERFLUX_G', '>f4'), ('FIBERFLUX_R', '>f4'), ('FIBERFLUX_Z', '>f4'),
('FIBERTOTFLUX_G', '>f4'), ('FIBERTOTFLUX_R', '>f4'), ('FIBERTOTFLUX_Z', '>f4'),
('REF_EPOCH', '>f4'), ('WISEMASK_W1', '|u1'), ('WISEMASK_W2', '|u1'),
('MASKBITS', '>i2')
])
# ADM columns that are new for the DR9 data model.
dr9addedcols = np.array([], dtype=[
('LC_FLUX_W1', '>f4', (13,)), ('LC_FLUX_W2', '>f4', (13,)),
('LC_FLUX_IVAR_W1', '>f4', (13,)), ('LC_FLUX_IVAR_W2', '>f4', (13,)),
('LC_NOBS_W1', '>i2', (13,)), ('LC_NOBS_W2', '>i2', (13,)),
('LC_MJD_W1', '>f8', (13,)), ('LC_MJD_W2', '>f8', (13,)),
('SHAPE_R', '>f4'), ('SHAPE_E1', '>f4'), ('SHAPE_E2', '>f4'),
('SHAPE_R_IVAR', '>f4'), ('SHAPE_E1_IVAR', '>f4'), ('SHAPE_E2_IVAR', '>f4'),
('SERSIC', '>f4'), ('SERSIC_IVAR', '>f4')
])
# ADM columns that were deprecated in the DR8 data model.
dr8addedcols = np.array([], dtype=[
('FRACDEV', '>f4'), ('FRACDEV_IVAR', '>f4'),
('SHAPEDEV_R', '>f4'), ('SHAPEDEV_E1', '>f4'), ('SHAPEDEV_E2', '>f4'),
('SHAPEDEV_R_IVAR', '>f4'), ('SHAPEDEV_E1_IVAR', '>f4'), ('SHAPEDEV_E2_IVAR', '>f4'),
('SHAPEEXP_R', '>f4'), ('SHAPEEXP_E1', '>f4'), ('SHAPEEXP_E2', '>f4'),
('SHAPEEXP_R_IVAR', '>f4'), ('SHAPEEXP_E1_IVAR', '>f4'), ('SHAPEEXP_E2_IVAR', '>f4'),
])
def desitarget_nside():
"""Default HEALPix Nside for all target selection algorithms."""
nside = 64
return nside
def desitarget_resolve_dec():
"""Default Dec cut to separate targets in BASS/MzLS from DECaLS."""
dec = 32.375
return dec
def add_photsys(indata):
"""Add the PHOTSYS column to a sweeps-style array.
Parameters
----------
indata : :class:`~numpy.ndarray`
Numpy structured array to which to add PHOTSYS column.
Returns
-------
:class:`~numpy.ndarray`
Input array with PHOTSYS added (and set using RELEASE).
Notes
-----
- The PHOTSYS column is only added if the RELEASE column
is available in the passed `indata`.
"""
# ADM only add the PHOTSYS column if RELEASE exists.
if 'RELEASE' in indata.dtype.names:
# ADM add PHOTSYS to the data model.
# ADM the fitsio check is a hack for the v0.9 to v1.0 transition
# ADM (v1.0 now converts all byte strings to unicode strings).
from distutils.version import LooseVersion
if LooseVersion(fitsio.__version__) >= LooseVersion('1'):
pdt = [('PHOTSYS', '<U1')]
else:
pdt = [('PHOTSYS', '|S1')]
dt = indata.dtype.descr + pdt
# ADM create a new numpy array with the fields from the new data model...
nrows = len(indata)
outdata = np.empty(nrows, dtype=dt)
# ADM ...and populate them with the passed columns of data.
for col in indata.dtype.names:
outdata[col] = indata[col]
# ADM add the PHOTSYS column.
photsys = release_to_photsys(indata["RELEASE"])
outdata['PHOTSYS'] = photsys
else:
outdata = indata
return outdata
def read_tractor(filename, header=False, columns=None):
"""Read a tractor catalogue or sweeps file.
Parameters
----------
filename : :class:`str`
File name of one Tractor or sweeps file.
header : :class:`bool`, optional
If ``True``, return (data, header) instead of just data.
columns: :class:`list`, optional
Specify the desired Tractor catalog columns to read; defaults to
desitarget.io.tsdatamodel.dtype.names + most of the columns in
desitarget.gaiamatch.gaiadatamodel.dtype.names, where
tsdatamodel is, e.g., basetsdatamodel + dr9addedcols.
Returns
-------
:class:`~numpy.ndarray`
Array with the tractor schema, uppercase field names.
"""
check_fitsio_version()
# ADM read in the file information. Due to fitsio header bugs
# ADM near v1.0.0, make absolutely sure the user wants the header.
if header:
indata, hdr = fitsio.read(filename, upper=True, header=True,
columns=columns)
else:
indata = fitsio.read(filename, upper=True, columns=columns)
# ADM form the final data model in a manner that maintains
# ADM backwards-compatability with DR8.
if "FRACDEV" in indata.dtype.names:
tsdatamodel = np.array(
[], dtype=basetsdatamodel.dtype.descr + dr8addedcols.dtype.descr)
else:
tsdatamodel = np.array(
[], dtype=basetsdatamodel.dtype.descr + dr9addedcols.dtype.descr)
# ADM the full data model including Gaia columns.
from desitarget.gaiamatch import gaiadatamodel
from desitarget.gaiamatch import pop_gaia_coords, pop_gaia_columns
gaiadatamodel = pop_gaia_coords(gaiadatamodel)
# ADM special handling of the pre-DR7 Data Model.
for gaiacol in ['GAIA_PHOT_BP_RP_EXCESS_FACTOR',
'GAIA_ASTROMETRIC_SIGMA5D_MAX',
'GAIA_ASTROMETRIC_PARAMS_SOLVED', 'REF_CAT']:
if gaiacol not in indata.dtype.names:
gaiadatamodel = pop_gaia_columns(gaiadatamodel, [gaiacol])
dt = tsdatamodel.dtype.descr + gaiadatamodel.dtype.descr
dtnames = tsdatamodel.dtype.names + gaiadatamodel.dtype.names
# ADM limit to just passed columns.
if columns is not None:
dt = [d for d, name in zip(dt, dtnames) if name in columns]
# ADM set-up the output array.
nrows = len(indata)
data = np.zeros(nrows, dtype=dt)
# ADM if REF_ID was requested, set it to -1 in case there is no Gaia data.
if "REF_ID" in data.dtype.names:
data['REF_ID'] = -1
# ADM populate the common input/output columns.
for col in set(indata.dtype.names).intersection(set(data.dtype.names)):
data[col] = indata[col]
# ADM MASKBITS used to be BRIGHTSTARINBLOB which was set to True/False
# ADM and which represented the SECOND bit of MASKBITS.
if "BRIGHTSTARINBLOB" in indata.dtype.names:
if "MASKBITS" in data.dtype.names:
data["MASKBITS"] = indata["BRIGHTSTARINBLOB"] << 1
# ADM To circumvent whitespace bugs on I/O from fitsio.
# ADM need to strip any white space from string columns.
for colname in data.dtype.names:
kind = data[colname].dtype.kind
if kind == 'U' or kind == 'S':
data[colname] = np.char.rstrip(data[colname])
# ADM add the PHOTSYS column to unambiguously check whether we're using imaging
# ADM from the "North" or "South".
data = add_photsys(data)
if header:
return data, hdr
return data
def fix_tractor_dr1_dtype(objects):
"""DR1 tractor files have inconsistent dtype for the TYPE field. Fix this.
Args:
objects : numpy structured array from target file.
Returns:
structured array with TYPE.dtype = 'S4' if needed.
If the type was already correct, returns the original array.
"""
if objects['TYPE'].dtype == 'S4':
return objects
else:
dt = objects.dtype.descr
for i in range(len(dt)):
if dt[i][0] == 'TYPE':
dt[i] = ('TYPE', 'S4')
break
return objects.astype(np.dtype(dt))
def release_to_photsys(release):
"""Convert RELEASE to PHOTSYS using the releasedict lookup table.
Parameters
----------
objects : :class:`int` or :class:`~numpy.ndarray`
RELEASE column from a numpy rec array of targets.
Returns
-------
:class:`str` or :class:`~numpy.ndarray`
'N' if the RELEASE corresponds to the northern photometric
system (MzLS+BASS) and 'S' if it's the southern system (DECaLS).
Notes
-----
Flags an error if the system is not recognized.
"""
# ADM arrays of the key (RELEASE) and value (PHOTSYS) entries in the releasedict.
releasenums = np.array(list(releasedict.keys()))
photstrings = np.array(list(releasedict.values()))
# ADM explicitly check no unknown release numbers were passed.
unknown = set(release) - set(releasenums)
if bool(unknown):
msg = 'Unknown release number {}'.format(unknown)
log.critical(msg)
raise ValueError(msg)
# ADM an array with indices running from 0 to the maximum release number + 1.
r2p = np.empty(np.max(releasenums)+1, dtype='|S1')
# ADM populate where the release numbers exist with the PHOTSYS.
r2p[releasenums] = photstrings
# ADM return the PHOTSYS string that corresponds to each passed release number.
return r2p[release]
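# Usage illustration (added commentary): with the releasedict defined at the
# top of this module, release_to_photsys(np.array([9000, 9001])) returns
# array([b'S', b'N'], dtype='|S1').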
def _bright_or_dark(filename, hdr, data, obscon, mockdata=None):
"""modify data/file name for BRIGHT or DARK survey OBSCONDITIONS
Parameters
----------
filename : :class:`str`
output target selection file.
hdr : :class:`str`
header of the output target selection file.
data : :class:`~numpy.ndarray`
numpy structured array of targets.
obscon : :class:`str`
Can be "DARK" or "BRIGHT" to only write targets appropriate for
"DARK|GRAY" or "BRIGHT" observing conditions. The relevant
`PRIORITY_INIT` and `NUMOBS_INIT` columns will be derived from
`PRIORITY_INIT_DARK`, etc. and `filename` will have "bright" or
"dark" appended to the lowest DIRECTORY in the input `filename`.
mockdata : :class:`dict`, optional, defaults to `None`
Dictionary of mock data to write out (only used in
`desitarget.mock.build.targets_truth` via `select_mock_targets`).
Returns
-------
:class:`str`
The modified file name.
:class:`data`
The modified data.
"""
# ADM determine the bits for the OBSCONDITIONS.
from desitarget.targetmask import obsconditions
if obscon == "DARK":
obsbits = obsconditions.mask("DARK|GRAY")
hdr["OBSCON"] = "DARK|GRAY"
else:
# ADM will flag an error if obscon is not, now BRIGHT.
obsbits = obsconditions.mask(obscon)
hdr["OBSCON"] = obscon
# ADM only retain targets appropriate to the conditions.
ii = (data["OBSCONDITIONS"] & obsbits) != 0
data = data[ii]
# Optionally subselect the mock data.
if len(data) > 0 and mockdata is not None:
truthdata, trueflux, _objtruth = mockdata['truth'], mockdata['trueflux'], mockdata['objtruth']
truthdata = truthdata[ii]
objtruth = {}
for obj in sorted(set(truthdata['TEMPLATETYPE'])):
objtruth[obj] = _objtruth[obj]
for key in objtruth.keys():
keep = np.where(np.isin(objtruth[key]['TARGETID'], truthdata['TARGETID']))[0]
if len(keep) > 0:
objtruth[key] = objtruth[key][keep]
if len(trueflux) > 0 and trueflux.shape[1] > 0:
trueflux = trueflux[ii, :]
mockdata['truth'] = truthdata
mockdata['trueflux'] = trueflux
mockdata['objtruth'] = objtruth
# ADM construct the name for the bright or dark directory.
newdir = os.path.join(os.path.dirname(filename), obscon.lower())
filename = os.path.join(newdir, os.path.basename(filename))
# ADM modify the filename with an obscon prefix.
filename = filename.replace("targets-", "targets-{}-".format(obscon.lower()))
# ADM change the name to PRIORITY_INIT, NUMOBS_INIT.
for col in "NUMOBS_INIT", "PRIORITY_INIT":
rename = {"{}_{}".format(col, obscon.upper()): col}
data = rfn.rename_fields(data, rename)
# ADM remove the other BRIGHT/DARK NUMOBS, PRIORITY columns.
names = np.array(data.dtype.names)
dropem = list(names[['_INIT_' in col for col in names]])
data = rfn.drop_fields(data, dropem)
if mockdata is not None:
return filename, hdr, data, mockdata
else:
return filename, hdr, data
def write_targets(targdir, data, indir=None, indir2=None, nchunks=None,
qso_selection=None, nside=None, survey="main", nsidefile=None,
hpxlist=None, scndout=None, resolve=True, maskbits=True,
obscon=None, mockdata=None, supp=False, extra=None):
"""Write target catalogues.
Parameters
----------
targdir : :class:`str`
Path to output target selection directory (the directory
structure and file name are built on-the-fly from other inputs).
data : :class:`~numpy.ndarray`
numpy structured array of targets to save.
indir, indir2, qso_selection : :class:`str`, optional, default to `None`
If passed, note these as the input directory, an additional input
directory, and the QSO selection method in the output file header.
nchunks : :class`int`, optional, defaults to `None`
The number of chunks in which to write the output file, to save
memory. Send `None` to write everything at once.
nside : :class:`int`, optional, defaults to `None`
If passed, add a column to the targets array populated
with HEALPixels at resolution `nside`.
survey : :class:`str`, optional, defaults to "main"
Written to output file header as the keyword `SURVEY`.
nsidefile : :class:`int`, optional, defaults to `None`
Passed to indicate in the output file header that the targets
have been limited to only certain HEALPixels at a given
nside. Used in conjunction with `hpxlist`.
hpxlist : :class:`list`, optional, defaults to `None`
Passed to indicate in the output file header that the targets
have been limited to only this list of HEALPixels. Used in
conjunction with `nsidefile`.
resolve, maskbits : :class:`bool`, optional, default to ``True``
Written to the output file header as `RESOLVE`, `MASKBITS`.
scndout : :class:`str`, optional, defaults to `None`
If passed, add to output header as SCNDOUT.
obscon : :class:`str`, optional, defaults to `None`
Can pass one of "DARK" or "BRIGHT". If passed, don't write the
full set of data, rather only write targets appropriate for
"DARK|GRAY" or "BRIGHT" observing conditions. The relevant
`PRIORITY_INIT` and `NUMOBS_INIT` columns will be derived from
`PRIORITY_INIT_DARK`, etc. and `filename` will have "bright" or
"dark" appended to the lowest DIRECTORY in the input `filename`.
mockdata : :class:`dict`, optional, defaults to `None`
Dictionary of mock data to write out (only used in
`desitarget.mock.build.targets_truth` via `select_mock_targets`).
supp : :class:`bool`, optional, defaults to ``False``
Written to the header of the output file to indicate whether
this is a file of supplemental targets (targets that are
outside the Legacy Surveys footprint).
extra : :class:`dict`, optional
If passed (and not None), write these extra dictionary keys and
values to the output header.
Returns
-------
:class:`int`
The number of targets that were written to file.
:class:`str`
The name of the file to which targets were written.
"""
# ADM create header.
hdr = fitsio.FITSHDR()
# ADM limit to just BRIGHT or DARK targets, if requested.
# ADM Ignore the filename output, we'll build that on-the-fly.
if obscon is not None:
if mockdata is not None:
_, hdr, data, mockdata = _bright_or_dark(
targdir, hdr, data, obscon, mockdata=mockdata)
else:
_, hdr, data = _bright_or_dark(
targdir, hdr, data, obscon)
# ADM if passed, use the indir to determine the Data Release
# ADM integer and string for the input targets.
drint = None
if supp:
drstring = "supp"
else:
try:
drint = int(indir.split("dr")[1][0])
drstring = 'dr'+str(drint)
except (ValueError, IndexError, AttributeError):
drstring = "X"
# ADM catch cases where we're writing-to-file and there's no hpxlist.
hpx = hpxlist
if hpxlist is None:
hpx = "X"
# ADM construct the output file name.
if mockdata is not None:
filename = find_target_files(targdir, flavor="targets", obscon=obscon,
hp=hpx, nside=nside, mock=True)
truthfile = find_target_files(targdir, flavor="truth", obscon=obscon,
hp=hpx, nside=nside, mock=True)
else:
filename = find_target_files(targdir, dr=drint, flavor="targets",
survey=survey, obscon=obscon, hp=hpx,
resolve=resolve, supp=supp)
ntargs = len(data)
# ADM die immediately if there are no targets to write.
if ntargs == 0:
return ntargs, filename
# ADM write versions, etc. to the header.
depend.setdep(hdr, 'desitarget', desitarget_version)
depend.setdep(hdr, 'desitarget-git', gitversion())
depend.setdep(hdr, 'photcat', drstring)
if indir is not None:
depend.setdep(hdr, 'tractor-files', indir)
if indir2 is not None:
depend.setdep(hdr, 'tractor-files-2', indir2)
if qso_selection is None:
log.warning('qso_selection method not specified for output file')
depend.setdep(hdr, 'qso-selection', 'unknown')
else:
depend.setdep(hdr, 'qso-selection', qso_selection)
# ADM add HEALPix column, if requested by input.
if nside is not None:
theta, phi = np.radians(90-data["DEC"]), np.radians(data["RA"])
hppix = hp.ang2pix(nside, theta, phi, nest=True)
data = rfn.append_fields(data, 'HPXPIXEL', hppix, usemask=False)
hdr.add_record(dict(name='HPXNSIDE', value=nside, comment="HEALPix nside"))
hdr.add_record(dict(name='HPXNEST', value=True, comment="HEALPix nested (not ring) ordering"))
# ADM populate SUBPRIORITY with a reproducible random float.
if "SUBPRIORITY" in data.dtype.names and mockdata is None:
np.random.seed(616)
data["SUBPRIORITY"] = np.random.random(ntargs)
# ADM add the type of survey (main, commissioning; or "cmx", sv) to the header.
hdr["SURVEY"] = survey
# ADM add whether or not the targets were resolved to the header.
hdr["RESOLVE"] = resolve
# ADM add whether or not MASKBITS was applied to the header.
hdr["MASKBITS"] = maskbits
# ADM indicate whether this is a supplemental file.
hdr['SUPP'] = supp
# ADM add the extra dictionary to the header.
if extra is not None:
for key in extra:
hdr[key] = extra[key]
if scndout is not None:
hdr["SCNDOUT"] = scndout
# ADM record whether this file has been limited to only certain HEALPixels.
if hpxlist is not None or nsidefile is not None:
# ADM hpxlist and nsidefile need to be passed together.
if hpxlist is None or nsidefile is None:
msg = 'Both hpxlist (={}) and nsidefile (={}) need to be set' \
.format(hpxlist, nsidefile)
log.critical(msg)
raise ValueError(msg)
hdr['FILENSID'] = nsidefile
hdr['FILENEST'] = True
# ADM warn if we've stored a pixel string that is too long.
_check_hpx_length(hpxlist, warning=True)
hdr['FILEHPX'] = hpxlist
# ADM create necessary directories, if they don't exist.
os.makedirs(os.path.dirname(filename), exist_ok=True)
# ADM write in a series of chunks to save memory.
if nchunks is None:
fitsio.write(filename+'.tmp', data, extname='TARGETS', header=hdr, clobber=True)
os.rename(filename+'.tmp', filename)
else:
write_in_chunks(filename, data, nchunks, extname='TARGETS', header=hdr)
# Optionally write out mock catalog data.
if mockdata is not None:
# truthfile = filename.replace('targets-', 'truth-')
truthdata, trueflux, objtruth = mockdata['truth'], mockdata['trueflux'], mockdata['objtruth']
hdr['SEED'] = (mockdata['seed'], 'initial random seed')
fitsio.write(truthfile+'.tmp', truthdata.as_array(), extname='TRUTH', header=hdr, clobber=True)
if len(trueflux) > 0 and trueflux.shape[1] > 0:
wavehdr = fitsio.FITSHDR()
wavehdr['BUNIT'] = 'Angstrom'
wavehdr['AIRORVAC'] = 'vac'
fitsio.write(truthfile+'.tmp', mockdata['truewave'].astype(np.float32),
extname='WAVE', header=wavehdr, append=True)
fluxhdr = fitsio.FITSHDR()
fluxhdr['BUNIT'] = '1e-17 erg/s/cm2/Angstrom'
fitsio.write(truthfile+'.tmp', trueflux.astype(np.float32),
extname='FLUX', header=fluxhdr, append=True)
if len(objtruth) > 0:
for obj in sorted(set(truthdata['TEMPLATETYPE'])):
fitsio.write(truthfile+'.tmp', objtruth[obj].as_array(), append=True,
extname='TRUTH_{}'.format(obj))
os.rename(truthfile+'.tmp', truthfile)
return ntargs, filename
def write_in_chunks(filename, data, nchunks, extname=None, header=None):
"""Write a FITS file in chunks to save memory.
Parameters
----------
filename : :class:`str`
The output file.
data : :class:`~numpy.ndarray`
The numpy structured array of data to write.
nchunks : :class`int`, optional, defaults to `None`
The number of chunks in which to write the output file.
extname, header, clobber, optional
Passed through to fitsio.write().
Returns
-------
Nothing, but writes the `data` to the `filename` in chunks.
Notes
-----
- Always OVERWRITES existing files!
"""
# ADM ensure that files are always overwritten.
if os.path.isfile(filename):
os.remove(filename)
start = time()
# ADM open a file for writing.
outy = FITS(filename, 'rw')
# ADM write the chunks one-by-one.
chunk = len(data)//nchunks
for i in range(nchunks):
log.info("Writing chunk {}/{} from index {} to {}...t = {:.1f}s"
.format(i+1, nchunks, i*chunk, (i+1)*chunk-1, time()-start))
datachunk = data[i*chunk:(i+1)*chunk]
# ADM if this is the first chunk, write the data and header...
if i == 0:
outy.write(datachunk, extname='TARGETS', header=header, clobber=True)
# ADM ...otherwise just append to the existing file object.
else:
outy[-1].append(datachunk)
# ADM append any remaining data.
datachunk = data[nchunks*chunk:]
log.info("Writing final partial chunk from index {} to {}...t = {:.1f}s"
.format(nchunks*chunk, len(data)-1, time()-start))
outy[-1].append(datachunk)
outy.close()
return
def write_secondary(targdir, data, primhdr=None, scxdir=None, obscon=None,
drint='X'):
"""Write a catalogue of secondary targets.
Parameters
----------
targdir : :class:`str`
Path to output target selection directory (the directory
structure and file name are built on-the-fly from other inputs).
data : :class:`~numpy.ndarray`
numpy structured array of secondary targets to write.
primhdr : :class:`str`, optional, defaults to `None`
If passed, added to the header of the output `filename`.
scxdir : :class:`str`, optional, defaults to :envvar:`SCND_DIR`
Name of the directory that hosts secondary targets. The
secondary targets are written back out to this directory in the
sub-directory "outdata" and the `scxdir` is added to the
header of the output `filename`.
obscon : :class:`str`, optional, defaults to `None`
Can pass one of "DARK" or "BRIGHT". If passed, don't write the
full set of secondary target that do not match a primary,
rather only write targets appropriate for "DARK|GRAY" or
"BRIGHT" observing conditions. The relevant `PRIORITY_INIT`
and `NUMOBS_INIT` columns will be derived from
`PRIORITY_INIT_DARK`, etc. and `filename` will have "bright" or
"dark" appended to the lowest DIRECTORY in the input `filename`.
drint : :class:`int`, optional, defaults to `X`
The data release ("dr"`drint`"-") in the output filename.
Returns
-------
:class:`int`
The number of secondary targets that do not match a primary
target that were written to file.
:class:`str`
The name of the file to which such targets were written.
Notes
-----
Two sets of files are written:
- The file of secondary targets that do not match a primary
target is written to `targdir`. Such secondary targets
are determined from having `RELEASE==0` and `SKY==0`
in the `TARGETID`. Only targets with `PRIORITY_INIT > -1`
are written to this file (this allows duplicates to be
resolved in, e.g., :func:`~desitarget.secondary.finalize()`).
- Each secondary target that, presumably, was initially drawn
from the "indata" subdirectory of `scxdir` is written to
an "outdata/targdir" subdirectory of `scxdir`.
"""
# ADM grab the scxdir, it it wasn't passed.
from desitarget.secondary import _get_scxdir
scxdir = _get_scxdir(scxdir)
# ADM if the primary header was passed, use it, if not
# ADM then create a new header.
hdr = primhdr
if primhdr is None:
hdr = fitsio.FITSHDR()
# ADM add the SCNDDIR to the file header.
hdr["SCNDDIR"] = scxdir
# ADM limit to just BRIGHT or DARK targets, if requested.
# ADM ignore the filename output, we'll build that on-the-fly.
if obscon is not None:
log.info("Observational conditions are {}".format(obscon))
_, hdr, data = _bright_or_dark(targdir, hdr, data, obscon)
else:
log.info("Observational conditions are ALL")
# ADM add the secondary dependencies to the file header.
depend.setdep(hdr, 'scnd-desitarget', desitarget_version)
depend.setdep(hdr, 'scnd-desitarget-git', gitversion())
# ADM populate SUBPRIORITY with a reproducible random float.
if "SUBPRIORITY" in data.dtype.names:
ntargs = len(data)
np.random.seed(616)
data["SUBPRIORITY"] = np.random.random(ntargs)
# ADM remove the SCND_TARGET_INIT and SCND_ORDER columns.
scnd_target_init, scnd_order = data["SCND_TARGET_INIT"], data["SCND_ORDER"]
data = rfn.drop_fields(data, ["SCND_TARGET_INIT", "SCND_ORDER"])
# ADM we only need a subset of the columns where we match a primary.
smalldata = rfn.drop_fields(data, ["PRIORITY_INIT", "SUBPRIORITY",
"NUMOBS_INIT", "OBSCONDITIONS"])
# ADM load the correct mask.
_, mx, survey = main_cmx_or_sv(data, scnd=True)
log.info("Loading mask for {} survey".format(survey))
scnd_mask = mx[3]
# ADM construct the output full and reduced file name.
filename = find_target_files(targdir, dr=drint, flavor="targets",
survey=survey, obscon=obscon, nohp=True)
filenam = os.path.splitext(os.path.basename(filename))[0]
# ADM write out the file of matches for every secondary bit.
scxoutdir = os.path.join(scxdir, 'outdata', filenam)
if obscon is not None:
scxoutdir = os.path.join(scxoutdir, obscon.lower())
os.makedirs(scxoutdir, exist_ok=True)
# ADM and write out the information for each bit.
for name in scnd_mask.names():
# ADM construct the output file name.
fn = "{}.fits".format(scnd_mask[name].filename)
scxfile = os.path.join(scxoutdir, fn)
# ADM retrieve just the data with this bit set.
ii = (scnd_target_init & scnd_mask[name]) != 0
# ADM only proceed to the write stage if there are targets.
if np.sum(ii) > 0:
# ADM to reorder to match the original input order.
order = np.argsort(scnd_order[ii])
# ADM write to file.
fitsio.write(scxfile, smalldata[ii][order],
extname='TARGETS', header=hdr, clobber=True)
log.info('Info for {} secondaries written to {}'
.format(np.sum(ii), scxfile))
# ADM make necessary directories for the file, if they don't exist.
os.makedirs(os.path.dirname(filename), exist_ok=True)
# ADM standalone secondaries have PRIORITY_INIT > -1 and
# ADM release before DR1 (release < 1000).
from desitarget.targets import decode_targetid
objid, brickid, release, mock, sky, gaiadr = decode_targetid(data["TARGETID"])
ii = (release < 1000) & (data["PRIORITY_INIT"] > -1)
# ADM ...write them out.
fitsio.write(filename, data[ii],
extname='SCND_TARGETS', header=hdr, clobber=True)
return np.sum(ii), filename
def write_skies(targdir, data, indir=None, indir2=None, supp=False,
apertures_arcsec=None, nskiespersqdeg=None, nside=None,
nsidefile=None, hpxlist=None, extra=None, mock=False):
"""Write a target catalogue of sky locations.
Parameters
----------
targdir : :class:`str`
Path to output target selection directory (the directory
structure and file name are built on-the-fly from other inputs).
data : :class:`~numpy.ndarray`
Array of skies to write to file.
indir, indir2 : :class:`str`, optional
Name of input Legacy Survey Data Release directory/directories,
write to header of output file if passed (and if not None).
supp : :class:`bool`, optional, defaults to ``False``
Written to the header of the output file to indicate whether
this is a file of supplemental skies (sky locations that are
outside the Legacy Surveys footprint).
apertures_arcsec : :class:`list` or `float`, optional
list of aperture radii in arcseconds; if passed (and not None), each
aperture is written as an individual line in the output file header.
nskiespersqdeg : :class:`float`, optional
Number of sky locations generated per sq. deg., write to header
of output file if passed (and if not None).
nside: :class:`int`, optional
If passed, add a column to the skies array populated with
HEALPixels at resolution `nside`.
nsidefile : :class:`int`, optional, defaults to `None`
Passed to indicate in the output file header that the targets
have been limited to only certain HEALPixels at a given
nside. Used in conjunction with `hpxlist`.
hpxlist : :class:`list`, optional, defaults to `None`
Passed to indicate in the output file header that the targets
have been limited to only this list of HEALPixels. Used in
conjunction with `nsidefile`.
extra : :class:`dict`, optional
If passed (and not None), write these extra dictionary keys and
values to the output header.
mock : :class:`bool`, optional, defaults to ``False``.
If ``True`` then construct the file path for mock sky target catalogs.
"""
nskies = len(data)
# ADM use RELEASE to find the release string for the input skies.
if not supp:
drint = np.max(data['RELEASE']//1000)
drstring = 'dr'+str(drint)
else:
drint = None
drstring = "supp"
# - Create header to include versions, etc.
hdr = fitsio.FITSHDR()
depend.setdep(hdr, 'desitarget', desitarget_version)
depend.setdep(hdr, 'desitarget-git', gitversion())
if indir is not None:
depend.setdep(hdr, 'input-data-release', indir)
# ADM note that if 'dr' is not in the indir DR
# ADM directory structure, garbage will
# ADM be rewritten gracefully in the header.
drstring = 'dr'+indir.split('dr')[-1][0]
depend.setdep(hdr, 'photcat', drstring)
if indir2 is not None:
depend.setdep(hdr, 'input-data-release-2', indir2)
if apertures_arcsec is not None:
for i, ap in enumerate(apertures_arcsec):
apname = "AP{}".format(i)
apsize = ap
hdr[apname] = apsize
hdr['SUPP'] = supp
if nskiespersqdeg is not None:
hdr['NPERSDEG'] = nskiespersqdeg
# ADM add HEALPix column, if requested by input.
if nside is not None:
theta, phi = np.radians(90-data["DEC"]), np.radians(data["RA"])
hppix = hp.ang2pix(nside, theta, phi, nest=True)
data = rfn.append_fields(data, 'HPXPIXEL', hppix, usemask=False)
hdr['HPXNSIDE'] = nside
hdr['HPXNEST'] = True
# ADM populate SUBPRIORITY with a reproducible random float.
if "SUBPRIORITY" in data.dtype.names:
# ADM ensure different SUBPRIORITIES for supp/standard files.
if supp:
np.random.seed(626)
else:
np.random.seed(616)
data["SUBPRIORITY"] = np.random.random(nskies)
# ADM add the extra dictionary to the header.
if extra is not None:
for key in extra:
hdr[key] = extra[key]
# ADM record whether this file has been limited to only certain HEALPixels.
if hpxlist is not None or nsidefile is not None:
# ADM hpxlist and nsidefile need to be passed together.
if hpxlist is None or nsidefile is None:
msg = 'Both hpxlist (={}) and nsidefile (={}) need to be set' \
.format(hpxlist, nsidefile)
log.critical(msg)
raise ValueError(msg)
hdr['FILENSID'] = nsidefile
hdr['FILENEST'] = True
# ADM warn if we've stored a pixel string that is too long.
_check_hpx_length(hpxlist, warning=True)
hdr['FILEHPX'] = hpxlist
else:
# ADM set the hp part of the output file name to "X".
hpxlist = "X"
# ADM construct the output file name.
if mock:
filename = find_target_files(targdir, flavor='sky', hp=hpxlist,
mock=mock, nside=nside)
else:
filename = find_target_files(targdir, dr=drint, flavor="skies",
hp=hpxlist, supp=supp, mock=mock,
nside=nside)
# ADM create necessary directories, if they don't exist.
os.makedirs(os.path.dirname(filename), exist_ok=True)
fitsio.write(filename+'.tmp', data, extname='SKY_TARGETS', header=hdr, clobber=True)
os.rename(filename+'.tmp', filename)
return len(data), filename
def write_gfas(targdir, data, indir=None, indir2=None, nside=None,
nsidefile=None, hpxlist=None, extra=None):
"""Write a catalogue of Guide/Focus/Alignment targets.
Parameters
----------
targdir : :class:`str`
Path to output target selection directory (the directory
structure and file name are built on-the-fly from other inputs).
data : :class:`~numpy.ndarray`
Array of GFAs to write to file.
indir, indir2 : :class:`str`, optional, defaults to None.
Legacy Survey Data Release directory or directories, write to
header of output file if passed (and if not None).
nside: :class:`int`, defaults to None.
If passed, add a column to the GFAs array populated with
HEALPixels at resolution `nside`.
nsidefile : :class:`int`, optional, defaults to `None`
Passed to indicate in the output file header that the targets
have been limited to only certain HEALPixels at a given
nside. Used in conjunction with `hpxlist`.
hpxlist : :class:`list`, optional, defaults to `None`
Passed to indicate in the output file header that the targets
have been limited to only this list of HEALPixels. Used in
conjunction with `nsidefile`.
extra : :class:`dict`, optional
If passed (and not None), write these extra dictionary keys and
values to the output header.
"""
# ADM if passed, use the indir to determine the Data Release
# ADM integer and string for the input targets.
try:
drint = int(indir.split("dr")[1][0])
drstring = 'dr'+str(drint)
except (ValueError, IndexError, AttributeError):
drint = None
drstring = "X"
# ADM rename 'TYPE' to 'MORPHTYPE'.
data = rfn.rename_fields(data, {'TYPE': 'MORPHTYPE'})
# ADM create header to include versions, etc.
hdr = fitsio.FITSHDR()
depend.setdep(hdr, 'desitarget', desitarget_version)
depend.setdep(hdr, 'desitarget-git', gitversion())
if indir is not None:
depend.setdep(hdr, 'input-data-release', indir)
# ADM note that if 'dr' is not in the indir DR
# ADM directory structure, garbage will
# ADM be rewritten gracefully in the header.
drstring = 'dr'+indir.split('dr')[-1][0]
depend.setdep(hdr, 'photcat', drstring)
if indir2 is not None:
depend.setdep(hdr, 'input-data-release-2', indir2)
# ADM add the extra dictionary to the header.
if extra is not None:
for key in extra:
hdr[key] = extra[key]
# ADM add HEALPix column, if requested by input.
if nside is not None:
theta, phi = np.radians(90-data["DEC"]), np.radians(data["RA"])
hppix = hp.ang2pix(nside, theta, phi, nest=True)
data = rfn.append_fields(data, 'HPXPIXEL', hppix, usemask=False)
hdr['HPXNSIDE'] = nside
hdr['HPXNEST'] = True
# ADM record whether this file has been limited to only certain HEALPixels.
if hpxlist is not None or nsidefile is not None:
# ADM hpxlist and nsidefile need to be passed together.
if hpxlist is None or nsidefile is None:
msg = 'Both hpxlist (={}) and nsidefile (={}) need to be set' \
.format(hpxlist, nsidefile)
log.critical(msg)
raise ValueError(msg)
hdr['FILENSID'] = nsidefile
hdr['FILENEST'] = True
# ADM warn if we've stored a pixel string that is too long.
_check_hpx_length(hpxlist, warning=True)
hdr['FILEHPX'] = hpxlist
else:
# ADM set the hp part of the output file name to "X".
hpxlist = "X"
# ADM construct the output file name.
filename = find_target_files(targdir, dr=drint, flavor="gfas", hp=hpxlist)
# ADM create necessary directories, if they don't exist.
os.makedirs(os.path.dirname(filename), exist_ok=True)
fitsio.write(filename, data, extname='GFA_TARGETS', header=hdr, clobber=True)
return len(data), filename
def write_randoms(targdir, data, indir=None, hdr=None, nside=None, supp=False,
nsidefile=None, hpxlist=None, resolve=True, extra=None):
"""Write a catalogue of randoms and associated pixel-level info.
Parameters
----------
targdir : :class:`str`
Path to output target selection directory (the directory
structure and file name are built on-the-fly from other inputs).
data : :class:`~numpy.ndarray`
Array of randoms to write to file.
indir : :class:`str`, optional, defaults to None
Name of input Legacy Survey Data Release directory, write to
header of output file if passed (and if not None).
hdr : :class:`str`, optional, defaults to `None`
If passed, use this header to start the header for `filename`.
nside: :class:`int`
If passed, add a column to the randoms array populated with
HEALPixels at resolution `nside`.
supp : :class:`bool`, optional, defaults to ``False``
Written to the header of the output file to indicate whether
this is a supplemental file (i.e. random locations that are
outside the Legacy Surveys footprint).
nsidefile : :class:`int`, optional, defaults to `None`
Passed to indicate in the output file header that the targets
have been limited to only certain HEALPixels at a given
nside. Used in conjunction with `hpxlist`.
hpxlist : :class:`list`, optional, defaults to `None`
Passed to indicate in the output file header that the targets
have been limited to only this list of HEALPixels. Used in
conjunction with `nsidefile`.
resolve : :class:`bool`, optional, defaults to ``True``
Written to the output file header as `RESOLVE`.
extra : :class:`dict`, optional
If passed (and not None), write these extra dictionary keys and
values to the output header.
"""
# ADM create header to include versions, etc. If a `hdr` was
# ADM passed, then use it, if not then create a new header.
if hdr is None:
hdr = fitsio.FITSHDR()
depend.setdep(hdr, 'desitarget', desitarget_version)
depend.setdep(hdr, 'desitarget-git', gitversion())
if indir is not None:
if supp:
depend.setdep(hdr, 'input-random-catalog', indir)
else:
depend.setdep(hdr, 'input-data-release', indir)
# ADM use RELEASE to find the release string for the input randoms.
try:
drint = int(indir.split("dr")[1][0])
drstring = 'dr'+str(drint)
depend.setdep(hdr, 'photcat', drstring)
except (ValueError, IndexError, AttributeError):
drint = None
# ADM add HEALPix column, if requested by input.
if nside is not None:
theta, phi = np.radians(90-data["DEC"]), np.radians(data["RA"])
hppix = hp.ang2pix(nside, theta, phi, nest=True)
data = rfn.append_fields(data, 'HPXPIXEL', hppix, usemask=False)
hdr['HPXNSIDE'] = nside
hdr['HPXNEST'] = True
# ADM note if this is a supplemental (outside-of-footprint) file.
hdr['SUPP'] = supp
# ADM add whether or not the randoms were resolved to the header.
hdr["RESOLVE"] = resolve
# ADM record whether this file has been limited to only certain HEALPixels.
if hpxlist is not None or nsidefile is not None:
# ADM hpxlist and nsidefile need to be passed together.
if hpxlist is None or nsidefile is None:
msg = 'Both hpxlist (={}) and nsidefile (={}) need to be set' \
.format(hpxlist, nsidefile)
log.critical(msg)
raise ValueError(msg)
hdr['FILENSID'] = nsidefile
hdr['FILENEST'] = True
# ADM warn if we've stored a pixel string that is too long.
_check_hpx_length(hpxlist, warning=True)
hdr['FILEHPX'] = hpxlist
else:
# ADM set the hp part of the output file name to "X".
hpxlist = "X"
# ADM add the extra dictionary to the header.
if extra is not None:
for key in extra:
hdr[key] = extra[key]
# ADM retrieve the seed, if it is known.
seed = None
if extra is not None:
for seedy in "seed", "SEED":
if seedy in extra:
seed = extra[seedy]
# ADM construct the output file name.
filename = find_target_files(targdir, dr=drint, flavor="randoms",
hp=hpxlist, resolve=resolve, supp=supp,
seed=seed, nohp=True)
# ADM create necessary directories, if they don't exist.
os.makedirs(os.path.dirname(filename), exist_ok=True)
fitsio.write(filename, data, extname='RANDOMS', header=hdr, clobber=True)
return len(data), filename
def iter_files(root, prefix, ext='fits'):
"""Iterator over files under in `root` directory with given `prefix` and
extension.
"""
if os.path.isdir(root):
for dirpath, dirnames, filenames in os.walk(root, followlinks=True):
for filename in filenames:
if filename.startswith(prefix) and filename.endswith('.'+ext):
yield os.path.join(dirpath, filename)
else:
filename = os.path.basename(root)
if filename.startswith(prefix) and filename.endswith('.'+ext):
yield root
def list_sweepfiles(root):
"""Return a list of sweep files found under `root` directory.
"""
# ADM check for duplicate files in case the listing was run
# ADM at too low a level in the directory structure.
check = [os.path.basename(x) for x in iter_sweepfiles(root)]
if len(check) != len(set(check)):
log.error("Duplicate sweep files in root directory!")
return [x for x in iter_sweepfiles(root)]
def iter_sweepfiles(root):
"""Iterator over all sweep files found under root directory.
"""
return iter_files(root, prefix='sweep', ext='fits')
def list_targetfiles(root):
"""Return a list of target files found under `root` directory.
"""
# ADM catch case where a file was sent instead of a directory.
if os.path.isfile(root):
return [root]
allfns = glob(os.path.join(root, '*target*fits'))
fns, nfns = np.unique(allfns, return_counts=True)
if np.any(nfns > 1):
badfns = fns[nfns > 1]
msg = "Duplicate target files ({}) beneath root directory {}:".format(
badfns, root)
log.error(msg)
raise SyntaxError(msg)
return allfns
def list_tractorfiles(root):
"""Return a list of tractor files found under `root` directory.
"""
# ADM check for duplicate files in case the listing was run
# ADM at too low a level in the directory structure.
check = [os.path.basename(x) for x in iter_tractorfiles(root)]
if len(check) != len(set(check)):
log.error("Duplicate Tractor files in root directory!")
return [x for x in iter_tractorfiles(root)]
def iter_tractorfiles(root):
"""Iterator over all tractor files found under `root` directory.
Parameters
----------
root : :class:`str`
Path to start looking. Can be a directory or a single file.
Returns
-------
iterable
An iterator over the full paths of tractor files found under `root`.
Examples
--------
>>> for filename in iter_tractorfiles('./'):
>>> print(brickname_from_filename(filename), filename)
"""
return iter_files(root, prefix='tractor', ext='fits')
def brickname_from_filename(filename):
"""Parse `filename` to check if this is a tractor brick file.
Parameters
----------
filename : :class:`str`
Name of a tractor brick file.
Returns
-------
:class:`str`
Name of the brick in the file name.
Raises
------
ValueError
If the filename does not appear to be a valid tractor brick file.
"""
if not filename.endswith('.fits'):
raise ValueError("Invalid tractor brick file: {}!".format(filename))
#
# Match filename tractor-0003p027.fits -> brickname 0003p027.
# Also match tractor-00003p0027.fits, just in case.
#
match = re.search(r"tractor-(\d{4,5}[pm]\d{3,4})\.fits",
os.path.basename(filename))
if match is None:
raise ValueError("Invalid tractor brick file: {}!".format(filename))
return match.group(1)
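# Usage illustration (added commentary):
# brickname_from_filename('tractor-0003p027.fits') -> '0003p027'
# brickname_from_filename('tractor-00003p0027.fits') -> '00003p0027'
# Any name not matching the tractor-<brick>.fits pattern raises ValueError.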
def brickname_from_filename_with_prefix(filename, prefix=''):
"""Parse `filename` to check if this is a brick file with a given prefix.
Parameters
----------
filename : :class:`str`
Full name of a brick file.
prefix : :class:`str`
Optional part of filename immediately preceding the brickname.
Returns
-------
:class:`str`
Name of the brick in the file name.
Raises
------
ValueError
If the filename does not appear to be a valid brick file.
"""
if not filename.endswith('.fits'):
raise ValueError("Invalid galaxia mock brick file: {}!".format(filename))
#
# Match filename tractor-0003p027.fits -> brickname 0003p027.
# Also match tractor-00003p0027.fits, just in case.
#
match = re.search(r"%s_(\d{4,5}[pm]\d{3,4})\.fits" % (prefix),
os.path.basename(filename))
if match is None:
raise ValueError("Invalid galaxia mock brick file: {}!".format(filename))
return match.group(1)
def check_fitsio_version(version='0.9.8'):
"""fitsio_ prior to 0.9.8rc1 has a bug parsing boolean columns.
.. _fitsio: https://pypi.python.org/pypi/fitsio
Parameters
----------
version : :class:`str`, optional
Default '0.9.8'. Having this parameter allows future-proofing and
easier testing.
Raises
------
ImportError
If the fitsio version is insufficiently recent.
"""
from distutils.version import LooseVersion
#
# LooseVersion doesn't handle rc1 as we want, so also check for 0.9.8xxx.
#
if (
LooseVersion(fitsio.__version__) < LooseVersion(version) and
not fitsio.__version__.startswith(version)
):
raise ImportError(('ERROR: fitsio >{0}rc1 required ' +
'(not {1})!').format(version, fitsio.__version__))
def whitespace_fits_read(filename, **kwargs):
"""Use fitsio_ to read in a file and strip whitespace from all string columns.
.. _fitsio: https://pypi.python.org/pypi/fitsio
Parameters
----------
filename : :class:`str`
Name of the file to be read in by fitsio.
kwargs: arguments that will be passed directly to fitsio.
"""
fitout = fitsio.read(filename, **kwargs)
# ADM if the header=True option was passed then
# ADM the output is the header and the data.
data = fitout
if 'header' in kwargs:
data, header = fitout
# ADM guard against the zero-th extension being read by fitsio.
if data is not None:
# ADM strip any whitespace from string columns.
for colname in data.dtype.names:
kind = data[colname].dtype.kind
if kind == 'U' or kind == 'S':
data[colname] = np.char.rstrip(data[colname])
if 'header' in kwargs:
return data, header
return data
def load_pixweight(inmapfile, nside, pixmap=None):
"""Loads a pixel map from file and resamples to a different HEALPixel resolution (nside)
Parameters
----------
inmapfile : :class:`str`
Name of the file containing the pixel weight map.
nside : :class:`int`
After loading, the array will be resampled to this HEALPix nside.
pixmap: `~numpy.array`, optional, defaults to None
Pass a pixel map instead of loading it from file.
Returns
-------
:class:`~numpy.array`
HEALPixel weight map resampled to the requested nside.
"""
if pixmap is not None:
log.debug('Using input pixel weight map of length {}.'.format(len(pixmap)))
else:
# ADM read in the pixel weights file.
if not os.path.exists(inmapfile):
log.fatal('Input directory does not exist: {}'.format(inmapfile))
raise ValueError
pixmap = fitsio.read(inmapfile)
# ADM determine the file's nside, and flag a warning if the passed nside exceeds it.
npix = len(pixmap)
truenside = hp.npix2nside(len(pixmap))
if truenside < nside:
log.warning("downsampling is fuzzy...Passed nside={}, but file {} is stored at nside={}"
.format(nside, inmapfile, truenside))
# ADM resample the map.
return hp.pixelfunc.ud_grade(pixmap, nside, order_in='NESTED', order_out='NESTED')
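# Usage illustration (added commentary; the file name is hypothetical):
# a map stored at nside=256 (12*256**2 = 786432 pixels) can be resampled to
# nside=64 with wmap = load_pixweight('pixweight.fits', nside=64), giving
# len(wmap) == 49152 == hp.nside2npix(64). Requesting a higher nside than the
# file provides merely repeats pixel values, which is what the warning above
# flags.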
def load_pixweight_recarray(inmapfile, nside, pixmap=None):
"""Like load_pixweight but for a structured array map with multiple columns
Parameters
----------
inmapfile : :class:`str`
Name of the file containing the pixel weight map.
nside : :class:`int`
After loading, the array will be resampled to this HEALPix nside.
pixmap: `~numpy.array`, optional, defaults to None
Pass a pixel map instead of loading it from file.
Returns
-------
:class:`~numpy.array`
HEALPixel weight map with all columns resampled to the requested nside.
Notes
-----
    - Assumes that the passed map is in the NESTED scheme, and outputs to
the NESTED scheme.
- All columns are resampled as the mean of the relevant pixels, except
if a column `HPXPIXEL` is passed. That column is reassigned the appropriate
pixel number at the new nside.
"""
if pixmap is not None:
log.debug('Using input pixel weight map of length {}.'.format(len(pixmap)))
else:
# ADM read in the pixel weights file.
if not os.path.exists(inmapfile):
            log.fatal('Input file does not exist: {}'.format(inmapfile))
raise ValueError
pixmap = fitsio.read(inmapfile)
# ADM determine the file's nside, and flag a warning if the passed nside exceeds it.
    npix = len(pixmap)
    truenside = hp.npix2nside(npix)
if truenside < nside:
log.warning("downsampling is fuzzy...Passed nside={}, but file {} is stored at nside={}"
.format(nside, inmapfile, truenside))
# ADM set up an output array.
nrows = hp.nside2npix(nside)
outdata = | np.zeros(nrows, dtype=pixmap.dtype) | numpy.zeros |
import os
import sys
import time
import math
import torch.nn.functional as F
from datetime import datetime
import random
import logging
from collections import OrderedDict
import numpy as np
import cv2
import torch
from torchvision.utils import make_grid
from shutil import get_terminal_size
from utils.Demosaicing_malvar2004 import demosaicing_CFA_Bayer_Malvar2004
import pdb
import os.path as osp
from torch_similarity.modules import NormalizedCrossCorrelation
import torchvision
from models.archs.arch_util import flow_warp
import matplotlib.pyplot as plt
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
def OrderedYaml():
'''yaml orderedDict support'''
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
def dict_representer(dumper, data):
return dumper.represent_dict(data.items())
def dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
Dumper.add_representer(OrderedDict, dict_representer)
Loader.add_constructor(_mapping_tag, dict_constructor)
return Loader, Dumper
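# Example usage (a minimal sketch; 'train.yml' is a hypothetical option file):
#   Loader, Dumper = OrderedYaml()
#   with open('train.yml', 'r') as f:
#       opt = yaml.load(f, Loader=Loader)   # parsed into an OrderedDict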
####################
# miscellaneous
####################
def discard_module_prefix(state_dict):
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
# pdb.set_trace()
if k[0:6] == 'module':
name = k[7:]
new_state_dict[name] = v
return new_state_dict
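# Example usage (a minimal sketch; 'model.pth' and `net` are hypothetical and
# stand for a checkpoint saved from a DataParallel-wrapped network and the
# bare network it should be loaded into):
#   state_dict = torch.load('model.pth', map_location='cpu')
#   net.load_state_dict(discard_module_prefix(state_dict))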
def get_timestamp():
return datetime.now().strftime('%y%m%d-%H%M%S')
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def mkdirs(paths):
if isinstance(paths, str):
mkdir(paths)
else:
for path in paths:
mkdir(path)
def mkdir_and_rename(path):
if os.path.exists(path):
new_name = path + '_archived_' + get_timestamp()
print('Path already exists. Rename it to [{:s}]'.format(new_name))
logger = logging.getLogger('base')
logger.info('Path already exists. Rename it to [{:s}]'.format(new_name))
os.rename(path, new_name)
os.makedirs(path)
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def setup_logger(logger_name, root, phase, level=logging.INFO, screen=False, tofile=False):
'''set up logger'''
lg = logging.getLogger(logger_name)
formatter = logging.Formatter('%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s',
datefmt='%y-%m-%d %H:%M:%S')
lg.setLevel(level)
# pdb.set_trace()
if tofile:
log_file = os.path.join(root, phase + '_{}.log'.format(get_timestamp()))
fh = logging.FileHandler(log_file, mode='w')
fh.setFormatter(formatter)
lg.addHandler(fh)
if screen:
sh = logging.StreamHandler()
sh.setFormatter(formatter)
lg.addHandler(sh)
####################
# image convert
####################
def crop_border(img_list, crop_border):
"""Crop borders of images
Args:
img_list (list [Numpy]): HWC
        crop_border (int): crop border for each end of height and width
Returns:
(list [Numpy]): cropped image list
"""
if crop_border == 0:
return img_list
else:
return [v[crop_border:-crop_border, crop_border:-crop_border] for v in img_list]
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
'''
Converts a torch Tensor into an image Numpy array
Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
'''
tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # clamp
tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1]
n_dim = tensor.dim()
if n_dim == 4:
n_img = len(tensor)
img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR
elif n_dim == 3:
img_np = tensor.numpy()
img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR
elif n_dim == 2:
img_np = tensor.numpy()
else:
raise TypeError(
'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
if out_type == np.uint8:
img_np = (img_np * 255.0).round()
        # Important. Unlike matlab, numpy.uint8() WILL NOT round by default.
return img_np.astype(out_type)
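# Example usage (a minimal sketch; `output` stands for any CHW tensor in [0, 1]
# produced by a network):
#   output = torch.rand(3, 64, 64)
#   img = tensor2img(output)        # -> (64, 64, 3) uint8 array in BGR order
#   cv2.imwrite('out.png', img)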
def rggb_tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
'''
Converts a torch Tensor into an image Numpy array
Input: 3D(4,H,W), RGGB channel
Output: 3D(H,W,4), np.uint8 (default)
'''
tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # clamp
img_np = tensor.numpy()
img_np = np.transpose(img_np, (1, 2, 0)) # HWC, RGGB
if out_type == np.uint8:
img_np = (img_np * 255.0).round()
        # Important. Unlike matlab, numpy.uint8() WILL NOT round by default.
return img_np.astype(out_type)
def rggb2bgr(tensor, pattern='RGGB'):
mosaic_img = np.zeros((int(tensor.shape[0]*2), int(tensor.shape[1]*2)), dtype=tensor.dtype)
mosaic_img[0::2, 0::2] = tensor[:,:,0]
mosaic_img[0::2, 1::2] = tensor[:,:,1]
mosaic_img[1::2, 0::2] = tensor[:,:,2]
mosaic_img[1::2, 1::2] = tensor[:,:,3]
results = demosaicing_CFA_Bayer_Malvar2004(mosaic_img, pattern)
results = np.clip(results, 0, 1)
results = results[:, :, [2, 1, 0]]
return results
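# Example usage (a minimal sketch; `raw` is a hypothetical packed RGGB patch):
#   raw = np.random.rand(32, 32, 4).astype(np.float32)
#   bgr = rggb2bgr(raw)             # -> (64, 64, 3) float BGR image in [0, 1]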
def rggb2bayer(tensor, pattern='RGGB'):
mosaic_img = np.zeros((int(tensor.shape[0]*2), int(tensor.shape[1]*2)), dtype=tensor.dtype)
mosaic_img[0::2, 0::2] = tensor[:,:,0]
mosaic_img[0::2, 1::2] = tensor[:,:,1]
mosaic_img[1::2, 0::2] = tensor[:,:,2]
mosaic_img[1::2, 1::2] = tensor[:,:,3]
return mosaic_img
def bayer2bgr(tensor, pattern='RGGB'):
results = demosaicing_CFA_Bayer_Malvar2004(tensor, pattern)
results = np.clip(results, 0, 1)
results = results[:, :, [2, 1, 0]]
return results
def rgb2yuv(rgb):
width, height, _ = rgb.shape
    yuv2rgb_matrix = np.matrix([[1, 1, 1], [0, -0.34414, 1.772], [1.402, -0.71414, 0]])
rgb2yuv_matrix = yuv2rgb_matrix.I
full_cutoff = [0.0, 0.5, 0.5]
yuvData = np.array(np.dot(rgb.reshape((width * height, 3)), rgb2yuv_matrix) + full_cutoff).reshape((width, height, 3))
return yuvData
# Function: Convert RGGB raw image to Fake Gray image
def RGGB2Gray(img):
return np.mean(img, 2)
def rgb2NV12(rgb):
rows, cols, _ = rgb.shape
yuv2rgb_matrix = np.matrix([[1, 1, 1], [0, -0.34414, 1.772], [1.402, -0.71414, 0]])
rgb2yuv_matrix = yuv2rgb_matrix.I
print(rgb2yuv_matrix)
# pdb.set_trace()
# full_cutoff = [0.0, 0.5, 0.5]
full_cutoff = np.array([[0.0, 0.5, 0.5]])
yuvData = np.array(np.dot(rgb.reshape((rows * cols, 3)), rgb2yuv_matrix) + full_cutoff).reshape((rows, cols, 3))
Y = yuvData[:,:,0]
U = yuvData[:,:,1]
V = yuvData[:,:,2]
    shrunkU = (U[0::2, 0::2] + U[1::2, 0::2] + U[0::2, 1::2] + U[1::2, 1::2]) * 0.25
    shrunkV = (V[0::2, 0::2] + V[1::2, 0::2] + V[0::2, 1::2] + V[1::2, 1::2]) * 0.25
    UV = np.zeros((rows//2, cols))
    ########################
    UV[:, 0::2] = shrunkU
    UV[:, 1::2] = shrunkV
NV12 = np.vstack((Y, UV))
print(NV12.shape)
# pdb.set_trace()
return yuvData, NV12
def yuv2rgb(yuv):
    rgb = yuv.copy()  # copy so G and B are computed from the original Y channel
rgb[:,:,0] = yuv[:,:,0] + 1.402 * (yuv[:,:,2]-0.5)
rgb[:,:,1] = yuv[:,:,0] - 0.34414 * (yuv[:,:,1]-0.5) - 0.71414*(yuv[:,:,2]-0.5)
rgb[:,:,2] = yuv[:,:,0] + 1.772 * (yuv[:,:,1]-0.5)
return rgb
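# Quick consistency check (a minimal sketch): a random RGB patch should survive
# a round trip through rgb2yuv and yuv2rgb up to floating-point error.
#   rgb = np.random.rand(8, 8, 3)
#   back = yuv2rgb(rgb2yuv(rgb))
#   assert np.allclose(back, rgb, atol=1e-6)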
def save_img(img, img_path, mode='RGB'):
cv2.imwrite(img_path, img)
def save_rgb(file, data_y, data_u, data_v, bit = 10):
img_yuv = np.stack((data_y, data_u, data_v), axis=2)
img_yuv = img_yuv.astype(np.uint16)
if bit == 10:
img_yuv[img_yuv > 1023] = 1023
img_yuv[img_yuv < 0] = 0
img_yuv = img_yuv * 64
elif bit == 12:
img_yuv[img_yuv > 4095] = 4095
img_yuv[img_yuv < 0] = 0
img_yuv = img_yuv * 16
elif bit == 14:
img_yuv[img_yuv > 16383] = 16383
img_yuv[img_yuv < 0] = 0
img_yuv = img_yuv
# pdb.set_trace()
cv2.imwrite(file, img_yuv)
print(file + ' saved')
def DUF_downsample(x, scale=4):
"""Downsamping with Gaussian kernel used in the DUF official code
Args:
x (Tensor, [B, T, C, H, W]): frames to be downsampled.
scale (int): downsampling factor: 2 | 3 | 4.
"""
assert scale in [2, 3, 4], 'Scale [{}] is not supported'.format(scale)
def gkern(kernlen=13, nsig=1.6):
import scipy.ndimage.filters as fi
inp = np.zeros((kernlen, kernlen))
# set element at the middle to one, a dirac delta
inp[kernlen // 2, kernlen // 2] = 1
# gaussian-smooth the dirac, resulting in a gaussian filter mask
return fi.gaussian_filter(inp, nsig)
B, T, C, H, W = x.size()
x = x.view(-1, 1, H, W)
pad_w, pad_h = 6 + scale * 2, 6 + scale * 2 # 6 is the pad of the gaussian filter
r_h, r_w = 0, 0
if scale == 3:
r_h = 3 - (H % 3)
r_w = 3 - (W % 3)
x = F.pad(x, [pad_w, pad_w + r_w, pad_h, pad_h + r_h], 'reflect')
gaussian_filter = torch.from_numpy(gkern(13, 0.4 * scale)).type_as(x).unsqueeze(0).unsqueeze(0)
x = F.conv2d(x, gaussian_filter, stride=scale)
x = x[:, :, 2:-2, 2:-2]
x = x.view(B, T, C, x.size(2), x.size(3))
return x
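# Example usage (a minimal sketch; the tensor shape is arbitrary):
#   frames = torch.rand(1, 5, 3, 64, 64)        # B, T, C, H, W
#   small = DUF_downsample(frames, scale=4)     # -> (1, 5, 3, 16, 16)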
def single_forward(model, inp):
"""PyTorch model forward (single test), it is just a simple warpper
Args:
model (PyTorch model)
inp (Tensor): inputs defined by the model
Returns:
output (Tensor): outputs of the model. float, in CPU
"""
with torch.no_grad():
model_output = model(inp)
if isinstance(model_output, list) or isinstance(model_output, tuple):
output = model_output[0]
else:
output = model_output
output = output.data.float().cpu()
return output
def single_forward_google(model, inp, nmap):
"""PyTorch model forward (single test), it is just a simple warpper
Args:
model (PyTorch model)
inp (Tensor): inputs defined by the model
Returns:
output (Tensor): outputs of the model. float, in CPU
"""
with torch.no_grad():
model_output = model(inp, nmap)
if isinstance(model_output, list) or isinstance(model_output, tuple):
output = model_output[0]
else:
output = model_output
output = output.data.float().cpu()
return output
def single_forward_google_debug(model, inp, nmap):
"""PyTorch model forward (single test), it is just a simple warpper
Args:
model (PyTorch model)
inp (Tensor): inputs defined by the model
Returns:
output (Tensor): outputs of the model. float, in CPU
"""
with torch.no_grad():
model_output, gate_output = model(inp, nmap)
if isinstance(model_output, list) or isinstance(model_output, tuple):
output = model_output[0]
else:
output = model_output
output = output.data.float().cpu()
gate_output = gate_output.data.float().cpu()
return output, gate_output
def print_model_parm_flops(net, input_size, input_num=1, cuda=False):
from torch.autograd import Variable
prods = {}
def save_hook(name):
def hook_per(self, input, output):
prods[name] = np.prod(input[0].shape)
return hook_per
list_1 = []
def simple_hook(self, input, output):
list_1.append(np.prod(input[0].shape))
list_2 = {}
def simple_hook2(self, input, output):
list_2['names'] = np.prod(input[0].shape)
multiply_adds = False
list_conv = []
def conv_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups) * (
2 if multiply_adds else 1)
bias_ops = 1 if self.bias is not None else 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_height * output_width
list_conv.append(flops)
list_linear = []
def linear_hook(self, input, output):
batch_size = input[0].size(0) if input[0].dim() == 2 else 1
weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
bias_ops = self.bias.nelement()
flops = batch_size * (weight_ops + bias_ops)
list_linear.append(flops)
list_bn = []
def bn_hook(self, input, output):
list_bn.append(input[0].nelement())
list_relu = []
def relu_hook(self, input, output):
list_relu.append(input[0].nelement())
list_pooling = []
def pooling_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size * self.kernel_size
bias_ops = 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_height * output_width
list_pooling.append(flops)
def foo(net):
childrens = list(net.children())
if not childrens:
if isinstance(net, torch.nn.Conv2d):
# net.register_forward_hook(save_hook(net.__class__.__name__))
# net.register_forward_hook(simple_hook)
# net.register_forward_hook(simple_hook2)
net.register_forward_hook(conv_hook)
if isinstance(net, torch.nn.Linear):
net.register_forward_hook(linear_hook)
if isinstance(net, torch.nn.BatchNorm2d):
net.register_forward_hook(bn_hook)
if isinstance(net, torch.nn.ReLU):
net.register_forward_hook(relu_hook)
if isinstance(net, torch.nn.MaxPool2d) or isinstance(net, torch.nn.AvgPool2d):
net.register_forward_hook(pooling_hook)
return
for c in childrens:
foo(c)
foo(net)
fn, c, h, w = input_size
if input_num == 1:
input = Variable(torch.rand(fn, c, h, w).unsqueeze(0).float(), requires_grad=True)
if cuda:
input = input.cuda()
out = net(input)[0]
else:
input = []
for i in range(input_num):
input.append(Variable(torch.rand(c, h, w).unsqueeze(0), requires_grad=True))
if cuda:
            input = [x.cuda() for x in input]
if input_num == 2:
out = net(input[0], input[1])[0]
elif input_num == 3:
out = net(input[0], input[1], input[2])[0]
else:
raise Exception("add {} input support".format(input_num))
total_flops = (sum(list_conv) + sum(list_linear) + sum(list_bn) + sum(list_relu) + sum(list_pooling))
print(' + Number of FLOPs: %.4fG' % (total_flops / 1024.0 / 1024 / 1024))
def get_network_description(network):
"""Get the string and total parameters of the network"""
# pdb.set_trace()
# network = network.module
return str(network), sum(map(lambda x: x.numel(), network.parameters()))
def flipx4_forward(model, inp):
"""Flip testing with X4 self ensemble, i.e., normal, flip H, flip W, flip H and W
Args:
model (PyTorch model)
inp (Tensor): inputs defined by the model
Returns:
output (Tensor): outputs of the model. float, in CPU
"""
# normal
output_f = single_forward(model, inp)
# flip W
output = single_forward(model, torch.flip(inp, (-1, )))
output_f = output_f + torch.flip(output, (-1, ))
# flip H
output = single_forward(model, torch.flip(inp, (-2, )))
output_f = output_f + torch.flip(output, (-2, ))
# flip both H and W
output = single_forward(model, torch.flip(inp, (-2, -1)))
output_f = output_f + torch.flip(output, (-2, -1))
return output_f / 4
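# Example usage (a minimal sketch; `model` and `lq` stand for any image-to-image
# network and its input tensor):
#   out = flipx4_forward(model, lq)   # average of the 4 flipped predictions, on CPU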
####################
# metric
####################
def calculate_psnr(img1, img2):
# img1 and img2 have range [0, 255]
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
mse = np.mean((img1 - img2)**2)
if mse == 0:
return float('inf')
return 20 * math.log10(255.0 / math.sqrt(mse))
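# Quick sanity check (a minimal sketch): a uniform error of one grey level on an
# 8-bit image gives MSE = 1 and therefore PSNR = 20*log10(255) ~ 48.13 dB.
#   a = np.zeros((16, 16)); b = np.ones((16, 16))
#   calculate_psnr(a, b)              # ~ 48.13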
def ssim(img1, img2):
C1 = (0.01 * 255)**2
C2 = (0.03 * 255)**2
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
kernel = cv2.getGaussianKernel(11, 1.5)
window = np.outer(kernel, kernel.transpose())
mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid
mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
mu1_sq = mu1**2
mu2_sq = mu2**2
mu1_mu2 = mu1 * mu2
sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2))
return ssim_map.mean()
def calculate_ssim(img1, img2):
'''calculate SSIM
the same outputs as MATLAB's
img1, img2: [0, 255]
'''
if not img1.shape == img2.shape:
raise ValueError('Input images must have the same dimensions.')
if img1.ndim == 2:
return ssim(img1, img2)
elif img1.ndim == 3:
if img1.shape[2] == 3:
ssims = []
for i in range(3):
                ssims.append(ssim(img1[:, :, i], img2[:, :, i]))
return np.array(ssims).mean()
elif img1.shape[2] == 1:
return ssim(np.squeeze(img1), np.squeeze(img2))
else:
raise ValueError('Wrong input image dimensions.')
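# Quick sanity check (a minimal sketch): identical images give an SSIM of 1.
#   img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
#   calculate_ssim(img, img)          # -> 1.0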
class ProgressBar(object):
'''A progress bar which can print the progress
modified from https://github.com/hellock/cvbase/blob/master/cvbase/progress.py
'''
def __init__(self, task_num=0, bar_width=50, start=True):
self.task_num = task_num
max_bar_width = self._get_max_bar_width()
self.bar_width = (bar_width if bar_width <= max_bar_width else max_bar_width)
self.completed = 0
if start:
self.start()
def _get_max_bar_width(self):
terminal_width, _ = get_terminal_size()
max_bar_width = min(int(terminal_width * 0.6), terminal_width - 50)
if max_bar_width < 10:
            print('terminal width is too small ({}), please consider widening the terminal '
                  'for better progressbar visualization'.format(terminal_width))
max_bar_width = 10
return max_bar_width
def start(self):
if self.task_num > 0:
sys.stdout.write('[{}] 0/{}, elapsed: 0s, ETA:\n{}\n'.format(
' ' * self.bar_width, self.task_num, 'Start...'))
else:
sys.stdout.write('completed: 0, elapsed: 0s')
sys.stdout.flush()
self.start_time = time.time()
def update(self, msg='In progress...'):
self.completed += 1
elapsed = time.time() - self.start_time
fps = self.completed / elapsed
if self.task_num > 0:
percentage = self.completed / float(self.task_num)
eta = int(elapsed * (1 - percentage) / percentage + 0.5)
mark_width = int(self.bar_width * percentage)
bar_chars = '>' * mark_width + '-' * (self.bar_width - mark_width)
sys.stdout.write('\033[2F') # cursor up 2 lines
sys.stdout.write('\033[J') # clean the output (remove extra chars since last display)
sys.stdout.write('[{}] {}/{}, {:.1f} task/s, elapsed: {}s, ETA: {:5}s\n{}\n'.format(
bar_chars, self.completed, self.task_num, fps, int(elapsed + 0.5), eta, msg))
else:
sys.stdout.write('completed: {}, elapsed: {}s, {:.1f} tasks/s'.format(
self.completed, int(elapsed + 0.5), fps))
sys.stdout.flush()
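# Example usage (a minimal sketch):
#   pbar = ProgressBar(task_num=100)
#   for _ in range(100):
#       pass                          # do one unit of work here
#       pbar.update('processing')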
def takeFirst(elem):
return elem[0]
def cal_lr_fea(fea, DW_model):
B, N, C, H, W = fea.size()
fea = fea.view(-1, C, H, W)
LR_fea = DW_model(fea)
LR_fea = LR_fea.view(B, N, LR_fea.shape[1], LR_fea.shape[2], LR_fea.shape[3])
return LR_fea
def search_patch_NCC_2d_pymaid(image_patch, nmpa_patch, imgs_in_pad, img_in_nmap_pad, \
start_x, start_y, small_scale, search_region):
B, N, C, PsizeH, PsizeW = image_patch.shape
_, _, _, H, W = imgs_in_pad.shape
center_idx = N//2
ncc_func = NormalizedCrossCorrelation(return_map=False,reduction='mean')
    ## recreate output
image_patch_new = image_patch.clone()
nmpa_patch_new = nmpa_patch.clone()
## downsampling the image patches
# scale = 8
scale = small_scale
image_patch_small = torch.reshape(image_patch, (B*N,C,PsizeH,PsizeW))
image_patch_small = F.interpolate(image_patch_small, scale_factor=1/scale, mode='bilinear', align_corners=False)
imgs_in_pad_small = torch.reshape(imgs_in_pad, (B*N,C,H,W))
imgs_in_pad_small = F.interpolate(imgs_in_pad_small, scale_factor=1/scale, mode='bilinear', align_corners=False)
_,_,newPsizeH,newPsizeW = image_patch_small.shape
_,_,newH,newW = imgs_in_pad_small.shape
image_patch_small = torch.reshape(image_patch_small,(B, N, C, newPsizeH, newPsizeW))
imgs_in_pad_small = torch.reshape(imgs_in_pad_small,(B, N, C, newH, newW))
#search_region = int(min(newH, newW)/10)
start_x = int(start_x/scale)
start_y = int(start_y/scale)
center_frame = image_patch_small[:,center_idx,:,:,:].clone()
thr = -5
# cadicate_idx_all = []
for batch in range(B):
start_x_current = start_x
start_y_current = start_y
        # backward to the first frame
for fr in range(center_idx-1,-1,-1):
# print(fr)
if fr != center_idx:
step = 2
cadicate_idx = cal_candidate_idx(search_region, step, start_x_current, start_y_current, batch, \
fr, newH, newW, imgs_in_pad_small, center_frame, newPsizeH, ncc_func)
new_start_x = int(cadicate_idx[0][1])
new_start_y = int(cadicate_idx[0][2])
search_region_small = step
# if cadicate_idx[0][0] > 0.6:
cadicate_idx = cal_candidate_idx(search_region_small, 1, new_start_x, new_start_y, batch, \
fr, newH, newW, imgs_in_pad_small, center_frame, newPsizeH, ncc_func)
# pdb.set_trace()
# cadicate_idx_all.append(cadicate_idx)
if len(cadicate_idx)>0:
if cadicate_idx[0][0] > thr:
nearest_x = int(cadicate_idx[0][1]*scale)
nearest_y = int(cadicate_idx[0][2]*scale)
start_x_current = int(cadicate_idx[0][1])
start_y_current = int(cadicate_idx[0][2])
else:
nearest_x = int(start_x*scale)
nearest_y = int(start_y*scale)
image_patch_new[batch,fr,...] = \
imgs_in_pad[batch,fr,:,nearest_x:nearest_x+PsizeH,nearest_y:nearest_y+PsizeW].clone()
nmpa_patch_new[batch,fr,...] = \
img_in_nmap_pad[batch,fr,:,nearest_x:nearest_x+PsizeH,nearest_y:nearest_y+PsizeW].clone()
# forward to the last frame
start_x_current = start_x
start_y_current = start_y
for fr in range(center_idx+1,N):
# print(fr)
if fr != center_idx:
step = 2
cadicate_idx = cal_candidate_idx(search_region, step, start_x_current, start_y_current, batch, \
fr, newH, newW, imgs_in_pad_small, center_frame, newPsizeH, ncc_func)
new_start_x = int(cadicate_idx[0][1])
new_start_y = int(cadicate_idx[0][2])
search_region_small = step
# if cadicate_idx[0][0] > 0.6:
cadicate_idx = cal_candidate_idx(search_region_small, 1, new_start_x, new_start_y, batch, \
fr, newH, newW, imgs_in_pad_small, center_frame, newPsizeH, ncc_func)
# pdb.set_trace()
# cadicate_idx_all.append(cadicate_idx)
if len(cadicate_idx)>0:
if cadicate_idx[0][0] > thr:
nearest_x = int(cadicate_idx[0][1]*scale)
nearest_y = int(cadicate_idx[0][2]*scale)
start_x_current = int(cadicate_idx[0][1])
start_y_current = int(cadicate_idx[0][2])
else:
nearest_x = int(start_x*scale)
nearest_y = int(start_y*scale)
image_patch_new[batch,fr,...] = \
imgs_in_pad[batch,fr,:,nearest_x:nearest_x+PsizeH,nearest_y:nearest_y+PsizeW].clone()
nmpa_patch_new[batch,fr,...] = \
img_in_nmap_pad[batch,fr,:,nearest_x:nearest_x+PsizeH,nearest_y:nearest_y+PsizeW].clone()
# pdb.set_trace()
return image_patch_new, nmpa_patch_new
def search_patch_NCC_2d_pymaid_wDSNet(image_patch, nmpa_patch, imgs_in_pad, img_in_nmap_pad, \
lr_features,\
start_x, start_y, small_scale, search_region):
B, N, C, PsizeH, PsizeW = image_patch.shape
_, _, _, H, W = imgs_in_pad.shape
center_idx = N//2
ncc_func = NormalizedCrossCorrelation(return_map=False,reduction='mean')
    #----- recreate output -----
image_patch_new = image_patch.clone()
nmpa_patch_new = nmpa_patch.clone()
#----- select feature patch -----
scale = small_scale
start_x = int(start_x/scale)
start_y = int(start_y/scale)
center_feature = lr_features[:, center_idx, :, \
start_x:start_x+PsizeH//scale, \
start_y:start_y+PsizeW//scale].clone()
## downsampling the image patches
_,_,newPsizeH,newPsizeW = center_feature.shape
_,_,_,newH,newW = lr_features.shape
thr = -5
cadicate_idx_all = []
for batch in range(B):
start_x_current = start_x
start_y_current = start_y
        # backward to the first frame
for fr in range(center_idx-1,-1,-1):
if fr != center_idx:
step = 2
cadicate_idx = cal_candidate_idx_wDSNet(search_region, step, start_x_current, start_y_current, batch, \
fr, newH, newW, lr_features, center_feature, newPsizeH, ncc_func)
new_start_x = int(cadicate_idx[0][1])
new_start_y = int(cadicate_idx[0][2])
search_region_small = step
cadicate_idx = cal_candidate_idx_wDSNet(search_region_small, 1, new_start_x, new_start_y, batch, \
fr, newH, newW, lr_features, center_feature, newPsizeH, ncc_func)
cadicate_idx_all.append(cadicate_idx)
if len(cadicate_idx)>0:
if cadicate_idx[0][0] > thr:
nearest_x = int(cadicate_idx[0][1]*scale)
nearest_y = int(cadicate_idx[0][2]*scale)
start_x_current = int(cadicate_idx[0][1])
start_y_current = int(cadicate_idx[0][2])
else:
nearest_x = int(start_x*scale)
nearest_y = int(start_y*scale)
image_patch_new[batch,fr,...] = \
imgs_in_pad[batch,fr,:,nearest_x:nearest_x+PsizeH,nearest_y:nearest_y+PsizeW].clone()
nmpa_patch_new[batch,fr,...] = \
img_in_nmap_pad[batch,fr,:,nearest_x:nearest_x+PsizeH,nearest_y:nearest_y+PsizeW].clone()
# forward to the last frame
start_x_current = start_x
start_y_current = start_y
for fr in range(center_idx+1,N):
# print(fr)
if fr != center_idx:
step = 2
cadicate_idx = cal_candidate_idx_wDSNet(search_region, step, start_x_current, start_y_current, batch, \
fr, newH, newW, lr_features, center_feature, newPsizeH, ncc_func)
new_start_x = int(cadicate_idx[0][1])
new_start_y = int(cadicate_idx[0][2])
search_region_small = step
cadicate_idx = cal_candidate_idx_wDSNet(search_region_small, 1, new_start_x, new_start_y, batch, \
fr, newH, newW, lr_features, center_feature, newPsizeH, ncc_func)
cadicate_idx_all.append(cadicate_idx)
if len(cadicate_idx)>0:
if cadicate_idx[0][0] > thr:
nearest_x = int(cadicate_idx[0][1]*scale)
nearest_y = int(cadicate_idx[0][2]*scale)
start_x_current = int(cadicate_idx[0][1])
start_y_current = int(cadicate_idx[0][2])
else:
nearest_x = int(start_x*scale)
nearest_y = int(start_y*scale)
image_patch_new[batch,fr,...] = \
imgs_in_pad[batch,fr,:,nearest_x:nearest_x+PsizeH,nearest_y:nearest_y+PsizeW].clone()
nmpa_patch_new[batch,fr,...] = \
img_in_nmap_pad[batch,fr,:,nearest_x:nearest_x+PsizeH,nearest_y:nearest_y+PsizeW].clone()
# pdb.set_trace()
return image_patch_new, nmpa_patch_new
def search_patch_NCC_2d_pymaid_wDSNet_wE2E(image_patch, nmpa_patch, imgs_in_pad, img_in_nmap_pad, \
lr_features,\
start_x, start_y, small_scale, search_region,\
gt_patch_raw, gt_raws, opti_step):
B, N, C, PsizeH, PsizeW = image_patch.shape
_, _, _, H, W = imgs_in_pad.shape
center_idx = N//2
ncc_func = NormalizedCrossCorrelation(return_map=False,reduction='mean')
    #----- recreate output -----
image_patch_new = image_patch.clone()
nmpa_patch_new = nmpa_patch.clone()
ncc_scores_new = []
ncc_scores_nor_new = []
one_hot_gt = []
# pdb.set_trace()
# ----- resize image patch ------
    ## Used to calculate at the patch level
## downsampling the image patches
scale = small_scale
image_patch_small = torch.reshape(gt_patch_raw, (B*N,C,PsizeH,PsizeW))
image_patch_small = F.interpolate(image_patch_small, scale_factor=1/scale, mode='bilinear', align_corners=False)
imgs_in_pad_small = torch.reshape(gt_raws, (B*N,C,H,W))
imgs_in_pad_small = F.interpolate(imgs_in_pad_small, scale_factor=1/scale, mode='bilinear', align_corners=False)
_,_,newPsizeH,newPsizeW = image_patch_small.shape
_,_,newH,newW = imgs_in_pad_small.shape
image_patch_small = torch.reshape(image_patch_small,(B, N, C, newPsizeH, newPsizeW))
imgs_in_pad_small = torch.reshape(imgs_in_pad_small,(B, N, C, newH, newW))
center_frame = image_patch_small[:,center_idx,:,:,:].clone()
#----- select feature patch -----
scale = small_scale
start_x = int(start_x/scale)
start_y = int(start_y/scale)
center_feature = lr_features[:, center_idx, :, \
start_x:start_x+PsizeH//scale, \
start_y:start_y+PsizeW//scale].clone()
## downsampling the image patches
# scale = 8
_,_,newPsizeH,newPsizeW = center_feature.shape
_,_,_,newH,newW = lr_features.shape
thr = -5
for batch in range(B):
start_x_current = start_x
start_y_current = start_y
        # backward to the first frame
for fr in range(center_idx-1,-1,-1):
# print(fr)
if fr != center_idx:
step = 2
select_patch, select_np_patch, ncc_scores_temp, ncc_scores_nor_temp, one_hot_temp = \
cal_candidate_idx_wDSNet_E2E(search_region, step, \
start_x_current, start_y_current, batch, \
fr, newH, newW, lr_features, center_feature, \
newPsizeH, imgs_in_pad, img_in_nmap_pad, scale, ncc_func, \
imgs_in_pad_small, image_patch_small, center_frame, opti_step)
image_patch_new[batch,fr,...] = select_patch.clone()
nmpa_patch_new[batch,fr,...] = select_np_patch.clone()
ncc_scores_new.append(ncc_scores_temp)
ncc_scores_nor_new.append(ncc_scores_nor_temp)
one_hot_gt.append(one_hot_temp)
# forward to the last frame
start_x_current = start_x
start_y_current = start_y
for fr in range(center_idx+1,N):
# print(fr)
if fr != center_idx:
step = 2
select_patch, select_np_patch, ncc_scores_temp, ncc_scores_nor_temp, one_hot_temp = \
cal_candidate_idx_wDSNet_E2E(search_region, step, \
start_x_current, start_y_current, batch, \
fr, newH, newW, lr_features, center_feature, \
newPsizeH, imgs_in_pad, img_in_nmap_pad, scale, ncc_func, \
imgs_in_pad_small, image_patch_small, center_frame, opti_step)
image_patch_new[batch,fr,...] = select_patch.clone()
nmpa_patch_new[batch,fr,...] = select_np_patch.clone()
ncc_scores_new.append(ncc_scores_temp)
ncc_scores_nor_new.append(ncc_scores_nor_temp)
one_hot_gt.append(one_hot_temp)
return image_patch_new, nmpa_patch_new, ncc_scores_new, ncc_scores_nor_new, one_hot_gt
def cal_candidate_idx(search_region, step, start_x, start_y, batch, fr, newH, newW, imgs_in, patch_in, patch_size, ncc_func):
cadicate_idx = []
center_patch_all = []
candi_patch_all = []
offset_all = []
for x_offset in range(-search_region, search_region, step):
x_temp = start_x + x_offset
x_temp_end = start_x + x_offset + patch_size
if x_temp<0 or x_temp_end>=newH:
continue
for y_offset in range(-search_region, search_region, step):
y_temp = start_y + y_offset
y_temp_end = start_y + y_offset + patch_size
if y_temp<0 or y_temp_end>=newW:
continue
patch_temp = imgs_in[batch,fr:fr+1,:,x_temp:x_temp_end,y_temp:y_temp_end]
# patch_temp = torch.mean(patch_temp,dim=1,keepdim=True)#.pow(1/2.2)
candi_patch_all.append(patch_temp)
center_frame_temp = patch_in[batch:batch+1,:,:,:]
# center_frame_temp = torch.mean(center_frame_temp,dim=1,keepdim=True)#.pow(1/2.2)
center_patch_all.append(center_frame_temp)
offset_all.append( | np.array([x_temp, y_temp]) | numpy.array |
from __future__ import print_function, division
import numpy as np
import matplotlib.pylab as plt
import astropy.units as u
from astropy import log
from astropy.utils.console import ProgressBar
import pyspeckit
import os
# imports for the test fiteach redefinition
import time
import itertools
from astropy.extern.six import string_types
# gotta catch 'em all!
class AllFixedException(Exception):
""" Zero degrees of freedom. """
pass
class NanGuessesException(Exception):
""" Guesses have NaN values."""
pass
class SnrCutException(Exception):
""" Pixel is below SNR threshold. """
pass
class NanSnrAtPixel(Exception):
""" S/N at pixel has a NaN value. """
pass
class SubCube(pyspeckit.Cube):
"""
An extension of Cube, tinkered to be an instance of MultiCube, from which
it receives references to instances of pyspeckit.Cube that do not depend
on a spectral model chosen (so that parent MultiCube doesn't weigh so much)
Is designed to have methods that operate within a single spectral model.
"""
def __init__(self, *args, **kwargs):
super(SubCube, self).__init__(*args, **kwargs)
# because that UnitConversionError pops up way too often
if self.xarr.velocity_convention is None:
self.xarr.velocity_convention = 'radio'
# so I either define some things as `None`
# or I'll have to call hasattr or them...
# TODO: which is a more Pythonic approach?
# A: probably the hasattr method, see here:
# http://programmers.stackexchange.com/questions/
# 254576/is-it-a-good-practice-to-declare-instance
# -variables-as-none-in-a-class-in-python
self.guess_grid = None
self.model_grid = None
# TODO: investigate whether pyspeckit's #179 needs to be hacked
# around inside either update_model or make_guess_grid methods
def update_model(self, fit_type='gaussian'):
"""
Tie a model to a SubCube. Should work for all the standard
fitters; others can be added with Cube.add_fitter method.
"""
try:
allowed_fitters = self.specfit.Registry.multifitters
self.specfit.fitter = allowed_fitters[fit_type]
except KeyError:
raise ValueError('Unsupported fit type: %s\n'
'Choose one from %s'
% (fit_type, allowed_fitters.keys()))
log.info("Selected %s model" % fit_type)
self.specfit.fittype = fit_type
self.fittype = fit_type
def make_guess_grid(self, minpars, maxpars, finesse, fixed=None,
limitedmin=None, limitedmax=None, **kwargs):
"""
Given parameter ranges and a finesse parameter, generate a grid of
guesses in a parameter space to be iterated upon in self.best_guess
Maybe if parlimits arg is None we can look into parinfo?
Parameters
----------
minpars : an iterable containing minimal parameter values
maxpars : an iterable containing maximal parameter values
finesse : an integer or 1xNpars list/array setting the size
of cells between minimal and maximal values in
the resulting guess grid
fixed : an iterable of booleans setting whether or not to fix the
fitting parameters. Will be passed to Cube.fiteach, defaults
to an array of False-s.
limitedmin : an iterable of booleans controlling if the fit fixed
the minimal boundary of from minpars.
limitedmax : an iterable of booleans controlling if the fit fixed
the maximal boundary of from maxpars.
Returns
-------
guess_grid : a grid of guesses to use for SubCube.generate_model
In addition, it saves a number of variables under self as a dictionary
passed later to Cube.fiteach as additional arguments, with keywords:
['fixed', 'limitedmin', 'limitedmax', 'minpars', 'maxpars']
"""
minpars, maxpars = np.asarray([minpars, maxpars])
truths, falses = (np.ones(minpars.shape, dtype=bool),
np.zeros(minpars.shape, dtype=bool))
fixed = falses if fixed is None else fixed
limitedmin = truths if limitedmin is None else limitedmin
limitedmax = truths if limitedmax is None else limitedmax
self.fiteach_args = {'fixed' : fixed,
'limitedmin': limitedmin,
'limitedmax': limitedmax,
'minpars' : minpars,
'maxpars' : maxpars }
# TODO: why does 'fixed' break the gaussian fitter?
# update as of 1.08.2016: this doesn't happen anymore
#if self.fittype is 'gaussian':
# self.fiteach_args.pop('fixed')
guess_grid = self._grid_parspace(minpars, maxpars, finesse, **kwargs)
guess_grid = self._remove_close_peaks(guess_grid, **kwargs)
self.fiteach_arg_grid = {key: np.repeat([val], guess_grid.shape[0],
axis=0) for key, val in
self.fiteach_args.items()}
self.guess_grid = guess_grid
return guess_grid
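    # Example usage (a minimal sketch; `sc` is a SubCube instance and the
    # three gaussian parameter ranges are picked arbitrarily for illustration):
    #   sc.update_model('gaussian')
    #   guesses = sc.make_guess_grid(minpars=[0.1, -5.0, 0.2],
    #                                maxpars=[2.0, 5.0, 2.0],
    #                                finesse=[5, 10, 5])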
def expand_guess_grid(self, minpars, maxpars, finesse, fixed=None,
limitedmin=None, limitedmax=None, **kwargs):
"""
Useful for "chunky" discontinuities in parameter space.
Works as SubCube.make_guess_grid, but instead of creating guess_grid
from scratch, the new guess grid is appended to an existing one.
Parameter limits information is extended to accommodate the new grid.
Returns
-------
guess_grid : an updated grid of guesses
"""
minpars, maxpars = np.asarray([minpars, maxpars])
guess_grid = self._grid_parspace(minpars, maxpars, finesse, **kwargs)
guess_grid = self._remove_close_peaks(guess_grid, **kwargs)
# expanding the parameter boundaries
minpars, maxpars = (
np.vstack([self.fiteach_args['minpars'], minpars]).min(axis=0),
np.vstack([self.fiteach_args['maxpars'], maxpars]).max(axis=0) )
self.fiteach_args['minpars'] = minpars
self.fiteach_args['maxpars'] = maxpars
# updating the fiteach_arg grid
truths, falses = (np.ones(minpars.shape, dtype=bool),
np.zeros(minpars.shape, dtype=bool))
fixed = falses if fixed is None else fixed
limitedmin = truths if limitedmin is None else limitedmin
limitedmax = truths if limitedmax is None else limitedmax
expand_dict = {'fixed' : fixed,
'limitedmin': limitedmin,
'limitedmax': limitedmax,
'minpars' : minpars,
'maxpars' : maxpars }
for key, val in expand_dict.items():
expander = np.repeat([expand_dict[key]],
np.prod(np.atleast_1d(finesse)),
axis=0)
self.fiteach_arg_grid[key] = np.append(self.fiteach_arg_grid[key],
expander, axis=0)
self.guess_grid = np.append(self.guess_grid, guess_grid, axis=0)
return self.guess_grid
def _grid_parspace(self, minpars, maxpars, finesse, clip_edges=True,
spacing=None, npeaks=None, **kwargs):
"""
The actual gridding takes place here.
See SubCube.make_guess_grid for details.
Parameters
----------
minpars : np.array containing minimal parameter values
maxpars : np.array containing maximal parameter values
finesse : np.array setting the size of cells between minimal
and maximal values in the resulting guess grid
clip_edges : boolean; if True, the edge values are not
included in the guess grid
"""
# don't want to go though (often lengthy) model
# generation just to have fiteach fail, do we?
if np.any(minpars>maxpars):
log.error("Some of the minimal parameters are larger"
" than the maximal ones. Normally this is "
"not supposed to happen.")
npars = minpars.size
# conformity for finesse: int or np.array goes in and np.array goes out
finesse = np.atleast_1d(finesse) * np.ones(npars)
log.info("Binning the %i-dimensional parameter"
" space into a %s-shaped grid" %
(npars, str(tuple(finesse.astype(int)))))
par_space = []
for i_len, i_min, i_max in zip(finesse+clip_edges*2, minpars, maxpars):
            par_slice_1d = (np.linspace(i_min, i_max, int(i_len)) if not clip_edges
                            else np.linspace(i_min, i_max, int(i_len))[1:-1])
par_space.append(par_slice_1d)
nguesses = np.prod(list(map(len,par_space)))
return np.array(np.meshgrid(*par_space)).reshape(npars, nguesses).T
def _remove_close_peaks(self, guess_grid=None, spacing=[],
which=[], npeaks=2, **kwargs):
"""
Removes the guesses for multiple components where given parameters
are closer than desired. Ideally this should speed up the subsequent
analysis *and* remove components that would converge into one.
Parameters
----------
spacing : float or iterable; minimal separation along `which` dims
which : int or iterable;
Indices for parameters to filter, same shape as `spacing`
npeaks : int > 1; how many components were passed to make_guess_grid
NOTE: only npeaks = 2 is supported for now...
"""
if guess_grid is None:
try:
guess_grid = self.guess_grid
except AttributeError:
raise RuntimeError("Can't find the guess grid to use.")
if npeaks!=2:
raise NotImplementedError("WIP, sorry :/")
# TODO: expand to min/max filtering
spacing, which = np.atleast_1d(spacing), np.atleast_1d(which)
npars = int(guess_grid.shape[1] / npeaks)
# for every parameter space dimension to look into
for dp, i in zip(spacing, which):
m = np.abs(guess_grid[:,i]-guess_grid[:,i+npars]) > dp
guess_grid = guess_grid[m]
return guess_grid
def you_shall_not_pass(self, gg, cut=None, backup_pars=None, **kwargs):
"""
Generates a spectral model from parameters while enforcing a minimum
peak amplitude requirement given by `cut`. Model components below the
threshold are replaced by a zero model from `backup_pars`.
This filtering is not switched on by default, but only if a `cut` is
passed to **kwargs of `generate_model` method.
"""
# TODO: seems to work, but needs more testing
# TODO: the input arguments are ugly, rewrite
try:
# for multicore > 1
kwargs.update(self.you_shall_not_pass_kwargs)
except AttributeError:
pass
try:
# some basic progress reporting for multicore > 1
self.iterticker += 1
# print a dot every 10%, so 10*multicore dots total
i, N = self.iterticker, self.itertotal
if not ((i - 1) // (N / 10) == i // (N / 10)):
print('.', end='')
except AttributeError:
pass
if cut is None:
return self.specfit.get_full_model(pars=gg), gg
else: # now we need to check the peak amplitude for each comp
npeaks_old = self.specfit.fitter.npeaks
self.specfit.fitter.npeaks = 1
npars = int(gg.shape[0] / npeaks_old)
gg_new = []
tot_model = np.zeros_like(self.xarr.value)
for i in range(npeaks_old):
np_gg = list(gg[i*npars:(i+1)*npars])
model = self.specfit.get_full_model(pars = np_gg)
if model.max() > cut[i]:
gg_new += np_gg
tot_model += model
else:
gg_new += backup_pars[i]
self.specfit.fitter.npeaks = npeaks_old
return tot_model, gg_new
def generate_model(self, guess_grid=None, model_file=None, redo=True,
npeaks=None, multicore=1, **kwargs):
"""
Generates a grid of spectral models matching the
shape of the input guess_grid array. Can take the
following numpy arrays as an input:
Parameters
----------
guess_grid : numpy.array
A grid of input parameters.
Can be one of the following:
1) An (M,)-shaped 1d array of model parameters
(see pyspeckit docs for details)
2) An (N, M) array to compute N models
for M sets of parameters
3) A guess cube of (Y, X, M) size
4) An (N, Y, X, M)-shaped array, to
iterate over cubes of guesses.
If not set, SubCube.guess_grid is used.
model_file : string; if not None then the models generated will
be saved to an .npy file instead of class attribute
redo : boolean; if False and model_file filename is in place, the
model gird will not be generated anew
multicore : integer; number of threads to run on (defaults to 1)
Additional keyword arguments are passed to a filter function
`SubCube.you_shall_not_pass()`
"""
if not redo and os.path.isfile(model_file):
log.info("A file with generated models is "
"already in place. Skipping.")
return
if guess_grid is None:
try:
guess_grid = self.guess_grid
except AttributeError:
raise RuntimeError("Can't find the guess grid to use.")
# safeguards preventing wrong output shapes
npars = self.specfit.fitter.npars
guess_grid = np.atleast_2d(guess_grid)
grid_shape = guess_grid.shape[:-1]
if ((len(grid_shape)>1 and grid_shape[-2:]!=self.cube.shape[1:]) or
(len(grid_shape)>3) or (guess_grid.shape[-1]%npars)):
raise ValueError("Invalid shape for the guess_grid, "
"check the docsting for details.")
model_grid = np.empty(shape=grid_shape+(self.xarr.size,))
# NOTE: this for loop is the performance bottleneck!
# would be nice if I could broadcast guess_grid to n_modelfunc...
log.info("Generating spectral models from the guess grid . . .")
if multicore > 1:
# python < 3.3 doesn't handle pooling kwargs (via starmap)
self.iterticker = 0
self.itertotal = model_grid.shape[0]/multicore
self.you_shall_not_pass_kwargs = kwargs
# pooling processes, collecting into a list
result = pyspeckit.cubes.parallel_map(self.you_shall_not_pass,
guess_grid, numcores=multicore)
print('') # those progress dots didn't have a concluding newline
for idx, r in enumerate(result):
                # make sure the order is preserved, for multiprocessing is
# truly an arcane art (shouldn't eat too much time)
assert (guess_grid[idx]==r[1]).all()
model_grid[idx] = r[0]
# cleaning up kwargs taken for the ride
del self.you_shall_not_pass_kwargs
del self.iterticker
del self.itertotal
else:
with ProgressBar(model_grid.shape[0]) as bar:
for idx in np.ndindex(grid_shape):
model_grid[idx], gg = self.you_shall_not_pass(
guess_grid[idx], **kwargs)
if not np.all(np.equal(gg, guess_grid[idx])):
self.guess_grid[idx] = gg
bar.update()
if model_file is not None:
np.save(model_file, model_grid)
else:
self.model_grid = model_grid
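    # Example usage (a minimal sketch, continuing the make_guess_grid example
    # above):
    #   sc.generate_model(multicore=4)               # keep the model grid in memory
    #   sc.generate_model(model_file='models.npy')   # or write it to disk instead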
def best_guess(self, model_grid=None, sn_cut=None, pbar_inc=1000,
memory_limit=None, model_file=None,
np_load_kwargs={}, **kwargs):
"""
For a grid of initial guesses, determine the optimal one based
on the preliminary residual of the specified spectral model.
Parameters
----------
model_grid : numpy.array; A model grid to choose from.
use_cube : boolean; If true, every xy-slice of a cube will
be compared to every model from the model_grid.
sn_cut (see below) is still applied.
sn_cut : float; do not consider model selection for pixels
below this signal-to-noise ratio cutoff.
pbar_inc : int; Number of steps in which the progress bar is
updated. The default should be sensible for modern
                     machines. Prevents the progress bar from consuming
too much computational power.
memory_limit : float; How many gigabytes of RAM could be used for
broadcasting. If estimated usage goes over this
number, best_guess switches to a slower method.
model_file : string; if not None then the models grid will be
read from a file using np.load, which additional
arguments, like mmap_mode, passed along to it
np_load_kwargs : extra keyword arguments to be passed along to
np.load - see its docstring for more info
Output
------
best_guesses : a cube of best models corresponding to xy-grid
(saved as a SubCube attribute)
best_guess : a most commonly found best guess
best_snr_guess : the model for the least residual at peak SNR
(saved as a SubCube attribute)
"""
if model_grid is None:
if model_file is not None:
model_grid = np.load(model_file, **np_load_kwargs)
self.model_grid = model_grid
elif self.model_grid is None:
raise TypeError('sooo the model_grid is empty, '
'did you run generate_model()?')
else:
model_grid = self.model_grid
# TODO: allow for all the possible outputs from generate_model()
if model_grid.shape[-1]!=self.cube.shape[0]:
raise ValueError("Invalid shape for the guess_grid, "
"check the docstring for details.")
if len(model_grid.shape)>2:
raise NotImplementedError("Complex model girds aren't supported.")
log.info("Calculating residuals for generated models . . .")
try: # TODO: move this out into an astro_toolbox function
import psutil
mem = psutil.virtual_memory().available
except ImportError:
import os
try:
memgb = os.popen("free -g").readlines()[1].split()[3]
except IndexError: # would happen on Macs/Windows
memgb = 8
log.warn("Can't get the free RAM "
"size, assuming %i GB" % memgb)
memgb = memory_limit or memgb
mem = int(memgb) * 2**30
if sn_cut:
snr_mask = self.snr_map > sn_cut
else:
snr_mask = np.ones(shape=self.cube.shape[1:], dtype=bool)
# allow for 50% computational overhead
threshold = self.cube.nbytes*model_grid.shape[0]*2
if mem < threshold:
log.warn("The available free memory might not be enough for "
"broadcasting model grid to the spectral cube. Will "
"iterate over all the XY pairs instead. Coffee time!")
try:
if type(model_grid) is not np.ndarray: # assume memmap type
raise MemoryError("This will take ages, skipping to "
"the no-broadcasting scenario.")
# NOTE: this below is a monument to how things should *not*
# be done. Seriously, trying to broadcast 1.5M models
# to a 400x200 map can result in 700GB of RAM needed!
#residual_rms = np.empty(shape=((model_grid.shape[0],)
# + self.cube.shape[1:]))
#with ProgressBar(np.prod(self.cube.shape[1:])) as bar:
# for (y,x) in np.ndindex(self.cube.shape[1:]):
# residual_rms[:,y,x] = (self.cube[None,:,y,x]
# - model_grid).std(axis=1)
# bar.update()
best_map = np.full(self.cube.shape[1:], np.nan)
rmsmin_map = np.full(self.cube.shape[1:], np.nan)
with ProgressBar(np.prod(self.cube.shape[1:])) as bar:
for (y, x) in np.ndindex(self.cube.shape[1:]):
if not np.isfinite(self.cube[:, y, x]).any():
bar.update()
continue
if sn_cut:
if not snr_mask[y, x]:
best_map[y, x], rmsmin_map[y,
x] = np.nan, np.nan
bar.update()
continue
resid_rms_xy = (np.nanstd(
model_grid - self.cube[None, :, y, x], axis=1))
best_map[y, x] = np.argmin(resid_rms_xy)
rmsmin_map[y, x] = np.nanmin(resid_rms_xy)
bar.update()
except MemoryError: # catching memory errors could be really bad!
log.warn("Not enough memory to broadcast model grid to the "
"XY grid. This is bad for a number of reasons, the "
"foremost of which: the running time just went "
"through the roof. Leave it overnight maybe?")
best_map = np.full(self.cube.shape[1:], np.nan)
rmsmin_map = np.full(self.cube.shape[1:], np.nan)
# TODO: this takes ages! refactor this through hdf5
# "chunks" of acceptable size, and then broadcast them!
with ProgressBar(
np.prod((model_grid.shape[0], ) + self.cube.shape[
1:])) as bar:
for (y, x) in np.ndindex(self.cube.shape[1:]):
if not np.isfinite(self.cube[:, y, x]).any():
bar.update(bar._current_value +
model_grid.shape[0])
continue
if sn_cut:
if not snr_mask[y, x]:
best_map[y, x], rmsmin_map[y,
x] = np.nan, np.nan
bar.update(bar._current_value +
model_grid.shape[0])
continue
resid_rms_xy = np.empty(shape=model_grid.shape[0])
for model_id in np.ndindex(model_grid.shape[0]):
resid_rms_xy[model_id] = (
self.cube[:, y, x] - model_grid[model_id]
).std()
if not model_id[0] % pbar_inc:
bar.update(bar._current_value + pbar_inc)
best_map[y, x] = np.argmin(resid_rms_xy)
rmsmin_map[y, x] = np.nanmin(resid_rms_xy)
else:
# NOTE: broadcasting below is a much faster way to compute
# cube - model residuals. But for big model sizes this
# will cause memory overflows.
# The code above tried to catch this before it happens
# and run things in a slower fashion.
residual_rms = (
self.cube[None, :, :, :] - model_grid[:, :, None, None]).std(
axis=1)
if sn_cut:
zlen = residual_rms.shape[0]
residual_rms[~self.get_slice_mask(snr_mask, zlen)] = np.inf
try:
best_map = np.argmin(residual_rms, axis=0)
rmsmin_map = residual_rms.min(axis=0)
except MemoryError:
log.warn("Not enough memory to compute the minimal"
" residuals, will iterate over XY pairs.")
best_map = np.empty_like(self.cube[0], dtype=int)
rmsmin_map = np.empty_like(self.cube[0])
with ProgressBar(np.prod(best_map.shape)) as bar:
for (y, x) in np.ndindex(best_map.shape):
best_map[y, x] = np.argmin(residual_rms[:, y, x])
rmsmin_map[y, x] = residual_rms[:, y, x].min()
bar.update()
# indexing by nan values would cause an IndexError
best_nan = np.isnan(best_map)
        best_map[best_nan] = 0
best_map_int = best_map.astype(int)
best_map[best_nan] = np.nan
self._best_map = best_map_int
self._best_rmsmap = rmsmin_map
self.best_guesses = np.rollaxis(self.guess_grid[best_map_int], -1)
snrmask3d = np.repeat([snr_mask], self.best_guesses.shape[0], axis=0)
self.best_guesses[~snrmask3d] = np.nan
try:
self.best_fitargs = {
key: np.rollaxis(self.fiteach_arg_grid[key][best_map_int],-1)
for key in self.fiteach_arg_grid.keys()}
except IndexError:
# FIXME why is this happening? do I remove low SNRs from guesses?
log.warn("SubCube.fiteach_arg_grid has a different shape than"
" the one used. SubCube.best_fitargs won't be generated.")
from scipy.stats import mode
model_mode = mode(best_map)
best_model_num = int(model_mode[0][0, 0])
best_model_freq = model_mode[1][0, 0]
best_model_frac = (float(best_model_freq) /
np.prod(self.cube.shape[1:]))
if best_model_frac < .05:
log.warn("Selected model is best only for less than %5 "
"of the cube, consider using the map of guesses.")
self._best_model = best_model_num
self.best_overall = self.guess_grid[best_model_num]
log.info("Overall best model: selected #%i %s" %
(best_model_num, self.guess_grid[best_model_num].round(2)))
try:
best_snr = np.argmax(self.snr_map)
best_snr = np.unravel_index(best_snr, self.snr_map.shape)
self.best_snr_guess = self.guess_grid[best_map_int[best_snr]]
log.info("Best model @ highest SNR: #%i %s" %
(best_map_int[best_snr], self.best_snr_guess.round(2)))
except AttributeError:
log.warn("Can't find the SNR map, best guess at "
"highest SNR pixel will not be stored.")
def get_slice_mask(self, mask2d, notxarr=None):
"""
In case we ever want to apply a 2d mask to a whole cube.
Parameters
----------
notxarr : if set, will be used as a length of a 3rd dim;
Otherwise, size of self.xarr is used.
"""
zlen = notxarr if notxarr else self.xarr.size
mask3d = np.repeat([mask2d], zlen, axis=0)
return mask3d
def get_snr_map(self, signal=None, noise=None, unit='km/s',
signal_mask=None, noise_mask=None):
"""
Calculates S/N ratio for the cube. If no information is given on where
to look for signal and noise channels, a (more-or-less reasonable) rule
of thirds is used: the outer thirds of the channel range are used to
get the root mean square of the noise, and the max value in the inner
third is assumed to be the signal strength.
Parameters
----------
signal : 2xN numpy.array, where N is the total number of signal blocks.
Should contain channel numbers in `unit` convention, the first
subarray for start of the signal block and the second one for
the end of the signal block
noise : 2xN numpy.array, where N is the total number of noise blocks.
Same as `signal` otherwise.
unit : a unit for specifying the channels. Defaults to 'km/s'.
If set to 'pixel', actual channel numbers are selected.
signal_mask : dtype=bool numpy.array of SubCube.xarr size
If specified, used as a mask to get channels with signal.
Overrules `signal`
noise_mask : dtype=bool numpy.array of SubCube.xarr size
If specified, used as a mask to get channels with noise.
Overrules `noise`
Returns
-------
snr_map : numpy.array
Also stored under SubCube.snr_map
"""
# will override this later if no ranges were actually given
unit = {'signal': unit, 'noise': unit}
# get rule of thirds signal and noise if no ranges were given
default_cut = 0.33
if signal is None:
# find signal cuts for the current unit?
# nah let's just do it in pixels, shall we?
i_low, i_high = (int(round(self.xarr.size * default_cut )),
int(round(self.xarr.size * (1-default_cut))))
signal = [[i_low+1], [i_high-1]]
unit['signal'] = 'pixel'
if noise is None:
# find signal cuts for the current unit?
# nah let's just do it in pixels, shall we?
i_low, i_high = (int(round(self.xarr.size * default_cut )),
int(round(self.xarr.size * (1-default_cut))))
noise = [[0, i_high], [i_low, self.xarr.size-1]]
unit['noise'] = 'pixel'
# setting xarr masks from high / low indices
if signal_mask is None:
signal_mask = self.get_mask(*signal, unit=unit['signal'])
if noise_mask is None:
noise_mask = self.get_mask(*noise, unit=unit['noise'])
self._mask_signal = signal_mask
self._mask_noise = noise_mask
# no need to care about units at this point
snr_map = (self.get_signal_map(signal_mask)
/ self.get_rms_map(noise_mask))
self.snr_map = snr_map
return snr_map
def get_mask(self, low_indices, high_indices, unit):
"""
Converts low / high indices arrays into a mask on self.xarr
"""
mask = np.array([False]*self.xarr.size)
for low, high in zip(low_indices, high_indices):
# you know this is a hack right?
# also, undocumented functionality is bad and you should feel bad
if unit not in ['pix','pixel','pixels','chan','channel','channels']:
# converting whatever units we're given to pixels
unit_low, unit_high = low*u.Unit(unit), high*u.Unit(unit)
try:
# FIXME: this is too slow, find a better way!
unit_bkp = self.xarr.unit
self.xarr.convert_to_unit(unit)
except u.core.UnitConversionError as err:
raise type(err)(str(err) + "\nConsider setting, e.g.:\n"
"SubCube.xarr.velocity_convention = 'radio'\n"
"and\nSubCube.xarr.refX = line_freq*u.GHz")
index_low = self.xarr.x_to_pix(unit_low)
index_high = self.xarr.x_to_pix(unit_high)
self.xarr.convert_to_unit(unit_bkp)
else:
try:
index_low, index_high = (int(low.value ),
int(high.value))
except AttributeError:
index_low, index_high = int(low), int(high)
# so this also needs to be sorted if the axis goes in reverse
index_low, index_high = np.sort([index_low, index_high])
mask[index_low:index_high] = True
return mask
def get_rms_map(self, noise_mask=None):
"""
Make an rms estimate, will try to find the noise channels in
        the input values or in class instances. If the noise mask is
        not given, defaults to calculating rms of all channels.
Parameters
----------
noise_mask : dtype=bool numpy.array of SubCube.xarr size
If specified, used as a mask to get channels with noise.
Returns
-------
rms_map : numpy.array, also stored under SubCube.rms_map
"""
if noise_mask is None:
log.warn('no noise mask was given, will calculate the RMS '
'over all channels, thus overestimating the noise!')
noise_mask = np.ones(self.xarr.shape, dtype=bool)
rms_map = self.cube[noise_mask,:,:].std(axis=0)
self._rms_map = rms_map
return rms_map
def get_signal_map(self, signal_mask=None):
"""
        Make a signal strength estimate. If the signal mask is
        not given, defaults to maximal signal on all channels.
Parameters
----------
signal_mask : dtype=bool numpy.array of SubCube.xarr size
If specified, used as a mask to get channels with signal.
Returns
-------
signal_map : numpy.array, also stored under SubCube.signal_map
"""
if signal_mask is None:
log.warn('no signal mask was given, will calculate the signal '
'over all channels: true signal might be lower.')
            signal_mask = np.ones(self.xarr.shape, dtype=bool)
signal_map = self.cube[signal_mask,:,:].max(axis=0)
self._signal_map = signal_map
return signal_map
def get_chi_squared(self, sigma=None, refresh=False, **kwargs):
"""
Computes a chi-squared map from modelcube / parinfo.
"""
if self._modelcube is None or refresh:
self.get_modelcube(**kwargs)
if sigma is None:
sigma = self._rms_map
chisq = ((self.cube - self._modelcube)**2 / sigma**2).sum(axis=0)
self.chi_squared = chisq
return chisq
def chi_squared_stats(self, plot_chisq=False):
"""
Compute chi^2 statistics for an X^2 distribution.
This is essentially a chi^2 test for normality being
computed on residual from the fit. I'll rewrite it
into a chi^2 goodness of fit test when I'll get around
to it.
Returns
-------
prob_chisq : probability that X^2 obeys the chi^2 distribution
dof : degrees of freedom for chi^2
"""
# ------------------- TODO --------------------- #
# rewrite it to a real chi-square goodness of fit!
# this is essentially a chi^2 test for normality
from scipy.stats.distributions import chi2
# TODO: for Pearson's chisq test it would be
# dof = self.xarr.size - self.specfit.fitter.npars - 1
# NOTE: likelihood function should asymptotically approach
# chi^2 distribution too! Given that the whole point
# of calculating chi^2 is to use it for model
# selection I should probably switch to it.
# TODO: derive an expression for this "Astronomer's X^2" dof.
dof = self.xarr.size
prob_chisq = chi2.sf(self.chi_squared, dof)
# NOTE: for some reason get_modelcube returns zeros for some
# pixels even if corresponding Cube.parcube[:,y,x] is NaN
prob_chisq[np.isnan(self.parcube.min(axis=0))] = np.nan
if plot_chisq:
if not plt.rcParams['text.usetex']:
plt.rc('text', usetex=True)
if self.mapplot.figure is None:
self.mapplot()
self.mapplot.plane = prob_chisq
self.mapplot(estimator=None, cmap='viridis', vmin=0, vmax=1)
labtxt = r'$\chi^2\mathrm{~probability~(%i~d.o.f.)}$' % dof
self.mapplot.FITSFigure.colorbar.set_axis_label_text(labtxt)
plt.show()
self.prob_chisq = prob_chisq
return prob_chisq, dof
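# Sketch of how the statistic is typically consumed (the threshold value is
# illustrative only; mark_bad_fits below uses a similar cut), assuming ``sc``
# is a fitted SubCube on which get_chi_squared() has already been run:
#
#     prob, dof = sc.chi_squared_stats()
#     bad_pixels = prob < 1e-20   # residuals very unlikely to be pure noise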
def mark_bad_fits(self, ax=None, mask=None, cut=1e-20,
method='cross', **kwargs):
"""
Given an active axis used by Cube.mapplot, overplot
pixels with bad fits with an overlay.
Can pass along a mask of bad pixels; if none is given
the method tries to get its own guess from:
self.prob_chisq < cut
Additional keyword arguments are passed to plt.plot.
"""
# setting defaults for plotting if no essentials are passed
ax = ax or self.mapplot.axis
pltkwargs = {'alpha': 0.8, 'ls': '--', 'lw': 1.5, 'c': 'r'}
pltkwargs.update(kwargs)
# because the plotting routine would attempt to change the scale
try:
ax.autoscale(False)
except AttributeError:
raise RuntimeError("Can't find an axis to doodle on.")
# NOTE: this would only work for a singular component
# due to the way we're calculating X^2. One can,
# in principle, calculate X^2 with a mask to
# bypass this issue, but only in the case of the
# components being clearly separated.
# Otherwise the cut value needs to be set "by eye"
mask = self.prob_chisq < cut if mask is None else mask
# that +1 modifier is there because of aplpy's
# convention on the (0,0) origin in FITS files
for y,x in np.stack(np.where(mask)).T+1:
self._doodle_xy(ax, (x,y), method, **pltkwargs)
def _doodle_xy(self, ax, xy, method, **kwargs):
"""
Draws lines on top of a pixel.
Parameters
----------
ax : axis to doodle on
xy : a tuple of xy coordinate pair
method : what to draw. 'box' and 'cross' are supported
"""
# TODO: if ax is None take it from self.mapplot.axis
x, y = xy
if method == 'box':
ax.plot([x-.5,x-.5,x+.5,x+.5,x-.5],
[y-.5,y+.5,y+.5,y-.5,y-.5],
**kwargs)
elif method == 'cross':
ax.plot([x-.5,x+.5], [y-.5,y+.5], **kwargs)
ax.plot([x+.5,x-.5], [y-.5,y+.5], **kwargs)
else:
raise ValueError("unknown method %s passed to "
"the doodling function" % method)
def _doodle_box(self, ax, xy1, xy2, **kwargs):
"""
Draws a box on the axis.
Parameters
----------
ax : axis to doodle on
xy1 : xy coordinate tuple, a box corner
xy2 : xy coordinate tuple, an opposite box corner
"""
# TODO: merge _doodle_box with _doodle_xy
x0, y0 = (np.array(xy1)+np.array(xy2))/2.
dx, dy = np.abs((np.array(xy1)-np.array(xy2))/2.)
ax.plot([x0-dx-.5,x0-dx-.5,x0+dx+.5,x0+dx+.5,x0-dx-.5],
[y0-dy-.5,y0+dy+.5,y0+dy+.5,y0-dy-.5,y0-dy-.5],
**kwargs)
def get_likelihood(self, sigma=None):
"""
Computes log-likelihood map from chi-squared
"""
# self-NOTE: need to deal with chi^2 first
raise NotImplementedError
#if sigma is None:
# sigma = self._rms_map
## TODO: resolve extreme exponent values or risk overflowing
#likelihood = (np.exp(-self.chi_squared/2)
# * (sigma*np.sqrt(2*np.pi))**(-self.xarr.size))
#self.likelihood = np.log(likelihood)
#return np.log(likelihood)
def _unpack_fitkwargs(self, x, y, fiteachargs=None):
"""
A gateway method that allows 3d arrays of fitkwargs elements to
be passed along to fiteach, and, consequently, to the underlying
specfit subroutines.
In principle, this also allows hacking multiple values of npeak
within one fiteach call: the selected xy positions just need
fixed[npars*npeaks:] = True (or similar) with the corresponding
guesses set to zero-amplitude models.
"""
argdict = fiteachargs or self.fiteach_args
# NOTE: why lists? well pyspeckit doesn't always like arrays
try:
return {key: list(val[:,y,x]) if hasattr(argdict[key],'shape')
else val for key, val in argdict.items()}
except IndexError:
return {key: list(val) if type(val) is np.ndarray else val
for key, val in argdict.items()}
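# Illustration (key names and values are made up): if fiteach_args stores
# per-pixel limits as 3d arrays, e.g.
#
#     self.fiteach_args = {'limitedmin': np.ones((npars, ny, nx), bool),
#                          'minpars': np.zeros((npars, ny, nx))}
#
# then _unpack_fitkwargs(x, y) returns plain per-pixel lists such as
# {'limitedmin': [True, ...], 'minpars': [0.0, ...]}, which is the form the
# pyspeckit specfit machinery expects.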
def _fiteach_args_to_3d(self):
"""
Converts 1d fiteach_args to 3d.
"""
shape_3d = ((len(list(self.fiteach_args.values())[0]),)
+ self.cube[0].shape)
for key, val in self.fiteach_args.items():
val_3d = np.ones(shape_3d) * np.array(val)[:, None, None]
self.fiteach_args[key] = val_3d.astype(type(val[0]))
# Taken directly from pyspeckit.cubes.fiteach()!
# TODO: I removed quite a few lines of code from this, so the
# method is currently suitable for my personal needs only.
# I should rename it before merging to master branch.
# TODO: roadmap for the function and the branch:
# - allow cubes of minmax/fixed args to be passed here
# - rename it to multifit
# - merge to master
# New features:
# * use_best_as_guess argument
# * support for custom fitkwargs for custom pixels
# * handling of special cases through custom exceptions
# * can accept custom snr maps
# * TODO minor: percentages for multicore>1 case
# * TODO minor: add progressbar if not verbose
def fiteach(self, errmap=None, snrmap=None, guesses=(), verbose=True,
verbose_level=1, quiet=True, signal_cut=3, usemomentcube=None,
blank_value=0, use_neighbor_as_guess=False,
use_best_as_guess=False, start_from_point=(0,0), multicore=1,
position_order=None, maskmap=None, **kwargs):
"""
Fit a spectrum to each valid pixel in the cube
For guesses, priority is *use_best_as_guess*, *use_neighbor_as_guess*,
*usemomentcube*, *guesses*, None
Once you have successfully run this function, the results will be
stored in the ``.parcube`` and ``.errcube`` attributes, which are each
cubes of shape ``[npars, ny, nx]``, where npars is the number of fitted
parameters and ``nx``, ``ny`` are the shape of the map. ``errcube``
contains the errors on the fitted parameters (1-sigma, as returned from
the Levenberg-Marquardt fit's covariance matrix). You can use the
attribute ``has_fit``, which is a map of shape ``[ny,nx]`` to find
which pixels have been successfully fit.
Parameters
----------
use_neighbor_as_guess: bool
Set this keyword to use the average best-fit parameters from
neighboring positions with successful fits as the guess
use_best_as_guess: bool
If true, the initial guess for the pixel is selected as the one
giving the least residual among the fits from the neighboring
pixels and the guess for the pixel
start_from_point: tuple(int,int)
Either start from the center or from a point defined by a tuple.
Work outward from that starting point.
position_order: ndarray[naxis=2]
2D map of region with pixel values indicating the order in which
to carry out the fitting. Any type with increasing pixel values.
guesses: tuple or ndarray[naxis=3]
Either a tuple/list of guesses with len(guesses) = npars or a cube
of guesses with shape [npars, ny, nx].
errmap: ndarray[naxis=2] or ndarray[naxis=3]
A map of rms of the noise to use for signal cutting.
snrmap: ndarray[naxis=2]
A map of signal-to-noise ratios to use. Overrides errmap.
signal_cut: float
Minimum signal-to-noise ratio to "cut" on (i.e., if peak in a given
spectrum has s/n less than this value, ignore it)
blank_value: float
Value to replace non-fitted locations with. A good alternative is
numpy.nan
verbose: bool
verbose_level: int
Controls how much is output.
0,1 - only changes frequency of updates in loop
2 - print out messages when skipping pixels
3 - print out messages when fitting pixels
4 - specfit will be verbose
multicore: int
if >1, try to use multiprocessing via parallel_map to run on
multiple cores
maskmap : `np.ndarray`, optional
A boolean mask map, where ``True`` implies that the data are good.
This will be used for both plotting using `mapplot` and fitting
using `fiteach`. If ``None``, will use ``self.maskmap``.
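Example
-------
A minimal sketch (argument values are illustrative only)::

    cube.fiteach(guesses=guess_cube, errmap=rms_map,
                 signal_cut=3, blank_value=np.nan, multicore=4)

where ``guess_cube`` has shape ``[npars, ny, nx]`` and ``rms_map`` has
shape ``[ny, nx]``.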
"""
if not hasattr(self.mapplot,'plane'):
self.mapplot.makeplane()
if maskmap is None:
maskmap = self.maskmap
yy,xx = np.indices(self.mapplot.plane.shape)
if isinstance(self.mapplot.plane, np.ma.core.MaskedArray):
OK = ((~self.mapplot.plane.mask) &
maskmap.astype('bool')).astype('bool')
else:
OK = (np.isfinite(self.mapplot.plane) &
maskmap.astype('bool')).astype('bool')
# NAN guesses rule out the model too
if hasattr(guesses,'shape') and guesses.shape[1:] == self.cube.shape[1:]:
bad = np.isnan(guesses).sum(axis=0).astype('bool')
OK &= (~bad)
if start_from_point == 'center':
start_from_point = (xx.max()/2., yy.max()/2.)
if hasattr(position_order,'shape') and position_order.shape == self.cube.shape[1:]:
sort_distance = np.argsort(position_order.flat)
else:
d_from_start = ((xx-start_from_point[1])**2 + (yy-start_from_point[0])**2)**0.5
sort_distance = np.argsort(d_from_start.flat)
valid_pixels = list(zip(xx.flat[sort_distance][OK.flat[sort_distance]],
yy.flat[sort_distance][OK.flat[sort_distance]]))
if len(valid_pixels) != len(set(valid_pixels)):
raise ValueError("There are non-unique pixels in the 'valid pixel' list. "
"This should not be possible and indicates a major error.")
elif len(valid_pixels) == 0:
raise ValueError("No valid pixels selected.")
if verbose_level > 0:
log.debug("Number of valid pixels: %i" % len(valid_pixels))
guesses_are_moments = (isinstance(guesses, string_types) and
guesses in ('moment','moments'))
if guesses_are_moments or (usemomentcube and len(guesses)):
if not hasattr(self, 'momentcube') and guesses_are_moments:
self.momenteach()
npars = self.momentcube.shape[0]
else:
npars = len(guesses)
if npars == 0:
raise ValueError("Parameter guesses are required.")
self.parcube = np.zeros((npars,)+self.mapplot.plane.shape)
self.errcube = np.zeros((npars,)+self.mapplot.plane.shape)
# newly needed as of March 27, 2012. Don't know why.
if 'fittype' in kwargs:
self.specfit.fittype = kwargs['fittype']
self.specfit.fitter = self.specfit.Registry.multifitters[self.specfit.fittype]
# TODO: VALIDATE THAT ALL GUESSES ARE WITHIN RANGE GIVEN THE
# FITKWARG LIMITS
# array to store whether pixels have fits
self.has_fit = np.zeros(self.mapplot.plane.shape, dtype='bool')
self._counter = 0
t0 = time.time()
def fit_a_pixel(iixy):
ii,x,y = iixy
sp = self.get_spectrum(x,y)
# very annoying - cannot use min/max without checking type
# maybe can use np.asarray here?
# cannot use sp.data.mask because it can be a scalar boolean,
# which does unpredictable things.
if hasattr(sp.data, 'mask') and not isinstance(sp.data.mask, (bool,
np.bool_)):
sp.data[sp.data.mask] = np.nan
sp.error[sp.data.mask] = np.nan
sp.data = np.array(sp.data)
sp.error = np.array(sp.error)
elif errmap is not None:
if self.errorcube is not None:
raise ValueError("Either the 'errmap' argument or"
" self.errorcube attribute should be"
" specified, but not both.")
if errmap.shape == self.cube.shape[1:]:
sp.error = np.ones(sp.data.shape) * errmap[int(y),int(x)]
elif errmap.shape == self.cube.shape:
sp.error = errmap[:, int(y), int(x)]
elif self.errorcube is not None:
sp.error = self.errorcube[:, int(y), int(x)]
else:
if verbose_level > 1 and ii==0:
log.warn("using data std() as error.")
sp.error[:] = sp.data[sp.data==sp.data].std()
if (sp.error is not None or snrmap is not None) and signal_cut > 0:
try:
max_sn = snrmap[y,x]
except TypeError: # if snrmap is None
max_sn = np.nanmax(sp.data / sp.error)
else:
max_sn = None
sp.specfit.Registry = self.Registry # copy over fitter registry
# Do some homework for local fits
# Exclude out of bounds points
xpatch, ypatch = get_neighbors(x,y,self.has_fit.shape)
local_fits = self.has_fit[ypatch+y,xpatch+x]
if use_best_as_guess and np.any(local_fits):
gg = guesses[:,y,x] if len(guesses.shape)>1 else guesses
near_guesses = self.parcube[:, (ypatch+y)[local_fits],
(xpatch+x)[local_fits] ].T
ggrid = np.vstack([gg, near_guesses])
# for some reason nan values creep through!
ggrid = np.array([val for val in ggrid if np.all(np.isfinite(val))])
resid = [(sp.data - sp.specfit.get_full_model(pars=iguess)).std()
for iguess in ggrid]
gg = ggrid[np.argmin(resid)]
if np.argmin(resid):
gg_ind = np.where(np.all((self.parcube[:, ypatch+y,
xpatch+x].T == np.array(gg)),axis=1))[0][0]
x_old = xpatch[gg_ind]+x
y_old = ypatch[gg_ind]+y
log.info("Selecting best guess at (%i,%i) from "
"(%i,%i): %s" % (x,y,x_old,y_old,str(gg)))
# copy parlimits as well as the guess for consistency
lims_old = self._unpack_fitkwargs(x_old, y_old, kwargs)
try:
for key in self.fiteach_args.keys():
kwargs[key][:,y,x] = lims_old[key]
# TypeError for lists, IndexError for ndarrays
except (IndexError, TypeError):
kwargs[key] = lims_old[key]
else:
log.info("Selecting best guess from input guess.")
elif use_neighbor_as_guess and np.any(local_fits):
# Array is N_guess X Nvalid_nbrs so averaging over
# Axis=1 is the axis of all valid neighbors
gg = np.mean(self.parcube[:, (ypatch+y)[local_fits],
(xpatch+x)[local_fits]], axis=1)
elif hasattr(guesses,'shape') and guesses.shape[1:] == self.cube.shape[1:]:
if verbose_level > 1 and ii == 0:
log.info("Using input guess cube")
gg = guesses[:,y,x]
else:
if verbose_level > 1 and ii == 0:
log.info("Using input guess")
gg = guesses
fitkwargs = self._unpack_fitkwargs(x, y, kwargs)
try:
if np.any(~np.isfinite(gg)):
import time
import numpy as np
import pandas as pd
import multiprocessing
import tensorflow as tf
import tensorflow_addons as tfa
import math
import os
from skimage import io
import cv2
isTestSetRun = False
input_dir = '/mnt/ML/Panda/officialData/train_images/'
out_file = 'submission.csv'
backboneFrozen = True
batchSize = 4
trainSequenceLength = 64
tileSize = 256
outImageSize = 224
prefetchSize = multiprocessing.cpu_count() + 1
modelsToEnsamble = [
('experiments/dgrechka/37c/train2/fold_2/weights.hdf5', 0.0),
('experiments/dgrechka/37c/train2/fold_3/weights.hdf5', 0.0),
('experiments/dgrechka/37c/train2/fold_2/weights.hdf5', 30.0),
('experiments/dgrechka/37c/train2/fold_3/weights.hdf5', 60.0),
]
cpuCores = multiprocessing.cpu_count()
print("Detected {0} CPU cores".format(cpuCores))
inputFiles = os.listdir(input_dir)
inputIdents = [x[0:-5] for x in inputFiles if x.endswith(".tiff")]
if not isTestSetRun:
inputIdents.sort()
inputIdents = inputIdents[0:64]
imageCount = len(inputIdents)
print("Found {0} files for inference".format(imageCount))
fullPaths = [os.path.join(input_dir,"{0}.tiff".format(x)) for x in inputIdents]
def GetInferenceDataset(fullPaths, rotDegree):
filenameDs = tf.data.Dataset.from_tensor_slices(fullPaths)
def TrimBlackPaddings(image):
shape = image.shape
rowsAggregated = np.amax(image,axis=(0,2))
rowIndices = np.where(rowsAggregated != 0)[0]
if len(rowIndices) == 0:
print("WARN: entire black image in TrimBlackPaddings")
return image # entire black image
firstRow,lastRow = rowIndices[0], rowIndices[-1]
colsAggregated = np.amax(image, axis=(1,2))
colIndices = np.where(colsAggregated != 0)[0]
if len(colIndices) == 0:
print("WARN: entire black image in TrimBlackPaddings")
return image # entire black image
firstCol, lastCol = colIndices[0], colIndices[-1]
return image[firstCol:(lastCol+1), firstRow:(lastRow+1), :]
def EnlargeForRotationTF(imageTensor):
radiansDegree = rotDegree / 180.0 * math.pi
def EnlargeForRotation(image):
h,w,_ = image.shape
diag = math.sqrt(h*h + w*w)
alphaCos = w / diag
alpha = math.acos(alphaCos)
beta = alpha - radiansDegree
w2 = diag * abs(math.cos(beta))
h2 = diag * abs(math.sin(beta))
beta2 = alpha + radiansDegree
w3 = diag * abs(math.cos(beta2))
h3 = diag * abs(math.sin(beta2))
w_ext = int(max(w2, w3, w))
h_ext = int(max(h2, h3, h))
padH = h_ext - h
padW = w_ext - w
# print("init shape {0} {1}. rot {2}; pad h {3}; pad w {4}".format(h,w,rotDegree, padH, padW))
#print("padding")
paddedImage = np.pad(image, (
(padH // 2, padH // 2),
(padW // 2, padW // 2),
(0,0)))
return paddedImage
return tf.numpy_function(EnlargeForRotation, [imageTensor], tf.uint8)
def getNotEmptyTiles(image, tileSize, precomputedTileIndices=None, emptyCutOffMaxThreshold = 25):
'''Returns the list of non-empty tile indices (tile_row_idx, tile_col_idx) and the corresponding list of tile npArrays.
Each index list element specifies the zero-based index (row_idx, col_idx) of a square tile which contains some data (non-empty)'''
# negate image (as lots of further processing operations add zero paddings)
# we need black background
h,w,_ = image.shape
vertTiles = math.ceil(h / tileSize)
horTiles = math.ceil(w / tileSize)
indexResult = []
dataResult = []
tileIntensity = []
def extractTileData(row_idx,col_idx):
tileYstart = row_idx*tileSize
tileYend = min((row_idx+1)*tileSize,h)
tileXstart = col_idx*tileSize
tileXend = min((col_idx+1)*tileSize,w)
tile = image[
tileYstart:tileYend,
tileXstart:tileXend,
:] # all 3 color channels
return tile
def coerceTileSize(tile):
"""In case tile is smaller than requested size, it is padded with white content"""
# we may need to pad the edge tile to full requested tile size
th,tw,_ = tile.shape
xPad = tileSize - tw
yPad = tileSize - th
if (xPad>0) | (yPad>0):
# we pad to the end
tile = np.pad(tile,[[0,yPad],[0,xPad],[0,0]],constant_values = 0)
return tile
# we analyze all tiles for the content
for row_idx in range(0,vertTiles):
for col_idx in range(0,horTiles):
tile = extractTileData(row_idx, col_idx)
# if the tile is almost entirely black (no information in the negated image) we skip it (do not return)
#print("row {0} col {1} min_v {2}".format(row_idx,col_idx,tileMin))
tileMax = np.nanmax(tile)
if tileMax < emptyCutOffMaxThreshold: # too black! there are no tissue areas
continue
tile = coerceTileSize(tile)
indexResult.append((row_idx,col_idx))
dataResult.append(tile)
tileMean = np.nanmean(tile)
tileIntensity.append(tileMean)
# sorting the tiles according to intensity
resIdx = []
resData = []
sortedIntencites = []
for (idxElem,dataElem,sortedIntence) in sorted(zip(indexResult, dataResult, tileIntensity), key=lambda pair: -pair[2]):
resIdx.append(idxElem)
resData.append(dataElem)
sortedIntencites.append(sortedIntence)
indexResult = resIdx
return indexResult,resData
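# Usage sketch (assumption: ``image`` is a negated, black-background
# H x W x 3 uint8 array): tiles come back sorted by mean intensity,
# brightest first, so a fixed-length tile sequence can be taken greedily:
#
#     tileIdx, tiles = getNotEmptyTiles(image, tileSize)
#     selected = tiles[:trainSequenceLength]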
def getImageMean_withoutPureBlack(image):
black = np.array([0,0,0])
cleared = np.where(image==black,np.NaN,image)
result = np.nanmean(cleared)
if np.isnan(result):
result = 0.0 # pure black
return result
def getImageMean_withoutBlack(image,blackThreshold):
#print("image shape is {0}".format(image.shape))
imageBrightness = np.nanmax(image, axis=-1)
blackMap = imageBrightness < blackThreshold
blackMap = np.expand_dims(blackMap,axis=-1)
blackMap = np.tile(blackMap, (1,1,3))
if not np.all(blackMap):
# non-NaN pixels exist, so the masked mean is well-defined
cleared = np.where(blackMap,np.NaN,image)
return np.nanmean(cleared)
else:
return np.NaN # pure black
def getImageContrast_withoutPureBlack(image, regTerm=0.0, precomputedMu = None):
"""(0,0,0) pixels are excluded from contrast evaluation"""
if precomputedMu is None:
mu = getImageMean_withoutPureBlack(image)
else:
mu = precomputedMu
diff = image - mu
return getImageContrast(image,regTerm=regTerm, precomputedDiff=diff)
def getImageContrast(image, regTerm=0.0, precomputedDiff = None):
"""Entire image contrast as defined in Goodfellow et al. 2016 "Deep learning" p.442"""
if precomputedDiff is None:
mu = getImageMean_withoutPureBlack(image)
diff = image - mu
else:
diff = precomputedDiff
squaredDiff = diff*diff
meanSqDiff = np.mean(squaredDiff)
return math.sqrt(regTerm + meanSqDiff)
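# For reference, the global contrast normalization that the GCN helper below
# is meant to implement (cf. Goodfellow et al. 2016, "Deep learning", p. 442)
# rescales each pixel as
#
#     X' = s * (X - mean(X)) / max(eps, sqrt(lambda + mean((X - mean(X))**2)))
#
# with the scale s taken to be 1 here; lambda and eps correspond to the
# lambdaTerm and eps arguments of GCN.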
def GCN(image,lambdaTerm = 0.0, eps=1e-8, precomputedContrast = None, precomputedMean = None):
"""Global Contrast Normalization as defined in Goodfellow et al. 2016 "Deep learning" p.442"""
if precomputedMean is None:
mu = np.mean(image)
import numpy as np
import pandas as pd
def data_read_write(data_path_in, data_path_out, format_type, **kwargs):
"""
General function to read, format, and write data.
Parameters
----------
data_path_in : str
Path to the file that will be read
data_path_out : str
Path of the file that will be output
format_type : str
Either 'dense', 'grid', 'columnar', or 'transect'
kwargs
Specific keyword args for given data types. See Notes
Notes
-----
'Dense Parameters'
non_label_cols : str
Comma separated list of non label columns. ex. "lat, long, tree"
sep : str
The delimiter for the dense data. Default, ","
na_values : int, float, str
Value to be labeled as NA. Default, ""
See misc.format_dense() for additional keyword parameters
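Example
-------
A hypothetical dense-to-stacked conversion (file names are placeholders)::

    data_read_write("survey_dense.csv", "survey_stacked.csv", "dense",
                    non_label_cols="site, year")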
"""
if format_type == "dense":
# Set dense defaults
kwargs = _set_dense_defaults_and_eval(kwargs)
# Try to parse non label columns appropriately
try:
nlc = [nm.strip() for nm in kwargs['non_label_cols'].split(",")]
kwargs.pop('non_label_cols', None)
except KeyError:
raise KeyError("'non_label_cols' is a required keyword dense data")
# Read data with dense specific keywords
arch_data = pd.read_csv(data_path_in, sep=kwargs['delimiter'],
na_values=kwargs['na_values'])
form_data = format_dense(arch_data, nlc, **kwargs)
elif format_type == "grid":
pass
elif format_type == "stacked":
pass
elif format_type == "transect":
pass
else:
raise NameError("%s is not a supported data format" % format_type)
form_data.to_csv(data_path_out, index=False)
def format_dense(base_data, non_label_cols, **kwargs):
"""
Formats dense data type to stacked data type.
Takes in a dense data type and converts into a stacked data type.
Parameters
----------
data : DataFrame
The dense data
non_label_cols : list
A list of columns in the data that are not label columns
label_col : str
Name of the label column in the formatted data. Default, "label"
count_col : str
Name of the count column in the formatted data. Default, "count"
nan_to_zero : bool
Set all nans to zero. Default, False
drop_na : bool
Drop all columns with nan in the dataset. Default, False
Returns
-------
: DataFrame
A formatted DataFrame in the stacked format
Notes
-----
Example of Dense Data conversion
>>> import pandas as pd
>>> dense_data = pd.DataFrame({'row' : [1,2,1,2], 'column' : [1,1,2,2],
'labelA': [1,0,3,4], 'labelB' : [3,2,1,4]})
>>> dense_data
column labelA labelB row
0 1 1 3 1
1 1 0 2 2
2 2 3 1 1
3 2 4 4 2
[4 rows x 4 columns]
# labelA and labelB might be species names. 'row' and 'column'
# are non-species names so pass these in as non_label_cols
>>> stacked_data = format_dense(dense_data, ['row', 'column'])
>>> stacked_data
row column label count
0 1 1 labelA 1
1 1 1 labelB 3
2 2 1 labelA 0
3 2 1 labelB 2
4 1 2 labelA 3
5 1 2 labelB 1
6 2 2 labelA 4
7 2 2 labelB 4
[8 rows x 4 columns]
"""
kwargs = _set_dense_defaults_and_eval(kwargs)
# Stack data in columnar form.
indexed_data = base_data.set_index(keys=non_label_cols)
columnar_data = indexed_data.stack(dropna=False)
columnar_data = columnar_data.reset_index()
# Rename columns
num = len(non_label_cols)
columnar_data.rename(columns={0: kwargs['count_col'], 'level_%i' % num:
kwargs['label_col']}, inplace=True)
# Set nans to zero?
if kwargs['nan_to_zero']:
ind = np.isnan(columnar_data[kwargs['count_col']])
# -*- coding: utf-8 -*-
"""
Some math and I/O tools. The original set of functions provided by
this module were originally translated from Yeti (now a dead language)
to Python.
"""
import pickle
import gzip
import bz2
import sys
import contextlib
import warnings
from types import SimpleNamespace
import operator
import numbers
import numpy as np
from scipy import linalg
from scipy.optimize import leastsq
import matplotlib.pyplot as plt
from pyyeti import guitools
from pyyeti import writer
# FIXME: We need the str/repr formatting used in Numpy < 1.14.
try:
np.set_printoptions(legacy="1.13")
except TypeError:
pass
def _check_3d(ax, need3d):
if need3d and not hasattr(ax, "get_zlim"):
raise ValueError("the axes object does not have a 3d projection")
return ax
def _check_makeplot(
makeplot, valid=("no", "new", "clear", "add"), figsize=None, need3d=False
):
if makeplot not in valid:
# makeplot must be an axes object if here ... check for 'plot'
# attribute:
if hasattr(makeplot, "plot"):
return _check_3d(makeplot, need3d)
raise ValueError(
f"invalid `makeplot` setting; must be in {valid} or be an axes object"
)
if makeplot != "no":
if makeplot == "new" or not plt.get_fignums():
plt.figure(figsize=figsize)
if makeplot == "add":
fig = plt.gcf()
if not fig.get_axes() and need3d:
return fig.add_subplot(projection="3d")
ax = plt.gca()
return _check_3d(ax, need3d)
if makeplot == "clear":
plt.clf()
if need3d:
fig = plt.gcf()
return fig.add_subplot(projection="3d")
return plt.gca()
return None
def _norm_vec(vec):
return vec / np.linalg.norm(vec)
def _initial_circle_fit(basic):
# See fit_circle_3d for description. Does steps 1-8.
# - basic is 2d ndarray, 3 x n
n = basic.shape[1] # step 1
if n < 3:
raise ValueError(f"need at least 3 data points to fit circle, only have {n}")
p1 = basic[:, 0]
p2 = basic[:, n // 3]
p3 = basic[:, 2 * n // 3]
v1 = p2 - p1 # step 2
v2 = p3 - p1
z_l = _norm_vec(np.cross(v1, v2)) # step 3
x_l = _norm_vec(v1) # step 4
y_l = np.cross(z_l, x_l) # step 5
basic2local = np.vstack((x_l, y_l, z_l))
# compute center by using chord bisectors:
b1 = np.cross(z_l, v1) # step 6
b2 = np.cross(z_l, v2)
mid1 = (p1 + p2) / 2 # step 7
mid2 = (p1 + p3) / 2
arr = np.column_stack((b1, -b2))
ab = np.linalg.lstsq(arr, mid2 - mid1, rcond=None)[0] # step 8
center = mid1 + ab[0] * b1
radius = np.linalg.norm(p1 - center)
return basic2local, center, radius
def fit_circle_2d(x, y, makeplot="no"):
"""
Find radius and center point of x-y data points
Parameters
----------
x, y : 1d array_like
Vectors x, y data points (in cartesian coordinates) that are
on a circle: [x, y]
makeplot : string or axes object; optional
Specifies if and how to plot data showing the fit.
=========== ===============================
`makeplot` Description
=========== ===============================
'no' do not plot
'clear' plot after clearing figure
'add' plot without clearing figure
'new' plot in new figure
axes object plot in given axes (like 'add')
=========== ===============================
Returns
-------
p : 1d ndarray
Vector: [xc, yc, R] where ``(xc, yc)`` defines the center of
the circle and ``R`` is the radius.
Notes
-----
Uses :func:`scipy.optimize.leastsq` to find optimum circle
parameters.
Examples
--------
For a test, provide precise x, y coordinates, but only for a 1/4
circle:
.. plot::
:context: close-figs
>>> import numpy as np
>>> from pyyeti.ytools import fit_circle_2d
>>> xc, yc, R = 1., 15., 35.
>>> th = np.linspace(0., np.pi/2, 10)
>>> x = xc + R*np.cos(th)
>>> y = yc + R*np.sin(th)
>>> fit_circle_2d(x, y, makeplot='new')
array([ 1., 15., 35.])
"""
x, y = np.atleast_1d(x, y)
basic2local, center, radius = _initial_circle_fit(np.vstack((x, y, 0 * x)))
clx, cly = center[:2]
# The optimization routine leastsq needs a function that returns
# the residuals:
# y - func(p, x)
# where "func" is the fit you're trying to match
def circle_residuals(p, d):
# p is [xc, yc, R]
# d is [x;y] coordinates
xc, yc, R = p
n = len(d) // 2
theta = np.arctan2(d[n:] - yc, d[:n] - xc)
return d - np.hstack((xc + R * np.cos(theta), yc + R * np.sin(theta)))
p0 = (clx, cly, radius)
d = np.hstack((x, y))
res = leastsq(circle_residuals, p0, args=(d,), full_output=1)
sol = res[0]
if res[-1] not in (1, 2, 3, 4):
raise ValueError(":func:`scipy.optimization.leastsq` failed: {}".res[-2])
ssq = np.sum(res[2]["fvec"] ** 2)
if ssq > 0.01:
msg = (
"data points do not appear to form a good circle, sum "
f"square of residuals = {ssq}"
)
warnings.warn(msg, RuntimeWarning)
ax = _check_makeplot(makeplot)
if ax:
ax.scatter(x, y, c="r", marker="o", s=60, label="Input Points")
th = np.arange(0, 361) * np.pi / 180.0
(x, y, radius) = sol
ax.plot(x + radius * np.cos(th), y + radius * np.sin(th), label="Fit")
ax.axis("equal")
ax.legend(loc="best", scatterpoints=1)
return sol
def axis_equal_3d(ax, buffer_space=10):
"""
Set equal axes for 3d plot
Parameters
----------
ax : axes object
An axes object with a 3d projection
buffer_space : scalar
Percent of maximum limit (x, y, or z) to use for buffer room.
Notes
-----
Since matplotlib doesn't have a 3d version of
``ax.axis('equal')``, this routine simply checks the current
limits, and adjusts all axes to be equal. Therefore, for this to
work properly, you must call this routine after you've plotted all
your data.
"""
extents = np.array([getattr(ax, f"get_{dim}lim")() for dim in "xyz"])
max_dimension = max(abs(extents[:, 1] - extents[:, 0]))
centers = np.mean(extents, axis=1)
r = max_dimension / 2 * (1 + buffer_space / 100)
for ctr, dim in zip(centers, "xyz"):
getattr(ax, f"set_{dim}lim")(ctr - r, ctr + r)
def _circle_fit_residuals(p, basic2local, basic, circ_parms):
# p is [th, ph, xc, yc, zc]
# - th & ph are angles to change the local z-axis direction:
# - th is angle to rotate local coords about x-axis
# - ph is angle to rotate result of th rotation about new
# local y-axis
# - xc, yc, zc is center of circle in basic
# d is [basic2local, basic]
# - basic2local is original transformation
# - basic is 3 x n: coordinates of all points in basic
th, ph, xc, yc, zc = p
c1, s1 = np.cos(th), np.sin(th)
c2, s2 = np.cos(ph), np.sin(ph)
# t1 = np.array([[1, 0, 0], [0, c1, s1], [0, -s1, c1]])
# t2 = np.array([[c2, 0, -s2], [0, 1, 0], [s2, 0, c2]])
# trans = t2 @ t1
# or, doing it by hand:
trans = np.array([[c2, s1 * s2, -s2 * c1], [0, c1, s1], [s2, -c2 * s1, c1 * c2]])
new_basic2local = trans @ basic2local
local = new_basic2local @ (basic - [[xc], [yc], [zc]])
radii = np.linalg.norm(local[:2], axis=0)
radius = radii.mean()
if circ_parms is not None:
circ_parms.basic2local = new_basic2local
circ_parms.local = local
circ_parms.radius = radius
circ_parms.center = np.array([xc, yc, zc])
return np.hstack((radii / radius - 1, local[2]))
def fit_circle_3d(basic, makeplot="no"):
"""
Fit a circle through data points in 3D space
Parameters
----------
basic : 2d array_like, 3 x n
Coordinates of data points in the basic (rectangular)
coordinate system; rows `basic` are the x, y, and z
coordinates
makeplot : string or axes object; optional
Specifies if and how to plot data showing the fit.
=========== ===============================
`makeplot` Description
=========== ===============================
'no' do not plot
'clear' plot after clearing figure
'add' plot without clearing figure
'new' plot in new figure
axes object plot in given axes (like 'add')
=========== ===============================
Note that if `makeplot` is 'add' or an axes object, it must be
3d; otherwise a ValueError exception is raised.
Returns
-------
A SimpleNamespace with the members:
local : 2d ndarray, 3 x n
The coordinates of all points in a local (rectangular)
coordinate system. The z-axis is perpendicular to the plane
of the circle so the z-coordinate is 0.0 for all points.
basic2local : 2d ndarray
3 x 3 transform from basic to local. The local system is defined
such that the z-axis is perpendicular to the plane of the
circle.
center : 1d ndarray
Coordinates of circle in basic system (3 elements: x, y, z)
radius : scalar
Radius of circle
ssqerr : scalar
Sum of the squares of the radius and z-axis errors for each
point. For a perfect fit, this will be zero.
Notes
-----
At a high level, this routine works by: one, forming a
(non-unique) transform to a local coordinate system (steps 1-5),
two, finding the center in basic coordinates from the chord
bisector approach (steps 6-9), three, finding the radius (step
10), and four, optimizing the fit (step 11).
1. Set ``n = basic.shape[1]``.
2. Create two vectors: ``v1`` is from point 1 to point ``n // 3``,
and ``v2`` is from point 1 to point ``2 * n // 3``.
3. Form unit vector from the cross product of ``v1`` and ``v2`` to
get a perpendicular axis to the circle. This is the local
z-axis and the 3rd row of the transformation matrix
`basic2local`.
4. The local x-axis is defined as the unit vector of
``v1``. This is the 1st row of `basic2local`. Note that this
is just the initial orientation; the final local x-axis will
be oriented along the vector from the center of the circle to
the first node.
5. The local y-axis is the cross product of the local z-axis and
the local x-axis. This is the 2nd row of `basic2local`.
6. Noting that ``v1`` and ``v2`` are chords, the bisector of each
chord is found by crossing the z-axis unit vector with the
chord. Call these bisector vectors ``b1`` and ``b2``.
7. Compute the midpoint of each chord: ``mid1`` is center of
``v1`` and ``mid2`` is center of ``v2``.
8. Let `center` denote the center of the circle. Since both
bisectors must pass through `center`::
mid1 + alpha * b1 = center
mid2 + beta * b2 = center
where ``alpha`` and ``beta`` are unknown scalars. Subtracting
the second equation from the first gives::
alpha * b1 - beta * b2 = mid2 - mid1
That equation is actually three equations with two of them
being independent. Therefore, we can solve for ``alpha`` and
``beta`` using a least-squares approach (
:func:`np.linalg.lstsq`). Then, we can use either of the two
equations above to solve for `center`. Note the `center` is
in basic coordinates.
9. The coordinates of all points can now be calculated in the
local coordinate system (note that the local z-coordinate is
0.0 for all points)::
local = basic2local @ (basic - center)
10. The radius for each point in ``local`` is simply the root-sum-
square of each local x & y coordinate. This routine computes
the average radius and sum of the squares of the radius errors
for each point.
11. For cases where there are more than three data points, this
routine optimizes the fit by using
:func:`scipy.optimize.leastsq`. The five optimization
variables are the direction of local z-axis (two angles) and
the location of the center point.
Examples
--------
Fit a circle through the three points: [3, 0, 0], [0, 3, 0] and
[0, 0, 3]. The center should be at [1, 1, 1]:
.. plot::
:context: close-figs
>>> import numpy as np
>>> from pyyeti.ytools import fit_circle_3d
>>> params = fit_circle_3d(3*np.eye(3), makeplot='new')
>>> params.center
array([ 1., 1., 1.])
"""
basic = np.atleast_2d(basic)
if basic.shape[0] != 3:
raise ValueError(f"`basic` must have 3 rows (x, y, z), not {basic.shape[0]}")
basic2local, center, radius = _initial_circle_fit(basic)
# steps 9 and 10 are done in _circle_fit_residuals ... which is
# called during the optimization
# step 11: optimize solution:
# - optimization parameters: [th, ph, xc, yc, zc]
p0 = (0.0, 0.0, *center)
res = leastsq(
_circle_fit_residuals, p0, args=(basic2local, basic, None), full_output=True
)
sol = res[0]
if res[-1] not in (1, 2, 3, 4):
raise ValueError(f":func:`scipy.optimization.leastsq` failed: {res[-2]}")
ssqerr = np.sum(res[2]["fvec"] ** 2)
if ssqerr > 0.01:
msg = (
"data points do not appear to form a good circle, sum "
f"square of residuals = {ssqerr}"
)
warnings.warn(msg, RuntimeWarning)
# create output SimpleNamespace:
circ_parms = SimpleNamespace(ssqerr=ssqerr)
# get optimized fit ... it will be updated below after updating
# angle of x axis to point to node 1, but we need the local
# coordinates of node 1 to get angle:
_circle_fit_residuals(sol, basic2local, basic, circ_parms)
# put in pre-optimized parameters in case they're of interest:
start_parms = SimpleNamespace()
_circle_fit_residuals(p0, basic2local, basic, start_parms)
circ_parms.start_parms = start_parms
# reset the local x-axis to point to 1st node:
th = np.arctan2(circ_parms.local[1, 0], circ_parms.local[0, 0])
s = np.sin(th)
c = np.cos(th)
trans = np.array([[c, s], [-s, c]])
basic2local[:2] = trans @ basic2local[:2]
# get final, optimized fit:
_circle_fit_residuals(sol, basic2local, basic, circ_parms)
ax = _check_makeplot(makeplot, need3d=True)
if ax:
for item in "xyz":
get_func = getattr(ax, f"get_{item}label")
if not get_func():
set_func = getattr(ax, f"set_{item}label")
set_func(item.upper())
ax.plot(*basic, "o", label="Data")
# compute new points on circle in local coordinates:
th = np.deg2rad(np.arange(0.0, 360))
x = circ_parms.radius * np.cos(th)
y = circ_parms.radius * np.sin(th)
z = 0 * x
# transform to basic coordinates and plot:
circle_basic = (
circ_parms.center + (np.column_stack((x, y, z)) @ circ_parms.basic2local)
).T
ax.plot(*circle_basic, label="Fit")
axis_equal_3d(ax)
ax.legend(loc="upper left", bbox_to_anchor=(1.0, 1.0))
ax.get_figure().tight_layout()
return circ_parms
def histogram(data, binsize):
"""
Calculate a histogram
Parameters
----------
data : 1d array_like
The data to do histogram counting on
binsize : scalar
Bin size
Returns
-------
histo : 2d ndarray
3-column matrix: [bincenter, count, percent]
Notes
-----
Only bins that have count > 0 are included in the output. The
bin-centers are: ``binsize*[..., -2, -1, 0, 1, 2, ...]``.
The main difference from :func:`numpy.histogram` is how bins are
defined and how the data are returned. For
:func:`numpy.histogram`, you must either define the number of bins
or the bin edges and the output will include empty bins; for this
routine, you only define the binsize and only non-empty bins are
returned.
Examples
--------
>>> import numpy as np
>>> np.set_printoptions(precision=4, suppress=True)
>>> from pyyeti import ytools
>>> data = [1, 2, 345, 2.4, 1.8, 345.1]
>>> ytools.histogram(data, 1.0)
array([[ 1. , 1. , 16.6667],
[ 2. , 3. , 50. ],
[ 345. , 2. , 33.3333]])
To try to get similar output from :func:`numpy.histogram` you have
to define the bins:
>>> binedges = [0.5, 1.5, 2.5, 344.5, 345.5]
>>> cnt, bins = np.histogram(data, binedges)
>>> cnt # doctest: +ELLIPSIS
array([1, 3, 0, 2]...)
>>> bins
array([ 0.5, 1.5, 2.5, 344.5, 345.5])
"""
# use a generator to simplify the work; only yield a bin
# if it has data:
def _get_next_bin(data, binsize):
data = np.atleast_1d(data)
data = np.sort(data[np.isfinite(data)])
if data.size == 0:
yield [0, 0]
return
a = int(np.floor(data[0] / binsize))
while data.size > 0:
rgt = (a + 1 / 2) * binsize
count = np.searchsorted(data, rgt)
if count > 0:
yield [a * binsize, count]
data = data[count:]
if data.size > 0:
a = int(np.floor(data[0] / binsize))
else:
a += 1
bins = []
for b in _get_next_bin(data, binsize):
bins.append(b)
histo = np.zeros((len(bins), 3))
histo[:, :2] = bins
s = histo[:, 1].sum()
if s > 0:
histo[:, 2] = 100 * histo[:, 1] / s
return histo
@contextlib.contextmanager
def np_printoptions(*args, **kwargs):
"""
Defines a context manager for :func:`numpy.set_printoptions`
Parameters
----------
*args, **kwargs : arguments for :func:`numpy.set_printoptions`
See that function for a description of all available inputs.
Notes
-----
This is for temporarily (locally) changing how NumPy prints
matrices.
Examples
--------
Print a matrix with current defaults, re-print it with 2 decimals
using the "with" statement enabled by this routine, and then
re-print it one last time again using the current defaults:
>>> import numpy as np
>>> from pyyeti import ytools
>>> a = np.arange(np.pi/20, 1.5, np.pi/17).reshape(2, -1)
>>> print(a) # doctest: +SKIP
[[ 0.15707963 0.3418792 0.52667877 0.71147834]
[ 0.8962779 1.08107747 1.26587704 1.45067661]]
>>> with ytools.np_printoptions(precision=2, linewidth=45,
... suppress=1):
... print(a)
[[ 0.16 0.34 0.53 0.71]
[ 0.9 1.08 1.27 1.45]]
>>> print(a) # doctest: +SKIP
[[ 0.15707963 0.3418792 0.52667877 0.71147834]
[ 0.8962779 1.08107747 1.26587704 1.45067661]]
"""
original = np.get_printoptions()
np.set_printoptions(*args, **kwargs)
try:
yield
finally:
np.set_printoptions(**original)
def multmd(a, b):
"""
Multiply a matrix and a diagonal, or two diagonals, in either
order.
Parameters
----------
a : ndarray
Matrix (2d array) or diagonal (1d array).
b : ndarray
Matrix (2d array) or diagonal (1d array).
Returns
-------
c : ndarray
Product of a * b.
Notes
-----
This function should always be faster than numpy.dot() since the
diagonal is not expanded to full size.
Examples
--------
>>> from pyyeti import ytools
>>> import numpy as np
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([10, 100])
>>> ytools.multmd(a, b)
array([[ 10, 200],
[ 30, 400]])
>>> ytools.multmd(b, a)
array([[ 10, 20],
[300, 400]])
>>> ytools.multmd(b, b)
array([ 100, 10000])
"""
if np.ndim(a) == 1:
return (a * b.T).T
else:
return a * b
def mkpattvec(start, stop, inc):
"""
Make a pattern "vector".
Parameters
----------
start : scalar or array
Starting value.
stop : scalar
Ending value for first element in `start` (exclusive).
inc : scalar
Increment for first element in `start`.
Returns
-------
pattvec : array
Has one higher dimension than `start`. Shape = (-1,
`start`.shape).
Notes
-----
The first element of `start`, `stop`, and `inc` fully determine the
number of increments that are generated. The other elements in
`start` go along for the ride.
Examples
--------
>>> from pyyeti import ytools
>>> import numpy as np
>>> ytools.mkpattvec([0, 1, 2], 24, 6).ravel()
array([ 0, 1, 2, 6, 7, 8, 12, 13, 14, 18, 19, 20])
>>> x = np.array([[10, 20, 30], [40, 50, 60]])
>>> ytools.mkpattvec(x, 15, 2)
array([[[10, 20, 30],
[40, 50, 60]],
<BLANKLINE>
[[12, 22, 32],
[42, 52, 62]],
<BLANKLINE>
[[14, 24, 34],
[44, 54, 64]]])
"""
start = np.array(start)
s = start.ravel()
xn = np.array([s + i for i in range(0, stop - s[0], inc)])
return xn.reshape((-1,) + start.shape)
def isdiag(A, tol=1e-12):
"""
Checks contents of square matrix A to see if it is approximately
diagonal.
Parameters
----------
A : 2d numpy array
If not square or if number of dimensions does not equal 2, this
routine returns False.
tol : scalar; optional
The tolerance value.
Returns
-------
True if `A` is a diagonal matrix, False otherwise.
Notes
-----
If all off-diagonal values are less than `tol` times the maximum
diagonal value (absolute-valuewise), this routine returns
True. Otherwise, False is returned.
See also
--------
:func:`mattype`
Examples
--------
>>> from pyyeti import ytools
>>> import numpy as np
>>> A = np.diag(np.random.randn(5))
>>> ytools.isdiag(A)
True
>>> A[0, 2] = .01
>>> A[2, 0] = .01
>>> ytools.isdiag(A) # symmetric but not diagonal
False
>>> ytools.isdiag(A[1:, :]) # non-square
False
"""
if A.shape[0] != A.shape[1]:
return False
d = np.diag(A)
max_off = abs(np.diag(d) - A).max()
max_on = abs(d).max()
return max_off <= tol * max_on
def mattype(A, mtype=None):
"""
Checks contents of square matrix `A` to see if it is symmetric,
hermitian, positive-definite, diagonal, and identity.
Parameters
----------
A : 2d array_like or None
If not square or if number of dimensions does not equal 2, the
return type is 0. If None, just return the `mattypes` output
(not a tuple).
mtype : string or None
If string, it must be one of the `mattypes` listed below; in
this case, True is returned if `A` is of the type specified or
False otherwise. If None, `Atype` (if `A` is not None) and
`mattypes` is returned. `mtype` is ignored if `A` is None.
Returns
-------
flag : bool
True/False flag specifying whether or not `A` is of the type
specified by `mtype`. Not returned if either `A` or `mtype` is
None. If `flag` is returned, it is the only returned value.
Atype : integer
Integer with bits set according to content. Not returned if
`A` is None or if `mtype` is specified.
mattypes : dictionary
Provided for reference::
mattypes = {'symmetric': 1,
'hermitian': 2,
'posdef': 4,
'diagonal': 8,
'identity': 16}
Not returned if `mtype` is specified. This is the only return
if `A` is None.
Notes
-----
Here are some example usages::
mattype(A) # returns (Atype, mattypes)
mattype(A, 'symmetric') # returns True or False
mattype(None) # returns mattypes
See also
--------
:func:`isdiag`
Examples
--------
>>> from pyyeti import ytools
>>> import numpy as np
>>> A = np.eye(5)
>>> ytools.mattype(A, 'identity')
True
>>> Atype, mattypes = ytools.mattype(A)
>>>
>>> Atype == 1 | 4 | 8 | 16
True
>>> if Atype & mattypes['identity']:
... print('A is identity')
A is identity
>>> for i in sorted(mattypes):
... print(f'{i:10s}: {mattypes[i]:2}')
diagonal : 8
hermitian : 2
identity : 16
posdef : 4
symmetric : 1
>>> mattypes = ytools.mattype(None)
>>> for i in sorted(mattypes):
... print(f'{i:10s}: {mattypes[i]:2}')
diagonal : 8
hermitian : 2
identity : 16
posdef : 4
symmetric : 1
"""
mattypes = {
"symmetric": 1,
"hermitian": 2,
"posdef": 4,
"diagonal": 8,
"identity": 16,
}
if A is None:
return mattypes
Atype = 0
A = np.asarray(A)
if mtype is None:
if A.ndim != 2 or A.shape[0] != A.shape[1]:
return Atype, mattypes
if np.allclose(A, A.T):
Atype |= mattypes["symmetric"]
if np.isrealobj(A):
try:
linalg.cholesky(A)
except linalg.LinAlgError:
pass
else:
Atype |= mattypes["posdef"]
elif np.iscomplexobj(A) and np.allclose(A, A.T.conj()):
Atype |= mattypes["hermitian"]
try:
linalg.cholesky(A)
except linalg.LinAlgError:
pass
else:
Atype |= mattypes["posdef"]
if isdiag(A):
Atype |= mattypes["diagonal"]
d = np.diag(A)
if np.allclose(1, d):
Atype |= mattypes["identity"]
return Atype, mattypes
if A.ndim != 2 or A.shape[0] != A.shape[1]:
return False
if mtype == "symmetric":
return np.allclose(A, A.T)
if mtype == "hermitian":
return np.allclose(A, A.T.conj())
if mtype == "posdef":
if np.isrealobj(A):
if not np.allclose(A, A.T):
return False
else:
if not np.allclose(A, A.T.conj()):
return False
try:
linalg.cholesky(A)
return True
except linalg.LinAlgError:
return False
if mtype in ("diagonal", "identity"):
if isdiag(A):
if mtype == "diagonal":
return True
d = np.diag(A)
return np.allclose(1, d)
else:
return False
raise ValueError("invalid `mtype`")
def sturm(A, lam):
"""
Count number of eigenvalues <= `lam` of symmetric matrix `A`.
Parameters
----------
A : 2d ndarray
Symmetric matrix to do Sturm counting on.
lam : float or array of floats
Eigenvalue cutoff(s).
Returns
-------
count : 1d ndarray
Contains number of eigenvalues below the cutoff values in
`lam`. That is: count[i] = number of eigenvalues in `A` below
value `lam[i]`.
Notes
-----
Computes the Hessenberg form of `A` which is tridiagonal if `A` is
symmetric. Then it does a simple Sturm count on the results (code
derived from LAPACK routine DLAEBZ).
Examples
--------
Make symmetric matrix, count number of eigenvalues <= 0, and compute
them:
>>> from pyyeti import ytools
>>> import numpy as np
>>> import scipy.linalg as la
>>> np.set_printoptions(precision=4, suppress=True)
>>> A = np.array([[ 96., -67., 36., 37., 93.],
... [ -67., 28., 82., -66., -19.],
... [ 36., 82., 112., 0., -61.],
... [ 37., -66., 0., -14., 47.],
... [ 93., -19., -61., 47., -134.]])
>>> w = la.eigh(A, eigvals_only=True)
>>> w
array([-195.1278, -61.9135, -10.1794, 146.4542, 208.7664])
>>> ytools.sturm(A, 0)
array([3])
>>> ytools.sturm(A, [-200, -100, -20, 200, 1000])
array([0, 1, 2, 4, 5])
"""
# assuming A is symmetric, the hessenberg similarity form is
# tridiagonal:
h = linalg.hessenberg(A)
# get diagonal and sub-diagonal:
d = np.diag(h)
s = np.diag(h, -1)
abstol = np.finfo(float).eps
ssq = s ** 2
pivmin = max(1.0, np.max(s)) * abstol
try:
minp = len(lam)
except TypeError:
minp = 1
lam = [lam]
# count eigenvalues below lam[i] (adapted from LAPACK routine
# DLAEBZ)
count = np.zeros(minp, int)
n = len(d)
for i in range(minp):
val = lam[i]
tmp = d[0] - val
if abs(tmp) < pivmin:
tmp = -pivmin
if tmp <= 0:
c = 1
else:
c = 0
for j in range(1, n):
tmp = d[j] - ssq[j - 1] / tmp - val
if abs(tmp) < pivmin:
tmp = -pivmin
if tmp <= 0:
c += 1
count[i] = c
return count
def eig_si(
K, M, Xk=None, f=None, p=10, mu=0, tol=1e-6, pmax=None, maxiter=50, verbose=True
):
r"""
Perform subspace iteration to calculate eigenvalues and eigenvectors.
Parameters
----------
K : ndarray
The stiffness (symmetric).
M : ndarray
The mass (positive definite).
Xk : ndarray or None
Initial guess @ eigenvectors; # columns > `p`. If None,
random vectors are used from ``np.random.rand()-.5``.
f : scalar or None
Desired cutoff frequency in Hz. `pmax` will override this if
set. Takes precedence over `p` if both are input.
p : scalar or None
Number of desired eigenpairs (eigenvalues and eigenvectors).
`pmax` will limit this if set. If `f` is input, `p` is
calculated internally (from :func:`sturm`).
mu : scalar
Shift value in (rad/sec)^2 units. See notes.
tol : scalar
Eigenvalue convergence tolerance.
pmax : scalar or None
Maximum number of eigenpairs; no limit if None.
maxiter : scalar
Maximum number of iterations.
verbose : bool
If True, print status message for each iteration.
Returns
-------
lam : ndarray
Ideally, `p` converged eigenvalues.
phi : ndarray
Ideally, p converged eigenvectors.
phiv : ndarray
First p columns are `phi`, others are leftover iteration
vectors which may be a good starting point for a second call.
Notes
-----
The routine solves the eigenvalue problem:
.. math::
K \Phi = M \Phi \Lambda
Where :math:`\Phi` is a matrix of right eigenvectors and
:math:`\Lambda` is a diagonal matrix of eigenvalues.
This routine works well for relatively small `p`. Trying to
recover a large portion of modes may fail. Craig-Bampton models
with residual flexibility modes also cause trouble.
`mu` must not equal any eigenvalue. For systems with rigid-body
modes, `mu` must be non-zero. Recommendations:
- If you have eigenvalue estimates, set `mu` to be average of two
widely spaced, low frequency eigenvalues. For example,
``mu = 5000`` worked well when the actual eigenvalues were:
[0, 0, 0, 0, .05, 15.8, 27.8, 10745.4, ...]
- ``mu = -10`` has worked well.
- ``mu = 1/10`` of the first flexible eigenvalue has worked well.
It may be temping to set `mu` to a higher value so a few higher
frequency modes can be calculated. This might work, especially if
you have good estimates for `Xk`. Otherwise, it is probably
better to set `mu` to a lower value (as recommended above) and
recover more modes to span the range of interest.
In practice, unless you have truly good estimates for the
eigenvectors (such as the output `phiv` may be), letting `Xk`
start as random seems to work well.
Examples
--------
>>> from pyyeti import ytools
>>> import numpy as np
>>> k = np.array([[5, -5, 0], [-5, 10, -5], [0, -5, 5]])
>>> m = np.eye(3)
>>> np.set_printoptions(precision=4, suppress=True)
>>> w, phi, phiv = ytools.eig_si(k, m, mu=-1) # doctest: +ELLIPSIS
Iteration 1 completed
Convergence: 3 of 3, tolerance range after 2 iterations is [...
>>> print(abs(w))
[ 0. 5. 15.]
>>> import scipy.linalg as linalg
>>> k = np.random.randn(40, 40)
>>> m = np.random.randn(40, 40)
>>> k = np.dot(k.T, k) * 1000
>>> m = np.dot(m.T, m) * 10
>>> w1, phi1 = linalg.eigh(k, m, eigvals=(0, 14))
>>> w2, phi2, phiv2 = ytools.eig_si(k, m, p=15, mu=-1, tol=1e-12,
... verbose=False)
>>> fcut = np.sqrt(w2.max())/2/np.pi * 1.001
>>> w3, phi3, phiv3 = ytools.eig_si(k, m, f=fcut, tol=1e-12,
... verbose=False)
>>> print(np.allclose(w1, w2))
True
>>> print(np.allclose(np.abs(phi1), np.abs(phi2)))
True
>>> print(np.allclose(w1, w3))
True
>>> print(np.allclose(np.abs(phi1), np.abs(phi3)))
True
"""
n = np.size(K, 0)
if f is not None:
# use sturm sequence check to determine p:
lamk = (2 * np.pi * f) ** 2
p = sturm(K - lamk * M, 0)[0]
if mu != 0:
Kmod = K - mu * M
Kd = linalg.lu_factor(Kmod)
else:
Kd = linalg.lu_factor(K)
if pmax is not None and p > pmax:
p = pmax
if p > n:
p = n
q = max(2 * p, p + 8)
if q > n:
q = n
if Xk is not None:
c = np.size(Xk, 1)
else:
c = 0
if c < q:
if Xk is None:
Xk = np.random.rand(n, q) - 0.5
else:
Xk = np.hstack((Xk, np.random.rand(n, q - c) - 0.5))
elif c > q:
Xk = Xk[:, :q]
lamk = np.ones(q)
nconv = 0
loops = 0
tolc = 1
posdef = mattype(None)["posdef"]
eps = np.finfo(float).eps
while (tolc > tol or nconv < p) and loops < maxiter:
loops += 1
lamo = lamk
MXk = np.dot(M, Xk)
Xkbar = linalg.lu_solve(Kd, MXk)
Mk = np.dot(np.dot(Xkbar.T, M), Xkbar)
Kk = np.dot(Xkbar.T, MXk)
# solve subspace eigenvalue problem:
mtp = mattype(Mk)[0]
if not mtp & posdef:
factor = 1000 * eps
pc = 0
while 1:
pc += 1
Mk += np.diag(np.diag(Mk) * factor)
factor *= 10.0
mtp = mattype(Mk)[0]
if mtp & posdef or pc > 5:
break
if mtp & posdef:
Mkll = linalg.cholesky(Mk, lower=True)
Kkmod = linalg.solve(Mkll, linalg.solve(Mkll, Kk).T).T
Kkmod = (Kkmod + Kkmod.T) / 2
lamk, Qmod = linalg.eigh(Kkmod)
Q = linalg.solve(Mkll.T, Qmod)
else:
raise ValueError(
"subspace iteration failed, reduced mass"
" matrix not positive definite"
)
dlam = np.abs(lamo - lamk)
tolc = (dlam / np.abs(lamk))[:p]
nconv = np.sum(tolc <= tol)
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import uuid
from datetime import datetime
import numpy as np
import utm
from nexustiles.model.nexusmodel import get_approximate_value_for_lat_lon
from scipy import spatial
import BaseDomsHandler
import ResultsStorage
import datafetch
import fetchedgeimpl
import geo
import workerthread
from webservice.NexusHandler import nexus_handler
@nexus_handler
class CombinedDomsMatchupQueryHandler(BaseDomsHandler.BaseDomsQueryCalcHandler):
name = "Experimental Combined DOMS In-Situ Matchup"
path = "/domsmatchup"
description = ""
params = {}
singleton = True
def __init__(self):
BaseDomsHandler.BaseDomsQueryCalcHandler.__init__(self)
def fetchData(self, endpoints, startTime, endTime, bbox, depth_min, depth_max, platforms):
boundsConstrainer = geo.BoundsConstrainer(asString=bbox)
threads = []
for endpoint in endpoints:
thread = workerthread.WorkerThread(datafetch.fetchData,
params=(endpoint, startTime, endTime, bbox, depth_min, depth_max))
threads.append(thread)
workerthread.wait(threads, startFirst=True, poll=0.01)
data2 = []
for thread in threads:
data, bounds = thread.results
data2 += data
boundsConstrainer.testOtherConstrainer(bounds)
return data2, boundsConstrainer
def __parseDatetime(self, dtString):
dt = datetime.strptime(dtString, "%Y-%m-%dT%H:%M:%SZ")
epoch = datetime.utcfromtimestamp(0)
time = (dt - epoch).total_seconds() * 1000.0
return time
def calc(self, computeOptions, **args):
primary = computeOptions.get_argument("primary", None)
matchup = computeOptions.get_argument("matchup", None)
startTime = computeOptions.get_argument("s", None)
endTime = computeOptions.get_argument("e", None)
bbox = computeOptions.get_argument("b", None)
timeTolerance = computeOptions.get_float_arg("tt")
depth_min = computeOptions.get_float_arg("depthMin", default=None)
depth_max = computeOptions.get_float_arg("depthMax", default=None)
radiusTolerance = computeOptions.get_float_arg("rt")
platforms = computeOptions.get_argument("platforms", None)
if primary is None or len(primary) == 0:
raise Exception("No primary dataset specified")
if matchup is None or len(matchup) == 0:
raise Exception("No matchup datasets specified")
start = self._now()
primarySpec = self.getDataSourceByName(primary)
if primarySpec is None:
raise Exception("Specified primary dataset not found using identifier '%s'" % primary)
primaryData, bounds = self.fetchData([primarySpec], startTime, endTime, bbox, depth_min, depth_max, platforms)
primaryContext = MatchupContext(primaryData)
matchupIds = matchup.split(",")
for matchupId in matchupIds:
matchupSpec = self.getDataSourceByName(matchupId)
if matchupSpec is not None: # Then it's in the in-situ configuration
proc = InsituDatasetProcessor(primaryContext, matchupSpec, startTime, endTime, bbox, depth_min,
depth_max,
platforms, timeTolerance, radiusTolerance)
proc.start()
else: # We assume it to be a Nexus tiled dataset
'''
Single Threaded at the moment...
'''
daysinrange = self._get_tile_service().find_days_in_range_asc(bounds.south, bounds.north, bounds.west,
bounds.east, matchupId,
self.__parseDatetime(startTime) / 1000,
self.__parseDatetime(endTime) / 1000)
tilesByDay = {}
for dayTimestamp in daysinrange:
ds1_nexus_tiles = self._get_tile_service().get_tiles_bounded_by_box_at_time(bounds.south, bounds.north,
bounds.west, bounds.east,
matchupId, dayTimestamp)
# print "***", type(ds1_nexus_tiles)
# print ds1_nexus_tiles[0].__dict__
tilesByDay[dayTimestamp] = ds1_nexus_tiles
primaryContext.processGridded(tilesByDay, matchupId, radiusTolerance, timeTolerance)
matches, numMatches = primaryContext.getFinal(len(matchupIds))
end = self._now()
args = {
"primary": primary,
"matchup": matchupIds,
"startTime": startTime,
"endTime": endTime,
"bbox": bbox,
"timeTolerance": timeTolerance,
"depthMin": depth_min,
"depthMax": depth_max,
"radiusTolerance": radiusTolerance,
"platforms": platforms
}
details = {
"timeToComplete": (end - start),
"numInSituRecords": primaryContext.insituCount,
"numInSituMatched": primaryContext.insituMatches,
"numGriddedChecked": primaryContext.griddedCount,
"numGriddedMatched": primaryContext.griddedMatched
}
with ResultsStorage.ResultsStorage() as resultsStorage:
execution_id = resultsStorage.insertResults(results=matches, params=args, stats=details, startTime=start,
completeTime=end, userEmail="")
return BaseDomsHandler.DomsQueryResults(results=matches, args=args, details=details, bounds=None, count=None,
computeOptions=None, executionId=execution_id)
class MatchupContextMap:
def __init__(self):
pass
def add(self, context):
pass
def delete(self, context):
pass
class MatchupContext:
def __init__(self, primaryData):
self.id = str(uuid.uuid4())
self.griddedCount = 0
self.griddedMatched = 0
self.insituCount = len(primaryData)
self.insituMatches = 0
self.primary = primaryData
for r in self.primary:
r["matches"] = []
self.data = []
for s in primaryData:
u = utm.from_latlon(s["y"], s["x"])
v = (u[0], u[1], 0.0)
self.data.append(v)
if len(self.data) > 0:
self.tree = spatial.KDTree(self.data)
else:
self.tree = None
def getFinal(self, minMatchesToInclude):
matched = []
ttlMatches = 0
for m in self.primary:
if len(m["matches"]) >= minMatchesToInclude:
matched.append(m)
ttlMatches += len(m["matches"])
return matched, ttlMatches
def processGridded(self, tilesByDay, source, xyTolerance, timeTolerance):
for r in self.primary:
foundSatNodes = self.__getSatNodeForLatLonAndTime(tilesByDay, source, r["y"], r["x"], r["time"],
xyTolerance)
self.griddedCount += 1
self.griddedMatched += len(foundSatNodes)
r["matches"].extend(foundSatNodes)
def processInSitu(self, records, xyTolerance, timeTolerance):
if self.tree is not None:
for s in records:
self.insituCount += 1
u = utm.from_latlon(s["y"], s["x"])
                coords = np.array([u[0], u[1], 0])
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# <NAME> (<EMAIL>), Blue Yonder Gmbh, 2016
from __future__ import absolute_import, division
import numpy as np
import pandas as pd
from tests.fixtures import DataTestCase
from tsfresh.feature_extraction.extraction import extract_features
from tsfresh.feature_extraction.settings import FeatureExtractionSettings
import six
class FeatureExtractorTestCase(DataTestCase):
"""The unit tests in this module make sure if the time series features are created properly"""
def setUp(self):
self.settings = FeatureExtractionSettings()
self.settings.PROFILING = False
def test_calculate_ts_features(self):
# todo: implement more methods and test more aspects
df = self.create_test_data_sample()
extracted_features = extract_features(df, self.settings, "id", "sort", "kind", "val")
self.assertIsInstance(extracted_features, pd.DataFrame)
        self.assertTrue(np.all(extracted_features.a__maximum == np.array([71, 77])))
#!/usr/bin/env python3
# coding: utf-8
"""
Load dataset year by year, interpolate each map, and add label for each pixel.
No special preprocessing for the labels, only bounding box
"""
import os
import shutil
import sys
import numpy as np
import time
import matplotlib.pyplot as plt
import healpy as hp
import pandas as pd
from tqdm import tqdm
import h5py
import matplotlib.patches as patches
from scipy.interpolate import griddata, RegularGridInterpolator
def download(datapath, url, year):
import requests
url = url.format(year)
filename = url.split('/')[-1]
file_path = os.path.join(datapath, filename)
if os.path.exists(file_path):
return file_path
print('Downloading ' + url)
r = requests.get(url, stream=True)
with open(file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=16 * 1024 ** 2):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
return file_path
def interpolate(images, boxes):
measures, channels, lat_x, lon_x = images.shape
lon_ = np.arange(lon_x)/lon_x*360
lat_ = np.arange(lat_x)/lat_x*180-90
lon, lat = np.meshgrid(*(lon_, lat_))
nfeat = 5
Nside = [32, 64]
for nside in Nside:
print("preprocessing data at nside = {}".format(nside))
npix = hp.nside2npix(nside)
data = np.empty((measures, npix, channels))
labels = np.zeros((measures, npix, nfeat))
pix = np.arange(npix)
coords_hp = hp.pix2ang(nside, pix, nest=True, lonlat=True)
        coords_hp = np.asarray(coords_hp)
import os
from datetime import date
import itertools
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import h5py
from insar import timeseries
from apertools import sario, gps, plotting, latlon, gps_plots, constants
from apertools.colors import MATLAB_COLORS
from apertools.log import get_log
logger = get_log()
p2c = constants.PHASE_TO_CM
p2mm = constants.PHASE_TO_CM * 365 * 10
# TODO: MOVE THESE!!!
from apertools.sario import STACK_FLAT_SHIFTED_DSET
station_name_list = [
"NMHB",
"TXAD",
"TXBG",
"TXBL",
"TXCE",
"TXFS",
"TXKM",
"TXL2",
"TXMC",
"TXMH",
"TXOE",
"TXOZ",
"TXS3",
"TXSO",
]
def set_rcparams():
# https://matplotlib.org/tutorials/introductory/customizing.html#a-sample-matplotlibrc-file
# https://matplotlib.org/3.1.1/tutorials/introductory/lifecycle.html
style_dict = {
"pdf.fonttype": 42,
"ps.fonttype": 42,
"font.family": "Helvetica",
"font.size": 16,
"font.weight": "bold",
}
mpl.rcParams.update(style_dict)
def plot_phase_vs_elevation(
dem=None,
unw_stack=None,
outname="phase_vs_elevation_noraster.pdf",
igram_idx=210,
el_cutoff=1200,
to_cm=True,
rasterized=True,
):
if unw_stack is None:
with h5py.File("unw_stack_shiftedonly.h5", "r") as f:
unw_stack = f["stack_flat_shifted"][:]
if dem is None:
dem = sario.load("elevation_looked.dem")
# every = 5
# X = np.repeat(dem[np.newaxis, ::every, 100:-100:every], 400, axis=0).reshape(-1).astype(float)
# X += 30 * np.random.random(X.shape)
# Y = unw_stack[:4000:10, ::every, 100:-100:every].reshape(-1)
X = dem[:, 100:600].reshape(-1)
Y = unw_stack[igram_idx, :, 100:600].reshape(-1)
if el_cutoff:
good_idxs = X < el_cutoff
X, Y = X[good_idxs], Y[good_idxs]
if to_cm:
Y *= 0.44
plt.style.use("default")
# plt.style.use('ggplot')
# plt.style.use('seaborn-paper')
set_rcparams()
# https://matplotlib.org/tutorials/introductory/customizing.html#a-sample-matplotlibrc-file
# https://matplotlib.org/3.1.1/tutorials/introductory/lifecycle.html
style_dict = {
"pdf.fonttype": 42,
"ps.fonttype": 42,
"font.family": "Helvetica",
"font.size": 16,
"font.weight": "bold",
}
mpl.rcParams.update(style_dict)
fig, ax = plt.subplots()
# ax.scatter(X, Y, s=0.8)
ax.plot(X, Y, "o", rasterized=rasterized, ms=0.8)
# ax.set_xlim(700, 1200)
ax.set_xlim(None, 1200)
ax.set_xlabel("Elevation (m)")
# ax.set_ylabel("Phase (rad)")
ax.set_ylabel("[cm]")
plt.show(block=False)
fig.savefig("phase_vs_elevation_noraster.pdf", dpi=200, transparent=True)
return fig, ax
def plot_phase_elevation_igrams(dem, unw_stack, n=10, start=0, el_cutoff=None):
nn = np.ceil(np.sqrt(n)).astype(int)
fig, axes = plt.subplots(nn, nn)
for idx, ax in enumerate(axes.ravel()):
X = dem[:, 100:600].reshape(-1)
Y = unw_stack[start + idx, :, 100:600].reshape(-1)
if el_cutoff:
good_idxs = X < el_cutoff
X, Y = X[good_idxs], Y[good_idxs]
ax.plot(X, Y, "o", ms=0.8, rasterized=True)
def plot_l1_vs_stack(
offset=True,
alpha=300,
h=3,
w=4.5,
yy=2018,
unwfile="unw_stack_shiftedonly.h5",
station="TXSO",
days_smooth=1,
save=False,
):
# https://matplotlib.org/tutorials/introductory/customizing.html#a-sample-matplotlibrc-file
# so obscure
plt.rcParams["pdf.fonttype"] = 42
plt.rcParams["ps.fonttype"] = 42
plt.rcParams["font.family"] = "Helvetica"
plt.rcParams["font.size"] = 16
plt.rcParams["font.weight"] = "bold"
years = mdates.YearLocator() # every year
# months = mdates.MonthLocator() # every month
years_fmt = mdates.DateFormatter("%Y")
# yrs = (date(2015), date(2016), date(2017), date(2018))
input_dset = "stack_flat_shifted"
slclist, ifglist, igram_idxs = load_slclist_ifglist(
h5file=unwfile,
slclist_ignore_file="slclist_ignore.txt",
max_temporal_baseline=800,
max_date=date(yy, 1, 1),
)
Blin = np.sum(timeseries.prepB(slclist, ifglist), axis=1, keepdims=1)
timediffs = timeseries.find_time_diffs(slclist)
unw_vals = get_stack_vals(
unwfile,
station_name=station,
window=3,
dset=input_dset,
valid_indices=igram_idxs,
)
# Timeseries: unregularized, with all outliers (noisiest)
B = timeseries.prepB(slclist, ifglist, False, 0)
print(f"{B.shape = }")
# vv = np.linalg.lstsq(B, unw_vals, rcond=None)[0]
vv = np.linalg.pinv(B) @ unw_vals
unregged = timeseries.PHASE_TO_CM * timeseries.integrate_velocities(
vv.reshape(-1, 1), timediffs
)
# Timeseries: regularized, but with all outliers
Ba = timeseries.prepB(slclist, ifglist, False, alpha)
unw_vals_a = timeseries._augment_zeros(Ba, unw_vals)
# vv_a = np.linalg.lstsq(Ba, unw_vals_a, rcond=None)[0]
vv_a = np.linalg.pinv(Ba) @ unw_vals_a
regged = timeseries.PHASE_TO_CM * timeseries.integrate_velocities(
vv_a.reshape(-1, 1), timediffs
)
#
# Plot timeseries with-outlier cases:
ms = 4
fig, ax = gps_plots.plot_gps_los(
station,
end_date=date(yy, 1, 1),
offset=offset,
days_smooth=days_smooth,
gps_color=MATLAB_COLORS[2],
ms=ms,
)
ax.plot(slclist, unregged, "-x", lw=3, c=MATLAB_COLORS[3], label="Unregularized")
ax.plot(slclist, regged, "-x", lw=3, c=MATLAB_COLORS[4], label="Regularized")
ax.format_xdata = years_fmt
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
# ax.tick_params(axis="x", labelrotation=45)
y0 = np.ceil(np.max(np.abs(unregged)))
ax.set_ylim((-y0, y0))
ax.set_yticks(np.arange(-y0, y0 + 2, step=2))
_set_figsize(fig, h, w)
fig.legend()
if save:
fig.savefig(
f"compare_{yy}_timeseries.pdf",
bbox_inches="tight",
transparent=True,
dpi=100,
)
# No outlier removal linear cases
stack, l2, l1 = prunesolve(Blin, unw_vals)
# stack, l2, l1 = prunesolve(slclist, ifglist, unw_vals, Blin, 1000, shrink=False)
print(f"No outlier, linear: {stack}, {l1=}")
print(f"Difference: {abs(stack - l1)}")
# Plot linear with-outlier cases
fig, ax = gps_plots.plot_gps_los(
station,
end_date=date(yy, 1, 1),
insar_mm_list=[l1, l2],
offset=offset,
labels=["L1 linear", "L2 linear"],
gps_color=MATLAB_COLORS[2],
insar_colors=MATLAB_COLORS[:2],
days_smooth=days_smooth,
ms=ms,
)
# ax.plot(slclist, regged, "-x", lw=3, label="reg")
ax.plot(slclist, unregged, "-x", lw=3, c=MATLAB_COLORS[3], label="Unregularized")
ax.plot(slclist, regged, "-x", lw=3, c=MATLAB_COLORS[4], label="Regularized")
ax.format_xdata = years_fmt
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
# ax.tick_params(axis="x", labelrotation=45)
# y0 = ceil(maximum(abs.(regged)))
y0 = 6
ax.set_ylim((-y0, y0))
ax.set_yticks(np.arange(-y0, y0 + 2, step=2))
_set_figsize(fig, h, w)
fig.legend()
if save:
fig.savefig(
f"compare_{yy}_linear.pdf", bbox_inches="tight", transparent=True, dpi=100
)
###### Outlier remove cases ################
# timeseries: regularized with outliers removed
geo_clean, ifglist_clean, unw_clean = remove_outliers(
slclist, ifglist, unw_vals, mean_sigma_cutoff=4
)
B2 = timeseries.prepB(geo_clean, ifglist_clean, False, 0)
td_clean = timeseries.find_time_diffs(geo_clean)
unregged2 = timeseries.PHASE_TO_CM * timeseries.integrate_velocities(
np.linalg.lstsq(B2, unw_clean, rcond=None)[0], td_clean
)
Ba2 = timeseries.prepB(geo_clean, ifglist_clean, False, alpha)
unw_vals_a2 = timeseries._augment_zeros(Ba2, unw_clean)
regged2 = timeseries.PHASE_TO_CM * timeseries.integrate_velocities(
np.linalg.lstsq(Ba2, unw_vals_a2, rcond=None)[0], td_clean
)
# linear solves with outlier removal
stack2, l22, l12 = prunesolve(slclist, ifglist, unw_vals, Blin, 4, shrink=False)
# PLOT:
fig, ax = gps_plots.plot_gps_los(
station,
end_date=date(yy, 1, 1),
insar_mm_list=[l12, l22],
offset=offset,
labels=["L1 linear", "L2 linear"],
gps_color=MATLAB_COLORS[2],
insar_colors=MATLAB_COLORS[:2],
days_smooth=days_smooth,
ms=ms,
)
# ax.plot(geo_clean, regged2, "-x", lw=3, label="reg")
ax.plot(geo_clean, unregged2, "-x", c=MATLAB_COLORS[3], lw=3, label="Unregularized")
ax.plot(geo_clean, regged2, "-x", lw=3, c=MATLAB_COLORS[4], label="Regularized")
# y0 = ceil(maximum(abs.(regged2)))
# y0 = 4
ax.set_ylim((-y0, y0))
ax.set_yticks(np.arange(-y0, y0 + 2, step=2))
ax.format_xdata = years_fmt
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
_set_figsize(fig, h, w)
fig.legend()
if save:
fig.savefig(
f"compare_{yy}_removed.pdf", bbox_inches="tight", transparent=True, dpi=100
)
# TODO: MOVE THESE!!!
def _set_figsize(fig, h=3, w=3.5):
return (fig.set_figheight(h), fig.set_figwidth(w))
def prunesolve(B, v):
stack = p2mm * (sum(v) / sum(B))
l2 = p2mm * np.linalg.pinv(B) @ v
l1 = 1 # TODO: L1 solver
return stack, l2, l1
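# The TODO above leaves the L1 estimate unimplemented. Below is a minimal sketch of an
# L1 fit via iteratively reweighted least squares (IRLS); it is an illustrative
# assumption, not the solver used in the original analysis, and the helper name
# `_l1_fit_irls` is hypothetical. Inside prunesolve one could then use, e.g.,
# l1 = p2mm * float(_l1_fit_irls(B, v)[0]).
def _l1_fit_irls(B, v, n_iter=50, eps=1e-6):
    """Approximately minimize ||B x - v||_1 with IRLS; returns the solution vector."""
    B = np.atleast_2d(B)
    v = np.asarray(v).reshape(-1)
    x = np.linalg.pinv(B) @ v  # start from the L2 solution
    for _ in range(n_iter):
        w = 1.0 / np.maximum(np.abs(v - B @ x), eps)  # reweight by residual magnitude
        W = np.diag(w)
        x = np.linalg.pinv(B.T @ W @ B) @ (B.T @ W @ v)
    return x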
def load_slclist_ifglist(
igram_dir=None,
h5file=None,
slclist_ignore_file=None,
parse=True,
min_date=None,
max_date=None,
max_temporal_baseline=None,
):
import apertools.utils
ifg_date_list = sario.load_ifglist_from_h5(h5file, parse=parse)
geo_date_list = sario.load_slclist_from_h5(h5file, parse=parse)
if slclist_ignore_file is not None:
ignore_filepath = os.path.join(igram_dir or ".", slclist_ignore_file)
valid_geo_dates, valid_ifg_dates = sario.ignore_geo_dates(
geo_date_list, ifg_date_list, ignore_file=ignore_filepath, parse=parse
)
valid_ifg_dates = apertools.utils.filter_min_max_date(
valid_ifg_dates, min_date, max_date
)
if max_temporal_baseline is not None:
ll = len(valid_ifg_dates)
valid_ifg_dates = [
ifg
for ifg in valid_ifg_dates
if abs((ifg[1] - ifg[0]).days) <= max_temporal_baseline
]
logger.info(
f"Ignoring {ll - len(valid_ifg_dates)} longer than {max_temporal_baseline}"
)
logger.info(f"Ignoring {len(ifg_date_list) - len(valid_ifg_dates)} igrams total")
# Now just use the ones remaining to reform the geo dates
valid_geo_dates = list(sorted(set(itertools.chain.from_iterable(valid_ifg_dates))))
# valid_geo_idxs = np.searchsorted(geo_date_list, valid_geo_dates)
valid_ifg_idxs = np.searchsorted(
sario.ifglist_to_filenames(ifg_date_list),
sario.ifglist_to_filenames(valid_ifg_dates),
)
# return valid_geo_idxs, valid_ifg_idxs
return valid_geo_dates, valid_ifg_dates, valid_ifg_idxs
def get_stack_vals(
unw_stack_file,
row=None,
col=None,
valid_indices=None,
station_name=None,
window=5,
dset=STACK_FLAT_SHIFTED_DSET,
reference_station=None,
):
dem_rsc = sario.load("dem.rsc")
if station_name is not None:
lon, lat = gps.station_lonlat(station_name)
row, col = map(
lambda x: int(x), latlon.nearest_pixel(dem_rsc, lon=lon, lat=lat)
)
unw_vals = _read_vals(
unw_stack_file, row, col, valid_indices, window=window, dset=dset
)
return _subtract_reference(
unw_vals,
reference_station,
unw_stack_file,
window=window,
dset=dset,
valid_indices=valid_indices,
)
def _read_vals(
unw_stack_file,
row,
col,
valid_indices,
window=5,
dset=STACK_FLAT_SHIFTED_DSET,
):
# println("Loading $row, $col from $dset, avging window $window")
halfwin = max(window // 2, 1)
with h5py.File(unw_stack_file, "r") as hf:
print(row, col, hf[dset].shape)
unw_depth = hf[dset][
:,
row - halfwin : row + halfwin,
col - halfwin : col + halfwin,
]
        unw_vals_all = np.mean(unw_depth, axis=(1, 2))
    return unw_vals_all[valid_indices]
import numpy as np
from collections import defaultdict, Counter
import pickle
import string
import pprint
import torch
from torch.autograd import Variable
import re
PAD_TOKEN = '<pad>'
UNK_TOKEN = '<unk>'
ROOT_TOKEN = '<root>'
RANDOM_SEED = 1
def parse_conllu(filename, clean=True):
"""
Parse a .conllu file to a list of sentences. Each sentence is a 2d list with each row
a word, and with columns 'idx', 'word', 'POS tag', 'arc head', 'arc label'.
Args:
filename: string, file to parse
clean: boolean, if True, remove sentences with arcs that contain underscores.
Returns: List of sentences
"""
cols = [0, 1, 3, 6, 7]
with open(filename, 'r', encoding='utf-8') as f:
# read all lines, remove comments
data = [line for line in f.readlines() if not line.startswith('#')]
# split sentences
newline_idx = [i for i, s in enumerate(data) if s == '\n']
sentences = []
prev_split = 0
for split in newline_idx:
sentences.append(data[prev_split:split])
prev_split = split + 1
# select useful cols
for i, s in enumerate(sentences):
s = np.array([word.strip().split('\t') for word in s])
sentences[i] = s[:, cols]
# remove sentences with words without head
if clean:
sentences = [s for s in sentences if '_' not in s[:,4]]
return sentences
def filter_words(sentences, filter_single=True):
"""
Applies a series of filter to each word in each sentence. Filters
are applied in this order:
- replace urls with an <url> tag.
- replace a string of more than 2 punctuations with a <punct> tag.
- replace strings that contain digits with a <num> tag.
- if filter_single, replace words that only occur once with UNK_TOKEN.
This step is useful when parsing training data, to make sure the UNK_TOKEN
in the word embeddings gets trained.
Args:
sentences: list of sentences, from parse_conllu.
filter_single: boolean, if true replace words that occur once with UNK_TOKEN.
Returns: List of sentences with words filtered.
"""
filtered = []
word_counts = get_word_counts(sentences)
one_words = set([w for w, c in word_counts.items() if c==1])
for i, sentence in enumerate(sentences):
for j, word in enumerate(sentence):
if is_url(word[1]):
sentence[j, 1] = '<url>'
elif is_long_punctuation(word[1]):
sentence[j, 1] = '<punct>'
elif has_digits(word[1]):
sentence[j, 1] = '<num>'
elif filter_single and word[1].lower() in one_words:
sentence[j, 1] = UNK_TOKEN
filtered.append(sentence)
return filtered
def get_word_counts(sentences):
"""
Create a Counter of all words in sentences, in lowercase.
Args:
sentences: List of sentences, from parse_conllu.
Returns: Counter with word: count.
"""
words = [word[1].lower() for sentence in sentences for word in sentence]
return Counter(words)
def is_url(word):
"""
Lazy check if a word is an url. True if word contains all of {':' '/' '.'}.
"""
return bool(set('./:').issubset(word))
def is_long_punctuation(word):
"""
True if word is longer than 2 and only contains interpunction.
"""
return bool(len(word) > 2 and set(string.punctuation).issuperset(word))
def has_digits(word):
"""
True if word contains digits.
"""
return bool(set(string.digits).intersection(word))
def get_index_mappings(sentences):
"""
Create an index mapping of each word, POS tag and arc label in sentences.
example use:
idx = x2i['word']['apple']
word = i2x['word'][idx]
Args:
sentences: list of sentences, from parse_conllu
Returns: dictionaries x2i and i2x, which contain the translation to and from indices.
"""
# instantiate dicts
w2i = defaultdict(lambda: len(w2i))
i2w = dict()
t2i = defaultdict(lambda: len(t2i))
i2t = dict()
l2i = defaultdict(lambda: len(l2i))
i2l = dict()
# Default values
i2w[w2i[PAD_TOKEN]] = PAD_TOKEN
i2w[w2i[UNK_TOKEN]] = UNK_TOKEN
i2w[w2i[ROOT_TOKEN]] = ROOT_TOKEN
i2t[t2i[PAD_TOKEN]] = PAD_TOKEN
i2t[t2i[UNK_TOKEN]] = UNK_TOKEN
i2t[t2i[ROOT_TOKEN]] = ROOT_TOKEN
# Fill dicts
words = set()
tags = set()
labels = set()
for sentence in sentences:
for word_array in sentence:
words.add(word_array[1].lower())
labels.add(word_array[4])
tags.add(word_array[2])
for word in sorted(list(words)):
i2w[w2i[word]] = word
for tag in sorted(list(tags)):
i2t[t2i[tag]] = tag
for label in sorted(list(labels)):
i2l[l2i[label]] = label
# collect dicts
x2i = {"word":dict(w2i), "tag":dict(t2i), "label":dict(l2i)}
i2x = {"word":dict(i2w), "tag":dict(i2t), "label":dict(i2l)}
return x2i, i2x
def tokenize_sentences(sentences, x2i):
"""
Convert each sentence to int arrays using mappings in x2i.
"""
w2i = x2i['word']
t2i = x2i['tag']
l2i = x2i['label']
sentences_idx = []
for s in sentences:
s_idx = []
s_idx.append([0, w2i[ROOT_TOKEN], t2i[ROOT_TOKEN], -1, -1])
for i, si in enumerate(s):
word_idx = w2i.get(si[1].lower(), w2i[UNK_TOKEN])
tag_idx = t2i.get(si[2], t2i[UNK_TOKEN])
lab_idx = l2i[si[4]]
s_idx.append([int(si[0]), word_idx, tag_idx, int(si[3]), lab_idx])
        sentences_idx.append(np.vstack(s_idx))
    return sentences_idx
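# Usage sketch (illustrative addition, not part of the original module): the typical
# preprocessing pipeline chains the helpers above. 'train.conllu' is a placeholder path.
def _demo_conllu_pipeline(filename='train.conllu'):
    sentences = parse_conllu(filename, clean=True)
    sentences = filter_words(sentences, filter_single=True)
    x2i, i2x = get_index_mappings(sentences)
    tokenized = tokenize_sentences(sentences, x2i)
    # each element is an int array with columns: idx, word id, tag id, head, label id
    print(len(tokenized), tokenized[0].shape)
    return tokenized, x2i, i2x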
""" Script to store all functions"""
import numpy as np
#from osgeo.gdalconst import *
#from osgeo import ogr, gdal
import subprocess
import simplekml
import sys
import scipy
import multiprocessing as mp
from scipy import ndimage
import os
#import cv2
#import multiprocessing as mp
import matplotlib.pyplot as plt
from shapely.geometry import Polygon as shPol
from shapely.geometry.polygon import LinearRing as shLinRing
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
from shapely.geometry import Polygon as shPol
from shapely.geometry import MultiPolygon as shMPol
from osgeo import ogr
import json
from shapely.geometry import shape
import meshio
import numpy as np
#import pygmsh
import re
import utm
def distanceMapCenter(buffer_size):
""" function to create a raster with distances to the central cell,
rounded, only input is the size of the buffer around the middle cell """
rows = buffer_size*2+1
dist = np.zeros([rows, rows])
mid = buffer_size # buffer size is alway the index of the middle cell
for i in range(0,rows):
for j in range(0,rows):
dist[i,j] = np.sqrt((i-mid)**2+(j-mid)**2)
dist = np.round(dist)
return(dist)
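# Usage sketch (illustrative helper, not part of the original script): a buffer of 2
# gives a 5x5 map of rounded distances to the central cell.
def _demo_distanceMapCenter():
    dist = distanceMapCenter(2)
    print(dist)  # centre value is 0.0, corner values are round(sqrt(8)) = 3.0
    return dist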
def rasterize(InputVector,RefImage,OutputImage):
""" function to rasterize vectorial data, works with polygons, points and lines
all corresponding cells get value 1 """
#InputVector = 'Roads.shp'
#OutputImage = 'Result.tif'
#RefImage = 'DEM.tif'
gdalformat = 'GTiff'
datatype = gdal.GDT_Byte
burnVal = 1 #value for the output image pixels
##########################################################
# Get projection info from reference image
Image = gdal.Open(RefImage, gdal.GA_ReadOnly)
# Open Shapefile
Shapefile = ogr.Open(InputVector)
Shapefile_layer = Shapefile.GetLayer()
# Rasterise
print("Rasterising shapefile...")
Output = gdal.GetDriverByName(gdalformat).Create(OutputImage, Image.RasterXSize, Image.RasterYSize, 1, datatype, options=['COMPRESS=DEFLATE'])
Output.SetProjection(Image.GetProjectionRef())
Output.SetGeoTransform(Image.GetGeoTransform())
# Write data to band 1
Band = Output.GetRasterBand(1)
Band.SetNoDataValue(0)
gdal.RasterizeLayer(Output, [1], Shapefile_layer, burn_values=[burnVal])
# Close datasets
Band = None
Output = None
Image = None
Shapefile = None
# Build image overviews
subprocess.call("gdaladdo --config COMPRESS_OVERVIEW DEFLATE "+OutputImage+" 2 4 8 16 32 64", shell=True)
print("Done.")
return(OutputImage)
def localRasterize(XY, generalize_factor=5, get_indices = False):
""" Function to locally rasterize a set of coordinates
@params:
XY - Required: List of coordinate pairs
generalize_factor - Optional: Factor to determine the degree of generalization"""
XY_upd = []
for x, y in XY:
x = int(x / generalize_factor)
y = int(y / generalize_factor)
XY_upd.append([x, y])
XY_np = np.asarray(XY_upd)
x_min = np.min(XY_np[:, 0])
x_max = np.max(XY_np[:, 0])
y_min = np.min(XY_np[:, 1])
y_max = np.max(XY_np[:, 1])
cols = int(x_max - x_min + 2)
rows = int(y_max - y_min + 2)
ras = np.zeros([rows, cols])
XY_in = []
for c, r in XY_upd:
c = int(c - x_min + 1)
r = int(y_max - r + 1)
XY_in.append([r, c])
ras[r, c] = 1
if get_indices:
return ras, XY_in
else:
return ras
def bwdist(image):
""" function to calculate a distance map of a boolean input map (distance to closest cell with value 1)"""
a = scipy.ndimage.morphology.distance_transform_edt(image==0)
return(a)
def neigh(image):
""" neighboorhood function to calculate number of neighboring cells containing value 1
input has to be boolean"""
# Neighboorhood alternative
dim = image.shape
rows= dim[0]
cols= dim[1]
tif_neigh = np.zeros(dim)
neigh = np.zeros([rows-2,cols-2])
for i in range(0,3):
for j in range(0,3):
neigh = neigh+image[i:i+rows-2,j:j+cols-2]
tif_neigh[1:rows-1,1:cols-1]=neigh
tif_neigh = tif_neigh-image
return(tif_neigh)
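# Usage sketch (illustrative helper, not part of the original script): count the
# 8-connected neighbours equal to 1 around each interior cell of a boolean map.
def _demo_neigh():
    img = np.zeros((5, 5))
    img[2, 2] = 1
    img[2, 3] = 1
    counts = neigh(img)
    print(counts[2, 2])  # the cell at (2, 2) has exactly one active neighbour -> 1.0
    return counts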
def rescale(data,rescale_to):
""" function to rescale linearally"""
rescaled = rescale_to*(data-np.min(data))/(np.max(data)-np.min(data))
return(rescaled)
def exportRaster(LU,Example,Path_Name):
'''
    input is a 2D array (LU) plus an example raster used for georeferencing; output is a raster file (.rst, .tif) written to Path_Name
'''
Example = gdal.Open(Example, gdal.GA_ReadOnly)
gdal.AllRegister()
# get info of example data
rows=Example.RasterYSize
cols=Example.RasterXSize
# create output image
driver=Example.GetDriver()
outDs=driver.Create(Path_Name,cols,rows,1,GDT_Int32)
outBand = outDs.GetRasterBand(1)
outData = np.zeros((rows,cols), np.int16)
# write the data
outBand.WriteArray(LU, 0, 0)
# flush data to disk, set the NoData value and calculate stats
outBand.FlushCache()
outBand.SetNoDataValue(-1)
# georeference the image and set the projection
outDs.SetGeoTransform(Example.GetGeoTransform())
outDs.SetProjection(Example.GetProjection())
del outData
def importRaster(file):
Raster = gdal.Open(file)
Band=Raster.GetRasterBand(1)
Array=Band.ReadAsArray()
return(Raster, Band, Array)
def image2Blocks(image, vert_n, hor_n, topleftcorners = False):
""" Function to divide a 2d numpy array into blocks
Input: image --> a 2D numpy array
vert_n--> number of blocks in de vertical
hor_n --> number of blocks in the horizontal
Output: Blocks --> list of smaller 2d numpy arrays from the original image
image_blocks --> numpy 2d array with number of block"""
rows, cols = np.shape(image)
Blocks = []
max_row = 0
max_col = 0
t = 1
TL = []
image_blocks = np.zeros(np.shape(image))
for i in range(vert_n + 1):
for j in range(hor_n + 1):
height = rows//vert_n
width = cols//hor_n
top = i*height
bottom = (i+1) * height
left = j*width
right = (j+1) * width
block = image[top:bottom,left:right]
image_blocks[top:bottom,left:right] = t
Blocks.append(block)
t += 1
if bottom > max_row:
max_row = bottom
if right > max_col:
max_col = right
TL.append([top, left])
if topleftcorners:
return Blocks, image_blocks, TL
else:
return Blocks, image_blocks
def blocks2image(Blocks, blocks_image):
""" Function to stitch the blocks back to the original image
input: Blocks --> the list of blocks (2d numpies)
blocks_image --> numpy 2d array with numbers corresponding to block number
output: image --> stitched image """
image = np.zeros(np.shape(blocks_image))
    for i in range(1,int(np.max(blocks_image))+1):
        ind = np.asarray(np.where(blocks_image==i))
        if ind.size == 0:  # blocks created for the remainder rows/cols can be empty; skip them
            continue
top = np.min(ind[0, :])
bottom = np.max(ind[0, :])
left = np.min(ind[1, :])
right = np.max(ind[1, :])
#print('top: {}, bottom: {}, left: {}, right: {}'.format(top, bottom, left, right))
image[top:bottom+1,left:right+1] = Blocks[i-1]
return image
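# Usage sketch (illustrative helper, not part of the original script): splitting an
# image into blocks and stitching it back should reproduce the original array.
def _demo_blocks_roundtrip():
    image = np.arange(36, dtype=float).reshape(6, 6)
    blocks, block_map = image2Blocks(image, 2, 2)
    stitched = blocks2image(blocks, block_map)
    print(np.allclose(stitched, image))  # expected: True
    return stitched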
def nestedIndicesList(rows, cols):
""" Function to get a two column numpy array with all possible indices of a 2D numpy array
Usage: use to run nested loops in parallel
Input: rows and cols of your 2d array
Output: list with 2x1 numpy arrays"""
ind = np.zeros([rows*cols, 2])
ind[:,0] = np.tile(np.arange(rows), cols)
ind[:,1] = np.repeat(np.arange(cols), rows)
return list(ind)
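# Usage sketch (illustrative helpers, not part of the original script): the (row, col)
# pairs can be mapped over a process pool instead of writing a nested loop. The worker
# below is a stand-in for real per-cell work.
def _cell_work(i, j):
    return i * 10 + j

def _demo_nestedIndicesList():
    pairs = [(int(i), int(j)) for i, j in nestedIndicesList(3, 4)]
    with mp.Pool(2) as pool:
        values = pool.starmap(_cell_work, pairs)
    return values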
def retrievePixelValue(geo_coord, image, band):
"""Return floating-point value that corresponds to given point."""
lat = geo_coord[0]
lon = geo_coord[1]
image_np = image.GetRasterBand(band)
image_np = image_np.ReadAsArray()
height = np.shape(image_np)[0]
width = np.shape(image_np)[1]
# the x and y resolution of the pixels, and the rotation of the
# raster.
TL_lon, lon_res, x_rot, TL_lat, lat_rot, lat_res = image.GetGeoTransform()
# calculate coordinates for each cell
CoorRasterLon = (TL_lon + 0.5 * lon_res) + np.arange(width) * lon_res
CoorRasterLat = (TL_lat + 0.5 * lat_res) + np.arange(height) * lat_res
# get indices from lat and lon
j = int(round(((lon - (TL_lon - lon_res / 2)) / lon_res)))
i = int(round(((lat - (TL_lat - lat_res / 2)) / lat_res)))
return image_np[i,j], i, j
def extent2KML(Image,filename):
""" Function to plot the extent of a geotiff """
Band = Image.GetRasterBand(1)
Array = Band.ReadAsArray()
rows, cols = np.shape(Array)
TL_lon, x_res, x_rot, TL_lat, y_rot, y_res = Image.GetGeoTransform()
res = x_res
kml = simplekml.Kml()
pol = kml.newpolygon()
TL = (TL_lon, TL_lat)
BL = (TL_lon, TL_lat - rows*res)
BR = (TL_lon + cols*res, TL_lat - rows*res)
TR = (TL_lon + cols*res, TL_lat)
pol.outerboundaryis = [TL,BL,BR,TR,TL]
kml.save(filename)
def contourKML(Image,cn,filename,minlen = 100):
""" Function to generate a kml file with contours from a certain raster
@params:
the gdal image, a plt.contour output, the filename of the eventual kml file,
the minimum length of a contour (unit is number of nodes) and color for the eventual
kml lines (html color codes) """
c_coor = cn.allsegs[0]
latlon = Indices2LatLon(Image)
kml = simplekml.Kml()
Contour_latlon = []
Contour_latlon.append([])
t = 0
for c_list in c_coor:
if t%1000 == 0:
print('Super Process:' +str(round(t/len(c_coor),2)*100)+ ' %')
t += 1
f = True
if len(c_list) > minlen:
for c in c_list:
y = c[1]
x = c[0]
try:
lat = latlon[int(round(y)), int(round(x)), 0]
lon = latlon[int(round(y)), int(round(x)), 1]
if f == True:
Contour_latlon[-1].append((lon, lat))
f = False
else:
lon_old,lat_old = Contour_latlon[-1][-1]
dist = ((lon_old-lon)**2+(lat_old-lat)**2)**0.5
if dist < 0.0005:
Contour_latlon[-1].append((lon, lat))
else:
Contour_latlon.append([])
Contour_latlon[-1].append((lon, lat))
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
for C in Contour_latlon:
l = kml.newlinestring()
l.name = str(t)
l.coords = C
print('Saving file...')
kml.save(filename)
def calcPolyArea(pol):
""" Function to calculate the area (units = cells) of a polygon
input: a numpy array with two cols
output: the area in number of cells """
x = pol[:,0]
y = pol[:,1]
return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
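# Usage sketch (illustrative helper, not part of the original script): the shoelace
# formula on a 2x3 axis-aligned rectangle gives an area of 6 cells.
def _demo_calcPolyArea():
    rect = np.array([[0, 0], [2, 0], [2, 3], [0, 3]])
    return calcPolyArea(rect)  # -> 6.0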
def autoCanny(image, sigma=0.33):
""" Function to use canny without tuning (source: pyimagesearch.com) """
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
def shapelyPol2PLTPol(shapelypol, facecolor = 'grey', edgecolor = 'red', alpha = 0.5):
""" Function to translate a shapely polygon to a plt polygon
!!! so far only for exterior boundaries
input: a shapely polygon
output: a patch to be plotted with plt.add_patch"""
# get coordinate list
C = np.rot90(shapelypol.exterior.xy)
# create a polygon from coordinate list
pol = Polygon(C, closed=True, facecolor = facecolor, edgecolor = edgecolor, alpha = alpha) # plt polygon
return pol
def printProgressBar(iteration, total,printBar = True, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
"""
Call in a loop to create terminal progress bar
source: https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
if printBar:
print('\r%s |%s| %s%% %s\r' % (prefix, bar, percent, suffix), end = printEnd)
else:
print('\r%s%% %s' % (percent, suffix), end = printEnd)
# Print New Line on Complete
if iteration == total:
print()
def shapelyInd2shapelyLatLon(pol, latlon, indlist = False):
ind = np.asarray(pol.exterior.xy)
ind_latlon = []
for i in range(np.shape(ind)[1]):
col = int(ind[0, i])
row = int(ind[1, i])
ind_latlon.append(latlon[row,col,:])
if indlist:
return shPol(ind_latlon), ind_latlon
else:
return shPol(ind_latlon)
def shapely2KML(pol_ext, path, pol_int = [], name = '', invert = False):
""" Funtion to translate shapely polygons to kml polygon
@ params:
pol_ext - Required: shapely polygon in lat and lon of the exterior boundaries
path - Required: DIrectory path or file name (stored in working directory) of the kml file
pol_int - Optional: List of shapely polygons which describe the interior boundaries
name - Optional: String of the name of the polygon
invert - Optional: trun on True to change x and y
"""
kml = simplekml.Kml()
xy = np.asarray(pol_ext.exterior.xy)
xy = np.rot90(xy)
if invert == False:
xy = list(np.flip(xy))
xy_int = []
for i in pol_int:
xy_i = list(np.flip(np.rot90(np.asarray(i.exterior.xy))))
xy_int.append(xy_i)
pol = kml.newpolygon(name = name, outerboundaryis = xy, innerboundaryis = xy_int)
pol.style.polystyle.color = simplekml.Color.changealphaint(50, simplekml.Color.azure)
kml.save(path)
print(f'The Google Earth File is saved in {path}')
def extractPolygon2Snap2FromNDVI(NDVI, LatLon, NDVI_tresh = 0, path = 'Polygon2Snap2.kml'):
""" Function to extract Polygon2Snap2 from a NDVI image
@params:
NDVI - Required: 2d numpy array of NDVI
LatLon - Required: 3d (rowsxcolsx2) numpy array with latitudes and longitudes of the NDVI image
NDVI_tresh - Optional: value to distinguish water from non water (default is 0)
path - Optional: path to store the resulting kml file (default is 'Polygon2Snap2.kml')
"""
# Close all open figures
plt.close('all')
# treshold value to distinguish water based on NDVI
t = NDVI_tresh
Tresh = np.zeros(np.shape(NDVI))
# apply filter to cancel noise without affecting edges
blur = cv2.bilateralFilter(NDVI, 3, 5, 5)
# make a binary image
(t, binary) = cv2.threshold(src=blur * -1,
thresh=t,
maxval=255,
type=cv2.THRESH_BINARY)
# convert to proper data type
binary = np.uint8(binary)
# contour
contours = cv2.findContours(image=binary, mode=cv2.RETR_LIST, method=cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0]
contours_len = []
print("Found %d objects." % len(contours))
# list how long each contour is
for (i, c) in enumerate(contours):
contours_len.append(len(c))
# print("\tSize of contour %d: %d" % (i, len(c)))
# create empty lists to store the polygons
patches = []
sh_Pols = []
# minimum length of contour to generate polygon
tresh = 100
# generate polygons
t = 0
print('Generating Polygons...')
for C in contours:
t += 1
printProgressBar(t, len(contours))
if len(C) > tresh:
# adapt the dimensions
C = C[:, 0, :]
# create a polygon
pol = Polygon(C, closed=True, facecolor='red', edgecolor='red', alpha=0.05) # plt polygon
patches.append(pol)
shpol = shPol(C) # shapely polygon
if type(shpol.buffer(0)) is shPol:
if shpol.buffer(0).length > tresh:
sh_Pols.append(shpol.buffer(0))
else:
for s in list(shpol.buffer(0)):
if s.length > tresh:
sh_Pols.append(s)
# get the major polygon
sh_Pols_len = []
for s in sh_Pols:
sh_Pols_len.append(s.length)
C_maj = sh_Pols[np.argmax(sh_Pols_len)]
# get the interiors
interiors = []
t = 0
print('Getting the interior boundaries...')
for c in sh_Pols:
#print("\tSize of contour %d: %d" % (i, c.length))
t += 1
printProgressBar(t,len(sh_Pols))
if c.within(C_maj):
if c.area < C_maj.area:
interiors.append(c)
"""
print('Getting the interior boundaries...')
def checkWithin(c, C_maj):
if c.within(C_maj):
if c.area < C_maj.area:
return c
pool = mp.Pool(mp.cpu_count())
interiors = pool.starmap(checkWithin, [(c, C_maj) for c in sh_Pols])
"""
interiors = [i for i in interiors if i]
print(f'Got them! There are {len(interiors)} interior polygons')
# translate to latlon
interiors_latlon = []
for i in interiors:
pol_latlon = shapelyInd2shapelyLatLon(i, LatLon)
interiors_latlon.append(pol_latlon)
pol_maj_latlon = shapelyInd2shapelyLatLon(C_maj, LatLon)
shapely2KML(pol_maj_latlon, pol_int=interiors_latlon, path=path, name='test')
def indices2LatLon(Image):
""" Function to calculate a 2D numpy array which hold latitude and longitude in each cell
input: gdal loaded geotiff
output: 2D numpy array with lat-lon for each cell"""
Band=Image.GetRasterBand(1)
Array=Band.ReadAsArray()
rows, cols = np.shape(Array)
TL_lon, x_res, x_rot, TL_lat, y_rot, y_res = Image.GetGeoTransform()
res = x_res
latlon = np.zeros([rows,cols,2])
for i in range(rows):
printProgressBar(i,rows)
for j in range(cols):
lat = TL_lat - i*res
lon = TL_lon + j*res
latlon[i,j,0] = lat
latlon[i,j,1] = lon
return latlon
def shapely2KML_MultiplePolygons(Pol_list, path, invert = False):
""" Funtion to translate shapely polygons to kml polygon
@ params:
pol_list_element - Required: list of lists where the first element of each list is the outerboundaries of a polygon
path - Required: DIrectory path or file name (stored in working directory) of the kml file
invert - Optional: trun on True to change x and y
"""
def savePolList(p, pol, xy, kml, xy_init):
inxy = np.asarray(pol_list_element[p].exterior.xy)
inxy = np.rot90(inxy)
inxy = list(np.flip(inxy))
xy_init.append(inxy)
pol = kml.newpolygon(outerboundaryis=xy, innerboundaryis=xy_init)
pol.style.polystyle.color = simplekml.Color.changealphaint(50, simplekml.Color.azure)
kml = simplekml.Kml()
i = 0
for pol_list_element in Pol_list:
xy = np.asarray(pol_list_element[0].exterior.xy)
xy = np.rot90(xy)
if invert == False:
            xy = list(np.flip(xy))
import torch
import numpy as np
import skimage
import os
import torchvision.utils as utils
from skimage import color
import mermaid.image_sampling as py_is
from mermaid.data_wrapper import AdaptVal,MyTensor
from .net_utils import gen_identity_map
from .net_utils import Bilinear
import mermaid.utils as py_utils
import mermaid.module_parameters as pars
import mermaid.smoother_factory as sf
def get_reg_pair(data,ch=1):
"""
get image pair from data, pair is concatenated by the channel
:param data: a dict, including {'img':, 'label':}
:param pair:
:param target: target image
:param ch: the num of input channel
:return: image BxCxXxYxZ, label BxCxXxYxZ
"""
if 'label' in data:
return data['image'][:,0:ch], data['image'][:,ch:2*ch],data['label'][:,0:ch],data['label'][:,ch:2*ch]
else:
return data['image'][:,0:ch], data['image'][:,ch:2*ch],None, None
def get_seg_pair(data, is_train=True):
"""
get image and gt from data, pair is concatenated by the channel
:param data: a dict, including {'img':, 'label':}
:return: image BxCxXxYxZ, label BxCxXxYxZ
"""
if not is_train:
data['image']= data['image'][0]
if 'label' in data:
data['label'] = data['label'][0]
if 'label' in data:
return data['image'], data['label']
else:
return data['image'],None
def sigmoid_explode(ep, static =5, k=5):
"""
    factor increases with epoch, factor = (k + exp(ep / k))/k
:param ep: cur epoch
:param static: at the first # epoch, the factor keep unchanged
:param k: the explode factor
:return:
"""
static = static
if ep < static:
return 1.
else:
ep = ep - static
factor= (k + np.exp(ep / k))/k
return float(factor)
def sigmoid_decay(ep, static =5, k=5):
"""
    factor decreases with epoch, factor = k/(k + exp(ep / k))
:param ep: cur epoch
:param static: at the first # epoch, the factor keep unchanged
:param k: the decay factor
:return:
"""
static = static
if ep < static:
return float(1.)
else:
ep = ep - static
factor = k/(k + np.exp(ep / k))
return float(factor)
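# Usage sketch (illustrative helper, not part of the original module): the decay factor
# stays at 1 for the first `static` epochs and then falls off smoothly, while the
# explode factor grows; both are typically used to schedule loss weights.
def _demo_sigmoid_factors(n_epochs=20):
    decay = [sigmoid_decay(ep, static=5, k=5) for ep in range(n_epochs)]
    explode = [sigmoid_explode(ep, static=5, k=5) for ep in range(n_epochs)]
    return decay, explode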
def factor_tuple(input,factor):
"""
multiply a factor to each tuple elem
:param input:
:param factor:
:return:
"""
input_np = np.array(list(input))
input_np = input_np*factor
return tuple(list(input_np))
def resize_spacing(img_sz,img_sp,factor):
"""
compute the new spacing with regard to the image resampling factor
:param img_sz: img sz
:param img_sp: img spacing
:param factor: factor of resampling image
:return:
"""
img_sz_np = np.array(list(img_sz))
img_sp_np = np.array(list(img_sp))
new_sz_np = img_sz_np*factor
new_sp = img_sp_np*(img_sz_np-1)/(new_sz_np-1)
return tuple(list(new_sp))
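# Usage sketch (illustrative helper, not part of the original module): halving the
# number of voxels roughly doubles the spacing while keeping the physical extent
# (sz - 1) * spacing unchanged.
def _demo_resize_spacing():
    new_sp = resize_spacing((64, 64, 64), (1.0, 1.0, 1.0), 0.5)
    return new_sp  # approximately (2.03, 2.03, 2.03)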
def save_image_with_scale(path, variable):
"""
the input variable is [-1,1], save into image
:param path: path to save
:param variable: variable to save, XxY
:return:
"""
arr = variable.cpu().data.numpy()
arr = np.clip(arr, -1., 1.)
arr = (arr+1.)/2 * 255.
arr = arr.astype(np.uint8)
skimage.io.imsave(path, arr)
def get_transform_with_itk_format(disp_np, spacing,original, direction):
import SimpleITK as sitk
# Create a composite transform then write and read.
displacement = sitk.DisplacementFieldTransform(3)
field_size = list(np.flipud(disp_np.shape[1:]).astype(np.float64))
field_origin = list(original)
field_spacing = list(spacing)
field_direction = list(direction) # direction cosine matrix (row major order)
# Concatenate all the information into a single list.
displacement.SetFixedParameters(field_size + field_origin + field_spacing + field_direction)
displacement.SetParameters(np.transpose(disp_np,[1,2,3,0]).reshape(-1).astype(np.float64))
return displacement
def make_image_summary(images, truths, raw_output, maxoutput=4, overlap=True):
"""make image summary for tensorboard
:param images: torch.Variable, NxCxDxHxW, 3D image volume (C:channels)
:param truths: torch.Variable, NxDxHxW, 3D label mask
:param raw_output: torch.Variable, NxCxHxWxD: prediction for each class (C:classes)
:param maxoutput: int, number of samples from a batch
:param overlap: bool, overlap the image with groundtruth and predictions
:return: summary_images: list, a maxoutput-long list with element of tensors of Nx
"""
slice_ind = images.size()[2] // 2
images_2D = images.data[:maxoutput, :, slice_ind, :, :]
truths_2D = truths.data[:maxoutput, slice_ind, :, :]
predictions_2D = torch.max(raw_output.data, 1)[1][:maxoutput, slice_ind, :, :]
grid_images = utils.make_grid(images_2D, pad_value=1)
grid_truths = utils.make_grid(labels2colors(truths_2D, images=images_2D, overlap=overlap), pad_value=1)
grid_preds = utils.make_grid(labels2colors(predictions_2D, images=images_2D, overlap=overlap), pad_value=1)
return torch.cat([grid_images, grid_truths, grid_preds], 1)
def labels2colors(labels, images=None, overlap=False):
"""Turn label masks into color images
:param labels: torch.tensor, NxMxN
:param images: torch.tensor, NxMxN or NxMxNx3
:param overlap: bool
:return: colors: torch.tensor, Nx3xMxN
"""
colors = []
if overlap:
if images is None:
raise ValueError("Need background images when overlap is True")
else:
for i in range(images.size()[0]):
image = images.squeeze()[i, :, :]
label = labels[i, :, :]
colors.append(color.label2rgb(label.cpu().numpy(), image.cpu().numpy(), bg_label=0, alpha=0.7))
else:
for i in range(images.size()[0]):
label = labels[i, :, :]
colors.append(color.label2rgb(label.numpy(), bg_label=0))
return torch.Tensor(np.transpose(np.stack(colors, 0), (0, 3, 1, 2))).cuda()
def t2np(v):
"""
Takes a torch array and returns it as a numpy array on the cpu
:param v: torch array
:return: numpy array
"""
if type(v) == torch.Tensor:
return v.detach().cpu().numpy()
else:
try:
return v.cpu().numpy()
except:
return v
def make_dir(path):
is_exist = os.path.exists(path)
if not is_exist:
os.makedirs(path)
return is_exist
def lift_to_dimension(A,dim):
"""
Creates a view of A of dimension dim (by adding dummy dimensions if necessary).
Assumes a numpy array as input
:param A: numpy array
:param dim: desired dimension of view
:return: returns view of A of appropriate dimension
"""
current_dim = len(A.shape)
if current_dim>dim:
raise ValueError('Can only add dimensions, but not remove them')
if current_dim==dim:
return A
else:
return A.reshape([1]*(dim-current_dim)+list(A.shape))
def update_affine_param( cur_af, last_af): # A2(A1*x+b1) + b2 = A2A1*x + A2*b1+b2
"""
update the current affine parameter A2 based on last affine parameter A1
A2(A1*x+b1) + b2 = A2A1*x + A2*b1+b2, results in the composed affine parameter A3=(A2A1, A2*b1+b2)
:param cur_af: current affine parameter
:param last_af: last affine parameter
:return: composed affine parameter A3
"""
cur_af = cur_af.view(cur_af.shape[0], 4, 3)
last_af = last_af.view(last_af.shape[0],4,3)
updated_af = torch.zeros_like(cur_af.data).to(cur_af.device)
dim =3
updated_af[:,:3,:] = torch.matmul(cur_af[:,:3,:],last_af[:,:3,:])
updated_af[:,3,:] = cur_af[:,3,:] + torch.squeeze(torch.matmul(cur_af[:,:3,:], torch.transpose(last_af[:,3:,:],1,2)),2)
updated_af = updated_af.contiguous().view(cur_af.shape[0],-1)
return updated_af
def get_inverse_affine_param(affine_param,dim=3):
"""A2(A1*x+b1) +b2= A2A1*x + A2*b1+b2 = x A2= A1^-1, b2 = - A2^b1"""
affine_param = affine_param.view(affine_param.shape[0], dim+1, dim)
inverse_param = torch.zeros_like(affine_param.data).to(affine_param.device)
for n in range(affine_param.shape[0]):
tm_inv = torch.inverse(affine_param[n, :dim,:])
inverse_param[n, :dim, :] = tm_inv
inverse_param[n, dim, :] = - torch.matmul(tm_inv, affine_param[n, dim, :])
inverse_param = inverse_param.contiguous().view(affine_param.shape[0], -1)
return inverse_param
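# Usage sketch (illustrative helper, not part of the original module): composing an
# affine parameter with its inverse should recover the identity transform (A = I,
# b = 0), which is a quick sanity check for the two helpers above.
def _demo_affine_inverse_roundtrip():
    A = torch.eye(3) * 1.5
    b = torch.tensor([[0.1, -0.2, 0.3]])
    af = torch.cat([A, b], dim=0).view(1, -1)  # (1, 12): 3x3 block followed by translation
    af_inv = get_inverse_affine_param(af)
    composed = update_affine_param(af_inv, af)
    return composed.view(4, 3)  # approximately eye(3) stacked on a zero row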
def gen_affine_map(Ab, img_sz, dim=3):
"""
generate the affine transformation map with regard to affine parameter
:param Ab: affine parameter
:param img_sz: image sz [X,Y,Z]
:return: affine transformation map
"""
Ab = Ab.view(Ab.shape[0], dim+1, dim)
phi = gen_identity_map(img_sz).to(Ab.device)
phi_cp = phi.view(dim, -1)
affine_map = torch.matmul(Ab[:, :dim, :], phi_cp)
affine_map = Ab[:, dim, :].contiguous().view(-1, dim, 1) + affine_map
affine_map = affine_map.view([Ab.shape[0]] + list(phi.shape))
return affine_map
def transfer_mermaid_affine_into_easyreg_affine(affine_param, dim=3):
affine_param = affine_param.view(affine_param.shape[0], dim+1, dim)
I = torch.ones(dim).to(affine_param.device)
b = affine_param[:, dim,:]
affine_param[:,:dim,:]= affine_param[:,:dim,:].transpose(1, 2)
    affine_param[:, dim,:] =2*b +torch.matmul(affine_param[:,:dim,:],I)-1 # easyreg assumes the map is defined in [-1,1] while mermaid assumes [0,1]
affine_param = affine_param.contiguous()
affine_param = affine_param.view(affine_param.shape[0],-1)
return affine_param
def transfer_easyreg_affine_into_mermaid_affine(affine_param, dim=3):
affine_param = affine_param.view(affine_param.shape[0], dim+1, dim)
I = torch.ones(dim).to(affine_param.device)
b = affine_param[:, dim,:]
affine_param[:, dim,:] = (b-torch.matmul(affine_param[:,:dim,:],I)+1)/2 # the order here is important
affine_param[:,:dim,:]= affine_param[:,:dim,:].transpose(1, 2)
affine_param = affine_param.contiguous()
affine_param = affine_param.view(affine_param.shape[0],-1)
return affine_param
def save_affine_param_with_easyreg_custom(affine_param, output_path, fname_list, affine_compute_from_mermaid=False):
if affine_param is not None:
affine_param = affine_param.detach().clone()
if affine_compute_from_mermaid:
affine_param = transfer_mermaid_affine_into_easyreg_affine(affine_param)
if isinstance(affine_param, list):
affine_param = affine_param[0]
affine_param = affine_param.detach().cpu().numpy()
for i in range(len(fname_list)):
np.save(os.path.join(output_path, fname_list[i]) + '_affine_param.npy', affine_param[i])
def get_warped_img_map_param( Ab, img_sz, moving, dim=3, zero_boundary=True):
"""
generate the affine transformation map with regard to affine parameter
:param Ab: affine parameter
:param img_sz: image sz [X,Y,Z]
:param moving: moving image BxCxXxYxZ
:param zero_boundary: zero_boundary condition
:return: affine image, affine transformation map, affine parameter
"""
bilinear = Bilinear(zero_boundary)
affine_map = gen_affine_map(Ab,img_sz,dim)
output = bilinear(moving, affine_map)
return output, affine_map, Ab
def show_current_pair_by_3d_slice(iS,iT):
"""
visualize the pair image by slice
:param iS: source image
:param iT: target image
:return:
"""
import matplotlib.pyplot as plt
import easyreg.viewers as viewers
fig, ax = plt.subplots(2,3)
plt.setp(plt.gcf(), 'facecolor', 'white')
plt.style.use('bmh')
ivsx = viewers.ImageViewer3D_Sliced(ax[0][0], iS, 0, 'source X', True)
ivsy = viewers.ImageViewer3D_Sliced(ax[0][1], iS, 1, 'source Y', True)
ivsz = viewers.ImageViewer3D_Sliced(ax[0][2], iS, 2, 'source Z', True)
ivtx = viewers.ImageViewer3D_Sliced(ax[1][0], iT, 0, 'target X', True)
ivty = viewers.ImageViewer3D_Sliced(ax[1][1], iT, 1, 'target Y', True)
ivtz = viewers.ImageViewer3D_Sliced(ax[1][2], iT, 2, 'target Z', True)
feh = viewers.FigureEventHandler(fig)
feh.add_axes_event('button_press_event', ax[0][0], ivsx.on_mouse_press, ivsx.get_synchronize, ivsx.set_synchronize)
feh.add_axes_event('button_press_event', ax[0][1], ivsy.on_mouse_press, ivsy.get_synchronize, ivsy.set_synchronize)
feh.add_axes_event('button_press_event', ax[0][2], ivsz.on_mouse_press, ivsz.get_synchronize, ivsz.set_synchronize)
feh.add_axes_event('button_press_event', ax[1][0], ivtx.on_mouse_press, ivtx.get_synchronize, ivtx.set_synchronize)
feh.add_axes_event('button_press_event', ax[1][1], ivty.on_mouse_press, ivty.get_synchronize, ivty.set_synchronize)
feh.add_axes_event('button_press_event', ax[1][2], ivtz.on_mouse_press, ivtz.get_synchronize, ivtz.set_synchronize)
feh.synchronize([ax[0][0], ax[1][0]])
feh.synchronize([ax[0][1], ax[1][1]])
feh.synchronize([ax[0][2], ax[1][2]])
def get_res_size_from_size(sz, factor):
"""
Returns the corresponding low-res size from a (high-res) sz
:param sz: size (high-res)
:param factor: low-res factor (needs to be <1)
:return: low res size
"""
if (factor is None) :
print('WARNING: Could not compute low_res_size as factor was ' + str( factor ))
return sz
else:
lowResSize = np.array(sz)
if not isinstance(factor, list):
            lowResSize[2::] = (np.ceil((np.array(sz[2:]) * factor))).astype('int16')
        return lowResSize
import math
import random
import numpy as np
import matplotlib.pyplot as plt
class Firefly(object):
""" Simulates a firefly """
def __init__(self,ranges,fitness_func,beta0=1.0,gamma=0.1,id=0):
""" Sets up the firefly """
self.min_ranges = [ranges[r]['min'] for r in ranges] #to ensure firefly will remain with the appropriate N-space for the problem
self.max_ranges = [ranges[r]['max'] for r in ranges] #same for the maximum
self.position = np.array([ranges[r]['init'] for r in ranges]) #assign the initial value to each variable
self.best_pos = self.position.copy() #ensure that no shallow copy will occur
self.fitness_func = fitness_func
self.intensity = self.update_intensity(self.position)
self.best_intensity = self.update_intensity(self.best_pos)
self.beta0 = float(beta0) #baseline attractiveness at r=0
self.gamma = float(gamma) #aborption
self.id = id
self.dimensions = len(self.position) #just get the number of solutions
def distance(self, partner):
""" Returns the distance r between fireflies as the cartesian(eucleidian) distence """
        return np.linalg.norm(partner.position-self.position) #same as: np.sqrt(((partner.position-self.position)**2).sum())
def attraction(self, partner):
""" Returns the attractiveness beta between fireflies"""
#return self.beta0 * math.exp(-self.gamma * (self.distance(partner)**2))
return self.beta0/(1+self.gamma*(self.distance(partner)**2))
def update_intensity(self,pos,minimise=True):
""" Evaluates the fitness function given a position in N-dimensions
If it is a minimisation problem, returns the inverse
"""
if minimise:
return 1./(self.fitness_func(pos)+1)
return self.fitness_func(pos)+1
def candidate_position(self,partner,step=0.5):
""" Returns the new position """
beta = self.attraction(partner)
#rand_p = random.random() ## this would add the same random component, ie they would move in a diagonal from main
rand_p = np.random.random(size=self.dimensions) ## to make sure that I add a different random component in every dimension
new_pos = self.position + beta*(partner.position - self.position) + step*(rand_p-0.5)
return np.clip(new_pos,self.min_ranges,self.max_ranges)
def move_if_better(self,partner,step=0.5):
""" """
if self.intensity <= partner.intensity:
#print "I will move firefly %s given firefly %s becasuase my intensity %s < %s" % (self.id,partner.id,self.intensity,partner.intensity)
#print " my old pos was: %s" % self.position
self.position = self.candidate_position(partner,step=step)
self.intensity = self.update_intensity(self.position)
#print " my new pos is: %s with intensity %s" % (self.position,self.intensity)
if self.intensity > self.best_intensity:
self.best_intensity = self.intensity
self.best_pos = self.position
else:
#print "** I will not move firefly %s given firefly %s becasuase my intensity %s > %s" % (self.id,partner.id,self.intensity,partner.intensity)
pass
def info(self):
""" Helper function for de-bugging that shows Firefly() attributes """
print("firefly initial id:{}".format(self.id))
print("Current position :{}".format(self.position))
print("Best position :{}".format(self.best_pos))
print("Current intensity :{}".format(self.intensity))
print("Best intensity :{}".format(self.best_intensity))
print("Absorption :{}".format(self.gamma))
print("------------------")
class FireflyPop(object):
def __init__(self,ranges,fit_func):
self.fireflies = [Firefly(ranges,fit_func,id=i) for i,r in enumerate(ranges)]
self.N = len(self.fireflies)
self.fit_func = fit_func
self.sort_on_intensities()
def change_absorption(self,new_absorption):
if not isinstance(new_absorption,list):
new_absorption = [new_absorption for i in range(len(self.fireflies))]
for f,a in zip(self.fireflies,new_absorption):
f.gamma = a
def sort_on_intensities(self):
self.fireflies.sort(key=lambda x:x.intensity,reverse=True)
def sort_on_best_intensities(self):
self.fireflies.sort(key=lambda x:x.best_intensity,reverse=True)
def get_leader(self):
return self.fireflies[0]
def print_intensities(self):
print("Ids : {}".format([f.id for f in self.fireflies]))
print("Current: {}".format([f.intensity for f in self.fireflies]))
print("Best : {}".format([f.best_intensity for f in self.fireflies]))
def update(self,step=1):
for i in range(self.N):
for j in range(self.N):
self.fireflies[i].move_if_better(self.fireflies[j],step=step)
self.sort_on_intensities()
def evolve(self,iterations,step=1):
for i in range(iterations):
self.update(step)
self.sort_on_intensities()
def go_to_bestSol(self):
""" Returns the fireflies to their best (so-far) solution to search again """
self.sort_on_best_intensities()
for f in self.fireflies:
f.position = f.best_pos
f.intensity = f.best_intensity
def get_best_sol(self):
self.sort_on_best_intensities()
return self.get_leader().best_pos
### only for 2D problems, debugging purposes
def viz_sol(self,dim_X=0,dim_Y=1):
""" Visualises two dimensions, just provide the idx in pos as dim_X and dim_Y """
l = self.get_leader()
step_x = float(l.max_ranges[dim_X]-l.min_ranges[dim_X])/100
x = np.arange(l.min_ranges[dim_X],l.max_ranges[dim_X]+step_x,step_x)
step_y = float(l.max_ranges[dim_Y]-l.min_ranges[dim_Y])/100
y = | np.arange(l.min_ranges[dim_Y],l.max_ranges[dim_Y]+step_y,step_y) | numpy.arange |
#
# radarbeam.py
#
# module for calculating geometry parameters and magnetic aspect
# angle of radar targets monitored by any radar
#
# use aspect_elaz or aspect_txty to calculate aspect angles of targets
# specified by (el,az) or (tx,ty) angles
#
# Created by <NAME> on 11/29/08 as jrobeam.py
# Copyright (c) 2008 ECE, UIUC. All rights reserved.
# history
# - Aug29,2013 by <NAME>
# -Generate a module that accepts the lon,lat,h coordinates for the location
# of any radar.
# -flattening has been changed from 1/298.257 to 1./298.257223563
# using the WGS84 reference in:
# http://earth-info.nga.mil/GandG/publications/tr8350.2/wgs84fin.pdf
# - A new routine called enu2xyz to move a point from xr,yr,zr to some
# direction east, north, up
import numpy as np

# WGS 84 ellipsoid constants (km); the flattening value follows the header note above
eps = np.finfo(float).eps
a_WGS = 6378.137
flatness = 1. / 298.257223563
b_WGS = a_WGS * (1. - flatness)
eccentricity = np.sqrt(a_WGS**2 - b_WGS**2) / a_WGS

def llh2xyz(latg,lon,h):
# returns geocentric xyz coordinates (ECEF) in km of a target with
# latitude latg (rad) --- geodetic
# longitude lon (rad)
# height h (km above local ellipsoid)
n=a_WGS / np.sqrt(1.-flatness*(2.-flatness) * np.sin(latg)**2.)
# cartesian geocentric coordinates wrt Greenwich
x=(n+h)*np.cos(latg)*np.cos(lon)
y=(n+h)*np.cos(latg)*np.sin(lon)
z=(n*(1.-eccentricity**2.)+h)*np.sin(latg)
return x,y,z
def xyz2llh(x,y,z):
# returns longitude 'lon', geodetic latitude 'lat', and height 'h'
# of position (x,y,z) defined in geocentric coordinate system (ECEF)
# on Oct23,2013 by <NAME>, adding the .all() in order to support
# arrays
p=np.sqrt(x**2.+y**2.)
lon=np.arctan2(y,x)
lat=np.arctan2(z,p)
latp=lat.copy()
for i in range(10):
n=a_WGS/np.sqrt(1.-flatness*(2-flatness)*np.sin(latp)**2.)
h=p/np.cos(latp)-n
lat=np.arctan(z/(p*(1.-n*eccentricity**2./(n+h))))
if (abs(lat-latp)<3.*eps).all():
n=a_WGS/np.sqrt(1.-flatness*(2.-flatness)*np.sin(lat)**2.)
h=p/np.cos(lat)-n
break
latp=lat.copy()
return lat,lon,h
def enu2xyz(xr,yr,zr,east,north,up):
# moves a point from xr,yr,zr to x,y,z by moving into the direction
# specified by east,north,up (enu) coordinates in km
latg,lon,h = xyz2llh(xr,yr,zr)
A = np.array([[-np.sin(lon),-np.sin(latg)*np.cos(lon),np.cos(latg)*np.cos(lon)],
                  [ np.cos(lon),-np.sin(latg)*np.sin(lon),np.cos(latg)*np.sin(lon)],
                  [ 0.         ,             np.cos(latg),            np.sin(latg)]])
    # rotate the local ENU displacement into ECEF and add it to the reference point
    x, y, z = np.array([xr, yr, zr]) + np.dot(A, np.array([east, north, up]))
    return x, y, z
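# Usage sketch (illustrative helper, not part of the original module): a geodetic ->
# ECEF -> geodetic round trip should return the inputs. The coordinates are example
# values only.
def _demo_llh_roundtrip():
    lat0, lon0, h0 = np.radians(-11.95), np.radians(-76.87), 0.5  # rad, rad, km
    x, y, z = llh2xyz(lat0, lon0, h0)
    lat1, lon1, h1 = xyz2llh(np.array([x]), np.array([y]), np.array([z]))
    return lat1[0] - lat0, lon1[0] - lon0, h1[0] - h0  # all approximately zero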
'''
Provides several objective functions
'''
import numpy as np
from math import sin, cos, exp, sqrt
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from Common.constants import *
class base_unconstrained_objective_func():
'''
    Base class for unconstrained objective functions
'''
search_space_tuple = (-1, 1)
func_formula_str = 'f(x)'
def __init__(self, n : int = 2):
self.x_dim = n
self.search_space = np.vstack((
np.ones(n, dtype = float),
np.ones(n, dtype = float)
))
self.search_space[Bounds.lower] = self.search_space_tuple[Bounds.lower]
self.search_space[Bounds.upper] = self.search_space_tuple[Bounds.upper]
self.best_f_x = 0
self.best_x = None
self.name = self.__class__.__name__
def func(self, x : np.ndarray) -> float:
'''
        The objective function.
        '''
        assert len(x) == self.x_dim, 'Vector dimension does not match!'
        assert x.dtype == np.float64, 'Input data must be of float type'
return 0
def plot(self, step_length : int = 128, save_figure_path : str = None, dpi : int = 300, area : tuple = None, hold_on = False):
'''
        Plot the function surface. \n
        :param step_length: number of grid points per axis \n
        :param save_figure_path: path for saving the figure \n
        :param dpi: figure dpi \n
        :param area: plotting region, e.g. (-10, 10) plots x \in (-10, 10) and y \in (-10, 10) \n
        :param hold_on: if True, defer plt.show() so the caller can display later \n
'''
x_dim_save = self.x_dim
self.__init__(2)
if area:
            assert isinstance(area, tuple), 'The plotting area must be a tuple!'
search_space_save = self.search_space.copy()
self.search_space_tuple = area
self.search_space[Bounds.lower] = self.search_space_tuple[Bounds.lower]
self.search_space[Bounds.upper] = self.search_space_tuple[Bounds.upper]
u, v = np.meshgrid(
np.linspace(self.search_space_tuple[Bounds.lower], self.search_space_tuple[Bounds.upper], step_length),
np.linspace(self.search_space_tuple[Bounds.lower], self.search_space_tuple[Bounds.upper], step_length)
)
z = np.zeros_like(u)
for i in range(step_length):
for j in range(step_length):
x, y = u[i][j], v[i][j]
z[i][j] = self.func(
np.array([x, y], dtype = float)
)
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(u, v, z, cmap = plt.cm.RdYlGn)
plt.title(self.name)
if save_figure_path:
plt.savefig(save_figure_path, dpi = dpi)
        if not hold_on: # when hold_on is set, defer plt.show() to the caller
plt.show()
        # restore the original state
self.__init__(x_dim_save)
if area:
self.search_space = search_space_save
''' Unimodal functions '''
class Sphere(base_unconstrained_objective_func):
r'''
    The objective function is
f(x) = \sum_{i = 1}^n x_i^2
'''
search_space_tuple = (-100, 100)
func_formula_str = r'f(x) = \sum_{i = 1}^n x_i^2'
def func(self, x : np.ndarray) -> float:
r'''
        The objective function is
f(x) = \sum_{i = 1}^n x_i^2
'''
super(Sphere, self).func(x)
return np.sum(x ** 2)
class Step(base_unconstrained_objective_func):
r'''
    The objective function is
f(x) = \sum_{i = 1}^n\lfloor x_i + 0.5 \rfloor ^2
'''
search_space_tuple = (-100, 100)
func_formula_str = r'f(x) = \sum_{i = 1}^n\lfloor x_i + 0.5 \rfloor ^2'
def func(self, x : np.ndarray) -> float:
r'''
        The objective function is
f(x) = \sum_{i = 1}^n\lfloor x_i + 0.5 \rfloor ^2
'''
super(Step, self).func(x)
floor = np.floor(x + 0.5)
return np.sum(floor ** 2)
class Schwefel_1_2(base_unconstrained_objective_func):
search_space_tuple = (-100, 100)
func_formula_str = r'f(x) = \sum_{i = 1}^n\left(\sum_{j=1}^i x_j\right)^2'
def func(self, x : np.ndarray) -> float:
r'''
        The objective function is
f(x) = \sum_{i = 1}^n\left(\sum_{j=1}^i x_j\right)^2
'''
super(Schwefel_1_2, self).func(x)
x_copy = x.copy()
for i in range(1, len(x)):
x_copy[i] += x_copy[i - 1]
return np.sum(x_copy ** 2)
class Schwefel_2_21(base_unconstrained_objective_func):
search_space_tuple = (-100, 100)
func_formula_str = r'f(x) = \max_{i=1}^n\left\{|x_i|\right\}'
def func(self, x : np.ndarray) -> float:
r'''
        The objective function is
f(x) = \max_{i=1}^n\left\{|x_i|\right\}
'''
super(Schwefel_2_21, self).func(x)
return np.max(np.abs(x))
class Schwefel_2_22(base_unconstrained_objective_func):
search_space_tuple = (-10, 10)
func_formula_str = r'f(x) = \sum_{i = 0}^n |x_i| + \prod_{i = 0}^n |x_i|'
def func(self, x : np.ndarray) -> float:
r'''
        The objective function is
f(x) = \sum_{i = 0}^n |x_i| + \prod_{i = 0}^n |x_i|
'''
super(Schwefel_2_22, self).func(x)
abs_x = np.abs(x)
return np.sum(abs_x) + np.prod(abs_x)
class Rosenbrock(base_unconstrained_objective_func):
search_space_tuple = (-2.048, 2.048)
func_formula_str = r'f(x) = \sum_{i = 1}^{\frac{n}{2}}\left(100\left(x_{2i} - x_{2i - 1}^2\right)^2 + \left(1 - x_{2i - 1}\right)^2\right)'
def __init__(self, n : int = 2):
        assert not n & 1, 'This objective function requires an even number of variables'
super(Rosenbrock, self).__init__(n)
self.index = np.arange(0, self.x_dim, 2)
self.index_plus_1 = self.index + 1
self.best_x = np.ones(self.x_dim, dtype = float)
self.best_f_x = 0
def func(self, x : np.ndarray) -> float:
r'''
        The objective function is
f(x) = \sum_{i = 1}^{\frac{n}{2}}\left(100\left(x_{2i} - x_{2i - 1}^2\right)^2
+ \left(1 - x_{2i - 1}\right)^2\right)
'''
super(Rosenbrock, self).func(x)
vector_2i_1 = x[self.index]
vector_2i = x[self.index_plus_1]
return np.sum(100 * (vector_2i - vector_2i_1 ** 2) ** 2 + (1 - vector_2i_1) ** 2)
''' Multimodal functions '''
class Rastrigin(base_unconstrained_objective_func):
search_space_tuple = (-5.12, 5.12)
func_formula_str = r'f(x) = \sum_{i=1}^{n}\left(x_{i}^{2}-10 \cos \left(2 \pi x_{i}\right)+10\right)'
def func(self, x : np.ndarray) -> float:
r'''
        The objective function is
f(x) = \sum_{i=1}^{n}\left(x_{i}^{2}-10 \cos \left(2 \pi x_{i}\right)+10\right)
'''
super(Rastrigin, self).func(x)
return np.sum(x ** 2 - 10 * np.cos(2 * pi * x) + 10)
class Schwefel_2_26(base_unconstrained_objective_func):
search_space_tuple = (-500, 500)
func_formula_str = r'f(x) = 418.9829 n+\sum_{i=1}^{n} x_{i} \sin \left(\sqrt{\left|x_{i}\right|}\right)'
def __init__(self, n : int = 2):
super(Schwefel_2_26, self).__init__(n)
self.constant = 418.9829 * n
self.best_x = np.zeros(self.x_dim) + -420.9687
def func(self, x : np.ndarray) -> float:
r'''
        The objective function is
f(x) = 418.9829 n+\sum_{i=1}^{n} x_{i} \sin \left(\sqrt{\left|x_{i}\right|}\right)
'''
super(Schwefel_2_26, self).func(x)
return self.constant + np.sum(x * np.sin(np.sqrt(np.abs(x))))
class Griewank(base_unconstrained_objective_func):
search_space_tuple = (-600, 600)
func_formula_str = r'f(x) = \frac{1}{4000} \sum_{i=1}^{n} x_{i}^{2}-\prod_{i=1}^{n} \cos \left(\frac{x_{i}}{\sqrt{i}}\right)+1'
def __init__(self, n : int = 2):
super(Griewank, self).__init__(n)
i = np.arange(1, self.x_dim + 1)
self.sqrt_i = np.sqrt(i)
def func(self, x : np.ndarray) -> float:
r'''
        The objective function is
f(x) = \frac{1}{4000} \sum_{i=1}^{n} x_{i}^{2}-\prod_{i=1}^{n} \cos \left(\frac{x_{i}}{\sqrt{i}}\right)+1
'''
super(Griewank, self).func(x)
return (np.sum(x ** 2) / 4000) - np.prod(np.cos(x / self.sqrt_i)) + 1
class Ackley(base_unconstrained_objective_func):
r'''
    The objective function is
f(x) = -20\exp\left(-0.2\sqrt{\sum_{i=1}^n\frac{x_i^2}{n}}\right) - \exp\left(\sum_{i = 1}^n\frac{\cos 2\pi x_i}{n}\right) + 20 + e
'''
search_space_tuple = (-32, 32)
func_formula_str = r'f(x) = -20\exp\left(-0.2\sqrt{\sum_{i=1}^n\frac{x_i^2}{n}}\right) - \exp\left(\sum_{i = 1}^n\frac{\cos 2\pi x_i}{n}\right) + 20 + e'
def func(self, x : np.ndarray) -> float:
r'''
        The objective function is
f(x) = -20\exp\left(-0.2\sqrt{\sum_{i=1}^n\frac{x_i^2}{n}}\right) - \exp\left(\sum_{i = 1}^n\frac{\cos 2\pi x_i}{n}\right) + 20 + e
'''
super(Ackley, self).func(x)
return -20 * np.exp(-0.2 * np.sqrt(np.sum(x ** 2) / self.x_dim)) - np.exp(np.sum(np.cos(2 * pi * x)) / self.x_dim) + 20 + e
class Foxholes(base_unconstrained_objective_func):
search_space_tuple = (-65.56, 65.56)
func_formula_str = r'f(x) = \left[\frac{1}{500} + \sum_{j = 1}^{25}\frac{1}{j + \sum_{i = 1}^2(x_i-a_{ij})^6}\right]^{-1}'
def __init__(self, n : int = 2):
super(Foxholes, self).__init__(2)
self.a = np.array([
[-32, -16, 0, 16, 32] * 5,
[-32] * 5 + [-16] * 5 + [0] * 5 + [16] * 5 + [32] * 5
])
self.best_x = np.zeros(2) - 32
self.best_f_x = self.func(self.best_x)
def func(self, x : np.ndarray) -> float:
super(Foxholes, self).func(x)
j = np.arange(1, 26)
x_a = x - self.a.T
sum_x_a_pow_6 = np.sum(x_a ** 6, axis = 1)
        sum_part_out = np.sum(1 / (j + sum_x_a_pow_6))
        return 1.0 / (1.0 / 500 + sum_part_out)
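# Usage sketch (illustrative, not part of the original module): evaluate a couple of the
# benchmark functions defined above. `pi`, `e` and `Bounds` are assumed to be provided by
# the `Common.constants` star import at the top of this file.
if __name__ == '__main__':
    sphere = Sphere(n = 2)
    print(sphere.func(np.array([1.0, 2.0])))        # 1^2 + 2^2 = 5.0
    ackley = Ackley(n = 2)
    print(ackley.func(np.zeros(2, dtype = float)))  # global minimum, approximately 0
    # sphere.plot(step_length = 64)                 # optional: render the 2-D surface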
import argparse
from preprocess import preprocess
import os
from pathlib import Path
import wave
import numpy as np
import unicodedata
import random
from tqdm import tqdm
import re
import yaml
import sys
import librosa
## Convert the KsponSpeech corpus into Fairseq-style manifests
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"--root", default='/code/gitRepo/data/aihub/ksponspeech', metavar="DIR",
help="root directory containing flac files to index"
)
parser.add_argument(
"--info", default=None, metavar="DIR",
help="전처리 추가적으로 수행한 것."
)
parser.add_argument(
"--do_info", action="store_true",
help="전처리 추가적으로 수행할지 여부 확인"
)
parser.add_argument(
"--do_remove", action="store_true",
help="한글 음소가 아닌 숫자, 영어가 포함되어 있는 모든 단어를 삭제할지 여부 확인"
)
parser.add_argument(
"--token_limit", default=sys.maxsize, type=int,
help="최대 글자수 체크"
)
parser.add_argument(
"--dest", default='manifest_temp', type=str, metavar="DIR", help="output directory"
)
parser.add_argument(
"--ext", default="pcm", type=str, metavar="EXT", help="extension to look for"
)
parser.add_argument('--preprocess_mode', type=str,
default='phonetic',
help='Ex) (70%)/(칠 십 퍼센트) 확률이라니 (뭐 뭔)/(모 몬) 소리야 진짜 (100%)/(백 프로)가 왜 안돼?'
'phonetic: 칠 십 퍼센트 확률이라니 모 몬 소리야 진짜 백 프로가 왜 안돼?'
'spelling: 70% 확률이라니 뭐 뭔 소리야 진짜 100%가 왜 안돼?')
parser.add_argument('--output_unit', type=str,
default='grapheme',
help='character or subword or grapheme')
parser.add_argument('--additional_output_unit', type=str,
default=None,
help='character or subword or grapheme')
parser.add_argument("--seed", default=42, type=int, metavar="N", help="random seed")
parser.add_argument(
"--time",
default=None,
type=str,
metavar="MIN",
help="set if you want make split manifest",
)
parser.add_argument('--script_path', type=str,
default="/code/gitRepo/data/aihub/ksponspeech/KsponSpeech_scripts",
                        help='Directory of the transcript scripts provided by AIHub')
parser.add_argument(
"--del_silence", action="store_true",
help="음성이 없는 곳을 삭제하는 건 어때?"
)
return parser
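# Example invocation (sketch): the script name below is hypothetical, but the flags are the
# ones defined in get_parser() above.
#   python prepare_kspon.py --root /code/gitRepo/data/aihub/ksponspeech \
#       --script_path /code/gitRepo/data/aihub/ksponspeech/KsponSpeech_scripts \
#       --dest manifest --ext pcm --preprocess_mode phonetic --output_unit grapheme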
def find_index(durations, limit):
for idx in range(len(durations)):
if sum(durations[:idx]) > limit:
return idx
return len(durations)
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
def load_yaml(yaml_path):
# Read YAML file
with open(yaml_path, 'r') as stream:
data_loaded = yaml.load(stream, Loader=yaml.FullLoader)
return data_loaded
def load_info(info_path):
if not os.path.isdir(info_path):
return {}
info_files = [filename for filename in os.listdir(info_path) if '.yaml' in filename]
info_data = {}
for filename in info_files:
file_path = os.path.join(info_path, filename)
temp_data = load_yaml(file_path)
info_data.update(temp_data)
return info_data
def save_converted_info(args, name, converted_info):
if len(converted_info) == 0:
return
yaml_dict = {k: v for k, v in sorted(converted_info.items(), key=lambda item: (len(item[0]), item[0]))}
with open(os.path.join(args.dest, '{}.yaml'.format(name)), 'w', encoding="utf-8") as write_f:
yaml.dump(yaml_dict, write_f, allow_unicode=True, default_style=None, default_flow_style=False)
def save_wrong_script(args, name, transcripts, fileinfo, raw_sentences, new_sentences):
    ## Save the incorrect transcripts
    ## regex matching uppercase alphabet characters
reg = re.compile(r'[A-Z]')
yaml_dict = {}
for grapheme_transcript, fileitem, raw_sentence, new_sentence in zip(transcripts, fileinfo, raw_sentences,
new_sentences):
graphemes = grapheme_transcript.split()
file_num = Path(fileitem.split()[0]).stem.split("_")[1]
assert len(file_num) == 6
for grapheme in graphemes:
if grapheme.isdigit() or reg.match(grapheme):
yaml_dict[file_num] = str(raw_sentence.replace('\n', ''))
if len(yaml_dict) == 0:
return
## Sorting
yaml_dict = {k: v for k, v in sorted(yaml_dict.items(), key=lambda item: (len(item[0]), item[0]))}
with open(os.path.join(args.dest, '{}.yaml'.format(name)), 'w', encoding="utf-8") as write_f:
yaml.dump(yaml_dict, write_f, allow_unicode=True, default_style=None, default_flow_style=False)
def save_dict(args, transcripts, dict_name='dict.ltr.txt', alphabet_name='alphabet.txt'):
vocab_list = list()
vocab_freq = list()
for grapheme_transcript in transcripts:
graphemes = grapheme_transcript.split()
for grapheme in graphemes:
if grapheme not in vocab_list:
vocab_list.append(grapheme)
vocab_freq.append(1)
else:
vocab_freq[vocab_list.index(grapheme)] += 1
## write ltr
vocab_freq, vocab_list = zip(*sorted(zip(vocab_freq, vocab_list), reverse=True))
with open(os.path.join(args.dest, dict_name), 'w') as write_f:
for idx, (grpm, freq) in enumerate(zip(vocab_list, vocab_freq)):
print("{} {}".format(grpm, freq), file=write_f)
## Write Vocab files
with open(os.path.join(args.dest, alphabet_name), 'w', encoding='UTF8') as write_f:
print("# Each line in this file represents the Unicode codepoint (UTF-8 encoded)", file=write_f)
print("# associated with a numeric label.", file=write_f)
print("# A line that starts with # is a comment. You can escape it with \# if you wish", file=write_f)
print("# to use '#' as a label.", file=write_f)
for token in vocab_list:
print(token, file=write_f)
## final token must be \n
print('', file=write_f)
print("# The last (non-comment) line needs to end with a newline.", file=write_f, end='')
return
def save_lexicon(args, texts, lexicon_name='lexicon.lst'):
vocab_list = {}
for text in texts:
for word in text.split():
new_word = word + "|"
vocab_list[word] = " ".join(new_word)
## Write Vocab files
## Sorting
vocab_list = {k: v for k, v in sorted(vocab_list.items(), key=lambda item: item[0])}
with open(os.path.join(args.dest, lexicon_name), 'w', encoding='UTF8') as write_f:
for k, v in vocab_list.items():
print("{}\t{}".format(k,v), file=write_f)
return
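# The lexicon.lst written above maps each word (tab-separated) to its space-separated
# graphemes followed by the word-boundary symbol "|". Illustrative entries (hypothetical
# words, not taken from the corpus), with <TAB> marking the tab character:
#   안녕<TAB>안 녕 |
#   소리야<TAB>소 리 야 |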
def save_files(args, file_name, dir_path, fileinfo, texts, transcripts):
with open(os.path.join(args.dest, file_name + ".tsv"), 'w') as tsv_out, open(
os.path.join(args.dest, file_name + ".ltr"), "w"
) as ltr_out, open(
os.path.join(args.dest, file_name + ".wrd"), "w"
) as wrd_out:
print(dir_path, file=tsv_out)
for tsv_item, wrd_item, ltr_item in zip(fileinfo, texts, transcripts):
print(tsv_item, file=tsv_out)
print(wrd_item, file=wrd_out)
print(ltr_item + " |", file=ltr_out)
print("save files [{}]".format(file_name))
return
def pcm2wav(pcm_file, channels=1, bit_depth=16, sampling_rate=16000):
wav_file = str(Path(pcm_file).with_suffix('.wav'))
# Check if the options are valid.
if bit_depth % 8 != 0:
raise ValueError("bit_depth " + str(bit_depth) + " must be a multiple of 8.")
# Read the .pcm file as a binary file and store the data to pcm_data
with open(pcm_file, 'rb') as opened_pcm_file:
pcm_data = opened_pcm_file.read()
with wave.open(wav_file, 'wb') as obj2write:
obj2write.setnchannels(channels)
obj2write.setsampwidth(bit_depth // 8)
obj2write.setframerate(sampling_rate)
obj2write.writeframes(pcm_data)
return wav_file
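# Usage sketch: KsponSpeech ships headerless 16-bit little-endian PCM sampled at 16 kHz,
# so the defaults above apply directly; the path below is hypothetical.
#   wav_path = pcm2wav('/data/KsponSpeech_000001.pcm')   # writes /data/KsponSpeech_000001.wav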
def load_script(args, script_path, info_data, token_limit=sys.maxsize):
assert os.path.isfile(script_path)
fileinfo = list()
durations = list()
texts = list()
audio_nums = list()
transcripts = list()
additional_texts = list()
additional_transcripts = list()
raw_sentences = list()
new_sentences = list()
converted_info = {}
reg = re.compile(r'.*[a-zA-Z0-9]')
limit_count = 0
remove_count = 0
with open(script_path, "r") as f:
for line in tqdm(f):
convert_flag = False
items = line.split(" :: ")
file_path = os.path.join(args.root, items[0])
file_path = os.path.realpath(file_path)
audio_num = str(Path(file_path).stem.split("_")[1])
raw_sentence = items[1]
if len(audio_num) ==6 and audio_num in info_data:
raw_sentence = info_data[audio_num]
convert_flag=True
            ## check the file extension
if args.ext == 'pcm':
try:
                    wav = np.memmap(file_path, dtype='h', mode='r')
"""Contains classes to represent non-equilibrium ionization simulations."""
__all__ = ["NEI", "NEIError", "SimulationResults"]
from typing import Callable, Dict, List, Optional, Union
import astropy.units as u
import numpy as np
from scipy import interpolate, optimize
from plasmapy_nei.eigen import EigenData, eigen_data_dict
try:
from plasmapy.atomic import IonizationStates, atomic_number
except ImportError:
from plasmapy.particles import IonizationStates, atomic_number
import warnings
# TODO: Allow this to keep track of velocity and position too, and
# eventually to have density and temperature be able to be functions of
# position. (and more complicated expressions for density and
# temperature too)
# TODO: Expand Simulation docstring
# TODO: Include the methods in the original Visualize class which is a
# subclass of NEI in the NEI-modeling/NEI repo. These were deleted
# temporarily to make it possible to get the NEI class itself
# adapted into this package.
# TODO: In this file and test_nei.py, there are a few places with
# initial.ionic_fractions.keys(), where initial is an instance
# of IonizationStates. This workaround exists because I forgot
# to put in an `elements` attribute in IonizationStates, and
# should be corrected.
class NEIError(Exception):
"""For when there are errors in setting up or performing NEI simulations."""
pass
class SimulationResults:
"""
Results from a non-equilibrium ionization simulation.
Parameters
----------
initial: plasmapy.atomic.IonizationStates
The ``IonizationStates`` instance representing the ionization
states of different elements and plasma properties as the
initial conditions.
n_init: astropy.units.Quantity
The initial number density scaling factor.
T_e_init: astropy.units.Quantity
The initial electron temperature.
max_steps: int
The maximum number of time steps that the simulation can take
before stopping.
time_start: astropy.units.Quantity
The time at the start of the simulation.
"""
def __init__(
self,
initial: IonizationStates,
n_init: u.Quantity,
T_e_init: u.Quantity,
max_steps: int,
time_start: u.Quantity,
):
self._elements = list(initial.ionic_fractions.keys())
self._abundances = initial.abundances
self._max_steps = max_steps
self._nstates = {elem: atomic_number(elem) + 1 for elem in self.elements}
self._ionic_fractions = {
elem: np.full((max_steps + 1, self.nstates[elem]), np.nan, dtype=np.float64)
for elem in self.elements
}
self._number_densities = {
elem: np.full((max_steps + 1, self.nstates[elem]), np.nan, dtype=np.float64)
* u.cm ** -3
for elem in self.elements
}
self._n_elem = {
elem: np.full(max_steps + 1, np.nan) * u.cm ** -3 for elem in self.elements
}
self._n_e = np.full(max_steps + 1, np.nan) * u.cm ** -3
self._T_e = np.full(max_steps + 1, np.nan) * u.K
self._time = np.full(max_steps + 1, np.nan) * u.s
self._index = 0
self._assign(
new_time=time_start,
new_ionfracs=initial.ionic_fractions,
new_n=n_init,
new_T_e=T_e_init,
)
def _assign(
self,
new_time: u.Quantity,
new_ionfracs: Dict[str, np.ndarray],
new_n: u.Quantity,
new_T_e: u.Quantity,
):
"""
Store results from a time step of a non-equilibrium ionization
time advance in the `~plasmapy_nei.classes.NEI` class.
Parameters
----------
new_time
The time associated with this time step.
new_ionfracs: dict
The new ionization fractions for this time step. The keys
of this `dict` are the atomic symbols of the elements being
tracked, and with the corresponding value being an
``numpy.ndarray`` representing the ionic fractions. Each
element's array must have a length of the atomic number plus
one, and be normalized to one with all values between zero
and one.
new_n
The new number density scaling factor for this time step.
The number densities of each ionic species will be the
product of this scaling factor, the element's abundance, and
the ionic fraction given in ``new_ionfracs``.
new_T_e
The new electron temperature.
"""
try:
index = self._index
elements = self.elements
self._time[index] = new_time
self._T_e[index] = new_T_e
for elem in elements:
self._ionic_fractions[elem][index, :] = new_ionfracs[elem][:]
# Calculate elemental and ionic number densities
n_elem = {elem: new_n * self.abundances[elem] for elem in elements}
number_densities = {
elem: n_elem[elem] * new_ionfracs[elem] for elem in elements
}
# Calculate the electron number density
n_e = 0.0 * u.cm ** -3
for elem in elements:
integer_charges = np.linspace(
0, self.nstates[elem] - 1, self.nstates[elem]
)
n_e += np.sum(number_densities[elem] * integer_charges)
# Assign densities
self._n_e[index] = n_e
for elem in elements:
self._n_elem[elem][index] = n_elem[elem]
self._number_densities[elem][index, :] = number_densities[elem]
except Exception as exc:
raise NEIError(
f"Unable to assign parameters to Simulation instance "
f"for index {index} at time = {new_time}. The "
f"parameters are new_n = {new_n}, new_T_e = {new_T_e}, "
f"and new_ionic_fractions = {new_ionfracs}."
) from exc
finally:
self._index += 1
def _cleanup(self):
"""
Clean up this class after the simulation is complete.
This method removes the excess elements from each array that
did not end up getting used for a time step in the simulation
and sets the ``last_step`` attribute.
"""
nsteps = self._index
self._n_e = self._n_e[0:nsteps]
self._T_e = self._T_e[0:nsteps]
self._time = self._time[0:nsteps]
for element in self.elements:
self._ionic_fractions[element] = self._ionic_fractions[element][0:nsteps, :]
self._number_densities[element] = self._number_densities[element][
0:nsteps, :
]
self._last_step = nsteps - 1
self._index = None
@property
def max_steps(self) -> int:
"""
The maximum number of time steps allowed for this simulation.
"""
return self._max_steps
@property
def last_step(self) -> int:
"""The time index of the last step."""
return self._last_step
@property
def nstates(self) -> Dict[str, int]:
"""
Return the dictionary containing atomic symbols as keys and the
number of ionic species for the corresponding element as the
value.
"""
return self._nstates
@property
def elements(self) -> List[str]:
"""The elements modeled by this simulation."""
return self._elements
@property
def abundances(self) -> Dict[str, float]:
"""
The relative elemental abundances of the elements modeled in
this simulation.
The keys are the atomic symbols and the values are a `float`
representing that element's elemental abundance.
"""
return self._abundances
@property
def ionic_fractions(self) -> Dict[str, np.ndarray]:
"""
Return the ionic fractions over the course of the simulation.
The keys of this dictionary are atomic symbols. The values are
2D arrays where the first index refers to the time step and the
second index refers to the integer charge.
"""
return self._ionic_fractions
@property
def number_densities(self) -> Dict[str, u.Quantity]:
"""
Return the number densities over the course of the simulation.
The keys of ``number_densities`` are atomic symbols. The values
are 2D arrays with units of number density where the first index
refers to the time step and the second index is the integer
charge.
"""
return self._number_densities
@property
def n_elem(self) -> Dict[str, u.Quantity]:
"""
The number densities of each element over the course of the
simulation.
The keys of ``n_elem`` are atomic symbols. The values are 1D
arrays with units of number density where the index refers to
the time step.
"""
return self._n_elem
@property
def n_e(self) -> u.Quantity:
"""
The electron number density over the course of the simulation in
units of number density.
The index of this array corresponds to the time step.
"""
return self._n_e
@property
def T_e(self) -> u.Quantity:
"""
The electron temperature over the course of the simulation in
kelvin.
The index of this array corresponds to the time step.
"""
return self._T_e
@property
def time(self) -> u.Quantity:
"""
The time for each time step over the course of the simulation
in units of seconds.
"""
return self._time
class NEI:
r"""
Perform and analyze a non-equilibrium ionization simulation.
Parameters
----------
inputs
T_e: astropy.units.Quantity or callable
The electron temperature, which may be a constant, an array of
temperatures corresponding to the times in `time_input`, or a
function that yields the temperature as a function of time.
n: astropy.units.Quantity or callable
The number density multiplicative factor. The number density of
each element will be ``n`` times the abundance given in
``abundances``. For example, if ``abundance['H'] = 1``, then this
will correspond to the number density of hydrogen (including
neutral hydrogen and protons). This factor may be a constant,
an array of number densities over time, or a function that
yields a number density as a function of time.
time_input: astropy.units.Quantity, optional
An array containing the times associated with ``n`` and ``T_e`` in
units of time.
time_start: astropy.units.Quantity, optional
The start time for the simulation. If density and/or
temperature are given by arrays, then this argument must be
greater than ``time_input[0]``. If this argument is not supplied,
then ``time_start`` defaults to ``time_input[0]`` (if given) and
zero seconds otherwise.
time_max: astropy.units.Quantity
The maximum time for the simulation. If density and/or
temperature are given by arrays, then this argument must be less
than ``time_input[-1]``.
max_steps: `int`
The maximum number of time steps to be taken during a
simulation.
dt: astropy.units.Quantity
The time step. If ``adapt_dt`` is `False`, then ``dt`` is the
time step for the whole simulation.
dt_max: astropy.units.Quantity
The maximum time step to be used with an adaptive time step.
dt_min: astropy.units.Quantity
The minimum time step to be used with an adaptive time step.
adapt_dt: `bool`
If `True`, change the time step based on the characteristic
ionization and recombination time scales and change in
temperature. Not yet implemented.
safety_factor: `float` or `int`
A multiplicative factor to multiply by the time step when
``adapt_dt`` is `True`. Lower values improve accuracy, whereas
higher values reduce computational time. Not yet implemented.
tol: float
The absolute tolerance to be used in comparing ionic fractions.
verbose: bool, optional
A flag stating whether or not to print out information for every
time step. Setting ``verbose`` to `True` is useful for testing.
Defaults to `False`.
abundances: dict
Examples
--------
>>> import numpy as np
>>> import astropy.units as u
>>> inputs = {'H': [0.9, 0.1], 'He': [0.9, 0.099, 0.001]}
>>> abund = {'H': 1, 'He': 0.085}
>>> n = u.Quantity([1e9, 1e8], u.cm**-3)
>>> T_e = np.array([10000, 40000]) * u.K
>>> time = np.array([0, 300]) * u.s
>>> dt = 0.25 * u.s
The initial conditions can be accessed using the initial attribute.
>>> sim = NEI(inputs=inputs, abundances=abund, n=n, T_e=T_e, time_input=time, adapt_dt=False, dt=dt)
After having inputted all of the necessary information, we can run
the simulation.
>>> results = sim.simulate()
The initial results are stored in the ``initial`` attribute.
>>> sim.initial.ionic_fractions['H']
array([0.9, 0.1])
The final results can be access with the ``final`` attribute.
>>> sim.final.ionic_fractions['H']
array([0.16665179, 0.83334821])
>>> sim.final.ionic_fractions['He']
array([0.88685261, 0.11218358, 0.00096381])
>>> sim.final.T_e
<Quantity 40000. K>
Both ``initial`` and ``final`` are instances of the ``IonizationStates``
class.
Notes
-----
The ionization and recombination rates are from Chianti version
8.7. These rates include radiative and dielectronic recombination.
Photoionization is not included.
"""
def __init__(
self,
inputs,
abundances: Union[Dict, str] = None,
T_e: Union[Callable, u.Quantity] = None,
n: Union[Callable, u.Quantity] = None,
time_input: u.Quantity = None,
time_start: u.Quantity = None,
time_max: u.Quantity = None,
max_steps: Union[int, np.integer] = 10000,
tol: Union[int, float] = 1e-15,
dt: u.Quantity = None,
dt_max: u.Quantity = np.inf * u.s,
dt_min: u.Quantity = 0 * u.s,
adapt_dt: bool = None,
safety_factor: Union[int, float] = 1,
verbose: bool = False,
):
try:
self.time_input = time_input
self.time_start = time_start
self.time_max = time_max
self.T_e_input = T_e
self.n_input = n
self.max_steps = max_steps
self.dt_input = dt
if self.dt_input is None:
self._dt = self.time_max / max_steps
else:
self._dt = self.dt_input
self.dt_min = dt_min
self.dt_max = dt_max
self.adapt_dt = adapt_dt
self.safety_factor = safety_factor
self.verbose = verbose
T_e_init = self.electron_temperature(self.time_start)
n_init = self.hydrogen_number_density(self.time_start)
self.initial = IonizationStates(
inputs=inputs,
abundances=abundances,
T_e=T_e_init,
n=n_init,
tol=tol,
)
self.tol = tol
# TODO: Update IonizationStates in PlasmaPy to have elements attribute
self.elements = list(self.initial.ionic_fractions.keys())
if "H" not in self.elements:
raise NEIError("Must have H in elements")
self.abundances = self.initial.abundances
self._eigen_data_dict = eigen_data_dict
if self.T_e_input is not None and not isinstance(inputs, dict):
for element in self.initial.ionic_fractions.keys():
self.initial.ionic_fractions[element] = self.eigen_data_dict[
element
].equilibrium_state(T_e_init.value)
self._temperature_grid = self._eigen_data_dict[
self.elements[0]
].temperature_grid
self._get_temperature_index = self._eigen_data_dict[
self.elements[0]
]._get_temperature_index
self._results = None
except Exception as e:
raise NEIError(
f"Unable to create NEI object for:\n"
f" inputs = {inputs}\n"
f" abundances = {abundances}\n"
f" T_e = {T_e}\n"
f" n = {n}\n"
f" time_input = {time_input}\n"
f" time_start = {time_start}\n"
f" time_max = {time_max}\n"
f" max_steps = {max_steps}\n"
) from e
def equil_ionic_fractions(
self,
T_e: u.Quantity = None,
time: u.Quantity = None,
) -> Dict[str, np.ndarray]:
"""
Return the equilibrium ionic fractions for a temperature or at
a given time.
Parameters
----------
T_e: astropy.units.Quantity, optional
The electron temperature in units that can be converted to
kelvin.
time: astropy.units.Quantity, optional
The time in units that can be converted to seconds.
Returns
-------
equil_ionfracs: `dict`
The equilibrium ionic fractions for the elements contained
within this class
Notes
-----
Only one of ``T_e`` and ``time`` may be included as an argument.
        If neither ``T_e`` nor ``time`` is provided and the temperature
        for the simulation is given by a constant, then this method will
        assume that ``T_e`` is the temperature of the simulation.
"""
if T_e is not None and time is not None:
raise NEIError("Only one of T_e and time may be an argument.")
if T_e is None and time is None:
if self.T_e_input.isscalar:
T_e = self.T_e_input
else:
raise NEIError
try:
T_e = T_e.to(u.K) if T_e is not None else None
time = time.to(u.s) if time is not None else None
except Exception as exc:
raise NEIError("Invalid input to equilibrium_ionic_fractions.") from exc
if time is not None:
T_e = self.electron_temperature(time)
if not T_e.isscalar:
raise NEIError("Need scalar input for equil_ionic_fractions.")
equil_ionfracs = {}
for element in self.elements:
equil_ionfracs[element] = self.eigen_data_dict[element].equilibrium_state(
T_e.value
)
return equil_ionfracs
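    # Usage sketch (illustrative): with the ``sim`` object from the class docstring above,
    # the equilibrium ionic fractions at a fixed electron temperature could be queried as
    #   eq = sim.equil_ionic_fractions(T_e=4e4 * u.K)
    #   eq['He']   # ndarray of length 3 whose entries should sum to ~1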
@property
def elements(self) -> List[str]:
"""A `list` of the elements."""
return self._elements
@elements.setter
def elements(self, elements):
# TODO: Update this
self._elements = elements
@property
def abundances(self) -> Dict[str, Union[float, int]]:
"""Return the abundances."""
return self._abundances
@abundances.setter
def abundances(self, abund: Dict[Union[str, int], Union[float, int]]):
        # TODO: Update initial and other attributes when abundances is
        #       updated; the checks within IonizationStates will also
        #       validate the new abundances.
self._abundances = abund
@property
def tol(self) -> float:
"""
The tolerance for comparisons between different ionization
states.
"""
return self._tol
@tol.setter
def tol(self, value: Union[float, int]):
try:
value = float(value)
except Exception as exc:
raise TypeError(f"Invalid tolerance: {value}") from exc
if not 0 <= value < 1:
raise ValueError("Need 0 <= tol < 1.")
self._tol = value
@property
def time_input(self) -> u.s:
return self._time_input
@time_input.setter
def time_input(self, times: u.s):
if times is None:
self._time_input = None
elif isinstance(times, u.Quantity):
if times.isscalar:
raise ValueError("time_input must be an array.")
try:
times = times.to(u.s)
except u.UnitConversionError:
raise u.UnitsError("time_input must have units of seconds.") from None
if not np.all(times[1:] > times[:-1]):
raise ValueError("time_input must monotonically increase.")
self._time_input = times
else:
raise TypeError("Invalid time_input.")
@property
def time_start(self) -> u.s:
"""The start time of the simulation."""
return self._time_start
@time_start.setter
def time_start(self, time: u.s):
if time is None:
self._time_start = 0.0 * u.s
elif isinstance(time, u.Quantity):
if not time.isscalar:
raise ValueError("time_start must be a scalar")
try:
time = time.to(u.s)
except u.UnitConversionError:
raise u.UnitsError("time_start must have units of seconds") from None
if (
hasattr(self, "_time_max")
and self._time_max is not None
and self._time_max <= time
):
raise ValueError("Need time_start < time_max.")
if self.time_input is not None and self.time_input.min() > time:
raise ValueError("time_start must be less than min(time_input)")
self._time_start = time
else:
raise TypeError("Invalid time_start.") from None
@property
def time_max(self) -> u.s:
"""The maximum time allowed for the simulation."""
return self._time_max
@time_max.setter
def time_max(self, time: u.s):
if time is None:
self._time_max = (
self.time_input[-1] if self.time_input is not None else np.inf * u.s
)
elif isinstance(time, u.Quantity):
if not time.isscalar:
raise ValueError("time_max must be a scalar")
try:
time = time.to(u.s)
except u.UnitConversionError:
raise u.UnitsError("time_max must have units of seconds") from None
if (
hasattr(self, "_time_start")
and self._time_start is not None
and self._time_start >= time
):
raise ValueError("time_max must be greater than time_start")
self._time_max = time
else:
raise TypeError("Invalid time_max.") from None
@property
def adapt_dt(self) -> Optional[bool]:
"""
Return `True` if the time step is set to be adaptive, `False`
if the time step is set to not be adapted, and `None` if this
attribute was not set.
"""
return self._adapt_dt
@adapt_dt.setter
def adapt_dt(self, choice: Optional[bool]):
if choice is None:
self._adapt_dt = True if self.dt_input is None else False
elif choice is True or choice is False:
self._adapt_dt = choice
else:
raise TypeError("Invalid value for adapt_dt")
@property
def dt_input(self) -> u.s:
"""Return the inputted time step."""
return self._dt_input
@dt_input.setter
def dt_input(self, dt: u.s):
if dt is None:
self._dt_input = None
elif isinstance(dt, u.Quantity):
try:
dt = dt.to(u.s)
if dt > 0 * u.s:
self._dt_input = dt
except (AttributeError, u.UnitConversionError):
raise NEIError("Invalid dt.")
@property
def dt_min(self) -> u.s:
"""The minimum time step."""
return self._dt_min
@dt_min.setter
def dt_min(self, value: u.s):
if not isinstance(value, u.Quantity):
raise TypeError("dt_min must be a Quantity.")
try:
value = value.to(u.s)
except u.UnitConversionError as exc:
raise u.UnitConversionError("Invalid units for dt_min.") from exc
if (
hasattr(self, "_dt_input")
and self.dt_input is not None
and self.dt_input < value
):
raise ValueError("dt_min cannot exceed the inputted time step.")
if hasattr(self, "_dt_max") and self.dt_max < value:
raise ValueError("dt_min cannot exceed dt_max.")
self._dt_min = value
@property
def dt_max(self) -> u.s:
return self._dt_max
@dt_max.setter
def dt_max(self, value: u.s):
if not isinstance(value, u.Quantity):
raise TypeError("dt_max must be a Quantity.")
try:
value = value.to(u.s)
except u.UnitConversionError as exc:
raise u.UnitConversionError("Invalid units for dt_max.") from exc
if (
hasattr(self, "_dt_input")
and self.dt_input is not None
and self.dt_input > value
):
raise ValueError("dt_max cannot be less the inputted time step.")
if hasattr(self, "_dt_min") and self.dt_min > value:
raise ValueError("dt_min cannot exceed dt_max.")
self._dt_max = value
@property
def safety_factor(self):
"""
The multiplicative factor that the time step is to be multiplied
by when using an adaptive time step.
"""
return self._safety_factor
@safety_factor.setter
def safety_factor(self, value):
if not isinstance(value, (float, np.float64, np.integer, int)):
raise TypeError
if 1e-3 <= value <= 1e3:
self._safety_factor = value
else:
raise NEIError("Invalid safety factor.")
@property
def verbose(self) -> bool:
"""
Return `True` if verbose output during a simulation is
requested, and `False` otherwise.
"""
return self._verbose
@verbose.setter
def verbose(self, choice: bool):
if choice is True or choice is False:
self._verbose = choice
else:
raise TypeError("Invalid choice for verbose.")
@u.quantity_input
def in_time_interval(self, time: u.s, buffer: u.s = 1e-9 * u.s):
"""
Return `True` if the ``time`` is between ``time_start - buffer``
and ``time_max + buffer`` , and `False` otherwise.
Raises
------
TypeError
If ``time`` or ``buffer`` is not a ``astropy.units.Quantity``
astropy.units.UnitsError
If ``time`` or ``buffer`` is not in units of time.
"""
return self.time_start - buffer <= time <= self.time_max + buffer
@property
def max_steps(self) -> int:
"""
The maximum number of steps that a simulation will be allowed
to take.
"""
return self._max_steps
@max_steps.setter
def max_steps(self, n: int):
if isinstance(n, (int, np.integer)) and 0 < n <= 1000000:
self._max_steps = n
else:
raise TypeError(
"max_steps must be an integer with 0 < max_steps <= 1000000"
)
@property
def T_e_input(self) -> Union[u.Quantity, Callable]:
"""
The temperature input.
"""
return self._T_e_input
@T_e_input.setter
def T_e_input(self, T_e: Optional[Union[Callable, u.Quantity]]):
"""Set the input electron temperature."""
if isinstance(T_e, u.Quantity):
try:
T_e = T_e.to(u.K, equivalencies=u.temperature_energy())
except u.UnitConversionError:
raise u.UnitsError("Invalid electron temperature.") from None
if T_e.isscalar:
self._T_e_input = T_e
self._electron_temperature = lambda time: T_e
else:
if self._time_input is None:
raise TypeError("Must define time_input prior to T_e for an array.")
time_input = self.time_input
if len(time_input) != len(T_e):
raise ValueError("len(T_e) not equal to len(time_input).")
f = interpolate.interp1d(
time_input.value,
T_e.value,
bounds_error=False,
fill_value="extrapolate",
)
self._electron_temperature = lambda time: f(time.value) * u.K
self._T_e_input = T_e
elif callable(T_e):
if self.time_start is not None:
try:
T_e(self.time_start).to(u.K)
T_e(self.time_max).to(u.K)
except Exception:
raise ValueError("Invalid electron temperature function.")
self._T_e_input = T_e
self._electron_temperature = T_e
elif T_e is None:
self._electron_temperature = lambda: None
else:
raise TypeError("Invalid T_e")
def electron_temperature(self, time: u.Quantity) -> u.Quantity:
try:
if not self.in_time_interval(time):
warnings.warn(
f"{time} is not in the simulation time interval:"
f"[{self.time_start}, {self.time_max}]. "
f"May be extrapolating temperature."
)
T_e = self._electron_temperature(time.to(u.s))
if np.isnan(T_e) or np.isinf(T_e) or T_e < 0 * u.K:
raise NEIError(f"T_e = {T_e} at time = {time}.")
return T_e
except Exception as exc:
raise NEIError(
f"Unable to calculate a valid electron temperature " f"for time {time}"
) from exc
@property
def n_input(self) -> u.Quantity:
"""The number density factor input."""
if "H" in self.elements:
return self._n_input
else:
raise ValueError
@n_input.setter
def n_input(self, n: u.Quantity):
if isinstance(n, u.Quantity):
try:
n = n.to(u.cm ** -3)
except u.UnitConversionError:
raise u.UnitsError("Invalid hydrogen density.")
if n.isscalar:
self._n_input = n
self.hydrogen_number_density = lambda time: n
else:
if self._time_input is None:
raise TypeError("Must define time_input prior to n for an array.")
time_input = self.time_input
if len(time_input) != len(n):
raise ValueError("len(n) is not equal to len(time_input).")
f = interpolate.interp1d(
time_input.value,
n.value,
bounds_error=False,
fill_value="extrapolate",
)
self._hydrogen_number_density = lambda time: f(time.value) * u.cm ** -3
self._n_input = n
elif callable(n):
if self.time_start is not None:
try:
n(self.time_start).to(u.cm ** -3)
n(self.time_max).to(u.cm ** -3)
except Exception:
raise ValueError("Invalid number density function.")
self._n_input = n
self._hydrogen_number_density = n
elif n is None:
self._hydrogen_number_density = lambda: None
else:
raise TypeError("Invalid n.")
def hydrogen_number_density(self, time: u.Quantity) -> u.Quantity:
try:
time = time.to(u.s)
except (AttributeError, u.UnitsError):
raise NEIError("Invalid time in hydrogen_density")
return self._hydrogen_number_density(time)
@property
def eigen_data_dict(self) -> Dict[str, EigenData]:
"""
Return a `dict` containing `~plasmapy_nei.eigen.EigenData` instances
for each element.
"""
return self._eigen_data_dict
@property
def initial(self) -> IonizationStates:
"""
Return the ionization states of the plasma at the beginning of
the simulation.
"""
return self._initial
@initial.setter
def initial(self, initial_states: IonizationStates):
if isinstance(initial_states, IonizationStates):
self._initial = initial_states
self._elements = (
initial_states.ionic_fractions.keys()
) # TODO IonizationStates
elif initial_states is None:
            self._initial = None
else:
raise TypeError("Expecting an IonizationStates instance.")
@property
def results(self) -> SimulationResults:
"""
Return the `~plasmapy_nei.nei.SimulationResults` class instance that
corresponds to the simulation results.
"""
if self._results is not None:
return self._results
else:
raise AttributeError("The simulation has not yet been performed.")
@property
def final(self) -> IonizationStates:
"""
Return the ionization states of the plasma at the end of the
simulation.
"""
try:
return self._final
except AttributeError:
raise NEIError("The simulation has not yet been performed.") from None
def _initialize_simulation(self):
self._results = SimulationResults(
initial=self.initial,
n_init=self.hydrogen_number_density(self.time_start),
T_e_init=self.electron_temperature(self.time_start),
max_steps=self.max_steps,
time_start=self.time_start,
)
self._old_time = self.time_start.to(u.s)
self._new_time = self.time_start.to(u.s)
def simulate(self) -> SimulationResults:
"""
Perform a non-equilibrium ionization simulation.
Returns
-------
results: `~plasmapy_nei.classes.Simulation`
The results from the simulation (which are also stored in
the ``results`` attribute of the `~plasmapy_nei.nei.NEI`
instance this method was called from.
"""
self._initialize_simulation()
for step in range(self.max_steps):
try:
self.set_timestep()
self.time_advance()
except StopIteration:
break
except Exception as exc:
raise NEIError(f"Unable to complete simulation.") from exc
self._finalize_simulation()
# Is there a way to use the inspect package or something similar
# to only return self.results if it is in an expression where
return self.results
def _finalize_simulation(self):
self._results._cleanup()
final_ionfracs = {
element: self.results.ionic_fractions[element][-1, :]
for element in self.elements
}
self._final = IonizationStates(
inputs=final_ionfracs,
abundances=self.abundances,
n=np.sum(self.results.number_densities["H"][-1, :]), # modify this later?,
T_e=self.results.T_e[-1],
tol=1e-6,
)
if not np.isclose(self.time_max / u.s, self.results.time[-1] / u.s):
warnings.warn(
f"The simulation ended at {self.results.time[-1]}, "
f"which is prior to time_max = {self.time_max}."
)
def _set_adaptive_timestep(self):
"""Adapt the time step."""
        t = self._new_time if hasattr(self, "_new_time") else self.time_start
# We need to guess the timestep in order to narrow down what the
# timestep should be. If we are in the middle of a simulation,
# we can use the old timestep as a reasonable guess. If we are
# simulation, then we can either use the inputted timestep or
# estimate it from other inputs.
dt_guess = (
self._dt
if self._dt
else self._dt_input
if self._dt_input
else self.time_max / self.max_steps
)
# Make sure that dt_guess does not lead to a time that is out
# of the domain.
dt_guess = dt_guess if t + dt_guess <= self.time_max - t else self.time_max - t
# The temperature may start out exactly at the boundary of a
# bin, so we check what bin it is in just slightly after to
# figure out which temperature bin the plasma is entering.
T = self.electron_temperature(t + 1e-9 * dt_guess)
# Find the boundaries to the temperature bin.
index = self._get_temperature_index(T.to(u.K).value)
T_nearby = np.array(self._temperature_grid[index - 1 : index + 2]) * u.K
T_boundary = (T_nearby[0:-1] + T_nearby[1:]) / 2
# In order to use Brent's method, we must bound the root's
# location. Functions may change sharply or slowly, so we test
# different times that are logarithmically spaced to find the
# first one that is outside of the boundary.
dt_spread = (
np.geomspace(1e-9 * dt_guess.value, (self.time_max - t).value, num=100)
* u.s
)
time_spread = t + dt_spread
T_spread = [self.electron_temperature(time) for time in time_spread]
in_range = [T_boundary[0] <= temp <= T_boundary[1] for temp in T_spread]
# If all of the remaining temperatures are in the same bin, then
# the temperature will be roughly constant for the rest of the
# simulation. Take one final long time step, unless it exceeds
# dt_max.
if all(in_range):
new_dt = self.time_max - t
self._dt = new_dt if new_dt <= self.dt_max else self.dt_max
return
# Otherwise, we need to find the first index in the spread that
# corresponds to a temperature outside of the temperature bin
# for this time step.
first_false_index = in_range.index(False)
# We need to figure out if the temperature is dropping so that
# it crosses the low temperature boundary of the bin, or if it
# is rising so that it crosses the high temperature of the bin.
T_first_outside = self.electron_temperature(time_spread[first_false_index])
if T_first_outside >= T_boundary[1]:
boundary_index = 1
elif T_first_outside <= T_boundary[0]:
boundary_index = 0
# Select the values for the time step in the spread just before
# and after the temperature leaves the temperature bin as bounds
# for the root finding method.
dt_bounds = (dt_spread[first_false_index - 1 : first_false_index + 1]).value
# Define a function for the difference between the temperature
# and the temperature boundary as a function of the value of the
# time step.
T_val = lambda dtval: (
self.electron_temperature(t + dtval * u.s) - T_boundary[boundary_index]
).value
# Next we find the root. This method should succeed as long as
# the root is bracketed by dt_bounds. Because astropy.units is
# not fully compatible with SciPy, we temporarily drop units and
# then reattach them.
try:
new_dt = (
optimize.brentq(
T_val,
*dt_bounds,
xtol=1e-14,
maxiter=1000,
disp=True,
)
* u.s
)
except Exception as exc:
raise NEIError(f"Unable to find new dt at t = {t}") from exc
else:
if np.isnan(new_dt.value):
raise NEIError(f"new_dt = {new_dt}")
# Enforce that the time step is in the interval [dt_min, dt_max].
if new_dt < self.dt_min:
new_dt = self.dt_min
elif new_dt > self.dt_max:
new_dt = self.dt_max
# Store the time step as a private attribute so that it can be
# used in the time advance.
self._dt = new_dt.to(u.s)
def set_timestep(self, dt: u.Quantity = None):
"""
Set the time step for the next non-equilibrium ionization time
advance.
Parameters
----------
dt: astropy.units.Quantity, optional
The time step to be used for the next time advance.
Notes
-----
If ``dt`` is not `None`, then the time step will be set to ``dt``.
If ``dt`` is not set and the ``adapt_dt`` attribute of an
`~plasmapy_nei.nei.NEI` instance is `True`, then this method will
calculate the time step corresponding to how long it will be
until the temperature rises or drops into the next temperature
bin. If this time step is between ``dtmin`` and ``dtmax``, then
If ``dt`` is not set and the ``adapt_dt`` attribute is `False`,
then this method will set the time step as what was inputted to
the `~plasmapy_nei.nei.NEI` class upon instantiation in the
``dt`` argument or through the `~plasmapy_nei.nei.NEI` class's
``dt_input`` attribute.
Raises
------
~plasmapy_nei.nei.NEIError
If the time step cannot be set, for example if the ``dt``
argument is invalid or the time step cannot be adapted.
"""
if dt is not None:
# Allow the time step to set as an argument to this method.
try:
dt = dt.to(u.s)
except Exception as exc:
raise NEIError(f"{dt} is not a valid time step.") from exc
finally:
self._dt = dt
elif self.adapt_dt:
try:
self._set_adaptive_timestep()
except Exception as exc:
raise NEIError("Unable to adapt the time step.") from exc
elif self.dt_input is not None:
self._dt = self.dt_input
else:
raise NEIError("Unable to set the time step.")
self._old_time = self._new_time
self._new_time = self._old_time + self._dt
if self._new_time > self.time_max:
self._new_time = self.time_max
self._dt = self._new_time - self._old_time
def time_advance(self):
"""Advance the simulation by one time step."""
# TODO: Expand docstring and include equations!
# TODO: Fully implement units into this.
step = self.results._index
T_e = self.results.T_e[step - 1].value
n_e = self.results.n_e[step - 1].value # set average
dt = self._dt.value
if self.verbose:
print(f"step={step} T_e={T_e} n_e={n_e} dt={dt}")
new_ionic_fractions = {}
try:
for elem in self.elements:
nstates = self.results.nstates[elem]
f0 = self.results._ionic_fractions[elem][self.results._index - 1, :]
evals = self.eigen_data_dict[elem].eigenvalues(T_e=T_e)
evect = self.eigen_data_dict[elem].eigenvectors(T_e=T_e)
evect_inverse = self.eigen_data_dict[elem].eigenvector_inverses(T_e=T_e)
diagonal_evals = np.zeros((nstates, nstates), dtype=np.float64)
for ii in range(0, nstates):
diagonal_evals[ii, ii] = np.exp(evals[ii] * dt * n_e)
                matrix_1 = np.dot(diagonal_evals, evect)
import numpy as np
import logging
logger = logging.getLogger(name=__name__)
from ...sgmcmc_sampler import SGMCMCHelper
from ..._utils import random_categorical, lower_tri_mat_inv
class SLDSHelper(SGMCMCHelper):
""" LGSSM Helper
forward_message (dict) with keys
x (dict):
log_constant (double) log scaling const
mean_precision (ndarray) mean precision
precision (ndarray) precision
z (dict):
prob_vector (ndarray) dimension num_states
log_constant (double) log scaling const
x_prev (ndarray)
z_prev (ndarray)
backward_message (dict) with keys
x (dict):
log_constant (double) log scaling const
mean_precision (ndarray) mean precision
precision (ndarray) precision
z (dict):
likelihood_vector (ndarray) dimension num_states
log_constant (double) log scaling const
x_next (ndarray)
z_next (ndarray)
"""
def __init__(self, num_states, n, m,
forward_message=None, backward_message=None,
**kwargs):
self.num_states = num_states
self.n = n
self.m = m
if forward_message is None:
forward_message = {
'x': {
'log_constant': 0.0,
'mean_precision': np.zeros(self.n),
'precision': np.eye(self.n)/10,
},
'z': {
'log_constant': 0.0,
'prob_vector': np.ones(self.num_states)/self.num_states,
},
}
self.default_forward_message=forward_message
if backward_message is None:
backward_message = {
'x': {
'log_constant': 0.0,
'mean_precision': np.zeros(self.n),
'precision': np.zeros((self.n, self.n)),
},
'z': {
'log_constant': np.log(self.num_states),
'likelihood_vector':
np.ones(self.num_states)/self.num_states,
},
}
self.default_backward_message=backward_message
return
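    # Usage sketch (illustrative): a helper for a 2-state switching linear dynamical system
    # with scalar latent state (n=1) and scalar observations (m=1). The default messages
    # built above are used whenever forward_message/backward_message are not passed in.
    #   helper = SLDSHelper(num_states=2, n=1, m=1)
    #   fwd = helper._forward_messages(observations=y, parameters=params, z=z,
    #                                  forward_message=helper.default_forward_message)
    # where `params` is assumed to expose pi, A, LQinv, C and LRinv as used below.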
def _forward_messages(self, observations, parameters, forward_message,
x=None, z=None, **kwargs):
if z is not None:
if x is not None:
raise ValueError("Either x or z can be conditioned on")
# Forward Messages conditioned on z
return self._x_forward_messages(
observations=observations,
z=z,
parameters=parameters,
forward_message=forward_message,
**kwargs
)
elif x is not None:
# Forward Messages conditioned on z
return self._z_forward_messages(
observations=observations,
x=x,
parameters=parameters,
forward_message=forward_message,
**kwargs
)
else:
raise ValueError("Requires x or z be passed to condition on")
def _backward_messages(self, observations, parameters, backward_message, x=None, z=None, **kwargs):
if z is not None:
if x is not None:
raise ValueError("Either x or z can be conditioned on")
# Forward Messages conditioned on z
return self._x_backward_messages(
observations=observations,
z=z,
parameters=parameters,
backward_message=backward_message,
**kwargs
)
elif x is not None:
# Forward Messages conditioned on z
return self._z_backward_messages(
observations=observations,
x=x,
parameters=parameters,
backward_message=backward_message,
**kwargs
)
else:
raise ValueError("Requires x or z be passed to condition on")
## Helper Functions conditioned on z
def _x_forward_messages(self, observations, z, parameters, forward_message,
weights=None, tqdm=None, only_return_last=False):
# Return list of forward messages Pr(x_{t} | y_{<=t}, z)
# y is num_obs x m matrix
num_obs = np.shape(observations)[0]
if not only_return_last:
forward_messages = [None]*(num_obs+1)
forward_messages[0] = forward_message
mean_precision = forward_message['x']['mean_precision']
precision = forward_message['x']['precision']
log_constant = forward_message['x']['log_constant']
z_prev = forward_message.get('z_prev', None)
Pi = parameters.pi
A = parameters.A
LQinv = parameters.LQinv
Qinv = np.array([np.dot(LQinv_k, LQinv_k.T)
for LQinv_k in LQinv])
AtQinv = np.array([np.dot(A_k.T, Qinv_k)
for (A_k, Qinv_k) in zip(A, Qinv)])
AtQinvA = np.array([np.dot(AtQinv_k, A_k)
for (A_k, AtQinv_k) in zip(A, AtQinv)])
C = parameters.C
LRinv = parameters.LRinv
Rinv = np.dot(LRinv, LRinv.T)
CtRinv = np.dot(C.T, Rinv)
CtRinvC = np.dot(CtRinv, C)
pbar = range(num_obs)
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("forward messages")
for t in pbar:
y_cur = observations[t]
z_cur = z[t]
weight_t = 1.0 if weights is None else weights[t]
# Calculate Predict Parameters
J = np.linalg.solve(AtQinvA[z_cur] + precision, AtQinv[z_cur])
pred_mean_precision = np.dot(J.T, mean_precision)
pred_precision = Qinv[z_cur] - np.dot(AtQinv[z_cur].T, J)
# Calculate Observation Parameters
y_mean = np.dot(C,
np.linalg.solve(pred_precision, pred_mean_precision))
y_precision = Rinv - np.dot(CtRinv.T,
np.linalg.solve(CtRinvC + pred_precision, CtRinv))
log_constant += weight_t * (
-0.5 * np.dot(y_cur-y_mean,
np.dot(y_precision, y_cur-y_mean)) + \
0.5 * np.linalg.slogdet(y_precision)[1] + \
-0.5 * self.m * np.log(2*np.pi)
)
if z_prev is not None:
log_constant += weight_t * np.log(Pi[z_prev, z_cur])
# Calculate Filtered Parameters
new_mean_precision = pred_mean_precision + np.dot(CtRinv, y_cur)
new_precision = pred_precision + CtRinvC
# Save Messages
mean_precision = new_mean_precision
precision = new_precision
z_prev = z_cur
if not only_return_last:
forward_messages[t+1] = dict(
x={
'mean_precision': mean_precision,
'precision': precision,
'log_constant': log_constant,
},
z_prev=z_prev,
)
if only_return_last:
last_message = dict(
x={
'mean_precision': mean_precision,
'precision': precision,
'log_constant': log_constant,
},
z_prev=z_prev,
)
return last_message
else:
return forward_messages
def _x_backward_messages(self, observations, z, parameters, backward_message,
weights=None, tqdm=None, only_return_last=False):
# Return list of backward messages Pr(y_{>t} | x_t, z)
# y is num_obs x n matrix
num_obs = np.shape(observations)[0]
if not only_return_last:
backward_messages = [None]*(num_obs+1)
backward_messages[-1] = backward_message
mean_precision = backward_message['x']['mean_precision']
precision = backward_message['x']['precision']
log_constant = backward_message['x']['log_constant']
z_next = backward_message.get('z_next', None)
Pi = parameters.pi
A = parameters.A
LQinv = parameters.LQinv
Qinv = np.array([np.dot(LQinv_k, LQinv_k.T)
for LQinv_k in LQinv])
AtQinv = np.array([np.dot(A_k.T, Qinv_k)
for (A_k, Qinv_k) in zip(A, Qinv)])
AtQinvA = np.array([np.dot(AtQinv_k, A_k)
for (A_k, AtQinv_k) in zip(A, AtQinv)])
C = parameters.C
LRinv = parameters.LRinv
Rinv = np.dot(LRinv, LRinv.T)
CtRinv = np.dot(C.T, Rinv)
CtRinvC = np.dot(CtRinv, C)
pbar = reversed(range(num_obs))
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("backward messages")
for t in pbar:
y_cur = observations[t]
z_cur = z[t]
weight_t = 1.0 if weights is None else weights[t]
# Helper Values
xi = Qinv[z_cur] + precision + CtRinvC
L = np.linalg.solve(xi, AtQinv[z_cur].T)
vi = mean_precision + np.dot(CtRinv, y_cur)
# Calculate new parameters
log_constant += weight_t * (
-0.5 * self.m * np.log(2.0*np.pi) + \
np.sum(np.log(np.diag(LRinv))) + \
np.sum(np.log(np.diag(LQinv[z_cur]))) + \
-0.5 * np.linalg.slogdet(xi)[1] + \
-0.5 * np.dot(y_cur, np.dot(Rinv, y_cur)) + \
0.5 * np.dot(vi, np.linalg.solve(xi, vi))
)
if z_next is not None:
log_constant += weight_t * np.log(Pi[z_cur, z_next])
new_mean_precision = np.dot(L.T, vi)
new_precision = AtQinvA[z_cur] - np.dot(AtQinv[z_cur], L)
# Save Messages
mean_precision = new_mean_precision
precision = new_precision
z_next = z_cur
if not only_return_last:
backward_messages[t] = dict(x={
'mean_precision': mean_precision,
'precision': precision,
'log_constant': log_constant,
}, z_next=z_next)
if only_return_last:
last_message = dict(x={
'mean_precision': mean_precision,
'precision': precision,
'log_constant': log_constant,
}, z_next=z_next)
return last_message
else:
return backward_messages
def _x_marginal_loglikelihood(self, observations, z, parameters,
forward_message=None, backward_message=None, weights=None,
**kwargs):
# Run forward pass + combine with backward pass
# y is num_obs x m matrix
# forward_pass is Pr(x_{T-1} | y_{<=T-1})
forward_pass = self._forward_message(
observations=observations,
z=z,
parameters=parameters,
forward_message=forward_message,
weights=weights,
**kwargs)
weight_T = 1.0 if weights is None else weights[-1]
# Calculate the marginal loglikelihood of forward + backward message
f_mean_precision = forward_pass['x']['mean_precision']
f_precision = forward_pass['x']['precision']
c_mean_precision = f_mean_precision + backward_message['x']['mean_precision']
c_precision = f_precision + backward_message['x']['precision']
loglikelihood = forward_pass['x']['log_constant'] + \
(backward_message['x']['log_constant'] + \
+0.5 * np.linalg.slogdet(f_precision)[1] + \
-0.5 * np.linalg.slogdet(c_precision)[1] + \
-0.5 * np.dot(f_mean_precision,
np.linalg.solve(f_precision, f_mean_precision)
) + \
0.5 * np.dot(c_mean_precision,
                      np.linalg.solve(c_precision, c_mean_precision)
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
max_time = 5
data_size = 5000
target_vocab_size = 8
embedding_size = 6
hidden_units = 256
num_layers = 2
batch_size = 25
epochs = 10
def generate_data(x_size, y_size):
    return np.random.randint(2, 8, size=(x_size, y_size))
import numpy as np
import torch
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, coord, feat, label):
for t in self.transforms:
coord, feat, label = t(coord, feat, label)
return coord, feat, label
class ToTensor(object):
def __call__(self, coord, feat, label):
coord = torch.from_numpy(coord)
if not isinstance(coord, torch.FloatTensor):
coord = coord.float()
feat = torch.from_numpy(feat)
if not isinstance(feat, torch.FloatTensor):
feat = feat.float()
label = torch.from_numpy(label)
if not isinstance(label, torch.LongTensor):
label = label.long()
return coord, feat, label
class RandomRotate(object):
def __init__(self, angle=[0, 0, 1]):
self.angle = angle
def __call__(self, coord, feat, label):
angle_x = np.random.uniform(-self.angle[0], self.angle[0]) * np.pi
angle_y = np.random.uniform(-self.angle[1], self.angle[1]) * np.pi
angle_z = np.random.uniform(-self.angle[2], self.angle[2]) * np.pi
cos_x, sin_x = np.cos(angle_x), np.sin(angle_x)
cos_y, sin_y = np.cos(angle_y), np.sin(angle_y)
cos_z, sin_z = np.cos(angle_z), np.sin(angle_z)
R_x = np.array([[1, 0, 0], [0, cos_x, -sin_x], [0, sin_x, cos_x]])
R_y = np.array([[cos_y, 0, sin_y], [0, 1, 0], [-sin_y, 0, cos_y]])
R_z = np.array([[cos_z, -sin_z, 0], [sin_z, cos_z, 0], [0, 0, 1]])
R = np.dot(R_z, np.dot(R_y, R_x))
coord = np.dot(coord, np.transpose(R))
return coord, feat, label
class RandomScale(object):
def __init__(self, scale=[0.9, 1.1], anisotropic=False):
self.scale = scale
self.anisotropic = anisotropic
def __call__(self, coord, feat, label):
scale = np.random.uniform(self.scale[0], self.scale[1], 3 if self.anisotropic else 1)
coord *= scale
return coord, feat, label
class RandomShift(object):
def __init__(self, shift=[0.2, 0.2, 0]):
self.shift = shift
def __call__(self, coord, feat, label):
shift_x = np.random.uniform(-self.shift[0], self.shift[0])
shift_y = np.random.uniform(-self.shift[1], self.shift[1])
shift_z = np.random.uniform(-self.shift[2], self.shift[2])
coord += [shift_x, shift_y, shift_z]
return coord, feat, label
class RandomFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, coord, feat, label):
if np.random.rand() < self.p:
coord[:, 0] = -coord[:, 0]
if np.random.rand() < self.p:
coord[:, 1] = -coord[:, 1]
return coord, feat, label
class RandomJitter(object):
def __init__(self, sigma=0.01, clip=0.05):
self.sigma = sigma
self.clip = clip
def __call__(self, coord, feat, label):
assert (self.clip > 0)
jitter = np.clip(self.sigma * np.random.randn(coord.shape[0], 3), -1 * self.clip, self.clip)
coord += jitter
return coord, feat, label
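# Usage sketch for the transforms above (illustrative only; the array shapes and the
# label range are assumptions, not taken from this file):
#   transform = Compose([RandomRotate([0, 0, 1]), RandomScale([0.9, 1.1]),
#                        RandomJitter(), ToTensor()])
#   coord = np.random.rand(1024, 3).astype(np.float32)
#   feat = np.random.rand(1024, 3).astype(np.float32)
#   label = np.random.randint(0, 13, 1024)
#   coord, feat, label = transform(coord, feat, label)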
class ChromaticAutoContrast(object):
def __init__(self, p=0.2, blend_factor=None):
self.p = p
self.blend_factor = blend_factor
def __call__(self, coord, feat, label):
if np.random.rand() < self.p:
            lo = np.min(feat, 0, keepdims=True)
import logging
import numpy as np
from scipy.integrate import quad
from scipy.interpolate import BarycentricInterpolator
from pySDC.core.Errors import CollocationError
class CollBase(object):
"""
Abstract class for collocation
Derived classes will contain everything to do integration over intervals and between nodes, they only need to
provide the set of nodes, the rest is done here (awesome!)
Attributes:
num_nodes (int): number of collocation nodes
tleft (float): left interval point
tright (float): right interval point
nodes (numpy.ndarray): array of quadrature nodes
weights (numpy.ndarray): array of quadrature weights for the full interval
Qmat (numpy.ndarray): matrix containing the weights for tleft to node
Smat (numpy.ndarray): matrix containing the weights for node to node
delta_m (numpy.ndarray): array of distances between nodes
right_is_node (bool): flag to indicate whether right point is collocation node
left_is_node (bool): flag to indicate whether left point is collocation node
"""
def __init__(self, num_nodes, tleft=0, tright=1):
"""
Initialization routine for an collocation object
Args:
num_nodes (int): number of collocation nodes
tleft (float): left interval point
tright (float): right interval point
"""
if not num_nodes > 0:
raise CollocationError('At least one quadrature node required, got %s' % num_nodes)
if not tleft < tright:
raise CollocationError('Interval boundaries are corrupt, got %s and %s' % (tleft, tright))
self.logger = logging.getLogger('collocation')
# Set number of nodes, left and right interval boundaries
self.num_nodes = num_nodes
self.tleft = tleft
self.tright = tright
# Dummy values for the rest
self.nodes = None
self.weights = None
self.Qmat = None
self.Smat = None
self.delta_m = None
self.right_is_node = None
self.left_is_node = None
@staticmethod
def evaluate(weights, data):
"""
Evaluates the quadrature over the full interval
Args:
weights (numpy.ndarray): array of quadrature weights for the full interval
data (numpy.ndarray): f(x) to be integrated
Returns:
numpy.ndarray: integral over f(x) between tleft and tright
"""
if not np.size(weights) == np.size(data):
raise CollocationError("Input size does not match number of weights, but is %s" % np.size(data))
return np.dot(weights, data)
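    # Rough usage sketch for evaluate() (hypothetical: assumes a concrete subclass has
    # already filled in self.nodes and self.weights for the interval [tleft, tright]):
    #   integral = CollBase.evaluate(coll.weights, f(coll.nodes))
    # which approximates the integral of f over [tleft, tright] with the collocation rule.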
def _getWeights(self, a, b):
"""
Computes weights using barycentric interpolation
Args:
a (float): left interval boundary
b (float): right interval boundary
Returns:
numpy.ndarray: weights of the collocation formula given by the nodes
"""
if self.nodes is None:
raise CollocationError("Need nodes before computing weights, got %s" % self.nodes)
circ_one = np.zeros(self.num_nodes)
circ_one[0] = 1.0
tcks = []
for i in range(self.num_nodes):
            tcks.append(BarycentricInterpolator(self.nodes, np.roll(circ_one, i)))
import numpy as np
def precision_at_k(r, k):
"""
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Precision @ k
Raises:
ValueError: len(r) must be >= k
"""
assert k >= 1
r = np.asarray(r)[:k] != 0
if r.size != k:
raise ValueError("Relevance score length < k")
# return np.mean(r)
return sum(r) / len(r)
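# Small worked example (values chosen here for illustration, not from the source):
#   precision_at_k([0, 1, 1, 0], 3) -> 2/3, since two of the first three items are relevant.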
def recall_at_k(rs, test_ur, k):
assert k >= 1
res = []
for user in test_ur.keys():
r = np.asarray(rs[user])[:k] != 0
if r.size != k:
raise ValueError("Relevance score length < k")
if len(test_ur[user]) == 0:
raise KeyError(f"Invalid User Index: {user}")
res.append(sum(r) / len(test_ur[user]))
return np.mean(res)
def mrr_at_k(rs, k):
assert k >= 1
res = 0
for r in rs.values():
r = np.asarray(r)[:k] != 0
for index, item in enumerate(r):
if item == 1:
res += 1 / (index + 1)
return res / len(rs)
def ap(r):
"""
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Average precision
"""
    r = np.asarray(r)
# https://en.wikipedia.org/wiki/High-dynamic-range_imaging
import cv2
import numpy as np
from glob import glob
#* 1. Loading exposure images into a list
images = glob("./images/hdr*")
images = [cv2.imread(image) for image in images]
exposure_times = np.array([15.0, 2.5, 0.25, 0.0333], dtype=np.float32)
#* 2. Merge exposures into HDR image
merge_debevec = cv2.createMergeDebevec()
hdr_debevec = merge_debevec.process(images, times=exposure_times.copy())
merge_robertson = cv2.createMergeRobertson()
hdr_robertson = merge_robertson.process(images, times=exposure_times.copy())
#* 3. Tonemap HDR image
tonemap1 = cv2.createTonemap(gamma=1.5)
res_debevec = tonemap1.process(hdr_debevec.copy())
res_robertson = tonemap1.process(hdr_robertson.copy())
#* 4. Merge exposures using Mertens fusion
merge_mertens = cv2.createMergeMertens()
res_mertens = merge_mertens.process(images)
#* 5. Convert to 8-bit and save
res_debevec_8bit = np.clip(res_debevec*255, 0, 255).astype('uint8')
res_robertson_8bit = np.clip(res_robertson*255, 0, 255).astype('uint8')
res_mertens_8bit = np.clip(res_mertens*255, 0, 255).astype('uint8')
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import os
import coord
import time
import treecorr
from test_helper import get_from_wiki, get_script_name, do_pickle, CaptureLog
from test_helper import assert_raises, timer, assert_warns
from numpy import sin, cos, tan, arcsin, arccos, arctan, arctan2, pi
@timer
def test_direct():
    # If the catalogs are small enough, we can do a direct calculation to see if it comes out right.
    # This should exactly match the treecorr result if brute=True.
ngal = 200
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) )
w1 = rng.random_sample(ngal)
g11 = rng.normal(0,0.2, (ngal,) )
g21 = rng.normal(0,0.2, (ngal,) )
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) )
w2 = rng.random_sample(ngal)
g12 = rng.normal(0,0.2, (ngal,) )
g22 = rng.normal(0,0.2, (ngal,) )
cat1 = treecorr.Catalog(x=x1, y=y1, w=w1, g1=g11, g2=g21)
cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, g1=g12, g2=g22)
min_sep = 1.
max_sep = 50.
nbins = 50
bin_size = np.log(max_sep/min_sep) / nbins
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, brute=True)
gg.process(cat1, cat2)
true_npairs = np.zeros(nbins, dtype=int)
true_weight = np.zeros(nbins, dtype=float)
true_xip = np.zeros(nbins, dtype=complex)
true_xim = np.zeros(nbins, dtype=complex)
for i in range(ngal):
# It's hard to do all the pairs at once with numpy operations (although maybe possible).
# But we can at least do all the pairs for each entry in cat1 at once with arrays.
rsq = (x1[i]-x2)**2 + (y1[i]-y2)**2
r = np.sqrt(rsq)
logr = np.log(r)
expmialpha = ((x1[i]-x2) - 1j*(y1[i]-y2)) / r
ww = w1[i] * w2
xip = ww * (g11[i] + 1j*g21[i]) * (g12 - 1j*g22)
xim = ww * (g11[i] + 1j*g21[i]) * (g12 + 1j*g22) * expmialpha**4
index = np.floor(np.log(r/min_sep) / bin_size).astype(int)
mask = (index >= 0) & (index < nbins)
np.add.at(true_npairs, index[mask], 1)
np.add.at(true_weight, index[mask], ww[mask])
np.add.at(true_xip, index[mask], xip[mask])
np.add.at(true_xim, index[mask], xim[mask])
true_xip /= true_weight
true_xim /= true_weight
print('true_npairs = ',true_npairs)
print('diff = ',gg.npairs - true_npairs)
np.testing.assert_array_equal(gg.npairs, true_npairs)
print('true_weight = ',true_weight)
print('diff = ',gg.weight - true_weight)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
print('true_xip = ',true_xip)
print('gg.xip = ',gg.xip)
print('gg.xip_im = ',gg.xip_im)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-4, atol=1.e-8)
print('true_xim = ',true_xim)
print('gg.xim = ',gg.xim)
print('gg.xim_im = ',gg.xim_im)
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, rtol=1.e-4, atol=1.e-8)
try:
import fitsio
except ImportError:
print('Skipping FITS tests, since fitsio is not installed')
return
# Check that running via the corr2 script works correctly.
config = treecorr.config.read_config('configs/gg_direct.yaml')
cat1.write(config['file_name'])
cat2.write(config['file_name2'])
treecorr.corr2(config)
data = fitsio.read(config['gg_file_name'])
np.testing.assert_allclose(data['r_nom'], gg.rnom)
np.testing.assert_allclose(data['npairs'], gg.npairs)
np.testing.assert_allclose(data['weight'], gg.weight)
np.testing.assert_allclose(data['xip'], gg.xip, rtol=1.e-3)
np.testing.assert_allclose(data['xip_im'], gg.xip_im, rtol=1.e-3)
np.testing.assert_allclose(data['xim'], gg.xim, rtol=1.e-3)
np.testing.assert_allclose(data['xim_im'], gg.xim_im, rtol=1.e-3)
# Repeat with binslop = 0.
# And don't do any top-level recursion so we actually test not going to the leaves.
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, bin_slop=0,
max_top=0)
gg.process(cat1, cat2)
np.testing.assert_array_equal(gg.npairs, true_npairs)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-4, atol=1.e-8)
print('true_xim = ',true_xim)
print('gg.xim = ',gg.xim)
print('gg.xim_im = ',gg.xim_im)
print('diff = ',gg.xim - true_xim.real)
print('max diff = ',np.max(np.abs(gg.xim - true_xim.real)))
print('rel diff = ',(gg.xim - true_xim.real)/true_xim.real)
# This is the one that is highly affected by the approximation from averaging the shears
# before projecting, rather than averaging each shear projected to its own connecting line.
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-3, atol=3.e-4)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, atol=1.e-3)
# Check a few basic operations with a GGCorrelation object.
do_pickle(gg)
gg2 = gg.copy()
gg2 += gg
np.testing.assert_allclose(gg2.npairs, 2*gg.npairs)
np.testing.assert_allclose(gg2.weight, 2*gg.weight)
np.testing.assert_allclose(gg2.meanr, 2*gg.meanr)
np.testing.assert_allclose(gg2.meanlogr, 2*gg.meanlogr)
np.testing.assert_allclose(gg2.xip, 2*gg.xip)
np.testing.assert_allclose(gg2.xip_im, 2*gg.xip_im)
np.testing.assert_allclose(gg2.xim, 2*gg.xim)
np.testing.assert_allclose(gg2.xim_im, 2*gg.xim_im)
gg2.clear()
gg2 += gg
np.testing.assert_allclose(gg2.npairs, gg.npairs)
np.testing.assert_allclose(gg2.weight, gg.weight)
np.testing.assert_allclose(gg2.meanr, gg.meanr)
np.testing.assert_allclose(gg2.meanlogr, gg.meanlogr)
np.testing.assert_allclose(gg2.xip, gg.xip)
np.testing.assert_allclose(gg2.xip_im, gg.xip_im)
np.testing.assert_allclose(gg2.xim, gg.xim)
np.testing.assert_allclose(gg2.xim_im, gg.xim_im)
ascii_name = 'output/gg_ascii.txt'
gg.write(ascii_name, precision=16)
gg3 = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
gg3.read(ascii_name)
np.testing.assert_allclose(gg3.npairs, gg.npairs)
np.testing.assert_allclose(gg3.weight, gg.weight)
np.testing.assert_allclose(gg3.meanr, gg.meanr)
np.testing.assert_allclose(gg3.meanlogr, gg.meanlogr)
np.testing.assert_allclose(gg3.xip, gg.xip)
np.testing.assert_allclose(gg3.xip_im, gg.xip_im)
np.testing.assert_allclose(gg3.xim, gg.xim)
np.testing.assert_allclose(gg3.xim_im, gg.xim_im)
fits_name = 'output/gg_fits.fits'
gg.write(fits_name)
gg4 = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
gg4.read(fits_name)
np.testing.assert_allclose(gg4.npairs, gg.npairs)
np.testing.assert_allclose(gg4.weight, gg.weight)
np.testing.assert_allclose(gg4.meanr, gg.meanr)
np.testing.assert_allclose(gg4.meanlogr, gg.meanlogr)
    np.testing.assert_allclose(gg4.xip, gg.xip)
import os
import argparse
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ReduceLROnPlateau
from Yolo_v2_pytorch.src.anotherMissOh_dataset import AnotherMissOh, Splits, SortFullRect, PersonCLS, PBeHavCLS, FaceCLS
from Yolo_v2_pytorch.src.utils import *
from Yolo_v2_pytorch.src.loss import YoloLoss
import shutil
import cv2
import pickle
import numpy as np
from lib.logger import Logger
from lib.behavior_model import behavior_model
from lib.pytorch_misc import optimistic_restore, de_chunkize, clip_grad_norm, flatten
from lib.focal_loss import FocalLossWithOneHot, FocalLossWithOutOneHot, CELossWithOutOneHot
from lib.face_model import face_model
def get_args():
parser = argparse.ArgumentParser(
"You Only Look Once:Unified, Real-Time Object Detection")
parser.add_argument("--image_size", type=int,
default=448,
help="The common width and height for all images")
parser.add_argument("--batch_size", type=int, default=1,
help="The number of images per batch")
# Training base Setting
parser.add_argument("--momentum", type=float, default=0.9)
parser.add_argument("--decay", type=float, default=0.0005)
parser.add_argument("--dropout", type=float, default=0.5)
parser.add_argument("--num_epoches", type=int, default=100)
parser.add_argument("--test_interval", type=int, default=1,
help="Number of epoches between testing phases")
parser.add_argument("--object_scale", type=float, default=1.0)
parser.add_argument("--noobject_scale", type=float, default=0.5)
parser.add_argument("--class_scale", type=float, default=1.0)
parser.add_argument("--coord_scale", type=float, default=5.0)
parser.add_argument("--reduction", type=int, default=32)
parser.add_argument("--es_min_delta", type=float, default=0.0,
help="Early stopping's parameter:minimum change loss to qualify as an improvement")
parser.add_argument("--es_patience", type=int, default=0,
help="Early stopping's parameter:number of epochs with no improvement after which training will be stopped. Set to 0 to disable this technique.")
parser.add_argument("--pre_trained_model_type",
type=str, choices=["model", "params"],
default="model")
parser.add_argument("--trained_model_path", type=str,
default="./checkpoint/detector") # Pre-training path
parser.add_argument("--saved_path", type=str,
default="./checkpoint/face") # saved training path
parser.add_argument("--conf_threshold", type=float, default=0.35)
parser.add_argument("--nms_threshold", type=float, default=0.5)
parser.add_argument("--img_path", type=str,
default="./data/AnotherMissOh/AnotherMissOh_images_ver3.2/")
parser.add_argument("--json_path", type=str,
default="./data/AnotherMissOh/AnotherMissOh_Visual_ver3.2/")
parser.add_argument("-model", dest='model', type=str, default="baseline")
parser.add_argument("-lr", dest='lr', type=float, default=1e-5)
parser.add_argument("-clip", dest='clip', type=float, default=10.0)
parser.add_argument("-print_interval", dest='print_interval', type=int,
default=100)
parser.add_argument("-b_loss", dest='b_loss', type=str, default='ce')
parser.add_argument("-f_gamma", dest='f_gamma', type=float, default=1.0)
parser.add_argument("-clip_grad", dest='clip_grad',action='store_true')
args = parser.parse_args()
return args
# get args.
opt = get_args()
print(opt)
# splits the episodes int train, val, test
train, val, test = Splits(num_episodes=18)
# load datasets
train_set = AnotherMissOh(train, opt.img_path, opt.json_path, False)
val_set = AnotherMissOh(val, opt.img_path, opt.json_path, False)
test_set = AnotherMissOh(test, opt.img_path, opt.json_path, False)
num_persons = len(PersonCLS)
num_faces = len(FaceCLS)
# logger path
logger_path = 'logs/{}'.format(opt.model)
if os.path.exists(logger_path):
print('exist_{}'.format(logger_path))
else:
os.makedirs(logger_path)
print('mkdir_{}'.format(logger_path))
logger = Logger(logger_path)
def train(opt):
if torch.cuda.is_available():
torch.cuda.manual_seed(123)
device = torch.cuda.current_device()
else:
torch.manual_seed(123)
#p_learning_rate_schedule = {"0": opt.lr/10.0, "5": opt.lr/50.0}
#b_learning_rate_schedule = {"0": opt.lr, "5": opt.lr/10.0, "10": opt.lr/100.0}
training_params = {"batch_size": opt.batch_size,
"shuffle": True,
"drop_last": True,
"collate_fn": custom_collate_fn}
test_params = {"batch_size": opt.batch_size,
"shuffle": False,
"drop_last": False,
"collate_fn": custom_collate_fn}
train_loader = DataLoader(train_set, **training_params)
# define face_model
model_face = face_model(num_persons, num_faces, device)
# trained_persons = opt.trained_model_path + os.sep + "{}".format(
# 'anotherMissOh_only_params_person.pth')
#
# ckpt = torch.load(trained_persons)
# if optimistic_restore(model_face.detector, ckpt):
# print(".....")
# print("loaded pre-trained detector sucessfully.")
# print(".....")
model_face.cuda(device)
# get optim
face_params = [p for n, p in model_face.named_parameters()
if not n.startswith('detector') and p.requires_grad]
f_params = [{'params': face_params, 'lr': opt.lr * 10.0}]
f_criterion = YoloLoss(num_faces, model_face.detector.anchors, opt.reduction)
f_optimizer = torch.optim.SGD(f_params, lr=opt.lr * 10.0,
momentum=opt.momentum,
weight_decay=opt.decay)
f_scheduler = ReduceLROnPlateau(f_optimizer, 'min', patience=3,
factor=0.1, verbose=True,
threshold=0.0001, threshold_mode='abs',
cooldown=1)
model_face.train()
num_iter_per_epoch = len(train_loader)
loss_step = 0
for epoch in range(opt.num_epoches):
f_loss_list = []
for iter, batch in enumerate(train_loader):
verbose=iter % (opt.print_interval*10) == 0
image, info = batch
# sort label info on fullrect
image, label, behavior_label, obj_label, face_label, _ = SortFullRect(image, info, is_train=True)
            if np.array(face_label)
from typing import Any, Union
from typing import Dict, Hashable
import numpy as np
from cumm import tensorview as tv
import json
from collections import abc
from functools import reduce
JSON_INDEX_KEY = "__cumm_io_json_index"
NPDTYPE_TO_JSONARRAY_MAP = {
np.dtype(np.uint64): tv.uint64,
np.dtype(np.uint32): tv.uint32,
np.dtype(np.uint16): tv.uint16,
    np.dtype(np.uint8): tv.uint8,
from __future__ import division,print_function,absolute_import
import numpy as np
import scipy.linalg as la
# Convention for image arrays
# [nPixel,nBand]
# [nRow,nSample,nBand]
def imqim(Q,im):
'''
Compute x.T * Q * x, for every vector x in im;
Assume im is 2d array, with vectors x being rows of im
'''
return np.sum( np.dot( im, Q ) * im, axis=1 )
def outprod(v,w=None):
'''
given vectors v,w return the outer product: vw'
if only one vector is given, return outer product with itself: vv'
'''
if w is None:
w = v
return np.dot(v.reshape(-1,1),w.reshape(1,-1))
def matinv_reg(M,e=1.0e-12):
d = M.shape[0]
t = np.trace(M)
return la.inv(M + e*(t/d)*np.eye(d))
def sqrtm(X):
U,J,_ = la.svd(X)
Xsqrt = np.dot( np.dot(U,np.diag(np.sqrt(J))), U.T )
return Xsqrt
def get_mXYC(imX,imY,mask=None):
## Note: mask must an array of booleans
## make sure X and Y have same number of pixels
assert( imX.shape[:-1] == imY.shape[:-1] )
if mask is not None:
assert(mask.shape == imX.shape[:-1])
        assert(mask.dtype == bool)
dx = imX.shape[-1]
dy = imY.shape[-1]
imX = imX.reshape(-1,dx)
imY = imY.reshape(-1,dy)
if mask is not None:
imX = imX[~mask.ravel(),:]
imY = imY[~mask.ravel(),:]
## Compute mean values
mX = np.mean(imX,axis=0)
mY = np.mean(imY,axis=0)
## Subtract mean values
imX = imX - mX.reshape(1,dx)
imY = imY - mY.reshape(1,dy)
## Compute covariance matrices
nPixels = imX.shape[0]
X = np.dot(imX.T,imX)/nPixels
Y = np.dot(imY.T,imY)/nPixels
C = np.dot(imY.T,imX)/nPixels
return mX,mY,X,Y,C
def nu_est(zz,d,m=1):
''' Given a set of Mahalanobis distances zz = (z-mu)'*R^{-1}*(z-mu)
Use the moment-method to estimate nu for multivariate-t
'''
rnum = np.mean(zz**(1+m/2))
rden = np.mean(zz**(m/2))
kappa = rnum/rden
#print "nu: m,d,k=r3/r: ",m,d,kappa,"=",rnum,"/",rden, zz.shape
if kappa <= d + m:
est_nu = 0
else:
est_nu = 2 + m*kappa/(kappa-(d+m))
print("Estimated nu:",est_nu)
return est_nu
def nu_scale(nu,d,zz):
assert( nu <=0 or nu > 2 )
if nu <= 0:
return zz
else:
return (nu+d)*np.log(1 + zz/(nu-2))
class cca(object):
def __init__(self,n_components):
self.n_components=n_components
def fit(self,imX,imY,mask=None):
self.dx = imX.shape[-1]
self.dy = imY.shape[-1]
self.mX,self.mY,X,Y,C = get_mXYC(imX,imY,mask=mask)
Xsqrt = la.cholesky(X)
Xinvsqrt = la.inv(Xsqrt)
Ysqrt = la.cholesky(Y)
Yinvsqrt = la.inv(Ysqrt)
Ctilde = np.dot(np.dot(Yinvsqrt.T,C),Xinvsqrt)
U,J,Vt = la.svd(Ctilde)
U = U[:,:self.n_components]
Vt = Vt[:self.n_components,:]
self.A = np.dot(Xinvsqrt,Vt.T)
self.B = np.dot(Yinvsqrt,U)
return self
def transform(self,imX,imY):
## make sure X and Y are the same size images
assert( imX.shape[:-1] == imY.shape[:-1] )
## and X and Y have same dimension as training images
assert( imX.shape[-1] == self.dx )
assert( imY.shape[-1] == self.dy )
imShape = list(imX.shape); imShape[-1]=-1
imX = imX.reshape(-1,self.dx)
imY = imY.reshape(-1,self.dy)
imX = imX - self.mX.reshape(1,-1)
imY = imY - self.mY.reshape(1,-1)
imX = np.dot(imX,self.A)
imY = np.dot(imY,self.B)
imX = imX.reshape(imShape)
imY = imY.reshape(imShape)
return imX,imY
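# Hypothetical usage of the cca helper above (image arrays and sizes are placeholders):
#   model = cca(n_components=3)
#   model.fit(imX, imY, mask=None)              # imX: [nRow, nSample, dx], imY: [nRow, nSample, dy]
#   projX, projY = model.transform(imX, imY)    # canonical variates, last axis = n_components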
class acd(object):
def fit(self,imX,imY,nu=0,mask=None,**kw_xtra):
self.nBandsX = dx = imX.shape[-1]
self.nBandsY = dy = imY.shape[-1]
self.mX,self.mY,X,Y,C = get_mXYC(imX,imY,mask=mask)
## Create concatenated matrix ## matlab: [X C'; C Y]
XCCY = np.vstack( [np.hstack([X, C.T]),
np.hstack([C, Y ]) ])
## Invert matrices
self.Qzz = matinv_reg(XCCY)
self.Qxx = matinv_reg(X)
self.Qyy = matinv_reg(Y)
if nu==-1:
d = self.nBandsX+self.nBandsY
imZ = np.vstack( [imX,imY] ).reshape(-1,d) #nb, mean already subtracted
zz = imqim(self.Qzz,imZ)
nu = nu_est(zz,d)
self.nu = nu
def fit_init(self,nu=0):
## Initializes the incremental fit
## Should this just be __init__ ?
if nu<0:
raise RuntimeError("Incremental fit cannot accommodate adaptive nu; use nu>=0")
self.nPixels=0
self.mX = self.mY = self.X = self.Y = self.C = 0
self.nBandsX = self.nBandsY = -1
self.nu = nu
def fit_update(self,imX,imY,mask=None):
if self.nPixels == 0:
## if this is first update, then define sizes
self.nBandsX = imX.shape[-1]
self.nBandsY = imY.shape[-1]
else:
## if not first update, make sure sizes are consistent with first update
assert( self.nBandsX == imX.shape[-1] )
assert( self.nBandsY == imY.shape[-1] )
## N= number of pixels from previous updates
## M= number of pixels in this batch
N = self.nPixels
if mask is not None:
M = np.sum(~mask)
else:
M = imX[...,0].size
## compute mean and covariances for this batch of pixels
mX,mY,X,Y,C = get_mXYC(imX,imY,mask=mask)
## update covariances
f = N*M/((N+M)**2)
self.X = (N*self.X + M*X)/(N+M) + f*outprod(mX-self.mX)
self.Y = (N*self.Y + M*Y)/(N+M) + f*outprod(mY-self.mY)
self.C = (N*self.C + M*C)/(N+M) + f*outprod(mY-self.mY, mX-self.mX)
## update means
self.mX = (N*self.mX + M*mX)/(N+M)
self.mY = (N*self.mY + M*mY)/(N+M)
## update count
self.nPixels = N+M
def fit_complete(self):
## Create concatenated matrix ## matlab: [X C'; C Y]
XCCY = np.vstack( [np.hstack([self.X, self.C.T]),
np.hstack([self.C, self.Y ]) ])
## Invert matrices
self.Qzz = matinv_reg(XCCY)
self.Qxx = matinv_reg(self.X)
self.Qyy = matinv_reg(self.Y)
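    # Sketch of the incremental-fit protocol defined above (the tile iterator is a
    # placeholder; any stream of co-registered image chunks would do):
    #   model = acd()
    #   model.fit_init(nu=0)
    #   for tileX, tileY in tiles:
    #       model.fit_update(tileX, tileY)
    #   model.fit_complete()
    #   anom = model.apply(imX, imY)   # per-pixel anomalousness of change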
def get_xi_zxy(self,imX,imY):
''' return three Mahalanobis distances: xi_z, xi_y, xi_x
'''
imShape = imX.shape[:-1]
dX = imX.shape[-1]
dY = imY.shape[-1]
assert( imX.shape[:-1] == imY.shape[:-1] )
assert( self.nBandsX == dX )
assert( self.nBandsY == dY )
## Convert to 2d and subtract mean
imX = imX.reshape(-1,dX) - self.mX.reshape(1,-1)
imY = imY.reshape(-1,dY) - self.mY.reshape(1,-1)
## Concatenate vectors
imZ = np.hstack( [imX, imY] )
## Compute anomalousness (Mahalanobis) at each pixel
zz = imqim( self.Qzz, imZ )
xx = imqim( self.Qxx, imX )
yy = imqim( self.Qyy, imY )
zz = zz.reshape(imShape)
xx = xx.reshape(imShape)
yy = yy.reshape(imShape)
return zz,xx,yy
def apply(self,imX,imY,nu=-1,beta_x=1,beta_y=1,**kw_xtra):
imShape = imX.shape[:-1]
dX = imX.shape[-1]
dY = imY.shape[-1]
assert( imX.shape[:-1] == imY.shape[:-1] )
assert( self.nBandsX == dX )
assert( self.nBandsY == dY )
zz,xx,yy = self.get_xi_zxy(imX,imY)
## Estimate nu, if requested (nu==-1)
## and if not already estimated (self.nu==-1)
if nu == -1:
nu = self.nu
if nu == -1:
self.nu = nu_est(zz,dX+dY)
#print("acd.apply: nu=",nu,"beta:",beta_x,beta_y)
##Compute anomalousness of change
if (nu == 0):
## Gaussian, nu->infinity
anom = zz - beta_x*xx - beta_y*yy;
else:
anom = (nu+dX+dY)*np.log(nu-2+zz) - \
beta_x*(nu+dX)*np.log(nu-2+xx) - \
beta_y*(nu+dY)*np.log(nu-2+yy);
#offset is (roughly) expected value
        offs = (nu+dX+dY)*np.log(nu-2+dX+dY)
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 6 22:41:15 2020
@author: <NAME>
"""
from keras.datasets import mnist
import matplotlib.pyplot as plt
# Scientific and vector computation for python
import numpy as np
# Optimization module in scipy
from scipy import optimize
from keras.utils import np_utils
def randInitializeWeights(L_in, L_out, epsilon_init=0.12):
"""Initializing weitghs for all neurons between given input layer 'L_in'
and output layer 'L_out' to some initila value different from each
other hence avoiding 'PROBLEM OF SYMMETRIC WEIGHTS'
NOTE:this fucntion take layer l as L_in and layer l+1 as layer L_out and
return a matrix of shape (L_out)*(L_in +1)
"""
W = np.zeros((L_out, 1 + L_in))
    W = np.random.rand(L_out, 1 + L_in)
# Copyright (c) 2018 Graphcore Ltd. All rights reserved.
import numpy as np
import torch
import torch.utils
import torch.utils.data
import popart
from popart.writer import NetWriter
from popart import TensorInfo, DataFlow, SGD, ConstSGD
import onnx
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return torch.nn.Conv2d(in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
class PytorchNetWriter(NetWriter):
def __init__(self, inNames, outNames, optimizer, dataFlow, inputShapeInfo,
module, samplesPerBatch):
"""
module:
-- pytorch module (whose forward does not have the loss layers)
all others:
-- parameters passed to base class.
"""
NetWriter.__init__(self,
inNames=inNames,
outNames=outNames,
optimizer=optimizer,
inputShapeInfo=inputShapeInfo,
dataFlow=dataFlow)
self.module = module
self.samplesPerBatch = samplesPerBatch
def getTorchOptimizer(self):
"""
convert popart's Optimizer to a torch Optimizer
"""
if (isinstance(self.optimizer, SGD)
or isinstance(self.optimizer, ConstSGD)):
return torch.optim.SGD(
self.module.parameters(),
lr=self.optimizer.learningRates().getDefault().val(),
weight_decay=self.optimizer.weightDecays().getDefault().val(),
momentum=self.optimizer.momentums().getDefault().val())
else:
raise RuntimeError("unrecognised optimizer")
def saveModel(self, fnModel):
print("Writing ONNX model to protobuf file %s" % (fnModel, ))
# jump into eval mode, just to write the onnx model.
# note that this might do strange things with batch-normalisation (?)
self.module.eval()
inputDataInfos = [self.inputShapeInfo.get(tid) for tid in self.inNames]
inputData = []
containsint64 = False
for info in inputDataInfos:
shape = info.shape()
dt = info.data_type_lcase()
if dt == "int32":
dt = "int64" # torch labels must be 'long'
containsint64 = True
            inputData.append(torch.from_numpy(np.ones(shape=shape, dtype=dt)))
# flake8: noqa
from pkg_resources import resource_filename
from functools import lru_cache
import warnings
import numpy as np
from ...matlab_funcs import besselh, besselj, gammaln, lscov, quadl
from ...sci_funcs import legendrePlm
from ...core import stress2legendre
def boundary(costheta, a=1, epsilon=.1, nu=0):
"""Projected boundary of a prolate spheroid
Compute the boundary according to equation (4) in
:cite:`Boyde2009` with the addition of the
Poisson's ratio of the object.
.. math::
B(\\theta) = a (1+\\epsilon)
\\left[ (1+\\epsilon)^2 - \\epsilon (1+\\nu)
(2+\\epsilon (1-\\nu)) \\cos^2 \\theta \\right]^{-1/2}
This boundary function was derived for a prolate spheroid under
the assumption that the semi-major axis :math:`a` and the
semi-minor axes :math:`b=c` are defined as
.. math::
a = b \\cdot \\frac{1+ \\epsilon}{1- \\nu \\epsilon}
The boundary function :math:`B(\\theta)` can be derived with
the above relation using the equation for a prolate spheroid.
Parameters
----------
costheta: float or np.ndarray
Cosine of polar coordinates :math:`\\theta`
at which to compute the boundary.
a: float
Equatorial radii of prolate spheroid (semi-minor axis).
epsilon: float
Stretch ratio; defines size of semi-major axis:
:math:`a = (1+\\epsilon) b`. Note that this is not
the eccentricity of the prolate spheroid.
nu: float
Poisson's ratio :math:`\\nu` of the material.
Returns
-------
B: 1d ndarray
Radial object boundary in dependence of theta
:math:`B(\\theta)`.
Notes
-----
For :math:`\\nu=0`, the above equation becomes
equation (4) in :cite:`Boyde2009`.
"""
x = costheta
B = a * (1 + epsilon) \
/ ((1 + epsilon)**2
- epsilon * (1 + nu) * (2 + epsilon * (1 - nu)) * x**2)**.5
return B
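# Brief sketch of evaluating the boundary above (parameter values are illustrative only):
#   costheta = np.cos(np.linspace(0, np.pi, 181))
#   B = boundary(costheta, a=1, epsilon=0.1, nu=0.45)
#   # B.max() / B.min() then roughly gives the projected aspect ratio of the stretched spheroid.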
@lru_cache(maxsize=32)
def get_hgc():
"""Load hypergeometric coefficients from *hypergeomdata2.dat*.
These coefficients were computed by <NAME>
using Wolfram Mathematica.
"""
hpath = resource_filename("ggf.stress.boyde2009", "hypergeomdata2.dat")
hgc = np.loadtxt(hpath)
return hgc
def stress(object_index=1.41, medium_index=1.3465, poisson_ratio=0.45,
semi_minor=2.8466e-6, stretch_ratio=0.1, wavelength=780e-9,
beam_waist=3, power_left=.6, power_right=.6, dist=100e-6,
n_points=100, theta_max=np.pi, field_approx="davis",
ret_legendre_decomp=False, verbose=False):
"""Compute the stress acting on a prolate spheroid
The prolate spheroid has semi-major axis :math:`a` and
semi-minor axis :math:`b=c`.
Parameters
----------
object_index: float
Refractive index of the spheroid
medium_index: float
Refractive index of the surrounding medium
poisson_ratio: float
Poisson's ratio of the spheroid material
semi_minor: float
Semi-minor axis (inner) radius of the stretched object
:math:`b=c`.
stretch_ratio: float
Measure of the deformation, defined as :math:`(a - b) / b`
wavelength: float
Wavelenth of the gaussian beam [m]
beam_waist: float
Beam waist radius of the gaussian beam [wavelengths]
power_left: float
Laser power of the left beam [W]
power_right: float
Laser power of the right beam [W]
dist: float
Distance between beam waist and object center [m]
n_points: int
Number of points to compute stresses for
theta_max: float
Maximum angle to compute stressed for
field_approx: str
TODO
ret_legendre_decomp: bool
If True, return coefficients of decomposition of stress
into Legendre polynomials
verbose: int
Increase verbosity
Returns
-------
theta: 1d ndarray
Angles for which stresses are computed
sigma_rr: 1d ndarray
Radial stress corresponding to angles
coeff: 1d ndarray
If `ret_legendre_decomp` is True, return compositions
of decomposition of stress into Legendre polynomials.
Notes
-----
- The angles `theta` are computed on a grid that does not
include zero and `theta_max`.
- This implementation was first presented in :cite:`Boyde2009`.
"""
if field_approx not in ["davis", "barton"]:
raise ValueError("`field_approx` must be 'davis' or 'barton'")
object_index = complex(object_index)
medium_index = complex(medium_index)
W0 = beam_waist * wavelength
epsilon = stretch_ratio
nu = poisson_ratio
# ZRL = 0.5*medium_index*2*np.pi/wavelength*W0**2 # Rayleigh range [m]
# WZ = W0*(1+(beam_pos+d)**2/ZRL**2)**0.5 # beam waist at specified
# position [m]
K0 = 2 * np.pi / wavelength # wave vector [m]
Alpha = semi_minor * K0 # size parameter
C = 3e8 # speed of light [m/s]
# maximum number of orders
    lmax = int(np.round(2 + Alpha + 4 * (Alpha)**(1 / 3) + 10))
if lmax > 120:
        msg = 'Required number of orders for accurate expansion exceeds allowed maximum! ' \
            + 'Reduce size of trapped particle!'
raise ValueError(msg)
if epsilon == 0:
# spherical object, no point-matching needed (mmax = 0)
mmax = 3
else:
if (epsilon > 0.15):
warnings.warn('Stretching ratio is high: {}'.format(epsilon))
# spheroidal object, point-matching required (mmax has to be divisible
# by 3)
mmax = 6 * lmax
# permittivity in surrounding medium [1]
EpsilonI = medium_index**2
EpsilonII = object_index**2 # permittivity in within cell [1]
MuI = 1.000 # permeability in surrounding medium [1]
MuII = 1.000 # permeability within cell [1]
# wave constant in Maxwell's equations (surrounding medium) [1/m]
K1I = 1j * K0 * EpsilonI
# wave constant in Maxwell's equations (within cell) [1/m]
K1II = 1j * K0 * EpsilonII
# wave constant in Maxwell's equations (surrounding medium) [1/m]
K2I = 1j * K0
# wave constant in Maxwell's equations (within cell) [1/m]
K2II = 1j * K0
KI = (-K1I * K2I)**0.5 # wave vector (surrounding medium) [1/m]
KII = (-K1II * K2II)**0.5 # wave vector (within cell) [1/m]
# dimensionless parameters
k0 = 1 # wave vector
a = semi_minor * K0 # internal radius of stretched cell
d = dist * K0 # distance from cell centre to optical stretcher
# ap = a*(1+stretch_ratio) # semi-major axis (after stretching)
# bp = a*(1-poisson_ratio*stretch_ratio) # semi-minor axis (after
# stretching)
w0 = W0 * K0 # Gaussian width
# wave constant in Maxwell's equations (surrounding medium)
k1I = K1I / K0
# wave constant in Maxwell's equations (within cell)
k1II = K1II / K0
# wave constant in Maxwell's equations (surrounding medium)
k2I = K2I / K0
# wave constant in Maxwell's equations (within cell)
k2II = K2II / K0
kI = KI / K0 # wave vector (surrounding medium)
kII = KII / K0 # wave vector (within cell)
beta = kI # wave vector of Gaussian beam
# other definitions
# amplitude of electric field of left laser [kg m/(s**2 C)]
EL = np.sqrt(power_left / (medium_index * C * W0**2))
# amplitude of electric field of right laser [kg m/(s**2 C)]
ER = np.sqrt(power_right / (medium_index * C * W0**2))
HL = beta / k0 * EL # left laser amplitude of magnetic field
HR = beta / k0 * ER # right laser amplitude of magnetic field
zR = beta * w0**2 / 2 # definition of Rayleigh range
S = (1 + 1j * d / zR)**(-1) # complex amplitude for Taylor expansion
s = 1 / (beta * w0) # expansion parameter for Gaussian (Barton)
# Functions
# object boundary function: r(th) = a*B1(x) x= cos(th)
def B1(x): return boundary(costheta=x, a=1, epsilon=epsilon, nu=nu)
# Riccati Bessel functions and their derivatives
# Riccati Bessel function (psi)
def psi(l, z): return (np.pi / 2 * z)**(1 / 2) * besselj(l + 1 / 2, z)
def psi1(l, z): return (np.pi / (2. * z))**(1 / 2) * \
(z * besselj(l - 1 / 2, z) - l *
besselj(l + 1 / 2, z)) # first derivative (psi')
def psi2(l, z): return (np.pi / 2)**(1 / 2) * (l + l**2 - z**2) * \
besselj(l + 1 / 2, z) * z**(-3 / 2) # second derivative (psi'')
# First order Taylor expansion of psi is too inaccurate for larger values of k*a*Eps.
# Hence, to match 1-st and higher order terms in Eps, subtract the 0-th order terms (no angular dependence)
# from the exact function psi (including angular dependence)
# Riccati Bessel function excluding angular dependence in 0-th order
# (psiex)
def psiex(l, z, x): return psi(l, z * B1(x)) - psi(l, z)
def psi1ex(l, z, x): return psi1(l, z * B1(x)) - \
psi1(l, z) # first derivative of psiex
def psi2ex(l, z, x): return psi2(l, z * B1(x)) - \
psi2(l, z) # second derivative of psi
# defined for abbreviation
def psixx(l, z, x): return psi(l, z * B1(x))
def psi1xx(l, z, x): return psi1(l, z * B1(x))
def psi2xx(l, z, x): return psi2(l, z * B1(x))
# Hankel function and its derivative
def xi(l, z): return (np.pi / 2 * z)**(1 / 2) * besselh(l + 1 / 2, z)
def xi1(l, z): return (np.pi / (2 * z))**(1 / 2) * \
((l + 1) * besselh(l + 1 / 2, z) - z * besselh(l + 3 / 2, z))
def xi2(l, z): return (np.pi / 2)**(1 / 2) / \
z**(3 / 2) * (l + l**2 - z**2) * besselh(l + 1 / 2, z)
# Comments: see above for psiex
def xiex(l, z, x): return xi(l, z * B1(x)) - xi(l, z)
def xi1ex(l, z, x): return xi1(l, z * B1(x)) - xi1(l, z)
def xi2ex(l, z, x): return xi2(l, z * B1(x)) - xi2(l, z)
def xixx(l, z, x): return xi(l, z * B1(x))
def xi1xx(l, z, x): return xi1(l, z * B1(x))
def xi2xx(l, z, x): return xi2(l, z * B1(x))
#% Associated Legendre functions P(m)_l(x) and their derivatives
#% select mth component of vector 'legendre' [P**(m)_l(x)]
# [zeros(m,1);1;zeros(l-m,1)].'*legendre(l,x)
#% legendre polynomial [P**(m)_l(x)]
def legendrePl(l, x): return legendrePlm(1, l, x)
#% legendre polynomial [P**(1)_l(x)]
def legendrePlm1(m, l, x): return (
(l - m + 1.) * legendrePlm(m, l + 1, x) - (l + 1.) * x * legendrePlm(m, l, x)) / (x**2 - 1)
#% derivative d/dx[P**(m)_l(x)]
def legendrePl1(l, x): return legendrePlm1(1, l, x)
# defined to avoid division by zero (which can occur for x=1 in
# legendrePl1...
def legendrePlmex1(m, l, x): return -((l - m + 1) *
legendrePlm(m, l + 1, x) - (l + 1) * x * legendrePlm(m, l, x))
def legendrePlex1(l, x): return legendrePlmex1(1, l, x)
# Hypergeometric and Gamma functions
hypergeomcoeff = get_hgc()
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
# Gaussian beam (incident fields) - in Cartesian basis (either Davis first order or Barton fifth order fields)
# electric and magnetic fields according to Davis (first order)
if field_approx == "davis":
# left
def eExiL(r, th, phi): return EL * (1 + 1j * (r * np.cos(th) + d) / zR)**(-1) * np.exp(-r**2 *
np.sin(th)**2 / (w0**2 * (1 + 1j * (r * np.cos(th) + d) / zR))) * np.exp(1j * beta * (r * np.cos(th) + d))
def eEyiL(r, th, phi): return 0
def eEziL(r, th, phi): return -1j * (1 + 1j * (r * np.cos(th) + d) /
zR)**(-1) * r * np.sin(th) * np.cos(phi) / zR * eExiL(r, th, phi)
def eHxiL(r, th, phi): return 0
def eHyiL(r, th, phi): return HL * (1 + 1j * (r * np.cos(th) + d) / zR)**(-1) * np.exp(-r**2 *
                                   np.sin(th)**2 / (w0**2 * (1 + 1j * (r * np.cos(th) + d) / zR))) * np.exp(1j * beta * (r * np.cos(th) + d))
import math
from abc import ABC, abstractmethod
import numpy as np
import skimage.transform
import torch
from fastmri_utils.data import transforms
from fastmri_utils.data.transforms import fftshift, ifftshift
# ----- Utilities -----
class RadialMaskFunc(object):
""" Generates a golden angle radial spokes mask.
Useful for subsampling a Fast-Fourier-Transform.
Contains radial lines (spokes) through the center of the mask, with
angles spaced according to the golden angle (~111.25°). The first line
has angle 0° (horizontal). An offset parameter can be given to skip
the first `offset*num_lines` lines.
Parameters
----------
shape : array_like
A tuple specifying the size of the mask.
num_lines : int
Number of radial lines (spokes) in the mask.
offset : int, optional
Offset factor for the range of angles in the mask.
"""
def __init__(self, shape, num_lines, offset=0):
self.shape = shape
self.num_lines = num_lines
self.offset = offset
self.mask = self._generate_radial_mask(shape, num_lines, offset)
def __call__(self, shape, seed=None):
if (self.mask.shape[0] != shape[-3]) or (
self.mask.shape[1] != shape[-2]
):
return torch.zeros(shape)
return torch.reshape(
self.mask, (len(shape) - 3) * (1,) + self.shape + (1,)
)
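    # Illustrative call pattern (shapes are assumptions; the trailing dimension of 2
    # mirrors the real/imaginary channels used with this kind of k-space mask):
    #   mask_func = RadialMaskFunc(shape=(320, 320), num_lines=60)
    #   mask = mask_func((1, 320, 320, 2))   # broadcastable 0/1 mask, or zeros on shape mismatch
    #   kspace_masked = kspace * mask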
def _generate_radial_mask(self, shape, num_lines, offset=0):
# generate line template and empty mask
x, y = shape
        d = math.ceil(np.sqrt(2) * max(x, y))
import matplotlib.pyplot as plt
import numpy as np
from distance2detElements import distance2detElements
from castData import castData
def plotAiry(data, showPerc=True, dtype='int64', normalize=False, savefig=0):
"""
Make Airy plot of SPAD-FCS data with 25 channels.
========== ===============================================================
Input Meaning
---------- ---------------------------------------------------------------
data Nx26 array with the FCS data
or data object with data.det0 etc. arrival times
showPerc Show percentages
dtype Data type
normalize Convert total counts to average counts per bin if True
savefig Path to store figure
========== ===============================================================
========== ===============================================================
Output Meaning
---------- ---------------------------------------------------------------
airy 26 element vector with the sum of the rows and plot
========== ===============================================================
"""
if type(data) == np.ndarray:
# data is numpy array with intensity traces
if len(np.shape(data)) > 1:
# if 2D array, convert to dtype and sum over all rows
data = castData(data, dtype)
airy = np.sum(data, axis=0)
else:
airy = data
airy2 = airy[0:25]
else:
# data is FCS2ArrivalTimes.aTimesData object
airy2 = | np.zeros(25) | numpy.zeros |
"""
Purpose
-------
A Portfolio represents a collection of Aggregate objects. Applications include
* Model a book of insurance
* Model a large account with several sub lines
* Model a reinsurance portfolio or large treaty
"""
import collections
import json
import logging
from copy import deepcopy
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
from pandas.io.formats.format import EngFormatter
import pypandoc
import scipy.stats as ss
from scipy.interpolate import interp1d
from IPython.core.display import HTML, display
from matplotlib.ticker import MultipleLocator, StrMethodFormatter, MaxNLocator, FixedLocator, \
FixedFormatter, AutoMinorLocator
from scipy import interpolate
import re
from pathlib import Path
from .distr import Aggregate, Severity
from .spectral import Distortion
from .utils import ft, \
ift, sln_fit, sgamma_fit, \
axiter_factory, AxisManager, html_title, \
suptitle_and_tight, \
MomentAggregator, Answer, subsets, round_bucket, report_time
# fontsize : int or float or {'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'}
matplotlib.rcParams['legend.fontsize'] = 'xx-small'
logger = logging.getLogger('aggregate')
# debug
# info
# warning
# error
# critical
class Portfolio(object):
"""
Portfolio creates and manages a portfolio of Aggregate objects.
:param name: the name of the portfolio, no spaces or underscores
:param spec_list: a list of 1) dictionary: Aggregate object dictionary specifications or
2) Aggregate: An actual aggregate objects or
3) tuple (type, dict) as returned by uw['name'] or
4) string: Names referencing objects in the optionally passed underwriter
"""
def __init__(self, name, spec_list, uw=None):
self.name = name
self.agg_list = []
self.line_names = []
logger.debug(f'Portfolio.__init__| creating new Portfolio {self.name}')
# logger.debug(f'Portfolio.__init__| creating new Portfolio {self.name} at {super(Portfolio, self).__repr__()}')
ma = MomentAggregator()
max_limit = 0
for spec in spec_list:
if isinstance(spec, Aggregate):
# directly passed in an agg object
a = spec
agg_name = spec.name
elif isinstance(spec, str):
# look up object in uw return actual instance
# note here you could do uw.aggregate[spec] and get the dictionary def
# or uw(spec) to return the already-created (and maybe updated) object
# we go the latter route...if user wants they can pull off the dict item themselves
if uw is None:
raise ValueError(f'Must pass valid Underwriter instance to create aggs by name')
try:
a = uw(spec)
                except Exception as e:
logger.error(f'Item {spec} not found in your underwriter')
raise e
agg_name = a.name
elif isinstance(spec, tuple):
# uw returns type, spec
assert spec[0] == 'agg'
a = Aggregate(**spec[1])
agg_name = spec[1]['name']
elif isinstance(spec, dict):
a = Aggregate(**spec)
agg_name = spec['name'][0] if isinstance(spec['name'], list) else spec['name']
else:
raise ValueError(f'Invalid type {type(spec)} passed to Portfolio, expect Aggregate, str or dict.')
self.agg_list.append(a)
self.line_names.append(agg_name)
self.__setattr__(agg_name, a)
ma.add_fs(a.report_ser[('freq', 'ex1')], a.report_ser[('freq', 'ex2')], a.report_ser[('freq', 'ex3')],
a.report_ser[('sev', 'ex1')], a.report_ser[('sev', 'ex2')], a.report_ser[('sev', 'ex3')])
max_limit = max(max_limit, np.max(np.array(a.limit)))
self.line_names_ex = self.line_names + ['total']
for n in self.line_names:
# line names cannot equal total
if n == 'total':
raise ValueError('Line names cannot equal total, it is reserved for...total')
# make a pandas data frame of all the statistics_df
temp_report = pd.concat([a.report_ser for a in self.agg_list], axis=1)
# max_limit = np.inf # np.max([np.max(a.get('limit', np.inf)) for a in spec_list])
temp = pd.DataFrame(ma.stats_series('total', max_limit, 0.999, remix=False))
self.statistics_df = pd.concat([temp_report, temp], axis=1)
# future storage
self.density_df = None
self.augmented_df = None
self.epd_2_assets = {}
self.assets_2_epd = {}
self.priority_capital_df = None
self.priority_analysis_df = None
self.audit_df = None
self.padding = 0
self.tilt_amount = 0
self._linear_quantile_function = None
self._cdf = None
self._pdf = None
self._tail_var = None
self._tail_var2 = None
self._inverse_tail_var = None
self.bs = 0
self.log2 = 0
self.ex = 0
self.last_update = 0
self.hash_rep_at_last_update = ''
self._distortion = None
self.sev_calc = ''
self._remove_fuzz = 0
self.approx_type = ""
self.approx_freq_ge = 0
self.discretization_calc = ''
# for storing the info about the quantile function
self.q_temp = None
self._renamer = None
self._line_renamer = None
self._tm_renamer = None
# if created by uw it stores the program here
self.program = ''
self.audit_percentiles = [.9, .95, .99, .996, .999, .9999, 1 - 1e-6]
self.dists = None
self.dist_ans = None
def __str__(self):
"""
Goal: readability
:return:
"""
# cannot use ex, etc. because object may not have been updated
if self.audit_df is None:
ex = self.statistics_df.loc[('agg', 'mean'), 'total']
empex = np.nan
isupdated = False
else:
ex = self.get_stat(stat="Mean")
empex = self.get_stat()
isupdated = True
# df = pd.DataFrame(columns=['Statistic', 'Value'])
# df = df.set_index('Statistic')
# df.loc['Portfolio Name', 'Value'] = self.name
# df.loc['Expected loss', 'Value'] = ex
# df.loc['Model loss', 'Value'] = empex
# df.loc['Error', 'Value'] = ex / empex - 1
# print(df)
s = f'Portfolio name {self.name:<15s}\n' \
f'Theoretic expected loss {ex:15,.1f}\n' \
f'Actual expected loss {empex:15,.1f}\n' \
f'Error {empex / ex - 1:15.6f}\n' \
f'Discretization size {self.log2:15d}\n' \
f'Bucket size {self.bs:15.2f}\n' \
f'{object.__repr__(self)}'
if not isupdated:
s += '\nNOT UPDATED!'
return s
@property
def distortion(self):
return self._distortion
def remove_fuzz(self, df=None, eps=0, force=False, log=''):
"""
        Remove fuzz at threshold eps. If not passed, use np.finfo(float).eps.
Apply to self.density_df unless df is not None
Only apply if self.remove_fuzz or force
:param eps:
:param df: apply to dataframe df, default = self.density_df
:param force: do regardless of self.remove_fuzz
:return:
"""
if df is None:
df = self.density_df
if eps == 0:
            eps = np.finfo(float).eps
if self._remove_fuzz or force:
logger.debug(f'Portfolio.remove_fuzz | Removing fuzz from {self.name} dataframe, caller {log}')
df[df.select_dtypes(include=['float64']).columns] = \
df.select_dtypes(include=['float64']).applymap(lambda x: 0 if abs(x) < eps else x)
def __repr__(self):
"""
        Goal: unambiguous
:return:
"""
# return str(self.to_dict())
# this messes up when port = self has been enhanced...
if isinstance(self, Portfolio):
s = [super(Portfolio, self).__repr__(), f"{{ 'name': '{self.name}'"]
else:
s = [f'Non-Portfolio (enhanced) object {{ "name": "{self.name}"']
agg_list = [str({k: v for k, v in a.__dict__.items() if k in Aggregate.aggregate_keys})
for a in self.agg_list]
s.append(f"'spec': [{', '.join(agg_list)}]")
if self.bs > 0:
s.append(f'"bs": {self.bs}')
s.append(f'"log2": {self.log2}')
s.append(f'"padding": {self.padding}')
s.append(f'"tilt_amount": {self.tilt_amount}')
s.append(f'"distortion": "{repr(self._distortion)}"')
s.append(f'"sev_calc": "{self.sev_calc}"')
s.append(f'"remove_fuzz": {self._remove_fuzz}')
s.append(f'"approx_type": "{self.approx_type}"')
s.append(f'"approx_freq_ge": {self.approx_freq_ge}')
return ', '.join(s) + '}'
def _repr_html_(self):
s = [f'<h2>Portfolio object: {self.name}</h2>']
_n = len(self.agg_list)
_s = "" if _n <= 1 else "s"
s.append(f'Portfolio contains {_n} aggregate component{_s}')
summary_sl = (slice(None), ['mean', 'cv', 'skew'])
if self.audit_df is not None:
_df = pd.concat((self.statistics_df.loc[summary_sl, :],
self.audit_df[['Mean', 'EmpMean', 'MeanErr', 'CV', 'EmpCV', 'CVErr', 'P99.0']].T),
sort=True)
s.append(_df._repr_html_())
else:
s.append(self.statistics_df.loc[summary_sl, :]._repr_html_())
return '\n'.join(s)
def __hash__(self):
"""
hashing behavior
:return:
"""
return hash(repr(self.__dict__))
def __iter__(self):
"""
make Portfolio iterable: for each x in Portfolio
:return:
"""
return iter(self.agg_list)
def __getitem__(self, item):
"""
allow Portfolio[slice] to return bits of agg_list
:param item:
:return:
"""
if type(item) == str:
return self.agg_list[self.line_names.index(item)]
return self.agg_list[item]
@property
def audit(self):
"""
Renamed version of the audit dataframe
:return:
"""
if self.audit_df is not None:
return self.audit_df.rename(columns=self.renamer, index=self.line_renamer).T
@property
def density(self):
"""
Renamed version of the density_df dataframe
:return:
"""
if self.density_df is not None:
return self.density_df.rename(columns=self.renamer)
@property
def augmented(self):
"""
Renamed version of the density_df dataframe
:return:
"""
if self.augmented_df is not None:
return self.augmented_df.rename(columns=self.renamer)
@property
def statistics(self):
"""
Renamed version of the statistics dataframe
:return:
"""
return self.statistics_df.rename(columns=self.renamer)
def json(self, stream=None):
"""
write object as json
:param stream:
:return: stream or text
"""
args = dict()
args["bs"] = self.bs
args["log2"] = self.log2
args["padding"] = self.padding
args["tilt_amount"] = self.tilt_amount
args["distortion"] = repr(self._distortion)
args["sev_calc"] = self.sev_calc
args["remove_fuzz"] = self._remove_fuzz
args["approx_type"] = self.approx_type
args["approx_freq_ge"] = self.approx_freq_ge
args["last_update"] = str(self.last_update)
args["hash_rep_at_last_update"] = str(self.hash_rep_at_last_update)
d = dict()
# original
# d[self.name] = dict(args=args, spec=[a.spec for a in self.agg_list])
d['name'] = self.name
d['args'] = args
d['spec_list'] = [a._spec for a in self.agg_list]
        logger.debug(f'Portfolio.json | dumping {self.name} to {stream}')
s = json.dumps(d) # , default_flow_style=False, indent=4)
logger.debug(f'Portfolio.json | {s}')
if stream is None:
return s
else:
return stream.write(s)
def save(self, filename='', mode='a'):
"""
persist to json in filename; if none save to user.json
:param filename:
:param mode: for file open
:return:
"""
if filename == "":
filename = Path.home() / 'agg/user.json'
filename.parent.mkdir(parents=True, exist_ok=True)
with filename.open(mode=mode, encoding='utf-8') as f:
self.json(stream=f)
logger.debug(f'Portfolio.save | {self.name} saved to {filename}')
def __add__(self, other):
"""
Add two portfolio objects INDEPENDENT sum (down road can look for the same severity...)
:param other:
:return:
"""
assert isinstance(other, Portfolio)
new_spec = []
for a in self.agg_list:
c = deepcopy(a._spec)
c['name'] = c['name']
new_spec.append(c)
for a in other.agg_list:
c = deepcopy(a._spec)
c['name'] = c['name']
new_spec.append(c)
return Portfolio(f'({self.name}) + ({other.name})', new_spec)
def __rmul__(self, other):
"""
new = other * self; treat as scale change
:param other:
:return:
"""
assert other > 0
new_spec = []
for a in self.agg_list:
new_spec.append(deepcopy(a._spec))
for d in new_spec:
# d is a dictionary agg spec, need to adjust the severity
s = d['severity']
if 'mean' in s:
s['mean'] *= other
elif 'scale' in s:
s['scale'] *= other
else:
raise ValueError(f"Cannot adjust s['name'] for scale")
return Portfolio(f'{other} x {self.name}', new_spec)
def __mul__(self, other):
"""
new = self * other, other integer, sum of other independent copies
:param other:
:return:
"""
assert isinstance(other, int)
new_spec = []
for a in self.agg_list:
new_spec.append(deepcopy(a._spec))
for d in new_spec:
# d is a dictionary agg spec, need to adjust the frequency
# TODO better freq dists; deal with Bernoulli where n=log<1
d['frequency']['n'] *= other
return Portfolio(f'Sum of {other} copies of {self.name}', new_spec)
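    # Hedged examples of the arithmetic defined above (purely illustrative):
    #   port_a + port_b   independent sum of the two books
    #   0.5 * port_a      scales every severity by 0.5
    #   port_a * 3        sum of three independent copies (frequency times 3)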
def snap(self, x):
"""
snap value x to the index of density_df
:param x:
:return:
"""
ix = self.density_df.index.get_loc(x, 'nearest')
return self.density_df.iat[ix, 0]
def audits(self, kind='all', **kwargs):
"""
produce audit plots to assess accuracy of outputs.
Currently only exeqa available
:param kind:
:param kwargs: passed to pandas plot, e.g. set xlim
:return:
"""
if kind == 'all':
kind = ['exeqa']
for k in kind:
if k == 'exeqa':
temp = self.density_df.filter(regex='exeqa_.*(?<!total)$').copy()
temp['sum'] = temp.sum(axis=1)
temp['err'] = temp['sum'] - temp.index
f, axs = plt.subplots(1, 2, figsize=(8, 3.75), constrained_layout=True)
ax = axs.flatten()
a = temp['err'].abs().plot(logy=True, title=f'Exeqa Sum Error', ax=ax[1], **kwargs)
a.plot(self.density_df.loss, self.density_df.p_total, label='p_total')
a.plot(self.density_df.loss, self.density_df.p_total * temp.err, label='prob wtd err')
a.grid('b')
a.legend(loc='lower left')
if 'xlim' in kwargs:
kwargs['ylim'] = kwargs['xlim']
temp.filter(regex='exeqa_.*(?<!total)$|sum').plot(title='exeqa and sum of parts', ax=ax[0],
**kwargs).grid('b')
f.suptitle(f'E[Xi | X=x] vs. Sum of Parts\nbs={self.bs}, log2={self.log2}, padding={self.padding}',
fontsize='x-large')
return f # for doc maker
def get_stat(self, line='total', stat='EmpMean'):
"""
Other analysis suggests that iloc and iat are about same speed but slower than ix
:param line:
:param stat:
:return:
"""
return self.audit_df.loc[line, stat]
def q(self, p, kind='lower'):
"""
return lowest quantile, appropriate for discrete bucketing.
quantile guaranteed to be in the index
nearest does not work because you always want to pick rounding up
Definition 2.1 (Quantiles)
x(α) = qα(X) = inf{x ∈ R : P[X ≤ x] ≥ α} is the lower α-quantile of X
x(α) = qα(X) = inf{x ∈ R : P[X ≤ x] > α} is the upper α-quantile of X.
We use the x-notation if the dependence on X is evident, otherwise the q-notation.
Acerbi and Tasche (2002)
:param p:
:param kind: allow upper or lower quantiles
:return:
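Worked example (illustrative): if P[X = 0] = 0.5 and P[X = 100] = 0.5, then the lower
0.5-quantile is 0 (P[X <= 0] >= 0.5 already holds) while the upper 0.5-quantile is 100
(P[X <= x] > 0.5 first holds at x = 100); away from attained values of F the two coincide.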
"""
if self._linear_quantile_function is None:
# revised Dec 2019
self._linear_quantile_function = {}
self.q_temp = self.density_df[['loss', 'F']].groupby('F').agg({'loss': np.min})
self.q_temp.loc[1, 'loss'] = self.q_temp.loss.iloc[-1]
self.q_temp.loc[0, 'loss'] = 0
# revised Jan 2020
# F loss loss_s
# 0.000000 0.0 0.0
# 0.667617 0.0 4500.0
# a value here is V and ^ which is the same: correct
# 0.815977 4500.0 5500.0
# 0.937361 5500.0 9000.0
# upper and lower only differ at exact values of F where lower is loss and upper is loss_s
# in between must take the next value for lower and the previous value for upper to get the same answer
self.q_temp = self.q_temp.sort_index()
# that q_temp left cts, want right continuous:
self.q_temp['loss_s'] = self.q_temp.loss.shift(-1)
self.q_temp.iloc[-1, 1] = self.q_temp.iloc[-1, 0]
# create interp functions
# old
# self._linear_quantile_function['upper'] = \
# interpolate.interp1d(self.q_temp.index, self.q_temp.loss_s, kind='previous', bounds_error=False,
# fill_value='extrapolate')
# self._linear_quantile_function['lower'] = \
# interpolate.interp1d(self.q_temp.index, self.q_temp.loss, kind='previous', bounds_error=False,
# fill_value='extrapolate')
# revised
self._linear_quantile_function['upper'] = \
interpolate.interp1d(self.q_temp.index, self.q_temp.loss_s, kind='previous', bounds_error=False,
fill_value='extrapolate')
self._linear_quantile_function['lower'] = \
interpolate.interp1d(self.q_temp.index, self.q_temp.loss, kind='next', bounds_error=False,
fill_value='extrapolate')
# change to using loss_s
self._linear_quantile_function['middle'] = \
interpolate.interp1d(self.q_temp.index, self.q_temp.loss_s, kind='linear', bounds_error=False,
fill_value='extrapolate')
l = float(self._linear_quantile_function[kind](p))
# because we are not interpolating the returned value must (should) be in the index...
assert kind == 'middle' or l in self.density_df.index
return l
def cdf(self, x):
"""
distribution function
:param x:
:return:
"""
if self._cdf is None:
# Dec 2019: kind='linear' --> kind='previous'
self._cdf = interpolate.interp1d(self.density_df.loss, self.density_df.F, kind='previous',
bounds_error=False, fill_value='extrapolate')
return self._cdf(x)
def sf(self, x):
"""
survival function
:param x:
:return:
"""
return 1 - self.cdf(x)
def pdf(self, x):
"""
probability density function, assuming a continuous approximation of the bucketed density
:param x:
:return:
"""
if self._pdf is None:
self._pdf = interpolate.interp1d(self.density_df.loss, self.density_df.p_total, kind='linear',
bounds_error=False, fill_value='extrapolate')
return self._pdf(x) / self.bs
# # make some handy aliases; delete these go strictly with scipy.stats notation
# def F(self, x):
# """
# handy alias for distribution, CDF
# :param x:
# :return:
# """
# return self.cdf(x)
#
# def S(self, x):
# """
# handy alias for survival function, S
# :param x:
# :return:
# """
# return self.sf(x)
def var(self, p):
"""
value at risk = alias for quantile function
:param p:
:return:
"""
return self.q(p)
def tvar(self, p, kind='interp'):
"""
Compute the tail value at risk at threshold p
Really this function returns ES
Definition 2.6 (Tail mean and Expected Shortfall)
Assume E[X−] < ∞. Then
x¯(α) = TM_α(X) = α^{−1}E[X 1{X≤x(α)}] + x(α) (α − P[X ≤ x(α)])
is the α-tail mean of X at level α.
Acerbi and Tasche (2002)
We are interested in the right hand exceedence [?? note > vs ≥]
α^{−1}E[X 1{X > x(α)}] + x(α) (P[X ≤ x(α)] − α)
McNeil etc. p66-70 - this follows from def of ES as an integral
of the quantile function
:param p:
:param kind: 'interp' = interpolate exgta_total; 'tail' tail integral, 'body' NYI - (ex - body integral)/(1-p)+v
'inverse' from capital to p using interp method
:return:
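Worked example (illustrative): with P[X = 0] = 0.99 and P[X = 1000] = 0.01, q(0.995) = 1000
and the right-tail formula gives
TVaR(0.995) = (E[X 1{X > 1000}] + 1000 (P[X <= 1000] - 0.995)) / (1 - 0.995)
            = (0 + 1000 x 0.005) / 0.005 = 1000,
the average loss over the worst 0.5 percent of outcomes.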
"""
assert self.density_df is not None
if kind == 'tail':
# original
# _var = self.q(p)
# ex = self.density_df.loc[_var + self.bs:, ['p_total', 'loss']].product(axis=1).sum()
# pip = (self.density_df.loc[_var, 'F'] - p) * _var
# t_var = 1 / (1 - p) * (ex + pip)
# return t_var
# revised
if self._tail_var2 is None:
self._tail_var2 = self.density_df[['p_total', 'loss']].product(axis=1).iloc[::-1].cumsum().iloc[::-1]
_var = self.q(p)
ex = self._tail_var2.loc[_var + self.bs]
pip = (self.density_df.loc[_var, 'F'] - p) * _var
t_var = 1 / (1 - p) * (ex + pip)
return t_var
elif kind == 'interp':
# original implementation interpolated
if self._tail_var is None:
# make tvar function
sup = (self.density_df.p_total[::-1] > 0).idxmax()
if sup == self.density_df.index[-1]:
sup = np.inf
_x = self.density_df.F
_y = self.density_df.exgta_total
else:
_x = self.density_df.F.values[:self.density_df.index.get_loc(sup)]
_y = self.density_df.exgta_total.values[:self.density_df.index.get_loc(sup)]
p0 = self.density_df.at[0., 'F']
if p0 > 0:
ps = np.linspace(0, p0, 200, endpoint=False)
tempx = np.hstack((ps, _x))
tempy = np.hstack((self.ex / (1-ps), _y))
self._tail_var = interpolate.interp1d(tempx, tempy,
kind='linear', bounds_error=False,
fill_value=(self.ex, sup))
else:
self._tail_var = interpolate.interp1d(_x, _y, kind='linear', bounds_error=False,
fill_value=(self.ex, sup))
if isinstance(p, (float, np.floating)):
return float(self._tail_var(p))
else:
return self._tail_var(p)
elif kind == 'inverse':
if self._inverse_tail_var is None:
# make tvar function
self._inverse_tail_var = interpolate.interp1d(self.density_df.exgta_total, self.density_df.F,
kind='linear', bounds_error=False,
fill_value='extrapolate')
if isinstance(p, (int, np.integer, float, np.floating)):
return float(self._inverse_tail_var(p))
else:
return self._inverse_tail_var(p)
else:
raise ValueError(f'Inadmissible kind {kind} passed to tvar; options are interp (default), inverse, or tail')
def tvar_threshold(self, p, kind):
"""
Find the value pt such that TVaR(pt) = VaR(p) using numerical Newton Raphson
"""
a = self.q(p, kind)
def f(p):
return self.tvar(p) - a
loop = 0
p1 = 1 - 2 * (1 - p)
fp1 = f(p1)
delta = 1e-5
while abs(fp1) > 1e-6 and loop < 10:
df1 = (f(p1 + delta) - fp1) / delta
p1 = p1 - fp1 / df1
fp1 = f(p1)
loop += 1
if loop == 10:
raise ValueError(f'Difficulty computing TVaR to match VaR at p={p}')
return p1
def equal_risk_var_tvar(self, p_v, p_t):
"""
solve for equal risk var and tvar: find pv and pt such that sum of
individual line VaR/TVaR at pv/pt equals the VaR(p) or TVaR(p_t)
these won't return elements in the index because you have to interpolate
hence using kind=middle
"""
# these two should obviously be the same
target_v = self.q(p_v, 'middle')
target_t = self.tvar(p_t)
def fv(p):
return sum([float(a.q(p, 'middle')) for a in self]) - target_v
def ft(p):
return sum([float(a.tvar(p)) for a in self]) - target_t
ans = np.zeros(2)
for i, f in enumerate([fv, ft]):
p1 = 1 - 2 * (1 - (p_v if i == 0 else p_t))
fp1 = f(p1)
loop = 0
delta = 1e-5
while abs(fp1) > 1e-6 and loop < 10:
dfp1 = (f(p1 + delta) - fp1) / delta
p1 = p1 - fp1 / dfp1
fp1 = f(p1)
loop += 1
if loop == 10:
    raise ValueError(f'Trouble finding equal risk {"TVaR" if i else "VaR"} at p_v={p_v}, p_t={p_t}. '
                     'No convergence after 10 iterations. ')
ans[i] = p1
return ans
def equal_risk_epd(self, a):
"""
determine the common epd threshold so sum sa equals a
"""
def f(p):
return sum([self.epd_2_assets[(l, 0)](p) for l in self.line_names]) - a
p1 = self.assets_2_epd[('total', 0)](a)
fp1 = f(p1)
loop = 0
delta = 1e-5
while abs(fp1) > 1e-6 and loop < 10:
dfp1 = (f(p1 + delta) - fp1) / delta
p1 = p1 - fp1 / dfp1
fp1 = f(p1)
loop += 1
if loop == 10:
    raise ValueError(f'Trouble finding equal risk EPD at a={a}. No convergence after 10 iterations. ')
return p1
def merton_perold(self, p, kind='lower'):
"""
compute Merton-Perold capital allocation at VaR(p) capital using VaR as risk measure
v = q(p)
TODO TVaR version of Merton-Perold
"""
# figure total assets
a = self.q(p, kind)
# shorthand abbreviation
df = self.density_df
loss = df.loss
ans = []
total = 0
for l in self.line_names:
q = self.density_df.loss.iloc[np.searchsorted(self.density_df[f'ημ_{l}'].cumsum(), .995, side='right')]
diff = a - q
ans.append(diff)
total += diff
ans.append(total)
return ans
def cotvar(self, p):
"""
make the p co-tvar asset allocation using ISA
Asset alloc = exgta = tail expected value, treating TVaR like a pricing variable
"""
av = self.q(p)
return self.density_df.loc[av, [f'exgta_{l}' for l in self.line_names_ex]].values
def as_severity(self, limit=np.inf, attachment=0, conditional=False):
"""
convert into a severity without recomputing
throws error if self not updated
:param limit:
:param attachment:
:param conditional:
:return:
"""
if self.density_df is None:
raise ValueError('Must update prior to converting to severity')
return Severity(sev_name=self, sev_a=self.log2, sev_b=self.bs,
exp_attachment=attachment, exp_limit=limit, sev_conditional=conditional)
def fit(self, approx_type='slognorm', output='agg'):
"""
returns a dictionary specification of the portfolio aggregate_project
if updated uses empirical moments, otherwise uses theoretic moments
:param approx_type: slognorm | sgamma
:param output: return a dict or agg language specification
:return:
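Illustrative output (sketch, numbers made up): with approx_type='slognorm' and output='agg'
the returned program looks like
    'agg slog~MyPor 1 claim sev 1234.5 * lognorm 0.75 + 10.2  fixed'
i.e. one fixed claim whose severity is a shifted lognormal matched to the portfolio's
mean, cv and skewness.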
"""
if self.audit_df is None:
# not updated
m = self.statistics_df.loc[('agg', 'mean'), 'total']
cv = self.statistics_df.loc[('agg', 'cv'), 'total']
skew = self.statistics_df.loc[('agg', 'skew'), 'total']
else:
# use statistics_df matched to computed aggregate_project
m, cv, skew = self.audit_df.loc['total', ['EmpMean', 'EmpCV', 'EmpSkew']]
name = f'{approx_type[0:4]}~{self.name[0:5]}'
agg_str = f'agg {name} 1 claim sev '
if approx_type == 'slognorm':
shift, mu, sigma = sln_fit(m, cv, skew)
# self.fzapprox = ss.lognorm(sigma, scale=np.exp(mu), loc=shift)
sev = {'sev_name': 'lognorm', 'sev_shape': sigma, 'sev_scale': np.exp(mu), 'sev_loc': shift}
agg_str += f'{np.exp(mu)} * lognorm {sigma} + {shift} '
elif approx_type == 'sgamma':
shift, alpha, theta = sgamma_fit(m, cv, skew)
# self.fzapprox = ss.gamma(alpha, scale=theta, loc=shift)
sev = {'sev_name': 'gamma', 'sev_a': alpha, 'sev_scale': theta, 'sev_loc': shift}
agg_str += f'{theta} * gamma {alpha} + {shift} '
else:
raise ValueError(f'Inadmissible approx_type {approx_type} passed to fit')
if output == 'agg':
agg_str += ' fixed'
return agg_str
else:
return {'name': name, 'note': f'frozen version of {self.name}', 'exp_en': 1, **sev, 'freq_name': 'fixed'}
def collapse(self, approx_type='slognorm'):
"""
returns new Portfolio with the fit
Deprecated...prefer uw(self.fit()) to go through the agg language approach
:param approx_type: slognorm | sgamma
:return:
"""
spec = self.fit(approx_type, output='dict')
logger.debug(f'Portfolio.collapse | Collapse created new Portfolio with spec {spec}')
logger.warning(f'Portfolio.collapse | Collapse is deprecated; use fit() instead.')
return Portfolio(f'Collapsed {self.name}', [spec])
def percentiles(self, pvalues=None):
"""
report_ser on percentiles and large losses
uses interpolation, audit_df uses nearest
:pvalues: optional vector of log values to use. If None sensible defaults provided
:return: DataFrame of percentiles indexed by line and log
"""
df = pd.DataFrame(columns=['line', 'log', 'Agg Quantile'])
df = df.set_index(['line', 'log'])
# df.columns.name = 'perspective'
if pvalues is None:
pvalues = [0.5, 0.75, 0.8, 0.85, 0.9, 0.95, 0.98, 0.99, 0.994, 0.995, 0.999, 0.9999]
for line in self.line_names_ex:
q_agg = interpolate.interp1d(self.density_df[f'p_{line}'].cumsum(), self.density_df.loss,
kind='linear', bounds_error=False, fill_value='extrapolate')
for p in pvalues:
qq = q_agg(p)
df.loc[(line, p), :] = [float(qq)]
df = df.unstack(level=1)
return df
def recommend_bucket(self):
"""
data to help estimate a good bucket size
:return:
"""
df = pd.DataFrame(columns=['line', 'bs10'])
df = df.set_index('line')
for a in self.agg_list:
df.loc[a.name, :] = [a.recommend_bucket(10)]
df['bs11'] = df['bs10'] / 2
df['bs12'] = df['bs10'] / 4
df['bs13'] = df['bs10'] / 8
df['bs14'] = df['bs10'] / 16
df['bs15'] = df['bs10'] / 32
df['bs16'] = df['bs10'] / 64
df['bs17'] = df['bs10'] / 128
df['bs18'] = df['bs10'] / 256
df['bs19'] = df['bs10'] / 512
df['bs20'] = df['bs10'] / 1024
df.loc['total', :] = df.sum()
return df
def best_bucket(self, log2=16):
bs = sum([a.recommend_bucket(log2) for a in self])
return round_bucket(bs)
def update(self, log2, bs, approx_freq_ge=100, approx_type='slognorm', remove_fuzz=False,
sev_calc='discrete', discretization_calc='survival', normalize=True, padding=1, tilt_amount=0, epds=None,
trim_df=False, verbose=False, add_exa=True, aggregate_cession_function=None):
"""
create density_df, performs convolution. optionally adds additional information if ``add_exa=True``
for allocation and priority analysis
tilting: [@Grubel1999]: Computation of Compound Distributions I: Aliasing Errors and Exponential Tilting
(ASTIN 1999)
tilt x num_buckets < 20 is recommended (see p. 210)
num buckets and max loss from bucket size
:param log2:
:param bs: bucket size
:param approx_freq_ge: use method of moments if frequency is larger than ``approx_freq_ge``
:param approx_type: type of method of moments approx to use (slognorm or sgamma)
:param remove_fuzz: remove machine noise elements from FFT
:param sev_calc: how to calculate the severity, discrete (point masses as xs) or continuous (uniform between xs points)
:param discretization_calc: survival or distribution (accurate on right or left tails)
:param normalize: if true, normalize the severity so sum probs = 1. This is generally what you want; disable only in special cases.
:param padding: for fft 1 = double, 2 = quadruple
:param tilt_amount: for tilting methodology - see notes on density for suggested parameters
:param epds: epd points for priority analysis; if None-> sensible defaults
:param trim_df: remove unnecessary columns from density_df before returning
:param verbose: level of output
:param add_exa: run add_exa to append additional allocation information needed for pricing; if add_exa also add
epd info
:param aggregate_cession_function: function of Portfolio object that adjusts individual line densities; applied
after line aggs created but before creating not-lines; actual statistics do not reflect impact.
:return:
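Typical call (sketch; the argument values are illustrative only):
    port.update(log2=16, bs=port.best_bucket(16), remove_fuzz=True)
builds a grid of 2**16 buckets of width bs, convolves the lines by FFT and populates
density_df (plus the exa_* columns when add_exa=True).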
"""
self.log2 = log2
self.bs = bs
self.padding = padding
self.tilt_amount = tilt_amount
self.approx_type = approx_type
self.sev_calc = sev_calc
self._remove_fuzz = remove_fuzz
self.approx_type = approx_type
self.approx_freq_ge = approx_freq_ge
self.discretization_calc = discretization_calc
if self.hash_rep_at_last_update == hash(self):
logger.warning(f'Nothing has changed since last update at {self.last_update}')
return
self._linear_quantile_function = None
ft_line_density = {}
# line_density = {}
# not_line_density = {}
# add the densities
# tilting: [@Grubel1999]: Computation of Compound Distributions I: Aliasing Errors and Exponential Tilting
# (ASTIN 1999)
# tilt x num_buckets < 20 recommended (see p. 210)
# num buckets and max loss from bucket size
N = 1 << log2
MAXL = N * bs
xs = np.linspace(0, MAXL, N, endpoint=False)
# make all the single line aggs
# note: looks like duplication but will all be references
# easier for add_exa to have as part of the portfolio module
# tilt
if self.tilt_amount != 0:
tilt_vector = np.exp(self.tilt_amount * np.arange(N))
else:
tilt_vector = None
# where the answer will live
self.density_df = pd.DataFrame(index=xs)
self.density_df['loss'] = xs
ft_all = None
for agg in self.agg_list:
raw_nm = agg.name
nm = f'p_{agg.name}'
_a = agg.update(xs, self.padding, tilt_vector, 'exact' if agg.n < approx_freq_ge else approx_type,
sev_calc, discretization_calc, normalize, verbose=verbose)
if verbose:
display(_a)
if aggregate_cession_function is not None:
aggregate_cession_function(agg, self.padding, tilt_vector)
ft_line_density[raw_nm] = agg.ftagg_density
self.density_df[nm] = agg.agg_density
if ft_all is None:
ft_all = np.copy(ft_line_density[raw_nm])
else:
ft_all *= ft_line_density[raw_nm]
self.density_df['p_total'] = np.real(ift(ft_all, self.padding, tilt_vector))
# ft_line_density['total'] = ft_all
# make the not self.line_density = sum of all but the given line
# have the issue here that if you divide and the dist
# is symmetric then you get a div zero...
for line in self.line_names:
ft_not = np.ones_like(ft_all)
import numbers
import unittest
import numpy as np
from bio_rtd import peak_shapes, utils
from bio_rtd.uo import surge_tank
from bio_rtd.utils import vectors
from bio_rtd_test.aux_bio_rtd_test import TestLogger
class MockUpNoSimCstr(surge_tank.CSTR):
sim_conv = False
sim_num = False
def _sim_convolution(self):
assert not self.sim_conv
self.sim_conv = True
def _sim_numerical(self):
assert not self.sim_num
self.sim_num = True
def f_constant(self, f_const=1.0):
return np.ones_like(self.t) * f_const
def f_box_shaped(self, f_on=1.0):
_f = np.ones_like(self.t) * f_on
t_on = 20
t_off = self.t[-1] * 19 / 20
_f[int(round(t_on / self.dt)):int(round(t_off / self.dt))] = f_on
return _f
# noinspection DuplicatedCode
def f_periodic(self, f_on=1.0):
_f = np.zeros_like(self.t)
t_period = 20.23
t_on = 5.34
i_on = int(round(t_on / self.dt))
t_on = i_on * self.dt
t_delay = 20
t_shorter_end = 40
t_period_start = np.arange(t_delay, self.t[-1] - t_shorter_end, t_period)
dt = int(round(t_period_start[0])) - t_period_start[0]
t_period_start += dt
i_f_start = [t_p / self.dt for t_p in t_period_start]
df_init = round(i_f_start[0]) - i_f_start[0]
i_f_start = [i + df_init for i in i_f_start]
for i in i_f_start:
i_r = int(round(i))
_f[i_r:i_r + i_on] = f_on
_f[self.t.size - int(round(t_shorter_end / self.dt)):] = 0
self.f_period_average = f_on * i_on * self.dt / t_period
self.t_period = t_period
self.t_on = t_on
self.i_f_start = i_f_start
return _f
# noinspection DuplicatedCode
def f_periodic_2(self, f_on=1.0):
# one full period and one clipped
_f = np.zeros_like(self.t)
t_period = 120
t_on = 40
i_on = int(round(t_on / self.dt))
t_on = i_on * self.dt
# t_delay = 30
t_shorter_end = 30
i_f_start = [t_p / self.dt for t_p in [30, 150]]
df_init = round(i_f_start[0]) - i_f_start[0]
i_f_start = [i + df_init for i in i_f_start]
for i in i_f_start:
i_r = int(round(i))
_f[i_r:i_r + i_on] = f_on
_f[self.t.size - int(round(t_shorter_end / self.dt)):] = 0
self.f_period_average = f_on * i_on * self.dt / t_period
self.t_period = t_period
self.t_on = t_on
self.i_f_start = i_f_start
return _f
def c_profile_1_specie(self):
c = np.ones([1, self.t.size]) * 5.2
c[0, 40:110] = 0
return c
def c_profile_2_species(self):
c = np.ones([2, self.t.size])
c[0, :20] = 0
c[1, :] = 2
c[1, 30:] = 0
return c
class CstrTest(unittest.TestCase):
def setUp(self) -> None:
self.t = np.linspace(0, 200, 1200)
self.dt = self.t[1]
self.uo_id = "cstr"
self.gui_title = "Ideal CSTR"
self.cstr = surge_tank.CSTR(self.t, self.uo_id, self.gui_title)
self.cstr.log = TestLogger()
self.f_period_average = 0
self.t_period = 0
self.i_f_start = 0
self.t_on = 0
def assert_positive_value(self, par_name, func):
v = getattr(self.cstr, par_name)
if isinstance(v, numbers.Number):
setattr(self.cstr, par_name, -1)
if isinstance(v, np.ndarray):
setattr(self.cstr, par_name, np.array([]))
if isinstance(v, bool):
setattr(self.cstr, par_name, not v)
with self.assertRaises(AssertionError):
func()
setattr(self.cstr, par_name, v)
def test_init(self):
# test passed parameters
np.testing.assert_array_equal(self.cstr._t, self.t)
self.assertEqual(self.cstr.uo_id, self.uo_id)
self.assertEqual(self.cstr.gui_title, self.gui_title)
# test default parameters
# volume
self.assertEqual(-1, self.cstr.rt_target)
self.assertEqual(-1, self.cstr.v_void)
self.assertEqual(-1, self.cstr.v_min)
self.assertEqual(-1, self.cstr.v_min_ratio)
# init volume
self.assertEqual(-1, self.cstr.v_init)
self.assertEqual(-1, self.cstr.v_init_ratio)
# init conc
self.assertTrue(self.cstr.c_init.size == 0)
# empty start
self.assertFalse(self.cstr.starts_empty)
def test_calc_f_out_target_and_t_cycle(self):
# constant
self.cstr._f = f_constant(self, 5)
self.cstr._calc_f_out_target_and_t_cycle()
self.assertTrue(self.cstr._is_f_in_box_shaped)
self.assertEqual(5, self.cstr._f_out_target)
self.assertEqual(0, self.cstr._t_cycle)
# box shaped
self.cstr._f = f_box_shaped(self, 15)
self.cstr._calc_f_out_target_and_t_cycle()
self.assertTrue(self.cstr._is_f_in_box_shaped)
self.assertEqual(15, self.cstr._f_out_target)
self.assertEqual(0, self.cstr._t_cycle)
def check_periodic():
self.cstr._calc_f_out_target_and_t_cycle()
self.assertFalse(self.cstr._is_f_in_box_shaped)
self.assertAlmostEqual(self.f_period_average,
self.cstr._f_out_target,
0)
self.assertAlmostEqual(self.t_period, self.cstr._t_cycle, 0)
# periodic 1
self.cstr._f = f_periodic(self, 15)
check_periodic()
# periodic 2
self.cstr._f = f_periodic_2(self, 25)
check_periodic()
def test_calc_v_void(self):
# prepare
self.cstr._f = f_periodic_2(self, 1.43)
self.cstr._calc_f_out_target_and_t_cycle()
# assert
with self.assertRaises(RuntimeError):
self.cstr._calc_v_void()
def calc_delta_v():
f_in = self.cstr._f.max()
return self.cstr._f_out_target \
* (1 - self.cstr._f_out_target / f_in) \
* self.cstr._t_cycle
def use_rt_target():
self.cstr.rt_target = 10.2
self.cstr._calc_v_void()
self.assertEqual(self.cstr.rt_target * self.cstr._f_out_target,
self.cstr._v_void)
def use_v_min_ratio():
self.cstr.v_min_ratio = 0.2
self.cstr._t_cycle = -1
with self.assertRaises(AssertionError):
self.cstr._calc_v_void()
self.cstr._t_cycle = 15.2
self.cstr._calc_v_void()
self.assertEqual(
calc_delta_v() / (1 - self.cstr.v_min_ratio),
self.cstr._v_void
)
def use_v_min():
self.cstr.v_min = 14.3
self.cstr._t_cycle = -1
with self.assertRaises(AssertionError):
self.cstr._calc_v_void()
self.cstr._t_cycle = 11.2
self.cstr._calc_v_void()
self.assertEqual(
calc_delta_v() + self.cstr.v_min,
self.cstr._v_void
)
def use_v_void():
self.cstr.v_void = 22.2
self.cstr._calc_v_void()
self.assertEqual(
self.cstr.v_void,
self.cstr._v_void
)
# calc
# rt_target
use_rt_target()
# v_min_ratio
with self.assertWarns(Warning):
# test priority over rt_target
use_v_min_ratio()
self.cstr.rt_target = -1
use_v_min_ratio()
# v_min
with self.assertWarns(Warning): # test parameter priority
# test priority over v_min_ratio
use_v_min()
self.cstr.v_min_ratio = -1
use_v_min()
# v_void
with self.assertWarns(Warning): # test parameter priority
# test priority over v_min
use_v_void()
self.cstr.v_min = -1
use_v_void()
# noinspection DuplicatedCode
def test_calc_v_init(self):
# default: `_v_init = _v_void` & warning
with self.assertRaises(AssertionError):
self.cstr._calc_v_init()
self.cstr._v_void = 1.2
with self.assertWarns(Warning):
self.cstr._calc_v_init()
self.assertEqual(self.cstr._v_void, self.cstr._v_init)
# v_init_ratio
self.cstr.v_init_ratio = 0.2
self.cstr._v_void = -1
with self.assertRaises(AssertionError):
self.cstr._calc_v_init()
self.cstr._v_void = 1.3
self.cstr._calc_v_init()
self.assertEqual(self.cstr._v_void * self.cstr.v_init_ratio,
self.cstr._v_init)
self.cstr._v_void = -1
# v_init
self.cstr.v_init = 35.2
with self.assertWarns(Warning):
# priority over v_init_ratio
self.cstr._calc_v_init()
self.assertEqual(self.cstr.v_init, self.cstr._v_init)
self.cstr.v_init_ratio = -1
self.cstr._calc_v_init()
self.assertEqual(self.cstr.v_init, self.cstr._v_init)
self.cstr.v_init = 0
self.cstr._calc_v_init()
self.assertEqual(self.cstr.v_init, self.cstr._v_init)
# starts empty
# to be ignored by the method
self.cstr.v_init = 335.2
self.cstr.v_init_ratio = 24.2
# set starts_empty
self.cstr.starts_empty = True
# test results
self.cstr._calc_v_init()
self.assertEqual(0, self.cstr._v_init)
# noinspection DuplicatedCode
def test_calc_c_init(self):
# prepare
self.cstr._n_species = 2
# default
self.cstr._calc_c_init()
np.testing.assert_array_equal(np.array([[0], [0]]),
self.cstr._c_init)
# defined
self.cstr.c_init = np.array([[2.2], [0.3]])
self.cstr._calc_c_init()
np.testing.assert_array_equal(np.array([[2.2], [0.3]]),
self.cstr._c_init)
# defined 2
self.cstr.c_init = np.array([3.2, 0.2])
"""
Density-fitted MP2 from a RHF reference (same as DF-MP2) using a rank-reduced DF tensor
from natural auxiliary functions (NAF) as described in [3].
This is the 'smarter' algorithm described in the paper that avoids the costly
direct contraction of the Coulomb metric with the 3-index integrals (Qov tensor in PSI4 language)
Instead, cheap intermediates are used and the reduced Qov tensor is regained in the last step.
References:
1. Algorithm modified from <NAME>'s most excellent Psi4 plugin example
Bottom of the page: http://www.psicode.org/developers.php
2. Tutorials/03_Hartree-Fock/density-fitting.ipynb
3. <NAME>, J. Chem. Phys. 2014, 141, 244113. [http://aip.scitation.org/doi/10.1063/1.4905005]
"""
__authors__ = "<NAME>"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__copyright__ = "(c) 2014-2018, The Psi4NumPy Developers"
__license__ = "BSD-3-Clause"
__date__ = "2018-11-29"
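# Outline of the NAF construction implemented below (equation numbers follow Ref. 3):
# W' = I^t I (eq 10) is built directly from the raw 3-index integrals,
# W = L^t W' L (eq 11), where L is the Cholesky factor of the inverse Coulomb metric,
# N-bar collects the eigenvectors of W with |eigenvalue| > epsilon_naf,
# N'-bar = L N-bar (eq 12), and J-bar = I N'-bar (eq 13) is the rank-reduced DF tensor
# that replaces the full Qov in the usual DF-MP2 energy expression.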
import time
import numpy as np
np.set_printoptions(precision=5, linewidth=200, suppress=True)
import psi4
# Set memory & output
psi4.set_memory('2 GB')
psi4.core.set_output_file('output.dat', False)
mol = psi4.geometry("""
C 1.39410 0.00000 0.00000
C 0.69705 -1.20732 0.00000
C -0.69705 -1.20732 0.00000
C -1.39410 0.00000 0.00000
C -0.69705 1.20732 0.00000
C 0.69705 1.20732 0.00000
H 2.47618 0.00000 0.00000
H 1.23809 -2.14444 0.00000
H -1.23809 -2.14444 0.00000
H -2.47618 0.00000 0.00000
H -1.23809 2.14444 0.00000
H 1.23809 2.14444 0.00000
symmetry c1
""")
# Adjustable selection parameter (e.g 10^-2 to 10^-4)
# for constructing the NAF space
epsilon_naf = 1e-2
# Basis used in mp2 density fitting
psi4.set_options({'basis': 'aug-cc-pVDZ', 'df_basis_mp2': 'aug-cc-pvdz-ri'})
check_energy = True
print('\nStarting RHF...')
t = time.time()
RHF_E, wfn = psi4.energy('SCF', return_wfn=True)
print('...RHF finished in %.3f seconds: %16.10f' % (time.time() - t, RHF_E))
# Grab data from Wavefunction class
ndocc = wfn.nalpha()
orbital_basis = wfn.basisset()
nbf = wfn.nso()
nvirt = nbf - ndocc
print('ndocc', ndocc)
print('nvirt', nvirt)
# Split eigenvectors and eigenvalues into o and v
eps_occ = np.asarray(wfn.epsilon_a_subset("AO", "ACTIVE_OCC"))
eps_vir = np.asarray(wfn.epsilon_a_subset("AO", "ACTIVE_VIR"))
# Build DF tensors
print('\nBuilding DF ERI tensor Qov...')
t = time.time()
C = np.asarray(wfn.Ca())
# Build instance of MintsHelper
mints = psi4.core.MintsHelper(orbital_basis)
zero_bas = psi4.core.BasisSet.zero_ao_basis_set()
# build auxiliary basis set object
aux_basis = psi4.core.BasisSet.build(
mol, "DF_BASIS_MP2", "", "RIFIT",
psi4.core.get_global_option('df_basis_mp2'))
naux = aux_basis.nbf()
# Build (P|pq) raw 3-index ERIs, dimension (1, Naux, nbf, nbf)
# this is I^t in the paper
Ppq = mints.ao_eri(zero_bas, aux_basis, orbital_basis, orbital_basis)
print('Ppq = I^t = (Q|pq)', Ppq.shape)
# Build the Coulomb metric and invert it (a Cholesky factor of the inverse replaces the usual inverse square root), dimension (1, Naux, 1, Naux)
metric = mints.ao_eri(zero_bas, aux_basis, zero_bas, aux_basis)
metric.power(-1.0, 1.e-14)
# Remove excess dimensions of Ppq & metric
Ppq = np.squeeze(Ppq)
metric = np.squeeze(metric)
# cholesky decomp of inverse metric
L = np.linalg.cholesky(metric)
print("L = cholesky[(P|Q)^-1] dim:", L.shape)
# Form intermediate W'= I^t*I (eq 10)
# note that Wp = Wp^t
Wp = np.einsum('Ppq,Qpq->PQ', Ppq, Ppq, optimize=True)
print("W' = (P|P) dim:", Wp.shape)
# form W proper (eq 11)
W = np.dot(np.dot(L.T, Wp), L)
print("W = (Q|Q) dim:", W.shape)
# form N(bar) from significant eigenvectors of W
e_val, e_vec = np.linalg.eigh(W)
mask = np.abs(e_val) > epsilon_naf
naux2 = np.sum(mask)
Nbar = e_vec[:, mask]
print('retaining #naux = %i of %i [ %4.1f %% ] for epsilon(naf) = %.3e ' %
(naux2, naux, naux2 / naux * 100.0, epsilon_naf))
print("N^bar = (Q^bar|Q) dim:", Nbar.shape)
# form N'(bar) = L * N(bar) (eq 12)
Npbar = np.dot(L, Nbar)
print("N'^bar = (P^bar|Q) dim:", Npbar.shape)
# form J(bar) = I * N'(bar) (eq 13)
# we form the transpose of Jbar to be inline with PSI4
Jbar = np.einsum('Ppq,PQ->Qpq', Ppq, Npbar, optimize=True)
print("J^bar = (Q|pq) dim:", Jbar.shape)
# ==> AO->MO transform: Qpq -> Qmo @ O(N^4) <==
print('AO->MO transform')
Cocc = C[:, :ndocc]
Cvirt = C[:, ndocc:]
Qov = np.einsum('pi,Qpq->Qqi', Cocc, Jbar, optimize=True)
Qov = np.einsum('Qqi,qa->Qia', Qov, Cvirt, optimize=True)
time_qov = time.time() - t
print('...Qov build in %.3f seconds with a shape of %s, %.3f GB.' \
% (time_qov, str(Qov.shape), np.prod(Qov.shape) * 8.e-9))  # 8 bytes per float64
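# The energy evaluation below is a sketch (not the original script's ending): the standard
# DF-MP2 expression evaluated with the rank-reduced Qov tensor. A single einsum is fine at
# this system size; a batched loop over occupied orbitals would scale better in memory.
if check_energy:
    t = time.time()
    # (ia|jb) integrals from the reduced DF tensor
    MO = np.einsum('Qia,Qjb->iajb', Qov, Qov, optimize=True)
    # orbital-energy denominators e_i - e_a + e_j - e_b
    e_denom = 1.0 / (eps_occ.reshape(-1, 1, 1, 1) - eps_vir.reshape(-1, 1, 1)
                     + eps_occ.reshape(-1, 1) - eps_vir)
    mp2_os = np.einsum('iajb,iajb,iajb->', MO, MO, e_denom, optimize=True)
    mp2_ss = np.einsum('iajb,iajb,iajb->', MO - MO.swapaxes(1, 3), MO, e_denom, optimize=True)
    mp2_corr = mp2_os + mp2_ss
    print('NAF-DF-MP2 correlation energy: %16.10f' % mp2_corr)
    print('NAF-DF-MP2 total energy:       %16.10f' % (RHF_E + mp2_corr))
    print('...MP2 energy evaluated in %.3f seconds.' % (time.time() - t))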
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import numpy as np
import copy
class MultiArmedBanditEnv:
"""
Parent class for all Multi Armed Bandit Environments that are nonassociative.
This class only gives 0 as reward for all actions.
"""
def __init__(self, horizon: int=1000, random_seed=None):
if isinstance(random_seed, int):
np.random.seed(random_seed)
self.horizon = horizon
self.action_space = np.arange(10)
self.reset()
def reset(self):
self.q_star = np.zeros(self.action_space.shape)
self.t = 0
def step(self, action: int):
self.t += 1
reward = 0
done = self.t == self.horizon
return reward, done
class NonStationaryTestBedEnv(MultiArmedBanditEnv):
def __init__(self, horizon: int=10000, random_seed=None):
"""
Initialize the environment. Non associative setting, one single
situation/state. Non stationary problem where the reward distribution
changes over time.
Parameters
----------
horizon : int, optional
Number of action selections or time steps. The default is 10000.
random_seed : TYPE, optional
Random seed for all the episodes. The default is None.
Returns
-------
None.
"""
# Calls overloaded methods as well
super().__init__(horizon=horizon, random_seed=random_seed)
def reset(self):
# (re)initialize the ten reward distributions
# create the initial true action values for the 10 actions
self.q_star = np.zeros(self.action_space.shape)
# reward distributions are normal distributions centered around
# their q_star and with unit variance, q_star changes over time.
# track the time step
self.t = 0
def step(self, action: int):
"""
Take an environment step based on the action chosen by the agent.
In our case, it's just pulling one arm, sampling a reward based
on the reward distribution of the arm/action.
Parameters
----------
action : int
Action to perform. Arm/lever to pull
Returns
-------
reward : int or float
scalar reward signal after taking the action (pulling the arm).
done : bool
if the episode is finished or not.
"""
self.t += 1
previous_q_star_a = copy.deepcopy(self.q_star[action])
# Independent random walk for all action values
self.q_star += np.random.normal(loc=0, scale=0.01, size=self.q_star.shape)
reward = np.random.normal(previous_q_star_a, 1)
done = self.t == self.horizon
return reward, done
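# Usage sketch (illustrative, not part of the environment definitions above): run one
# episode with a uniformly random policy and accumulate the reward.
if __name__ == "__main__":
    env = NonStationaryTestBedEnv(horizon=1000, random_seed=0)
    total_reward = 0.0
    done = False
    while not done:
        action = np.random.randint(env.action_space.size)
        reward, done = env.step(action)
        total_reward += reward
    print(f"Random policy collected {total_reward:.1f} total reward over one episode.")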
import networkx as nx
import numpy as np
import os
from collections import namedtuple
from itertools import chain
from matplotlib import colors
from matplotlib import pyplot as plt
from numpy.random import rand, randint, shuffle
from scipy.spatial import Delaunay, Voronoi
from perlin_noise import PerlinNoise
VERSION = "v0.1.0"
flip_funcs = dict(central=lambda m: np.flip(m),
diagonal=lambda m: np.transpose(m),
horizontal=lambda m: np.flip(m, axis=1))
Neighborhood = namedtuple("Neighborhood", "north west south east H V")
def neighborhood(i, j):
return np.array([[i, j+1],
[i-1, j+1],
[i-1, j],
[i-1, j-1],
[i, j-1],
[i+1, j-1],
[i+1, j],
[i+1, j+1]])
def to_str(array, sep=","):
return sep.join([str(a) for a in array])
class MapGenerationFailure(Exception):
pass
class Map:
starting_mills = None
def __init__(self, size=45, padded_size=79,
symmetry="central",
min_starting_dist=15,
seed=None):
if seed is None:
seed = randint(2**31)
self.seed = seed
np.random.seed(seed)
self.to_quadrant_funcs = dict(central=self.to_upper_triangle,
diagonal=self.to_upper_triangle,
horizontal=self.to_upper_half)
self.reflect_point_funcs = dict(central=self.reflect_central)
self.to_quadrant = self.to_quadrant_funcs[symmetry]
self.reflect_point = self.reflect_point_funcs[symmetry]
self.symmetry = symmetry
self.symmetric_array = flip_funcs[symmetry]
self.size = size
self.padded_size = padded_size
pad_before = (padded_size - size)//2
pad_after = padded_size - size - pad_before
instart = pad_before
instop = pad_before + size
self.instart = instart
self.instop = instop
self.inslice = slice(instart, instop)
self.padded_tiles = np.zeros((self.padded_size, self.padded_size), dtype=int)
self.min_starting_dist = min_starting_dist
self.ownerships = -2 * np.ones((self.padded_size, self.padded_size), dtype=int)
# %%
import os
import sys
os.chdir(os.path.dirname(os.getcwd())) # make directory one step up the current directory
from pymaid_creds import url, name, password, token
import pymaid
rm = pymaid.CatmaidInstance(url, token, name, password)
import navis
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import connectome_tools.process_matrix as pm
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
rm = pymaid.CatmaidInstance(url, token, name, password)
adj = pd.read_csv('VNC_interaction/data/axon-dendrite.csv', header = 0, index_col = 0)
inputs = pd.read_csv('VNC_interaction/data/input_counts.csv', index_col = 0)
inputs = pd.DataFrame(inputs.values, index = inputs.index, columns = ['axon_input', 'dendrite_input'])
pairs = pm.Promat.get_pairs() # import pairs
sens_asc_mat_thresh = pd.read_csv('VNC_interaction/plots/individual_asc_paths/ascending_identity_2-hops.csv', header=0, index_col=0)
# setting up volumes for future cells
cns = pymaid.get_volume('cns')
neuropil = pymaid.get_volume('PS_Neuropil_manual')
SEZ_left = pymaid.get_volume('SEZ_left')
SEZ_right = pymaid.get_volume('SEZ_right')
T1_left = pymaid.get_volume('T1_left')
T1_right = pymaid.get_volume('T1_right')
T2_left = pymaid.get_volume('T2_left')
T2_right = pymaid.get_volume('T2_right')
T3_left = pymaid.get_volume('T3_left')
T3_right = pymaid.get_volume('T3_right')
A1_left = pymaid.get_volume('A1_left')
A1_right = pymaid.get_volume('A1_right')
A2_left = pymaid.get_volume('A2_left')
A2_right = pymaid.get_volume('A2_right')
A3_left = pymaid.get_volume('A3_left')
A3_right = pymaid.get_volume('A3_right')
A4_left = pymaid.get_volume('A4_left')
A4_right = pymaid.get_volume('A4_right')
A5_left = pymaid.get_volume('A5_left')
A5_right = pymaid.get_volume('A5_right')
A6_left = pymaid.get_volume('A6_left')
A6_right = pymaid.get_volume('A6_right')
A7_left = pymaid.get_volume('A7_left')
A7_right = pymaid.get_volume('A7_right')
A8_left = pymaid.get_volume('A8_left')
A8_right = pymaid.get_volume('A8_right')
mult = 0.8
# Set color and alpha of volumes
cns.color = (250, 250, 250, 0.1)
neuropil.color = (250, 250, 250, 0.1)
SEZ_left.color = (0, 0, 250, .05*mult)
SEZ_right.color = (0, 0, 250, .05*mult)
T1_left.color = (0, 0, 250, .03*mult)
T1_right.color = (0, 0, 250, .03*mult)
T2_left.color = (0, 250, 250, .075*mult)
T2_right.color = (0, 250, 250, .075*mult)
T3_left.color = (0, 250, 250, .04*mult)
T3_right.color = (0, 250, 250, .04*mult)
A1_left.color = (0, 250, 0, .075*mult)
A1_right.color = (0, 250, 0, .075*mult)
A2_left.color = (0, 250, 0, .04*mult)
A2_right.color = (0, 250, 0, .04*mult)
A3_left.color = (250, 250, 0, .08*mult)
A3_right.color = (250, 250, 0, .08*mult)
A4_left.color = (250, 250, 0, .04*mult)
A4_right.color = (250, 250, 0, .04*mult)
A5_left.color = (250, 0, 0, .06*mult)
A5_right.color = (250, 0, 0, .06*mult)
A6_left.color = (250, 0, 0, .03*mult)
A6_right.color = (250, 0, 0, .03*mult)
A7_left.color = (250, 0, 150, .05*mult)
A7_right.color = (250, 0, 150, .05*mult)
A8_left.color = (250, 0, 150, .025*mult)
A8_right.color = (250, 0, 150, .025*mult)
# %%
# plotting neurons
def plot_pair(num, neurons, cns, neuropil, segments, view, method, neurons_present=True):
if(neurons_present):
fig, ax = navis.plot2d([neurons, cns], method=method, color = '#444140', linewidth=1.5, connectors=True, cn_size=2)
if(neurons_present==False):
fig, ax = navis.plot2d([cns], method=method)
if(view == 'side'):
ax.azim= 0
ax.dist = 5.2 # zoom
navis.plot2d(neuropil, method=method, ax=ax)
for segment in segments:
navis.plot2d(segment, method=method, ax=ax)
plt.show()
if(neurons_present):
fig.savefig(f'VNC_interaction/plots/individual_asc_morpho/{num}_{neurons[0].skeleton_id}_morphology_{view}.png', dpi=200)
if(neurons_present==False):
fig.savefig(f'VNC_interaction/plots/individual_asc_morpho/CNS_morphology_{view}.png', dpi=200)
if(view == 'front'):
ax.azim = 90
ax.dist = 5.2 # zoom
navis.plot2d(neuropil, method=method, ax=ax)
for segment in segments:
navis.plot2d(segment, method=method, ax=ax)
plt.show()
if(neurons_present):
fig.savefig(f'VNC_interaction/plots/individual_asc_morpho/{num}_{neurons[0].skeleton_id}_morphology_{view}.png', dpi=200)
if(neurons_present==False):
fig.savefig(f'VNC_interaction/plots/individual_asc_morpho/CNS_morphology_{view}.png', dpi=200)
if(view == 'top'):
ax.elev=90
ax.dist = 5.2 # zoom
navis.plot2d(neuropil, method=method, ax=ax)
for segment in segments:
navis.plot2d(segment, method=method, ax=ax)
plt.show()
if(neurons_present):
fig.savefig(f'VNC_interaction/plots/individual_asc_morpho/{num}_{neurons[0].skeleton_id}_morphology_{view}.png', dpi=200)
if(neurons_present==False):
fig.savefig(f'VNC_interaction/plots/individual_asc_morpho/CNS_morphology_{view}.png', dpi=200)
ascendings = [int(x) for x in sens_asc_mat_thresh.columns]
asc_pairs = [pairs[pairs.leftid==x].loc[:, ['leftid', 'rightid']].values for x in ascendings]
asc_pairs = [list(x) for sublist in asc_pairs for x in sublist]
segments = [T1_left, T1_right,
T2_left, T2_right,
T3_left, T3_right,
A1_left, A1_right,
A2_left, A2_right,
A3_left, A3_right,
A4_left, A4_right,
A5_left, A5_right,
A6_left, A6_right,
A7_left, A7_right,
A8_left, A8_right]
#neurons = pymaid.get_neurons(asc_pairs[0])
#plot_pair(0, neurons, cns, neuropil, segments, 'side')
for i in range(0, len(asc_pairs)):
neurons = pymaid.get_neurons(asc_pairs[i])
plot_pair(i, neurons, cns, neuropil, segments, 'side')
for i in range(0, len(asc_pairs)):
neurons = pymaid.get_neurons(asc_pairs[i])
plot_pair(i, neurons, cns, neuropil, segments, 'front')
for i in range(0, len(asc_pairs)):
neurons = pymaid.get_neurons(asc_pairs[i])
plot_pair(i, neurons, cns, neuropil, segments, 'top')
segments = [SEZ_left, SEZ_right,
T1_left, T1_right,
T2_left, T2_right,
T3_left, T3_right,
A1_left, A1_right,
A2_left, A2_right,
A3_left, A3_right,
A4_left, A4_right,
A5_left, A5_right,
A6_left, A6_right,
A7_left, A7_right,
A8_left, A8_right]
method='3d_complex'
plot_pair('', [], cns, [], segments, 'side', method=method, neurons_present=False)
plot_pair('', [], cns, [], segments, 'front', method=method,neurons_present=False)
plot_pair('', [], cns, [], segments, 'top', method=method,neurons_present=False)
# %%
# plot dendritic synapses
SEZ_left = pymaid.get_volume('SEZ_left')
SEZ_right = pymaid.get_volume('SEZ_right')
# calculate edges of each segment
def volume_edges(vol_left, vol_right):
vol_min = np.mean([vol_left.bbox[2, 0], vol_right.bbox[2, 0]])
import math
import numpy as np
import visualization.panda.world as wd
import modeling.geometricmodel as gm
import modeling.collisionmodel as cm
import robotsim.robots.ur3_dual.ur3_dual as ur3d
import motion.probabilistic.rrt_connect as rrtc
import robotcon.ur.ur3_dual_x as ur3dx
import pickle
import time
base = wd.World(cam_pos=[2, 1, 3], lookat_pos=[0, 0, 1.1])
gm.gen_frame().attach_to(base)
# robot_s
# component_name = 'lft_arm'
robot_instance = ur3d.UR3Dual()
ur_dual_x = ur3dx.UR3DualX(lft_robot_ip='10.2.0.50', rgt_robot_ip='10.2.0.51', pc_ip='10.2.0.100')
# init_lft_arm_jnt_values = robot_s.lft_arm.get_jnt_values()
# init_rgt_arm_jnt_values = robot_s.rgt_arm.get_jnt_values()
# full_jnt_values = np.hstack((init_lft_arm_jnt_values, init_rgt_arm_jnt_values))
# full_jnt_values = np.hstack((robot_instance.lft_arm.homeconf, robot_instance.rgt_arm.homeconf))
# goal_lft_arm_jnt_values = np.array([0, -math.pi / 2, -math.pi/3, -math.pi / 2, math.pi / 6, math.pi / 6])
# goal_rgt_arm_jnt_values = np.array([0, -math.pi/4, 0, math.pi/2, math.pi/2, math.pi / 6])
# robot_instance.fk(component_name="lft_arm", jnt_values = np.array([0, -math.pi / 2, -math.pi/3, -math.pi / 2, math.pi / 6, math.pi / 6]))
# pos = robot_instance.lft_arm.get_gl_tcp()
# gm.gen_sphere(pos[0]).attach_to(base)
#
# pose = []
# count = 0
# print("start")
# while count < 100:
# tmp_pos = ur_dual_x.lft_arm_hnd.get_jnt_values()
# print(count)
# if len(pose)>1:
# if np.linalg.norm(np.array(pose[-1])-np.array(tmp_pos)) < 0.01/180*np.pi:
# print(np.linalg.norm(np.array(pose[-1])-np.array(tmp_pos)))
# count = count+1
# time.sleep(0.01)
# continue
# pose.append(tmp_pos)
# time.sleep(0.1)
#
# print("done")
# pickle.dump(pose, open("pose_lft.pkl", "wb"))
# tmp_pos = ur_dual_x.lft_arm_hnd.get_jnt_values()
# pickle.dump(tmp_pos, open("pose_rgt.pkl", "wb"))
# # # base.run()
pose = pickle.load(open("pose_lft.pkl","rb"))
ini_pose = np.array(pose[0])
ur_dual_x.lft_arm_hnd.move_jnts(ini_pose)
ur_dual_x.lft_arm_hnd.move_jntspace_path(pose,interval_time=0.7)
# # pose = ur_dual_x.lft_arm_hnd.get_jnt_values()
# robot_instance.fk(component_name="lft_arm", jnt_values=ini_pose)
# pos = robot_instance.get_gl_tcp(manipulator_name="lft_arm")
# jnt = robot_instance.ik("lft_arm", pos[0], pos[1], seed_jnt_values= ini_pose)
# print(pos)
#
# robot_meshmodel = robot_instance.gen_meshmodel(toggle_tcpcs=True)
# robot_meshmodel.attach_to(base)
# ur_dual_x.lft_arm_hnd.move_jnts(jnt)
# # print(pos[0])
# # print(pos[0]+np.dot(pos[1], np.array([0,-0.1,0.1875])))
# pos_sensor = pos[0]+np.dot(pos[1], np.array([0,-0.01,0.01875]))
# # gm.gen_sphere(pos_sensor).attach_to(base)
# pos_nail = pos_sensor + np.dot(pos[1], np.array([0,-0.003,0.02175]))
#
# robot_meshmodel = robot_instance.gen_meshmodel(toggle_tcpcs=True)
# robot_meshmodel.attach_to(base)
# gm.gen_sphere(pos[0]).attach_to(base)
pose = []
count = 0
while(count < 10000):
tmp_pos = ur_dual_x.lft_arm_hnd.get_jnt_values()
print(tmp_pos)
if not (pose == []):
if np.linalg.norm(np.array(pose[-1]) - np.array(tmp_pos)) < 0.01 / 180 * np.pi:
    # the position has settled; count it, mirroring the commented-out recording loop above
    count = count + 1
    time.sleep(0.01)
    continue
pose.append(tmp_pos)
time.sleep(0.1)
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2020 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of PANDORA
#
# https://github.com/CNES/Pandora_pandora
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains functions to test all the methods in img_tools module.
"""
import unittest
import logging
import logging.config
import os
import json
import numpy as np
import xarray as xr
import pandora.img_tools as img_tools
import pandora
class TestImgTools(unittest.TestCase):
"""
TestImgTools class allows to test all the methods in the module img_tools
"""
def setUp(self):
"""
Method called to prepare the test fixture
"""
data = np.array(([1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 2, 1],
[1, 1, 1, 4, 3, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1]))
self.img = xr.Dataset({'im': (['row', 'col'], data)},
coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
def test_census_transform(self):
"""
Test the census transform method
"""
# Census transform ground truth for the image self.img with window size 3
census_ground_truth = np.array(([0b000000000, 0b000000001, 0b000001011, 0b000000110],
[0b000000000, 0b000001000, 0b000000000, 0b000100000],
[0b000000000, 0b001000000, 0b011000000, 0b110000000]))
# Computes the census transform for the image self.img with window size 3
census_transform = img_tools.census_transform(self.img, 3)
# Check if the census_transform is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(census_transform['im'].data, census_ground_truth)
# Census transform ground truth for the image self.img with window size 5
census_ground_truth = np.array(([[0b0000000001000110000000000, 0b0]]))
# Computes the census transform for the image self.img with window size 5
census_transform = img_tools.census_transform(self.img, 5)
# Check if the census_transform is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(census_transform['im'].data, census_ground_truth)
def test_compute_mean_raster(self):
"""
Test the method compute_mean_raster
"""
# Mean raster ground truth for the image self.img with window size 3
mean_ground_truth = np.array(([1., 12/9., 15/9., 15/9.],
[1., 12/9., 15/9., 15/9.],
[1., 12/9., 14./9, 14./9]))
# Computes the mean raster for the image self.img with window size 3
mean_r = img_tools.compute_mean_raster(self.img, 3)
# Check if the calculated mean is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(mean_r, mean_ground_truth)
# Mean raster ground truth for the image self.img with window size 5
mean_ground_truth = np.array(([[31/25., 31/25.]]))
# Computes the mean raster for the image self.img with window size 5
mean_r = img_tools.compute_mean_raster(self.img, 5)
# Check if the calculated mean is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(mean_r, mean_ground_truth)
def test_compute_mean_patch(self):
"""
Test the method compute_mean_patch
"""
# Computes the mean for the image self.img with window size 3 centered on y=1, x=1
mean = img_tools.compute_mean_patch(self.img, 1, 1, 3)
# Check if the calculated mean is equal to the ground truth 1.
self.assertEqual(mean, 1.)
# Computes the mean for the image self.img with window size 5 centered on y=2, x=2
mean = img_tools.compute_mean_patch(self.img, 2, 2, 5)
# Check if the calculated mean is equal to the ground truth 31/25.
self.assertEqual(mean, np.float32(31/25.))
def test_check_inside_image(self):
"""
Test the method check_inside_image
"""
# Test that the coordinates x=0,y=0 are in the image self.img
self.assertTrue(img_tools.check_inside_image(self.img, 0, 0))
# Test that the coordinates x=-1,y=0 are not in the the image self.img
self.assertFalse(img_tools.check_inside_image(self.img, -1, 0))
# Test that the coordinates x=0,y=6 are not in the the image self.img
# Because shape self.img x=6, y=5
self.assertFalse(img_tools.check_inside_image(self.img, 0, 6))
def test_compute_std_raster(self):
"""
Test the method compute_std_raster
"""
# standard deviation raster ground truth for the image self.img with window size 3
std_ground_truth = np.array(([0., np.std(self.img['im'][:3,1:4]), np.std(self.img['im'][:3,2:5]), np.std(self.img['im'][:3,3:])],
[0., np.std(self.img['im'][1:4, 1:4]), np.std(self.img['im'][1:4, 2:5]), np.std(self.img['im'][1:4, 3:])],
[0., np.std(self.img['im'][2:, 1:4]), np.std(self.img['im'][2:, 2:5]), np.std(self.img['im'][2:, 3:])]))
# Computes the standard deviation raster for the image self.img with window size 3
std_r = img_tools.compute_std_raster(self.img, 3)
# Check if the calculated std is almost equal to the ground truth (same shape and all elements close)
np.testing.assert_array_almost_equal(std_r, std_ground_truth)
from styx_msgs.msg import TrafficLight
import numpy as np
import tensorflow as tf
import rospy
class TLClassifier(object):
def __init__(self):
self.detection_graph = tf.Graph()
graph_path = './graph/frozen_inference_graph.pb'
with self.detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(graph_path, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# expect rgb.
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
with self.detection_graph.as_default():
with tf.Session(graph=self.detection_graph) as sess:
# Definite input and output Tensors for detection_graph
image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
# Convert image format.
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image, axis=0)
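# The rest of the original method is not shown here; a standard completion (sketch; the
# score threshold and class-id mapping are assumptions, not the trained model's actual
# label map) would run the graph and convert the best detection into a light state:
#   (boxes, scores, classes, num) = sess.run(
#       [detection_boxes, detection_scores, detection_classes, num_detections],
#       feed_dict={image_tensor: image_np_expanded})
#   # if the top detection is confident enough, map classes[0][0] to the matching
#   # TrafficLight constant; otherwise fall back to TrafficLight.UNKNOWN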
# Copyright (c) <EMAIL>. All Rights Reserved
"""
This is a demo to run graspNet, trained on a soft-finger grasping dataset, on the test image test.jpg
Please download the pretrained weights and put them under the ./checkpoint folder before running the code
"""
from deepclaw.modules.grasp_planning.graspNet.fc_predictor import FCPredictor
import numpy as np
from PIL import Image, ImageDraw
NUM_THETAS = 9
p = FCPredictor(NUM_THETAS*2, './checkpoint/Network9-1000-100')
img = Image.open('test.jpg')
img_arr = np.array(img)
y_, p_best, grasp_pose = p.run(img_arr)
draw = ImageDraw.Draw(img, 'RGBA')
# Visualiza the prediction on the image
for i in range(15):
for j in range(28):
x = 114 + j*32
y = 114 + i*32
r = p_best[i][j] * 16
draw.ellipse((x-r, y-r, x+r, y+r), (0, 0, 255, 125))
# draw the grasp orientation if the model predict it
if NUM_THETAS > 1:
local_best_theta = np.argmax(y_[0, i, j, :, 1])
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils._testing import assert_no_warnings, assert_warns
from mars import tensor as mt
from mars.learn.metrics.pairwise import rbf_kernel
from mars.learn.neighbors import NearestNeighbors
from mars.learn.semi_supervised import LabelPropagation
estimators = [
(LabelPropagation, {'kernel': 'rbf'}),
(LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(LabelPropagation, {'kernel': lambda x, y: rbf_kernel(x, y, gamma=20)})
]
@pytest.mark.parametrize('estimator, parameters', estimators)
def test_fit_transduction(setup, estimator, parameters):
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
clf = estimator(**parameters).fit(samples, labels)
assert clf.transduction_[2].fetch() == 1
@pytest.mark.parametrize('estimator, parameters', estimators)
def test_distribution(setup, estimator, parameters):
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
return # unstable test; changes in k-NN ordering break it
else:
np.testing.assert_array_almost_equal(
np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2
)
@pytest.mark.parametrize('estimator, parameters', estimators)
def test_predict(setup, estimator, parameters):
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
clf = estimator(**parameters).fit(samples, labels)
np.testing.assert_array_equal(clf.predict([[0.5, 2.5]]).fetch(), np.array([1]))
@pytest.mark.parametrize('estimator, parameters', estimators)
def test_predict_proba(setup, estimator, parameters):
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
clf = estimator(**parameters).fit(samples, labels)
np.testing.assert_almost_equal(clf.predict_proba([[1., 1.]]).fetch(),
np.array([[0.5, 0.5]]))
import numpy as np
from typing import Tuple, List
from tdw.controller import Controller
from tdw.tdw_utils import TDWUtils
from tdw.output_data import OutputData, FlexParticles
from tdw.backend.paths import EXAMPLE_CONTROLLER_OUTPUT_PATH
class ResetScene(Controller):
"""
Minimal example of how to reset a Flex scene.
"""
def trial(self, model_name: str, mass: float, height: float) -> Tuple[np.ndarray, np.ndarray]:
"""
Drop a Flex object and record its particles.
"""
object_id = c.get_unique_id()
resp = self.communicate([{"$type": "create_flex_container"},
c.get_add_object(model_name=model_name,
library="models_flex.json",
object_id=object_id,
position={"x": 0, "y": height, "z": 0}),
{"$type": "set_flex_soft_actor",
"id": object_id,
'particle_spacing': 0.125,
'cluster_stiffness': 0.5,
"mass_scale": mass},
{"$type": "assign_flex_container",
"id": object_id,
"container_id": 0},
{"$type": "send_flex_particles",
"frequency": "always"}])
particles: List[np.ndarray] = list()
velocities: List[np.ndarray] = list()
# Let the object fall.
for i in range(250):
for j in range(len(resp) - 1):
r_id = OutputData.get_data_type_id(resp[j])
# Log the particle data on this frame.
if r_id == "flex":
flex = FlexParticles(resp[j])
for k in range(flex.get_num_objects()):
if flex.get_id(k) == object_id:
particles.append(flex.get_particles(k))
velocities.append(flex.get_velocities(k))
resp = self.communicate([])
# Reset the scene.
self.communicate([{"$type": "destroy_flex_object",
"id": object_id},
{"$type": "destroy_flex_container",
"id": 0}])
return np.array(particles), np.array(velocities)
def run(self) -> None:
"""
Run a series of trials.
"""
output_directory = EXAMPLE_CONTROLLER_OUTPUT_PATH.joinpath("reset_flex_scene")
if not output_directory.exists():
output_directory.mkdir(parents=True)
print(f"Particle data will be saved to: {output_directory}")
# Create the scene.
self.communicate([TDWUtils.create_empty_room(12, 12),
{"$type": "convexify_proc_gen_room"}])
i = 0
for model_name, height, mass in zip(["cube", "octahedron", "dumbbell"],
[1, 1.5, 1.78],
[5, 6, 2]):
particles, velocities = self.trial(model_name=model_name, height=height, mass=mass)
# Save the particle data.
np.save(str(output_directory.joinpath(f"particles_{i}").resolve()), np.array(particles))
# the velocity file name below is assumed to mirror the particle file naming
np.save(str(output_directory.joinpath(f"velocities_{i}").resolve()), np.array(velocities))
i += 1
import numpy as np
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold, StratifiedKFold
import multiprocessing as mp
from model_eval_mse import ae_eval, vae_binned_eval, vae_eval
# ============================================
# hyperparameter exploration
# ============================================
class HyperParaEvaluator():
def __init__(self,
activity, # original activity
X,  # binned activity
idx_trials, frame_trial, maze_position, choFrameOffsets,  # information needed to pick and align activity
n_models, n_split, cv_fold, n_process, # exploration setting
intermediate_dim_list,latent_dim_list, latent_fac_list, # hyperparameters to explore
epochs_train, epochs_test, batch_size):
# data
self.activity = activity
self.X = X
_, self.n_bin, self.n_neuron = X.shape
# necessary trial information
self.idx_trials = idx_trials
self.frame_trial = frame_trial
self.maze_position = maze_position
self.choFrameOffsets=choFrameOffsets
# evalution setting
self.n_models = n_models
self.n_split = n_split
self.cv_fold = cv_fold
self.n_process = n_process
self.intermediate_dim_list=intermediate_dim_list
self.latent_dim_list = latent_dim_list
self.latent_fac_list = latent_fac_list
self.epochs_train = epochs_train
self.epochs_test = epochs_test
self.batch_size = batch_size
def _split_cv_hyperpara(self, split_var, fold_var, intermediate_dim,latent_dim, latent_fac):
"""
for each hyperparameter in a fold in a split, send the job to a processor
:param split_var: int, idx of current split
fold_var: int, idx of current fold
latent_dim: int, hyperparameter
latent_fac: int, hyperparameter
:return:
"""
# each time with a different train-test split
trainval_pos = self.trainval_pos_splits[split_var]
train_index = self.train_index_split_cv[split_var][fold_var]
val_index = self.val_index_split_cv[split_var][fold_var]
inter_idx=np.where(np.array(self.intermediate_dim_list) == intermediate_dim)[0][0]
dim_idx = np.where(np.array(self.latent_dim_list) == latent_dim)[0][0]
fac_idx = np.where(np.array(self.latent_fac_list) == latent_fac)[0][0]
train_idx_list = [self.idx_trials[trainval_pos[i]] for i in train_index]
val_idx_list = [self.idx_trials[trainval_pos[i]] for i in val_index]
bin_training_data = self.X[trainval_pos[train_index], :, :]
bin_validation_data = self.X[trainval_pos[val_index], :, :]
nobin_training_data = [self.activity[self.frame_trial == self.idx_trials[trainval_pos[i]]] for i in train_index]
nobin_validation_data = [self.activity[self.frame_trial == self.idx_trials[trainval_pos[i]]] for i in val_index]
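        # Each result below is an (index, value) pair; the index tuple addresses a
        # slot in mse_cv_summary, with its first entry (0-11) selecting the
        # model / train-vs-val / maze-vs-ITI combination.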
# 1. ae
mse_val_maze, mse_train_maze,mse_val_ITI,mse_train_ITI= ae_eval(bin_training_data, bin_validation_data, True,
intermediate_dim,latent_dim, latent_fac,
epochs=self.epochs_train, batch_size=self.batch_size)
ae_train_mse_maze=((0,split_var, fold_var, inter_idx,dim_idx, fac_idx),mse_train_maze)
ae_val_mse_maze=((1,split_var, fold_var, inter_idx,dim_idx, fac_idx),mse_val_maze)
ae_train_mse_ITI = ((2, split_var, fold_var, inter_idx, dim_idx, fac_idx), mse_train_ITI)
ae_val_mse_ITI = ((3, split_var, fold_var, inter_idx, dim_idx, fac_idx), mse_val_ITI)
# 2. vae_binned
mse_val_maze, mse_train_maze,mse_val_ITI,mse_train_ITI= vae_binned_eval(bin_training_data, bin_validation_data, True,
intermediate_dim,latent_dim, latent_fac,
epochs=self.epochs_train, batch_size=self.batch_size)
vae_binned_train_mse_maze = ((4, split_var, fold_var, inter_idx,dim_idx, fac_idx), mse_train_maze)
vae_binned_val_mse_maze = ((5, split_var, fold_var, inter_idx, dim_idx, fac_idx), mse_val_maze)
vae_binned_train_mse_ITI = ((6, split_var, fold_var, inter_idx, dim_idx, fac_idx), mse_train_ITI)
vae_binned_val_mse_ITI = ((7, split_var, fold_var, inter_idx, dim_idx, fac_idx), mse_val_ITI)
# 3.vae
mse_val_maze, mse_train_maze,mse_val_ITI,mse_train_ITI= vae_eval(train_idx_list, val_idx_list,
self.frame_trial, self.maze_position,self.choFrameOffsets,
nobin_training_data, nobin_validation_data, True,
intermediate_dim,latent_dim, latent_fac,
self.epochs_train, batch_size=self.batch_size)
vae_train_mse_maze = ((8, split_var, fold_var, inter_idx,dim_idx, fac_idx), mse_train_maze)
vae_val_mse_maze = ((9, split_var, fold_var, inter_idx,dim_idx, fac_idx), mse_val_maze)
vae_train_mse_ITI = ((10, split_var, fold_var, inter_idx,dim_idx, fac_idx), mse_train_ITI)
vae_val_mse_ITI = ((11, split_var, fold_var, inter_idx,dim_idx, fac_idx), mse_val_ITI)
return (ae_train_mse_maze,ae_val_mse_maze,ae_train_mse_ITI,ae_val_mse_ITI,
vae_binned_train_mse_maze,vae_binned_val_mse_maze,vae_binned_train_mse_ITI,vae_binned_val_mse_ITI,
vae_train_mse_maze,vae_val_mse_maze,vae_train_mse_ITI,vae_val_mse_ITI)
def _unpack(self, idx):
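        # Decode the flat job index into (split, fold, intermediate_dim,
        # latent_dim, latent_fac): fill an index cube with 0..N-1 and look up
        # where it equals idx.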
a = np.zeros(self.n_split * self.cv_fold * len(self.intermediate_dim_list) * len(self.latent_dim_list) * len(
self.latent_fac_list))
for i in range(self.n_split * self.cv_fold * len(self.intermediate_dim_list) * len(self.latent_dim_list) * len(
self.latent_fac_list)):
a[i] = i
a = a.reshape((self.n_split, self.cv_fold, len(self.intermediate_dim_list), len(self.latent_dim_list),
len(self.latent_fac_list)))
split_var = list(range(self.n_split))[np.where(a == idx)[0][0]]
fold_var = list(range(self.cv_fold))[np.where(a == idx)[1][0]]
intermediate_dim = self.intermediate_dim_list[np.where(a == idx)[2][0]]
latent_dim = self.latent_dim_list[np.where(a == idx)[3][0]]
latent_fac = self.latent_fac_list[np.where(a == idx)[4][0]]
result = self._split_cv_hyperpara(split_var, fold_var, intermediate_dim, latent_dim, latent_fac)
return result
def _collect_result(self, result):
self.results.append(result)
print(len(self.results))
def evaluate(self):
'explore the influence of hyperparameters on reconstruction performance'
# ============================================
# explore hyperparameters through multiple split and cross validation
# ============================================
self.trainval_pos_splits = {}
self.testing_pos_splits = {}
self.train_index_split_cv = {}
for i in range(self.n_split):
self.train_index_split_cv[i] = {}
self.val_index_split_cv = {}
for i in range(self.n_split):
self.val_index_split_cv[i] = {}
for split_var in range(self.n_split):
# each time with a different train-test split
pos = np.array(range(self.idx_trials.shape[0]))
np.random.shuffle(pos)
trainval_pos, testing_pos = train_test_split(pos, test_size=0.167)
self.trainval_pos_splits[split_var] = trainval_pos
self.testing_pos_splits[split_var] = testing_pos
kf = KFold(n_splits=self.cv_fold)
fold_var = 0
for train_index, val_index in kf.split(trainval_pos):
self.train_index_split_cv[split_var][fold_var] = train_index
self.val_index_split_cv[split_var][fold_var] = val_index
fold_var = fold_var + 1
self.results=[]
pool = mp.Pool(self.n_process)
num = self.n_split * self.cv_fold * len(self.intermediate_dim_list)*\
len(self.latent_dim_list) * len(self.latent_fac_list)
for idx in range(num):
print(idx)
pool.apply_async(self._unpack, args=(idx,), callback=self._collect_result)
pool.close()
pool.join()
self.mse_cv_summary = np.zeros(
(4 * self.n_models, self.n_split, self.cv_fold,
len(self.intermediate_dim_list),len(self.latent_dim_list), len(self.latent_fac_list)))
for i in range(len(self.results)):
pack_i=self.results[i]
for j in range(12): # for the 12 items in each pack
self.mse_cv_summary[pack_i[j][0]]=pack_i[j][1]
np.save('mse_cv_summary_0715.npy', self.mse_cv_summary)
# ============================================
# choose optimal hyperparameters and test
# ============================================
# the following lists will eventually have length = # of split replications
# will be packed into a [15, n_split] array to return
chosen_intermediate_dim_ae=[]
chosen_latent_dim_ae = []
chosen_latent_fac_ae = []
chosen_intermediate_dim_vae_binned = []
chosen_latent_dim_vae_binned = []
chosen_latent_fac_vae_binned = []
chosen_intermediate_dim_vae = []
chosen_latent_dim_vae = []
chosen_latent_fac_vae = []
mse_test_ae_maze = []
mse_test_ae_ITI = []
mse_test_vae_binned_maze = []
mse_test_vae_binned_ITI = []
mse_test_vae_maze = []
mse_test_vae_ITI=[]
for split_var in range(self.n_split):
trainval_pos = self.trainval_pos_splits[split_var]
testing_pos = self.testing_pos_splits[split_var]
trainval_idx_list = [self.idx_trials[i] for i in trainval_pos]
test_idx_list = [self.idx_trials[i] for i in testing_pos]
all_bin_training_data = self.X[trainval_pos, :, :]
all_bin_testing_data = self.X[testing_pos, :, :]
all_nobin_training_data = [self.activity[self.frame_trial == self.idx_trials[i]] for i in trainval_pos]
all_nobin_testing_data = [self.activity[self.frame_trial == self.idx_trials[i]] for i in testing_pos]
mse_val_ae_maze = self.mse_cv_summary[1]
mse_val_vae_binned_maze = self.mse_cv_summary[5]
mse_val_vae_maze = self.mse_cv_summary[9]
            ave_mse_val_ae_maze = np.average(mse_val_ae_maze[split_var, :, :, :, :], axis=0)
"""
{This script tests best fit SMHM for all surveys and compares the resulting
model SMF for both red and blue galaxies with those from data}
"""
# Libs
from halotools.empirical_models import PrebuiltSubhaloModelFactory
from cosmo_utils.utils.stats_funcs import Stats_one_arr
from cosmo_utils.utils import work_paths as cwpaths
from halotools.sim_manager import CachedHaloCatalog
from collections import OrderedDict
import matplotlib.pyplot as plt
from matplotlib import rc
import seaborn as sns
import pandas as pd
import numpy as np
import argparse
import scipy
import math
import os
__author__ = '{<NAME>}'
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']}, size=15)
rc('text', usetex=True)
def reading_catls(filename, catl_format='.hdf5'):
"""
Function to read ECO/RESOLVE catalogues.
Parameters
----------
filename: string
path and name of the ECO/RESOLVE catalogue to read
catl_format: string, optional (default = '.hdf5')
type of file to read.
Options:
- '.hdf5': Reads in a catalogue in HDF5 format
Returns
-------
mock_pd: pandas DataFrame
DataFrame with galaxy/group information
Examples
--------
# Specifying `filename`
>>> filename = 'ECO_catl.hdf5'
# Reading in Catalogue
    >>> mock_pd = reading_catls(filename, catl_format='.hdf5')
>>> mock_pd.head()
x y z vx vy vz \
0 10.225435 24.778214 3.148386 356.112457 -318.894409 366.721832
1 20.945772 14.500367 -0.237940 168.731766 37.558834 447.436951
2 21.335835 14.808488 0.004653 967.204407 -701.556763 -388.055115
3 11.102760 21.782235 2.947002 611.646484 -179.032089 113.388794
4 13.217764 21.214905 2.113904 120.689598 -63.448833 400.766541
loghalom cs_flag haloid halo_ngal ... cz_nodist vel_tot \
0 12.170 1 196005 1 ... 2704.599189 602.490355
1 11.079 1 197110 1 ... 2552.681697 479.667489
2 11.339 1 197131 1 ... 2602.377466 1256.285409
3 11.529 1 199056 1 ... 2467.277182 647.318259
4 10.642 1 199118 1 ... 2513.381124 423.326770
vel_tan vel_pec ra_orig groupid M_group g_ngal g_galtype \
0 591.399858 -115.068833 215.025116 0 11.702527 1 1
1 453.617221 155.924074 182.144134 1 11.524787 4 0
2 1192.742240 394.485714 182.213220 1 11.524787 4 0
3 633.928896 130.977416 210.441320 2 11.502205 1 1
4 421.064495 43.706352 205.525386 3 10.899680 1 1
halo_rvir
0 0.184839
1 0.079997
2 0.097636
3 0.113011
4 0.057210
"""
## Checking if file exists
if not os.path.exists(filename):
msg = '`filename`: {0} NOT FOUND! Exiting..'.format(filename)
raise ValueError(msg)
## Reading file
if catl_format=='.hdf5':
mock_pd = pd.read_hdf(filename)
else:
msg = '`catl_format` ({0}) not supported! Exiting...'.format(catl_format)
raise ValueError(msg)
return mock_pd
def read_data(path_to_file, survey):
"""
Reads survey catalog from file
Parameters
----------
path_to_file: `string`
Path to survey catalog file
survey: `string`
Name of survey
Returns
---------
catl: `pandas.DataFrame`
Survey catalog with grpcz, abs rmag and stellar mass limits
volume: `float`
Volume of survey
z_median: `float`
Median redshift of survey
"""
if survey == 'eco':
columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag',
'logmstar', 'logmgas', 'grp', 'grpn', 'logmh', 'logmh_s',
'fc', 'grpmb', 'grpms', 'modelu_rcorr', 'umag', 'rmag']
# 13878 galaxies
eco_buff = pd.read_csv(path_to_file, delimiter=",", header=0,
usecols=columns)
# 6456 galaxies
catl = eco_buff.loc[(eco_buff.grpcz.values >= 3000) &
(eco_buff.grpcz.values <= 7000) &
(eco_buff.absrmag.values <= -17.33)]
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
cvar = 0.125
z_median = np.median(catl.grpcz.values) / (3 * 10**5)
elif survey == 'resolvea' or survey == 'resolveb':
columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag',
'logmstar', 'logmgas', 'grp', 'grpn', 'grpnassoc', 'logmh',
'logmh_s', 'fc', 'grpmb', 'grpms', 'f_a', 'f_b',
'modelu_rcorr']
# 2286 galaxies
resolve_live18 = pd.read_csv(path_to_file, delimiter=",", header=0,
usecols=columns)
if survey == 'resolvea':
catl = resolve_live18.loc[(resolve_live18.f_a.values == 1) &
(resolve_live18.grpcz.values > 4500) &
(resolve_live18.grpcz.values < 7000) &
(resolve_live18.absrmag.values < -17.33)]
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
cvar = 0.30
z_median = np.median(resolve_live18.grpcz.values) / (3 * 10**5)
elif survey == 'resolveb':
# 487 - cz, 369 - grpcz
catl = resolve_live18.loc[(resolve_live18.f_b.values == 1) &
(resolve_live18.grpcz.values > 4500) &
(resolve_live18.grpcz.values < 7000) &
(resolve_live18.absrmag.values < -17)]
volume = 4709.8373 # *2.915 #Survey volume without buffer [Mpc/h]^3
cvar = 0.58
z_median = np.median(resolve_live18.grpcz.values) / (3 * 10**5)
return catl, volume, z_median
def read_chi2(path_to_file):
"""
Reads chi-squared values from file
Parameters
----------
path_to_file: string
Path to chi-squared values file
Returns
---------
chi2: array
Array of reshaped chi^2 values to match chain values
"""
chi2_df = pd.read_csv(path_to_file,header=None,names=['chisquared'])
if mf_type == 'smf' and survey == 'eco' and ver==1.0:
# Needed to reshape since flattened along wrong axis,
# didn't correspond to chain
test_reshape = chi2_df.chisquared.values.reshape((1000,250))
chi2 = np.ndarray.flatten(np.array(test_reshape),'F')
else:
chi2 = chi2_df.chisquared.values
return chi2
def read_mcmc(path_to_file):
"""
Reads mcmc chain from file
Parameters
----------
path_to_file: string
Path to mcmc chain file
Returns
---------
emcee_table: pandas dataframe
Dataframe of mcmc chain values with NANs removed
"""
colnames = ['mhalo_c','mstellar_c','lowmass_slope','highmass_slope',\
'scatter']
if mf_type == 'smf' and survey == 'eco' and ver==1.0:
emcee_table = pd.read_csv(path_to_file,names=colnames,sep='\s+',\
dtype=np.float64)
else:
emcee_table = pd.read_csv(path_to_file, names=colnames,
delim_whitespace=True, header=None)
emcee_table = emcee_table[emcee_table.mhalo_c.values != '#']
emcee_table.mhalo_c = emcee_table.mhalo_c.astype(np.float64)
emcee_table.mstellar_c = emcee_table.mstellar_c.astype(np.float64)
emcee_table.lowmass_slope = emcee_table.lowmass_slope.astype(np.float64)
# Cases where last parameter was a NaN and its value was being written to
# the first element of the next line followed by 4 NaNs for the other
# parameters
for idx,row in enumerate(emcee_table.values):
if np.isnan(row)[4] == True and np.isnan(row)[3] == False:
scatter_val = emcee_table.values[idx+1][0]
row[4] = scatter_val
# Cases where rows of NANs appear
emcee_table = emcee_table.dropna(axis='index', how='any').\
reset_index(drop=True)
return emcee_table
def get_paramvals_percentile(table, percentile, chi2_arr):
"""
    Isolates the lowest chi^2 values within the given percentile and takes a random subsample
Parameters
----------
table: pandas dataframe
Mcmc chain dataframe
    percentile: int
Percentile to use
chi2_arr: array
Array of chi^2 values
Returns
---------
mcmc_table_pctl: pandas dataframe
        Random subsample of the lowest chi^2 values within the given percentile
"""
percentile = percentile/100
table['chi2'] = chi2_arr
table = table.sort_values('chi2').reset_index(drop=True)
slice_end = int(percentile*len(table))
mcmc_table_pctl = table[:slice_end]
# Best fit params are the parameters that correspond to the smallest chi2
bf_params = mcmc_table_pctl.drop_duplicates().reset_index(drop=True).\
values[0][:5]
    # Sample a random 10 of the lowest chi2 values
mcmc_table_pctl = mcmc_table_pctl.drop_duplicates().sample(10)
return mcmc_table_pctl, bf_params
def halocat_init(halo_cat,z):
"""
Initial population of halo catalog using populate_mock function
Parameters
----------
halo_cat: string
Path to halo catalog
z: float
Median redshift of survey
Returns
---------
model: halotools model instance
Model based on behroozi 2010 SMHM
"""
halocat = CachedHaloCatalog(fname=halo_cat, update_cached_fname=True)
model = PrebuiltSubhaloModelFactory('behroozi10', redshift=z, \
prim_haloprop_key='halo_macc')
model.populate_mock(halocat,seed=5)
return model
def populate_mock(theta):
"""
Populate mock based on five parameter values
Parameters
----------
theta: array
Array of parameter values
Returns
---------
gals_df: pandas dataframe
Dataframe of mock catalog
"""
mhalo_characteristic, mstellar_characteristic, mlow_slope, mhigh_slope,\
mstellar_scatter = theta
model_init.param_dict['smhm_m1_0'] = mhalo_characteristic
model_init.param_dict['smhm_m0_0'] = mstellar_characteristic
model_init.param_dict['smhm_beta_0'] = mlow_slope
model_init.param_dict['smhm_delta_0'] = mhigh_slope
model_init.param_dict['scatter_model_param1'] = mstellar_scatter
model_init.mock.populate()
if survey == 'eco' or survey == 'resolvea':
limit = np.round(np.log10((10**8.9) / 2.041), 1)
sample_mask = model_init.mock.galaxy_table['stellar_mass'] >= 10**limit
elif survey == 'resolveb':
limit = np.round(np.log10((10**8.7) / 2.041), 1)
sample_mask = model_init.mock.galaxy_table['stellar_mass'] >= 10**limit
gals = model_init.mock.galaxy_table[sample_mask]
gals_df = gals.to_pandas()
return gals_df
def assign_cen_sat_flag(gals_df):
"""
Assign centrals and satellites flag to dataframe
Parameters
----------
gals_df: pandas dataframe
Mock catalog
Returns
---------
gals_df: pandas dataframe
Mock catalog with centrals/satellites flag as new column
"""
C_S = []
for idx in range(len(gals_df)):
if gals_df['halo_hostid'][idx] == gals_df['halo_id'][idx]:
C_S.append(1)
else:
C_S.append(0)
C_S = np.array(C_S)
gals_df['C_S'] = C_S
return gals_df
def get_host_halo_mock(gals_df):
"""
Get host halo mass from mock catalog
Parameters
----------
gals_df: pandas dataframe
Mock catalog
Returns
---------
cen_halos: array
Array of central host halo masses
sat_halos: array
Array of satellite host halo masses
"""
df = gals_df.copy()
cen_halos = []
sat_halos = []
for idx,value in enumerate(df['C_S']):
if value == 1:
cen_halos.append(df['halo_mvir_host_halo'][idx])
elif value == 0:
sat_halos.append(df['halo_mvir_host_halo'][idx])
cen_halos = np.array(cen_halos)
sat_halos = np.array(sat_halos)
return cen_halos, sat_halos
def get_stellar_mock(gals_df):
"""
Get stellar mass from mock catalog
Parameters
----------
gals_df: pandas dataframe
Mock catalog
Returns
---------
cen_gals: array
Array of central stellar masses
sat_gals: array
Array of satellite stellar masses
"""
df = gals_df.copy()
cen_gals = []
sat_gals = []
for idx,value in enumerate(df['C_S']):
if value == 1:
cen_gals.append(df['stellar_mass'][idx])
elif value == 0:
sat_gals.append(df['stellar_mass'][idx])
cen_gals = np.array(cen_gals)
sat_gals = np.array(sat_gals)
return cen_gals, sat_gals
def halo_quenching_model(gals_df):
"""
Apply halo quenching model from Zu and Mandelbaum 2015
Parameters
----------
gals_df: pandas dataframe
Mock catalog
Returns
---------
f_red_cen: array
Array of central red fractions
f_red_sat: array
Array of satellite red fractions
"""
# parameter values from Table 1 of Zu and Mandelbaum 2015 "prior case"
Mh_qc = 10**12.20 # Msun/h
Mh_qs = 10**12.17 # Msun/h
mu_c = 0.38
mu_s = 0.15
cen_hosthalo_mass_arr, sat_hosthalo_mass_arr = get_host_halo_mock(gals_df)
f_red_cen = 1 - np.exp(-((cen_hosthalo_mass_arr/Mh_qc)**mu_c))
f_red_sat = 1 - np.exp(-((sat_hosthalo_mass_arr/Mh_qs)**mu_s))
return f_red_cen, f_red_sat
def hybrid_quenching_model(gals_df):
"""
Apply hybrid quenching model from Zu and Mandelbaum 2015
Parameters
----------
gals_df: pandas dataframe
Mock catalog
Returns
---------
f_red_cen: array
Array of central red fractions
f_red_sat: array
Array of satellite red fractions
"""
# parameter values from Table 1 of Zu and Mandelbaum 2015 "prior case"
Mstar_q = 10**10.5 # Msun/h
Mh_q = 10**13.76 # Msun/h
mu = 0.69
nu = 0.15
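    # NOTE: the assignments below overwrite the Zu & Mandelbaum prior values
    # above (presumably with best-fit parameters from a separate analysis).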
Mstar_q = 10**10.167141 # Msun/h
Mh_q = 10**12.325332 # Msun/h
mu = 0.773228
nu = 7.652937
cen_hosthalo_mass_arr, sat_hosthalo_mass_arr = get_host_halo_mock(gals_df)
cen_stellar_mass_arr, sat_stellar_mass_arr = get_stellar_mock(gals_df)
f_red_cen = 1 - np.exp(-((cen_stellar_mass_arr/Mstar_q)**mu))
g_Mstar = np.exp(-((sat_stellar_mass_arr/Mstar_q)**mu))
h_Mh = np.exp(-((sat_hosthalo_mass_arr/Mh_q)**nu))
f_red_sat = 1 - (g_Mstar * h_Mh)
return f_red_cen, f_red_sat
def assign_colour_label_mock(f_red_cen, f_red_sat, gals_df, drop_fred=False):
"""
Assign colour label to mock catalog
Parameters
----------
f_red_cen: array
Array of central red fractions
f_red_sat: array
Array of satellite red fractions
gals_df: pandas Dataframe
Mock catalog
drop_fred: boolean
Whether or not to keep red fraction column after colour has been
assigned
Returns
---------
df: pandas Dataframe
Dataframe with colour label and random number assigned as
new columns
"""
# Copy of dataframe
df = gals_df.copy()
# Saving labels
color_label_arr = [[] for x in range(len(df))]
rng_arr = [[] for x in range(len(df))]
# Adding columns for f_red to df
df.loc[:, 'f_red'] = np.zeros(len(df))
df.loc[df['C_S'] == 1, 'f_red'] = f_red_cen
df.loc[df['C_S'] == 0, 'f_red'] = f_red_sat
# Converting to array
f_red_arr = df['f_red'].values
# Looping over galaxies
for ii, cs_ii in enumerate(df['C_S']):
# Draw a random number
rng = np.random.uniform()
# Comparing against f_red
if (rng >= f_red_arr[ii]):
color_label = 'B'
else:
color_label = 'R'
# Saving to list
color_label_arr[ii] = color_label
rng_arr[ii] = rng
##
## Assigning to DataFrame
df.loc[:, 'colour_label'] = color_label_arr
df.loc[:, 'rng'] = rng_arr
    # Dropping 'f_red' column
if drop_fred:
df.drop('f_red', axis=1, inplace=True)
return df
def assign_colour_label_data(catl):
"""
Assign colour label to data
Parameters
----------
catl: pandas Dataframe
Data catalog
Returns
---------
catl: pandas Dataframe
Data catalog with colour label assigned as new column
"""
logmstar_arr = catl.logmstar.values
u_r_arr = catl.modelu_rcorr.values
colour_label_arr = np.empty(len(catl), dtype='str')
for idx, value in enumerate(logmstar_arr):
# Divisions taken from Moffett et al. 2015 equation 1
if value <= 9.1:
if u_r_arr[idx] > 1.457:
colour_label = 'R'
else:
colour_label = 'B'
if value > 9.1 and value < 10.1:
divider = 0.24 * value - 0.7
if u_r_arr[idx] > divider:
colour_label = 'R'
else:
colour_label = 'B'
if value >= 10.1:
if u_r_arr[idx] > 1.7:
colour_label = 'R'
else:
colour_label = 'B'
colour_label_arr[idx] = colour_label
catl['colour_label'] = colour_label_arr
return catl
def assign_colour_mock(gals_df, catl, stat):
"""
Assign colour to mock catalog
Parameters
----------
gals_df: pandas Dataframe
Mock catalog
catl: pandas Dataframe
Data catalog
stat: string
Specify whether mean or median statistic is used to assign colour
from data to mock catalog
Returns
---------
gals_df: pandas Dataframe
Dataframe with model corrected (u-r) colour assigned as new column
"""
logmstar_arr_mock = np.log10(gals_df.stellar_mass.values)
logmstar_arr_data = catl.logmstar.values
# Both measurements of stellar masses have to be in the same h=1 unit
logmstar_arr_data = np.log10((10**logmstar_arr_data) / 2.041)
u_r_arr_data = catl.modelu_rcorr.values
# Either assign the mean or median colour within each bin of stellar mass
if stat == 'mean':
x,y,x_err,y_err = Stats_one_arr(logmstar_arr_data, u_r_arr_data, 0.005,
statfunc=np.nanmean)
elif stat == 'median':
x,y,x_err,y_err = Stats_one_arr(logmstar_arr_data, u_r_arr_data, 0.005,
statfunc=np.nanmedian)
# Assign mean or median colour based on which data bin the mock stellar mass
# falls in
colour_arr = np.zeros(len(gals_df))
for idx1, value1 in enumerate(logmstar_arr_mock):
colour = 0
for idx2, value2 in enumerate(x):
if value1 > value2:
colour = y[idx2]
break
colour_arr[idx1] = colour
gals_df['modelu_rcorr'] = colour_arr
return gals_df
def diff_smf(mstar_arr, volume, h1_bool):
"""
Calculates differential stellar mass function in units of h=1.0
Parameters
----------
mstar_arr: numpy array
Array of stellar masses
volume: float
Volume of survey or simulation
h1_bool: boolean
True if units of masses are h=1, False if units of masses are not h=1
Returns
---------
maxis: array
Array of x-axis mass values
phi: array
Array of y-axis values
err_tot: array
Array of error values per bin
bins: array
Array of bin edge values
"""
if not h1_bool:
# changing from h=0.7 to h=1 assuming h^-2 dependence
logmstar_arr = np.log10((10**mstar_arr) / 2.041)
else:
logmstar_arr = np.log10(mstar_arr)
if survey == 'eco' or survey == 'resolvea':
bin_min = np.round(np.log10((10**8.9) / 2.041), 1)
if survey == 'eco':
bin_max = np.round(np.log10((10**11.8) / 2.041), 1)
elif survey == 'resolvea':
# different to avoid nan in inverse corr mat
bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
bins = np.linspace(bin_min, bin_max, 7)
elif survey == 'resolveb':
bin_min = np.round(np.log10((10**8.7) / 2.041), 1)
bin_max = np.round(np.log10((10**11.8) / 2.041), 1)
bins = np.linspace(bin_min, bin_max, 7)
# Unnormalized histogram and bin edges
counts, edg = np.histogram(logmstar_arr, bins=bins) # paper used 17 bins
dm = edg[1] - edg[0] # Bin width
maxis = 0.5 * (edg[1:] + edg[:-1]) # Mass axis i.e. bin centers
# Normalized to volume and bin width
err_poiss = np.sqrt(counts) / (volume * dm)
err_tot = err_poiss
phi = counts / (volume * dm) # not a log quantity
phi = np.log10(phi)
return maxis, phi, err_tot, bins, counts
def get_err_data(survey, path):
"""
Calculate error in data SMF from mocks
Parameters
----------
survey: string
Name of survey
path: string
Path to mock catalogs
Returns
---------
err_total: array
Standard deviation of phi values between all mocks and for all galaxies
err_red: array
Standard deviation of phi values between all mocks and for red galaxies
err_blue: array
Standard deviation of phi values between all mocks and for blue galaxies
"""
if survey == 'eco':
mock_name = 'ECO'
num_mocks = 8
min_cz = 3000
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolvea':
mock_name = 'A'
num_mocks = 59
min_cz = 4500
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolveb':
mock_name = 'B'
num_mocks = 104
min_cz = 4500
max_cz = 7000
mag_limit = -17
mstar_limit = 8.7
volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3
phi_arr_total = []
phi_arr_red = []
phi_arr_blue = []
max_arr_blue = []
err_arr_blue = []
box_id_arr = np.linspace(5001,5008,8)
for box in box_id_arr:
box = int(box)
temp_path = path + '{0}/{1}_m200b_catls/'.format(box,
mock_name)
for num in range(num_mocks):
filename = temp_path + '{0}_cat_{1}_Planck_memb_cat.hdf5'.format(
mock_name, num)
mock_pd = reading_catls(filename)
# Using the same survey definition as in mcmc smf i.e excluding the
# buffer
mock_pd = mock_pd.loc[(mock_pd.cz.values >= min_cz) & \
(mock_pd.cz.values <= max_cz) & (mock_pd.M_r.values <= mag_limit) &\
(mock_pd.logmstar.values >= mstar_limit)]
logmstar_arr = mock_pd.logmstar.values
u_r_arr = mock_pd.u_r.values
colour_label_arr = np.empty(len(mock_pd), dtype='str')
for idx, value in enumerate(logmstar_arr):
if value <= 9.1:
if u_r_arr[idx] > 1.457:
colour_label = 'R'
else:
colour_label = 'B'
elif value > 9.1 and value < 10.1:
divider = 0.24 * value - 0.7
if u_r_arr[idx] > divider:
colour_label = 'R'
else:
colour_label = 'B'
elif value >= 10.1:
if u_r_arr[idx] > 1.7:
colour_label = 'R'
else:
colour_label = 'B'
colour_label_arr[idx] = colour_label
mock_pd['colour_label'] = colour_label_arr
#Measure SMF of mock using diff_smf function
max_total, phi_total, err_total, bins_total, counts_total = \
diff_smf(logmstar_arr, volume, False)
max_red, phi_red, err_red, bins_red, counts_red = \
diff_smf(mock_pd.logmstar.loc[mock_pd.colour_label.values == 'R'],
volume, False)
max_blue, phi_blue, err_blue, bins_blue, counts_blue = \
diff_smf(mock_pd.logmstar.loc[mock_pd.colour_label.values == 'B'],
volume, False)
phi_arr_total.append(phi_total)
phi_arr_red.append(phi_red)
phi_arr_blue.append(phi_blue)
max_arr_blue.append(max_blue)
err_arr_blue.append(err_blue)
phi_arr_total = np.array(phi_arr_total)
phi_arr_red = np.array(phi_arr_red)
phi_arr_blue = np.array(phi_arr_blue)
max_arr_blue = np.array(max_arr_blue)
err_arr_blue = np.array(err_arr_blue)
err_total = np.std(phi_arr_total, axis=0)
err_red = np.std(np.log10(phi_arr_red), axis=0)
err_blue = np.std(np.log10(phi_arr_blue), axis=0)
return err_total, err_red, err_blue, max_arr_blue, phi_arr_blue, err_arr_blue
def plot_mstellar_colour_data(catl):
"""
Plots stellar mass vs colour for data catalog
Parameters
----------
catl: pandas Dataframe
Data catalog
"""
u_r_arr = catl.modelu_rcorr.values
logmstar_arr = catl.logmstar.values
x = logmstar_arr
# Values from Moffett et al. 2015 equation 1
if survey == 'eco' or survey == 'resolvea':
div_lowest_xmin = 8.9
elif survey == 'resolveb':
div_lowest_xmin = 8.7
div_lowest_xmax = 9.1
div_lowest_y = 1.457
div_mid_xmin = div_lowest_xmax
div_mid_xmax = 10.1
div_mid_x = np.unique(x[np.where((x >= div_mid_xmin) & (x <= div_mid_xmax))])
div_mid_y = 0.24 * div_mid_x - 0.7
div_max_xmin = div_mid_xmax
div_max_xmax = x.max()
div_max_y = 1.7
# # unique because otherwise when plotting there were too many points and the
# # dashed line appeared solid
# x_new = np.unique(x[np.where((x >= 9.1) & (x <= 10.1))])
# y = 0.24*x_new - 0.7
# # Joining arrays
# div_x_arr = [8.9, 9.09, 10.1, 11.79]
# div_y_arr = [1.457, 1.457, 1.7, 1.7]
# div_x_arr = np.concatenate((div_x_arr, x_new))
# div_y_arr = np.concatenate((div_y_arr, y))
# # Sorting out values
# div_x_sort_idx = np.argsort(div_x_arr)
# div_arr = np.vstack((div_x_arr[div_x_sort_idx], div_y_arr[div_x_sort_idx]))
plt.clf()
plt.close()
fig1 = plt.figure(figsize=(10,10))
ax1 = fig1.add_subplot(111)
ax = sns.kdeplot(logmstar_arr, u_r_arr, ax=ax1, cmap='Blues', shade=True,
shade_lowest=False)
ax.scatter(logmstar_arr,u_r_arr,c='#921063',marker='x',alpha=0.1,zorder=1)
ax1.hlines(y=div_lowest_y,xmin=div_lowest_xmin,xmax=div_lowest_xmax,
linestyle='--',color='k', linewidth=2,zorder=10)
ax1.plot(div_mid_x,div_mid_y, color='k', linestyle='--',linewidth=2)
# ax1.plot(div_arr[0], div_arr[1], linestyle='--', color='k', linewidth=2)
ax1.hlines(y=div_max_y,xmin=div_max_xmin,xmax=div_max_xmax,linestyle='--',
color='k', linewidth=2,zorder=10)
plt.xlabel(r'\boldmath$\log_{10}\ M_\star \left[\mathrm{M_\odot}\, \right]$')
plt.ylabel(r'\boldmath$ (u-r)^e$')
if survey == 'eco':
plt.title('ECO')
elif survey == 'resolvea':
plt.title('RESOLVE-A')
elif survey == 'resolveb':
plt.title('RESOLVE-B')
plt.show()
def plot_eco_mstellar_colour_mock(gals_df, model):
"""
Plots stellar mass vs colour from mock catalog
Parameters
----------
gals_df: pandas Dataframe
Dataframe of mock catalog
model: string
Hybrid or halo quenching model
"""
fig1 = plt.figure(figsize=(10,10))
ax1 = fig1.add_subplot(111)
gals_df_subset = gals_df.loc[gals_df.modelu_rcorr.values > 0]
ax = sns.kdeplot(np.log10(gals_df_subset.stellar_mass.values),
gals_df_subset.modelu_rcorr.values, ax=ax1, cmap='Blues', shade=True,
shade_lowest=False)
ax.scatter(np.log10(gals_df_subset.stellar_mass.values),
gals_df_subset.modelu_rcorr.values,c='#921063',marker='x',alpha=0.1,zorder=1)
plt.xlabel(r'\boldmath$\log_{10}\ M_\star \left[\mathrm{M_\odot}\, \right]$')
plt.ylabel(r'\boldmath$ (u-r)^e$')
if model == 'hybrid':
plt.title(r'Hybrid quenching model')
elif model == 'halo':
plt.title(r'Halo quenching model')
plt.show()
def measure_all_smf(table, volume, data_bool):
"""
Calculates differential stellar mass function for all, red and blue galaxies
from mock/data
Parameters
----------
table: pandas Dataframe
Dataframe of either mock or data
volume: float
Volume of simulation/survey
cvar: float
Cosmic variance error
data_bool: Boolean
Data or mock
Returns
---------
3 multidimensional arrays of stellar mass, phi, total error in SMF and
counts per bin for all, red and blue galaxies
"""
colour_col = 'colour_label'
if data_bool:
logmstar_col = 'logmstar'
max_total, phi_total, err_total, bins_total, counts_total = \
diff_smf(table[logmstar_col], volume, False)
max_red, phi_red, err_red, bins_red, counts_red = \
diff_smf(table[logmstar_col].loc[table[colour_col] == 'R'],
volume, False)
max_blue, phi_blue, err_blue, bins_blue, counts_blue = \
diff_smf(table[logmstar_col].loc[table[colour_col] == 'B'],
volume, False)
else:
logmstar_col = 'stellar_mass'
max_total, phi_total, err_total, bins_total, counts_total = \
diff_smf(np.log10(table[logmstar_col]), volume, True)
max_red, phi_red, err_red, bins_red, counts_red = \
diff_smf(np.log10(table[logmstar_col].loc[table[colour_col] == 'R']
), volume, True)
max_blue, phi_blue, err_blue, bins_blue, counts_blue = \
diff_smf(np.log10(table[logmstar_col].loc[table[colour_col] == 'B']
), volume, True)
return [max_total, phi_total, err_total, counts_total] , \
[max_red, phi_red, err_red, counts_red] , \
[max_blue, phi_blue, err_blue, counts_blue]
def plot_smf(total_data, red_data, blue_data, total_model, red_model,
blue_model, model, max_blue_mocks, phi_blue_mocks, err_blue_mocks):
"""
Plots stellar mass function for all, red and blue galaxies for data and
for halo/hybrid model
Parameters
----------
total_data: array
Multidimensional array of stellar mass, phi, total error in SMF and
counts per bin for all galaxies from data
red_data: array
Multidimensional array of stellar mass, phi, total error in SMF and
counts per bin for red galaxies from data
blue_data: array
Multidimensional array of stellar mass, phi, total error in SMF and
counts per bin for blue galaxies from data
total_model: array
Multidimensional array of stellar mass, phi, total error in SMF and
counts per bin for all galaxies from model (hybrid or halo)
red_model: array
Multidimensional array of stellar mass, phi, total error in SMF and
counts per bin for red galaxies from model (hybrid or halo)
blue_model: array
Multidimensional array of stellar mass, phi, total error in SMF and
counts per bin for blue galaxies from model (hybrid or halo)
"""
max_total_data, phi_total_data, err_total_data, counts_total_data = \
total_data[0], total_data[1], total_data[2], total_data[3]
max_red_data, phi_red_data, err_red_data, counts_red_data = \
red_data[0], red_data[1], red_data[2], red_data[3]
max_blue_data, phi_blue_data, err_blue_data, counts_blue_data = \
blue_data[0], blue_data[1], blue_data[2], blue_data[3]
max_total, phi_total, err_total, counts_total = \
total_model[0], total_model[1], total_model[2], total_model[3]
max_red, phi_red, err_red, counts_red = \
red_model[0], red_model[1], red_model[2], red_model[3]
max_blue, phi_blue, err_blue, counts_blue = \
blue_model[0], blue_model[1], blue_model[2], blue_model[3]
fig1 = plt.figure(figsize=(10,10))
ax1 = fig1.add_subplot(111)
# for idx in range(len(max_blue_mocks)):
# lower_err = np.log10(phi_blue_mocks[idx] - err_blue_mocks[idx])
# upper_err = np.log10(phi_blue_mocks[idx] + err_blue_mocks[idx])
# lower_err = np.log10(phi_blue_mocks[idx]) - lower_err
# upper_err = upper_err - np.log10(phi_blue_mocks[idx])
# asymmetric_err = [lower_err, upper_err]
# plt.errorbar(max_blue_mocks[idx],np.log10(phi_blue_mocks[idx]),
# yerr=asymmetric_err,color='b',
# fmt='--s',ecolor='b',markersize=4,capsize=5,capthick=0.5,
# label=r'$\textrm{total}_{\textrm{m}}$',
# zorder=10)
lower_err = np.log10(phi_total_data) - err_total_data
upper_err = np.log10(phi_total_data) + err_total_data
lower_err = np.log10(phi_total_data) - lower_err
upper_err = upper_err - np.log10(phi_total_data)
asymmetric_err = [lower_err, upper_err]
plt.errorbar(max_total_data,np.log10(phi_total_data), yerr=asymmetric_err,
color='k', fmt='s', ecolor='k', markersize=5, capsize=5,
capthick=0.5, label=r'$\textrm{total}_{\textrm{d}}$', zorder=10)
lower_err = np.log10(phi_total - err_total)
upper_err = np.log10(phi_total + err_total)
lower_err = np.log10(phi_total) - lower_err
    upper_err = upper_err - np.log10(phi_total)
""""
Produces figures based on training data
"""
import os
import numpy as np
import argparse
import matplotlib.pyplot as plt
stats_file = "./mean_offsets.npz"
f = np.load(stats_file)
means_matrix = f["means_matrix"]
marginalized_means = np.mean(means_matrix, axis=0)
save_dir = "/home/aidas/Dropbox/MEng_Thesis/Figures/Figures_30th_May"
markers = ["o", "p", "X", "*", "D", "0", ">", "<"]
def real_data_test(account_for_offset=False):
# TODO: Get rid of generator graphs.
current_dir = os.getcwd()
print("Current_dir = ", current_dir)
os.chdir("/home/aidas/GAN_Experiments/progressive_test")
stats_file = current_dir + "/real_evaluation_stats.npz"
f = np.load(stats_file)
results_tensor = f["results_tensor"][:]
save_files = f["save_files"][:]
indexes = [int(x.split("_")[1]) for x in save_files]
save_files = [x for _, x in sorted(zip(indexes, save_files))]
l = len(save_files)
overall_means = np.mean(results_tensor, axis=(1, 2))
overall_stds = np.std(results_tensor, axis=(1, 2))
batch_means = np.mean(np.mean(results_tensor, axis=2), axis=1)
batch_stds = np.std(np.std(results_tensor, axis=2), axis=1)
if account_for_offset:
overall_means = overall_means - marginalized_means
width = 3.0
no_samples = results_tensor.shape[1] * results_tensor.shape[2]
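    # Fraction of samples per width with a positive discriminator score
    # (or above the per-width mean offset when account_for_offset is True).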
proportions = [(x > 0).sum() / no_samples for x in results_tensor]
if account_for_offset:
proportions = [(x > marginalized_means[i]).sum() /
no_samples for i, x in enumerate(results_tensor)]
np.shape(proportions)
indexes.sort()
j = 0
fig, ax = plt.subplots(nrows=2, ncols=1, sharex=False,
sharey=False, figsize=(8, 5))
# Generators Graph
plt.subplot(2, 1, 1)
plt.plot(indexes, proportions, '-o', linewidth=width)
plt.title('Discriminator Output for different network widths on real data')
plt.grid(True)
std = overall_stds
mu = overall_means
plt.subplot(2, 1, 2)
plt.plot(indexes, mu, '-o', lw=2)
plt.fill_between(indexes, mu + std, mu - std, alpha=0.5)
plt.xlabel("Width of discriminator network")
plt.title('Discriminator scores for different network widths on real data')
plt.tight_layout()
plt.grid(True)
plt.savefig(save_dir + "/real_stats.png", dpi=None, format="png",)
plt.show()
def fake_data_test(account_for_offset=False):
legend_size = 8
current_dir = os.getcwd()
print("Current_dir = ", current_dir)
os.chdir("/home/aidas/GAN_Experiments/progressive_test")
stats_file = current_dir + "/evaluation_stats.npz"
f = np.load(stats_file)
results_tensor = f["results_tensor"][:]
save_files = f["save_files"][:]
indexes = [int(x.split("_")[1]) for x in save_files]
save_files = [x for _, x in sorted(zip(indexes, save_files))]
l = len(save_files)
overall_means = np.mean(results_tensor, axis=(1, 2)).reshape([l, l])
overall_stds = np.std(results_tensor, axis=(1, 2)).reshape([l, l])
    batch_means = np.mean(np.mean(results_tensor, axis=2), axis=1)
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for SchurComplement."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_probability import positive_semidefinite_kernels as tfpk
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
@test_util.run_all_in_graph_and_eager_modes
class SchurComplementTest(tf.test.TestCase, parameterized.TestCase):
def testMismatchedFloatTypesAreBad(self):
base_kernel = tfpk.ExponentiatedQuadratic(
np.float64(5.), np.float64(.2))
# Should be OK
tfpk.SchurComplement(
base_kernel=base_kernel, # float64
fixed_inputs=np.random.uniform(-1., 1., [2, 1]))
with self.assertRaises(TypeError):
float32_inputs = np.random.uniform(
-1., 1., [2, 1]).astype(np.float32)
tfpk.SchurComplement(
base_kernel=base_kernel,
fixed_inputs=float32_inputs)
@parameterized.parameters(
{'feature_ndims': 1, 'dims': 3},
{'feature_ndims': 1, 'dims': 4},
{'feature_ndims': 2, 'dims': 2},
{'feature_ndims': 2, 'dims': 3},
{'feature_ndims': 3, 'dims': 2},
{'feature_ndims': 3, 'dims': 3})
def testValuesAreCorrect(self, feature_ndims, dims):
    np.random.seed(42)
import math
import numpy as np
import numpy.linalg as la  # used by mvee()
class Plane():
def __init__(self,atoms):
        # Stores a plane equation in the format
        # ax + by + cz + d = 0
self.atoms = atoms
xs = [atom.coordinates[0] for atom in atoms]
ys = [atom.coordinates[1] for atom in atoms]
zs = [atom.coordinates[2] for atom in atoms]
# do fit
tmp_A = []
tmp_b = []
for i in range(len(xs)):
tmp_A.append([xs[i], ys[i], 1])
tmp_b.append(zs[i])
b = np.matrix(tmp_b).T
A = np.matrix(tmp_A)
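        # Least-squares fit of z = a*x + b*y + d via the normal equations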
fit = (A.T * A).I * A.T * b
self.errors = b - A * fit
fit = np.array(fit).reshape(3)
self.a, self.b, self.d = fit[0], fit[1], fit[2]
# fit is currently in the form
# ax + by + d = cz
# c = -(a*x[0] + b*y[0] + d) / z[0]
self.c = - ((self.a*xs[0] + self.b*ys[0] + self.d) / zs[0])
def plane_angle(self, plane):
a1,b1,c1 = self.a,self.b, self.c
a2,b2,c2 = plane.a,plane.b, plane.c
d = ( a1 * a2 + b1 * b2 + c1 * c2 )
e1 = np.sqrt( a1 * a1 + b1 * b1 + c1 * c1)
e2 = np.sqrt( a2 * a2 + b2 * b2 + c2 * c2)
d = d / (e1 * e2)
A = np.degrees(np.arccos(d))
if A > 90:
A = 180 - A
return A
def point_distance(self,atom):
x1, y1, z1 = atom.coordinates[0], atom.coordinates[1], atom.coordinates[2]
d = np.abs((self.a * x1 + self.b * y1 + self.c * z1 + self.d))
e = (np.sqrt(self.a * self.a + self.b * self.b + self.c * self.c))
return d/e
def test_planarity(self,atoms = None):
        if atoms is None:
devs = [self.point_distance(atom) for atom in self.atoms]
if len(np.where(np.array(devs)>2)[0]) >= 1:
return False
else:
return True
else:
devs = [self.point_distance(atom) for atom in atoms]
if len(np.where(np.array(devs)>2)[0]) >= 1:
return False
else:
return True
def bond_angle(atom1,atom2,atom3):
a = atom1.coordinates
b = atom2.coordinates
c = atom3.coordinates
ba = a - b
bc = c - b
cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
angle = np.arccos(cosine_angle)
return np.degrees(angle)
def torsional_angle(atom1,atom2,atom3,atom4):
# returns interplanar angle between planes defined by atom1, atom2, atom3, and atom2, atom3, atom4
pass
def vector(atom1,atom2, as_angstrom=False):
# returns the vector defined by the position between two atoms
pass
def calc_lstsq_displacement(disp,vectors):
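    # Express the displacement `disp` as a least-squares combination of the
    # rows of `vectors`; returns the coefficient vector.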
A = vectors.T
xs = []
x, _, _, _ = np.linalg.lstsq(A,disp,rcond=-1)
xs.append(x)
return np.array(xs[0])
def vector_angle(v1,v2):
theta = np.arccos((v1.dot(v2))/(np.sqrt(v1.dot(v1))*np.sqrt(v2.dot(v2))))
return np.degrees(theta)
def vector_plane_angle(vector, plane):
# returns the angle made between a vector and a plane
pass
# https://stackoverflow.com/questions/14016898/port-matlab-bounding-ellipsoid-code-to-python
# Python implementation of the MATLAB function MinVolEllipse, based on the Khachiyan algorithm
# for both
# A is a matrix containing the information regarding the shape of the ellipsoid
# to get radii from A you have to do SVD on it, giving U Q and V
# 1 / sqrt(Q) gives the radii of the ellipsoid
# problems arise for planar motifs. add two extra points at centroid of +/- 0.00001*plane_normal to overcome
def mvee(atoms, tol = 0.00001):
"""
Find the minimum volume ellipse around a set of atom objects.
Return A, c where the equation for the ellipse given in "center form" is
(x-c).T * A * (x-c) = 1
[U Q V] = svd(A);
where r = 1/sqrt(Q)
V is rotation matrix
U is ???
"""
points_asarray = np.array([atom.coordinates for atom in atoms])
points = np.asmatrix(points_asarray)
N, d = points.shape
Q = np.column_stack((points, np.ones(N))).T
err = tol+1.0
u = np.ones(N)/N
try:
while err > tol:
# assert u.sum() == 1 # invariant
X = Q * np.diag(u) * Q.T
M = np.diag(Q.T * la.inv(X) * Q)
jdx = np.argmax(M)
step_size = (M[jdx]-d-1.0)/((d+1)*(M[jdx]-1.0))
new_u = (1-step_size)*u
new_u[jdx] += step_size
err = la.norm(new_u-u)
u = new_u
c = u*points
A = la.inv(points.T*np.diag(u)*points - c.T*c)/d
except: # For singular matrix errors i.e. motif is ellipse rather than ellipsoid
centroid = np.average(points_asarray,axis=0)
plane = Plane(atoms)
normal = np.array([plane.a,plane.b,plane.c])
norm_mag = np.sqrt(np.dot(normal,normal))
for i, norm in enumerate(normal):
normal[i] = norm * 1 / norm_mag
centroid = np.average(points,axis=0).reshape(-1,3)
p1 = centroid + normal*0.00001
p2 = centroid - normal*0.00001
points_asarray = np.concatenate([points_asarray,p1,p2],axis=0)
points = np.asmatrix(points_asarray)
N, d = points.shape
Q = np.column_stack((points, np.ones(N))).T
err = tol+1.0
u = np.ones(N)/N
while err > tol:
# assert u.sum() == 1 # invariant
X = Q * np.diag(u) * Q.T
M = np.diag(Q.T * la.inv(X) * Q)
jdx = np.argmax(M)
step_size = (M[jdx]-d-1.0)/((d+1)*(M[jdx]-1.0))
new_u = (1-step_size)*u
new_u[jdx] += step_size
err = la.norm(new_u-u)
u = new_u
c = u*points
A = la.inv(points.T*np.diag(u)*points - c.T*c)/d
return np.asarray(A), np.squeeze(np.asarray(c))
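# Example usage (sketch): recover the ellipsoid radii from A via SVD,
# as described in the mvee docstring:
#   A, c = mvee(atoms)
#   U, Q, V = np.linalg.svd(A)
#   radii = 1.0 / np.sqrt(Q)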
def ellipse(rx,ry,rz):
u, v = np.mgrid[0:2*np.pi:20j, -np.pi/2:np.pi/2:10j]
x = rx*np.cos(u)*np.cos(v)
y = ry*np.sin(u)*np.cos(v)
    z = rz*np.sin(v)
from qutiepy import *
import numpy as np
from scipy.linalg import expm
import warnings
warnings.filterwarnings('ignore')
"""
Ax = b
"""
def main(debug=False):
A = np.array([[0.707,0.707],
[0.707,-0.707]])
k = np.linalg.cond(A)
print("k = ", k)
bBits = int(np.log2(A.shape[0]))
bAmps = [1, 0]
b = register(bBits)
b.setAmps(bAmps)
    answer = np.linalg.solve(A, b.amps)
import pytest
import numpy as np
from ..processing import rest_filter
def test_conn_filter():
TR = np.random.randint(1, 10)
filter = [0.01, 0.08]
x = np.zeros((152, 1))
y = rest_filter.conn_filter(TR, filter, x)
assert type(y) == type(np.asarray([]))
assert y.shape == (152, 1)
    assert np.allclose(y, np.zeros((152, 1)))
import numpy as np
from wzk import angle2minuspi_pluspi
def x_flat2x(*, x_flat, n_wp=None, n_dof):
"""
x_flat -> x
Force the input in matrix form with n_dim column vectors for x1, x2, ... (x, y, z, ...) .
If the input is already in this (n, n_dim) form, nothing is changed.
"""
n_samples = x_flat.shape[0]
if n_wp is None:
n_wp = x_flat.size // n_dof // n_samples
return x_flat.reshape((n_samples, n_wp, n_dof))
def x2x_flat(*, x):
n_samples = x.shape[0]
return x.reshape((n_samples, -1))
# REPRESENTATION - Inner
def x2x_inner(x):
return x[..., 1:-1, :]
def x_inner2x(*, x_inner, x_start=None, x_end=None):
n_samples = x_inner.shape[0]
def __repeat(x):
return x.repeat(n_samples // x.shape[0], axis=0)
if x_start is not None:
x_start = __repeat(x_start)
x_inner = np.concatenate((x_start, x_inner), axis=-2)
if x_end is not None:
x_end = __repeat(x_end)
x_inner = np.concatenate((x_inner, x_end), axis=-2)
return x_inner
def q2x_q(xq, n_dim):
x = xq[..., :n_dim]
q = xq[..., n_dim:]
return x, q
def x_q2q(x, q):
"""
Root coordinates followed by the joint angles
"""
xq = np.concatenate((x, q), axis=-1)
return xq
# PATH PROPERTIES AND VALUES
def get_n_waypoints(*, x, n_dof, only_inner=False):
n_samples = x.shape[0]
n_waypoints = (x.size // n_dof) // n_samples
if only_inner:
return n_waypoints - 2
else:
return n_waypoints
def get_start_end(x, waypoints_dim=True):
if waypoints_dim:
return x[..., :1, :], x[..., -1:, :]
else:
return x[..., 0, :], x[..., -1, :]
def inf_joint_wrapper(x, inf_joints=None):
if inf_joints is not None:
x[..., inf_joints] = angle2minuspi_pluspi(x[..., inf_joints])
return x
def get_start_end_normalization(*, q_start, q_end, n_wp,
joint_weighting=None, infinity_joints,
eps=0.01): # Weighting for path, minimal distance in matrix, rad
"""
Get minimal length cost between x_start and x_end with n_wp waypoints (linear connection)
Divide the connection in (n_wp-1) equal steps, square each step and consider their sum
"""
# norm = np.linalg.norm(x_end - x_start, axis=-1) / (n_wp-1)
# norm = norm ** 2 * (n_wp - 1)
# norm = np.linalg.norm(x_end - x_start, axis=-1).ravel()
if joint_weighting is None:
joint_weighting = 1
else:
joint_weighting = joint_weighting
x_diff = (inf_joint_wrapper(x=q_end[..., 0, :] - q_start[..., 0, :], inf_joints=infinity_joints))
x_diff = (x_diff + eps) ** 2
# norm = 0.5 * np.sqrt((joint_weighting * x_diff).sum(axis=-1)) # Weighted sum over the joints
norm = 0.5 * (joint_weighting * x_diff).sum(axis=-1) # Weighted sum over the joints
norm /= (n_wp - 1)
# norm[:] = 1
return norm
def get_x_steps(x,
steps=None):
"""
:param x:
:param steps: Optional (reuse)
:return:
"""
if steps is None:
return np.diff(x, axis=1)
else:
return steps
def get_step_lengths(*, x=None,
steps=None, # Optional (reuse)
step_lengths=None):
if step_lengths is None:
steps = get_x_steps(x, steps=steps)
return np.linalg.norm(steps, axis=-1)
else:
return step_lengths
def trajectory_length(x=None,
step_lengths=None, # Optional (reuse)
squared=False): # Options
"""
Calculate the length of the path by summing up all individual steps (see path.step_lengths).
Assume linear connecting between way points.
If boolean 'squared' is True: take the squared distance of each step -> enforces equally spaced steps.
"""
step_lengths = get_step_lengths(x=x, step_lengths=step_lengths)
if squared:
step_lengths **= 2
return step_lengths.sum(axis=-1)
def linear_distance(*, x_start=None, x_end=None,
x=None):
if x_start is None:
x_start = x[..., 0, :]
if x_end is None:
x_end = x[..., -1, :]
return np.linalg.norm(x_end - x_start, axis=-1)
def get_x_substeps(*, x, n_substeps,
steps=None,
infinity_joints=None,
include_end_point=True):
"""
Calculate the substeps between the neighboring way points for a given number of substeps.
    'include_end_point': by default the last way point is included in the result.
"""
n_samples, n_wp, n_dof = x.shape
    # Only fill in substeps if the number is greater than 1
if n_substeps <= 1 or n_wp <= 1:
return x
steps = get_x_steps(x=x, steps=steps)
steps = inf_joint_wrapper(x=steps, inf_joints=infinity_joints)
    # Create an array which contains all substeps in one go
x_substep_i = steps / n_substeps
delta = np.arange(n_substeps) * x_substep_i[..., np.newaxis]
x_ss = x[..., :-1, :, np.newaxis] + delta
x_ss = x_ss.transpose((0, 1, 3, 2)).reshape((n_samples, (n_wp - 1) * n_substeps, n_dof))
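    # (n_samples, n_wp - 1, n_dof, n_substeps) -> (n_samples, (n_wp - 1) * n_substeps, n_dof)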
if include_end_point:
x_ss = x_inner2x(x_inner=x_ss, x_start=None, x_end=x[..., -1:, :])
x_ss[..., infinity_joints] = angle2minuspi_pluspi(x_ss[..., infinity_joints])
return x_ss
def linear_connection(q, n_waypoints, infinity_joints, weighting=None):
_, n_points, n_dof = q.shape
n_connections = n_points - 1
x_rp_steps = get_x_steps(x=q)
x_rp_steps = inf_joint_wrapper(x=x_rp_steps, inf_joints=infinity_joints)
x_rp_steps = x_rp_steps[0]
if weighting is not None:
x_rp_steps *= weighting
# Distribute the waypoints equally along the linear sequences of the initial path
    x_rp_steps_norm = np.linalg.norm(x_rp_steps, axis=-1)
from utils.solvers.vector import inner_prod
from utils.solvers.solvers import getHyperPlaneFromTwoPoints
from utils.accuracy import accuracy as accuracy_
from utils.solvers.tensor import construct_W_from_decomp
import numpy as np
from random import seed
import os
import utils.solvers.solvers as solvers
import pickle
np.random.seed(1)
seed(1)
class Node:
def __init__(self, indim, sol_name='STM', C=1.0, rank=3, wconst='maxmax', xa=None, xb=None, constrain='lax',
wnorm='L1', tree_height=3, path=None, tuneC=1e-6, decomp=False, ogXtrainshape=(28, 28), decomprank=5):
self.weight = np.zeros(indim)
self.bias = 0
self.A = None
self.B = None
self.wA = 0
self.wB = 0
self.dim = indim
self.C1 = []
self.C2 = []
self.C3 = []
self.C4 = []
self.labels = []
self.X = []
self.height = 0
self.sol_name = sol_name
self.C = C
self.rank = rank
self.wconst = wconst
self.xa = xa
self.xb = xb
self.constrain = constrain
self.wnorm = wnorm
self.path = path
self.tree_height = tree_height
self.solver = solvers.STM
if (sol_name == 'MCM'):
self.solver = solvers.MCM
if (sol_name == 'MCTM'):
self.solver = solvers.MCTM
if (sol_name == 'STM'):
self.solver = solvers.STM
if (sol_name == 'SHTM'):
self.solver = solvers.SHTM
if (sol_name == 'SGDSTM'):
self.solver = solvers.SGD_STM
self.tuneC = tuneC
self.decomp = decomp
self.decomprank = decomprank
self.ogXtrainshape = ogXtrainshape
self.xat = None
self.xbt = None
def insert(self, neuron_type, weight=0, bias=0, w=0):
if neuron_type == 'A':
self.A = Node(indim=self.dim, sol_name=self.sol_name, C=self.C, rank=self.rank, wconst=self.wconst,
xa=self.xa, xb=self.xb, constrain=self.constrain, wnorm=self.wnorm,
tree_height=self.tree_height, path=self.path, tuneC=self.tuneC, decomp=self.decomp,
decomprank=self.decomprank, ogXtrainshape=self.ogXtrainshape)
self.A.weight = weight
self.A.bias = bias
self.A.height = self.height + 1
return self.A
else:
self.B = Node(indim=self.dim, sol_name=self.sol_name, C=self.C, rank=self.rank, wconst=self.wconst,
xa=self.xa, xb=self.xb, constrain=self.constrain, wnorm=self.wnorm,
tree_height=self.tree_height, path=self.path, tuneC=self.tuneC, decomp=self.decomp,
decomprank=self.decomprank, ogXtrainshape=self.ogXtrainshape)
self.B.weight = weight
self.B.bias = bias
self.B.height = self.height + 1
return self.B
def update_weights_and_bias(self, weight, bias, wA=0, wB=0):
self.weight = weight
self.bias = bias
self.wA = wA
self.wB = wB
def update_classes(self, ypred, ytrue):
ypred = ypred.copy()
ypred = np.reshape(ypred, (ypred.shape[0], 1))
yf = np.add(2 * ypred, ytrue)
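        # 2*pred + true uniquely encodes the four (predicted, true) pairs:
        # 3 -> (1, 1), -3 -> (-1, -1), 1 -> (1, -1), -1 -> (-1, 1)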
self.C1 = np.argwhere(yf == 3)[:, 0] # 1,1 #In order: predicted, true
self.C2 = np.argwhere(yf == -3)[:, 0] # -1,-1
self.C3 = np.where(yf == 1)[0] # 1,-1
self.C4 = np.where(yf == -1)[0] # -1,1
def forward(self, X):
y = []
X = X.copy()
w = self.weight
b = self.bias
wA = np.asarray([self.wA]).copy()
wB = np.asarray([self.wB]).copy()
if (self == None):
return []
if (self.A == None and self.B == None):
y = np.sign(np.array(inner_prod(w, X)) + np.array(b)).reshape(-1, 1)
if (self.A == None):
xA = np.zeros((X.shape[0], 1))
else:
xA = self.A.forward(X)
xA = np.reshape(xA, (xA.shape[0], 1))
if (self.B == None):
xB = np.zeros((X.shape[0], 1))
else:
xB = self.B.forward(X)
xB = np.reshape(xB, (xB.shape[0], 1))
if (self.A != None and self.B != None):
wA = np.asarray([wA.item()])
wB = np.asarray([wB.item()])
y = np.sign(np.asarray(inner_prod(w, X)) + np.asarray(inner_prod(wA, xA)) + np.asarray(
inner_prod(wB, xB)) + np.asarray(b)).reshape(-1, 1)
if (self.A != None and self.B == None):
wA = np.asarray([wA.item()])
y = np.sign(np.asarray(inner_prod(w, X)) + np.asarray(inner_prod(wA, xA)) + np.asarray(b)).reshape(-1, 1)
if (self.A == None and self.B != None):
wB = np.asarray([wB.item()])
            y = np.sign(np.asarray(inner_prod(w, X)) + np.asarray(inner_prod(wB, xB)) + np.asarray(b)).reshape(-1, 1)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__doc__ = """Code by <NAME>
<EMAIL>
Dependencies:
numpy
scipy
matplotlib
Classes:
UniversalKriging: Provides greater control over 2D kriging by
utilizing drift terms.
References:
P.K. Kitanidis, Introduction to Geostatistcs: Applications in Hydrogeology,
(Cambridge University Press, 1997) 272 p.
Copyright (c) 2015 <NAME>
"""
import numpy as np
import scipy.linalg
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from . import variogram_models
from . import core
from .core import _adjust_for_anisotropy
import warnings
class UniversalKriging:
"""class UniversalKriging
Provides greater control over 2D kriging by utilizing drift terms.
Dependencies:
numpy
scipy
matplotlib
Inputs:
X (array-like): X-coordinates of data points.
Y (array-like): Y-coordinates of data points.
Z (array-like): Values at data points.
        variogram_model (string, optional): Specifies which variogram model to use;
            may be one of the following: linear, power, gaussian, spherical,
            exponential. Default is linear variogram model. To utilize a custom variogram
model, specify 'custom'; you must also provide variogram_parameters and
variogram_function.
variogram_parameters (array-like, optional): Parameters that define the
specified variogram model. If not provided, parameters will be automatically
calculated such that the root-mean-square error for the fit variogram
function is minimized.
linear - [slope, nugget]
power - [scale, exponent, nugget]
gaussian - [sill, range, nugget]
spherical - [sill, range, nugget]
exponential - [sill, range, nugget]
For a custom variogram model, the parameters are required, as custom variogram
models currently will not automatically be fit to the data. The code does not
check that the provided list contains the appropriate number of parameters for
the custom variogram model, so an incorrect parameter list in such a case will
probably trigger an esoteric exception someplace deep in the code.
variogram_function (callable, optional): A callable function that must be provided
if variogram_model is specified as 'custom'. The function must take only two
arguments: first, a list of parameters for the variogram model; second, the
distances at which to calculate the variogram model. The list provided in
variogram_parameters will be passed to the function as the first argument.
nlags (int, optional): Number of averaging bins for the semivariogram.
Default is 6.
weight (boolean, optional): Flag that specifies if semivariance at smaller lags
should be weighted more heavily when automatically calculating variogram model.
True indicates that weights will be applied. Default is False.
(Kitanidis suggests that the values at smaller lags are more important in
fitting a variogram model, so the option is provided to enable such weighting.)
anisotropy_scaling (float, optional): Scalar stretching value to take
into account anisotropy. Default is 1 (effectively no stretching).
Scaling is applied in the y-direction in the rotated data frame
(i.e., after adjusting for the anisotropy_angle, if anisotropy_angle
is not 0).
anisotropy_angle (float, optional): CCW angle (in degrees) by which to
rotate coordinate system in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated.
drift_terms (list of strings, optional): List of drift terms to include in
universal kriging. Supported drift terms are currently
'regional_linear', 'point_log', 'external_Z', 'specified', and 'functional'.
point_drift (array-like, optional): Array-like object that contains the
coordinates and strengths of the point-logarithmic drift terms. Array
shape must be Nx3, where N is the number of point drift terms. First
column (index 0) must contain x-coordinates, second column (index 1)
must contain y-coordinates, and third column (index 2) must contain
the strengths of each point term. Strengths are relative, so only
the relation of the values to each other matters. Note that the code
will appropriately deal with point-logarithmic terms that are at the
same coordinates as an evaluation point or data point, but Python will
still kick out a warning message that an ln(0) has been encountered.
If the problem involves anisotropy, the well coordinates will be adjusted
and the drift values will be calculated in the adjusted data frame.
external_drift (array-like, optional): Gridded data used for the external
Z scalar drift term. Must be dim MxN, where M is in the y-direction
and N is in the x-direction. Grid spacing does not need to be constant.
If grid spacing is not constant, must specify the grid cell sizes.
If the problem involves anisotropy, the external drift values are
extracted based on the pre-adjusted coordinates (i.e., the original
coordinate system).
external_drift_x (array-like, optional): X-coordinates for gridded
external Z-scalar data. Must be dim M or Mx1 (where M is the number
of grid cells in the x-direction). The coordinate is treated as
the center of the cell.
external_drift_y (array-like, optional): Y-coordinates for gridded
external Z-scalar data. Must be dim N or Nx1 (where N is the
number of grid cells in the y-direction). The coordinate is
treated as the center of the cell.
specified_drift (list of array-like objects, optional): List of arrays that contain
the drift values at data points. The arrays must be dim N, where N is the number
of data points. Any number of specified-drift terms may be used.
functional_drift (list of callable objects, optional): List of callable functions that
will be used to evaluate drift terms. The function must be a function of only the
two spatial coordinates and must return a single value for each coordinate pair.
It must be set up to be called with only two arguments, first an array of x values
and second an array of y values. If the problem involves anisotropy, the drift values
are calculated in the adjusted data frame.
verbose (Boolean, optional): Enables program text output to monitor
kriging process. Default is False (off).
enable_plotting (Boolean, optional): Enables plotting to display
variogram. Default is False (off).
Callable Methods:
display_variogram_model(): Displays semivariogram and variogram model.
update_variogram_model(variogram_model, variogram_parameters=None, nlags=6,
anisotropy_scaling=1.0, anisotropy_angle=0.0):
Changes the variogram model and variogram parameters for
the kriging system.
Inputs:
variogram_model (string): May be any of the variogram models
listed above.
variogram_parameters (list, optional): List of variogram model
parameters, as listed above. If not provided, a best fit model
will be calculated as described above.
variogram_function (callable, optional): A callable function that must be
provided if variogram_model is specified as 'custom'. See above for
more information.
nlags (int, optional): Number of averaging bins for the semivariogram.
            Default is 6.
weight (boolean, optional): Flag that specifies if semivariance at smaller lags
should be weighted more heavily when automatically calculating variogram model.
True indicates that weights will be applied. Default is False.
anisotropy_scaling (float, optional): Scalar stretching value to
take into account anisotropy. Default is 1 (effectively no
stretching). Scaling is applied in the y-direction.
anisotropy_angle (float, optional): CCW angle (in degrees) by which to
rotate coordinate system in order to take into account
anisotropy. Default is 0 (no rotation).
switch_verbose(): Enables/disables program text output. No arguments.
switch_plotting(): Enables/disable variogram plot display. No arguments.
get_epsilon_residuals(): Returns the epsilon residuals of the
variogram fit. No arguments.
plot_epsilon_residuals(): Plots the epsilon residuals of the variogram
fit in the order in which they were calculated. No arguments.
get_statistics(): Returns the Q1, Q2, and cR statistics for the
variogram fit (in that order). No arguments.
print_statistics(): Prints out the Q1, Q2, and cR statistics for
the variogram fit. NOTE that ideally Q1 is close to zero,
Q2 is close to 1, and cR is as small as possible.
execute(style, xpoints, ypoints, mask=None): Calculates a kriged grid.
Inputs:
style (string): Specifies how to treat input kriging points.
Specifying 'grid' treats xpoints and ypoints as two arrays of
x and y coordinates that define a rectangular grid.
Specifying 'points' treats xpoints and ypoints as two arrays
that provide coordinate pairs at which to solve the kriging system.
Specifying 'masked' treats xpoints and ypoints as two arrays of
x and y coordinates that define a rectangular grid and uses mask
to only evaluate specific points in the grid.
            xpoints (array-like, dim Nx1): If style is specified as 'grid' or 'masked',
x-coordinates of MxN grid. If style is specified as 'points',
x-coordinates of specific points at which to solve kriging system.
ypoints (array-like, dim Mx1): If style is specified as 'grid' or 'masked',
y-coordinates of MxN grid. If style is specified as 'points',
y-coordinates of specific points at which to solve kriging system.
mask (boolean array, dim MxN, optional): Specifies the points in the rectangular
grid defined by xpoints and ypoints that are to be excluded in the
kriging calculations. Must be provided if style is specified as 'masked'.
False indicates that the point should not be masked; True indicates that
the point should be masked.
backend (string, optional): Specifies which approach to use in kriging.
Specifying 'vectorized' will solve the entire kriging problem at once in a
vectorized operation. This approach is faster but also can consume a
significant amount of memory for large grids and/or large datasets.
Specifying 'loop' will loop through each point at which the kriging system
is to be solved. This approach is slower but also less memory-intensive.
Default is 'vectorized'. Note that the Cython backend is not supported for UK.
specified_drift_arrays (list of numpy arrays, optional): Specifies the drift values
at the points at which the kriging system is to be evaluated. Required if
'specified' drift provided in the list of drift terms when instantiating the
UniversalKriging class. Must be a list of arrays in the same order as the list
provided when instantiating the kriging object. Array(s) must be the same dimension
as the specified grid or have the same number of points as the specified points;
i.e., the arrays either must be dim MxN, where M is the number of y grid-points
and N is the number of x grid-points, or dim M, where M is the number of points
at which to evaluate the kriging system.
Outputs:
zvalues (numpy array, dim MxN or dim Nx1): Z-values of specified grid or at the
specified set of points. If style was specified as 'masked', zvalues will
be a numpy masked array.
sigmasq (numpy array, dim MxN or dim Nx1): Variance at specified grid points or
at the specified set of points. If style was specified as 'masked', sigmasq
will be a numpy masked array.
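    Example (illustrative sketch; x, y, z, gridx, and gridy below are made-up placeholders,
        not objects defined in this module):
        uk = UniversalKriging(x, y, z, variogram_model='linear',
                              drift_terms=['regional_linear'])
        zvalues, sigmasq = uk.execute('grid', gridx, gridy)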
References:
        <NAME>, Introduction to Geostatistics: Applications in Hydrogeology,
(Cambridge University Press, 1997) 272 p.
"""
UNBIAS = True # This can be changed to remove the unbiasedness condition
# Really for testing purposes only...
eps = 1.e-10 # Cutoff for comparison to zero
variogram_dict = {'linear': variogram_models.linear_variogram_model,
'power': variogram_models.power_variogram_model,
'gaussian': variogram_models.gaussian_variogram_model,
'spherical': variogram_models.spherical_variogram_model,
'exponential': variogram_models.exponential_variogram_model}
def __init__(self, x, y, z, variogram_model='linear', variogram_parameters=None,
variogram_function=None, nlags=6, weight=False, anisotropy_scaling=1.0,
anisotropy_angle=0.0, drift_terms=None, point_drift=None,
external_drift=None, external_drift_x=None, external_drift_y=None,
specified_drift=None, functional_drift=None, verbose=False, enable_plotting=False):
# Deal with mutable default argument
if drift_terms is None:
drift_terms = []
if specified_drift is None:
specified_drift = []
if functional_drift is None:
functional_drift = []
# Code assumes 1D input arrays. Ensures that any extraneous dimensions
# don't get in the way. Copies are created to avoid any problems with
# referencing the original passed arguments.
self.X_ORIG = np.atleast_1d(np.squeeze(np.array(x, copy=True)))
self.Y_ORIG = np.atleast_1d(np.squeeze(np.array(y, copy=True)))
self.Z = np.atleast_1d(np.squeeze(np.array(z, copy=True)))
self.verbose = verbose
self.enable_plotting = enable_plotting
if self.enable_plotting and self.verbose:
print("Plotting Enabled\n")
self.XCENTER = (np.amax(self.X_ORIG) + np.amin(self.X_ORIG))/2.0
self.YCENTER = (np.amax(self.Y_ORIG) + np.amin(self.Y_ORIG))/2.0
self.anisotropy_scaling = anisotropy_scaling
self.anisotropy_angle = anisotropy_angle
if self.verbose:
print("Adjusting data for anisotropy...")
self.X_ADJUSTED, self.Y_ADJUSTED = \
_adjust_for_anisotropy(np.vstack((self.X_ORIG, self.Y_ORIG)).T,
[self.XCENTER, self.YCENTER],
[self.anisotropy_scaling],
[self.anisotropy_angle]).T
self.variogram_model = variogram_model
if self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != 'custom':
raise ValueError("Specified variogram model '%s' is not supported." % variogram_model)
elif self.variogram_model == 'custom':
if variogram_function is None or not callable(variogram_function):
raise ValueError("Must specify callable function for custom variogram model.")
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
if self.verbose:
print("Initializing variogram model...")
self.lags, self.semivariance, self.variogram_model_parameters = \
core.initialize_variogram_model(self.X_ADJUSTED, self.Y_ADJUSTED, self.Z,
self.variogram_model, variogram_parameters,
self.variogram_function, nlags, weight,
'euclidean')
if self.verbose:
if self.variogram_model == 'linear':
print("Using '%s' Variogram Model" % 'linear')
print("Slope:", self.variogram_model_parameters[0])
print("Nugget:", self.variogram_model_parameters[1], '\n')
elif self.variogram_model == 'power':
print("Using '%s' Variogram Model" % 'power')
print("Scale:", self.variogram_model_parameters[0])
print("Exponent:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], '\n')
elif self.variogram_model == 'custom':
print("Using Custom Variogram Model")
else:
print("Using '%s' Variogram Model" % self.variogram_model)
print("Sill:", self.variogram_model_parameters[0])
print("Range:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2])
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print("Calculating statistics on variogram model fit...")
self.delta, self.sigma, self.epsilon = core.find_statistics(self.X_ADJUSTED, self.Y_ADJUSTED,
self.Z, self.variogram_function,
self.variogram_model_parameters,
'euclidean')
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR, '\n')
if self.verbose:
print("Initializing drift terms...")
# Note that the regional linear drift values will be based on the adjusted coordinate system.
# Really, it doesn't actually matter which coordinate system is used here.
if 'regional_linear' in drift_terms:
self.regional_linear_drift = True
if self.verbose:
print("Implementing regional linear drift.")
else:
self.regional_linear_drift = False
# External Z scalars are extracted using the original (unadjusted) coordinates.
if 'external_Z' in drift_terms:
if external_drift is None:
raise ValueError("Must specify external Z drift terms.")
if external_drift_x is None or external_drift_y is None:
raise ValueError("Must specify coordinates of external Z drift terms.")
self.external_Z_drift = True
if external_drift.shape[0] != external_drift_y.shape[0] or \
external_drift.shape[1] != external_drift_x.shape[0]:
if external_drift.shape[0] == external_drift_x.shape[0] and \
external_drift.shape[1] == external_drift_y.shape[0]:
                    self.external_Z_array = np.array(external_drift.T)
else:
raise ValueError("External drift dimensions do not match provided "
"x- and y-coordinate dimensions.")
else:
self.external_Z_array = np.array(external_drift)
self.external_Z_array_x = np.array(external_drift_x).flatten()
self.external_Z_array_y = np.array(external_drift_y).flatten()
self.z_scalars = self._calculate_data_point_zscalars(self.X_ORIG,
self.Y_ORIG)
if self.verbose:
print("Implementing external Z drift.")
else:
self.external_Z_drift = False
# Well coordinates are rotated into adjusted coordinate frame.
if 'point_log' in drift_terms:
if point_drift is None:
raise ValueError("Must specify location(s) and strength(s) of point drift terms.")
self.point_log_drift = True
point_log = np.atleast_2d(np.squeeze(np.array(point_drift, copy=True)))
self.point_log_array = np.zeros(point_log.shape)
self.point_log_array[:, 2] = point_log[:, 2]
self.point_log_array[:, :2] = _adjust_for_anisotropy(np.vstack((point_log[:, 0], point_log[:, 1])).T,
[self.XCENTER, self.YCENTER],
[self.anisotropy_scaling],
[self.anisotropy_angle])
if self.verbose:
print("Implementing external point-logarithmic drift; number of points =",
self.point_log_array.shape[0], '\n')
else:
self.point_log_drift = False
if 'specified' in drift_terms:
if type(specified_drift) is not list:
raise TypeError("Arrays for specified drift terms must be encapsulated in a list.")
if len(specified_drift) == 0:
raise ValueError("Must provide at least one drift-value array when using the "
"'specified' drift capability.")
self.specified_drift = True
self.specified_drift_data_arrays = []
for term in specified_drift:
specified = np.squeeze(np.array(term, copy=True))
if specified.size != self.X_ORIG.size:
raise ValueError("Must specify the drift values for each data point when using the "
"'specified' drift capability.")
self.specified_drift_data_arrays.append(specified)
else:
self.specified_drift = False
# The provided callable functions will be evaluated using the adjusted coordinates.
if 'functional' in drift_terms:
if type(functional_drift) is not list:
raise TypeError("Callables for functional drift terms must be encapsulated in a list.")
if len(functional_drift) == 0:
raise ValueError("Must provide at least one callable object when using the "
"'functional' drift capability.")
self.functional_drift = True
self.functional_drift_terms = functional_drift
else:
self.functional_drift = False
def _calculate_data_point_zscalars(self, x, y, type_='array'):
"""Determines the Z-scalar values at the specified coordinates
for use when setting up the kriging matrix. Uses bilinear
interpolation.
Currently, the Z scalar values are extracted from the input Z grid
exactly at the specified coordinates. This means that if the Z grid
resolution is finer than the resolution of the desired kriged grid,
there is no averaging of the scalar values to return an average
Z value for that cell in the kriged grid. Rather, the exact Z value
right at the coordinate is used."""
if type_ == 'scalar':
nx = 1
ny = 1
z_scalars = None
else:
if x.ndim == 1:
nx = x.shape[0]
ny = 1
else:
ny = x.shape[0]
nx = x.shape[1]
z_scalars = np.zeros(x.shape)
for m in range(ny):
for n in range(nx):
if type_ == 'scalar':
xn = x
yn = y
else:
if x.ndim == 1:
xn = x[n]
yn = y[n]
else:
xn = x[m, n]
yn = y[m, n]
if xn > np.amax(self.external_Z_array_x) or xn < np.amin(self.external_Z_array_x) or \
yn > np.amax(self.external_Z_array_y) or yn < np.amin(self.external_Z_array_y):
raise ValueError("External drift array does not cover specified kriging domain.")
# bilinear interpolation
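                # With corner values z11 = Z[y1, x1], z12 = Z[y1, x2], z21 = Z[y2, x1], z22 = Z[y2, x2],
                # the interpolated value is the area-weighted average
                #   z = [z11*(x2 - xn)*(y2 - yn) + z12*(xn - x1)*(y2 - yn)
                #        + z21*(x2 - xn)*(yn - y1) + z22*(xn - x1)*(yn - y1)] / [(x2 - x1)*(y2 - y1)];
                # the degenerate branches below handle xn or yn falling exactly on a grid line.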
external_x2_index = np.amin(np.where(self.external_Z_array_x >= xn)[0])
external_x1_index = np.amax(np.where(self.external_Z_array_x <= xn)[0])
external_y2_index = np.amin(np.where(self.external_Z_array_y >= yn)[0])
external_y1_index = np.amax(np.where(self.external_Z_array_y <= yn)[0])
if external_y1_index == external_y2_index:
if external_x1_index == external_x2_index:
z = self.external_Z_array[external_y1_index, external_x1_index]
else:
z = (self.external_Z_array[external_y1_index, external_x1_index] *
(self.external_Z_array_x[external_x2_index] - xn) +
self.external_Z_array[external_y2_index, external_x2_index] *
(xn - self.external_Z_array_x[external_x1_index])) / \
(self.external_Z_array_x[external_x2_index] -
self.external_Z_array_x[external_x1_index])
elif external_x1_index == external_x2_index:
if external_y1_index == external_y2_index:
z = self.external_Z_array[external_y1_index, external_x1_index]
else:
z = (self.external_Z_array[external_y1_index, external_x1_index] *
(self.external_Z_array_y[external_y2_index] - yn) +
self.external_Z_array[external_y2_index, external_x2_index] *
(yn - self.external_Z_array_y[external_y1_index])) / \
(self.external_Z_array_y[external_y2_index] -
self.external_Z_array_y[external_y1_index])
else:
z = (self.external_Z_array[external_y1_index, external_x1_index] *
(self.external_Z_array_x[external_x2_index] - xn) *
(self.external_Z_array_y[external_y2_index] - yn) +
self.external_Z_array[external_y1_index, external_x2_index] *
(xn - self.external_Z_array_x[external_x1_index]) *
(self.external_Z_array_y[external_y2_index] - yn) +
self.external_Z_array[external_y2_index, external_x1_index] *
(self.external_Z_array_x[external_x2_index] - xn) *
(yn - self.external_Z_array_y[external_y1_index]) +
self.external_Z_array[external_y2_index, external_x2_index] *
(xn - self.external_Z_array_x[external_x1_index]) *
(yn - self.external_Z_array_y[external_y1_index])) / \
((self.external_Z_array_x[external_x2_index] -
self.external_Z_array_x[external_x1_index]) *
(self.external_Z_array_y[external_y2_index] -
self.external_Z_array_y[external_y1_index]))
if type_ == 'scalar':
z_scalars = z
else:
if z_scalars.ndim == 1:
z_scalars[n] = z
else:
z_scalars[m, n] = z
return z_scalars
def update_variogram_model(self, variogram_model, variogram_parameters=None,
variogram_function=None, nlags=6, weight=False,
anisotropy_scaling=1.0, anisotropy_angle=0.0):
"""Allows user to update variogram type and/or variogram model parameters."""
if anisotropy_scaling != self.anisotropy_scaling or \
anisotropy_angle != self.anisotropy_angle:
if self.verbose:
print("Adjusting data for anisotropy...")
self.anisotropy_scaling = anisotropy_scaling
self.anisotropy_angle = anisotropy_angle
self.X_ADJUSTED, self.Y_ADJUSTED = \
_adjust_for_anisotropy(np.vstack((self.X_ORIG, self.Y_ORIG)).T,
[self.XCENTER, self.YCENTER],
[self.anisotropy_scaling],
[self.anisotropy_angle]).T
self.variogram_model = variogram_model
if self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != 'custom':
raise ValueError("Specified variogram model '%s' is not supported." % variogram_model)
elif self.variogram_model == 'custom':
if variogram_function is None or not callable(variogram_function):
raise ValueError("Must specify callable function for custom variogram model.")
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
if self.verbose:
print("Updating variogram mode...")
self.lags, self.semivariance, self.variogram_model_parameters = \
core.initialize_variogram_model(self.X_ADJUSTED, self.Y_ADJUSTED, self.Z,
self.variogram_model, variogram_parameters,
self.variogram_function, nlags, weight,
'euclidean')
if self.verbose:
if self.variogram_model == 'linear':
print("Using '%s' Variogram Model" % 'linear')
print("Slope:", self.variogram_model_parameters[0])
print("Nugget:", self.variogram_model_parameters[1], '\n')
elif self.variogram_model == 'power':
print("Using '%s' Variogram Model" % 'power')
print("Scale:", self.variogram_model_parameters[0])
print("Exponent:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], '\n')
elif self.variogram_model == 'custom':
print("Using Custom Variogram Model")
else:
print("Using '%s' Variogram Model" % self.variogram_model)
print("Sill:", self.variogram_model_parameters[0])
print("Range:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2])
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print("Calculating statistics on variogram model fit...")
self.delta, self.sigma, self.epsilon = core.find_statistics(self.X_ADJUSTED, self.Y_ADJUSTED,
self.Z, self.variogram_function,
self.variogram_model_parameters,
'euclidean')
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR, '\n')
def display_variogram_model(self):
"""Displays variogram model with the actual binned data"""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.lags, self.semivariance, 'r*')
ax.plot(self.lags,
self.variogram_function(self.variogram_model_parameters, self.lags), 'k-')
plt.show()
def switch_verbose(self):
"""Allows user to switch code talk-back on/off. Takes no arguments."""
self.verbose = not self.verbose
def switch_plotting(self):
"""Allows user to switch plot display on/off. Takes no arguments."""
self.enable_plotting = not self.enable_plotting
def get_epsilon_residuals(self):
"""Returns the epsilon residuals for the variogram fit."""
return self.epsilon
def plot_epsilon_residuals(self):
"""Plots the epsilon residuals for the variogram fit."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(range(self.epsilon.size), self.epsilon, c='k', marker='*')
ax.axhline(y=0.0)
plt.show()
def get_statistics(self):
return self.Q1, self.Q2, self.cR
def print_statistics(self):
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR)
def _get_kriging_matrix(self, n, n_withdrifts):
"""Assembles the kriging matrix."""
xy = np.concatenate((self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis]), axis=1)
d = cdist(xy, xy, 'euclidean')
if self.UNBIAS:
a = np.zeros((n_withdrifts+1, n_withdrifts+1))
else:
a = np.zeros((n_withdrifts, n_withdrifts))
a[:n, :n] = - self.variogram_function(self.variogram_model_parameters, d)
np.fill_diagonal(a, 0.)
i = n
if self.regional_linear_drift:
a[:n, i] = self.X_ADJUSTED
a[i, :n] = self.X_ADJUSTED
i += 1
a[:n, i] = self.Y_ADJUSTED
a[i, :n] = self.Y_ADJUSTED
i += 1
if self.point_log_drift:
for well_no in range(self.point_log_array.shape[0]):
log_dist = np.log(np.sqrt((self.X_ADJUSTED - self.point_log_array[well_no, 0])**2 +
(self.Y_ADJUSTED - self.point_log_array[well_no, 1])**2))
if np.any(np.isinf(log_dist)):
log_dist[np.isinf(log_dist)] = -100.0
a[:n, i] = - self.point_log_array[well_no, 2] * log_dist
a[i, :n] = - self.point_log_array[well_no, 2] * log_dist
i += 1
if self.external_Z_drift:
a[:n, i] = self.z_scalars
a[i, :n] = self.z_scalars
i += 1
if self.specified_drift:
for arr in self.specified_drift_data_arrays:
a[:n, i] = arr
a[i, :n] = arr
i += 1
if self.functional_drift:
for func in self.functional_drift_terms:
a[:n, i] = func(self.X_ADJUSTED, self.Y_ADJUSTED)
a[i, :n] = func(self.X_ADJUSTED, self.Y_ADJUSTED)
i += 1
if i != n_withdrifts:
warnings.warn("Error in creating kriging matrix. Kriging may fail.", RuntimeWarning)
if self.UNBIAS:
a[n_withdrifts, :n] = 1.0
a[:n, n_withdrifts] = 1.0
a[n:n_withdrifts + 1, n:n_withdrifts + 1] = 0.0
return a
def _exec_vector(self, a, bd, xy, xy_orig, mask, n_withdrifts, spec_drift_grids):
"""Solves the kriging system as a vectorized operation. This method
can take a lot of memory for large grids and/or large datasets."""
npt = bd.shape[0]
n = self.X_ADJUSTED.shape[0]
zero_index = None
zero_value = False
a_inv = scipy.linalg.inv(a)
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
if self.UNBIAS:
b = np.zeros((npt, n_withdrifts+1, 1))
else:
b = np.zeros((npt, n_withdrifts, 1))
b[:, :n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
if zero_value:
b[zero_index[0], zero_index[1], 0] = 0.0
i = n
if self.regional_linear_drift:
b[:, i, 0] = xy[:, 0]
i += 1
b[:, i, 0] = xy[:, 1]
i += 1
if self.point_log_drift:
for well_no in range(self.point_log_array.shape[0]):
log_dist = np.log(np.sqrt((xy[:, 0] - self.point_log_array[well_no, 0])**2 +
(xy[:, 1] - self.point_log_array[well_no, 1])**2))
if np.any(np.isinf(log_dist)):
log_dist[np.isinf(log_dist)] = -100.0
b[:, i, 0] = - self.point_log_array[well_no, 2] * log_dist
i += 1
if self.external_Z_drift:
b[:, i, 0] = self._calculate_data_point_zscalars(xy_orig[:, 0], xy_orig[:, 1])
i += 1
if self.specified_drift:
for spec_vals in spec_drift_grids:
b[:, i, 0] = spec_vals.flatten()
i += 1
if self.functional_drift:
for func in self.functional_drift_terms:
b[:, i, 0] = func(xy[:, 0], xy[:, 1])
i += 1
if i != n_withdrifts:
warnings.warn("Error in setting up kriging system. Kriging may fail.", RuntimeWarning)
if self.UNBIAS:
b[:, n_withdrifts, 0] = 1.0
if (~mask).any():
mask_b = np.repeat(mask[:, np.newaxis, np.newaxis], n_withdrifts+1, axis=1)
b = np.ma.array(b, mask=mask_b)
if self.UNBIAS:
x = np.dot(a_inv, b.reshape((npt, n_withdrifts+1)).T).reshape((1, n_withdrifts+1, npt)).T
else:
x = np.dot(a_inv, b.reshape((npt, n_withdrifts)).T).reshape((1, n_withdrifts, npt)).T
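        # The kriged estimate at each point is the weighted sum of the data values Z using the
        # solved kriging weights; the kriging variance comes from the same solution vector
        # applied to the negated right-hand side.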
zvalues = np.sum(x[:, :n, 0] * self.Z, axis=1)
sigmasq = np.sum(x[:, :, 0] * -b[:, :, 0], axis=1)
return zvalues, sigmasq
def _exec_loop(self, a, bd_all, xy, xy_orig, mask, n_withdrifts, spec_drift_grids):
"""Solves the kriging system by looping over all specified points.
Less memory-intensive, but involves a Python-level loop."""
npt = bd_all.shape[0]
n = self.X_ADJUSTED.shape[0]
zvalues = np.zeros(npt)
sigmasq = np.zeros(npt)
a_inv = scipy.linalg.inv(a)
for j in np.nonzero(~mask)[0]: # Note that this is the same thing as range(npt) if mask is not defined,
bd = bd_all[j] # otherwise it takes the non-masked elements.
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
else:
zero_index = None
zero_value = False
if self.UNBIAS:
b = np.zeros((n_withdrifts+1, 1))
else:
b = np.zeros((n_withdrifts, 1))
b[:n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
if zero_value:
b[zero_index[0], 0] = 0.0
i = n
if self.regional_linear_drift:
b[i, 0] = xy[j, 0]
i += 1
b[i, 0] = xy[j, 1]
i += 1
if self.point_log_drift:
for well_no in range(self.point_log_array.shape[0]):
log_dist = np.log(np.sqrt((xy[j, 0] - self.point_log_array[well_no, 0])**2 +
(xy[j, 1] - self.point_log_array[well_no, 1])**2))
if np.any(np.isinf(log_dist)):
log_dist[np.isinf(log_dist)] = -100.0
b[i, 0] = - self.point_log_array[well_no, 2] * log_dist
i += 1
if self.external_Z_drift:
b[i, 0] = self._calculate_data_point_zscalars(xy_orig[j, 0], xy_orig[j, 1], type_='scalar')
i += 1
if self.specified_drift:
for spec_vals in spec_drift_grids:
                    b[i, 0] = spec_vals.flatten()[j]
i += 1
if self.functional_drift:
for func in self.functional_drift_terms:
b[i, 0] = func(xy[j, 0], xy[j, 1])
i += 1
if i != n_withdrifts:
warnings.warn("Error in setting up kriging system. Kriging may fail.", RuntimeWarning)
if self.UNBIAS:
b[n_withdrifts, 0] = 1.0
x = | np.dot(a_inv, b) | numpy.dot |
# non-resonant leptogenesis with one decaying sterile neutrino using the density matrix equations. Equations from 1112.4528
import ulysses
import numpy as np
from odeintw import odeintw
from ulysses.numba import jit
@jit
def fast_RHS(y0, d, w1, n1eq, epstt,epsmm,epsee,epstm,epste,epsme,c1t,c1m,c1e):
N1 = y0[0]
Ntt = y0[1]
Nbb = y0[2]
c1tc = np.conjugate(c1t)
c1mc = np.conjugate(c1m)
    c1ec = np.conjugate(c1e)
import numpy as np
from standard_PCA import std_PCA
# use log to avoid the product overflowing the floating-point representation
def geo_mean_through_log(numberList):
    # if any element is effectively zero (or negative), return 0
    if np.amin(numberList) <= 1.e-12:
        return 0
logNumberList = np.log(numberList)
return np.exp(logNumberList.sum()/len(numberList))
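# Illustrative check (values are made up): geo_mean_through_log([1.0, 4.0, 16.0]) returns 4.0,
# the geometric mean, while any entry at or below 1e-12 makes the function return 0.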
#data preprocessing helper methods
def scaleVar(dataframe,colArray):
'''
    Normalize columns "together", meaning that the S.D. and mean are computed over all of those columns combined.
    This makes more sense if the columns are similar in meaning, for example the amount paid in each of 6 months:
    all 6 columns are normalized with the same variance.
Example of usage:
    scaleVar(df, df.columns.values[2:4]) # scale the 3rd and 4th columns together by dividing by the same S.D.
'''
SD = dataframe[colArray].stack().std(); #compute overall S.D.
if SD == 0: #all number are the same. No need to do anything
return;
dataframe[colArray] = dataframe[colArray]/SD
def scaleVarOneCol(dataframe,nameStr):
'''
    Given the name of one column, scale that column so that its S.D. is 1 (the column is
    divided by its S.D.; the mean is not subtracted). For example,
    df = pandas.read_csv("some_path")
    scaleVarOneCol(df, "feature1")
'''
if dataframe[nameStr].std() == 0: #all number are the same. No need to do anything
return;
dataframe[nameStr] = dataframe[nameStr]/dataframe[nameStr].std()
#input check
def input_check(n,k,d,B,function_name='the function'):
'''
Check that B is a list of k matrices of size n x n, and that d <= n.
'''
if (isinstance(function_name, str) == False):
print("Error: check_input is used with function name that is not string. Exit the check.")
return 1
if (k<1):
print("Error: " + function_name + " is called with k<1.")
return 2
if (len(B) < k):
print("Error: " + function_name + " is called with not enough matrices in B.")
return 3
#check that matrices are the same size as n
for i in range(k):
if (B[i].shape != (n,n)):
print("Error: " + function_name + " is called with input matrix B_i not the correct size." + "Note: i=" + str(i) + " , starting indexing from 0 to k-1")
return 4
if (((d>0) and (d<=n)) == False):
print("Error: " + function_name + " is called with invalid value of d, which should be a number between 1 and n inclusive.")
return 5
return 0 #no error case
def getObj(n,k,d,B,X):
"""
Given k PSD n-by-n matrices B1,...,Bk, and a projection matrix X which is n-by-n, give variance and loss of each group i.
Additionally, compute max min variance, min max loss, Nash Social Welfare, and total variance objective by this solution X.
The matrix B_i should be centered (mean 0), since the formula to calculate variance will be by the dot product of B_i and X
Arguments:
n: original number of features (size of all B_i's)
k: number of groups
d: the target dimension
B: list of PSD matrices, as numpy matrices. It must contain at least k matrices. If there are more than k matrices provided, the first k will be used as k groups.
X: given solution (bad notation!! i cannot bear with it any more...). This method will still work even if not PSD or symmmetric or wrong rank as long as X has a correct dimension
Return: a dictionary with keys 'Loss', 'Var', and 'Best' (for the best possible PCA in that group as if other group does not exist) to each group,
and three objectives MM_Var, MM_Loss, and NSW.
"""
#input check
if (input_check(n, k, d, B, function_name='fairDimReductionFractional') > 0):
return -1
#rank check
if (np.linalg.matrix_rank(X) != d):
print("Warning: getObj is called with X having rank not equal to d.")
obj = dict()
# np.multiply is element-wise multiplication, np.sum(np.multiply) is computing <A, B>
    # ||A P||_F^2 = <A^T A, P P^T>
best = [np.sum(np.sort(np.linalg.eigvalsh(B[i]))[-d:]) for i in range(k)]
loss = [np.sum(np.multiply(B[i],X)) - best[i] for i in range(k)]
var = [np.sum(np.multiply(B[i],X)) for i in range(k)]
#welfare objective
    obj.update({'MM_Var': np.amin(var), 'MM_Loss': np.amin(loss), 'NSW': geo_mean_through_log(var)})
"""
Created by <NAME> on September 24, 2019
infoGAIL benchmark in the toy domain
"""
import torch.optim as optim
import torch
import torch.nn as nn
import numpy as np
import math
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(49) # ensures repeatability
np.random.seed(49)
from low_dim.generate_environment import create_simple_classification_dataset
from torch.distributions import OneHotCategorical
from low_dim.utils.helper_utils import save_performance_results
from scheduling.methods.infogail_scheduling import one_hot_embedding, my_entropy
# TODO: jumpstart if does not work
class PolicyNetwork(nn.Module):
def __init__(self,
state_dim,
action_dim,
embedding_dim):
super(PolicyNetwork, self).__init__()
self.forward_pass = nn.Sequential(
nn.Linear(state_dim + embedding_dim, 32),
nn.ReLU(),
nn.Linear(32, action_dim),
nn.Softmax(dim=0)
)
def forward(self, state, latent_code):
input_data = torch.cat((state, latent_code), dim=0)
return self.forward_pass(input_data)
class Discriminator(nn.Module):
def __init__(self,
state_dim,
action_dim):
super(Discriminator, self).__init__()
self.forward_pass = nn.Sequential(
nn.Linear(state_dim + action_dim, 32),
nn.ReLU(),
nn.Linear(32, 1),
nn.Sigmoid()
)
def forward(self, state, action):
input_data = torch.cat((state, action), dim=0)
return self.forward_pass(input_data)
class AuxiliaryDistributionPredictor(nn.Module):
def __init__(self,
state_dim,
action_dim,
embedding_dim):
super(AuxiliaryDistributionPredictor, self).__init__()
self.forward_pass = nn.Sequential(
nn.Linear(state_dim + action_dim, 32),
nn.ReLU(),
nn.Linear(32, embedding_dim),
nn.Softmax(dim=0)
)
def forward(self, state, action):
input_data = torch.cat((state, action), dim=0)
return self.forward_pass(input_data)
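# Minimal shape sketch (the 2/2/2 dimensions below mirror the toy setup used by InfoGAIL
# further down and are otherwise an assumption):
#   policy = PolicyNetwork(state_dim=2, action_dim=2, embedding_dim=2)
#   probs = policy(torch.zeros(2), torch.tensor([1.0, 0.0]))  # 1D tensor of 2 action probabilities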
# noinspection PyArgumentList
class InfoGAIL:
"""
class structure to test infoGAIL benchmark
"""
def __init__(self):
# Training set generation
self.num_schedules = 50
it_1 = [True, False, False]
it_2 = [False, True, False]
it_3 = [False, False, True]
it = [it_2, it_3, it_1]
x_data, y = create_simple_classification_dataset(self.num_schedules, train=it[0][0], cv=it[0][1])
x = []
for each_ele in x_data:
x.append(each_ele[2:])
self.X = torch.Tensor(x).reshape(-1, 2)
self.Y = torch.Tensor(y).reshape((-1, 1))
state_dimension = 2
action_dimension = 2
embedding_dimension = 2
# Initialise the network.
device = torch.device("cpu")
self.policy = PolicyNetwork(
state_dim=state_dimension,
action_dim=action_dimension,
embedding_dim=embedding_dimension
).to(device)
self.discriminator = Discriminator(
state_dim=state_dimension,
action_dim=action_dimension
).to(device)
self.distribution_gen = AuxiliaryDistributionPredictor(
state_dim=state_dimension,
action_dim=action_dimension,
embedding_dim=embedding_dimension
).to(device)
self.policy_opt = optim.Adam(self.policy.parameters(), lr=.0001)
self.disc_opt = optim.Adam(self.discriminator.parameters(), lr=.001)
self.distro_opt = optim.Adam(self.distribution_gen.parameters(), lr=.0001) # policy opt could be
self.schedule_starts = np.linspace(0, 20 * (self.num_schedules - 1), num=self.num_schedules)
self.gamma = .95
def train(self):
epochs = 50000
lambda_1 = 1
lambda_2 = 0
for epoch in range(epochs):
discriminator_acc = 0
avg_policy_loss = 0
avg_discrim_loss = 0
# sample a timestep before the cutoff for cross_validation
            chosen_schedule_start = int(np.random.choice(self.schedule_starts))
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import animation
import seaborn as sns
import numpy as np
import cmocean
import os
from mpl_toolkits.axes_grid1 import AxesGrid
from mpl_toolkits.axes_grid1 import make_axes_locatable
import scipy
import scipy.ndimage
from scipy.stats import norm
import matplotlib.image as mpimg
class Plotter():
def __init__(self, dic_data, deck, data_modes,
plot_deltas = False):
self.zz = deck.targetplot
plot_contour_linear = deck.doc["Plots"]["Contour Plots"]["Linear"]["Plot_it"]
plot_contour_log = deck.doc["Plots"]["Contour Plots"]["Log"]["Plot_it"]
plot_quiver = deck.doc["Plots"]["Quiver"]["Plot_it"]
plot_streamplots = deck.doc["Plots"]["Streamplots"]["Plot_it"]
gif_heatmaps = deck.doc["Plots"]["Heatmaps"]["Gif_it"]
gif_contourlin = deck.doc["Plots"]["Contour Plots"]["Linear"]["Gif_it"]
gif_contourlog = deck.doc["Plots"]["Contour Plots"]["Log"]["Gif_it"]
for self.index, dic_image in enumerate(dic_data.dataframe):
index = self.index
if plot_contour_linear.lower() == "true":
self.create_contourplot_linear(dic_data.dic_paths[index], dic_image, deck, data_modes)
if plot_contour_log.lower() == "true":
self.create_contourplot_log(dic_data.dic_paths[index], dic_image, deck, data_modes)
if plot_quiver.lower() == "true":
self.create_quiver(dic_data.dic_paths[index], dic_image, deck)
if plot_streamplots.lower() == "true":
self.create_streamplot(dic_data.dic_paths[index], dic_image, deck)
# Do we really need this ?
self.plot_dataset(dic_data.dic_paths[index], dic_image, deck)
if plot_deltas == True:
if index == 0:
pass
else:
self.plot_deltas(dic_data.dic_paths[index], dic_image, deck)
if deck.plot_heatmaps.lower() == "true":
for index2, gdf in enumerate(data_modes.grouped):
if index == index2:
self.build_deltaheatmaps(dic_data.dic_paths[index], gdf, deck, data_modes.scale_min, data_modes.scale_max)
if gif_heatmaps == "true":
self.create_heatmaps_gif(data_modes.grouped, deck, data_modes.scale_min, data_modes.scale_max)
if gif_contourlin.lower() == "true":
self.create_contourplotlin_gif(dic_data.dataframe, deck, data_modes, dic_data.dic_paths)
if gif_contourlog.lower() == "true":
self.create_contourplotlog_gif(dic_data.dataframe, deck, data_modes, dic_data.dic_paths)
def filter_NaN_Matrix(self, U, sigVal):
        # Limit NaN propagation when applying the Gaussian smoothing filter to the image
V=U.copy()
V[np.isnan(U)]=0
VV=scipy.ndimage.gaussian_filter(V,sigma=sigVal)
W=0*U.copy()+1
W[np.isnan(U)]=0
WW=scipy.ndimage.gaussian_filter(W,sigma=sigVal)
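        # Dividing the smoothed data VV by the smoothed mask WW renormalizes for the weight
        # lost to NaN cells, so valid values near gaps are not artificially damped.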
        np.seterr(divide='ignore', invalid='ignore')  # silence divide-by-zero warnings
Z=VV/WW
return Z
def create_contourplot_log(self, file_name, df, deck, data_modes):
x = list(sorted(set( df["x"].values )))
y = list(sorted(set( df["y"].values )))
img_name = file_name[0 : len(file_name) -10] + '.tif'
img = plt.imread(img_name)
fig, ax = plt.subplots(dpi=300,)
ax.imshow(img, alpha = 1, cmap = 'gray')
df.loc[df["sigma"] == -1, deck.doc["Plots"]['Target Plot'] ] = np.nan
e1 = np.array(df[deck.doc["Plots"]['Target Plot']].values)
e1 = e1.reshape(len(y), len(x))
levels = np.sort(np.append( np.append( -np.logspace(0.1, abs(data_modes.vmin_0),10) , np.linspace(-0.01,0.01,5) ), np.logspace(0.1,data_modes.vmax_0,15)))
ax.contour(x, y, e1, colors = 'k', linewidths = 0.5, levels = levels)
pcm = ax.pcolormesh(x,y,e1,norm=matplotlib.colors.SymLogNorm(linthresh=0.001, linscale=0.1, vmin=data_modes.vmin_0, vmax=data_modes.vmax_0),
cmap='plasma')
fig.colorbar(pcm, ax=ax, extend = 'both')
plt.title(deck.doc["Plots"]['Target Plot']+", "+str(self.index))
plot_dir = "./plots/"
check_folder = os.path.isdir(plot_dir)
if not check_folder:
os.makedirs(plot_dir)
plt.savefig("./plots/"+self.zz.strip('"')+"-"+file_name[:-4]+"-contourplot-log"+".png")
plt.close()
def create_contourplot_linear(self, file_name, df, deck, data_modes):
x = list(sorted(set( df["x"].values )))
y = list(sorted(set( df["y"].values )))
img_name = file_name[0 : len(file_name) -10] + '.tif'
img = plt.imread(img_name)
fig, ax = plt.subplots(dpi=300,)
ax.imshow(img, alpha = 1, cmap = 'gray')
df.loc[df["sigma"] == -1, deck.doc["Plots"]['Target Plot'] ] = np.nan
e1 = np.array(df[deck.doc["Plots"]['Target Plot']].values)
e1 = e1.reshape(len(y), len(x))
levels = np.linspace(data_modes.vmin_0, data_modes.vmax_0,10)
cs = plt.contourf(x, y, e1, origin = 'lower', extend = 'both', cmap = 'plasma', alpha = 0.5)
plt.contour(x, y, e1, levels = levels, colors = 'k', linewidths = 0.5)
fig.colorbar(cs)
plt.title(deck.doc["Plots"]['Target Plot']+", "+str(self.index))
plot_dir = "./plots/"
check_folder = os.path.isdir(plot_dir)
if not check_folder:
os.makedirs(plot_dir)
plt.savefig("./plots/"+self.zz.strip('"')+"-"+file_name[:-4]+"-contourplot-linear"+".png")
plt.close()
def create_quiver(self, file_name, df, deck):
x = list(sorted(set( df["x"].values )))
y = list(sorted(set( df["y"].values )))
df.loc[df["sigma"] == -1, "gamma" ] = np.nan
self.teta_ = np.array(df["gamma"].values)
teta_1 = np.cos(self.teta_)
self.teta_1 = teta_1.reshape(len(y), len(x))
teta_2 = np.sin(self.teta_)
self.teta_2 = teta_2.reshape(len(y), len(x))
        contour_ = np.array(df[self.zz].values)
"""
File Name: _per_species_results.py
Authors: {% <AUTHOR> %}
Date: 12-07-2021
Description: {% <DESCRIPTION> %}
"""
import numpy as np
import argparse
import os
import json
import math
from skimage import io
from sklearn.metrics import (f1_score, jaccard_score, accuracy_score,
confusion_matrix)
from skimage.util import img_as_float, img_as_bool
from skimage.color import rgb2gray
from skimage.transform import rotate
from tqdm import tqdm
def get_res(pred_img, gt_img):
pred_img = pred_img.reshape(-1)
gt_img = gt_img.reshape(-1)
IoU = jaccard_score(gt_img, pred_img, zero_division=1.0)
dice = f1_score(gt_img, pred_img, zero_division=1.0)
acc = accuracy_score(gt_img, pred_img)
tn, fp, fn, tp = confusion_matrix(gt_img, pred_img, labels=[0, 1]).ravel()
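    # sensitivity = TP / (TP + FN) (recall of the foreground class);
    # specificity = TN / (TN + FP); both default to 1.0 when undefined below.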
if (tp + fn) == 0:
sensitivity = 1.0
else:
sensitivity = tp / (tp + fn)
if (tn + fp) == 0:
specificity = 1.0
else:
specificity = tn / (tn + fp)
if math.isnan(IoU) or math.isnan(dice) or math.isnan(acc) or math.isnan(sensitivity) or math.isnan(specificity):
print("Found NaN")
print(os.path.join(pred_path, pred))
print(IoU, dice, acc, sensitivity, specificity)
import sys
sys.exit(1)
return dice, IoU, acc, sensitivity, specificity
def main(args):
pred_path = args.pred_path
gt_path = args.gt_path
pred_imgs = os.listdir(pred_path)
gt_img = os.listdir(gt_path)
cucumber_res = {"dice": np.array([]),
"acc": np.array([]),
"IoU": np.array([]),
"sens": np.array([]),
"spec": np.array([])}
canola_res = {"dice": np.array([]),
"acc": np.array([]),
"IoU": np.array([]),
"sens": np.array([]),
"spec": np.array([])}
soy_res = {"dice": np.array([]),
"acc": np.array([]),
"IoU": np.array([]),
"sens": np.array([]),
"spec": np.array([])}
wheat_res = {"dice": np.array([]),
"acc": np.array([]),
"IoU": np.array([]),
"sens": np.array([]),
"spec": np.array([])}
for pred in tqdm(pred_imgs):
species = int(pred.split("-")[1])
pred_img = io.imread(os.path.join(pred_path, pred))
pred_img = img_as_bool(rgb2gray(pred_img))
gt_name = os.path.join(gt_path, pred.replace("predicted", "mask"))
if ".jpg" in gt_name:
gt_name = gt_name.replace(".jpg", ".png")
gt_img = io.imread(os.path.join(gt_name))
gt_img = img_as_bool(rgb2gray(gt_img))
# Some of the tiff tags messed up rotation of the GT, specifically in
# the case of the Full Image Cucumber set. Here we rotate the GT 90
# degrees clockwise if necessary (270 degrees counter clockwise)
if gt_img.shape[0] > pred_img.shape[0] and gt_img.shape[1] < pred_img.shape[1]:
gt_img = img_as_bool(rotate(gt_img, 270, resize=True,
preserve_range=True))
# Crop to size
pred_img = pred_img[:gt_img.shape[0], :gt_img.shape[1]]
w, h = pred_img.shape
dice, IoU, acc, sens, spec = get_res(pred_img, gt_img)
if species == 0:
cucumber_res["dice"] = np.append(cucumber_res["dice"], dice)
cucumber_res["acc"] = np.append(cucumber_res["acc"], acc)
cucumber_res["IoU"] = np.append(cucumber_res["IoU"], IoU)
cucumber_res["sens"] = np.append(cucumber_res["sens"], sens)
cucumber_res["spec"] = np.append(cucumber_res["spec"], spec)
elif species == 1:
canola_res["dice"] = np.append(canola_res["dice"], dice)
canola_res["acc"] = np.append(canola_res["acc"], acc)
canola_res["IoU"] = np.append(canola_res["IoU"], IoU)
canola_res["sens"] = np.append(canola_res["sens"], sens)
canola_res["spec"] = np.append(canola_res["spec"], spec)
elif species == 2:
soy_res["dice"] = np.append(soy_res["dice"], dice)
soy_res["acc"] = np.append(soy_res["acc"], acc)
soy_res["IoU"] = np.append(soy_res["IoU"], IoU)
soy_res["sens"] = np.append(soy_res["sens"], sens)
soy_res["spec"] = np.append(soy_res["spec"], spec)
elif species == 3:
wheat_res["dice"] = np.append(wheat_res["dice"], dice)
wheat_res["acc"] = np.append(wheat_res["acc"], acc)
wheat_res["IoU"] = np.append(wheat_res["IoU"], IoU)
wheat_res["sens"] = np.append(wheat_res["sens"], sens)
wheat_res["spec"] = np.append(wheat_res["spec"], spec)
else:
print("Invalid species")
# Cucumber
    cuc_dice = np.mean(cucumber_res["dice"])
import importlib
from hydroDL.master import basins
from hydroDL.app import waterQuality
from hydroDL import kPath, utils
from hydroDL.model import trainTS, rnn, crit
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
from sklearn.linear_model import LinearRegression
from hydroDL.data import usgs, gageII, gridMET, ntn, transform
import torch
import os
import json
import numpy as np
import pandas as pd
import time
import matplotlib.pyplot as plt
siteNo = '401733105392404'
code = '00955'
freq = 'D'
sn = 1
# load data
varF = gridMET.varLst+ntn.varLst
varC = usgs.varC
varQ = usgs.varQ
varLst = varF+varC+varQ
df = waterQuality.readSiteTS(siteNo, varLst=varLst)
# training / testing
yr = df.index.year.values
ind1 = np.where(yr <= 2005)[0]
ind2 = np.where(yr > 2005)[0]
dfYP = pd.DataFrame(index=df.index, columns=['WRTDS', 'LSTM'])
# LSTM
varC = ['00060']
rho = 365
dfX = pd.DataFrame({'date': df.index}).set_index('date')
# dfX = dfX.join(np.log(df['pr']+sn))
dfX = dfX.join(df['pr'])
dfXN = (dfX-dfX.quantile(0.1))/(dfX.quantile(0.9)-dfX.quantile(0.1))
dfY = pd.DataFrame({'date': df.index}).set_index('date')
# dfY = dfY.join(np.log(df['00060']+sn))
dfY = dfY.join(df['00060'])
dfY = dfY.join(df['00955'])
dfYN = (dfY-dfY.quantile(0.1))/(dfY.quantile(0.9)-dfY.quantile(0.1))
dfC = dfYN.dropna(how='any')
xLst = list()
yLst = list()
# organize data: build a rho-step forcing window ending at each observation date
for k in range(len(dfC)):
ct = dfC.index[k]
if freq == 'D':
ctR = pd.date_range(
ct-pd.Timedelta(days=rho-1), ct)
elif freq == 'W':
ctR = pd.date_range(
ct-pd.Timedelta(days=rho*7-1), ct, freq='W-TUE')
temp = pd.DataFrame({'date': ctR}).set_index('date')
yLst.append([dfC.iloc[k]['00060']])
tempX = temp.copy()
tempX = tempX.join(dfXN)
xLst.append(tempX.values)
x = np.stack(xLst, axis=-1).swapaxes(1, 2).astype(np.float32)
y = np.stack(yLst, axis=-1).swapaxes(0, 1).astype(np.float32)
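# x has shape (rho, n_samples, n_forcing_features) and y has shape (n_samples, 1):
# one rho-step forcing window per sampled observation date.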
x[np.where(np.isnan(x))] = 0  # assumed fill value for missing forcing
"""Implementation of set managers for prototype set models.
Copyright by <NAME>
Released under the MIT license - see LICENSE file for details
"""
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sparse
from scipy.sparse.csgraph import connected_components
from sklearn.metrics import pairwise_distances
import proset.shared as shared
MERGE_TOL = 1e-8
# two prototypes are considered identical and suitable for merging if the maximum absolute difference across all feature
# and target values is at most equal to this value
class SetManager(metaclass=ABCMeta):
"""Abstract base class for set managers.
"""
def __init__(self, target):
"""Initialize set manager.
:param target: list-like object; target for supervised learning
"""
self._batches = []
self._meta = {
# track properties of the fitting problem that may depend on subclass implementation; this is passed to all
# static methods called by other public methods than __init__ in case overriding requires additional
# information
"num_features": None # set when adding batches
}
# noinspection PyTypeChecker
self._meta.update(self._get_baseline_distribution(target))
@staticmethod
@abstractmethod
def _get_baseline_distribution(target): # pragma: no cover
"""Compute baseline distribution parameters from target for supervised learning.
:param target: see docstring of __init__() for details
:return: dict; contains information regarding the baseline information that depends on the model
"""
return NotImplementedError(
"Abstract base class SetManager has no default implementation for method _get_baseline_distribution()."
)
@property
def num_batches(self):
"""Get number of batches already added to SetManager instance.
:return: integer; number of batches
"""
return len(self._batches)
@property
def num_features(self):
"""Get number of features expected for input matrices.
:return: integer or None; expected number of features; None if no batch has been added yet
"""
return self._meta["num_features"]
def get_active_features(self, num_batches=None):
"""Get indices of active feature across all batches.
:param num_batches: non-negative integer or None; number of batches to use for evaluation; pass None for all
batches
:return: 1D numpy array of non-negative integers; active feature indices w.r.t. original feature matrix
"""
num_batches = self._check_num_batches(
num_batches=num_batches, num_batches_actual=self.num_batches, permit_array=False
)
active_features = [
self._batches[i]["active_features"] for i in range(num_batches) if self._batches[i] is not None
]
if len(active_features) == 0:
return np.zeros(0, dtype=int)
return np.unique(np.hstack(active_features))
@staticmethod
def _check_num_batches(num_batches, num_batches_actual, permit_array):
"""Check requested number of batches is consistent with actual number.
:param num_batches: non-negative integer or 1D numpy array of strictly increasing, non-negative integers; number
of batches for which computation is requested
:param num_batches_actual: non-negative integer; actual number of batches
:param permit_array: boolean; whether passing an array for num_batches is permissible
:return: num_batches or num_batches_actual if the former is None; raises an error if a check fails
"""
if isinstance(num_batches, np.ndarray):
if not permit_array:
raise TypeError("Parameter num_batches must not be an array.")
if len(num_batches.shape) != 1:
raise ValueError("Parameter num_batches must be 1D if passing an array.")
if not np.issubdtype(num_batches.dtype, np.integer):
raise TypeError("Parameter num_batches must be of integer type if passing an array.")
if np.any(num_batches < 0):
raise ValueError("Parameter num_batches must not contain negative values if passing an array.")
if np.any(num_batches > num_batches_actual):
raise ValueError(
" ".join([
"Parameter num_batches must not contain values greater than the available number of",
"batches ({}) if passing an array.".format(num_batches_actual)
])
)
if np.any(np.diff(num_batches) <= 0):
raise ValueError(
"Parameter num_batches must contain strictly increasing elements if passing an array."
)
num_batches = num_batches.copy() # output should not be a reference to input
else:
if num_batches is None:
num_batches = num_batches_actual
if not np.issubdtype(type(num_batches), np.integer):
raise TypeError("Parameter num_batches must be an integer.")
if num_batches < 0:
raise ValueError("Parameter num_batches must not be negative.")
if num_batches > num_batches_actual:
raise ValueError(
"Parameter num_batches must be less than or equal to the available number of batches ({}).".format(
num_batches_actual
))
return num_batches
def get_num_prototypes(self):
"""Get number of prototypes across all batches.
The same training sample is counted multiple times if it appears in multiple batches.
:return: integer; number of prototypes
"""
return np.sum([batch["scaled_prototypes"].shape[0] for batch in self._batches if batch is not None])
def add_batch(self, batch_info):
"""Add batch of prototypes.
:param batch_info: dict with the following fields:
- prototypes: 2D numpy float array; feature matrix of prototypes; sparse matrices or infinite/missing values
not supported
- target: 1D numpy array; target for supervised learning; must have as many elements as prototypes has rows
- feature_weights: 1D numpy float array; feature_weights for the batch; must have as many elements as
prototypes has columns; elements must not be negative
- prototype_weights: 1D numpy float array; prototype weights for the batch; must have as many elements as
prototypes has rows; elements must not be negative
- sample_index: 1D numpy integer array; indices of prototypes in training sample; must have as many elements
as prototypes has rows
:return: no return arguments; internal state updated with new batch
"""
self._meta["num_features"] = self._check_batch(batch_info=batch_info, meta=self._meta)
self._batches.append(self._process_batch(batch_info))
@staticmethod
def _check_batch(batch_info, meta):
"""Check batch definition for consistent dimensions.
:param batch_info: see docstring of add_batch() for details
:param meta: dict; must have key 'num_features' but can store None value if not determined yet
:return: integer; number of features; raises a ValueError if a check fails
"""
if len(batch_info["prototypes"].shape) != 2:
raise ValueError("Parameter prototypes must be a 2D array.")
if meta["num_features"] is not None and batch_info["prototypes"].shape[1] != meta["num_features"]:
raise ValueError("Parameter prototypes has {} columns but {} are expected.".format(
batch_info["prototypes"].shape[1], meta["num_features"]
))
if len(batch_info["target"].shape) != 1:
raise ValueError("Parameter target must be a 1D array.")
if batch_info["target"].shape[0] != batch_info["prototypes"].shape[0]:
raise ValueError("Parameter target must have as many elements as prototypes has rows.")
if len(batch_info["feature_weights"].shape) != 1:
raise ValueError("Parameter feature_weights must be a 1D array.")
if batch_info["feature_weights"].shape[0] != batch_info["prototypes"].shape[1]:
raise ValueError("Parameter feature_weights must have as many elements as prototypes has columns.")
if len(batch_info["prototype_weights"].shape) != 1:
raise ValueError("Parameter prototype_weights must be a 1D array.")
if batch_info["prototype_weights"].shape[0] != batch_info["prototypes"].shape[0]:
raise ValueError("Parameter prototype_weights must have as many elements as prototypes has rows.")
if len(batch_info["sample_index"].shape) != 1:
raise ValueError("Parameter sample_index must be a 1D array.")
if not np.issubdtype(batch_info["sample_index"].dtype, np.integer):
raise TypeError("Parameter sample_index must be an integer array.")
if batch_info["sample_index"].shape[0] != batch_info["prototypes"].shape[0]:
raise ValueError("Parameter sample_index must have as many elements as prototypes has rows.")
return batch_info["prototypes"].shape[1]
# noinspection PyUnusedLocal
@staticmethod
def _process_batch(batch_info):
"""Process batch information.
:param batch_info: see docstring of add_batch() for details
:return: dict or None; returns None if all prototype weights are zero as the batch has no impact on the model;
dict contains the information describing the batch in reduced form, taking advantage of sparseness; the
following keys and values are included:
- active_features: 1D numpy integer array; index vector of features with non-zero feature weights
- scaled_prototypes: 2D numpy float array; prototypes reduced to active prototypes and features, scaled with
feature weights
- ssq_prototypes: 1D numpy float array; row sums of scaled prototypes
- target: 1D numpy array; target values corresponding to scaled prototypes
- feature_weights: 1D numpy float array; feature weights reduced to active features
- prototype_weights: 1D numpy float array; prototype weights reduced to active prototypes
- sample_index: 1D numpy integer array; sample indices reduced to active prototypes
"""
if np.all(batch_info["prototype_weights"] == 0.0):
return None
active_features = np.nonzero(batch_info["feature_weights"] > 0.0)[0]
feature_weights = batch_info["feature_weights"][active_features]
active_prototypes = np.nonzero(batch_info["prototype_weights"] > 0.0)[0]
scaled_prototypes = batch_info["prototypes"][active_prototypes][:, active_features] * feature_weights
target = batch_info["target"][active_prototypes]
prototype_weights = batch_info["prototype_weights"][active_prototypes]
sample_index = batch_info["sample_index"][active_prototypes]
relation = np.nonzero(
pairwise_distances(X=np.hstack([scaled_prototypes, target[:, np.newaxis]]), metric="chebyshev") <= MERGE_TOL
) # find all pairs of features and target that are identical within tolerance
num_labels, labels = connected_components(
csgraph=sparse.coo_matrix((np.ones_like(relation[0]), (relation[0], relation[1]))),
directed=False,
return_labels=True
) # label groups of identical feature/target combinations
if num_labels < len(labels): # one or more prototypes can be merged together
sort_ix = np.lexsort([sample_index, labels])
# reduceat() requires equivalent prototypes to be grouped together; lexsort uses the last key as primary
# sort key; using sample index as secondary key means the smallest index in each group is first
changes = np.hstack([0, np.nonzero(np.diff(labels[sort_ix]))[0] + 1])
scaled_prototypes = scaled_prototypes[sort_ix][changes]
target = target[sort_ix][changes]
prototype_weights = np.add.reduceat(prototype_weights[sort_ix], indices=changes, axis=0)
sample_index = sample_index[sort_ix][changes]
return {
"active_features": active_features,
"scaled_prototypes": scaled_prototypes,
"ssq_prototypes": np.sum(scaled_prototypes ** 2.0, axis=1),
"target": target,
"feature_weights": feature_weights,
"prototype_weights": prototype_weights,
"sample_index": sample_index
}
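    # Added comment (not from the original module): the merge step above collapses prototypes whose scaled feature
    # vector and target agree within MERGE_TOL; for example, two duplicates with prototype weights 0.5 and 1.5 are
    # replaced by a single prototype with weight 2.0 that keeps the smaller of the two sample indices.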
def evaluate_unscaled(self, features, num_batches):
"""Compute unscaled predictions and scaling vector.
:param features: 2D numpy float array; feature matrix for which to compute unscaled predictions and scales
:param num_batches: non-negative integer, 1D numpy array of non-negative and strictly increasing integers, or
None; number of batches to use for evaluation; pass None for all batches; pass an array to evaluate for
multiple values of num_batches at once
:return: list of tuples; each tuple consists of two numpy arrays; the first array is either 1D or 2D and
has the same first dimension as features; it represents the unscaled predictions (class probabilities or
regression means); the second array is the corresponding scaling vector; if an integer is passed for
num_batches, the list has length 1; else, the list has one element per element of num_batches
"""
num_batches = self._check_evaluate_input(
features=features,
num_batches=num_batches,
num_batches_actual=self.num_batches,
permit_array=True,
meta=self._meta
)
impact, target, batch_index = self._compute_impact(
features=features,
batches=self._batches,
num_batches=num_batches[-1] if isinstance(num_batches, np.ndarray) else num_batches,
            # evaluate impact up to the batch with the largest index requested
meta=self._meta
)
return self._convert_to_unscaled(
impact=impact,
target=target,
batch_index=batch_index,
num_batches=num_batches,
meta=self._meta
)
@classmethod
def _check_evaluate_input(cls, features, num_batches, num_batches_actual, permit_array, meta):
"""Check whether input to evaluate_unscaled() is consistent.
:param features: see docstring of evaluate_unscaled() for details
:param num_batches: see docstring of evaluate_unscaled() for details
:param num_batches_actual: non-negative integer; actual number of batches
:param permit_array: boolean; whether passing an array for num_batches is permissible
:param meta: dict; must have key 'num_features' but can store None value if not determined yet
:return: num_batches or num_batches_actual if the former is None; raises an error if a check fails
"""
if len(features.shape) != 2:
raise ValueError("Parameter features must be a 2D array.")
if meta["num_features"] is not None and features.shape[1] != meta["num_features"]:
# evaluate_unscaled() can be called before any batches have been fitted to get the default model
raise ValueError("Parameter features has {} columns but {} are expected.".format(
features.shape[1], meta["num_features"]
))
return cls._check_num_batches(
num_batches=num_batches, num_batches_actual=num_batches_actual, permit_array=permit_array
)
@staticmethod
def _compute_impact(features, batches, num_batches, meta):
"""Compute impact of each prototype on each sample.
:param features: see docstring of evaluate_unscaled() for details
:param batches: list of dicts as generated by _process_batch()
:param num_batches: non-negative integer; number of batches used for computation
:param meta: dict; must contain key 'num_features' referencing number of features, unless batches is the empty
list or num_batches is 0
:return: three numpy arrays:
- 2D array of positive floats with one row per sample and one column per prototype from the batches used;
contains the impact of each prototype on the prediction of each sample
- 1D array with target values for the prototypes
- 1D array of non-negative integers; batch index
"""
if len(batches) == 0 or num_batches == 0: # default model is independent of prototypes
return np.zeros((features.shape[0], 0), dtype=float), np.zeros(0, dtype=float), np.zeros(0, dtype=int)
# dtype=float is wrong for a classifier target but that does not matter for empty matrices
impact = []
target = []
batch_index = []
for i in range(num_batches):
if batches[i] is not None:
if batches[i]["active_features"].shape[0] == 0: # no active features means a global adjustment
new_impact = np.tile(batches[i]["prototype_weights"], (features.shape[0], 1))
else:
if batches[i]["active_features"].shape[0] == meta["num_features"]: # no need to reduce input
scaled_features = features * batches[i]["feature_weights"]
else: # reduce input to active features
scaled_features = features[:, batches[i]["active_features"]] * batches[i]["feature_weights"]
# broadcast scaling across rows
new_impact = shared.quick_compute_similarity(
scaled_reference=scaled_features,
scaled_prototypes=batches[i]["scaled_prototypes"],
ssq_reference=np.sum(scaled_features ** 2.0, axis=1),
ssq_prototypes=batches[i]["ssq_prototypes"]
) * batches[i]["prototype_weights"]
impact.append(new_impact)
target.append(batches[i]["target"])
batch_index.append(i * np.ones(new_impact.shape[1], dtype=int))
if len(impact) == 0: # all batches are empty
return np.zeros((features.shape[0], 0), dtype=float), np.zeros(0, dtype=float), np.zeros(0, dtype=int)
return np.hstack(impact), np.hstack(target), np.hstack(batch_index)
@classmethod
@abstractmethod
def _convert_to_unscaled(cls, impact, target, batch_index, num_batches, meta): # pragma: no cover
"""Convert impact and target to unscaled predictions and scaling vector.
:param impact: as first return value of _compute_impact()
:param target: as second return value of _compute_impact()
:param batch_index: as third return value of _compute_impact()
:param num_batches: non-negative integer or 1D numpy array of strictly increasing integers; number of batches to
use for evaluation; pass an array to evaluate for multiple values of num_batches at once
:param meta: dict; properties of the fitting problem that may depend on the subclass
:return: as return argument of evaluate_unscaled()
"""
raise NotImplementedError(
"Abstract base class SetManager has no default implementation for method _compute_unscaled()."
)
def evaluate(self, features, num_batches, compute_familiarity):
"""Compute scaled predictions.
:param features: 2D numpy float array; feature matrix for which to compute scaled predictions
:param num_batches: non-negative integer, 1D numpy array of non-negative and strictly increasing integers, or
None; number of batches to use for evaluation; pass None for all batches; pass an array to evaluate for
multiple values of num_batches at once
:param compute_familiarity: boolean; whether to compute the familiarity for each sample
:return: list of numpy arrays; each array is either 1D or 2D with the same first dimension as features and
contains predictions (class probabilities or regression means); if an integer is passed for num_batches, the
list has length 1; else, the list has one element per element of num_batches
"""
unscaled = self.evaluate_unscaled(features, num_batches)
scaled = [(pair[0].transpose() / pair[1]).transpose() for pair in unscaled]
# transpose to broadcast scale over columns in case unscaled is 2D
if compute_familiarity:
return scaled, [pair[1] - 1.0 for pair in unscaled]
return scaled
def get_feature_weights(self, num_batches=None):
"""Get weights of active features for all batches as a matrix.
:param num_batches: non-negative integer or None; number of batches to export; pass None for all batches
:return: dict with keys:
            - weight_matrix: 2D numpy float array; this has one row per batch and one column per feature that is active
in at least one batch; features are sorted in order of descending weight for the first row, using
subsequent rows as tie-breaker
- feature_index: 1D numpy integer array; index vector indicating the order of features
"""
num_batches = self._check_num_batches(
num_batches=num_batches, num_batches_actual=self.num_batches, permit_array=False
)
active_features = self.get_active_features(num_batches)
if active_features.shape[0] == 0:
return {
"weight_matrix": np.zeros((num_batches, 0)),
"feature_index": np.zeros(0, dtype=int)
}
weight_matrix = []
for i in range(num_batches):
new_row = np.zeros(len(active_features))
if self._batches[i] is not None:
new_row[
np.searchsorted(active_features, self._batches[i]["active_features"])
] = self._batches[i]["feature_weights"]
weight_matrix.append(new_row)
order = np.lexsort(weight_matrix[-1::-1])[-1::-1]
# np.lexsort() uses the last argument as primary key and sorts in ascending order
return {
"weight_matrix": np.row_stack(weight_matrix)[:, order],
"feature_index": active_features[order]
}
def get_batches(self, features=None, num_batches=None):
"""Get batch information.
:param features: 2D numpy float array with a single row or None; if not None, per-feature similarities are
computed between features and each prototype
:param num_batches: non-negative integer or None; number of batches to export; pass None for all batches
:return: list whose elements are either dicts or None; None indicates the batch in this position contains no
prototypes; each dict has the following fields:
- active_features: 1D numpy integer array; index vector of features with non-zero feature weights
- prototypes: 2D numpy float array; prototypes reduced to active features
            - target: 1D numpy array; target values corresponding to prototypes
- feature_weights: 1D numpy float array; feature weights reduced to active features
- prototype_weights: 1D numpy float array; prototype weights
- sample_index: 1D numpy integer array; sample indices for prototypes
- similarities: 2D numpy array; per-feature similarities between the input features and each prototype; one
row per prototype and one column per active feature; this field is not included if features is None
"""
num_batches, features = self._check_get_batches_input(
features=features,
num_batches=num_batches,
num_batches_actual=self.num_batches,
meta=self._meta
)
batches = [{
"active_features": self._batches[i]["active_features"].copy(),
"prototypes":
self._batches[i]["scaled_prototypes"].copy() if self._batches[i]["feature_weights"].shape[0] == 0 else
self._batches[i]["scaled_prototypes"] / self._batches[i]["feature_weights"],
"target": self._batches[i]["target"].copy(),
"feature_weights": self._batches[i]["feature_weights"].copy(),
"prototype_weights": self._batches[i]["prototype_weights"].copy(),
"sample_index": self._batches[i]["sample_index"].copy()
} if self._batches[i] is not None else None for i in range(num_batches)]
if features is not None:
for batch in batches:
if batch is not None:
batch["similarities"] = self._compute_feature_similarities(
prototypes=batch["prototypes"],
features=features[batch["active_features"]],
feature_weights=batch["feature_weights"]
)
return batches
@classmethod
def _check_get_batches_input(cls, features, num_batches, num_batches_actual, meta):
"""Check whether input to get_batches() is consistent.
:param features: see docstring of get_batches() for details
:param num_batches: see docstring of get_batches() for details
:param num_batches_actual: non-negative integer; actual number of batches
:param meta: dict; must have key 'num_features' but can store None value if not determined yet
:return: two return values:
- non-negative integer; num_batches or num_batches_actual if the former is None
- 1D numpy float array or None; features converted to a 1D array if not None
raises an error if a check fails
"""
if features is None:
num_batches = cls._check_num_batches(
num_batches=num_batches,
num_batches_actual=num_batches_actual,
permit_array=False
)
else:
num_batches = cls._check_evaluate_input(
features=features,
num_batches=num_batches,
num_batches_actual=num_batches_actual,
permit_array=False,
meta=meta
)
if features.shape[0] != 1:
raise ValueError("Parameter features must have exactly one row.")
features = np.squeeze(features)
return num_batches, features
@staticmethod
def _compute_feature_similarities(prototypes, features, feature_weights):
"""Compute per-feature similarities between prototypes and a single reference sample.
:param prototypes: 2D numpy float array; prototypes
:param features: 1D numpy float array; features for reference sample
:param feature_weights: 1D numpy array of non-negative floats; feature weights
:return: as the value for key 'similarities' in the output of get_batches()
"""
return np.exp(-0.5 * ((prototypes - features) * feature_weights) ** 2.0)
def shrink(self):
"""Reduce internal state representation to active features across all batches.
:return: 1D numpy array of non-negative integers; indices of active features w.r.t. original training data
"""
active_features = self.get_active_features()
if self._meta["num_features"] is None: # nothing to do as no batches were ever added
return active_features # this is a vector of length zero by default
self._meta["num_features"] = active_features.shape[0]
for i in range(len(self._batches)):
if self._batches[i] is not None:
self._batches[i]["active_features"] = np.searchsorted(
active_features, self._batches[i]["active_features"]
) # locate batch active features among all active features
return active_features
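    # Added note (not from the original module): after shrink(), num_features is reduced, so subsequent calls to
    # evaluate() or evaluate_unscaled() are expected to pass feature matrices restricted to the returned columns.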
class ClassifierSetManager(SetManager):
"""Set manager class for proset classifier
"""
@staticmethod
def _get_baseline_distribution(target):
"""Compute baseline distribution parameters from target for classification.
:param target: see docstring of __init__() for details
:return: dict; contains information regarding the baseline information that depends on the model
"""
counts = shared.check_classifier_target(target)
return {"marginals": counts / | np.sum(counts) | numpy.sum |
import h5py
import numpy
import os
import random
import sys
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve # Python 3
def download(src, dst):
if not os.path.exists(dst):
# TODO: should be atomic
print('downloading %s -> %s...' % (src, dst))
urlretrieve(src, dst)
def get_dataset_fn(dataset):
if not os.path.exists('data'):
os.mkdir('data')
return os.path.join('data', '%s.hdf5' % dataset)
def get_dataset(which):
import h5sparse
hdf5_fn = get_dataset_fn(which)
try:
url = 'http://ann-benchmarks.com/%s.hdf5' % which
download(url, hdf5_fn)
except:
print("Cannot download %s" % url)
if which in DATASETS:
print("Creating dataset locally")
DATASETS[which](hdf5_fn)
hdf5_f = h5sparse.File(hdf5_fn, 'r')
return hdf5_f
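
# Hedged usage sketch (illustrative only): a typical consumer of get_dataset(); the dataset name and the HDF5 layout
# ('train', 'test', 'neighbors', 'distances') follow the convention written by write_output() below.
#
#   f = get_dataset('glove-25-angular')
#   X_train, X_test = f['train'], f['test']
#   true_neighbors = f['neighbors'][:]    # ground-truth neighbor indices, one row per test point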
# Everything below this line is related to creating datasets
# You probably never need to do this at home, just rely on the prepared datasets at http://ann-benchmarks.com
def write_output(train, test, fn, distance, point_type='float', count=1000, SMILES=None, IDS=None):
from ann_benchmarks.algorithms.bruteforce import BruteForceBLAS
import sklearn.neighbors
import h5sparse
from scipy.sparse import issparse
# store SMILES first
if SMILES:
smile_fn = replace_last(fn, '.hdf5', '-SMILES.hdf5')
print('Write Smiles to File %s' % smile_fn)
f = h5sparse.File(smile_fn, 'w')
dt = h5py.special_dtype(vlen=bytes)
asciiList = [n.encode("ascii", "ignore") for n in SMILES]
f.create_dataset('smile', (len(asciiList), 1), dtype=dt, data=asciiList)
f.close()
print('Finish.')
if IDS:
smile_fn = replace_last(fn, '.hdf5', '-IDS.hdf5')
        print('Write IDs to File %s' % smile_fn)
f = h5sparse.File(smile_fn, 'w')
dt = h5py.special_dtype(vlen=bytes)
asciiList = [n.encode("ascii", "ignore") for n in IDS]
f.create_dataset('smile', (len(asciiList), 1), dtype=dt, data=asciiList)
f.close()
print('Write Dataset %s' % fn)
f = h5sparse.File(fn, 'w')
f.attrs['distance'] = distance
f.attrs['point_type'] = point_type
print('train size: %9d * %4d' % train.shape)
print('test size: %9d * %4d' % test.shape)
if issparse(train):
f.create_dataset('train',data=train)
else:
f.create_dataset('train', train.shape, dtype=train.dtype)[:] = train
if issparse(test):
f.create_dataset('test',data=test)
else:
f.create_dataset('test', test.shape, dtype=test.dtype)[:] = test
neighbors = f.create_dataset('neighbors', (test.shape[0], count), dtype='i')
distances = f.create_dataset('distances', (test.shape[0], count), dtype='f')
    # choose which method to use for computing the ground truth
if issparse(train):
train = train.toarray()
method = 'bruteforce'
if method == 'balltree':
tree = sklearn.neighbors.BallTree(train, leaf_size=1000000, metric=distance)
else:
bf = BruteForceBLAS(metric=distance, precision=train.dtype)
bf.fit(train)
print(test)
for i, x in enumerate(test):
if i % 1 == 0:
print('%d/%d...' % (i, test.shape[0]))
if method == 'balltree':
dist, ind = tree.query([x], k=count)
neighbors[i] = ind[0]
distances[i] = dist[0]
else:
res = list(bf.query_with_distances(x, count))
print(len(res))
res.sort(key=lambda t: t[-1])
neighbors[i] = [j for j, _ in res]
distances[i] = [d for _, d in res]
print(neighbors[i])
print(distances[i])
f.close()
print('Finish.')
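
# Hedged usage sketch (illustrative only): how the dataset builders below typically call write_output(); the output
# filename and the metric are assumptions for illustration, not values taken from this file.
#
#   X_train, X_test = train_test_split(numpy.array(X))
#   write_output(numpy.array(X_train), numpy.array(X_test), get_dataset_fn('my-dataset'), 'euclidean')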
def train_test_split(X, test_size=10000):
import sklearn.model_selection
print('Splitting %d*%d into train/test' % X.shape)
return sklearn.model_selection.train_test_split(X, test_size=test_size, random_state=1)
def glove(out_fn, d):
import zipfile
url = 'http://nlp.stanford.edu/data/glove.twitter.27B.zip'
fn = os.path.join('data', 'glove.twitter.27B.zip')
download(url, fn)
with zipfile.ZipFile(fn) as z:
print('preparing %s' % out_fn)
z_fn = 'glove.twitter.27B.%dd.txt' % d
X = []
for line in z.open(z_fn):
v = [float(x) for x in line.strip().split()[1:]]
X.append(numpy.array(v))
        X_train, X_test = train_test_split(numpy.array(X))
        write_output(numpy.array(X_train), numpy.array(X_test), out_fn, 'angular')
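
# Hedged usage sketch (illustrative only): building a GloVe dataset file locally; the DATASETS registry used by
# get_dataset() normally maps names such as 'glove-25-angular' to calls like this, but that mapping is assumed here.
#
#   glove(get_dataset_fn('glove-25-angular'), 25)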